Orange Pi5 kernel

Deprecated Linux kernel 5.10.110 for OrangePi 5/5B/5+ boards

3 Commits   0 Branches   0 Tags
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300    1) /* Copyright (c) 2013 Coraid, Inc.  See COPYING for GPL terms. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300    2) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300    3)  * aoecmd.c
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300    4)  * Filesystem request handling methods
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300    5)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300    6) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300    7) #include <linux/ata.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300    8) #include <linux/slab.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300    9) #include <linux/hdreg.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   10) #include <linux/blk-mq.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   11) #include <linux/skbuff.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   12) #include <linux/netdevice.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   13) #include <linux/genhd.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   14) #include <linux/moduleparam.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   15) #include <linux/workqueue.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   16) #include <linux/kthread.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   17) #include <net/net_namespace.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   18) #include <asm/unaligned.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   19) #include <linux/uio.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   20) #include "aoe.h"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   21) 
#define MAXIOC (8192)	/* default meant to avoid most soft lockups */

/* Forward declarations for helpers defined later in this file. */
static void ktcomplete(struct frame *, struct sk_buff *);
static int count_targets(struct aoedev *d, int *untainted);

static struct buf *nextbuf(struct aoedev *);

/* Seconds to keep retransmitting before the device is failed (tunable). */
static int aoe_deadsecs = 60 * 3;
module_param(aoe_deadsecs, int, 0644);
MODULE_PARM_DESC(aoe_deadsecs, "After aoe_deadsecs seconds, give up and fail dev.");

/* Per-MAC cap on outstanding packets (tunable). */
static int aoe_maxout = 64;
module_param(aoe_maxout, int, 0644);
MODULE_PARM_DESC(aoe_maxout,
	"Only aoe_maxout outstanding packets for every MAC on eX.Y.");

/* The number of online cpus during module initialization gives us a
 * convenient heuristic cap on the parallelism used for ktio threads
 * doing I/O completion.  It is not important that the cap equal the
 * actual number of running CPUs at any given time, but because of CPU
 * hotplug, we take care to use ncpus instead of using
 * num_online_cpus() after module initialization.
 */
static int ncpus;

/* mutex lock used for synchronization while thread spawning */
static DEFINE_MUTEX(ktio_spawn_lock);

/* One wait queue and one thread state per ktio completion thread. */
static wait_queue_head_t *ktiowq;
static struct ktstate *kts;

/* io completion queue */
struct iocq_ktio {
	struct list_head head;
	spinlock_t lock;
};
static struct iocq_ktio *iocq;

/* Shared zero page used as filler; initialization not visible in this chunk. */
static struct page *empty_page;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   61) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   62) static struct sk_buff *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   63) new_skb(ulong len)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   64) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   65) 	struct sk_buff *skb;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   66) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   67) 	skb = alloc_skb(len + MAX_HEADER, GFP_ATOMIC);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   68) 	if (skb) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   69) 		skb_reserve(skb, MAX_HEADER);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   70) 		skb_reset_mac_header(skb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   71) 		skb_reset_network_header(skb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   72) 		skb->protocol = __constant_htons(ETH_P_AOE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   73) 		skb_checksum_none_assert(skb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   74) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   75) 	return skb;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   76) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   77) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   78) static struct frame *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   79) getframe_deferred(struct aoedev *d, u32 tag)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   80) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   81) 	struct list_head *head, *pos, *nx;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   82) 	struct frame *f;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   83) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   84) 	head = &d->rexmitq;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   85) 	list_for_each_safe(pos, nx, head) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   86) 		f = list_entry(pos, struct frame, head);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   87) 		if (f->tag == tag) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   88) 			list_del(pos);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   89) 			return f;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   90) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   91) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   92) 	return NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   93) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   94) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   95) static struct frame *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   96) getframe(struct aoedev *d, u32 tag)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   97) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   98) 	struct frame *f;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   99) 	struct list_head *head, *pos, *nx;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  100) 	u32 n;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  101) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  102) 	n = tag % NFACTIVE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  103) 	head = &d->factive[n];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  104) 	list_for_each_safe(pos, nx, head) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  105) 		f = list_entry(pos, struct frame, head);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  106) 		if (f->tag == tag) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  107) 			list_del(pos);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  108) 			return f;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  109) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  110) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  111) 	return NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  112) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  113) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  114) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  115)  * Leave the top bit clear so we have tagspace for userland.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  116)  * The bottom 16 bits are the xmit tick for rexmit/rttavg processing.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  117)  * This driver reserves tag -1 to mean "unused frame."
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  118)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  119) static int
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  120) newtag(struct aoedev *d)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  121) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  122) 	register ulong n;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  123) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  124) 	n = jiffies & 0xffff;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  125) 	return n |= (++d->lasttag & 0x7fff) << 16;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  126) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  127) 
/* Initialize the Ethernet + AoE header h for an ATA command to target
 * t on device d.  Source MAC is taken from the target's current
 * interface, destination from the target's stored address.  Returns
 * the freshly generated host tag (also stored big-endian in h->tag).
 */
static u32
aoehdr_atainit(struct aoedev *d, struct aoetgt *t, struct aoe_hdr *h)
{
	u32 host_tag = newtag(d);

	memcpy(h->src, t->ifp->nd->dev_addr, sizeof h->src);
	memcpy(h->dst, t->addr, sizeof h->dst);
	h->type = __constant_cpu_to_be16(ETH_P_AOE);
	h->verfl = AOE_HVER;
	h->major = cpu_to_be16(d->aoemajor);
	h->minor = d->aoeminor;
	h->cmd = AOECMD_ATA;
	h->tag = cpu_to_be32(host_tag);

	return host_tag;
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  144) 
/* Scatter the low 48 bits of lba into the six one-byte LBA fields of
 * the ATA header, least-significant byte first (lba0 = bits 0-7, …,
 * lba5 = bits 40-47).  Each assignment truncates to the field's byte
 * after the in-place right shift of lba.
 */
static inline void
put_lba(struct aoe_atahdr *ah, sector_t lba)
{
	ah->lba0 = lba;
	ah->lba1 = lba >>= 8;
	ah->lba2 = lba >>= 8;
	ah->lba3 = lba >>= 8;
	ah->lba4 = lba >>= 8;
	ah->lba5 = lba >>= 8;
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  155) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  156) static struct aoeif *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  157) ifrotate(struct aoetgt *t)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  158) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  159) 	struct aoeif *ifp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  160) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  161) 	ifp = t->ifp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  162) 	ifp++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  163) 	if (ifp >= &t->ifs[NAOEIFS] || ifp->nd == NULL)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  164) 		ifp = t->ifs;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  165) 	if (ifp->nd == NULL)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  166) 		return NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  167) 	return t->ifp = ifp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  168) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  169) 
/* Park an skb on the per-device pool for later reuse.  Uses the
 * unlocked __skb_queue_tail, so callers presumably hold the device
 * lock — TODO confirm against call sites outside this chunk.
 */
static void
skb_pool_put(struct aoedev *d, struct sk_buff *skb)
{
	__skb_queue_tail(&d->skbpool, skb);
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  175) 
/* Fetch a reusable skb from the per-device pool.  The head of the
 * pool is only taken when its data is no longer shared
 * (shinfo dataref == 1); otherwise, if the pool is not yet full, a
 * fresh minimum-size skb is allocated instead.  Returns NULL when
 * neither is possible.
 */
static struct sk_buff *
skb_pool_get(struct aoedev *d)
{
	struct sk_buff *skb = skb_peek(&d->skbpool);

	if (skb && atomic_read(&skb_shinfo(skb)->dataref) == 1) {
		__skb_unlink(skb, &d->skbpool);
		return skb;
	}
	if (skb_queue_len(&d->skbpool) < NSKBPOOLMAX &&
	    (skb = new_skb(ETH_ZLEN)))
		return skb;

	return NULL;
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  191) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  192) void
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  193) aoe_freetframe(struct frame *f)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  194) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  195) 	struct aoetgt *t;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  196) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  197) 	t = f->t;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  198) 	f->buf = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  199) 	memset(&f->iter, 0, sizeof(f->iter));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  200) 	f->r_skb = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  201) 	f->flags = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  202) 	list_add(&f->head, &t->ffree);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  203) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  204) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  205) static struct frame *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  206) newtframe(struct aoedev *d, struct aoetgt *t)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  207) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  208) 	struct frame *f;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  209) 	struct sk_buff *skb;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  210) 	struct list_head *pos;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  211) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  212) 	if (list_empty(&t->ffree)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  213) 		if (t->falloc >= NSKBPOOLMAX*2)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  214) 			return NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  215) 		f = kcalloc(1, sizeof(*f), GFP_ATOMIC);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  216) 		if (f == NULL)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  217) 			return NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  218) 		t->falloc++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  219) 		f->t = t;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  220) 	} else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  221) 		pos = t->ffree.next;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  222) 		list_del(pos);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  223) 		f = list_entry(pos, struct frame, head);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  224) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  225) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  226) 	skb = f->skb;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  227) 	if (skb == NULL) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  228) 		f->skb = skb = new_skb(ETH_ZLEN);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  229) 		if (!skb) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  230) bail:			aoe_freetframe(f);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  231) 			return NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  232) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  233) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  234) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  235) 	if (atomic_read(&skb_shinfo(skb)->dataref) != 1) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  236) 		skb = skb_pool_get(d);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  237) 		if (skb == NULL)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  238) 			goto bail;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  239) 		skb_pool_put(d, f->skb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  240) 		f->skb = skb;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  241) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  242) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  243) 	skb->truesize -= skb->data_len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  244) 	skb_shinfo(skb)->nr_frags = skb->data_len = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  245) 	skb_trim(skb, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  246) 	return f;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  247) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  248) 
/* Pick the next target in round-robin order (starting after the last
 * used target, d->tgt) that has transmit capacity, and get a frame
 * for it.  Untainted targets are preferred; tainted ones are tried
 * only on a second pass after a full loop finds nothing.  When every
 * untainted target has zero outstanding frames and no frame could be
 * obtained, the device is flagged DEVFL_KICKME so it gets kicked
 * later.  Returns NULL if no frame is available.
 */
static struct frame *
newframe(struct aoedev *d)
{
	struct frame *f;
	struct aoetgt *t, **tt;
	int totout = 0;		/* outstanding frames on untainted targets */
	int use_tainted;
	int has_untainted;

	if (!d->targets || !d->targets[0]) {
		printk(KERN_ERR "aoe: NULL TARGETS!\n");
		return NULL;
	}
	tt = d->tgt;	/* last used target */
	for (use_tainted = 0, has_untainted = 0;;) {
		tt++;
		/* wrap at array end or at first empty slot */
		if (tt >= &d->targets[d->ntargets] || !*tt)
			tt = d->targets;
		t = *tt;
		if (!t->taint) {
			has_untainted = 1;
			totout += t->nout;
		}
		if (t->nout < t->maxout
		&& (use_tainted || !t->taint)
		&& t->ifp->nd) {
			f = newtframe(d, t);
			if (f) {
				ifrotate(t);
				d->tgt = tt;	/* remember for next round-robin start */
				return f;
			}
		}
		if (tt == d->tgt) {	/* we've looped and found nada */
			if (!use_tainted && !has_untainted)
				use_tainted = 1;	/* retry, allowing tainted targets */
			else
				break;
		}
	}
	if (totout == 0) {
		d->kicked++;
		d->flags |= DEVFL_KICKME;
	}
	return NULL;
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  295) 
/* Attach the bio segments covered by iter to the skb as page
 * fragments (zero-copy: the pages themselves are referenced, not
 * copied).  Caller is responsible for updating skb->len/data_len/
 * truesize afterwards (see ata_rw_frameinit).
 */
static void
skb_fillup(struct sk_buff *skb, struct bio *bio, struct bvec_iter iter)
{
	int frag = 0;
	struct bio_vec bv;

	__bio_for_each_segment(bv, bio, iter, iter)
		skb_fill_page_desc(skb, frag++, bv.bv_page,
				   bv.bv_offset, bv.bv_len);
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  306) 
/* Insert a frame into its device's active-frame table, bucketed by
 * tag % NFACTIVE.  getframe() searches the same bucket on response.
 */
static void
fhash(struct frame *f)
{
	struct aoedev *d = f->t->d;
	u32 n;

	n = f->tag % NFACTIVE;
	list_add_tail(&f->head, &d->factive[n]);
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  316) 
/* Build the on-wire AoE ATA read/write command in f->skb from the
 * frame's iter (sector and byte count), tag it, hash it active, and
 * bump the target's outstanding count.  For writes the bio pages are
 * attached as skb fragments; for reads the packet carries headers
 * only.  The final cmdstat encodes read/write and LBA28/LBA48 via
 * writebit/extbit OR-ed onto ATA_CMD_PIO_READ.
 */
static void
ata_rw_frameinit(struct frame *f)
{
	struct aoetgt *t;
	struct aoe_hdr *h;
	struct aoe_atahdr *ah;
	struct sk_buff *skb;
	char writebit, extbit;

	skb = f->skb;
	h = (struct aoe_hdr *) skb_mac_header(skb);
	ah = (struct aoe_atahdr *) (h + 1);
	skb_put(skb, sizeof(*h) + sizeof(*ah));
	memset(h, 0, skb->len);

	writebit = 0x10;
	extbit = 0x4;

	t = f->t;
	f->tag = aoehdr_atainit(t->d, t, h);
	fhash(f);
	t->nout++;
	f->waited = 0;
	f->waited_total = 0;

	/* set up ata header */
	ah->scnt = f->iter.bi_size >> 9;	/* 512-byte sectors */
	put_lba(ah, f->iter.bi_sector);
	if (t->d->flags & DEVFL_EXT) {
		/* LBA48 addressing */
		ah->aflags |= AOEAFL_EXT;
	} else {
		/* LBA28: clear ext bit and set legacy device bits */
		extbit = 0;
		ah->lba3 &= 0x0f;
		ah->lba3 |= 0xe0;	/* LBA bit + obsolete 0xa0 */
	}
	if (f->buf && bio_data_dir(f->buf->bio) == WRITE) {
		skb_fillup(skb, f->buf->bio, f->iter);
		ah->aflags |= AOEAFL_WRITE;
		/* account for the attached payload fragments */
		skb->len += f->iter.bi_size;
		skb->data_len = f->iter.bi_size;
		skb->truesize += f->iter.bi_size;
		t->wpkts++;
	} else {
		t->rpkts++;
		writebit = 0;
	}

	ah->cmdstat = ATA_CMD_PIO_READ | writebit | extbit;
	skb->dev = t->ifp->nd;
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  367) 
/* Issue one ATA read/write packet for the device's current buffer.
 * Carves up to maxbcnt (or DEFAULTBCNT) bytes off the buffer's iter
 * into a new frame, builds and transmits a clone of the frame's skb
 * (the original is kept for retransmit).  Returns 1 if a packet was
 * prepared, 0 when there is no buffer or no frame available.
 */
static int
aoecmd_ata_rw(struct aoedev *d)
{
	struct frame *f;
	struct buf *buf;
	struct sk_buff *skb;
	struct sk_buff_head queue;

	buf = nextbuf(d);
	if (buf == NULL)
		return 0;
	f = newframe(d);
	if (f == NULL)
		return 0;

	/* initialize the headers & frame */
	f->buf = buf;
	f->iter = buf->iter;
	/* clamp this frame's span to the device's per-packet byte count */
	f->iter.bi_size = min_t(unsigned long,
				d->maxbcnt ?: DEFAULTBCNT,
				f->iter.bi_size);
	bio_advance_iter(buf->bio, &buf->iter, f->iter.bi_size);

	/* buffer fully carved into frames; stop tracking it as in-progress */
	if (!buf->iter.bi_size)
		d->ip.buf = NULL;

	/* mark all tracking fields and load out */
	buf->nframesout += 1;

	ata_rw_frameinit(f);

	/* transmit a clone so f->skb survives for retransmission */
	skb = skb_clone(f->skb, GFP_ATOMIC);
	if (skb) {
		f->sent = ktime_get();
		__skb_queue_head_init(&queue);
		__skb_queue_tail(&queue, skb);
		aoenet_xmit(&queue);
	}
	return 1;
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  408) 
/* some callers cannot sleep, and they can call this function,
 * transmitting the packets later, when interrupts are on
 */
/* Build one broadcast AoE config-query packet per eligible network
 * interface and append them to the caller's queue (not transmitted
 * here).  Each ifp is dev_hold/dev_put bracketed while its packet is
 * set up; iteration is under rcu_read_lock.
 */
static void
aoecmd_cfg_pkts(ushort aoemajor, unsigned char aoeminor, struct sk_buff_head *queue)
{
	struct aoe_hdr *h;
	struct aoe_cfghdr *ch;
	struct sk_buff *skb;
	struct net_device *ifp;

	rcu_read_lock();
	for_each_netdev_rcu(&init_net, ifp) {
		dev_hold(ifp);
		if (!is_aoe_netif(ifp))
			goto cont;

		skb = new_skb(sizeof *h + sizeof *ch);
		if (skb == NULL) {
			printk(KERN_INFO "aoe: skb alloc failure\n");
			goto cont;
		}
		skb_put(skb, sizeof *h + sizeof *ch);
		skb->dev = ifp;
		__skb_queue_tail(queue, skb);
		h = (struct aoe_hdr *) skb_mac_header(skb);
		memset(h, 0, sizeof *h + sizeof *ch);

		/* broadcast query: all-ones destination MAC */
		memset(h->dst, 0xff, sizeof h->dst);
		memcpy(h->src, ifp->dev_addr, sizeof h->src);
		h->type = __constant_cpu_to_be16(ETH_P_AOE);
		h->verfl = AOE_HVER;
		h->major = cpu_to_be16(aoemajor);
		h->minor = aoeminor;
		h->cmd = AOECMD_CFG;

cont:
		dev_put(ifp);
	}
	rcu_read_unlock();
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  450) 
/* Retransmit frame f on device d: rotate to the next interface,
 * assign a fresh tag, re-hash the frame active, update the header's
 * tag and MAC addresses, and transmit a clone of the skb.  A
 * human-readable retransmit record is sent to the aoe character
 * device unless this is a probe frame.  If no interface is available
 * the frame is completed (failed) via ktcomplete.
 */
static void
resend(struct aoedev *d, struct frame *f)
{
	struct sk_buff *skb;
	struct sk_buff_head queue;
	struct aoe_hdr *h;
	struct aoetgt *t;
	char buf[128];
	u32 n;

	t = f->t;
	n = newtag(d);
	skb = f->skb;
	if (ifrotate(t) == NULL) {
		/* probably can't happen, but set it up to fail anyway */
		pr_info("aoe: resend: no interfaces to rotate to.\n");
		ktcomplete(f, NULL);
		return;
	}
	h = (struct aoe_hdr *) skb_mac_header(skb);

	if (!(f->flags & FFL_PROBE)) {
		snprintf(buf, sizeof(buf),
			"%15s e%ld.%d oldtag=%08x@%08lx newtag=%08x s=%pm d=%pm nout=%d\n",
			"retransmit", d->aoemajor, d->aoeminor,
			f->tag, jiffies, n,
			h->src, h->dst, t->nout);
		aoechr_error(buf);
	}

	/* re-tag and re-address for the (possibly new) interface */
	f->tag = n;
	fhash(f);
	h->tag = cpu_to_be32(n);
	memcpy(h->dst, t->addr, sizeof h->dst);
	memcpy(h->src, t->ifp->nd->dev_addr, sizeof h->src);

	skb->dev = t->ifp->nd;
	/* clone so the original skb remains available for further resends */
	skb = skb_clone(skb, GFP_ATOMIC);
	if (skb == NULL)
		return;
	f->sent = ktime_get();
	__skb_queue_head_init(&queue);
	__skb_queue_tail(&queue, skb);
	aoenet_xmit(&queue);
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  496) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  497) static int
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  498) tsince_hr(struct frame *f)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  499) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  500) 	u64 delta = ktime_to_ns(ktime_sub(ktime_get(), f->sent));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  501) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  502) 	/* delta is normally under 4.2 seconds, avoid 64-bit division */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  503) 	if (likely(delta <= UINT_MAX))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  504) 		return (u32)delta / NSEC_PER_USEC;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  505) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  506) 	/* avoid overflow after 71 minutes */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  507) 	if (delta > ((u64)INT_MAX * NSEC_PER_USEC))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  508) 		return INT_MAX;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  509) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  510) 	return div_u64(delta, NSEC_PER_USEC);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  511) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  512) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  513) static int
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  514) tsince(u32 tag)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  515) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  516) 	int n;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  517) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  518) 	n = jiffies & 0xffff;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  519) 	n -= tag & 0xffff;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  520) 	if (n < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  521) 		n += 1<<16;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  522) 	return jiffies_to_usecs(n + 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  523) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  524) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  525) static struct aoeif *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  526) getif(struct aoetgt *t, struct net_device *nd)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  527) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  528) 	struct aoeif *p, *e;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  529) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  530) 	p = t->ifs;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  531) 	e = p + NAOEIFS;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  532) 	for (; p < e; p++)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  533) 		if (p->nd == nd)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  534) 			return p;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  535) 	return NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  536) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  537) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  538) static void
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  539) ejectif(struct aoetgt *t, struct aoeif *ifp)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  540) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  541) 	struct aoeif *e;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  542) 	struct net_device *nd;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  543) 	ulong n;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  544) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  545) 	nd = ifp->nd;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  546) 	e = t->ifs + NAOEIFS - 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  547) 	n = (e - ifp) * sizeof *ifp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  548) 	memmove(ifp, ifp+1, n);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  549) 	e->nd = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  550) 	dev_put(nd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  551) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  552) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  553) static struct frame *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  554) reassign_frame(struct frame *f)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  555) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  556) 	struct frame *nf;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  557) 	struct sk_buff *skb;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  558) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  559) 	nf = newframe(f->t->d);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  560) 	if (!nf)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  561) 		return NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  562) 	if (nf->t == f->t) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  563) 		aoe_freetframe(nf);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  564) 		return NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  565) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  566) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  567) 	skb = nf->skb;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  568) 	nf->skb = f->skb;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  569) 	nf->buf = f->buf;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  570) 	nf->iter = f->iter;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  571) 	nf->waited = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  572) 	nf->waited_total = f->waited_total;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  573) 	nf->sent = f->sent;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  574) 	f->skb = skb;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  575) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  576) 	return nf;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  577) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  578) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  579) static void
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  580) probe(struct aoetgt *t)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  581) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  582) 	struct aoedev *d;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  583) 	struct frame *f;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  584) 	struct sk_buff *skb;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  585) 	struct sk_buff_head queue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  586) 	size_t n, m;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  587) 	int frag;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  588) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  589) 	d = t->d;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  590) 	f = newtframe(d, t);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  591) 	if (!f) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  592) 		pr_err("%s %pm for e%ld.%d: %s\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  593) 			"aoe: cannot probe remote address",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  594) 			t->addr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  595) 			(long) d->aoemajor, d->aoeminor,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  596) 			"no frame available");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  597) 		return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  598) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  599) 	f->flags |= FFL_PROBE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  600) 	ifrotate(t);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  601) 	f->iter.bi_size = t->d->maxbcnt ? t->d->maxbcnt : DEFAULTBCNT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  602) 	ata_rw_frameinit(f);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  603) 	skb = f->skb;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  604) 	for (frag = 0, n = f->iter.bi_size; n > 0; ++frag, n -= m) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  605) 		if (n < PAGE_SIZE)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  606) 			m = n;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  607) 		else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  608) 			m = PAGE_SIZE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  609) 		skb_fill_page_desc(skb, frag, empty_page, 0, m);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  610) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  611) 	skb->len += f->iter.bi_size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  612) 	skb->data_len = f->iter.bi_size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  613) 	skb->truesize += f->iter.bi_size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  614) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  615) 	skb = skb_clone(f->skb, GFP_ATOMIC);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  616) 	if (skb) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  617) 		f->sent = ktime_get();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  618) 		__skb_queue_head_init(&queue);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  619) 		__skb_queue_tail(&queue, skb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  620) 		aoenet_xmit(&queue);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  621) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  622) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  623) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  624) static long
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  625) rto(struct aoedev *d)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  626) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  627) 	long t;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  628) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  629) 	t = 2 * d->rttavg >> RTTSCALE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  630) 	t += 8 * d->rttdev >> RTTDSCALE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  631) 	if (t == 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  632) 		t = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  633) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  634) 	return t;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  635) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  636) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  637) static void
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  638) rexmit_deferred(struct aoedev *d)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  639) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  640) 	struct aoetgt *t;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  641) 	struct frame *f;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  642) 	struct frame *nf;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  643) 	struct list_head *pos, *nx, *head;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  644) 	int since;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  645) 	int untainted;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  646) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  647) 	count_targets(d, &untainted);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  648) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  649) 	head = &d->rexmitq;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  650) 	list_for_each_safe(pos, nx, head) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  651) 		f = list_entry(pos, struct frame, head);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  652) 		t = f->t;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  653) 		if (t->taint) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  654) 			if (!(f->flags & FFL_PROBE)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  655) 				nf = reassign_frame(f);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  656) 				if (nf) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  657) 					if (t->nout_probes == 0
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  658) 					&& untainted > 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  659) 						probe(t);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  660) 						t->nout_probes++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  661) 					}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  662) 					list_replace(&f->head, &nf->head);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  663) 					pos = &nf->head;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  664) 					aoe_freetframe(f);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  665) 					f = nf;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  666) 					t = f->t;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  667) 				}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  668) 			} else if (untainted < 1) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  669) 				/* don't probe w/o other untainted aoetgts */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  670) 				goto stop_probe;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  671) 			} else if (tsince_hr(f) < t->taint * rto(d)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  672) 				/* reprobe slowly when taint is high */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  673) 				continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  674) 			}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  675) 		} else if (f->flags & FFL_PROBE) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  676) stop_probe:		/* don't probe untainted aoetgts */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  677) 			list_del(pos);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  678) 			aoe_freetframe(f);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  679) 			/* leaving d->kicked, because this is routine */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  680) 			f->t->d->flags |= DEVFL_KICKME;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  681) 			continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  682) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  683) 		if (t->nout >= t->maxout)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  684) 			continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  685) 		list_del(pos);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  686) 		t->nout++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  687) 		if (f->flags & FFL_PROBE)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  688) 			t->nout_probes++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  689) 		since = tsince_hr(f);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  690) 		f->waited += since;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  691) 		f->waited_total += since;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  692) 		resend(d, f);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  693) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  694) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  695) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  696) /* An aoetgt accumulates demerits quickly, and successful
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  697)  * probing redeems the aoetgt slowly.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  698)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  699) static void
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  700) scorn(struct aoetgt *t)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  701) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  702) 	int n;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  703) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  704) 	n = t->taint++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  705) 	t->taint += t->taint * 2;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  706) 	if (n > t->taint)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  707) 		t->taint = n;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  708) 	if (t->taint > MAX_TAINT)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  709) 		t->taint = MAX_TAINT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  710) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  711) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  712) static int
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  713) count_targets(struct aoedev *d, int *untainted)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  714) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  715) 	int i, good;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  716) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  717) 	for (i = good = 0; i < d->ntargets && d->targets[i]; ++i)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  718) 		if (d->targets[i]->taint == 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  719) 			good++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  720) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  721) 	if (untainted)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  722) 		*untainted = good;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  723) 	return i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  724) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  725) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  726) static void
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  727) rexmit_timer(struct timer_list *timer)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  728) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  729) 	struct aoedev *d;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  730) 	struct aoetgt *t;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  731) 	struct aoeif *ifp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  732) 	struct frame *f;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  733) 	struct list_head *head, *pos, *nx;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  734) 	LIST_HEAD(flist);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  735) 	register long timeout;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  736) 	ulong flags, n;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  737) 	int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  738) 	int utgts;	/* number of aoetgt descriptors (not slots) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  739) 	int since;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  740) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  741) 	d = from_timer(d, timer, timer);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  742) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  743) 	spin_lock_irqsave(&d->lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  744) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  745) 	/* timeout based on observed timings and variations */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  746) 	timeout = rto(d);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  747) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  748) 	utgts = count_targets(d, NULL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  749) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  750) 	if (d->flags & DEVFL_TKILL) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  751) 		spin_unlock_irqrestore(&d->lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  752) 		return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  753) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  754) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  755) 	/* collect all frames to rexmit into flist */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  756) 	for (i = 0; i < NFACTIVE; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  757) 		head = &d->factive[i];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  758) 		list_for_each_safe(pos, nx, head) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  759) 			f = list_entry(pos, struct frame, head);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  760) 			if (tsince_hr(f) < timeout)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  761) 				break;	/* end of expired frames */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  762) 			/* move to flist for later processing */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  763) 			list_move_tail(pos, &flist);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  764) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  765) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  766) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  767) 	/* process expired frames */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  768) 	while (!list_empty(&flist)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  769) 		pos = flist.next;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  770) 		f = list_entry(pos, struct frame, head);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  771) 		since = tsince_hr(f);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  772) 		n = f->waited_total + since;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  773) 		n /= USEC_PER_SEC;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  774) 		if (aoe_deadsecs
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  775) 		&& n > aoe_deadsecs
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  776) 		&& !(f->flags & FFL_PROBE)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  777) 			/* Waited too long.  Device failure.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  778) 			 * Hang all frames on first hash bucket for downdev
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  779) 			 * to clean up.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  780) 			 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  781) 			list_splice(&flist, &d->factive[0]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  782) 			aoedev_downdev(d);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  783) 			goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  784) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  785) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  786) 		t = f->t;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  787) 		n = f->waited + since;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  788) 		n /= USEC_PER_SEC;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  789) 		if (aoe_deadsecs && utgts > 0
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  790) 		&& (n > aoe_deadsecs / utgts || n > HARD_SCORN_SECS))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  791) 			scorn(t); /* avoid this target */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  792) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  793) 		if (t->maxout != 1) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  794) 			t->ssthresh = t->maxout / 2;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  795) 			t->maxout = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  796) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  797) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  798) 		if (f->flags & FFL_PROBE) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  799) 			t->nout_probes--;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  800) 		} else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  801) 			ifp = getif(t, f->skb->dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  802) 			if (ifp && ++ifp->lost > (t->nframes << 1)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  803) 			&& (ifp != t->ifs || t->ifs[1].nd)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  804) 				ejectif(t, ifp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  805) 				ifp = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  806) 			}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  807) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  808) 		list_move_tail(pos, &d->rexmitq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  809) 		t->nout--;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  810) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  811) 	rexmit_deferred(d);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  812) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  813) out:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  814) 	if ((d->flags & DEVFL_KICKME) && d->blkq) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  815) 		d->flags &= ~DEVFL_KICKME;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  816) 		blk_mq_run_hw_queues(d->blkq, true);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  817) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  818) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  819) 	d->timer.expires = jiffies + TIMERTICK;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  820) 	add_timer(&d->timer);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  821) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  822) 	spin_unlock_irqrestore(&d->lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  823) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  824) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  825) static void
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  826) bufinit(struct buf *buf, struct request *rq, struct bio *bio)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  827) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  828) 	memset(buf, 0, sizeof(*buf));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  829) 	buf->rq = rq;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  830) 	buf->bio = bio;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  831) 	buf->iter = bio->bi_iter;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  832) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  833) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  834) static struct buf *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  835) nextbuf(struct aoedev *d)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  836) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  837) 	struct request *rq;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  838) 	struct request_queue *q;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  839) 	struct aoe_req *req;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  840) 	struct buf *buf;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  841) 	struct bio *bio;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  842) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  843) 	q = d->blkq;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  844) 	if (q == NULL)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  845) 		return NULL;	/* initializing */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  846) 	if (d->ip.buf)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  847) 		return d->ip.buf;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  848) 	rq = d->ip.rq;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  849) 	if (rq == NULL) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  850) 		rq = list_first_entry_or_null(&d->rq_list, struct request,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  851) 						queuelist);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  852) 		if (rq == NULL)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  853) 			return NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  854) 		list_del_init(&rq->queuelist);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  855) 		blk_mq_start_request(rq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  856) 		d->ip.rq = rq;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  857) 		d->ip.nxbio = rq->bio;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  858) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  859) 		req = blk_mq_rq_to_pdu(rq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  860) 		req->nr_bios = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  861) 		__rq_for_each_bio(bio, rq)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  862) 			req->nr_bios++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  863) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  864) 	buf = mempool_alloc(d->bufpool, GFP_ATOMIC);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  865) 	if (buf == NULL) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  866) 		pr_err("aoe: nextbuf: unable to mempool_alloc!\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  867) 		return NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  868) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  869) 	bio = d->ip.nxbio;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  870) 	bufinit(buf, rq, bio);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  871) 	bio = bio->bi_next;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  872) 	d->ip.nxbio = bio;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  873) 	if (bio == NULL)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  874) 		d->ip.rq = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  875) 	return d->ip.buf = buf;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  876) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  877) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  878) /* enters with d->lock held */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  879) void
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  880) aoecmd_work(struct aoedev *d)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  881) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  882) 	rexmit_deferred(d);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  883) 	while (aoecmd_ata_rw(d))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  884) 		;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  885) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  886) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  887) /* this function performs work that has been deferred until sleeping is OK
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  888)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  889) void
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  890) aoecmd_sleepwork(struct work_struct *work)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  891) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  892) 	struct aoedev *d = container_of(work, struct aoedev, work);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  893) 	struct block_device *bd;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  894) 	u64 ssize;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  895) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  896) 	if (d->flags & DEVFL_GDALLOC)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  897) 		aoeblk_gdalloc(d);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  898) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  899) 	if (d->flags & DEVFL_NEWSIZE) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  900) 		ssize = get_capacity(d->gd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  901) 		bd = bdget_disk(d->gd, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  902) 		if (bd) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  903) 			bd_set_nr_sectors(bd, ssize);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  904) 			bdput(bd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  905) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  906) 		spin_lock_irq(&d->lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  907) 		d->flags |= DEVFL_UP;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  908) 		d->flags &= ~DEVFL_NEWSIZE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  909) 		spin_unlock_irq(&d->lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  910) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  911) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  912) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  913) static void
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  914) ata_ident_fixstring(u16 *id, int ns)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  915) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  916) 	u16 s;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  917) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  918) 	while (ns-- > 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  919) 		s = *id;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  920) 		*id++ = s >> 8 | s << 8;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  921) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  922) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  923) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  924) static void
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  925) ataid_complete(struct aoedev *d, struct aoetgt *t, unsigned char *id)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  926) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  927) 	u64 ssize;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  928) 	u16 n;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  929) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  930) 	/* word 83: command set supported */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  931) 	n = get_unaligned_le16(&id[83 << 1]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  932) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  933) 	/* word 86: command set/feature enabled */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  934) 	n |= get_unaligned_le16(&id[86 << 1]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  935) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  936) 	if (n & (1<<10)) {	/* bit 10: LBA 48 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  937) 		d->flags |= DEVFL_EXT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  938) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  939) 		/* word 100: number lba48 sectors */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  940) 		ssize = get_unaligned_le64(&id[100 << 1]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  941) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  942) 		/* set as in ide-disk.c:init_idedisk_capacity */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  943) 		d->geo.cylinders = ssize;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  944) 		d->geo.cylinders /= (255 * 63);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  945) 		d->geo.heads = 255;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  946) 		d->geo.sectors = 63;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  947) 	} else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  948) 		d->flags &= ~DEVFL_EXT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  949) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  950) 		/* number lba28 sectors */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  951) 		ssize = get_unaligned_le32(&id[60 << 1]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  952) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  953) 		/* NOTE: obsolete in ATA 6 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  954) 		d->geo.cylinders = get_unaligned_le16(&id[54 << 1]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  955) 		d->geo.heads = get_unaligned_le16(&id[55 << 1]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  956) 		d->geo.sectors = get_unaligned_le16(&id[56 << 1]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  957) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  958) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  959) 	ata_ident_fixstring((u16 *) &id[10<<1], 10);	/* serial */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  960) 	ata_ident_fixstring((u16 *) &id[23<<1], 4);	/* firmware */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  961) 	ata_ident_fixstring((u16 *) &id[27<<1], 20);	/* model */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  962) 	memcpy(d->ident, id, sizeof(d->ident));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  963) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  964) 	if (d->ssize != ssize)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  965) 		printk(KERN_INFO
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  966) 			"aoe: %pm e%ld.%d v%04x has %llu sectors\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  967) 			t->addr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  968) 			d->aoemajor, d->aoeminor,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  969) 			d->fw_ver, (long long)ssize);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  970) 	d->ssize = ssize;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  971) 	d->geo.start = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  972) 	if (d->flags & (DEVFL_GDALLOC|DEVFL_NEWSIZE))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  973) 		return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  974) 	if (d->gd != NULL) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  975) 		set_capacity(d->gd, ssize);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  976) 		d->flags |= DEVFL_NEWSIZE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  977) 	} else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  978) 		d->flags |= DEVFL_GDALLOC;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  979) 	schedule_work(&d->work);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  980) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  981) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  982) static void
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  983) calc_rttavg(struct aoedev *d, struct aoetgt *t, int rtt)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  984) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  985) 	register long n;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  986) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  987) 	n = rtt;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  988) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  989) 	/* cf. Congestion Avoidance and Control, Jacobson & Karels, 1988 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  990) 	n -= d->rttavg >> RTTSCALE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  991) 	d->rttavg += n;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  992) 	if (n < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  993) 		n = -n;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  994) 	n -= d->rttdev >> RTTDSCALE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  995) 	d->rttdev += n;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  996) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  997) 	if (!t || t->maxout >= t->nframes)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  998) 		return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  999) 	if (t->maxout < t->ssthresh)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1000) 		t->maxout += 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1001) 	else if (t->nout == t->maxout && t->next_cwnd-- == 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1002) 		t->maxout += 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1003) 		t->next_cwnd = t->maxout;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1004) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1005) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1006) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1007) static struct aoetgt *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1008) gettgt(struct aoedev *d, char *addr)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1009) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1010) 	struct aoetgt **t, **e;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1011) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1012) 	t = d->targets;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1013) 	e = t + d->ntargets;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1014) 	for (; t < e && *t; t++)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1015) 		if (memcmp((*t)->addr, addr, sizeof((*t)->addr)) == 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1016) 			return *t;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1017) 	return NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1018) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1019) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1020) static void
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1021) bvcpy(struct sk_buff *skb, struct bio *bio, struct bvec_iter iter, long cnt)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1022) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1023) 	int soff = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1024) 	struct bio_vec bv;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1025) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1026) 	iter.bi_size = cnt;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1027) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1028) 	__bio_for_each_segment(bv, bio, iter, iter) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1029) 		char *p = kmap_atomic(bv.bv_page) + bv.bv_offset;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1030) 		skb_copy_bits(skb, soff, p, bv.bv_len);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1031) 		kunmap_atomic(p);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1032) 		soff += bv.bv_len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1033) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1034) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1035) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1036) void
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1037) aoe_end_request(struct aoedev *d, struct request *rq, int fastfail)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1038) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1039) 	struct bio *bio;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1040) 	int bok;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1041) 	struct request_queue *q;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1042) 	blk_status_t err = BLK_STS_OK;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1043) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1044) 	q = d->blkq;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1045) 	if (rq == d->ip.rq)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1046) 		d->ip.rq = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1047) 	do {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1048) 		bio = rq->bio;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1049) 		bok = !fastfail && !bio->bi_status;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1050) 		if (!bok)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1051) 			err = BLK_STS_IOERR;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1052) 	} while (blk_update_request(rq, bok ? BLK_STS_OK : BLK_STS_IOERR, bio->bi_iter.bi_size));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1053) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1054) 	__blk_mq_end_request(rq, err);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1055) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1056) 	/* cf. http://lkml.org/lkml/2006/10/31/28 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1057) 	if (!fastfail)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1058) 		blk_mq_run_hw_queues(q, true);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1059) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1060) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1061) static void
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1062) aoe_end_buf(struct aoedev *d, struct buf *buf)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1063) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1064) 	struct request *rq = buf->rq;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1065) 	struct aoe_req *req = blk_mq_rq_to_pdu(rq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1066) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1067) 	if (buf == d->ip.buf)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1068) 		d->ip.buf = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1069) 	mempool_free(buf, d->bufpool);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1070) 	if (--req->nr_bios == 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1071) 		aoe_end_request(d, rq, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1072) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1073) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1074) static void
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1075) ktiocomplete(struct frame *f)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1076) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1077) 	struct aoe_hdr *hin, *hout;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1078) 	struct aoe_atahdr *ahin, *ahout;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1079) 	struct buf *buf;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1080) 	struct sk_buff *skb;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1081) 	struct aoetgt *t;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1082) 	struct aoeif *ifp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1083) 	struct aoedev *d;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1084) 	long n;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1085) 	int untainted;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1086) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1087) 	if (f == NULL)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1088) 		return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1089) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1090) 	t = f->t;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1091) 	d = t->d;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1092) 	skb = f->r_skb;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1093) 	buf = f->buf;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1094) 	if (f->flags & FFL_PROBE)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1095) 		goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1096) 	if (!skb)		/* just fail the buf. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1097) 		goto noskb;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1098) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1099) 	hout = (struct aoe_hdr *) skb_mac_header(f->skb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1100) 	ahout = (struct aoe_atahdr *) (hout+1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1101) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1102) 	hin = (struct aoe_hdr *) skb->data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1103) 	skb_pull(skb, sizeof(*hin));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1104) 	ahin = (struct aoe_atahdr *) skb->data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1105) 	skb_pull(skb, sizeof(*ahin));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1106) 	if (ahin->cmdstat & 0xa9) {	/* these bits cleared on success */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1107) 		pr_err("aoe: ata error cmd=%2.2Xh stat=%2.2Xh from e%ld.%d\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1108) 			ahout->cmdstat, ahin->cmdstat,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1109) 			d->aoemajor, d->aoeminor);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1110) noskb:		if (buf)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1111) 			buf->bio->bi_status = BLK_STS_IOERR;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1112) 		goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1113) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1114) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1115) 	n = ahout->scnt << 9;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1116) 	switch (ahout->cmdstat) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1117) 	case ATA_CMD_PIO_READ:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1118) 	case ATA_CMD_PIO_READ_EXT:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1119) 		if (skb->len < n) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1120) 			pr_err("%s e%ld.%d.  skb->len=%d need=%ld\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1121) 				"aoe: runt data size in read from",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1122) 				(long) d->aoemajor, d->aoeminor,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1123) 			       skb->len, n);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1124) 			buf->bio->bi_status = BLK_STS_IOERR;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1125) 			break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1126) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1127) 		if (n > f->iter.bi_size) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1128) 			pr_err_ratelimited("%s e%ld.%d.  bytes=%ld need=%u\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1129) 				"aoe: too-large data size in read from",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1130) 				(long) d->aoemajor, d->aoeminor,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1131) 				n, f->iter.bi_size);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1132) 			buf->bio->bi_status = BLK_STS_IOERR;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1133) 			break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1134) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1135) 		bvcpy(skb, f->buf->bio, f->iter, n);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1136) 		fallthrough;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1137) 	case ATA_CMD_PIO_WRITE:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1138) 	case ATA_CMD_PIO_WRITE_EXT:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1139) 		spin_lock_irq(&d->lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1140) 		ifp = getif(t, skb->dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1141) 		if (ifp)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1142) 			ifp->lost = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1143) 		spin_unlock_irq(&d->lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1144) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1145) 	case ATA_CMD_ID_ATA:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1146) 		if (skb->len < 512) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1147) 			pr_info("%s e%ld.%d.  skb->len=%d need=512\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1148) 				"aoe: runt data size in ataid from",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1149) 				(long) d->aoemajor, d->aoeminor,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1150) 				skb->len);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1151) 			break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1152) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1153) 		if (skb_linearize(skb))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1154) 			break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1155) 		spin_lock_irq(&d->lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1156) 		ataid_complete(d, t, skb->data);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1157) 		spin_unlock_irq(&d->lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1158) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1159) 	default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1160) 		pr_info("aoe: unrecognized ata command %2.2Xh for %d.%d\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1161) 			ahout->cmdstat,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1162) 			be16_to_cpu(get_unaligned(&hin->major)),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1163) 			hin->minor);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1164) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1165) out:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1166) 	spin_lock_irq(&d->lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1167) 	if (t->taint > 0
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1168) 	&& --t->taint > 0
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1169) 	&& t->nout_probes == 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1170) 		count_targets(d, &untainted);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1171) 		if (untainted > 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1172) 			probe(t);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1173) 			t->nout_probes++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1174) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1175) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1176) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1177) 	aoe_freetframe(f);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1178) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1179) 	if (buf && --buf->nframesout == 0 && buf->iter.bi_size == 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1180) 		aoe_end_buf(d, buf);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1181) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1182) 	spin_unlock_irq(&d->lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1183) 	aoedev_put(d);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1184) 	dev_kfree_skb(skb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1185) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1186) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1187) /* Enters with iocq.lock held.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1188)  * Returns true iff responses needing processing remain.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1189)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1190) static int
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1191) ktio(int id)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1192) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1193) 	struct frame *f;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1194) 	struct list_head *pos;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1195) 	int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1196) 	int actual_id;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1197) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1198) 	for (i = 0; ; ++i) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1199) 		if (i == MAXIOC)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1200) 			return 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1201) 		if (list_empty(&iocq[id].head))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1202) 			return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1203) 		pos = iocq[id].head.next;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1204) 		list_del(pos);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1205) 		f = list_entry(pos, struct frame, head);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1206) 		spin_unlock_irq(&iocq[id].lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1207) 		ktiocomplete(f);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1208) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1209) 		/* Figure out if extra threads are required. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1210) 		actual_id = f->t->d->aoeminor % ncpus;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1211) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1212) 		if (!kts[actual_id].active) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1213) 			BUG_ON(id != 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1214) 			mutex_lock(&ktio_spawn_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1215) 			if (!kts[actual_id].active
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1216) 				&& aoe_ktstart(&kts[actual_id]) == 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1217) 				kts[actual_id].active = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1218) 			mutex_unlock(&ktio_spawn_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1219) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1220) 		spin_lock_irq(&iocq[id].lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1221) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1222) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1223) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1224) static int
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1225) kthread(void *vp)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1226) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1227) 	struct ktstate *k;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1228) 	DECLARE_WAITQUEUE(wait, current);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1229) 	int more;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1230) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1231) 	k = vp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1232) 	current->flags |= PF_NOFREEZE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1233) 	set_user_nice(current, -10);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1234) 	complete(&k->rendez);	/* tell spawner we're running */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1235) 	do {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1236) 		spin_lock_irq(k->lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1237) 		more = k->fn(k->id);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1238) 		if (!more) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1239) 			add_wait_queue(k->waitq, &wait);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1240) 			__set_current_state(TASK_INTERRUPTIBLE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1241) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1242) 		spin_unlock_irq(k->lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1243) 		if (!more) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1244) 			schedule();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1245) 			remove_wait_queue(k->waitq, &wait);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1246) 		} else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1247) 			cond_resched();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1248) 	} while (!kthread_should_stop());
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1249) 	complete(&k->rendez);	/* tell spawner we're stopping */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1250) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1251) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1252) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1253) void
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1254) aoe_ktstop(struct ktstate *k)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1255) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1256) 	kthread_stop(k->task);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1257) 	wait_for_completion(&k->rendez);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1258) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1259) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1260) int
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1261) aoe_ktstart(struct ktstate *k)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1262) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1263) 	struct task_struct *task;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1264) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1265) 	init_completion(&k->rendez);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1266) 	task = kthread_run(kthread, k, "%s", k->name);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1267) 	if (task == NULL || IS_ERR(task))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1268) 		return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1269) 	k->task = task;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1270) 	wait_for_completion(&k->rendez); /* allow kthread to start */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1271) 	init_completion(&k->rendez);	/* for waiting for exit later */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1272) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1273) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1274) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1275) /* pass it off to kthreads for processing */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1276) static void
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1277) ktcomplete(struct frame *f, struct sk_buff *skb)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1278) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1279) 	int id;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1280) 	ulong flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1281) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1282) 	f->r_skb = skb;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1283) 	id = f->t->d->aoeminor % ncpus;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1284) 	spin_lock_irqsave(&iocq[id].lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1285) 	if (!kts[id].active) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1286) 		spin_unlock_irqrestore(&iocq[id].lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1287) 		/* The thread with id has not been spawned yet,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1288) 		 * so delegate the work to the main thread and
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1289) 		 * try spawning a new thread.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1290) 		 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1291) 		id = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1292) 		spin_lock_irqsave(&iocq[id].lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1293) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1294) 	list_add_tail(&f->head, &iocq[id].head);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1295) 	spin_unlock_irqrestore(&iocq[id].lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1296) 	wake_up(&ktiowq[id]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1297) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1298) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1299) struct sk_buff *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1300) aoecmd_ata_rsp(struct sk_buff *skb)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1301) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1302) 	struct aoedev *d;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1303) 	struct aoe_hdr *h;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1304) 	struct frame *f;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1305) 	u32 n;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1306) 	ulong flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1307) 	char ebuf[128];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1308) 	u16 aoemajor;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1309) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1310) 	h = (struct aoe_hdr *) skb->data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1311) 	aoemajor = be16_to_cpu(get_unaligned(&h->major));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1312) 	d = aoedev_by_aoeaddr(aoemajor, h->minor, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1313) 	if (d == NULL) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1314) 		snprintf(ebuf, sizeof ebuf, "aoecmd_ata_rsp: ata response "
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1315) 			"for unknown device %d.%d\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1316) 			aoemajor, h->minor);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1317) 		aoechr_error(ebuf);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1318) 		return skb;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1319) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1320) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1321) 	spin_lock_irqsave(&d->lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1322) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1323) 	n = be32_to_cpu(get_unaligned(&h->tag));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1324) 	f = getframe(d, n);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1325) 	if (f) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1326) 		calc_rttavg(d, f->t, tsince_hr(f));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1327) 		f->t->nout--;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1328) 		if (f->flags & FFL_PROBE)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1329) 			f->t->nout_probes--;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1330) 	} else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1331) 		f = getframe_deferred(d, n);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1332) 		if (f) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1333) 			calc_rttavg(d, NULL, tsince_hr(f));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1334) 		} else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1335) 			calc_rttavg(d, NULL, tsince(n));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1336) 			spin_unlock_irqrestore(&d->lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1337) 			aoedev_put(d);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1338) 			snprintf(ebuf, sizeof(ebuf),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1339) 				 "%15s e%d.%d    tag=%08x@%08lx s=%pm d=%pm\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1340) 				 "unexpected rsp",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1341) 				 get_unaligned_be16(&h->major),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1342) 				 h->minor,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1343) 				 get_unaligned_be32(&h->tag),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1344) 				 jiffies,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1345) 				 h->src,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1346) 				 h->dst);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1347) 			aoechr_error(ebuf);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1348) 			return skb;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1349) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1350) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1351) 	aoecmd_work(d);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1352) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1353) 	spin_unlock_irqrestore(&d->lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1354) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1355) 	ktcomplete(f, skb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1356) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1357) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1358) 	 * Note here that we do not perform an aoedev_put, as we are
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1359) 	 * leaving this reference for the ktio to release.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1360) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1361) 	return NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1362) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1363) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1364) void
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1365) aoecmd_cfg(ushort aoemajor, unsigned char aoeminor)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1366) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1367) 	struct sk_buff_head queue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1368) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1369) 	__skb_queue_head_init(&queue);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1370) 	aoecmd_cfg_pkts(aoemajor, aoeminor, &queue);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1371) 	aoenet_xmit(&queue);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1372) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1373) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1374) struct sk_buff *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1375) aoecmd_ata_id(struct aoedev *d)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1376) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1377) 	struct aoe_hdr *h;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1378) 	struct aoe_atahdr *ah;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1379) 	struct frame *f;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1380) 	struct sk_buff *skb;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1381) 	struct aoetgt *t;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1382) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1383) 	f = newframe(d);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1384) 	if (f == NULL)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1385) 		return NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1386) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1387) 	t = *d->tgt;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1388) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1389) 	/* initialize the headers & frame */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1390) 	skb = f->skb;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1391) 	h = (struct aoe_hdr *) skb_mac_header(skb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1392) 	ah = (struct aoe_atahdr *) (h+1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1393) 	skb_put(skb, sizeof *h + sizeof *ah);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1394) 	memset(h, 0, skb->len);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1395) 	f->tag = aoehdr_atainit(d, t, h);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1396) 	fhash(f);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1397) 	t->nout++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1398) 	f->waited = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1399) 	f->waited_total = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1400) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1401) 	/* set up ata header */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1402) 	ah->scnt = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1403) 	ah->cmdstat = ATA_CMD_ID_ATA;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1404) 	ah->lba3 = 0xa0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1405) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1406) 	skb->dev = t->ifp->nd;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1407) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1408) 	d->rttavg = RTTAVG_INIT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1409) 	d->rttdev = RTTDEV_INIT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1410) 	d->timer.function = rexmit_timer;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1411) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1412) 	skb = skb_clone(skb, GFP_ATOMIC);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1413) 	if (skb)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1414) 		f->sent = ktime_get();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1415) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1416) 	return skb;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1417) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1418) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1419) static struct aoetgt **
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1420) grow_targets(struct aoedev *d)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1421) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1422) 	ulong oldn, newn;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1423) 	struct aoetgt **tt;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1424) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1425) 	oldn = d->ntargets;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1426) 	newn = oldn * 2;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1427) 	tt = kcalloc(newn, sizeof(*d->targets), GFP_ATOMIC);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1428) 	if (!tt)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1429) 		return NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1430) 	memmove(tt, d->targets, sizeof(*d->targets) * oldn);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1431) 	d->tgt = tt + (d->tgt - d->targets);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1432) 	kfree(d->targets);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1433) 	d->targets = tt;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1434) 	d->ntargets = newn;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1435) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1436) 	return &d->targets[oldn];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1437) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1438) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1439) static struct aoetgt *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1440) addtgt(struct aoedev *d, char *addr, ulong nframes)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1441) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1442) 	struct aoetgt *t, **tt, **te;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1443) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1444) 	tt = d->targets;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1445) 	te = tt + d->ntargets;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1446) 	for (; tt < te && *tt; tt++)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1447) 		;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1448) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1449) 	if (tt == te) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1450) 		tt = grow_targets(d);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1451) 		if (!tt)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1452) 			goto nomem;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1453) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1454) 	t = kzalloc(sizeof(*t), GFP_ATOMIC);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1455) 	if (!t)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1456) 		goto nomem;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1457) 	t->nframes = nframes;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1458) 	t->d = d;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1459) 	memcpy(t->addr, addr, sizeof t->addr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1460) 	t->ifp = t->ifs;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1461) 	aoecmd_wreset(t);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1462) 	t->maxout = t->nframes / 2;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1463) 	INIT_LIST_HEAD(&t->ffree);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1464) 	return *tt = t;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1465) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1466)  nomem:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1467) 	pr_info("aoe: cannot allocate memory to add target\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1468) 	return NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1469) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1470) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1471) static void
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1472) setdbcnt(struct aoedev *d)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1473) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1474) 	struct aoetgt **t, **e;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1475) 	int bcnt = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1476) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1477) 	t = d->targets;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1478) 	e = t + d->ntargets;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1479) 	for (; t < e && *t; t++)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1480) 		if (bcnt == 0 || bcnt > (*t)->minbcnt)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1481) 			bcnt = (*t)->minbcnt;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1482) 	if (bcnt != d->maxbcnt) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1483) 		d->maxbcnt = bcnt;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1484) 		pr_info("aoe: e%ld.%d: setting %d byte data frames\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1485) 			d->aoemajor, d->aoeminor, bcnt);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1486) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1487) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1488) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1489) static void
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1490) setifbcnt(struct aoetgt *t, struct net_device *nd, int bcnt)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1491) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1492) 	struct aoedev *d;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1493) 	struct aoeif *p, *e;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1494) 	int minbcnt;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1495) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1496) 	d = t->d;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1497) 	minbcnt = bcnt;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1498) 	p = t->ifs;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1499) 	e = p + NAOEIFS;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1500) 	for (; p < e; p++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1501) 		if (p->nd == NULL)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1502) 			break;		/* end of the valid interfaces */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1503) 		if (p->nd == nd) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1504) 			p->bcnt = bcnt;	/* we're updating */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1505) 			nd = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1506) 		} else if (minbcnt > p->bcnt)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1507) 			minbcnt = p->bcnt; /* find the min interface */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1508) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1509) 	if (nd) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1510) 		if (p == e) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1511) 			pr_err("aoe: device setifbcnt failure; too many interfaces.\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1512) 			return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1513) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1514) 		dev_hold(nd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1515) 		p->nd = nd;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1516) 		p->bcnt = bcnt;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1517) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1518) 	t->minbcnt = minbcnt;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1519) 	setdbcnt(d);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1520) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1521) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1522) void
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1523) aoecmd_cfg_rsp(struct sk_buff *skb)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1524) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1525) 	struct aoedev *d;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1526) 	struct aoe_hdr *h;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1527) 	struct aoe_cfghdr *ch;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1528) 	struct aoetgt *t;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1529) 	ulong flags, aoemajor;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1530) 	struct sk_buff *sl;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1531) 	struct sk_buff_head queue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1532) 	u16 n;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1533) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1534) 	sl = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1535) 	h = (struct aoe_hdr *) skb_mac_header(skb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1536) 	ch = (struct aoe_cfghdr *) (h+1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1537) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1538) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1539) 	 * Enough people have their dip switches set backwards to
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1540) 	 * warrant a loud message for this special case.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1541) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1542) 	aoemajor = get_unaligned_be16(&h->major);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1543) 	if (aoemajor == 0xfff) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1544) 		printk(KERN_ERR "aoe: Warning: shelf address is all ones.  "
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1545) 			"Check shelf dip switches.\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1546) 		return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1547) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1548) 	if (aoemajor == 0xffff) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1549) 		pr_info("aoe: e%ld.%d: broadcast shelf number invalid\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1550) 			aoemajor, (int) h->minor);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1551) 		return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1552) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1553) 	if (h->minor == 0xff) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1554) 		pr_info("aoe: e%ld.%d: broadcast slot number invalid\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1555) 			aoemajor, (int) h->minor);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1556) 		return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1557) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1558) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1559) 	n = be16_to_cpu(ch->bufcnt);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1560) 	if (n > aoe_maxout)	/* keep it reasonable */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1561) 		n = aoe_maxout;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1562) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1563) 	d = aoedev_by_aoeaddr(aoemajor, h->minor, 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1564) 	if (d == NULL) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1565) 		pr_info("aoe: device allocation failure\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1566) 		return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1567) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1568) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1569) 	spin_lock_irqsave(&d->lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1570) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1571) 	t = gettgt(d, h->src);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1572) 	if (t) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1573) 		t->nframes = n;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1574) 		if (n < t->maxout)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1575) 			aoecmd_wreset(t);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1576) 	} else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1577) 		t = addtgt(d, h->src, n);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1578) 		if (!t)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1579) 			goto bail;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1580) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1581) 	n = skb->dev->mtu;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1582) 	n -= sizeof(struct aoe_hdr) + sizeof(struct aoe_atahdr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1583) 	n /= 512;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1584) 	if (n > ch->scnt)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1585) 		n = ch->scnt;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1586) 	n = n ? n * 512 : DEFAULTBCNT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1587) 	setifbcnt(t, skb->dev, n);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1588) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1589) 	/* don't change users' perspective */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1590) 	if (d->nopen == 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1591) 		d->fw_ver = be16_to_cpu(ch->fwver);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1592) 		sl = aoecmd_ata_id(d);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1593) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1594) bail:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1595) 	spin_unlock_irqrestore(&d->lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1596) 	aoedev_put(d);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1597) 	if (sl) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1598) 		__skb_queue_head_init(&queue);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1599) 		__skb_queue_tail(&queue, sl);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1600) 		aoenet_xmit(&queue);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1601) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1602) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1603) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1604) void
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1605) aoecmd_wreset(struct aoetgt *t)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1606) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1607) 	t->maxout = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1608) 	t->ssthresh = t->nframes / 2;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1609) 	t->next_cwnd = t->nframes;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1610) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1611) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1612) void
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1613) aoecmd_cleanslate(struct aoedev *d)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1614) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1615) 	struct aoetgt **t, **te;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1616) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1617) 	d->rttavg = RTTAVG_INIT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1618) 	d->rttdev = RTTDEV_INIT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1619) 	d->maxbcnt = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1620) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1621) 	t = d->targets;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1622) 	te = t + d->ntargets;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1623) 	for (; t < te && *t; t++)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1624) 		aoecmd_wreset(*t);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1625) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1626) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1627) void
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1628) aoe_failbuf(struct aoedev *d, struct buf *buf)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1629) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1630) 	if (buf == NULL)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1631) 		return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1632) 	buf->iter.bi_size = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1633) 	buf->bio->bi_status = BLK_STS_IOERR;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1634) 	if (buf->nframesout == 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1635) 		aoe_end_buf(d, buf);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1636) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1637) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1638) void
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1639) aoe_flush_iocq(void)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1640) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1641) 	int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1642) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1643) 	for (i = 0; i < ncpus; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1644) 		if (kts[i].active)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1645) 			aoe_flush_iocq_by_index(i);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1646) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1647) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1648) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1649) void
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1650) aoe_flush_iocq_by_index(int id)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1651) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1652) 	struct frame *f;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1653) 	struct aoedev *d;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1654) 	LIST_HEAD(flist);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1655) 	struct list_head *pos;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1656) 	struct sk_buff *skb;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1657) 	ulong flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1658) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1659) 	spin_lock_irqsave(&iocq[id].lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1660) 	list_splice_init(&iocq[id].head, &flist);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1661) 	spin_unlock_irqrestore(&iocq[id].lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1662) 	while (!list_empty(&flist)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1663) 		pos = flist.next;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1664) 		list_del(pos);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1665) 		f = list_entry(pos, struct frame, head);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1666) 		d = f->t->d;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1667) 		skb = f->r_skb;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1668) 		spin_lock_irqsave(&d->lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1669) 		if (f->buf) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1670) 			f->buf->nframesout--;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1671) 			aoe_failbuf(d, f->buf);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1672) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1673) 		aoe_freetframe(f);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1674) 		spin_unlock_irqrestore(&d->lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1675) 		dev_kfree_skb(skb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1676) 		aoedev_put(d);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1677) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1678) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1679) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1680) int __init
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1681) aoecmd_init(void)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1682) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1683) 	void *p;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1684) 	int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1685) 	int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1686) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1687) 	/* get_zeroed_page returns page with ref count 1 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1688) 	p = (void *) get_zeroed_page(GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1689) 	if (!p)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1690) 		return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1691) 	empty_page = virt_to_page(p);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1692) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1693) 	ncpus = num_online_cpus();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1694) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1695) 	iocq = kcalloc(ncpus, sizeof(struct iocq_ktio), GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1696) 	if (!iocq)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1697) 		return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1698) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1699) 	kts = kcalloc(ncpus, sizeof(struct ktstate), GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1700) 	if (!kts) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1701) 		ret = -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1702) 		goto kts_fail;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1703) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1704) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1705) 	ktiowq = kcalloc(ncpus, sizeof(wait_queue_head_t), GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1706) 	if (!ktiowq) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1707) 		ret = -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1708) 		goto ktiowq_fail;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1709) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1710) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1711) 	mutex_init(&ktio_spawn_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1712) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1713) 	for (i = 0; i < ncpus; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1714) 		INIT_LIST_HEAD(&iocq[i].head);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1715) 		spin_lock_init(&iocq[i].lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1716) 		init_waitqueue_head(&ktiowq[i]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1717) 		snprintf(kts[i].name, sizeof(kts[i].name), "aoe_ktio%d", i);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1718) 		kts[i].fn = ktio;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1719) 		kts[i].waitq = &ktiowq[i];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1720) 		kts[i].lock = &iocq[i].lock;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1721) 		kts[i].id = i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1722) 		kts[i].active = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1723) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1724) 	kts[0].active = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1725) 	if (aoe_ktstart(&kts[0])) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1726) 		ret = -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1727) 		goto ktstart_fail;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1728) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1729) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1730) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1731) ktstart_fail:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1732) 	kfree(ktiowq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1733) ktiowq_fail:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1734) 	kfree(kts);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1735) kts_fail:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1736) 	kfree(iocq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1737) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1738) 	return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1739) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1740) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1741) void
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1742) aoecmd_exit(void)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1743) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1744) 	int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1745) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1746) 	for (i = 0; i < ncpus; i++)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1747) 		if (kts[i].active)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1748) 			aoe_ktstop(&kts[i]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1749) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1750) 	aoe_flush_iocq();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1751) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1752) 	/* Free up the iocq and thread speicific configuration
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1753) 	* allocated during startup.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1754) 	*/
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1755) 	kfree(iocq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1756) 	kfree(kts);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1757) 	kfree(ktiowq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1758) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1759) 	free_page((unsigned long) page_address(empty_page));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1760) 	empty_page = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1761) }