Orange Pi 5 kernel

Deprecated Linux kernel 5.10.110 for the Orange Pi 5/5B/5 Plus boards.

The listing below is drivers/block/aoe/aoedev.c as carried in this tree: the ATA over Ethernet (AoE) block driver's device layer, which maintains the list of discovered AoE devices.

/* Copyright (c) 2013 Coraid, Inc.  See COPYING for GPL terms. */
/*
 * aoedev.c
 * AoE device utility functions; maintains device list.
 */

#include <linux/hdreg.h>
#include <linux/blk-mq.h>
#include <linux/netdevice.h>
#include <linux/delay.h>
#include <linux/slab.h>
#include <linux/bitmap.h>
#include <linux/kdev_t.h>
#include <linux/moduleparam.h>
#include <linux/string.h>
#include "aoe.h"

static void freetgt(struct aoedev *d, struct aoetgt *t);
static void skbpoolfree(struct aoedev *d);

static int aoe_dyndevs = 1;
module_param(aoe_dyndevs, int, 0644);
MODULE_PARM_DESC(aoe_dyndevs, "Use dynamic minor numbers for devices.");

static struct aoedev *devlist;
static DEFINE_SPINLOCK(devlist_lock);

/* Because some systems will have one, many, or no
 *   - partitions,
 *   - slots per shelf,
 *   - or shelves,
 * we need some flexibility in the way the minor numbers
 * are allocated.  So they are dynamic.
 */
#define N_DEVS ((1U<<MINORBITS)/AOE_PARTITIONS)
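/* With the stock values of MINORBITS (20) and AOE_PARTITIONS (16),
 * both defined outside this file, this works out to 65536 devices.
 */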

static DEFINE_SPINLOCK(used_minors_lock);
static DECLARE_BITMAP(used_minors, N_DEVS);

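/* Claim the first free slot in used_minors and report the
 * corresponding base system minor (slot * AOE_PARTITIONS).
 * Returns -1 when every slot is taken.
 */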
static int
minor_get_dyn(ulong *sysminor)
{
	ulong flags;
	ulong n;
	int error = 0;

	spin_lock_irqsave(&used_minors_lock, flags);
	n = find_first_zero_bit(used_minors, N_DEVS);
	if (n < N_DEVS)
		set_bit(n, used_minors);
	else
		error = -1;
	spin_unlock_irqrestore(&used_minors_lock, flags);

	*sysminor = n * AOE_PARTITIONS;
	return error;
}

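/* Map the AoE address e<aoemaj>.<aoemin> onto a fixed slot,
 * aoemaj * NPERSHELF + aoemin, and claim it in used_minors.  Fails if
 * aoemin is out of range, the slot exceeds N_DEVS, or the slot is
 * already taken.
 */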
static int
minor_get_static(ulong *sysminor, ulong aoemaj, int aoemin)
{
	ulong flags;
	ulong n;
	int error = 0;
	enum {
		/* for backwards compatibility when !aoe_dyndevs,
		 * a static number of supported slots per shelf */
		NPERSHELF = 16,
	};

	if (aoemin >= NPERSHELF) {
		pr_err("aoe: %s %d slots per shelf\n",
			"static minor device numbers support only",
			NPERSHELF);
		error = -1;
		goto out;
	}

	n = aoemaj * NPERSHELF + aoemin;
	if (n >= N_DEVS) {
		pr_err("aoe: %s with e%ld.%d\n",
			"cannot use static minor device numbers",
			aoemaj, aoemin);
		error = -1;
		goto out;
	}

	spin_lock_irqsave(&used_minors_lock, flags);
	if (test_bit(n, used_minors)) {
		pr_err("aoe: %s %lu\n",
			"existing device already has static minor number",
			n);
		error = -1;
	} else
		set_bit(n, used_minors);
	spin_unlock_irqrestore(&used_minors_lock, flags);
	*sysminor = n * AOE_PARTITIONS;
out:
	return error;
}

static int
minor_get(ulong *sysminor, ulong aoemaj, int aoemin)
{
	if (aoe_dyndevs)
		return minor_get_dyn(sysminor);
	else
		return minor_get_static(sysminor, aoemaj, aoemin);
}

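/* Give a device's slot back to the used_minors bitmap. */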
static void
minor_free(ulong minor)
{
	ulong flags;

	minor /= AOE_PARTITIONS;
	BUG_ON(minor >= N_DEVS);

	spin_lock_irqsave(&used_minors_lock, flags);
	BUG_ON(!test_bit(minor, used_minors));
	clear_bit(minor, used_minors);
	spin_unlock_irqrestore(&used_minors_lock, flags);
}

/*
 * Users who grab a pointer to the device with aoedev_by_aoeaddr
 * automatically get a reference count and must be responsible
 * for performing an aoedev_put.  With the addition of async
 * kthread processing I'm no longer confident that we can
 * guarantee consistency in the face of device flushes.
 *
 * For the time being, we only bother to add extra references for
 * frames sitting on the iocq.  When the kthreads finish processing
 * these frames, they will aoedev_put the device.
 */

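/* Drop a reference taken via aoedev_by_aoeaddr.  The device itself is
 * only freed later, by flush()/freedev(), once it has been marked
 * DEVFL_TKILL.
 */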
void
aoedev_put(struct aoedev *d)
{
	ulong flags;

	spin_lock_irqsave(&devlist_lock, flags);
	d->ref--;
	spin_unlock_irqrestore(&devlist_lock, flags);
}

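/* Per-device placeholder timer: re-arms itself once per second until
 * the device is marked DEVFL_TKILL.
 */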
static void
dummy_timer(struct timer_list *t)
{
	struct aoedev *d;

	d = from_timer(d, t, timer);
	if (d->flags & DEVFL_TKILL)
		return;
	d->timer.expires = jiffies + HZ;
	add_timer(&d->timer);
}

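/* Fail the in-process buffer and every bio still chained on the
 * in-process request, completing the request once no bios remain.
 */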
static void
aoe_failip(struct aoedev *d)
{
	struct request *rq;
	struct aoe_req *req;
	struct bio *bio;

	aoe_failbuf(d, d->ip.buf);
	rq = d->ip.rq;
	if (rq == NULL)
		return;

	req = blk_mq_rq_to_pdu(rq);
	while ((bio = d->ip.nxbio)) {
		bio->bi_status = BLK_STS_IOERR;
		d->ip.nxbio = bio->bi_next;
		req->nr_bios--;
	}

	if (!req->nr_bios)
		aoe_end_request(d, rq, 0);
}

static void
downdev_frame(struct list_head *pos)
{
	struct frame *f;

	f = list_entry(pos, struct frame, head);
	list_del(pos);
	if (f->buf) {
		f->buf->nframesout--;
		aoe_failbuf(f->t->d, f->buf);
	}
	aoe_freetframe(f);
}

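/* Take the device down: fail every queued and outstanding frame,
 * reset each target's window, error out the in-process request, and
 * cycle the block queue so pending requests are errored now that
 * DEVFL_UP is clear.
 */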
void
aoedev_downdev(struct aoedev *d)
{
	struct aoetgt *t, **tt, **te;
	struct list_head *head, *pos, *nx;
	int i;

	d->flags &= ~DEVFL_UP;

	/* clean out active and to-be-retransmitted buffers */
	for (i = 0; i < NFACTIVE; i++) {
		head = &d->factive[i];
		list_for_each_safe(pos, nx, head)
			downdev_frame(pos);
	}
	head = &d->rexmitq;
	list_for_each_safe(pos, nx, head)
		downdev_frame(pos);

	/* reset window dressings */
	tt = d->targets;
	te = tt + d->ntargets;
	for (; tt < te && (t = *tt); tt++) {
		aoecmd_wreset(t);
		t->nout = 0;
	}

	/* clean out the in-process request (if any) */
	aoe_failip(d);

	/* fast fail all pending I/O */
	if (d->blkq) {
		/* UP is cleared, freeze+quiesce to ensure all are errored */
		blk_mq_freeze_queue(d->blkq);
		blk_mq_quiesce_queue(d->blkq);
		blk_mq_unquiesce_queue(d->blkq);
		blk_mq_unfreeze_queue(d->blkq);
	}

	if (d->gd)
		set_capacity(d->gd, 0);
}

/* return whether the user asked for this particular
 * device to be flushed
 */
static int
user_req(char *s, size_t slen, struct aoedev *d)
{
	const char *p;
	size_t lim;

	if (!d->gd)
		return 0;
	p = kbasename(d->gd->disk_name);
	lim = sizeof(d->gd->disk_name);
	lim -= p - d->gd->disk_name;
	if (slen < lim)
		lim = slen;

	return !strncmp(s, p, lim);
}

static void
freedev(struct aoedev *d)
{
	struct aoetgt **t, **e;
	int freeing = 0;
	unsigned long flags;

	spin_lock_irqsave(&d->lock, flags);
	if (d->flags & DEVFL_TKILL
	&& !(d->flags & DEVFL_FREEING)) {
		d->flags |= DEVFL_FREEING;
		freeing = 1;
	}
	spin_unlock_irqrestore(&d->lock, flags);
	if (!freeing)
		return;

	del_timer_sync(&d->timer);
	if (d->gd) {
		aoedisk_rm_debugfs(d);
		del_gendisk(d->gd);
		put_disk(d->gd);
		blk_mq_free_tag_set(&d->tag_set);
		blk_cleanup_queue(d->blkq);
	}
	t = d->targets;
	e = t + d->ntargets;
	for (; t < e && *t; t++)
		freetgt(d, *t);

	mempool_destroy(d->bufpool);
	skbpoolfree(d);
	minor_free(d->sysminor);

	spin_lock_irqsave(&d->lock, flags);
	d->flags |= DEVFL_FREED;
	spin_unlock_irqrestore(&d->lock, flags);
}

enum flush_parms {
	NOT_EXITING = 0,
	EXITING = 1,
};

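/* Tear devices down in up to three passes.  Pass one takes down and
 * marks DEVFL_TKILL each device selected by the user string (a
 * specific device name, "all", or, by default, only devices that are
 * already down and unused), or every device when exiting; pass two
 * calls freedev for devices marked DEVFL_TKILL; pass three unlinks
 * and frees devices that freedev has marked DEVFL_FREED.
 */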
static int
flush(const char __user *str, size_t cnt, int exiting)
{
	ulong flags;
	struct aoedev *d, **dd;
	char buf[16];
	int all = 0;
	int specified = 0;	/* flush a specific device */
	unsigned int skipflags;

	skipflags = DEVFL_GDALLOC | DEVFL_NEWSIZE | DEVFL_TKILL;

	if (!exiting && cnt >= 3) {
		if (cnt > sizeof buf)
			cnt = sizeof buf;
		if (copy_from_user(buf, str, cnt))
			return -EFAULT;
		all = !strncmp(buf, "all", 3);
		if (!all)
			specified = 1;
	}

	flush_scheduled_work();
	/* pass one: do aoedev_downdev, which might sleep */
restart1:
	spin_lock_irqsave(&devlist_lock, flags);
	for (d = devlist; d; d = d->next) {
		spin_lock(&d->lock);
		if (d->flags & DEVFL_TKILL)
			goto cont;

		if (exiting) {
			/* unconditionally take each device down */
		} else if (specified) {
			if (!user_req(buf, cnt, d))
				goto cont;
		} else if ((!all && (d->flags & DEVFL_UP))
		|| d->flags & skipflags
		|| d->nopen
		|| d->ref)
			goto cont;

		spin_unlock(&d->lock);
		spin_unlock_irqrestore(&devlist_lock, flags);
		aoedev_downdev(d);
		d->flags |= DEVFL_TKILL;
		goto restart1;
cont:
		spin_unlock(&d->lock);
	}
	spin_unlock_irqrestore(&devlist_lock, flags);

	/* pass two: call freedev, which might sleep,
	 * for aoedevs marked with DEVFL_TKILL
	 */
restart2:
	spin_lock_irqsave(&devlist_lock, flags);
	for (d = devlist; d; d = d->next) {
		spin_lock(&d->lock);
		if (d->flags & DEVFL_TKILL
		&& !(d->flags & DEVFL_FREEING)) {
			spin_unlock(&d->lock);
			spin_unlock_irqrestore(&devlist_lock, flags);
			freedev(d);
			goto restart2;
		}
		spin_unlock(&d->lock);
	}

	/* pass three: remove aoedevs marked with DEVFL_FREED */
	for (dd = &devlist, d = *dd; d; d = *dd) {
		struct aoedev *doomed = NULL;

		spin_lock(&d->lock);
		if (d->flags & DEVFL_FREED) {
			*dd = d->next;
			doomed = d;
		} else {
			dd = &d->next;
		}
		spin_unlock(&d->lock);
		if (doomed)
			kfree(doomed->targets);
		kfree(doomed);
	}
	spin_unlock_irqrestore(&devlist_lock, flags);

	return 0;
}

int
aoedev_flush(const char __user *str, size_t cnt)
{
	return flush(str, cnt, NOT_EXITING);
}

/* This has been confirmed to occur once with Tms=3*1000 due to the
 * driver changing link and not processing its transmit ring.  The
 * problem is hard enough to solve by returning an error that I'm
 * still punting on "solving" this.
 */
static void
skbfree(struct sk_buff *skb)
{
	enum { Sms = 250, Tms = 30 * 1000};
	int i = Tms / Sms;

	if (skb == NULL)
		return;
	while (atomic_read(&skb_shinfo(skb)->dataref) != 1 && i-- > 0)
		msleep(Sms);
	if (i < 0) {
		printk(KERN_ERR
			"aoe: %s holds ref: %s\n",
			skb->dev ? skb->dev->name : "netif",
			"cannot free skb -- memory leaked.");
		return;
	}
	skb->truesize -= skb->data_len;
	skb_shinfo(skb)->nr_frags = skb->data_len = 0;
	skb_trim(skb, 0);
	dev_kfree_skb(skb);
}

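/* Free every skb still sitting in the device's skb pool. */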
static void
skbpoolfree(struct aoedev *d)
{
	struct sk_buff *skb, *tmp;

	skb_queue_walk_safe(&d->skbpool, skb, tmp)
		skbfree(skb);

	__skb_queue_head_init(&d->skbpool);
}

/* find it or allocate it */
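/* The returned device carries a reference that the caller must drop
 * with aoedev_put.  NULL is returned when the device is being torn
 * down, when allocation fails, or when do_alloc is zero and there is
 * no existing match.
 */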
struct aoedev *
aoedev_by_aoeaddr(ulong maj, int min, int do_alloc)
{
	struct aoedev *d;
	int i;
	ulong flags;
	ulong sysminor = 0;

	spin_lock_irqsave(&devlist_lock, flags);

	for (d=devlist; d; d=d->next)
		if (d->aoemajor == maj && d->aoeminor == min) {
			spin_lock(&d->lock);
			if (d->flags & DEVFL_TKILL) {
				spin_unlock(&d->lock);
				d = NULL;
				goto out;
			}
			d->ref++;
			spin_unlock(&d->lock);
			break;
		}
	if (d || !do_alloc || minor_get(&sysminor, maj, min) < 0)
		goto out;
	d = kcalloc(1, sizeof *d, GFP_ATOMIC);
	if (!d)
		goto out;
	d->targets = kcalloc(NTARGETS, sizeof(*d->targets), GFP_ATOMIC);
	if (!d->targets) {
		kfree(d);
		d = NULL;
		goto out;
	}
	d->ntargets = NTARGETS;
	INIT_WORK(&d->work, aoecmd_sleepwork);
	spin_lock_init(&d->lock);
	INIT_LIST_HEAD(&d->rq_list);
	skb_queue_head_init(&d->skbpool);
	timer_setup(&d->timer, dummy_timer, 0);
	d->timer.expires = jiffies + HZ;
	add_timer(&d->timer);
	d->bufpool = NULL;	/* defer to aoeblk_gdalloc */
	d->tgt = d->targets;
	d->ref = 1;
	for (i = 0; i < NFACTIVE; i++)
		INIT_LIST_HEAD(&d->factive[i]);
	INIT_LIST_HEAD(&d->rexmitq);
	d->sysminor = sysminor;
	d->aoemajor = maj;
	d->aoeminor = min;
	d->rttavg = RTTAVG_INIT;
	d->rttdev = RTTDEV_INIT;
	d->next = devlist;
	devlist = d;
 out:
	spin_unlock_irqrestore(&devlist_lock, flags);
	return d;
}

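/* Release a target: drop the references held on its network
 * interfaces and free every frame on its free-frame list.
 */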
static void
freetgt(struct aoedev *d, struct aoetgt *t)
{
	struct frame *f;
	struct list_head *pos, *nx, *head;
	struct aoeif *ifp;

	for (ifp = t->ifs; ifp < &t->ifs[NAOEIFS]; ++ifp) {
		if (!ifp->nd)
			break;
		dev_put(ifp->nd);
	}

	head = &t->ffree;
	list_for_each_safe(pos, nx, head) {
		list_del(pos);
		f = list_entry(pos, struct frame, head);
		skbfree(f->skb);
		kfree(f);
	}
	kfree(t);
}

void
aoedev_exit(void)
{
	flush_scheduled_work();
	flush(NULL, 0, EXITING);
}

int __init
aoedev_init(void)
{
	return 0;
}