Orange Pi 5 kernel

Deprecated Linux kernel 5.10.110 for OrangePi 5/5B/5+ boards

// SPDX-License-Identifier: GPL-2.0
/*
 * Functions related to io context handling
 */
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/bio.h>
#include <linux/blkdev.h>
#include <linux/slab.h>
#include <linux/sched/task.h>

#include "blk.h"

/*
 * For io context allocations
 */
static struct kmem_cache *iocontext_cachep;

/**
 * get_io_context - increment reference count to io_context
 * @ioc: io_context to get
 *
 * Increment reference count to @ioc.
 */
void get_io_context(struct io_context *ioc)
{
	BUG_ON(atomic_long_read(&ioc->refcount) <= 0);
	atomic_long_inc(&ioc->refcount);
}

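/*
 * Usage sketch (not part of the upstream file; the caller context is
 * hypothetical, only get_io_context()/put_io_context() are real): the
 * helpers pair like any refcount get/put, and the get requires an
 * already-live reference.
 *
 *	struct io_context *ioc = current->io_context;
 *
 *	if (ioc) {
 *		get_io_context(ioc);	// take our own reference
 *		...			// use ioc, e.g. attach it to a request
 *		put_io_context(ioc);	// drop it when done
 *	}
 */
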
static void icq_free_icq_rcu(struct rcu_head *head)
{
	struct io_cq *icq = container_of(head, struct io_cq, __rcu_head);

	kmem_cache_free(icq->__rcu_icq_cache, icq);
}

/*
 * Exit an icq. Called with ioc locked for blk-mq, and with both ioc
 * and queue locked for legacy.
 */
static void ioc_exit_icq(struct io_cq *icq)
{
	struct elevator_type *et = icq->q->elevator->type;

	if (icq->flags & ICQ_EXITED)
		return;

	if (et->ops.exit_icq)
		et->ops.exit_icq(icq);

	icq->flags |= ICQ_EXITED;
}

/*
 * Release an icq. Called with ioc locked for blk-mq, and with both ioc
 * and queue locked for legacy.
 */
static void ioc_destroy_icq(struct io_cq *icq)
{
	struct io_context *ioc = icq->ioc;
	struct request_queue *q = icq->q;
	struct elevator_type *et = q->elevator->type;

	lockdep_assert_held(&ioc->lock);

	radix_tree_delete(&ioc->icq_tree, icq->q->id);
	hlist_del_init(&icq->ioc_node);
	list_del_init(&icq->q_node);

	/*
	 * Both setting lookup hint to and clearing it from @icq are done
	 * under queue_lock.  If it's not pointing to @icq now, it never
	 * will.  Hint assignment itself can race safely.
	 */
	if (rcu_access_pointer(ioc->icq_hint) == icq)
		rcu_assign_pointer(ioc->icq_hint, NULL);

	ioc_exit_icq(icq);

	/*
	 * @icq->q might have gone away by the time RCU callback runs
	 * making it impossible to determine icq_cache.  Record it in @icq.
	 */
	icq->__rcu_icq_cache = et->icq_cache;
	icq->flags |= ICQ_DESTROYED;
	call_rcu(&icq->__rcu_head, icq_free_icq_rcu);
}

/*
 * Slow path for ioc release in put_io_context().  Performs double-lock
 * dancing to unlink all icq's and then frees ioc.
 */
static void ioc_release_fn(struct work_struct *work)
{
	struct io_context *ioc = container_of(work, struct io_context,
					      release_work);
	spin_lock_irq(&ioc->lock);

	while (!hlist_empty(&ioc->icq_list)) {
		struct io_cq *icq = hlist_entry(ioc->icq_list.first,
						struct io_cq, ioc_node);
		struct request_queue *q = icq->q;

		if (spin_trylock(&q->queue_lock)) {
			ioc_destroy_icq(icq);
			spin_unlock(&q->queue_lock);
		} else {
			/* Make sure q and icq cannot be freed. */
			rcu_read_lock();

			/* Re-acquire the locks in the correct order. */
			spin_unlock(&ioc->lock);
			spin_lock(&q->queue_lock);
			spin_lock(&ioc->lock);

			/*
			 * The icq may have been destroyed when the ioc lock
			 * was released.
			 */
			if (!(icq->flags & ICQ_DESTROYED))
				ioc_destroy_icq(icq);

			spin_unlock(&q->queue_lock);
			rcu_read_unlock();
		}
	}

	spin_unlock_irq(&ioc->lock);

	kmem_cache_free(iocontext_cachep, ioc);
}

/**
 * put_io_context - put a reference of io_context
 * @ioc: io_context to put
 *
 * Decrement reference count of @ioc and release it if the count reaches
 * zero.
 */
void put_io_context(struct io_context *ioc)
{
	unsigned long flags;
	bool free_ioc = false;

	if (ioc == NULL)
		return;

	BUG_ON(atomic_long_read(&ioc->refcount) <= 0);

	/*
	 * Releasing ioc requires reverse order double locking and we may
	 * already be holding a queue_lock.  Do it asynchronously from wq.
	 */
	if (atomic_long_dec_and_test(&ioc->refcount)) {
		spin_lock_irqsave(&ioc->lock, flags);
		if (!hlist_empty(&ioc->icq_list))
			queue_work(system_power_efficient_wq,
					&ioc->release_work);
		else
			free_ioc = true;
		spin_unlock_irqrestore(&ioc->lock, flags);
	}

	if (free_ioc)
		kmem_cache_free(iocontext_cachep, ioc);
}

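/*
 * Why put_io_context() punts to a workqueue (explanatory sketch, not
 * upstream text): destroying icqs takes each icq's q->queue_lock after
 * ioc->lock, the reverse of the usual queue_lock -> ioc->lock order
 * established by ioc_create_icq(), and the caller may already hold a
 * queue_lock.  A hypothetical inline release would look like:
 *
 *	// e.g. a completion path that already holds q->queue_lock
 *	spin_lock(&q->queue_lock);
 *	put_io_context(ioc);	// would then need ioc->lock plus the
 *				// queue_lock of every remaining icq:
 *				// deadlock prone, hence the async punt
 *
 * ioc_release_fn() instead starts from a clean lock context and uses
 * trylock/reordering as shown above.
 */
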
/**
 * put_io_context_active - put active reference on ioc
 * @ioc: ioc of interest
 *
 * Undo get_io_context_active().  If active reference reaches zero after
 * put, @ioc can never issue further IOs and ioscheds are notified.
 */
void put_io_context_active(struct io_context *ioc)
{
	struct io_cq *icq;

	if (!atomic_dec_and_test(&ioc->active_ref)) {
		put_io_context(ioc);
		return;
	}

	spin_lock_irq(&ioc->lock);
	hlist_for_each_entry(icq, &ioc->icq_list, ioc_node) {
		if (icq->flags & ICQ_EXITED)
			continue;

		ioc_exit_icq(icq);
	}
	spin_unlock_irq(&ioc->lock);

	put_io_context(ioc);
}

/* Called by the exiting task */
void exit_io_context(struct task_struct *task)
{
	struct io_context *ioc;

	task_lock(task);
	ioc = task->io_context;
	task->io_context = NULL;
	task_unlock(task);

	atomic_dec(&ioc->nr_tasks);
	put_io_context_active(ioc);
}

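/*
 * Call-site sketch (paraphrasing the task-exit path rather than quoting
 * it): once the exiting task can no longer issue IO, do_exit() detaches
 * the io_context roughly like this, dropping the task's active
 * reference and, with it, one base reference.
 *
 *	if (tsk->io_context)
 *		exit_io_context(tsk);
 */
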
static void __ioc_clear_queue(struct list_head *icq_list)
{
	unsigned long flags;

	rcu_read_lock();
	while (!list_empty(icq_list)) {
		struct io_cq *icq = list_entry(icq_list->next,
						struct io_cq, q_node);
		struct io_context *ioc = icq->ioc;

		spin_lock_irqsave(&ioc->lock, flags);
		if (icq->flags & ICQ_DESTROYED) {
			spin_unlock_irqrestore(&ioc->lock, flags);
			continue;
		}
		ioc_destroy_icq(icq);
		spin_unlock_irqrestore(&ioc->lock, flags);
	}
	rcu_read_unlock();
}

/**
 * ioc_clear_queue - break any ioc association with the specified queue
 * @q: request_queue being cleared
 *
 * Walk @q->icq_list and exit all io_cq's.
 */
void ioc_clear_queue(struct request_queue *q)
{
	LIST_HEAD(icq_list);

	spin_lock_irq(&q->queue_lock);
	list_splice_init(&q->icq_list, &icq_list);
	spin_unlock_irq(&q->queue_lock);

	__ioc_clear_queue(&icq_list);
}

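/*
 * Design note with an illustrative call-site sketch (names below are
 * hypothetical, not a quote of the real teardown path): the splice in
 * ioc_clear_queue() empties @q->icq_list while only queue_lock is held,
 * so the per-ioc locking in __ioc_clear_queue() happens afterwards
 * without queue_lock.  Queue/elevator teardown would use it along these
 * lines:
 *
 *	stop_accepting_requests(q);	// hypothetical: quiesce @q first
 *	ioc_clear_queue(q);		// then break all ioc <-> q links
 */
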
int create_task_io_context(struct task_struct *task, gfp_t gfp_flags, int node)
{
	struct io_context *ioc;
	int ret;

	ioc = kmem_cache_alloc_node(iocontext_cachep, gfp_flags | __GFP_ZERO,
				    node);
	if (unlikely(!ioc))
		return -ENOMEM;

	/* initialize */
	atomic_long_set(&ioc->refcount, 1);
	atomic_set(&ioc->nr_tasks, 1);
	atomic_set(&ioc->active_ref, 1);
	spin_lock_init(&ioc->lock);
	INIT_RADIX_TREE(&ioc->icq_tree, GFP_ATOMIC);
	INIT_HLIST_HEAD(&ioc->icq_list);
	INIT_WORK(&ioc->release_work, ioc_release_fn);

	/*
	 * Try to install.  ioc shouldn't be installed if someone else
	 * already did or @task, which isn't %current, is exiting.  Note
	 * that we need to allow ioc creation on exiting %current as exit
	 * path may issue IOs from e.g. exit_files().  The exit path is
	 * responsible for not issuing IO after exit_io_context().
	 */
	task_lock(task);
	if (!task->io_context &&
	    (task == current || !(task->flags & PF_EXITING)))
		task->io_context = ioc;
	else
		kmem_cache_free(iocontext_cachep, ioc);

	ret = task->io_context ? 0 : -EBUSY;

	task_unlock(task);

	return ret;
}

/**
 * get_task_io_context - get io_context of a task
 * @task: task of interest
 * @gfp_flags: allocation flags, used if allocation is necessary
 * @node: allocation node, used if allocation is necessary
 *
 * Return io_context of @task.  If it doesn't exist, it is created with
 * @gfp_flags and @node.  The returned io_context has its reference count
 * incremented.
 *
 * This function always goes through task_lock() and it's better to use
 * %current->io_context + get_io_context() for %current.
 */
struct io_context *get_task_io_context(struct task_struct *task,
				       gfp_t gfp_flags, int node)
{
	struct io_context *ioc;

	might_sleep_if(gfpflags_allow_blocking(gfp_flags));

	do {
		task_lock(task);
		ioc = task->io_context;
		if (likely(ioc)) {
			get_io_context(ioc);
			task_unlock(task);
			return ioc;
		}
		task_unlock(task);
	} while (!create_task_io_context(task, gfp_flags, node));

	return NULL;
}

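/*
 * Sketch of the advice in the kernel-doc above (hypothetical caller):
 * for %current the io_context pointer is stable, so skip task_lock()
 * and only fall back to the slow path when no ioc exists yet.
 *
 *	struct io_context *ioc = current->io_context;
 *
 *	if (ioc)
 *		get_io_context(ioc);	// lock-free fast path
 *	else
 *		ioc = get_task_io_context(current, GFP_NOIO, NUMA_NO_NODE);
 */
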
/**
 * ioc_lookup_icq - lookup io_cq from ioc
 * @ioc: the associated io_context
 * @q: the associated request_queue
 *
 * Look up io_cq associated with @ioc - @q pair from @ioc.  Must be called
 * with @q->queue_lock held.
 */
struct io_cq *ioc_lookup_icq(struct io_context *ioc, struct request_queue *q)
{
	struct io_cq *icq;

	lockdep_assert_held(&q->queue_lock);

	/*
	 * icq's are indexed from @ioc using radix tree and hint pointer,
	 * both of which are protected with RCU.  All removals are done
	 * holding both q and ioc locks, and we're holding q lock - if we
	 * find an icq which points to us, it's guaranteed to be valid.
	 */
	rcu_read_lock();
	icq = rcu_dereference(ioc->icq_hint);
	if (icq && icq->q == q)
		goto out;

	icq = radix_tree_lookup(&ioc->icq_tree, q->id);
	if (icq && icq->q == q)
		rcu_assign_pointer(ioc->icq_hint, icq);	/* allowed to race */
	else
		icq = NULL;
out:
	rcu_read_unlock();
	return icq;
}
EXPORT_SYMBOL(ioc_lookup_icq);

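/*
 * Usage sketch (hypothetical caller, e.g. an elevator looking up its
 * per-ioc state while preparing a request; assumes current->io_context
 * is non-NULL):
 *
 *	struct io_cq *icq;
 *
 *	spin_lock_irq(&q->queue_lock);	// lookup requires queue_lock
 *	icq = ioc_lookup_icq(current->io_context, q);
 *	spin_unlock_irq(&q->queue_lock);
 *	if (!icq)
 *		icq = ioc_create_icq(current->io_context, q, GFP_ATOMIC);
 */
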
/**
 * ioc_create_icq - create and link io_cq
 * @ioc: io_context of interest
 * @q: request_queue of interest
 * @gfp_mask: allocation mask
 *
 * Make sure io_cq linking @ioc and @q exists.  If icq doesn't exist, it
 * will be created using @gfp_mask.
 *
 * The caller is responsible for ensuring @ioc won't go away and @q is
 * alive and will stay alive until this function returns.
 */
struct io_cq *ioc_create_icq(struct io_context *ioc, struct request_queue *q,
			     gfp_t gfp_mask)
{
	struct elevator_type *et = q->elevator->type;
	struct io_cq *icq;

	/* allocate stuff */
	icq = kmem_cache_alloc_node(et->icq_cache, gfp_mask | __GFP_ZERO,
				    q->node);
	if (!icq)
		return NULL;

	if (radix_tree_maybe_preload(gfp_mask) < 0) {
		kmem_cache_free(et->icq_cache, icq);
		return NULL;
	}

	icq->ioc = ioc;
	icq->q = q;
	INIT_LIST_HEAD(&icq->q_node);
	INIT_HLIST_NODE(&icq->ioc_node);

	/* lock both q and ioc and try to link @icq */
	spin_lock_irq(&q->queue_lock);
	spin_lock(&ioc->lock);

	if (likely(!radix_tree_insert(&ioc->icq_tree, q->id, icq))) {
		hlist_add_head(&icq->ioc_node, &ioc->icq_list);
		list_add(&icq->q_node, &q->icq_list);
		if (et->ops.init_icq)
			et->ops.init_icq(icq);
	} else {
		kmem_cache_free(et->icq_cache, icq);
		icq = ioc_lookup_icq(ioc, q);
		if (!icq)
			printk(KERN_ERR "cfq: icq link failed!\n");
	}

	spin_unlock(&ioc->lock);
	spin_unlock_irq(&q->queue_lock);
	radix_tree_preload_end();
	return icq;
}

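/*
 * Note on the insert-vs-lookup dance above, with a hypothetical
 * timeline (sketch, not taken from a real trace).  Two tasks sharing
 * one ioc (e.g. CLONE_IO threads) may race to create the icq for the
 * same queue:
 *
 *	task A				task B
 *	------				------
 *	alloc icq
 *					alloc icq
 *	radix_tree_insert() -> 0
 *	link icq, init_icq()
 *					radix_tree_insert() -> -EEXIST
 *					free own icq
 *					ioc_lookup_icq() -> A's icq
 *
 * Losing the race is not an error: the winner's icq serves both, which
 * is why the loser frees its allocation and returns the looked-up one.
 */
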
static int __init blk_ioc_init(void)
{
	iocontext_cachep = kmem_cache_create("blkdev_ioc",
			sizeof(struct io_context), 0, SLAB_PANIC, NULL);
	return 0;
}
subsys_initcall(blk_ioc_init);