Orange Pi 5 kernel

Deprecated Linux kernel 5.10.110 for the Orange Pi 5/5B/5+ boards

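The header below (block/blk-mq.h) carries blk-mq's internal interfaces shared between the core block-layer sources: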
/* SPDX-License-Identifier: GPL-2.0 */
#ifndef INT_BLK_MQ_H
#define INT_BLK_MQ_H

#include "blk-stat.h"
#include "blk-mq-tag.h"

struct blk_mq_tag_set;

struct blk_mq_ctxs {
	struct kobject kobj;
	struct blk_mq_ctx __percpu	*queue_ctx;
};
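/*
 * blk_mq_ctxs above bundles the per-CPU software queue contexts for one
 * request queue; its embedded kobject anchors the per-ctx entries in the
 * queue's sysfs hierarchy.
 */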

/**
 * struct blk_mq_ctx - State for a software queue facing the submitting CPUs
 */
struct blk_mq_ctx {
	struct {
		spinlock_t		lock;
		struct list_head	rq_lists[HCTX_MAX_TYPES];
	} ____cacheline_aligned_in_smp;

	unsigned int		cpu;
	unsigned short		index_hw[HCTX_MAX_TYPES];
	struct blk_mq_hw_ctx	*hctxs[HCTX_MAX_TYPES];

	/* incremented at dispatch time */
	unsigned long		rq_dispatched[2];
	unsigned long		rq_merged;

	/* incremented at completion time */
	unsigned long		____cacheline_aligned_in_smp rq_completed[2];

	struct request_queue	*queue;
	struct blk_mq_ctxs	*ctxs;
	struct kobject		kobj;

	ANDROID_OEM_DATA_ARRAY(1, 2);
} ____cacheline_aligned_in_smp;
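/*
 * Layout note: the submission-side state (lock + rq_lists) and the
 * completion-side counters (rq_completed) each get their own cacheline via
 * ____cacheline_aligned_in_smp, so submitting and completing CPUs do not
 * false-share. ANDROID_OEM_DATA_ARRAY() is the Android common-kernel macro
 * reserving padding for vendor data.
 */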

void blk_mq_exit_queue(struct request_queue *q);
int blk_mq_update_nr_requests(struct request_queue *q, unsigned int nr);
void blk_mq_wake_waiters(struct request_queue *q);
bool blk_mq_dispatch_rq_list(struct blk_mq_hw_ctx *hctx, struct list_head *,
			     unsigned int);
void blk_mq_add_to_requeue_list(struct request *rq, bool at_head,
				bool kick_requeue_list);
void blk_mq_flush_busy_ctxs(struct blk_mq_hw_ctx *hctx, struct list_head *list);
struct request *blk_mq_dequeue_from_ctx(struct blk_mq_hw_ctx *hctx,
					struct blk_mq_ctx *start);
void blk_mq_put_rq_ref(struct request *rq);

/*
 * Internal helpers for allocating/freeing the request map
 */
void blk_mq_free_rqs(struct blk_mq_tag_set *set, struct blk_mq_tags *tags,
		     unsigned int hctx_idx);
void blk_mq_free_rq_map(struct blk_mq_tags *tags, unsigned int flags);
struct blk_mq_tags *blk_mq_alloc_rq_map(struct blk_mq_tag_set *set,
					unsigned int hctx_idx,
					unsigned int nr_tags,
					unsigned int reserved_tags,
					unsigned int flags);
int blk_mq_alloc_rqs(struct blk_mq_tag_set *set, struct blk_mq_tags *tags,
		     unsigned int hctx_idx, unsigned int depth);

/*
 * Internal helpers for request insertion into sw queues
 */
void __blk_mq_insert_request(struct blk_mq_hw_ctx *hctx, struct request *rq,
				bool at_head);
void blk_mq_request_bypass_insert(struct request *rq, bool at_head,
				  bool run_queue);
void blk_mq_insert_requests(struct blk_mq_hw_ctx *hctx, struct blk_mq_ctx *ctx,
				struct list_head *list);

/* Used by blk_insert_cloned_request() to issue request directly */
blk_status_t blk_mq_request_issue_directly(struct request *rq, bool last);
void blk_mq_try_issue_list_directly(struct blk_mq_hw_ctx *hctx,
				    struct list_head *list);

/*
 * CPU -> queue mappings
 */
extern int blk_mq_hw_queue_to_node(struct blk_mq_queue_map *qmap, unsigned int);

/*
 * blk_mq_map_queue_type() - map (hctx_type, cpu) to hardware queue
 * @q: request queue
 * @type: the hctx type index
 * @cpu: CPU
 */
static inline struct blk_mq_hw_ctx *blk_mq_map_queue_type(struct request_queue *q,
							  enum hctx_type type,
							  unsigned int cpu)
{
	return q->queue_hw_ctx[q->tag_set->map[type].mq_map[cpu]];
}
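/*
 * Illustrative use: the hardware queue servicing polled I/O submitted from
 * CPU 0, assuming the device registered a HCTX_TYPE_POLL map:
 *
 *	struct blk_mq_hw_ctx *hctx =
 *		blk_mq_map_queue_type(q, HCTX_TYPE_POLL, 0);
 */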

/*
 * blk_mq_map_queue() - map (cmd_flags,type) to hardware queue
 * @q: request queue
 * @flags: request command flags
 * @ctx: software queue cpu ctx
 */
static inline struct blk_mq_hw_ctx *blk_mq_map_queue(struct request_queue *q,
						     unsigned int flags,
						     struct blk_mq_ctx *ctx)
{
	enum hctx_type type = HCTX_TYPE_DEFAULT;

	/*
	 * The caller ensures that polling is enabled if REQ_HIPRI is set.
	 */
	if (flags & REQ_HIPRI)
		type = HCTX_TYPE_POLL;
	else if ((flags & REQ_OP_MASK) == REQ_OP_READ)
		type = HCTX_TYPE_READ;

	return ctx->hctxs[type];
}
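/*
 * Note: ctx->hctxs[] is pre-resolved per CPU when the queues are mapped; on
 * tag sets without a dedicated poll or read map, those entries are expected
 * to fall back to the default hardware queue, so indexing by type stays
 * safe.
 */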

/*
 * sysfs helpers
 */
extern void blk_mq_sysfs_init(struct request_queue *q);
extern void blk_mq_sysfs_deinit(struct request_queue *q);
extern int __blk_mq_register_dev(struct device *dev, struct request_queue *q);
extern int blk_mq_sysfs_register(struct request_queue *q);
extern void blk_mq_sysfs_unregister(struct request_queue *q);
extern void blk_mq_hctx_kobj_init(struct blk_mq_hw_ctx *hctx);

void blk_mq_release(struct request_queue *q);

static inline struct blk_mq_ctx *__blk_mq_get_ctx(struct request_queue *q,
					   unsigned int cpu)
{
	return per_cpu_ptr(q->queue_ctx, cpu);
}

/*
 * This assumes per-cpu software queues. They could be per-node as well,
 * for instance. For now this is hardcoded as-is. Note that we don't care
 * about preemption, since we know the ctxs are persistent. This does mean
 * that we can't rely on the ctx always matching the currently running CPU.
 */
static inline struct blk_mq_ctx *blk_mq_get_ctx(struct request_queue *q)
{
	return __blk_mq_get_ctx(q, raw_smp_processor_id());
}
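/*
 * Illustrative sketch of the typical submission-path pairing: resolve the
 * software queue for the submitting CPU, then the hardware queue for a
 * plain (non-hipri) read:
 *
 *	struct blk_mq_ctx *ctx = blk_mq_get_ctx(q);
 *	struct blk_mq_hw_ctx *hctx = blk_mq_map_queue(q, REQ_OP_READ, ctx);
 */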

struct blk_mq_alloc_data {
	/* input parameter */
	struct request_queue *q;
	blk_mq_req_flags_t flags;
	unsigned int shallow_depth;
	unsigned int cmd_flags;

	/* input & output parameter */
	struct blk_mq_ctx *ctx;
	struct blk_mq_hw_ctx *hctx;
};

static inline bool blk_mq_is_sbitmap_shared(unsigned int flags)
{
	return flags & BLK_MQ_F_TAG_HCTX_SHARED;
}
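/*
 * BLK_MQ_F_TAG_HCTX_SHARED selects a single tag sbitmap shared across all
 * hardware queues of the tag set (e.g. for host-wide tagging in SCSI)
 * rather than one sbitmap per hctx.
 */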

static inline struct blk_mq_tags *blk_mq_tags_from_data(struct blk_mq_alloc_data *data)
{
	if (data->q->elevator)
		return data->hctx->sched_tags;

	return data->hctx->tags;
}
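/*
 * When an I/O scheduler is attached (q->elevator is set), requests are
 * allocated out of the hctx's scheduler tags; the driver tag is acquired
 * separately at dispatch time. Without a scheduler, driver tags are used
 * directly.
 */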

static inline bool blk_mq_hctx_stopped(struct blk_mq_hw_ctx *hctx)
{
	return test_bit(BLK_MQ_S_STOPPED, &hctx->state);
}

static inline bool blk_mq_hw_queue_mapped(struct blk_mq_hw_ctx *hctx)
{
	return hctx->nr_ctx && hctx->tags;
}

unsigned int blk_mq_in_flight(struct request_queue *q, struct hd_struct *part);
void blk_mq_in_flight_rw(struct request_queue *q, struct hd_struct *part,
			 unsigned int inflight[2]);

static inline void blk_mq_put_dispatch_budget(struct request_queue *q)
{
	if (q->mq_ops->put_budget)
		q->mq_ops->put_budget(q);
}

static inline bool blk_mq_get_dispatch_budget(struct request_queue *q)
{
	if (q->mq_ops->get_budget)
		return q->mq_ops->get_budget(q);
	return true;
}
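/*
 * The dispatch budget lets drivers such as SCSI bound how many requests the
 * block layer may have in flight toward them: a budget is taken before
 * dispatch and returned if the request ends up not being issued.
 * Illustrative pattern (a sketch, not a verbatim caller;
 * "request_was_not_issued" is a stand-in condition, not a real symbol):
 *
 *	if (!blk_mq_get_dispatch_budget(q))
 *		return false;	(out of budget, back off and retry later)
 *	...
 *	if (request_was_not_issued)
 *		blk_mq_put_dispatch_budget(q);
 */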

static inline void __blk_mq_inc_active_requests(struct blk_mq_hw_ctx *hctx)
{
	if (blk_mq_is_sbitmap_shared(hctx->flags))
		atomic_inc(&hctx->queue->nr_active_requests_shared_sbitmap);
	else
		atomic_inc(&hctx->nr_active);
}

static inline void __blk_mq_dec_active_requests(struct blk_mq_hw_ctx *hctx)
{
	if (blk_mq_is_sbitmap_shared(hctx->flags))
		atomic_dec(&hctx->queue->nr_active_requests_shared_sbitmap);
	else
		atomic_dec(&hctx->nr_active);
}

static inline int __blk_mq_active_requests(struct blk_mq_hw_ctx *hctx)
{
	if (blk_mq_is_sbitmap_shared(hctx->flags))
		return atomic_read(&hctx->queue->nr_active_requests_shared_sbitmap);
	return atomic_read(&hctx->nr_active);
}
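/*
 * With a shared sbitmap the active count is kept per request_queue
 * (nr_active_requests_shared_sbitmap) so queues sharing the bitmap are
 * accounted individually; otherwise it lives on the hctx (nr_active).
 */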

static inline void __blk_mq_put_driver_tag(struct blk_mq_hw_ctx *hctx,
					   struct request *rq)
{
	blk_mq_put_tag(hctx->tags, rq->mq_ctx, rq->tag);
	rq->tag = BLK_MQ_NO_TAG;

	if (rq->rq_flags & RQF_MQ_INFLIGHT) {
		rq->rq_flags &= ~RQF_MQ_INFLIGHT;
		__blk_mq_dec_active_requests(hctx);
	}
}

static inline void blk_mq_put_driver_tag(struct request *rq)
{
	if (rq->tag == BLK_MQ_NO_TAG || rq->internal_tag == BLK_MQ_NO_TAG)
		return;

	__blk_mq_put_driver_tag(rq->mq_hctx, rq);
}
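/*
 * blk_mq_put_driver_tag() above is a no-op when no driver tag is held
 * (rq->tag == BLK_MQ_NO_TAG) or when no scheduler owns the request
 * (rq->internal_tag == BLK_MQ_NO_TAG); in the latter case the driver tag is
 * the request's only tag and is released when the request itself is freed.
 */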

static inline void blk_mq_clear_mq_map(struct blk_mq_queue_map *qmap)
{
	int cpu;

	for_each_possible_cpu(cpu)
		qmap->mq_map[cpu] = 0;
}

/*
 * blk_mq_plug() - Get caller context plug
 * @q: request queue
 * @bio: the bio being submitted by the caller context
 *
 * Plugging, by design, may delay the insertion of BIOs into the elevator in
 * order to increase BIO merging opportunities. This however can cause BIO
 * insertion order to change from the order in which submit_bio() is being
 * executed in the case of multiple contexts concurrently issuing BIOs to a
 * device, even if these contexts are synchronized to tightly control BIO
 * issuing order. While this is not a problem with regular block devices,
 * this ordering change can cause write BIO failures with zoned block devices
 * as these require sequential write patterns to zones. Prevent this from
 * happening by ignoring the plug state of a BIO issuing context if the
 * target request queue is for a zoned block device and the BIO to plug is a
 * write operation.
 *
 * Return current->plug if the bio can be plugged and NULL otherwise.
 */
static inline struct blk_plug *blk_mq_plug(struct request_queue *q,
					   struct bio *bio)
{
	/*
	 * For regular block devices or read operations, use the context plug
	 * which may be NULL if blk_start_plug() was not executed.
	 */
	if (!blk_queue_is_zoned(q) || !op_is_write(bio_op(bio)))
		return current->plug;

	/* Zoned block device write operation case: do not plug the BIO */
	return NULL;
}

/*
 * For shared tag users, we track the number of currently active users
 * and attempt to provide a fair share of the tag depth for each of them.
 */
static inline bool hctx_may_queue(struct blk_mq_hw_ctx *hctx,
				  struct sbitmap_queue *bt)
{
	unsigned int depth, users;

	if (!hctx || !(hctx->flags & BLK_MQ_F_TAG_QUEUE_SHARED))
		return true;

	/*
	 * Don't try dividing an ant
	 */
	if (bt->sb.depth == 1)
		return true;

	if (blk_mq_is_sbitmap_shared(hctx->flags)) {
		struct request_queue *q = hctx->queue;
		struct blk_mq_tag_set *set = q->tag_set;

		if (!test_bit(QUEUE_FLAG_HCTX_ACTIVE, &q->queue_flags))
			return true;
		users = atomic_read(&set->active_queues_shared_sbitmap);
	} else {
		if (!test_bit(BLK_MQ_S_TAG_ACTIVE, &hctx->state))
			return true;
		users = atomic_read(&hctx->tags->active_queues);
	}

	if (!users)
		return true;

	/*
	 * Allow at least some tags
	 */
	depth = max((bt->sb.depth + users - 1) / users, 4U);
	return __blk_mq_active_requests(hctx) < depth;
}
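/*
 * Worked example (illustrative numbers): with a shared depth of
 * bt->sb.depth == 64 and users == 3 active queues,
 * depth = max((64 + 3 - 1) / 3, 4U) = max(22, 4) = 22, so this hctx may
 * queue while fewer than 22 of its requests are active.
 */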

#endif