Orange Pi 5 kernel

Deprecated Linux kernel 5.10.110 for OrangePi 5/5B/5+ boards

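The listing below is the blk-mq scheduler glue header (block/blk-mq-sched.h in the 5.10 source tree). It declares the hooks the block core uses to hand bios and requests to a pluggable I/O scheduler (elevator), plus small inline wrappers that dispatch to the active scheduler's ops table.
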
/* SPDX-License-Identifier: GPL-2.0 */
#ifndef BLK_MQ_SCHED_H
#define BLK_MQ_SCHED_H

#include "blk-mq.h"
#include "blk-mq-tag.h"

void blk_mq_sched_assign_ioc(struct request *rq);

void blk_mq_sched_request_inserted(struct request *rq);
bool blk_mq_sched_try_merge(struct request_queue *q, struct bio *bio,
		unsigned int nr_segs, struct request **merged_request);
bool __blk_mq_sched_bio_merge(struct request_queue *q, struct bio *bio,
		unsigned int nr_segs);
bool blk_mq_sched_try_insert_merge(struct request_queue *q, struct request *rq);
void blk_mq_sched_mark_restart_hctx(struct blk_mq_hw_ctx *hctx);
void blk_mq_sched_restart(struct blk_mq_hw_ctx *hctx);

void blk_mq_sched_insert_request(struct request *rq, bool at_head,
				 bool run_queue, bool async);
void blk_mq_sched_insert_requests(struct blk_mq_hw_ctx *hctx,
				  struct blk_mq_ctx *ctx,
				  struct list_head *list, bool run_queue_async);

void blk_mq_sched_dispatch_requests(struct blk_mq_hw_ctx *hctx);

int blk_mq_init_sched(struct request_queue *q, struct elevator_type *e);
void blk_mq_exit_sched(struct request_queue *q, struct elevator_queue *e);
void blk_mq_sched_free_requests(struct request_queue *q);

/*
 * Try to merge @bio into an already queued request before allocating a new
 * one; bail out early when the queue or the bio forbids merging.
 */
static inline bool
blk_mq_sched_bio_merge(struct request_queue *q, struct bio *bio,
		unsigned int nr_segs)
{
	if (blk_queue_nomerges(q) || !bio_mergeable(bio))
		return false;

	return __blk_mq_sched_bio_merge(q, bio, nr_segs);
}

/* Let the active scheduler veto merging @bio into @rq; allowed by default. */
static inline bool
blk_mq_sched_allow_merge(struct request_queue *q, struct request *rq,
			 struct bio *bio)
{
	struct elevator_queue *e = q->elevator;

	if (e && e->type->ops.allow_merge)
		return e->type->ops.allow_merge(q, rq, bio);

	return true;
}

/* Notify the scheduler that @rq completed at time @now. */
static inline void blk_mq_sched_completed_request(struct request *rq, u64 now)
{
	struct elevator_queue *e = rq->q->elevator;

	if (e && e->type->ops.completed_request)
		e->type->ops.completed_request(rq, now);
}

/* Notify the scheduler when one of its requests (RQF_ELVPRIV) is requeued. */
static inline void blk_mq_sched_requeue_request(struct request *rq)
{
	struct request_queue *q = rq->q;
	struct elevator_queue *e = q->elevator;

	if ((rq->rq_flags & RQF_ELVPRIV) && e && e->type->ops.requeue_request)
		e->type->ops.requeue_request(rq);
}

/* True if the scheduler has requests pending for this hardware queue. */
static inline bool blk_mq_sched_has_work(struct blk_mq_hw_ctx *hctx)
{
	struct elevator_queue *e = hctx->queue->elevator;

	if (e && e->type->ops.has_work)
		return e->type->ops.has_work(hctx);

	return false;
}

/* True if a scheduler restart has been marked for this hardware queue. */
static inline bool blk_mq_sched_needs_restart(struct blk_mq_hw_ctx *hctx)
{
	return test_bit(BLK_MQ_S_SCHED_RESTART, &hctx->state);
}

#endif
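
The inline helpers above do nothing unless an I/O scheduler has attached an ops table to the queue's elevator. Below is a minimal, hedged sketch of how a scheduler module populates the struct elevator_type callbacks that these helpers dispatch through, using only the four hooks whose signatures are visible in this header (allow_merge, completed_request, requeue_request, has_work). The module and function names ("sketch") are made up for illustration, and a scheduler that can actually be selected must also implement the insert/dispatch/init hooks declared in include/linux/elevator.h, which are omitted here.

/* sketch-iosched.c - illustrative skeleton only, written against a 5.10 tree. */
#include <linux/module.h>
#include <linux/blkdev.h>
#include <linux/blk-mq.h>
#include <linux/bio.h>
#include <linux/elevator.h>

/* Reached via blk_mq_sched_allow_merge(): allow every bio/request merge. */
static bool sketch_allow_merge(struct request_queue *q, struct request *rq,
			       struct bio *bio)
{
	return true;
}

/*
 * Reached via blk_mq_sched_completed_request(): a real scheduler would
 * update its latency or budget accounting here.
 */
static void sketch_completed_request(struct request *rq, u64 now)
{
}

/* Reached via blk_mq_sched_requeue_request() for RQF_ELVPRIV requests. */
static void sketch_requeue_request(struct request *rq)
{
}

/*
 * Reached via blk_mq_sched_has_work(): report whether this hardware queue
 * has scheduler-owned requests waiting; this sketch never queues any.
 */
static bool sketch_has_work(struct blk_mq_hw_ctx *hctx)
{
	return false;
}

static struct elevator_type sketch_sched = {
	.ops = {
		.allow_merge		= sketch_allow_merge,
		.completed_request	= sketch_completed_request,
		.requeue_request	= sketch_requeue_request,
		.has_work		= sketch_has_work,
		/*
		 * Omitted for brevity: .init_sched, .exit_sched,
		 * .insert_requests and .dispatch_request, which a usable
		 * scheduler must provide.
		 */
	},
	.elevator_name	= "sketch",
	.elevator_owner	= THIS_MODULE,
};

static int __init sketch_init(void)
{
	return elv_register(&sketch_sched);
}

static void __exit sketch_exit(void)
{
	elv_unregister(&sketch_sched);
}

module_init(sketch_init);
module_exit(sketch_exit);
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("Illustrative blk-mq scheduler skeleton");

Once registered (and fully implemented), such a scheduler shows up in /sys/block/<dev>/queue/scheduler, and the e->type->ops.* checks in the helpers above start resolving to its callbacks.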