Orange Pi5 kernel

Deprecated Linux kernel 5.10.110 for OrangePi 5/5B/5+ boards

/* SPDX-License-Identifier: GPL-2.0 */
#ifndef BLK_INTERNAL_H
#define BLK_INTERNAL_H

#include <linux/idr.h>
#include <linux/blk-mq.h>
#include <linux/part_stat.h>
#include <linux/blk-crypto.h>
#include <xen/xen.h>
#include "blk-crypto-internal.h"
#include "blk-mq.h"
#include "blk-mq-sched.h"

/* Max future timer expiry for timeouts */
#define BLK_MAX_TIMEOUT		(5 * HZ)

extern struct dentry *blk_debugfs_root;

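/*
 * State for the flush/FUA machinery in blk-flush.c; one instance hangs off
 * each blk-mq hardware queue.
 */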
struct blk_flush_queue {
	unsigned int		flush_pending_idx:1;
	unsigned int		flush_running_idx:1;
	blk_status_t		rq_status;
	unsigned long		flush_pending_since;
	struct list_head	flush_queue[2];
	struct list_head	flush_data_in_flight;
	struct request		*flush_rq;

	struct lock_class_key	key;
	spinlock_t		mq_flush_lock;
};

extern struct kmem_cache *blk_requestq_cachep;
extern struct kobj_type blk_queue_ktype;
extern struct ida blk_queue_ida;

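/*
 * Return the flush queue of the hardware queue that a REQ_OP_FLUSH issued
 * from @ctx maps to.
 */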
static inline struct blk_flush_queue *
blk_get_flush_queue(struct request_queue *q, struct blk_mq_ctx *ctx)
{
	return blk_mq_map_queue(q, REQ_OP_FLUSH, ctx)->fq;
}

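/* Take an extra reference on the queue via its embedded kobject. */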
static inline void __blk_get_queue(struct request_queue *q)
{
	kobject_get(&q->kobj);
}

bool is_flush_rq(struct request *req);

struct blk_flush_queue *blk_alloc_flush_queue(int node, int cmd_size,
					      gfp_t flags);
void blk_free_flush_queue(struct blk_flush_queue *q);

void blk_freeze_queue(struct request_queue *q);

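/*
 * Two bio_vecs may share a physical segment only if they are physically
 * contiguous, Xen does not object, and the combined range does not cross
 * the queue's segment boundary mask.
 */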
static inline bool biovec_phys_mergeable(struct request_queue *q,
		struct bio_vec *vec1, struct bio_vec *vec2)
{
	unsigned long mask = queue_segment_boundary(q);
	phys_addr_t addr1 = page_to_phys(vec1->bv_page) + vec1->bv_offset;
	phys_addr_t addr2 = page_to_phys(vec2->bv_page) + vec2->bv_offset;

	if (addr1 + vec1->bv_len != addr2)
		return false;
	if (xen_domain() && !xen_biovec_phys_mergeable(vec1, vec2->bv_page))
		return false;
	if ((addr1 | mask) != ((addr2 + vec2->bv_len - 1) | mask))
		return false;
	return true;
}

static inline bool __bvec_gap_to_prev(struct request_queue *q,
		struct bio_vec *bprv, unsigned int offset)
{
	return (offset & queue_virt_boundary(q)) ||
		((bprv->bv_offset + bprv->bv_len) & queue_virt_boundary(q));
}

/*
 * Check if adding a bio_vec after bprv with offset would create a gap in
 * the SG list. Most drivers don't care about this, but some do.
 */
static inline bool bvec_gap_to_prev(struct request_queue *q,
		struct bio_vec *bprv, unsigned int offset)
{
	if (!queue_virt_boundary(q))
		return false;
	return __bvec_gap_to_prev(q, bprv, offset);
}

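/* Initialize the data fields of a request from its (first) bio. */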
static inline void blk_rq_bio_prep(struct request *rq, struct bio *bio,
		unsigned int nr_segs)
{
	rq->nr_phys_segments = nr_segs;
	rq->__data_len = bio->bi_iter.bi_size;
	rq->bio = rq->biotail = bio;
	rq->ioprio = bio_prio(bio);

	if (bio->bi_disk)
		rq->rq_disk = bio->bi_disk;
}

#ifdef CONFIG_BLK_DEV_INTEGRITY
void blk_flush_integrity(void);
bool __bio_integrity_endio(struct bio *);
void bio_integrity_free(struct bio *bio);
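/*
 * Called on bio completion; returns false if integrity verification is
 * still in flight and the completion has to be deferred.
 */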
static inline bool bio_integrity_endio(struct bio *bio)
{
	if (bio_integrity(bio))
		return __bio_integrity_endio(bio);
	return true;
}

bool blk_integrity_merge_rq(struct request_queue *, struct request *,
		struct request *);
bool blk_integrity_merge_bio(struct request_queue *, struct request *,
		struct bio *);

static inline bool integrity_req_gap_back_merge(struct request *req,
		struct bio *next)
{
	struct bio_integrity_payload *bip = bio_integrity(req->bio);
	struct bio_integrity_payload *bip_next = bio_integrity(next);

	return bvec_gap_to_prev(req->q, &bip->bip_vec[bip->bip_vcnt - 1],
				bip_next->bip_vec[0].bv_offset);
}

static inline bool integrity_req_gap_front_merge(struct request *req,
		struct bio *bio)
{
	struct bio_integrity_payload *bip = bio_integrity(bio);
	struct bio_integrity_payload *bip_next = bio_integrity(req->bio);

	return bvec_gap_to_prev(req->q, &bip->bip_vec[bip->bip_vcnt - 1],
				bip_next->bip_vec[0].bv_offset);
}

void blk_integrity_add(struct gendisk *);
void blk_integrity_del(struct gendisk *);
#else /* CONFIG_BLK_DEV_INTEGRITY */
static inline bool blk_integrity_merge_rq(struct request_queue *rq,
		struct request *r1, struct request *r2)
{
	return true;
}
static inline bool blk_integrity_merge_bio(struct request_queue *rq,
		struct request *r, struct bio *b)
{
	return true;
}
static inline bool integrity_req_gap_back_merge(struct request *req,
		struct bio *next)
{
	return false;
}
static inline bool integrity_req_gap_front_merge(struct request *req,
		struct bio *bio)
{
	return false;
}

static inline void blk_flush_integrity(void)
{
}
static inline bool bio_integrity_endio(struct bio *bio)
{
	return true;
}
static inline void bio_integrity_free(struct bio *bio)
{
}
static inline void blk_integrity_add(struct gendisk *disk)
{
}
static inline void blk_integrity_del(struct gendisk *disk)
{
}
#endif /* CONFIG_BLK_DEV_INTEGRITY */

unsigned long blk_rq_timeout(unsigned long timeout);
void blk_add_timer(struct request *req);

bool blk_attempt_plug_merge(struct request_queue *q, struct bio *bio,
		unsigned int nr_segs, struct request **same_queue_rq);
bool blk_bio_list_merge(struct request_queue *q, struct list_head *list,
			struct bio *bio, unsigned int nr_segs);

void blk_account_io_start(struct request *req);
void blk_account_io_done(struct request *req, u64 now);

/*
 * Plug flush limits
 */
#define BLK_MAX_REQUEST_COUNT	32
#define BLK_PLUG_FLUSH_SIZE	(128 * 1024)

/*
 * Internal elevator interface
 */
#define ELV_ON_HASH(rq) ((rq)->rq_flags & RQF_HASHED)

void blk_insert_flush(struct request *rq);

void elevator_init_mq(struct request_queue *q);
int elevator_switch_mq(struct request_queue *q,
			      struct elevator_type *new_e);
void __elevator_exit(struct request_queue *, struct elevator_queue *);
int elv_register_queue(struct request_queue *q, bool uevent);
void elv_unregister_queue(struct request_queue *q);

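/*
 * Free any scheduler-owned requests and tear down the elevator; the caller
 * must hold q->sysfs_lock.
 */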
static inline void elevator_exit(struct request_queue *q,
		struct elevator_queue *e)
{
	lockdep_assert_held(&q->sysfs_lock);

	blk_mq_sched_free_requests(q);
	__elevator_exit(q, e);
}

struct hd_struct *__disk_get_part(struct gendisk *disk, int partno);

ssize_t part_size_show(struct device *dev, struct device_attribute *attr,
		char *buf);
ssize_t part_stat_show(struct device *dev, struct device_attribute *attr,
		char *buf);
ssize_t part_inflight_show(struct device *dev, struct device_attribute *attr,
		char *buf);
ssize_t part_fail_show(struct device *dev, struct device_attribute *attr,
		char *buf);
ssize_t part_fail_store(struct device *dev, struct device_attribute *attr,
		const char *buf, size_t count);
ssize_t part_timeout_show(struct device *, struct device_attribute *, char *);
ssize_t part_timeout_store(struct device *, struct device_attribute *,
				const char *, size_t);

void __blk_queue_split(struct bio **bio, unsigned int *nr_segs);
int ll_back_merge_fn(struct request *req, struct bio *bio,
		unsigned int nr_segs);
int blk_attempt_req_merge(struct request_queue *q, struct request *rq,
				struct request *next);
unsigned int blk_recalc_rq_segments(struct request *rq);
void blk_rq_set_mixed_merge(struct request *rq);
bool blk_rq_merge_ok(struct request *rq, struct bio *bio);
enum elv_merge blk_try_merge(struct request *rq, struct bio *bio);

int blk_dev_init(void);

/*
 * Contribute to IO statistics IFF:
 *
 *	a) it's attached to a gendisk, and
 *	b) the queue had IO stats enabled when this request was started
 */
static inline bool blk_do_io_stat(struct request *rq)
{
	return rq->rq_disk && (rq->rq_flags & RQF_IO_STAT);
}

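/* Mark a request as no longer mergeable and drop it as the cached merge hint. */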
static inline void req_set_nomerge(struct request_queue *q, struct request *req)
{
	req->cmd_flags |= REQ_NOMERGE;
	if (req == q->last_merge)
		q->last_merge = NULL;
}

/*
 * The max size one bio can handle is UINT_MAX because bvec_iter.bi_size
 * is defined as 'unsigned int'. It also has to be aligned to the logical
 * block size, which is the minimum unit accepted by the hardware.
 */
static inline unsigned int bio_allowed_max_sectors(struct request_queue *q)
{
	return round_down(UINT_MAX, queue_logical_block_size(q)) >> 9;
}

/*
 * The max bio size that is aligned to q->limits.discard_granularity. This
 * is a hint for splitting large discard bios in the generic block layer,
 * so that if the device driver has to split the discard bio further into
 * smaller ones, their bi_size can easily stay aligned to the
 * discard_granularity of the device's queue.
 */
static inline unsigned int bio_aligned_discard_max_sectors(
					struct request_queue *q)
{
	return round_down(UINT_MAX, q->limits.discard_granularity) >>
			SECTOR_SHIFT;
}

/*
 * Internal io_context interface
 */
void get_io_context(struct io_context *ioc);
struct io_cq *ioc_lookup_icq(struct io_context *ioc, struct request_queue *q);
struct io_cq *ioc_create_icq(struct io_context *ioc, struct request_queue *q,
			     gfp_t gfp_mask);
void ioc_clear_queue(struct request_queue *q);

int create_task_io_context(struct task_struct *task, gfp_t gfp_mask, int node);

/*
 * Internal throttling interface
 */
#ifdef CONFIG_BLK_DEV_THROTTLING
extern int blk_throtl_init(struct request_queue *q);
extern void blk_throtl_exit(struct request_queue *q);
extern void blk_throtl_register_queue(struct request_queue *q);
extern void blk_throtl_charge_bio_split(struct bio *bio);
bool blk_throtl_bio(struct bio *bio);
#else /* CONFIG_BLK_DEV_THROTTLING */
static inline int blk_throtl_init(struct request_queue *q) { return 0; }
static inline void blk_throtl_exit(struct request_queue *q) { }
static inline void blk_throtl_register_queue(struct request_queue *q) { }
static inline void blk_throtl_charge_bio_split(struct bio *bio) { }
static inline bool blk_throtl_bio(struct bio *bio) { return false; }
#endif /* CONFIG_BLK_DEV_THROTTLING */
#ifdef CONFIG_BLK_DEV_THROTTLING_LOW
extern ssize_t blk_throtl_sample_time_show(struct request_queue *q, char *page);
extern ssize_t blk_throtl_sample_time_store(struct request_queue *q,
	const char *page, size_t count);
extern void blk_throtl_bio_endio(struct bio *bio);
extern void blk_throtl_stat_add(struct request *rq, u64 time);
#else
static inline void blk_throtl_bio_endio(struct bio *bio) { }
static inline void blk_throtl_stat_add(struct request *rq, u64 time) { }
#endif

#ifdef CONFIG_BOUNCE
extern int init_emergency_isa_pool(void);
extern void blk_queue_bounce(struct request_queue *q, struct bio **bio);
#else
static inline int init_emergency_isa_pool(void)
{
	return 0;
}
static inline void blk_queue_bounce(struct request_queue *q, struct bio **bio)
{
}
#endif /* CONFIG_BOUNCE */

#ifdef CONFIG_BLK_CGROUP_IOLATENCY
extern int blk_iolatency_init(struct request_queue *q);
#else
static inline int blk_iolatency_init(struct request_queue *q) { return 0; }
#endif

struct bio *blk_next_bio(struct bio *bio, unsigned int nr_pages, gfp_t gfp);

#ifdef CONFIG_BLK_DEV_ZONED
void blk_queue_free_zone_bitmaps(struct request_queue *q);
#else
static inline void blk_queue_free_zone_bitmaps(struct request_queue *q) {}
#endif

struct hd_struct *disk_map_sector_rcu(struct gendisk *disk, sector_t sector);

int blk_alloc_devt(struct hd_struct *part, dev_t *devt);
void blk_free_devt(dev_t devt);
void blk_invalidate_devt(dev_t devt);
char *disk_name(struct gendisk *hd, int partno, char *buf);
#define ADDPART_FLAG_NONE	0
#define ADDPART_FLAG_RAID	1
#define ADDPART_FLAG_WHOLEDISK	2
void delete_partition(struct hd_struct *part);
int bdev_add_partition(struct block_device *bdev, int partno,
		sector_t start, sector_t length);
int bdev_del_partition(struct block_device *bdev, int partno);
int bdev_resize_partition(struct block_device *bdev, int partno,
		sector_t start, sector_t length);
int disk_expand_part_tbl(struct gendisk *disk, int target);
int hd_ref_init(struct hd_struct *part);

/* no need to get/put refcount of part0 */
static inline int hd_struct_try_get(struct hd_struct *part)
{
	if (part->partno)
		return percpu_ref_tryget_live(&part->ref);
	return 1;
}

static inline void hd_struct_put(struct hd_struct *part)
{
	if (part->partno)
		percpu_ref_put(&part->ref);
}

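/*
 * Release the memory owned by a partition: per-cpu stats, partition info
 * and the percpu refcount.
 */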
static inline void hd_free_part(struct hd_struct *part)
{
	free_percpu(part->dkstats);
	kfree(part->info);
	percpu_ref_exit(&part->ref);
}

/*
 * Any access of part->nr_sects that is not protected by the partition
 * bd_mutex or the gendisk bdev bd_mutex should be done through this
 * accessor function.
 *
 * Written along the lines of i_size_read() and i_size_write().
 * The CONFIG_PREEMPTION case optimizes for a UP kernel with preemption
 * enabled.
 */
static inline sector_t part_nr_sects_read(struct hd_struct *part)
{
#if BITS_PER_LONG==32 && defined(CONFIG_SMP)
	sector_t nr_sects;
	unsigned seq;
	do {
		seq = read_seqcount_begin(&part->nr_sects_seq);
		nr_sects = part->nr_sects;
	} while (read_seqcount_retry(&part->nr_sects_seq, seq));
	return nr_sects;
#elif BITS_PER_LONG==32 && defined(CONFIG_PREEMPTION)
	sector_t nr_sects;

	preempt_disable();
	nr_sects = part->nr_sects;
	preempt_enable();
	return nr_sects;
#else
	return part->nr_sects;
#endif
}

/*
 * Should be called with the partition's mutex (typically bd_mutex) held to
 * provide mutual exclusion among writers; otherwise the seqcount may be
 * left in an inconsistent state, leaving readers spinning indefinitely.
 */
static inline void part_nr_sects_write(struct hd_struct *part, sector_t size)
{
#if BITS_PER_LONG==32 && defined(CONFIG_SMP)
	preempt_disable();
	write_seqcount_begin(&part->nr_sects_seq);
	part->nr_sects = size;
	write_seqcount_end(&part->nr_sects_seq);
	preempt_enable();
#elif BITS_PER_LONG==32 && defined(CONFIG_PREEMPTION)
	preempt_disable();
	part->nr_sects = size;
	preempt_enable();
#else
	part->nr_sects = size;
#endif
}

int bio_add_hw_page(struct request_queue *q, struct bio *bio,
		struct page *page, unsigned int len, unsigned int offset,
		unsigned int max_sectors, bool *same_page);

#endif /* BLK_INTERNAL_H */