// SPDX-License-Identifier: GPL-2.0
/*
 * Functions related to segment and merge handling
 */
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/bio.h>
#include <linux/blkdev.h>
#include <linux/scatterlist.h>
#ifndef __GENKSYMS__
#include <linux/blk-cgroup.h>
#endif

#include <trace/events/block.h>

#include "blk.h"
#include "blk-rq-qos.h"

static inline bool bio_will_gap(struct request_queue *q,
                struct request *prev_rq, struct bio *prev, struct bio *next)
{
        struct bio_vec pb, nb;

        if (!bio_has_data(prev) || !queue_virt_boundary(q))
                return false;

        /*
         * Don't merge if the first bio starts at a non-zero offset, otherwise
         * it is quite difficult to respect the sg gap limit. We work hard to
         * merge a huge number of small bios, as happens with mkfs.
         */
        if (prev_rq)
                bio_get_first_bvec(prev_rq->bio, &pb);
        else
                bio_get_first_bvec(prev, &pb);
        if (pb.bv_offset & queue_virt_boundary(q))
                return true;

        /*
         * We don't need to worry about the merged segment ending on an
         * unaligned virt boundary:
         *
         * - if 'pb' ends aligned, the merged segment ends aligned
         * - if 'pb' ends unaligned, the next bio must include
         *   a single bvec 'nb', otherwise 'nb' can't merge with 'pb'
         */
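        /*
         * Illustrative example (not specific to any one driver): NVMe sets a
         * 4 KiB virt boundary, so within one request every bvec other than
         * the first must start on a 4 KiB boundary and every bvec other than
         * the last must end on one; the checks below enforce that for the
         * prev/next pair being merged.
         */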
        bio_get_last_bvec(prev, &pb);
        bio_get_first_bvec(next, &nb);
        if (biovec_phys_mergeable(q, &pb, &nb))
                return false;
        return __bvec_gap_to_prev(q, &pb, nb.bv_offset);
}

static inline bool req_gap_back_merge(struct request *req, struct bio *bio)
{
        return bio_will_gap(req->q, req, req->biotail, bio);
}

static inline bool req_gap_front_merge(struct request *req, struct bio *bio)
{
        return bio_will_gap(req->q, NULL, bio, req->bio);
}

static struct bio *blk_bio_discard_split(struct request_queue *q,
                                         struct bio *bio,
                                         struct bio_set *bs,
                                         unsigned *nsegs)
{
        unsigned int max_discard_sectors, granularity;
        int alignment;
        sector_t tmp;
        unsigned split_sectors;

        *nsegs = 1;

        /* Zero-sector (unknown) and one-sector granularities are the same. */
        granularity = max(q->limits.discard_granularity >> 9, 1U);

        max_discard_sectors = min(q->limits.max_discard_sectors,
                        bio_allowed_max_sectors(q));
        max_discard_sectors -= max_discard_sectors % granularity;

        if (unlikely(!max_discard_sectors)) {
                /* XXX: warn */
                return NULL;
        }

        if (bio_sectors(bio) <= max_discard_sectors)
                return NULL;

        split_sectors = max_discard_sectors;

        /*
         * If the next starting sector would be misaligned, stop the discard at
         * the previous aligned sector.
         */
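        /*
         * Worked example with made-up numbers: granularity = 2048 sectors
         * (1 MiB), alignment = 0, bi_sector = 1000 and split_sectors = 8192.
         * The remainder computed below is (1000 + 8192) % 2048 = 1000, so
         * split_sectors becomes 7192 and the split ends at sector 8192, i.e.
         * on a granularity-aligned boundary for the next discard.
         */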
        alignment = (q->limits.discard_alignment >> 9) % granularity;

        tmp = bio->bi_iter.bi_sector + split_sectors - alignment;
        tmp = sector_div(tmp, granularity);

        if (split_sectors > tmp)
                split_sectors -= tmp;

        return bio_split(bio, split_sectors, GFP_NOIO, bs);
}

static struct bio *blk_bio_write_zeroes_split(struct request_queue *q,
                struct bio *bio, struct bio_set *bs, unsigned *nsegs)
{
        *nsegs = 0;

        if (!q->limits.max_write_zeroes_sectors)
                return NULL;

        if (bio_sectors(bio) <= q->limits.max_write_zeroes_sectors)
                return NULL;

        return bio_split(bio, q->limits.max_write_zeroes_sectors, GFP_NOIO, bs);
}

static struct bio *blk_bio_write_same_split(struct request_queue *q,
                                            struct bio *bio,
                                            struct bio_set *bs,
                                            unsigned *nsegs)
{
        *nsegs = 1;

        if (!q->limits.max_write_same_sectors)
                return NULL;

        if (bio_sectors(bio) <= q->limits.max_write_same_sectors)
                return NULL;

        return bio_split(bio, q->limits.max_write_same_sectors, GFP_NOIO, bs);
}

/*
 * Return the maximum number of sectors from the start of a bio that may be
 * submitted as a single request to a block device. If enough sectors remain,
 * align the end to the physical block size. Otherwise align the end to the
 * logical block size. This approach minimizes the number of non-aligned
 * requests that are submitted to a block device if the start of a bio is not
 * aligned to a physical block boundary.
 */
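/*
 * Worked example with made-up numbers: for a 4 KiB physical block size
 * (pbs = 8 sectors), a 512-byte logical block size (lbs = 1) and a bio
 * starting at sector 3, start_offset is 3. If blk_max_size_offset() allows
 * 255 sectors, max_sectors is rounded to (255 + 3) & ~7 = 256 and
 * 256 - 3 = 253 sectors are returned, so the request ends exactly on a
 * physical block boundary.
 */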
static inline unsigned get_max_io_size(struct request_queue *q,
                                       struct bio *bio)
{
        unsigned sectors = blk_max_size_offset(q, bio->bi_iter.bi_sector, 0);
        unsigned max_sectors = sectors;
        unsigned pbs = queue_physical_block_size(q) >> SECTOR_SHIFT;
        unsigned lbs = queue_logical_block_size(q) >> SECTOR_SHIFT;
        unsigned start_offset = bio->bi_iter.bi_sector & (pbs - 1);

        max_sectors += start_offset;
        max_sectors &= ~(pbs - 1);
        if (max_sectors > start_offset)
                return max_sectors - start_offset;

        return sectors & ~(lbs - 1);
}

static inline unsigned get_max_segment_size(const struct request_queue *q,
                                            struct page *start_page,
                                            unsigned long offset)
{
        unsigned long mask = queue_segment_boundary(q);

        offset = mask & (page_to_phys(start_page) + offset);

        /*
         * The "mask - offset + 1" calculation below may overflow to zero when
         * the page's physical address is zero on a 32-bit arch; fall back to
         * the queue's max segment size when that happens.
         */
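        /*
         * Illustrative example (hypothetical limits): with a 64 KiB segment
         * boundary (mask = 0xffff) and a bvec that starts 4 KiB into a
         * boundary window, at most 60 KiB may go into one segment before the
         * boundary is crossed, further capped by the max segment size.
         */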
        return min_not_zero(mask - offset + 1,
                        (unsigned long)queue_max_segment_size(q));
}

/**
 * bvec_split_segs - verify whether or not a bvec should be split in the middle
 * @q:        [in] request queue associated with the bio associated with @bv
 * @bv:       [in] bvec to examine
 * @nsegs:    [in,out] Number of segments in the bio being built. Incremented
 *            by the number of segments from @bv that may be appended to that
 *            bio without exceeding @max_segs
 * @sectors:  [in,out] Number of sectors in the bio being built. Incremented
 *            by the number of sectors from @bv that may be appended to that
 *            bio without exceeding @max_sectors
 * @max_segs: [in] upper bound for *@nsegs
 * @max_sectors: [in] upper bound for *@sectors
 *
 * When splitting a bio, it can happen that a bvec is encountered that is too
 * big to fit in a single segment and hence that it has to be split in the
 * middle. This function verifies whether or not that should happen. The value
 * %true is returned if and only if appending the entire @bv to a bio with
 * *@nsegs segments and *@sectors sectors would make that bio unacceptable for
 * the block driver.
 */
static bool bvec_split_segs(const struct request_queue *q,
                            const struct bio_vec *bv, unsigned *nsegs,
                            unsigned *sectors, unsigned max_segs,
                            unsigned max_sectors)
{
        unsigned max_len = (min(max_sectors, UINT_MAX >> 9) - *sectors) << 9;
        unsigned len = min(bv->bv_len, max_len);
        unsigned total_len = 0;
        unsigned seg_size = 0;

        while (len && *nsegs < max_segs) {
                seg_size = get_max_segment_size(q, bv->bv_page,
                                                bv->bv_offset + total_len);
                seg_size = min(seg_size, len);

                (*nsegs)++;
                total_len += seg_size;
                len -= seg_size;

                if ((bv->bv_offset + total_len) & queue_virt_boundary(q))
                        break;
        }

        *sectors += total_len >> 9;

        /* tell the caller to split the bvec if it is too big to fit */
        return len > 0 || bv->bv_len > max_len;
}

/**
 * blk_bio_segment_split - split a bio in two bios
 * @q:    [in] request queue pointer
 * @bio:  [in] bio to be split
 * @bs:   [in] bio set to allocate the clone from
 * @segs: [out] number of segments in the bio with the first half of the sectors
 *
 * Clone @bio, update the bi_iter of the clone to represent the first sectors
 * of @bio and update @bio->bi_iter to represent the remaining sectors. The
 * following is guaranteed for the cloned bio:
 * - That it has at most get_max_io_size(@q, @bio) sectors.
 * - That it has at most queue_max_segments(@q) segments.
 *
 * Except for discard requests the cloned bio will point at the bi_io_vec of
 * the original bio. It is the responsibility of the caller to ensure that the
 * original bio is not freed before the cloned bio. The caller is also
 * responsible for ensuring that @bs is only destroyed after processing of the
 * split bio has finished.
 */
static struct bio *blk_bio_segment_split(struct request_queue *q,
                                         struct bio *bio,
                                         struct bio_set *bs,
                                         unsigned *segs)
{
        struct bio_vec bv, bvprv, *bvprvp = NULL;
        struct bvec_iter iter;
        unsigned nsegs = 0, sectors = 0;
        const unsigned max_sectors = get_max_io_size(q, bio);
        const unsigned max_segs = queue_max_segments(q);

        bio_for_each_bvec(bv, bio, iter) {
                /*
                 * If the queue doesn't support SG gaps and adding this
                 * offset would create a gap, disallow it.
                 */
                if (bvprvp && bvec_gap_to_prev(q, bvprvp, bv.bv_offset))
                        goto split;

                if (nsegs < max_segs &&
                    sectors + (bv.bv_len >> 9) <= max_sectors &&
                    bv.bv_offset + bv.bv_len <= PAGE_SIZE) {
                        nsegs++;
                        sectors += bv.bv_len >> 9;
                } else if (bvec_split_segs(q, &bv, &nsegs, &sectors, max_segs,
                                           max_sectors)) {
                        goto split;
                }

                bvprv = bv;
                bvprvp = &bvprv;
        }

        *segs = nsegs;
        return NULL;
split:
        *segs = nsegs;
        return bio_split(bio, sectors, GFP_NOIO, bs);
}

/**
 * __blk_queue_split - split a bio and submit the second half
 * @bio:     [in, out] bio to be split
 * @nr_segs: [out] number of segments in the first bio
 *
 * Split a bio into two bios, chain the two bios, submit the second half and
 * store a pointer to the first half in *@bio. If the second bio is still too
 * big it will be split by a recursive call to this function. Since this
 * function may allocate a new bio from @bio->bi_disk->queue->bio_split, it is
 * the responsibility of the caller to ensure that
 * @bio->bi_disk->queue->bio_split is only released after processing of the
 * split bio has finished.
 */
void __blk_queue_split(struct bio **bio, unsigned int *nr_segs)
{
        struct request_queue *q = (*bio)->bi_disk->queue;
        struct bio *split = NULL;

        switch (bio_op(*bio)) {
        case REQ_OP_DISCARD:
        case REQ_OP_SECURE_ERASE:
                split = blk_bio_discard_split(q, *bio, &q->bio_split, nr_segs);
                break;
        case REQ_OP_WRITE_ZEROES:
                split = blk_bio_write_zeroes_split(q, *bio, &q->bio_split,
                                nr_segs);
                break;
        case REQ_OP_WRITE_SAME:
                split = blk_bio_write_same_split(q, *bio, &q->bio_split,
                                nr_segs);
                break;
        default:
                /*
                 * All drivers must accept single-segment bios that are <=
                 * PAGE_SIZE. This is a quick and dirty check that relies on
                 * the fact that bi_io_vec[0] is always valid if a bio has data.
                 * The check might lead to occasional false negatives when bios
                 * are cloned, but compared to the performance impact of cloned
                 * bios themselves the loop below doesn't matter anyway.
                 */
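                /*
                 * Illustrative example: on a queue without chunk_sectors, a
                 * single-page write from the page cache (one bvec with
                 * bv_offset + bv_len <= PAGE_SIZE) takes this fast path and
                 * is counted as one segment without entering the split code.
                 */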
                if (!q->limits.chunk_sectors &&
                    (*bio)->bi_vcnt == 1 &&
                    ((*bio)->bi_io_vec[0].bv_len +
                     (*bio)->bi_io_vec[0].bv_offset) <= PAGE_SIZE) {
                        *nr_segs = 1;
                        break;
                }
                split = blk_bio_segment_split(q, *bio, &q->bio_split, nr_segs);
                break;
        }

        if (split) {
                /* there is no chance to merge the split bio */
                split->bi_opf |= REQ_NOMERGE;

                bio_chain(split, *bio);
                trace_block_split(q, split, (*bio)->bi_iter.bi_sector);
                submit_bio_noacct(*bio);
                *bio = split;

                blk_throtl_charge_bio_split(*bio);
        }
}

/**
 * blk_queue_split - split a bio and submit the second half
 * @bio: [in, out] bio to be split
 *
 * Split a bio into two bios, chain the two bios, submit the second half and
 * store a pointer to the first half in *@bio. Since this function may allocate
 * a new bio from @bio->bi_disk->queue->bio_split, it is the responsibility of
 * the caller to ensure that @bio->bi_disk->queue->bio_split is only released
 * after processing of the split bio has finished.
 */
void blk_queue_split(struct bio **bio)
{
        unsigned int nr_segs;

        __blk_queue_split(bio, &nr_segs);
}
EXPORT_SYMBOL(blk_queue_split);
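
/*
 * Illustrative usage (a sketch, not code from this file): a bio-based driver
 * typically calls blk_queue_split() at the top of its submit path so that the
 * bio it goes on to handle never exceeds the queue limits, e.g.:
 *
 *	static blk_qc_t my_submit_bio(struct bio *bio)
 *	{
 *		blk_queue_split(&bio);
 *		... process the (possibly shortened) bio ...
 *		return BLK_QC_T_NONE;
 *	}
 */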

unsigned int blk_recalc_rq_segments(struct request *rq)
{
        unsigned int nr_phys_segs = 0;
        unsigned int nr_sectors = 0;
        struct req_iterator iter;
        struct bio_vec bv;

        if (!rq->bio)
                return 0;

        switch (bio_op(rq->bio)) {
        case REQ_OP_DISCARD:
        case REQ_OP_SECURE_ERASE:
                if (queue_max_discard_segments(rq->q) > 1) {
                        struct bio *bio = rq->bio;

                        for_each_bio(bio)
                                nr_phys_segs++;
                        return nr_phys_segs;
                }
                return 1;
        case REQ_OP_WRITE_ZEROES:
                return 0;
        case REQ_OP_WRITE_SAME:
                return 1;
        }

        rq_for_each_bvec(bv, rq, iter)
                bvec_split_segs(rq->q, &bv, &nr_phys_segs, &nr_sectors,
                                UINT_MAX, UINT_MAX);
        return nr_phys_segs;
}

static inline struct scatterlist *blk_next_sg(struct scatterlist **sg,
                struct scatterlist *sglist)
{
        if (!*sg)
                return sglist;

        /*
         * If the driver previously mapped a shorter list, we could see a
         * termination bit prematurely unless it fully inits the sg table
         * on each mapping. We KNOW that there must be more entries here
         * or the driver would be buggy, so force clear the termination bit
         * to avoid doing a full sg_init_table() in drivers for each command.
         */
        sg_unmark_end(*sg);
        return sg_next(*sg);
}

static unsigned blk_bvec_map_sg(struct request_queue *q,
                struct bio_vec *bvec, struct scatterlist *sglist,
                struct scatterlist **sg)
{
        unsigned nbytes = bvec->bv_len;
        unsigned nsegs = 0, total = 0;

        while (nbytes > 0) {
                unsigned offset = bvec->bv_offset + total;
                unsigned len = min(get_max_segment_size(q, bvec->bv_page,
                                        offset), nbytes);
                struct page *page = bvec->bv_page;

                /*
                 * Unfortunately a fair number of drivers barf on scatterlists
                 * that have an offset larger than PAGE_SIZE, despite other
                 * subsystems dealing with that invariant just fine. For now
                 * stick to the legacy format where we never present those from
                 * the block layer, but the code below should be removed once
                 * these offenders (mostly MMC/SD drivers) are fixed.
                 */
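                /*
                 * Illustrative example (hypothetical sizes): for a 12 KiB
                 * multi-page bvec starting 1 KiB into its first page,
                 * 'offset' exceeds PAGE_SIZE once 'total' has advanced far
                 * enough; the page pointer is then bumped by
                 * offset >> PAGE_SHIFT so each sg entry keeps its own offset
                 * below PAGE_SIZE.
                 */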
                page += (offset >> PAGE_SHIFT);
                offset &= ~PAGE_MASK;

                *sg = blk_next_sg(sg, sglist);
                sg_set_page(*sg, page, len, offset);

                total += len;
                nbytes -= len;
                nsegs++;
        }

        return nsegs;
}

static inline int __blk_bvec_map_sg(struct bio_vec bv,
                struct scatterlist *sglist, struct scatterlist **sg)
{
        *sg = blk_next_sg(sg, sglist);
        sg_set_page(*sg, bv.bv_page, bv.bv_len, bv.bv_offset);
        return 1;
}

/* only try to merge bvecs into one sg if they are from two bios */
static inline bool
__blk_segment_map_sg_merge(struct request_queue *q, struct bio_vec *bvec,
                           struct bio_vec *bvprv, struct scatterlist **sg)
{
        int nbytes = bvec->bv_len;

        if (!*sg)
                return false;

        if ((*sg)->length + nbytes > queue_max_segment_size(q))
                return false;

        if (!biovec_phys_mergeable(q, bvprv, bvec))
                return false;

        (*sg)->length += nbytes;

        return true;
}

static int __blk_bios_map_sg(struct request_queue *q, struct bio *bio,
                             struct scatterlist *sglist,
                             struct scatterlist **sg)
{
        struct bio_vec bvec, bvprv = { NULL };
        struct bvec_iter iter;
        int nsegs = 0;
        bool new_bio = false;

        for_each_bio(bio) {
                bio_for_each_bvec(bvec, bio, iter) {
                        /*
                         * Only try to merge bvecs from two different bios,
                         * since bvec-internal merging was already done when
                         * pages were added to the bio.
                         */
                        if (new_bio &&
                            __blk_segment_map_sg_merge(q, &bvec, &bvprv, sg))
                                goto next_bvec;

                        if (bvec.bv_offset + bvec.bv_len <= PAGE_SIZE)
                                nsegs += __blk_bvec_map_sg(bvec, sglist, sg);
                        else
                                nsegs += blk_bvec_map_sg(q, &bvec, sglist, sg);
next_bvec:
                        new_bio = false;
                }
                if (likely(bio->bi_iter.bi_size)) {
                        bvprv = bvec;
                        new_bio = true;
                }
        }

        return nsegs;
}

/*
 * Map a request to a scatterlist and return the number of sg entries set up.
 * The caller must make sure that sg can hold rq->nr_phys_segments entries.
 */
int __blk_rq_map_sg(struct request_queue *q, struct request *rq,
                struct scatterlist *sglist, struct scatterlist **last_sg)
{
        int nsegs = 0;

        if (rq->rq_flags & RQF_SPECIAL_PAYLOAD)
                nsegs = __blk_bvec_map_sg(rq->special_vec, sglist, last_sg);
        else if (rq->bio && bio_op(rq->bio) == REQ_OP_WRITE_SAME)
                nsegs = __blk_bvec_map_sg(bio_iovec(rq->bio), sglist, last_sg);
        else if (rq->bio)
                nsegs = __blk_bios_map_sg(q, rq->bio, sglist, last_sg);

        if (*last_sg)
                sg_mark_end(*last_sg);

        /*
         * Something must have gone wrong if the computed number of segments
         * is bigger than the request's number of physical segments.
         */
        WARN_ON(nsegs > blk_rq_nr_phys_segments(rq));

        return nsegs;
}
EXPORT_SYMBOL(__blk_rq_map_sg);
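
/*
 * Illustrative usage (a sketch, not code from this file): drivers normally go
 * through the blk_rq_map_sg() wrapper when building a command, e.g.:
 *
 *	sg_init_table(sgl, queue_max_segments(q));
 *	nents = blk_rq_map_sg(q, rq, sgl);
 *
 * after which the first 'nents' entries of the driver-provided 'sgl' (sized
 * for at least blk_rq_nr_phys_segments(rq) entries) are filled in and
 * terminated.
 */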

static inline unsigned int blk_rq_get_max_segments(struct request *rq)
{
        if (req_op(rq) == REQ_OP_DISCARD)
                return queue_max_discard_segments(rq->q);
        return queue_max_segments(rq->q);
}

static inline int ll_new_hw_segment(struct request *req, struct bio *bio,
                unsigned int nr_phys_segs)
{
        if (!blk_cgroup_mergeable(req, bio))
                goto no_merge;

        if (blk_integrity_merge_bio(req->q, req, bio) == false)
                goto no_merge;

        /* a discard request merge won't add a new segment */
        if (req_op(req) == REQ_OP_DISCARD)
                return 1;

        if (req->nr_phys_segments + nr_phys_segs > blk_rq_get_max_segments(req))
                goto no_merge;

        /*
         * This will form the start of a new hw segment. Bump both
         * counters.
         */
        req->nr_phys_segments += nr_phys_segs;
        return 1;

no_merge:
        req_set_nomerge(req->q, req);
        return 0;
}

int ll_back_merge_fn(struct request *req, struct bio *bio, unsigned int nr_segs)
{
        if (req_gap_back_merge(req, bio))
                return 0;
        if (blk_integrity_rq(req) &&
            integrity_req_gap_back_merge(req, bio))
                return 0;
        if (!bio_crypt_ctx_back_mergeable(req, bio))
                return 0;
        if (blk_rq_sectors(req) + bio_sectors(bio) >
            blk_rq_get_max_sectors(req, blk_rq_pos(req))) {
                req_set_nomerge(req->q, req);
                return 0;
        }

        return ll_new_hw_segment(req, bio, nr_segs);
}

static int ll_front_merge_fn(struct request *req, struct bio *bio,
                unsigned int nr_segs)
{
        if (req_gap_front_merge(req, bio))
                return 0;
        if (blk_integrity_rq(req) &&
            integrity_req_gap_front_merge(req, bio))
                return 0;
        if (!bio_crypt_ctx_front_mergeable(req, bio))
                return 0;
        if (blk_rq_sectors(req) + bio_sectors(bio) >
            blk_rq_get_max_sectors(req, bio->bi_iter.bi_sector)) {
                req_set_nomerge(req->q, req);
                return 0;
        }

        return ll_new_hw_segment(req, bio, nr_segs);
}

static bool req_attempt_discard_merge(struct request_queue *q, struct request *req,
                struct request *next)
{
        unsigned short segments = blk_rq_nr_discard_segments(req);

        if (segments >= queue_max_discard_segments(q))
                goto no_merge;
        if (blk_rq_sectors(req) + bio_sectors(next->bio) >
            blk_rq_get_max_sectors(req, blk_rq_pos(req)))
                goto no_merge;

        req->nr_phys_segments = segments + blk_rq_nr_discard_segments(next);
        return true;
no_merge:
        req_set_nomerge(q, req);
        return false;
}

static int ll_merge_requests_fn(struct request_queue *q, struct request *req,
                                struct request *next)
{
        int total_phys_segments;

        if (req_gap_back_merge(req, next->bio))
                return 0;

        /*
         * Will it become too large?
         */
        if ((blk_rq_sectors(req) + blk_rq_sectors(next)) >
            blk_rq_get_max_sectors(req, blk_rq_pos(req)))
                return 0;

        total_phys_segments = req->nr_phys_segments + next->nr_phys_segments;
        if (total_phys_segments > blk_rq_get_max_segments(req))
                return 0;

        if (!blk_cgroup_mergeable(req, next->bio))
                return 0;

        if (blk_integrity_merge_rq(q, req, next) == false)
                return 0;

        if (!bio_crypt_ctx_merge_rq(req, next))
                return 0;

        /* Merge is OK... */
        req->nr_phys_segments = total_phys_segments;
        return 1;
}

/**
 * blk_rq_set_mixed_merge - mark a request as mixed merge
 * @rq: request to mark as mixed merge
 *
 * Description:
 *     @rq is about to be mixed merged. Make sure the attributes
 *     which can be mixed are set in each bio and mark @rq as mixed
 *     merged.
 */
void blk_rq_set_mixed_merge(struct request *rq)
{
        unsigned int ff = rq->cmd_flags & REQ_FAILFAST_MASK;
        struct bio *bio;

        if (rq->rq_flags & RQF_MIXED_MERGE)
                return;

        /*
         * @rq will no longer represent mixable attributes for all the
         * contained bios. It will just track those of the first one.
         * Distribute the attributes to each bio.
         */
        for (bio = rq->bio; bio; bio = bio->bi_next) {
                WARN_ON_ONCE((bio->bi_opf & REQ_FAILFAST_MASK) &&
                             (bio->bi_opf & REQ_FAILFAST_MASK) != ff);
                bio->bi_opf |= ff;
        }
        rq->rq_flags |= RQF_MIXED_MERGE;
}

static void blk_account_io_merge_request(struct request *req)
{
        if (blk_do_io_stat(req)) {
                part_stat_lock();
                part_stat_inc(req->part, merges[op_stat_group(req_op(req))]);
                part_stat_unlock();

                hd_struct_put(req->part);
        }
}

static enum elv_merge blk_try_req_merge(struct request *req,
                                        struct request *next)
{
        if (blk_discard_mergable(req))
                return ELEVATOR_DISCARD_MERGE;
        else if (blk_rq_pos(req) + blk_rq_sectors(req) == blk_rq_pos(next))
                return ELEVATOR_BACK_MERGE;

        return ELEVATOR_NO_MERGE;
}

/*
 * For non-mq, this has to be called with the request spinlock acquired.
 * For mq with scheduling, the appropriate queue wide lock should be held.
 */
static struct request *attempt_merge(struct request_queue *q,
                                     struct request *req, struct request *next)
{
        if (!rq_mergeable(req) || !rq_mergeable(next))
                return NULL;

        if (req_op(req) != req_op(next))
                return NULL;

        if (rq_data_dir(req) != rq_data_dir(next)
            || req->rq_disk != next->rq_disk)
                return NULL;

        if (req_op(req) == REQ_OP_WRITE_SAME &&
            !blk_write_same_mergeable(req->bio, next->bio))
                return NULL;

        /*
         * Don't allow merging requests with different write hints, or
         * hinted with un-hinted I/O.
         */
        if (req->write_hint != next->write_hint)
                return NULL;

        if (req->ioprio != next->ioprio)
                return NULL;

        /*
         * If we are allowed to merge, then append bio list
         * from next to rq and release next. merge_requests_fn
         * will have updated segment counts, update sector
         * counts here. Handle DISCARDs separately, as they
         * have separate settings.
         */

        switch (blk_try_req_merge(req, next)) {
        case ELEVATOR_DISCARD_MERGE:
                if (!req_attempt_discard_merge(q, req, next))
                        return NULL;
                break;
        case ELEVATOR_BACK_MERGE:
                if (!ll_merge_requests_fn(q, req, next))
                        return NULL;
                break;
        default:
                return NULL;
        }

        /*
         * If failfast settings disagree or any of the two is already
         * a mixed merge, mark both as mixed before proceeding. This
         * makes sure that all involved bios have mixable attributes
         * set properly.
         */
        if (((req->rq_flags | next->rq_flags) & RQF_MIXED_MERGE) ||
            (req->cmd_flags & REQ_FAILFAST_MASK) !=
            (next->cmd_flags & REQ_FAILFAST_MASK)) {
                blk_rq_set_mixed_merge(req);
                blk_rq_set_mixed_merge(next);
        }

        /*
         * At this point we have either done a back merge or front merge. We
         * need the smaller start_time_ns of the merged requests to be the
         * current request for accounting purposes.
         */
        if (next->start_time_ns < req->start_time_ns)
                req->start_time_ns = next->start_time_ns;

        req->biotail->bi_next = next->bio;
        req->biotail = next->biotail;

        req->__data_len += blk_rq_bytes(next);

        if (!blk_discard_mergable(req))
                elv_merge_requests(q, req, next);

        /*
         * 'next' is going away, so update stats accordingly
         */
        blk_account_io_merge_request(next);

        trace_block_rq_merge(q, next);

^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 813) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 814) * Ownership of the bios has passed from 'next' to 'req';
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 815) * return 'next' for the caller to free.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 816) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 817) next->bio = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 818) return next;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 819) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 820)
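/*
* Ask the elevator for the request sorted directly after @rq and try to
* merge @rq with it. On success the emptied request is returned for the
* caller to free; NULL means no merge was done.
*/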
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 821) static struct request *attempt_back_merge(struct request_queue *q,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 822) struct request *rq)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 823) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 824) struct request *next = elv_latter_request(q, rq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 825)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 826) if (next)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 827) return attempt_merge(q, rq, next);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 828)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 829) return NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 830) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 831)
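/*
* Ask the elevator for the request sorted directly before @rq and try to
* merge @rq into it. On success the bios of @rq are moved to that earlier
* request and @rq itself is returned for the caller to free; NULL means
* no merge was done.
*/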
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 832) static struct request *attempt_front_merge(struct request_queue *q,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 833) struct request *rq)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 834) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 835) struct request *prev = elv_former_request(q, rq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 836)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 837) if (prev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 838) return attempt_merge(q, prev, rq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 839)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 840) return NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 841) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 842)
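/*
* Try to merge @next into @rq. On success the emptied request is released
* with blk_put_request() and 1 is returned; otherwise 0 is returned.
*/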
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 843) int blk_attempt_req_merge(struct request_queue *q, struct request *rq,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 844) struct request *next)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 845) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 846) struct request *free;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 847)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 848) free = attempt_merge(q, rq, next);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 849) if (free) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 850) blk_put_request(free);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 851) return 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 852) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 853)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 854) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 855) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 856)
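/*
* Check whether @bio may be merged into @rq at all: same operation type,
* data direction, disk, cgroup, integrity and crypto context, write hint
* and priority. Positional (sector) checks are left to blk_try_merge().
*/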
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 857) bool blk_rq_merge_ok(struct request *rq, struct bio *bio)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 858) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 859) if (!rq_mergeable(rq) || !bio_mergeable(bio))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 860) return false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 861)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 862) if (req_op(rq) != bio_op(bio))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 863) return false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 864)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 865) /* different data direction, don't merge */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 866) if (bio_data_dir(bio) != rq_data_dir(rq))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 867) return false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 868)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 869) /* must be same device */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 870) if (rq->rq_disk != bio->bi_disk)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 871) return false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 872)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 873) /* don't merge across cgroup boundaries */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 874) if (!blk_cgroup_mergeable(rq, bio))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 875) return false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 876)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 877) /* only merge an integrity protected bio into a likewise protected rq */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 878) if (!blk_integrity_merge_bio(rq->q, rq, bio))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 879) return false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 880)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 881) /* Only merge if the crypt contexts are compatible */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 882) if (!bio_crypt_rq_ctx_compatible(rq, bio))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 883) return false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 884)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 885) /* must be using the same buffer */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 886) if (req_op(rq) == REQ_OP_WRITE_SAME &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 887) !blk_write_same_mergeable(rq->bio, bio))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 888) return false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 889)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 890) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 891) * Don't allow merging IO with different write hints, or merging
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 892) * hinted IO with non-hinted IO.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 893) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 894) if (rq->write_hint != bio->bi_write_hint)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 895) return false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 896)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 897) if (rq->ioprio != bio_prio(bio))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 898) return false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 899)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 900) return true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 901) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 902)
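/*
* Decide how @bio could be merged into @rq from sector arithmetic alone.
* Illustrative example: a request at sector 0 spanning 8 sectors
* (blk_rq_pos() == 0, blk_rq_sectors() == 8) back-merges a bio starting at
* sector 8, while an 8-sector bio starting at sector 0 front-merges a
* request positioned at sector 8. Multi-range discard requests are matched
* first, independent of position.
*/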
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 903) enum elv_merge blk_try_merge(struct request *rq, struct bio *bio)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 904) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 905) if (blk_discard_mergable(rq))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 906) return ELEVATOR_DISCARD_MERGE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 907) else if (blk_rq_pos(rq) + blk_rq_sectors(rq) == bio->bi_iter.bi_sector)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 908) return ELEVATOR_BACK_MERGE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 909) else if (blk_rq_pos(rq) - bio_sectors(bio) == bio->bi_iter.bi_sector)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 910) return ELEVATOR_FRONT_MERGE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 911) return ELEVATOR_NO_MERGE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 912) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 913)
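/*
* Account a bio merged into an existing request: all that is needed here
* is to bump the per-partition merge counter for the request's operation
* type.
*/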
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 914) static void blk_account_io_merge_bio(struct request *req)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 915) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 916) if (!blk_do_io_stat(req))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 917) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 918)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 919) part_stat_lock();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 920) part_stat_inc(req->part, merges[op_stat_group(req_op(req))]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 921) part_stat_unlock();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 922) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 923)
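/*
* Outcome of trying to merge a bio into one specific request:
* BIO_MERGE_OK     - the bio was merged,
* BIO_MERGE_NONE   - the request is not a merge candidate, keep scanning,
* BIO_MERGE_FAILED - the request was a candidate but the queue limits
*                    prevented the merge; callers stop scanning on this.
*/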
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 924) enum bio_merge_status {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 925) BIO_MERGE_OK,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 926) BIO_MERGE_NONE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 927) BIO_MERGE_FAILED,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 928) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 929)
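/*
* Append @bio at the tail of @req once ll_back_merge_fn() has confirmed
* that the size and segment limits still hold. A mismatch in failfast
* flags turns @req into a mixed merge.
*/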
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 930) static enum bio_merge_status bio_attempt_back_merge(struct request *req,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 931) struct bio *bio, unsigned int nr_segs)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 932) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 933) const int ff = bio->bi_opf & REQ_FAILFAST_MASK;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 934)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 935) if (!ll_back_merge_fn(req, bio, nr_segs))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 936) return BIO_MERGE_FAILED;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 937)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 938) trace_block_bio_backmerge(req->q, req, bio);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 939) rq_qos_merge(req->q, req, bio);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 940)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 941) if ((req->cmd_flags & REQ_FAILFAST_MASK) != ff)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 942) blk_rq_set_mixed_merge(req);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 943)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 944) req->biotail->bi_next = bio;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 945) req->biotail = bio;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 946) req->__data_len += bio->bi_iter.bi_size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 947)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 948) bio_crypt_free_ctx(bio);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 949)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 950) blk_account_io_merge_bio(req);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 951) return BIO_MERGE_OK;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 952) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 953)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 954) static enum bio_merge_status bio_attempt_front_merge(struct request *req,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 955) struct bio *bio, unsigned int nr_segs)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 956) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 957) const int ff = bio->bi_opf & REQ_FAILFAST_MASK;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 958)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 959) if (!ll_front_merge_fn(req, bio, nr_segs))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 960) return BIO_MERGE_FAILED;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 961)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 962) trace_block_bio_frontmerge(req->q, req, bio);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 963) rq_qos_merge(req->q, req, bio);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 964)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 965) if ((req->cmd_flags & REQ_FAILFAST_MASK) != ff)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 966) blk_rq_set_mixed_merge(req);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 967)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 968) bio->bi_next = req->bio;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 969) req->bio = bio;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 970)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 971) req->__sector = bio->bi_iter.bi_sector;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 972) req->__data_len += bio->bi_iter.bi_size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 973)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 974) bio_crypt_do_front_merge(req, bio);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 975)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 976) blk_account_io_merge_bio(req);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 977) return BIO_MERGE_OK;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 978) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 979)
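/*
* Discard merges bypass the ll_* helpers: the bio simply becomes one more
* discard range of @req, bounded by the queue's discard segment count and
* the maximum request size.
*/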
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 980) static enum bio_merge_status bio_attempt_discard_merge(struct request_queue *q,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 981) struct request *req, struct bio *bio)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 982) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 983) unsigned short segments = blk_rq_nr_discard_segments(req);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 984)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 985) if (segments >= queue_max_discard_segments(q))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 986) goto no_merge;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 987) if (blk_rq_sectors(req) + bio_sectors(bio) >
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 988) blk_rq_get_max_sectors(req, blk_rq_pos(req)))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 989) goto no_merge;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 990)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 991) rq_qos_merge(q, req, bio);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 992)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 993) req->biotail->bi_next = bio;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 994) req->biotail = bio;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 995) req->__data_len += bio->bi_iter.bi_size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 996) req->nr_phys_segments = segments + 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 997)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 998) blk_account_io_merge_bio(req);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 999) return BIO_MERGE_OK;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1000) no_merge:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1001) req_set_nomerge(q, req);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1002) return BIO_MERGE_FAILED;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1003) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1004)
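/*
* Try to merge @bio into @rq. @sched_allow_merge selects whether the I/O
* scheduler gets a veto via blk_mq_sched_allow_merge(); plug-list merging
* passes false because plugged requests are not owned by the elevator yet.
*/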
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1005) static enum bio_merge_status blk_attempt_bio_merge(struct request_queue *q,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1006) struct request *rq,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1007) struct bio *bio,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1008) unsigned int nr_segs,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1009) bool sched_allow_merge)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1010) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1011) if (!blk_rq_merge_ok(rq, bio))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1012) return BIO_MERGE_NONE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1013)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1014) switch (blk_try_merge(rq, bio)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1015) case ELEVATOR_BACK_MERGE:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1016) if (!sched_allow_merge || blk_mq_sched_allow_merge(q, rq, bio))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1017) return bio_attempt_back_merge(rq, bio, nr_segs);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1018) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1019) case ELEVATOR_FRONT_MERGE:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1020) if (!sched_allow_merge || blk_mq_sched_allow_merge(q, rq, bio))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1021) return bio_attempt_front_merge(rq, bio, nr_segs);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1022) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1023) case ELEVATOR_DISCARD_MERGE:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1024) return bio_attempt_discard_merge(q, rq, bio);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1025) default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1026) return BIO_MERGE_NONE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1027) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1028)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1029) return BIO_MERGE_FAILED;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1030) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1031)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1032) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1033) * blk_attempt_plug_merge - try to merge with %current's plugged list
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1034) * @q: request_queue new bio is being queued at
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1035) * @bio: new bio being queued
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1036) * @nr_segs: number of segments in @bio
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1037) * @same_queue_rq: pointer to &struct request that gets filled in when
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1038) * another request associated with @q is found on the plug list
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1039) * (optional, may be %NULL)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1040) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1041) * Determine whether @bio being queued on @q can be merged with a request
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1042) * on %current's plugged list. Returns %true if merge was successful,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1043) * otherwise %false.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1044) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1045) * Plugging coalesces IOs from the same issuer for the same purpose without
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1046) * going through @q->queue_lock. As such it's more of an issuing mechanism
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1047) * than scheduling, and the request, while it may have elvpriv data, is
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1048) * not added to the elevator at this point. In addition, we don't have
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1049) * reliable access to the elevator outside the queue lock. Only check
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1050) * basic merging parameters without querying the elevator.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1051) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1052) * Caller must ensure !blk_queue_nomerges(q) beforehand.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1053) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1054) bool blk_attempt_plug_merge(struct request_queue *q, struct bio *bio,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1055) unsigned int nr_segs, struct request **same_queue_rq)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1056) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1057) struct blk_plug *plug;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1058) struct request *rq;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1059) struct list_head *plug_list;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1060)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1061) plug = blk_mq_plug(q, bio);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1062) if (!plug)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1063) return false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1064)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1065) plug_list = &plug->mq_list;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1066)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1067) list_for_each_entry_reverse(rq, plug_list, queuelist) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1068) if (rq->q == q && same_queue_rq) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1069) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1070) * Only the blk-mq multiple-hardware-queues case checks for
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1071) * an rq on the same queue; there should be only one such
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1072) * rq on the plug list.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1073) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1074) *same_queue_rq = rq;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1075) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1076)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1077) if (rq->q != q)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1078) continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1079)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1080) if (blk_attempt_bio_merge(q, rq, bio, nr_segs, false) ==
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1081) BIO_MERGE_OK)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1082) return true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1083) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1084)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1085) return false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1086) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1087)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1088) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1089) * Iterate the list of requests in reverse and see if this bio can be
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1090) * merged with any of them; only the last few are checked to keep the scan cheap.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1091) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1092) bool blk_bio_list_merge(struct request_queue *q, struct list_head *list,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1093) struct bio *bio, unsigned int nr_segs)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1094) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1095) struct request *rq;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1096) int checked = 8;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1097)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1098) list_for_each_entry_reverse(rq, list, queuelist) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1099) if (!checked--)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1100) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1101)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1102) switch (blk_attempt_bio_merge(q, rq, bio, nr_segs, true)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1103) case BIO_MERGE_NONE:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1104) continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1105) case BIO_MERGE_OK:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1106) return true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1107) case BIO_MERGE_FAILED:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1108) return false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1109) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1111) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1112)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1113) return false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1114) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1115) EXPORT_SYMBOL_GPL(blk_bio_list_merge);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1116)
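/*
* Try to merge @bio into a request chosen by the elevator via elv_merge().
* After a successful back or front bio merge this also tries to cascade a
* request-request merge with the neighbouring request; the request emptied
* by that cascade is handed back in @merged_request and must be released by
* the caller. A sketch of how an I/O scheduler's ->bio_merge() hook might
* use this (illustrative only, loosely based on the mq-deadline pattern;
* 'lock' stands for whatever protects the scheduler's internal lists):
*
*	struct request *free = NULL;
*	bool ret;
*
*	spin_lock(&lock);
*	ret = blk_mq_sched_try_merge(q, bio, nr_segs, &free);
*	spin_unlock(&lock);
*	if (free)
*		blk_mq_free_request(free);
*	return ret;
*/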
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1117) bool blk_mq_sched_try_merge(struct request_queue *q, struct bio *bio,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1118) unsigned int nr_segs, struct request **merged_request)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1119) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1120) struct request *rq;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1121)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1122) switch (elv_merge(q, &rq, bio)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1123) case ELEVATOR_BACK_MERGE:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1124) if (!blk_mq_sched_allow_merge(q, rq, bio))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1125) return false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1126) if (bio_attempt_back_merge(rq, bio, nr_segs) != BIO_MERGE_OK)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1127) return false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1128) *merged_request = attempt_back_merge(q, rq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1129) if (!*merged_request)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1130) elv_merged_request(q, rq, ELEVATOR_BACK_MERGE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1131) return true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1132) case ELEVATOR_FRONT_MERGE:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1133) if (!blk_mq_sched_allow_merge(q, rq, bio))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1134) return false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1135) if (bio_attempt_front_merge(rq, bio, nr_segs) != BIO_MERGE_OK)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1136) return false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1137) *merged_request = attempt_front_merge(q, rq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1138) if (!*merged_request)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1139) elv_merged_request(q, rq, ELEVATOR_FRONT_MERGE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1140) return true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1141) case ELEVATOR_DISCARD_MERGE:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1142) return bio_attempt_discard_merge(q, rq, bio) == BIO_MERGE_OK;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1143) default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1144) return false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1145) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1146) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1147) EXPORT_SYMBOL_GPL(blk_mq_sched_try_merge);