Orange Pi 5 kernel

Deprecated Linux kernel 5.10.110 for the Orange Pi 5/5B/5+ boards

// SPDX-License-Identifier: GPL-2.0
/*
 * Generic block layer helper functions
 */
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/bio.h>
#include <linux/blkdev.h>
#include <linux/scatterlist.h>

#include "blk.h"

struct bio *blk_next_bio(struct bio *bio, unsigned int nr_pages, gfp_t gfp)
{
	struct bio *new = bio_alloc(gfp, nr_pages);

	if (bio) {
		bio_chain(bio, new);
		submit_bio(bio);
	}

	return new;
}
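
/*
 * Usage sketch (illustrative only; the loop condition is hypothetical): the
 * helpers below call blk_next_bio() once per chunk, so each filled bio gets
 * chained to a fresh one and submitted immediately, while only the newest bio
 * stays open for the caller:
 *
 *	struct bio *bio = NULL;
 *
 *	while (nr_sects) {
 *		bio = blk_next_bio(bio, 0, GFP_KERNEL);
 *		bio->bi_iter.bi_sector = sector;
 *		bio_set_dev(bio, bdev);
 *		// ... set the op and bi_size, advance sector/nr_sects
 *	}
 *	// the last bio is submitted (and waited on) by the caller
 */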

int __blkdev_issue_discard(struct block_device *bdev, sector_t sector,
		sector_t nr_sects, gfp_t gfp_mask, int flags,
		struct bio **biop)
{
	struct request_queue *q = bdev_get_queue(bdev);
	struct bio *bio = *biop;
	unsigned int op;
	sector_t bs_mask, part_offset = 0;

	if (!q)
		return -ENXIO;

	if (bdev_read_only(bdev))
		return -EPERM;

	if (flags & BLKDEV_DISCARD_SECURE) {
		if (!blk_queue_secure_erase(q))
			return -EOPNOTSUPP;
		op = REQ_OP_SECURE_ERASE;
	} else {
		if (!blk_queue_discard(q))
			return -EOPNOTSUPP;
		op = REQ_OP_DISCARD;
	}

	/* In case the discard granularity isn't set by a buggy device driver */
	if (WARN_ON_ONCE(!q->limits.discard_granularity)) {
		char dev_name[BDEVNAME_SIZE];

		bdevname(bdev, dev_name);
		pr_err_ratelimited("%s: Error: discard_granularity is 0.\n", dev_name);
		return -EOPNOTSUPP;
	}

	bs_mask = (bdev_logical_block_size(bdev) >> 9) - 1;
	if ((sector | nr_sects) & bs_mask)
		return -EINVAL;

	if (!nr_sects)
		return -EINVAL;

	/* In case the discard request is in a partition */
	if (bdev_is_partition(bdev))
		part_offset = bdev->bd_part->start_sect;

	while (nr_sects) {
		sector_t granularity_aligned_lba, req_sects;
		sector_t sector_mapped = sector + part_offset;

		granularity_aligned_lba = round_up(sector_mapped,
				q->limits.discard_granularity >> SECTOR_SHIFT);

		/*
		 * Check whether the discard bio starts at a discard_granularity-
		 * aligned LBA:
		 * - If no: set (granularity_aligned_lba - sector_mapped) as the
		 *   bi_size of the first split bio, so that the second bio
		 *   starts at a discard_granularity-aligned LBA on the device.
		 * - If yes: use bio_aligned_discard_max_sectors() as the max
		 *   possible bi_size of the first split bio. Then when this bio
		 *   is split in the device driver, the resulting bios are very
		 *   likely to be aligned to the discard_granularity of the
		 *   device's queue.
		 */
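		/*
		 * Worked example (hypothetical numbers): with a 1 MiB
		 * discard_granularity (2048 sectors) and sector_mapped = 1000,
		 * round_up() yields granularity_aligned_lba = 2048, so the
		 * first bio covers the 1048 leading sectors and every later
		 * bio starts on a 2048-sector boundary.
		 */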
		if (granularity_aligned_lba == sector_mapped)
			req_sects = min_t(sector_t, nr_sects,
					  bio_aligned_discard_max_sectors(q));
		else
			req_sects = min_t(sector_t, nr_sects,
					  granularity_aligned_lba - sector_mapped);

		WARN_ON_ONCE((req_sects << 9) > UINT_MAX);

		bio = blk_next_bio(bio, 0, gfp_mask);
		bio->bi_iter.bi_sector = sector;
		bio_set_dev(bio, bdev);
		bio_set_op_attrs(bio, op, 0);

		bio->bi_iter.bi_size = req_sects << 9;
		sector += req_sects;
		nr_sects -= req_sects;

		/*
		 * We can loop for a long time in here, if someone does
		 * full device discards (like mkfs). Be nice and allow
		 * us to schedule out to avoid softlocking if preempt
		 * is disabled.
		 */
		cond_resched();
	}

	*biop = bio;
	return 0;
}
EXPORT_SYMBOL(__blkdev_issue_discard);

/**
 * blkdev_issue_discard - queue a discard
 * @bdev:	blockdev to issue discard for
 * @sector:	start sector
 * @nr_sects:	number of sectors to discard
 * @gfp_mask:	memory allocation flags (for bio_alloc)
 * @flags:	BLKDEV_DISCARD_* flags to control behaviour
 *
 * Description:
 *    Issue a discard request for the sectors in question.
 */
int blkdev_issue_discard(struct block_device *bdev, sector_t sector,
		sector_t nr_sects, gfp_t gfp_mask, unsigned long flags)
{
	struct bio *bio = NULL;
	struct blk_plug plug;
	int ret;

	blk_start_plug(&plug);
	ret = __blkdev_issue_discard(bdev, sector, nr_sects, gfp_mask, flags,
			&bio);
	if (!ret && bio) {
		ret = submit_bio_wait(bio);
		if (ret == -EOPNOTSUPP)
			ret = 0;
		bio_put(bio);
	}
	blk_finish_plug(&plug);

	return ret;
}
EXPORT_SYMBOL(blkdev_issue_discard);
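
/*
 * Example caller (a minimal sketch; discard_range() is hypothetical, and the
 * byte offset/length are assumed to be logical-block aligned). Converting
 * bytes to 512-byte sectors and issuing a synchronous discard:
 *
 *	static int discard_range(struct block_device *bdev, loff_t start,
 *				 loff_t len)
 *	{
 *		return blkdev_issue_discard(bdev, start >> SECTOR_SHIFT,
 *					    len >> SECTOR_SHIFT, GFP_KERNEL, 0);
 *	}
 */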

/**
 * __blkdev_issue_write_same - generate a number of bios with the same page
 * @bdev:	target blockdev
 * @sector:	start sector
 * @nr_sects:	number of sectors to write
 * @gfp_mask:	memory allocation flags (for bio_alloc)
 * @page:	page containing data to write
 * @biop:	pointer to anchor bio
 *
 * Description:
 *  Generate and issue a number of REQ_OP_WRITE_SAME bios, all of which write
 *  the same page.
 */
static int __blkdev_issue_write_same(struct block_device *bdev, sector_t sector,
		sector_t nr_sects, gfp_t gfp_mask, struct page *page,
		struct bio **biop)
{
	struct request_queue *q = bdev_get_queue(bdev);
	unsigned int max_write_same_sectors;
	struct bio *bio = *biop;
	sector_t bs_mask;

	if (!q)
		return -ENXIO;

	if (bdev_read_only(bdev))
		return -EPERM;

	bs_mask = (bdev_logical_block_size(bdev) >> 9) - 1;
	if ((sector | nr_sects) & bs_mask)
		return -EINVAL;

	if (!bdev_write_same(bdev))
		return -EOPNOTSUPP;

	/* Ensure that max_write_same_sectors doesn't overflow bi_size */
	max_write_same_sectors = bio_allowed_max_sectors(q);

	while (nr_sects) {
		bio = blk_next_bio(bio, 1, gfp_mask);
		bio->bi_iter.bi_sector = sector;
		bio_set_dev(bio, bdev);
		bio->bi_vcnt = 1;
		bio->bi_io_vec->bv_page = page;
		bio->bi_io_vec->bv_offset = 0;
		bio->bi_io_vec->bv_len = bdev_logical_block_size(bdev);
		bio_set_op_attrs(bio, REQ_OP_WRITE_SAME, 0);

		if (nr_sects > max_write_same_sectors) {
			bio->bi_iter.bi_size = max_write_same_sectors << 9;
			nr_sects -= max_write_same_sectors;
			sector += max_write_same_sectors;
		} else {
			bio->bi_iter.bi_size = nr_sects << 9;
			nr_sects = 0;
		}
		cond_resched();
	}

	*biop = bio;
	return 0;
}

/**
 * blkdev_issue_write_same - queue a write same operation
 * @bdev:	target blockdev
 * @sector:	start sector
 * @nr_sects:	number of sectors to write
 * @gfp_mask:	memory allocation flags (for bio_alloc)
 * @page:	page containing data
 *
 * Description:
 *    Issue a write same request for the sectors in question.
 */
int blkdev_issue_write_same(struct block_device *bdev, sector_t sector,
				sector_t nr_sects, gfp_t gfp_mask,
				struct page *page)
{
	struct bio *bio = NULL;
	struct blk_plug plug;
	int ret;

	blk_start_plug(&plug);
	ret = __blkdev_issue_write_same(bdev, sector, nr_sects, gfp_mask, page,
			&bio);
	if (ret == 0 && bio) {
		ret = submit_bio_wait(bio);
		bio_put(bio);
	}
	blk_finish_plug(&plug);
	return ret;
}
EXPORT_SYMBOL(blkdev_issue_write_same);
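
/*
 * Example caller (a minimal sketch): zeroing a sector range with WRITE SAME by
 * replicating the shared zero page. This assumes the device advertises
 * write-same support; otherwise -EOPNOTSUPP is returned:
 *
 *	int ret = blkdev_issue_write_same(bdev, sector, nr_sects, GFP_KERNEL,
 *					  ZERO_PAGE(0));
 */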

static int __blkdev_issue_write_zeroes(struct block_device *bdev,
		sector_t sector, sector_t nr_sects, gfp_t gfp_mask,
		struct bio **biop, unsigned flags)
{
	struct bio *bio = *biop;
	unsigned int max_write_zeroes_sectors;
	struct request_queue *q = bdev_get_queue(bdev);

	if (!q)
		return -ENXIO;

	if (bdev_read_only(bdev))
		return -EPERM;

	/* Ensure that max_write_zeroes_sectors doesn't overflow bi_size */
	max_write_zeroes_sectors = bdev_write_zeroes_sectors(bdev);

	if (max_write_zeroes_sectors == 0)
		return -EOPNOTSUPP;

	while (nr_sects) {
		bio = blk_next_bio(bio, 0, gfp_mask);
		bio->bi_iter.bi_sector = sector;
		bio_set_dev(bio, bdev);
		bio->bi_opf = REQ_OP_WRITE_ZEROES;
		if (flags & BLKDEV_ZERO_NOUNMAP)
			bio->bi_opf |= REQ_NOUNMAP;

		if (nr_sects > max_write_zeroes_sectors) {
			bio->bi_iter.bi_size = max_write_zeroes_sectors << 9;
			nr_sects -= max_write_zeroes_sectors;
			sector += max_write_zeroes_sectors;
		} else {
			bio->bi_iter.bi_size = nr_sects << 9;
			nr_sects = 0;
		}
		cond_resched();
	}

	*biop = bio;
	return 0;
}
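
/*
 * Splitting sketch (hypothetical numbers): with max_write_zeroes_sectors =
 * 65535 and nr_sects = 200000, the loop above emits bios of 65535, 65535,
 * 65535 and 3395 sectors, chaining each one via blk_next_bio().
 */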

/*
 * Convert a number of 512B sectors to a number of pages.
 * The result is limited to a number of pages that can fit into a BIO.
 * Also make sure that the result is always at least 1 (page) for the cases
 * where nr_sects is lower than the number of sectors in a page.
 */
static unsigned int __blkdev_sectors_to_bio_pages(sector_t nr_sects)
{
	sector_t pages = DIV_ROUND_UP_SECTOR_T(nr_sects, PAGE_SIZE / 512);

	return min(pages, (sector_t)BIO_MAX_PAGES);
}
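
/*
 * Worked example (assuming 4 KiB pages, i.e. 8 sectors per page):
 * nr_sects = 20 rounds up to 3 pages, nr_sects = 1 still yields 1 page, and
 * anything above BIO_MAX_PAGES * 8 = 2048 sectors is capped at
 * BIO_MAX_PAGES (256 here).
 */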

static int __blkdev_issue_zero_pages(struct block_device *bdev,
		sector_t sector, sector_t nr_sects, gfp_t gfp_mask,
		struct bio **biop)
{
	struct request_queue *q = bdev_get_queue(bdev);
	struct bio *bio = *biop;
	int bi_size = 0;
	unsigned int sz;

	if (!q)
		return -ENXIO;

	if (bdev_read_only(bdev))
		return -EPERM;

	while (nr_sects != 0) {
		bio = blk_next_bio(bio, __blkdev_sectors_to_bio_pages(nr_sects),
				   gfp_mask);
		bio->bi_iter.bi_sector = sector;
		bio_set_dev(bio, bdev);
		bio_set_op_attrs(bio, REQ_OP_WRITE, 0);

		while (nr_sects != 0) {
			sz = min((sector_t) PAGE_SIZE, nr_sects << 9);
			bi_size = bio_add_page(bio, ZERO_PAGE(0), sz, 0);
			nr_sects -= bi_size >> 9;
			sector += bi_size >> 9;
			if (bi_size < sz)
				break;
		}
		cond_resched();
	}

	*biop = bio;
	return 0;
}
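
/*
 * Note on the inner loop above: bio_add_page() returns the number of bytes it
 * actually added, so once a bio fills up (bi_size < sz) the inner loop breaks
 * and the outer loop chains a fresh bio for the remaining sectors.
 */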

/**
 * __blkdev_issue_zeroout - generate a number of zero-filled write bios
 * @bdev:	blockdev to issue against
 * @sector:	start sector
 * @nr_sects:	number of sectors to write
 * @gfp_mask:	memory allocation flags (for bio_alloc)
 * @biop:	pointer to anchor bio
 * @flags:	controls detailed behavior
 *
 * Description:
 *  Zero-fill a block range, either using hardware offload or by explicitly
 *  writing zeroes to the device.
 *
 *  If a device is using logical block provisioning, the underlying space will
 *  not be released if %flags contains BLKDEV_ZERO_NOUNMAP.
 *
 *  If %flags contains BLKDEV_ZERO_NOFALLBACK, the function will return
 *  -EOPNOTSUPP if no explicit hardware offload for zeroing is provided.
 */
int __blkdev_issue_zeroout(struct block_device *bdev, sector_t sector,
		sector_t nr_sects, gfp_t gfp_mask, struct bio **biop,
		unsigned flags)
{
	int ret;
	sector_t bs_mask;

	bs_mask = (bdev_logical_block_size(bdev) >> 9) - 1;
	if ((sector | nr_sects) & bs_mask)
		return -EINVAL;

	ret = __blkdev_issue_write_zeroes(bdev, sector, nr_sects, gfp_mask,
			biop, flags);
	if (ret != -EOPNOTSUPP || (flags & BLKDEV_ZERO_NOFALLBACK))
		return ret;

	return __blkdev_issue_zero_pages(bdev, sector, nr_sects, gfp_mask,
					 biop);
}
EXPORT_SYMBOL(__blkdev_issue_zeroout);
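
/*
 * Example caller (a minimal sketch; zero_two_ranges() is hypothetical): the
 * anchor-bio interface lets a caller batch several ranges into one bio chain
 * and wait for it once:
 *
 *	static int zero_two_ranges(struct block_device *bdev)
 *	{
 *		struct bio *bio = NULL;
 *		int ret;
 *
 *		ret = __blkdev_issue_zeroout(bdev, 0, 8, GFP_KERNEL, &bio, 0);
 *		if (!ret)
 *			ret = __blkdev_issue_zeroout(bdev, 1024, 8, GFP_KERNEL,
 *						     &bio, 0);
 *		if (!ret && bio) {
 *			ret = submit_bio_wait(bio);
 *			bio_put(bio);
 *		}
 *		return ret;
 *	}
 */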

/**
 * blkdev_issue_zeroout - zero-fill a block range
 * @bdev:	blockdev to write
 * @sector:	start sector
 * @nr_sects:	number of sectors to write
 * @gfp_mask:	memory allocation flags (for bio_alloc)
 * @flags:	controls detailed behavior
 *
 * Description:
 *  Zero-fill a block range, either using hardware offload or by explicitly
 *  writing zeroes to the device.  See __blkdev_issue_zeroout() for the
 *  valid values for %flags.
 */
int blkdev_issue_zeroout(struct block_device *bdev, sector_t sector,
		sector_t nr_sects, gfp_t gfp_mask, unsigned flags)
{
	int ret = 0;
	sector_t bs_mask;
	struct bio *bio;
	struct blk_plug plug;
	bool try_write_zeroes = !!bdev_write_zeroes_sectors(bdev);

	bs_mask = (bdev_logical_block_size(bdev) >> 9) - 1;
	if ((sector | nr_sects) & bs_mask)
		return -EINVAL;

retry:
	bio = NULL;
	blk_start_plug(&plug);
	if (try_write_zeroes) {
		ret = __blkdev_issue_write_zeroes(bdev, sector, nr_sects,
						  gfp_mask, &bio, flags);
	} else if (!(flags & BLKDEV_ZERO_NOFALLBACK)) {
		ret = __blkdev_issue_zero_pages(bdev, sector, nr_sects,
						gfp_mask, &bio);
	} else {
		/* No zeroing offload support */
		ret = -EOPNOTSUPP;
	}
	if (ret == 0 && bio) {
		ret = submit_bio_wait(bio);
		bio_put(bio);
	}
	blk_finish_plug(&plug);
	if (ret && try_write_zeroes) {
		if (!(flags & BLKDEV_ZERO_NOFALLBACK)) {
			try_write_zeroes = false;
			goto retry;
		}
		if (!bdev_write_zeroes_sectors(bdev)) {
			/*
			 * Zeroing offload support was indicated, but the
			 * device reported ILLEGAL REQUEST (for some devices
			 * there is no non-destructive way to verify whether
			 * WRITE ZEROES is actually supported).
			 */
			ret = -EOPNOTSUPP;
		}
	}

	return ret;
}
EXPORT_SYMBOL(blkdev_issue_zeroout);
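
/*
 * Example caller (a minimal sketch): zero a range only when the device can
 * offload it, declining the explicit-write fallback:
 *
 *	int ret = blkdev_issue_zeroout(bdev, sector, nr_sects, GFP_KERNEL,
 *				       BLKDEV_ZERO_NOFALLBACK);
 *
 * A return of -EOPNOTSUPP then means no usable offload, and the caller may
 * fall back to writing zeroes itself.
 */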