// SPDX-License-Identifier: GPL-2.0
/*
 * Functions related to setting various queue properties from drivers
 */
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/bio.h>
#include <linux/blkdev.h>
#include <linux/memblock.h>	/* for max_pfn/max_low_pfn */
#include <linux/gcd.h>
#include <linux/lcm.h>
#include <linux/jiffies.h>
#include <linux/gfp.h>
#include <linux/dma-mapping.h>

#include "blk.h"
#include "blk-wbt.h"

unsigned long blk_max_low_pfn;
EXPORT_SYMBOL(blk_max_low_pfn);

unsigned long blk_max_pfn;

void blk_queue_rq_timeout(struct request_queue *q, unsigned int timeout)
{
        q->rq_timeout = timeout;
}
EXPORT_SYMBOL_GPL(blk_queue_rq_timeout);
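
/*
 * Usage sketch (illustrative only, not part of the original file): a
 * hypothetical driver giving its requests a 30 second timeout while setting
 * up its queue.  The timeout argument is in jiffies, hence the
 * multiplication by HZ; the names mydrv_dev and mydrv_init_queue are made
 * up for this example.
 *
 *	static void mydrv_init_queue(struct mydrv_dev *dev,
 *				     struct request_queue *q)
 *	{
 *		blk_queue_rq_timeout(q, 30 * HZ);
 *	}
 */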

/**
 * blk_set_default_limits - reset limits to default values
 * @lim: the queue_limits structure to reset
 *
 * Description:
 * Returns a queue_limits struct to its default state.
 */
void blk_set_default_limits(struct queue_limits *lim)
{
        lim->max_segments = BLK_MAX_SEGMENTS;
        lim->max_discard_segments = 1;
        lim->max_integrity_segments = 0;
        lim->seg_boundary_mask = BLK_SEG_BOUNDARY_MASK;
        lim->virt_boundary_mask = 0;
        lim->max_segment_size = BLK_MAX_SEGMENT_SIZE;
        lim->max_sectors = lim->max_hw_sectors = BLK_SAFE_MAX_SECTORS;
        lim->max_dev_sectors = 0;
        lim->chunk_sectors = 0;
        lim->max_write_same_sectors = 0;
        lim->max_write_zeroes_sectors = 0;
        lim->max_zone_append_sectors = 0;
        lim->max_discard_sectors = 0;
        lim->max_hw_discard_sectors = 0;
        lim->discard_granularity = 0;
        lim->discard_alignment = 0;
        lim->discard_misaligned = 0;
        lim->logical_block_size = lim->physical_block_size = lim->io_min = 512;
        lim->bounce_pfn = (unsigned long)(BLK_BOUNCE_ANY >> PAGE_SHIFT);
        lim->alignment_offset = 0;
        lim->io_opt = 0;
        lim->misaligned = 0;
        lim->zoned = BLK_ZONED_NONE;
}
EXPORT_SYMBOL(blk_set_default_limits);

/**
 * blk_set_stacking_limits - set default limits for stacking devices
 * @lim: the queue_limits structure to reset
 *
 * Description:
 * Returns a queue_limits struct to its default state. Should be used
 * by stacking drivers like DM that have no internal limits.
 */
void blk_set_stacking_limits(struct queue_limits *lim)
{
        blk_set_default_limits(lim);

        /* Inherit limits from component devices */
        lim->max_segments = USHRT_MAX;
        lim->max_discard_segments = USHRT_MAX;
        lim->max_hw_sectors = UINT_MAX;
        lim->max_segment_size = UINT_MAX;
        lim->max_sectors = UINT_MAX;
        lim->max_dev_sectors = UINT_MAX;
        lim->max_write_same_sectors = UINT_MAX;
        lim->max_write_zeroes_sectors = UINT_MAX;
        lim->max_zone_append_sectors = UINT_MAX;
}
EXPORT_SYMBOL(blk_set_stacking_limits);
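
/*
 * Usage sketch (illustrative only, not from this file): a hypothetical
 * stacking driver resets its limits to the permissive stacking defaults and
 * then narrows them by stacking each component device.  The structures
 * mytgt and mytgt_dev and their field names are assumptions for this
 * example; blk_stack_limits(), bdev_get_queue() and get_start_sect() are
 * the real helpers used later in this file.
 *
 *	static void mytgt_calculate_limits(struct mytgt *t,
 *					   struct queue_limits *limits)
 *	{
 *		struct mytgt_dev *d;
 *
 *		blk_set_stacking_limits(limits);
 *		list_for_each_entry(d, &t->devices, list)
 *			blk_stack_limits(limits,
 *					 &bdev_get_queue(d->bdev)->limits,
 *					 get_start_sect(d->bdev));
 *	}
 */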

/**
 * blk_queue_bounce_limit - set bounce buffer limit for queue
 * @q: the request queue for the device
 * @max_addr: the maximum address the device can handle
 *
 * Description:
 * Different hardware can have different requirements as to what pages
 * it can do I/O directly to. A low level driver can call
 * blk_queue_bounce_limit to have lower memory pages allocated as bounce
 * buffers for doing I/O to pages residing above @max_addr.
 **/
void blk_queue_bounce_limit(struct request_queue *q, u64 max_addr)
{
        unsigned long b_pfn = max_addr >> PAGE_SHIFT;
        int dma = 0;

        q->bounce_gfp = GFP_NOIO;
#if BITS_PER_LONG == 64
        /*
         * Assume anything <= 4GB can be handled by IOMMU. Actually
         * some IOMMUs can handle everything, but I don't know of a
         * way to test this here.
         */
        if (b_pfn < (min_t(u64, 0xffffffffUL, BLK_BOUNCE_HIGH) >> PAGE_SHIFT))
                dma = 1;
        q->limits.bounce_pfn = max(max_low_pfn, b_pfn);
#else
        if (b_pfn < blk_max_low_pfn)
                dma = 1;
        q->limits.bounce_pfn = b_pfn;
#endif
        if (dma) {
                init_emergency_isa_pool();
                q->bounce_gfp = GFP_NOIO | GFP_DMA;
                q->limits.bounce_pfn = b_pfn;
        }
}
EXPORT_SYMBOL(blk_queue_bounce_limit);
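
/*
 * Usage sketch (illustrative only): a hypothetical driver whose controller
 * can only DMA to the first 4 GiB of memory asks the block layer to bounce
 * anything above that.  DMA_BIT_MASK() is the standard helper from
 * <linux/dma-mapping.h> (already included above); the function name is an
 * assumption.
 *
 *	static void mydrv_set_bounce(struct request_queue *q)
 *	{
 *		blk_queue_bounce_limit(q, DMA_BIT_MASK(32));
 *	}
 */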

/**
 * blk_queue_max_hw_sectors - set max sectors for a request for this queue
 * @q: the request queue for the device
 * @max_hw_sectors: max hardware sectors in the usual 512b unit
 *
 * Description:
 * Enables a low level driver to set a hard upper limit,
 * max_hw_sectors, on the size of requests. max_hw_sectors is set by
 * the device driver based upon the capabilities of the I/O
 * controller.
 *
 * max_dev_sectors is a hard limit imposed by the storage device for
 * READ/WRITE requests. It is set by the disk driver.
 *
 * max_sectors is a soft limit imposed by the block layer for
 * filesystem type requests. This value can be overridden on a
 * per-device basis in /sys/block/<device>/queue/max_sectors_kb.
 * The soft limit cannot exceed max_hw_sectors.
 **/
void blk_queue_max_hw_sectors(struct request_queue *q, unsigned int max_hw_sectors)
{
        struct queue_limits *limits = &q->limits;
        unsigned int max_sectors;

        if ((max_hw_sectors << 9) < PAGE_SIZE) {
                max_hw_sectors = 1 << (PAGE_SHIFT - 9);
                printk(KERN_INFO "%s: set to minimum %d\n",
                       __func__, max_hw_sectors);
        }

        limits->max_hw_sectors = max_hw_sectors;
        max_sectors = min_not_zero(max_hw_sectors, limits->max_dev_sectors);
        max_sectors = min_t(unsigned int, max_sectors, BLK_DEF_MAX_SECTORS);
        limits->max_sectors = max_sectors;
        q->backing_dev_info->io_pages = max_sectors >> (PAGE_SHIFT - 9);
}
EXPORT_SYMBOL(blk_queue_max_hw_sectors);
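
/*
 * Usage sketch (illustrative only): a hypothetical driver whose controller
 * can move at most 1 MiB per request.  The limit is expressed in 512-byte
 * sectors, so the byte value is shifted right by SECTOR_SHIFT (9).  The
 * constant MYDRV_MAX_XFER_BYTES is made up for this example.
 *
 *	#define MYDRV_MAX_XFER_BYTES	(1024 * 1024)
 *
 *	static void mydrv_set_transfer_limit(struct request_queue *q)
 *	{
 *		blk_queue_max_hw_sectors(q, MYDRV_MAX_XFER_BYTES >> SECTOR_SHIFT);
 *	}
 */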

/**
 * blk_queue_chunk_sectors - set size of the chunk for this queue
 * @q: the request queue for the device
 * @chunk_sectors: chunk sectors in the usual 512b unit
 *
 * Description:
 * If a driver doesn't want IOs to cross a given chunk size, it can set
 * this limit and prevent merging across chunks. Note that the block layer
 * must accept a page worth of data at any offset. So if the crossing of
 * chunks is a hard limitation in the driver, it must still be prepared
 * to split single page bios.
 **/
void blk_queue_chunk_sectors(struct request_queue *q, unsigned int chunk_sectors)
{
        q->limits.chunk_sectors = chunk_sectors;
}
EXPORT_SYMBOL(blk_queue_chunk_sectors);
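
/*
 * Usage sketch (illustrative only): a hypothetical device with a 128 KiB
 * internal boundary that requests should not straddle.  128 KiB is 256
 * sectors of 512 bytes; the helper name is made up.
 *
 *	static void mydrv_set_chunk(struct request_queue *q)
 *	{
 *		blk_queue_chunk_sectors(q, 256);
 *	}
 */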

/**
 * blk_queue_max_discard_sectors - set max sectors for a single discard
 * @q: the request queue for the device
 * @max_discard_sectors: maximum number of sectors to discard
 **/
void blk_queue_max_discard_sectors(struct request_queue *q,
                unsigned int max_discard_sectors)
{
        q->limits.max_hw_discard_sectors = max_discard_sectors;
        q->limits.max_discard_sectors = max_discard_sectors;
}
EXPORT_SYMBOL(blk_queue_max_discard_sectors);

/**
 * blk_queue_max_write_same_sectors - set max sectors for a single write same
 * @q: the request queue for the device
 * @max_write_same_sectors: maximum number of sectors to write per command
 **/
void blk_queue_max_write_same_sectors(struct request_queue *q,
                unsigned int max_write_same_sectors)
{
        q->limits.max_write_same_sectors = max_write_same_sectors;
}
EXPORT_SYMBOL(blk_queue_max_write_same_sectors);

/**
 * blk_queue_max_write_zeroes_sectors - set max sectors for a single
 *                                      write zeroes
 * @q: the request queue for the device
 * @max_write_zeroes_sectors: maximum number of sectors to write per command
 **/
void blk_queue_max_write_zeroes_sectors(struct request_queue *q,
                unsigned int max_write_zeroes_sectors)
{
        q->limits.max_write_zeroes_sectors = max_write_zeroes_sectors;
}
EXPORT_SYMBOL(blk_queue_max_write_zeroes_sectors);

/**
 * blk_queue_max_zone_append_sectors - set max sectors for a single zone append
 * @q: the request queue for the device
 * @max_zone_append_sectors: maximum number of sectors to write per command
 **/
void blk_queue_max_zone_append_sectors(struct request_queue *q,
                unsigned int max_zone_append_sectors)
{
        unsigned int max_sectors;

        if (WARN_ON(!blk_queue_is_zoned(q)))
                return;

        max_sectors = min(q->limits.max_hw_sectors, max_zone_append_sectors);
        max_sectors = min(q->limits.chunk_sectors, max_sectors);

        /*
         * Warn about potential driver bugs that would leave the
         * max_zone_append_sectors limit at 0: a zero argument, an unset
         * chunk_sectors limit (i.e. no zone size), or an unset
         * max_hw_sectors limit.
         */
        WARN_ON(!max_sectors);

        q->limits.max_zone_append_sectors = max_sectors;
}
EXPORT_SYMBOL_GPL(blk_queue_max_zone_append_sectors);
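
/*
 * Usage sketch (illustrative only): a hypothetical zoned driver whose queue
 * has already been marked as zoned.  Because
 * blk_queue_max_zone_append_sectors() clamps against both chunk_sectors and
 * max_hw_sectors, those limits are set first.  The numbers are assumptions:
 * 256 MiB zones (256 * 2048 sectors) and a 1 MiB (2048 sector) per-command
 * limit.
 *
 *	static void myzoned_set_limits(struct request_queue *q)
 *	{
 *		blk_queue_chunk_sectors(q, 256 * 2048);
 *		blk_queue_max_hw_sectors(q, 2048);
 *		blk_queue_max_zone_append_sectors(q, 2048);
 *	}
 */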

/**
 * blk_queue_max_segments - set max hw segments for a request for this queue
 * @q: the request queue for the device
 * @max_segments: max number of segments
 *
 * Description:
 * Enables a low level driver to set an upper limit on the number of
 * hw data segments in a request.
 **/
void blk_queue_max_segments(struct request_queue *q, unsigned short max_segments)
{
        if (!max_segments) {
                max_segments = 1;
                printk(KERN_INFO "%s: set to minimum %d\n",
                       __func__, max_segments);
        }

        q->limits.max_segments = max_segments;
}
EXPORT_SYMBOL(blk_queue_max_segments);
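
/*
 * Usage sketch (illustrative only): a hypothetical controller whose DMA
 * engine has a 128-entry scatter/gather table, so no request may map to
 * more than 128 segments.  The constant name is made up.
 *
 *	#define MYDRV_NR_SG_ENTRIES	128
 *
 *	static void mydrv_set_sg_limit(struct request_queue *q)
 *	{
 *		blk_queue_max_segments(q, MYDRV_NR_SG_ENTRIES);
 *	}
 */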

/**
 * blk_queue_max_discard_segments - set max segments for discard requests
 * @q: the request queue for the device
 * @max_segments: max number of segments
 *
 * Description:
 * Enables a low level driver to set an upper limit on the number of
 * segments in a discard request.
 **/
void blk_queue_max_discard_segments(struct request_queue *q,
                unsigned short max_segments)
{
        q->limits.max_discard_segments = max_segments;
}
EXPORT_SYMBOL_GPL(blk_queue_max_discard_segments);

/**
 * blk_queue_max_segment_size - set max segment size for blk_rq_map_sg
 * @q: the request queue for the device
 * @max_size: max size of segment in bytes
 *
 * Description:
 * Enables a low level driver to set an upper limit on the size of a
 * coalesced segment.
 **/
void blk_queue_max_segment_size(struct request_queue *q, unsigned int max_size)
{
        if (max_size < PAGE_SIZE) {
                max_size = PAGE_SIZE;
                printk(KERN_INFO "%s: set to minimum %d\n",
                       __func__, max_size);
        }

        /* see blk_queue_virt_boundary() for the explanation */
        WARN_ON_ONCE(q->limits.virt_boundary_mask);

        q->limits.max_segment_size = max_size;
}
EXPORT_SYMBOL(blk_queue_max_segment_size);

/**
 * blk_queue_logical_block_size - set logical block size for the queue
 * @q: the request queue for the device
 * @size: the logical block size, in bytes
 *
 * Description:
 * This should be set to the lowest possible block size that the
 * storage device can address. The default of 512 covers most
 * hardware.
 **/
void blk_queue_logical_block_size(struct request_queue *q, unsigned int size)
{
        q->limits.logical_block_size = size;

        if (q->limits.physical_block_size < size)
                q->limits.physical_block_size = size;

        if (q->limits.io_min < q->limits.physical_block_size)
                q->limits.io_min = q->limits.physical_block_size;
}
EXPORT_SYMBOL(blk_queue_logical_block_size);
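
/*
 * Usage sketch (illustrative only): a hypothetical "4K native" device that
 * can only address 4096-byte blocks.  Note the side effect visible above:
 * physical_block_size and io_min are raised to at least this value.
 *
 *	static void mydrv_set_block_size(struct request_queue *q)
 *	{
 *		blk_queue_logical_block_size(q, 4096);
 *	}
 */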

/**
 * blk_queue_physical_block_size - set physical block size for the queue
 * @q: the request queue for the device
 * @size: the physical block size, in bytes
 *
 * Description:
 * This should be set to the lowest possible sector size that the
 * hardware can operate on without reverting to read-modify-write
 * operations.
 */
void blk_queue_physical_block_size(struct request_queue *q, unsigned int size)
{
        q->limits.physical_block_size = size;

        if (q->limits.physical_block_size < q->limits.logical_block_size)
                q->limits.physical_block_size = q->limits.logical_block_size;

        if (q->limits.io_min < q->limits.physical_block_size)
                q->limits.io_min = q->limits.physical_block_size;
}
EXPORT_SYMBOL(blk_queue_physical_block_size);

/**
 * blk_queue_alignment_offset - set physical block alignment offset
 * @q: the request queue for the device
 * @offset: alignment offset in bytes
 *
 * Description:
 * Some devices are naturally misaligned to compensate for things like
 * the legacy DOS partition table 63-sector offset. Low-level drivers
 * should call this function for devices whose first sector is not
 * naturally aligned.
 */
void blk_queue_alignment_offset(struct request_queue *q, unsigned int offset)
{
        q->limits.alignment_offset =
                offset & (q->limits.physical_block_size - 1);
        q->limits.misaligned = 0;
}
EXPORT_SYMBOL(blk_queue_alignment_offset);
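
/*
 * Usage sketch (illustrative only): a hypothetical 512-byte-emulation drive
 * with 4096-byte physical sectors whose LBA 0 sits seven logical sectors
 * before a physical boundary, i.e. a 3584-byte alignment offset.  The
 * numbers are assumptions for this example.
 *
 *	static void mydrv_set_geometry(struct request_queue *q)
 *	{
 *		blk_queue_logical_block_size(q, 512);
 *		blk_queue_physical_block_size(q, 4096);
 *		blk_queue_alignment_offset(q, 7 * 512);
 *	}
 */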

void blk_queue_update_readahead(struct request_queue *q)
{
        /*
         * For read-ahead of large files to be effective, we need to read ahead
         * at least twice the optimal I/O size.
         */
        q->backing_dev_info->ra_pages =
                max(queue_io_opt(q) * 2 / PAGE_SIZE, VM_READAHEAD_PAGES);
        q->backing_dev_info->io_pages =
                queue_max_sectors(q) >> (PAGE_SHIFT - 9);
}
EXPORT_SYMBOL_GPL(blk_queue_update_readahead);

/**
 * blk_limits_io_min - set minimum request size for a device
 * @limits: the queue limits
 * @min: smallest I/O size in bytes
 *
 * Description:
 * Some devices have an internal block size bigger than the reported
 * hardware sector size. This function can be used to signal the
 * smallest I/O the device can perform without incurring a performance
 * penalty.
 */
void blk_limits_io_min(struct queue_limits *limits, unsigned int min)
{
        limits->io_min = min;

        if (limits->io_min < limits->logical_block_size)
                limits->io_min = limits->logical_block_size;

        if (limits->io_min < limits->physical_block_size)
                limits->io_min = limits->physical_block_size;
}
EXPORT_SYMBOL(blk_limits_io_min);

/**
 * blk_queue_io_min - set minimum request size for the queue
 * @q: the request queue for the device
 * @min: smallest I/O size in bytes
 *
 * Description:
 * Storage devices may report a granularity or preferred minimum I/O
 * size which is the smallest request the device can perform without
 * incurring a performance penalty. For disk drives this is often the
 * physical block size. For RAID arrays it is often the stripe chunk
 * size. A properly aligned multiple of minimum_io_size is the
 * preferred request size for workloads where a high number of I/O
 * operations is desired.
 */
void blk_queue_io_min(struct request_queue *q, unsigned int min)
{
        blk_limits_io_min(&q->limits, min);
}
EXPORT_SYMBOL(blk_queue_io_min);

/**
 * blk_limits_io_opt - set optimal request size for a device
 * @limits: the queue limits
 * @opt: optimal request size in bytes
 *
 * Description:
 * Storage devices may report an optimal I/O size, which is the
 * device's preferred unit for sustained I/O. This is rarely reported
 * for disk drives. For RAID arrays it is usually the stripe width or
 * the internal track size. A properly aligned multiple of
 * optimal_io_size is the preferred request size for workloads where
 * sustained throughput is desired.
 */
void blk_limits_io_opt(struct queue_limits *limits, unsigned int opt)
{
        limits->io_opt = opt;
}
EXPORT_SYMBOL(blk_limits_io_opt);

/**
 * blk_queue_io_opt - set optimal request size for the queue
 * @q: the request queue for the device
 * @opt: optimal request size in bytes
 *
 * Description:
 * Storage devices may report an optimal I/O size, which is the
 * device's preferred unit for sustained I/O. This is rarely reported
 * for disk drives. For RAID arrays it is usually the stripe width or
 * the internal track size. A properly aligned multiple of
 * optimal_io_size is the preferred request size for workloads where
 * sustained throughput is desired.
 */
void blk_queue_io_opt(struct request_queue *q, unsigned int opt)
{
        blk_limits_io_opt(&q->limits, opt);
        q->backing_dev_info->ra_pages =
                max(queue_io_opt(q) * 2 / PAGE_SIZE, VM_READAHEAD_PAGES);
}
EXPORT_SYMBOL(blk_queue_io_opt);
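
/*
 * Usage sketch (illustrative only): a hypothetical RAID-like driver with a
 * 64 KiB chunk striped across 4 data disks.  The minimum efficient I/O is
 * one chunk, the optimal I/O is one full stripe (chunk * data disks).  The
 * function name and numbers are assumptions.
 *
 *	static void myraid_set_io_hints(struct request_queue *q)
 *	{
 *		unsigned int chunk_bytes = 64 * 1024;
 *		unsigned int data_disks = 4;
 *
 *		blk_queue_io_min(q, chunk_bytes);
 *		blk_queue_io_opt(q, chunk_bytes * data_disks);
 *	}
 */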

static unsigned int blk_round_down_sectors(unsigned int sectors, unsigned int lbs)
{
        sectors = round_down(sectors, lbs >> SECTOR_SHIFT);
        if (sectors < PAGE_SIZE >> SECTOR_SHIFT)
                sectors = PAGE_SIZE >> SECTOR_SHIFT;
        return sectors;
}

/**
 * blk_stack_limits - adjust queue_limits for stacked devices
 * @t: the stacking driver limits (top device)
 * @b: the underlying queue limits (bottom, component device)
 * @start: first data sector within component device
 *
 * Description:
 * This function is used by stacking drivers like MD and DM to ensure
 * that all component devices have compatible block sizes and
 * alignments. The stacking driver must provide a queue_limits
 * struct (top) and then iteratively call the stacking function for
 * all component (bottom) devices. The stacking function will
 * attempt to combine the values and ensure proper alignment.
 *
 * Returns 0 if the top and bottom queue_limits are compatible. The
 * top device's block sizes and alignment offsets may be adjusted to
 * ensure alignment with the bottom device. If no compatible sizes
 * and alignments exist, -1 is returned and the resulting top
 * queue_limits will have the misaligned flag set to indicate that
 * the alignment_offset is undefined.
 */
int blk_stack_limits(struct queue_limits *t, struct queue_limits *b,
                     sector_t start)
{
        unsigned int top, bottom, alignment, ret = 0;

        t->max_sectors = min_not_zero(t->max_sectors, b->max_sectors);
        t->max_hw_sectors = min_not_zero(t->max_hw_sectors, b->max_hw_sectors);
        t->max_dev_sectors = min_not_zero(t->max_dev_sectors, b->max_dev_sectors);
        t->max_write_same_sectors = min(t->max_write_same_sectors,
                                        b->max_write_same_sectors);
        t->max_write_zeroes_sectors = min(t->max_write_zeroes_sectors,
                                          b->max_write_zeroes_sectors);
        t->max_zone_append_sectors = min(t->max_zone_append_sectors,
                                         b->max_zone_append_sectors);
        t->bounce_pfn = min_not_zero(t->bounce_pfn, b->bounce_pfn);

        t->seg_boundary_mask = min_not_zero(t->seg_boundary_mask,
                                            b->seg_boundary_mask);
        t->virt_boundary_mask = min_not_zero(t->virt_boundary_mask,
                                             b->virt_boundary_mask);

        t->max_segments = min_not_zero(t->max_segments, b->max_segments);
        t->max_discard_segments = min_not_zero(t->max_discard_segments,
                                               b->max_discard_segments);
        t->max_integrity_segments = min_not_zero(t->max_integrity_segments,
                                                 b->max_integrity_segments);

        t->max_segment_size = min_not_zero(t->max_segment_size,
                                           b->max_segment_size);

        t->misaligned |= b->misaligned;

        alignment = queue_limit_alignment_offset(b, start);

        /* Bottom device has different alignment. Check that it is
         * compatible with the current top alignment.
         */
        if (t->alignment_offset != alignment) {

                top = max(t->physical_block_size, t->io_min)
                        + t->alignment_offset;
                bottom = max(b->physical_block_size, b->io_min) + alignment;

                /* Verify that top and bottom intervals line up */
                if (max(top, bottom) % min(top, bottom)) {
                        t->misaligned = 1;
                        ret = -1;
                }
        }

        t->logical_block_size = max(t->logical_block_size,
                                    b->logical_block_size);

        t->physical_block_size = max(t->physical_block_size,
                                     b->physical_block_size);

        t->io_min = max(t->io_min, b->io_min);
        t->io_opt = lcm_not_zero(t->io_opt, b->io_opt);

        /* Set non-power-of-2 compatible chunk_sectors boundary */
        if (b->chunk_sectors)
                t->chunk_sectors = gcd(t->chunk_sectors, b->chunk_sectors);

        /* Physical block size a multiple of the logical block size? */
        if (t->physical_block_size & (t->logical_block_size - 1)) {
                t->physical_block_size = t->logical_block_size;
                t->misaligned = 1;
                ret = -1;
        }

        /* Minimum I/O a multiple of the physical block size? */
        if (t->io_min & (t->physical_block_size - 1)) {
                t->io_min = t->physical_block_size;
                t->misaligned = 1;
                ret = -1;
        }

        /* Optimal I/O a multiple of the physical block size? */
        if (t->io_opt & (t->physical_block_size - 1)) {
                t->io_opt = 0;
                t->misaligned = 1;
                ret = -1;
        }

        /* chunk_sectors a multiple of the physical block size? */
        if ((t->chunk_sectors << 9) & (t->physical_block_size - 1)) {
                t->chunk_sectors = 0;
                t->misaligned = 1;
                ret = -1;
        }

        t->raid_partial_stripes_expensive =
                max(t->raid_partial_stripes_expensive,
                    b->raid_partial_stripes_expensive);

        /* Find lowest common alignment_offset */
        t->alignment_offset = lcm_not_zero(t->alignment_offset, alignment)
                % max(t->physical_block_size, t->io_min);

        /* Verify that new alignment_offset is on a logical block boundary */
        if (t->alignment_offset & (t->logical_block_size - 1)) {
                t->misaligned = 1;
                ret = -1;
        }

        t->max_sectors = blk_round_down_sectors(t->max_sectors, t->logical_block_size);
        t->max_hw_sectors = blk_round_down_sectors(t->max_hw_sectors, t->logical_block_size);
        t->max_dev_sectors = blk_round_down_sectors(t->max_dev_sectors, t->logical_block_size);

        /* Discard alignment and granularity */
        if (b->discard_granularity) {
                alignment = queue_limit_discard_alignment(b, start);

                if (t->discard_granularity != 0 &&
                    t->discard_alignment != alignment) {
                        top = t->discard_granularity + t->discard_alignment;
                        bottom = b->discard_granularity + alignment;

                        /* Verify that top and bottom intervals line up */
                        if ((max(top, bottom) % min(top, bottom)) != 0)
                                t->discard_misaligned = 1;
                }

                t->max_discard_sectors = min_not_zero(t->max_discard_sectors,
                                                      b->max_discard_sectors);
                t->max_hw_discard_sectors = min_not_zero(t->max_hw_discard_sectors,
                                                         b->max_hw_discard_sectors);
                t->discard_granularity = max(t->discard_granularity,
                                             b->discard_granularity);
                t->discard_alignment = lcm_not_zero(t->discard_alignment, alignment) %
                        t->discard_granularity;
        }

        t->zoned = max(t->zoned, b->zoned);
        return ret;
}
EXPORT_SYMBOL(blk_stack_limits);
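
/*
 * Worked example (illustrative only): suppose the top limits have a
 * 4096-byte physical block size, io_min <= 4096 and no alignment offset,
 * and queue_limit_alignment_offset() evaluates to 3584 for the component at
 * @start.  Then top = 4096 + 0 and bottom = 4096 + 3584 = 7680; since
 * 7680 % 4096 != 0 the intervals do not line up, so the function sets
 * t->misaligned and returns -1.  The numbers are assumptions chosen to show
 * the alignment check above.
 */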

/**
 * disk_stack_limits - adjust queue limits for stacked drivers
 * @disk: MD/DM gendisk (top)
 * @bdev: the underlying block device (bottom)
 * @offset: offset to beginning of data within component device
 *
 * Description:
 * Merges the limits for a top level gendisk and a bottom level
 * block_device.
 */
void disk_stack_limits(struct gendisk *disk, struct block_device *bdev,
                       sector_t offset)
{
        struct request_queue *t = disk->queue;

        if (blk_stack_limits(&t->limits, &bdev_get_queue(bdev)->limits,
                        get_start_sect(bdev) + (offset >> 9)) < 0) {
                char top[BDEVNAME_SIZE], bottom[BDEVNAME_SIZE];

                disk_name(disk, 0, top);
                bdevname(bdev, bottom);

                printk(KERN_NOTICE "%s: Warning: Device %s is misaligned\n",
                       top, bottom);
        }

        blk_queue_update_readahead(disk->queue);
}
EXPORT_SYMBOL(disk_stack_limits);

/**
 * blk_queue_update_dma_pad - update pad mask
 * @q: the request queue for the device
 * @mask: pad mask
 *
 * Update dma pad mask.
 *
 * Appending a pad buffer to a request modifies the last entry of a
 * scatter list such that it includes the pad buffer.
 **/
void blk_queue_update_dma_pad(struct request_queue *q, unsigned int mask)
{
        if (mask > q->dma_pad_mask)
                q->dma_pad_mask = mask;
}
EXPORT_SYMBOL(blk_queue_update_dma_pad);

/**
 * blk_queue_segment_boundary - set boundary rules for segment merging
 * @q: the request queue for the device
 * @mask: the memory boundary mask
 **/
void blk_queue_segment_boundary(struct request_queue *q, unsigned long mask)
{
        if (mask < PAGE_SIZE - 1) {
                mask = PAGE_SIZE - 1;
                printk(KERN_INFO "%s: set to minimum %lx\n",
                       __func__, mask);
        }

        q->limits.seg_boundary_mask = mask;
}
EXPORT_SYMBOL(blk_queue_segment_boundary);

/**
 * blk_queue_virt_boundary - set boundary rules for bio merging
 * @q: the request queue for the device
 * @mask: the memory boundary mask
 **/
void blk_queue_virt_boundary(struct request_queue *q, unsigned long mask)
{
        q->limits.virt_boundary_mask = mask;

        /*
         * Devices that require a virtual boundary do not support scatter/gather
         * I/O natively, but instead require a descriptor list entry for each
         * page (which might not be identical to the Linux PAGE_SIZE). Because
         * of that they are not limited by our notion of "segment size".
         */
        if (mask)
                q->limits.max_segment_size = UINT_MAX;
}
EXPORT_SYMBOL(blk_queue_virt_boundary);
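
/*
 * Usage sketch (illustrative only): a hypothetical controller whose
 * descriptors can address discontiguous data only at page granularity, so
 * every segment after the first must start on a page boundary.  Passing
 * PAGE_SIZE - 1 expresses that constraint; the function name is made up.
 *
 *	static void mydrv_set_boundaries(struct request_queue *q)
 *	{
 *		blk_queue_virt_boundary(q, PAGE_SIZE - 1);
 *	}
 */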
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 721)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 722) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 723) * blk_queue_dma_alignment - set dma length and memory alignment
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 724) * @q: the request queue for the device
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 725) * @mask: alignment mask
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 726) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 727) * description:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 728) * set required memory and length alignment for direct dma transactions.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 729) * this is used when building direct io requests for the queue.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 730) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 731) **/
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 732) void blk_queue_dma_alignment(struct request_queue *q, int mask)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 733) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 734) q->dma_alignment = mask;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 735) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 736) EXPORT_SYMBOL(blk_queue_dma_alignment);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 737)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 738) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 739) * blk_queue_update_dma_alignment - update dma length and memory alignment
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 740) * @q: the request queue for the device
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 741) * @mask: alignment mask
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 742) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 743) * Description:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 744) * Update the required memory and length alignment for direct DMA transactions.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 745) * If the requested alignment is larger than the current alignment, the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 746) * queue alignment is updated to the new value; otherwise it is left
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 747) * alone. This is designed to allow multiple objects (driver, device,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 748) * transport, etc.) to set their respective alignments without
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 749) * interfering with one another.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 750) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 751) **/
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 752) void blk_queue_update_dma_alignment(struct request_queue *q, int mask)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 753) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 754) BUG_ON(mask > PAGE_SIZE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 755)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 756) if (mask > q->dma_alignment)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 757) q->dma_alignment = mask;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 758) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 759) EXPORT_SYMBOL(blk_queue_update_dma_alignment);
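/*
 * A sketch of the layering case described above, with made-up values:
 * the transport asks for 4-byte alignment, the device later asks for
 * 512-byte alignment, and a weaker request afterwards has no effect.
 */
static void example_layered_dma_alignment(struct request_queue *q)
{
	blk_queue_update_dma_alignment(q, 3);	/* transport: 4-byte alignment */
	blk_queue_update_dma_alignment(q, 511);	/* device: 512-byte alignment */
	blk_queue_update_dma_alignment(q, 3);	/* ignored, 511 is kept */
}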
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 760)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 761) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 762) * blk_set_queue_depth - tell the block layer about the device queue depth
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 763) * @q: the request queue for the device
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 764) * @depth: queue depth
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 765) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 766) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 767) void blk_set_queue_depth(struct request_queue *q, unsigned int depth)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 768) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 769) q->queue_depth = depth;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 770) rq_qos_queue_depth_changed(q);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 771) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 772) EXPORT_SYMBOL(blk_set_queue_depth);
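/*
 * A minimal usage sketch with an assumed negotiated depth: the driver
 * reports how many commands the device accepts in flight so that rq_qos
 * users such as writeback throttling can scale to it.
 */
static void example_report_queue_depth(struct request_queue *q)
{
	blk_set_queue_depth(q, 64);	/* hypothetical device queue depth */
}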
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 773)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 774) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 775) * blk_queue_write_cache - configure queue's write cache
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 776) * @q: the request queue for the device
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 777) * @wc: write back cache on or off
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 778) * @fua: device supports FUA writes, if true
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 779) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 780) * Tell the block layer about the write cache of @q.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 781) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 782) void blk_queue_write_cache(struct request_queue *q, bool wc, bool fua)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 783) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 784) if (wc)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 785) blk_queue_flag_set(QUEUE_FLAG_WC, q);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 786) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 787) blk_queue_flag_clear(QUEUE_FLAG_WC, q);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 788) if (fua)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 789) blk_queue_flag_set(QUEUE_FLAG_FUA, q);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 790) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 791) blk_queue_flag_clear(QUEUE_FLAG_FUA, q);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 792)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 793) wbt_set_write_cache(q, test_bit(QUEUE_FLAG_WC, &q->queue_flags));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 794) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 795) EXPORT_SYMBOL_GPL(blk_queue_write_cache);
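/*
 * A minimal usage sketch: a hypothetical driver that has already probed
 * whether its device has a volatile write cache and whether it honours
 * FUA writes (probing not shown), and simply forwards both answers.
 * Without FUA support the block layer emulates REQ_FUA with a post-flush.
 */
static void example_setup_write_cache(struct request_queue *q,
				      bool has_vwc, bool has_fua)
{
	blk_queue_write_cache(q, has_vwc, has_fua);
}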
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 796)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 797) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 798) * blk_queue_required_elevator_features - Set a queue's required elevator features
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 799) * @q: the request queue for the target device
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 800) * @features: Required elevator features OR'ed together
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 801) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 802) * Tell the block layer that for the device controlled through @q, the only
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 803) * elevators that can be used are those that implement at least the set of
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 804) * features specified by @features.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 805) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 806) void blk_queue_required_elevator_features(struct request_queue *q,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 807) unsigned int features)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 808) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 809) q->required_elevator_features = features;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 810) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 811) EXPORT_SYMBOL_GPL(blk_queue_required_elevator_features);
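/*
 * A minimal usage sketch for a host-managed zoned device: it can only be
 * driven by elevators that preserve the ordering of sequential zone
 * writes, which the in-tree ELEVATOR_F_ZBD_SEQ_WRITE feature flag
 * expresses. The helper name is illustrative.
 */
static void example_require_zone_write_ordering(struct request_queue *q)
{
	blk_queue_required_elevator_features(q, ELEVATOR_F_ZBD_SEQ_WRITE);
}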
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 812)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 813) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 814) * blk_queue_can_use_dma_map_merging - configure queue for merging segments.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 815) * @q: the request queue for the device
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 816) * @dev: the device pointer for dma
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 817) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 818) * Tell the block layer that the DMA mapping of @dev can merge segments of @q.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 819) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 820) bool blk_queue_can_use_dma_map_merging(struct request_queue *q,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 821) struct device *dev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 822) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 823) unsigned long boundary = dma_get_merge_boundary(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 824)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 825) if (!boundary)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 826) return false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 827)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 828) /* No need to update max_segment_size. see blk_queue_virt_boundary() */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 829) blk_queue_virt_boundary(q, boundary);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 830)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 831) return true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 832) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 833) EXPORT_SYMBOL_GPL(blk_queue_can_use_dma_map_merging);
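/*
 * A usage sketch under assumed hardware limits: an MMC/UFS-style driver
 * with a single descriptor slot asks whether the IOMMU behind @dev can
 * merge segments while mapping. If so, the virtual boundary set above
 * lets one request still carry multiple pages. The single-segment limit
 * is an illustrative policy, not a rule implied by the function.
 */
static void example_enable_dma_map_merging(struct request_queue *q,
					   struct device *dev)
{
	if (blk_queue_can_use_dma_map_merging(q, dev))
		blk_queue_max_segments(q, 1);
}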
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 834)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 835) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 836) * blk_queue_set_zoned - configure the zoned model of a disk's queue.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 837) * @disk: the gendisk of the queue to configure
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 838) * @model: the zoned model to set
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 839) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 840) * Set the zoned model of the request queue of @disk according to @model.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 841) * When @model is BLK_ZONED_HM (host managed), this should be called only
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 842) * if zoned block device support is enabled (CONFIG_BLK_DEV_ZONED option).
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 843) * If @model specifies BLK_ZONED_HA (host aware), the effective model used
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 844) * depends on CONFIG_BLK_DEV_ZONED settings and on the existence of partitions
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 845) * on the disk.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 846) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 847) void blk_queue_set_zoned(struct gendisk *disk, enum blk_zoned_model model)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 848) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 849) switch (model) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 850) case BLK_ZONED_HM:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 851) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 852) * Host managed devices are supported only if
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 853) * CONFIG_BLK_DEV_ZONED is enabled.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 854) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 855) WARN_ON_ONCE(!IS_ENABLED(CONFIG_BLK_DEV_ZONED));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 856) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 857) case BLK_ZONED_HA:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 858) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 859) * Host aware devices can be treated either as regular block
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 860) * devices (similar to drive managed devices) or as zoned block
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 861) * devices to take advantage of the zone command set, similarly
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 862) * to host managed devices. We try the latter if there are no
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 863) * partitions and zoned block device support is enabled, else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 864) * we do nothing special as far as the block layer is concerned.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 865) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 866) if (!IS_ENABLED(CONFIG_BLK_DEV_ZONED) ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 867) disk_has_partitions(disk))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 868) model = BLK_ZONED_NONE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 869) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 870) case BLK_ZONED_NONE:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 871) default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 872) if (WARN_ON_ONCE(model != BLK_ZONED_NONE))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 873) model = BLK_ZONED_NONE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 874) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 875) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 876)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 877) disk->queue->limits.zoned = model;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 878) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 879) EXPORT_SYMBOL_GPL(blk_queue_set_zoned);
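/*
 * A minimal usage sketch: a hypothetical driver that has already decoded
 * the device's zoned capability (decoding not shown) and reports it. For
 * a host-aware device the block layer decides, per the rules above,
 * whether to treat it as zoned or as a regular disk.
 */
static void example_declare_zoned_model(struct gendisk *disk, bool host_managed)
{
	blk_queue_set_zoned(disk, host_managed ? BLK_ZONED_HM : BLK_ZONED_HA);
}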
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 880)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 881) static int __init blk_settings_init(void)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 882) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 883) blk_max_low_pfn = max_low_pfn - 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 884) blk_max_pfn = max_pfn - 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 885) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 886) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 887) subsys_initcall(blk_settings_init);