^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1) // SPDX-License-Identifier: GPL-2.0-only
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3) * SCSI Zoned Block commands
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5) * Copyright (C) 2014-2015 SUSE Linux GmbH
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6) * Written by: Hannes Reinecke <hare@suse.de>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7) * Modified by: Damien Le Moal <damien.lemoal@hgst.com>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8) * Modified by: Shaun Tancheff <shaun.tancheff@seagate.com>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11) #include <linux/blkdev.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12) #include <linux/vmalloc.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 13) #include <linux/sched/mm.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14) #include <linux/mutex.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 16) #include <asm/unaligned.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 17)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 18) #include <scsi/scsi.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 19) #include <scsi/scsi_cmnd.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 20)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 21) #include "sd.h"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 22)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 23) static unsigned int sd_zbc_get_zone_wp_offset(struct blk_zone *zone)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 24) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 25) if (zone->type == ZBC_ZONE_TYPE_CONV)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 26) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 27)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 28) switch (zone->cond) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 29) case BLK_ZONE_COND_IMP_OPEN:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 30) case BLK_ZONE_COND_EXP_OPEN:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 31) case BLK_ZONE_COND_CLOSED:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 32) return zone->wp - zone->start;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 33) case BLK_ZONE_COND_FULL:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 34) return zone->len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 35) case BLK_ZONE_COND_EMPTY:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 36) case BLK_ZONE_COND_OFFLINE:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 37) case BLK_ZONE_COND_READONLY:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 38) default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 39) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 40) * Offline and read-only zones do not have a valid
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 41) * write pointer. Use 0 as for an empty zone.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 42) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 43) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 44) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 45) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 46)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 47) static int sd_zbc_parse_report(struct scsi_disk *sdkp, u8 *buf,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 48) unsigned int idx, report_zones_cb cb, void *data)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 49) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 50) struct scsi_device *sdp = sdkp->device;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 51) struct blk_zone zone = { 0 };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 52) int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 53)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 54) zone.type = buf[0] & 0x0f;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 55) zone.cond = (buf[1] >> 4) & 0xf;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 56) if (buf[1] & 0x01)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 57) zone.reset = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 58) if (buf[1] & 0x02)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 59) zone.non_seq = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 60)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 61) zone.len = logical_to_sectors(sdp, get_unaligned_be64(&buf[8]));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 62) zone.capacity = zone.len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 63) zone.start = logical_to_sectors(sdp, get_unaligned_be64(&buf[16]));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 64) zone.wp = logical_to_sectors(sdp, get_unaligned_be64(&buf[24]));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 65) if (zone.type != ZBC_ZONE_TYPE_CONV &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 66) zone.cond == ZBC_ZONE_COND_FULL)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 67) zone.wp = zone.start + zone.len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 68)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 69) ret = cb(&zone, idx, data);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 70) if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 71) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 72)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 73) if (sdkp->rev_wp_offset)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 74) sdkp->rev_wp_offset[idx] = sd_zbc_get_zone_wp_offset(&zone);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 75)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 76) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 77) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 78)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 79) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 80) * sd_zbc_do_report_zones - Issue a REPORT ZONES scsi command.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 81) * @sdkp: The target disk
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 82) * @buf: vmalloc-ed buffer to use for the reply
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 83) * @buflen: the buffer size
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 84) * @lba: Start LBA of the report
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 85) * @partial: Do partial report
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 86) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 87) * For internal use during device validation.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 88) * Using partial=true can significantly speed up execution of a report zones
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 89) * command because the disk does not have to count all possible report matching
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 90) * zones and will only report the count of zones fitting in the command reply
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 91) * buffer.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 92) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 93) static int sd_zbc_do_report_zones(struct scsi_disk *sdkp, unsigned char *buf,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 94) unsigned int buflen, sector_t lba,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 95) bool partial)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 96) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 97) struct scsi_device *sdp = sdkp->device;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 98) const int timeout = sdp->request_queue->rq_timeout;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 99) struct scsi_sense_hdr sshdr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 100) unsigned char cmd[16];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 101) unsigned int rep_len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 102) int result;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 103)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 104) memset(cmd, 0, 16);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 105) cmd[0] = ZBC_IN;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 106) cmd[1] = ZI_REPORT_ZONES;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 107) put_unaligned_be64(lba, &cmd[2]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 108) put_unaligned_be32(buflen, &cmd[10]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 109) if (partial)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 110) cmd[14] = ZBC_REPORT_ZONE_PARTIAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 111)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 112) result = scsi_execute_req(sdp, cmd, DMA_FROM_DEVICE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 113) buf, buflen, &sshdr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 114) timeout, SD_MAX_RETRIES, NULL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 115) if (result) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 116) sd_printk(KERN_ERR, sdkp,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 117) "REPORT ZONES start lba %llu failed\n", lba);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 118) sd_print_result(sdkp, "REPORT ZONES", result);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 119) if (driver_byte(result) == DRIVER_SENSE &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 120) scsi_sense_valid(&sshdr))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 121) sd_print_sense_hdr(sdkp, &sshdr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 122) return -EIO;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 123) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 124)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 125) rep_len = get_unaligned_be32(&buf[0]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 126) if (rep_len < 64) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 127) sd_printk(KERN_ERR, sdkp,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 128) "REPORT ZONES report invalid length %u\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 129) rep_len);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 130) return -EIO;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 131) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 132)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 133) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 134) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 135)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 136) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 137) * Allocate a buffer for report zones reply.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 138) * @sdkp: The target disk
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 139) * @nr_zones: Maximum number of zones to report
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 140) * @buflen: Size of the buffer allocated
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 141) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 142) * Try to allocate a reply buffer for the number of requested zones.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 143) * The size of the buffer allocated may be smaller than requested to
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 144) * satify the device constraint (max_hw_sectors, max_segments, etc).
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 145) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 146) * Return the address of the allocated buffer and update @buflen with
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 147) * the size of the allocated buffer.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 148) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 149) static void *sd_zbc_alloc_report_buffer(struct scsi_disk *sdkp,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 150) unsigned int nr_zones, size_t *buflen)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 151) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 152) struct request_queue *q = sdkp->disk->queue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 153) size_t bufsize;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 154) void *buf;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 155)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 156) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 157) * Report zone buffer size should be at most 64B times the number of
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 158) * zones requested plus the 64B reply header, but should be aligned
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 159) * to SECTOR_SIZE for ATA devices.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 160) * Make sure that this size does not exceed the hardware capabilities.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 161) * Furthermore, since the report zone command cannot be split, make
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 162) * sure that the allocated buffer can always be mapped by limiting the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 163) * number of pages allocated to the HBA max segments limit.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 164) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 165) nr_zones = min(nr_zones, sdkp->nr_zones);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 166) bufsize = roundup((nr_zones + 1) * 64, SECTOR_SIZE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 167) bufsize = min_t(size_t, bufsize,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 168) queue_max_hw_sectors(q) << SECTOR_SHIFT);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 169) bufsize = min_t(size_t, bufsize, queue_max_segments(q) << PAGE_SHIFT);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 170)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 171) while (bufsize >= SECTOR_SIZE) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 172) buf = __vmalloc(bufsize,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 173) GFP_KERNEL | __GFP_ZERO | __GFP_NORETRY);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 174) if (buf) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 175) *buflen = bufsize;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 176) return buf;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 177) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 178) bufsize = rounddown(bufsize >> 1, SECTOR_SIZE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 179) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 180)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 181) return NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 182) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 183)
/**
 * sd_zbc_zone_sectors - Get the device zone size in number of 512B sectors.
 * @sdkp: The target disk
 *
 * Converts the zone size cached in sdkp->zone_blocks (device logical blocks)
 * to the 512B sector unit used by the block layer.
 */
static inline sector_t sd_zbc_zone_sectors(struct scsi_disk *sdkp)
{
	return logical_to_sectors(sdkp->device, sdkp->zone_blocks);
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 192)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 193) int sd_zbc_report_zones(struct gendisk *disk, sector_t sector,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 194) unsigned int nr_zones, report_zones_cb cb, void *data)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 195) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 196) struct scsi_disk *sdkp = scsi_disk(disk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 197) sector_t capacity = logical_to_sectors(sdkp->device, sdkp->capacity);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 198) unsigned int nr, i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 199) unsigned char *buf;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 200) size_t offset, buflen = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 201) int zone_idx = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 202) int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 203)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 204) if (!sd_is_zoned(sdkp))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 205) /* Not a zoned device */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 206) return -EOPNOTSUPP;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 207)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 208) if (!capacity)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 209) /* Device gone or invalid */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 210) return -ENODEV;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 211)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 212) buf = sd_zbc_alloc_report_buffer(sdkp, nr_zones, &buflen);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 213) if (!buf)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 214) return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 215)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 216) while (zone_idx < nr_zones && sector < capacity) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 217) ret = sd_zbc_do_report_zones(sdkp, buf, buflen,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 218) sectors_to_logical(sdkp->device, sector), true);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 219) if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 220) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 221)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 222) offset = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 223) nr = min(nr_zones, get_unaligned_be32(&buf[0]) / 64);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 224) if (!nr)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 225) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 226)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 227) for (i = 0; i < nr && zone_idx < nr_zones; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 228) offset += 64;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 229) ret = sd_zbc_parse_report(sdkp, buf + offset, zone_idx,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 230) cb, data);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 231) if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 232) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 233) zone_idx++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 234) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 235)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 236) sector += sd_zbc_zone_sectors(sdkp) * i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 237) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 238)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 239) ret = zone_idx;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 240) out:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 241) kvfree(buf);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 242) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 243) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 244)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 245) static blk_status_t sd_zbc_cmnd_checks(struct scsi_cmnd *cmd)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 246) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 247) struct request *rq = cmd->request;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 248) struct scsi_disk *sdkp = scsi_disk(rq->rq_disk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 249) sector_t sector = blk_rq_pos(rq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 250)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 251) if (!sd_is_zoned(sdkp))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 252) /* Not a zoned device */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 253) return BLK_STS_IOERR;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 254)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 255) if (sdkp->device->changed)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 256) return BLK_STS_IOERR;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 257)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 258) if (sector & (sd_zbc_zone_sectors(sdkp) - 1))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 259) /* Unaligned request */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 260) return BLK_STS_IOERR;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 261)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 262) return BLK_STS_OK;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 263) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 264)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 265) #define SD_ZBC_INVALID_WP_OFST (~0u)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 266) #define SD_ZBC_UPDATING_WP_OFST (SD_ZBC_INVALID_WP_OFST - 1)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 267)
/*
 * Report zones callback used by sd_zbc_update_wp_offset_workfn() to refresh
 * the cached write pointer offset of the reported zone. @data is the
 * scsi_disk; the caller holds zones_wp_offset_lock while invoking this.
 */
static int sd_zbc_update_wp_offset_cb(struct blk_zone *zone, unsigned int idx,
				      void *data)
{
	struct scsi_disk *sdkp = data;

	lockdep_assert_held(&sdkp->zones_wp_offset_lock);

	sdkp->zones_wp_offset[idx] = sd_zbc_get_zone_wp_offset(zone);

	return 0;
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 279)
/*
 * Work function scheduled by sd_zbc_prepare_zone_append(): refresh, from a
 * REPORT ZONES reply, every cached write pointer offset entry marked
 * SD_ZBC_UPDATING_WP_OFST, then drop the device reference taken when the
 * work was scheduled.
 */
static void sd_zbc_update_wp_offset_workfn(struct work_struct *work)
{
	struct scsi_disk *sdkp;
	unsigned int zno;
	int ret;

	sdkp = container_of(work, struct scsi_disk, zone_wp_offset_work);

	spin_lock_bh(&sdkp->zones_wp_offset_lock);
	for (zno = 0; zno < sdkp->nr_zones; zno++) {
		if (sdkp->zones_wp_offset[zno] != SD_ZBC_UPDATING_WP_OFST)
			continue;

		/*
		 * Drop the lock while issuing the REPORT ZONES command, then
		 * reacquire it before updating the cached entry. Entries may
		 * change state while the lock is released, hence the recheck
		 * on each loop iteration above.
		 */
		spin_unlock_bh(&sdkp->zones_wp_offset_lock);
		ret = sd_zbc_do_report_zones(sdkp, sdkp->zone_wp_update_buf,
					     SD_BUF_SIZE,
					     zno * sdkp->zone_blocks, true);
		spin_lock_bh(&sdkp->zones_wp_offset_lock);
		/* Parse the first descriptor, right after the 64B header. */
		if (!ret)
			sd_zbc_parse_report(sdkp, sdkp->zone_wp_update_buf + 64,
					    zno, sd_zbc_update_wp_offset_cb,
					    sdkp);
	}
	spin_unlock_bh(&sdkp->zones_wp_offset_lock);

	/* Drop the reference taken in sd_zbc_prepare_zone_append(). */
	scsi_device_put(sdkp->device);
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 307)
/**
 * sd_zbc_prepare_zone_append() - Prepare an emulated ZONE_APPEND command.
 * @cmd: the command to setup
 * @lba: the LBA to patch
 * @nr_blocks: the number of LBAs to be written
 *
 * Called from sd_setup_read_write_cmnd() for REQ_OP_ZONE_APPEND.
 * @sd_zbc_prepare_zone_append() handles the necessary zone write locking and
 * patching of the lba for an emulated ZONE_APPEND command.
 *
 * In case the cached write pointer offset is %SD_ZBC_INVALID_WP_OFST it will
 * schedule a REPORT ZONES command to refresh the cache and return
 * %BLK_STS_DEV_RESOURCE so that the command is requeued and retried.
 */
blk_status_t sd_zbc_prepare_zone_append(struct scsi_cmnd *cmd, sector_t *lba,
					unsigned int nr_blocks)
{
	struct request *rq = cmd->request;
	struct scsi_disk *sdkp = scsi_disk(rq->rq_disk);
	unsigned int wp_offset, zno = blk_rq_zone_no(rq);
	blk_status_t ret;

	ret = sd_zbc_cmnd_checks(cmd);
	if (ret != BLK_STS_OK)
		return ret;

	/* Zone append only makes sense for sequential zones. */
	if (!blk_rq_zone_is_seq(rq))
		return BLK_STS_IOERR;

	/* Unlock of the write lock will happen in sd_zbc_complete() */
	if (!blk_req_zone_write_trylock(rq))
		return BLK_STS_ZONE_RESOURCE;

	spin_lock_bh(&sdkp->zones_wp_offset_lock);
	wp_offset = sdkp->zones_wp_offset[zno];
	switch (wp_offset) {
	case SD_ZBC_INVALID_WP_OFST:
		/*
		 * We are about to schedule work to update a zone write pointer
		 * offset, which will cause the zone append command to be
		 * requeued. So make sure that the scsi device does not go away
		 * while the work is being processed.
		 */
		if (scsi_device_get(sdkp->device)) {
			ret = BLK_STS_IOERR;
			break;
		}
		sdkp->zones_wp_offset[zno] = SD_ZBC_UPDATING_WP_OFST;
		schedule_work(&sdkp->zone_wp_offset_work);
		fallthrough;
	case SD_ZBC_UPDATING_WP_OFST:
		/* Update in progress: requeue the command and retry later. */
		ret = BLK_STS_DEV_RESOURCE;
		break;
	default:
		/* The cached offset is in 512B sectors; convert to blocks. */
		wp_offset = sectors_to_logical(sdkp->device, wp_offset);
		if (wp_offset + nr_blocks > sdkp->zone_blocks) {
			/* The write would exceed the zone size: reject it. */
			ret = BLK_STS_IOERR;
			break;
		}

		/* Patch the command LBA to target the zone write pointer. */
		*lba += wp_offset;
	}
	spin_unlock_bh(&sdkp->zones_wp_offset_lock);
	if (ret)
		blk_req_zone_write_unlock(rq);
	return ret;
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 374)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 375) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 376) * sd_zbc_setup_zone_mgmt_cmnd - Prepare a zone ZBC_OUT command. The operations
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 377) * can be RESET WRITE POINTER, OPEN, CLOSE or FINISH.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 378) * @cmd: the command to setup
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 379) * @op: Operation to be performed
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 380) * @all: All zones control
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 381) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 382) * Called from sd_init_command() for REQ_OP_ZONE_RESET, REQ_OP_ZONE_RESET_ALL,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 383) * REQ_OP_ZONE_OPEN, REQ_OP_ZONE_CLOSE or REQ_OP_ZONE_FINISH requests.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 384) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 385) blk_status_t sd_zbc_setup_zone_mgmt_cmnd(struct scsi_cmnd *cmd,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 386) unsigned char op, bool all)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 387) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 388) struct request *rq = cmd->request;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 389) sector_t sector = blk_rq_pos(rq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 390) struct scsi_disk *sdkp = scsi_disk(rq->rq_disk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 391) sector_t block = sectors_to_logical(sdkp->device, sector);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 392) blk_status_t ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 393)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 394) ret = sd_zbc_cmnd_checks(cmd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 395) if (ret != BLK_STS_OK)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 396) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 397)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 398) cmd->cmd_len = 16;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 399) memset(cmd->cmnd, 0, cmd->cmd_len);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 400) cmd->cmnd[0] = ZBC_OUT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 401) cmd->cmnd[1] = op;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 402) if (all)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 403) cmd->cmnd[14] = 0x1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 404) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 405) put_unaligned_be64(block, &cmd->cmnd[2]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 406)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 407) rq->timeout = SD_TIMEOUT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 408) cmd->sc_data_direction = DMA_NONE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 409) cmd->transfersize = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 410) cmd->allowed = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 411)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 412) return BLK_STS_OK;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 413) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 414)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 415) static bool sd_zbc_need_zone_wp_update(struct request *rq)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 416) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 417) switch (req_op(rq)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 418) case REQ_OP_ZONE_APPEND:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 419) case REQ_OP_ZONE_FINISH:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 420) case REQ_OP_ZONE_RESET:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 421) case REQ_OP_ZONE_RESET_ALL:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 422) return true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 423) case REQ_OP_WRITE:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 424) case REQ_OP_WRITE_ZEROES:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 425) case REQ_OP_WRITE_SAME:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 426) return blk_rq_zone_is_seq(rq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 427) default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 428) return false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 429) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 430) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 431)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 432) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 433) * sd_zbc_zone_wp_update - Update cached zone write pointer upon cmd completion
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 434) * @cmd: Completed command
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 435) * @good_bytes: Command reply bytes
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 436) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 437) * Called from sd_zbc_complete() to handle the update of the cached zone write
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 438) * pointer value in case an update is needed.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 439) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 440) static unsigned int sd_zbc_zone_wp_update(struct scsi_cmnd *cmd,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 441) unsigned int good_bytes)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 442) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 443) int result = cmd->result;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 444) struct request *rq = cmd->request;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 445) struct scsi_disk *sdkp = scsi_disk(rq->rq_disk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 446) unsigned int zno = blk_rq_zone_no(rq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 447) enum req_opf op = req_op(rq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 448)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 449) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 450) * If we got an error for a command that needs updating the write
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 451) * pointer offset cache, we must mark the zone wp offset entry as
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 452) * invalid to force an update from disk the next time a zone append
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 453) * command is issued.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 454) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 455) spin_lock_bh(&sdkp->zones_wp_offset_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 456)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 457) if (result && op != REQ_OP_ZONE_RESET_ALL) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 458) if (op == REQ_OP_ZONE_APPEND) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 459) /* Force complete completion (no retry) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 460) good_bytes = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 461) scsi_set_resid(cmd, blk_rq_bytes(rq));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 462) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 463)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 464) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 465) * Force an update of the zone write pointer offset on
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 466) * the next zone append access.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 467) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 468) if (sdkp->zones_wp_offset[zno] != SD_ZBC_UPDATING_WP_OFST)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 469) sdkp->zones_wp_offset[zno] = SD_ZBC_INVALID_WP_OFST;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 470) goto unlock_wp_offset;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 471) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 472)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 473) switch (op) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 474) case REQ_OP_ZONE_APPEND:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 475) rq->__sector += sdkp->zones_wp_offset[zno];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 476) fallthrough;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 477) case REQ_OP_WRITE_ZEROES:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 478) case REQ_OP_WRITE_SAME:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 479) case REQ_OP_WRITE:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 480) if (sdkp->zones_wp_offset[zno] < sd_zbc_zone_sectors(sdkp))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 481) sdkp->zones_wp_offset[zno] +=
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 482) good_bytes >> SECTOR_SHIFT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 483) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 484) case REQ_OP_ZONE_RESET:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 485) sdkp->zones_wp_offset[zno] = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 486) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 487) case REQ_OP_ZONE_FINISH:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 488) sdkp->zones_wp_offset[zno] = sd_zbc_zone_sectors(sdkp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 489) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 490) case REQ_OP_ZONE_RESET_ALL:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 491) memset(sdkp->zones_wp_offset, 0,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 492) sdkp->nr_zones * sizeof(unsigned int));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 493) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 494) default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 495) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 496) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 497)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 498) unlock_wp_offset:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 499) spin_unlock_bh(&sdkp->zones_wp_offset_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 500)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 501) return good_bytes;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 502) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 503)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 504) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 505) * sd_zbc_complete - ZBC command post processing.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 506) * @cmd: Completed command
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 507) * @good_bytes: Command reply bytes
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 508) * @sshdr: command sense header
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 509) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 510) * Called from sd_done() to handle zone commands errors and updates to the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 511) * device queue zone write pointer offset cahce.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 512) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 513) unsigned int sd_zbc_complete(struct scsi_cmnd *cmd, unsigned int good_bytes,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 514) struct scsi_sense_hdr *sshdr)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 515) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 516) int result = cmd->result;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 517) struct request *rq = cmd->request;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 518)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 519) if (op_is_zone_mgmt(req_op(rq)) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 520) result &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 521) sshdr->sense_key == ILLEGAL_REQUEST &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 522) sshdr->asc == 0x24) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 523) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 524) * INVALID FIELD IN CDB error: a zone management command was
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 525) * attempted on a conventional zone. Nothing to worry about,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 526) * so be quiet about the error.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 527) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 528) rq->rq_flags |= RQF_QUIET;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 529) } else if (sd_zbc_need_zone_wp_update(rq))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 530) good_bytes = sd_zbc_zone_wp_update(cmd, good_bytes);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 531)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 532) if (req_op(rq) == REQ_OP_ZONE_APPEND)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 533) blk_req_zone_write_unlock(rq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 534)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 535) return good_bytes;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 536) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 537)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 538) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 539) * sd_zbc_check_zoned_characteristics - Check zoned block device characteristics
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 540) * @sdkp: Target disk
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 541) * @buf: Buffer where to store the VPD page data
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 542) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 543) * Read VPD page B6, get information and check that reads are unconstrained.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 544) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 545) static int sd_zbc_check_zoned_characteristics(struct scsi_disk *sdkp,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 546) unsigned char *buf)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 547) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 548)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 549) if (scsi_get_vpd_page(sdkp->device, 0xb6, buf, 64)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 550) sd_printk(KERN_NOTICE, sdkp,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 551) "Read zoned characteristics VPD page failed\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 552) return -ENODEV;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 553) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 554)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 555) if (sdkp->device->type != TYPE_ZBC) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 556) /* Host-aware */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 557) sdkp->urswrz = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 558) sdkp->zones_optimal_open = get_unaligned_be32(&buf[8]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 559) sdkp->zones_optimal_nonseq = get_unaligned_be32(&buf[12]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 560) sdkp->zones_max_open = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 561) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 562) /* Host-managed */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 563) sdkp->urswrz = buf[4] & 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 564) sdkp->zones_optimal_open = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 565) sdkp->zones_optimal_nonseq = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 566) sdkp->zones_max_open = get_unaligned_be32(&buf[16]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 567) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 568)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 569) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 570) * Check for unconstrained reads: host-managed devices with
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 571) * constrained reads (drives failing read after write pointer)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 572) * are not supported.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 573) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 574) if (!sdkp->urswrz) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 575) if (sdkp->first_scan)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 576) sd_printk(KERN_NOTICE, sdkp,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 577) "constrained reads devices are not supported\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 578) return -ENODEV;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 579) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 580)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 581) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 582) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 583)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 584) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 585) * sd_zbc_check_capacity - Check the device capacity
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 586) * @sdkp: Target disk
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 587) * @buf: command buffer
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 588) * @zblocks: zone size in number of blocks
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 589) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 590) * Get the device zone size and check that the device capacity as reported
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 591) * by READ CAPACITY matches the max_lba value (plus one) of the report zones
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 592) * command reply for devices with RC_BASIS == 0.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 593) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 594) * Returns 0 upon success or an error code upon failure.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 595) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 596) static int sd_zbc_check_capacity(struct scsi_disk *sdkp, unsigned char *buf,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 597) u32 *zblocks)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 598) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 599) u64 zone_blocks;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 600) sector_t max_lba;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 601) unsigned char *rec;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 602) int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 603)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 604) /* Do a report zone to get max_lba and the size of the first zone */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 605) ret = sd_zbc_do_report_zones(sdkp, buf, SD_BUF_SIZE, 0, false);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 606) if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 607) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 608)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 609) if (sdkp->rc_basis == 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 610) /* The max_lba field is the capacity of this device */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 611) max_lba = get_unaligned_be64(&buf[8]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 612) if (sdkp->capacity != max_lba + 1) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 613) if (sdkp->first_scan)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 614) sd_printk(KERN_WARNING, sdkp,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 615) "Changing capacity from %llu to max LBA+1 %llu\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 616) (unsigned long long)sdkp->capacity,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 617) (unsigned long long)max_lba + 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 618) sdkp->capacity = max_lba + 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 619) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 620) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 621)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 622) /* Get the size of the first reported zone */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 623) rec = buf + 64;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 624) zone_blocks = get_unaligned_be64(&rec[8]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 625) if (logical_to_sectors(sdkp->device, zone_blocks) > UINT_MAX) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 626) if (sdkp->first_scan)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 627) sd_printk(KERN_NOTICE, sdkp,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 628) "Zone size too large\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 629) return -EFBIG;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 630) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 631)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 632) *zblocks = zone_blocks;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 633)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 634) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 635) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 636)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 637) static void sd_zbc_print_zones(struct scsi_disk *sdkp)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 638) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 639) if (!sd_is_zoned(sdkp) || !sdkp->capacity)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 640) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 641)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 642) if (sdkp->capacity & (sdkp->zone_blocks - 1))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 643) sd_printk(KERN_NOTICE, sdkp,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 644) "%u zones of %u logical blocks + 1 runt zone\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 645) sdkp->nr_zones - 1,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 646) sdkp->zone_blocks);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 647) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 648) sd_printk(KERN_NOTICE, sdkp,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 649) "%u zones of %u logical blocks\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 650) sdkp->nr_zones,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 651) sdkp->zone_blocks);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 652) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 653)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 654) static int sd_zbc_init_disk(struct scsi_disk *sdkp)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 655) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 656) sdkp->zones_wp_offset = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 657) spin_lock_init(&sdkp->zones_wp_offset_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 658) sdkp->rev_wp_offset = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 659) mutex_init(&sdkp->rev_mutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 660) INIT_WORK(&sdkp->zone_wp_offset_work, sd_zbc_update_wp_offset_workfn);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 661) sdkp->zone_wp_update_buf = kzalloc(SD_BUF_SIZE, GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 662) if (!sdkp->zone_wp_update_buf)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 663) return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 664)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 665) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 666) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 667)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 668) void sd_zbc_release_disk(struct scsi_disk *sdkp)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 669) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 670) kvfree(sdkp->zones_wp_offset);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 671) sdkp->zones_wp_offset = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 672) kfree(sdkp->zone_wp_update_buf);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 673) sdkp->zone_wp_update_buf = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 674) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 675)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 676) static void sd_zbc_revalidate_zones_cb(struct gendisk *disk)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 677) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 678) struct scsi_disk *sdkp = scsi_disk(disk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 679)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 680) swap(sdkp->zones_wp_offset, sdkp->rev_wp_offset);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 681) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 682)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 683) int sd_zbc_revalidate_zones(struct scsi_disk *sdkp)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 684) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 685) struct gendisk *disk = sdkp->disk;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 686) struct request_queue *q = disk->queue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 687) u32 zone_blocks = sdkp->rev_zone_blocks;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 688) unsigned int nr_zones = sdkp->rev_nr_zones;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 689) u32 max_append;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 690) int ret = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 691) unsigned int flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 692)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 693) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 694) * For all zoned disks, initialize zone append emulation data if not
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 695) * already done. This is necessary also for host-aware disks used as
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 696) * regular disks due to the presence of partitions as these partitions
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 697) * may be deleted and the disk zoned model changed back from
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 698) * BLK_ZONED_NONE to BLK_ZONED_HA.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 699) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 700) if (sd_is_zoned(sdkp) && !sdkp->zone_wp_update_buf) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 701) ret = sd_zbc_init_disk(sdkp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 702) if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 703) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 704) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 705)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 706) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 707) * There is nothing to do for regular disks, including host-aware disks
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 708) * that have partitions.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 709) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 710) if (!blk_queue_is_zoned(q))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 711) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 712)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 713) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 714) * Make sure revalidate zones are serialized to ensure exclusive
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 715) * updates of the scsi disk data.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 716) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 717) mutex_lock(&sdkp->rev_mutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 718)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 719) if (sdkp->zone_blocks == zone_blocks &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 720) sdkp->nr_zones == nr_zones &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 721) disk->queue->nr_zones == nr_zones)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 722) goto unlock;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 723)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 724) flags = memalloc_noio_save();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 725) sdkp->zone_blocks = zone_blocks;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 726) sdkp->nr_zones = nr_zones;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 727) sdkp->rev_wp_offset = kvcalloc(nr_zones, sizeof(u32), GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 728) if (!sdkp->rev_wp_offset) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 729) ret = -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 730) memalloc_noio_restore(flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 731) goto unlock;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 732) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 733)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 734) ret = blk_revalidate_disk_zones(disk, sd_zbc_revalidate_zones_cb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 735)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 736) memalloc_noio_restore(flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 737) kvfree(sdkp->rev_wp_offset);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 738) sdkp->rev_wp_offset = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 739)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 740) if (ret) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 741) sdkp->zone_blocks = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 742) sdkp->nr_zones = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 743) sdkp->capacity = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 744) goto unlock;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 745) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 746)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 747) max_append = min_t(u32, logical_to_sectors(sdkp->device, zone_blocks),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 748) q->limits.max_segments << (PAGE_SHIFT - 9));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 749) max_append = min_t(u32, max_append, queue_max_hw_sectors(q));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 750)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 751) blk_queue_max_zone_append_sectors(q, max_append);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 752)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 753) sd_zbc_print_zones(sdkp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 754)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 755) unlock:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 756) mutex_unlock(&sdkp->rev_mutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 757)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 758) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 759) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 760)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 761) int sd_zbc_read_zones(struct scsi_disk *sdkp, unsigned char *buf)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 762) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 763) struct gendisk *disk = sdkp->disk;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 764) struct request_queue *q = disk->queue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 765) unsigned int nr_zones;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 766) u32 zone_blocks = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 767) int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 768)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 769) if (!sd_is_zoned(sdkp))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 770) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 771) * Device managed or normal SCSI disk,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 772) * no special handling required
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 773) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 774) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 775)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 776) /* Check zoned block device characteristics (unconstrained reads) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 777) ret = sd_zbc_check_zoned_characteristics(sdkp, buf);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 778) if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 779) goto err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 780)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 781) /* Check the device capacity reported by report zones */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 782) ret = sd_zbc_check_capacity(sdkp, buf, &zone_blocks);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 783) if (ret != 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 784) goto err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 785)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 786) /* The drive satisfies the kernel restrictions: set it up */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 787) blk_queue_flag_set(QUEUE_FLAG_ZONE_RESETALL, q);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 788) blk_queue_required_elevator_features(q, ELEVATOR_F_ZBD_SEQ_WRITE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 789) if (sdkp->zones_max_open == U32_MAX)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 790) blk_queue_max_open_zones(q, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 791) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 792) blk_queue_max_open_zones(q, sdkp->zones_max_open);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 793) blk_queue_max_active_zones(q, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 794) nr_zones = round_up(sdkp->capacity, zone_blocks) >> ilog2(zone_blocks);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 795)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 796) /* READ16/WRITE16 is mandatory for ZBC disks */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 797) sdkp->device->use_16_for_rw = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 798) sdkp->device->use_10_for_rw = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 799)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 800) sdkp->rev_nr_zones = nr_zones;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 801) sdkp->rev_zone_blocks = zone_blocks;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 802)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 803) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 804)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 805) err:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 806) sdkp->capacity = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 807)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 808) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 809) }