Orange Pi5 kernel

Deprecated Linux kernel 5.10.110 for OrangePi 5/5B/5+ boards

3 Commits   0 Branches   0 Tags
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300    1) // SPDX-License-Identifier: GPL-2.0-only
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300    2) //#define DEBUG
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300    3) #include <linux/spinlock.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300    4) #include <linux/slab.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300    5) #include <linux/blkdev.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300    6) #include <linux/hdreg.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300    7) #include <linux/module.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300    8) #include <linux/mutex.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300    9) #include <linux/interrupt.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   10) #include <linux/virtio.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   11) #include <linux/virtio_blk.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   12) #include <linux/scatterlist.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   13) #include <linux/string_helpers.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   14) #include <linux/idr.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   15) #include <linux/blk-mq.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   16) #include <linux/blk-mq-virtio.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   17) #include <linux/numa.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   18) #include <uapi/linux/virtio_ring.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   19) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   20) #define PART_BITS 4
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   21) #define VQ_NAME_LEN 16
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   22) #define MAX_DISCARD_SEGMENTS 256u
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   23) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   24) static int major;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   25) static DEFINE_IDA(vd_index_ida);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   26) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   27) static struct workqueue_struct *virtblk_wq;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   28) 
/*
 * Per-virtqueue state: the virtqueue itself, a spinlock serializing all
 * access to it (submission and completion), and the name handed to the
 * virtio core when the vq is requested.  Cacheline-aligned so adjacent
 * queues don't false-share under SMP.
 */
struct virtio_blk_vq {
	struct virtqueue *vq;
	spinlock_t lock;
	char name[VQ_NAME_LEN];
} ____cacheline_aligned_in_smp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   34) 
/* Per-device state, one instance per probed virtio-blk device. */
struct virtio_blk {
	/*
	 * This mutex must be held by anything that may run after
	 * virtblk_remove() sets vblk->vdev to NULL.
	 *
	 * blk-mq, virtqueue processing, and sysfs attribute code paths are
	 * shut down before vblk->vdev is set to NULL and therefore do not need
	 * to hold this mutex.
	 */
	struct mutex vdev_mutex;
	struct virtio_device *vdev;

	/* The disk structure for the kernel. */
	struct gendisk *disk;

	/* Block layer tags. */
	struct blk_mq_tag_set tag_set;

	/* Process context for config space updates */
	struct work_struct config_work;

	/*
	 * Tracks references from block_device_operations open/release and
	 * virtio_driver probe/remove so this object can be freed once no
	 * longer in use.
	 */
	refcount_t refs;

	/* What host tells us, plus 2 for header & trailer (status byte). */
	unsigned int sg_elems;

	/* Ida index - used to track minor number allocations. */
	int index;

	/* num of vqs */
	int num_vqs;
	/* Array of num_vqs per-queue structures, one per hw queue. */
	struct virtio_blk_vq *vqs;
};
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   73) 
/*
 * Per-request driver state, embedded in the blk-mq request PDU
 * (see blk_mq_rq_to_pdu()/blk_mq_rq_from_pdu() users below).
 */
struct virtblk_req {
	/* Command header sent to the device (type/ioprio/sector). */
	struct virtio_blk_outhdr out_hdr;
	/* Status byte written back by the device (VIRTIO_BLK_S_*). */
	u8 status;
	/* Data scatterlist; flexible array, sized where the PDU is set up
	 * (tag set initialization — not visible in this chunk). */
	struct scatterlist sg[];
};
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   79) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   80) static inline blk_status_t virtblk_result(struct virtblk_req *vbr)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   81) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   82) 	switch (vbr->status) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   83) 	case VIRTIO_BLK_S_OK:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   84) 		return BLK_STS_OK;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   85) 	case VIRTIO_BLK_S_UNSUPP:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   86) 		return BLK_STS_NOTSUPP;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   87) 	default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   88) 		return BLK_STS_IOERR;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   89) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   90) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   91) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   92) static int virtblk_add_req(struct virtqueue *vq, struct virtblk_req *vbr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   93) 		struct scatterlist *data_sg, bool have_data)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   94) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   95) 	struct scatterlist hdr, status, *sgs[3];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   96) 	unsigned int num_out = 0, num_in = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   97) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   98) 	sg_init_one(&hdr, &vbr->out_hdr, sizeof(vbr->out_hdr));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   99) 	sgs[num_out++] = &hdr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  100) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  101) 	if (have_data) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  102) 		if (vbr->out_hdr.type & cpu_to_virtio32(vq->vdev, VIRTIO_BLK_T_OUT))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  103) 			sgs[num_out++] = data_sg;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  104) 		else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  105) 			sgs[num_out + num_in++] = data_sg;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  106) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  107) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  108) 	sg_init_one(&status, &vbr->status, sizeof(vbr->status));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  109) 	sgs[num_out + num_in++] = &status;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  110) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  111) 	return virtqueue_add_sgs(vq, sgs, num_out, num_in, vbr, GFP_ATOMIC);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  112) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  113) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  114) static int virtblk_setup_discard_write_zeroes(struct request *req, bool unmap)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  115) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  116) 	unsigned short segments = blk_rq_nr_discard_segments(req);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  117) 	unsigned short n = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  118) 	struct virtio_blk_discard_write_zeroes *range;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  119) 	struct bio *bio;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  120) 	u32 flags = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  121) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  122) 	if (unmap)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  123) 		flags |= VIRTIO_BLK_WRITE_ZEROES_FLAG_UNMAP;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  124) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  125) 	range = kmalloc_array(segments, sizeof(*range), GFP_ATOMIC);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  126) 	if (!range)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  127) 		return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  128) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  129) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  130) 	 * Single max discard segment means multi-range discard isn't
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  131) 	 * supported, and block layer only runs contiguity merge like
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  132) 	 * normal RW request. So we can't reply on bio for retrieving
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  133) 	 * each range info.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  134) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  135) 	if (queue_max_discard_segments(req->q) == 1) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  136) 		range[0].flags = cpu_to_le32(flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  137) 		range[0].num_sectors = cpu_to_le32(blk_rq_sectors(req));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  138) 		range[0].sector = cpu_to_le64(blk_rq_pos(req));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  139) 		n = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  140) 	} else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  141) 		__rq_for_each_bio(bio, req) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  142) 			u64 sector = bio->bi_iter.bi_sector;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  143) 			u32 num_sectors = bio->bi_iter.bi_size >> SECTOR_SHIFT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  144) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  145) 			range[n].flags = cpu_to_le32(flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  146) 			range[n].num_sectors = cpu_to_le32(num_sectors);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  147) 			range[n].sector = cpu_to_le64(sector);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  148) 			n++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  149) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  150) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  151) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  152) 	WARN_ON_ONCE(n != segments);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  153) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  154) 	req->special_vec.bv_page = virt_to_page(range);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  155) 	req->special_vec.bv_offset = offset_in_page(range);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  156) 	req->special_vec.bv_len = sizeof(*range) * segments;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  157) 	req->rq_flags |= RQF_SPECIAL_PAYLOAD;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  158) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  159) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  160) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  161) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  162) static inline void virtblk_request_done(struct request *req)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  163) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  164) 	struct virtblk_req *vbr = blk_mq_rq_to_pdu(req);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  165) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  166) 	if (req->rq_flags & RQF_SPECIAL_PAYLOAD) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  167) 		kfree(page_address(req->special_vec.bv_page) +
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  168) 		      req->special_vec.bv_offset);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  169) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  170) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  171) 	blk_mq_end_request(req, virtblk_result(vbr));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  172) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  173) 
/*
 * Virtqueue completion callback: drain all finished buffers from the vq
 * and hand the corresponding requests to blk-mq for completion.  Runs
 * with the per-vq lock held and interrupts disabled.
 */
static void virtblk_done(struct virtqueue *vq)
{
	struct virtio_blk *vblk = vq->vdev->priv;
	bool req_done = false;
	int qid = vq->index;
	struct virtblk_req *vbr;
	unsigned long flags;
	unsigned int len;

	spin_lock_irqsave(&vblk->vqs[qid].lock, flags);
	do {
		/* Suppress further callbacks while we drain the ring. */
		virtqueue_disable_cb(vq);
		while ((vbr = virtqueue_get_buf(vblk->vqs[qid].vq, &len)) != NULL) {
			struct request *req = blk_mq_rq_from_pdu(vbr);

			if (likely(!blk_should_fake_timeout(req->q)))
				blk_mq_complete_request(req);
			req_done = true;
		}
		if (unlikely(virtqueue_is_broken(vq)))
			break;
		/* Re-enable callbacks; loop again if completions raced in. */
	} while (!virtqueue_enable_cb(vq));

	/* In case queue is stopped waiting for more buffers. */
	if (req_done)
		blk_mq_start_stopped_hw_queues(vblk->disk->queue, true);
	spin_unlock_irqrestore(&vblk->vqs[qid].lock, flags);
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  202) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  203) static void virtio_commit_rqs(struct blk_mq_hw_ctx *hctx)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  204) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  205) 	struct virtio_blk *vblk = hctx->queue->queuedata;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  206) 	struct virtio_blk_vq *vq = &vblk->vqs[hctx->queue_num];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  207) 	bool kick;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  208) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  209) 	spin_lock_irq(&vq->lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  210) 	kick = virtqueue_kick_prepare(vq->vq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  211) 	spin_unlock_irq(&vq->lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  212) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  213) 	if (kick)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  214) 		virtqueue_notify(vq->vq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  215) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  216) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  217) static blk_status_t virtio_queue_rq(struct blk_mq_hw_ctx *hctx,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  218) 			   const struct blk_mq_queue_data *bd)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  219) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  220) 	struct virtio_blk *vblk = hctx->queue->queuedata;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  221) 	struct request *req = bd->rq;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  222) 	struct virtblk_req *vbr = blk_mq_rq_to_pdu(req);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  223) 	unsigned long flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  224) 	unsigned int num;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  225) 	int qid = hctx->queue_num;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  226) 	int err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  227) 	bool notify = false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  228) 	bool unmap = false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  229) 	u32 type;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  230) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  231) 	switch (req_op(req)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  232) 	case REQ_OP_READ:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  233) 	case REQ_OP_WRITE:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  234) 		type = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  235) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  236) 	case REQ_OP_FLUSH:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  237) 		type = VIRTIO_BLK_T_FLUSH;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  238) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  239) 	case REQ_OP_DISCARD:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  240) 		type = VIRTIO_BLK_T_DISCARD;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  241) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  242) 	case REQ_OP_WRITE_ZEROES:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  243) 		type = VIRTIO_BLK_T_WRITE_ZEROES;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  244) 		unmap = !(req->cmd_flags & REQ_NOUNMAP);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  245) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  246) 	case REQ_OP_DRV_IN:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  247) 		type = VIRTIO_BLK_T_GET_ID;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  248) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  249) 	default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  250) 		WARN_ON_ONCE(1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  251) 		return BLK_STS_IOERR;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  252) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  253) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  254) 	BUG_ON(type != VIRTIO_BLK_T_DISCARD &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  255) 	       type != VIRTIO_BLK_T_WRITE_ZEROES &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  256) 	       (req->nr_phys_segments + 2 > vblk->sg_elems));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  257) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  258) 	vbr->out_hdr.type = cpu_to_virtio32(vblk->vdev, type);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  259) 	vbr->out_hdr.sector = type ?
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  260) 		0 : cpu_to_virtio64(vblk->vdev, blk_rq_pos(req));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  261) 	vbr->out_hdr.ioprio = cpu_to_virtio32(vblk->vdev, req_get_ioprio(req));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  262) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  263) 	blk_mq_start_request(req);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  264) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  265) 	if (type == VIRTIO_BLK_T_DISCARD || type == VIRTIO_BLK_T_WRITE_ZEROES) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  266) 		err = virtblk_setup_discard_write_zeroes(req, unmap);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  267) 		if (err)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  268) 			return BLK_STS_RESOURCE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  269) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  270) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  271) 	num = blk_rq_map_sg(hctx->queue, req, vbr->sg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  272) 	if (num) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  273) 		if (rq_data_dir(req) == WRITE)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  274) 			vbr->out_hdr.type |= cpu_to_virtio32(vblk->vdev, VIRTIO_BLK_T_OUT);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  275) 		else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  276) 			vbr->out_hdr.type |= cpu_to_virtio32(vblk->vdev, VIRTIO_BLK_T_IN);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  277) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  278) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  279) 	spin_lock_irqsave(&vblk->vqs[qid].lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  280) 	err = virtblk_add_req(vblk->vqs[qid].vq, vbr, vbr->sg, num);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  281) 	if (err) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  282) 		virtqueue_kick(vblk->vqs[qid].vq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  283) 		/* Don't stop the queue if -ENOMEM: we may have failed to
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  284) 		 * bounce the buffer due to global resource outage.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  285) 		 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  286) 		if (err == -ENOSPC)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  287) 			blk_mq_stop_hw_queue(hctx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  288) 		spin_unlock_irqrestore(&vblk->vqs[qid].lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  289) 		switch (err) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  290) 		case -ENOSPC:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  291) 			return BLK_STS_DEV_RESOURCE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  292) 		case -ENOMEM:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  293) 			return BLK_STS_RESOURCE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  294) 		default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  295) 			return BLK_STS_IOERR;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  296) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  297) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  298) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  299) 	if (bd->last && virtqueue_kick_prepare(vblk->vqs[qid].vq))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  300) 		notify = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  301) 	spin_unlock_irqrestore(&vblk->vqs[qid].lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  302) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  303) 	if (notify)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  304) 		virtqueue_notify(vblk->vqs[qid].vq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  305) 	return BLK_STS_OK;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  306) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  307) 
/* return id (s/n) string for *disk to *id_str
 *
 * Issues a VIRTIO_BLK_T_GET_ID command (via an internal REQ_OP_DRV_IN
 * request) synchronously.  @id_str must hold at least
 * VIRTIO_BLK_ID_BYTES.  Returns 0 on success or a negative errno.
 */
static int virtblk_get_id(struct gendisk *disk, char *id_str)
{
	struct virtio_blk *vblk = disk->private_data;
	struct request_queue *q = vblk->disk->queue;
	struct request *req;
	int err;

	req = blk_get_request(q, REQ_OP_DRV_IN, 0);
	if (IS_ERR(req))
		return PTR_ERR(req);

	/* Map the caller's buffer so the device writes the ID into it. */
	err = blk_rq_map_kern(q, req, id_str, VIRTIO_BLK_ID_BYTES, GFP_KERNEL);
	if (err)
		goto out;

	/* Execute and wait; the status byte lives in the request PDU. */
	blk_execute_rq(vblk->disk->queue, vblk->disk, req, false);
	err = blk_status_to_errno(virtblk_result(blk_mq_rq_to_pdu(req)));
out:
	blk_put_request(req);
	return err;
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  331) 
/* Take a reference on @vblk; paired with virtblk_put(). */
static void virtblk_get(struct virtio_blk *vblk)
{
	refcount_inc(&vblk->refs);
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  336) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  337) static void virtblk_put(struct virtio_blk *vblk)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  338) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  339) 	if (refcount_dec_and_test(&vblk->refs)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  340) 		ida_simple_remove(&vd_index_ida, vblk->index);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  341) 		mutex_destroy(&vblk->vdev_mutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  342) 		kfree(vblk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  343) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  344) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  345) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  346) static int virtblk_open(struct block_device *bd, fmode_t mode)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  347) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  348) 	struct virtio_blk *vblk = bd->bd_disk->private_data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  349) 	int ret = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  350) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  351) 	mutex_lock(&vblk->vdev_mutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  352) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  353) 	if (vblk->vdev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  354) 		virtblk_get(vblk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  355) 	else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  356) 		ret = -ENXIO;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  357) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  358) 	mutex_unlock(&vblk->vdev_mutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  359) 	return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  360) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  361) 
/* block_device_operations .release: drop the reference taken in open. */
static void virtblk_release(struct gendisk *disk, fmode_t mode)
{
	struct virtio_blk *vblk = disk->private_data;

	virtblk_put(vblk);
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  368) 
/* We provide getgeo only to please some old bootloader/partitioning tools */
static int virtblk_getgeo(struct block_device *bd, struct hd_geometry *geo)
{
	struct virtio_blk *vblk = bd->bd_disk->private_data;
	int ret = 0;

	/* Hold vdev_mutex so vdev can't be torn down under us (see struct). */
	mutex_lock(&vblk->vdev_mutex);

	if (!vblk->vdev) {
		ret = -ENXIO;
		goto out;
	}

	/* see if the host passed in geometry config */
	if (virtio_has_feature(vblk->vdev, VIRTIO_BLK_F_GEOMETRY)) {
		virtio_cread(vblk->vdev, struct virtio_blk_config,
			     geometry.cylinders, &geo->cylinders);
		virtio_cread(vblk->vdev, struct virtio_blk_config,
			     geometry.heads, &geo->heads);
		virtio_cread(vblk->vdev, struct virtio_blk_config,
			     geometry.sectors, &geo->sectors);
	} else {
		/* some standard values, similar to sd */
		geo->heads = 1 << 6;
		geo->sectors = 1 << 5;
		/* cylinders = capacity / (heads * sectors) = capacity >> 11 */
		geo->cylinders = get_capacity(bd->bd_disk) >> 11;
	}
out:
	mutex_unlock(&vblk->vdev_mutex);
	return ret;
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  400) 
/* Block-device entry points registered for each virtio-blk gendisk. */
static const struct block_device_operations virtblk_fops = {
	.owner  = THIS_MODULE,
	.open = virtblk_open,
	.release = virtblk_release,
	.getgeo = virtblk_getgeo,
};
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  407) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  408) static int index_to_minor(int index)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  409) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  410) 	return index << PART_BITS;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  411) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  412) 
/* Inverse of index_to_minor(): recover the ida index from a minor. */
static int minor_to_index(int minor)
{
	return minor >> PART_BITS;
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  417) 
/*
 * sysfs "serial" attribute: fetch the device ID string from the host.
 * Returns the string length, 0 when the device doesn't support GET_ID
 * (reported as -EIO), or a negative errno for other failures.
 */
static ssize_t serial_show(struct device *dev,
			   struct device_attribute *attr, char *buf)
{
	struct gendisk *disk = dev_to_disk(dev);
	int err;

	/* sysfs gives us a PAGE_SIZE buffer */
	BUILD_BUG_ON(PAGE_SIZE < VIRTIO_BLK_ID_BYTES);

	/* Pre-terminate: the device may fill all VIRTIO_BLK_ID_BYTES. */
	buf[VIRTIO_BLK_ID_BYTES] = '\0';
	err = virtblk_get_id(disk, buf);
	if (!err)
		return strlen(buf);

	if (err == -EIO) /* Unsupported? Make it empty. */
		return 0;

	return err;
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  437) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  438) static DEVICE_ATTR_RO(serial);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  439) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  440) /* The queue's logical block size must be set before calling this */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  441) static void virtblk_update_capacity(struct virtio_blk *vblk, bool resize)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  442) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  443) 	struct virtio_device *vdev = vblk->vdev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  444) 	struct request_queue *q = vblk->disk->queue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  445) 	char cap_str_2[10], cap_str_10[10];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  446) 	unsigned long long nblocks;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  447) 	u64 capacity;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  448) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  449) 	/* Host must always specify the capacity. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  450) 	virtio_cread(vdev, struct virtio_blk_config, capacity, &capacity);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  451) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  452) 	/* If capacity is too big, truncate with warning. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  453) 	if ((sector_t)capacity != capacity) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  454) 		dev_warn(&vdev->dev, "Capacity %llu too large: truncating\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  455) 			 (unsigned long long)capacity);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  456) 		capacity = (sector_t)-1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  457) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  458) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  459) 	nblocks = DIV_ROUND_UP_ULL(capacity, queue_logical_block_size(q) >> 9);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  460) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  461) 	string_get_size(nblocks, queue_logical_block_size(q),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  462) 			STRING_UNITS_2, cap_str_2, sizeof(cap_str_2));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  463) 	string_get_size(nblocks, queue_logical_block_size(q),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  464) 			STRING_UNITS_10, cap_str_10, sizeof(cap_str_10));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  465) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  466) 	dev_notice(&vdev->dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  467) 		   "[%s] %s%llu %d-byte logical blocks (%s/%s)\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  468) 		   vblk->disk->disk_name,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  469) 		   resize ? "new size: " : "",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  470) 		   nblocks,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  471) 		   queue_logical_block_size(q),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  472) 		   cap_str_10,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  473) 		   cap_str_2);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  474) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  475) 	set_capacity_revalidate_and_notify(vblk->disk, capacity, true);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  476) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  477) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  478) static void virtblk_config_changed_work(struct work_struct *work)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  479) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  480) 	struct virtio_blk *vblk =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  481) 		container_of(work, struct virtio_blk, config_work);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  482) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  483) 	virtblk_update_capacity(vblk, true);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  484) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  485) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  486) static void virtblk_config_changed(struct virtio_device *vdev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  487) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  488) 	struct virtio_blk *vblk = vdev->priv;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  489) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  490) 	queue_work(virtblk_wq, &vblk->config_work);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  491) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  492) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  493) static int init_vq(struct virtio_blk *vblk)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  494) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  495) 	int err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  496) 	int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  497) 	vq_callback_t **callbacks;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  498) 	const char **names;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  499) 	struct virtqueue **vqs;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  500) 	unsigned short num_vqs;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  501) 	struct virtio_device *vdev = vblk->vdev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  502) 	struct irq_affinity desc = { 0, };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  503) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  504) 	err = virtio_cread_feature(vdev, VIRTIO_BLK_F_MQ,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  505) 				   struct virtio_blk_config, num_queues,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  506) 				   &num_vqs);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  507) 	if (err)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  508) 		num_vqs = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  509) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  510) 	num_vqs = min_t(unsigned int, nr_cpu_ids, num_vqs);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  511) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  512) 	vblk->vqs = kmalloc_array(num_vqs, sizeof(*vblk->vqs), GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  513) 	if (!vblk->vqs)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  514) 		return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  515) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  516) 	names = kmalloc_array(num_vqs, sizeof(*names), GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  517) 	callbacks = kmalloc_array(num_vqs, sizeof(*callbacks), GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  518) 	vqs = kmalloc_array(num_vqs, sizeof(*vqs), GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  519) 	if (!names || !callbacks || !vqs) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  520) 		err = -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  521) 		goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  522) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  523) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  524) 	for (i = 0; i < num_vqs; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  525) 		callbacks[i] = virtblk_done;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  526) 		snprintf(vblk->vqs[i].name, VQ_NAME_LEN, "req.%d", i);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  527) 		names[i] = vblk->vqs[i].name;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  528) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  529) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  530) 	/* Discover virtqueues and write information to configuration.  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  531) 	err = virtio_find_vqs(vdev, num_vqs, vqs, callbacks, names, &desc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  532) 	if (err)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  533) 		goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  534) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  535) 	for (i = 0; i < num_vqs; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  536) 		spin_lock_init(&vblk->vqs[i].lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  537) 		vblk->vqs[i].vq = vqs[i];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  538) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  539) 	vblk->num_vqs = num_vqs;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  540) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  541) out:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  542) 	kfree(vqs);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  543) 	kfree(callbacks);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  544) 	kfree(names);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  545) 	if (err)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  546) 		kfree(vblk->vqs);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  547) 	return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  548) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  549) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  550) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  551)  * Legacy naming scheme used for virtio devices.  We are stuck with it for
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  552)  * virtio blk but don't ever use it for any new driver.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  553)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  554) static int virtblk_name_format(char *prefix, int index, char *buf, int buflen)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  555) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  556) 	const int base = 'z' - 'a' + 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  557) 	char *begin = buf + strlen(prefix);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  558) 	char *end = buf + buflen;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  559) 	char *p;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  560) 	int unit;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  561) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  562) 	p = end - 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  563) 	*p = '\0';
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  564) 	unit = base;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  565) 	do {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  566) 		if (p == begin)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  567) 			return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  568) 		*--p = 'a' + (index % unit);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  569) 		index = (index / unit) - 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  570) 	} while (index >= 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  571) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  572) 	memmove(begin, p, end - p);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  573) 	memcpy(buf, prefix, strlen(prefix));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  574) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  575) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  576) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  577) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  578) static int virtblk_get_cache_mode(struct virtio_device *vdev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  579) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  580) 	u8 writeback;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  581) 	int err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  582) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  583) 	err = virtio_cread_feature(vdev, VIRTIO_BLK_F_CONFIG_WCE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  584) 				   struct virtio_blk_config, wce,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  585) 				   &writeback);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  586) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  587) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  588) 	 * If WCE is not configurable and flush is not available,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  589) 	 * assume no writeback cache is in use.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  590) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  591) 	if (err)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  592) 		writeback = virtio_has_feature(vdev, VIRTIO_BLK_F_FLUSH);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  593) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  594) 	return writeback;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  595) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  596) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  597) static void virtblk_update_cache_mode(struct virtio_device *vdev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  598) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  599) 	u8 writeback = virtblk_get_cache_mode(vdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  600) 	struct virtio_blk *vblk = vdev->priv;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  601) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  602) 	blk_queue_write_cache(vblk->disk->queue, writeback, false);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  603) 	revalidate_disk_size(vblk->disk, true);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  604) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  605) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  606) static const char *const virtblk_cache_types[] = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  607) 	"write through", "write back"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  608) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  609) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  610) static ssize_t
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  611) cache_type_store(struct device *dev, struct device_attribute *attr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  612) 		 const char *buf, size_t count)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  613) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  614) 	struct gendisk *disk = dev_to_disk(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  615) 	struct virtio_blk *vblk = disk->private_data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  616) 	struct virtio_device *vdev = vblk->vdev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  617) 	int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  618) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  619) 	BUG_ON(!virtio_has_feature(vblk->vdev, VIRTIO_BLK_F_CONFIG_WCE));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  620) 	i = sysfs_match_string(virtblk_cache_types, buf);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  621) 	if (i < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  622) 		return i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  623) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  624) 	virtio_cwrite8(vdev, offsetof(struct virtio_blk_config, wce), i);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  625) 	virtblk_update_cache_mode(vdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  626) 	return count;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  627) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  628) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  629) static ssize_t
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  630) cache_type_show(struct device *dev, struct device_attribute *attr, char *buf)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  631) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  632) 	struct gendisk *disk = dev_to_disk(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  633) 	struct virtio_blk *vblk = disk->private_data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  634) 	u8 writeback = virtblk_get_cache_mode(vblk->vdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  635) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  636) 	BUG_ON(writeback >= ARRAY_SIZE(virtblk_cache_types));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  637) 	return snprintf(buf, 40, "%s\n", virtblk_cache_types[writeback]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  638) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  639) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  640) static DEVICE_ATTR_RW(cache_type);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  641) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  642) static struct attribute *virtblk_attrs[] = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  643) 	&dev_attr_serial.attr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  644) 	&dev_attr_cache_type.attr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  645) 	NULL,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  646) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  647) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  648) static umode_t virtblk_attrs_are_visible(struct kobject *kobj,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  649) 		struct attribute *a, int n)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  650) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  651) 	struct device *dev = kobj_to_dev(kobj);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  652) 	struct gendisk *disk = dev_to_disk(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  653) 	struct virtio_blk *vblk = disk->private_data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  654) 	struct virtio_device *vdev = vblk->vdev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  655) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  656) 	if (a == &dev_attr_cache_type.attr &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  657) 	    !virtio_has_feature(vdev, VIRTIO_BLK_F_CONFIG_WCE))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  658) 		return S_IRUGO;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  659) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  660) 	return a->mode;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  661) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  662) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  663) static const struct attribute_group virtblk_attr_group = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  664) 	.attrs = virtblk_attrs,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  665) 	.is_visible = virtblk_attrs_are_visible,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  666) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  667) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  668) static const struct attribute_group *virtblk_attr_groups[] = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  669) 	&virtblk_attr_group,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  670) 	NULL,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  671) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  672) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  673) static int virtblk_init_request(struct blk_mq_tag_set *set, struct request *rq,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  674) 		unsigned int hctx_idx, unsigned int numa_node)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  675) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  676) 	struct virtio_blk *vblk = set->driver_data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  677) 	struct virtblk_req *vbr = blk_mq_rq_to_pdu(rq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  678) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  679) 	sg_init_table(vbr->sg, vblk->sg_elems);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  680) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  681) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  682) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  683) static int virtblk_map_queues(struct blk_mq_tag_set *set)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  684) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  685) 	struct virtio_blk *vblk = set->driver_data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  686) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  687) 	return blk_mq_virtio_map_queues(&set->map[HCTX_TYPE_DEFAULT],
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  688) 					vblk->vdev, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  689) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  690) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  691) static const struct blk_mq_ops virtio_mq_ops = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  692) 	.queue_rq	= virtio_queue_rq,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  693) 	.commit_rqs	= virtio_commit_rqs,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  694) 	.complete	= virtblk_request_done,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  695) 	.init_request	= virtblk_init_request,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  696) 	.map_queues	= virtblk_map_queues,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  697) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  698) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  699) static unsigned int virtblk_queue_depth;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  700) module_param_named(queue_depth, virtblk_queue_depth, uint, 0444);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  701) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  702) static int virtblk_probe(struct virtio_device *vdev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  703) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  704) 	struct virtio_blk *vblk;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  705) 	struct request_queue *q;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  706) 	int err, index;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  707) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  708) 	u32 v, blk_size, max_size, sg_elems, opt_io_size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  709) 	u16 min_io_size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  710) 	u8 physical_block_exp, alignment_offset;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  711) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  712) 	if (!vdev->config->get) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  713) 		dev_err(&vdev->dev, "%s failure: config access disabled\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  714) 			__func__);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  715) 		return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  716) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  717) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  718) 	err = ida_simple_get(&vd_index_ida, 0, minor_to_index(1 << MINORBITS),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  719) 			     GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  720) 	if (err < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  721) 		goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  722) 	index = err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  723) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  724) 	/* We need to know how many segments before we allocate. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  725) 	err = virtio_cread_feature(vdev, VIRTIO_BLK_F_SEG_MAX,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  726) 				   struct virtio_blk_config, seg_max,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  727) 				   &sg_elems);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  728) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  729) 	/* We need at least one SG element, whatever they say. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  730) 	if (err || !sg_elems)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  731) 		sg_elems = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  732) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  733) 	/* We need an extra sg elements at head and tail. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  734) 	sg_elems += 2;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  735) 	vdev->priv = vblk = kmalloc(sizeof(*vblk), GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  736) 	if (!vblk) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  737) 		err = -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  738) 		goto out_free_index;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  739) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  740) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  741) 	/* This reference is dropped in virtblk_remove(). */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  742) 	refcount_set(&vblk->refs, 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  743) 	mutex_init(&vblk->vdev_mutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  744) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  745) 	vblk->vdev = vdev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  746) 	vblk->sg_elems = sg_elems;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  747) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  748) 	INIT_WORK(&vblk->config_work, virtblk_config_changed_work);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  749) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  750) 	err = init_vq(vblk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  751) 	if (err)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  752) 		goto out_free_vblk;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  753) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  754) 	/* FIXME: How many partitions?  How long is a piece of string? */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  755) 	vblk->disk = alloc_disk(1 << PART_BITS);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  756) 	if (!vblk->disk) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  757) 		err = -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  758) 		goto out_free_vq;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  759) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  760) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  761) 	/* Default queue sizing is to fill the ring. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  762) 	if (!virtblk_queue_depth) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  763) 		virtblk_queue_depth = vblk->vqs[0].vq->num_free;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  764) 		/* ... but without indirect descs, we use 2 descs per req */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  765) 		if (!virtio_has_feature(vdev, VIRTIO_RING_F_INDIRECT_DESC))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  766) 			virtblk_queue_depth /= 2;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  767) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  768) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  769) 	memset(&vblk->tag_set, 0, sizeof(vblk->tag_set));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  770) 	vblk->tag_set.ops = &virtio_mq_ops;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  771) 	vblk->tag_set.queue_depth = virtblk_queue_depth;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  772) 	vblk->tag_set.numa_node = NUMA_NO_NODE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  773) 	vblk->tag_set.flags = BLK_MQ_F_SHOULD_MERGE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  774) 	vblk->tag_set.cmd_size =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  775) 		sizeof(struct virtblk_req) +
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  776) 		sizeof(struct scatterlist) * sg_elems;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  777) 	vblk->tag_set.driver_data = vblk;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  778) 	vblk->tag_set.nr_hw_queues = vblk->num_vqs;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  779) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  780) 	err = blk_mq_alloc_tag_set(&vblk->tag_set);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  781) 	if (err)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  782) 		goto out_put_disk;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  783) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  784) 	q = blk_mq_init_queue(&vblk->tag_set);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  785) 	if (IS_ERR(q)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  786) 		err = -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  787) 		goto out_free_tags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  788) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  789) 	vblk->disk->queue = q;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  790) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  791) 	q->queuedata = vblk;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  792) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  793) 	virtblk_name_format("vd", index, vblk->disk->disk_name, DISK_NAME_LEN);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  794) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  795) 	vblk->disk->major = major;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  796) 	vblk->disk->first_minor = index_to_minor(index);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  797) 	vblk->disk->private_data = vblk;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  798) 	vblk->disk->fops = &virtblk_fops;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  799) 	vblk->disk->flags |= GENHD_FL_EXT_DEVT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  800) 	vblk->index = index;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  801) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  802) 	/* configure queue flush support */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  803) 	virtblk_update_cache_mode(vdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  804) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  805) 	/* If disk is read-only in the host, the guest should obey */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  806) 	if (virtio_has_feature(vdev, VIRTIO_BLK_F_RO))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  807) 		set_disk_ro(vblk->disk, 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  808) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  809) 	/* We can handle whatever the host told us to handle. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  810) 	blk_queue_max_segments(q, vblk->sg_elems-2);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  811) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  812) 	/* No real sector limit. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  813) 	blk_queue_max_hw_sectors(q, -1U);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  814) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  815) 	max_size = virtio_max_dma_size(vdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  816) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  817) 	/* Host can optionally specify maximum segment size and number of
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  818) 	 * segments. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  819) 	err = virtio_cread_feature(vdev, VIRTIO_BLK_F_SIZE_MAX,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  820) 				   struct virtio_blk_config, size_max, &v);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  821) 	if (!err)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  822) 		max_size = min(max_size, v);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  823) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  824) 	blk_queue_max_segment_size(q, max_size);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  825) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  826) 	/* Host can optionally specify the block size of the device */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  827) 	err = virtio_cread_feature(vdev, VIRTIO_BLK_F_BLK_SIZE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  828) 				   struct virtio_blk_config, blk_size,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  829) 				   &blk_size);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  830) 	if (!err) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  831) 		err = blk_validate_block_size(blk_size);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  832) 		if (err) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  833) 			dev_err(&vdev->dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  834) 				"virtio_blk: invalid block size: 0x%x\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  835) 				blk_size);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  836) 			goto out_cleanup_disk;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  837) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  838) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  839) 		blk_queue_logical_block_size(q, blk_size);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  840) 	} else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  841) 		blk_size = queue_logical_block_size(q);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  842) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  843) 	/* Use topology information if available */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  844) 	err = virtio_cread_feature(vdev, VIRTIO_BLK_F_TOPOLOGY,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  845) 				   struct virtio_blk_config, physical_block_exp,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  846) 				   &physical_block_exp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  847) 	if (!err && physical_block_exp)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  848) 		blk_queue_physical_block_size(q,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  849) 				blk_size * (1 << physical_block_exp));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  850) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  851) 	err = virtio_cread_feature(vdev, VIRTIO_BLK_F_TOPOLOGY,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  852) 				   struct virtio_blk_config, alignment_offset,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  853) 				   &alignment_offset);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  854) 	if (!err && alignment_offset)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  855) 		blk_queue_alignment_offset(q, blk_size * alignment_offset);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  856) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  857) 	err = virtio_cread_feature(vdev, VIRTIO_BLK_F_TOPOLOGY,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  858) 				   struct virtio_blk_config, min_io_size,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  859) 				   &min_io_size);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  860) 	if (!err && min_io_size)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  861) 		blk_queue_io_min(q, blk_size * min_io_size);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  862) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  863) 	err = virtio_cread_feature(vdev, VIRTIO_BLK_F_TOPOLOGY,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  864) 				   struct virtio_blk_config, opt_io_size,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  865) 				   &opt_io_size);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  866) 	if (!err && opt_io_size)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  867) 		blk_queue_io_opt(q, blk_size * opt_io_size);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  868) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  869) 	if (virtio_has_feature(vdev, VIRTIO_BLK_F_DISCARD)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  870) 		q->limits.discard_granularity = blk_size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  871) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  872) 		virtio_cread(vdev, struct virtio_blk_config,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  873) 			     discard_sector_alignment, &v);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  874) 		q->limits.discard_alignment = v ? v << SECTOR_SHIFT : 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  875) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  876) 		virtio_cread(vdev, struct virtio_blk_config,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  877) 			     max_discard_sectors, &v);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  878) 		blk_queue_max_discard_sectors(q, v ? v : UINT_MAX);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  879) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  880) 		virtio_cread(vdev, struct virtio_blk_config, max_discard_seg,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  881) 			     &v);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  882) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  883) 		/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  884) 		 * max_discard_seg == 0 is out of spec but we always
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  885) 		 * handled it.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  886) 		 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  887) 		if (!v)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  888) 			v = sg_elems - 2;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  889) 		blk_queue_max_discard_segments(q,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  890) 					       min(v, MAX_DISCARD_SEGMENTS));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  891) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  892) 		blk_queue_flag_set(QUEUE_FLAG_DISCARD, q);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  893) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  894) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  895) 	if (virtio_has_feature(vdev, VIRTIO_BLK_F_WRITE_ZEROES)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  896) 		virtio_cread(vdev, struct virtio_blk_config,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  897) 			     max_write_zeroes_sectors, &v);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  898) 		blk_queue_max_write_zeroes_sectors(q, v ? v : UINT_MAX);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  899) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  900) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  901) 	virtblk_update_capacity(vblk, false);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  902) 	virtio_device_ready(vdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  903) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  904) 	device_add_disk(&vdev->dev, vblk->disk, virtblk_attr_groups);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  905) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  906) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  907) out_cleanup_disk:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  908) 	blk_cleanup_queue(vblk->disk->queue);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  909) out_free_tags:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  910) 	blk_mq_free_tag_set(&vblk->tag_set);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  911) out_put_disk:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  912) 	put_disk(vblk->disk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  913) out_free_vq:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  914) 	vdev->config->del_vqs(vdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  915) 	kfree(vblk->vqs);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  916) out_free_vblk:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  917) 	kfree(vblk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  918) out_free_index:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  919) 	ida_simple_remove(&vd_index_ida, index);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  920) out:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  921) 	return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  922) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  923) 
static void virtblk_remove(struct virtio_device *vdev)
{
	struct virtio_blk *vblk = vdev->priv;

	/* Make sure no work handler is accessing the device. */
	flush_work(&vblk->config_work);

	/*
	 * Remove the disk from the block layer first so no new requests
	 * arrive, then tear down the request queue and tag set.
	 */
	del_gendisk(vblk->disk);
	blk_cleanup_queue(vblk->disk->queue);

	blk_mq_free_tag_set(&vblk->tag_set);

	/*
	 * Hold vdev_mutex across the reset and the NULL store below so
	 * concurrent readers of vblk->vdev see either a fully working
	 * device or NULL, never a half-torn-down one.
	 */
	mutex_lock(&vblk->vdev_mutex);

	/* Stop all the virtqueues. */
	vdev->config->reset(vdev);

	/* Virtqueues are stopped, nothing can use vblk->vdev anymore. */
	vblk->vdev = NULL;

	put_disk(vblk->disk);
	vdev->config->del_vqs(vdev);
	kfree(vblk->vqs);

	mutex_unlock(&vblk->vdev_mutex);

	/*
	 * Drop this path's reference; vblk itself is freed once the last
	 * reference is gone (refcount semantics live in virtblk_put(),
	 * defined earlier in this file).
	 */
	virtblk_put(vblk);
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  952) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  953) #ifdef CONFIG_PM_SLEEP
static int virtblk_freeze(struct virtio_device *vdev)
{
	struct virtio_blk *vblk = vdev->priv;

	/* Ensure we don't receive any more interrupts */
	vdev->config->reset(vdev);

	/* Make sure no work handler is accessing the device. */
	flush_work(&vblk->config_work);

	/*
	 * Park the request queue; requests stay queued and are replayed
	 * after virtblk_restore() unquiesces it.
	 */
	blk_mq_quiesce_queue(vblk->disk->queue);

	/* The virtqueues are torn down here and rebuilt on restore. */
	vdev->config->del_vqs(vdev);
	kfree(vblk->vqs);

	return 0;
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  971) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  972) static int virtblk_restore(struct virtio_device *vdev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  973) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  974) 	struct virtio_blk *vblk = vdev->priv;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  975) 	int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  976) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  977) 	ret = init_vq(vdev->priv);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  978) 	if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  979) 		return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  980) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  981) 	virtio_device_ready(vdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  982) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  983) 	blk_mq_unquiesce_queue(vblk->disk->queue);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  984) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  985) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  986) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  987) 
/* Match any virtio block device, from any vendor. */
static const struct virtio_device_id id_table[] = {
	{ VIRTIO_ID_BLOCK, VIRTIO_DEV_ANY_ID },
	{ 0 },
};
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  992) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  993) static unsigned int features_legacy[] = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  994) 	VIRTIO_BLK_F_SEG_MAX, VIRTIO_BLK_F_SIZE_MAX, VIRTIO_BLK_F_GEOMETRY,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  995) 	VIRTIO_BLK_F_RO, VIRTIO_BLK_F_BLK_SIZE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  996) 	VIRTIO_BLK_F_FLUSH, VIRTIO_BLK_F_TOPOLOGY, VIRTIO_BLK_F_CONFIG_WCE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  997) 	VIRTIO_BLK_F_MQ, VIRTIO_BLK_F_DISCARD, VIRTIO_BLK_F_WRITE_ZEROES,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  998) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  999) ;
/*
 * Feature bits negotiated with modern (VIRTIO 1.0+) devices.
 * Currently identical to features_legacy[]; kept separate so the two
 * lists can diverge independently.
 */
static unsigned int features[] = {
	VIRTIO_BLK_F_SEG_MAX, VIRTIO_BLK_F_SIZE_MAX, VIRTIO_BLK_F_GEOMETRY,
	VIRTIO_BLK_F_RO, VIRTIO_BLK_F_BLK_SIZE,
	VIRTIO_BLK_F_FLUSH, VIRTIO_BLK_F_TOPOLOGY, VIRTIO_BLK_F_CONFIG_WCE,
	VIRTIO_BLK_F_MQ, VIRTIO_BLK_F_DISCARD, VIRTIO_BLK_F_WRITE_ZEROES,
};
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1006) 
/* Virtio bus glue: probe/remove, feature tables and PM hooks. */
static struct virtio_driver virtio_blk = {
	.feature_table			= features,
	.feature_table_size		= ARRAY_SIZE(features),
	.feature_table_legacy		= features_legacy,
	.feature_table_size_legacy	= ARRAY_SIZE(features_legacy),
	.driver.name			= KBUILD_MODNAME,
	.driver.owner			= THIS_MODULE,
	.id_table			= id_table,
	.probe				= virtblk_probe,
	.remove				= virtblk_remove,
	.config_changed			= virtblk_config_changed,
#ifdef CONFIG_PM_SLEEP
	.freeze				= virtblk_freeze,
	.restore			= virtblk_restore,
#endif
};
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1023) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1024) static int __init init(void)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1025) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1026) 	int error;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1027) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1028) 	virtblk_wq = alloc_workqueue("virtio-blk", 0, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1029) 	if (!virtblk_wq)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1030) 		return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1031) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1032) 	major = register_blkdev(0, "virtblk");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1033) 	if (major < 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1034) 		error = major;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1035) 		goto out_destroy_workqueue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1036) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1037) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1038) 	error = register_virtio_driver(&virtio_blk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1039) 	if (error)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1040) 		goto out_unregister_blkdev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1041) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1042) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1043) out_unregister_blkdev:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1044) 	unregister_blkdev(major, "virtblk");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1045) out_destroy_workqueue:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1046) 	destroy_workqueue(virtblk_wq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1047) 	return error;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1048) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1049) 
/* Module exit: undo init() in strict reverse order. */
static void __exit fini(void)
{
	unregister_virtio_driver(&virtio_blk);
	unregister_blkdev(major, "virtblk");
	destroy_workqueue(virtblk_wq);
}
module_init(init);
module_exit(fini);

/* Export the match table so udev/modprobe can autoload this module. */
MODULE_DEVICE_TABLE(virtio, id_table);
MODULE_DESCRIPTION("Virtio block driver");
MODULE_LICENSE("GPL");