Orange Pi5 kernel

Deprecated Linux kernel 5.10.110 for OrangePi 5/5B/5+ boards

/*
 * blkfront.c
 *
 * XenLinux virtual block device driver.
 *
 * Copyright (c) 2003-2004, Keir Fraser & Steve Hand
 * Modifications by Mark A. Williamson are (c) Intel Research Cambridge
 * Copyright (c) 2004, Christian Limpach
 * Copyright (c) 2004, Andrew Warfield
 * Copyright (c) 2005, Christopher Clark
 * Copyright (c) 2005, XenSource Ltd
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License version 2
 * as published by the Free Software Foundation; or, when distributed
 * separately from the Linux kernel or incorporated into other
 * software packages, subject to the following license:
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this source file (the "Software"), to deal in the Software without
 * restriction, including without limitation the rights to use, copy, modify,
 * merge, publish, distribute, sublicense, and/or sell copies of the Software,
 * and to permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */

#include <linux/interrupt.h>
#include <linux/blkdev.h>
#include <linux/blk-mq.h>
#include <linux/hdreg.h>
#include <linux/cdrom.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/mutex.h>
#include <linux/scatterlist.h>
#include <linux/bitmap.h>
#include <linux/list.h>
#include <linux/workqueue.h>
#include <linux/sched/mm.h>

#include <xen/xen.h>
#include <xen/xenbus.h>
#include <xen/grant_table.h>
#include <xen/events.h>
#include <xen/page.h>
#include <xen/platform_pci.h>

#include <xen/interface/grant_table.h>
#include <xen/interface/io/blkif.h>
#include <xen/interface/io/protocols.h>

#include <asm/xen/hypervisor.h>

/*
 * The minimal segment size supported by the block framework is PAGE_SIZE.
 * When Linux uses a different page size than Xen, it may not be possible
 * to put all the data in a single segment.
 * This can happen when the backend doesn't support indirect descriptors
 * and therefore the maximum amount of data that a request can carry is
 * BLKIF_MAX_SEGMENTS_PER_REQUEST * XEN_PAGE_SIZE = 44KB.
 *
 * Note that we only support one extra request. So the Linux page size
 * should be <= (2 * BLKIF_MAX_SEGMENTS_PER_REQUEST * XEN_PAGE_SIZE) =
 * 88KB.
 */
#define HAS_EXTRA_REQ (BLKIF_MAX_SEGMENTS_PER_REQUEST < XEN_PFN_PER_PAGE)
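/*
 * Worked example (assuming BLKIF_MAX_SEGMENTS_PER_REQUEST = 11 and 4KB
 * Xen pages): with 4KB Linux pages XEN_PFN_PER_PAGE = 1, so no extra
 * request is ever needed; with 64KB Linux pages XEN_PFN_PER_PAGE = 16 >
 * 11, so a single Linux page may not fit in one request and
 * HAS_EXTRA_REQ is true.
 */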

enum blkif_state {
	BLKIF_STATE_DISCONNECTED,
	BLKIF_STATE_CONNECTED,
	BLKIF_STATE_SUSPENDED,
	BLKIF_STATE_ERROR,
};

struct grant {
	grant_ref_t gref;
	struct page *page;
	struct list_head node;
};

enum blk_req_status {
	REQ_PROCESSING,
	REQ_WAITING,
	REQ_DONE,
	REQ_ERROR,
	REQ_EOPNOTSUPP,
};

struct blk_shadow {
	struct blkif_request req;
	struct request *request;
	struct grant **grants_used;
	struct grant **indirect_grants;
	struct scatterlist *sg;
	unsigned int num_sg;
	enum blk_req_status status;

	#define NO_ASSOCIATED_ID ~0UL
	/*
	 * Id of the sibling if we ever need 2 requests when handling a
	 * block I/O request
	 */
	unsigned long associated_id;
};

struct blkif_req {
	blk_status_t	error;
};

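/*
 * Per-request driver private data: blk-mq allocates this immediately
 * after each struct request (sized via the tag set's cmd_size), which is
 * the memory blk_mq_rq_to_pdu() returns a pointer to.
 */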
static inline struct blkif_req *blkif_req(struct request *rq)
{
	return blk_mq_rq_to_pdu(rq);
}

static DEFINE_MUTEX(blkfront_mutex);
static const struct block_device_operations xlvbd_block_fops;
static struct delayed_work blkfront_work;
static LIST_HEAD(info_list);

/*
 * Maximum number of segments in indirect requests; the actual value used
 * by the frontend driver is the minimum of this value and the value
 * provided by the backend driver.
 */
static unsigned int xen_blkif_max_segments = 32;
module_param_named(max_indirect_segments, xen_blkif_max_segments, uint, 0444);
MODULE_PARM_DESC(max_indirect_segments,
		 "Maximum amount of segments in indirect requests (default is 32)");

static unsigned int xen_blkif_max_queues = 4;
module_param_named(max_queues, xen_blkif_max_queues, uint, 0444);
MODULE_PARM_DESC(max_queues, "Maximum number of hardware queues/rings used per virtual disk");

/*
 * Maximum order of pages to be used for the shared ring between front and
 * backend; 4KB page granularity is used.
 */
static unsigned int xen_blkif_max_ring_order;
module_param_named(max_ring_page_order, xen_blkif_max_ring_order, int, 0444);
MODULE_PARM_DESC(max_ring_page_order, "Maximum order of pages to be used for the shared ring");

#define BLK_RING_SIZE(info)	\
	__CONST_RING_SIZE(blkif, XEN_PAGE_SIZE * (info)->nr_ring_pages)
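/*
 * Worked example (assuming the standard blkif ring entry layout): the
 * macro rounds the usable entry count down to a power of two, so a
 * single 4KB ring page gives 32 in-flight requests, doubling with each
 * additional ring page.
 */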

/*
 * With i = (-1UL), "ring-ref%u" would take 11 characters for the number
 * plus 8 for "ring-ref", so 19 characters are enough. Define it as 20 to
 * stay consistent with the backend.
 */
#define RINGREF_NAME_LEN (20)
/*
 * "queue-%u" would take 7 ("queue-" plus a trailing NUL) + 10 (digits of
 * UINT_MAX) = 17 characters.
 */
#define QUEUE_NAME_LEN (17)

/*
 * Per-ring info.
 * Every blkfront device can associate with one or more blkfront_ring_info,
 * depending on how many hardware queues/rings are used.
 */
struct blkfront_ring_info {
	/* Lock to protect data in every ring buffer. */
	spinlock_t ring_lock;
	struct blkif_front_ring ring;
	unsigned int ring_ref[XENBUS_MAX_RING_GRANTS];
	unsigned int evtchn, irq;
	struct work_struct work;
	struct gnttab_free_callback callback;
	struct list_head indirect_pages;
	struct list_head grants;
	unsigned int persistent_gnts_c;
	unsigned long shadow_free;
	struct blkfront_info *dev_info;
	struct blk_shadow shadow[];
};

/*
 * We have one of these per vbd, whether ide, scsi or 'other'.  They
 * hang in private_data off the gendisk structure. We may end up
 * putting all kinds of interesting stuff here :-)
 */
struct blkfront_info
{
	struct mutex mutex;
	struct xenbus_device *xbdev;
	struct gendisk *gd;
	u16 sector_size;
	unsigned int physical_sector_size;
	int vdevice;
	blkif_vdev_t handle;
	enum blkif_state connected;
	/* Number of pages per ring buffer. */
	unsigned int nr_ring_pages;
	struct request_queue *rq;
	unsigned int feature_flush:1;
	unsigned int feature_fua:1;
	unsigned int feature_discard:1;
	unsigned int feature_secdiscard:1;
	unsigned int feature_persistent:1;
	unsigned int discard_granularity;
	unsigned int discard_alignment;
	/* Number of 4KB segments handled */
	unsigned int max_indirect_segments;
	int is_ready;
	struct blk_mq_tag_set tag_set;
	struct blkfront_ring_info *rinfo;
	unsigned int nr_rings;
	unsigned int rinfo_size;
	/* Save incomplete reqs and bios for migration. */
	struct list_head requests;
	struct bio_list bio_list;
	struct list_head info_list;
};

static unsigned int nr_minors;
static unsigned long *minors;
static DEFINE_SPINLOCK(minor_lock);

#define GRANT_INVALID_REF	0

#define PARTS_PER_DISK		16
#define PARTS_PER_EXT_DISK	256

#define BLKIF_MAJOR(dev) ((dev)>>8)
#define BLKIF_MINOR(dev) ((dev) & 0xff)

#define EXT_SHIFT 28
#define EXTENDED (1<<EXT_SHIFT)
#define VDEV_IS_EXTENDED(dev) ((dev)&(EXTENDED))
#define BLKIF_MINOR_EXT(dev) ((dev)&(~EXTENDED))
#define EMULATED_HD_DISK_MINOR_OFFSET (0)
#define EMULATED_HD_DISK_NAME_OFFSET (EMULATED_HD_DISK_MINOR_OFFSET / 256)
#define EMULATED_SD_DISK_MINOR_OFFSET (0)
#define EMULATED_SD_DISK_NAME_OFFSET (EMULATED_SD_DISK_MINOR_OFFSET / 256)
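/*
 * Decoding sketch (hypothetical value): vdevice = EXTENDED | 257 is
 * recognized by VDEV_IS_EXTENDED(), and BLKIF_MINOR_EXT() yields 257,
 * i.e. disk 257 / PARTS_PER_EXT_DISK = 1 and partition
 * 257 % PARTS_PER_EXT_DISK = 1 -- partition 1 of the second "xvd" disk
 * (xvdb1).
 */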

#define DEV_NAME	"xvd"	/* name in /dev */

/*
 * Grants are always the same size as a Xen page (i.e. 4KB).
 * A physical segment is always the same size as a Linux page.
 * Number of grants per physical segment:
 */
#define GRANTS_PER_PSEG	(PAGE_SIZE / XEN_PAGE_SIZE)

#define GRANTS_PER_INDIRECT_FRAME \
	(XEN_PAGE_SIZE / sizeof(struct blkif_request_segment))

#define INDIRECT_GREFS(_grants)		\
	DIV_ROUND_UP(_grants, GRANTS_PER_INDIRECT_FRAME)
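/*
 * Worked example: with 4KB Xen pages and an 8-byte
 * struct blkif_request_segment this gives GRANTS_PER_INDIRECT_FRAME =
 * 512, so INDIRECT_GREFS(512) = 1 and INDIRECT_GREFS(513) = 2.
 */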

static int blkfront_setup_indirect(struct blkfront_ring_info *rinfo);
static void blkfront_gather_backend_features(struct blkfront_info *info);
static int negotiate_mq(struct blkfront_info *info);

#define for_each_rinfo(info, ptr, idx)				\
	for ((ptr) = (info)->rinfo, (idx) = 0;			\
	     (idx) < (info)->nr_rings;				\
	     (idx)++, (ptr) = (void *)(ptr) + (info)->rinfo_size)
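/*
 * Usage sketch (do_something() is a hypothetical callback):
 *
 *	struct blkfront_ring_info *rinfo;
 *	unsigned int i;
 *
 *	for_each_rinfo(info, rinfo, i)
 *		do_something(rinfo);
 */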

static inline struct blkfront_ring_info *
get_rinfo(const struct blkfront_info *info, unsigned int i)
{
	BUG_ON(i >= info->nr_rings);
	return (void *)info->rinfo + i * info->rinfo_size;
}

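/*
 * The shadow array doubles as a free list: req.u.rw.id of an unused
 * shadow entry holds the index of the next free entry and
 * rinfo->shadow_free points at the head, so get_id_from_freelist() pops
 * an index and add_id_to_freelist() pushes one back.
 */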
static int get_id_from_freelist(struct blkfront_ring_info *rinfo)
{
	unsigned long free = rinfo->shadow_free;

	BUG_ON(free >= BLK_RING_SIZE(rinfo->dev_info));
	rinfo->shadow_free = rinfo->shadow[free].req.u.rw.id;
	rinfo->shadow[free].req.u.rw.id = 0x0fffffee; /* debug */
	return free;
}

static int add_id_to_freelist(struct blkfront_ring_info *rinfo,
			      unsigned long id)
{
	if (rinfo->shadow[id].req.u.rw.id != id)
		return -EINVAL;
	if (rinfo->shadow[id].request == NULL)
		return -EINVAL;
	rinfo->shadow[id].req.u.rw.id  = rinfo->shadow_free;
	rinfo->shadow[id].request = NULL;
	rinfo->shadow_free = id;
	return 0;
}

static int fill_grant_buffer(struct blkfront_ring_info *rinfo, int num)
{
	struct blkfront_info *info = rinfo->dev_info;
	struct page *granted_page;
	struct grant *gnt_list_entry, *n;
	int i = 0;

	while (i < num) {
		gnt_list_entry = kzalloc(sizeof(struct grant), GFP_NOIO);
		if (!gnt_list_entry)
			goto out_of_memory;

		if (info->feature_persistent) {
			granted_page = alloc_page(GFP_NOIO);
			if (!granted_page) {
				kfree(gnt_list_entry);
				goto out_of_memory;
			}
			gnt_list_entry->page = granted_page;
		}

		gnt_list_entry->gref = GRANT_INVALID_REF;
		list_add(&gnt_list_entry->node, &rinfo->grants);
		i++;
	}

	return 0;

out_of_memory:
	list_for_each_entry_safe(gnt_list_entry, n,
				 &rinfo->grants, node) {
		list_del(&gnt_list_entry->node);
		if (info->feature_persistent)
			__free_page(gnt_list_entry->page);
		kfree(gnt_list_entry);
		i--;
	}
	BUG_ON(i != 0);
	return -ENOMEM;
}

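/*
 * Note: a cached grant whose gref is still valid is a persistent grant
 * being reused, so taking it off the list must also drop the persistent
 * grant count.
 */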
static struct grant *get_free_grant(struct blkfront_ring_info *rinfo)
{
	struct grant *gnt_list_entry;

	BUG_ON(list_empty(&rinfo->grants));
	gnt_list_entry = list_first_entry(&rinfo->grants, struct grant,
					  node);
	list_del(&gnt_list_entry->node);

	if (gnt_list_entry->gref != GRANT_INVALID_REF)
		rinfo->persistent_gnts_c--;

	return gnt_list_entry;
}

static inline void grant_foreign_access(const struct grant *gnt_list_entry,
					const struct blkfront_info *info)
{
	gnttab_page_grant_foreign_access_ref_one(gnt_list_entry->gref,
						 info->xbdev->otherend_id,
						 gnt_list_entry->page,
						 0);
}

static struct grant *get_grant(grant_ref_t *gref_head,
			       unsigned long gfn,
			       struct blkfront_ring_info *rinfo)
{
	struct grant *gnt_list_entry = get_free_grant(rinfo);
	struct blkfront_info *info = rinfo->dev_info;

	if (gnt_list_entry->gref != GRANT_INVALID_REF)
		return gnt_list_entry;

	/* Assign a gref to this page */
	gnt_list_entry->gref = gnttab_claim_grant_reference(gref_head);
	BUG_ON(gnt_list_entry->gref == -ENOSPC);
	if (info->feature_persistent)
		grant_foreign_access(gnt_list_entry, info);
	else {
		/* Grant access to the GFN passed by the caller */
		gnttab_grant_foreign_access_ref(gnt_list_entry->gref,
						info->xbdev->otherend_id,
						gfn, 0);
	}

	return gnt_list_entry;
}

static struct grant *get_indirect_grant(grant_ref_t *gref_head,
					struct blkfront_ring_info *rinfo)
{
	struct grant *gnt_list_entry = get_free_grant(rinfo);
	struct blkfront_info *info = rinfo->dev_info;

	if (gnt_list_entry->gref != GRANT_INVALID_REF)
		return gnt_list_entry;

	/* Assign a gref to this page */
	gnt_list_entry->gref = gnttab_claim_grant_reference(gref_head);
	BUG_ON(gnt_list_entry->gref == -ENOSPC);
	if (!info->feature_persistent) {
		struct page *indirect_page;

		/* Fetch a pre-allocated page to use for indirect grefs */
		BUG_ON(list_empty(&rinfo->indirect_pages));
		indirect_page = list_first_entry(&rinfo->indirect_pages,
						 struct page, lru);
		list_del(&indirect_page->lru);
		gnt_list_entry->page = indirect_page;
	}
	grant_foreign_access(gnt_list_entry, info);

	return gnt_list_entry;
}

static const char *op_name(int op)
{
	static const char *const names[] = {
		[BLKIF_OP_READ] = "read",
		[BLKIF_OP_WRITE] = "write",
		[BLKIF_OP_WRITE_BARRIER] = "barrier",
		[BLKIF_OP_FLUSH_DISKCACHE] = "flush",
		[BLKIF_OP_DISCARD] = "discard" };

	if (op < 0 || op >= ARRAY_SIZE(names))
		return "unknown";

	if (!names[op])
		return "reserved";

	return names[op];
}
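
/*
 * Minor number bookkeeping: a global bitmap tracks which xvd minors are
 * in use. xlbd_reserve_minors() grows the bitmap as needed (allocating
 * outside the lock and re-checking under it), then marks
 * [minor, minor + nr) busy if the whole range is free;
 * xlbd_release_minors() clears it again.
 */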
static int xlbd_reserve_minors(unsigned int minor, unsigned int nr)
{
	unsigned int end = minor + nr;
	int rc;

	if (end > nr_minors) {
		unsigned long *bitmap, *old;

		bitmap = kcalloc(BITS_TO_LONGS(end), sizeof(*bitmap),
				 GFP_KERNEL);
		if (bitmap == NULL)
			return -ENOMEM;

		spin_lock(&minor_lock);
		if (end > nr_minors) {
			old = minors;
			memcpy(bitmap, minors,
			       BITS_TO_LONGS(nr_minors) * sizeof(*bitmap));
			minors = bitmap;
			nr_minors = BITS_TO_LONGS(end) * BITS_PER_LONG;
		} else
			old = bitmap;
		spin_unlock(&minor_lock);
		kfree(old);
	}

	spin_lock(&minor_lock);
	if (find_next_bit(minors, end, minor) >= end) {
		bitmap_set(minors, minor, nr);
		rc = 0;
	} else
		rc = -EBUSY;
	spin_unlock(&minor_lock);

	return rc;
}

static void xlbd_release_minors(unsigned int minor, unsigned int nr)
{
	unsigned int end = minor + nr;

	BUG_ON(end > nr_minors);
	spin_lock(&minor_lock);
	bitmap_clear(minors, minor, nr);
	spin_unlock(&minor_lock);
}

static void blkif_restart_queue_callback(void *arg)
{
	struct blkfront_ring_info *rinfo = (struct blkfront_ring_info *)arg;
	schedule_work(&rinfo->work);
}

static int blkif_getgeo(struct block_device *bd, struct hd_geometry *hg)
{
	/*
	 * We don't have real geometry info, but let's at least return
	 * values consistent with the size of the device.
	 */
	sector_t nsect = get_capacity(bd->bd_disk);
	sector_t cylinders = nsect;

	hg->heads = 0xff;
	hg->sectors = 0x3f;
	sector_div(cylinders, hg->heads * hg->sectors);
	hg->cylinders = cylinders;
	if ((sector_t)(hg->cylinders + 1) * hg->heads * hg->sectors < nsect)
		hg->cylinders = 0xffff;
	return 0;
}

static int blkif_ioctl(struct block_device *bdev, fmode_t mode,
		       unsigned command, unsigned long argument)
{
	struct blkfront_info *info = bdev->bd_disk->private_data;
	int i;

	dev_dbg(&info->xbdev->dev, "command: 0x%x, argument: 0x%lx\n",
		command, (long)argument);

	switch (command) {
	case CDROMMULTISESSION:
		dev_dbg(&info->xbdev->dev, "FIXME: support multisession CDs later\n");
		for (i = 0; i < sizeof(struct cdrom_multisession); i++)
			if (put_user(0, (char __user *)(argument + i)))
				return -EFAULT;
		return 0;

	case CDROM_GET_CAPABILITY: {
		struct gendisk *gd = info->gd;
		if (gd->flags & GENHD_FL_CD)
			return 0;
		return -EINVAL;
	}

	default:
		/*printk(KERN_ALERT "ioctl %08x not supported by Xen blkdev\n",
		  command);*/
		return -EINVAL; /* same return as native Linux */
	}

	return 0;
}

static unsigned long blkif_ring_get_request(struct blkfront_ring_info *rinfo,
					    struct request *req,
					    struct blkif_request **ring_req)
{
	unsigned long id;

	*ring_req = RING_GET_REQUEST(&rinfo->ring, rinfo->ring.req_prod_pvt);
	rinfo->ring.req_prod_pvt++;

	id = get_id_from_freelist(rinfo);
	rinfo->shadow[id].request = req;
	rinfo->shadow[id].status = REQ_PROCESSING;
	rinfo->shadow[id].associated_id = NO_ASSOCIATED_ID;

	rinfo->shadow[id].req.u.rw.id = id;

	return id;
}

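/*
 * Note the pattern used by the queueing functions below: each request is
 * first assembled in the shadow copy and only copied to the shared ring
 * page in one final step (*final_ring_req = *ring_req), so the backend
 * never observes a partially written request.
 */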
static int blkif_queue_discard_req(struct request *req, struct blkfront_ring_info *rinfo)
{
	struct blkfront_info *info = rinfo->dev_info;
	struct blkif_request *ring_req, *final_ring_req;
	unsigned long id;

	/* Fill out a communications ring structure. */
	id = blkif_ring_get_request(rinfo, req, &final_ring_req);
	ring_req = &rinfo->shadow[id].req;

	ring_req->operation = BLKIF_OP_DISCARD;
	ring_req->u.discard.nr_sectors = blk_rq_sectors(req);
	ring_req->u.discard.id = id;
	ring_req->u.discard.sector_number = (blkif_sector_t)blk_rq_pos(req);
	if (req_op(req) == REQ_OP_SECURE_ERASE && info->feature_secdiscard)
		ring_req->u.discard.flag = BLKIF_DISCARD_SECURE;
	else
		ring_req->u.discard.flag = 0;

	/* Copy the request to the ring page. */
	*final_ring_req = *ring_req;
	rinfo->shadow[id].status = REQ_WAITING;

	return 0;
}

struct setup_rw_req {
	unsigned int grant_idx;
	struct blkif_request_segment *segments;
	struct blkfront_ring_info *rinfo;
	struct blkif_request *ring_req;
	grant_ref_t gref_head;
	unsigned int id;
	/* Only used when persistent grants are in use and it's a read request */
	bool need_copy;
	unsigned int bvec_off;
	char *bvec_data;

	bool require_extra_req;
	struct blkif_request *extra_ring_req;
};

static void blkif_setup_rw_req_grant(unsigned long gfn, unsigned int offset,
				     unsigned int len, void *data)
{
	struct setup_rw_req *setup = data;
	int n, ref;
	struct grant *gnt_list_entry;
	unsigned int fsect, lsect;
	/* Convenient aliases */
	unsigned int grant_idx = setup->grant_idx;
	struct blkif_request *ring_req = setup->ring_req;
	struct blkfront_ring_info *rinfo = setup->rinfo;
	/*
	 * We always use the shadow of the first request to store the list
	 * of grants associated with the block I/O request. This makes the
	 * completion easier to handle even if the block I/O request is
	 * split.
	 */
	struct blk_shadow *shadow = &rinfo->shadow[setup->id];

	if (unlikely(setup->require_extra_req &&
		     grant_idx >= BLKIF_MAX_SEGMENTS_PER_REQUEST)) {
		/*
		 * We are using the second request, so set up grant_idx
		 * to be the index into that request's segment array.
		 */
		grant_idx -= BLKIF_MAX_SEGMENTS_PER_REQUEST;
		ring_req = setup->extra_ring_req;
	}

	if ((ring_req->operation == BLKIF_OP_INDIRECT) &&
	    (grant_idx % GRANTS_PER_INDIRECT_FRAME == 0)) {
		if (setup->segments)
			kunmap_atomic(setup->segments);

		n = grant_idx / GRANTS_PER_INDIRECT_FRAME;
		gnt_list_entry = get_indirect_grant(&setup->gref_head, rinfo);
		shadow->indirect_grants[n] = gnt_list_entry;
		setup->segments = kmap_atomic(gnt_list_entry->page);
		ring_req->u.indirect.indirect_grefs[n] = gnt_list_entry->gref;
	}

	gnt_list_entry = get_grant(&setup->gref_head, gfn, rinfo);
	ref = gnt_list_entry->gref;
	/*
	 * All the grants are stored in the shadow of the first
	 * request. Therefore we have to use the global index.
	 */
	shadow->grants_used[setup->grant_idx] = gnt_list_entry;

	if (setup->need_copy) {
		void *shared_data;

		shared_data = kmap_atomic(gnt_list_entry->page);
		/*
		 * This does not wipe data stored outside the
		 * range sg->offset..sg->offset+sg->length.
		 * Therefore, blkback *could* see data from
		 * previous requests. This is OK as long as
		 * persistent grants are shared with just one
		 * domain. It may need refactoring if this
		 * changes.
		 */
		memcpy(shared_data + offset,
		       setup->bvec_data + setup->bvec_off,
		       len);

		kunmap_atomic(shared_data);
		setup->bvec_off += len;
	}

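	/*
	 * Convert the byte range covered by this grant into 512-byte
	 * sectors, e.g. offset = 1024 and len = 2048 give fsect = 2 and
	 * lsect = 5 (sectors 2..5 inclusive).
	 */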
	fsect = offset >> 9;
	lsect = fsect + (len >> 9) - 1;
	if (ring_req->operation != BLKIF_OP_INDIRECT) {
		ring_req->u.rw.seg[grant_idx] =
			(struct blkif_request_segment) {
				.gref       = ref,
				.first_sect = fsect,
				.last_sect  = lsect };
	} else {
		setup->segments[grant_idx % GRANTS_PER_INDIRECT_FRAME] =
			(struct blkif_request_segment) {
				.gref       = ref,
				.first_sect = fsect,
				.last_sect  = lsect };
	}

	(setup->grant_idx)++;
}

static void blkif_setup_extra_req(struct blkif_request *first,
				  struct blkif_request *second)
{
	uint16_t nr_segments = first->u.rw.nr_segments;

	/*
	 * The second request is only present when the first request uses
	 * all of its segments; it is always a continuation of the first
	 * one. For example (assuming BLKIF_MAX_SEGMENTS_PER_REQUEST = 11
	 * and 4KB Xen pages), a 16-grant request is split 11 + 5, and the
	 * second request's sector_number advances by
	 * 11 * 4096 / 512 = 88 sectors.
	 */
	first->u.rw.nr_segments = BLKIF_MAX_SEGMENTS_PER_REQUEST;

	second->u.rw.nr_segments = nr_segments - BLKIF_MAX_SEGMENTS_PER_REQUEST;
	second->u.rw.sector_number = first->u.rw.sector_number +
		(BLKIF_MAX_SEGMENTS_PER_REQUEST * XEN_PAGE_SIZE) / 512;

	second->u.rw.handle = first->u.rw.handle;
	second->operation = first->operation;
}

static int blkif_queue_rw_req(struct request *req, struct blkfront_ring_info *rinfo)
{
	struct blkfront_info *info = rinfo->dev_info;
	struct blkif_request *ring_req, *extra_ring_req = NULL;
	struct blkif_request *final_ring_req, *final_extra_ring_req = NULL;
	unsigned long id, extra_id = NO_ASSOCIATED_ID;
	bool require_extra_req = false;
	int i;
	struct setup_rw_req setup = {
		.grant_idx = 0,
		.segments = NULL,
		.rinfo = rinfo,
		.need_copy = rq_data_dir(req) && info->feature_persistent,
	};

	/*
	 * Records whether we can queue the request using only existing
	 * persistent grants, or whether we have to get new grants because
	 * there are not enough free ones.
	 */
	bool new_persistent_gnts = false;
	struct scatterlist *sg;
	int num_sg, max_grefs, num_grant;

	max_grefs = req->nr_phys_segments * GRANTS_PER_PSEG;
	if (max_grefs > BLKIF_MAX_SEGMENTS_PER_REQUEST)
		/*
		 * If we are using indirect segments we need to account
		 * for the indirect grefs used in the request.
		 */
		max_grefs += INDIRECT_GREFS(max_grefs);

	/* Check if we have enough persistent grants to allocate a request */
	if (rinfo->persistent_gnts_c < max_grefs) {
		new_persistent_gnts = true;

		if (gnttab_alloc_grant_references(
		    max_grefs - rinfo->persistent_gnts_c,
		    &setup.gref_head) < 0) {
			gnttab_request_free_callback(
				&rinfo->callback,
				blkif_restart_queue_callback,
				rinfo,
				max_grefs - rinfo->persistent_gnts_c);
			return 1;
		}
	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  753) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  754) 	/* Fill out a communications ring structure. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  755) 	id = blkif_ring_get_request(rinfo, req, &final_ring_req);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  756) 	ring_req = &rinfo->shadow[id].req;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  757) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  758) 	num_sg = blk_rq_map_sg(req->q, req, rinfo->shadow[id].sg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  759) 	num_grant = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  760) 	/* Calculate the number of grants used */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  761) 	for_each_sg(rinfo->shadow[id].sg, sg, num_sg, i)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  762) 	       num_grant += gnttab_count_grant(sg->offset, sg->length);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  763) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  764) 	require_extra_req = info->max_indirect_segments == 0 &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  765) 		num_grant > BLKIF_MAX_SEGMENTS_PER_REQUEST;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  766) 	BUG_ON(!HAS_EXTRA_REQ && require_extra_req);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  767) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  768) 	rinfo->shadow[id].num_sg = num_sg;
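	/*
	 * Three submission paths from here: a request with more segments
	 * than fit in one ring slot uses a BLKIF_OP_INDIRECT descriptor;
	 * if the backend lacks indirect support the transfer is instead
	 * split across two linked ring requests (require_extra_req);
	 * otherwise a plain read/write request is built in place.
	 */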
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  769) 	if (num_grant > BLKIF_MAX_SEGMENTS_PER_REQUEST &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  770) 	    likely(!require_extra_req)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  771) 		/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  772) 		 * The indirect operation can only be a BLKIF_OP_READ or
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  773) 		 * BLKIF_OP_WRITE
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  774) 		 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  775) 		BUG_ON(req_op(req) == REQ_OP_FLUSH || req->cmd_flags & REQ_FUA);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  776) 		ring_req->operation = BLKIF_OP_INDIRECT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  777) 		ring_req->u.indirect.indirect_op = rq_data_dir(req) ?
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  778) 			BLKIF_OP_WRITE : BLKIF_OP_READ;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  779) 		ring_req->u.indirect.sector_number = (blkif_sector_t)blk_rq_pos(req);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  780) 		ring_req->u.indirect.handle = info->handle;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  781) 		ring_req->u.indirect.nr_segments = num_grant;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  782) 	} else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  783) 		ring_req->u.rw.sector_number = (blkif_sector_t)blk_rq_pos(req);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  784) 		ring_req->u.rw.handle = info->handle;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  785) 		ring_req->operation = rq_data_dir(req) ?
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  786) 			BLKIF_OP_WRITE : BLKIF_OP_READ;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  787) 		if (req_op(req) == REQ_OP_FLUSH || req->cmd_flags & REQ_FUA) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  788) 			/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  789) 			 * Ideally we can do an unordered flush-to-disk.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  790) 			 * In case the backend only supports barriers, use that.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  791) 			 * A barrier request is a superset of FUA, so we can
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  792) 			 * implement it the same way.  (It's also a FLUSH+FUA,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  793) 			 * since it is guaranteed ordered WRT previous writes.)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  794) 			 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  795) 			if (info->feature_flush && info->feature_fua)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  796) 				ring_req->operation =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  797) 					BLKIF_OP_WRITE_BARRIER;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  798) 			else if (info->feature_flush)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  799) 				ring_req->operation =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  800) 					BLKIF_OP_FLUSH_DISKCACHE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  801) 			else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  802) 				ring_req->operation = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  803) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  804) 		ring_req->u.rw.nr_segments = num_grant;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  805) 		if (unlikely(require_extra_req)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  806) 			extra_id = blkif_ring_get_request(rinfo, req,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  807) 							  &final_extra_ring_req);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  808) 			extra_ring_req = &rinfo->shadow[extra_id].req;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  809) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  810) 			/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  811) 			 * Only the first request contains the scatter-gather
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  812) 			 * list.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  813) 			 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  814) 			rinfo->shadow[extra_id].num_sg = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  815) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  816) 			blkif_setup_extra_req(ring_req, extra_ring_req);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  817) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  818) 			/* Link the 2 requests together */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  819) 			rinfo->shadow[extra_id].associated_id = id;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  820) 			rinfo->shadow[id].associated_id = extra_id;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  821) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  822) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  823) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  824) 	setup.ring_req = ring_req;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  825) 	setup.id = id;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  826) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  827) 	setup.require_extra_req = require_extra_req;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  828) 	if (unlikely(require_extra_req))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  829) 		setup.extra_ring_req = extra_ring_req;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  830) 
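	/*
	 * Walk the scatterlist and hand each XEN_PAGE_SIZE-sized chunk to
	 * blkif_setup_rw_req_grant(), which fills in one grant per chunk.
	 * For writes using persistent grants (need_copy) the bio page is
	 * mapped here so the data can be copied into the granted pages.
	 */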
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  831) 	for_each_sg(rinfo->shadow[id].sg, sg, num_sg, i) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  832) 		BUG_ON(sg->offset + sg->length > PAGE_SIZE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  833) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  834) 		if (setup.need_copy) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  835) 			setup.bvec_off = sg->offset;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  836) 			setup.bvec_data = kmap_atomic(sg_page(sg));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  837) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  838) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  839) 		gnttab_foreach_grant_in_range(sg_page(sg),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  840) 					      sg->offset,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  841) 					      sg->length,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  842) 					      blkif_setup_rw_req_grant,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  843) 					      &setup);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  844) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  845) 		if (setup.need_copy)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  846) 			kunmap_atomic(setup.bvec_data);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  847) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  848) 	if (setup.segments)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  849) 		kunmap_atomic(setup.segments);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  850) 
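	/*
	 * Until now the request has only been assembled in the shadow
	 * state; publishing it with a single copy below ensures the
	 * backend never observes a partially built request.
	 */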
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  851) 	/* Copy request(s) to the ring page. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  852) 	*final_ring_req = *ring_req;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  853) 	rinfo->shadow[id].status = REQ_WAITING;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  854) 	if (unlikely(require_extra_req)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  855) 		*final_extra_ring_req = *extra_ring_req;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  856) 		rinfo->shadow[extra_id].status = REQ_WAITING;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  857) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  858) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  859) 	if (new_persistent_gnts)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  860) 		gnttab_free_grant_references(setup.gref_head);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  861) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  862) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  863) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  864) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  865) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  866)  * Generate a Xen blkfront IO request from a blk layer request.  Reads
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  867)  * and writes are handled as expected.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  868)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  869)  * @req: a request struct
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  870)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  871) static int blkif_queue_request(struct request *req, struct blkfront_ring_info *rinfo)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  872) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  873) 	if (unlikely(rinfo->dev_info->connected != BLKIF_STATE_CONNECTED))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  874) 		return 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  875) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  876) 	if (unlikely(req_op(req) == REQ_OP_DISCARD ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  877) 		     req_op(req) == REQ_OP_SECURE_ERASE))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  878) 		return blkif_queue_discard_req(req, rinfo);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  879) 	else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  880) 		return blkif_queue_rw_req(req, rinfo);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  881) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  882) 
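/*
 * Push queued requests to the shared ring and kick the backend via the
 * event channel, but only when RING_PUSH_REQUESTS_AND_CHECK_NOTIFY()
 * reports that the backend has asked to be notified of new requests.
 */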
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  883) static inline void flush_requests(struct blkfront_ring_info *rinfo)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  884) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  885) 	int notify;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  886) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  887) 	RING_PUSH_REQUESTS_AND_CHECK_NOTIFY(&rinfo->ring, notify);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  888) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  889) 	if (notify)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  890) 		notify_remote_via_irq(rinfo->irq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  891) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  892) 
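/*
 * Flush and FUA requests can only be honoured when the backend
 * advertises the matching feature; those, like passthrough requests
 * (which blkfront cannot forward), must be failed rather than queued.
 */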
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  893) static inline bool blkif_request_flush_invalid(struct request *req,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  894) 					       struct blkfront_info *info)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  895) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  896) 	return (blk_rq_is_passthrough(req) ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  897) 		((req_op(req) == REQ_OP_FLUSH) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  898) 		 !info->feature_flush) ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  899) 		((req->cmd_flags & REQ_FUA) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  900) 		 !info->feature_fua));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  901) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  902) 
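/*
 * blk-mq .queue_rq handler: pick the ring backing this hardware queue
 * and try to queue one request.  A full ring stops the hardware queue
 * and returns BLK_STS_DEV_RESOURCE so the block layer retries later;
 * an unsupportable flush/FUA request fails with BLK_STS_IOERR.
 */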
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  903) static blk_status_t blkif_queue_rq(struct blk_mq_hw_ctx *hctx,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  904) 			  const struct blk_mq_queue_data *qd)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  905) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  906) 	unsigned long flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  907) 	int qid = hctx->queue_num;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  908) 	struct blkfront_info *info = hctx->queue->queuedata;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  909) 	struct blkfront_ring_info *rinfo = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  910) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  911) 	rinfo = get_rinfo(info, qid);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  912) 	blk_mq_start_request(qd->rq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  913) 	spin_lock_irqsave(&rinfo->ring_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  914) 	if (RING_FULL(&rinfo->ring))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  915) 		goto out_busy;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  916) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  917) 	if (blkif_request_flush_invalid(qd->rq, rinfo->dev_info))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  918) 		goto out_err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  919) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  920) 	if (blkif_queue_request(qd->rq, rinfo))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  921) 		goto out_busy;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  922) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  923) 	flush_requests(rinfo);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  924) 	spin_unlock_irqrestore(&rinfo->ring_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  925) 	return BLK_STS_OK;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  926) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  927) out_err:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  928) 	spin_unlock_irqrestore(&rinfo->ring_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  929) 	return BLK_STS_IOERR;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  930) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  931) out_busy:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  932) 	blk_mq_stop_hw_queue(hctx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  933) 	spin_unlock_irqrestore(&rinfo->ring_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  934) 	return BLK_STS_DEV_RESOURCE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  935) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  936) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  937) static void blkif_complete_rq(struct request *rq)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  938) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  939) 	blk_mq_end_request(rq, blkif_req(rq)->error);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  940) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  941) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  942) static const struct blk_mq_ops blkfront_mq_ops = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  943) 	.queue_rq = blkif_queue_rq,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  944) 	.complete = blkif_complete_rq,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  945) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  946) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  947) static void blkif_set_queue_limits(struct blkfront_info *info)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  948) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  949) 	struct request_queue *rq = info->rq;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  950) 	struct gendisk *gd = info->gd;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  951) 	unsigned int segments = info->max_indirect_segments ? :
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  952) 				BLKIF_MAX_SEGMENTS_PER_REQUEST;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  953) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  954) 	blk_queue_flag_set(QUEUE_FLAG_VIRT, rq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  955) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  956) 	if (info->feature_discard) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  957) 		blk_queue_flag_set(QUEUE_FLAG_DISCARD, rq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  958) 		blk_queue_max_discard_sectors(rq, get_capacity(gd));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  959) 		rq->limits.discard_granularity = info->discard_granularity ?:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  960) 						 info->physical_sector_size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  961) 		rq->limits.discard_alignment = info->discard_alignment;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  962) 		if (info->feature_secdiscard)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  963) 			blk_queue_flag_set(QUEUE_FLAG_SECERASE, rq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  964) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  965) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  966) 	/* Hard sector size and max sectors impersonate the equiv. hardware. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  967) 	blk_queue_logical_block_size(rq, info->sector_size);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  968) 	blk_queue_physical_block_size(rq, info->physical_sector_size);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  969) 	blk_queue_max_hw_sectors(rq, (segments * XEN_PAGE_SIZE) / 512);
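	/*
	 * For example, with the default BLKIF_MAX_SEGMENTS_PER_REQUEST of
	 * 11 and 4 KiB Xen pages this caps a request at 11 * 4096 / 512 =
	 * 88 sectors (44 KiB); indirect descriptors raise the limit
	 * proportionally.
	 */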
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  970) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  971) 	/* Each segment in a request is up to an aligned page in size. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  972) 	blk_queue_segment_boundary(rq, PAGE_SIZE - 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  973) 	blk_queue_max_segment_size(rq, PAGE_SIZE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  974) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  975) 	/* Ensure a merged request will fit in a single I/O ring slot. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  976) 	blk_queue_max_segments(rq, segments / GRANTS_PER_PSEG);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  977) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  978) 	/* Make sure buffer addresses are sector-aligned. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  979) 	blk_queue_dma_alignment(rq, 511);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  980) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  981) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  982) static int xlvbd_init_blk_queue(struct gendisk *gd, u16 sector_size,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  983) 				unsigned int physical_sector_size)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  984) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  985) 	struct request_queue *rq;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  986) 	struct blkfront_info *info = gd->private_data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  987) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  988) 	memset(&info->tag_set, 0, sizeof(info->tag_set));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  989) 	info->tag_set.ops = &blkfront_mq_ops;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  990) 	info->tag_set.nr_hw_queues = info->nr_rings;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  991) 	if (HAS_EXTRA_REQ && info->max_indirect_segments == 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  992) 		/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  993) 		 * When indirect descriptors are not supported, the I/O
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  994) 		 * request will be split between multiple requests in the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  995) 		 * ring. To avoid problems when sending the request, halve
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  996) 		 * the depth of the queue.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  997) 		 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  998) 		info->tag_set.queue_depth = BLK_RING_SIZE(info) / 2;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  999) 	} else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1000) 		info->tag_set.queue_depth = BLK_RING_SIZE(info);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1001) 	info->tag_set.numa_node = NUMA_NO_NODE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1002) 	info->tag_set.flags = BLK_MQ_F_SHOULD_MERGE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1003) 	info->tag_set.cmd_size = sizeof(struct blkif_req);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1004) 	info->tag_set.driver_data = info;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1005) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1006) 	if (blk_mq_alloc_tag_set(&info->tag_set))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1007) 		return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1008) 	rq = blk_mq_init_queue(&info->tag_set);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1009) 	if (IS_ERR(rq)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1010) 		blk_mq_free_tag_set(&info->tag_set);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1011) 		return PTR_ERR(rq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1012) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1013) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1014) 	rq->queuedata = info;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1015) 	info->rq = gd->queue = rq;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1016) 	info->gd = gd;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1017) 	info->sector_size = sector_size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1018) 	info->physical_sector_size = physical_sector_size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1019) 	blkif_set_queue_limits(info);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1020) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1021) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1022) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1023) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1024) static const char *flush_info(struct blkfront_info *info)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1025) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1026) 	if (info->feature_flush && info->feature_fua)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1027) 		return "barrier: enabled;";
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1028) 	else if (info->feature_flush)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1029) 		return "flush diskcache: enabled;";
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1030) 	else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1031) 		return "barrier or flush: disabled;";
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1032) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1033) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1034) static void xlvbd_flush(struct blkfront_info *info)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1035) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1036) 	blk_queue_write_cache(info->rq, info->feature_flush ? true : false,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1037) 			      info->feature_fua ? true : false);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1038) 	pr_info("blkfront: %s: %s %s %s %s %s\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1039) 		info->gd->disk_name, flush_info(info),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1040) 		"persistent grants:", info->feature_persistent ?
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1041) 		"enabled;" : "disabled;", "indirect descriptors:",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1042) 		info->max_indirect_segments ? "enabled;" : "disabled;");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1043) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1044) 
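/*
 * Translate a legacy (non-extended) virtual device number - an emulated
 * IDE (hd*) or SCSI (sd*) major/minor - into the name offset and first
 * minor used by the xvd naming scheme.  Emulated IDE disks carry 64
 * minors each, SCSI disks PARTS_PER_DISK, hence the differing divisors
 * below.
 */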
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1045) static int xen_translate_vdev(int vdevice, int *minor, unsigned int *offset)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1046) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1047) 	int major;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1048) 	major = BLKIF_MAJOR(vdevice);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1049) 	*minor = BLKIF_MINOR(vdevice);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1050) 	switch (major) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1051) 		case XEN_IDE0_MAJOR:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1052) 			*offset = (*minor / 64) + EMULATED_HD_DISK_NAME_OFFSET;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1053) 			*minor = ((*minor / 64) * PARTS_PER_DISK) +
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1054) 				EMULATED_HD_DISK_MINOR_OFFSET;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1055) 			break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1056) 		case XEN_IDE1_MAJOR:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1057) 			*offset = (*minor / 64) + 2 + EMULATED_HD_DISK_NAME_OFFSET;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1058) 			*minor = (((*minor / 64) + 2) * PARTS_PER_DISK) +
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1059) 				EMULATED_HD_DISK_MINOR_OFFSET;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1060) 			break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1061) 		case XEN_SCSI_DISK0_MAJOR:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1062) 			*offset = (*minor / PARTS_PER_DISK) + EMULATED_SD_DISK_NAME_OFFSET;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1063) 			*minor = *minor + EMULATED_SD_DISK_MINOR_OFFSET;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1064) 			break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1065) 		case XEN_SCSI_DISK1_MAJOR:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1066) 		case XEN_SCSI_DISK2_MAJOR:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1067) 		case XEN_SCSI_DISK3_MAJOR:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1068) 		case XEN_SCSI_DISK4_MAJOR:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1069) 		case XEN_SCSI_DISK5_MAJOR:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1070) 		case XEN_SCSI_DISK6_MAJOR:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1071) 		case XEN_SCSI_DISK7_MAJOR:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1072) 			*offset = (*minor / PARTS_PER_DISK) +
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1073) 				((major - XEN_SCSI_DISK1_MAJOR + 1) * 16) +
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1074) 				EMULATED_SD_DISK_NAME_OFFSET;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1075) 			*minor = *minor +
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1076) 				((major - XEN_SCSI_DISK1_MAJOR + 1) * 16 * PARTS_PER_DISK) +
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1077) 				EMULATED_SD_DISK_MINOR_OFFSET;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1078) 			break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1079) 		case XEN_SCSI_DISK8_MAJOR:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1080) 		case XEN_SCSI_DISK9_MAJOR:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1081) 		case XEN_SCSI_DISK10_MAJOR:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1082) 		case XEN_SCSI_DISK11_MAJOR:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1083) 		case XEN_SCSI_DISK12_MAJOR:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1084) 		case XEN_SCSI_DISK13_MAJOR:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1085) 		case XEN_SCSI_DISK14_MAJOR:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1086) 		case XEN_SCSI_DISK15_MAJOR:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1087) 			*offset = (*minor / PARTS_PER_DISK) +
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1088) 				((major - XEN_SCSI_DISK8_MAJOR + 8) * 16) +
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1089) 				EMULATED_SD_DISK_NAME_OFFSET;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1090) 			*minor = *minor +
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1091) 				((major - XEN_SCSI_DISK8_MAJOR + 8) * 16 * PARTS_PER_DISK) +
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1092) 				EMULATED_SD_DISK_MINOR_OFFSET;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1093) 			break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1094) 		case XENVBD_MAJOR:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1095) 			*offset = *minor / PARTS_PER_DISK;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1096) 			break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1097) 		default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1098) 			printk(KERN_WARNING "blkfront: your disk configuration is "
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1099) 					"incorrect, please use an xvd device instead\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1100) 			return -ENODEV;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1101) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1102) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1103) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1104) 
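/*
 * Spell out disk index @n in the bijective base-26 scheme used for disk
 * names: 0 -> "a", 25 -> "z", 26 -> "aa", 27 -> "ab", ...  Returns a
 * pointer just past the last character written.
 */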
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1105) static char *encode_disk_name(char *ptr, unsigned int n)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1106) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1107) 	if (n >= 26)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1108) 		ptr = encode_disk_name(ptr, n / 26 - 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1109) 	*ptr = 'a' + n % 26;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1110) 	return ptr + 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1111) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1112) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1113) static int xlvbd_alloc_gendisk(blkif_sector_t capacity,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1114) 			       struct blkfront_info *info,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1115) 			       u16 vdisk_info, u16 sector_size,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1116) 			       unsigned int physical_sector_size)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1117) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1118) 	struct gendisk *gd;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1119) 	int nr_minors = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1120) 	int err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1121) 	unsigned int offset;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1122) 	int minor;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1123) 	int nr_parts;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1124) 	char *ptr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1125) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1126) 	BUG_ON(info->gd != NULL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1127) 	BUG_ON(info->rq != NULL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1128) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1129) 	if ((info->vdevice>>EXT_SHIFT) > 1) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1130) 		/* this is above the extended range; something is wrong */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1131) 		printk(KERN_WARNING "blkfront: vdevice 0x%x is above the extended range; ignoring\n", info->vdevice);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1132) 		return -ENODEV;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1133) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1134) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1135) 	if (!VDEV_IS_EXTENDED(info->vdevice)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1136) 		err = xen_translate_vdev(info->vdevice, &minor, &offset);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1137) 		if (err)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1138) 			return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1139) 		nr_parts = PARTS_PER_DISK;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1140) 	} else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1141) 		minor = BLKIF_MINOR_EXT(info->vdevice);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1142) 		nr_parts = PARTS_PER_EXT_DISK;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1143) 		offset = minor / nr_parts;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1144) 		if (xen_hvm_domain() && offset < EMULATED_HD_DISK_NAME_OFFSET + 4)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1145) 			printk(KERN_WARNING "blkfront: vdevice 0x%x might conflict with "
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1146) 					"emulated IDE disks,\n\t choose an xvd device name "
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1147) 					"from xvde on\n", info->vdevice);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1148) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1149) 	if (minor >> MINORBITS) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1150) 		pr_warn("blkfront: %#x's minor (%#x) out of range; ignoring\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1151) 			info->vdevice, minor);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1152) 		return -ENODEV;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1153) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1154) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1155) 	if ((minor % nr_parts) == 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1156) 		nr_minors = nr_parts;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1157) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1158) 	err = xlbd_reserve_minors(minor, nr_minors);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1159) 	if (err)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1160) 		goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1161) 	err = -ENODEV;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1162) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1163) 	gd = alloc_disk(nr_minors);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1164) 	if (gd == NULL)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1165) 		goto release;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1166) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1167) 	strcpy(gd->disk_name, DEV_NAME);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1168) 	ptr = encode_disk_name(gd->disk_name + sizeof(DEV_NAME) - 1, offset);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1169) 	BUG_ON(ptr >= gd->disk_name + DISK_NAME_LEN);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1170) 	if (nr_minors > 1)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1171) 		*ptr = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1172) 	else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1173) 		snprintf(ptr, gd->disk_name + DISK_NAME_LEN - ptr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1174) 			 "%d", minor & (nr_parts - 1));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1175) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1176) 	gd->major = XENVBD_MAJOR;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1177) 	gd->first_minor = minor;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1178) 	gd->fops = &xlvbd_block_fops;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1179) 	gd->private_data = info;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1180) 	set_capacity(gd, capacity);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1181) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1182) 	if (xlvbd_init_blk_queue(gd, sector_size, physical_sector_size)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1183) 		del_gendisk(gd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1184) 		goto release;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1185) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1186) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1187) 	xlvbd_flush(info);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1188) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1189) 	if (vdisk_info & VDISK_READONLY)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1190) 		set_disk_ro(gd, 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1191) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1192) 	if (vdisk_info & VDISK_REMOVABLE)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1193) 		gd->flags |= GENHD_FL_REMOVABLE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1194) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1195) 	if (vdisk_info & VDISK_CDROM)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1196) 		gd->flags |= GENHD_FL_CD;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1197) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1198) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1199) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1200)  release:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1201) 	xlbd_release_minors(minor, nr_minors);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1202)  out:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1203) 	return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1204) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1205) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1206) static void xlvbd_release_gendisk(struct blkfront_info *info)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1207) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1208) 	unsigned int minor, nr_minors, i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1209) 	struct blkfront_ring_info *rinfo;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1210) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1211) 	if (info->rq == NULL)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1212) 		return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1213) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1214) 	/* No more blkif_request(). */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1215) 	blk_mq_stop_hw_queues(info->rq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1216) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1217) 	for_each_rinfo(info, rinfo, i) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1218) 		/* No more gnttab callback work. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1219) 		gnttab_cancel_free_callback(&rinfo->callback);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1220) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1221) 		/* Flush gnttab callback work. Must be done with no locks held. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1222) 		flush_work(&rinfo->work);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1223) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1224) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1225) 	del_gendisk(info->gd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1226) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1227) 	minor = info->gd->first_minor;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1228) 	nr_minors = info->gd->minors;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1229) 	xlbd_release_minors(minor, nr_minors);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1230) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1231) 	blk_cleanup_queue(info->rq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1232) 	blk_mq_free_tag_set(&info->tag_set);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1233) 	info->rq = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1234) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1235) 	put_disk(info->gd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1236) 	info->gd = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1237) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1238) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1239) /* Already hold rinfo->ring_lock. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1240) static inline void kick_pending_request_queues_locked(struct blkfront_ring_info *rinfo)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1241) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1242) 	if (!RING_FULL(&rinfo->ring))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1243) 		blk_mq_start_stopped_hw_queues(rinfo->dev_info->rq, true);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1244) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1245) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1246) static void kick_pending_request_queues(struct blkfront_ring_info *rinfo)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1247) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1248) 	unsigned long flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1249) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1250) 	spin_lock_irqsave(&rinfo->ring_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1251) 	kick_pending_request_queues_locked(rinfo);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1252) 	spin_unlock_irqrestore(&rinfo->ring_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1253) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1254) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1255) static void blkif_restart_queue(struct work_struct *work)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1256) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1257) 	struct blkfront_ring_info *rinfo = container_of(work, struct blkfront_ring_info, work);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1258) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1259) 	if (rinfo->dev_info->connected == BLKIF_STATE_CONNECTED)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1260) 		kick_pending_request_queues(rinfo);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1261) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1262) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1263) static void blkif_free_ring(struct blkfront_ring_info *rinfo)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1264) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1265) 	struct grant *persistent_gnt, *n;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1266) 	struct blkfront_info *info = rinfo->dev_info;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1267) 	int i, j, segs;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1268) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1269) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1270) 	 * Remove indirect pages; this only happens when using indirect
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1271) 	 * descriptors but not persistent grants
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1272) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1273) 	if (!list_empty(&rinfo->indirect_pages)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1274) 		struct page *indirect_page, *n;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1275) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1276) 		BUG_ON(info->feature_persistent);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1277) 		list_for_each_entry_safe(indirect_page, n, &rinfo->indirect_pages, lru) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1278) 			list_del(&indirect_page->lru);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1279) 			__free_page(indirect_page);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1280) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1281) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1282) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1283) 	/* Remove all persistent grants. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1284) 	if (!list_empty(&rinfo->grants)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1285) 		list_for_each_entry_safe(persistent_gnt, n,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1286) 					 &rinfo->grants, node) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1287) 			list_del(&persistent_gnt->node);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1288) 			if (persistent_gnt->gref != GRANT_INVALID_REF) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1289) 				gnttab_end_foreign_access(persistent_gnt->gref,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1290) 							  0, 0UL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1291) 				rinfo->persistent_gnts_c--;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1292) 			}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1293) 			if (info->feature_persistent)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1294) 				__free_page(persistent_gnt->page);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1295) 			kfree(persistent_gnt);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1296) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1297) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1298) 	BUG_ON(rinfo->persistent_gnts_c != 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1299) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1300) 	for (i = 0; i < BLK_RING_SIZE(info); i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1301) 		/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1302) 		 * Clear persistent grants present in requests already
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1303) 		 * on the shared ring
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1304) 		 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1305) 		if (!rinfo->shadow[i].request)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1306) 			goto free_shadow;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1307) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1308) 		segs = rinfo->shadow[i].req.operation == BLKIF_OP_INDIRECT ?
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1309) 		       rinfo->shadow[i].req.u.indirect.nr_segments :
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1310) 		       rinfo->shadow[i].req.u.rw.nr_segments;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1311) 		for (j = 0; j < segs; j++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1312) 			persistent_gnt = rinfo->shadow[i].grants_used[j];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1313) 			gnttab_end_foreign_access(persistent_gnt->gref, 0, 0UL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1314) 			if (info->feature_persistent)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1315) 				__free_page(persistent_gnt->page);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1316) 			kfree(persistent_gnt);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1317) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1318) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1319) 		if (rinfo->shadow[i].req.operation != BLKIF_OP_INDIRECT)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1320) 			/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1321) 			 * If this is not an indirect operation don't try to
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1322) 			 * free indirect segments
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1323) 			 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1324) 			goto free_shadow;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1325) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1326) 		for (j = 0; j < INDIRECT_GREFS(segs); j++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1327) 			persistent_gnt = rinfo->shadow[i].indirect_grants[j];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1328) 			gnttab_end_foreign_access(persistent_gnt->gref, 0, 0UL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1329) 			__free_page(persistent_gnt->page);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1330) 			kfree(persistent_gnt);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1331) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1332) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1333) free_shadow:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1334) 		kvfree(rinfo->shadow[i].grants_used);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1335) 		rinfo->shadow[i].grants_used = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1336) 		kvfree(rinfo->shadow[i].indirect_grants);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1337) 		rinfo->shadow[i].indirect_grants = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1338) 		kvfree(rinfo->shadow[i].sg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1339) 		rinfo->shadow[i].sg = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1340) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1341) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1342) 	/* No more gnttab callback work. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1343) 	gnttab_cancel_free_callback(&rinfo->callback);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1344) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1345) 	/* Flush gnttab callback work. Must be done with no locks held. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1346) 	flush_work(&rinfo->work);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1347) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1348) 	/* Free resources associated with old device channel. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1349) 	for (i = 0; i < info->nr_ring_pages; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1350) 		if (rinfo->ring_ref[i] != GRANT_INVALID_REF) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1351) 			gnttab_end_foreign_access(rinfo->ring_ref[i], 0, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1352) 			rinfo->ring_ref[i] = GRANT_INVALID_REF;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1353) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1354) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1355) 	free_pages_exact(rinfo->ring.sring,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1356) 			 info->nr_ring_pages * XEN_PAGE_SIZE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1357) 	rinfo->ring.sring = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1358) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1359) 	if (rinfo->irq)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1360) 		unbind_from_irqhandler(rinfo->irq, rinfo);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1361) 	rinfo->evtchn = rinfo->irq = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1362) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1363) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1364) static void blkif_free(struct blkfront_info *info, int suspend)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1365) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1366) 	unsigned int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1367) 	struct blkfront_ring_info *rinfo;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1368) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1369) 	/* Prevent new requests being issued until we fix things up. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1370) 	info->connected = suspend ?
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1371) 		BLKIF_STATE_SUSPENDED : BLKIF_STATE_DISCONNECTED;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1372) 	/* No more blkif_request(). */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1373) 	if (info->rq)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1374) 		blk_mq_stop_hw_queues(info->rq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1375) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1376) 	for_each_rinfo(info, rinfo, i)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1377) 		blkif_free_ring(rinfo);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1378) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1379) 	kvfree(info->rinfo);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1380) 	info->rinfo = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1381) 	info->nr_rings = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1382) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1383) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1384) struct copy_from_grant {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1385) 	const struct blk_shadow *s;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1386) 	unsigned int grant_idx;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1387) 	unsigned int bvec_offset;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1388) 	char *bvec_data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1389) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1390) 
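/*
 * Read-completion counterpart of the write-side copy in
 * blkif_queue_rw_req(): with persistent grants the backend has written
 * the data into the granted pages, so copy it back into the bio pages
 * chunk by chunk.
 */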
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1391) static void blkif_copy_from_grant(unsigned long gfn, unsigned int offset,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1392) 				  unsigned int len, void *data)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1393) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1394) 	struct copy_from_grant *info = data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1395) 	char *shared_data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1396) 	/* Convenient aliases */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1397) 	const struct blk_shadow *s = info->s;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1398) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1399) 	shared_data = kmap_atomic(s->grants_used[info->grant_idx]->page);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1400) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1401) 	memcpy(info->bvec_data + info->bvec_offset,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1402) 	       shared_data + offset, len);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1403) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1404) 	info->bvec_offset += len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1405) 	info->grant_idx++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1406) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1407) 	kunmap_atomic(shared_data);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1408) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1409) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1410) static enum blk_req_status blkif_rsp_to_req_status(int rsp)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1411) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1412) 	switch (rsp) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1414) 	case BLKIF_RSP_OKAY:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1415) 		return REQ_DONE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1416) 	case BLKIF_RSP_EOPNOTSUPP:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1417) 		return REQ_EOPNOTSUPP;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1418) 	case BLKIF_RSP_ERROR:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1419) 	default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1420) 		return REQ_ERROR;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1421) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1422) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1423) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1424) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1425)  * Get the final status of the block request based on two ring responses.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1426)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1427) static int blkif_get_final_status(enum blk_req_status s1,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1428) 				  enum blk_req_status s2)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1429) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1430) 	BUG_ON(s1 < REQ_DONE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1431) 	BUG_ON(s2 < REQ_DONE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1432) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1433) 	if (s1 == REQ_ERROR || s2 == REQ_ERROR)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1434) 		return BLKIF_RSP_ERROR;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1435) 	else if (s1 == REQ_EOPNOTSUPP || s2 == REQ_EOPNOTSUPP)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1436) 		return BLKIF_RSP_EOPNOTSUPP;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1437) 	return BLKIF_RSP_OKAY;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1438) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1439) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1440) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1441)  * Return values:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1442)  *  1 response processed.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1443)  *  0 waiting for further responses.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1444)  * -1 error while processing.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1445)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1446) static int blkif_completion(unsigned long *id,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1447) 			    struct blkfront_ring_info *rinfo,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1448) 			    struct blkif_response *bret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1449) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1450) 	int i = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1451) 	struct scatterlist *sg;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1452) 	int num_sg, num_grant;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1453) 	struct blkfront_info *info = rinfo->dev_info;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1454) 	struct blk_shadow *s = &rinfo->shadow[*id];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1455) 	struct copy_from_grant data = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1456) 		.grant_idx = 0,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1457) 	};
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1458) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1459) 	num_grant = s->req.operation == BLKIF_OP_INDIRECT ?
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1460) 		s->req.u.indirect.nr_segments : s->req.u.rw.nr_segments;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1461) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1462) 	/* The I/O request may be split in two. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1463) 	if (unlikely(s->associated_id != NO_ASSOCIATED_ID)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1464) 		struct blk_shadow *s2 = &rinfo->shadow[s->associated_id];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1465) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1466) 		/* Keep the status of the current response in shadow. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1467) 		s->status = blkif_rsp_to_req_status(bret->status);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1468) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1469) 		/* Wait for the second response if it is not here yet. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1470) 		if (s2->status < REQ_DONE)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1471) 			return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1472) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1473) 		bret->status = blkif_get_final_status(s->status,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1474) 						      s2->status);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1475) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1476) 		/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1477) 		 * All the grants are stored in the first shadow in order
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1478) 		 * to make the completion code simpler.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1479) 		 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1480) 		num_grant += s2->req.u.rw.nr_segments;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1481) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1482) 		/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1483) 		 * The two responses may not come in order. Only the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1484) 		 * first request will store the scatter-gather list.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1485) 		 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1486) 		if (s2->num_sg != 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1487) 			/* Update "id" with the ID of the first response. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1488) 			*id = s->associated_id;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1489) 			s = s2;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1490) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1491) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1492) 		/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1493) 		 * We don't need the second request anymore, so recycle
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1494) 		 * it now.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1495) 		 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1496) 		if (add_id_to_freelist(rinfo, s->associated_id))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1497) 			WARN(1, "%s: can't recycle the second part (id = %ld) of the request\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1498) 			     info->gd->disk_name, s->associated_id);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1499) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1500) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1501) 	data.s = s;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1502) 	num_sg = s->num_sg;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1503) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1504) 	if (bret->operation == BLKIF_OP_READ && info->feature_persistent) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1505) 		for_each_sg(s->sg, sg, num_sg, i) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1506) 			BUG_ON(sg->offset + sg->length > PAGE_SIZE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1507) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1508) 			data.bvec_offset = sg->offset;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1509) 			data.bvec_data = kmap_atomic(sg_page(sg));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1510) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1511) 			gnttab_foreach_grant_in_range(sg_page(sg),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1512) 						      sg->offset,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1513) 						      sg->length,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1514) 						      blkif_copy_from_grant,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1515) 						      &data);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1516) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1517) 			kunmap_atomic(data.bvec_data);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1518) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1519) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1520) 	/* Add the persistent grant into the list of free grants */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1521) 	for (i = 0; i < num_grant; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1522) 		if (!gnttab_try_end_foreign_access(s->grants_used[i]->gref)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1523) 			/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1524) 			 * If the grant is still mapped by the backend (the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1525) 			 * backend has chosen to make this grant persistent)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1526) 			 * we add it at the head of the list, so it will be
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1527) 			 * reused first.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1528) 			 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1529) 			if (!info->feature_persistent) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1530) 				pr_alert("backend has not unmapped grant: %u\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1531) 					 s->grants_used[i]->gref);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1532) 				return -1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1533) 			}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1534) 			list_add(&s->grants_used[i]->node, &rinfo->grants);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1535) 			rinfo->persistent_gnts_c++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1536) 		} else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1537) 			/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1538) 			 * If the grant is not mapped by the backend we add it
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1539) 			 * to the tail of the list, so it will not be picked
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1540) 			 * again unless we run out of persistent grants.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1541) 			 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1542) 			s->grants_used[i]->gref = GRANT_INVALID_REF;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1543) 			list_add_tail(&s->grants_used[i]->node, &rinfo->grants);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1544) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1545) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1546) 	if (s->req.operation == BLKIF_OP_INDIRECT) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1547) 		for (i = 0; i < INDIRECT_GREFS(num_grant); i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1548) 			if (!gnttab_try_end_foreign_access(s->indirect_grants[i]->gref)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1549) 				if (!info->feature_persistent) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1550) 					pr_alert("backend has not unmapped grant: %u\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1551) 						 s->indirect_grants[i]->gref);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1552) 					return -1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1553) 				}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1554) 				list_add(&s->indirect_grants[i]->node, &rinfo->grants);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1555) 				rinfo->persistent_gnts_c++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1556) 			} else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1557) 				struct page *indirect_page;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1558) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1559) 				/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1560) 				 * Add the used indirect page back to the list of
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1561) 				 * available pages for indirect grefs.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1562) 				 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1563) 				if (!info->feature_persistent) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1564) 					indirect_page = s->indirect_grants[i]->page;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1565) 					list_add(&indirect_page->lru, &rinfo->indirect_pages);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1566) 				}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1567) 				s->indirect_grants[i]->gref = GRANT_INVALID_REF;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1568) 				list_add_tail(&s->indirect_grants[i]->node, &rinfo->grants);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1569) 			}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1570) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1571) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1572) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1573) 	return 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1574) }
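
/*
 * Editor's note: a minimal sketch (not driver code) of how the two halves
 * of a split request meet in blkif_completion() above.  The helper below
 * is hypothetical; the names mirror the shadow-state values of this file:
 *
 *	static int join_split_halves(struct blk_shadow *s,
 *				     struct blk_shadow *s2,
 *				     struct blkif_response *bret)
 *	{
 *		s->status = blkif_rsp_to_req_status(bret->status);
 *		if (s2->status < REQ_DONE)
 *			return 0;	// peer not answered yet: wait
 *		bret->status = blkif_get_final_status(s->status, s2->status);
 *		return 1;		// both halves done: complete now
 *	}
 *
 * Whichever response arrives second sees its peer already at REQ_DONE or
 * beyond and performs the final completion; the first merely records its
 * status in the shadow and returns without completing the request.
 */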
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1575) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1576) static irqreturn_t blkif_interrupt(int irq, void *dev_id)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1577) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1578) 	struct request *req;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1579) 	struct blkif_response bret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1580) 	RING_IDX i, rp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1581) 	unsigned long flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1582) 	struct blkfront_ring_info *rinfo = (struct blkfront_ring_info *)dev_id;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1583) 	struct blkfront_info *info = rinfo->dev_info;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1584) 	unsigned int eoiflag = XEN_EOI_FLAG_SPURIOUS;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1585) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1586) 	if (unlikely(info->connected != BLKIF_STATE_CONNECTED)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1587) 		xen_irq_lateeoi(irq, XEN_EOI_FLAG_SPURIOUS);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1588) 		return IRQ_HANDLED;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1589) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1590) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1591) 	spin_lock_irqsave(&rinfo->ring_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1592)  again:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1593) 	rp = READ_ONCE(rinfo->ring.sring->rsp_prod);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1594) 	virt_rmb(); /* Ensure we see queued responses up to 'rp'. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1595) 	if (RING_RESPONSE_PROD_OVERFLOW(&rinfo->ring, rp)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1596) 		pr_alert("%s: illegal number of responses %u\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1597) 			 info->gd->disk_name, rp - rinfo->ring.rsp_cons);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1598) 		goto err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1599) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1600) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1601) 	for (i = rinfo->ring.rsp_cons; i != rp; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1602) 		unsigned long id;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1603) 		unsigned int op;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1604) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1605) 		eoiflag = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1606) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1607) 		RING_COPY_RESPONSE(&rinfo->ring, i, &bret);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1608) 		id = bret.id;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1609) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1610) 		/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1611) 		 * The backend has messed up and given us an id that we would
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1612) 		 * never have given to it (we stamp it up to BLK_RING_SIZE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1613) 		 * see get_id_from_freelist).
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1614) 		 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1615) 		if (id >= BLK_RING_SIZE(info)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1616) 			pr_alert("%s: response has incorrect id (%ld)\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1617) 				 info->gd->disk_name, id);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1618) 			goto err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1619) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1620) 		if (rinfo->shadow[id].status != REQ_WAITING) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1621) 			pr_alert("%s: response references no pending request\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1622) 				 info->gd->disk_name);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1623) 			goto err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1624) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1625) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1626) 		rinfo->shadow[id].status = REQ_PROCESSING;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1627) 		req  = rinfo->shadow[id].request;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1628) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1629) 		op = rinfo->shadow[id].req.operation;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1630) 		if (op == BLKIF_OP_INDIRECT)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1631) 			op = rinfo->shadow[id].req.u.indirect.indirect_op;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1632) 		if (bret.operation != op) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1633) 			pr_alert("%s: response has wrong operation (%u instead of %u)\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1634) 				 info->gd->disk_name, bret.operation, op);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1635) 			goto err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1636) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1637) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1638) 		if (bret.operation != BLKIF_OP_DISCARD) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1639) 			int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1640) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1641) 			/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1642) 			 * We may need to wait for an extra response if the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1643) 			 * I/O request is split in two.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1644) 			 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1645) 			ret = blkif_completion(&id, rinfo, &bret);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1646) 			if (!ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1647) 				continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1648) 			if (unlikely(ret < 0))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1649) 				goto err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1650) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1651) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1652) 		if (add_id_to_freelist(rinfo, id)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1653) 			WARN(1, "%s: response to %s (id %ld) couldn't be recycled!\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1654) 			     info->gd->disk_name, op_name(bret.operation), id);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1655) 			continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1656) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1657) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1658) 		if (bret.status == BLKIF_RSP_OKAY)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1659) 			blkif_req(req)->error = BLK_STS_OK;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1660) 		else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1661) 			blkif_req(req)->error = BLK_STS_IOERR;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1662) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1663) 		switch (bret.operation) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1664) 		case BLKIF_OP_DISCARD:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1665) 			if (unlikely(bret.status == BLKIF_RSP_EOPNOTSUPP)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1666) 				struct request_queue *rq = info->rq;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1667) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1668) 				pr_warn_ratelimited("blkfront: %s: %s op failed\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1669) 					   info->gd->disk_name, op_name(bret.operation));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1670) 				blkif_req(req)->error = BLK_STS_NOTSUPP;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1671) 				info->feature_discard = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1672) 				info->feature_secdiscard = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1673) 				blk_queue_flag_clear(QUEUE_FLAG_DISCARD, rq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1674) 				blk_queue_flag_clear(QUEUE_FLAG_SECERASE, rq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1675) 			}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1676) 			break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1677) 		case BLKIF_OP_FLUSH_DISKCACHE:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1678) 		case BLKIF_OP_WRITE_BARRIER:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1679) 			if (unlikely(bret.status == BLKIF_RSP_EOPNOTSUPP)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1680) 				pr_warn_ratelimited("blkfront: %s: %s op failed\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1681) 				       info->gd->disk_name, op_name(bret.operation));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1682) 				blkif_req(req)->error = BLK_STS_NOTSUPP;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1683) 			}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1684) 			if (unlikely(bret.status == BLKIF_RSP_ERROR &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1685) 				     rinfo->shadow[id].req.u.rw.nr_segments == 0)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1686) 				pr_warn_ratelimited("blkfront: %s: empty %s op failed\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1687) 				       info->gd->disk_name, op_name(bret.operation));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1688) 				blkif_req(req)->error = BLK_STS_NOTSUPP;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1689) 			}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1690) 			if (unlikely(blkif_req(req)->error)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1691) 				if (blkif_req(req)->error == BLK_STS_NOTSUPP)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1692) 					blkif_req(req)->error = BLK_STS_OK;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1693) 				info->feature_fua = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1694) 				info->feature_flush = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1695) 				xlvbd_flush(info);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1696) 			}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1697) 			fallthrough;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1698) 		case BLKIF_OP_READ:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1699) 		case BLKIF_OP_WRITE:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1700) 			if (unlikely(bret.status != BLKIF_RSP_OKAY))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1701) 				dev_dbg_ratelimited(&info->xbdev->dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1702) 					"Bad return from blkdev data request: %#x\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1703) 					bret.status);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1704) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1705) 			break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1706) 		default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1707) 			BUG();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1708) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1709) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1710) 		if (likely(!blk_should_fake_timeout(req->q)))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1711) 			blk_mq_complete_request(req);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1712) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1713) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1714) 	rinfo->ring.rsp_cons = i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1715) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1716) 	if (i != rinfo->ring.req_prod_pvt) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1717) 		int more_to_do;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1718) 		RING_FINAL_CHECK_FOR_RESPONSES(&rinfo->ring, more_to_do);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1719) 		if (more_to_do)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1720) 			goto again;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1721) 	} else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1722) 		rinfo->ring.sring->rsp_event = i + 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1723) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1724) 	kick_pending_request_queues_locked(rinfo);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1725) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1726) 	spin_unlock_irqrestore(&rinfo->ring_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1727) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1728) 	xen_irq_lateeoi(irq, eoiflag);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1729) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1730) 	return IRQ_HANDLED;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1731) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1732)  err:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1733) 	info->connected = BLKIF_STATE_ERROR;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1734) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1735) 	spin_unlock_irqrestore(&rinfo->ring_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1736) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1737) 	/* No EOI in order to avoid further interrupts. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1738) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1739) 	pr_alert("%s disabled for further use\n", info->gd->disk_name);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1740) 	return IRQ_HANDLED;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1741) }
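
/*
 * Editor's note: the handler above follows the Xen "lateeoi" pattern, in
 * which the event channel is only re-enabled after the handler has judged
 * whether the interrupt was genuine.  A condensed sketch of the same shape
 * (the two helpers here are hypothetical):
 *
 *	static irqreturn_t example_handler(int irq, void *dev_id)
 *	{
 *		unsigned int eoiflag = XEN_EOI_FLAG_SPURIOUS;
 *
 *		while (have_pending_responses(dev_id)) {
 *			consume_one_response(dev_id);
 *			eoiflag = 0;	// real work done: not spurious
 *		}
 *		xen_irq_lateeoi(irq, eoiflag);
 *		return IRQ_HANDLED;
 *	}
 *
 * On the err: path above the EOI is skipped altogether, which leaves the
 * event channel masked and keeps a misbehaving backend from flooding the
 * frontend with further interrupts.
 */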
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1742) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1744) static int setup_blkring(struct xenbus_device *dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1745) 			 struct blkfront_ring_info *rinfo)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1746) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1747) 	struct blkif_sring *sring;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1748) 	int err, i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1749) 	struct blkfront_info *info = rinfo->dev_info;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1750) 	unsigned long ring_size = info->nr_ring_pages * XEN_PAGE_SIZE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1751) 	grant_ref_t gref[XENBUS_MAX_RING_GRANTS];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1752) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1753) 	for (i = 0; i < info->nr_ring_pages; i++)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1754) 		rinfo->ring_ref[i] = GRANT_INVALID_REF;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1755) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1756) 	sring = alloc_pages_exact(ring_size, GFP_NOIO);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1757) 	if (!sring) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1758) 		xenbus_dev_fatal(dev, -ENOMEM, "allocating shared ring");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1759) 		return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1760) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1761) 	SHARED_RING_INIT(sring);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1762) 	FRONT_RING_INIT(&rinfo->ring, sring, ring_size);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1763) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1764) 	err = xenbus_grant_ring(dev, rinfo->ring.sring, info->nr_ring_pages, gref);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1765) 	if (err < 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1766) 		free_pages_exact(sring, ring_size);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1767) 		rinfo->ring.sring = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1768) 		goto fail;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1769) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1770) 	for (i = 0; i < info->nr_ring_pages; i++)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1771) 		rinfo->ring_ref[i] = gref[i];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1772) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1773) 	err = xenbus_alloc_evtchn(dev, &rinfo->evtchn);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1774) 	if (err)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1775) 		goto fail;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1776) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1777) 	err = bind_evtchn_to_irqhandler_lateeoi(rinfo->evtchn, blkif_interrupt,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1778) 						0, "blkif", rinfo);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1779) 	if (err <= 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1780) 		xenbus_dev_fatal(dev, err,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1781) 				 "bind_evtchn_to_irqhandler failed");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1782) 		goto fail;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1783) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1784) 	rinfo->irq = err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1785) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1786) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1787) fail:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1788) 	blkif_free(info, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1789) 	return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1790) }
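
/*
 * Editor's note: setup_blkring() is the stock xenbus bring-up sequence.
 * Condensed sketch for a single-page ring (declarations and error handling
 * omitted; not a drop-in replacement):
 *
 *	struct blkif_sring *sring = alloc_pages_exact(XEN_PAGE_SIZE, GFP_NOIO);
 *
 *	SHARED_RING_INIT(sring);			// shared indexes
 *	FRONT_RING_INIT(&front, sring, XEN_PAGE_SIZE);	// private view
 *	xenbus_grant_ring(dev, sring, 1, &gref);	// share with backend
 *	xenbus_alloc_evtchn(dev, &evtchn);		// notification port
 *	irq = bind_evtchn_to_irqhandler_lateeoi(evtchn, blkif_interrupt,
 *						0, "blkif", rinfo);
 *
 * The grant reference(s) and event channel are then advertised to the
 * backend through xenstore; see write_per_ring_nodes() below.
 */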
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1791) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1792) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1793)  * Write out the per-ring/queue nodes, including ring-ref and event-channel;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1794)  * each ring buffer may span multiple pages depending on ->nr_ring_pages.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1795)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1796) static int write_per_ring_nodes(struct xenbus_transaction xbt,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1797) 				struct blkfront_ring_info *rinfo, const char *dir)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1798) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1799) 	int err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1800) 	unsigned int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1801) 	const char *message = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1802) 	struct blkfront_info *info = rinfo->dev_info;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1803) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1804) 	if (info->nr_ring_pages == 1) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1805) 		err = xenbus_printf(xbt, dir, "ring-ref", "%u", rinfo->ring_ref[0]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1806) 		if (err) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1807) 			message = "writing ring-ref";
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1808) 			goto abort_transaction;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1809) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1810) 	} else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1811) 		for (i = 0; i < info->nr_ring_pages; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1812) 			char ring_ref_name[RINGREF_NAME_LEN];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1813) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1814) 			snprintf(ring_ref_name, RINGREF_NAME_LEN, "ring-ref%u", i);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1815) 			err = xenbus_printf(xbt, dir, ring_ref_name,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1816) 					    "%u", rinfo->ring_ref[i]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1817) 			if (err) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1818) 				message = "writing ring-ref";
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1819) 				goto abort_transaction;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1820) 			}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1821) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1822) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1823) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1824) 	err = xenbus_printf(xbt, dir, "event-channel", "%u", rinfo->evtchn);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1825) 	if (err) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1826) 		message = "writing event-channel";
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1827) 		goto abort_transaction;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1828) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1829) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1830) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1831) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1832) abort_transaction:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1833) 	xenbus_transaction_end(xbt, 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1834) 	if (message)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1835) 		xenbus_dev_fatal(info->xbdev, err, "%s", message);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1836) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1837) 	return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1838) }
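
/*
 * Editor's note: for illustration, the xenstore nodes the function above
 * writes under "dir" when nr_ring_pages == 2 would be:
 *
 *	ring-ref0     = "<grant ref of ring page 0>"
 *	ring-ref1     = "<grant ref of ring page 1>"
 *	event-channel = "<event channel port>"
 *
 * With nr_ring_pages == 1 a single legacy "ring-ref" node is written
 * instead, keeping compatibility with backends that predate multi-page
 * rings.  For multi-queue devices, "dir" is a per-queue subdirectory
 * (see talk_to_blkback() below).
 */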
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1839) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1840) static void free_info(struct blkfront_info *info)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1841) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1842) 	list_del(&info->info_list);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1843) 	kfree(info);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1844) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1845) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1846) /* Common code used when first setting up, and when resuming. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1847) static int talk_to_blkback(struct xenbus_device *dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1848) 			   struct blkfront_info *info)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1849) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1850) 	const char *message = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1851) 	struct xenbus_transaction xbt;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1852) 	int err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1853) 	unsigned int i, max_page_order;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1854) 	unsigned int ring_page_order;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1855) 	struct blkfront_ring_info *rinfo;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1856) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1857) 	if (!info)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1858) 		return -ENODEV;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1859) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1860) 	max_page_order = xenbus_read_unsigned(info->xbdev->otherend,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1861) 					      "max-ring-page-order", 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1862) 	ring_page_order = min(xen_blkif_max_ring_order, max_page_order);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1863) 	info->nr_ring_pages = 1 << ring_page_order;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1864) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1865) 	err = negotiate_mq(info);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1866) 	if (err)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1867) 		goto destroy_blkring;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1868) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1869) 	for_each_rinfo(info, rinfo, i) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1870) 		/* Create shared ring, alloc event channel. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1871) 		err = setup_blkring(dev, rinfo);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1872) 		if (err)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1873) 			goto destroy_blkring;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1874) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1875) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1876) again:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1877) 	err = xenbus_transaction_start(&xbt);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1878) 	if (err) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1879) 		xenbus_dev_fatal(dev, err, "starting transaction");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1880) 		goto destroy_blkring;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1881) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1882) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1883) 	if (info->nr_ring_pages > 1) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1884) 		err = xenbus_printf(xbt, dev->nodename, "ring-page-order", "%u",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1885) 				    ring_page_order);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1886) 		if (err) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1887) 			message = "writing ring-page-order";
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1888) 			goto abort_transaction;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1889) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1890) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1891) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1892) 	/* negotiate_mq() above has already settled the number of queues/rings */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1893) 	if (info->nr_rings == 1) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1894) 		err = write_per_ring_nodes(xbt, info->rinfo, dev->nodename);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1895) 		if (err)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1896) 			goto destroy_blkring;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1897) 	} else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1898) 		char *path;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1899) 		size_t pathsize;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1900) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1901) 		err = xenbus_printf(xbt, dev->nodename, "multi-queue-num-queues", "%u",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1902) 				    info->nr_rings);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1903) 		if (err) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1904) 			message = "writing multi-queue-num-queues";
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1905) 			goto abort_transaction;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1906) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1907) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1908) 		pathsize = strlen(dev->nodename) + QUEUE_NAME_LEN;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1909) 		path = kmalloc(pathsize, GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1910) 		if (!path) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1911) 			err = -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1912) 			message = "ENOMEM while writing ring references";
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1913) 			goto abort_transaction;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1914) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1915) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1916) 		for_each_rinfo(info, rinfo, i) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1917) 			memset(path, 0, pathsize);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1918) 			snprintf(path, pathsize, "%s/queue-%u", dev->nodename, i);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1919) 			err = write_per_ring_nodes(xbt, rinfo, path);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1920) 			if (err) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1921) 				kfree(path);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1922) 				goto destroy_blkring;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1923) 			}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1924) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1925) 		kfree(path);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1926) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1927) 	err = xenbus_printf(xbt, dev->nodename, "protocol", "%s",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1928) 			    XEN_IO_PROTO_ABI_NATIVE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1929) 	if (err) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1930) 		message = "writing protocol";
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1931) 		goto abort_transaction;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1932) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1933) 	err = xenbus_printf(xbt, dev->nodename, "feature-persistent", "%u",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1934) 			info->feature_persistent);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1935) 	if (err)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1936) 		dev_warn(&dev->dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1937) 			 "failed to write persistent grants feature to xenbus");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1938) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1939) 	err = xenbus_transaction_end(xbt, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1940) 	if (err) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1941) 		if (err == -EAGAIN)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1942) 			goto again;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1943) 		xenbus_dev_fatal(dev, err, "completing transaction");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1944) 		goto destroy_blkring;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1945) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1946) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1947) 	for_each_rinfo(info, rinfo, i) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1948) 		unsigned int j;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1949) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1950) 		for (j = 0; j < BLK_RING_SIZE(info); j++)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1951) 			rinfo->shadow[j].req.u.rw.id = j + 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1952) 		rinfo->shadow[BLK_RING_SIZE(info)-1].req.u.rw.id = 0x0fffffff;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1953) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1954) 	xenbus_switch_state(dev, XenbusStateInitialised);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1955) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1956) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1957) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1958)  abort_transaction:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1959) 	xenbus_transaction_end(xbt, 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1960) 	if (message)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1961) 		xenbus_dev_fatal(dev, err, "%s", message);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1962)  destroy_blkring:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1963) 	blkif_free(info, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1964) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1965) 	mutex_lock(&blkfront_mutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1966) 	free_info(info);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1967) 	mutex_unlock(&blkfront_mutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1968) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1969) 	dev_set_drvdata(&dev->dev, NULL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1970) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1971) 	return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1972) }
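
/*
 * Editor's note: the again:/-EAGAIN dance above is the standard xenbus
 * transaction idiom; a transaction fails with -EAGAIN when a concurrent
 * writer touched the store, and is simply retried from scratch.  The bare
 * pattern ("some-node" and val are placeholders, not nodes this driver
 * writes):
 *
 *	struct xenbus_transaction xbt;
 *	int err;
 *
 *	do {
 *		err = xenbus_transaction_start(&xbt);
 *		if (err)
 *			break;
 *		err = xenbus_printf(xbt, dev->nodename, "some-node",
 *				    "%u", val);
 *		if (err) {
 *			xenbus_transaction_end(xbt, 1);	// abort
 *			break;
 *		}
 *		err = xenbus_transaction_end(xbt, 0);	// commit
 *	} while (err == -EAGAIN);
 */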
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1973) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1974) static int negotiate_mq(struct blkfront_info *info)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1975) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1976) 	unsigned int backend_max_queues;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1977) 	unsigned int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1978) 	struct blkfront_ring_info *rinfo;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1979) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1980) 	BUG_ON(info->nr_rings);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1981) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1982) 	/* Check if backend supports multiple queues. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1983) 	backend_max_queues = xenbus_read_unsigned(info->xbdev->otherend,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1984) 						  "multi-queue-max-queues", 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1985) 	info->nr_rings = min(backend_max_queues, xen_blkif_max_queues);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1986) 	/* We need at least one ring. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1987) 	if (!info->nr_rings)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1988) 		info->nr_rings = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1989) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1990) 	info->rinfo_size = struct_size(info->rinfo, shadow,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1991) 				       BLK_RING_SIZE(info));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1992) 	info->rinfo = kvcalloc(info->nr_rings, info->rinfo_size, GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1993) 	if (!info->rinfo) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1994) 		xenbus_dev_fatal(info->xbdev, -ENOMEM, "allocating ring_info structure");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1995) 		info->nr_rings = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1996) 		return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1997) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1998) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1999) 	for_each_rinfo(info, rinfo, i) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2000) 		INIT_LIST_HEAD(&rinfo->indirect_pages);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2001) 		INIT_LIST_HEAD(&rinfo->grants);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2002) 		rinfo->dev_info = info;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2003) 		INIT_WORK(&rinfo->work, blkif_restart_queue);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2004) 		spin_lock_init(&rinfo->ring_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2005) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2006) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2007) }
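
/*
 * Editor's note: blkfront_ring_info ends in a flexible shadow[] array (as
 * the struct_size() call above implies), so negotiate_mq() computes one
 * ring's size with overflow checking and allocates nr_rings copies.  A
 * minimal illustration of the pattern (struct names here are made up):
 *
 *	struct example {
 *		spinlock_t lock;
 *		struct blk_shadow shadow[];	// flexible array member
 *	};
 *	struct example *ptr;
 *
 *	ptr = kvcalloc(nr_rings,
 *		       struct_size(ptr, shadow, ring_size),	// one ring
 *		       GFP_KERNEL);
 *
 * kvcalloc() zero-fills and falls back from kmalloc() to vmalloc() for
 * large allocations, which suits the potentially large shadow arrays.
 */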
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2008) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2009) /* Enable the persistent grants feature. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2010) static bool feature_persistent = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2011) module_param(feature_persistent, bool, 0644);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2012) MODULE_PARM_DESC(feature_persistent,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2013) 		"Enables the persistent grants feature");
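
/*
 * Editor's note: with 0644 permissions the parameter is also writable at
 * runtime, e.g. (assuming a modular build; module name as in mainline):
 *
 *	modprobe xen-blkfront feature_persistent=0
 *	echo 0 > /sys/module/xen_blkfront/parameters/feature_persistent
 *
 * Only devices probed after the change see the new value, because the
 * flag is copied into blkfront_info at probe time (see below).
 */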
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2014) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2015) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2016)  * Entry point to this code when a new device is created.  Allocate the basic
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2017)  * structures and the ring buffer for communication with the backend, and
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2018)  * inform the backend of the appropriate details for those.  Switch to
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2019)  * Initialised state.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2020)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2021) static int blkfront_probe(struct xenbus_device *dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2022) 			  const struct xenbus_device_id *id)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2023) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2024) 	int err, vdevice;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2025) 	struct blkfront_info *info;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2026) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2027) 	/* FIXME: Use dynamic device id if this is not set. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2028) 	err = xenbus_scanf(XBT_NIL, dev->nodename,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2029) 			   "virtual-device", "%i", &vdevice);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2030) 	if (err != 1) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2031) 		/* go looking in the extended area instead */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2032) 		err = xenbus_scanf(XBT_NIL, dev->nodename, "virtual-device-ext",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2033) 				   "%i", &vdevice);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2034) 		if (err != 1) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2035) 			xenbus_dev_fatal(dev, err, "reading virtual-device");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2036) 			return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2037) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2038) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2039) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2040) 	if (xen_hvm_domain()) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2041) 		char *type;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2042) 		int len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2043) 		/* no unplug has been done: do not hook devices != xen vbds */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2044) 		if (xen_has_pv_and_legacy_disk_devices()) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2045) 			int major;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2046) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2047) 			if (!VDEV_IS_EXTENDED(vdevice))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2048) 				major = BLKIF_MAJOR(vdevice);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2049) 			else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2050) 				major = XENVBD_MAJOR;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2051) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2052) 			if (major != XENVBD_MAJOR) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2053) 				printk(KERN_INFO
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2054) 						"%s: HVM does not support vbd %d as xen block device\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2055) 						__func__, vdevice);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2056) 				return -ENODEV;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2057) 			}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2058) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2059) 		/* do not create a PV cdrom device if we are an HVM guest */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2060) 		type = xenbus_read(XBT_NIL, dev->nodename, "device-type", &len);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2061) 		if (IS_ERR(type))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2062) 			return -ENODEV;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2063) 		if (strncmp(type, "cdrom", 5) == 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2064) 			kfree(type);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2065) 			return -ENODEV;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2066) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2067) 		kfree(type);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2068) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2069) 	info = kzalloc(sizeof(*info), GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2070) 	if (!info) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2071) 		xenbus_dev_fatal(dev, -ENOMEM, "allocating info structure");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2072) 		return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2073) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2074) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2075) 	info->xbdev = dev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2076) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2077) 	mutex_init(&info->mutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2078) 	info->vdevice = vdevice;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2079) 	info->connected = BLKIF_STATE_DISCONNECTED;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2080) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2081) 	info->feature_persistent = feature_persistent;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2082) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2083) 	/* Front end dir is a number, which is used as the id. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2084) 	info->handle = simple_strtoul(strrchr(dev->nodename, '/')+1, NULL, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2085) 	dev_set_drvdata(&dev->dev, info);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2086) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2087) 	mutex_lock(&blkfront_mutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2088) 	list_add(&info->info_list, &info_list);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2089) 	mutex_unlock(&blkfront_mutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2090) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2091) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2092) }
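
/*
 * Editor's note: "virtual-device" carries the classic 16-bit Xen vbd
 * number; devices outside that range are published by the toolstack under
 * "virtual-device-ext" instead, hence the fallback read above.  Note the
 * xenbus_scanf() idiom: it returns the number of fields converted, so
 * anything but 1 means "node absent or unparseable":
 *
 *	if (xenbus_scanf(XBT_NIL, dev->nodename,
 *			 "virtual-device", "%i", &vdevice) != 1)
 *		// ... fall back to "virtual-device-ext"
 *
 * (See VDEV_IS_EXTENDED and the numbering notes in the Xen block
 * interface headers.)
 */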
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2093) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2094) static int blkif_recover(struct blkfront_info *info)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2095) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2096) 	unsigned int r_index;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2097) 	struct request *req, *n;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2098) 	int rc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2099) 	struct bio *bio;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2100) 	unsigned int segs;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2101) 	struct blkfront_ring_info *rinfo;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2102) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2103) 	blkfront_gather_backend_features(info);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2104) 	/* Reset limits changed by blk_mq_update_nr_hw_queues(). */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2105) 	blkif_set_queue_limits(info);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2106) 	segs = info->max_indirect_segments ? : BLKIF_MAX_SEGMENTS_PER_REQUEST;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2107) 	blk_queue_max_segments(info->rq, segs / GRANTS_PER_PSEG);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2108) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2109) 	for_each_rinfo(info, rinfo, r_index) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2110) 		rc = blkfront_setup_indirect(rinfo);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2111) 		if (rc)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2112) 			return rc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2113) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2114) 	xenbus_switch_state(info->xbdev, XenbusStateConnected);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2115) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2116) 	/* Now safe for us to use the shared ring */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2117) 	info->connected = BLKIF_STATE_CONNECTED;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2118) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2119) 	for_each_rinfo(info, rinfo, r_index) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2120) 		/* Kick any other new requests queued since we resumed */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2121) 		kick_pending_request_queues(rinfo);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2122) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2123) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2124) 	list_for_each_entry_safe(req, n, &info->requests, queuelist) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2125) 		/* Requeue pending requests (flush or discard) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2126) 		list_del_init(&req->queuelist);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2127) 		BUG_ON(req->nr_phys_segments > segs);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2128) 		blk_mq_requeue_request(req, false);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2129) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2130) 	blk_mq_start_stopped_hw_queues(info->rq, true);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2131) 	blk_mq_kick_requeue_list(info->rq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2132) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2133) 	while ((bio = bio_list_pop(&info->bio_list)) != NULL) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2134) 		/* Traverse the list of pending bios and re-queue them */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2135) 		submit_bio(bio);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2136) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2137) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2138) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2139) }
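
/*
 * Editor's note: the recovery path re-drives outstanding I/O with the
 * stock blk-mq requeue machinery; stripped to its shape:
 *
 *	blk_mq_requeue_request(req, false);	// park each request ...
 *	blk_mq_start_stopped_hw_queues(q, true);
 *	blk_mq_kick_requeue_list(q);		// ... then run them all
 *
 * Passing false to blk_mq_requeue_request() defers the kick, so every
 * request is parked before the hardware queues start turning again.
 */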
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2140) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2141) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2142)  * We are reconnecting to the backend, due to a suspend/resume, or a backend
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2143)  * driver restart.  We tear down our blkif structure and recreate it, but
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2144)  * leave the device-layer structures intact so that this is transparent to the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2145)  * rest of the kernel.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2146)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2147) static int blkfront_resume(struct xenbus_device *dev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2148) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2149) 	struct blkfront_info *info = dev_get_drvdata(&dev->dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2150) 	int err = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2151) 	unsigned int i, j;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2152) 	struct blkfront_ring_info *rinfo;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2153) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2154) 	dev_dbg(&dev->dev, "blkfront_resume: %s\n", dev->nodename);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2155) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2156) 	bio_list_init(&info->bio_list);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2157) 	INIT_LIST_HEAD(&info->requests);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2158) 	for_each_rinfo(info, rinfo, i) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2159) 		struct bio_list merge_bio;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2160) 		struct blk_shadow *shadow = rinfo->shadow;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2161) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2162) 		for (j = 0; j < BLK_RING_SIZE(info); j++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2163) 			/* Not in use? */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2164) 			if (!shadow[j].request)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2165) 				continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2166) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2167) 			/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2168) 			 * Get the bios in the request so we can re-queue them.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2169) 			 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2170) 			if (req_op(shadow[j].request) == REQ_OP_FLUSH ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2171) 			    req_op(shadow[j].request) == REQ_OP_DISCARD ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2172) 			    req_op(shadow[j].request) == REQ_OP_SECURE_ERASE ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2173) 			    shadow[j].request->cmd_flags & REQ_FUA) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2174) 				/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2175) 				 * Flush operations don't contain bios, so
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2176) 				 * we need to requeue the whole request.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2177) 				 *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2178) 				 * XXX: but this doesn't make any sense for a
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2179) 				 * write with the FUA flag set.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2180) 				 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2181) 				list_add(&shadow[j].request->queuelist, &info->requests);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2182) 				continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2183) 			}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2184) 			merge_bio.head = shadow[j].request->bio;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2185) 			merge_bio.tail = shadow[j].request->biotail;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2186) 			bio_list_merge(&info->bio_list, &merge_bio);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2187) 			shadow[j].request->bio = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2188) 			blk_mq_end_request(shadow[j].request, BLK_STS_OK);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2189) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2190) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2191) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2192) 	blkif_free(info, info->connected == BLKIF_STATE_CONNECTED);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2193) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2194) 	err = talk_to_blkback(dev, info);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2195) 	if (!err)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2196) 		blk_mq_update_nr_hw_queues(&info->tag_set, info->nr_rings);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2197) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2198) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2199) 	 * We have to wait for the backend to switch to
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2200) 	 * connected state, since we want to read which
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2201) 	 * features it supports.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2202) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2203) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2204) 	return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2205) }
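
/*
 * Editor's note: the bio harvesting above relies on bio_list being a
 * simple head/tail singly linked chain, so a request's bios can be
 * spliced out wholesale:
 *
 *	struct bio_list merge_bio;
 *
 *	merge_bio.head = req->bio;
 *	merge_bio.tail = req->biotail;
 *	bio_list_merge(&info->bio_list, &merge_bio);	// steal the chain
 *	req->bio = NULL;				// request is now empty
 *
 * The emptied request can then be completed, and the saved bios are
 * replayed with submit_bio() once the backend reconnects; see
 * blkif_recover() above.
 */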
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2206) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2207) static void blkfront_closing(struct blkfront_info *info)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2208) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2209) 	struct xenbus_device *xbdev = info->xbdev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2210) 	struct block_device *bdev = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2211) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2212) 	mutex_lock(&info->mutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2213) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2214) 	if (xbdev->state == XenbusStateClosing) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2215) 		mutex_unlock(&info->mutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2216) 		return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2217) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2218) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2219) 	if (info->gd)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2220) 		bdev = bdget_disk(info->gd, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2221) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2222) 	mutex_unlock(&info->mutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2223) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2224) 	if (!bdev) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2225) 		xenbus_frontend_closed(xbdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2226) 		return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2227) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2228) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2229) 	mutex_lock(&bdev->bd_mutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2230) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2231) 	if (bdev->bd_openers) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2232) 		xenbus_dev_error(xbdev, -EBUSY,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2233) 				 "Device in use; refusing to close");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2234) 		xenbus_switch_state(xbdev, XenbusStateClosing);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2235) 	} else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2236) 		xlvbd_release_gendisk(info);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2237) 		xenbus_frontend_closed(xbdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2238) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2239) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2240) 	mutex_unlock(&bdev->bd_mutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2241) 	bdput(bdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2242) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2243) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2244) static void blkfront_setup_discard(struct blkfront_info *info)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2245) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2246) 	info->feature_discard = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2247) 	info->discard_granularity = xenbus_read_unsigned(info->xbdev->otherend,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2248) 							 "discard-granularity",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2249) 							 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2250) 	info->discard_alignment = xenbus_read_unsigned(info->xbdev->otherend,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2251) 						       "discard-alignment", 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2252) 	info->feature_secdiscard =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2253) 		!!xenbus_read_unsigned(info->xbdev->otherend, "discard-secure",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2254) 				       0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2255) }
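
/*
 * Editor's note: xenbus_read_unsigned(dir, node, default) parses an
 * unsigned int from xenstore and hands back the default when the node is
 * absent or unparseable, so optional feature probing stays one line per
 * key:
 *
 *	unsigned int gran = xenbus_read_unsigned(info->xbdev->otherend,
 *						 "discard-granularity", 0);
 *
 * A result of 0 therefore reads as "backend did not advertise the
 * feature", which is exactly how the three reads above treat it.
 */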
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2256) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2257) static int blkfront_setup_indirect(struct blkfront_ring_info *rinfo)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2258) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2259) 	unsigned int psegs, grants, memflags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2260) 	int err, i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2261) 	struct blkfront_info *info = rinfo->dev_info;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2262) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2263) 	memflags = memalloc_noio_save();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2264) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2265) 	if (info->max_indirect_segments == 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2266) 		if (!HAS_EXTRA_REQ)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2267) 			grants = BLKIF_MAX_SEGMENTS_PER_REQUEST;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2268) 		else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2269) 			/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2270) 			 * When an extra req is required, the maximum
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2271) 			 * grants supported is related to the size of the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2272) 			 * Linux block segment.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2273) 			 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2274) 			grants = GRANTS_PER_PSEG;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2275) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2276) 	} else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2278) 		grants = info->max_indirect_segments;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2279) 	psegs = DIV_ROUND_UP(grants, GRANTS_PER_PSEG);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2280) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2281) 	err = fill_grant_buffer(rinfo,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2282) 				(grants + INDIRECT_GREFS(grants)) * BLK_RING_SIZE(info));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2283) 	if (err)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2284) 		goto out_of_memory;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2285) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2286) 	if (!info->feature_persistent && info->max_indirect_segments) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2287) 		/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2288) 		 * We are using indirect descriptors but not persistent
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2289) 		 * grants, so we need to allocate a set of pages that can
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2290) 		 * be used for mapping indirect grefs.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2291) 		 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2292) 		int num = INDIRECT_GREFS(grants) * BLK_RING_SIZE(info);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2293) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2294) 		BUG_ON(!list_empty(&rinfo->indirect_pages));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2295) 		for (i = 0; i < num; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2296) 			struct page *indirect_page = alloc_page(GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2297) 			if (!indirect_page)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2298) 				goto out_of_memory;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2299) 			list_add(&indirect_page->lru, &rinfo->indirect_pages);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2300) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2301) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2302) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2303) 	for (i = 0; i < BLK_RING_SIZE(info); i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2304) 		rinfo->shadow[i].grants_used =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2305) 			kvcalloc(grants,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2306) 				 sizeof(rinfo->shadow[i].grants_used[0]),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2307) 				 GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2308) 		rinfo->shadow[i].sg = kvcalloc(psegs,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2309) 					       sizeof(rinfo->shadow[i].sg[0]),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2310) 					       GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2311) 		if (info->max_indirect_segments)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2312) 			rinfo->shadow[i].indirect_grants =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2313) 				kvcalloc(INDIRECT_GREFS(grants),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2314) 					 sizeof(rinfo->shadow[i].indirect_grants[0]),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2315) 					 GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2316) 		if ((rinfo->shadow[i].grants_used == NULL) ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2317) 		    (rinfo->shadow[i].sg == NULL) ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2318) 		    (info->max_indirect_segments &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2319) 		     (rinfo->shadow[i].indirect_grants == NULL)))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2320) 			goto out_of_memory;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2321) 		sg_init_table(rinfo->shadow[i].sg, psegs);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2322) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2323) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2324) 	memalloc_noio_restore(memflags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2325) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2326) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2327) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2328) out_of_memory:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2329) 	for (i = 0; i < BLK_RING_SIZE(info); i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2330) 		kvfree(rinfo->shadow[i].grants_used);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2331) 		rinfo->shadow[i].grants_used = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2332) 		kvfree(rinfo->shadow[i].sg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2333) 		rinfo->shadow[i].sg = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2334) 		kvfree(rinfo->shadow[i].indirect_grants);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2335) 		rinfo->shadow[i].indirect_grants = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2336) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2337) 	if (!list_empty(&rinfo->indirect_pages)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2338) 		struct page *indirect_page, *n;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2339) 		list_for_each_entry_safe(indirect_page, n, &rinfo->indirect_pages, lru) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2340) 			list_del(&indirect_page->lru);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2341) 			__free_page(indirect_page);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2342) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2343) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2344) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2345) 	memalloc_noio_restore(memflags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2346) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2347) 	return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2348) }
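/*
 * Sizing sketch for the allocations above (assuming 4 KiB Xen pages and
 * the 8-byte struct blkif_request_segment): GRANTS_PER_INDIRECT_FRAME is
 * then 4096 / 8 == 512, so a backend advertising 256 indirect segments
 * needs INDIRECT_GREFS(256) == DIV_ROUND_UP(256, 512) == 1 extra grant
 * per request, and fill_grant_buffer() pre-allocates
 * (256 + 1) * BLK_RING_SIZE(info) grants for the whole ring.
 */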
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2349) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2350) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2351)  * Gather all backend feature-* nodes.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2352)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2353) static void blkfront_gather_backend_features(struct blkfront_info *info)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2354) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2355) 	unsigned int indirect_segments;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2356) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2357) 	info->feature_flush = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2358) 	info->feature_fua = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2359) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2360) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2361) 	 * If there's no "feature-barrier" defined, then it means
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2362) 	 * we're dealing with a very old backend which writes
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2363) 	 * synchronously; nothing to do.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2364) 	 *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2365) 	 * If there are barriers, then we use flush.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2366) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2367) 	if (xenbus_read_unsigned(info->xbdev->otherend, "feature-barrier", 0)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2368) 		info->feature_flush = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2369) 		info->feature_fua = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2370) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2371) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2372) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2373) 	 * If "feature-flush-cache" is also present, it takes
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2374) 	 * precedence over barriers.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2375) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2376) 	if (xenbus_read_unsigned(info->xbdev->otherend, "feature-flush-cache",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2377) 				 0)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2378) 		info->feature_flush = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2379) 		info->feature_fua = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2380) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2381) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2382) 	if (xenbus_read_unsigned(info->xbdev->otherend, "feature-discard", 0))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2383) 		blkfront_setup_discard(info);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2384) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2385) 	if (info->feature_persistent)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2386) 		info->feature_persistent =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2387) 			!!xenbus_read_unsigned(info->xbdev->otherend,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2388) 					       "feature-persistent", 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2389) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2390) 	indirect_segments = xenbus_read_unsigned(info->xbdev->otherend,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2391) 					"feature-max-indirect-segments", 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2392) 	if (indirect_segments > xen_blkif_max_segments)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2393) 		indirect_segments = xen_blkif_max_segments;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2394) 	if (indirect_segments <= BLKIF_MAX_SEGMENTS_PER_REQUEST)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2395) 		indirect_segments = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2396) 	info->max_indirect_segments = indirect_segments;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2397) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2398) 	if (info->feature_persistent) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2399) 		mutex_lock(&blkfront_mutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2400) 		schedule_delayed_work(&blkfront_work, HZ * 10);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2401) 		mutex_unlock(&blkfront_mutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2402) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2403) }
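/*
 * Summary of the flush/FUA negotiation above (derived from the code, not
 * from a spec table):
 *
 *   backend advertises                feature_flush  feature_fua
 *   neither node                            0             0
 *   feature-barrier only                    1             1
 *   feature-flush-cache (either way)        1             0
 *
 * i.e. "feature-flush-cache", when present, always wins over barriers.
 */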
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2404) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2405) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2406)  * Invoked when the backend is finally 'ready' (and has produced
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2407)  * the details about the physical device - #sectors, size, etc.).
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2408)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2409) static void blkfront_connect(struct blkfront_info *info)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2410) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2411) 	unsigned long long sectors;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2412) 	unsigned long sector_size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2413) 	unsigned int physical_sector_size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2414) 	unsigned int binfo;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2415) 	int err, i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2416) 	struct blkfront_ring_info *rinfo;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2417) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2418) 	switch (info->connected) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2419) 	case BLKIF_STATE_CONNECTED:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2420) 		/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2421) 		 * Potentially, the back-end may be signalling
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2422) 		 * a capacity change; update the capacity.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2423) 		 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2424) 		err = xenbus_scanf(XBT_NIL, info->xbdev->otherend,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2425) 				   "sectors", "%Lu", &sectors);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2426) 		if (XENBUS_EXIST_ERR(err))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2427) 			return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2428) 		printk(KERN_INFO "Setting capacity to %Lu\n", sectors);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2430) 		set_capacity_revalidate_and_notify(info->gd, sectors, true);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2431) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2432) 		return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2433) 	case BLKIF_STATE_SUSPENDED:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2434) 		/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2435) 		 * If we are recovering from suspension, we need to wait
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2436) 		 * for the backend to announce its features before
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2437) 		 * reconnecting; at the very least we need to know whether
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2438) 		 * the backend supports indirect descriptors, and how many.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2439) 		 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2440) 		blkif_recover(info);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2441) 		return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2442) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2443) 	default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2444) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2445) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2446) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2447) 	dev_dbg(&info->xbdev->dev, "%s:%s.\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2448) 		__func__, info->xbdev->otherend);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2449) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2450) 	err = xenbus_gather(XBT_NIL, info->xbdev->otherend,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2451) 			    "sectors", "%llu", &sectors,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2452) 			    "info", "%u", &binfo,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2453) 			    "sector-size", "%lu", &sector_size,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2454) 			    NULL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2455) 	if (err) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2456) 		xenbus_dev_fatal(info->xbdev, err,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2457) 				 "reading backend fields at %s",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2458) 				 info->xbdev->otherend);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2459) 		return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2460) 	}
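	/*
	 * For reference (values illustrative): a 10 GiB virtual disk would
	 * typically show up as sectors = "20971520", info = "0" and
	 * sector-size = "512" in the backend's xenstore directory.
	 */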
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2461) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2462) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2463) 	 * physical-sector-size is a newer field, so old backends may not
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2464) 	 * provide this. Assume the physical sector size to be the same as
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2465) 	 * sector_size in that case.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2466) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2467) 	physical_sector_size = xenbus_read_unsigned(info->xbdev->otherend,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2468) 						    "physical-sector-size",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2469) 						    sector_size);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2470) 	blkfront_gather_backend_features(info);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2471) 	for_each_rinfo(info, rinfo, i) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2472) 		err = blkfront_setup_indirect(rinfo);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2473) 		if (err) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2474) 			xenbus_dev_fatal(info->xbdev, err, "setup_indirect at %s",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2475) 					 info->xbdev->otherend);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2476) 			blkif_free(info, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2477) 			break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2478) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2479) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2480) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2481) 	err = xlvbd_alloc_gendisk(sectors, info, binfo, sector_size,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2482) 				  physical_sector_size);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2483) 	if (err) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2484) 		xenbus_dev_fatal(info->xbdev, err, "xlvbd_add at %s",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2485) 				 info->xbdev->otherend);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2486) 		goto fail;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2487) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2488) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2489) 	xenbus_switch_state(info->xbdev, XenbusStateConnected);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2490) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2491) 	/* Kick pending requests. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2492) 	info->connected = BLKIF_STATE_CONNECTED;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2493) 	for_each_rinfo(info, rinfo, i)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2494) 		kick_pending_request_queues(rinfo);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2495) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2496) 	device_add_disk(&info->xbdev->dev, info->gd, NULL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2497) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2498) 	info->is_ready = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2499) 	return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2500) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2501) fail:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2502) 	blkif_free(info, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2503) 	return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2504) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2505) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2506) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2507)  * Callback received when the backend's state changes.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2508)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2509) static void blkback_changed(struct xenbus_device *dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2510) 			    enum xenbus_state backend_state)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2511) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2512) 	struct blkfront_info *info = dev_get_drvdata(&dev->dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2513) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2514) 	dev_dbg(&dev->dev, "blkfront:blkback_changed to state %d.\n", backend_state);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2515) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2516) 	switch (backend_state) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2517) 	case XenbusStateInitWait:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2518) 		if (dev->state != XenbusStateInitialising)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2519) 			break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2520) 		if (talk_to_blkback(dev, info))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2521) 			break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2521) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2522) 	case XenbusStateInitialising:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2523) 	case XenbusStateInitialised:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2524) 	case XenbusStateReconfiguring:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2525) 	case XenbusStateReconfigured:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2526) 	case XenbusStateUnknown:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2527) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2528) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2529) 	case XenbusStateConnected:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2530) 		/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2531) 		 * talk_to_blkback sets state to XenbusStateInitialised
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2532) 		 * and blkfront_connect sets it to XenbusStateConnected
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2533) 		 * (if connection went OK).
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2534) 		 *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2535) 		 * If the backend (or toolstack) decides to poke at backend
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2536) 		 * state (and re-trigger the watch by setting the state
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2537) 		 * repeatedly to XenbusStateConnected (4)), we need to deal
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2538) 		 * with this. This is allowed, as it is how the backend tells
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2539) 		 * the guest that the size of the disk has changed!
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2540) 		 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2541) 		if ((dev->state != XenbusStateInitialised) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2542) 		    (dev->state != XenbusStateConnected)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2543) 			if (talk_to_blkback(dev, info))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2544) 				break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2545) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2546) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2547) 		blkfront_connect(info);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2548) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2549) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2550) 	case XenbusStateClosed:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2551) 		if (dev->state == XenbusStateClosed)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2552) 			break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2553) 		fallthrough;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2554) 	case XenbusStateClosing:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2555) 		if (info)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2556) 			blkfront_closing(info);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2557) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2558) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2559) }
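/*
 * Editor's sketch of the handshake driven above, assuming the usual
 * xenbus flow: the frontend starts in Initialising; when the backend
 * reaches InitWait, talk_to_blkback() publishes the ring details and
 * moves the frontend to Initialised; once the backend reports Connected,
 * blkfront_connect() reads the disk geometry and switches the frontend
 * to Connected as well. Closing/Closed from the backend funnels into
 * blkfront_closing().
 */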
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2560) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2561) static int blkfront_remove(struct xenbus_device *xbdev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2562) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2563) 	struct blkfront_info *info = dev_get_drvdata(&xbdev->dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2564) 	struct block_device *bdev = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2565) 	struct gendisk *disk;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2566) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2567) 	dev_dbg(&xbdev->dev, "%s removed", xbdev->nodename);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2568) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2569) 	if (!info)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2570) 		return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2571) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2572) 	blkif_free(info, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2573) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2574) 	mutex_lock(&info->mutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2575) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2576) 	disk = info->gd;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2577) 	if (disk)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2578) 		bdev = bdget_disk(disk, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2579) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2580) 	info->xbdev = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2581) 	mutex_unlock(&info->mutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2582) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2583) 	if (!bdev) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2584) 		mutex_lock(&blkfront_mutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2585) 		free_info(info);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2586) 		mutex_unlock(&blkfront_mutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2587) 		return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2588) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2589) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2590) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2591) 	 * The xbdev was removed before we reached the Closed
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2592) 	 * state. See if it's safe to remove the disk. If the bdev
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2593) 	 * isn't closed yet, we let release take care of it.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2594) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2595) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2596) 	mutex_lock(&bdev->bd_mutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2597) 	info = disk->private_data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2598) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2599) 	dev_warn(disk_to_dev(disk),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2600) 		 "%s was hot-unplugged, %d stale handles\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2601) 		 xbdev->nodename, bdev->bd_openers);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2602) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2603) 	if (info && !bdev->bd_openers) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2604) 		xlvbd_release_gendisk(info);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2605) 		disk->private_data = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2606) 		mutex_lock(&blkfront_mutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2607) 		free_info(info);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2608) 		mutex_unlock(&blkfront_mutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2609) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2610) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2611) 	mutex_unlock(&bdev->bd_mutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2612) 	bdput(bdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2613) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2614) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2615) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2616) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2617) static int blkfront_is_ready(struct xenbus_device *dev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2618) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2619) 	struct blkfront_info *info = dev_get_drvdata(&dev->dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2620) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2621) 	return info->is_ready && info->xbdev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2622) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2623) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2624) static int blkif_open(struct block_device *bdev, fmode_t mode)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2625) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2626) 	struct gendisk *disk = bdev->bd_disk;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2627) 	struct blkfront_info *info;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2628) 	int err = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2629) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2630) 	mutex_lock(&blkfront_mutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2631) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2632) 	info = disk->private_data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2633) 	if (!info) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2634) 		/* xbdev gone */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2635) 		err = -ERESTARTSYS;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2636) 		goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2637) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2638) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2639) 	mutex_lock(&info->mutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2640) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2641) 	if (!info->gd)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2642) 		/* xbdev is closed */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2643) 		err = -ERESTARTSYS;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2644) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2645) 	mutex_unlock(&info->mutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2646) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2647) out:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2648) 	mutex_unlock(&blkfront_mutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2649) 	return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2650) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2651) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2652) static void blkif_release(struct gendisk *disk, fmode_t mode)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2653) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2654) 	struct blkfront_info *info = disk->private_data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2655) 	struct block_device *bdev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2656) 	struct xenbus_device *xbdev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2657) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2658) 	mutex_lock(&blkfront_mutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2659) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2660) 	bdev = bdget_disk(disk, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2661) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2662) 	if (!bdev) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2663) 		WARN(1, "Block device %s yanked out from us!\n", disk->disk_name);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2664) 		goto out_mutex;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2665) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2666) 	if (bdev->bd_openers)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2667) 		goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2668) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2669) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2670) 	 * Check if we have been instructed to close. We will have
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2671) 	 * deferred this request, because the bdev was still open.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2672) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2673) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2674) 	mutex_lock(&info->mutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2675) 	xbdev = info->xbdev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2676) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2677) 	if (xbdev && xbdev->state == XenbusStateClosing) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2678) 		/* pending switch to state closed */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2679) 		dev_info(disk_to_dev(bdev->bd_disk), "releasing disk\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2680) 		xlvbd_release_gendisk(info);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2681) 		xenbus_frontend_closed(info->xbdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2682) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2683) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2684) 	mutex_unlock(&info->mutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2685) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2686) 	if (!xbdev) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2687) 		/* sudden device removal */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2688) 		dev_info(disk_to_dev(bdev->bd_disk), "releasing disk\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2689) 		xlvbd_release_gendisk(info);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2690) 		disk->private_data = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2691) 		free_info(info);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2692) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2693) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2694) out:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2695) 	bdput(bdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2696) out_mutex:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2697) 	mutex_unlock(&blkfront_mutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2698) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2699) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2700) static const struct block_device_operations xlvbd_block_fops = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2702) 	.owner = THIS_MODULE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2703) 	.open = blkif_open,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2704) 	.release = blkif_release,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2705) 	.getgeo = blkif_getgeo,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2706) 	.ioctl = blkif_ioctl,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2707) 	.compat_ioctl = blkdev_compat_ptr_ioctl,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2708) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2709) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2710) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2711) static const struct xenbus_device_id blkfront_ids[] = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2712) 	{ "vbd" },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2713) 	{ "" }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2714) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2715) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2716) static struct xenbus_driver blkfront_driver = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2717) 	.ids  = blkfront_ids,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2718) 	.probe = blkfront_probe,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2719) 	.remove = blkfront_remove,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2720) 	.resume = blkfront_resume,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2721) 	.otherend_changed = blkback_changed,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2722) 	.is_ready = blkfront_is_ready,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2723) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2724) 
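/*
 * Reclaim persistent grants that the backend has let go of: for every
 * grant still cached on a ring, try to end the foreign access; on
 * success the entry is recycled onto the tail of the grants list with
 * GRANT_INVALID_REF so that a fresh grant is set up on next use.
 */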
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2725) static void purge_persistent_grants(struct blkfront_info *info)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2726) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2727) 	unsigned int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2728) 	unsigned long flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2729) 	struct blkfront_ring_info *rinfo;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2730) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2731) 	for_each_rinfo(info, rinfo, i) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2732) 		struct grant *gnt_list_entry, *tmp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2733) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2734) 		spin_lock_irqsave(&rinfo->ring_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2735) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2736) 		if (rinfo->persistent_gnts_c == 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2737) 			spin_unlock_irqrestore(&rinfo->ring_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2738) 			continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2739) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2740) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2741) 		list_for_each_entry_safe(gnt_list_entry, tmp, &rinfo->grants,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2742) 					 node) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2743) 			if (gnt_list_entry->gref == GRANT_INVALID_REF ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2744) 			    !gnttab_try_end_foreign_access(gnt_list_entry->gref))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2745) 				continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2746) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2747) 			list_del(&gnt_list_entry->node);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2748) 			rinfo->persistent_gnts_c--;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2749) 			gnt_list_entry->gref = GRANT_INVALID_REF;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2750) 			list_add_tail(&gnt_list_entry->node, &rinfo->grants);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2751) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2752) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2753) 		spin_unlock_irqrestore(&rinfo->ring_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2754) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2755) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2756) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2757) static void blkfront_delay_work(struct work_struct *work)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2758) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2759) 	struct blkfront_info *info;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2760) 	bool need_schedule_work = false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2761) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2762) 	mutex_lock(&blkfront_mutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2763) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2764) 	list_for_each_entry(info, &info_list, info_list) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2765) 		if (info->feature_persistent) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2766) 			need_schedule_work = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2767) 			mutex_lock(&info->mutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2768) 			purge_persistent_grants(info);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2769) 			mutex_unlock(&info->mutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2770) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2771) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2772) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2773) 	if (need_schedule_work)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2774) 		schedule_delayed_work(&blkfront_work, HZ * 10);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2775) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2776) 	mutex_unlock(&blkfront_mutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2777) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2778) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2779) static int __init xlblk_init(void)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2780) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2781) 	int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2782) 	int nr_cpus = num_online_cpus();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2783) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2784) 	if (!xen_domain())
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2785) 		return -ENODEV;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2786) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2787) 	if (!xen_has_pv_disk_devices())
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2788) 		return -ENODEV;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2789) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2790) 	if (register_blkdev(XENVBD_MAJOR, DEV_NAME)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2791) 		pr_warn("xen_blk: can't get major %d with name %s\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2792) 			XENVBD_MAJOR, DEV_NAME);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2793) 		return -ENODEV;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2794) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2795) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2796) 	if (xen_blkif_max_segments < BLKIF_MAX_SEGMENTS_PER_REQUEST)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2797) 		xen_blkif_max_segments = BLKIF_MAX_SEGMENTS_PER_REQUEST;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2798) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2799) 	if (xen_blkif_max_ring_order > XENBUS_MAX_RING_GRANT_ORDER) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2800) 		pr_info("Invalid max_ring_order (%d), will use default max: %d.\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2801) 			xen_blkif_max_ring_order, XENBUS_MAX_RING_GRANT_ORDER);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2802) 		xen_blkif_max_ring_order = XENBUS_MAX_RING_GRANT_ORDER;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2803) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2804) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2805) 	if (xen_blkif_max_queues > nr_cpus) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2806) 		pr_info("Invalid max_queues (%d), will use default max: %d.\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2807) 			xen_blkif_max_queues, nr_cpus);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2808) 		xen_blkif_max_queues = nr_cpus;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2809) 	}
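	/*
	 * Worked example (illustrative): booting a 4-vCPU guest with the
	 * module parameter backing xen_blkif_max_queues set to 8 ends up
	 * with 4 hardware queues; likewise a requested ring order above
	 * XENBUS_MAX_RING_GRANT_ORDER is clamped to that maximum.
	 */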
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2810) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2811) 	INIT_DELAYED_WORK(&blkfront_work, blkfront_delay_work);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2812) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2813) 	ret = xenbus_register_frontend(&blkfront_driver);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2814) 	if (ret) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2815) 		unregister_blkdev(XENVBD_MAJOR, DEV_NAME);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2816) 		return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2817) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2818) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2819) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2820) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2821) module_init(xlblk_init);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2822) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2823) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2824) static void __exit xlblk_exit(void)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2825) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2826) 	cancel_delayed_work_sync(&blkfront_work);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2827) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2828) 	xenbus_unregister_driver(&blkfront_driver);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2829) 	unregister_blkdev(XENVBD_MAJOR, DEV_NAME);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2830) 	kfree(minors);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2831) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2832) module_exit(xlblk_exit);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2833) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2834) MODULE_DESCRIPTION("Xen virtual block device frontend");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2835) MODULE_LICENSE("GPL");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2836) MODULE_ALIAS_BLOCKDEV_MAJOR(XENVBD_MAJOR);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2837) MODULE_ALIAS("xen:vbd");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2838) MODULE_ALIAS("xenblk");