Orange Pi 5 kernel

Deprecated Linux kernel 5.10.110 for OrangePi 5/5B/5+ boards

/*
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License version 2
 * as published by the Free Software Foundation; or, when distributed
 * separately from the Linux kernel or incorporated into other
 * software packages, subject to the following license:
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this source file (the "Software"), to deal in the Software without
 * restriction, including without limitation the rights to use, copy, modify,
 * merge, publish, distribute, sublicense, and/or sell copies of the Software,
 * and to permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */

#ifndef __XEN_BLKIF__BACKEND__COMMON_H__
#define __XEN_BLKIF__BACKEND__COMMON_H__

#include <linux/module.h>
#include <linux/interrupt.h>
#include <linux/slab.h>
#include <linux/blkdev.h>
#include <linux/vmalloc.h>
#include <linux/wait.h>
#include <linux/io.h>
#include <linux/rbtree.h>
#include <asm/setup.h>
#include <asm/hypervisor.h>
#include <xen/grant_table.h>
#include <xen/page.h>
#include <xen/xenbus.h>
#include <xen/interface/io/ring.h>
#include <xen/interface/io/blkif.h>
#include <xen/interface/io/protocols.h>

extern unsigned int xen_blkif_max_ring_order;
extern unsigned int xenblk_max_queues;
/*
 * This is the maximum number of segments that would be allowed in indirect
 * requests. This value will also be passed to the frontend.
 */
#define MAX_INDIRECT_SEGMENTS 256

/*
 * Xen uses 4K pages. The guest may use a different page size (4K or 64K).
 * Number of Xen pages per segment:
 */
#define XEN_PAGES_PER_SEGMENT   (PAGE_SIZE / XEN_PAGE_SIZE)

#define XEN_PAGES_PER_INDIRECT_FRAME \
	(XEN_PAGE_SIZE/sizeof(struct blkif_request_segment))
#define SEGS_PER_INDIRECT_FRAME	\
	(XEN_PAGES_PER_INDIRECT_FRAME / XEN_PAGES_PER_SEGMENT)

#define MAX_INDIRECT_PAGES \
	((MAX_INDIRECT_SEGMENTS + SEGS_PER_INDIRECT_FRAME - 1)/SEGS_PER_INDIRECT_FRAME)
#define INDIRECT_PAGES(_segs) DIV_ROUND_UP(_segs, XEN_PAGES_PER_INDIRECT_FRAME)
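
/*
 * Worked example (illustrative only, assuming 4 KiB Xen pages and an
 * 8-byte struct blkif_request_segment):
 *
 *   4 KiB guest pages:  XEN_PAGES_PER_SEGMENT = 1,
 *                       XEN_PAGES_PER_INDIRECT_FRAME = 4096 / 8 = 512,
 *                       SEGS_PER_INDIRECT_FRAME = 512,
 *                       MAX_INDIRECT_PAGES = (256 + 511) / 512 = 1.
 *
 *   64 KiB guest pages: XEN_PAGES_PER_SEGMENT = 16,
 *                       SEGS_PER_INDIRECT_FRAME = 512 / 16 = 32,
 *                       MAX_INDIRECT_PAGES = (256 + 31) / 32 = 8.
 */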

/* Not a real protocol.  Used to generate ring structs which contain
 * the elements common to all protocols only.  This way we get a
 * compiler-checkable way to use common struct elements, so we can
 * avoid using switch(protocol) in a number of places.  */
struct blkif_common_request {
	char dummy;
};

/* i386 protocol version */

struct blkif_x86_32_request_rw {
	uint8_t        nr_segments;  /* number of segments                   */
	blkif_vdev_t   handle;       /* only for read/write requests         */
	uint64_t       id;           /* private guest value, echoed in resp  */
	blkif_sector_t sector_number;/* start sector idx on disk (r/w only)  */
	struct blkif_request_segment seg[BLKIF_MAX_SEGMENTS_PER_REQUEST];
} __attribute__((__packed__));

struct blkif_x86_32_request_discard {
	uint8_t        flag;         /* BLKIF_DISCARD_SECURE or zero         */
	blkif_vdev_t   _pad1;        /* was "handle" for read/write requests */
	uint64_t       id;           /* private guest value, echoed in resp  */
	blkif_sector_t sector_number;/* start sector idx on disk (r/w only)  */
	uint64_t       nr_sectors;
} __attribute__((__packed__));

struct blkif_x86_32_request_other {
	uint8_t        _pad1;
	blkif_vdev_t   _pad2;
	uint64_t       id;           /* private guest value, echoed in resp  */
} __attribute__((__packed__));

struct blkif_x86_32_request_indirect {
	uint8_t        indirect_op;
	uint16_t       nr_segments;
	uint64_t       id;
	blkif_sector_t sector_number;
	blkif_vdev_t   handle;
	uint16_t       _pad1;
	grant_ref_t    indirect_grefs[BLKIF_MAX_INDIRECT_PAGES_PER_REQUEST];
	/*
	 * The maximum number of indirect segments (and pages) that will
	 * be used is determined by MAX_INDIRECT_SEGMENTS, this value
	 * is also exported to the guest (via xenstore
	 * feature-max-indirect-segments entry), so the frontend knows how
	 * many indirect segments the backend supports.
	 */
	uint64_t       _pad2;        /* make it 64 byte aligned */
} __attribute__((__packed__));

struct blkif_x86_32_request {
	uint8_t        operation;    /* BLKIF_OP_???                         */
	union {
		struct blkif_x86_32_request_rw rw;
		struct blkif_x86_32_request_discard discard;
		struct blkif_x86_32_request_other other;
		struct blkif_x86_32_request_indirect indirect;
	} u;
} __attribute__((__packed__));

/* x86_64 protocol version */

struct blkif_x86_64_request_rw {
	uint8_t        nr_segments;  /* number of segments                   */
	blkif_vdev_t   handle;       /* only for read/write requests         */
	uint32_t       _pad1;        /* offsetof(blkif_request..,u.rw.id)==8 */
	uint64_t       id;
	blkif_sector_t sector_number;/* start sector idx on disk (r/w only)  */
	struct blkif_request_segment seg[BLKIF_MAX_SEGMENTS_PER_REQUEST];
} __attribute__((__packed__));

struct blkif_x86_64_request_discard {
	uint8_t        flag;         /* BLKIF_DISCARD_SECURE or zero         */
	blkif_vdev_t   _pad1;        /* was "handle" for read/write requests */
	uint32_t       _pad2;        /* offsetof(blkif_..,u.discard.id)==8   */
	uint64_t       id;
	blkif_sector_t sector_number;/* start sector idx on disk (r/w only)  */
	uint64_t       nr_sectors;
} __attribute__((__packed__));

struct blkif_x86_64_request_other {
	uint8_t        _pad1;
	blkif_vdev_t   _pad2;
	uint32_t       _pad3;        /* offsetof(blkif_..,u.discard.id)==8   */
	uint64_t       id;           /* private guest value, echoed in resp  */
} __attribute__((__packed__));

struct blkif_x86_64_request_indirect {
	uint8_t        indirect_op;
	uint16_t       nr_segments;
	uint32_t       _pad1;        /* offsetof(blkif_..,u.indirect.id)==8  */
	uint64_t       id;
	blkif_sector_t sector_number;
	blkif_vdev_t   handle;
	uint16_t       _pad2;
	grant_ref_t    indirect_grefs[BLKIF_MAX_INDIRECT_PAGES_PER_REQUEST];
	/*
	 * The maximum number of indirect segments (and pages) that will
	 * be used is determined by MAX_INDIRECT_SEGMENTS, this value
	 * is also exported to the guest (via xenstore
	 * feature-max-indirect-segments entry), so the frontend knows how
	 * many indirect segments the backend supports.
	 */
	uint32_t       _pad3;        /* make it 64 byte aligned */
} __attribute__((__packed__));

struct blkif_x86_64_request {
	uint8_t        operation;    /* BLKIF_OP_???                         */
	union {
		struct blkif_x86_64_request_rw rw;
		struct blkif_x86_64_request_discard discard;
		struct blkif_x86_64_request_other other;
		struct blkif_x86_64_request_indirect indirect;
	} u;
} __attribute__((__packed__));

DEFINE_RING_TYPES(blkif_common, struct blkif_common_request,
		  struct blkif_response);
DEFINE_RING_TYPES(blkif_x86_32, struct blkif_x86_32_request,
		  struct blkif_response __packed);
DEFINE_RING_TYPES(blkif_x86_64, struct blkif_x86_64_request,
		  struct blkif_response);
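
/*
 * Note: DEFINE_RING_TYPES(name, req_t, rsp_t) from xen/interface/io/ring.h
 * expands to the shared-ring and producer/consumer ring structures
 * (struct name_sring, struct name_front_ring, struct name_back_ring).
 * The blkif_common variant only carries the fields shared by all
 * protocols, so ring bookkeeping can be done without switching on the
 * negotiated ABI.
 */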

union blkif_back_rings {
	struct blkif_back_ring        native;
	struct blkif_common_back_ring common;
	struct blkif_x86_32_back_ring x86_32;
	struct blkif_x86_64_back_ring x86_64;
};

enum blkif_protocol {
	BLKIF_PROTOCOL_NATIVE = 1,
	BLKIF_PROTOCOL_X86_32 = 2,
	BLKIF_PROTOCOL_X86_64 = 3,
};

/*
 * Default protocol if the frontend doesn't specify one.
 */
#ifdef CONFIG_X86
#  define BLKIF_PROTOCOL_DEFAULT BLKIF_PROTOCOL_X86_32
#else
#  define BLKIF_PROTOCOL_DEFAULT BLKIF_PROTOCOL_NATIVE
#endif
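
/*
 * Protocol negotiation sketch (illustrative, not the backend's actual
 * xenbus code): the frontend advertises its ABI as a string in its
 * xenstore "protocol" node, which maps onto enum blkif_protocol using the
 * XEN_IO_PROTO_ABI_* strings from <xen/interface/io/protocols.h>.
 * A hypothetical helper could look like this:
 *
 *	static enum blkif_protocol blkif_parse_protocol(const char *s)
 *	{
 *		if (!strcmp(s, XEN_IO_PROTO_ABI_NATIVE))
 *			return BLKIF_PROTOCOL_NATIVE;
 *		if (!strcmp(s, XEN_IO_PROTO_ABI_X86_32))
 *			return BLKIF_PROTOCOL_X86_32;
 *		if (!strcmp(s, XEN_IO_PROTO_ABI_X86_64))
 *			return BLKIF_PROTOCOL_X86_64;
 *		return BLKIF_PROTOCOL_DEFAULT;
 *	}
 */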

struct xen_vbd {
	/* What the domain refers to this vbd as. */
	blkif_vdev_t		handle;
	/* Non-zero -> read-only */
	unsigned char		readonly;
	/* VDISK_xxx */
	unsigned char		type;
	/* phys device that this vbd maps to. */
	u32			pdevice;
	struct block_device	*bdev;
	/* Cached size parameter. */
	sector_t		size;
	unsigned int		flush_support:1;
	unsigned int		discard_secure:1;
	unsigned int		feature_gnt_persistent:1;
	unsigned int		overflow_max_grants:1;
};

struct backend_info;

/* Number of requests that we can fit in a ring */
#define XEN_BLKIF_REQS_PER_PAGE		32
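
/*
 * Rough sizing (illustrative, assuming 4 KiB ring pages and the ~112-byte
 * native request): the generic ring macros round the slot count down to a
 * power of two, which gives 32 request slots per ring page; a multi-page
 * ring of nr_ring_pages pages then holds 32 * nr_ring_pages slots.
 */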

struct persistent_gnt {
	struct page *page;
	grant_ref_t gnt;
	grant_handle_t handle;
	unsigned long last_used;
	bool active;
	struct rb_node node;
	struct list_head remove_node;
};

/* Per-ring information. */
struct xen_blkif_ring {
	/* Physical parameters of the comms window. */
	unsigned int		irq;
	union blkif_back_rings	blk_rings;
	void			*blk_ring;
	/* Private fields. */
	spinlock_t		blk_ring_lock;

	wait_queue_head_t	wq;
	atomic_t		inflight;
	bool			active;
	/* One thread per blkif ring. */
	struct task_struct	*xenblkd;
	unsigned int		waiting_reqs;

	/* List of all 'pending_req' available */
	struct list_head	pending_free;
	/* And its spinlock. */
	spinlock_t		pending_free_lock;
	wait_queue_head_t	pending_free_wq;

	/* Tree to store persistent grants. */
	struct rb_root		persistent_gnts;
	unsigned int		persistent_gnt_c;
	atomic_t		persistent_gnt_in_use;
	unsigned long		next_lru;

	/* Statistics. */
	unsigned long		st_print;
	unsigned long long	st_rd_req;
	unsigned long long	st_wr_req;
	unsigned long long	st_oo_req;
	unsigned long long	st_f_req;
	unsigned long long	st_ds_req;
	unsigned long long	st_rd_sect;
	unsigned long long	st_wr_sect;

	/* Used by the kworker that offloads work from the persistent purge. */
	struct list_head	persistent_purge_list;
	struct work_struct	persistent_purge_work;

	/* Buffer of free pages to map grant refs. */
	struct gnttab_page_cache free_pages;

	struct work_struct	free_work;
	/* Thread shutdown wait queue. */
	wait_queue_head_t	shutdown_wq;
	struct xen_blkif	*blkif;
};

struct xen_blkif {
	/* Unique identifier for this interface. */
	domid_t			domid;
	unsigned int		handle;
	/* Comms information. */
	enum blkif_protocol	blk_protocol;
	/* The VBD attached to this interface. */
	struct xen_vbd		vbd;
	/* Back pointer to the backend_info. */
	struct backend_info	*be;
	atomic_t		refcnt;
	/* for barrier (drain) requests */
	struct completion	drain_complete;
	atomic_t		drain;

	struct work_struct	free_work;
	unsigned int		nr_ring_pages;
	bool			multi_ref;
	/* All rings for this device. */
	struct xen_blkif_ring	*rings;
	unsigned int		nr_rings;
	unsigned long		buffer_squeeze_end;
};

struct seg_buf {
	unsigned long offset;
	unsigned int nsec;
};

struct grant_page {
	struct page		*page;
	struct persistent_gnt	*persistent_gnt;
	grant_handle_t		handle;
	grant_ref_t		gref;
};

/*
 * Each outstanding request that we've passed to the lower device layers has a
 * 'pending_req' allocated to it. Each buffer_head that completes decrements
 * the pendcnt towards zero. When it hits zero, the specified domain has a
 * response queued for it, with the saved 'id' passed back.
 */
struct pending_req {
	struct xen_blkif_ring	*ring;
	u64			id;
	int			nr_segs;
	atomic_t		pendcnt;
	unsigned short		operation;
	int			status;
	struct list_head	free_list;
	struct grant_page	*segments[MAX_INDIRECT_SEGMENTS];
	/* Indirect descriptors */
	struct grant_page	*indirect_pages[MAX_INDIRECT_PAGES];
	struct seg_buf		seg[MAX_INDIRECT_SEGMENTS];
	struct bio		*biolist[MAX_INDIRECT_SEGMENTS];
	struct gnttab_unmap_grant_ref unmap[MAX_INDIRECT_SEGMENTS];
	struct page		*unmap_pages[MAX_INDIRECT_SEGMENTS];
	struct gntab_unmap_queue_data gnttab_unmap_data;
};
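
/*
 * Completion pattern implied by 'pendcnt' (illustrative sketch only, not
 * the driver's actual bio end_io path): every bio submitted for a request
 * holds one count, and the completion that drops it to zero is the one
 * that queues the response for req->id with req->status.
 *
 *	static void example_bio_done(struct pending_req *req, int error)
 *	{
 *		if (error)
 *			req->status = BLKIF_RSP_ERROR;
 *		if (atomic_dec_and_test(&req->pendcnt))
 *			example_make_response(req);
 *	}
 *
 * where example_make_response() is a hypothetical stand-in for pushing the
 * response onto ring->blk_rings and returning req to ring->pending_free.
 */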

#define vbd_sz(_v)	((_v)->bdev->bd_part ? \
			 (_v)->bdev->bd_part->nr_sects : \
			  get_capacity((_v)->bdev->bd_disk))

#define xen_blkif_get(_b) (atomic_inc(&(_b)->refcnt))
#define xen_blkif_put(_b)				\
	do {						\
		if (atomic_dec_and_test(&(_b)->refcnt))	\
			schedule_work(&(_b)->free_work);\
	} while (0)
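
/*
 * Usage sketch for the refcount helpers above (illustrative): take a
 * reference for as long as a ring, thread, or in-flight request needs the
 * interface, and drop it when done; the final xen_blkif_put() defers the
 * actual teardown to blkif->free_work.
 *
 *	xen_blkif_get(blkif);
 *	... hand blkif to a ring / kthread / in-flight request ...
 *	xen_blkif_put(blkif);	(may schedule blkif->free_work)
 */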

struct phys_req {
	unsigned short		dev;
	blkif_sector_t		nr_sects;
	struct block_device	*bdev;
	blkif_sector_t		sector_number;
};

int xen_blkif_interface_init(void);
void xen_blkif_interface_fini(void);

int xen_blkif_xenbus_init(void);
void xen_blkif_xenbus_fini(void);

irqreturn_t xen_blkif_be_int(int irq, void *dev_id);
int xen_blkif_schedule(void *arg);
int xen_blkif_purge_persistent(void *arg);
void xen_blkbk_free_caches(struct xen_blkif_ring *ring);

int xen_blkbk_flush_diskcache(struct xenbus_transaction xbt,
			      struct backend_info *be, int state);

int xen_blkbk_barrier(struct xenbus_transaction xbt,
		      struct backend_info *be, int state);
struct xenbus_device *xen_blkbk_xenbus(struct backend_info *be);
void xen_blkbk_unmap_purged_grants(struct work_struct *work);

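/*
 * Request copy helpers: both functions below snapshot a request out of the
 * shared ring into a private struct blkif_request before it is acted on.
 * The operation is read with READ_ONCE(), and barrier() keeps the compiler
 * from re-fetching frontend-writable fields (such as nr_segments) from
 * shared memory once they have been clamped, so a misbehaving frontend
 * cannot change a request after the backend has started validating it.
 */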
static inline void blkif_get_x86_32_req(struct blkif_request *dst,
					struct blkif_x86_32_request *src)
{
	int i, n = BLKIF_MAX_SEGMENTS_PER_REQUEST, j;
	dst->operation = READ_ONCE(src->operation);
	switch (dst->operation) {
	case BLKIF_OP_READ:
	case BLKIF_OP_WRITE:
	case BLKIF_OP_WRITE_BARRIER:
	case BLKIF_OP_FLUSH_DISKCACHE:
		dst->u.rw.nr_segments = src->u.rw.nr_segments;
		dst->u.rw.handle = src->u.rw.handle;
		dst->u.rw.id = src->u.rw.id;
		dst->u.rw.sector_number = src->u.rw.sector_number;
		barrier();
		if (n > dst->u.rw.nr_segments)
			n = dst->u.rw.nr_segments;
		for (i = 0; i < n; i++)
			dst->u.rw.seg[i] = src->u.rw.seg[i];
		break;
	case BLKIF_OP_DISCARD:
		dst->u.discard.flag = src->u.discard.flag;
		dst->u.discard.id = src->u.discard.id;
		dst->u.discard.sector_number = src->u.discard.sector_number;
		dst->u.discard.nr_sectors = src->u.discard.nr_sectors;
		break;
	case BLKIF_OP_INDIRECT:
		dst->u.indirect.indirect_op = src->u.indirect.indirect_op;
		dst->u.indirect.nr_segments = src->u.indirect.nr_segments;
		dst->u.indirect.handle = src->u.indirect.handle;
		dst->u.indirect.id = src->u.indirect.id;
		dst->u.indirect.sector_number = src->u.indirect.sector_number;
		barrier();
		j = min(MAX_INDIRECT_PAGES, INDIRECT_PAGES(dst->u.indirect.nr_segments));
		for (i = 0; i < j; i++)
			dst->u.indirect.indirect_grefs[i] =
				src->u.indirect.indirect_grefs[i];
		break;
	default:
		/*
		 * Don't know how to translate this op. Only get the
		 * ID so failure can be reported to the frontend.
		 */
		dst->u.other.id = src->u.other.id;
		break;
	}
}

static inline void blkif_get_x86_64_req(struct blkif_request *dst,
					struct blkif_x86_64_request *src)
{
	int i, n = BLKIF_MAX_SEGMENTS_PER_REQUEST, j;
	dst->operation = READ_ONCE(src->operation);
	switch (dst->operation) {
	case BLKIF_OP_READ:
	case BLKIF_OP_WRITE:
	case BLKIF_OP_WRITE_BARRIER:
	case BLKIF_OP_FLUSH_DISKCACHE:
		dst->u.rw.nr_segments = src->u.rw.nr_segments;
		dst->u.rw.handle = src->u.rw.handle;
		dst->u.rw.id = src->u.rw.id;
		dst->u.rw.sector_number = src->u.rw.sector_number;
		barrier();
		if (n > dst->u.rw.nr_segments)
			n = dst->u.rw.nr_segments;
		for (i = 0; i < n; i++)
			dst->u.rw.seg[i] = src->u.rw.seg[i];
		break;
	case BLKIF_OP_DISCARD:
		dst->u.discard.flag = src->u.discard.flag;
		dst->u.discard.id = src->u.discard.id;
		dst->u.discard.sector_number = src->u.discard.sector_number;
		dst->u.discard.nr_sectors = src->u.discard.nr_sectors;
		break;
	case BLKIF_OP_INDIRECT:
		dst->u.indirect.indirect_op = src->u.indirect.indirect_op;
		dst->u.indirect.nr_segments = src->u.indirect.nr_segments;
		dst->u.indirect.handle = src->u.indirect.handle;
		dst->u.indirect.id = src->u.indirect.id;
		dst->u.indirect.sector_number = src->u.indirect.sector_number;
		barrier();
		j = min(MAX_INDIRECT_PAGES, INDIRECT_PAGES(dst->u.indirect.nr_segments));
		for (i = 0; i < j; i++)
			dst->u.indirect.indirect_grefs[i] =
				src->u.indirect.indirect_grefs[i];
		break;
	default:
		/*
		 * Don't know how to translate this op. Only get the
		 * ID so failure can be reported to the frontend.
		 */
		dst->u.other.id = src->u.other.id;
		break;
	}
}

#endif /* __XEN_BLKIF__BACKEND__COMMON_H__ */