// SPDX-License-Identifier: GPL-2.0-only
/* sunvdc.c: Sun LDOM Virtual Disk Client.
 *
 * Copyright (C) 2007, 2008 David S. Miller <davem@davemloft.net>
 */

#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/blk-mq.h>
#include <linux/hdreg.h>
#include <linux/genhd.h>
#include <linux/cdrom.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/completion.h>
#include <linux/delay.h>
#include <linux/init.h>
#include <linux/list.h>
#include <linux/scatterlist.h>

#include <asm/vio.h>
#include <asm/ldc.h>

#define DRV_MODULE_NAME "sunvdc"
#define PFX DRV_MODULE_NAME ": "
#define DRV_MODULE_VERSION "1.2"
#define DRV_MODULE_RELDATE "November 24, 2014"

static char version[] =
	DRV_MODULE_NAME ".c:v" DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")\n";
MODULE_AUTHOR("David S. Miller (davem@davemloft.net)");
MODULE_DESCRIPTION("Sun LDOM virtual disk client driver");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_MODULE_VERSION);

#define VDC_TX_RING_SIZE 512
#define VDC_DEFAULT_BLK_SIZE 512

#define MAX_XFER_BLKS (128 * 1024)
#define MAX_XFER_SIZE (MAX_XFER_BLKS / VDC_DEFAULT_BLK_SIZE)
#define MAX_RING_COOKIES ((MAX_XFER_BLKS / PAGE_SIZE) + 2)

#define WAITING_FOR_LINK_UP 0x01
#define WAITING_FOR_TX_SPACE 0x02
#define WAITING_FOR_GEN_CMD 0x04
#define WAITING_FOR_ANY -1

#define VDC_MAX_RETRIES 10

static struct workqueue_struct *sunvdc_wq;

struct vdc_req_entry {
	struct request *req;
};

struct vdc_port {
	struct vio_driver_state vio;

	struct gendisk *disk;

	struct vdc_completion *cmp;

	u64 req_id;
	u64 seq;
	struct vdc_req_entry rq_arr[VDC_TX_RING_SIZE];

	unsigned long ring_cookies;

	u64 max_xfer_size;
	u32 vdisk_block_size;
	u32 drain;

	u64 ldc_timeout;
	struct delayed_work ldc_reset_timer_work;
	struct work_struct ldc_reset_work;

	/* The server fills these in for us in the disk attribute
	 * ACK packet.
	 */
	u64 operations;
	u32 vdisk_size;
	u8 vdisk_type;
	u8 vdisk_mtype;
	u32 vdisk_phys_blksz;

	struct blk_mq_tag_set tag_set;

	char disk_name[32];
};

static void vdc_ldc_reset(struct vdc_port *port);
static void vdc_ldc_reset_work(struct work_struct *work);
static void vdc_ldc_reset_timer_work(struct work_struct *work);

static inline struct vdc_port *to_vdc_port(struct vio_driver_state *vio)
{
	return container_of(vio, struct vdc_port, vio);
}

/* Ordered from largest major to lowest */
static struct vio_version vdc_versions[] = {
	{ .major = 1, .minor = 2 },
	{ .major = 1, .minor = 1 },
	{ .major = 1, .minor = 0 },
};

static inline int vdc_version_supported(struct vdc_port *port,
					u16 major, u16 minor)
{
	return port->vio.ver.major == major && port->vio.ver.minor >= minor;
}

#define VDCBLK_NAME "vdisk"
static int vdc_major;
#define PARTITION_SHIFT 3

static inline u32 vdc_tx_dring_avail(struct vio_dring_state *dr)
{
	return vio_dring_avail(dr, VDC_TX_RING_SIZE);
}

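/* Report a synthetic CHS geometry derived from the disk capacity:
 * 255 heads and 63 sectors per track, with the cylinder count clamped
 * to 0xffff when the capacity exceeds what that encoding can express.
 */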
static int vdc_getgeo(struct block_device *bdev, struct hd_geometry *geo)
{
	struct gendisk *disk = bdev->bd_disk;
	sector_t nsect = get_capacity(disk);
	sector_t cylinders = nsect;

	geo->heads = 0xff;
	geo->sectors = 0x3f;
	sector_div(cylinders, geo->heads * geo->sectors);
	geo->cylinders = cylinders;
	if ((sector_t)(geo->cylinders + 1) * geo->heads * geo->sectors < nsect)
		geo->cylinders = 0xffff;

	return 0;
}

/* Handle CDROM_GET_CAPABILITY so that udev's cdrom_id works when
 * vdisk_mtype is VD_MEDIA_TYPE_CD or VD_MEDIA_TYPE_DVD.  This is
 * needed to be able to install inside an LDOM from an ISO image.
 */
static int vdc_ioctl(struct block_device *bdev, fmode_t mode,
		     unsigned command, unsigned long argument)
{
	int i;
	struct gendisk *disk;

	switch (command) {
	case CDROMMULTISESSION:
		pr_debug(PFX "Multisession CDs not supported\n");
		for (i = 0; i < sizeof(struct cdrom_multisession); i++)
			if (put_user(0, (char __user *)(argument + i)))
				return -EFAULT;
		return 0;

	case CDROM_GET_CAPABILITY:
		disk = bdev->bd_disk;

		if (bdev->bd_disk && (disk->flags & GENHD_FL_CD))
			return 0;
		return -EINVAL;

	default:
		pr_debug(PFX "ioctl %08x not supported\n", command);
		return -EINVAL;
	}
}

static const struct block_device_operations vdc_fops = {
	.owner = THIS_MODULE,
	.getgeo = vdc_getgeo,
	.ioctl = vdc_ioctl,
	.compat_ioctl = blkdev_compat_ptr_ioctl,
};

static void vdc_blk_queue_start(struct vdc_port *port)
{
	struct vio_dring_state *dr = &port->vio.drings[VIO_DRIVER_TX_RING];

	/* Restart the blk queue when the ring is half emptied.  This is
	 * also called after the handshake completes, which can happen
	 * before we have allocated a disk, hence the port->disk check.
	 */
	if (port->disk && vdc_tx_dring_avail(dr) * 100 / VDC_TX_RING_SIZE >= 50)
		blk_mq_start_stopped_hw_queues(port->disk->queue, true);
}

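/* Complete the waiter hanging off vio->cmp, if any, provided its
 * waiting_for reason matches the one given (WAITING_FOR_ANY, i.e. -1,
 * matches every waiter).
 */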
static void vdc_finish(struct vio_driver_state *vio, int err, int waiting_for)
{
	if (vio->cmp &&
	    (waiting_for == -1 ||
	     vio->cmp->waiting_for == waiting_for)) {
		vio->cmp->err = err;
		complete(&vio->cmp->com);
		vio->cmp = NULL;
	}
}

static void vdc_handshake_complete(struct vio_driver_state *vio)
{
	struct vdc_port *port = to_vdc_port(vio);

	cancel_delayed_work(&port->ldc_reset_timer_work);
	vdc_finish(vio, 0, WAITING_FOR_LINK_UP);
	vdc_blk_queue_start(port);
}

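/* An unrecognized message leaves the protocol state uncertain, so log
 * it, drop the LDC link and return -ECONNRESET to force a
 * renegotiation.
 */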
static int vdc_handle_unknown(struct vdc_port *port, void *arg)
{
	struct vio_msg_tag *pkt = arg;

	printk(KERN_ERR PFX "Received unknown msg [%02x:%02x:%04x:%08x]\n",
	       pkt->type, pkt->stype, pkt->stype_env, pkt->sid);
	printk(KERN_ERR PFX "Resetting connection.\n");

	ldc_disconnect(port->vio.lp);

	return -ECONNRESET;
}

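/* Send our disk attribute INFO packet (dring transfer mode, block
 * size and maximum transfer size) to the server during the handshake.
 */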
static int vdc_send_attr(struct vio_driver_state *vio)
{
	struct vdc_port *port = to_vdc_port(vio);
	struct vio_disk_attr_info pkt;

	memset(&pkt, 0, sizeof(pkt));

	pkt.tag.type = VIO_TYPE_CTRL;
	pkt.tag.stype = VIO_SUBTYPE_INFO;
	pkt.tag.stype_env = VIO_ATTR_INFO;
	pkt.tag.sid = vio_send_sid(vio);

	pkt.xfer_mode = VIO_DRING_MODE;
	pkt.vdisk_block_size = port->vdisk_block_size;
	pkt.max_xfer_size = port->max_xfer_size;

	viodbg(HS, "SEND ATTR xfer_mode[0x%x] blksz[%u] max_xfer[%llu]\n",
	       pkt.xfer_mode, pkt.vdisk_block_size, pkt.max_xfer_size);

	return vio_ldc_send(&port->vio, &pkt, sizeof(pkt));
}

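/* Process the server's disk attribute reply: on ACK, validate the
 * advertised parameters and store them in the vdc_port; on NACK, fail
 * the handshake with -ECONNRESET.
 */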
static int vdc_handle_attr(struct vio_driver_state *vio, void *arg)
{
	struct vdc_port *port = to_vdc_port(vio);
	struct vio_disk_attr_info *pkt = arg;

	viodbg(HS, "GOT ATTR stype[0x%x] ops[%llx] disk_size[%llu] disk_type[%x] "
	       "mtype[0x%x] xfer_mode[0x%x] blksz[%u] max_xfer[%llu]\n",
	       pkt->tag.stype, pkt->operations,
	       pkt->vdisk_size, pkt->vdisk_type, pkt->vdisk_mtype,
	       pkt->xfer_mode, pkt->vdisk_block_size,
	       pkt->max_xfer_size);

	if (pkt->tag.stype == VIO_SUBTYPE_ACK) {
		switch (pkt->vdisk_type) {
		case VD_DISK_TYPE_DISK:
		case VD_DISK_TYPE_SLICE:
			break;

		default:
			printk(KERN_ERR PFX "%s: Bogus vdisk_type 0x%x\n",
			       vio->name, pkt->vdisk_type);
			return -ECONNRESET;
		}

		if (pkt->vdisk_block_size > port->vdisk_block_size) {
			printk(KERN_ERR PFX "%s: BLOCK size increased "
			       "%u --> %u\n",
			       vio->name,
			       port->vdisk_block_size, pkt->vdisk_block_size);
			return -ECONNRESET;
		}

		port->operations = pkt->operations;
		port->vdisk_type = pkt->vdisk_type;
		if (vdc_version_supported(port, 1, 1)) {
			port->vdisk_size = pkt->vdisk_size;
			port->vdisk_mtype = pkt->vdisk_mtype;
		}
		if (pkt->max_xfer_size < port->max_xfer_size)
			port->max_xfer_size = pkt->max_xfer_size;
		port->vdisk_block_size = pkt->vdisk_block_size;

		port->vdisk_phys_blksz = VDC_DEFAULT_BLK_SIZE;
		if (vdc_version_supported(port, 1, 2))
			port->vdisk_phys_blksz = pkt->phys_block_size;

		return 0;
	} else {
		printk(KERN_ERR PFX "%s: Attribute NACK\n", vio->name);

		return -ECONNRESET;
	}
}

static void vdc_end_special(struct vdc_port *port, struct vio_disk_desc *desc)
{
	int err = desc->status;

	vdc_finish(&port->vio, -err, WAITING_FOR_GEN_CMD);
}

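/* Retire one completed descriptor: unmap its cookies, mark it free,
 * advance the consumer index, and either complete the associated
 * block request or finish a pending special (generic) command.
 */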
static void vdc_end_one(struct vdc_port *port, struct vio_dring_state *dr,
			unsigned int index)
{
	struct vio_disk_desc *desc = vio_dring_entry(dr, index);
	struct vdc_req_entry *rqe = &port->rq_arr[index];
	struct request *req;

	if (unlikely(desc->hdr.state != VIO_DESC_DONE))
		return;

	ldc_unmap(port->vio.lp, desc->cookies, desc->ncookies);
	desc->hdr.state = VIO_DESC_FREE;
	dr->cons = vio_dring_next(dr, index);

	req = rqe->req;
	if (req == NULL) {
		vdc_end_special(port, desc);
		return;
	}

	rqe->req = NULL;

	blk_mq_end_request(req, desc->status ? BLK_STS_IOERR : 0);

	vdc_blk_queue_start(port);
}

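/* Handle a dring data ACK: sanity-check the identity and index of the
 * acknowledged entry, then retire it.
 */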
static int vdc_ack(struct vdc_port *port, void *msgbuf)
{
	struct vio_dring_state *dr = &port->vio.drings[VIO_DRIVER_TX_RING];
	struct vio_dring_data *pkt = msgbuf;

	if (unlikely(pkt->dring_ident != dr->ident ||
		     pkt->start_idx != pkt->end_idx ||
		     pkt->start_idx >= VDC_TX_RING_SIZE))
		return 0;

	vdc_end_one(port, dr, pkt->start_idx);

	return 0;
}

static int vdc_nack(struct vdc_port *port, void *msgbuf)
{
	/* XXX Implement me XXX */
	return 0;
}

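/* LDC event callback, invoked for link state changes and incoming
 * data.  RESET is deferred to the reset workqueue; DATA_READY drains
 * all queued messages and dispatches them to the ACK/NACK/control
 * handlers under vio->lock.
 */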
static void vdc_event(void *arg, int event)
{
	struct vdc_port *port = arg;
	struct vio_driver_state *vio = &port->vio;
	unsigned long flags;
	int err;

	spin_lock_irqsave(&vio->lock, flags);

	if (unlikely(event == LDC_EVENT_RESET)) {
		vio_link_state_change(vio, event);
		queue_work(sunvdc_wq, &port->ldc_reset_work);
		goto out;
	}

	if (unlikely(event == LDC_EVENT_UP)) {
		vio_link_state_change(vio, event);
		goto out;
	}

	if (unlikely(event != LDC_EVENT_DATA_READY)) {
		pr_warn(PFX "Unexpected LDC event %d\n", event);
		goto out;
	}

	err = 0;
	while (1) {
		union {
			struct vio_msg_tag tag;
			u64 raw[8];
		} msgbuf;

		err = ldc_read(vio->lp, &msgbuf, sizeof(msgbuf));
		if (unlikely(err < 0)) {
			if (err == -ECONNRESET)
				vio_conn_reset(vio);
			break;
		}
		if (err == 0)
			break;
		viodbg(DATA, "TAG [%02x:%02x:%04x:%08x]\n",
		       msgbuf.tag.type,
		       msgbuf.tag.stype,
		       msgbuf.tag.stype_env,
		       msgbuf.tag.sid);
		err = vio_validate_sid(vio, &msgbuf.tag);
		if (err < 0)
			break;

		if (likely(msgbuf.tag.type == VIO_TYPE_DATA)) {
			if (msgbuf.tag.stype == VIO_SUBTYPE_ACK)
				err = vdc_ack(port, &msgbuf);
			else if (msgbuf.tag.stype == VIO_SUBTYPE_NACK)
				err = vdc_nack(port, &msgbuf);
			else
				err = vdc_handle_unknown(port, &msgbuf);
		} else if (msgbuf.tag.type == VIO_TYPE_CTRL) {
			err = vio_control_pkt_engine(vio, &msgbuf);
		} else {
			err = vdc_handle_unknown(port, &msgbuf);
		}
		if (err < 0)
			break;
	}
	if (err < 0)
		vdc_finish(&port->vio, err, WAITING_FOR_ANY);
out:
	spin_unlock_irqrestore(&vio->lock, flags);
}

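/* Notify the server that a new descriptor is ready, retrying with
 * exponential backoff (capped at 128us and VDC_MAX_RETRIES attempts)
 * while the LDC channel reports -EAGAIN.  Caller holds vio->lock.
 */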
static int __vdc_tx_trigger(struct vdc_port *port)
{
	struct vio_dring_state *dr = &port->vio.drings[VIO_DRIVER_TX_RING];
	struct vio_dring_data hdr = {
		.tag = {
			.type = VIO_TYPE_DATA,
			.stype = VIO_SUBTYPE_INFO,
			.stype_env = VIO_DRING_DATA,
			.sid = vio_send_sid(&port->vio),
		},
		.dring_ident = dr->ident,
		.start_idx = dr->prod,
		.end_idx = dr->prod,
	};
	int err, delay;
	int retries = 0;

	hdr.seq = dr->snd_nxt;
	delay = 1;
	do {
		err = vio_ldc_send(&port->vio, &hdr, sizeof(hdr));
		if (err > 0) {
			dr->snd_nxt++;
			break;
		}
		udelay(delay);
		if ((delay <<= 1) > 128)
			delay = 128;
		if (retries++ > VDC_MAX_RETRIES)
			break;
	} while (err == -EAGAIN);

	if (err == -ENOTCONN)
		vdc_ldc_reset(port);
	return err;
}

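/* Map a block request into the current TX descriptor and kick the
 * server.  Must be called with vio->lock held and with at least one
 * free ring entry available.
 */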
static int __send_request(struct request *req)
{
	struct vdc_port *port = req->rq_disk->private_data;
	struct vio_dring_state *dr = &port->vio.drings[VIO_DRIVER_TX_RING];
	struct scatterlist sg[MAX_RING_COOKIES];
	struct vdc_req_entry *rqe;
	struct vio_disk_desc *desc;
	unsigned int map_perm;
	int nsg, err, i;
	u64 len;
	u8 op;

	if (WARN_ON(port->ring_cookies > MAX_RING_COOKIES))
		return -EINVAL;

	map_perm = LDC_MAP_SHADOW | LDC_MAP_DIRECT | LDC_MAP_IO;

	if (rq_data_dir(req) == READ) {
		map_perm |= LDC_MAP_W;
		op = VD_OP_BREAD;
	} else {
		map_perm |= LDC_MAP_R;
		op = VD_OP_BWRITE;
	}

	sg_init_table(sg, port->ring_cookies);
	nsg = blk_rq_map_sg(req->q, req, sg);

	len = 0;
	for (i = 0; i < nsg; i++)
		len += sg[i].length;

	desc = vio_dring_cur(dr);

	err = ldc_map_sg(port->vio.lp, sg, nsg,
			 desc->cookies, port->ring_cookies,
			 map_perm);
	if (err < 0) {
		printk(KERN_ERR PFX "ldc_map_sg() failure, err=%d.\n", err);
		return err;
	}

	rqe = &port->rq_arr[dr->prod];
	rqe->req = req;

	desc->hdr.ack = VIO_ACK_ENABLE;
	desc->req_id = port->req_id;
	desc->operation = op;
	if (port->vdisk_type == VD_DISK_TYPE_DISK) {
		desc->slice = 0xff;
	} else {
		desc->slice = 0;
	}
	desc->status = ~0;
	desc->offset = (blk_rq_pos(req) << 9) / port->vdisk_block_size;
	desc->size = len;
	desc->ncookies = err;

	/* This has to be a non-SMP write barrier because we are writing
	 * to memory which is shared with the peer LDOM.
	 */
	wmb();
	desc->hdr.state = VIO_DESC_READY;

	err = __vdc_tx_trigger(port);
	if (err < 0) {
		printk(KERN_ERR PFX "vdc_tx_trigger() failure, err=%d\n", err);
	} else {
		port->req_id++;
		dr->prod = vio_dring_next(dr, dr->prod);
	}

	return err;
}

static blk_status_t vdc_queue_rq(struct blk_mq_hw_ctx *hctx,
				 const struct blk_mq_queue_data *bd)
{
	struct vdc_port *port = hctx->queue->queuedata;
	struct vio_dring_state *dr;
	unsigned long flags;

	dr = &port->vio.drings[VIO_DRIVER_TX_RING];

	blk_mq_start_request(bd->rq);

	spin_lock_irqsave(&port->vio.lock, flags);

	/*
	 * Doing drain, just end the request in error
	 */
	if (unlikely(port->drain)) {
		spin_unlock_irqrestore(&port->vio.lock, flags);
		return BLK_STS_IOERR;
	}

	if (unlikely(vdc_tx_dring_avail(dr) < 1)) {
		spin_unlock_irqrestore(&port->vio.lock, flags);
		blk_mq_stop_hw_queue(hctx);
		return BLK_STS_DEV_RESOURCE;
	}

	if (__send_request(bd->rq) < 0) {
		spin_unlock_irqrestore(&port->vio.lock, flags);
		return BLK_STS_IOERR;
	}

	spin_unlock_irqrestore(&port->vio.lock, flags);
	return BLK_STS_OK;
}

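/* Issue a synchronous, non-block-I/O disk operation (flush, WCE,
 * VTOC, geometry, etc.) through the dring: bounce the caller's buffer
 * through a mapped kzalloc'd region, post one descriptor, and sleep
 * until the ACK completes it.
 */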
static int generic_request(struct vdc_port *port, u8 op, void *buf, int len)
{
	struct vio_dring_state *dr;
	struct vio_completion comp;
	struct vio_disk_desc *desc;
	unsigned int map_perm;
	unsigned long flags;
	int op_len, err;
	void *req_buf;

	if (!(((u64)1 << (u64)op) & port->operations))
		return -EOPNOTSUPP;

	switch (op) {
	case VD_OP_BREAD:
	case VD_OP_BWRITE:
	default:
		return -EINVAL;

	case VD_OP_FLUSH:
		op_len = 0;
		map_perm = 0;
		break;

	case VD_OP_GET_WCE:
		op_len = sizeof(u32);
		map_perm = LDC_MAP_W;
		break;

	case VD_OP_SET_WCE:
		op_len = sizeof(u32);
		map_perm = LDC_MAP_R;
		break;

	case VD_OP_GET_VTOC:
		op_len = sizeof(struct vio_disk_vtoc);
		map_perm = LDC_MAP_W;
		break;

	case VD_OP_SET_VTOC:
		op_len = sizeof(struct vio_disk_vtoc);
		map_perm = LDC_MAP_R;
		break;

	case VD_OP_GET_DISKGEOM:
		op_len = sizeof(struct vio_disk_geom);
		map_perm = LDC_MAP_W;
		break;

	case VD_OP_SET_DISKGEOM:
		op_len = sizeof(struct vio_disk_geom);
		map_perm = LDC_MAP_R;
		break;

	case VD_OP_SCSICMD:
		op_len = 16;
		map_perm = LDC_MAP_RW;
		break;

	case VD_OP_GET_DEVID:
		op_len = sizeof(struct vio_disk_devid);
		map_perm = LDC_MAP_W;
		break;

	case VD_OP_GET_EFI:
	case VD_OP_SET_EFI:
		return -EOPNOTSUPP;
	}

	map_perm |= LDC_MAP_SHADOW | LDC_MAP_DIRECT | LDC_MAP_IO;

	op_len = (op_len + 7) & ~7;
	req_buf = kzalloc(op_len, GFP_KERNEL);
	if (!req_buf)
		return -ENOMEM;

	if (len > op_len)
		len = op_len;

	if (map_perm & LDC_MAP_R)
		memcpy(req_buf, buf, len);

	spin_lock_irqsave(&port->vio.lock, flags);

	dr = &port->vio.drings[VIO_DRIVER_TX_RING];

	/* XXX If we want to use this code generically we have to
	 * XXX handle TX ring exhaustion etc.
	 */
	desc = vio_dring_cur(dr);

	err = ldc_map_single(port->vio.lp, req_buf, op_len,
			     desc->cookies, port->ring_cookies,
			     map_perm);
	if (err < 0) {
		spin_unlock_irqrestore(&port->vio.lock, flags);
		kfree(req_buf);
		return err;
	}

	init_completion(&comp.com);
	comp.waiting_for = WAITING_FOR_GEN_CMD;
	port->vio.cmp = &comp;

	desc->hdr.ack = VIO_ACK_ENABLE;
	desc->req_id = port->req_id;
	desc->operation = op;
	desc->slice = 0;
	desc->status = ~0;
	desc->offset = 0;
	desc->size = op_len;
	desc->ncookies = err;

	/* This has to be a non-SMP write barrier because we are writing
	 * to memory which is shared with the peer LDOM.
	 */
	wmb();
	desc->hdr.state = VIO_DESC_READY;

	err = __vdc_tx_trigger(port);
	if (err >= 0) {
		port->req_id++;
		dr->prod = vio_dring_next(dr, dr->prod);
		spin_unlock_irqrestore(&port->vio.lock, flags);

		wait_for_completion(&comp.com);
		err = comp.err;
	} else {
		port->vio.cmp = NULL;
		spin_unlock_irqrestore(&port->vio.lock, flags);
	}

	if (map_perm & LDC_MAP_W)
		memcpy(buf, req_buf, len);

	kfree(req_buf);

	return err;
}

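/* Allocate and export the TX descriptor ring.  Each entry carries
 * space for up to ring_cookies transfer cookies after the fixed
 * vio_disk_desc header.
 */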
static int vdc_alloc_tx_ring(struct vdc_port *port)
{
	struct vio_dring_state *dr = &port->vio.drings[VIO_DRIVER_TX_RING];
	unsigned long len, entry_size;
	int ncookies;
	void *dring;

	entry_size = sizeof(struct vio_disk_desc) +
		(sizeof(struct ldc_trans_cookie) * port->ring_cookies);
	len = (VDC_TX_RING_SIZE * entry_size);

	ncookies = VIO_MAX_RING_COOKIES;
	dring = ldc_alloc_exp_dring(port->vio.lp, len,
				    dr->cookies, &ncookies,
				    (LDC_MAP_SHADOW |
				     LDC_MAP_DIRECT |
				     LDC_MAP_RW));
	if (IS_ERR(dring))
		return PTR_ERR(dring);

	dr->base = dring;
	dr->entry_size = entry_size;
	dr->num_entries = VDC_TX_RING_SIZE;
	dr->prod = dr->cons = 0;
	dr->pending = VDC_TX_RING_SIZE;
	dr->ncookies = ncookies;

	return 0;
}

static void vdc_free_tx_ring(struct vdc_port *port)
{
	struct vio_dring_state *dr = &port->vio.drings[VIO_DRIVER_TX_RING];

	if (dr->base) {
		ldc_free_exp_dring(port->vio.lp, dr->base,
				   (dr->entry_size * dr->num_entries),
				   dr->cookies, dr->ncookies);
		dr->base = NULL;
		dr->entry_size = 0;
		dr->num_entries = 0;
		dr->pending = 0;
		dr->ncookies = 0;
	}
}

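/* Bring the LDC link up and block until the handshake finishes;
 * returns the completion's error code (0 on success).
 */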
static int vdc_port_up(struct vdc_port *port)
{
	struct vio_completion comp;

	init_completion(&comp.com);
	comp.err = 0;
	comp.waiting_for = WAITING_FOR_LINK_UP;
	port->vio.cmp = &comp;

	vio_port_up(&port->vio);
	wait_for_completion(&comp.com);
	return comp.err;
}

static void vdc_port_down(struct vdc_port *port)
{
	ldc_disconnect(port->vio.lp);
	ldc_unbind(port->vio.lp);
	vdc_free_tx_ring(port);
	vio_ldc_free(&port->vio);
}

static const struct blk_mq_ops vdc_mq_ops = {
	.queue_rq = vdc_queue_rq,
};

static void cleanup_queue(struct request_queue *q)
{
	struct vdc_port *port = q->queuedata;

	blk_cleanup_queue(q);
	blk_mq_free_tag_set(&port->tag_set);
}

static struct request_queue *init_queue(struct vdc_port *port)
{
	struct request_queue *q;

	q = blk_mq_init_sq_queue(&port->tag_set, &vdc_mq_ops, VDC_TX_RING_SIZE,
				 BLK_MQ_F_SHOULD_MERGE);
	if (IS_ERR(q))
		return q;

	q->queuedata = port;
	return q;
}

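/* Bring the port up, determine the disk size (from the handshake on
 * v1.1+, otherwise from VD_OP_GET_DISKGEOM), then set up the request
 * queue and gendisk.
 */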
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 804) static int probe_disk(struct vdc_port *port)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 805) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 806) struct request_queue *q;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 807) struct gendisk *g;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 808) int err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 809)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 810) err = vdc_port_up(port);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 811) if (err)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 812) return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 813)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 814) /* Using version 1.2 means vdisk_phys_blksz should be set unless the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 815) * disk is reserved by another system.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 816) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 817) if (vdc_version_supported(port, 1, 2) && !port->vdisk_phys_blksz)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 818) return -ENODEV;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 819)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 820) if (vdc_version_supported(port, 1, 1)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 821) /* vdisk_size should be set during the handshake, if it wasn't
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 822) * then the underlying disk is reserved by another system
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 823) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 824) if (port->vdisk_size == -1)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 825) return -ENODEV;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 826) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 827) struct vio_disk_geom geom;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 828)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 829) err = generic_request(port, VD_OP_GET_DISKGEOM,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 830) &geom, sizeof(geom));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 831) if (err < 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 832) printk(KERN_ERR PFX "VD_OP_GET_DISKGEOM returns "
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 833) "error %d\n", err);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 834) return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 835) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 836) port->vdisk_size = ((u64)geom.num_cyl *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 837) (u64)geom.num_hd *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 838) (u64)geom.num_sec);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 839) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 840)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 841) q = init_queue(port);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 842) if (IS_ERR(q)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 843) printk(KERN_ERR PFX "%s: Could not allocate queue.\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 844) port->vio.name);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 845) return PTR_ERR(q);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 846) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 847) g = alloc_disk(1 << PARTITION_SHIFT);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 848) if (!g) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 849) printk(KERN_ERR PFX "%s: Could not allocate gendisk.\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 850) port->vio.name);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 851) cleanup_queue(q);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 852) return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 853) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 854)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 855) port->disk = g;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 856)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 857) /* Each segment in a request is up to an aligned page in size. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 858) blk_queue_segment_boundary(q, PAGE_SIZE - 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 859) blk_queue_max_segment_size(q, PAGE_SIZE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 860)
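	/* Bound each request by what a single descriptor can map: one
	 * ring cookie per segment and at most max_xfer_size sectors.
	 */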
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 861) blk_queue_max_segments(q, port->ring_cookies);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 862) blk_queue_max_hw_sectors(q, port->max_xfer_size);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 863) g->major = vdc_major;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 864) g->first_minor = port->vio.vdev->dev_no << PARTITION_SHIFT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 865) strcpy(g->disk_name, port->disk_name);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 866)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 867) g->fops = &vdc_fops;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 868) g->queue = q;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 869) g->private_data = port;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 870)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 871) set_capacity(g, port->vdisk_size);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 872)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 873) if (vdc_version_supported(port, 1, 1)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 874) switch (port->vdisk_mtype) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 875) case VD_MEDIA_TYPE_CD:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 876) pr_info(PFX "Virtual CDROM %s\n", port->disk_name);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 877) g->flags |= GENHD_FL_CD;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 878) g->flags |= GENHD_FL_REMOVABLE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 879) set_disk_ro(g, 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 880) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 881)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 882) case VD_MEDIA_TYPE_DVD:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 883) pr_info(PFX "Virtual DVD %s\n", port->disk_name);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 884) g->flags |= GENHD_FL_CD;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 885) g->flags |= GENHD_FL_REMOVABLE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 886) set_disk_ro(g, 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 887) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 888)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 889) case VD_MEDIA_TYPE_FIXED:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 890) pr_info(PFX "Virtual Hard disk %s\n", port->disk_name);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 891) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 892) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 893) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 894)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 895) blk_queue_physical_block_size(q, port->vdisk_phys_blksz);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 896)
	pr_info(PFX "%s: %llu sectors (%llu MB) protocol %d.%d\n",
		g->disk_name,
		port->vdisk_size, (port->vdisk_size >> (20 - 9)),
		port->vio.ver.major, port->vio.ver.minor);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 901)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 902) device_add_disk(&port->vio.vdev->dev, g, NULL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 903)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 904) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 905) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 906)
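/* The LDC channel itself carries only small VIO control and dring
 * messages; bulk data presumably moves through the mapped ring cookies
 * rather than over the channel, hence the tiny MTU and unreliable mode.
 */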
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 907) static struct ldc_channel_config vdc_ldc_cfg = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 908) .event = vdc_event,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 909) .mtu = 64,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 910) .mode = LDC_MODE_UNRELIABLE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 911) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 912)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 913) static struct vio_driver_ops vdc_vio_ops = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 914) .send_attr = vdc_send_attr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 915) .handle_attr = vdc_handle_attr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 916) .handshake_complete = vdc_handshake_complete,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 917) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 918)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 919) static void print_version(void)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 920) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 921) static int version_printed;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 922)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 923) if (version_printed++ == 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 924) printk(KERN_INFO "%s", version);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 925) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 926)
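/* Match context handed to vdc_device_probed() via device_find_child(). */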
struct vdc_check_port_data {
	u64	dev_no;
	char	*type;
};
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 931)
static int vdc_device_probed(struct device *dev, void *arg)
{
	struct vio_dev *vdev = to_vio_dev(dev);
	struct vdc_check_port_data *port_data = arg;

	/* This device has already been configured by vdc_port_probe()
	 * if its driver data is set (it is only set on success).
	 */
	if (vdev->dev_no == port_data->dev_no &&
	    !strcmp((char *)&vdev->type, port_data->type) &&
	    dev_get_drvdata(dev))
		return 1;

	return 0;
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 950)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 951) /* Determine whether the VIO device is part of an mpgroup
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 952) * by locating all the virtual-device-port nodes associated
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 953) * with the parent virtual-device node for the VIO device
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 954) * and checking whether any of these nodes are vdc-ports
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 955) * which have already been configured.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 956) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 957) * Returns true if this device is part of an mpgroup and has
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 958) * already been probed.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 959) */
static bool vdc_port_mpgroup_check(struct vio_dev *vdev)
{
	struct vdc_check_port_data port_data;
	struct device *dev;

	port_data.dev_no = vdev->dev_no;
	port_data.type = (char *)&vdev->type;

	dev = device_find_child(vdev->dev.parent, &port_data,
				vdc_device_probed);
	if (dev) {
		/* device_find_child() takes a reference on the match. */
		put_device(dev);
		return true;
	}

	return false;
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 976)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 977) static int vdc_port_probe(struct vio_dev *vdev, const struct vio_device_id *id)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 978) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 979) struct mdesc_handle *hp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 980) struct vdc_port *port;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 981) int err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 982) const u64 *ldc_timeout;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 983)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 984) print_version();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 985)
	hp = mdesc_grab();
	if (!hp)
		return -ENODEV;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 987)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 988) err = -ENODEV;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 989) if ((vdev->dev_no << PARTITION_SHIFT) & ~(u64)MINORMASK) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 990) printk(KERN_ERR PFX "Port id [%llu] too large.\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 991) vdev->dev_no);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 992) goto err_out_release_mdesc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 993) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 994)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 995) /* Check if this device is part of an mpgroup */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 996) if (vdc_port_mpgroup_check(vdev)) {
		printk(KERN_WARNING
		       "VIO: Ignoring extra vdisk port %s\n",
		       dev_name(&vdev->dev));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1000) goto err_out_release_mdesc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1001) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1002)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1003) port = kzalloc(sizeof(*port), GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1004) err = -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1005) if (!port) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1006) printk(KERN_ERR PFX "Cannot allocate vdc_port.\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1007) goto err_out_release_mdesc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1008) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1009)
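	/* Name disks like sd does: a single-letter suffix for dev_no
	 * 0-25 and two-letter suffixes ("aa", "ab", ...) above that.
	 */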
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1010) if (vdev->dev_no >= 26)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1011) snprintf(port->disk_name, sizeof(port->disk_name),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1012) VDCBLK_NAME "%c%c",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1013) 'a' + ((int)vdev->dev_no / 26) - 1,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1014) 'a' + ((int)vdev->dev_no % 26));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1015) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1016) snprintf(port->disk_name, sizeof(port->disk_name),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1017) VDCBLK_NAME "%c", 'a' + ((int)vdev->dev_no % 26));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1018) port->vdisk_size = -1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1019)
	/* The actual wall time may be up to double this timeout:
	 * do_generic_file_read() first issues a readahead I/O, and only
	 * after that fails does it retry with a single-page read.
	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1024) ldc_timeout = mdesc_get_property(hp, vdev->mp, "vdc-timeout", NULL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1025) port->ldc_timeout = ldc_timeout ? *ldc_timeout : 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1026) INIT_DELAYED_WORK(&port->ldc_reset_timer_work, vdc_ldc_reset_timer_work);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1027) INIT_WORK(&port->ldc_reset_work, vdc_ldc_reset_work);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1028)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1029) err = vio_driver_init(&port->vio, vdev, VDEV_DISK,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1030) vdc_versions, ARRAY_SIZE(vdc_versions),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1031) &vdc_vio_ops, port->disk_name);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1032) if (err)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1033) goto err_out_free_port;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1034)
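	/* Initial defaults; the attribute exchange during the handshake
	 * may overwrite these with values advertised by the vdisk server.
	 */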
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1035) port->vdisk_block_size = VDC_DEFAULT_BLK_SIZE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1036) port->max_xfer_size = MAX_XFER_SIZE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1037) port->ring_cookies = MAX_RING_COOKIES;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1038)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1039) err = vio_ldc_alloc(&port->vio, &vdc_ldc_cfg, port);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1040) if (err)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1041) goto err_out_free_port;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1042)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1043) err = vdc_alloc_tx_ring(port);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1044) if (err)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1045) goto err_out_free_ldc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1046)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1047) err = probe_disk(port);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1048) if (err)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1049) goto err_out_free_tx_ring;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1050)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1051) /* Note that the device driver_data is used to determine
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1052) * whether the port has been probed.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1053) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1054) dev_set_drvdata(&vdev->dev, port);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1055)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1056) mdesc_release(hp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1057)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1058) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1059)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1060) err_out_free_tx_ring:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1061) vdc_free_tx_ring(port);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1062)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1063) err_out_free_ldc:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1064) vio_ldc_free(&port->vio);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1065)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1066) err_out_free_port:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1067) kfree(port);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1068)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1069) err_out_release_mdesc:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1070) mdesc_release(hp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1071) return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1072) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1073)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1074) static int vdc_port_remove(struct vio_dev *vdev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1075) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1076) struct vdc_port *port = dev_get_drvdata(&vdev->dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1077)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1078) if (port) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1079) blk_mq_stop_hw_queues(port->disk->queue);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1080)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1081) flush_work(&port->ldc_reset_work);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1082) cancel_delayed_work_sync(&port->ldc_reset_timer_work);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1083) del_timer_sync(&port->vio.timer);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1084)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1085) del_gendisk(port->disk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1086) cleanup_queue(port->disk->queue);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1087) put_disk(port->disk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1088) port->disk = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1089)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1090) vdc_free_tx_ring(port);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1091) vio_ldc_free(&port->vio);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1092)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1093) dev_set_drvdata(&vdev->dev, NULL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1094)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1095) kfree(port);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1096) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1097) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1098) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1099)
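/* Walk the TX ring between cons and prod, unmapping the LDC cookies of
 * every in-flight descriptor. Block requests are handed back to blk-mq
 * for requeueing; special (non-fs) commands are finished via
 * vdc_end_special().
 */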
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1100) static void vdc_requeue_inflight(struct vdc_port *port)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1101) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1102) struct vio_dring_state *dr = &port->vio.drings[VIO_DRIVER_TX_RING];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1103) u32 idx;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1104)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1105) for (idx = dr->cons; idx != dr->prod; idx = vio_dring_next(dr, idx)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1106) struct vio_disk_desc *desc = vio_dring_entry(dr, idx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1107) struct vdc_req_entry *rqe = &port->rq_arr[idx];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1108) struct request *req;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1109)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1110) ldc_unmap(port->vio.lp, desc->cookies, desc->ncookies);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1111) desc->hdr.state = VIO_DESC_FREE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1112) dr->cons = vio_dring_next(dr, idx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1113)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1114) req = rqe->req;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1115) if (req == NULL) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1116) vdc_end_special(port, desc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1117) continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1118) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1119)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1120) rqe->req = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1121) blk_mq_requeue_request(req, false);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1122) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1123) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1124)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1125) static void vdc_queue_drain(struct vdc_port *port)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1126) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1127) struct request_queue *q = port->disk->queue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1128)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1129) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1130) * Mark the queue as draining, then freeze/quiesce to ensure
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1131) * that all existing requests are seen in ->queue_rq() and killed
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1132) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1133) port->drain = 1;
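	/* blk_mq_freeze_queue() and blk_mq_quiesce_queue() may sleep, so
	 * the vio lock must be dropped while they run.
	 */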
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1134) spin_unlock_irq(&port->vio.lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1135)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1136) blk_mq_freeze_queue(q);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1137) blk_mq_quiesce_queue(q);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1138)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1139) spin_lock_irq(&port->vio.lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1140) port->drain = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1141) blk_mq_unquiesce_queue(q);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1142) blk_mq_unfreeze_queue(q);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1143) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1144)
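/* Runs ldc_timeout seconds after an LDC reset was initiated: if the
 * handshake still has not completed, give up on the outstanding I/O by
 * draining the queue, then restart the blk-mq hw queues.
 */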
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1145) static void vdc_ldc_reset_timer_work(struct work_struct *work)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1146) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1147) struct vdc_port *port;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1148) struct vio_driver_state *vio;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1149)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1150) port = container_of(work, struct vdc_port, ldc_reset_timer_work.work);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1151) vio = &port->vio;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1152)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1153) spin_lock_irq(&vio->lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1154) if (!(port->vio.hs_state & VIO_HS_COMPLETE)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1155) pr_warn(PFX "%s ldc down %llu seconds, draining queue\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1156) port->disk_name, port->ldc_timeout);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1157) vdc_queue_drain(port);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1158) vdc_blk_queue_start(port);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1159) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1160) spin_unlock_irq(&vio->lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1161) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1162)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1163) static void vdc_ldc_reset_work(struct work_struct *work)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1164) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1165) struct vdc_port *port;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1166) struct vio_driver_state *vio;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1167) unsigned long flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1168)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1169) port = container_of(work, struct vdc_port, ldc_reset_work);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1170) vio = &port->vio;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1171)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1172) spin_lock_irqsave(&vio->lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1173) vdc_ldc_reset(port);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1174) spin_unlock_irqrestore(&vio->lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1175) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1176)
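/* Tear down and rebuild the LDC channel and TX ring after a link reset;
 * must be called with vio->lock held (asserted below). The VIO timer
 * re-armed at the end drives the handshake back up.
 */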
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1177) static void vdc_ldc_reset(struct vdc_port *port)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1178) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1179) int err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1180)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1181) assert_spin_locked(&port->vio.lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1182)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1183) pr_warn(PFX "%s ldc link reset\n", port->disk_name);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1184) blk_mq_stop_hw_queues(port->disk->queue);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1185) vdc_requeue_inflight(port);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1186) vdc_port_down(port);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1187)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1188) err = vio_ldc_alloc(&port->vio, &vdc_ldc_cfg, port);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1189) if (err) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1190) pr_err(PFX "%s vio_ldc_alloc:%d\n", port->disk_name, err);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1191) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1192) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1193)
	err = vdc_alloc_tx_ring(port);
	if (err) {
		pr_err(PFX "%s vdc_alloc_tx_ring:%d\n", port->disk_name, err);
		goto err_free_ldc;
	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1199)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1200) if (port->ldc_timeout)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1201) mod_delayed_work(system_wq, &port->ldc_reset_timer_work,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1202) round_jiffies(jiffies + HZ * port->ldc_timeout));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1203) mod_timer(&port->vio.timer, round_jiffies(jiffies + HZ));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1204) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1205)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1206) err_free_ldc:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1207) vio_ldc_free(&port->vio);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1208) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1209)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1210) static const struct vio_device_id vdc_port_match[] = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1211) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1212) .type = "vdc-port",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1213) },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1214) {},
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1215) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1216) MODULE_DEVICE_TABLE(vio, vdc_port_match);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1217)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1218) static struct vio_driver vdc_port_driver = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1219) .id_table = vdc_port_match,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1220) .probe = vdc_port_probe,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1221) .remove = vdc_port_remove,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1222) .name = "vdc_port",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1223) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1224)
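/* Module init: create the reset workqueue, grab a dynamic block major,
 * then register the VIO port driver; each step is unwound in reverse
 * order on failure.
 */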
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1225) static int __init vdc_init(void)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1226) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1227) int err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1228)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1229) sunvdc_wq = alloc_workqueue("sunvdc", 0, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1230) if (!sunvdc_wq)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1231) return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1232)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1233) err = register_blkdev(0, VDCBLK_NAME);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1234) if (err < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1235) goto out_free_wq;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1236)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1237) vdc_major = err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1238)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1239) err = vio_register_driver(&vdc_port_driver);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1240) if (err)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1241) goto out_unregister_blkdev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1242)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1243) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1244)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1245) out_unregister_blkdev:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1246) unregister_blkdev(vdc_major, VDCBLK_NAME);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1247) vdc_major = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1248)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1249) out_free_wq:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1250) destroy_workqueue(sunvdc_wq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1251) return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1252) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1253)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1254) static void __exit vdc_exit(void)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1255) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1256) vio_unregister_driver(&vdc_port_driver);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1257) unregister_blkdev(vdc_major, VDCBLK_NAME);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1258) destroy_workqueue(sunvdc_wq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1259) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1260)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1261) module_init(vdc_init);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1262) module_exit(vdc_exit);