// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2018-2020 Intel Corporation.
 * Copyright (C) 2020 Red Hat, Inc.
 *
 * Author: Tiwei Bie <tiwei.bie@intel.com>
 *         Jason Wang <jasowang@redhat.com>
 *
 * Thanks to Michael S. Tsirkin for the valuable comments and
 * suggestions, and thanks to Cunming Liang and Zhihong Wang for all
 * their support.
 */

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/cdev.h>
#include <linux/device.h>
#include <linux/mm.h>
#include <linux/iommu.h>
#include <linux/uuid.h>
#include <linux/vdpa.h>
#include <linux/nospec.h>
#include <linux/vhost.h>
#include <linux/virtio_net.h>

#include "vhost.h"

enum {
	VHOST_VDPA_BACKEND_FEATURES =
	(1ULL << VHOST_BACKEND_F_IOTLB_MSG_V2) |
	(1ULL << VHOST_BACKEND_F_IOTLB_BATCH),
};
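
/*
 * Backend features advertised to userspace: IOTLB_MSG_V2 selects the
 * v2 IOTLB message layout, and IOTLB_BATCH lets userspace group many
 * IOTLB updates between BATCH_BEGIN/BATCH_END so that devices using
 * set_map() only rebuild their mapping once per batch (see
 * vhost_vdpa_process_iotlb_msg() below).
 */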

#define VHOST_VDPA_DEV_MAX (1U << MINORBITS)

struct vhost_vdpa {
	struct vhost_dev vdev;
	struct iommu_domain *domain;
	struct vhost_virtqueue *vqs;
	struct completion completion;
	struct vdpa_device *vdpa;
	struct device dev;
	struct cdev cdev;
	atomic_t opened;
	int nvqs;
	int virtio_id;
	int minor;
	struct eventfd_ctx *config_ctx;
	int in_batch;
	struct vdpa_iova_range range;
};

static DEFINE_IDA(vhost_vdpa_ida);

static dev_t vhost_vdpa_major;

static void handle_vq_kick(struct vhost_work *work)
{
	struct vhost_virtqueue *vq = container_of(work, struct vhost_virtqueue,
						  poll.work);
	struct vhost_vdpa *v = container_of(vq->dev, struct vhost_vdpa, vdev);
	const struct vdpa_config_ops *ops = v->vdpa->config;

	ops->kick_vq(v->vdpa, vq - v->vqs);
}

static irqreturn_t vhost_vdpa_virtqueue_cb(void *private)
{
	struct vhost_virtqueue *vq = private;
	struct eventfd_ctx *call_ctx = vq->call_ctx.ctx;

	if (call_ctx)
		eventfd_signal(call_ctx, 1);

	return IRQ_HANDLED;
}

static irqreturn_t vhost_vdpa_config_cb(void *private)
{
	struct vhost_vdpa *v = private;
	struct eventfd_ctx *config_ctx = v->config_ctx;

	if (config_ctx)
		eventfd_signal(config_ctx, 1);

	return IRQ_HANDLED;
}

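/*
 * Bind the vq's call eventfd to the device interrupt via the IRQ
 * bypass manager, so a capable device can signal the guest directly
 * without bouncing through vhost_vdpa_virtqueue_cb(). If registration
 * fails we merely log it: the eventfd path above still works as a
 * fallback.
 */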
static void vhost_vdpa_setup_vq_irq(struct vhost_vdpa *v, u16 qid)
{
	struct vhost_virtqueue *vq = &v->vqs[qid];
	const struct vdpa_config_ops *ops = v->vdpa->config;
	struct vdpa_device *vdpa = v->vdpa;
	int ret, irq;

	if (!ops->get_vq_irq)
		return;

	irq = ops->get_vq_irq(vdpa, qid);
	irq_bypass_unregister_producer(&vq->call_ctx.producer);
	if (!vq->call_ctx.ctx || irq < 0)
		return;

	vq->call_ctx.producer.token = vq->call_ctx.ctx;
	vq->call_ctx.producer.irq = irq;
	ret = irq_bypass_register_producer(&vq->call_ctx.producer);
	if (unlikely(ret))
		dev_info(&v->dev, "vq %u, irq bypass producer (token %p) registration failed, ret = %d\n",
			 qid, vq->call_ctx.producer.token, ret);
}

static void vhost_vdpa_unsetup_vq_irq(struct vhost_vdpa *v, u16 qid)
{
	struct vhost_virtqueue *vq = &v->vqs[qid];

	irq_bypass_unregister_producer(&vq->call_ctx.producer);
}

static void vhost_vdpa_reset(struct vhost_vdpa *v)
{
	struct vdpa_device *vdpa = v->vdpa;

	vdpa_reset(vdpa);
	v->in_batch = 0;
}

static long vhost_vdpa_get_device_id(struct vhost_vdpa *v, u8 __user *argp)
{
	struct vdpa_device *vdpa = v->vdpa;
	const struct vdpa_config_ops *ops = vdpa->config;
	u32 device_id;

	device_id = ops->get_device_id(vdpa);

	if (copy_to_user(argp, &device_id, sizeof(device_id)))
		return -EFAULT;

	return 0;
}

static long vhost_vdpa_get_status(struct vhost_vdpa *v, u8 __user *statusp)
{
	struct vdpa_device *vdpa = v->vdpa;
	const struct vdpa_config_ops *ops = vdpa->config;
	u8 status;

	status = ops->get_status(vdpa);

	if (copy_to_user(statusp, &status, sizeof(status)))
		return -EFAULT;

	return 0;
}

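/*
 * Status writes are validated so userspace can only add status bits;
 * clearing bits requires writing 0 (a reset). Crossing the DRIVER_OK
 * boundary in either direction also toggles the IRQ bypass producers
 * for every vq, since device interrupts are only meaningful while the
 * device is running.
 */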
static long vhost_vdpa_set_status(struct vhost_vdpa *v, u8 __user *statusp)
{
	struct vdpa_device *vdpa = v->vdpa;
	const struct vdpa_config_ops *ops = vdpa->config;
	u8 status, status_old;
	int nvqs = v->nvqs;
	u16 i;

	if (copy_from_user(&status, statusp, sizeof(status)))
		return -EFAULT;

	status_old = ops->get_status(vdpa);

	/*
	 * Userspace shouldn't remove status bits unless it resets the
	 * status to 0.
	 */
	if (status != 0 && (status_old & ~status) != 0)
		return -EINVAL;

	ops->set_status(vdpa, status);

	if ((status & VIRTIO_CONFIG_S_DRIVER_OK) && !(status_old & VIRTIO_CONFIG_S_DRIVER_OK))
		for (i = 0; i < nvqs; i++)
			vhost_vdpa_setup_vq_irq(v, i);

	if ((status_old & VIRTIO_CONFIG_S_DRIVER_OK) && !(status & VIRTIO_CONFIG_S_DRIVER_OK))
		for (i = 0; i < nvqs; i++)
			vhost_vdpa_unsetup_vq_irq(v, i);

	return 0;
}

static int vhost_vdpa_config_validate(struct vhost_vdpa *v,
				      struct vhost_vdpa_config *c)
{
	long size = 0;

	switch (v->virtio_id) {
	case VIRTIO_ID_NET:
		size = sizeof(struct virtio_net_config);
		break;
	}

	if (c->len == 0 || c->off > size)
		return -EINVAL;

	if (c->len > size - c->off)
		return -E2BIG;

	return 0;
}

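/*
 * Config space reads and writes go through a kernel bounce buffer:
 * the fixed-size header is copied in first to learn (off, len), which
 * is validated against the device's config size, and only then is the
 * payload copied between c->buf and the device. kvzalloc() is used
 * because len is user-controlled (within the validated bound).
 */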
static long vhost_vdpa_get_config(struct vhost_vdpa *v,
				  struct vhost_vdpa_config __user *c)
{
	struct vdpa_device *vdpa = v->vdpa;
	struct vhost_vdpa_config config;
	unsigned long size = offsetof(struct vhost_vdpa_config, buf);
	u8 *buf;

	if (copy_from_user(&config, c, size))
		return -EFAULT;
	if (vhost_vdpa_config_validate(v, &config))
		return -EINVAL;
	buf = kvzalloc(config.len, GFP_KERNEL);
	if (!buf)
		return -ENOMEM;

	vdpa_get_config(vdpa, config.off, buf, config.len);

	if (copy_to_user(c->buf, buf, config.len)) {
		kvfree(buf);
		return -EFAULT;
	}

	kvfree(buf);
	return 0;
}

static long vhost_vdpa_set_config(struct vhost_vdpa *v,
				  struct vhost_vdpa_config __user *c)
{
	struct vdpa_device *vdpa = v->vdpa;
	const struct vdpa_config_ops *ops = vdpa->config;
	struct vhost_vdpa_config config;
	unsigned long size = offsetof(struct vhost_vdpa_config, buf);
	u8 *buf;

	if (copy_from_user(&config, c, size))
		return -EFAULT;
	if (vhost_vdpa_config_validate(v, &config))
		return -EINVAL;
	buf = kvzalloc(config.len, GFP_KERNEL);
	if (!buf)
		return -ENOMEM;

	if (copy_from_user(buf, c->buf, config.len)) {
		kvfree(buf);
		return -EFAULT;
	}

	ops->set_config(vdpa, config.off, buf, config.len);

	kvfree(buf);
	return 0;
}

static long vhost_vdpa_get_features(struct vhost_vdpa *v, u64 __user *featurep)
{
	struct vdpa_device *vdpa = v->vdpa;
	const struct vdpa_config_ops *ops = vdpa->config;
	u64 features;

	features = ops->get_features(vdpa);

	if (copy_to_user(featurep, &features, sizeof(features)))
		return -EFAULT;

	return 0;
}

static long vhost_vdpa_set_features(struct vhost_vdpa *v, u64 __user *featurep)
{
	struct vdpa_device *vdpa = v->vdpa;
	const struct vdpa_config_ops *ops = vdpa->config;
	u64 features;

	/*
	 * It's not allowed to change the features after they have
	 * been negotiated.
	 */
	if (ops->get_status(vdpa) & VIRTIO_CONFIG_S_FEATURES_OK)
		return -EBUSY;

	if (copy_from_user(&features, featurep, sizeof(features)))
		return -EFAULT;

	if (vdpa_set_features(vdpa, features))
		return -EINVAL;

	return 0;
}

static long vhost_vdpa_get_vring_num(struct vhost_vdpa *v, u16 __user *argp)
{
	struct vdpa_device *vdpa = v->vdpa;
	const struct vdpa_config_ops *ops = vdpa->config;
	u16 num;

	num = ops->get_vq_num_max(vdpa);

	if (copy_to_user(argp, &num, sizeof(num)))
		return -EFAULT;

	return 0;
}

static void vhost_vdpa_config_put(struct vhost_vdpa *v)
{
	if (v->config_ctx) {
		eventfd_ctx_put(v->config_ctx);
		v->config_ctx = NULL;
	}
}

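/*
 * Swap the new eventfd context in before checking for errors: the
 * swap() both installs the new context and retires the old one, and
 * VHOST_FILE_UNBIND maps to a NULL context. An ERR_PTR returned by
 * eventfd_ctx_fdget() is detected after the swap and turned back into
 * an error return, leaving no config callback installed.
 */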
static long vhost_vdpa_set_config_call(struct vhost_vdpa *v, u32 __user *argp)
{
	struct vdpa_callback cb;
	int fd;
	struct eventfd_ctx *ctx;

	cb.callback = vhost_vdpa_config_cb;
	cb.private = v;
	if (copy_from_user(&fd, argp, sizeof(fd)))
		return -EFAULT;

	ctx = fd == VHOST_FILE_UNBIND ? NULL : eventfd_ctx_fdget(fd);
	swap(ctx, v->config_ctx);

	if (!IS_ERR_OR_NULL(ctx))
		eventfd_ctx_put(ctx);

	if (IS_ERR(v->config_ctx)) {
		long ret = PTR_ERR(v->config_ctx);

		v->config_ctx = NULL;
		return ret;
	}

	v->vdpa->config->set_config_cb(v->vdpa, &cb);

	return 0;
}

static long vhost_vdpa_get_iova_range(struct vhost_vdpa *v, u32 __user *argp)
{
	struct vhost_vdpa_iova_range range = {
		.first = v->range.first,
		.last = v->range.last,
	};

	if (copy_to_user(argp, &range, sizeof(range)))
		return -EFAULT;
	return 0;
}

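/*
 * Vring ioctls are dispatched in two stages: generic vring state is
 * handled by vhost_vring_ioctl() on the vhost_virtqueue, then the
 * result is propagated to the vDPA device through the config ops
 * (address, base, callback, size). VHOST_VDPA_SET_VRING_ENABLE goes
 * straight to the device, and VHOST_GET_VRING_BASE first refreshes
 * last_avail_idx from the device state so the generic handler copies
 * out the current value.
 */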
static long vhost_vdpa_vring_ioctl(struct vhost_vdpa *v, unsigned int cmd,
				   void __user *argp)
{
	struct vdpa_device *vdpa = v->vdpa;
	const struct vdpa_config_ops *ops = vdpa->config;
	struct vdpa_vq_state vq_state;
	struct vdpa_callback cb;
	struct vhost_virtqueue *vq;
	struct vhost_vring_state s;
	u32 idx;
	long r;

	r = get_user(idx, (u32 __user *)argp);
	if (r < 0)
		return r;

	if (idx >= v->nvqs)
		return -ENOBUFS;

	idx = array_index_nospec(idx, v->nvqs);
	vq = &v->vqs[idx];

	switch (cmd) {
	case VHOST_VDPA_SET_VRING_ENABLE:
		if (copy_from_user(&s, argp, sizeof(s)))
			return -EFAULT;
		ops->set_vq_ready(vdpa, idx, s.num);
		return 0;
	case VHOST_GET_VRING_BASE:
		r = ops->get_vq_state(v->vdpa, idx, &vq_state);
		if (r)
			return r;

		vq->last_avail_idx = vq_state.avail_index;
		break;
	}

	r = vhost_vring_ioctl(&v->vdev, cmd, argp);
	if (r)
		return r;

	switch (cmd) {
	case VHOST_SET_VRING_ADDR:
		if (ops->set_vq_address(vdpa, idx,
					(u64)(uintptr_t)vq->desc,
					(u64)(uintptr_t)vq->avail,
					(u64)(uintptr_t)vq->used))
			r = -EINVAL;
		break;

	case VHOST_SET_VRING_BASE:
		vq_state.avail_index = vq->last_avail_idx;
		if (ops->set_vq_state(vdpa, idx, &vq_state))
			r = -EINVAL;
		break;

	case VHOST_SET_VRING_CALL:
		if (vq->call_ctx.ctx) {
			cb.callback = vhost_vdpa_virtqueue_cb;
			cb.private = vq;
		} else {
			cb.callback = NULL;
			cb.private = NULL;
		}
		ops->set_vq_cb(vdpa, idx, &cb);
		vhost_vdpa_setup_vq_irq(v, idx);
		break;

	case VHOST_SET_VRING_NUM:
		ops->set_vq_num(vdpa, idx, vq->num);
		break;
	}

	return r;
}

static long vhost_vdpa_unlocked_ioctl(struct file *filep,
				      unsigned int cmd, unsigned long arg)
{
	struct vhost_vdpa *v = filep->private_data;
	struct vhost_dev *d = &v->vdev;
	void __user *argp = (void __user *)arg;
	u64 __user *featurep = argp;
	u64 features;
	long r = 0;

	if (cmd == VHOST_SET_BACKEND_FEATURES) {
		if (copy_from_user(&features, featurep, sizeof(features)))
			return -EFAULT;
		if (features & ~VHOST_VDPA_BACKEND_FEATURES)
			return -EOPNOTSUPP;
		vhost_set_backend_features(&v->vdev, features);
		return 0;
	}

	mutex_lock(&d->mutex);

	switch (cmd) {
	case VHOST_VDPA_GET_DEVICE_ID:
		r = vhost_vdpa_get_device_id(v, argp);
		break;
	case VHOST_VDPA_GET_STATUS:
		r = vhost_vdpa_get_status(v, argp);
		break;
	case VHOST_VDPA_SET_STATUS:
		r = vhost_vdpa_set_status(v, argp);
		break;
	case VHOST_VDPA_GET_CONFIG:
		r = vhost_vdpa_get_config(v, argp);
		break;
	case VHOST_VDPA_SET_CONFIG:
		r = vhost_vdpa_set_config(v, argp);
		break;
	case VHOST_GET_FEATURES:
		r = vhost_vdpa_get_features(v, argp);
		break;
	case VHOST_SET_FEATURES:
		r = vhost_vdpa_set_features(v, argp);
		break;
	case VHOST_VDPA_GET_VRING_NUM:
		r = vhost_vdpa_get_vring_num(v, argp);
		break;
	case VHOST_SET_LOG_BASE:
	case VHOST_SET_LOG_FD:
		r = -ENOIOCTLCMD;
		break;
	case VHOST_VDPA_SET_CONFIG_CALL:
		r = vhost_vdpa_set_config_call(v, argp);
		break;
	case VHOST_GET_BACKEND_FEATURES:
		features = VHOST_VDPA_BACKEND_FEATURES;
		if (copy_to_user(featurep, &features, sizeof(features)))
			r = -EFAULT;
		break;
	case VHOST_VDPA_GET_IOVA_RANGE:
		r = vhost_vdpa_get_iova_range(v, argp);
		break;
	default:
		r = vhost_dev_ioctl(&v->vdev, cmd, argp);
		if (r == -ENOIOCTLCMD)
			r = vhost_vdpa_vring_ioctl(v, cmd, argp);
		break;
	}

	mutex_unlock(&d->mutex);
	return r;
}

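/*
 * Tear down a range of the IOTLB: for each map overlapping
 * [start, last], mark write-mapped pages dirty, unpin them, subtract
 * them from the mm's pinned_vm accounting, and drop the map node.
 * map->addr and map->size are page aligned here, as set up by
 * vhost_vdpa_process_iotlb_update().
 */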
static void vhost_vdpa_iotlb_unmap(struct vhost_vdpa *v, u64 start, u64 last)
{
	struct vhost_dev *dev = &v->vdev;
	struct vhost_iotlb *iotlb = dev->iotlb;
	struct vhost_iotlb_map *map;
	struct page *page;
	unsigned long pfn, pinned;

	while ((map = vhost_iotlb_itree_first(iotlb, start, last)) != NULL) {
		pinned = map->size >> PAGE_SHIFT;
		for (pfn = map->addr >> PAGE_SHIFT;
		     pinned > 0; pfn++, pinned--) {
			page = pfn_to_page(pfn);
			if (map->perm & VHOST_ACCESS_WO)
				set_page_dirty_lock(page);
			unpin_user_page(page);
		}
		atomic64_sub(map->size >> PAGE_SHIFT, &dev->mm->pinned_vm);
		vhost_iotlb_map_free(iotlb, map);
	}
}

static void vhost_vdpa_iotlb_free(struct vhost_vdpa *v)
{
	struct vhost_dev *dev = &v->vdev;

	vhost_vdpa_iotlb_unmap(v, 0ULL, 0ULL - 1);
	kfree(dev->iotlb);
	dev->iotlb = NULL;
}

static int perm_to_iommu_flags(u32 perm)
{
	int flags = 0;

	switch (perm) {
	case VHOST_ACCESS_WO:
		flags |= IOMMU_WRITE;
		break;
	case VHOST_ACCESS_RO:
		flags |= IOMMU_READ;
		break;
	case VHOST_ACCESS_RW:
		flags |= (IOMMU_WRITE | IOMMU_READ);
		break;
	default:
		WARN(1, "invalid vhost IOTLB permission\n");
		break;
	}

	return flags | IOMMU_CACHE;
}

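/*
 * Three translation paths exist, in priority order: a device that
 * implements dma_map() is told about each mapping individually; a
 * device with set_map() is handed the whole rebuilt IOTLB (deferred
 * to BATCH_END while v->in_batch is set); otherwise the mapping is
 * programmed into the platform IOMMU domain allocated in
 * vhost_vdpa_alloc_domain().
 */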
static int vhost_vdpa_map(struct vhost_vdpa *v,
			  u64 iova, u64 size, u64 pa, u32 perm)
{
	struct vhost_dev *dev = &v->vdev;
	struct vdpa_device *vdpa = v->vdpa;
	const struct vdpa_config_ops *ops = vdpa->config;
	int r = 0;

	r = vhost_iotlb_add_range(dev->iotlb, iova, iova + size - 1,
				  pa, perm);
	if (r)
		return r;

	if (ops->dma_map) {
		r = ops->dma_map(vdpa, iova, size, pa, perm);
	} else if (ops->set_map) {
		if (!v->in_batch)
			r = ops->set_map(vdpa, dev->iotlb);
	} else {
		r = iommu_map(v->domain, iova, pa, size,
			      perm_to_iommu_flags(perm));
	}

	if (r)
		vhost_iotlb_del_range(dev->iotlb, iova, iova + size - 1);
	else
		atomic64_add(size >> PAGE_SHIFT, &dev->mm->pinned_vm);

	return r;
}

static void vhost_vdpa_unmap(struct vhost_vdpa *v, u64 iova, u64 size)
{
	struct vhost_dev *dev = &v->vdev;
	struct vdpa_device *vdpa = v->vdpa;
	const struct vdpa_config_ops *ops = vdpa->config;

	vhost_vdpa_iotlb_unmap(v, iova, iova + size - 1);

	if (ops->dma_map) {
		ops->dma_unmap(vdpa, iova, size);
	} else if (ops->set_map) {
		if (!v->in_batch)
			ops->set_map(vdpa, dev->iotlb);
	} else {
		iommu_unmap(v->domain, iova, size);
	}
}

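/*
 * Handle a VHOST_IOTLB_UPDATE: validate the range against the
 * device's IOVA range, pin the user pages one page_list at a time,
 * coalesce physically contiguous pfn runs into single vhost_vdpa_map()
 * calls, and enforce RLIMIT_MEMLOCK via mm->pinned_vm. On failure,
 * everything already pinned or mapped is rolled back.
 */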
static int vhost_vdpa_process_iotlb_update(struct vhost_vdpa *v,
					   struct vhost_iotlb_msg *msg)
{
	struct vhost_dev *dev = &v->vdev;
	struct vhost_iotlb *iotlb = dev->iotlb;
	struct page **page_list;
	unsigned long list_size = PAGE_SIZE / sizeof(struct page *);
	unsigned int gup_flags = FOLL_LONGTERM;
	unsigned long npages, cur_base, map_pfn, last_pfn = 0;
	unsigned long lock_limit, sz2pin, nchunks, i;
	u64 iova = msg->iova;
	long pinned;
	int ret = 0;

	if (msg->iova < v->range.first || !msg->size ||
	    msg->iova > U64_MAX - msg->size + 1 ||
	    msg->iova + msg->size - 1 > v->range.last)
		return -EINVAL;

	if (vhost_iotlb_itree_first(iotlb, msg->iova,
				    msg->iova + msg->size - 1))
		return -EEXIST;

	/* Limit the use of memory for bookkeeping */
	page_list = (struct page **) __get_free_page(GFP_KERNEL);
	if (!page_list)
		return -ENOMEM;

	if (msg->perm & VHOST_ACCESS_WO)
		gup_flags |= FOLL_WRITE;

	npages = PAGE_ALIGN(msg->size + (iova & ~PAGE_MASK)) >> PAGE_SHIFT;
	if (!npages) {
		ret = -EINVAL;
		goto free;
	}

	mmap_read_lock(dev->mm);

	lock_limit = rlimit(RLIMIT_MEMLOCK) >> PAGE_SHIFT;
	if (npages + atomic64_read(&dev->mm->pinned_vm) > lock_limit) {
		ret = -ENOMEM;
		goto unlock;
	}

	cur_base = msg->uaddr & PAGE_MASK;
	iova &= PAGE_MASK;
	nchunks = 0;

	while (npages) {
		sz2pin = min_t(unsigned long, npages, list_size);
		pinned = pin_user_pages(cur_base, sz2pin,
					gup_flags, page_list, NULL);
		if (sz2pin != pinned) {
			if (pinned < 0) {
				ret = pinned;
			} else {
				unpin_user_pages(page_list, pinned);
				ret = -ENOMEM;
			}
			goto out;
		}
		nchunks++;

		if (!last_pfn)
			map_pfn = page_to_pfn(page_list[0]);

		for (i = 0; i < pinned; i++) {
			unsigned long this_pfn = page_to_pfn(page_list[i]);
			u64 csize;

			if (last_pfn && (this_pfn != last_pfn + 1)) {
				/* Map the contiguous chunk pinned so far */
				csize = (last_pfn - map_pfn + 1) << PAGE_SHIFT;
				ret = vhost_vdpa_map(v, iova, csize,
						     map_pfn << PAGE_SHIFT,
						     msg->perm);
				if (ret) {
					/*
					 * Unpin the pages that are left unmapped
					 * from this point on in the current
					 * page_list. The remaining outstanding
					 * ones which may stride across several
					 * chunks will be covered in the common
					 * error path subsequently.
					 */
					unpin_user_pages(&page_list[i],
							 pinned - i);
					goto out;
				}

				map_pfn = this_pfn;
				iova += csize;
				nchunks = 0;
			}

			last_pfn = this_pfn;
		}

		cur_base += pinned << PAGE_SHIFT;
		npages -= pinned;
	}

	/* Map the last contiguous chunk */
	ret = vhost_vdpa_map(v, iova, (last_pfn - map_pfn + 1) << PAGE_SHIFT,
			     map_pfn << PAGE_SHIFT, msg->perm);
out:
	if (ret) {
		if (nchunks) {
			unsigned long pfn;

			/*
			 * Unpin the outstanding pages that were pinned
			 * but never mapped, due to a vhost_vdpa_map()
			 * or pin_user_pages() failure.
			 *
			 * Pages that did get mapped are accounted in
			 * vhost_vdpa_map(), so their unpinning is
			 * handled by vhost_vdpa_unmap() below.
			 */
			WARN_ON(!last_pfn);
			for (pfn = map_pfn; pfn <= last_pfn; pfn++)
				unpin_user_page(pfn_to_page(pfn));
		}
		vhost_vdpa_unmap(v, msg->iova, msg->size);
	}
unlock:
	mmap_read_unlock(dev->mm);
free:
	free_page((unsigned long)page_list);
	return ret;
}

static int vhost_vdpa_process_iotlb_msg(struct vhost_dev *dev,
					struct vhost_iotlb_msg *msg)
{
	struct vhost_vdpa *v = container_of(dev, struct vhost_vdpa, vdev);
	struct vdpa_device *vdpa = v->vdpa;
	const struct vdpa_config_ops *ops = vdpa->config;
	int r = 0;

	mutex_lock(&dev->mutex);

	r = vhost_dev_check_owner(dev);
	if (r)
		goto unlock;

	switch (msg->type) {
	case VHOST_IOTLB_UPDATE:
		r = vhost_vdpa_process_iotlb_update(v, msg);
		break;
	case VHOST_IOTLB_INVALIDATE:
		vhost_vdpa_unmap(v, msg->iova, msg->size);
		break;
	case VHOST_IOTLB_BATCH_BEGIN:
		v->in_batch = true;
		break;
	case VHOST_IOTLB_BATCH_END:
		if (v->in_batch && ops->set_map)
			ops->set_map(vdpa, dev->iotlb);
		v->in_batch = false;
		break;
	default:
		r = -EINVAL;
		break;
	}
unlock:
	mutex_unlock(&dev->mutex);

	return r;
}

static ssize_t vhost_vdpa_chr_write_iter(struct kiocb *iocb,
					 struct iov_iter *from)
{
	struct file *file = iocb->ki_filp;
	struct vhost_vdpa *v = file->private_data;
	struct vhost_dev *dev = &v->vdev;

	return vhost_chr_write_iter(dev, from);
}

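/*
 * Only devices that do not implement their own DMA translation
 * (neither set_map() nor dma_map()) get a platform IOMMU domain here;
 * the bus must guarantee cache-coherent DMA since the mappings are
 * created with IOMMU_CACHE.
 */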
static int vhost_vdpa_alloc_domain(struct vhost_vdpa *v)
{
	struct vdpa_device *vdpa = v->vdpa;
	const struct vdpa_config_ops *ops = vdpa->config;
	struct device *dma_dev = vdpa_get_dma_dev(vdpa);
	struct bus_type *bus;
	int ret;

	/* The device wants to do the DMA mapping by itself */
	if (ops->set_map || ops->dma_map)
		return 0;

	bus = dma_dev->bus;
	if (!bus)
		return -EFAULT;

	if (!iommu_capable(bus, IOMMU_CAP_CACHE_COHERENCY))
		return -ENOTSUPP;

	v->domain = iommu_domain_alloc(bus);
	if (!v->domain)
		return -EIO;

	ret = iommu_attach_device(v->domain, dma_dev);
	if (ret)
		goto err_attach;

	return 0;

err_attach:
	iommu_domain_free(v->domain);
	return ret;
}

static void vhost_vdpa_free_domain(struct vhost_vdpa *v)
{
	struct vdpa_device *vdpa = v->vdpa;
	struct device *dma_dev = vdpa_get_dma_dev(vdpa);

	if (v->domain) {
		iommu_detach_device(v->domain, dma_dev);
		iommu_domain_free(v->domain);
	}

	v->domain = NULL;
}

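/*
 * The usable IOVA range is taken from the device if it reports one,
 * else from the IOMMU aperture when the domain enforces it, and
 * otherwise defaults to the whole 64-bit space. It is consumed by the
 * VHOST_VDPA_GET_IOVA_RANGE ioctl and by the range checks in
 * vhost_vdpa_process_iotlb_update().
 */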
static void vhost_vdpa_set_iova_range(struct vhost_vdpa *v)
{
	struct vdpa_iova_range *range = &v->range;
	struct iommu_domain_geometry geo;
	struct vdpa_device *vdpa = v->vdpa;
	const struct vdpa_config_ops *ops = vdpa->config;

	if (ops->get_iova_range) {
		*range = ops->get_iova_range(vdpa);
	} else if (v->domain &&
		   !iommu_domain_get_attr(v->domain,
					  DOMAIN_ATTR_GEOMETRY, &geo) &&
		   geo.force_aperture) {
		range->first = geo.aperture_start;
		range->last = geo.aperture_end;
	} else {
		range->first = 0;
		range->last = ULLONG_MAX;
	}
}

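/*
 * Open of /dev/vhost-vdpa-$N.  Only one opener is allowed at a time;
 * the device is reset and the vhost device, IOTLB and IOMMU domain
 * are set up from scratch.
 */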
static int vhost_vdpa_open(struct inode *inode, struct file *filep)
{
	struct vhost_vdpa *v;
	struct vhost_dev *dev;
	struct vhost_virtqueue **vqs;
	int nvqs, i, r, opened;

	v = container_of(inode->i_cdev, struct vhost_vdpa, cdev);

	opened = atomic_cmpxchg(&v->opened, 0, 1);
	if (opened)
		return -EBUSY;

	nvqs = v->nvqs;
	vhost_vdpa_reset(v);

	vqs = kmalloc_array(nvqs, sizeof(*vqs), GFP_KERNEL);
	if (!vqs) {
		r = -ENOMEM;
		goto err;
	}

	dev = &v->vdev;
	for (i = 0; i < nvqs; i++) {
		vqs[i] = &v->vqs[i];
		vqs[i]->handle_kick = handle_vq_kick;
	}
	vhost_dev_init(dev, vqs, nvqs, 0, 0, 0, false,
		       vhost_vdpa_process_iotlb_msg);

	dev->iotlb = vhost_iotlb_alloc(0, 0);
	if (!dev->iotlb) {
		r = -ENOMEM;
		goto err_init_iotlb;
	}

	r = vhost_vdpa_alloc_domain(v);
	if (r)
		goto err_init_iotlb;

	vhost_vdpa_set_iova_range(v);

	filep->private_data = v;

	return 0;

err_init_iotlb:
	vhost_dev_cleanup(&v->vdev);
	kfree(vqs);
err:
	atomic_dec(&v->opened);
	return r;
}

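/* Undo the per-virtqueue interrupt setup for all virtqueues. */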
static void vhost_vdpa_clean_irq(struct vhost_vdpa *v)
{
	int i;

	for (i = 0; i < v->nvqs; i++)
		vhost_vdpa_unsetup_vq_irq(v, i);
}

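/*
 * Last close of the file: stop and reset the device, release all
 * resources taken in vhost_vdpa_open(), and wake up a remove() that
 * may be waiting on v->completion.
 */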
static int vhost_vdpa_release(struct inode *inode, struct file *filep)
{
	struct vhost_vdpa *v = filep->private_data;
	struct vhost_dev *d = &v->vdev;

	mutex_lock(&d->mutex);
	filep->private_data = NULL;
	vhost_vdpa_reset(v);
	vhost_dev_stop(&v->vdev);
	vhost_vdpa_iotlb_free(v);
	vhost_vdpa_free_domain(v);
	vhost_vdpa_config_put(v);
	vhost_vdpa_clean_irq(v);
	vhost_dev_cleanup(&v->vdev);
	kfree(v->vdev.vqs);
	mutex_unlock(&d->mutex);

	atomic_dec(&v->opened);
	complete(&v->completion);

	return 0;
}

#ifdef CONFIG_MMU
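/*
 * Fault handler for the doorbell mapping set up by vhost_vdpa_mmap():
 * map the notification page of the virtqueue whose index was stashed
 * in vm_pgoff.
 */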
static vm_fault_t vhost_vdpa_fault(struct vm_fault *vmf)
{
	struct vhost_vdpa *v = vmf->vma->vm_file->private_data;
	struct vdpa_device *vdpa = v->vdpa;
	const struct vdpa_config_ops *ops = vdpa->config;
	struct vdpa_notification_area notify;
	struct vm_area_struct *vma = vmf->vma;
	u16 index = vma->vm_pgoff;

	notify = ops->get_vq_notification(vdpa, index);

	vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
	if (remap_pfn_range(vma, vmf->address & PAGE_MASK,
			    notify.addr >> PAGE_SHIFT, PAGE_SIZE,
			    vma->vm_page_prot))
		return VM_FAULT_SIGBUS;

	return VM_FAULT_NOPAGE;
}

static const struct vm_operations_struct vhost_vdpa_vm_ops = {
	.fault = vhost_vdpa_fault,
};

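/*
 * Map a virtqueue doorbell into userspace.  The file offset selects
 * the virtqueue: the doorbell of vq N is mapped at offset
 * N * PAGE_SIZE, and the mapping must be exactly one page, shared and
 * write-only.
 *
 * A minimal userspace sketch, illustrative only (the device node name
 * and the queue index 2 are assumptions; error handling is omitted):
 *
 *	long pg = sysconf(_SC_PAGESIZE);
 *	int fd = open("/dev/vhost-vdpa-0", O_RDWR);
 *	uint16_t *db = mmap(NULL, pg, PROT_WRITE, MAP_SHARED,
 *			    fd, 2 * pg);	- doorbell page of vq 2
 *	*db = 2;				- kick, virtio-pci style
 */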
static int vhost_vdpa_mmap(struct file *file, struct vm_area_struct *vma)
{
	struct vhost_vdpa *v = vma->vm_file->private_data;
	struct vdpa_device *vdpa = v->vdpa;
	const struct vdpa_config_ops *ops = vdpa->config;
	struct vdpa_notification_area notify;
	unsigned long index = vma->vm_pgoff;

	if (vma->vm_end - vma->vm_start != PAGE_SIZE)
		return -EINVAL;
	if ((vma->vm_flags & VM_SHARED) == 0)
		return -EINVAL;
	if (vma->vm_flags & VM_READ)
		return -EINVAL;
	if (index > 65535)
		return -EINVAL;
	if (!ops->get_vq_notification)
		return -ENOTSUPP;

	/* To be safe and easily modelled by userspace, we only
	 * support a doorbell that sits on a page boundary and does
	 * not share its page with other registers.
	 */
	notify = ops->get_vq_notification(vdpa, index);
	if (notify.addr & (PAGE_SIZE - 1))
		return -EINVAL;
	if (vma->vm_end - vma->vm_start != notify.size)
		return -ENOTSUPP;

	vma->vm_flags |= VM_IO | VM_PFNMAP | VM_DONTEXPAND | VM_DONTDUMP;
	vma->vm_ops = &vhost_vdpa_vm_ops;
	return 0;
}
#endif /* CONFIG_MMU */

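/* File operations behind the /dev/vhost-vdpa-$N character device. */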
static const struct file_operations vhost_vdpa_fops = {
	.owner = THIS_MODULE,
	.open = vhost_vdpa_open,
	.release = vhost_vdpa_release,
	.write_iter = vhost_vdpa_chr_write_iter,
	.unlocked_ioctl = vhost_vdpa_unlocked_ioctl,
#ifdef CONFIG_MMU
	.mmap = vhost_vdpa_mmap,
#endif /* CONFIG_MMU */
	.compat_ioctl = compat_ptr_ioctl,
};

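/*
 * Final put of v->dev: return the minor number and free the
 * vhost_vdpa.  Runs from put_device() once the last reference is
 * gone.
 */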
static void vhost_vdpa_release_dev(struct device *device)
{
	struct vhost_vdpa *v =
	       container_of(device, struct vhost_vdpa, dev);

	ida_simple_remove(&vhost_vdpa_ida, v->minor);
	kfree(v->vqs);
	kfree(v);
}

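/*
 * A vDPA device has been bound to this driver: allocate a vhost_vdpa
 * for it, pick a minor and expose it as a character device.
 */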
static int vhost_vdpa_probe(struct vdpa_device *vdpa)
{
	const struct vdpa_config_ops *ops = vdpa->config;
	struct vhost_vdpa *v;
	int minor;
	int r;

	/* Currently, we only accept network devices. */
	if (ops->get_device_id(vdpa) != VIRTIO_ID_NET)
		return -ENOTSUPP;

	v = kzalloc(sizeof(*v), GFP_KERNEL | __GFP_RETRY_MAYFAIL);
	if (!v)
		return -ENOMEM;

	minor = ida_simple_get(&vhost_vdpa_ida, 0,
			       VHOST_VDPA_DEV_MAX, GFP_KERNEL);
	if (minor < 0) {
		kfree(v);
		return minor;
	}

	atomic_set(&v->opened, 0);
	v->minor = minor;
	v->vdpa = vdpa;
	v->nvqs = vdpa->nvqs;
	v->virtio_id = ops->get_device_id(vdpa);

	device_initialize(&v->dev);
	v->dev.release = vhost_vdpa_release_dev;
	v->dev.parent = &vdpa->dev;
	v->dev.devt = MKDEV(MAJOR(vhost_vdpa_major), minor);
	v->vqs = kmalloc_array(v->nvqs, sizeof(struct vhost_virtqueue),
			       GFP_KERNEL);
	if (!v->vqs) {
		r = -ENOMEM;
		goto err;
	}

	r = dev_set_name(&v->dev, "vhost-vdpa-%u", minor);
	if (r)
		goto err;

	cdev_init(&v->cdev, &vhost_vdpa_fops);
	v->cdev.owner = THIS_MODULE;

	r = cdev_device_add(&v->cdev, &v->dev);
	if (r)
		goto err;

	init_completion(&v->completion);
	vdpa_set_drvdata(vdpa, v);

	return 0;

err:
	put_device(&v->dev);
	return r;
}

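/*
 * Unbind: remove the character device, then wait until the last
 * opener has released the file before dropping our device reference.
 */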
static void vhost_vdpa_remove(struct vdpa_device *vdpa)
{
	struct vhost_vdpa *v = vdpa_get_drvdata(vdpa);
	int opened;

	cdev_device_del(&v->cdev, &v->dev);

	do {
		opened = atomic_cmpxchg(&v->opened, 0, 1);
		if (!opened)
			break;
		wait_for_completion(&v->completion);
	} while (1);

	put_device(&v->dev);
}

static struct vdpa_driver vhost_vdpa_driver = {
	.driver = {
		.name = "vhost_vdpa",
	},
	.probe = vhost_vdpa_probe,
	.remove = vhost_vdpa_remove,
};

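/*
 * Module init: reserve a char device region for VHOST_VDPA_DEV_MAX
 * minors and register the driver on the vDPA bus.
 */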
static int __init vhost_vdpa_init(void)
{
	int r;

	r = alloc_chrdev_region(&vhost_vdpa_major, 0, VHOST_VDPA_DEV_MAX,
				"vhost-vdpa");
	if (r)
		goto err_alloc_chrdev;

	r = vdpa_register_driver(&vhost_vdpa_driver);
	if (r)
		goto err_vdpa_register_driver;

	return 0;

err_vdpa_register_driver:
	unregister_chrdev_region(vhost_vdpa_major, VHOST_VDPA_DEV_MAX);
err_alloc_chrdev:
	return r;
}
module_init(vhost_vdpa_init);

static void __exit vhost_vdpa_exit(void)
{
	vdpa_unregister_driver(&vhost_vdpa_driver);
	unregister_chrdev_region(vhost_vdpa_major, VHOST_VDPA_DEV_MAX);
}
module_exit(vhost_vdpa_exit);

MODULE_VERSION("0.0.1");
MODULE_LICENSE("GPL v2");
MODULE_AUTHOR("Intel Corporation");
MODULE_DESCRIPTION("vDPA-based vhost backend for virtio");