Orange Pi5 kernel

Deprecated Linux kernel 5.10.110 for OrangePi 5/5B/5+ boards

// SPDX-License-Identifier: GPL-2.0-only
/*
 * Remote processor messaging transport (OMAP platform-specific bits)
 *
 * Copyright (C) 2011 Texas Instruments, Inc.
 * Copyright (C) 2011 Google, Inc.
 *
 * Ohad Ben-Cohen <ohad@wizery.com>
 * Brian Swetland <swetland@google.com>
 */

#include <linux/dma-map-ops.h>
#include <linux/export.h>
#include <linux/of_reserved_mem.h>
#include <linux/remoteproc.h>
#include <linux/virtio.h>
#include <linux/virtio_config.h>
#include <linux/virtio_ids.h>
#include <linux/virtio_ring.h>
#include <linux/err.h>
#include <linux/kref.h>
#include <linux/slab.h>

#include "remoteproc_internal.h"

/* kick the remote processor, and let it know which virtqueue to poke at */
static bool rproc_virtio_notify(struct virtqueue *vq)
{
	struct rproc_vring *rvring = vq->priv;
	struct rproc *rproc = rvring->rvdev->rproc;
	int notifyid = rvring->notifyid;

	dev_dbg(&rproc->dev, "kicking vq index: %d\n", notifyid);

	rproc->ops->kick(rproc, notifyid);
	return true;
}

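/*
 * Illustrative sketch, not part of the original file: rproc_virtio_notify()
 * above ends up in the platform driver's .kick op. A minimal .kick might
 * simply write the notify id to a doorbell register of the remote core.
 * The my_rproc structure, its kick_reg field and the register-write
 * doorbell are made-up names/assumptions used purely for illustration.
 */
#include <linux/io.h>

struct my_rproc {
	void __iomem *kick_reg;	/* hypothetical doorbell register */
};

static void my_rproc_kick(struct rproc *rproc, int vqid)
{
	struct my_rproc *priv = rproc->priv;

	/* tell the remote core which virtqueue has pending buffers */
	writel(vqid, priv->kick_reg);
}

static const struct rproc_ops my_rproc_ops __maybe_unused = {
	.kick = my_rproc_kick,
	/* .start, .stop, .load, ... filled in by the real driver */
};
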
/**
 * rproc_vq_interrupt() - tell remoteproc that a virtqueue is interrupted
 * @rproc: handle to the remote processor
 * @notifyid: index of the signalled virtqueue (unique per this @rproc)
 *
 * This function should be called by the platform-specific rproc driver,
 * when the remote processor signals that a specific virtqueue has pending
 * messages available.
 *
 * Returns IRQ_NONE if no message was found in the @notifyid virtqueue,
 * and otherwise returns IRQ_HANDLED.
 */
irqreturn_t rproc_vq_interrupt(struct rproc *rproc, int notifyid)
{
	struct rproc_vring *rvring;

	dev_dbg(&rproc->dev, "vq index %d is interrupted\n", notifyid);

	rvring = idr_find(&rproc->notifyids, notifyid);
	if (!rvring || !rvring->vq)
		return IRQ_NONE;

	return vring_interrupt(0, rvring->vq);
}
EXPORT_SYMBOL(rproc_vq_interrupt);

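/*
 * Illustrative sketch, not part of the original file: the inbound direction
 * of the kick above. A platform driver's interrupt (or mailbox receive)
 * handler would recover the notify id signalled by the remote core and
 * forward it to rproc_vq_interrupt(). The my_rproc_isr name and the way
 * the notify id is obtained are assumptions for illustration only.
 */
static irqreturn_t __maybe_unused my_rproc_isr(int irq, void *data)
{
	struct rproc *rproc = data;
	int notifyid = 0;	/* normally read from a mailbox/doorbell register */

	/* returns IRQ_HANDLED if the virtqueue had pending buffers */
	return rproc_vq_interrupt(rproc, notifyid);
}
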
static struct virtqueue *rp_find_vq(struct virtio_device *vdev,
				    unsigned int id,
				    void (*callback)(struct virtqueue *vq),
				    const char *name, bool ctx)
{
	struct rproc_vdev *rvdev = vdev_to_rvdev(vdev);
	struct rproc *rproc = vdev_to_rproc(vdev);
	struct device *dev = &rproc->dev;
	struct rproc_mem_entry *mem;
	struct rproc_vring *rvring;
	struct fw_rsc_vdev *rsc;
	struct virtqueue *vq;
	void *addr;
	int len, size;

	/* we're temporarily limited to two virtqueues per rvdev */
	if (id >= ARRAY_SIZE(rvdev->vring))
		return ERR_PTR(-EINVAL);

	if (!name)
		return NULL;

	/* Search allocated memory region by name */
	mem = rproc_find_carveout_by_name(rproc, "vdev%dvring%d", rvdev->index,
					  id);
	if (!mem || !mem->va)
		return ERR_PTR(-ENOMEM);

	rvring = &rvdev->vring[id];
	addr = mem->va;
	len = rvring->len;

	/* zero vring */
	size = vring_size(len, rvring->align);
	memset(addr, 0, size);
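	/*
	 * Rough sizing note (assumption, not in the original file): for the
	 * split-ring layout, vring_size() is about 16 bytes per descriptor
	 * plus the avail ring, rounded up to 'align', plus the used ring.
	 * E.g. len = 256 with align = 4096 comes to roughly 10 KiB, so the
	 * "vdev%dvring%d" carveout must be at least that large.
	 */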

	dev_dbg(dev, "vring%d: va %pK qsz %d notifyid %d\n",
		id, addr, len, rvring->notifyid);

	/*
	 * Create the new vq, and tell virtio we're not interested in
	 * the 'weak' smp barriers, since we're talking with a real device.
	 */
	vq = vring_new_virtqueue(id, len, rvring->align, vdev, false, ctx,
				 addr, rproc_virtio_notify, callback, name);
	if (!vq) {
		dev_err(dev, "vring_new_virtqueue %s failed\n", name);
		rproc_free_vring(rvring);
		return ERR_PTR(-ENOMEM);
	}

	rvring->vq = vq;
	vq->priv = rvring;

	/* Update vring in resource table */
	rsc = (void *)rproc->table_ptr + rvdev->rsc_offset;
	rsc->vring[id].da = mem->da;

	return vq;
}

static void __rproc_virtio_del_vqs(struct virtio_device *vdev)
{
	struct virtqueue *vq, *n;
	struct rproc_vring *rvring;

	list_for_each_entry_safe(vq, n, &vdev->vqs, list) {
		rvring = vq->priv;
		rvring->vq = NULL;
		vring_del_virtqueue(vq);
	}
}

static void rproc_virtio_del_vqs(struct virtio_device *vdev)
{
	__rproc_virtio_del_vqs(vdev);
}

static int rproc_virtio_find_vqs(struct virtio_device *vdev, unsigned int nvqs,
				 struct virtqueue *vqs[],
				 vq_callback_t *callbacks[],
				 const char * const names[],
				 const bool * ctx,
				 struct irq_affinity *desc)
{
	int i, ret, queue_idx = 0;

	for (i = 0; i < nvqs; ++i) {
		if (!names[i]) {
			vqs[i] = NULL;
			continue;
		}

		vqs[i] = rp_find_vq(vdev, queue_idx++, callbacks[i], names[i],
				    ctx ? ctx[i] : false);
		if (IS_ERR(vqs[i])) {
			ret = PTR_ERR(vqs[i]);
			goto error;
		}
	}

	return 0;

error:
	__rproc_virtio_del_vqs(vdev);
	return ret;
}

static u8 rproc_virtio_get_status(struct virtio_device *vdev)
{
	struct rproc_vdev *rvdev = vdev_to_rvdev(vdev);
	struct fw_rsc_vdev *rsc;

	rsc = (void *)rvdev->rproc->table_ptr + rvdev->rsc_offset;

	return rsc->status;
}

static void rproc_virtio_set_status(struct virtio_device *vdev, u8 status)
{
	struct rproc_vdev *rvdev = vdev_to_rvdev(vdev);
	struct fw_rsc_vdev *rsc;

	rsc = (void *)rvdev->rproc->table_ptr + rvdev->rsc_offset;

	rsc->status = status;
	dev_dbg(&vdev->dev, "status: %d\n", status);
}

static void rproc_virtio_reset(struct virtio_device *vdev)
{
	struct rproc_vdev *rvdev = vdev_to_rvdev(vdev);
	struct fw_rsc_vdev *rsc;

	rsc = (void *)rvdev->rproc->table_ptr + rvdev->rsc_offset;

	rsc->status = 0;
	dev_dbg(&vdev->dev, "reset !\n");
}

/* provide the vdev features as retrieved from the firmware */
static u64 rproc_virtio_get_features(struct virtio_device *vdev)
{
	struct rproc_vdev *rvdev = vdev_to_rvdev(vdev);
	struct fw_rsc_vdev *rsc;

	rsc = (void *)rvdev->rproc->table_ptr + rvdev->rsc_offset;

	return rsc->dfeatures;
}

static void rproc_transport_features(struct virtio_device *vdev)
{
	/*
	 * Packed ring isn't enabled on remoteproc for now,
	 * because remoteproc uses vring_new_virtqueue() which
	 * creates virtio rings on preallocated memory.
	 */
	__virtio_clear_bit(vdev, VIRTIO_F_RING_PACKED);
}

static int rproc_virtio_finalize_features(struct virtio_device *vdev)
{
	struct rproc_vdev *rvdev = vdev_to_rvdev(vdev);
	struct fw_rsc_vdev *rsc;

	rsc = (void *)rvdev->rproc->table_ptr + rvdev->rsc_offset;

	/* Give virtio_ring a chance to accept features */
	vring_transport_features(vdev);

	/* Give virtio_rproc a chance to accept features. */
	rproc_transport_features(vdev);

	/* Make sure we don't have any features > 32 bits! */
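	/* (the resource table stores features in 32-bit dfeatures/gfeatures fields) */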
	BUG_ON((u32)vdev->features != vdev->features);

	/*
	 * Remember the finalized features of our vdev, and provide it
	 * to the remote processor once it is powered on.
	 */
	rsc->gfeatures = vdev->features;

	return 0;
}

static void rproc_virtio_get(struct virtio_device *vdev, unsigned int offset,
			     void *buf, unsigned int len)
{
	struct rproc_vdev *rvdev = vdev_to_rvdev(vdev);
	struct fw_rsc_vdev *rsc;
	void *cfg;

	rsc = (void *)rvdev->rproc->table_ptr + rvdev->rsc_offset;
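	/* the vdev config space, if any, immediately follows the vring array in this rsc entry */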
	cfg = &rsc->vring[rsc->num_of_vrings];

	if (offset + len > rsc->config_len || offset + len < len) {
		dev_err(&vdev->dev, "rproc_virtio_get: access out of bounds\n");
		return;
	}

	memcpy(buf, cfg + offset, len);
}

static void rproc_virtio_set(struct virtio_device *vdev, unsigned int offset,
			     const void *buf, unsigned int len)
{
	struct rproc_vdev *rvdev = vdev_to_rvdev(vdev);
	struct fw_rsc_vdev *rsc;
	void *cfg;

	rsc = (void *)rvdev->rproc->table_ptr + rvdev->rsc_offset;
	cfg = &rsc->vring[rsc->num_of_vrings];

	if (offset + len > rsc->config_len || offset + len < len) {
		dev_err(&vdev->dev, "rproc_virtio_set: access out of bounds\n");
		return;
	}

	memcpy(cfg + offset, buf, len);
}

static const struct virtio_config_ops rproc_virtio_config_ops = {
	.get_features	= rproc_virtio_get_features,
	.finalize_features = rproc_virtio_finalize_features,
	.find_vqs	= rproc_virtio_find_vqs,
	.del_vqs	= rproc_virtio_del_vqs,
	.reset		= rproc_virtio_reset,
	.set_status	= rproc_virtio_set_status,
	.get_status	= rproc_virtio_get_status,
	.get		= rproc_virtio_get,
	.set		= rproc_virtio_set,
};

/*
 * This function is called whenever vdev is released, and is responsible
 * for decrementing the remote processor's refcount, which was taken when
 * vdev was added.
 *
 * Never call this function directly; it will be called by the driver
 * core when needed.
 */
static void rproc_virtio_dev_release(struct device *dev)
{
	struct virtio_device *vdev = dev_to_virtio(dev);
	struct rproc_vdev *rvdev = vdev_to_rvdev(vdev);
	struct rproc *rproc = vdev_to_rproc(vdev);

	kfree(vdev);

	kref_put(&rvdev->refcount, rproc_vdev_release);

	put_device(&rproc->dev);
}

/**
 * rproc_add_virtio_dev() - register an rproc-induced virtio device
 * @rvdev: the remote vdev
 * @id: the device type identification (used to match it with a driver).
 *
 * This function registers a virtio device. This vdev's parent is
 * the rproc device.
 *
 * Returns 0 on success or an appropriate error value otherwise.
 */
int rproc_add_virtio_dev(struct rproc_vdev *rvdev, int id)
{
	struct rproc *rproc = rvdev->rproc;
	struct device *dev = &rvdev->dev;
	struct virtio_device *vdev;
	struct rproc_mem_entry *mem;
	int ret;

	if (rproc->ops->kick == NULL) {
		ret = -EINVAL;
		dev_err(dev, ".kick method not defined for %s\n", rproc->name);
		goto out;
	}

	/* Try to find dedicated vdev buffer carveout */
	mem = rproc_find_carveout_by_name(rproc, "vdev%dbuffer", rvdev->index);
	if (mem) {
		phys_addr_t pa;

		if (mem->of_resm_idx != -1) {
			struct device_node *np = rproc->dev.parent->of_node;

			/* Associate reserved memory to vdev device */
			ret = of_reserved_mem_device_init_by_idx(dev, np,
								 mem->of_resm_idx);
			if (ret) {
				dev_err(dev, "Can't associate reserved memory\n");
				goto out;
			}
		} else {
			if (mem->va) {
				dev_warn(dev, "vdev %d buffer already mapped\n",
					 rvdev->index);
				pa = rproc_va_to_pa(mem->va);
			} else {
				/* Use dma address as the carveout is not mapped yet */
				pa = (phys_addr_t)mem->dma;
			}

			/* Associate vdev buffer memory pool to vdev subdev */
			ret = dma_declare_coherent_memory(dev, pa,
							   mem->da,
							   mem->len);
			if (ret < 0) {
				dev_err(dev, "Failed to associate buffer\n");
				goto out;
			}
		}
	} else {
		struct device_node *np = rproc->dev.parent->of_node;

		/*
		 * If we don't have a dedicated buffer, just attempt to re-assign
		 * the reserved memory from our parent. A default memory-region
		 * at index 0 from the parent's memory-regions is assigned for
		 * the rvdev dev to allocate from. Failure is non-critical and
		 * the allocations will fall back to global pools, so don't
		 * check return value either.
		 */
		of_reserved_mem_device_init_by_idx(dev, np, 0);
	}

	/* Allocate virtio device */
	vdev = kzalloc(sizeof(*vdev), GFP_KERNEL);
	if (!vdev) {
		ret = -ENOMEM;
		goto out;
	}
	vdev->id.device	= id,
	vdev->config = &rproc_virtio_config_ops,
	vdev->dev.parent = dev;
	vdev->dev.release = rproc_virtio_dev_release;

	/*
	 * We're indirectly making a non-temporary copy of the rproc pointer
	 * here, because drivers probed with this vdev will indirectly
	 * access the wrapping rproc.
	 *
	 * Therefore we must increment the rproc refcount here, and decrement
	 * it _only_ when the vdev is released.
	 */
	get_device(&rproc->dev);

	/* Reference the vdev and vring allocations */
	kref_get(&rvdev->refcount);

	ret = register_virtio_device(vdev);
	if (ret) {
		put_device(&vdev->dev);
		dev_err(dev, "failed to register vdev: %d\n", ret);
		goto out;
	}

	dev_info(dev, "registered %s (type %d)\n", dev_name(&vdev->dev), id);

out:
	return ret;
}

/**
 * rproc_remove_virtio_dev() - remove an rproc-induced virtio device
 * @dev: the virtio device
 * @data: must be null
 *
 * This function unregisters an existing virtio device.
 */
int rproc_remove_virtio_dev(struct device *dev, void *data)
{
	struct virtio_device *vdev = dev_to_virtio(dev);

	unregister_virtio_device(vdev);
	return 0;
}
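
/*
 * Usage sketch (an assumption based on the (struct device *, void *)
 * signature, not shown in this file): the remoteproc core can tear these
 * vdevs down by iterating over the rvdev's children, e.g.:
 *
 *	device_for_each_child(&rvdev->dev, NULL, rproc_remove_virtio_dev);
 */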