// SPDX-License-Identifier: GPL-2.0-only
/*
 * Helpers for the host side of a virtio ring.
 *
 * Since these may be in userspace, we use (inline) accessors.
 */
#include <linux/compiler.h>
#include <linux/module.h>
#include <linux/vringh.h>
#include <linux/virtio_ring.h>
#include <linux/kernel.h>
#include <linux/ratelimit.h>
#include <linux/uaccess.h>
#include <linux/slab.h>
#include <linux/export.h>
#if IS_REACHABLE(CONFIG_VHOST_IOTLB)
#include <linux/bvec.h>
#include <linux/highmem.h>
#include <linux/vhost_iotlb.h>
#endif
#include <uapi/linux/virtio_config.h>

static __printf(1,2) __cold void vringh_bad(const char *fmt, ...)
{
        static DEFINE_RATELIMIT_STATE(vringh_rs,
                                      DEFAULT_RATELIMIT_INTERVAL,
                                      DEFAULT_RATELIMIT_BURST);
        if (__ratelimit(&vringh_rs)) {
                va_list ap;
                va_start(ap, fmt);
                printk(KERN_NOTICE "vringh:");
                vprintk(fmt, ap);
                va_end(ap);
        }
}

/* Returns vring->num if empty, -ve on error. */
static inline int __vringh_get_head(const struct vringh *vrh,
                                    int (*getu16)(const struct vringh *vrh,
                                                  u16 *val, const __virtio16 *p),
                                    u16 *last_avail_idx)
{
        u16 avail_idx, i, head;
        int err;

        err = getu16(vrh, &avail_idx, &vrh->vring.avail->idx);
        if (err) {
                vringh_bad("Failed to access avail idx at %p",
                           &vrh->vring.avail->idx);
                return err;
        }

        if (*last_avail_idx == avail_idx)
                return vrh->vring.num;

        /* Only get avail ring entries after they have been exposed by guest. */
        virtio_rmb(vrh->weak_barriers);

        i = *last_avail_idx & (vrh->vring.num - 1);

        err = getu16(vrh, &head, &vrh->vring.avail->ring[i]);
        if (err) {
                vringh_bad("Failed to read head: idx %d address %p",
                           *last_avail_idx, &vrh->vring.avail->ring[i]);
                return err;
        }

        if (head >= vrh->vring.num) {
                vringh_bad("Guest says index %u > %u is available",
                           head, vrh->vring.num);
                return -EINVAL;
        }

        (*last_avail_idx)++;
        return head;
}

/* Copy some bytes to/from the iovec. Returns num copied. */
static inline ssize_t vringh_iov_xfer(struct vringh *vrh,
                                      struct vringh_kiov *iov,
                                      void *ptr, size_t len,
                                      int (*xfer)(const struct vringh *vrh,
                                                  void *addr, void *ptr,
                                                  size_t len))
{
        int err, done = 0;

        while (len && iov->i < iov->used) {
                size_t partlen;

                partlen = min(iov->iov[iov->i].iov_len, len);
                err = xfer(vrh, iov->iov[iov->i].iov_base, ptr, partlen);
                if (err)
                        return err;
                done += partlen;
                len -= partlen;
                ptr += partlen;
                iov->consumed += partlen;
                iov->iov[iov->i].iov_len -= partlen;
                iov->iov[iov->i].iov_base += partlen;

                if (!iov->iov[iov->i].iov_len) {
                        /* Fix up old iov element then increment. */
                        iov->iov[iov->i].iov_len = iov->consumed;
                        iov->iov[iov->i].iov_base -= iov->consumed;

                        iov->consumed = 0;
                        iov->i++;
                }
        }
        return done;
}

/* May reduce *len if range is shorter. */
static inline bool range_check(struct vringh *vrh, u64 addr, size_t *len,
                               struct vringh_range *range,
                               bool (*getrange)(struct vringh *,
                                                u64, struct vringh_range *))
{
        if (addr < range->start || addr > range->end_incl) {
                if (!getrange(vrh, addr, range))
                        return false;
        }
        BUG_ON(addr < range->start || addr > range->end_incl);

        /* To end of memory? */
        if (unlikely(addr + *len == 0)) {
                if (range->end_incl == -1ULL)
                        return true;
                goto truncate;
        }

        /* Otherwise, don't wrap. */
        if (addr + *len < addr) {
                vringh_bad("Wrapping descriptor %zu@0x%llx",
                           *len, (unsigned long long)addr);
                return false;
        }

        if (unlikely(addr + *len - 1 > range->end_incl))
                goto truncate;
        return true;

truncate:
        *len = range->end_incl + 1 - addr;
        return true;
}
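
/*
 * Illustrative worked example (not part of the upstream API): if a range
 * covers guest addresses 0x0000-0x0fff and a descriptor asks for 0x400
 * bytes starting at 0x0f00, range_check() succeeds but truncates *len to
 * 0x100.  The caller then loops (see the "again:" label in __vringh_iov())
 * to map the remaining 0x300 bytes against the next range.
 */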

static inline bool no_range_check(struct vringh *vrh, u64 addr, size_t *len,
                                  struct vringh_range *range,
                                  bool (*getrange)(struct vringh *,
                                                   u64, struct vringh_range *))
{
        return true;
}

/* No reason for this code to be inline. */
static int move_to_indirect(const struct vringh *vrh,
                            int *up_next, u16 *i, void *addr,
                            const struct vring_desc *desc,
                            struct vring_desc **descs, int *desc_max)
{
        u32 len;

        /* Indirect tables can't have indirect. */
        if (*up_next != -1) {
                vringh_bad("Multilevel indirect %u->%u", *up_next, *i);
                return -EINVAL;
        }

        len = vringh32_to_cpu(vrh, desc->len);
        if (unlikely(len % sizeof(struct vring_desc))) {
                vringh_bad("Strange indirect len %u", desc->len);
                return -EINVAL;
        }

        /* We will check this when we follow it! */
        if (desc->flags & cpu_to_vringh16(vrh, VRING_DESC_F_NEXT))
                *up_next = vringh16_to_cpu(vrh, desc->next);
        else
                *up_next = -2;
        *descs = addr;
        *desc_max = len / sizeof(struct vring_desc);

        /* Now, start at the first indirect. */
        *i = 0;
        return 0;
}

static int resize_iovec(struct vringh_kiov *iov, gfp_t gfp)
{
        struct kvec *new;
        unsigned int flag, new_num = (iov->max_num & ~VRINGH_IOV_ALLOCATED) * 2;

        if (new_num < 8)
                new_num = 8;

        flag = (iov->max_num & VRINGH_IOV_ALLOCATED);
        if (flag)
                new = krealloc(iov->iov, new_num * sizeof(struct iovec), gfp);
        else {
                new = kmalloc_array(new_num, sizeof(struct iovec), gfp);
                if (new) {
                        memcpy(new, iov->iov,
                               iov->max_num * sizeof(struct iovec));
                        flag = VRINGH_IOV_ALLOCATED;
                }
        }
        if (!new)
                return -ENOMEM;
        iov->iov = new;
        iov->max_num = (new_num | flag);
        return 0;
}

static u16 __cold return_from_indirect(const struct vringh *vrh, int *up_next,
                                       struct vring_desc **descs, int *desc_max)
{
        u16 i = *up_next;

        *up_next = -1;
        *descs = vrh->vring.desc;
        *desc_max = vrh->vring.num;
        return i;
}

static int slow_copy(struct vringh *vrh, void *dst, const void *src,
                     bool (*rcheck)(struct vringh *vrh, u64 addr, size_t *len,
                                    struct vringh_range *range,
                                    bool (*getrange)(struct vringh *vrh,
                                                     u64,
                                                     struct vringh_range *)),
                     bool (*getrange)(struct vringh *vrh,
                                      u64 addr,
                                      struct vringh_range *r),
                     struct vringh_range *range,
                     int (*copy)(const struct vringh *vrh,
                                 void *dst, const void *src, size_t len))
{
        size_t part, len = sizeof(struct vring_desc);

        do {
                u64 addr;
                int err;

                part = len;
                addr = (u64)(unsigned long)src - range->offset;

                if (!rcheck(vrh, addr, &part, range, getrange))
                        return -EINVAL;

                err = copy(vrh, dst, src, part);
                if (err)
                        return err;

                dst += part;
                src += part;
                len -= part;
        } while (len);
        return 0;
}

static inline int
__vringh_iov(struct vringh *vrh, u16 i,
             struct vringh_kiov *riov,
             struct vringh_kiov *wiov,
             bool (*rcheck)(struct vringh *vrh, u64 addr, size_t *len,
                            struct vringh_range *range,
                            bool (*getrange)(struct vringh *, u64,
                                             struct vringh_range *)),
             bool (*getrange)(struct vringh *, u64, struct vringh_range *),
             gfp_t gfp,
             int (*copy)(const struct vringh *vrh,
                         void *dst, const void *src, size_t len))
{
        int err, count = 0, up_next, desc_max;
        struct vring_desc desc, *descs;
        struct vringh_range range = { -1ULL, 0 }, slowrange;
        bool slow = false;

        /* We start traversing vring's descriptor table. */
        descs = vrh->vring.desc;
        desc_max = vrh->vring.num;
        up_next = -1;

        /* You must want something! */
        if (WARN_ON(!riov && !wiov))
                return -EINVAL;

        if (riov)
                riov->i = riov->used = 0;
        if (wiov)
                wiov->i = wiov->used = 0;

        for (;;) {
                void *addr;
                struct vringh_kiov *iov;
                size_t len;

                if (unlikely(slow))
                        err = slow_copy(vrh, &desc, &descs[i], rcheck, getrange,
                                        &slowrange, copy);
                else
                        err = copy(vrh, &desc, &descs[i], sizeof(desc));
                if (unlikely(err))
                        goto fail;

                if (unlikely(desc.flags &
                             cpu_to_vringh16(vrh, VRING_DESC_F_INDIRECT))) {
                        u64 a = vringh64_to_cpu(vrh, desc.addr);

                        /* Make sure it's OK, and get offset. */
                        len = vringh32_to_cpu(vrh, desc.len);
                        if (!rcheck(vrh, a, &len, &range, getrange)) {
                                err = -EINVAL;
                                goto fail;
                        }

                        if (unlikely(len != vringh32_to_cpu(vrh, desc.len))) {
                                slow = true;
                                /* We need to save this range to use offset */
                                slowrange = range;
                        }

                        addr = (void *)(long)(a + range.offset);
                        err = move_to_indirect(vrh, &up_next, &i, addr, &desc,
                                               &descs, &desc_max);
                        if (err)
                                goto fail;
                        continue;
                }

                if (count++ == vrh->vring.num) {
                        vringh_bad("Descriptor loop in %p", descs);
                        err = -ELOOP;
                        goto fail;
                }

                if (desc.flags & cpu_to_vringh16(vrh, VRING_DESC_F_WRITE))
                        iov = wiov;
                else {
                        iov = riov;
                        if (unlikely(wiov && wiov->used)) {
                                vringh_bad("Readable desc %p after writable",
                                           &descs[i]);
                                err = -EINVAL;
                                goto fail;
                        }
                }

                if (!iov) {
                        vringh_bad("Unexpected %s desc",
                                   !wiov ? "writable" : "readable");
                        err = -EPROTO;
                        goto fail;
                }

        again:
                /* Make sure it's OK, and get offset. */
                len = vringh32_to_cpu(vrh, desc.len);
                if (!rcheck(vrh, vringh64_to_cpu(vrh, desc.addr), &len, &range,
                            getrange)) {
                        err = -EINVAL;
                        goto fail;
                }
                addr = (void *)(unsigned long)(vringh64_to_cpu(vrh, desc.addr) +
                                               range.offset);

                if (unlikely(iov->used == (iov->max_num & ~VRINGH_IOV_ALLOCATED))) {
                        err = resize_iovec(iov, gfp);
                        if (err)
                                goto fail;
                }

                iov->iov[iov->used].iov_base = addr;
                iov->iov[iov->used].iov_len = len;
                iov->used++;

                if (unlikely(len != vringh32_to_cpu(vrh, desc.len))) {
                        desc.len = cpu_to_vringh32(vrh,
                                   vringh32_to_cpu(vrh, desc.len) - len);
                        desc.addr = cpu_to_vringh64(vrh,
                                    vringh64_to_cpu(vrh, desc.addr) + len);
                        goto again;
                }

                if (desc.flags & cpu_to_vringh16(vrh, VRING_DESC_F_NEXT)) {
                        i = vringh16_to_cpu(vrh, desc.next);
                } else {
                        /* Just in case we need to finish traversing above. */
                        if (unlikely(up_next > 0)) {
                                i = return_from_indirect(vrh, &up_next,
                                                         &descs, &desc_max);
                                slow = false;
                        } else
                                break;
                }

                if (i >= desc_max) {
                        vringh_bad("Chained index %u > %u", i, desc_max);
                        err = -EINVAL;
                        goto fail;
                }
        }

        return 0;

fail:
        return err;
}

static inline int __vringh_complete(struct vringh *vrh,
                                    const struct vring_used_elem *used,
                                    unsigned int num_used,
                                    int (*putu16)(const struct vringh *vrh,
                                                  __virtio16 *p, u16 val),
                                    int (*putused)(const struct vringh *vrh,
                                                   struct vring_used_elem *dst,
                                                   const struct vring_used_elem
                                                   *src, unsigned num))
{
        struct vring_used *used_ring;
        int err;
        u16 used_idx, off;

        used_ring = vrh->vring.used;
        used_idx = vrh->last_used_idx + vrh->completed;

        off = used_idx % vrh->vring.num;

        /* Compiler knows num_used == 1 sometimes, hence extra check */
        if (num_used > 1 && unlikely(off + num_used >= vrh->vring.num)) {
                u16 part = vrh->vring.num - off;
                err = putused(vrh, &used_ring->ring[off], used, part);
                if (!err)
                        err = putused(vrh, &used_ring->ring[0], used + part,
                                      num_used - part);
        } else
                err = putused(vrh, &used_ring->ring[off], used, num_used);

        if (err) {
                vringh_bad("Failed to write %u used entries %u at %p",
                           num_used, off, &used_ring->ring[off]);
                return err;
        }

        /* Make sure buffer is written before we update index. */
        virtio_wmb(vrh->weak_barriers);

        err = putu16(vrh, &vrh->vring.used->idx, used_idx + num_used);
        if (err) {
                vringh_bad("Failed to update used index at %p",
                           &vrh->vring.used->idx);
                return err;
        }

        vrh->completed += num_used;
        return 0;
}
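
/*
 * Worked example (illustrative only): with vring.num == 8, off == 6 and
 * num_used == 4, the first putused() writes ring entries 6 and 7 and the
 * second writes entries 0 and 1; used->idx then advances by 4.  The 16-bit
 * used index is free-running and is only reduced modulo vring.num when it
 * is used to index the ring.
 */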

static inline int __vringh_need_notify(struct vringh *vrh,
                                       int (*getu16)(const struct vringh *vrh,
                                                     u16 *val,
                                                     const __virtio16 *p))
{
        bool notify;
        u16 used_event;
        int err;

        /* Flush out used index update. This is paired with the
         * barrier that the Guest executes when enabling
         * interrupts. */
        virtio_mb(vrh->weak_barriers);

        /* Old-style, without event indices. */
        if (!vrh->event_indices) {
                u16 flags;
                err = getu16(vrh, &flags, &vrh->vring.avail->flags);
                if (err) {
                        vringh_bad("Failed to get flags at %p",
                                   &vrh->vring.avail->flags);
                        return err;
                }
                return (!(flags & VRING_AVAIL_F_NO_INTERRUPT));
        }

        /* Modern: we know when other side wants to know. */
        err = getu16(vrh, &used_event, &vring_used_event(&vrh->vring));
        if (err) {
                vringh_bad("Failed to get used event idx at %p",
                           &vring_used_event(&vrh->vring));
                return err;
        }

        /* Just in case we added so many that we wrap. */
        if (unlikely(vrh->completed > 0xffff))
                notify = true;
        else
                notify = vring_need_event(used_event,
                                          vrh->last_used_idx + vrh->completed,
                                          vrh->last_used_idx);

        vrh->last_used_idx += vrh->completed;
        vrh->completed = 0;
        return notify;
}
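
/*
 * Illustrative example (not from upstream): with event indices enabled,
 * suppose last_used_idx was 10, we have completed 3 buffers, and the
 * driver's used_event is 11.  vring_need_event(11, 13, 10) is true, so we
 * report that a notification is needed; had used_event been 14, no
 * notification would be needed until a later completion crosses it.
 */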

static inline bool __vringh_notify_enable(struct vringh *vrh,
                                          int (*getu16)(const struct vringh *vrh,
                                                        u16 *val, const __virtio16 *p),
                                          int (*putu16)(const struct vringh *vrh,
                                                        __virtio16 *p, u16 val))
{
        u16 avail;

        if (!vrh->event_indices) {
                /* Old-school; update flags. */
                if (putu16(vrh, &vrh->vring.used->flags, 0) != 0) {
                        vringh_bad("Clearing used flags %p",
                                   &vrh->vring.used->flags);
                        return true;
                }
        } else {
                if (putu16(vrh, &vring_avail_event(&vrh->vring),
                           vrh->last_avail_idx) != 0) {
                        vringh_bad("Updating avail event index %p",
                                   &vring_avail_event(&vrh->vring));
                        return true;
                }
        }

        /* They could have slipped one in as we were doing that: make
         * sure it's written, then check again. */
        virtio_mb(vrh->weak_barriers);

        if (getu16(vrh, &avail, &vrh->vring.avail->idx) != 0) {
                vringh_bad("Failed to check avail idx at %p",
                           &vrh->vring.avail->idx);
                return true;
        }

        /* This is unlikely, so we just leave notifications enabled
         * (if we're using event_indices, we'll only get one
         * notification anyway). */
        return avail == vrh->last_avail_idx;
}

static inline void __vringh_notify_disable(struct vringh *vrh,
                                           int (*putu16)(const struct vringh *vrh,
                                                         __virtio16 *p, u16 val))
{
        if (!vrh->event_indices) {
                /* Old-school; update flags. */
                if (putu16(vrh, &vrh->vring.used->flags,
                           VRING_USED_F_NO_NOTIFY)) {
                        vringh_bad("Setting used flags %p",
                                   &vrh->vring.used->flags);
                }
        }
}

/* Userspace access helpers: in this case, addresses are really userspace. */
static inline int getu16_user(const struct vringh *vrh, u16 *val, const __virtio16 *p)
{
        __virtio16 v = 0;
        int rc = get_user(v, (__force __virtio16 __user *)p);
        *val = vringh16_to_cpu(vrh, v);
        return rc;
}

static inline int putu16_user(const struct vringh *vrh, __virtio16 *p, u16 val)
{
        __virtio16 v = cpu_to_vringh16(vrh, val);
        return put_user(v, (__force __virtio16 __user *)p);
}

static inline int copydesc_user(const struct vringh *vrh,
                                void *dst, const void *src, size_t len)
{
        return copy_from_user(dst, (__force void __user *)src, len) ?
                -EFAULT : 0;
}

static inline int putused_user(const struct vringh *vrh,
                               struct vring_used_elem *dst,
                               const struct vring_used_elem *src,
                               unsigned int num)
{
        return copy_to_user((__force void __user *)dst, src,
                            sizeof(*dst) * num) ? -EFAULT : 0;
}

static inline int xfer_from_user(const struct vringh *vrh, void *src,
                                 void *dst, size_t len)
{
        return copy_from_user(dst, (__force void __user *)src, len) ?
                -EFAULT : 0;
}

static inline int xfer_to_user(const struct vringh *vrh,
                               void *dst, void *src, size_t len)
{
        return copy_to_user((__force void __user *)dst, src, len) ?
                -EFAULT : 0;
}

/**
 * vringh_init_user - initialize a vringh for a userspace vring.
 * @vrh: the vringh to initialize.
 * @features: the feature bits for this ring.
 * @num: the number of elements.
 * @weak_barriers: true if we only need memory barriers, not I/O.
 * @desc: the userspace descriptor pointer.
 * @avail: the userspace avail pointer.
 * @used: the userspace used pointer.
 *
 * Returns an error if num is invalid: you should check pointers
 * yourself!
 */
int vringh_init_user(struct vringh *vrh, u64 features,
                     unsigned int num, bool weak_barriers,
                     vring_desc_t __user *desc,
                     vring_avail_t __user *avail,
                     vring_used_t __user *used)
{
        /* Sane power of 2 please! */
        if (!num || num > 0xffff || (num & (num - 1))) {
                vringh_bad("Bad ring size %u", num);
                return -EINVAL;
        }

        vrh->little_endian = (features & (1ULL << VIRTIO_F_VERSION_1));
        vrh->event_indices = (features & (1 << VIRTIO_RING_F_EVENT_IDX));
        vrh->weak_barriers = weak_barriers;
        vrh->completed = 0;
        vrh->last_avail_idx = 0;
        vrh->last_used_idx = 0;
        vrh->vring.num = num;
        /* vring expects kernel addresses, but only used via accessors. */
        vrh->vring.desc = (__force struct vring_desc *)desc;
        vrh->vring.avail = (__force struct vring_avail *)avail;
        vrh->vring.used = (__force struct vring_used *)used;
        return 0;
}
EXPORT_SYMBOL(vringh_init_user);
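
/*
 * Minimal usage sketch (illustrative only; the feature word and the
 * desc_uptr/avail_uptr/used_uptr pointers are placeholders, not part of
 * this file):
 *
 *      struct vringh vrh;
 *      int err;
 *
 *      err = vringh_init_user(&vrh, features, num, true,
 *                             (vring_desc_t __user *)desc_uptr,
 *                             (vring_avail_t __user *)avail_uptr,
 *                             (vring_used_t __user *)used_uptr);
 *      if (err)
 *              return err;
 */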

/**
 * vringh_getdesc_user - get next available descriptor from userspace ring.
 * @vrh: the userspace vring.
 * @riov: where to put the readable descriptors (or NULL)
 * @wiov: where to put the writable descriptors (or NULL)
 * @getrange: function to call to check ranges.
 * @head: head index we received, for passing to vringh_complete_user().
 *
 * Returns 0 if there was no descriptor, 1 if there was, or -errno.
 *
 * Note that on error return, you can tell the difference between an
 * invalid ring and a single invalid descriptor: in the former case,
 * *head will be vrh->vring.num. You may be able to ignore an invalid
 * descriptor, but there's not much you can do with an invalid ring.
 *
 * Note that you may need to clean up riov and wiov, even on error!
 */
int vringh_getdesc_user(struct vringh *vrh,
                        struct vringh_iov *riov,
                        struct vringh_iov *wiov,
                        bool (*getrange)(struct vringh *vrh,
                                         u64 addr, struct vringh_range *r),
                        u16 *head)
{
        int err;

        *head = vrh->vring.num;
        err = __vringh_get_head(vrh, getu16_user, &vrh->last_avail_idx);
        if (err < 0)
                return err;

        /* Empty... */
        if (err == vrh->vring.num)
                return 0;

        /* We need the layouts to be identical for this to work */
        BUILD_BUG_ON(sizeof(struct vringh_kiov) != sizeof(struct vringh_iov));
        BUILD_BUG_ON(offsetof(struct vringh_kiov, iov) !=
                     offsetof(struct vringh_iov, iov));
        BUILD_BUG_ON(offsetof(struct vringh_kiov, i) !=
                     offsetof(struct vringh_iov, i));
        BUILD_BUG_ON(offsetof(struct vringh_kiov, used) !=
                     offsetof(struct vringh_iov, used));
        BUILD_BUG_ON(offsetof(struct vringh_kiov, max_num) !=
                     offsetof(struct vringh_iov, max_num));
        BUILD_BUG_ON(sizeof(struct iovec) != sizeof(struct kvec));
        BUILD_BUG_ON(offsetof(struct iovec, iov_base) !=
                     offsetof(struct kvec, iov_base));
        BUILD_BUG_ON(offsetof(struct iovec, iov_len) !=
                     offsetof(struct kvec, iov_len));
        BUILD_BUG_ON(sizeof(((struct iovec *)NULL)->iov_base)
                     != sizeof(((struct kvec *)NULL)->iov_base));
        BUILD_BUG_ON(sizeof(((struct iovec *)NULL)->iov_len)
                     != sizeof(((struct kvec *)NULL)->iov_len));

        *head = err;
        err = __vringh_iov(vrh, *head, (struct vringh_kiov *)riov,
                           (struct vringh_kiov *)wiov,
                           range_check, getrange, GFP_KERNEL, copydesc_user);
        if (err)
                return err;

        return 1;
}
EXPORT_SYMBOL(vringh_getdesc_user);
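
/*
 * Typical host-side loop (a sketch under assumptions, not a definitive
 * implementation; my_getrange, buf and bytes_written are placeholders):
 *
 *      struct vringh_iov riov, wiov;
 *      u16 head;
 *      int err;
 *
 *      vringh_iov_init(&riov, NULL, 0);
 *      vringh_iov_init(&wiov, NULL, 0);
 *      err = vringh_getdesc_user(&vrh, &riov, &wiov, my_getrange, &head);
 *      if (err == 1) {
 *              ssize_t got = vringh_iov_pull_user(&riov, buf, sizeof(buf));
 *              // ... process the request, write any response via
 *              // vringh_iov_push_user(&wiov, ...), then publish it:
 *              vringh_complete_user(&vrh, head, bytes_written);
 *      }
 *      vringh_iov_cleanup(&riov);
 *      vringh_iov_cleanup(&wiov);
 */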

/**
 * vringh_iov_pull_user - copy bytes from vring_iov.
 * @riov: the riov as passed to vringh_getdesc_user() (updated as we consume)
 * @dst: the place to copy.
 * @len: the maximum length to copy.
 *
 * Returns the bytes copied <= len or a negative errno.
 */
ssize_t vringh_iov_pull_user(struct vringh_iov *riov, void *dst, size_t len)
{
        return vringh_iov_xfer(NULL, (struct vringh_kiov *)riov,
                               dst, len, xfer_from_user);
}
EXPORT_SYMBOL(vringh_iov_pull_user);

/**
 * vringh_iov_push_user - copy bytes into vring_iov.
 * @wiov: the wiov as passed to vringh_getdesc_user() (updated as we consume)
 * @src: the place to copy from.
 * @len: the maximum length to copy.
 *
 * Returns the bytes copied <= len or a negative errno.
 */
ssize_t vringh_iov_push_user(struct vringh_iov *wiov,
                             const void *src, size_t len)
{
        return vringh_iov_xfer(NULL, (struct vringh_kiov *)wiov,
                               (void *)src, len, xfer_to_user);
}
EXPORT_SYMBOL(vringh_iov_push_user);

/**
 * vringh_abandon_user - we've decided not to handle the descriptor(s).
 * @vrh: the vring.
 * @num: the number of descriptors to put back (i.e. the number of
 * vringh_getdesc_user() calls to undo).
 *
 * The next vringh_getdesc_user() will return the old descriptor(s) again.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 753) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 754) void vringh_abandon_user(struct vringh *vrh, unsigned int num)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 755) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 756) /* We only update vring_avail_event(vr) when we want to be notified,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 757) * so we haven't changed that yet. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 758) vrh->last_avail_idx -= num;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 759) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 760) EXPORT_SYMBOL(vringh_abandon_user);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 761)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 762) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 763) * vringh_complete_user - we've finished with descriptor, publish it.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 764) * @vrh: the vring.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 765) * @head: the head as filled in by vringh_getdesc_user.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 766) * @len: the length of data we have written.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 767) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 768) * You should check vringh_need_notify_user() after one or more calls
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 769) * to this function.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 770) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 771) int vringh_complete_user(struct vringh *vrh, u16 head, u32 len)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 772) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 773) struct vring_used_elem used;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 774)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 775) used.id = cpu_to_vringh32(vrh, head);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 776) used.len = cpu_to_vringh32(vrh, len);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 777) return __vringh_complete(vrh, &used, 1, putu16_user, putused_user);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 778) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 779) EXPORT_SYMBOL(vringh_complete_user);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 780)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 781) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 782) * vringh_complete_multi_user - we've finished with many descriptors.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 783) * @vrh: the vring.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 784) * @used: the head, length pairs.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 785) * @num_used: the number of used elements.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 786) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 787) * You should check vringh_need_notify_user() after one or more calls
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 788) * to this function.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 789) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 790) int vringh_complete_multi_user(struct vringh *vrh,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 791) const struct vring_used_elem used[],
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 792) unsigned num_used)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 793) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 794) return __vringh_complete(vrh, used, num_used,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 795) putu16_user, putused_user);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 796) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 797) EXPORT_SYMBOL(vringh_complete_multi_user);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 798)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 799) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 800) * vringh_notify_enable_user - we want to know if something changes.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 801) * @vrh: the vring.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 802) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 803) * This always enables notifications, but returns false if there are
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 804) * now more buffers available in the vring.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 805) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 806) bool vringh_notify_enable_user(struct vringh *vrh)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 807) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 808) return __vringh_notify_enable(vrh, getu16_user, putu16_user);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 809) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 810) EXPORT_SYMBOL(vringh_notify_enable_user);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 811)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 812) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 813) * vringh_notify_disable_user - don't tell us if something changes.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 814) * @vrh: the vring.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 815) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 816) * This is our normal running state: we disable and then only enable when
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 817) * we're going to sleep.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 818) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 819) void vringh_notify_disable_user(struct vringh *vrh)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 820) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 821) __vringh_notify_disable(vrh, putu16_user);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 822) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 823) EXPORT_SYMBOL(vringh_notify_disable_user);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 824)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 825) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 826) * vringh_need_notify_user - must we tell the other side about used buffers?
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 827) * @vrh: the vring we've called vringh_complete_user() on.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 828) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 829) * Returns -errno or 0 if we don't need to tell the other side, 1 if we do.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 830) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 831) int vringh_need_notify_user(struct vringh *vrh)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 832) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 833) return __vringh_need_notify(vrh, getu16_user);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 834) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 835) EXPORT_SYMBOL(vringh_need_notify_user);
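
/*
 * Example (illustrative sketch): the usual service-loop shape keeps
 * notifications disabled while actively draining the ring and re-enables
 * them just before sleeping.  vringh_notify_enable_user() returns false
 * when buffers were added in the meantime, so we loop again instead of
 * sleeping.  service_ring() and vq are hypothetical.
 *
 *	for (;;) {
 *		vringh_notify_disable_user(vrh);
 *		service_ring(vq);
 *		if (vringh_notify_enable_user(vrh))
 *			break;
 *	}
 */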
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 836)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 837) /* Kernelspace access helpers. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 838) static inline int getu16_kern(const struct vringh *vrh,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 839) u16 *val, const __virtio16 *p)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 840) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 841) *val = vringh16_to_cpu(vrh, READ_ONCE(*p));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 842) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 843) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 844)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 845) static inline int putu16_kern(const struct vringh *vrh, __virtio16 *p, u16 val)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 846) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 847) WRITE_ONCE(*p, cpu_to_vringh16(vrh, val));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 848) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 849) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 850)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 851) static inline int copydesc_kern(const struct vringh *vrh,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 852) void *dst, const void *src, size_t len)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 853) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 854) memcpy(dst, src, len);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 855) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 856) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 857)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 858) static inline int putused_kern(const struct vringh *vrh,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 859) struct vring_used_elem *dst,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 860) const struct vring_used_elem *src,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 861) unsigned int num)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 862) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 863) memcpy(dst, src, num * sizeof(*dst));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 864) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 865) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 866)
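/*
 * xfer_kern() and kern_xfer() are both plain memcpy()s; two helpers exist
 * only because the iov element is always passed as the first data argument,
 * so the pull path (xfer_kern) treats it as the source while the push path
 * (kern_xfer) treats it as the destination.
 */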
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 867) static inline int xfer_kern(const struct vringh *vrh, void *src,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 868) void *dst, size_t len)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 869) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 870) memcpy(dst, src, len);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 871) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 872) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 873)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 874) static inline int kern_xfer(const struct vringh *vrh, void *dst,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 875) void *src, size_t len)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 876) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 877) memcpy(dst, src, len);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 878) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 879) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 880)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 881) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 882) * vringh_init_kern - initialize a vringh for a kernelspace vring.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 883) * @vrh: the vringh to initialize.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 884) * @features: the feature bits for this ring.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 885) * @num: the number of elements.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 886) * @weak_barriers: true if we only need memory barriers, not I/O.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 887) * @desc: the kernelspace descriptor pointer.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 888) * @avail: the kernelspace avail pointer.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 889) * @used: the kernelspace used pointer.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 890) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 891) * Returns an error if num is invalid.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 892) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 893) int vringh_init_kern(struct vringh *vrh, u64 features,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 894) unsigned int num, bool weak_barriers,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 895) struct vring_desc *desc,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 896) struct vring_avail *avail,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 897) struct vring_used *used)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 898) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 899) /* Sane power of 2 please! */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 900) if (!num || num > 0xffff || (num & (num - 1))) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 901) vringh_bad("Bad ring size %u", num);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 902) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 903) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 904)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 905) vrh->little_endian = (features & (1ULL << VIRTIO_F_VERSION_1));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 906) vrh->event_indices = (features & (1 << VIRTIO_RING_F_EVENT_IDX));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 907) vrh->weak_barriers = weak_barriers;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 908) vrh->completed = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 909) vrh->last_avail_idx = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 910) vrh->last_used_idx = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 911) vrh->vring.num = num;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 912) vrh->vring.desc = desc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 913) vrh->vring.avail = avail;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 914) vrh->vring.used = used;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 915) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 916) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 917) EXPORT_SYMBOL(vringh_init_kern);
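
/*
 * Example (illustrative sketch, assuming the vring_init()/vring_size()
 * helpers from <uapi/linux/virtio_ring.h>): a driver that owns the ring
 * memory can lay out a 256-entry vring and wrap it in a vringh; num must
 * be a power of 2 no larger than 0xffff.  mem, vrh and the feature bits
 * chosen here are hypothetical.
 *
 *	struct vring vring;
 *	void *mem;
 *	int err;
 *
 *	mem = kzalloc(vring_size(256, PAGE_SIZE), GFP_KERNEL);
 *	if (!mem)
 *		return -ENOMEM;
 *	vring_init(&vring, 256, mem, PAGE_SIZE);
 *	err = vringh_init_kern(vrh, 1ULL << VIRTIO_F_VERSION_1, 256, true,
 *			       vring.desc, vring.avail, vring.used);
 *	if (err)
 *		return err;
 */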
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 918)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 919) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 920) * vringh_getdesc_kern - get next available descriptor from kernelspace ring.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 921) * @vrh: the kernelspace vring.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 922) * @riov: where to put the readable descriptors (or NULL)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 923) * @wiov: where to put the writable descriptors (or NULL)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 924) * @head: head index we received, for passing to vringh_complete_kern().
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 925) * @gfp: flags for allocating larger riov/wiov.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 926) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 927) * Returns 0 if there was no descriptor, 1 if there was, or -errno.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 928) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 929) * Note that on error return, you can tell the difference between an
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 930) * invalid ring and a single invalid descriptor: in the former case,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 931) * *head will be vrh->vring.num. You may be able to ignore an invalid
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 932) * descriptor, but there's not much you can do with an invalid ring.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 933) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 934) * Note that you may need to clean up riov and wiov, even on error!
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 935) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 936) int vringh_getdesc_kern(struct vringh *vrh,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 937) struct vringh_kiov *riov,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 938) struct vringh_kiov *wiov,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 939) u16 *head,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 940) gfp_t gfp)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 941) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 942) int err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 943)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 944) err = __vringh_get_head(vrh, getu16_kern, &vrh->last_avail_idx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 945) if (err < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 946) return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 947)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 948) /* Empty... */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 949) if (err == vrh->vring.num)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 950) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 951)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 952) *head = err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 953) err = __vringh_iov(vrh, *head, riov, wiov, no_range_check, NULL,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 954) gfp, copydesc_kern);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 955) if (err)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 956) return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 957)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 958) return 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 959) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 960) EXPORT_SYMBOL(vringh_getdesc_kern);
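
/*
 * Example (illustrative sketch, assuming the vringh_kiov_init() and
 * vringh_kiov_cleanup() helpers from <linux/vringh.h>): drain everything
 * currently available.  The on-stack kvec arrays are only a starting
 * point; with a non-zero @gfp a longer chain gets a bigger array
 * allocated for it.  handle_one_request() is a hypothetical helper.
 *
 *	struct kvec rkvec[8], wkvec[8];
 *	struct vringh_kiov riov, wiov;
 *	u16 head;
 *	int err;
 *
 *	vringh_kiov_init(&riov, rkvec, ARRAY_SIZE(rkvec));
 *	vringh_kiov_init(&wiov, wkvec, ARRAY_SIZE(wkvec));
 *
 *	while ((err = vringh_getdesc_kern(vrh, &riov, &wiov, &head,
 *					  GFP_KERNEL)) == 1)
 *		handle_one_request(vrh, &riov, &wiov, head);
 *
 *	vringh_kiov_cleanup(&riov);
 *	vringh_kiov_cleanup(&wiov);
 *	if (err < 0)
 *		pr_err("vring is broken: %d\n", err);
 */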
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 961)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 962) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 963) * vringh_iov_pull_kern - copy bytes from a vringh_kiov.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 964) * @riov: the riov as passed to vringh_getdesc_kern() (updated as we consume)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 965) * @dst: the place to copy.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 966) * @len: the maximum length to copy.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 967) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 968) * Returns the bytes copied <= len or a negative errno.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 969) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 970) ssize_t vringh_iov_pull_kern(struct vringh_kiov *riov, void *dst, size_t len)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 971) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 972) return vringh_iov_xfer(NULL, riov, dst, len, xfer_kern);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 973) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 974) EXPORT_SYMBOL(vringh_iov_pull_kern);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 975)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 976) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 977) * vringh_iov_push_kern - copy bytes into a vringh_kiov.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 978) * @wiov: the wiov as passed to vringh_getdesc_kern() (updated as we consume)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 979) * @src: the place to copy from.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 980) * @len: the maximum length to copy.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 981) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 982) * Returns the bytes copied <= len or a negative errno.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 983) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 984) ssize_t vringh_iov_push_kern(struct vringh_kiov *wiov,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 985) const void *src, size_t len)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 986) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 987) return vringh_iov_xfer(NULL, wiov, (void *)src, len, kern_xfer);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 988) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 989) EXPORT_SYMBOL(vringh_iov_push_kern);
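
/*
 * Example (illustrative sketch): a request/response style device copies
 * the request out of the readable segments and the reply into the
 * writable ones; both calls advance the iov as they consume it.  riov and
 * wiov are the iovs filled by vringh_getdesc_kern(); req, resp and
 * build_reply() are hypothetical.
 *
 *	ssize_t n;
 *
 *	n = vringh_iov_pull_kern(&riov, &req, sizeof(req));
 *	if (n != sizeof(req))
 *		return n < 0 ? n : -EINVAL;
 *	build_reply(&req, &resp);
 *	n = vringh_iov_push_kern(&wiov, &resp, sizeof(resp));
 *	if (n != sizeof(resp))
 *		return n < 0 ? n : -ENOSPC;
 */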
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 990)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 991) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 992) * vringh_abandon_kern - we've decided not to handle the descriptor(s).
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 993) * @vrh: the vring.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 994) * @num: the number of descriptors to put back (i.e. num
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 995) * vringh_getdesc_kern() calls to undo).
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 996) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 997) * The next vringh_getdesc_kern() will return the old descriptor(s) again.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 998) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 999) void vringh_abandon_kern(struct vringh *vrh, unsigned int num)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1000) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1001) /* We only update vring_avail_event(vr) when we want to be notified,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1002) * so we haven't changed that yet. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1003) vrh->last_avail_idx -= num;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1004) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1005) EXPORT_SYMBOL(vringh_abandon_kern);
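
/*
 * Example (illustrative sketch): if the device cannot service the chain
 * right now (say, some internal resource is exhausted), it can hand the
 * descriptor back instead of completing it and retry later.
 * device_has_room() and dev are hypothetical.
 *
 *	if (!device_has_room(dev)) {
 *		vringh_abandon_kern(vrh, 1);
 *		return -EBUSY;
 *	}
 */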
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1006)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1007) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1008) * vringh_complete_kern - we've finished with a descriptor, publish it.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1009) * @vrh: the vring.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1010) * @head: the head as filled in by vringh_getdesc_kern.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1011) * @len: the length of data we have written.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1012) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1013) * You should check vringh_need_notify_kern() after one or more calls
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1014) * to this function.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1015) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1016) int vringh_complete_kern(struct vringh *vrh, u16 head, u32 len)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1017) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1018) struct vring_used_elem used;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1019)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1020) used.id = cpu_to_vringh32(vrh, head);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1021) used.len = cpu_to_vringh32(vrh, len);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1022)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1023) return __vringh_complete(vrh, &used, 1, putu16_kern, putused_kern);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1024) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1025) EXPORT_SYMBOL(vringh_complete_kern);
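
/*
 * Example (illustrative sketch): publish how many bytes were written into
 * the writable part of the chain (for instance the value returned by
 * vringh_iov_push_kern()), then kick the other side only if needed.
 * written, dev and my_kick() are hypothetical.
 *
 *	int err;
 *
 *	err = vringh_complete_kern(vrh, head, written);
 *	if (err)
 *		pr_err("failed to publish used buffer: %d\n", err);
 *	else if (vringh_need_notify_kern(vrh) > 0)
 *		my_kick(dev);
 */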
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1026)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1027) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1028) * vringh_notify_enable_kern - we want to know if something changes.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1029) * @vrh: the vring.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1030) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1031) * This always enables notifications, but returns false if there are
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1032) * now more buffers available in the vring.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1033) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1034) bool vringh_notify_enable_kern(struct vringh *vrh)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1035) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1036) return __vringh_notify_enable(vrh, getu16_kern, putu16_kern);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1037) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1038) EXPORT_SYMBOL(vringh_notify_enable_kern);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1039)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1040) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1041) * vringh_notify_disable_kern - don't tell us if something changes.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1042) * @vrh: the vring.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1043) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1044) * This is our normal running state: we disable and then only enable when
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1045) * we're going to sleep.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1046) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1047) void vringh_notify_disable_kern(struct vringh *vrh)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1048) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1049) __vringh_notify_disable(vrh, putu16_kern);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1050) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1051) EXPORT_SYMBOL(vringh_notify_disable_kern);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1052)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1053) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1054) * vringh_need_notify_kern - must we tell the other side about used buffers?
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1055) * @vrh: the vring we've called vringh_complete_kern() on.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1056) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1057) * Returns -errno or 0 if we don't need to tell the other side, 1 if we do.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1058) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1059) int vringh_need_notify_kern(struct vringh *vrh)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1060) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1061) return __vringh_need_notify(vrh, getu16_kern);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1062) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1063) EXPORT_SYMBOL(vringh_need_notify_kern);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1064)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1065) #if IS_REACHABLE(CONFIG_VHOST_IOTLB)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1066)
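/*
 * Translate the contiguous range [addr, addr + len) of the ring's address
 * space into up to @iov_size bio_vec entries using the vhost IOTLB.
 * Returns the number of entries filled in, -EINVAL if the range hits a
 * hole in the IOTLB, -EPERM if a mapping lacks the requested access, or
 * -ENOBUFS if iov[] is too small.
 */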
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1067) static int iotlb_translate(const struct vringh *vrh,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1068) u64 addr, u64 len, struct bio_vec iov[],
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1069) int iov_size, u32 perm)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1070) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1071) struct vhost_iotlb_map *map;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1072) struct vhost_iotlb *iotlb = vrh->iotlb;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1073) int ret = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1074) u64 s = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1075)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1076) while (len > s) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1077) u64 size, pa, pfn;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1078)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1079) if (unlikely(ret >= iov_size)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1080) ret = -ENOBUFS;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1081) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1082) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1083)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1084) map = vhost_iotlb_itree_first(iotlb, addr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1085) addr + len - 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1086) if (!map || map->start > addr) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1087) ret = -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1088) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1089) } else if (!(map->perm & perm)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1090) ret = -EPERM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1091) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1092) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1093)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1094) size = map->size - addr + map->start;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1095) pa = map->addr + addr - map->start;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1096) pfn = pa >> PAGE_SHIFT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1097) iov[ret].bv_page = pfn_to_page(pfn);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1098) iov[ret].bv_len = min(len - s, size);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1099) iov[ret].bv_offset = pa & (PAGE_SIZE - 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1100) s += size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1101) addr += size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1102) ++ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1103) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1104)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1105) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1106) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1107)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1108) static inline int copy_from_iotlb(const struct vringh *vrh, void *dst,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1109) void *src, size_t len)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1110) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1111) struct iov_iter iter;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1112) struct bio_vec iov[16];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1113) int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1114)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1115) ret = iotlb_translate(vrh, (u64)(uintptr_t)src,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1116) len, iov, 16, VHOST_MAP_RO);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1117) if (ret < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1118) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1119)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1120) iov_iter_bvec(&iter, READ, iov, ret, len);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1121)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1122) ret = copy_from_iter(dst, len, &iter);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1123)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1124) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1125) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1126)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1127) static inline int copy_to_iotlb(const struct vringh *vrh, void *dst,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1128) void *src, size_t len)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1129) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1130) struct iov_iter iter;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1131) struct bio_vec iov[16];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1132) int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1133)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1134) ret = iotlb_translate(vrh, (u64)(uintptr_t)dst,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1135) len, iov, 16, VHOST_MAP_WO);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1136) if (ret < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1137) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1138)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1139) iov_iter_bvec(&iter, WRITE, iov, ret, len);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1140)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1141) return copy_to_iter(src, len, &iter);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1142) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1143)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1144) static inline int getu16_iotlb(const struct vringh *vrh,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1145) u16 *val, const __virtio16 *p)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1146) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1147) struct bio_vec iov;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1148) void *kaddr, *from;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1149) int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1150)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1151) /* Atomic read is needed for getu16 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1152) ret = iotlb_translate(vrh, (u64)(uintptr_t)p, sizeof(*p),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1153) &iov, 1, VHOST_MAP_RO);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1154) if (ret < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1155) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1156)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1157) kaddr = kmap_atomic(iov.bv_page);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1158) from = kaddr + iov.bv_offset;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1159) *val = vringh16_to_cpu(vrh, READ_ONCE(*(__virtio16 *)from));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1160) kunmap_atomic(kaddr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1161)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1162) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1163) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1164)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1165) static inline int putu16_iotlb(const struct vringh *vrh,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1166) __virtio16 *p, u16 val)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1167) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1168) struct bio_vec iov;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1169) void *kaddr, *to;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1170) int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1171)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1172) /* Atomic write is needed for putu16 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1173) ret = iotlb_translate(vrh, (u64)(uintptr_t)p, sizeof(*p),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1174) &iov, 1, VHOST_MAP_WO);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1175) if (ret < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1176) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1177)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1178) kaddr = kmap_atomic(iov.bv_page);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1179) to = kaddr + iov.bv_offset;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1180) WRITE_ONCE(*(__virtio16 *)to, cpu_to_vringh16(vrh, val));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1181) kunmap_atomic(kaddr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1182)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1183) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1184) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1185)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1186) static inline int copydesc_iotlb(const struct vringh *vrh,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1187) void *dst, const void *src, size_t len)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1188) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1189) int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1190)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1191) ret = copy_from_iotlb(vrh, dst, (void *)src, len);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1192) if (ret != len)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1193) return -EFAULT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1194)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1195) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1196) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1197)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1198) static inline int xfer_from_iotlb(const struct vringh *vrh, void *src,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1199) void *dst, size_t len)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1200) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1201) int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1202)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1203) ret = copy_from_iotlb(vrh, dst, src, len);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1204) if (ret != len)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1205) return -EFAULT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1206)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1207) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1208) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1209)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1210) static inline int xfer_to_iotlb(const struct vringh *vrh,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1211) void *dst, void *src, size_t len)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1212) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1213) int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1214)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1215) ret = copy_to_iotlb(vrh, dst, src, len);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1216) if (ret != len)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1217) return -EFAULT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1218)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1219) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1220) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1221)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1222) static inline int putused_iotlb(const struct vringh *vrh,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1223) struct vring_used_elem *dst,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1224) const struct vring_used_elem *src,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1225) unsigned int num)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1226) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1227) int size = num * sizeof(*dst);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1228) int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1229)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1230) ret = copy_to_iotlb(vrh, dst, (void *)src, size);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1231) if (ret != size)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1232) return -EFAULT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1233)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1234) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1235) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1236)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1237) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1238) * vringh_init_iotlb - initialize a vringh for a ring with IOTLB.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1239) * @vrh: the vringh to initialize.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1240) * @features: the feature bits for this ring.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1241) * @num: the number of elements.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1242) * @weak_barriers: true if we only need memory barriers, not I/O.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1243) * @desc: the descriptor pointer, as an address translatable by the IOTLB.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1244) * @avail: the avail pointer, as an address translatable by the IOTLB.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1245) * @used: the used pointer, as an address translatable by the IOTLB.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1246) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1247) * Returns an error if num is invalid.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1248) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1249) int vringh_init_iotlb(struct vringh *vrh, u64 features,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1250) unsigned int num, bool weak_barriers,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1251) struct vring_desc *desc,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1252) struct vring_avail *avail,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1253) struct vring_used *used)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1254) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1255) return vringh_init_kern(vrh, features, num, weak_barriers,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1256) desc, avail, used);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1257) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1258) EXPORT_SYMBOL(vringh_init_iotlb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1259)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1260) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1261) * vringh_set_iotlb - associate an IOTLB with this vringh.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1262) * @vrh: the vring
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1263) * @iotlb: iotlb associated with this vring
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1264) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1265) void vringh_set_iotlb(struct vringh *vrh, struct vhost_iotlb *iotlb)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1266) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1267) vrh->iotlb = iotlb;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1268) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1269) EXPORT_SYMBOL(vringh_set_iotlb);
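
/*
 * Example (illustrative sketch, in the style of a vDPA device): the ring
 * addresses come from the other side of the IOTLB, so they are plain
 * casts here and every later access goes through iotlb_translate().
 * desc_addr, driver_addr, device_addr, features, num and dev->iotlb are
 * hypothetical.
 *
 *	int err;
 *
 *	err = vringh_init_iotlb(vrh, features, num, false,
 *				(struct vring_desc *)(uintptr_t)desc_addr,
 *				(struct vring_avail *)(uintptr_t)driver_addr,
 *				(struct vring_used *)(uintptr_t)device_addr);
 *	if (err)
 *		return err;
 *	vringh_set_iotlb(vrh, dev->iotlb);
 */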
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1270)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1271) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1272) * vringh_getdesc_iotlb - get next available descriptor from ring with
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1273) * IOTLB.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1274) * @vrh: the kernelspace vring.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1275) * @riov: where to put the readable descriptors (or NULL)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1276) * @wiov: where to put the writable descriptors (or NULL)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1277) * @head: head index we received, for passing to vringh_complete_iotlb().
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1278) * @gfp: flags for allocating larger riov/wiov.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1279) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1280) * Returns 0 if there was no descriptor, 1 if there was, or -errno.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1281) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1282) * Note that on error return, you can tell the difference between an
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1283) * invalid ring and a single invalid descriptor: in the former case,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1284) * *head will be vrh->vring.num. You may be able to ignore an invalid
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1285) * descriptor, but there's not much you can do with an invalid ring.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1286) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1287) * Note that you may need to clean up riov and wiov, even on error!
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1288) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1289) int vringh_getdesc_iotlb(struct vringh *vrh,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1290) struct vringh_kiov *riov,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1291) struct vringh_kiov *wiov,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1292) u16 *head,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1293) gfp_t gfp)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1294) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1295) int err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1296)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1297) err = __vringh_get_head(vrh, getu16_iotlb, &vrh->last_avail_idx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1298) if (err < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1299) return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1300)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1301) /* Empty... */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1302) if (err == vrh->vring.num)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1303) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1304)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1305) *head = err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1306) err = __vringh_iov(vrh, *head, riov, wiov, no_range_check, NULL,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1307) gfp, copydesc_iotlb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1308) if (err)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1309) return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1310)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1311) return 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1312) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1313) EXPORT_SYMBOL(vringh_getdesc_iotlb);
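
/*
 * Example (illustrative sketch): servicing one request looks like the
 * kernelspace case, just with the _iotlb variants.  A return of 0 from
 * vringh_getdesc_iotlb() means the ring is empty, a negative value means
 * it is broken.  riov/wiov are vringh_kiov structures prepared by the
 * caller; req, resp, build_reply(), dev and my_kick() are hypothetical.
 *
 *	u16 head;
 *	ssize_t n;
 *	int err;
 *
 *	err = vringh_getdesc_iotlb(vrh, &riov, &wiov, &head, GFP_ATOMIC);
 *	if (err != 1)
 *		return err;
 *	n = vringh_iov_pull_iotlb(vrh, &riov, &req, sizeof(req));
 *	if (n != sizeof(req))
 *		return -EINVAL;
 *	build_reply(&req, &resp);
 *	n = vringh_iov_push_iotlb(vrh, &wiov, &resp, sizeof(resp));
 *	if (n != sizeof(resp))
 *		return -ENOSPC;
 *	err = vringh_complete_iotlb(vrh, head, n);
 *	if (!err && vringh_need_notify_iotlb(vrh) > 0)
 *		my_kick(dev);
 */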
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1314)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1315) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1316) * vringh_iov_pull_iotlb - copy bytes from a vringh_kiov.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1317) * @vrh: the vring.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1318) * @riov: the riov as passed to vringh_getdesc_iotlb() (updated as we consume)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1319) * @dst: the place to copy.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1320) * @len: the maximum length to copy.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1321) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1322) * Returns the bytes copied <= len or a negative errno.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1323) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1324) ssize_t vringh_iov_pull_iotlb(struct vringh *vrh,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1325) struct vringh_kiov *riov,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1326) void *dst, size_t len)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1327) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1328) return vringh_iov_xfer(vrh, riov, dst, len, xfer_from_iotlb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1329) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1330) EXPORT_SYMBOL(vringh_iov_pull_iotlb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1331)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1332) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1333) * vringh_iov_push_iotlb - copy bytes into a vringh_kiov.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1334) * @vrh: the vring.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1335) * @wiov: the wiov as passed to vringh_getdesc_iotlb() (updated as we consume)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1336) * @src: the place to copy from.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1337) * @len: the maximum length to copy.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1338) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1339) * Returns the bytes copied <= len or a negative errno.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1340) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1341) ssize_t vringh_iov_push_iotlb(struct vringh *vrh,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1342) struct vringh_kiov *wiov,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1343) const void *src, size_t len)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1344) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1345) return vringh_iov_xfer(vrh, wiov, (void *)src, len, xfer_to_iotlb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1346) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1347) EXPORT_SYMBOL(vringh_iov_push_iotlb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1348)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1349) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1350) * vringh_abandon_iotlb - we've decided not to handle the descriptor(s).
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1351) * @vrh: the vring.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1352) * @num: the number of descriptors to put back (i.e. num
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1353) * vringh_getdesc_iotlb() calls to undo).
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1354) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1355) * The next vringh_getdesc_iotlb() will return the old descriptor(s) again.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1356) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1357) void vringh_abandon_iotlb(struct vringh *vrh, unsigned int num)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1358) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1359) /* We only update vring_avail_event(vr) when we want to be notified,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1360) * so we haven't changed that yet.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1361) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1362) vrh->last_avail_idx -= num;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1363) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1364) EXPORT_SYMBOL(vringh_abandon_iotlb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1365)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1366) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1367) * vringh_complete_iotlb - we've finished with a descriptor, publish it.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1368) * @vrh: the vring.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1369) * @head: the head as filled in by vringh_getdesc_iotlb.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1370) * @len: the length of data we have written.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1371) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1372) * You should check vringh_need_notify_iotlb() after one or more calls
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1373) * to this function.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1374) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1375) int vringh_complete_iotlb(struct vringh *vrh, u16 head, u32 len)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1376) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1377) struct vring_used_elem used;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1378)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1379) used.id = cpu_to_vringh32(vrh, head);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1380) used.len = cpu_to_vringh32(vrh, len);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1381)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1382) return __vringh_complete(vrh, &used, 1, putu16_iotlb, putused_iotlb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1383) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1384) EXPORT_SYMBOL(vringh_complete_iotlb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1385)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1386) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1387) * vringh_notify_enable_iotlb - we want to know if something changes.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1388) * @vrh: the vring.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1389) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1390) * This always enables notifications, but returns false if there are
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1391) * now more buffers available in the vring.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1392) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1393) bool vringh_notify_enable_iotlb(struct vringh *vrh)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1394) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1395) return __vringh_notify_enable(vrh, getu16_iotlb, putu16_iotlb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1396) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1397) EXPORT_SYMBOL(vringh_notify_enable_iotlb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1398)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1399) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1400) * vringh_notify_disable_iotlb - don't tell us if something changes.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1401) * @vrh: the vring.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1402) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1403) * This is our normal running state: we disable and then only enable when
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1404) * we're going to sleep.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1405) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1406) void vringh_notify_disable_iotlb(struct vringh *vrh)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1407) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1408) __vringh_notify_disable(vrh, putu16_iotlb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1409) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1410) EXPORT_SYMBOL(vringh_notify_disable_iotlb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1411)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1412) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1413) * vringh_need_notify_iotlb - must we tell the other side about used buffers?
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1414) * @vrh: the vring we've called vringh_complete_iotlb() on.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1415) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1416) * Returns -errno or 0 if we don't need to tell the other side, 1 if we do.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1417) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1418) int vringh_need_notify_iotlb(struct vringh *vrh)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1419) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1420) return __vringh_need_notify(vrh, getu16_iotlb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1421) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1422) EXPORT_SYMBOL(vringh_need_notify_iotlb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1423)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1424) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1425)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1426) MODULE_LICENSE("GPL");