// SPDX-License-Identifier: GPL-2.0
/* Simple test of virtio code, entirely in userspace. */
#define _GNU_SOURCE
#include <sched.h>
#include <err.h>
#include <linux/kernel.h>
#include <linux/err.h>
#include <linux/virtio.h>
#include <linux/vringh.h>
#include <linux/virtio_ring.h>
#include <linux/virtio_config.h>
#include <linux/uaccess.h>
#include <sys/types.h>
#include <sys/stat.h>
#include <sys/mman.h>
#include <sys/wait.h>
#include <fcntl.h>

#define USER_MEM (1024*1024)
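/*
 * Hooks consumed by the userspace kernel shims: __user_addr_min/max bound
 * the region treated as user memory, __kmalloc_fake redirects the next
 * fake kmalloc() to a caller-chosen address, and the __kfree_ignore_*
 * range makes kfree() skip pointers handed out from that memory (see the
 * indirect-descriptor tests below).
 */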
void *__user_addr_min, *__user_addr_max;
void *__kmalloc_fake, *__kfree_ignore_start, *__kfree_ignore_end;
static u64 user_addr_offset;

#define RINGSIZE 256
#define ALIGN 4096

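/*
 * Used where a notification or callback must never fire; abort() so a
 * stray one fails the test immediately.
 */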
static bool never_notify_host(struct virtqueue *vq)
{
	abort();
}

static void never_callback_guest(struct virtqueue *vq)
{
	abort();
}

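/*
 * Return the whole user region as a single range, translating addresses
 * by user_addr_offset.
 */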
static bool getrange_iov(struct vringh *vrh, u64 addr, struct vringh_range *r)
{
	if (addr < (u64)(unsigned long)__user_addr_min - user_addr_offset)
		return false;
	if (addr >= (u64)(unsigned long)__user_addr_max - user_addr_offset)
		return false;

	r->start = (u64)(unsigned long)__user_addr_min - user_addr_offset;
	r->end_incl = (u64)(unsigned long)__user_addr_max - 1 - user_addr_offset;
	r->offset = user_addr_offset;
	return true;
}

/* We return single byte ranges. */
static bool getrange_slow(struct vringh *vrh, u64 addr, struct vringh_range *r)
{
	if (addr < (u64)(unsigned long)__user_addr_min - user_addr_offset)
		return false;
	if (addr >= (u64)(unsigned long)__user_addr_max - user_addr_offset)
		return false;

	r->start = addr;
	r->end_incl = r->start;
	r->offset = user_addr_offset;
	return true;
}

struct guest_virtio_device {
	struct virtio_device vdev;
	int to_host_fd;
	unsigned long notifies;
};

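/* Kick the host by writing a byte down the to_host pipe, counting kicks. */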
static bool parallel_notify_host(struct virtqueue *vq)
{
	int rc;
	struct guest_virtio_device *gvdev;

	gvdev = container_of(vq->vdev, struct guest_virtio_device, vdev);
	rc = write(gvdev->to_host_fd, "", 1);
	if (rc < 0)
		return false;
	gvdev->notifies++;
	return true;
}

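/* In fast mode the host polls vringh_get_head() itself, so skip the kick. */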
static bool no_notify_host(struct virtqueue *vq)
{
	return true;
}

#define NUM_XFERS (10000000)

/* We aim for two "distant" cpus. */
static void find_cpus(unsigned int *first, unsigned int *last)
{
	unsigned int i;

	*first = -1U;
	*last = 0;
	for (i = 0; i < 4096; i++) {
		cpu_set_t set;
		CPU_ZERO(&set);
		CPU_SET(i, &set);
		if (sched_setaffinity(getpid(), sizeof(set), &set) == 0) {
			if (i < *first)
				*first = i;
			if (i > *last)
				*last = i;
		}
	}
}

/*
 * Opencoded version for fast mode: returns 1 plus the head index when a
 * buffer is available, 0 when the ring is empty, or a get_user() failure.
 */
static inline int vringh_get_head(struct vringh *vrh, u16 *head)
{
	u16 avail_idx, i;
	int err;

	err = get_user(avail_idx, &vrh->vring.avail->idx);
	if (err)
		return err;

	if (vrh->last_avail_idx == avail_idx)
		return 0;

	/* Only get avail ring entries after they have been exposed by guest. */
	virtio_rmb(vrh->weak_barriers);

	i = vrh->last_avail_idx & (vrh->vring.num - 1);

	err = get_user(*head, &vrh->vring.avail->ring[i]);
	if (err)
		return err;

	vrh->last_avail_idx++;
	return 1;
}

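/*
 * Fork a guest (child) and a host (parent) over the same mmap'd file,
 * pinned to distant CPUs, and stream NUM_XFERS buffers between them with
 * pipe-based notifications.
 */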
static int parallel_test(u64 features,
			 bool (*getrange)(struct vringh *vrh,
					  u64 addr, struct vringh_range *r),
			 bool fast_vringh)
{
	void *host_map, *guest_map;
	int fd, mapsize, to_guest[2], to_host[2];
	unsigned long xfers = 0, notifies = 0, receives = 0;
	unsigned int first_cpu, last_cpu;
	cpu_set_t cpu_set;
	char buf[128];

	/* Create real file to mmap. */
	fd = open("/tmp/vringh_test-file", O_RDWR|O_CREAT|O_TRUNC, 0600);
	if (fd < 0)
		err(1, "Opening /tmp/vringh_test-file");

	/* Extra room at the end for some data, and indirects */
	mapsize = vring_size(RINGSIZE, ALIGN)
		+ RINGSIZE * 2 * sizeof(int)
		+ RINGSIZE * 6 * sizeof(struct vring_desc);
	mapsize = (mapsize + getpagesize() - 1) & ~(getpagesize() - 1);
	ftruncate(fd, mapsize);

	/* Parent and child use separate addresses, to check our mapping logic! */
	host_map = mmap(NULL, mapsize, PROT_READ|PROT_WRITE, MAP_SHARED, fd, 0);
	guest_map = mmap(NULL, mapsize, PROT_READ|PROT_WRITE, MAP_SHARED, fd, 0);

	pipe(to_guest);
	pipe(to_host);

	CPU_ZERO(&cpu_set);
	find_cpus(&first_cpu, &last_cpu);
	printf("Using CPUS %u and %u\n", first_cpu, last_cpu);
	fflush(stdout);

	if (fork() != 0) {
		struct vringh vrh;
		int status, err, rlen = 0;
		char rbuf[5];

		/* We are the host: never access guest addresses! */
		munmap(guest_map, mapsize);

		__user_addr_min = host_map;
		__user_addr_max = __user_addr_min + mapsize;
		user_addr_offset = host_map - guest_map;
		assert(user_addr_offset);

		close(to_guest[0]);
		close(to_host[1]);

		vring_init(&vrh.vring, RINGSIZE, host_map, ALIGN);
		vringh_init_user(&vrh, features, RINGSIZE, true,
				 vrh.vring.desc, vrh.vring.avail, vrh.vring.used);
		CPU_SET(first_cpu, &cpu_set);
		if (sched_setaffinity(getpid(), sizeof(cpu_set), &cpu_set))
			errx(1, "Could not set affinity to cpu %u", first_cpu);

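		/*
		 * Host loop: service buffers, kicking the guest when it
		 * asked for a notification and sleeping on the to_host
		 * pipe whenever the ring runs empty.
		 */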
		while (xfers < NUM_XFERS) {
			struct iovec host_riov[2], host_wiov[2];
			struct vringh_iov riov, wiov;
			u16 head, written;

			if (fast_vringh) {
				for (;;) {
					err = vringh_get_head(&vrh, &head);
					if (err != 0)
						break;
					err = vringh_need_notify_user(&vrh);
					if (err < 0)
						errx(1, "vringh_need_notify_user: %i",
						     err);
					if (err) {
						write(to_guest[1], "", 1);
						notifies++;
					}
				}
				if (err != 1)
					errx(1, "vringh_get_head");
				written = 0;
				goto complete;
			} else {
				vringh_iov_init(&riov,
						host_riov,
						ARRAY_SIZE(host_riov));
				vringh_iov_init(&wiov,
						host_wiov,
						ARRAY_SIZE(host_wiov));

				err = vringh_getdesc_user(&vrh, &riov, &wiov,
							  getrange, &head);
			}
			if (err == 0) {
				err = vringh_need_notify_user(&vrh);
				if (err < 0)
					errx(1, "vringh_need_notify_user: %i",
					     err);
				if (err) {
					write(to_guest[1], "", 1);
					notifies++;
				}

				if (!vringh_notify_enable_user(&vrh))
					continue;

				/* Swallow all notifies at once. */
				if (read(to_host[0], buf, sizeof(buf)) < 1)
					break;

				vringh_notify_disable_user(&vrh);
				receives++;
				continue;
			}
			if (err != 1)
				errx(1, "vringh_getdesc_user: %i", err);

			/* We simply copy bytes. */
			if (riov.used) {
				rlen = vringh_iov_pull_user(&riov, rbuf,
							    sizeof(rbuf));
				if (rlen != 4)
					errx(1, "vringh_iov_pull_user: %i",
					     rlen);
				assert(riov.i == riov.used);
				written = 0;
			} else {
				err = vringh_iov_push_user(&wiov, rbuf, rlen);
				if (err != rlen)
					errx(1, "vringh_iov_push_user: %i",
					     err);
				assert(wiov.i == wiov.used);
				written = err;
			}
		complete:
			xfers++;

			err = vringh_complete_user(&vrh, head, written);
			if (err != 0)
				errx(1, "vringh_complete_user: %i", err);
		}

		err = vringh_need_notify_user(&vrh);
		if (err < 0)
			errx(1, "vringh_need_notify_user: %i", err);
		if (err) {
			write(to_guest[1], "", 1);
			notifies++;
		}
		wait(&status);
		if (!WIFEXITED(status))
			errx(1, "Child died with signal %i?", WTERMSIG(status));
		if (WEXITSTATUS(status) != 0)
			errx(1, "Child exited %i?", WEXITSTATUS(status));
		printf("Host: notified %lu, pinged %lu\n", notifies, receives);
		return 0;
	} else {
		struct guest_virtio_device gvdev;
		struct virtqueue *vq;
		unsigned int *data;
		struct vring_desc *indirects;
		unsigned int finished = 0;

		/* We pass sg[]s pointing into here, but we need RINGSIZE+1 */
		data = guest_map + vring_size(RINGSIZE, ALIGN);
		indirects = (void *)data + (RINGSIZE + 1) * 2 * sizeof(int);

		/* We are the guest. */
		munmap(host_map, mapsize);

		close(to_guest[1]);
		close(to_host[0]);

		gvdev.vdev.features = features;
		INIT_LIST_HEAD(&gvdev.vdev.vqs);
		gvdev.to_host_fd = to_host[1];
		gvdev.notifies = 0;

		/* Pin the guest to the other "distant" cpu, not the host's. */
		CPU_SET(last_cpu, &cpu_set);
		if (sched_setaffinity(getpid(), sizeof(cpu_set), &cpu_set))
			err(1, "Could not set affinity to cpu %u", last_cpu);

		vq = vring_new_virtqueue(0, RINGSIZE, ALIGN, &gvdev.vdev, true,
					 false, guest_map,
					 fast_vringh ? no_notify_host
					 : parallel_notify_host,
					 never_callback_guest, "guest vq");

		/* Don't kfree indirects. */
		__kfree_ignore_start = indirects;
		__kfree_ignore_end = indirects + RINGSIZE * 6;

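		/*
		 * Guest loop: alternate output and input buffers, cycling
		 * through four scatterlist shapes, and sleep on the
		 * to_guest pipe when the ring fills up.
		 */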
		while (xfers < NUM_XFERS) {
			struct scatterlist sg[4];
			unsigned int num_sg, len;
			int *dbuf, err;
			bool output = !(xfers % 2);

			/* Consume bufs. */
			while ((dbuf = virtqueue_get_buf(vq, &len)) != NULL) {
				if (len == 4)
					assert(*dbuf == finished - 1);
				else if (!fast_vringh)
					assert(*dbuf == finished);
				finished++;
			}

			/* Produce a buffer. */
			dbuf = data + (xfers % (RINGSIZE + 1));

			if (output)
				*dbuf = xfers;
			else
				*dbuf = -1;

			switch ((xfers / sizeof(*dbuf)) % 4) {
			case 0:
				/* Nasty three-element sg list. */
				sg_init_table(sg, num_sg = 3);
				sg_set_buf(&sg[0], (void *)dbuf, 1);
				sg_set_buf(&sg[1], (void *)dbuf + 1, 2);
				sg_set_buf(&sg[2], (void *)dbuf + 3, 1);
				break;
			case 1:
				sg_init_table(sg, num_sg = 2);
				sg_set_buf(&sg[0], (void *)dbuf, 1);
				sg_set_buf(&sg[1], (void *)dbuf + 1, 3);
				break;
			case 2:
				sg_init_table(sg, num_sg = 1);
				sg_set_buf(&sg[0], (void *)dbuf, 4);
				break;
			case 3:
				sg_init_table(sg, num_sg = 4);
				sg_set_buf(&sg[0], (void *)dbuf, 1);
				sg_set_buf(&sg[1], (void *)dbuf + 1, 1);
				sg_set_buf(&sg[2], (void *)dbuf + 2, 1);
				sg_set_buf(&sg[3], (void *)dbuf + 3, 1);
				break;
			}

			/* May allocate an indirect, so force it to allocate
			 * user addr */
			__kmalloc_fake = indirects + (xfers % RINGSIZE) * 4;
			if (output)
				err = virtqueue_add_outbuf(vq, sg, num_sg, dbuf,
							   GFP_KERNEL);
			else
				err = virtqueue_add_inbuf(vq, sg, num_sg,
							  dbuf, GFP_KERNEL);

			if (err == -ENOSPC) {
				if (!virtqueue_enable_cb_delayed(vq))
					continue;
				/* Swallow all notifies at once. */
				if (read(to_guest[0], buf, sizeof(buf)) < 1)
					break;

				receives++;
				virtqueue_disable_cb(vq);
				continue;
			}

			if (err)
				errx(1, "virtqueue_add_in/outbuf: %i", err);

			xfers++;
			virtqueue_kick(vq);
		}

		/* Any extra? */
		while (finished != xfers) {
			int *dbuf;
			unsigned int len;

			/* Consume bufs. */
			dbuf = virtqueue_get_buf(vq, &len);
			if (dbuf) {
				if (len == 4)
					assert(*dbuf == finished - 1);
				else
					assert(len == 0);
				finished++;
				continue;
			}

			if (!virtqueue_enable_cb_delayed(vq))
				continue;
			if (read(to_guest[0], buf, sizeof(buf)) < 1)
				break;

			receives++;
			virtqueue_disable_cb(vq);
		}

		printf("Guest: notified %lu, pinged %lu\n",
		       gvdev.notifies, receives);
		vring_del_virtqueue(vq);
		return 0;
	}
}

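/*
 * Single-process mode: drive the guest (virtqueue) and host (vringh) sides
 * in lockstep through a series of descriptor-layout corner cases.
 */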
int main(int argc, char *argv[])
{
	struct virtio_device vdev;
	struct virtqueue *vq;
	struct vringh vrh;
	struct scatterlist guest_sg[RINGSIZE], *sgs[2];
	struct iovec host_riov[2], host_wiov[2];
	struct vringh_iov riov, wiov;
	struct vring_used_elem used[RINGSIZE];
	char buf[28];
	u16 head;
	int err;
	unsigned i;
	void *ret;
	bool (*getrange)(struct vringh *vrh, u64 addr, struct vringh_range *r);
	bool fast_vringh = false, parallel = false;

	getrange = getrange_iov;
	vdev.features = 0;
	INIT_LIST_HEAD(&vdev.vqs);

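	/*
	 * Flags may be combined on the command line, e.g. (assuming the
	 * usual tools/virtio binary name):
	 *	./vringh_test --parallel --indirect --eventidx
	 */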
	while (argv[1]) {
		if (strcmp(argv[1], "--indirect") == 0)
			__virtio_set_bit(&vdev, VIRTIO_RING_F_INDIRECT_DESC);
		else if (strcmp(argv[1], "--eventidx") == 0)
			__virtio_set_bit(&vdev, VIRTIO_RING_F_EVENT_IDX);
		else if (strcmp(argv[1], "--virtio-1") == 0)
			__virtio_set_bit(&vdev, VIRTIO_F_VERSION_1);
		else if (strcmp(argv[1], "--slow-range") == 0)
			getrange = getrange_slow;
		else if (strcmp(argv[1], "--fast-vringh") == 0)
			fast_vringh = true;
		else if (strcmp(argv[1], "--parallel") == 0)
			parallel = true;
		else
			errx(1, "Unknown arg %s", argv[1]);
		argv++;
	}

	if (parallel)
		return parallel_test(vdev.features, getrange, fast_vringh);

	if (posix_memalign(&__user_addr_min, PAGE_SIZE, USER_MEM) != 0)
		abort();
	__user_addr_max = __user_addr_min + USER_MEM;
	memset(__user_addr_min, 0, vring_size(RINGSIZE, ALIGN));

	/* Set up guest side. */
	vq = vring_new_virtqueue(0, RINGSIZE, ALIGN, &vdev, true, false,
				 __user_addr_min,
				 never_notify_host, never_callback_guest,
				 "guest vq");

	/* Set up host side. */
	vring_init(&vrh.vring, RINGSIZE, __user_addr_min, ALIGN);
	vringh_init_user(&vrh, vdev.features, RINGSIZE, true,
			 vrh.vring.desc, vrh.vring.avail, vrh.vring.used);

	/* No descriptor to get yet... */
	err = vringh_getdesc_user(&vrh, &riov, &wiov, getrange, &head);
	if (err != 0)
		errx(1, "vringh_getdesc_user: %i", err);

	/* Guest puts in a descriptor. */
	memcpy(__user_addr_max - 1, "a", 1);
	sg_init_table(guest_sg, 1);
	sg_set_buf(&guest_sg[0], __user_addr_max - 1, 1);
	sg_init_table(guest_sg+1, 1);
	sg_set_buf(&guest_sg[1], __user_addr_max - 3, 2);
	sgs[0] = &guest_sg[0];
	sgs[1] = &guest_sg[1];

	/* May allocate an indirect, so force it to allocate user addr */
	__kmalloc_fake = __user_addr_min + vring_size(RINGSIZE, ALIGN);
	err = virtqueue_add_sgs(vq, sgs, 1, 1, &err, GFP_KERNEL);
	if (err)
		errx(1, "virtqueue_add_sgs: %i", err);
	__kmalloc_fake = NULL;

	/* Host retrieves it. */
	vringh_iov_init(&riov, host_riov, ARRAY_SIZE(host_riov));
	vringh_iov_init(&wiov, host_wiov, ARRAY_SIZE(host_wiov));

	err = vringh_getdesc_user(&vrh, &riov, &wiov, getrange, &head);
	if (err != 1)
		errx(1, "vringh_getdesc_user: %i", err);

	assert(riov.used == 1);
	assert(riov.iov[0].iov_base == __user_addr_max - 1);
	assert(riov.iov[0].iov_len == 1);
	if (getrange != getrange_slow) {
		assert(wiov.used == 1);
		assert(wiov.iov[0].iov_base == __user_addr_max - 3);
		assert(wiov.iov[0].iov_len == 2);
	} else {
		assert(wiov.used == 2);
		assert(wiov.iov[0].iov_base == __user_addr_max - 3);
		assert(wiov.iov[0].iov_len == 1);
		assert(wiov.iov[1].iov_base == __user_addr_max - 2);
		assert(wiov.iov[1].iov_len == 1);
	}

	err = vringh_iov_pull_user(&riov, buf, 5);
	if (err != 1)
		errx(1, "vringh_iov_pull_user: %i", err);
	assert(buf[0] == 'a');
	assert(riov.i == 1);
	assert(vringh_iov_pull_user(&riov, buf, 5) == 0);

	memcpy(buf, "bcdef", 5);
	err = vringh_iov_push_user(&wiov, buf, 5);
	if (err != 2)
		errx(1, "vringh_iov_push_user: %i", err);
	assert(memcmp(__user_addr_max - 3, "bc", 2) == 0);
	assert(wiov.i == wiov.used);
	assert(vringh_iov_push_user(&wiov, buf, 5) == 0);

	/* Host is done. */
	err = vringh_complete_user(&vrh, head, err);
	if (err != 0)
		errx(1, "vringh_complete_user: %i", err);

	/* Guest should see used token now. */
	__kfree_ignore_start = __user_addr_min + vring_size(RINGSIZE, ALIGN);
	__kfree_ignore_end = __kfree_ignore_start + 1;
	ret = virtqueue_get_buf(vq, &i);
	if (ret != &err)
		errx(1, "virtqueue_get_buf: %p", ret);
	assert(i == 2);

	/* Guest puts in a huge descriptor. */
	sg_init_table(guest_sg, RINGSIZE);
	for (i = 0; i < RINGSIZE; i++) {
		sg_set_buf(&guest_sg[i],
			   __user_addr_max - USER_MEM/4, USER_MEM/4);
	}

	/* Fill contents with recognisable garbage. */
	for (i = 0; i < USER_MEM/4; i++)
		((char *)__user_addr_max - USER_MEM/4)[i] = i;

	/* This will allocate an indirect, so force it to allocate user addr */
	__kmalloc_fake = __user_addr_min + vring_size(RINGSIZE, ALIGN);
	err = virtqueue_add_outbuf(vq, guest_sg, RINGSIZE, &err, GFP_KERNEL);
	if (err)
		errx(1, "virtqueue_add_outbuf (large): %i", err);
	__kmalloc_fake = NULL;

	/* Host picks it up (allocates new iov). */
	vringh_iov_init(&riov, host_riov, ARRAY_SIZE(host_riov));
	vringh_iov_init(&wiov, host_wiov, ARRAY_SIZE(host_wiov));

	err = vringh_getdesc_user(&vrh, &riov, &wiov, getrange, &head);
	if (err != 1)
		errx(1, "vringh_getdesc_user: %i", err);

	assert(riov.max_num & VRINGH_IOV_ALLOCATED);
	assert(riov.iov != host_riov);
	if (getrange != getrange_slow)
		assert(riov.used == RINGSIZE);
	else
		assert(riov.used == RINGSIZE * USER_MEM/4);

	assert(!(wiov.max_num & VRINGH_IOV_ALLOCATED));
	assert(wiov.used == 0);

	/* Pull data back out (in odd chunks), should be as expected. */
	for (i = 0; i < RINGSIZE * USER_MEM/4; i += 3) {
		err = vringh_iov_pull_user(&riov, buf, 3);
		if (err != 3 && i + err != RINGSIZE * USER_MEM/4)
			errx(1, "vringh_iov_pull_user large: %i", err);
		assert(buf[0] == (char)i);
		assert(err < 2 || buf[1] == (char)(i + 1));
		assert(err < 3 || buf[2] == (char)(i + 2));
	}
	assert(riov.i == riov.used);
	vringh_iov_cleanup(&riov);
	vringh_iov_cleanup(&wiov);

	/* Complete using multi interface, just because we can. */
	used[0].id = head;
	used[0].len = 0;
	err = vringh_complete_multi_user(&vrh, used, 1);
	if (err)
		errx(1, "vringh_complete_multi_user(1): %i", err);

	/* Free up those descriptors. */
	ret = virtqueue_get_buf(vq, &i);
	if (ret != &err)
		errx(1, "virtqueue_get_buf: %p", ret);

	/* Add lots of descriptors. */
	sg_init_table(guest_sg, 1);
	sg_set_buf(&guest_sg[0], __user_addr_max - 1, 1);
	for (i = 0; i < RINGSIZE; i++) {
		err = virtqueue_add_outbuf(vq, guest_sg, 1, &err, GFP_KERNEL);
		if (err)
			errx(1, "virtqueue_add_outbuf (multiple): %i", err);
	}

	/* Now get many, and consume them all at once. */
	vringh_iov_init(&riov, host_riov, ARRAY_SIZE(host_riov));
	vringh_iov_init(&wiov, host_wiov, ARRAY_SIZE(host_wiov));

	for (i = 0; i < RINGSIZE; i++) {
		err = vringh_getdesc_user(&vrh, &riov, &wiov, getrange, &head);
		if (err != 1)
			errx(1, "vringh_getdesc_user: %i", err);
		used[i].id = head;
		used[i].len = 0;
	}
	/* Make sure it wraps around ring, to test! */
	assert(vrh.vring.used->idx % RINGSIZE != 0);
	err = vringh_complete_multi_user(&vrh, used, RINGSIZE);
	if (err)
		errx(1, "vringh_complete_multi_user: %i", err);

	/* Free those buffers. */
	for (i = 0; i < RINGSIZE; i++) {
		unsigned len;
		assert(virtqueue_get_buf(vq, &len) != NULL);
	}

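	/*
	 * Descriptors 0, 1 and 3 are flipped to INDIRECT by hand below while
	 * 2 stays direct, so the chain mixes direct data with hand-built
	 * indirect tables; the host should still pull one 28-byte linear
	 * buffer.
	 */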
	/* Test weird (but legal!) indirect. */
	if (__virtio_test_bit(&vdev, VIRTIO_RING_F_INDIRECT_DESC)) {
		char *data = __user_addr_max - USER_MEM/4;
		struct vring_desc *d = __user_addr_max - USER_MEM/2;
		struct vring vring;

		/* Force creation of direct, which we modify. */
		__virtio_clear_bit(&vdev, VIRTIO_RING_F_INDIRECT_DESC);
		vq = vring_new_virtqueue(0, RINGSIZE, ALIGN, &vdev, true,
					 false, __user_addr_min,
					 never_notify_host,
					 never_callback_guest,
					 "guest vq");

		sg_init_table(guest_sg, 4);
		sg_set_buf(&guest_sg[0], d, sizeof(*d)*2);
		sg_set_buf(&guest_sg[1], d + 2, sizeof(*d)*1);
		sg_set_buf(&guest_sg[2], data + 6, 4);
		sg_set_buf(&guest_sg[3], d + 3, sizeof(*d)*3);

		err = virtqueue_add_outbuf(vq, guest_sg, 4, &err, GFP_KERNEL);
		if (err)
			errx(1, "virtqueue_add_outbuf (indirect): %i", err);

		vring_init(&vring, RINGSIZE, __user_addr_min, ALIGN);

		/* They're used in order, but double-check... */
		assert(vring.desc[0].addr == (unsigned long)d);
		assert(vring.desc[1].addr == (unsigned long)(d+2));
		assert(vring.desc[2].addr == (unsigned long)data + 6);
		assert(vring.desc[3].addr == (unsigned long)(d+3));
		vring.desc[0].flags |= VRING_DESC_F_INDIRECT;
		vring.desc[1].flags |= VRING_DESC_F_INDIRECT;
		vring.desc[3].flags |= VRING_DESC_F_INDIRECT;

		/* First indirect */
		d[0].addr = (unsigned long)data;
		d[0].len = 1;
		d[0].flags = VRING_DESC_F_NEXT;
		d[0].next = 1;
		d[1].addr = (unsigned long)data + 1;
		d[1].len = 2;
		d[1].flags = 0;

		/* Second indirect */
		d[2].addr = (unsigned long)data + 3;
		d[2].len = 3;
		d[2].flags = 0;

		/* Third indirect */
		d[3].addr = (unsigned long)data + 10;
		d[3].len = 5;
		d[3].flags = VRING_DESC_F_NEXT;
		d[3].next = 1;
		d[4].addr = (unsigned long)data + 15;
		d[4].len = 6;
		d[4].flags = VRING_DESC_F_NEXT;
		d[4].next = 2;
		d[5].addr = (unsigned long)data + 21;
		d[5].len = 7;
		d[5].flags = 0;

		/* Host picks it up (allocates new iov). */
		vringh_iov_init(&riov, host_riov, ARRAY_SIZE(host_riov));
		vringh_iov_init(&wiov, host_wiov, ARRAY_SIZE(host_wiov));

		err = vringh_getdesc_user(&vrh, &riov, &wiov, getrange, &head);
		if (err != 1)
			errx(1, "vringh_getdesc_user: %i", err);

		if (head != 0)
			errx(1, "vringh_getdesc_user: head %i not 0", head);

		assert(riov.max_num & VRINGH_IOV_ALLOCATED);
		if (getrange != getrange_slow)
			assert(riov.used == 7);
		else
			assert(riov.used == 28);
		err = vringh_iov_pull_user(&riov, buf, 29);
		assert(err == 28);

		/* Data should be linear. */
		for (i = 0; i < err; i++)
			assert(buf[i] == i);
		vringh_iov_cleanup(&riov);
	}

	/* Don't leak memory... */
	vring_del_virtqueue(vq);
	free(__user_addr_min);

	return 0;
}