#include <sys/types.h>
#include <sys/stat.h>
#include <sys/mman.h>
#include <unistd.h>
#include <errno.h>
#include <string.h>

#include "liburing.h"

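/*
 * Map the three kernel-managed regions of an io_uring instance into this
 * process: the SQ ring (head/tail/flags plus the SQE index array), the
 * SQE array itself, and the CQ ring (head/tail plus the CQE array).
 */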
static int io_uring_mmap(int fd, struct io_uring_params *p,
			 struct io_uring_sq *sq, struct io_uring_cq *cq)
{
	size_t size;
	void *ptr;
	int ret;

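	/*
	 * The SQ ring mapping ends with the index array, so its size is the
	 * array offset reported by the kernel plus one unsigned index per
	 * entry.
	 */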
	sq->ring_sz = p->sq_off.array + p->sq_entries * sizeof(unsigned);
	ptr = mmap(0, sq->ring_sz, PROT_READ | PROT_WRITE,
		   MAP_SHARED | MAP_POPULATE, fd, IORING_OFF_SQ_RING);
	if (ptr == MAP_FAILED)
		return -errno;
	sq->khead = ptr + p->sq_off.head;
	sq->ktail = ptr + p->sq_off.tail;
	sq->kring_mask = ptr + p->sq_off.ring_mask;
	sq->kring_entries = ptr + p->sq_off.ring_entries;
	sq->kflags = ptr + p->sq_off.flags;
	sq->kdropped = ptr + p->sq_off.dropped;
	sq->array = ptr + p->sq_off.array;

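	/* The SQEs live in their own mapping, separate from the SQ ring. */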
	size = p->sq_entries * sizeof(struct io_uring_sqe);
	sq->sqes = mmap(0, size, PROT_READ | PROT_WRITE,
			MAP_SHARED | MAP_POPULATE, fd,
			IORING_OFF_SQES);
	if (sq->sqes == MAP_FAILED) {
		ret = -errno;
err:
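		/*
		 * Shared error exit: unmap the SQ ring. sq->khead doubles as
		 * the mapping base here, which works because the kernel
		 * places the head counter at offset 0 of the SQ ring.
		 */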
		munmap(sq->khead, sq->ring_sz);
		return ret;
	}

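	/* Map the CQ ring; it ends with the CQE array itself. */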
	cq->ring_sz = p->cq_off.cqes + p->cq_entries * sizeof(struct io_uring_cqe);
	ptr = mmap(0, cq->ring_sz, PROT_READ | PROT_WRITE,
		   MAP_SHARED | MAP_POPULATE, fd, IORING_OFF_CQ_RING);
	if (ptr == MAP_FAILED) {
		ret = -errno;
		munmap(sq->sqes, p->sq_entries * sizeof(struct io_uring_sqe));
		goto err;
	}
	cq->khead = ptr + p->cq_off.head;
	cq->ktail = ptr + p->cq_off.tail;
	cq->kring_mask = ptr + p->cq_off.ring_mask;
	cq->kring_entries = ptr + p->cq_off.ring_entries;
	cq->koverflow = ptr + p->cq_off.overflow;
	cq->cqes = ptr + p->cq_off.cqes;
	return 0;
}

/*
 * For users that set up the ring themselves (e.g. to specify sq_thread_cpu
 * or sq_thread_idle via io_uring_setup()), this interface is a convenient
 * helper for mmap()ing the rings.
 * Returns -errno on error, or zero on success. On success, 'ring'
 * contains the necessary information to read/write to the rings.
 */
int io_uring_queue_mmap(int fd, struct io_uring_params *p, struct io_uring *ring)
{
	int ret;

	memset(ring, 0, sizeof(*ring));
	ret = io_uring_mmap(fd, p, &ring->sq, &ring->cq);
	if (!ret)
		ring->ring_fd = fd;
	return ret;
}

/*
 * Sets up an io_uring instance with io_uring_setup() and mmap()s the rings.
 * Returns a negative value on error, or zero on success. On success, 'ring'
 * contains the necessary information to read/write to the rings.
 */
int io_uring_queue_init(unsigned entries, struct io_uring *ring, unsigned flags)
{
	struct io_uring_params p;
	int fd, ret;

	memset(&p, 0, sizeof(p));
	p.flags = flags;

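	/*
	 * Create the ring. On success the kernel fills 'p' with the ring
	 * offsets and sizes that the mmap helper relies on.
	 */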
	fd = io_uring_setup(entries, &p);
	if (fd < 0)
		return fd;

	ret = io_uring_queue_mmap(fd, &p, ring);
	if (ret)
		close(fd);

	return ret;
}

void io_uring_queue_exit(struct io_uring *ring)
{
	struct io_uring_sq *sq = &ring->sq;
	struct io_uring_cq *cq = &ring->cq;

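	/*
	 * Tear down in reverse: the SQE array, then both rings (again using
	 * the khead pointers as the mapping bases), then the ring fd.
	 */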
	munmap(sq->sqes, *sq->kring_entries * sizeof(struct io_uring_sqe));
	munmap(sq->khead, sq->ring_sz);
	munmap(cq->khead, cq->ring_sz);
	close(ring->ring_fd);
}
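
/*
 * Minimal usage sketch (illustrative only, not part of the library, kept
 * compiled out): initialize a ring and tear it down. The entry count 8 is
 * an arbitrary choice for the example.
 */
#if 0
#include <stdio.h>

int main(void)
{
	struct io_uring ring;
	int ret;

	ret = io_uring_queue_init(8, &ring, 0);
	if (ret < 0) {
		fprintf(stderr, "io_uring_queue_init failed: %d\n", ret);
		return 1;
	}

	/* ring.sq and ring.cq now point into the kernel-shared rings. */
	io_uring_queue_exit(&ring);
	return 0;
}
#endif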