// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2017 - Cambridge Greys Ltd
 * Copyright (C) 2011 - 2014 Cisco Systems Inc
 * Copyright (C) 2000 - 2007 Jeff Dike (jdike@{addtoit,linux.intel}.com)
 */

#include <stdlib.h>
#include <errno.h>
#include <sys/epoll.h>
#include <signal.h>
#include <string.h>
#include <irq_user.h>
#include <os.h>
#include <um_malloc.h>

/* Epoll support */

static int epollfd = -1;

#define MAX_EPOLL_EVENTS 64

static struct epoll_event epoll_events[MAX_EPOLL_EVENTS];

/* Helper to return an Epoll data pointer from an epoll event structure.
 * We need to keep this one on the userspace side to keep includes separate.
 */

void *os_epoll_get_data_pointer(int index)
{
	return epoll_events[index].data.ptr;
}

/* Helper to compare events versus the events in the epoll structure.
 * Same as above - needs to be on the userspace side.
 */

int os_epoll_triggered(int index, int events)
{
	return epoll_events[index].events & events;
}

/* Helper to set the event mask.
 * The event mask is opaque to the kernel side, because it does not have
 * access to the right includes/defines for EPOLL constants.
 */

int os_event_mask(int irq_type)
{
	if (irq_type == IRQ_READ)
		return EPOLLIN | EPOLLPRI | EPOLLERR | EPOLLHUP | EPOLLRDHUP;
	if (irq_type == IRQ_WRITE)
		return EPOLLOUT;
	return 0;
}
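
/*
 * Illustrative sketch (not part of the original file): the kernel side only
 * treats the value returned by os_event_mask() as an opaque cookie and hands
 * it back unchanged to the registration helpers below. The "fd" and
 * "irq_entry" names are hypothetical placeholders for whatever the caller
 * tracks.
 */
#if 0
	{
		int mask = os_event_mask(IRQ_READ);	/* opaque to the kernel side */

		if (os_add_epoll_fd(mask, fd, irq_entry))
			/* registration failed; errno is meaningful on this side */;
	}
#endif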

/*
 * Initial Epoll Setup
 */
int os_setup_epoll(void)
{
	epollfd = epoll_create(MAX_EPOLL_EVENTS);
	return epollfd;
}
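
/*
 * Note: the size argument passed to epoll_create() above is only a
 * historical hint; since Linux 2.6.8 the kernel ignores it (it merely has to
 * be greater than zero). A minimal alternative sketch, assuming
 * epoll_create1() from <sys/epoll.h> is available on the host:
 */
#if 0
int os_setup_epoll(void)
{
	epollfd = epoll_create1(0);
	return epollfd;
}
#endif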

/*
 * Helper to run the actual epoll_wait
 */
int os_waiting_for_events_epoll(void)
{
	int n, err;

	n = epoll_wait(epollfd, epoll_events, MAX_EPOLL_EVENTS, 0);
	if (n < 0) {
		err = -errno;
		if (errno != EINTR)
			printk(UM_KERN_ERR
				"os_waiting_for_events: epoll returned %d, error = %s\n",
				n, strerror(errno));
		return err;
	}
	return n;
}
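
/*
 * Illustrative sketch (not part of the original file): how a caller is
 * expected to consume the return value by walking the cached epoll_events[]
 * array through the index-based accessors above. The irq_entry type and the
 * dispatch comments are hypothetical placeholders.
 */
#if 0
	{
		int i, n = os_waiting_for_events_epoll();

		for (i = 0; i < n; i++) {
			struct irq_entry *entry = os_epoll_get_data_pointer(i);

			if (os_epoll_triggered(i, os_event_mask(IRQ_READ)))
				/* dispatch a read IRQ for entry */;
			if (os_epoll_triggered(i, os_event_mask(IRQ_WRITE)))
				/* dispatch a write IRQ for entry */;
		}
	}
#endif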

/*
 * Helper to add a fd to epoll
 */
int os_add_epoll_fd(int events, int fd, void *data)
{
	struct epoll_event event;
	int result;

	event.data.ptr = data;
	event.events = events | EPOLLET;
	result = epoll_ctl(epollfd, EPOLL_CTL_ADD, fd, &event);
	if (result && errno == EEXIST)
		result = os_mod_epoll_fd(events, fd, data);
	if (result)
		printk(UM_KERN_ERR "epollctl add err fd %d, %s\n",
			fd, strerror(errno));
	return result;
}
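
/*
 * Illustrative sketch (not part of the original file): adding the same fd a
 * second time is silently turned into a modify by the EEXIST fallback above,
 * so callers can re-arm a descriptor without tracking whether it is already
 * registered. The fd and cookie names are hypothetical.
 */
#if 0
	{
		/* first call registers the fd for read events */
		os_add_epoll_fd(os_event_mask(IRQ_READ), fd, cookie);

		/* second call hits EEXIST and falls back to EPOLL_CTL_MOD */
		os_add_epoll_fd(os_event_mask(IRQ_READ) | os_event_mask(IRQ_WRITE),
				fd, cookie);
	}
#endif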

/*
 * Helper to mod the fd event mask and/or data backreference
 */
int os_mod_epoll_fd(int events, int fd, void *data)
{
	struct epoll_event event;
	int result;

	event.data.ptr = data;
	event.events = events;
	result = epoll_ctl(epollfd, EPOLL_CTL_MOD, fd, &event);
	if (result)
		printk(UM_KERN_ERR
			"epollctl mod err fd %d, %s\n", fd, strerror(errno));
	return result;
}

/*
 * Helper to delete a fd from epoll
 */
int os_del_epoll_fd(int fd)
{
	struct epoll_event event;
	int result;

	/* This is quiet as we use this as IO ON/OFF - so it is often
	 * invoked on a non-existent fd. The event argument is ignored for
	 * EPOLL_CTL_DEL, but kernels before 2.6.9 required a non-NULL
	 * pointer, hence the unused dummy.
	 */
	result = epoll_ctl(epollfd, EPOLL_CTL_DEL, fd, &event);
	return result;
}
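
/*
 * Illustrative sketch (not part of the original file): the "IO ON/OFF"
 * pattern mentioned above - masking an event source by deleting its fd and
 * unmasking it by adding it back. The fd and cookie names are hypothetical.
 */
#if 0
	{
		/* mask: stop delivering events for this source (quiet if absent) */
		os_del_epoll_fd(fd);

		/* unmask: resume read event delivery for the same source */
		os_add_epoll_fd(os_event_mask(IRQ_READ), fd, cookie);
	}
#endif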

void os_set_ioignore(void)
{
	signal(SIGIO, SIG_IGN);
}

void os_close_epoll_fd(void)
{
	/* Needed so we do not leak an fd when rebooting */
	os_close_file(epollfd);
}