/* SPDX-License-Identifier: (GPL-2.0 OR CDDL-1.0) */
/*
 * vboxguest vmm-req and hgcm-call code, VBoxGuestR0LibHGCMInternal.cpp,
 * VBoxGuestR0LibGenericRequest.cpp and RTErrConvertToErrno.cpp in vbox svn.
 *
 * Copyright (C) 2006-2016 Oracle Corporation
 */

#include <linux/errno.h>
#include <linux/io.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/sizes.h>
#include <linux/slab.h>
#include <linux/uaccess.h>
#include <linux/vmalloc.h>
#include <linux/vbox_err.h>
#include <linux/vbox_utils.h>
#include "vboxguest_core.h"

/* Get the pointer to the first parameter of a HGCM call request. */
#define VMMDEV_HGCM_CALL_PARMS(a) \
	((struct vmmdev_hgcm_function_parameter *)( \
		(u8 *)(a) + sizeof(struct vmmdev_hgcm_call)))

/* The max parameter buffer size for a user request. */
#define VBG_MAX_HGCM_USER_PARM		(24 * SZ_1M)
/* The max parameter buffer size for a kernel request. */
#define VBG_MAX_HGCM_KERNEL_PARM	(16 * SZ_1M)

#define VBG_DEBUG_PORT			0x504

/* This protects vbg_log_buf and serializes VBG_DEBUG_PORT accesses */
static DEFINE_SPINLOCK(vbg_log_lock);
static char vbg_log_buf[128];

#define VBG_LOG(name, pr_func) \
void name(const char *fmt, ...)						\
{									\
	unsigned long flags;						\
	va_list args;							\
	int i, count;							\
									\
	va_start(args, fmt);						\
	spin_lock_irqsave(&vbg_log_lock, flags);			\
									\
	count = vscnprintf(vbg_log_buf, sizeof(vbg_log_buf), fmt, args);\
	for (i = 0; i < count; i++)					\
		outb(vbg_log_buf[i], VBG_DEBUG_PORT);			\
									\
	pr_func("%s", vbg_log_buf);					\
									\
	spin_unlock_irqrestore(&vbg_log_lock, flags);			\
	va_end(args);							\
}									\
EXPORT_SYMBOL(name)

VBG_LOG(vbg_info, pr_info);
VBG_LOG(vbg_warn, pr_warn);
VBG_LOG(vbg_err, pr_err);
VBG_LOG(vbg_err_ratelimited, pr_err_ratelimited);
#if defined(DEBUG) && !defined(CONFIG_DYNAMIC_DEBUG)
VBG_LOG(vbg_debug, pr_debug);
#endif
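
/*
 * Example (illustrative, not part of the driver): the helpers generated
 * above are used like printk from the rest of the vboxguest code, e.g.:
 *
 *	vbg_info("vboxguest: misc device registered\n");
 *	vbg_err("vboxguest: request failed, rc = %d\n", rc);
 *
 * Each call writes the formatted string both to the host debug port
 * (VBG_DEBUG_PORT, 0x504) and to the kernel log via the matching pr_*()
 * helper.
 */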

void *vbg_req_alloc(size_t len, enum vmmdev_request_type req_type,
		    u32 requestor)
{
	struct vmmdev_request_header *req;
	int order = get_order(PAGE_ALIGN(len));

	req = (void *)__get_free_pages(GFP_KERNEL | GFP_DMA32, order);
	if (!req)
		return NULL;

	memset(req, 0xaa, len);

	req->size = len;
	req->version = VMMDEV_REQUEST_HEADER_VERSION;
	req->request_type = req_type;
	req->rc = VERR_GENERAL_FAILURE;
	req->reserved1 = 0;
	req->requestor = requestor;

	return req;
}

void vbg_req_free(void *req, size_t len)
{
	if (!req)
		return;

	free_pages((unsigned long)req, get_order(PAGE_ALIGN(len)));
}

/* Note this function returns a VBox status code, not a negative errno!! */
int vbg_req_perform(struct vbg_dev *gdev, void *req)
{
	unsigned long phys_req = virt_to_phys(req);

	outl(phys_req, gdev->io_port + VMMDEV_PORT_OFF_REQUEST);
	/*
	 * The host changes the request as a result of the outl, make sure
	 * the outl and any reads of the req happen in the correct order.
	 */
	mb();

	return ((struct vmmdev_request_header *)req)->rc;
}
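
/*
 * Example (illustrative sketch, not part of the driver): the three helpers
 * above form the request lifecycle used throughout vboxguest. The request
 * type below is just one example and "gdev" is assumed to be a valid
 * struct vbg_dev:
 *
 *	struct vmmdev_hgcm_cancel2 *req;
 *	int rc;
 *
 *	req = vbg_req_alloc(sizeof(*req), VMMDEVREQ_HGCM_CANCEL2,
 *			    VMMDEV_REQUESTOR_KERNEL);
 *	if (!req)
 *		return -ENOMEM;
 *	req->phys_req_to_cancel = 0;
 *	rc = vbg_req_perform(gdev, req);   (VBox status code, not an errno)
 *	vbg_req_free(req, sizeof(*req));
 *	return vbg_status_code_to_errno(rc);
 */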

static bool hgcm_req_done(struct vbg_dev *gdev,
			  struct vmmdev_hgcmreq_header *header)
{
	unsigned long flags;
	bool done;

	spin_lock_irqsave(&gdev->event_spinlock, flags);
	done = header->flags & VMMDEV_HGCM_REQ_DONE;
	spin_unlock_irqrestore(&gdev->event_spinlock, flags);

	return done;
}

int vbg_hgcm_connect(struct vbg_dev *gdev, u32 requestor,
		     struct vmmdev_hgcm_service_location *loc,
		     u32 *client_id, int *vbox_status)
{
	struct vmmdev_hgcm_connect *hgcm_connect = NULL;
	int rc;

	hgcm_connect = vbg_req_alloc(sizeof(*hgcm_connect),
				     VMMDEVREQ_HGCM_CONNECT, requestor);
	if (!hgcm_connect)
		return -ENOMEM;

	hgcm_connect->header.flags = 0;
	memcpy(&hgcm_connect->loc, loc, sizeof(*loc));
	hgcm_connect->client_id = 0;

	rc = vbg_req_perform(gdev, hgcm_connect);

	if (rc == VINF_HGCM_ASYNC_EXECUTE)
		wait_event(gdev->hgcm_wq,
			   hgcm_req_done(gdev, &hgcm_connect->header));

	if (rc >= 0) {
		*client_id = hgcm_connect->client_id;
		rc = hgcm_connect->header.result;
	}

	vbg_req_free(hgcm_connect, sizeof(*hgcm_connect));

	*vbox_status = rc;
	return 0;
}
EXPORT_SYMBOL(vbg_hgcm_connect);
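
/*
 * Example (illustrative sketch): a hypothetical kernel-mode caller
 * connecting to the host guest-property service; the service name and
 * requestor value are examples only:
 *
 *	struct vmmdev_hgcm_service_location loc = {
 *		.type = VMMDEV_HGCM_LOC_LOCALHOST_EXISTING,
 *	};
 *	u32 client_id;
 *	int vbox_status, ret;
 *
 *	strscpy(loc.u.localhost.service_name, "VBoxGuestPropSvc",
 *		sizeof(loc.u.localhost.service_name));
 *	ret = vbg_hgcm_connect(gdev, VMMDEV_REQUESTOR_KERNEL, &loc,
 *			       &client_id, &vbox_status);
 *
 * A 0 return with vbox_status >= 0 means client_id is valid and must later
 * be released with vbg_hgcm_disconnect().
 */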

int vbg_hgcm_disconnect(struct vbg_dev *gdev, u32 requestor,
			u32 client_id, int *vbox_status)
{
	struct vmmdev_hgcm_disconnect *hgcm_disconnect = NULL;
	int rc;

	hgcm_disconnect = vbg_req_alloc(sizeof(*hgcm_disconnect),
					VMMDEVREQ_HGCM_DISCONNECT,
					requestor);
	if (!hgcm_disconnect)
		return -ENOMEM;

	hgcm_disconnect->header.flags = 0;
	hgcm_disconnect->client_id = client_id;

	rc = vbg_req_perform(gdev, hgcm_disconnect);

	if (rc == VINF_HGCM_ASYNC_EXECUTE)
		wait_event(gdev->hgcm_wq,
			   hgcm_req_done(gdev, &hgcm_disconnect->header));

	if (rc >= 0)
		rc = hgcm_disconnect->header.result;

	vbg_req_free(hgcm_disconnect, sizeof(*hgcm_disconnect));

	*vbox_status = rc;
	return 0;
}
EXPORT_SYMBOL(vbg_hgcm_disconnect);

static u32 hgcm_call_buf_size_in_pages(void *buf, u32 len)
{
	u32 size = PAGE_ALIGN(len + ((unsigned long)buf & ~PAGE_MASK));

	return size >> PAGE_SHIFT;
}
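
/*
 * Worked example (assuming 4 KiB pages): for a buffer at offset 0xf00
 * within its page and len = 0x300, the data straddles a page boundary,
 * so PAGE_ALIGN(0x300 + 0xf00) = 0x2000 and the function returns 2, even
 * though len itself is far smaller than one page.
 */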

static void hgcm_call_add_pagelist_size(void *buf, u32 len, size_t *extra)
{
	u32 page_count;

	page_count = hgcm_call_buf_size_in_pages(buf, len);
	*extra += offsetof(struct vmmdev_hgcm_pagelist, pages[page_count]);
}

static int hgcm_call_preprocess_linaddr(
	const struct vmmdev_hgcm_function_parameter *src_parm,
	void **bounce_buf_ret, size_t *extra)
{
	void *buf, *bounce_buf;
	bool copy_in;
	u32 len;
	int ret;

	buf = (void *)src_parm->u.pointer.u.linear_addr;
	len = src_parm->u.pointer.size;
	copy_in = src_parm->type != VMMDEV_HGCM_PARM_TYPE_LINADDR_OUT;

	if (len > VBG_MAX_HGCM_USER_PARM)
		return -E2BIG;

	bounce_buf = kvmalloc(len, GFP_KERNEL);
	if (!bounce_buf)
		return -ENOMEM;

	*bounce_buf_ret = bounce_buf;

	if (copy_in) {
		ret = copy_from_user(bounce_buf, (void __user *)buf, len);
		if (ret)
			return -EFAULT;
	} else {
		memset(bounce_buf, 0, len);
	}

	hgcm_call_add_pagelist_size(bounce_buf, len, extra);
	return 0;
}
/**
 * hgcm_call_preprocess - Preprocess the HGCM call: validate the parameters,
 *	allocate bounce buffers and figure out how much extra storage is
 *	needed for page lists.
 * @src_parm:        Pointer to the source function call parameters.
 * @parm_count:      Number of function call parameters.
 * @bounce_bufs_ret: Where to return the allocated bounce-buffer array.
 * @extra:           Where to return the extra request space needed for
 *                   physical page lists.
 *
 * Return: 0 or negative errno value.
 */
static int hgcm_call_preprocess(
	const struct vmmdev_hgcm_function_parameter *src_parm,
	u32 parm_count, void ***bounce_bufs_ret, size_t *extra)
{
	void *buf, **bounce_bufs = NULL;
	u32 i, len;
	int ret;

	for (i = 0; i < parm_count; i++, src_parm++) {
		switch (src_parm->type) {
		case VMMDEV_HGCM_PARM_TYPE_32BIT:
		case VMMDEV_HGCM_PARM_TYPE_64BIT:
			break;

		case VMMDEV_HGCM_PARM_TYPE_LINADDR:
		case VMMDEV_HGCM_PARM_TYPE_LINADDR_IN:
		case VMMDEV_HGCM_PARM_TYPE_LINADDR_OUT:
			if (!bounce_bufs) {
				bounce_bufs = kcalloc(parm_count,
						      sizeof(void *),
						      GFP_KERNEL);
				if (!bounce_bufs)
					return -ENOMEM;

				*bounce_bufs_ret = bounce_bufs;
			}

			ret = hgcm_call_preprocess_linaddr(src_parm,
							   &bounce_bufs[i],
							   extra);
			if (ret)
				return ret;

			break;

		case VMMDEV_HGCM_PARM_TYPE_LINADDR_KERNEL:
		case VMMDEV_HGCM_PARM_TYPE_LINADDR_KERNEL_IN:
		case VMMDEV_HGCM_PARM_TYPE_LINADDR_KERNEL_OUT:
			buf = (void *)src_parm->u.pointer.u.linear_addr;
			len = src_parm->u.pointer.size;
			if (WARN_ON(len > VBG_MAX_HGCM_KERNEL_PARM))
				return -E2BIG;

			hgcm_call_add_pagelist_size(buf, len, extra);
			break;

		default:
			return -EINVAL;
		}
	}

	return 0;
}
/**
 * hgcm_call_linear_addr_type_to_pagelist_flags - Translate linear address
 *	types to page list direction flags.
 * @type: The linear address type.
 *
 * Return: page list flags.
 */
static u32 hgcm_call_linear_addr_type_to_pagelist_flags(
	enum vmmdev_hgcm_function_parameter_type type)
{
	switch (type) {
	default:
		WARN_ON(1);
		fallthrough;
	case VMMDEV_HGCM_PARM_TYPE_LINADDR:
	case VMMDEV_HGCM_PARM_TYPE_LINADDR_KERNEL:
		return VMMDEV_HGCM_F_PARM_DIRECTION_BOTH;

	case VMMDEV_HGCM_PARM_TYPE_LINADDR_IN:
	case VMMDEV_HGCM_PARM_TYPE_LINADDR_KERNEL_IN:
		return VMMDEV_HGCM_F_PARM_DIRECTION_TO_HOST;

	case VMMDEV_HGCM_PARM_TYPE_LINADDR_OUT:
	case VMMDEV_HGCM_PARM_TYPE_LINADDR_KERNEL_OUT:
		return VMMDEV_HGCM_F_PARM_DIRECTION_FROM_HOST;
	}
}

static void hgcm_call_init_linaddr(struct vmmdev_hgcm_call *call,
	struct vmmdev_hgcm_function_parameter *dst_parm, void *buf, u32 len,
	enum vmmdev_hgcm_function_parameter_type type, u32 *off_extra)
{
	struct vmmdev_hgcm_pagelist *dst_pg_lst;
	struct page *page;
	bool is_vmalloc;
	u32 i, page_count;

	dst_parm->type = type;

	if (len == 0) {
		dst_parm->u.pointer.size = 0;
		dst_parm->u.pointer.u.linear_addr = 0;
		return;
	}

	dst_pg_lst = (void *)call + *off_extra;
	page_count = hgcm_call_buf_size_in_pages(buf, len);
	is_vmalloc = is_vmalloc_addr(buf);

	dst_parm->type = VMMDEV_HGCM_PARM_TYPE_PAGELIST;
	dst_parm->u.page_list.size = len;
	dst_parm->u.page_list.offset = *off_extra;
	dst_pg_lst->flags = hgcm_call_linear_addr_type_to_pagelist_flags(type);
	dst_pg_lst->offset_first_page = (unsigned long)buf & ~PAGE_MASK;
	dst_pg_lst->page_count = page_count;

	for (i = 0; i < page_count; i++) {
		if (is_vmalloc)
			page = vmalloc_to_page(buf);
		else
			page = virt_to_page(buf);

		dst_pg_lst->pages[i] = page_to_phys(page);
		buf += PAGE_SIZE;
	}

	*off_extra += offsetof(struct vmmdev_hgcm_pagelist, pages[page_count]);
}
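
/*
 * Resulting request layout (illustrative summary of the code above):
 *
 *	struct vmmdev_hgcm_call     header, client_id, function, parm_count
 *	parameters 0..parm_count-1  plain 32/64-bit values, or PAGELIST
 *	                            entries pointing into the area below
 *	page list area              one struct vmmdev_hgcm_pagelist per
 *	                            buffer: flags, offset_first_page,
 *	                            page_count, pages[page_count]
 *
 * off_extra always holds the offset of the next free byte in the page list
 * area, which is why it is advanced by
 * offsetof(struct vmmdev_hgcm_pagelist, pages[page_count]) above.
 */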

/**
 * hgcm_call_init_call - Initialize the call request that we're sending
 *	to the host.
 * @call:        The call to initialize.
 * @client_id:   The client ID of the caller.
 * @function:    The function number of the function to call.
 * @src_parm:    Pointer to source function call parameters.
 * @parm_count:  Number of function call parameters.
 * @bounce_bufs: The bounce-buffer array.
 */
static void hgcm_call_init_call(
	struct vmmdev_hgcm_call *call, u32 client_id, u32 function,
	const struct vmmdev_hgcm_function_parameter *src_parm,
	u32 parm_count, void **bounce_bufs)
{
	struct vmmdev_hgcm_function_parameter *dst_parm =
		VMMDEV_HGCM_CALL_PARMS(call);
	u32 i, off_extra = (uintptr_t)(dst_parm + parm_count) - (uintptr_t)call;
	void *buf;

	call->header.flags = 0;
	call->header.result = VINF_SUCCESS;
	call->client_id = client_id;
	call->function = function;
	call->parm_count = parm_count;

	for (i = 0; i < parm_count; i++, src_parm++, dst_parm++) {
		switch (src_parm->type) {
		case VMMDEV_HGCM_PARM_TYPE_32BIT:
		case VMMDEV_HGCM_PARM_TYPE_64BIT:
			*dst_parm = *src_parm;
			break;

		case VMMDEV_HGCM_PARM_TYPE_LINADDR:
		case VMMDEV_HGCM_PARM_TYPE_LINADDR_IN:
		case VMMDEV_HGCM_PARM_TYPE_LINADDR_OUT:
			hgcm_call_init_linaddr(call, dst_parm, bounce_bufs[i],
					       src_parm->u.pointer.size,
					       src_parm->type, &off_extra);
			break;

		case VMMDEV_HGCM_PARM_TYPE_LINADDR_KERNEL:
		case VMMDEV_HGCM_PARM_TYPE_LINADDR_KERNEL_IN:
		case VMMDEV_HGCM_PARM_TYPE_LINADDR_KERNEL_OUT:
			buf = (void *)src_parm->u.pointer.u.linear_addr;
			hgcm_call_init_linaddr(call, dst_parm, buf,
					       src_parm->u.pointer.size,
					       src_parm->type, &off_extra);
			break;

		default:
			WARN_ON(1);
			dst_parm->type = VMMDEV_HGCM_PARM_TYPE_INVALID;
		}
	}
}
/**
 * hgcm_cancel_call - Try to cancel a pending HGCM call.
 * @gdev: The VBoxGuest device extension.
 * @call: The call to cancel.
 *
 * Return: VBox status code.
 */
static int hgcm_cancel_call(struct vbg_dev *gdev, struct vmmdev_hgcm_call *call)
{
	int rc;

	/*
	 * We use a pre-allocated request for cancellations, which is
	 * protected by cancel_req_mutex. This means that all cancellations
	 * get serialized; this should be fine since they should be rare.
	 */
	mutex_lock(&gdev->cancel_req_mutex);
	gdev->cancel_req->phys_req_to_cancel = virt_to_phys(call);
	rc = vbg_req_perform(gdev, gdev->cancel_req);
	mutex_unlock(&gdev->cancel_req_mutex);

	if (rc == VERR_NOT_IMPLEMENTED) {
		call->header.flags |= VMMDEV_HGCM_REQ_CANCELLED;
		call->header.header.request_type = VMMDEVREQ_HGCM_CANCEL;

		rc = vbg_req_perform(gdev, call);
		if (rc == VERR_INVALID_PARAMETER)
			rc = VERR_NOT_FOUND;
	}

	if (rc >= 0)
		call->header.flags |= VMMDEV_HGCM_REQ_CANCELLED;

	return rc;
}

/**
 * vbg_hgcm_do_call - Perform the call and wait for completion.
 * @gdev:          The VBoxGuest device extension.
 * @call:          The call to execute.
 * @timeout_ms:    Timeout in ms, U32_MAX means wait indefinitely.
 * @interruptible: Whether the wait can be interrupted by a signal.
 * @leak_it:       Where to return the "leak it rather than free it"
 *                 indicator, set when a timed-out or interrupted call
 *                 could not be cancelled and the host may still write
 *                 to the request.
 *
 * Return: 0 or negative errno value.
 */
static int vbg_hgcm_do_call(struct vbg_dev *gdev, struct vmmdev_hgcm_call *call,
			    u32 timeout_ms, bool interruptible, bool *leak_it)
{
	int rc, cancel_rc, ret;
	long timeout;

	*leak_it = false;

	rc = vbg_req_perform(gdev, call);

	/*
	 * If the call failed, then pretend success. Upper layers will
	 * interpret the result code in the packet.
	 */
	if (rc < 0) {
		call->header.result = rc;
		return 0;
	}

	if (rc != VINF_HGCM_ASYNC_EXECUTE)
		return 0;

	/* Host decided to process the request asynchronously, wait for it */
	if (timeout_ms == U32_MAX)
		timeout = MAX_SCHEDULE_TIMEOUT;
	else
		timeout = msecs_to_jiffies(timeout_ms);

	if (interruptible) {
		timeout = wait_event_interruptible_timeout(gdev->hgcm_wq,
							   hgcm_req_done(gdev, &call->header),
							   timeout);
	} else {
		timeout = wait_event_timeout(gdev->hgcm_wq,
					     hgcm_req_done(gdev, &call->header),
					     timeout);
	}

	/* timeout > 0 means hgcm_req_done has returned true, so success */
	if (timeout > 0)
		return 0;

	if (timeout == 0)
		ret = -ETIMEDOUT;
	else
		ret = -EINTR;

	/* Cancel the request */
	cancel_rc = hgcm_cancel_call(gdev, call);
	if (cancel_rc >= 0)
		return ret;

	/*
	 * Failed to cancel, this should mean that the cancel has lost the
	 * race with normal completion; wait while the host completes it.
	 */
	if (cancel_rc == VERR_NOT_FOUND || cancel_rc == VERR_SEM_DESTROYED)
		timeout = msecs_to_jiffies(500);
	else
		timeout = msecs_to_jiffies(2000);

	timeout = wait_event_timeout(gdev->hgcm_wq,
				     hgcm_req_done(gdev, &call->header),
				     timeout);

	if (WARN_ON(timeout == 0)) {
		/* We really should never get here */
		vbg_err("%s: Call timed out and cancellation failed, leaking the request\n",
			__func__);
		*leak_it = true;
		return ret;
	}

	/* The call has completed normally after all */
	return 0;
}
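
/*
 * Summary of the return contract above: a 0 return only means the request
 * reached a terminal state; the real HGCM result must still be read from
 * call->header.result. A negative return (-ETIMEDOUT or -EINTR) means the
 * call was cancelled, unless *leak_it was set, in which case the host may
 * still write to the request and its memory must be leaked, not freed.
 */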

/**
 * hgcm_call_copy_back_result - Copy the result of the call back to the
 *	caller info structure and user buffers.
 * @call:        HGCM call request.
 * @dst_parm:    Pointer to the function call parameters destination.
 * @parm_count:  Number of function call parameters.
 * @bounce_bufs: The bounce-buffer array.
 *
 * Return: 0 or negative errno value.
 */
static int hgcm_call_copy_back_result(
	const struct vmmdev_hgcm_call *call,
	struct vmmdev_hgcm_function_parameter *dst_parm,
	u32 parm_count, void **bounce_bufs)
{
	const struct vmmdev_hgcm_function_parameter *src_parm =
		VMMDEV_HGCM_CALL_PARMS(call);
	void __user *p;
	int ret;
	u32 i;

	/* Copy back parameters. */
	for (i = 0; i < parm_count; i++, src_parm++, dst_parm++) {
		switch (dst_parm->type) {
		case VMMDEV_HGCM_PARM_TYPE_32BIT:
		case VMMDEV_HGCM_PARM_TYPE_64BIT:
			*dst_parm = *src_parm;
			break;

		case VMMDEV_HGCM_PARM_TYPE_PAGELIST:
			dst_parm->u.page_list.size = src_parm->u.page_list.size;
			break;

		case VMMDEV_HGCM_PARM_TYPE_LINADDR_IN:
		case VMMDEV_HGCM_PARM_TYPE_LINADDR_KERNEL:
		case VMMDEV_HGCM_PARM_TYPE_LINADDR_KERNEL_IN:
		case VMMDEV_HGCM_PARM_TYPE_LINADDR_KERNEL_OUT:
			dst_parm->u.pointer.size = src_parm->u.pointer.size;
			break;

		case VMMDEV_HGCM_PARM_TYPE_LINADDR:
		case VMMDEV_HGCM_PARM_TYPE_LINADDR_OUT:
			dst_parm->u.pointer.size = src_parm->u.pointer.size;

			p = (void __user *)dst_parm->u.pointer.u.linear_addr;
			ret = copy_to_user(p, bounce_bufs[i],
					   min(src_parm->u.pointer.size,
					       dst_parm->u.pointer.size));
			if (ret)
				return -EFAULT;
			break;

		default:
			WARN_ON(1);
			return -EINVAL;
		}
	}

	return 0;
}

int vbg_hgcm_call(struct vbg_dev *gdev, u32 requestor, u32 client_id,
		  u32 function, u32 timeout_ms,
		  struct vmmdev_hgcm_function_parameter *parms, u32 parm_count,
		  int *vbox_status)
{
	struct vmmdev_hgcm_call *call;
	void **bounce_bufs = NULL;
	bool leak_it;
	size_t size;
	int i, ret;

	size = sizeof(struct vmmdev_hgcm_call) +
	       parm_count * sizeof(struct vmmdev_hgcm_function_parameter);
	/*
	 * Validate and buffer the parameters for the call. This also
	 * increases size with the amount of extra space needed for
	 * page lists.
	 */
	ret = hgcm_call_preprocess(parms, parm_count, &bounce_bufs, &size);
	if (ret) {
		/* Even on error bounce bufs may still have been allocated */
		goto free_bounce_bufs;
	}

	call = vbg_req_alloc(size, VMMDEVREQ_HGCM_CALL, requestor);
	if (!call) {
		ret = -ENOMEM;
		goto free_bounce_bufs;
	}

	hgcm_call_init_call(call, client_id, function, parms, parm_count,
			    bounce_bufs);

	ret = vbg_hgcm_do_call(gdev, call, timeout_ms,
			       requestor & VMMDEV_REQUESTOR_USERMODE, &leak_it);
	if (ret == 0) {
		*vbox_status = call->header.result;
		ret = hgcm_call_copy_back_result(call, parms, parm_count,
						 bounce_bufs);
	}

	if (!leak_it)
		vbg_req_free(call, size);

free_bounce_bufs:
	if (bounce_bufs) {
		for (i = 0; i < parm_count; i++)
			kvfree(bounce_bufs[i]);
		kfree(bounce_bufs);
	}

	return ret;
}
EXPORT_SYMBOL(vbg_hgcm_call);
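
/*
 * Example (illustrative sketch, not part of the driver): a kernel-mode
 * caller passing one 32-bit value and one kernel buffer; the function
 * number 3, buf and buf_len are hypothetical:
 *
 *	struct vmmdev_hgcm_function_parameter parms[2] = {};
 *	int vbox_status, ret;
 *
 *	parms[0].type = VMMDEV_HGCM_PARM_TYPE_32BIT;
 *	parms[0].u.value32 = 42;
 *	parms[1].type = VMMDEV_HGCM_PARM_TYPE_LINADDR_KERNEL_IN;
 *	parms[1].u.pointer.size = buf_len;
 *	parms[1].u.pointer.u.linear_addr = (unsigned long)buf;
 *
 *	ret = vbg_hgcm_call(gdev, VMMDEV_REQUESTOR_KERNEL, client_id, 3,
 *			    U32_MAX, parms, 2, &vbox_status);
 *
 * ret reports transport-level failures (-ENOMEM, -EINTR, ...); the HGCM
 * service's own result lands in vbox_status as a VBox status code.
 */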

#ifdef CONFIG_COMPAT
int vbg_hgcm_call32(
	struct vbg_dev *gdev, u32 requestor, u32 client_id, u32 function,
	u32 timeout_ms, struct vmmdev_hgcm_function_parameter32 *parm32,
	u32 parm_count, int *vbox_status)
{
	struct vmmdev_hgcm_function_parameter *parm64 = NULL;
	u32 i, size;
	int ret = 0;

	/* KISS: allocate a temporary request and convert the parameters. */
	size = parm_count * sizeof(struct vmmdev_hgcm_function_parameter);
	parm64 = kzalloc(size, GFP_KERNEL);
	if (!parm64)
		return -ENOMEM;

	for (i = 0; i < parm_count; i++) {
		switch (parm32[i].type) {
		case VMMDEV_HGCM_PARM_TYPE_32BIT:
			parm64[i].type = VMMDEV_HGCM_PARM_TYPE_32BIT;
			parm64[i].u.value32 = parm32[i].u.value32;
			break;

		case VMMDEV_HGCM_PARM_TYPE_64BIT:
			parm64[i].type = VMMDEV_HGCM_PARM_TYPE_64BIT;
			parm64[i].u.value64 = parm32[i].u.value64;
			break;

		case VMMDEV_HGCM_PARM_TYPE_LINADDR_OUT:
		case VMMDEV_HGCM_PARM_TYPE_LINADDR:
		case VMMDEV_HGCM_PARM_TYPE_LINADDR_IN:
			parm64[i].type = parm32[i].type;
			parm64[i].u.pointer.size = parm32[i].u.pointer.size;
			parm64[i].u.pointer.u.linear_addr =
				parm32[i].u.pointer.u.linear_addr;
			break;

		default:
			ret = -EINVAL;
		}
		if (ret < 0)
			goto out_free;
	}

	ret = vbg_hgcm_call(gdev, requestor, client_id, function, timeout_ms,
			    parm64, parm_count, vbox_status);
	if (ret < 0)
		goto out_free;

	/* Copy back. */
	for (i = 0; i < parm_count; i++) {
		switch (parm64[i].type) {
		case VMMDEV_HGCM_PARM_TYPE_32BIT:
			parm32[i].u.value32 = parm64[i].u.value32;
			break;

		case VMMDEV_HGCM_PARM_TYPE_64BIT:
			parm32[i].u.value64 = parm64[i].u.value64;
			break;

		case VMMDEV_HGCM_PARM_TYPE_LINADDR_OUT:
		case VMMDEV_HGCM_PARM_TYPE_LINADDR:
		case VMMDEV_HGCM_PARM_TYPE_LINADDR_IN:
			parm32[i].u.pointer.size = parm64[i].u.pointer.size;
			break;

		default:
			WARN_ON(1);
			ret = -EINVAL;
		}
	}

out_free:
	kfree(parm64);
	return ret;
}
#endif

static const int vbg_status_code_to_errno_table[] = {
	[-VERR_ACCESS_DENIED]                            = -EPERM,
	[-VERR_FILE_NOT_FOUND]                           = -ENOENT,
	[-VERR_PROCESS_NOT_FOUND]                        = -ESRCH,
	[-VERR_INTERRUPTED]                              = -EINTR,
	[-VERR_DEV_IO_ERROR]                             = -EIO,
	[-VERR_TOO_MUCH_DATA]                            = -E2BIG,
	[-VERR_BAD_EXE_FORMAT]                           = -ENOEXEC,
	[-VERR_INVALID_HANDLE]                           = -EBADF,
	[-VERR_TRY_AGAIN]                                = -EAGAIN,
	[-VERR_NO_MEMORY]                                = -ENOMEM,
	[-VERR_INVALID_POINTER]                          = -EFAULT,
	[-VERR_RESOURCE_BUSY]                            = -EBUSY,
	[-VERR_ALREADY_EXISTS]                           = -EEXIST,
	[-VERR_NOT_SAME_DEVICE]                          = -EXDEV,
	[-VERR_NOT_A_DIRECTORY]                          = -ENOTDIR,
	[-VERR_PATH_NOT_FOUND]                           = -ENOTDIR,
	[-VERR_INVALID_NAME]                             = -ENOENT,
	[-VERR_IS_A_DIRECTORY]                           = -EISDIR,
	[-VERR_INVALID_PARAMETER]                        = -EINVAL,
	[-VERR_TOO_MANY_OPEN_FILES]                      = -ENFILE,
	[-VERR_INVALID_FUNCTION]                         = -ENOTTY,
	[-VERR_SHARING_VIOLATION]                        = -ETXTBSY,
	[-VERR_FILE_TOO_BIG]                             = -EFBIG,
	[-VERR_DISK_FULL]                                = -ENOSPC,
	[-VERR_SEEK_ON_DEVICE]                           = -ESPIPE,
	[-VERR_WRITE_PROTECT]                            = -EROFS,
	[-VERR_BROKEN_PIPE]                              = -EPIPE,
	[-VERR_DEADLOCK]                                 = -EDEADLK,
	[-VERR_FILENAME_TOO_LONG]                        = -ENAMETOOLONG,
	[-VERR_FILE_LOCK_FAILED]                         = -ENOLCK,
	[-VERR_NOT_IMPLEMENTED]                          = -ENOSYS,
	[-VERR_NOT_SUPPORTED]                            = -ENOSYS,
	[-VERR_DIR_NOT_EMPTY]                            = -ENOTEMPTY,
	[-VERR_TOO_MANY_SYMLINKS]                        = -ELOOP,
	[-VERR_NO_MORE_FILES]                            = -ENODATA,
	[-VERR_NO_DATA]                                  = -ENODATA,
	[-VERR_NET_NO_NETWORK]                           = -ENONET,
	[-VERR_NET_NOT_UNIQUE_NAME]                      = -ENOTUNIQ,
	[-VERR_NO_TRANSLATION]                           = -EILSEQ,
	[-VERR_NET_NOT_SOCKET]                           = -ENOTSOCK,
	[-VERR_NET_DEST_ADDRESS_REQUIRED]                = -EDESTADDRREQ,
	[-VERR_NET_MSG_SIZE]                             = -EMSGSIZE,
	[-VERR_NET_PROTOCOL_TYPE]                        = -EPROTOTYPE,
	[-VERR_NET_PROTOCOL_NOT_AVAILABLE]               = -ENOPROTOOPT,
	[-VERR_NET_PROTOCOL_NOT_SUPPORTED]               = -EPROTONOSUPPORT,
	[-VERR_NET_SOCKET_TYPE_NOT_SUPPORTED]            = -ESOCKTNOSUPPORT,
	[-VERR_NET_OPERATION_NOT_SUPPORTED]              = -EOPNOTSUPP,
	[-VERR_NET_PROTOCOL_FAMILY_NOT_SUPPORTED]        = -EPFNOSUPPORT,
	[-VERR_NET_ADDRESS_FAMILY_NOT_SUPPORTED]         = -EAFNOSUPPORT,
	[-VERR_NET_ADDRESS_IN_USE]                       = -EADDRINUSE,
	[-VERR_NET_ADDRESS_NOT_AVAILABLE]                = -EADDRNOTAVAIL,
	[-VERR_NET_DOWN]                                 = -ENETDOWN,
	[-VERR_NET_UNREACHABLE]                          = -ENETUNREACH,
	[-VERR_NET_CONNECTION_RESET]                     = -ENETRESET,
	[-VERR_NET_CONNECTION_ABORTED]                   = -ECONNABORTED,
	[-VERR_NET_CONNECTION_RESET_BY_PEER]             = -ECONNRESET,
	[-VERR_NET_NO_BUFFER_SPACE]                      = -ENOBUFS,
	[-VERR_NET_ALREADY_CONNECTED]                    = -EISCONN,
	[-VERR_NET_NOT_CONNECTED]                        = -ENOTCONN,
	[-VERR_NET_SHUTDOWN]                             = -ESHUTDOWN,
	[-VERR_NET_TOO_MANY_REFERENCES]                  = -ETOOMANYREFS,
	[-VERR_TIMEOUT]                                  = -ETIMEDOUT,
	[-VERR_NET_CONNECTION_REFUSED]                   = -ECONNREFUSED,
	[-VERR_NET_HOST_DOWN]                            = -EHOSTDOWN,
	[-VERR_NET_HOST_UNREACHABLE]                     = -EHOSTUNREACH,
	[-VERR_NET_ALREADY_IN_PROGRESS]                  = -EALREADY,
	[-VERR_NET_IN_PROGRESS]                          = -EINPROGRESS,
	[-VERR_MEDIA_NOT_PRESENT]                        = -ENOMEDIUM,
	[-VERR_MEDIA_NOT_RECOGNIZED]                     = -EMEDIUMTYPE,
};

int vbg_status_code_to_errno(int rc)
{
	if (rc >= 0)
		return 0;

	rc = -rc;
	if (rc >= ARRAY_SIZE(vbg_status_code_to_errno_table) ||
	    vbg_status_code_to_errno_table[rc] == 0) {
		vbg_warn("%s: Unhandled err %d\n", __func__, -rc);
		return -EPROTO;
	}

	return vbg_status_code_to_errno_table[rc];
}
EXPORT_SYMBOL(vbg_status_code_to_errno);
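
/*
 * Example (illustrative): callers typically funnel both status channels
 * through this helper:
 *
 *	ret = vbg_hgcm_call(gdev, requestor, client_id, function,
 *			    timeout_ms, parms, parm_count, &vbox_status);
 *	if (ret < 0)
 *		return ret;
 *	return vbg_status_code_to_errno(vbox_status);
 */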