// SPDX-License-Identifier: GPL-2.0
/*
 * uio_hv_generic - generic UIO driver for VMBus
 *
 * Copyright (c) 2013-2016 Brocade Communications Systems, Inc.
 * Copyright (c) 2016, Microsoft Corporation.
 *
 * Since the driver does not declare any device ids, you must allocate
 * an id and bind the device to the driver yourself. For example:
 *
 * Associate the network GUID with the UIO device
 * # echo "f8615163-df3e-46c5-913f-f2d2f965ed0e" \
 *	> /sys/bus/vmbus/drivers/uio_hv_generic/new_id
 * Then rebind the device from hv_netvsc to uio_hv_generic
 * # echo -n "ed963694-e847-4b2a-85af-bc9cfc11d6f3" \
 *	> /sys/bus/vmbus/drivers/hv_netvsc/unbind
 * # echo -n "ed963694-e847-4b2a-85af-bc9cfc11d6f3" \
 *	> /sys/bus/vmbus/drivers/uio_hv_generic/bind
 */
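/*
 * Illustrative follow-up, a sketch only: assuming the standard UIO sysfs
 * layout (not something this driver sets up itself), the bound device shows
 * up as a /dev/uioX node whose instance number can be looked up under the
 * VMBus device directory, e.g.
 *
 *	# ls /sys/bus/vmbus/devices/ed963694-e847-4b2a-85af-bc9cfc11d6f3/uio/
 *	uio0
 */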
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/device.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/uio_driver.h>
#include <linux/netdevice.h>
#include <linux/if_ether.h>
#include <linux/skbuff.h>
#include <linux/hyperv.h>
#include <linux/vmalloc.h>
#include <linux/slab.h>

#include "../hv/hyperv_vmbus.h"

#define DRIVER_VERSION	"0.02.1"
#define DRIVER_AUTHOR	"Stephen Hemminger <sthemmin at microsoft.com>"
#define DRIVER_DESC	"Generic UIO driver for VMBus devices"

#define HV_RING_SIZE	 512	/* pages */
#define SEND_BUFFER_SIZE (16 * 1024 * 1024)
#define RECV_BUFFER_SIZE (31 * 1024 * 1024)

/*
 * List of resources to be mapped to user space.
 * Can be extended up to MAX_UIO_MAPS (5) items.
 * A userspace mapping sketch follows the enum below.
 */
enum hv_uio_map {
	TXRX_RING_MAP = 0,
	INT_PAGE_MAP,
	MON_PAGE_MAP,
	RECV_BUF_MAP,
	SEND_BUF_MAP
};
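/*
 * Illustrative userspace sketch, not part of the driver: by the generic UIO
 * convention, map N of /dev/uioX is selected by passing N * page-size as the
 * mmap() offset, and each map's size is exported in
 * /sys/class/uio/uioX/maps/mapN/size.  The device path and the sizes spelled
 * out below (from the defines above) are assumptions for the example only.
 *
 *	#include <fcntl.h>
 *	#include <sys/mman.h>
 *	#include <unistd.h>
 *
 *	int fd = open("/dev/uio0", O_RDWR);
 *	long psz = sysconf(_SC_PAGESIZE);
 *	// TXRX_RING_MAP is map 0: both ring buffers, 2 * HV_RING_SIZE pages
 *	void *rings = mmap(NULL, 2 * 512 * psz, PROT_READ | PROT_WRITE,
 *			   MAP_SHARED, fd, 0 * psz);
 *	// RECV_BUF_MAP is map 3: the RECV_BUFFER_SIZE (31 MiB) receive area
 *	void *recv = mmap(NULL, 31 * 1024 * 1024, PROT_READ | PROT_WRITE,
 *			  MAP_SHARED, fd, 3 * psz);
 */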

struct hv_uio_private_data {
	struct uio_info info;
	struct hv_device *device;
	atomic_t refcnt;

	void *recv_buf;
	u32 recv_gpadl;
	char recv_name[32];	/* "recv:4294967295" */

	void *send_buf;
	u32 send_gpadl;
	char send_name[32];
};

/*
 * This is the irqcontrol callback to be registered to uio_info.
 * It can be used to enable/disable interrupts from user space processes.
 *
 * @param info
 *  pointer to uio_info.
 * @param irq_state
 *  state value. 1 to enable interrupt, 0 to disable interrupt.
 */
static int
hv_uio_irqcontrol(struct uio_info *info, s32 irq_state)
{
	struct hv_uio_private_data *pdata = info->priv;
	struct hv_device *dev = pdata->device;

	dev->channel->inbound.ring_buffer->interrupt_mask = !irq_state;
	virt_mb();

	return 0;
}
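/*
 * Illustrative userspace sketch, assuming the device node is /dev/uio0: with
 * the generic UIO interface, a 4-byte read of the device blocks until the
 * next channel interrupt and returns the event count, while a 4-byte write
 * lands in hv_uio_irqcontrol() above (1 re-enables, 0 masks the interrupt).
 * After the channel is rescinded (see hv_uio_rescind() below) the read
 * returns -EIO instead.
 *
 *	#include <fcntl.h>
 *	#include <stdint.h>
 *	#include <unistd.h>
 *
 *	int fd = open("/dev/uio0", O_RDWR);
 *	int32_t unmask = 1;
 *	uint32_t events;
 *
 *	write(fd, &unmask, sizeof(unmask));	// hv_uio_irqcontrol(info, 1)
 *	read(fd, &events, sizeof(events));	// sleeps until uio_event_notify()
 */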

/*
 * Callback from vmbus_event when something is in inbound ring.
 */
static void hv_uio_channel_cb(void *context)
{
	struct vmbus_channel *chan = context;
	struct hv_device *hv_dev = chan->device_obj;
	struct hv_uio_private_data *pdata = hv_get_drvdata(hv_dev);

	chan->inbound.ring_buffer->interrupt_mask = 1;
	virt_mb();

	uio_event_notify(&pdata->info);
}

/*
 * Callback from vmbus_event when channel is rescinded.
 */
static void hv_uio_rescind(struct vmbus_channel *channel)
{
	struct hv_device *hv_dev = channel->primary_channel->device_obj;
	struct hv_uio_private_data *pdata = hv_get_drvdata(hv_dev);

	/*
	 * Turn off the interrupt file handle.
	 * The next read for an event will return -EIO.
	 */
	pdata->info.irq = 0;

	/* Wake up reader */
	uio_event_notify(&pdata->info);
}

/* Sysfs API to allow mmap of the ring buffers
 * The ring buffer is allocated as contiguous memory by vmbus_alloc_ring()
 */
static int hv_uio_ring_mmap(struct file *filp, struct kobject *kobj,
			    struct bin_attribute *attr,
			    struct vm_area_struct *vma)
{
	struct vmbus_channel *channel
		= container_of(kobj, struct vmbus_channel, kobj);
	void *ring_buffer = page_address(channel->ringbuffer_page);

	if (channel->state != CHANNEL_OPENED_STATE)
		return -ENODEV;

	return vm_iomap_memory(vma, virt_to_phys(ring_buffer),
			       channel->ringbuffer_pagecount << PAGE_SHIFT);
}

static const struct bin_attribute ring_buffer_bin_attr = {
	.attr = {
		.name = "ring",
		.mode = 0600,
	},
	.size = 2 * HV_RING_SIZE * PAGE_SIZE,
	.mmap = hv_uio_ring_mmap,
};
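/*
 * Illustrative userspace sketch: the "ring" file created above sits in the
 * per-channel sysfs directory (the path layout below is an assumption), so
 * the ring buffers of subchannels opened by hv_uio_new_channel() below, as
 * well as the primary channel's entry created in hv_uio_probe(), can be
 * mapped much like map 0 of /dev/uioX.
 *
 *	// <uuid> and <relid> are placeholders; assumes 4 KiB pages.
 *	int fd = open("/sys/bus/vmbus/devices/<uuid>/channels/<relid>/ring",
 *		      O_RDWR);
 *	// Covers both rings: 2 * HV_RING_SIZE pages, matching .size above.
 *	void *ring = mmap(NULL, 2 * 512 * 4096, PROT_READ | PROT_WRITE,
 *			  MAP_SHARED, fd, 0);
 */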

/* Callback from VMBUS subsystem when new channel created. */
static void
hv_uio_new_channel(struct vmbus_channel *new_sc)
{
	struct hv_device *hv_dev = new_sc->primary_channel->device_obj;
	struct device *device = &hv_dev->device;
	const size_t ring_bytes = HV_RING_SIZE * PAGE_SIZE;
	int ret;

	/* Create host communication ring */
	ret = vmbus_open(new_sc, ring_bytes, ring_bytes, NULL, 0,
			 hv_uio_channel_cb, new_sc);
	if (ret) {
		dev_err(device, "vmbus_open subchannel failed: %d\n", ret);
		return;
	}

	/* Disable interrupts on sub channel */
	new_sc->inbound.ring_buffer->interrupt_mask = 1;
	set_channel_read_mode(new_sc, HV_CALL_ISR);

	ret = sysfs_create_bin_file(&new_sc->kobj, &ring_buffer_bin_attr);
	if (ret) {
		dev_err(device, "sysfs create ring bin file failed; %d\n", ret);
		vmbus_close(new_sc);
	}
}

/* free the reserved buffers for send and receive */
static void
hv_uio_cleanup(struct hv_device *dev, struct hv_uio_private_data *pdata)
{
	if (pdata->send_gpadl) {
		vmbus_teardown_gpadl(dev->channel, pdata->send_gpadl);
		pdata->send_gpadl = 0;
		vfree(pdata->send_buf);
	}

	if (pdata->recv_gpadl) {
		vmbus_teardown_gpadl(dev->channel, pdata->recv_gpadl);
		pdata->recv_gpadl = 0;
		vfree(pdata->recv_buf);
	}
}

/* VMBus primary channel is opened on first use */
static int
hv_uio_open(struct uio_info *info, struct inode *inode)
{
	struct hv_uio_private_data *pdata
		= container_of(info, struct hv_uio_private_data, info);
	struct hv_device *dev = pdata->device;
	int ret;

	if (atomic_inc_return(&pdata->refcnt) != 1)
		return 0;

	vmbus_set_chn_rescind_callback(dev->channel, hv_uio_rescind);
	vmbus_set_sc_create_callback(dev->channel, hv_uio_new_channel);

	ret = vmbus_connect_ring(dev->channel,
				 hv_uio_channel_cb, dev->channel);
	if (ret == 0)
		dev->channel->inbound.ring_buffer->interrupt_mask = 1;
	else
		atomic_dec(&pdata->refcnt);

	return ret;
}

/* VMBus primary channel is closed on last close */
static int
hv_uio_release(struct uio_info *info, struct inode *inode)
{
	struct hv_uio_private_data *pdata
		= container_of(info, struct hv_uio_private_data, info);
	struct hv_device *dev = pdata->device;
	int ret = 0;

	if (atomic_dec_and_test(&pdata->refcnt))
		ret = vmbus_disconnect_ring(dev->channel);

	return ret;
}

static int
hv_uio_probe(struct hv_device *dev,
	     const struct hv_vmbus_device_id *dev_id)
{
	struct vmbus_channel *channel = dev->channel;
	struct hv_uio_private_data *pdata;
	void *ring_buffer;
	int ret;

	/*
	 * Communication with the host has to go through the shared
	 * monitor pages, not hypercall signaling.
	 */
	if (!channel->offermsg.monitor_allocated) {
		dev_err(&dev->device, "vmbus channel requires hypercall\n");
		return -ENOTSUPP;
	}

	pdata = kzalloc(sizeof(*pdata), GFP_KERNEL);
	if (!pdata)
		return -ENOMEM;

	ret = vmbus_alloc_ring(channel, HV_RING_SIZE * PAGE_SIZE,
			       HV_RING_SIZE * PAGE_SIZE);
	if (ret)
		goto fail;

	set_channel_read_mode(channel, HV_CALL_ISR);

	/* Fill general uio info */
	pdata->info.name = "uio_hv_generic";
	pdata->info.version = DRIVER_VERSION;
	pdata->info.irqcontrol = hv_uio_irqcontrol;
	pdata->info.open = hv_uio_open;
	pdata->info.release = hv_uio_release;
	pdata->info.irq = UIO_IRQ_CUSTOM;
	atomic_set(&pdata->refcnt, 0);

	/* mem resources */
	pdata->info.mem[TXRX_RING_MAP].name = "txrx_rings";
	ring_buffer = page_address(channel->ringbuffer_page);
	pdata->info.mem[TXRX_RING_MAP].addr
		= (uintptr_t)virt_to_phys(ring_buffer);
	pdata->info.mem[TXRX_RING_MAP].size
		= channel->ringbuffer_pagecount << PAGE_SHIFT;
	pdata->info.mem[TXRX_RING_MAP].memtype = UIO_MEM_IOVA;

	pdata->info.mem[INT_PAGE_MAP].name = "int_page";
	pdata->info.mem[INT_PAGE_MAP].addr
		= (uintptr_t)vmbus_connection.int_page;
	pdata->info.mem[INT_PAGE_MAP].size = PAGE_SIZE;
	pdata->info.mem[INT_PAGE_MAP].memtype = UIO_MEM_LOGICAL;

	pdata->info.mem[MON_PAGE_MAP].name = "monitor_page";
	pdata->info.mem[MON_PAGE_MAP].addr
		= (uintptr_t)vmbus_connection.monitor_pages[1];
	pdata->info.mem[MON_PAGE_MAP].size = PAGE_SIZE;
	pdata->info.mem[MON_PAGE_MAP].memtype = UIO_MEM_LOGICAL;

	pdata->recv_buf = vzalloc(RECV_BUFFER_SIZE);
	if (pdata->recv_buf == NULL) {
		ret = -ENOMEM;
		goto fail_close;
	}

	ret = vmbus_establish_gpadl(channel, pdata->recv_buf,
				    RECV_BUFFER_SIZE, &pdata->recv_gpadl);
	if (ret) {
		vfree(pdata->recv_buf);
		goto fail_close;
	}

	/* put the GPADL (Guest Physical Address Descriptor List) in name */
	snprintf(pdata->recv_name, sizeof(pdata->recv_name),
		 "recv:%u", pdata->recv_gpadl);
	pdata->info.mem[RECV_BUF_MAP].name = pdata->recv_name;
	pdata->info.mem[RECV_BUF_MAP].addr
		= (uintptr_t)pdata->recv_buf;
	pdata->info.mem[RECV_BUF_MAP].size = RECV_BUFFER_SIZE;
	pdata->info.mem[RECV_BUF_MAP].memtype = UIO_MEM_VIRTUAL;

	pdata->send_buf = vzalloc(SEND_BUFFER_SIZE);
	if (pdata->send_buf == NULL) {
		ret = -ENOMEM;
		goto fail_close;
	}

	ret = vmbus_establish_gpadl(channel, pdata->send_buf,
				    SEND_BUFFER_SIZE, &pdata->send_gpadl);
	if (ret) {
		vfree(pdata->send_buf);
		goto fail_close;
	}

	snprintf(pdata->send_name, sizeof(pdata->send_name),
		 "send:%u", pdata->send_gpadl);
	pdata->info.mem[SEND_BUF_MAP].name = pdata->send_name;
	pdata->info.mem[SEND_BUF_MAP].addr
		= (uintptr_t)pdata->send_buf;
	pdata->info.mem[SEND_BUF_MAP].size = SEND_BUFFER_SIZE;
	pdata->info.mem[SEND_BUF_MAP].memtype = UIO_MEM_VIRTUAL;

	pdata->info.priv = pdata;
	pdata->device = dev;

	ret = uio_register_device(&dev->device, &pdata->info);
	if (ret) {
		dev_err(&dev->device, "hv_uio register failed\n");
		goto fail_close;
	}

	ret = sysfs_create_bin_file(&channel->kobj, &ring_buffer_bin_attr);
	if (ret)
		dev_notice(&dev->device,
			   "sysfs create ring bin file failed; %d\n", ret);

	hv_set_drvdata(dev, pdata);

	return 0;

fail_close:
	hv_uio_cleanup(dev, pdata);
	/* also release the ring buffer allocated earlier in probe */
	vmbus_free_ring(dev->channel);
fail:
	kfree(pdata);

	return ret;
}

static int
hv_uio_remove(struct hv_device *dev)
{
	struct hv_uio_private_data *pdata = hv_get_drvdata(dev);

	if (!pdata)
		return 0;

	sysfs_remove_bin_file(&dev->channel->kobj, &ring_buffer_bin_attr);
	uio_unregister_device(&pdata->info);
	hv_uio_cleanup(dev, pdata);
	hv_set_drvdata(dev, NULL);

	vmbus_free_ring(dev->channel);
	kfree(pdata);
	return 0;
}

static struct hv_driver hv_uio_drv = {
	.name = "uio_hv_generic",
	.id_table = NULL, /* only dynamic id's */
	.probe = hv_uio_probe,
	.remove = hv_uio_remove,
};

static int __init
hyperv_module_init(void)
{
	return vmbus_driver_register(&hv_uio_drv);
}

static void __exit
hyperv_module_exit(void)
{
	vmbus_driver_unregister(&hv_uio_drv);
}

module_init(hyperv_module_init);
module_exit(hyperv_module_exit);

MODULE_VERSION(DRIVER_VERSION);
MODULE_LICENSE("GPL v2");
MODULE_AUTHOR(DRIVER_AUTHOR);
MODULE_DESCRIPTION(DRIVER_DESC);