^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1) // SPDX-License-Identifier: GPL-2.0+
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3) * IBM Power Systems Virtual Management Channel Support.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5) * Copyright (c) 2004, 2018 IBM Corp.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6) * Dave Engebretsen engebret@us.ibm.com
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7) * Steven Royer seroyer@linux.vnet.ibm.com
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8) * Adam Reznechek adreznec@linux.vnet.ibm.com
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9) * Bryant G. Ly <bryantly@linux.vnet.ibm.com>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12) #include <linux/module.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 13) #include <linux/kernel.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14) #include <linux/kthread.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15) #include <linux/major.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 16) #include <linux/string.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 17) #include <linux/fcntl.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 18) #include <linux/slab.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 19) #include <linux/poll.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 20) #include <linux/init.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 21) #include <linux/fs.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 22) #include <linux/interrupt.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 23) #include <linux/spinlock.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 24) #include <linux/percpu.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 25) #include <linux/delay.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 26) #include <linux/uaccess.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 27) #include <linux/io.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 28) #include <linux/miscdevice.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 29) #include <linux/sched/signal.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 30)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 31) #include <asm/byteorder.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 32) #include <asm/irq.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 33) #include <asm/vio.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 34)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 35) #include "ibmvmc.h"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 36)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 37) #define IBMVMC_DRIVER_VERSION "1.0"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 38)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 39) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 40) * Static global variables
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 41) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 42) static DECLARE_WAIT_QUEUE_HEAD(ibmvmc_read_wait);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 43)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 44) static const char ibmvmc_driver_name[] = "ibmvmc";
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 45)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 46) static struct ibmvmc_struct ibmvmc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 47) static struct ibmvmc_hmc hmcs[MAX_HMCS];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 48) static struct crq_server_adapter ibmvmc_adapter;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 49)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 50) static int ibmvmc_max_buf_pool_size = DEFAULT_BUF_POOL_SIZE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 51) static int ibmvmc_max_hmcs = DEFAULT_HMCS;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 52) static int ibmvmc_max_mtu = DEFAULT_MTU;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 53)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 54) static inline long h_copy_rdma(s64 length, u64 sliobn, u64 slioba,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 55) u64 dliobn, u64 dlioba)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 56) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 57) long rc = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 58)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 59) /* Ensure all writes to source memory are visible before hcall */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 60) dma_wmb();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 61) pr_debug("ibmvmc: h_copy_rdma(0x%llx, 0x%llx, 0x%llx, 0x%llx, 0x%llx\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 62) length, sliobn, slioba, dliobn, dlioba);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 63) rc = plpar_hcall_norets(H_COPY_RDMA, length, sliobn, slioba,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 64) dliobn, dlioba);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 65) pr_debug("ibmvmc: h_copy_rdma rc = 0x%lx\n", rc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 66)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 67) return rc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 68) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 69)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 70) static inline void h_free_crq(uint32_t unit_address)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 71) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 72) long rc = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 73)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 74) do {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 75) if (H_IS_LONG_BUSY(rc))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 76) msleep(get_longbusy_msecs(rc));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 77)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 78) rc = plpar_hcall_norets(H_FREE_CRQ, unit_address);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 79) } while ((rc == H_BUSY) || (H_IS_LONG_BUSY(rc)));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 80) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 81)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 82) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 83) * h_request_vmc: - request a hypervisor virtual management channel device
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 84) * @vmc_index: drc index of the vmc device created
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 85) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 86) * Requests the hypervisor create a new virtual management channel device,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 87) * allowing this partition to send hypervisor virtualization control
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 88) * commands.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 89) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 90) * Return:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 91) * 0 - Success
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 92) * Non-zero - Failure
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 93) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 94) static inline long h_request_vmc(u32 *vmc_index)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 95) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 96) long rc = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 97) unsigned long retbuf[PLPAR_HCALL_BUFSIZE];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 98)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 99) do {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 100) if (H_IS_LONG_BUSY(rc))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 101) msleep(get_longbusy_msecs(rc));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 102)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 103) /* Call to request the VMC device from phyp */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 104) rc = plpar_hcall(H_REQUEST_VMC, retbuf);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 105) pr_debug("ibmvmc: %s rc = 0x%lx\n", __func__, rc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 106) *vmc_index = retbuf[0];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 107) } while ((rc == H_BUSY) || (H_IS_LONG_BUSY(rc)));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 108)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 109) return rc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 110) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 111)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 112) /* routines for managing a command/response queue */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 113) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 114) * ibmvmc_handle_event: - Interrupt handler for crq events
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 115) * @irq: number of irq to handle, not used
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 116) * @dev_instance: crq_server_adapter that received interrupt
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 117) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 118) * Disables interrupts and schedules ibmvmc_task
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 119) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 120) * Always returns IRQ_HANDLED
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 121) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 122) static irqreturn_t ibmvmc_handle_event(int irq, void *dev_instance)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 123) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 124) struct crq_server_adapter *adapter =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 125) (struct crq_server_adapter *)dev_instance;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 126)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 127) vio_disable_interrupts(to_vio_dev(adapter->dev));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 128) tasklet_schedule(&adapter->work_task);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 129)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 130) return IRQ_HANDLED;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 131) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 132)
/**
 * ibmvmc_release_crq_queue - Release CRQ Queue
 *
 * @adapter: crq_server_adapter struct
 *
 * Tears down the adapter's CRQ: releases the irq, stops the work tasklet
 * and any running reset kthread, frees the CRQ in the hypervisor, then
 * unmaps and frees the queue page. (The previous kernel-doc documented a
 * return value, but this function returns void.)
 */
static void ibmvmc_release_crq_queue(struct crq_server_adapter *adapter)
{
	struct vio_dev *vdev = to_vio_dev(adapter->dev);
	struct crq_queue *queue = &adapter->queue;

	/* Quiesce interrupt/deferred work before touching the queue */
	free_irq(vdev->irq, (void *)adapter);
	tasklet_kill(&adapter->work_task);

	if (adapter->reset_task)
		kthread_stop(adapter->reset_task);

	/* Release the hypervisor side, then the local DMA mapping/page */
	h_free_crq(vdev->unit_address);
	dma_unmap_single(adapter->dev,
			 queue->msg_token,
			 queue->size * sizeof(*queue->msgs), DMA_BIDIRECTIONAL);
	free_page((unsigned long)queue->msgs);
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 159)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 160) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 161) * ibmvmc_reset_crq_queue - Reset CRQ Queue
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 162) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 163) * @adapter: crq_server_adapter struct
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 164) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 165) * This function calls h_free_crq and then calls H_REG_CRQ and does all the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 166) * bookkeeping to get us back to where we can communicate.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 167) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 168) * Return:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 169) * 0 - Success
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 170) * Non-Zero - Failure
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 171) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 172) static int ibmvmc_reset_crq_queue(struct crq_server_adapter *adapter)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 173) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 174) struct vio_dev *vdev = to_vio_dev(adapter->dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 175) struct crq_queue *queue = &adapter->queue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 176) int rc = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 177)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 178) /* Close the CRQ */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 179) h_free_crq(vdev->unit_address);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 180)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 181) /* Clean out the queue */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 182) memset(queue->msgs, 0x00, PAGE_SIZE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 183) queue->cur = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 184)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 185) /* And re-open it again */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 186) rc = plpar_hcall_norets(H_REG_CRQ,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 187) vdev->unit_address,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 188) queue->msg_token, PAGE_SIZE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 189) if (rc == 2)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 190) /* Adapter is good, but other end is not ready */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 191) dev_warn(adapter->dev, "Partner adapter not ready\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 192) else if (rc != 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 193) dev_err(adapter->dev, "couldn't register crq--rc 0x%x\n", rc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 194)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 195) return rc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 196) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 197)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 198) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 199) * crq_queue_next_crq: - Returns the next entry in message queue
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 200) * @queue: crq_queue to use
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 201) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 202) * Returns pointer to next entry in queue, or NULL if there are no new
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 203) * entried in the CRQ.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 204) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 205) static struct ibmvmc_crq_msg *crq_queue_next_crq(struct crq_queue *queue)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 206) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 207) struct ibmvmc_crq_msg *crq;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 208) unsigned long flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 209)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 210) spin_lock_irqsave(&queue->lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 211) crq = &queue->msgs[queue->cur];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 212) if (crq->valid & 0x80) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 213) if (++queue->cur == queue->size)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 214) queue->cur = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 215)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 216) /* Ensure the read of the valid bit occurs before reading any
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 217) * other bits of the CRQ entry
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 218) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 219) dma_rmb();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 220) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 221) crq = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 222) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 223)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 224) spin_unlock_irqrestore(&queue->lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 225)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 226) return crq;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 227) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 228)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 229) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 230) * ibmvmc_send_crq - Send CRQ
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 231) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 232) * @adapter: crq_server_adapter struct
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 233) * @word1: Word1 Data field
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 234) * @word2: Word2 Data field
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 235) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 236) * Return:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 237) * 0 - Success
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 238) * Non-Zero - Failure
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 239) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 240) static long ibmvmc_send_crq(struct crq_server_adapter *adapter,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 241) u64 word1, u64 word2)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 242) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 243) struct vio_dev *vdev = to_vio_dev(adapter->dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 244) long rc = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 245)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 246) dev_dbg(adapter->dev, "(0x%x, 0x%016llx, 0x%016llx)\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 247) vdev->unit_address, word1, word2);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 248)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 249) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 250) * Ensure the command buffer is flushed to memory before handing it
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 251) * over to the other side to prevent it from fetching any stale data.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 252) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 253) dma_wmb();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 254) rc = plpar_hcall_norets(H_SEND_CRQ, vdev->unit_address, word1, word2);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 255) dev_dbg(adapter->dev, "rc = 0x%lx\n", rc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 256)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 257) return rc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 258) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 259)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 260) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 261) * alloc_dma_buffer - Create DMA Buffer
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 262) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 263) * @vdev: vio_dev struct
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 264) * @size: Size field
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 265) * @dma_handle: DMA address field
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 266) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 267) * Allocates memory for the command queue and maps remote memory into an
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 268) * ioba.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 269) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 270) * Returns a pointer to the buffer
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 271) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 272) static void *alloc_dma_buffer(struct vio_dev *vdev, size_t size,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 273) dma_addr_t *dma_handle)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 274) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 275) /* allocate memory */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 276) void *buffer = kzalloc(size, GFP_ATOMIC);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 277)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 278) if (!buffer) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 279) *dma_handle = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 280) return NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 281) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 282)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 283) /* DMA map */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 284) *dma_handle = dma_map_single(&vdev->dev, buffer, size,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 285) DMA_BIDIRECTIONAL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 286)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 287) if (dma_mapping_error(&vdev->dev, *dma_handle)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 288) *dma_handle = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 289) kfree_sensitive(buffer);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 290) return NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 291) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 292)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 293) return buffer;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 294) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 295)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 296) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 297) * free_dma_buffer - Free DMA Buffer
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 298) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 299) * @vdev: vio_dev struct
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 300) * @size: Size field
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 301) * @vaddr: Address field
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 302) * @dma_handle: DMA address field
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 303) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 304) * Releases memory for a command queue and unmaps mapped remote memory.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 305) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 306) static void free_dma_buffer(struct vio_dev *vdev, size_t size, void *vaddr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 307) dma_addr_t dma_handle)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 308) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 309) /* DMA unmap */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 310) dma_unmap_single(&vdev->dev, dma_handle, size, DMA_BIDIRECTIONAL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 311)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 312) /* deallocate memory */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 313) kfree_sensitive(vaddr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 314) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 315)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 316) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 317) * ibmvmc_get_valid_hmc_buffer - Retrieve Valid HMC Buffer
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 318) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 319) * @hmc_index: HMC Index Field
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 320) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 321) * Return:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 322) * Pointer to ibmvmc_buffer
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 323) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 324) static struct ibmvmc_buffer *ibmvmc_get_valid_hmc_buffer(u8 hmc_index)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 325) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 326) struct ibmvmc_buffer *buffer;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 327) struct ibmvmc_buffer *ret_buf = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 328) unsigned long i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 329)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 330) if (hmc_index > ibmvmc.max_hmc_index)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 331) return NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 332)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 333) buffer = hmcs[hmc_index].buffer;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 334)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 335) for (i = 0; i < ibmvmc_max_buf_pool_size; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 336) if (buffer[i].valid && buffer[i].free &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 337) buffer[i].owner == VMC_BUF_OWNER_ALPHA) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 338) buffer[i].free = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 339) ret_buf = &buffer[i];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 340) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 341) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 342) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 343)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 344) return ret_buf;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 345) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 346)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 347) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 348) * ibmvmc_get_free_hmc_buffer - Get Free HMC Buffer
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 349) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 350) * @adapter: crq_server_adapter struct
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 351) * @hmc_index: Hmc Index field
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 352) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 353) * Return:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 354) * Pointer to ibmvmc_buffer
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 355) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 356) static struct ibmvmc_buffer *ibmvmc_get_free_hmc_buffer(struct crq_server_adapter *adapter,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 357) u8 hmc_index)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 358) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 359) struct ibmvmc_buffer *buffer;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 360) struct ibmvmc_buffer *ret_buf = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 361) unsigned long i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 362)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 363) if (hmc_index > ibmvmc.max_hmc_index) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 364) dev_info(adapter->dev, "get_free_hmc_buffer: invalid hmc_index=0x%x\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 365) hmc_index);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 366) return NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 367) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 368)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 369) buffer = hmcs[hmc_index].buffer;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 370)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 371) for (i = 0; i < ibmvmc_max_buf_pool_size; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 372) if (buffer[i].free &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 373) buffer[i].owner == VMC_BUF_OWNER_ALPHA) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 374) buffer[i].free = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 375) ret_buf = &buffer[i];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 376) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 377) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 378) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 379)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 380) return ret_buf;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 381) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 382)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 383) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 384) * ibmvmc_free_hmc_buffer - Free an HMC Buffer
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 385) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 386) * @hmc: ibmvmc_hmc struct
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 387) * @buffer: ibmvmc_buffer struct
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 388) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 389) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 390) static void ibmvmc_free_hmc_buffer(struct ibmvmc_hmc *hmc,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 391) struct ibmvmc_buffer *buffer)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 392) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 393) unsigned long flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 394)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 395) spin_lock_irqsave(&hmc->lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 396) buffer->free = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 397) spin_unlock_irqrestore(&hmc->lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 398) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 399)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 400) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 401) * ibmvmc_count_hmc_buffers - Count HMC Buffers
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 402) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 403) * @hmc_index: HMC Index field
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 404) * @valid: Valid number of buffers field
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 405) * @free: Free number of buffers field
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 406) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 407) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 408) static void ibmvmc_count_hmc_buffers(u8 hmc_index, unsigned int *valid,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 409) unsigned int *free)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 410) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 411) struct ibmvmc_buffer *buffer;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 412) unsigned long i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 413) unsigned long flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 414)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 415) if (hmc_index > ibmvmc.max_hmc_index)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 416) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 417)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 418) if (!valid || !free)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 419) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 420)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 421) *valid = 0; *free = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 422)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 423) buffer = hmcs[hmc_index].buffer;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 424) spin_lock_irqsave(&hmcs[hmc_index].lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 425)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 426) for (i = 0; i < ibmvmc_max_buf_pool_size; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 427) if (buffer[i].valid) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 428) *valid = *valid + 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 429) if (buffer[i].free)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 430) *free = *free + 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 431) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 432) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 433)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 434) spin_unlock_irqrestore(&hmcs[hmc_index].lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 435) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 436)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 437) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 438) * ibmvmc_get_free_hmc - Get Free HMC
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 439) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 440) * Return:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 441) * Pointer to an available HMC Connection
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 442) * Null otherwise
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 443) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 444) static struct ibmvmc_hmc *ibmvmc_get_free_hmc(void)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 445) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 446) unsigned long i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 447) unsigned long flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 448)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 449) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 450) * Find an available HMC connection.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 451) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 452) for (i = 0; i <= ibmvmc.max_hmc_index; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 453) spin_lock_irqsave(&hmcs[i].lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 454) if (hmcs[i].state == ibmhmc_state_free) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 455) hmcs[i].index = i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 456) hmcs[i].state = ibmhmc_state_initial;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 457) spin_unlock_irqrestore(&hmcs[i].lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 458) return &hmcs[i];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 459) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 460) spin_unlock_irqrestore(&hmcs[i].lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 461) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 462)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 463) return NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 464) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 465)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 466) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 467) * ibmvmc_return_hmc - Return an HMC Connection
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 468) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 469) * @hmc: ibmvmc_hmc struct
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 470) * @release_readers: Number of readers connected to session
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 471) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 472) * This function releases the HMC connections back into the pool.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 473) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 474) * Return:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 475) * 0 - Success
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 476) * Non-zero - Failure
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 477) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 478) static int ibmvmc_return_hmc(struct ibmvmc_hmc *hmc, bool release_readers)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 479) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 480) struct ibmvmc_buffer *buffer;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 481) struct crq_server_adapter *adapter;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 482) struct vio_dev *vdev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 483) unsigned long i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 484) unsigned long flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 485)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 486) if (!hmc || !hmc->adapter)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 487) return -EIO;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 488)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 489) if (release_readers) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 490) if (hmc->file_session) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 491) struct ibmvmc_file_session *session = hmc->file_session;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 492)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 493) session->valid = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 494) wake_up_interruptible(&ibmvmc_read_wait);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 495) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 496) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 497)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 498) adapter = hmc->adapter;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 499) vdev = to_vio_dev(adapter->dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 500)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 501) spin_lock_irqsave(&hmc->lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 502) hmc->index = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 503) hmc->state = ibmhmc_state_free;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 504) hmc->queue_head = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 505) hmc->queue_tail = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 506) buffer = hmc->buffer;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 507) for (i = 0; i < ibmvmc_max_buf_pool_size; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 508) if (buffer[i].valid) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 509) free_dma_buffer(vdev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 510) ibmvmc.max_mtu,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 511) buffer[i].real_addr_local,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 512) buffer[i].dma_addr_local);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 513) dev_dbg(adapter->dev, "Forgot buffer id 0x%lx\n", i);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 514) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 515) memset(&buffer[i], 0, sizeof(struct ibmvmc_buffer));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 516)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 517) hmc->queue_outbound_msgs[i] = VMC_INVALID_BUFFER_ID;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 518) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 519)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 520) spin_unlock_irqrestore(&hmc->lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 521)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 522) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 523) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 524)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 525) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 526) * ibmvmc_send_open - Interface Open
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 527) * @buffer: Pointer to ibmvmc_buffer struct
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 528) * @hmc: Pointer to ibmvmc_hmc struct
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 529) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 530) * This command is sent by the management partition as the result of a
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 531) * management partition device request. It causes the hypervisor to
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 532) * prepare a set of data buffers for the management application connection
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 533) * indicated HMC idx. A unique HMC Idx would be used if multiple management
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 534) * applications running concurrently were desired. Before responding to this
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 535) * command, the hypervisor must provide the management partition with at
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 536) * least one of these new buffers via the Add Buffer. This indicates whether
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 537) * the messages are inbound or outbound from the hypervisor.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 538) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 539) * Return:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 540) * 0 - Success
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 541) * Non-zero - Failure
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 542) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 543) static int ibmvmc_send_open(struct ibmvmc_buffer *buffer,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 544) struct ibmvmc_hmc *hmc)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 545) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 546) struct ibmvmc_crq_msg crq_msg;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 547) struct crq_server_adapter *adapter;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 548) __be64 *crq_as_u64 = (__be64 *)&crq_msg;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 549) int rc = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 550)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 551) if (!hmc || !hmc->adapter)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 552) return -EIO;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 553)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 554) adapter = hmc->adapter;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 555)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 556) dev_dbg(adapter->dev, "send_open: 0x%lx 0x%lx 0x%lx 0x%lx 0x%lx\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 557) (unsigned long)buffer->size, (unsigned long)adapter->liobn,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 558) (unsigned long)buffer->dma_addr_local,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 559) (unsigned long)adapter->riobn,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 560) (unsigned long)buffer->dma_addr_remote);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 561)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 562) rc = h_copy_rdma(buffer->size,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 563) adapter->liobn,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 564) buffer->dma_addr_local,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 565) adapter->riobn,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 566) buffer->dma_addr_remote);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 567) if (rc) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 568) dev_err(adapter->dev, "Error: In send_open, h_copy_rdma rc 0x%x\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 569) rc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 570) return -EIO;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 571) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 572)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 573) hmc->state = ibmhmc_state_opening;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 574)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 575) crq_msg.valid = 0x80;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 576) crq_msg.type = VMC_MSG_OPEN;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 577) crq_msg.status = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 578) crq_msg.var1.rsvd = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 579) crq_msg.hmc_session = hmc->session;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 580) crq_msg.hmc_index = hmc->index;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 581) crq_msg.var2.buffer_id = cpu_to_be16(buffer->id);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 582) crq_msg.rsvd = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 583) crq_msg.var3.rsvd = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 584)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 585) ibmvmc_send_crq(adapter, be64_to_cpu(crq_as_u64[0]),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 586) be64_to_cpu(crq_as_u64[1]));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 587)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 588) return rc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 589) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 590)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 591) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 592) * ibmvmc_send_close - Interface Close
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 593) * @hmc: Pointer to ibmvmc_hmc struct
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 594) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 595) * This command is sent by the management partition to terminate a
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 596) * management application to hypervisor connection. When this command is
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 597) * sent, the management partition has quiesced all I/O operations to all
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 598) * buffers associated with this management application connection, and
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 599) * has freed any storage for these buffers.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 600) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 601) * Return:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 602) * 0 - Success
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 603) * Non-zero - Failure
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 604) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 605) static int ibmvmc_send_close(struct ibmvmc_hmc *hmc)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 606) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 607) struct ibmvmc_crq_msg crq_msg;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 608) struct crq_server_adapter *adapter;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 609) __be64 *crq_as_u64 = (__be64 *)&crq_msg;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 610) int rc = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 611)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 612) if (!hmc || !hmc->adapter)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 613) return -EIO;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 614)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 615) adapter = hmc->adapter;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 616)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 617) dev_info(adapter->dev, "CRQ send: close\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 618)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 619) crq_msg.valid = 0x80;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 620) crq_msg.type = VMC_MSG_CLOSE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 621) crq_msg.status = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 622) crq_msg.var1.rsvd = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 623) crq_msg.hmc_session = hmc->session;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 624) crq_msg.hmc_index = hmc->index;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 625) crq_msg.var2.rsvd = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 626) crq_msg.rsvd = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 627) crq_msg.var3.rsvd = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 628)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 629) ibmvmc_send_crq(adapter, be64_to_cpu(crq_as_u64[0]),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 630) be64_to_cpu(crq_as_u64[1]));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 631)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 632) return rc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 633) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 634)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 635) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 636) * ibmvmc_send_capabilities - Send VMC Capabilities
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 637) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 638) * @adapter: crq_server_adapter struct
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 639) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 640) * The capabilities message is an administrative message sent after the CRQ
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 641) * initialization sequence of messages and is used to exchange VMC capabilities
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 642) * between the management partition and the hypervisor. The management
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 643) * partition must send this message and the hypervisor must respond with VMC
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 644) * capabilities Response message before HMC interface message can begin. Any
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 645) * HMC interface messages received before the exchange of capabilities has
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 646) * complete are dropped.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 647) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 648) * Return:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 649) * 0 - Success
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 650) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 651) static int ibmvmc_send_capabilities(struct crq_server_adapter *adapter)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 652) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 653) struct ibmvmc_admin_crq_msg crq_msg;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 654) __be64 *crq_as_u64 = (__be64 *)&crq_msg;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 655)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 656) dev_dbg(adapter->dev, "ibmvmc: CRQ send: capabilities\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 657) crq_msg.valid = 0x80;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 658) crq_msg.type = VMC_MSG_CAP;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 659) crq_msg.status = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 660) crq_msg.rsvd[0] = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 661) crq_msg.rsvd[1] = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 662) crq_msg.max_hmc = ibmvmc_max_hmcs;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 663) crq_msg.max_mtu = cpu_to_be32(ibmvmc_max_mtu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 664) crq_msg.pool_size = cpu_to_be16(ibmvmc_max_buf_pool_size);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 665) crq_msg.crq_size = cpu_to_be16(adapter->queue.size);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 666) crq_msg.version = cpu_to_be16(IBMVMC_PROTOCOL_VERSION);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 667)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 668) ibmvmc_send_crq(adapter, be64_to_cpu(crq_as_u64[0]),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 669) be64_to_cpu(crq_as_u64[1]));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 670)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 671) ibmvmc.state = ibmvmc_state_capabilities;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 672)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 673) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 674) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 675)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 676) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 677) * ibmvmc_send_add_buffer_resp - Add Buffer Response
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 678) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 679) * @adapter: crq_server_adapter struct
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 680) * @status: Status field
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 681) * @hmc_session: HMC Session field
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 682) * @hmc_index: HMC Index field
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 683) * @buffer_id: Buffer Id field
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 684) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 685) * This command is sent by the management partition to the hypervisor in
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 686) * response to the Add Buffer message. The Status field indicates the result of
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 687) * the command.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 688) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 689) * Return:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 690) * 0 - Success
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 691) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 692) static int ibmvmc_send_add_buffer_resp(struct crq_server_adapter *adapter,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 693) u8 status, u8 hmc_session,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 694) u8 hmc_index, u16 buffer_id)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 695) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 696) struct ibmvmc_crq_msg crq_msg;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 697) __be64 *crq_as_u64 = (__be64 *)&crq_msg;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 698)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 699) dev_dbg(adapter->dev, "CRQ send: add_buffer_resp\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 700) crq_msg.valid = 0x80;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 701) crq_msg.type = VMC_MSG_ADD_BUF_RESP;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 702) crq_msg.status = status;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 703) crq_msg.var1.rsvd = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 704) crq_msg.hmc_session = hmc_session;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 705) crq_msg.hmc_index = hmc_index;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 706) crq_msg.var2.buffer_id = cpu_to_be16(buffer_id);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 707) crq_msg.rsvd = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 708) crq_msg.var3.rsvd = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 709)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 710) ibmvmc_send_crq(adapter, be64_to_cpu(crq_as_u64[0]),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 711) be64_to_cpu(crq_as_u64[1]));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 712)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 713) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 714) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 715)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 716) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 717) * ibmvmc_send_rem_buffer_resp - Remove Buffer Response
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 718) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 719) * @adapter: crq_server_adapter struct
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 720) * @status: Status field
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 721) * @hmc_session: HMC Session field
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 722) * @hmc_index: HMC Index field
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 723) * @buffer_id: Buffer Id field
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 724) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 725) * This command is sent by the management partition to the hypervisor in
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 726) * response to the Remove Buffer message. The Buffer ID field indicates
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 727) * which buffer the management partition selected to remove. The Status
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 728) * field indicates the result of the command.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 729) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 730) * Return:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 731) * 0 - Success
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 732) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 733) static int ibmvmc_send_rem_buffer_resp(struct crq_server_adapter *adapter,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 734) u8 status, u8 hmc_session,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 735) u8 hmc_index, u16 buffer_id)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 736) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 737) struct ibmvmc_crq_msg crq_msg;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 738) __be64 *crq_as_u64 = (__be64 *)&crq_msg;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 739)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 740) dev_dbg(adapter->dev, "CRQ send: rem_buffer_resp\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 741) crq_msg.valid = 0x80;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 742) crq_msg.type = VMC_MSG_REM_BUF_RESP;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 743) crq_msg.status = status;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 744) crq_msg.var1.rsvd = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 745) crq_msg.hmc_session = hmc_session;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 746) crq_msg.hmc_index = hmc_index;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 747) crq_msg.var2.buffer_id = cpu_to_be16(buffer_id);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 748) crq_msg.rsvd = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 749) crq_msg.var3.rsvd = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 750)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 751) ibmvmc_send_crq(adapter, be64_to_cpu(crq_as_u64[0]),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 752) be64_to_cpu(crq_as_u64[1]));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 753)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 754) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 755) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 756)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 757) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 758) * ibmvmc_send_msg - Signal Message
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 759) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 760) * @adapter: crq_server_adapter struct
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 761) * @buffer: ibmvmc_buffer struct
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 762) * @hmc: ibmvmc_hmc struct
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 763) * @msg_len: message length field
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 764) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 765) * This command is sent between the management partition and the hypervisor
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 766) * in order to signal the arrival of an HMC protocol message. The command
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 767) * can be sent by both the management partition and the hypervisor. It is
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 768) * used for all traffic between the management application and the hypervisor,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 769) * regardless of who initiated the communication.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 770) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 771) * There is no response to this message.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 772) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 773) * Return:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 774) * 0 - Success
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 775) * Non-zero - Failure
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 776) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 777) static int ibmvmc_send_msg(struct crq_server_adapter *adapter,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 778) struct ibmvmc_buffer *buffer,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 779) struct ibmvmc_hmc *hmc, int msg_len)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 780) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 781) struct ibmvmc_crq_msg crq_msg;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 782) __be64 *crq_as_u64 = (__be64 *)&crq_msg;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 783) int rc = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 784)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 785) dev_dbg(adapter->dev, "CRQ send: rdma to HV\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 786) rc = h_copy_rdma(msg_len,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 787) adapter->liobn,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 788) buffer->dma_addr_local,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 789) adapter->riobn,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 790) buffer->dma_addr_remote);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 791) if (rc) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 792) dev_err(adapter->dev, "Error in send_msg, h_copy_rdma rc 0x%x\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 793) rc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 794) return rc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 795) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 796)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 797) crq_msg.valid = 0x80;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 798) crq_msg.type = VMC_MSG_SIGNAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 799) crq_msg.status = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 800) crq_msg.var1.rsvd = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 801) crq_msg.hmc_session = hmc->session;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 802) crq_msg.hmc_index = hmc->index;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 803) crq_msg.var2.buffer_id = cpu_to_be16(buffer->id);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 804) crq_msg.var3.msg_len = cpu_to_be32(msg_len);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 805) dev_dbg(adapter->dev, "CRQ send: msg to HV 0x%llx 0x%llx\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 806) be64_to_cpu(crq_as_u64[0]), be64_to_cpu(crq_as_u64[1]));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 807)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 808) buffer->owner = VMC_BUF_OWNER_HV;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 809) ibmvmc_send_crq(adapter, be64_to_cpu(crq_as_u64[0]),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 810) be64_to_cpu(crq_as_u64[1]));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 811)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 812) return rc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 813) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 814)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 815) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 816) * ibmvmc_open - Open Session
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 817) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 818) * @inode: inode struct
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 819) * @file: file struct
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 820) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 821) * Return:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 822) * 0 - Success
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 823) * Non-zero - Failure
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 824) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 825) static int ibmvmc_open(struct inode *inode, struct file *file)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 826) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 827) struct ibmvmc_file_session *session;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 828)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 829) pr_debug("%s: inode = 0x%lx, file = 0x%lx, state = 0x%x\n", __func__,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 830) (unsigned long)inode, (unsigned long)file,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 831) ibmvmc.state);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 832)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 833) session = kzalloc(sizeof(*session), GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 834) if (!session)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 835) return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 836)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 837) session->file = file;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 838) file->private_data = session;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 839)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 840) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 841) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 842)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 843) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 844) * ibmvmc_close - Close Session
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 845) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 846) * @inode: inode struct
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 847) * @file: file struct
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 848) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 849) * Return:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 850) * 0 - Success
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 851) * Non-zero - Failure
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 852) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 853) static int ibmvmc_close(struct inode *inode, struct file *file)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 854) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 855) struct ibmvmc_file_session *session;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 856) struct ibmvmc_hmc *hmc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 857) int rc = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 858) unsigned long flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 859)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 860) pr_debug("%s: file = 0x%lx, state = 0x%x\n", __func__,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 861) (unsigned long)file, ibmvmc.state);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 862)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 863) session = file->private_data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 864) if (!session)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 865) return -EIO;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 866)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 867) hmc = session->hmc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 868) if (hmc) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 869) if (!hmc->adapter)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 870) return -EIO;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 871)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 872) if (ibmvmc.state == ibmvmc_state_failed) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 873) dev_warn(hmc->adapter->dev, "close: state_failed\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 874) return -EIO;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 875) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 876)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 877) spin_lock_irqsave(&hmc->lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 878) if (hmc->state >= ibmhmc_state_opening) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 879) rc = ibmvmc_send_close(hmc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 880) if (rc)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 881) dev_warn(hmc->adapter->dev, "close: send_close failed.\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 882) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 883) spin_unlock_irqrestore(&hmc->lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 884) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 885)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 886) kfree_sensitive(session);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 887)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 888) return rc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 889) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 890)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 891) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 892) * ibmvmc_read - Read
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 893) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 894) * @file: file struct
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 895) * @buf: Character buffer
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 896) * @nbytes: Size in bytes
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 897) * @ppos: Offset
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 898) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 899) * Return:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 900) * 0 - Success
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 901) * Non-zero - Failure
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 902) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 903) static ssize_t ibmvmc_read(struct file *file, char *buf, size_t nbytes,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 904) loff_t *ppos)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 905) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 906) struct ibmvmc_file_session *session;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 907) struct ibmvmc_hmc *hmc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 908) struct crq_server_adapter *adapter;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 909) struct ibmvmc_buffer *buffer;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 910) ssize_t n;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 911) ssize_t retval = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 912) unsigned long flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 913) DEFINE_WAIT(wait);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 914)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 915) pr_debug("ibmvmc: read: file = 0x%lx, buf = 0x%lx, nbytes = 0x%lx\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 916) (unsigned long)file, (unsigned long)buf,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 917) (unsigned long)nbytes);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 918)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 919) if (nbytes == 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 920) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 921)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 922) if (nbytes > ibmvmc.max_mtu) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 923) pr_warn("ibmvmc: read: nbytes invalid 0x%x\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 924) (unsigned int)nbytes);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 925) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 926) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 927)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 928) session = file->private_data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 929) if (!session) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 930) pr_warn("ibmvmc: read: no session\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 931) return -EIO;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 932) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 933)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 934) hmc = session->hmc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 935) if (!hmc) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 936) pr_warn("ibmvmc: read: no hmc\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 937) return -EIO;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 938) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 939)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 940) adapter = hmc->adapter;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 941) if (!adapter) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 942) pr_warn("ibmvmc: read: no adapter\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 943) return -EIO;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 944) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 945)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 946) do {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 947) prepare_to_wait(&ibmvmc_read_wait, &wait, TASK_INTERRUPTIBLE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 948)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 949) spin_lock_irqsave(&hmc->lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 950) if (hmc->queue_tail != hmc->queue_head)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 951) /* Data is available */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 952) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 953)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 954) spin_unlock_irqrestore(&hmc->lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 955)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 956) if (!session->valid) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 957) retval = -EBADFD;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 958) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 959) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 960) if (file->f_flags & O_NONBLOCK) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 961) retval = -EAGAIN;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 962) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 963) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 964)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 965) schedule();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 966)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 967) if (signal_pending(current)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 968) retval = -ERESTARTSYS;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 969) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 970) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 971) } while (1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 972)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 973) buffer = &(hmc->buffer[hmc->queue_outbound_msgs[hmc->queue_tail]]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 974) hmc->queue_tail++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 975) if (hmc->queue_tail == ibmvmc_max_buf_pool_size)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 976) hmc->queue_tail = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 977) spin_unlock_irqrestore(&hmc->lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 978)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 979) nbytes = min_t(size_t, nbytes, buffer->msg_len);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 980) n = copy_to_user((void *)buf, buffer->real_addr_local, nbytes);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 981) dev_dbg(adapter->dev, "read: copy to user nbytes = 0x%lx.\n", nbytes);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 982) ibmvmc_free_hmc_buffer(hmc, buffer);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 983) retval = nbytes;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 984)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 985) if (n) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 986) dev_warn(adapter->dev, "read: copy to user failed.\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 987) retval = -EFAULT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 988) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 989)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 990) out:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 991) finish_wait(&ibmvmc_read_wait, &wait);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 992) dev_dbg(adapter->dev, "read: out %ld\n", retval);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 993) return retval;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 994) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 995)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 996) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 997) * ibmvmc_poll - Poll
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 998) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 999) * @file: file struct
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1000) * @wait: Poll Table
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1001) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1002) * Return:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1003) * poll.h return values
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1004) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1005) static unsigned int ibmvmc_poll(struct file *file, poll_table *wait)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1006) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1007) struct ibmvmc_file_session *session;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1008) struct ibmvmc_hmc *hmc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1009) unsigned int mask = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1010)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1011) session = file->private_data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1012) if (!session)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1013) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1014)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1015) hmc = session->hmc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1016) if (!hmc)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1017) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1018)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1019) poll_wait(file, &ibmvmc_read_wait, wait);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1020)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1021) if (hmc->queue_head != hmc->queue_tail)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1022) mask |= POLLIN | POLLRDNORM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1023)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1024) return mask;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1025) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1026)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1027) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1028) * ibmvmc_write - Write
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1029) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1030) * @file: file struct
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1031) * @buffer: Character buffer
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1032) * @count: Count field
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1033) * @ppos: Offset
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1034) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1035) * Return:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1036) * 0 - Success
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1037) * Non-zero - Failure
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1038) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1039) static ssize_t ibmvmc_write(struct file *file, const char *buffer,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1040) size_t count, loff_t *ppos)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1041) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1042) struct ibmvmc_buffer *vmc_buffer;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1043) struct ibmvmc_file_session *session;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1044) struct crq_server_adapter *adapter;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1045) struct ibmvmc_hmc *hmc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1046) unsigned char *buf;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1047) unsigned long flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1048) size_t bytes;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1049) const char *p = buffer;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1050) size_t c = count;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1051) int ret = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1052)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1053) session = file->private_data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1054) if (!session)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1055) return -EIO;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1056)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1057) hmc = session->hmc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1058) if (!hmc)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1059) return -EIO;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1060)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1061) spin_lock_irqsave(&hmc->lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1062) if (hmc->state == ibmhmc_state_free) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1063) /* HMC connection is not valid (possibly was reset under us). */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1064) ret = -EIO;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1065) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1066) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1067)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1068) adapter = hmc->adapter;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1069) if (!adapter) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1070) ret = -EIO;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1071) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1072) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1073)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1074) if (count > ibmvmc.max_mtu) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1075) dev_warn(adapter->dev, "invalid buffer size 0x%lx\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1076) (unsigned long)count);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1077) ret = -EIO;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1078) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1079) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1080)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1081) /* Waiting for the open resp message to the ioctl(1) - retry */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1082) if (hmc->state == ibmhmc_state_opening) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1083) ret = -EBUSY;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1084) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1085) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1086)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1087) /* Make sure the ioctl() was called & the open msg sent, and that
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1088) * the HMC connection has not failed.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1089) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1090) if (hmc->state != ibmhmc_state_ready) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1091) ret = -EIO;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1092) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1093) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1094)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1095) vmc_buffer = ibmvmc_get_valid_hmc_buffer(hmc->index);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1096) if (!vmc_buffer) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1097) /* No buffer available for the msg send, or we have not yet
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1098) * completed the open/open_resp sequence. Retry until this is
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1099) * complete.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1100) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1101) ret = -EBUSY;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1102) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1103) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1104) if (!vmc_buffer->real_addr_local) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1105) dev_err(adapter->dev, "no buffer storage assigned\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1106) ret = -EIO;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1107) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1108) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1109) buf = vmc_buffer->real_addr_local;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1110)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1111) while (c > 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1112) bytes = min_t(size_t, c, vmc_buffer->size);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1113)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1114) bytes -= copy_from_user(buf, p, bytes);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1115) if (!bytes) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1116) ret = -EFAULT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1117) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1118) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1119) c -= bytes;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1120) p += bytes;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1121) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1122) if (p == buffer)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1123) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1124)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1125) file->f_path.dentry->d_inode->i_mtime = current_time(file_inode(file));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1126) mark_inode_dirty(file->f_path.dentry->d_inode);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1127)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1128) dev_dbg(adapter->dev, "write: file = 0x%lx, count = 0x%lx\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1129) (unsigned long)file, (unsigned long)count);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1130)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1131) ibmvmc_send_msg(adapter, vmc_buffer, hmc, count);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1132) ret = p - buffer;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1133) out:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1134) spin_unlock_irqrestore(&hmc->lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1135) return (ssize_t)(ret);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1136) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1137)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1138) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1139) * ibmvmc_setup_hmc - Setup the HMC
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1140) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1141) * @session: ibmvmc_file_session struct
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1142) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1143) * Return:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1144) * 0 - Success
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1145) * Non-zero - Failure
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1146) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1147) static long ibmvmc_setup_hmc(struct ibmvmc_file_session *session)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1148) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1149) struct ibmvmc_hmc *hmc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1150) unsigned int valid, free, index;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1151)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1152) if (ibmvmc.state == ibmvmc_state_failed) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1153) pr_warn("ibmvmc: Reserve HMC: state_failed\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1154) return -EIO;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1155) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1156)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1157) if (ibmvmc.state < ibmvmc_state_ready) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1158) pr_warn("ibmvmc: Reserve HMC: not state_ready\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1159) return -EAGAIN;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1160) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1161)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1162) /* Device is busy until capabilities have been exchanged and we
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1163) * have a generic buffer for each possible HMC connection.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1164) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1165) for (index = 0; index <= ibmvmc.max_hmc_index; index++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1166) valid = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1167) ibmvmc_count_hmc_buffers(index, &valid, &free);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1168) if (valid == 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1169) pr_warn("ibmvmc: buffers not ready for index %d\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1170) index);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1171) return -ENOBUFS;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1172) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1173) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1174)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1175) /* Get an hmc object, and transition to ibmhmc_state_initial */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1176) hmc = ibmvmc_get_free_hmc();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1177) if (!hmc) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1178) pr_warn("%s: free hmc not found\n", __func__);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1179) return -EBUSY;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1180) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1181)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1182) hmc->session = hmc->session + 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1183) if (hmc->session == 0xff)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1184) hmc->session = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1185)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1186) session->hmc = hmc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1187) hmc->adapter = &ibmvmc_adapter;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1188) hmc->file_session = session;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1189) session->valid = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1190)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1191) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1192) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1193)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1194) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1195) * ibmvmc_ioctl_sethmcid - IOCTL Set HMC ID
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1196) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1197) * @session: ibmvmc_file_session struct
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1198) * @new_hmc_id: HMC id field
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1199) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1200) * IOCTL command to setup the hmc id
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1201) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1202) * Return:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1203) * 0 - Success
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1204) * Non-zero - Failure
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1205) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1206) static long ibmvmc_ioctl_sethmcid(struct ibmvmc_file_session *session,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1207) unsigned char __user *new_hmc_id)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1208) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1209) struct ibmvmc_hmc *hmc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1210) struct ibmvmc_buffer *buffer;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1211) size_t bytes;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1212) char print_buffer[HMC_ID_LEN + 1];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1213) unsigned long flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1214) long rc = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1215)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1216) /* Reserve HMC session */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1217) hmc = session->hmc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1218) if (!hmc) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1219) rc = ibmvmc_setup_hmc(session);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1220) if (rc)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1221) return rc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1222)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1223) hmc = session->hmc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1224) if (!hmc) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1225) pr_err("ibmvmc: setup_hmc success but no hmc\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1226) return -EIO;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1227) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1228) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1229)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1230) if (hmc->state != ibmhmc_state_initial) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1231) pr_warn("ibmvmc: sethmcid: invalid state to send open 0x%x\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1232) hmc->state);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1233) return -EIO;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1234) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1235)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1236) bytes = copy_from_user(hmc->hmc_id, new_hmc_id, HMC_ID_LEN);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1237) if (bytes)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1238) return -EFAULT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1239)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1240) /* Send Open Session command */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1241) spin_lock_irqsave(&hmc->lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1242) buffer = ibmvmc_get_valid_hmc_buffer(hmc->index);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1243) spin_unlock_irqrestore(&hmc->lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1244)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1245) if (!buffer || !buffer->real_addr_local) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1246) pr_warn("ibmvmc: sethmcid: no buffer available\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1247) return -EIO;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1248) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1249)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1250) /* Make sure buffer is NULL terminated before trying to print it */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1251) memset(print_buffer, 0, HMC_ID_LEN + 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1252) strncpy(print_buffer, hmc->hmc_id, HMC_ID_LEN);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1253) pr_info("ibmvmc: sethmcid: Set HMC ID: \"%s\"\n", print_buffer);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1254)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1255) memcpy(buffer->real_addr_local, hmc->hmc_id, HMC_ID_LEN);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1256) /* RDMA over ID, send open msg, change state to ibmhmc_state_opening */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1257) rc = ibmvmc_send_open(buffer, hmc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1258)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1259) return rc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1260) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1261)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1262) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1263) * ibmvmc_ioctl_query - IOCTL Query
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1264) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1265) * @session: ibmvmc_file_session struct
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1266) * @ret_struct: ibmvmc_query_struct
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1267) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1268) * Return:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1269) * 0 - Success
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1270) * Non-zero - Failure
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1271) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1272) static long ibmvmc_ioctl_query(struct ibmvmc_file_session *session,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1273) struct ibmvmc_query_struct __user *ret_struct)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1274) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1275) struct ibmvmc_query_struct query_struct;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1276) size_t bytes;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1277)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1278) memset(&query_struct, 0, sizeof(query_struct));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1279) query_struct.have_vmc = (ibmvmc.state > ibmvmc_state_initial);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1280) query_struct.state = ibmvmc.state;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1281) query_struct.vmc_drc_index = ibmvmc.vmc_drc_index;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1282)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1283) bytes = copy_to_user(ret_struct, &query_struct,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1284) sizeof(query_struct));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1285) if (bytes)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1286) return -EFAULT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1287)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1288) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1289) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1290)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1291) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1292) * ibmvmc_ioctl_requestvmc - IOCTL Request VMC
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1293) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1294) * @session: ibmvmc_file_session struct
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1295) * @ret_vmc_index: VMC Index
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1296) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1297) * Return:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1298) * 0 - Success
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1299) * Non-zero - Failure
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1300) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1301) static long ibmvmc_ioctl_requestvmc(struct ibmvmc_file_session *session,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1302) u32 __user *ret_vmc_index)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1303) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1304) /* TODO: (adreznec) Add locking to control multiple process access */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1305) size_t bytes;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1306) long rc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1307) u32 vmc_drc_index;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1308)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1309) /* Call to request the VMC device from phyp*/
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1310) rc = h_request_vmc(&vmc_drc_index);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1311) pr_debug("ibmvmc: requestvmc: H_REQUEST_VMC rc = 0x%lx\n", rc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1312)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1313) if (rc == H_SUCCESS) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1314) rc = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1315) } else if (rc == H_FUNCTION) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1316) pr_err("ibmvmc: requestvmc: h_request_vmc not supported\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1317) return -EPERM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1318) } else if (rc == H_AUTHORITY) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1319) pr_err("ibmvmc: requestvmc: hypervisor denied vmc request\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1320) return -EPERM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1321) } else if (rc == H_HARDWARE) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1322) pr_err("ibmvmc: requestvmc: hypervisor hardware fault\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1323) return -EIO;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1324) } else if (rc == H_RESOURCE) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1325) pr_err("ibmvmc: requestvmc: vmc resource unavailable\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1326) return -ENODEV;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1327) } else if (rc == H_NOT_AVAILABLE) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1328) pr_err("ibmvmc: requestvmc: system cannot be vmc managed\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1329) return -EPERM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1330) } else if (rc == H_PARAMETER) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1331) pr_err("ibmvmc: requestvmc: invalid parameter\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1332) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1333) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1334)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1335) /* Success, set the vmc index in global struct */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1336) ibmvmc.vmc_drc_index = vmc_drc_index;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1337)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1338) bytes = copy_to_user(ret_vmc_index, &vmc_drc_index,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1339) sizeof(*ret_vmc_index));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1340) if (bytes) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1341) pr_warn("ibmvmc: requestvmc: copy to user failed.\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1342) return -EFAULT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1343) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1344) return rc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1345) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1346)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1347) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1348) * ibmvmc_ioctl - IOCTL
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1349) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1350) * @file: file information
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1351) * @cmd: cmd field
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1352) * @arg: Argument field
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1353) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1354) * Return:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1355) * 0 - Success
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1356) * Non-zero - Failure
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1357) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1358) static long ibmvmc_ioctl(struct file *file,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1359) unsigned int cmd, unsigned long arg)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1360) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1361) struct ibmvmc_file_session *session = file->private_data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1362)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1363) pr_debug("ibmvmc: ioctl file=0x%lx, cmd=0x%x, arg=0x%lx, ses=0x%lx\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1364) (unsigned long)file, cmd, arg,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1365) (unsigned long)session);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1366)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1367) if (!session) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1368) pr_warn("ibmvmc: ioctl: no session\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1369) return -EIO;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1370) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1371)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1372) switch (cmd) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1373) case VMC_IOCTL_SETHMCID:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1374) return ibmvmc_ioctl_sethmcid(session,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1375) (unsigned char __user *)arg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1376) case VMC_IOCTL_QUERY:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1377) return ibmvmc_ioctl_query(session,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1378) (struct ibmvmc_query_struct __user *)arg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1379) case VMC_IOCTL_REQUESTVMC:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1380) return ibmvmc_ioctl_requestvmc(session,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1381) (unsigned int __user *)arg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1382) default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1383) pr_warn("ibmvmc: unknown ioctl 0x%x\n", cmd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1384) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1385) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1386) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1387)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1388) static const struct file_operations ibmvmc_fops = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1389) .owner = THIS_MODULE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1390) .read = ibmvmc_read,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1391) .write = ibmvmc_write,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1392) .poll = ibmvmc_poll,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1393) .unlocked_ioctl = ibmvmc_ioctl,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1394) .open = ibmvmc_open,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1395) .release = ibmvmc_close,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1396) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1397)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1398) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1399) * ibmvmc_add_buffer - Add Buffer
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1400) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1401) * @adapter: crq_server_adapter struct
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1402) * @crq: ibmvmc_crq_msg struct
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1403) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1404) * This message transfers a buffer from hypervisor ownership to management
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1405) * partition ownership. The LIOBA is obtained from the virtual TCE table
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1406) * associated with the hypervisor side of the VMC device, and points to a
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1407) * buffer of size MTU (as established in the capabilities exchange).
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1408) *
 * Typical flow for adding buffers:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1410) * 1. A new management application connection is opened by the management
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1411) * partition.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1412) * 2. The hypervisor assigns new buffers for the traffic associated with
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1413) * that connection.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1414) * 3. The hypervisor sends VMC Add Buffer messages to the management
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1415) * partition, informing it of the new buffers.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1416) * 4. The hypervisor sends an HMC protocol message (to the management
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1417) * application) notifying it of the new buffers. This informs the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1418) * application that it has buffers available for sending HMC
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1419) * commands.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1420) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1421) * Return:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1422) * 0 - Success
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1423) * Non-zero - Failure
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1424) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1425) static int ibmvmc_add_buffer(struct crq_server_adapter *adapter,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1426) struct ibmvmc_crq_msg *crq)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1427) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1428) struct ibmvmc_buffer *buffer;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1429) u8 hmc_index;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1430) u8 hmc_session;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1431) u16 buffer_id;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1432) unsigned long flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1433) int rc = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1434)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1435) if (!crq)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1436) return -1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1437)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1438) hmc_session = crq->hmc_session;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1439) hmc_index = crq->hmc_index;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1440) buffer_id = be16_to_cpu(crq->var2.buffer_id);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1441)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1442) if (hmc_index > ibmvmc.max_hmc_index) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1443) dev_err(adapter->dev, "add_buffer: invalid hmc_index = 0x%x\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1444) hmc_index);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1445) ibmvmc_send_add_buffer_resp(adapter, VMC_MSG_INVALID_HMC_INDEX,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1446) hmc_session, hmc_index, buffer_id);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1447) return -1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1448) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1449)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1450) if (buffer_id >= ibmvmc.max_buffer_pool_size) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1451) dev_err(adapter->dev, "add_buffer: invalid buffer_id = 0x%x\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1452) buffer_id);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1453) ibmvmc_send_add_buffer_resp(adapter, VMC_MSG_INVALID_BUFFER_ID,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1454) hmc_session, hmc_index, buffer_id);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1455) return -1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1456) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1457)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1458) spin_lock_irqsave(&hmcs[hmc_index].lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1459) buffer = &hmcs[hmc_index].buffer[buffer_id];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1460)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1461) if (buffer->real_addr_local || buffer->dma_addr_local) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1462) dev_warn(adapter->dev, "add_buffer: already allocated id = 0x%lx\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1463) (unsigned long)buffer_id);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1464) spin_unlock_irqrestore(&hmcs[hmc_index].lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1465) ibmvmc_send_add_buffer_resp(adapter, VMC_MSG_INVALID_BUFFER_ID,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1466) hmc_session, hmc_index, buffer_id);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1467) return -1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1468) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1469)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1470) buffer->real_addr_local = alloc_dma_buffer(to_vio_dev(adapter->dev),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1471) ibmvmc.max_mtu,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1472) &buffer->dma_addr_local);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1473)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1474) if (!buffer->real_addr_local) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1475) dev_err(adapter->dev, "add_buffer: alloc_dma_buffer failed.\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1476) spin_unlock_irqrestore(&hmcs[hmc_index].lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1477) ibmvmc_send_add_buffer_resp(adapter, VMC_MSG_INTERFACE_FAILURE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1478) hmc_session, hmc_index, buffer_id);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1479) return -1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1480) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1481)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1482) buffer->dma_addr_remote = be32_to_cpu(crq->var3.lioba);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1483) buffer->size = ibmvmc.max_mtu;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1484) buffer->owner = crq->var1.owner;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1485) buffer->free = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1486) /* Must ensure valid==1 is observable only after all other fields are */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1487) dma_wmb();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1488) buffer->valid = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1489) buffer->id = buffer_id;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1490)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1491) dev_dbg(adapter->dev, "add_buffer: successfully added a buffer:\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1492) dev_dbg(adapter->dev, " index: %d, session: %d, buffer: 0x%x, owner: %d\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1493) hmc_index, hmc_session, buffer_id, buffer->owner);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1494) dev_dbg(adapter->dev, " local: 0x%x, remote: 0x%x\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1495) (u32)buffer->dma_addr_local,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1496) (u32)buffer->dma_addr_remote);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1497) spin_unlock_irqrestore(&hmcs[hmc_index].lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1498)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1499) ibmvmc_send_add_buffer_resp(adapter, VMC_MSG_SUCCESS, hmc_session,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1500) hmc_index, buffer_id);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1501)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1502) return rc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1503) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1504)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1505) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1506) * ibmvmc_rem_buffer - Remove Buffer
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1507) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1508) * @adapter: crq_server_adapter struct
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1509) * @crq: ibmvmc_crq_msg struct
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1510) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1511) * This message requests an HMC buffer to be transferred from management
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1512) * partition ownership to hypervisor ownership. The management partition may
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1513) * not be able to satisfy the request at a particular point in time if all its
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1514) * buffers are in use. The management partition requires a depth of at least
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1515) * one inbound buffer to allow management application commands to flow to the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1516) * hypervisor. It is, therefore, an interface error for the hypervisor to
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1517) * attempt to remove the management partition's last buffer.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1518) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1519) * The hypervisor is expected to manage buffer usage with the management
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1520) * application directly and inform the management partition when buffers may be
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1521) * removed. The typical flow for removing buffers:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1522) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1523) * 1. The management application no longer needs a communication path to a
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1524) * particular hypervisor function. That function is closed.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1525) * 2. The hypervisor and the management application quiesce all traffic to that
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1526) * function. The hypervisor requests a reduction in buffer pool size.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1527) * 3. The management application acknowledges the reduction in buffer pool size.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1528) * 4. The hypervisor sends a Remove Buffer message to the management partition,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1529) * informing it of the reduction in buffers.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1530) * 5. The management partition verifies it can remove the buffer. This is
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1531) * possible if buffers have been quiesced.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1532) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1533) * Return:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1534) * 0 - Success
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1535) * Non-zero - Failure
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1536) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1537) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1538) * The hypervisor requested that we pick an unused buffer, and return it.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1539) * Before sending the buffer back, we free any storage associated with the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1540) * buffer.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1541) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1542) static int ibmvmc_rem_buffer(struct crq_server_adapter *adapter,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1543) struct ibmvmc_crq_msg *crq)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1544) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1545) struct ibmvmc_buffer *buffer;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1546) u8 hmc_index;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1547) u8 hmc_session;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1548) u16 buffer_id = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1549) unsigned long flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1550) int rc = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1551)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1552) if (!crq)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1553) return -1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1554)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1555) hmc_session = crq->hmc_session;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1556) hmc_index = crq->hmc_index;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1557)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1558) if (hmc_index > ibmvmc.max_hmc_index) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1559) dev_warn(adapter->dev, "rem_buffer: invalid hmc_index = 0x%x\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1560) hmc_index);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1561) ibmvmc_send_rem_buffer_resp(adapter, VMC_MSG_INVALID_HMC_INDEX,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1562) hmc_session, hmc_index, buffer_id);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1563) return -1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1564) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1565)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1566) spin_lock_irqsave(&hmcs[hmc_index].lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1567) buffer = ibmvmc_get_free_hmc_buffer(adapter, hmc_index);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1568) if (!buffer) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1569) dev_info(adapter->dev, "rem_buffer: no buffer to remove\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1570) spin_unlock_irqrestore(&hmcs[hmc_index].lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1571) ibmvmc_send_rem_buffer_resp(adapter, VMC_MSG_NO_BUFFER,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1572) hmc_session, hmc_index,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1573) VMC_INVALID_BUFFER_ID);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1574) return -1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1575) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1576)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1577) buffer_id = buffer->id;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1578)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1579) if (buffer->valid)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1580) free_dma_buffer(to_vio_dev(adapter->dev),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1581) ibmvmc.max_mtu,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1582) buffer->real_addr_local,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1583) buffer->dma_addr_local);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1584)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1585) memset(buffer, 0, sizeof(struct ibmvmc_buffer));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1586) spin_unlock_irqrestore(&hmcs[hmc_index].lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1587)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1588) dev_dbg(adapter->dev, "rem_buffer: removed buffer 0x%x.\n", buffer_id);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1589) ibmvmc_send_rem_buffer_resp(adapter, VMC_MSG_SUCCESS, hmc_session,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1590) hmc_index, buffer_id);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1591)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1592) return rc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1593) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1594)
/**
 * ibmvmc_recv_msg - Receive Message
 *
 * @adapter: crq_server_adapter struct
 * @crq: ibmvmc_crq_msg struct
 *
 * Handles a signal CRQ from the hypervisor: validates the target HMC and
 * buffer id, copies the payload into the local DMA buffer via h_copy_rdma,
 * queues the buffer id on the HMC's outbound queue, and wakes any reader
 * sleeping on ibmvmc_read_wait.
 *
 * Return:
 *	0 - Success
 *	Non-zero - Failure
 */
static int ibmvmc_recv_msg(struct crq_server_adapter *adapter,
			   struct ibmvmc_crq_msg *crq)
{
	struct ibmvmc_buffer *buffer;
	struct ibmvmc_hmc *hmc;
	unsigned long msg_len;
	u8 hmc_index;
	u8 hmc_session;
	u16 buffer_id;
	unsigned long flags;
	int rc = 0;

	if (!crq)
		return -1;

	/* Hypervisor writes CRQs directly into our memory in big endian */
	dev_dbg(adapter->dev, "Recv_msg: msg from HV 0x%016llx 0x%016llx\n",
		be64_to_cpu(*((unsigned long *)crq)),
		be64_to_cpu(*(((unsigned long *)crq) + 1)));

	hmc_session = crq->hmc_session;
	hmc_index = crq->hmc_index;
	buffer_id = be16_to_cpu(crq->var2.buffer_id);
	msg_len = be32_to_cpu(crq->var3.msg_len);

	/* Reject targets outside the negotiated HMC range. */
	if (hmc_index > ibmvmc.max_hmc_index) {
		dev_err(adapter->dev, "Recv_msg: invalid hmc_index = 0x%x\n",
			hmc_index);
		ibmvmc_send_add_buffer_resp(adapter, VMC_MSG_INVALID_HMC_INDEX,
					    hmc_session, hmc_index, buffer_id);
		return -1;
	}

	/* Reject buffer ids outside the negotiated pool size. */
	if (buffer_id >= ibmvmc.max_buffer_pool_size) {
		dev_err(adapter->dev, "Recv_msg: invalid buffer_id = 0x%x\n",
			buffer_id);
		ibmvmc_send_add_buffer_resp(adapter, VMC_MSG_INVALID_BUFFER_ID,
					    hmc_session, hmc_index, buffer_id);
		return -1;
	}

	hmc = &hmcs[hmc_index];
	spin_lock_irqsave(&hmc->lock, flags);

	if (hmc->state == ibmhmc_state_free) {
		dev_err(adapter->dev, "Recv_msg: invalid hmc state = 0x%x\n",
			hmc->state);
		/* HMC connection is not valid (possibly was reset under us). */
		spin_unlock_irqrestore(&hmc->lock, flags);
		return -1;
	}

	buffer = &hmc->buffer[buffer_id];

	/* Drop if the slot was never populated or we (ALPHA) already own it. */
	if (buffer->valid == 0 || buffer->owner == VMC_BUF_OWNER_ALPHA) {
		dev_err(adapter->dev, "Recv_msg: not valid, or not HV.  0x%x 0x%x\n",
			buffer->valid, buffer->owner);
		spin_unlock_irqrestore(&hmc->lock, flags);
		return -1;
	}

	/* RDMA the data into the partition. */
	rc = h_copy_rdma(msg_len,
			 adapter->riobn,
			 buffer->dma_addr_remote,
			 adapter->liobn,
			 buffer->dma_addr_local);

	dev_dbg(adapter->dev, "Recv_msg: msg_len = 0x%x, buffer_id = 0x%x, queue_head = 0x%x, hmc_idx = 0x%x\n",
		(unsigned int)msg_len, (unsigned int)buffer_id,
		(unsigned int)hmc->queue_head, (unsigned int)hmc_index);
	/*
	 * Take ownership of the buffer before inspecting rc: even if the
	 * copy failed, the buffer is now ours (ALPHA) and not free.
	 */
	buffer->msg_len = msg_len;
	buffer->free = 0;
	buffer->owner = VMC_BUF_OWNER_ALPHA;

	if (rc) {
		dev_err(adapter->dev, "Failure in recv_msg: h_copy_rdma = 0x%x\n",
			rc);
		spin_unlock_irqrestore(&hmc->lock, flags);
		return -1;
	}

	/* Must be locked because read operates on the same data */
	hmc->queue_outbound_msgs[hmc->queue_head] = buffer_id;
	hmc->queue_head++;
	/* The outbound queue is a fixed-size ring; wrap the head index. */
	if (hmc->queue_head == ibmvmc_max_buf_pool_size)
		hmc->queue_head = 0;

	/* Head catching up to tail means the ring overflowed. */
	if (hmc->queue_head == hmc->queue_tail)
		dev_err(adapter->dev, "outbound buffer queue wrapped.\n");

	spin_unlock_irqrestore(&hmc->lock, flags);

	/* Let a blocked ibmvmc_read caller pick up the new message. */
	wake_up_interruptible(&ibmvmc_read_wait);

	return 0;
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1692)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1693) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1694) * ibmvmc_process_capabilities - Process Capabilities
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1695) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1696) * @adapter: crq_server_adapter struct
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1697) * @crqp: ibmvmc_crq_msg struct
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1698) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1699) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1700) static void ibmvmc_process_capabilities(struct crq_server_adapter *adapter,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1701) struct ibmvmc_crq_msg *crqp)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1702) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1703) struct ibmvmc_admin_crq_msg *crq = (struct ibmvmc_admin_crq_msg *)crqp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1704)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1705) if ((be16_to_cpu(crq->version) >> 8) !=
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1706) (IBMVMC_PROTOCOL_VERSION >> 8)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1707) dev_err(adapter->dev, "init failed, incompatible versions 0x%x 0x%x\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1708) be16_to_cpu(crq->version),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1709) IBMVMC_PROTOCOL_VERSION);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1710) ibmvmc.state = ibmvmc_state_failed;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1711) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1712) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1713)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1714) ibmvmc.max_mtu = min_t(u32, ibmvmc_max_mtu, be32_to_cpu(crq->max_mtu));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1715) ibmvmc.max_buffer_pool_size = min_t(u16, ibmvmc_max_buf_pool_size,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1716) be16_to_cpu(crq->pool_size));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1717) ibmvmc.max_hmc_index = min_t(u8, ibmvmc_max_hmcs, crq->max_hmc) - 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1718) ibmvmc.state = ibmvmc_state_ready;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1719)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1720) dev_info(adapter->dev, "Capabilities: mtu=0x%x, pool_size=0x%x, max_hmc=0x%x\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1721) ibmvmc.max_mtu, ibmvmc.max_buffer_pool_size,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1722) ibmvmc.max_hmc_index);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1723) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1724)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1725) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1726) * ibmvmc_validate_hmc_session - Validate HMC Session
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1727) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1728) * @adapter: crq_server_adapter struct
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1729) * @crq: ibmvmc_crq_msg struct
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1730) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1731) * Return:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1732) * 0 - Success
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1733) * Non-zero - Failure
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1734) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1735) static int ibmvmc_validate_hmc_session(struct crq_server_adapter *adapter,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1736) struct ibmvmc_crq_msg *crq)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1737) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1738) unsigned char hmc_index;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1739)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1740) hmc_index = crq->hmc_index;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1741)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1742) if (crq->hmc_session == 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1743) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1744)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1745) if (hmc_index > ibmvmc.max_hmc_index)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1746) return -1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1747)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1748) if (hmcs[hmc_index].session != crq->hmc_session) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1749) dev_warn(adapter->dev, "Drop, bad session: expected 0x%x, recv 0x%x\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1750) hmcs[hmc_index].session, crq->hmc_session);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1751) return -1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1752) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1753)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1754) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1755) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1756)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1757) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1758) * ibmvmc_reset - Reset
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1759) *
 * @adapter: crq_server_adapter struct
 * @xport_event: true if the partner closed their CRQ, in which case no CRQ
 *               reset is needed on our side; false if we must schedule a
 *               CRQ reset ourselves
 *
 * Closes all HMC sessions and conditionally schedules a CRQ reset.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1766) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1767) static void ibmvmc_reset(struct crq_server_adapter *adapter, bool xport_event)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1768) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1769) int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1770)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1771) if (ibmvmc.state != ibmvmc_state_sched_reset) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1772) dev_info(adapter->dev, "*** Reset to initial state.\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1773) for (i = 0; i < ibmvmc_max_hmcs; i++)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1774) ibmvmc_return_hmc(&hmcs[i], xport_event);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1775)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1776) if (xport_event) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1777) /* CRQ was closed by the partner. We don't need to do
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1778) * anything except set ourself to the correct state to
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1779) * handle init msgs.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1780) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1781) ibmvmc.state = ibmvmc_state_crqinit;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1782) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1783) /* The partner did not close their CRQ - instead, we're
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1784) * closing the CRQ on our end. Need to schedule this
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1785) * for process context, because CRQ reset may require a
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1786) * sleep.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1787) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1788) * Setting ibmvmc.state here immediately prevents
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1789) * ibmvmc_open from completing until the reset
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1790) * completes in process context.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1791) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1792) ibmvmc.state = ibmvmc_state_sched_reset;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1793) dev_dbg(adapter->dev, "Device reset scheduled");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1794) wake_up_interruptible(&adapter->reset_wait_queue);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1795) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1796) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1797) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1798)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1799) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1800) * ibmvmc_reset_task - Reset Task
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1801) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1802) * @data: Data field
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1803) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1804) * Performs a CRQ reset of the VMC device in process context.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1805) * NOTE: This function should not be called directly, use ibmvmc_reset.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1806) */
static int ibmvmc_reset_task(void *data)
{
	struct crq_server_adapter *adapter = data;
	int rc;

	/* Boost this kthread's priority so resets run promptly. */
	set_user_nice(current, -20);

	while (!kthread_should_stop()) {
		/* Sleep until ibmvmc_reset schedules a reset or we're told to stop. */
		wait_event_interruptible(adapter->reset_wait_queue,
			(ibmvmc.state == ibmvmc_state_sched_reset) ||
			kthread_should_stop());

		if (kthread_should_stop())
			break;

		dev_dbg(adapter->dev, "CRQ resetting in process context");
		/* Quiesce the CRQ work tasklet while the queue is torn down. */
		tasklet_disable(&adapter->work_task);

		rc = ibmvmc_reset_crq_queue(adapter);

		if (rc != H_SUCCESS && rc != H_RESOURCE) {
			dev_err(adapter->dev, "Error initializing CRQ.  rc = 0x%x\n",
				rc);
			ibmvmc.state = ibmvmc_state_failed;
		} else {
			ibmvmc.state = ibmvmc_state_crqinit;

			/* 0xC001... matches the "initialize CRQ" handshake
			 * message warned about below.
			 * NOTE(review): the rc != H_RESOURCE guard appears to
			 * suppress the warning when the partner side is not
			 * yet up - confirm against the CRQ protocol spec.
			 */
			if (ibmvmc_send_crq(adapter, 0xC001000000000000LL, 0)
			    != 0 && rc != H_RESOURCE)
				dev_warn(adapter->dev, "Failed to send initialize CRQ message\n");
		}

		/* Re-arm interrupts and the tasklet only after the reset. */
		vio_enable_interrupts(to_vio_dev(adapter->dev));
		tasklet_enable(&adapter->work_task);
	}

	return 0;
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1845)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1846) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1847) * ibmvmc_process_open_resp - Process Open Response
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1848) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1849) * @crq: ibmvmc_crq_msg struct
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1850) * @adapter: crq_server_adapter struct
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1851) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1852) * This command is sent by the hypervisor in response to the Interface
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1853) * Open message. When this message is received, the indicated buffer is
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1854) * again available for management partition use.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1855) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1856) static void ibmvmc_process_open_resp(struct ibmvmc_crq_msg *crq,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1857) struct crq_server_adapter *adapter)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1858) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1859) unsigned char hmc_index;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1860) unsigned short buffer_id;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1861)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1862) hmc_index = crq->hmc_index;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1863) if (hmc_index > ibmvmc.max_hmc_index) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1864) /* Why would PHYP give an index > max negotiated? */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1865) ibmvmc_reset(adapter, false);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1866) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1867) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1868)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1869) if (crq->status) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1870) dev_warn(adapter->dev, "open_resp: failed - status 0x%x\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1871) crq->status);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1872) ibmvmc_return_hmc(&hmcs[hmc_index], false);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1873) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1874) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1875)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1876) if (hmcs[hmc_index].state == ibmhmc_state_opening) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1877) buffer_id = be16_to_cpu(crq->var2.buffer_id);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1878) if (buffer_id >= ibmvmc.max_buffer_pool_size) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1879) dev_err(adapter->dev, "open_resp: invalid buffer_id = 0x%x\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1880) buffer_id);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1881) hmcs[hmc_index].state = ibmhmc_state_failed;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1882) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1883) ibmvmc_free_hmc_buffer(&hmcs[hmc_index],
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1884) &hmcs[hmc_index].buffer[buffer_id]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1885) hmcs[hmc_index].state = ibmhmc_state_ready;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1886) dev_dbg(adapter->dev, "open_resp: set hmc state = ready\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1887) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1888) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1889) dev_warn(adapter->dev, "open_resp: invalid hmc state (0x%x)\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1890) hmcs[hmc_index].state);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1891) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1892) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1893)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1894) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1895) * ibmvmc_process_close_resp - Process Close Response
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1896) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1897) * @crq: ibmvmc_crq_msg struct
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1898) * @adapter: crq_server_adapter struct
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1899) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1900) * This command is sent by the hypervisor in response to the managemant
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1901) * application Interface Close message.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1902) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1903) * If the close fails, simply reset the entire driver as the state of the VMC
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1904) * must be in tough shape.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1905) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1906) static void ibmvmc_process_close_resp(struct ibmvmc_crq_msg *crq,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1907) struct crq_server_adapter *adapter)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1908) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1909) unsigned char hmc_index;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1910)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1911) hmc_index = crq->hmc_index;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1912) if (hmc_index > ibmvmc.max_hmc_index) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1913) ibmvmc_reset(adapter, false);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1914) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1915) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1916)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1917) if (crq->status) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1918) dev_warn(adapter->dev, "close_resp: failed - status 0x%x\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1919) crq->status);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1920) ibmvmc_reset(adapter, false);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1921) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1922) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1923)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1924) ibmvmc_return_hmc(&hmcs[hmc_index], false);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1925) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1926)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1927) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1928) * ibmvmc_crq_process - Process CRQ
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1929) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1930) * @adapter: crq_server_adapter struct
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1931) * @crq: ibmvmc_crq_msg struct
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1932) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1933) * Process the CRQ message based upon the type of message received.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1934) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1935) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1936) static void ibmvmc_crq_process(struct crq_server_adapter *adapter,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1937) struct ibmvmc_crq_msg *crq)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1938) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1939) switch (crq->type) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1940) case VMC_MSG_CAP_RESP:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1941) dev_dbg(adapter->dev, "CRQ recv: capabilities resp (0x%x)\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1942) crq->type);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1943) if (ibmvmc.state == ibmvmc_state_capabilities)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1944) ibmvmc_process_capabilities(adapter, crq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1945) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1946) dev_warn(adapter->dev, "caps msg invalid in state 0x%x\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1947) ibmvmc.state);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1948) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1949) case VMC_MSG_OPEN_RESP:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1950) dev_dbg(adapter->dev, "CRQ recv: open resp (0x%x)\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1951) crq->type);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1952) if (ibmvmc_validate_hmc_session(adapter, crq) == 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1953) ibmvmc_process_open_resp(crq, adapter);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1954) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1955) case VMC_MSG_ADD_BUF:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1956) dev_dbg(adapter->dev, "CRQ recv: add buf (0x%x)\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1957) crq->type);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1958) if (ibmvmc_validate_hmc_session(adapter, crq) == 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1959) ibmvmc_add_buffer(adapter, crq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1960) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1961) case VMC_MSG_REM_BUF:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1962) dev_dbg(adapter->dev, "CRQ recv: rem buf (0x%x)\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1963) crq->type);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1964) if (ibmvmc_validate_hmc_session(adapter, crq) == 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1965) ibmvmc_rem_buffer(adapter, crq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1966) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1967) case VMC_MSG_SIGNAL:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1968) dev_dbg(adapter->dev, "CRQ recv: signal msg (0x%x)\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1969) crq->type);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1970) if (ibmvmc_validate_hmc_session(adapter, crq) == 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1971) ibmvmc_recv_msg(adapter, crq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1972) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1973) case VMC_MSG_CLOSE_RESP:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1974) dev_dbg(adapter->dev, "CRQ recv: close resp (0x%x)\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1975) crq->type);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1976) if (ibmvmc_validate_hmc_session(adapter, crq) == 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1977) ibmvmc_process_close_resp(crq, adapter);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1978) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1979) case VMC_MSG_CAP:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1980) case VMC_MSG_OPEN:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1981) case VMC_MSG_CLOSE:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1982) case VMC_MSG_ADD_BUF_RESP:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1983) case VMC_MSG_REM_BUF_RESP:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1984) dev_warn(adapter->dev, "CRQ recv: unexpected msg (0x%x)\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1985) crq->type);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1986) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1987) default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1988) dev_warn(adapter->dev, "CRQ recv: unknown msg (0x%x)\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1989) crq->type);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1990) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1991) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1992) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1993)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1994) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1995) * ibmvmc_handle_crq_init - Handle CRQ Init
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1996) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1997) * @crq: ibmvmc_crq_msg struct
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1998) * @adapter: crq_server_adapter struct
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1999) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2000) * Handle the type of crq initialization based on whether
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2001) * it is a message or a response.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2002) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2003) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2004) static void ibmvmc_handle_crq_init(struct ibmvmc_crq_msg *crq,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2005) struct crq_server_adapter *adapter)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2006) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2007) switch (crq->type) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2008) case 0x01: /* Initialization message */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2009) dev_dbg(adapter->dev, "CRQ recv: CRQ init msg - state 0x%x\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2010) ibmvmc.state);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2011) if (ibmvmc.state == ibmvmc_state_crqinit) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2012) /* Send back a response */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2013) if (ibmvmc_send_crq(adapter, 0xC002000000000000,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2014) 0) == 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2015) ibmvmc_send_capabilities(adapter);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2016) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2017) dev_err(adapter->dev, " Unable to send init rsp\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2018) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2019) dev_err(adapter->dev, "Invalid state 0x%x mtu = 0x%x\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2020) ibmvmc.state, ibmvmc.max_mtu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2021) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2022)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2023) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2024) case 0x02: /* Initialization response */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2025) dev_dbg(adapter->dev, "CRQ recv: initialization resp msg - state 0x%x\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2026) ibmvmc.state);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2027) if (ibmvmc.state == ibmvmc_state_crqinit)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2028) ibmvmc_send_capabilities(adapter);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2029) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2030) default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2031) dev_warn(adapter->dev, "Unknown crq message type 0x%lx\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2032) (unsigned long)crq->type);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2033) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2034) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2035)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2036) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2037) * ibmvmc_handle_crq - Handle CRQ
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2038) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2039) * @crq: ibmvmc_crq_msg struct
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2040) * @adapter: crq_server_adapter struct
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2041) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2042) * Read the command elements from the command queue and execute the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2043) * requests based upon the type of crq message.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2044) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2045) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2046) static void ibmvmc_handle_crq(struct ibmvmc_crq_msg *crq,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2047) struct crq_server_adapter *adapter)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2048) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2049) switch (crq->valid) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2050) case 0xC0: /* initialization */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2051) ibmvmc_handle_crq_init(crq, adapter);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2052) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2053) case 0xFF: /* Hypervisor telling us the connection is closed */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2054) dev_warn(adapter->dev, "CRQ recv: virtual adapter failed - resetting.\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2055) ibmvmc_reset(adapter, true);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2056) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2057) case 0x80: /* real payload */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2058) ibmvmc_crq_process(adapter, crq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2059) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2060) default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2061) dev_warn(adapter->dev, "CRQ recv: unknown msg 0x%02x.\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2062) crq->valid);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2063) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2064) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2065) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2066)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2067) static void ibmvmc_task(unsigned long data)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2068) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2069) struct crq_server_adapter *adapter =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2070) (struct crq_server_adapter *)data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2071) struct vio_dev *vdev = to_vio_dev(adapter->dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2072) struct ibmvmc_crq_msg *crq;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2073) int done = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2074)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2075) while (!done) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2076) /* Pull all the valid messages off the CRQ */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2077) while ((crq = crq_queue_next_crq(&adapter->queue)) != NULL) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2078) ibmvmc_handle_crq(crq, adapter);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2079) crq->valid = 0x00;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2080) /* CRQ reset was requested, stop processing CRQs.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2081) * Interrupts will be re-enabled by the reset task.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2082) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2083) if (ibmvmc.state == ibmvmc_state_sched_reset)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2084) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2085) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2086)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2087) vio_enable_interrupts(vdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2088) crq = crq_queue_next_crq(&adapter->queue);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2089) if (crq) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2090) vio_disable_interrupts(vdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2091) ibmvmc_handle_crq(crq, adapter);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2092) crq->valid = 0x00;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2093) /* CRQ reset was requested, stop processing CRQs.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2094) * Interrupts will be re-enabled by the reset task.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2095) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2096) if (ibmvmc.state == ibmvmc_state_sched_reset)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2097) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2098) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2099) done = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2100) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2101) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2102) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2103)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2104) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2105) * ibmvmc_init_crq_queue - Init CRQ Queue
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2106) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2107) * @adapter: crq_server_adapter struct
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2108) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2109) * Return:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2110) * 0 - Success
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2111) * Non-zero - Failure
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2112) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2113) static int ibmvmc_init_crq_queue(struct crq_server_adapter *adapter)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2114) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2115) struct vio_dev *vdev = to_vio_dev(adapter->dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2116) struct crq_queue *queue = &adapter->queue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2117) int rc = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2118) int retrc = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2119)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2120) queue->msgs = (struct ibmvmc_crq_msg *)get_zeroed_page(GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2121)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2122) if (!queue->msgs)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2123) goto malloc_failed;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2124)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2125) queue->size = PAGE_SIZE / sizeof(*queue->msgs);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2126)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2127) queue->msg_token = dma_map_single(adapter->dev, queue->msgs,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2128) queue->size * sizeof(*queue->msgs),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2129) DMA_BIDIRECTIONAL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2130)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2131) if (dma_mapping_error(adapter->dev, queue->msg_token))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2132) goto map_failed;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2133)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2134) retrc = plpar_hcall_norets(H_REG_CRQ,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2135) vdev->unit_address,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2136) queue->msg_token, PAGE_SIZE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2137) rc = retrc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2138)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2139) if (rc == H_RESOURCE)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2140) rc = ibmvmc_reset_crq_queue(adapter);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2141)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2142) if (rc == 2) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2143) dev_warn(adapter->dev, "Partner adapter not ready\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2144) retrc = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2145) } else if (rc != 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2146) dev_err(adapter->dev, "Error %d opening adapter\n", rc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2147) goto reg_crq_failed;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2148) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2149)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2150) queue->cur = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2151) spin_lock_init(&queue->lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2152)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2153) tasklet_init(&adapter->work_task, ibmvmc_task, (unsigned long)adapter);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2154)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2155) if (request_irq(vdev->irq,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2156) ibmvmc_handle_event,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2157) 0, "ibmvmc", (void *)adapter) != 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2158) dev_err(adapter->dev, "couldn't register irq 0x%x\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2159) vdev->irq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2160) goto req_irq_failed;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2161) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2162)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2163) rc = vio_enable_interrupts(vdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2164) if (rc != 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2165) dev_err(adapter->dev, "Error %d enabling interrupts!!!\n", rc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2166) goto req_irq_failed;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2167) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2168)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2169) return retrc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2170)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2171) req_irq_failed:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2172) /* Cannot have any work since we either never got our IRQ registered,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2173) * or never got interrupts enabled
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2174) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2175) tasklet_kill(&adapter->work_task);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2176) h_free_crq(vdev->unit_address);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2177) reg_crq_failed:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2178) dma_unmap_single(adapter->dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2179) queue->msg_token,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2180) queue->size * sizeof(*queue->msgs), DMA_BIDIRECTIONAL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2181) map_failed:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2182) free_page((unsigned long)queue->msgs);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2183) malloc_failed:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2184) return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2185) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2186)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2187) /* Fill in the liobn and riobn fields on the adapter */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2188) static int read_dma_window(struct vio_dev *vdev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2189) struct crq_server_adapter *adapter)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2190) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2191) const __be32 *dma_window;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2192) const __be32 *prop;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2193)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2194) /* TODO Using of_parse_dma_window would be better, but it doesn't give
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2195) * a way to read multiple windows without already knowing the size of
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2196) * a window or the number of windows
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2197) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2198) dma_window =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2199) (const __be32 *)vio_get_attribute(vdev, "ibm,my-dma-window",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2200) NULL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2201) if (!dma_window) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2202) dev_warn(adapter->dev, "Couldn't find ibm,my-dma-window property\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2203) return -1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2204) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2205)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2206) adapter->liobn = be32_to_cpu(*dma_window);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2207) dma_window++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2208)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2209) prop = (const __be32 *)vio_get_attribute(vdev, "ibm,#dma-address-cells",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2210) NULL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2211) if (!prop) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2212) dev_warn(adapter->dev, "Couldn't find ibm,#dma-address-cells property\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2213) dma_window++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2214) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2215) dma_window += be32_to_cpu(*prop);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2216) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2217)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2218) prop = (const __be32 *)vio_get_attribute(vdev, "ibm,#dma-size-cells",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2219) NULL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2220) if (!prop) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2221) dev_warn(adapter->dev, "Couldn't find ibm,#dma-size-cells property\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2222) dma_window++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2223) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2224) dma_window += be32_to_cpu(*prop);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2225) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2226)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2227) /* dma_window should point to the second window now */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2228) adapter->riobn = be32_to_cpu(*dma_window);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2229)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2230) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2231) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2232)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2233) static int ibmvmc_probe(struct vio_dev *vdev, const struct vio_device_id *id)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2234) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2235) struct crq_server_adapter *adapter = &ibmvmc_adapter;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2236) int rc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2237)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2238) dev_set_drvdata(&vdev->dev, NULL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2239) memset(adapter, 0, sizeof(*adapter));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2240) adapter->dev = &vdev->dev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2241)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2242) dev_info(adapter->dev, "Probe for UA 0x%x\n", vdev->unit_address);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2243)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2244) rc = read_dma_window(vdev, adapter);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2245) if (rc != 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2246) ibmvmc.state = ibmvmc_state_failed;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2247) return -1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2248) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2249)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2250) dev_dbg(adapter->dev, "Probe: liobn 0x%x, riobn 0x%x\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2251) adapter->liobn, adapter->riobn);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2252)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2253) init_waitqueue_head(&adapter->reset_wait_queue);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2254) adapter->reset_task = kthread_run(ibmvmc_reset_task, adapter, "ibmvmc");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2255) if (IS_ERR(adapter->reset_task)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2256) dev_err(adapter->dev, "Failed to start reset thread\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2257) ibmvmc.state = ibmvmc_state_failed;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2258) rc = PTR_ERR(adapter->reset_task);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2259) adapter->reset_task = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2260) return rc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2261) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2262)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2263) rc = ibmvmc_init_crq_queue(adapter);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2264) if (rc != 0 && rc != H_RESOURCE) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2265) dev_err(adapter->dev, "Error initializing CRQ. rc = 0x%x\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2266) rc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2267) ibmvmc.state = ibmvmc_state_failed;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2268) goto crq_failed;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2269) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2270)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2271) ibmvmc.state = ibmvmc_state_crqinit;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2272)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2273) /* Try to send an initialization message. Note that this is allowed
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2274) * to fail if the other end is not acive. In that case we just wait
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2275) * for the other side to initialize.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2276) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2277) if (ibmvmc_send_crq(adapter, 0xC001000000000000LL, 0) != 0 &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2278) rc != H_RESOURCE)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2279) dev_warn(adapter->dev, "Failed to send initialize CRQ message\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2280)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2281) dev_set_drvdata(&vdev->dev, adapter);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2282)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2283) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2284)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2285) crq_failed:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2286) kthread_stop(adapter->reset_task);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2287) adapter->reset_task = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2288) return -EPERM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2289) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2290)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2291) static int ibmvmc_remove(struct vio_dev *vdev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2292) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2293) struct crq_server_adapter *adapter = dev_get_drvdata(&vdev->dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2294)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2295) dev_info(adapter->dev, "Entering remove for UA 0x%x\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2296) vdev->unit_address);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2297) ibmvmc_release_crq_queue(adapter);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2298)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2299) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2300) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2301)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2302) static struct vio_device_id ibmvmc_device_table[] = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2303) { "ibm,vmc", "IBM,vmc" },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2304) { "", "" }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2305) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2306) MODULE_DEVICE_TABLE(vio, ibmvmc_device_table);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2307)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2308) static struct vio_driver ibmvmc_driver = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2309) .name = ibmvmc_driver_name,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2310) .id_table = ibmvmc_device_table,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2311) .probe = ibmvmc_probe,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2312) .remove = ibmvmc_remove,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2313) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2314)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2315) static void __init ibmvmc_scrub_module_parms(void)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2316) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2317) if (ibmvmc_max_mtu > MAX_MTU) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2318) pr_warn("ibmvmc: Max MTU reduced to %d\n", MAX_MTU);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2319) ibmvmc_max_mtu = MAX_MTU;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2320) } else if (ibmvmc_max_mtu < MIN_MTU) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2321) pr_warn("ibmvmc: Max MTU increased to %d\n", MIN_MTU);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2322) ibmvmc_max_mtu = MIN_MTU;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2323) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2324)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2325) if (ibmvmc_max_buf_pool_size > MAX_BUF_POOL_SIZE) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2326) pr_warn("ibmvmc: Max buffer pool size reduced to %d\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2327) MAX_BUF_POOL_SIZE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2328) ibmvmc_max_buf_pool_size = MAX_BUF_POOL_SIZE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2329) } else if (ibmvmc_max_buf_pool_size < MIN_BUF_POOL_SIZE) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2330) pr_warn("ibmvmc: Max buffer pool size increased to %d\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2331) MIN_BUF_POOL_SIZE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2332) ibmvmc_max_buf_pool_size = MIN_BUF_POOL_SIZE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2333) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2334)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2335) if (ibmvmc_max_hmcs > MAX_HMCS) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2336) pr_warn("ibmvmc: Max HMCs reduced to %d\n", MAX_HMCS);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2337) ibmvmc_max_hmcs = MAX_HMCS;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2338) } else if (ibmvmc_max_hmcs < MIN_HMCS) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2339) pr_warn("ibmvmc: Max HMCs increased to %d\n", MIN_HMCS);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2340) ibmvmc_max_hmcs = MIN_HMCS;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2341) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2342) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2343)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2344) static struct miscdevice ibmvmc_miscdev = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2345) .name = ibmvmc_driver_name,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2346) .minor = MISC_DYNAMIC_MINOR,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2347) .fops = &ibmvmc_fops,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2348) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2349)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2350) static int __init ibmvmc_module_init(void)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2351) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2352) int rc, i, j;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2353)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2354) ibmvmc.state = ibmvmc_state_initial;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2355) pr_info("ibmvmc: version %s\n", IBMVMC_DRIVER_VERSION);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2356)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2357) rc = misc_register(&ibmvmc_miscdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2358) if (rc) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2359) pr_err("ibmvmc: misc registration failed\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2360) goto misc_register_failed;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2361) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2362) pr_info("ibmvmc: node %d:%d\n", MISC_MAJOR,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2363) ibmvmc_miscdev.minor);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2364)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2365) /* Initialize data structures */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2366) memset(hmcs, 0, sizeof(struct ibmvmc_hmc) * MAX_HMCS);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2367) for (i = 0; i < MAX_HMCS; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2368) spin_lock_init(&hmcs[i].lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2369) hmcs[i].state = ibmhmc_state_free;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2370) for (j = 0; j < MAX_BUF_POOL_SIZE; j++)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2371) hmcs[i].queue_outbound_msgs[j] = VMC_INVALID_BUFFER_ID;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2372) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2373)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2374) /* Sanity check module parms */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2375) ibmvmc_scrub_module_parms();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2376)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2377) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2378) * Initialize some reasonable values. Might be negotiated smaller
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2379) * values during the capabilities exchange.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2380) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2381) ibmvmc.max_mtu = ibmvmc_max_mtu;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2382) ibmvmc.max_buffer_pool_size = ibmvmc_max_buf_pool_size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2383) ibmvmc.max_hmc_index = ibmvmc_max_hmcs - 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2384)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2385) rc = vio_register_driver(&ibmvmc_driver);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2386)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2387) if (rc) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2388) pr_err("ibmvmc: rc %d from vio_register_driver\n", rc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2389) goto vio_reg_failed;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2390) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2391)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2392) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2393)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2394) vio_reg_failed:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2395) misc_deregister(&ibmvmc_miscdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2396) misc_register_failed:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2397) return rc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2398) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2399)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2400) static void __exit ibmvmc_module_exit(void)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2401) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2402) pr_info("ibmvmc: module exit\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2403) vio_unregister_driver(&ibmvmc_driver);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2404) misc_deregister(&ibmvmc_miscdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2405) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2406)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2407) module_init(ibmvmc_module_init);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2408) module_exit(ibmvmc_module_exit);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2409)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2410) module_param_named(buf_pool_size, ibmvmc_max_buf_pool_size,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2411) int, 0644);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2412) MODULE_PARM_DESC(buf_pool_size, "Buffer pool size");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2413) module_param_named(max_hmcs, ibmvmc_max_hmcs, int, 0644);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2414) MODULE_PARM_DESC(max_hmcs, "Max HMCs");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2415) module_param_named(max_mtu, ibmvmc_max_mtu, int, 0644);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2416) MODULE_PARM_DESC(max_mtu, "Max MTU");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2417)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2418) MODULE_AUTHOR("Steven Royer <seroyer@linux.vnet.ibm.com>");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2419) MODULE_DESCRIPTION("IBM VMC");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2420) MODULE_VERSION(IBMVMC_DRIVER_VERSION);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2421) MODULE_LICENSE("GPL v2");