/*
 * Freescale Hypervisor Management Driver
 *
 * Copyright (C) 2008-2011 Freescale Semiconductor, Inc.
 * Author: Timur Tabi <timur@freescale.com>
 *
 * This file is licensed under the terms of the GNU General Public License
 * version 2. This program is licensed "as is" without any warranty of any
 * kind, whether express or implied.
 *
 * The Freescale hypervisor management driver provides several services to
 * drivers and applications related to the Freescale hypervisor:
 *
 * 1. An ioctl interface for querying and managing partitions.
 *
 * 2. A file interface for reading incoming doorbells.
 *
 * 3. An interrupt handler for shutting down the partition upon receiving the
 *    shutdown doorbell from a manager partition.
 *
 * 4. A kernel interface for receiving callbacks when a managed partition
 *    shuts down.
 */
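
/*
 * Illustrative sketch (not part of the driver): a user-space caller would
 * typically open the device node created for the "fsl-hv" misc device and
 * issue one of the ioctls defined in <linux/fsl_hypervisor.h>.  The exact
 * device path depends on udev; /dev/fsl-hv and the 'handle' variable are
 * assumptions for the example.  Field names follow the structures used by
 * the handlers below.
 *
 *	int fd = open("/dev/fsl-hv", O_RDWR);
 *	struct fsl_hv_ioctl_status status = { .partition = handle };
 *
 *	if (fd >= 0 &&
 *	    ioctl(fd, FSL_HV_IOCTL_PARTITION_GET_STATUS, &status) == 0 &&
 *	    status.ret == 0)
 *		printf("partition %u status %u\n", handle, status.status);
 */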

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/types.h>
#include <linux/err.h>
#include <linux/fs.h>
#include <linux/miscdevice.h>
#include <linux/mm.h>
#include <linux/pagemap.h>
#include <linux/slab.h>
#include <linux/poll.h>
#include <linux/of.h>
#include <linux/of_irq.h>
#include <linux/reboot.h>
#include <linux/uaccess.h>
#include <linux/notifier.h>
#include <linux/interrupt.h>

#include <linux/io.h>
#include <asm/fsl_hcalls.h>

#include <linux/fsl_hypervisor.h>

static BLOCKING_NOTIFIER_HEAD(failover_subscribers);

/*
 * Ioctl interface for FSL_HV_IOCTL_PARTITION_RESTART
 *
 * Restart a running partition
 */
static long ioctl_restart(struct fsl_hv_ioctl_restart __user *p)
{
	struct fsl_hv_ioctl_restart param;

	/* Get the parameters from the user */
	if (copy_from_user(&param, p, sizeof(struct fsl_hv_ioctl_restart)))
		return -EFAULT;

	param.ret = fh_partition_restart(param.partition);

	if (copy_to_user(&p->ret, &param.ret, sizeof(__u32)))
		return -EFAULT;

	return 0;
}

/*
 * Ioctl interface for FSL_HV_IOCTL_PARTITION_STATUS
 *
 * Query the status of a partition
 */
static long ioctl_status(struct fsl_hv_ioctl_status __user *p)
{
	struct fsl_hv_ioctl_status param;
	u32 status;

	/* Get the parameters from the user */
	if (copy_from_user(&param, p, sizeof(struct fsl_hv_ioctl_status)))
		return -EFAULT;

	param.ret = fh_partition_get_status(param.partition, &status);
	if (!param.ret)
		param.status = status;

	if (copy_to_user(p, &param, sizeof(struct fsl_hv_ioctl_status)))
		return -EFAULT;

	return 0;
}

/*
 * Ioctl interface for FSL_HV_IOCTL_PARTITION_START
 *
 * Start a stopped partition.
 */
static long ioctl_start(struct fsl_hv_ioctl_start __user *p)
{
	struct fsl_hv_ioctl_start param;

	/* Get the parameters from the user */
	if (copy_from_user(&param, p, sizeof(struct fsl_hv_ioctl_start)))
		return -EFAULT;

	param.ret = fh_partition_start(param.partition, param.entry_point,
				       param.load);

	if (copy_to_user(&p->ret, &param.ret, sizeof(__u32)))
		return -EFAULT;

	return 0;
}

/*
 * Ioctl interface for FSL_HV_IOCTL_PARTITION_STOP
 *
 * Stop a running partition
 */
static long ioctl_stop(struct fsl_hv_ioctl_stop __user *p)
{
	struct fsl_hv_ioctl_stop param;

	/* Get the parameters from the user */
	if (copy_from_user(&param, p, sizeof(struct fsl_hv_ioctl_stop)))
		return -EFAULT;

	param.ret = fh_partition_stop(param.partition);

	if (copy_to_user(&p->ret, &param.ret, sizeof(__u32)))
		return -EFAULT;

	return 0;
}

/*
 * Ioctl interface for FSL_HV_IOCTL_MEMCPY
 *
 * The FH_MEMCPY hypercall takes an array of address/address/size structures
 * to represent the data being copied. As a convenience to the user, this
 * ioctl takes a user-created buffer and a pointer to a guest physically
 * contiguous buffer in the remote partition, and creates the
 * address/address/size array for the hypercall.
 */
static long ioctl_memcpy(struct fsl_hv_ioctl_memcpy __user *p)
{
	struct fsl_hv_ioctl_memcpy param;

	struct page **pages = NULL;
	void *sg_list_unaligned = NULL;
	struct fh_sg_list *sg_list = NULL;

	unsigned int num_pages;
	unsigned long lb_offset; /* Offset within a page of the local buffer */

	unsigned int i;
	long ret = 0;
	int num_pinned = 0; /* return value from get_user_pages_fast() */
	phys_addr_t remote_paddr; /* The next address in the remote buffer */
	uint32_t count; /* The number of bytes left to copy */

	/* Get the parameters from the user */
	if (copy_from_user(&param, p, sizeof(struct fsl_hv_ioctl_memcpy)))
		return -EFAULT;

	/*
	 * One partition must be local, the other must be remote. In other
	 * words, if source and target are both -1, or are both not -1, then
	 * return an error.
	 */
	if ((param.source == -1) == (param.target == -1))
		return -EINVAL;

	/*
	 * The array of pages returned by get_user_pages_fast() covers only
	 * page-aligned memory. Since the user buffer is probably not
	 * page-aligned, we need to handle the discrepancy.
	 *
	 * We calculate the offset within a page of the S/G list, and make
	 * adjustments accordingly. This will result in a page list that looks
	 * like this:
	 *
	 *      ----    <-- first page starts before the buffer
	 *     |    |
	 *     |////|-> ----
	 *     |////|  |    |
	 *      ----   |    |
	 *             |    |
	 *      ----   |    |
	 *     |////|  |    |
	 *     |////|  |    |
	 *     |////|  |    |
	 *      ----   |    |
	 *             |    |
	 *      ----   |    |
	 *     |////|  |    |
	 *     |////|  |    |
	 *     |////|  |    |
	 *      ----   |    |
	 *             |    |
	 *      ----   |    |
	 *     |////|  |    |
	 *     |////|-> ----
	 *     |    |   <-- last page ends after the buffer
	 *      ----
	 *
	 * The distance between the start of the first page and the start of the
	 * buffer is lb_offset. The hashed (///) areas are the parts of the
	 * page list that contain the actual buffer.
	 *
	 * The advantage of this approach is that the number of pages is
	 * equal to the number of entries in the S/G list that we give to the
	 * hypervisor.
	 */
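
	/*
	 * Worked example (illustrative numbers, assuming 4 KiB pages): a
	 * local buffer at page offset 512 (lb_offset = 512) with
	 * param.count = 5000 spans two pages, so num_pages below is
	 * (5000 + 512 + 4095) >> 12 = 2.  The first S/G entry then covers
	 * PAGE_SIZE - lb_offset = 3584 bytes and the second covers the
	 * remaining 1416 bytes.
	 */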
	lb_offset = param.local_vaddr & (PAGE_SIZE - 1);
	if (param.count == 0 ||
	    param.count > U64_MAX - lb_offset - PAGE_SIZE + 1)
		return -EINVAL;
	num_pages = (param.count + lb_offset + PAGE_SIZE - 1) >> PAGE_SHIFT;

	/* Allocate the buffers we need */

	/*
	 * 'pages' is an array of struct page pointers that's initialized by
	 * get_user_pages_fast().
	 */
	pages = kcalloc(num_pages, sizeof(struct page *), GFP_KERNEL);
	if (!pages) {
		pr_debug("fsl-hv: could not allocate page list\n");
		return -ENOMEM;
	}

	/*
	 * sg_list is the list of fh_sg_list objects that we pass to the
	 * hypervisor.
	 */
	sg_list_unaligned = kmalloc(num_pages * sizeof(struct fh_sg_list) +
				    sizeof(struct fh_sg_list) - 1, GFP_KERNEL);
	if (!sg_list_unaligned) {
		pr_debug("fsl-hv: could not allocate S/G list\n");
		ret = -ENOMEM;
		goto free_pages;
	}
	sg_list = PTR_ALIGN(sg_list_unaligned, sizeof(struct fh_sg_list));

	/* Get the physical addresses of the source buffer */
	num_pinned = get_user_pages_fast(param.local_vaddr - lb_offset,
		num_pages, param.source != -1 ? FOLL_WRITE : 0, pages);

	if (num_pinned != num_pages) {
		pr_debug("fsl-hv: could not lock source buffer\n");
		ret = (num_pinned < 0) ? num_pinned : -EFAULT;
		goto exit;
	}

	/*
	 * Build the fh_sg_list[] array. The first page is special
	 * because it's misaligned.
	 */
	if (param.source == -1) {
		sg_list[0].source = page_to_phys(pages[0]) + lb_offset;
		sg_list[0].target = param.remote_paddr;
	} else {
		sg_list[0].source = param.remote_paddr;
		sg_list[0].target = page_to_phys(pages[0]) + lb_offset;
	}
	sg_list[0].size = min_t(uint64_t, param.count, PAGE_SIZE - lb_offset);

	remote_paddr = param.remote_paddr + sg_list[0].size;
	count = param.count - sg_list[0].size;

	for (i = 1; i < num_pages; i++) {
		if (param.source == -1) {
			/* local to remote */
			sg_list[i].source = page_to_phys(pages[i]);
			sg_list[i].target = remote_paddr;
		} else {
			/* remote to local */
			sg_list[i].source = remote_paddr;
			sg_list[i].target = page_to_phys(pages[i]);
		}
		sg_list[i].size = min_t(uint64_t, count, PAGE_SIZE);

		remote_paddr += sg_list[i].size;
		count -= sg_list[i].size;
	}

	param.ret = fh_partition_memcpy(param.source, param.target,
		virt_to_phys(sg_list), num_pages);

exit:
	if (pages && (num_pinned > 0)) {
		for (i = 0; i < num_pinned; i++)
			put_page(pages[i]);
	}

	kfree(sg_list_unaligned);
free_pages:
	kfree(pages);

	if (!ret)
		if (copy_to_user(&p->ret, &param.ret, sizeof(__u32)))
			return -EFAULT;

	return ret;
}
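
/*
 * Illustrative sketch (not part of the driver): a user-space caller copying
 * a local buffer into a remote partition would fill in the ioctl structure
 * roughly as follows.  A source of -1 means "this partition", so the target
 * field carries the remote partition's handle; 'fd', 'buf', 'len' and the
 * remote address are assumptions for the example.
 *
 *	struct fsl_hv_ioctl_memcpy cpy = {
 *		.source = -1,			// local partition is the source
 *		.target = remote_handle,	// remote partition's handle
 *		.local_vaddr = (uintptr_t)buf,
 *		.remote_paddr = 0x10000000,	// guest physical address
 *		.count = len,
 *	};
 *
 *	if (ioctl(fd, FSL_HV_IOCTL_MEMCPY, &cpy) == 0 && cpy.ret == 0)
 *		puts("copy succeeded");
 */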

/*
 * Ioctl interface for FSL_HV_IOCTL_DOORBELL
 *
 * Ring a doorbell
 */
static long ioctl_doorbell(struct fsl_hv_ioctl_doorbell __user *p)
{
	struct fsl_hv_ioctl_doorbell param;

	/* Get the parameters from the user. */
	if (copy_from_user(&param, p, sizeof(struct fsl_hv_ioctl_doorbell)))
		return -EFAULT;

	param.ret = ev_doorbell_send(param.doorbell);

	if (copy_to_user(&p->ret, &param.ret, sizeof(__u32)))
		return -EFAULT;

	return 0;
}

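/*
 * Ioctl interface for FSL_HV_IOCTL_GETPROP and FSL_HV_IOCTL_SETPROP
 *
 * Get or set a property in a partition's guest device tree.  The 'set'
 * argument selects between the two operations.
 */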
static long ioctl_dtprop(struct fsl_hv_ioctl_prop __user *p, int set)
{
	struct fsl_hv_ioctl_prop param;
	char __user *upath, *upropname;
	void __user *upropval;
	char *path, *propname;
	void *propval;
	int ret = 0;

	/* Get the parameters from the user. */
	if (copy_from_user(&param, p, sizeof(struct fsl_hv_ioctl_prop)))
		return -EFAULT;

	upath = (char __user *)(uintptr_t)param.path;
	upropname = (char __user *)(uintptr_t)param.propname;
	upropval = (void __user *)(uintptr_t)param.propval;

	path = strndup_user(upath, FH_DTPROP_MAX_PATHLEN);
	if (IS_ERR(path))
		return PTR_ERR(path);

	propname = strndup_user(upropname, FH_DTPROP_MAX_PATHLEN);
	if (IS_ERR(propname)) {
		ret = PTR_ERR(propname);
		goto err_free_path;
	}

	if (param.proplen > FH_DTPROP_MAX_PROPLEN) {
		ret = -EINVAL;
		goto err_free_propname;
	}

	propval = kmalloc(param.proplen, GFP_KERNEL);
	if (!propval) {
		ret = -ENOMEM;
		goto err_free_propname;
	}

	if (set) {
		if (copy_from_user(propval, upropval, param.proplen)) {
			ret = -EFAULT;
			goto err_free_propval;
		}

		param.ret = fh_partition_set_dtprop(param.handle,
						    virt_to_phys(path),
						    virt_to_phys(propname),
						    virt_to_phys(propval),
						    param.proplen);
	} else {
		param.ret = fh_partition_get_dtprop(param.handle,
						    virt_to_phys(path),
						    virt_to_phys(propname),
						    virt_to_phys(propval),
						    &param.proplen);

		if (param.ret == 0) {
			if (copy_to_user(upropval, propval, param.proplen) ||
			    put_user(param.proplen, &p->proplen)) {
				ret = -EFAULT;
				goto err_free_propval;
			}
		}
	}

	if (put_user(param.ret, &p->ret))
		ret = -EFAULT;

err_free_propval:
	kfree(propval);
err_free_propname:
	kfree(propname);
err_free_path:
	kfree(path);

	return ret;
}
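
/*
 * Illustrative sketch (not part of the driver): reading a property from a
 * partition's device tree.  The path, property name and value buffers are
 * passed as 64-bit user-space addresses; 'fd', the node path and the
 * property name below are arbitrary examples, not values the driver
 * requires.
 *
 *	char value[64];
 *	struct fsl_hv_ioctl_prop prop = {
 *		.handle = partition_handle,
 *		.path = (uintptr_t)"/cpus/cpu@0",
 *		.propname = (uintptr_t)"status",
 *		.propval = (uintptr_t)value,
 *		.proplen = sizeof(value),
 *	};
 *
 *	if (ioctl(fd, FSL_HV_IOCTL_GETPROP, &prop) == 0 && prop.ret == 0)
 *		printf("%u bytes of property data\n", prop.proplen);
 */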

/*
 * Ioctl main entry point
 */
static long fsl_hv_ioctl(struct file *file, unsigned int cmd,
			 unsigned long argaddr)
{
	void __user *arg = (void __user *)argaddr;
	long ret;

	switch (cmd) {
	case FSL_HV_IOCTL_PARTITION_RESTART:
		ret = ioctl_restart(arg);
		break;
	case FSL_HV_IOCTL_PARTITION_GET_STATUS:
		ret = ioctl_status(arg);
		break;
	case FSL_HV_IOCTL_PARTITION_START:
		ret = ioctl_start(arg);
		break;
	case FSL_HV_IOCTL_PARTITION_STOP:
		ret = ioctl_stop(arg);
		break;
	case FSL_HV_IOCTL_MEMCPY:
		ret = ioctl_memcpy(arg);
		break;
	case FSL_HV_IOCTL_DOORBELL:
		ret = ioctl_doorbell(arg);
		break;
	case FSL_HV_IOCTL_GETPROP:
		ret = ioctl_dtprop(arg, 0);
		break;
	case FSL_HV_IOCTL_SETPROP:
		ret = ioctl_dtprop(arg, 1);
		break;
	default:
		pr_debug("fsl-hv: bad ioctl dir=%u type=%u cmd=%u size=%u\n",
			 _IOC_DIR(cmd), _IOC_TYPE(cmd), _IOC_NR(cmd),
			 _IOC_SIZE(cmd));
		return -ENOTTY;
	}

	return ret;
}

/* Linked list of processes that have us open */
static struct list_head db_list;

/* spinlock for db_list */
static DEFINE_SPINLOCK(db_list_lock);

/* The size of the doorbell event queue. This must be a power of two. */
#define QSIZE	16

/* Returns the next head/tail pointer, wrapping around the queue if necessary */
#define nextp(x) (((x) + 1) & (QSIZE - 1))
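
/*
 * For example, with QSIZE == 16, nextp(14) is 15 and nextp(15) wraps back
 * to 0.  A queue is considered full when nextp(tail) == head, so each open
 * instance holds at most QSIZE - 1 pending doorbell handles.
 */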

/* Per-open data structure */
struct doorbell_queue {
	struct list_head list;
	spinlock_t lock;
	wait_queue_head_t wait;
	unsigned int head;
	unsigned int tail;
	uint32_t q[QSIZE];
};

/* Linked list of ISRs that we registered */
struct list_head isr_list;

/* Per-ISR data structure */
struct doorbell_isr {
	struct list_head list;
	unsigned int irq;
	uint32_t doorbell;	/* The doorbell handle */
	uint32_t partition;	/* The partition handle, if used */
};

/*
 * Add a doorbell to all of the doorbell queues
 */
static void fsl_hv_queue_doorbell(uint32_t doorbell)
{
	struct doorbell_queue *dbq;
	unsigned long flags;

	/* Prevent another core from modifying db_list */
	spin_lock_irqsave(&db_list_lock, flags);

	list_for_each_entry(dbq, &db_list, list) {
		if (dbq->head != nextp(dbq->tail)) {
			dbq->q[dbq->tail] = doorbell;
			/*
			 * This memory barrier eliminates the need to grab
			 * the spinlock for dbq.
			 */
			smp_wmb();
			dbq->tail = nextp(dbq->tail);
			wake_up_interruptible(&dbq->wait);
		}
	}

	spin_unlock_irqrestore(&db_list_lock, flags);
}

/*
 * Interrupt handler for all doorbells
 *
 * We use the same interrupt handler for all doorbells. Whenever a doorbell
 * is rung, and we receive an interrupt, we just put the handle for that
 * doorbell (passed to us as *data) into all of the queues.
 */
static irqreturn_t fsl_hv_isr(int irq, void *data)
{
	fsl_hv_queue_doorbell((uintptr_t) data);

	return IRQ_HANDLED;
}

/*
 * State change thread function
 *
 * The state change notification arrives in an interrupt, but we can't call
 * blocking_notifier_call_chain() in an interrupt handler. We could call
 * atomic_notifier_call_chain(), but that would require the clients' call-back
 * function to run in interrupt context. Since we don't want to impose that
 * restriction on the clients, we use a threaded IRQ to process the
 * notification in kernel context.
 */
static irqreturn_t fsl_hv_state_change_thread(int irq, void *data)
{
	struct doorbell_isr *dbisr = data;

	blocking_notifier_call_chain(&failover_subscribers, dbisr->partition,
				     NULL);

	return IRQ_HANDLED;
}

/*
 * Interrupt handler for state-change doorbells
 */
static irqreturn_t fsl_hv_state_change_isr(int irq, void *data)
{
	unsigned int status;
	struct doorbell_isr *dbisr = data;
	int ret;

	/* It's still a doorbell, so add it to all the queues. */
	fsl_hv_queue_doorbell(dbisr->doorbell);

	/* Determine the new state, and if it's stopped, notify the clients. */
	ret = fh_partition_get_status(dbisr->partition, &status);
	if (!ret && (status == FH_PARTITION_STOPPED))
		return IRQ_WAKE_THREAD;

	return IRQ_HANDLED;
}

/*
 * Returns a bitmask indicating whether a read will block
 */
static __poll_t fsl_hv_poll(struct file *filp, struct poll_table_struct *p)
{
	struct doorbell_queue *dbq = filp->private_data;
	unsigned long flags;
	__poll_t mask;

	spin_lock_irqsave(&dbq->lock, flags);

	poll_wait(filp, &dbq->wait, p);
	mask = (dbq->head == dbq->tail) ? 0 : (EPOLLIN | EPOLLRDNORM);

	spin_unlock_irqrestore(&dbq->lock, flags);

	return mask;
}

/*
 * Return the handles for any incoming doorbells
 *
 * If there are doorbell handles in the queue for this open instance, then
 * return them to the caller as an array of 32-bit integers. Otherwise,
 * block until there is at least one handle to return.
 */
static ssize_t fsl_hv_read(struct file *filp, char __user *buf, size_t len,
			   loff_t *off)
{
	struct doorbell_queue *dbq = filp->private_data;
	uint32_t __user *p = (uint32_t __user *) buf; /* for put_user() */
	unsigned long flags;
	ssize_t count = 0;

	/* Make sure we stop when the user buffer is full. */
	while (len >= sizeof(uint32_t)) {
		uint32_t dbell; /* Local copy of doorbell queue data */

		spin_lock_irqsave(&dbq->lock, flags);

		/*
		 * If the queue is empty, then either we're done or we need
		 * to block. If the application specified O_NONBLOCK, then
		 * we return the appropriate error code.
		 */
		if (dbq->head == dbq->tail) {
			spin_unlock_irqrestore(&dbq->lock, flags);
			if (count)
				break;
			if (filp->f_flags & O_NONBLOCK)
				return -EAGAIN;
			if (wait_event_interruptible(dbq->wait,
						     dbq->head != dbq->tail))
				return -ERESTARTSYS;
			continue;
		}

		/*
		 * Even though we have an smp_wmb() in the ISR, the core
		 * might speculatively execute the "dbell = ..." below while
		 * it's evaluating the if-statement above. In that case, the
		 * value put into dbell could be stale if the core accepts the
		 * speculation. To prevent that, we need a read memory barrier
		 * here as well.
		 */
		smp_rmb();

		/*
		 * Copy the data to a temporary local buffer, because
		 * we can't call copy_to_user() from inside a spinlock
		 */
		dbell = dbq->q[dbq->head];
		dbq->head = nextp(dbq->head);

		spin_unlock_irqrestore(&dbq->lock, flags);

		if (put_user(dbell, p))
			return -EFAULT;
		p++;
		count += sizeof(uint32_t);
		len -= sizeof(uint32_t);
	}

	return count;
}
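
/*
 * Illustrative sketch (not part of the driver): a user-space reader of
 * incoming doorbells.  Each read() returns one or more 32-bit doorbell
 * handles; poll() may be used to wait without blocking in read().  'fd' is
 * assumed to be an open descriptor for the device node.
 *
 *	uint32_t handles[8];
 *	ssize_t i, n = read(fd, handles, sizeof(handles));
 *
 *	for (i = 0; i < n / (ssize_t)sizeof(uint32_t); i++)
 *		printf("doorbell %u rang\n", handles[i]);
 */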

/*
 * Open the driver and prepare for reading doorbells.
 *
 * Every time an application opens the driver, we create a doorbell queue
 * for that file handle. This queue is used for any incoming doorbells.
 */
static int fsl_hv_open(struct inode *inode, struct file *filp)
{
	struct doorbell_queue *dbq;
	unsigned long flags;
	int ret = 0;

	dbq = kzalloc(sizeof(struct doorbell_queue), GFP_KERNEL);
	if (!dbq) {
		pr_err("fsl-hv: out of memory\n");
		return -ENOMEM;
	}

	spin_lock_init(&dbq->lock);
	init_waitqueue_head(&dbq->wait);

	spin_lock_irqsave(&db_list_lock, flags);
	list_add(&dbq->list, &db_list);
	spin_unlock_irqrestore(&db_list_lock, flags);

	filp->private_data = dbq;

	return ret;
}

/*
 * Close the driver
 */
static int fsl_hv_close(struct inode *inode, struct file *filp)
{
	struct doorbell_queue *dbq = filp->private_data;
	unsigned long flags;

	int ret = 0;

	spin_lock_irqsave(&db_list_lock, flags);
	list_del(&dbq->list);
	spin_unlock_irqrestore(&db_list_lock, flags);

	kfree(dbq);

	return ret;
}

static const struct file_operations fsl_hv_fops = {
	.owner = THIS_MODULE,
	.open = fsl_hv_open,
	.release = fsl_hv_close,
	.poll = fsl_hv_poll,
	.read = fsl_hv_read,
	.unlocked_ioctl = fsl_hv_ioctl,
	.compat_ioctl = compat_ptr_ioctl,
};

static struct miscdevice fsl_hv_misc_dev = {
	.minor = MISC_DYNAMIC_MINOR,
	.name = "fsl-hv",
	.fops = &fsl_hv_fops,
};

static irqreturn_t fsl_hv_shutdown_isr(int irq, void *data)
{
	orderly_poweroff(false);

	return IRQ_HANDLED;
}

/*
 * Returns the handle of the parent of the given node
 *
 * The handle is the value of the 'hv-handle' property
 */
static int get_parent_handle(struct device_node *np)
{
	struct device_node *parent;
	const uint32_t *prop;
	uint32_t handle;
	int len;

	parent = of_get_parent(np);
	if (!parent)
		/* It's not really possible for this to fail */
		return -ENODEV;

	/*
	 * The proper name for the handle property is "hv-handle", but some
	 * older versions of the hypervisor used "reg".
	 */
	prop = of_get_property(parent, "hv-handle", &len);
	if (!prop)
		prop = of_get_property(parent, "reg", &len);

	if (!prop || (len != sizeof(uint32_t))) {
		/* This can happen only if the node is malformed */
		of_node_put(parent);
		return -ENODEV;
	}

	handle = be32_to_cpup(prop);
	of_node_put(parent);

	return handle;
}
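
/*
 * Illustrative device tree fragment (simplified; the exact binding is
 * defined by the hypervisor, and the node names, handle and interrupt
 * values below are only examples).  A receive-doorbell node carries the
 * doorbell handle in its "interrupts" property, and its parent's
 * "hv-handle" (or, on older hypervisors, "reg") property names the managed
 * partition:
 *
 *	partition@2 {
 *		hv-handle = <2>;
 *
 *		doorbell@1 {
 *			compatible = "epapr,hv-receive-doorbell",
 *				     "fsl,hv-state-change-doorbell";
 *			interrupts = <1 0>;
 *		};
 *	};
 */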

/*
 * Register a callback for failover events
 *
 * This function is called by device drivers to register their callback
 * functions for fail-over events.
 */
int fsl_hv_failover_register(struct notifier_block *nb)
{
	return blocking_notifier_chain_register(&failover_subscribers, nb);
}
EXPORT_SYMBOL(fsl_hv_failover_register);
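
/*
 * Illustrative sketch (not part of the driver): a client driver subscribes
 * with a notifier_block; the callback runs in process context and receives
 * the handle of the partition that stopped as the 'action' argument.  The
 * function and variable names here are assumptions for the example.
 *
 *	static int my_failover_cb(struct notifier_block *nb,
 *				  unsigned long partition, void *data)
 *	{
 *		pr_info("partition %lu failed over\n", partition);
 *		return NOTIFY_OK;
 *	}
 *
 *	static struct notifier_block my_failover_nb = {
 *		.notifier_call = my_failover_cb,
 *	};
 *
 *	ret = fsl_hv_failover_register(&my_failover_nb);
 */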
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 772)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 773) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 774) * Unregister a callback for failover events
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 775) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 776) int fsl_hv_failover_unregister(struct notifier_block *nb)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 777) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 778) return blocking_notifier_chain_unregister(&failover_subscribers, nb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 779) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 780) EXPORT_SYMBOL(fsl_hv_failover_unregister);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 781)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 782) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 783) * Return TRUE if we're running under FSL hypervisor
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 784) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 785) * This function checks to see if we're running under the Freescale
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 786) * hypervisor, and returns zero if we're not, or non-zero if we are.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 787) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 788) * First, it checks if MSR[GS]==1, which means we're running under some
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 789) * hypervisor. Then it checks if there is a hypervisor node in the device
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 790) * tree. Currently, that means there needs to be a node in the root called
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 791) * "hypervisor" and which has a property named "fsl,hv-version".
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 792) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 793) static int has_fsl_hypervisor(void)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 794) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 795) struct device_node *node;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 796) int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 797)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 798) node = of_find_node_by_path("/hypervisor");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 799) if (!node)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 800) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 801)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 802) ret = of_find_property(node, "fsl,hv-version", NULL) != NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 803)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 804) of_node_put(node);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 805)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 806) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 807) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 808)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 809) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 810) * Freescale hypervisor management driver init
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 811) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 812) * This function is called when this module is loaded.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 813) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 814) * Register ourselves as a miscellaneous driver. This will register the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 815) * fops structure and create the right sysfs entries for udev.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 816) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 817) static int __init fsl_hypervisor_init(void)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 818) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 819) struct device_node *np;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 820) struct doorbell_isr *dbisr, *n;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 821) int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 822)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 823) pr_info("Freescale hypervisor management driver\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 824)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 825) if (!has_fsl_hypervisor()) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 826) pr_info("fsl-hv: no hypervisor found\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 827) return -ENODEV;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 828) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 829)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 830) ret = misc_register(&fsl_hv_misc_dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 831) if (ret) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 832) pr_err("fsl-hv: cannot register device\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 833) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 834) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 835)
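	/*
	 * db_list tracks the doorbell queues of processes that have the
	 * device open; isr_list tracks the handlers registered below so
	 * they can be freed on error or module unload.
	 */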
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 836) INIT_LIST_HEAD(&db_list);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 837) INIT_LIST_HEAD(&isr_list);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 838)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 839) for_each_compatible_node(np, NULL, "epapr,hv-receive-doorbell") {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 840) unsigned int irq;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 841) const uint32_t *handle;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 842)
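		/*
		 * The first cell of the "interrupts" property doubles as the
		 * doorbell handle; the virq from irq_of_parse_and_map() is
		 * what request_irq() needs.
		 */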
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 843) handle = of_get_property(np, "interrupts", NULL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 844) irq = irq_of_parse_and_map(np, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 845) if (!handle || (irq == NO_IRQ)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 846) pr_err("fsl-hv: missing 'interrupts' property or unmappable IRQ in node %pOF\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 847) np);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 848) continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 849) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 850)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 851) dbisr = kzalloc(sizeof(*dbisr), GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 852) if (!dbisr) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 853) /* drop the reference taken by for_each_compatible_node() */
			of_node_put(np);
			goto out_of_memory;
		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 854)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 855) dbisr->irq = irq;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 856) dbisr->doorbell = be32_to_cpup(handle);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 857)
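		/*
		 * Pick a handler based on the node's compatible string:
		 * shutdown and state-change doorbells get dedicated handlers,
		 * everything else is delivered to readers by fsl_hv_isr().
		 */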
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 858) if (of_device_is_compatible(np, "fsl,hv-shutdown-doorbell")) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 859) /* The shutdown doorbell gets its own ISR; pass dbisr as the dev_id
 * cookie so the free_irq(dbisr->irq, dbisr) calls in the cleanup
 * paths match this registration. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 860) ret = request_irq(irq, fsl_hv_shutdown_isr, 0,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 861) np->name, dbisr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 862) } else if (of_device_is_compatible(np,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 863) "fsl,hv-state-change-doorbell")) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 864) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 865) * The state change doorbell triggers a notification if
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 866) * the state of the managed partition changes to
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 867) * "stopped". We need a separate interrupt handler for
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 868) * that, and we also need to know the handle of the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 869) * target partition, not just the handle of the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 870) * doorbell.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 871) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 872) dbisr->partition = ret = get_parent_handle(np);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 873) if (ret < 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 874) pr_err("fsl-hv: node %pOF has missing or malformed parent\n", np);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 876) kfree(dbisr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 877) continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 878) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 879) ret = request_threaded_irq(irq, fsl_hv_state_change_isr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 880) fsl_hv_state_change_thread,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 881) 0, np->name, dbisr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 882) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 883) ret = request_irq(irq, fsl_hv_isr, 0, np->name, dbisr);
		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 884)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 885) if (ret < 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 886) pr_err("fsl-hv: could not request irq %u for node %pOF\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 887) irq, np);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 888) kfree(dbisr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 889) continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 890) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 891)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 892) list_add(&dbisr->list, &isr_list);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 893)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 894) pr_info("fsl-hv: registered handler for doorbell %u\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 895) dbisr->doorbell);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 896) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 897)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 898) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 899)
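	/*
	 * Unwind on allocation failure: free every handler registered so
	 * far, then drop the misc device before failing the module load.
	 */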
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 900) out_of_memory:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 901) list_for_each_entry_safe(dbisr, n, &isr_list, list) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 902) free_irq(dbisr->irq, dbisr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 903) list_del(&dbisr->list);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 904) kfree(dbisr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 905) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 906)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 907) misc_deregister(&fsl_hv_misc_dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 908)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 909) return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 910) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 911)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 912) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 913) * Freescale hypervisor management driver termination
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 914) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 915) * This function is called when this driver is unloaded.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 916) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 917) static void __exit fsl_hypervisor_exit(void)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 918) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 919) struct doorbell_isr *dbisr, *n;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 920)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 921) list_for_each_entry_safe(dbisr, n, &isr_list, list) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 922) free_irq(dbisr->irq, dbisr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 923) list_del(&dbisr->list);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 924) kfree(dbisr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 925) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 926)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 927) misc_deregister(&fsl_hv_misc_dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 928) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 929)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 930) module_init(fsl_hypervisor_init);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 931) module_exit(fsl_hypervisor_exit);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 932)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 933) MODULE_AUTHOR("Timur Tabi <timur@freescale.com>");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 934) MODULE_DESCRIPTION("Freescale hypervisor management driver");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 935) MODULE_LICENSE("GPL v2");