// SPDX-License-Identifier: GPL-2.0-or-later
/*
 *
 * Linux MegaRAID device driver
 *
 * Copyright (c) 2003-2004 LSI Logic Corporation.
 *
 * FILE : megaraid_mm.c
 * Version : v2.20.2.7 (Jul 16 2006)
 *
 * Common management module
 */
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/mutex.h>
#include "megaraid_mm.h"


// Entry points for char node driver
static DEFINE_MUTEX(mraid_mm_mutex);
static int mraid_mm_open(struct inode *, struct file *);
static long mraid_mm_unlocked_ioctl(struct file *, uint, unsigned long);


// routines to convert to and from the old format
static int mimd_to_kioc(mimd_t __user *, mraid_mmadp_t *, uioc_t *);
static int kioc_to_mimd(uioc_t *, mimd_t __user *);


// Helper functions
static int handle_drvrcmd(void __user *, uint8_t, int *);
static int lld_ioctl(mraid_mmadp_t *, uioc_t *);
static void ioctl_done(uioc_t *);
static void lld_timedout(struct timer_list *);
static void hinfo_to_cinfo(mraid_hba_info_t *, mcontroller_t *);
static mraid_mmadp_t *mraid_mm_get_adapter(mimd_t __user *, int *);
static uioc_t *mraid_mm_alloc_kioc(mraid_mmadp_t *);
static void mraid_mm_dealloc_kioc(mraid_mmadp_t *, uioc_t *);
static int mraid_mm_attach_buf(mraid_mmadp_t *, uioc_t *, int);
static int mraid_mm_setup_dma_pools(mraid_mmadp_t *);
static void mraid_mm_free_adp_resources(mraid_mmadp_t *);
static void mraid_mm_teardown_dma_pools(mraid_mmadp_t *);

MODULE_AUTHOR("LSI Logic Corporation");
MODULE_DESCRIPTION("LSI Logic Management Module");
MODULE_LICENSE("GPL");
MODULE_VERSION(LSI_COMMON_MOD_VERSION);

static int dbglevel = CL_ANN;
module_param_named(dlevel, dbglevel, int, 0);
MODULE_PARM_DESC(dlevel, "Debug level (default=0)");

EXPORT_SYMBOL(mraid_mm_register_adp);
EXPORT_SYMBOL(mraid_mm_unregister_adp);
EXPORT_SYMBOL(mraid_mm_adapter_app_handle);

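/* Driver version reported back to applications; 0x02200207 encodes v2.20.2.7 */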
static uint32_t drvr_ver = 0x02200207;

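/* Count and list of adapters registered with this management module */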
static int adapters_count_g;
static struct list_head adapters_list_g;

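/* ioctl issuers sleep here until ioctl_done() or lld_timedout() wakes them */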
static wait_queue_head_t wait_q;

static const struct file_operations lsi_fops = {
	.open = mraid_mm_open,
	.unlocked_ioctl = mraid_mm_unlocked_ioctl,
	.compat_ioctl = compat_ptr_ioctl,
	.owner = THIS_MODULE,
	.llseek = noop_llseek,
};

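/* Misc char device node (megadev0) through which management apps issue ioctls */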
static struct miscdevice megaraid_mm_dev = {
	.minor = MISC_DYNAMIC_MINOR,
	.name = "megadev0",
	.fops = &lsi_fops,
};

/**
 * mraid_mm_open - open routine for char node interface
 * @inode : unused
 * @filep : unused
 *
 * Allow ioctl operations by apps only if they have superuser privilege.
 */
static int
mraid_mm_open(struct inode *inode, struct file *filep)
{
	/*
	 * Only allow superuser to access private ioctl interface
	 */
	if (!capable(CAP_SYS_ADMIN))
		return (-EACCES);

	return 0;
}

/**
 * mraid_mm_ioctl - module entry-point for ioctls
 * @filep : file operations pointer (ignored)
 * @cmd : ioctl command
 * @arg : user ioctl packet
 */
static int
mraid_mm_ioctl(struct file *filep, unsigned int cmd, unsigned long arg)
{
	uioc_t *kioc;
	char signature[EXT_IOCTL_SIGN_SZ] = {0};
	int rval;
	mraid_mmadp_t *adp;
	uint8_t old_ioctl;
	int drvrcmd_rval;
	void __user *argp = (void __user *)arg;

	/*
	 * Make sure only USCSICMD are issued through this interface.
	 * MIMD applications would still fire different commands.
	 */

	if ((_IOC_TYPE(cmd) != MEGAIOC_MAGIC) && (cmd != USCSICMD)) {
		return (-EINVAL);
	}

	/*
	 * Look for signature to see if this is the new or old ioctl format.
	 */
	if (copy_from_user(signature, argp, EXT_IOCTL_SIGN_SZ)) {
		con_log(CL_ANN, (KERN_WARNING
			"megaraid cmm: copy from usr addr failed\n"));
		return (-EFAULT);
	}

	if (memcmp(signature, EXT_IOCTL_SIGN, EXT_IOCTL_SIGN_SZ) == 0)
		old_ioctl = 0;
	else
		old_ioctl = 1;

	/*
	 * At present, we don't support the new ioctl packet
	 */
	if (!old_ioctl)
		return (-EINVAL);

	/*
	 * If it is a driver ioctl (as opposed to fw ioctls), then we can
	 * handle the command locally. rval > 0 means it is not a drvr cmd
	 */
	rval = handle_drvrcmd(argp, old_ioctl, &drvrcmd_rval);

	if (rval < 0)
		return rval;
	else if (rval == 0)
		return drvrcmd_rval;

	rval = 0;
	if ((adp = mraid_mm_get_adapter(argp, &rval)) == NULL) {
		return rval;
	}

	/*
	 * Check if adapter can accept ioctl. We may have marked it offline
	 * if any previous kioc had timedout on this controller.
	 */
	if (!adp->quiescent) {
		con_log(CL_ANN, (KERN_WARNING
			"megaraid cmm: controller cannot accept cmds due to "
			"earlier errors\n"));
		return -EFAULT;
	}

	/*
	 * The following call will block till a kioc is available
	 * or return NULL if the free kioc list of the mraid_mmadp_t
	 * passed to mraid_mm_alloc_kioc is empty
	 */
	kioc = mraid_mm_alloc_kioc(adp);
	if (!kioc)
		return -ENXIO;

	/*
	 * User sent the old mimd_t ioctl packet. Convert it to uioc_t.
	 */
	if ((rval = mimd_to_kioc(argp, adp, kioc))) {
		mraid_mm_dealloc_kioc(adp, kioc);
		return rval;
	}

	kioc->done = ioctl_done;

	/*
	 * Issue the IOCTL to the low level driver. After the IOCTL completes
	 * release the kioc if and only if it was _not_ timedout. If it was
	 * timedout, that means that resources are still with low level driver.
	 */
	if ((rval = lld_ioctl(adp, kioc))) {

		if (!kioc->timedout)
			mraid_mm_dealloc_kioc(adp, kioc);

		return rval;
	}

	/*
	 * Convert the kioc back to user space
	 */
	rval = kioc_to_mimd(kioc, argp);

	/*
	 * Return the kioc to free pool
	 */
	mraid_mm_dealloc_kioc(adp, kioc);

	return rval;
}

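/**
 * mraid_mm_unlocked_ioctl - locked wrapper for the ioctl entry point
 * @filep : file operations pointer (ignored)
 * @cmd : ioctl command
 * @arg : user ioctl packet
 *
 * Serializes all ioctls through mraid_mm_mutex before handing them over
 * to mraid_mm_ioctl().
 */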
static long
mraid_mm_unlocked_ioctl(struct file *filep, unsigned int cmd,
			unsigned long arg)
{
	int err;

	mutex_lock(&mraid_mm_mutex);
	err = mraid_mm_ioctl(filep, cmd, arg);
	mutex_unlock(&mraid_mm_mutex);

	return err;
}

/**
 * mraid_mm_get_adapter - Returns the corresponding adapter for the mimd packet
 * @umimd : User space mimd_t ioctl packet
 * @rval : returned success/error status
 *
 * The function returns a pointer to the located adapter, or NULL with the
 * error code stored in @rval if it cannot be found.
 */
static mraid_mmadp_t *
mraid_mm_get_adapter(mimd_t __user *umimd, int *rval)
{
	mraid_mmadp_t *adapter;
	mimd_t mimd;
	uint32_t adapno;
	int iterator;
	bool is_found;

	if (copy_from_user(&mimd, umimd, sizeof(mimd_t))) {
		*rval = -EFAULT;
		return NULL;
	}

	adapno = GETADAP(mimd.ui.fcs.adapno);

	if (adapno >= adapters_count_g) {
		*rval = -ENODEV;
		return NULL;
	}

	adapter = NULL;
	iterator = 0;
	is_found = false;

	list_for_each_entry(adapter, &adapters_list_g, list) {
		if (iterator++ == adapno) {
			is_found = true;
			break;
		}
	}

	if (!is_found) {
		*rval = -ENODEV;
		return NULL;
	}

	return adapter;
}

/**
 * handle_drvrcmd - Checks if the opcode is a driver cmd and if it is, handles it.
 * @arg : packet sent by the user app
 * @old_ioctl : mimd if 1; uioc otherwise
 * @rval : pointer for command's returned value (not function status)
 */
static int
handle_drvrcmd(void __user *arg, uint8_t old_ioctl, int *rval)
{
	mimd_t __user *umimd;
	mimd_t kmimd;
	uint8_t opcode;
	uint8_t subopcode;

	if (old_ioctl)
		goto old_packet;
	else
		goto new_packet;

new_packet:
	return (-ENOTSUPP);

old_packet:
	*rval = 0;
	umimd = arg;

	if (copy_from_user(&kmimd, umimd, sizeof(mimd_t)))
		return (-EFAULT);

	opcode = kmimd.ui.fcs.opcode;
	subopcode = kmimd.ui.fcs.subopcode;

	/*
	 * If the opcode is 0x82 and the subopcode is either MEGAIOC_QDRVRVER
	 * or MEGAIOC_QNADAP, then we can handle it here. Otherwise we should
	 * return 1 to indicate that we cannot handle this.
	 */
	if (opcode != 0x82)
		return 1;

	switch (subopcode) {

	case MEGAIOC_QDRVRVER:

		if (copy_to_user(kmimd.data, &drvr_ver, sizeof(uint32_t)))
			return (-EFAULT);

		return 0;

	case MEGAIOC_QNADAP:

		*rval = adapters_count_g;

		if (copy_to_user(kmimd.data, &adapters_count_g,
				 sizeof(uint32_t)))
			return (-EFAULT);

		return 0;

	default:
		/* cannot handle */
		return 1;
	}

	return 0;
}


/**
 * mimd_to_kioc - Converter from old to new ioctl format
 * @umimd : user space old MIMD IOCTL
 * @adp : adapter softstate
 * @kioc : kernel space new format IOCTL
 *
 * Routine to convert MIMD interface IOCTL to new interface IOCTL packet. The
 * new packet is in kernel space so that driver can perform operations on it
 * freely.
 */

static int
mimd_to_kioc(mimd_t __user *umimd, mraid_mmadp_t *adp, uioc_t *kioc)
{
	mbox64_t *mbox64;
	mbox_t *mbox;
	mraid_passthru_t *pthru32;
	uint32_t adapno;
	uint8_t opcode;
	uint8_t subopcode;
	mimd_t mimd;

	if (copy_from_user(&mimd, umimd, sizeof(mimd_t)))
		return (-EFAULT);

	/*
	 * Applications are not allowed to send extd pthru
	 */
	if ((mimd.mbox[0] == MBOXCMD_PASSTHRU64) ||
	    (mimd.mbox[0] == MBOXCMD_EXTPTHRU))
		return (-EINVAL);

	opcode = mimd.ui.fcs.opcode;
	subopcode = mimd.ui.fcs.subopcode;
	adapno = GETADAP(mimd.ui.fcs.adapno);

	if (adapno >= adapters_count_g)
		return (-ENODEV);

	kioc->adapno = adapno;
	kioc->mb_type = MBOX_LEGACY;
	kioc->app_type = APPTYPE_MIMD;

	switch (opcode) {

	case 0x82:

		if (subopcode == MEGAIOC_QADAPINFO) {

			kioc->opcode = GET_ADAP_INFO;
			kioc->data_dir = UIOC_RD;
			kioc->xferlen = sizeof(mraid_hba_info_t);

			if (mraid_mm_attach_buf(adp, kioc, kioc->xferlen))
				return (-ENOMEM);
		}
		else {
			con_log(CL_ANN, (KERN_WARNING
				"megaraid cmm: Invalid subop\n"));
			return (-EINVAL);
		}

		break;

	case 0x81:

		kioc->opcode = MBOX_CMD;
		kioc->xferlen = mimd.ui.fcs.length;
		kioc->user_data_len = kioc->xferlen;
		kioc->user_data = mimd.ui.fcs.buffer;

		if (mraid_mm_attach_buf(adp, kioc, kioc->xferlen))
			return (-ENOMEM);

		if (mimd.outlen)
			kioc->data_dir = UIOC_RD;
		if (mimd.inlen)
			kioc->data_dir |= UIOC_WR;

		break;

	case 0x80:

		kioc->opcode = MBOX_CMD;
		kioc->xferlen = (mimd.outlen > mimd.inlen) ?
				mimd.outlen : mimd.inlen;
		kioc->user_data_len = kioc->xferlen;
		kioc->user_data = mimd.data;

		if (mraid_mm_attach_buf(adp, kioc, kioc->xferlen))
			return (-ENOMEM);

		if (mimd.outlen)
			kioc->data_dir = UIOC_RD;
		if (mimd.inlen)
			kioc->data_dir |= UIOC_WR;

		break;

	default:
		return (-EINVAL);
	}

	/*
	 * If driver command, nothing else to do
	 */
	if (opcode == 0x82)
		return 0;

	/*
	 * This is a mailbox cmd; copy the mailbox from mimd
	 */
	mbox64 = (mbox64_t *)((unsigned long)kioc->cmdbuf);
	mbox = &mbox64->mbox32;
	memcpy(mbox, mimd.mbox, 14);

	if (mbox->cmd != MBOXCMD_PASSTHRU) {	// regular DCMD

		mbox->xferaddr = (uint32_t)kioc->buf_paddr;

		if (kioc->data_dir & UIOC_WR) {
			if (copy_from_user(kioc->buf_vaddr, kioc->user_data,
					   kioc->xferlen)) {
				return (-EFAULT);
			}
		}

		return 0;
	}

	/*
	 * This is a regular 32-bit pthru cmd; mbox points to pthru struct.
	 * Just like in the above case, the beginning of the memblk is treated
	 * as a mailbox. The passthru will begin at the next 1K boundary. And
	 * the data will start 1K after that.
	 */
	pthru32 = kioc->pthru32;
	kioc->user_pthru = &umimd->pthru;
	mbox->xferaddr = (uint32_t)kioc->pthru32_h;

	if (copy_from_user(pthru32, kioc->user_pthru,
			   sizeof(mraid_passthru_t))) {
		return (-EFAULT);
	}

	pthru32->dataxferaddr = kioc->buf_paddr;
	if (kioc->data_dir & UIOC_WR) {
		if (pthru32->dataxferlen > kioc->xferlen)
			return -EINVAL;
		if (copy_from_user(kioc->buf_vaddr, kioc->user_data,
				   pthru32->dataxferlen)) {
			return (-EFAULT);
		}
	}

	return 0;
}

/**
 * mraid_mm_attach_buf - Attach a free dma buffer for required size
 * @adp : Adapter softstate
 * @kioc : kioc that the buffer needs to be attached to
 * @xferlen : required length for buffer
 *
 * First we search for a pool with the smallest buffer that is >= @xferlen. If
 * that pool has no free buffer, we will try for the next bigger size. If none
 * is available, we will try to allocate the smallest buffer that is >=
 * @xferlen and attach it to the pool.
 */
static int
mraid_mm_attach_buf(mraid_mmadp_t *adp, uioc_t *kioc, int xferlen)
{
	mm_dmapool_t *pool;
	int right_pool = -1;
	unsigned long flags;
	int i;

	kioc->pool_index = -1;
	kioc->buf_vaddr = NULL;
	kioc->buf_paddr = 0;
	kioc->free_buf = 0;

	/*
	 * We need xferlen amount of memory. See if we can get it from our
	 * dma pools. If we don't get exact size, we will try bigger buffer
	 */

	for (i = 0; i < MAX_DMA_POOLS; i++) {

		pool = &adp->dma_pool_list[i];

		if (xferlen > pool->buf_size)
			continue;

		if (right_pool == -1)
			right_pool = i;

		spin_lock_irqsave(&pool->lock, flags);

		if (!pool->in_use) {

			pool->in_use = 1;
			kioc->pool_index = i;
			kioc->buf_vaddr = pool->vaddr;
			kioc->buf_paddr = pool->paddr;

			spin_unlock_irqrestore(&pool->lock, flags);
			return 0;
		}
		else {
			spin_unlock_irqrestore(&pool->lock, flags);
			continue;
		}
	}

	/*
	 * If xferlen doesn't match any of our pools, return error
	 */
	if (right_pool == -1)
		return -EINVAL;

	/*
	 * We did not get any buffer from the preallocated pool. Let us try
	 * to allocate a new buffer from the matching pool. NOTE: this is
	 * done with GFP_ATOMIC under the pool lock, so it does not sleep.
	 */
	pool = &adp->dma_pool_list[right_pool];

	spin_lock_irqsave(&pool->lock, flags);

	kioc->pool_index = right_pool;
	kioc->free_buf = 1;
	kioc->buf_vaddr = dma_pool_alloc(pool->handle, GFP_ATOMIC,
					 &kioc->buf_paddr);
	spin_unlock_irqrestore(&pool->lock, flags);

	if (!kioc->buf_vaddr)
		return -ENOMEM;

	return 0;
}

/**
 * mraid_mm_alloc_kioc - Returns a uioc_t from free list
 * @adp : Adapter softstate for this module
 *
 * The kioc_semaphore is initialized with number of kioc nodes in the
 * free kioc pool. If the kioc pool is empty, this function blocks till
 * a kioc becomes free.
 */
static uioc_t *
mraid_mm_alloc_kioc(mraid_mmadp_t *adp)
{
	uioc_t *kioc;
	struct list_head *head;
	unsigned long flags;

	down(&adp->kioc_semaphore);

	spin_lock_irqsave(&adp->kioc_pool_lock, flags);

	head = &adp->kioc_pool;

	if (list_empty(head)) {
		up(&adp->kioc_semaphore);
		spin_unlock_irqrestore(&adp->kioc_pool_lock, flags);

		con_log(CL_ANN, ("megaraid cmm: kioc list empty!\n"));
		return NULL;
	}

	kioc = list_entry(head->next, uioc_t, list);
	list_del_init(&kioc->list);

	spin_unlock_irqrestore(&adp->kioc_pool_lock, flags);

	memset((caddr_t)(unsigned long)kioc->cmdbuf, 0, sizeof(mbox64_t));
	memset((caddr_t) kioc->pthru32, 0, sizeof(mraid_passthru_t));

	kioc->buf_vaddr = NULL;
	kioc->buf_paddr = 0;
	kioc->pool_index = -1;
	kioc->free_buf = 0;
	kioc->user_data = NULL;
	kioc->user_data_len = 0;
	kioc->user_pthru = NULL;
	kioc->timedout = 0;

	return kioc;
}

/**
 * mraid_mm_dealloc_kioc - Return kioc to free pool
 * @adp : Adapter softstate
 * @kioc : uioc_t node to be returned to free pool
 */
static void
mraid_mm_dealloc_kioc(mraid_mmadp_t *adp, uioc_t *kioc)
{
	mm_dmapool_t *pool;
	unsigned long flags;

	if (kioc->pool_index != -1) {
		pool = &adp->dma_pool_list[kioc->pool_index];

		/* This routine may be called in non-isr context also */
		spin_lock_irqsave(&pool->lock, flags);

		/*
		 * While attaching the dma buffer, if we didn't get the
		 * required buffer from the pool, we would have allocated
		 * it at the run time and set the free_buf flag. We must
		 * free that buffer. Otherwise, just mark that the buffer is
		 * not in use
		 */
		if (kioc->free_buf == 1)
			dma_pool_free(pool->handle, kioc->buf_vaddr,
				      kioc->buf_paddr);
		else
			pool->in_use = 0;

		spin_unlock_irqrestore(&pool->lock, flags);
	}

	/* Return the kioc to the free pool */
	spin_lock_irqsave(&adp->kioc_pool_lock, flags);
	list_add(&kioc->list, &adp->kioc_pool);
	spin_unlock_irqrestore(&adp->kioc_pool_lock, flags);

	/* increment the free kioc count */
	up(&adp->kioc_semaphore);

	return;
}

/**
 * lld_ioctl - Routine to issue ioctl to low level drvr
 * @adp : The adapter handle
 * @kioc : The ioctl packet with kernel addresses
 */
static int
lld_ioctl(mraid_mmadp_t *adp, uioc_t *kioc)
{
	int rval;
	struct uioc_timeout timeout = { };

	kioc->status = -ENODATA;
	rval = adp->issue_uioc(adp->drvr_data, kioc, IOCTL_ISSUE);

	if (rval)
		return rval;

	/*
	 * Start the timer
	 */
	if (adp->timeout > 0) {
		timeout.uioc = kioc;
		timer_setup_on_stack(&timeout.timer, lld_timedout, 0);

		timeout.timer.expires = jiffies + adp->timeout * HZ;

		add_timer(&timeout.timer);
	}

	/*
	 * Wait till the low level driver completes the ioctl. After this
	 * call, the ioctl either completed successfully or timedout.
	 */
	wait_event(wait_q, (kioc->status != -ENODATA));
	if (timeout.timer.function) {
		del_timer_sync(&timeout.timer);
		destroy_timer_on_stack(&timeout.timer);
	}

	/*
	 * If the command had timedout, we mark the controller offline
	 * before returning
	 */
	if (kioc->timedout) {
		adp->quiescent = 0;
	}

	return kioc->status;
}


/**
 * ioctl_done - callback from the low level driver
 * @kioc : completed ioctl packet
 */
static void
ioctl_done(uioc_t *kioc)
{
	uint32_t adapno;
	int iterator;
	mraid_mmadp_t *adapter;
	bool is_found;

	/*
	 * When the kioc returns from driver, make sure it still doesn't
	 * have ENODATA in status. Otherwise, driver will hang on wait_event
	 * forever
	 */
	if (kioc->status == -ENODATA) {
		con_log(CL_ANN, (KERN_WARNING
			"megaraid cmm: lld didn't change status!\n"));

		kioc->status = -EINVAL;
	}

	/*
	 * Check if this kioc was timedout before. If so, nobody is waiting
	 * on this kioc. We don't have to wake up anybody. Instead, we just
	 * have to free the kioc
	 */
	if (kioc->timedout) {
		iterator = 0;
		adapter = NULL;
		adapno = kioc->adapno;
		is_found = false;

		con_log(CL_ANN, (KERN_WARNING "megaraid cmm: completed "
			"ioctl that was timedout before\n"));

		list_for_each_entry(adapter, &adapters_list_g, list) {
			if (iterator++ == adapno) {
				is_found = true;
				break;
			}
		}

		kioc->timedout = 0;

		if (is_found)
			mraid_mm_dealloc_kioc(adapter, kioc);

	}
	else {
		wake_up(&wait_q);
	}
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 777)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 778)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 779) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 780) * lld_timedout - callback from the expired timer
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 781) * @t : timer that timed out
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 782) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 783) static void
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 784) lld_timedout(struct timer_list *t)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 785) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 786) struct uioc_timeout *timeout = from_timer(timeout, t, timer);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 787) uioc_t *kioc = timeout->uioc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 788)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 789) kioc->status = -ETIME;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 790) kioc->timedout = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 791)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 792) con_log(CL_ANN, (KERN_WARNING "megaraid cmm: ioctl timed out\n"));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 793)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 794) wake_up(&wait_q);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 795) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 796)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 797)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 798) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 799) * kioc_to_mimd - Converter from new back to old format
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 800) * @kioc : Kernel space IOCTL packet (successfully issued)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 801) * @mimd : User space MIMD packet
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 802) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 803) static int
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 804) kioc_to_mimd(uioc_t *kioc, mimd_t __user *mimd)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 805) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 806) mimd_t kmimd;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 807) uint8_t opcode;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 808) uint8_t subopcode;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 809)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 810) mbox64_t *mbox64;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 811) mraid_passthru_t __user *upthru32;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 812) mraid_passthru_t *kpthru32;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 813) mcontroller_t cinfo;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 814) mraid_hba_info_t *hinfo;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 815)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 816)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 817) if (copy_from_user(&kmimd, mimd, sizeof(mimd_t)))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 818) return (-EFAULT);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 819)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 820) opcode = kmimd.ui.fcs.opcode;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 821) subopcode = kmimd.ui.fcs.subopcode;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 822)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 823) if (opcode == 0x82) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 824) switch (subopcode) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 825)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 826) case MEGAIOC_QADAPINFO:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 827)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 828) hinfo = (mraid_hba_info_t *)(unsigned long)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 829) kioc->buf_vaddr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 830)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 831) hinfo_to_cinfo(hinfo, &cinfo);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 832)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 833) if (copy_to_user(kmimd.data, &cinfo, sizeof(cinfo)))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 834) return (-EFAULT);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 835)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 836) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 837)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 838) default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 839) return (-EINVAL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 840) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 841)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 842) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 843) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 844)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 845) mbox64 = (mbox64_t *)(unsigned long)kioc->cmdbuf;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 846)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 847) if (kioc->user_pthru) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 848)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 849) upthru32 = kioc->user_pthru;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 850) kpthru32 = kioc->pthru32;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 851)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 852) if (copy_to_user(&upthru32->scsistatus,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 853) &kpthru32->scsistatus,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 854) sizeof(uint8_t))) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 855) return (-EFAULT);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 856) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 857) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 858)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 859) if (kioc->user_data) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 860) if (copy_to_user(kioc->user_data, kioc->buf_vaddr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 861) kioc->user_data_len)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 862) return (-EFAULT);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 863) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 864) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 865)
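	/*
	 * In the old mimd_t layout the mailbox status byte lives at
	 * mbox[17], so only that single byte is copied back into the
	 * user's packet here.
	 */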
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 866) if (copy_to_user(&mimd->mbox[17],
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 867) &mbox64->mbox32.status, sizeof(uint8_t))) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 868) return (-EFAULT);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 869) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 870)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 871) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 872) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 873)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 874)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 875) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 876) * hinfo_to_cinfo - Convert new format hba info into old format
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 877) * @hinfo : New format, more comprehensive adapter info
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 878) * @cinfo : Old format adapter info to support mimd_t apps
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 879) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 880) static void
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 881) hinfo_to_cinfo(mraid_hba_info_t *hinfo, mcontroller_t *cinfo)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 882) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 883) if (!hinfo || !cinfo)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 884) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 885)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 886) cinfo->base = hinfo->baseport;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 887) cinfo->irq = hinfo->irq;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 888) cinfo->numldrv = hinfo->num_ldrv;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 889) cinfo->pcibus = hinfo->pci_bus;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 890) cinfo->pcidev = hinfo->pci_slot;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 891) cinfo->pcifun = PCI_FUNC(hinfo->pci_dev_fn);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 892) cinfo->pciid = hinfo->pci_device_id;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 893) cinfo->pcivendor = hinfo->pci_vendor_id;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 894) cinfo->pcislot = hinfo->pci_slot;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 895) cinfo->uid = hinfo->unique_id;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 896) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 897)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 898)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 899) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 900) * mraid_mm_register_adp - Registration routine for low level drivers
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 901) * @lld_adp : Adapter object
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 902) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 903) int
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 904) mraid_mm_register_adp(mraid_mmadp_t *lld_adp)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 905) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 906) mraid_mmadp_t *adapter;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 907) mbox64_t *mbox_list;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 908) uioc_t *kioc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 909) uint32_t rval;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 910) int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 911)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 912)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 913) if (lld_adp->drvr_type != DRVRTYPE_MBOX)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 914) return (-EINVAL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 915)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 916) adapter = kzalloc(sizeof(mraid_mmadp_t), GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 917)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 918) if (!adapter)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 919) return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 920)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 921)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 922) adapter->unique_id = lld_adp->unique_id;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 923) adapter->drvr_type = lld_adp->drvr_type;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 924) adapter->drvr_data = lld_adp->drvr_data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 925) adapter->pdev = lld_adp->pdev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 926) adapter->issue_uioc = lld_adp->issue_uioc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 927) adapter->timeout = lld_adp->timeout;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 928) adapter->max_kioc = lld_adp->max_kioc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 929) adapter->quiescent = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 930)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 931) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 932) * Allocate single blocks of memory for all required kiocs,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 933) * mailboxes and passthru structures.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 934) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 935) adapter->kioc_list = kmalloc_array(lld_adp->max_kioc,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 936) sizeof(uioc_t),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 937) GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 938) adapter->mbox_list = kmalloc_array(lld_adp->max_kioc,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 939) sizeof(mbox64_t),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 940) GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 941) adapter->pthru_dma_pool = dma_pool_create("megaraid mm pthru pool",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 942) &adapter->pdev->dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 943) sizeof(mraid_passthru_t),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 944) 16, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 945)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 946) if (!adapter->kioc_list || !adapter->mbox_list ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 947) !adapter->pthru_dma_pool) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 948)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 949) con_log(CL_ANN, (KERN_WARNING
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 950) "megaraid cmm: out of memory, %s %d\n", __func__,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 951) __LINE__));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 952)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 953) rval = (-ENOMEM);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 954)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 955) goto memalloc_error;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 956) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 957)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 958) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 959)  * Slice kioc_list and make a kioc_pool with the individual kiocs
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 960) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 961) INIT_LIST_HEAD(&adapter->kioc_pool);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 962) spin_lock_init(&adapter->kioc_pool_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 963) sema_init(&adapter->kioc_semaphore, lld_adp->max_kioc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 964)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 965) mbox_list = (mbox64_t *)adapter->mbox_list;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 966)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 967) for (i = 0; i < lld_adp->max_kioc; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 968)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 969) kioc = adapter->kioc_list + i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 970) kioc->cmdbuf = (uint64_t)(unsigned long)(mbox_list + i);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 971) kioc->pthru32 = dma_pool_alloc(adapter->pthru_dma_pool,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 972) GFP_KERNEL, &kioc->pthru32_h);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 973)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 974) if (!kioc->pthru32) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 975)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 976) con_log(CL_ANN, (KERN_WARNING
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 977) "megaraid cmm: out of memory, %s %d\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 978) __func__, __LINE__));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 979)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 980) rval = (-ENOMEM);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 981)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 982) goto pthru_dma_pool_error;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 983) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 984)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 985) list_add_tail(&kioc->list, &adapter->kioc_pool);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 986) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 987)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 988) 	// Set up the dma pools for data buffers
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 989) if ((rval = mraid_mm_setup_dma_pools(adapter)) != 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 990) goto dma_pool_error;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 991) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 992)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 993) list_add_tail(&adapter->list, &adapters_list_g);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 994)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 995) adapters_count_g++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 996)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 997) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 998)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 999) dma_pool_error:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1000) /* Do nothing */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1001)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1002) pthru_dma_pool_error:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1003)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1004) for (i = 0; i < lld_adp->max_kioc; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1005) kioc = adapter->kioc_list + i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1006) if (kioc->pthru32) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1007) dma_pool_free(adapter->pthru_dma_pool, kioc->pthru32,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1008) kioc->pthru32_h);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1009) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1010) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1011)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1012) memalloc_error:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1013)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1014) kfree(adapter->kioc_list);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1015) kfree(adapter->mbox_list);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1016)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1017) dma_pool_destroy(adapter->pthru_dma_pool);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1018)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1019) kfree(adapter);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1020)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1021) return rval;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1022) }
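
/*
 * Illustrative sketch (not compiled into this module): how a low level
 * driver might fill an mraid_mmadp_t and register with the common module.
 * Only the fields that mraid_mm_register_adp() above copies are shown.
 * "struct example_lld", "example_issue_uioc" and the numeric values are
 * hypothetical stand-ins for whatever the real LLD (e.g. megaraid_mbox)
 * keeps in its own softstate.
 */
#if 0
static int example_register_with_cmm(struct example_lld *lld)
{
	mraid_mmadp_t mmadp;

	memset(&mmadp, 0, sizeof(mmadp));

	mmadp.unique_id		= lld->unique_id;	/* hypothetical field */
	mmadp.drvr_type		= DRVRTYPE_MBOX;	/* only type accepted */
	mmadp.drvr_data		= (unsigned long)lld;	/* cookie for the LLD */
	mmadp.pdev		= lld->pdev;		/* used for dma pools */
	mmadp.issue_uioc	= example_issue_uioc;	/* LLD uioc handler */
	mmadp.timeout		= 300;			/* hypothetical value */
	mmadp.max_kioc		= 32;			/* hypothetical value */

	return mraid_mm_register_adp(&mmadp);
}
#endif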
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1023)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1024)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1025) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1026) * mraid_mm_adapter_app_handle - return the application handle for this adapter
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1027) * @unique_id : adapter unique identifier
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1028) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1029)  * For the given unique id, locate the adapter in our global list and
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1030)  * return the corresponding handle, which is also used by applications to
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1031)  * uniquely identify an adapter.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1032)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1033)  * Return the adapter handle if it is found in the list.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1034)  * Return 0 if the adapter could not be located (this should never happen).
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1035) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1036) uint32_t
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1037) mraid_mm_adapter_app_handle(uint32_t unique_id)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1038) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1039) mraid_mmadp_t *adapter;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1040) mraid_mmadp_t *tmp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1041) int index = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1042)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1043) list_for_each_entry_safe(adapter, tmp, &adapters_list_g, list) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1044)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1045) if (adapter->unique_id == unique_id) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1046)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1047) return MKADAP(index);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1048) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1049)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1050) index++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1051) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1052)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1053) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1054) }
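
/*
 * Illustrative usage (not compiled): a low level driver can look up the
 * application-visible handle for its controller by its own unique id,
 * e.g. to export it in driver-private info structures; "lld" below is a
 * hypothetical LLD softstate.
 *
 *	app_handle = mraid_mm_adapter_app_handle(lld->unique_id);
 */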
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1055)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1056)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1057) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1058) * mraid_mm_setup_dma_pools - Set up dma buffer pools per adapter
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1059) * @adp : Adapter softstate
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1060) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1061)  * We maintain a set of dma buffer pools for each adapter. Each pool holds
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1062)  * one buffer. E.g., we may have 5 dma pools - one each for 4k, 8k ... 64k
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1063)  * buffers: just one 4k buffer in the 4k pool, one 8k buffer in the 8k pool,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1064)  * and so on. We don't want to waste too much memory by allocating more
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1065)  * buffers per pool.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1066) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1067) static int
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1068) mraid_mm_setup_dma_pools(mraid_mmadp_t *adp)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1069) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1070) mm_dmapool_t *pool;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1071) int bufsize;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1072) int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1073)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1074) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1075) * Create MAX_DMA_POOLS number of pools
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1076) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1077) bufsize = MRAID_MM_INIT_BUFF_SIZE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1078)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1079) for (i = 0; i < MAX_DMA_POOLS; i++){
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1080)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1081) pool = &adp->dma_pool_list[i];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1082)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1083) pool->buf_size = bufsize;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1084) spin_lock_init(&pool->lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1085)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1086) pool->handle = dma_pool_create("megaraid mm data buffer",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1087) &adp->pdev->dev, bufsize,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1088) 16, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1089)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1090) if (!pool->handle) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1091) goto dma_pool_setup_error;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1092) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1093)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1094) pool->vaddr = dma_pool_alloc(pool->handle, GFP_KERNEL,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1095) &pool->paddr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1096)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1097) if (!pool->vaddr)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1098) goto dma_pool_setup_error;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1099)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1100) bufsize = bufsize * 2;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1101) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1102)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1103) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1104)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1105) dma_pool_setup_error:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1106)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1107) mraid_mm_teardown_dma_pools(adp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1108) return (-ENOMEM);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1109) }
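
/*
 * With the scheme described in the kernel-doc above, the per-adapter pools
 * form a doubling series starting at MRAID_MM_INIT_BUFF_SIZE; e.g. with a
 * 4k initial size and five pools:
 *
 *	pool 0: 4k, pool 1: 8k, pool 2: 16k, pool 3: 32k, pool 4: 64k
 *
 * each holding exactly one pre-allocated buffer (pool->vaddr/pool->paddr).
 */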
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1110)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1111)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1112) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1113) * mraid_mm_unregister_adp - Unregister routine for low level drivers
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1114)  * @unique_id : UID of the adapter
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1115) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1116) * Assumes no outstanding ioctls to llds.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1117) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1118) int
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1119) mraid_mm_unregister_adp(uint32_t unique_id)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1120) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1121) mraid_mmadp_t *adapter;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1122) mraid_mmadp_t *tmp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1123)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1124) list_for_each_entry_safe(adapter, tmp, &adapters_list_g, list) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1125)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1126)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1127) if (adapter->unique_id == unique_id) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1128)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1129) adapters_count_g--;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1130)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1131) list_del_init(&adapter->list);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1132)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1133) mraid_mm_free_adp_resources(adapter);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1134)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1135) kfree(adapter);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1136)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1137) con_log(CL_ANN, (
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1138) "megaraid cmm: Unregistered one adapter:%#x\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1139) unique_id));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1140)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1141) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1142) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1143) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1144)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1145) return (-ENODEV);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1146) }
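
/*
 * Illustrative sketch (not compiled): the matching teardown an LLD would do
 * from its remove path, after it has made sure no ioctls are outstanding
 * (see the kernel-doc note above). "struct example_lld" is hypothetical.
 */
#if 0
static void example_unregister_from_cmm(struct example_lld *lld)
{
	if (mraid_mm_unregister_adp(lld->unique_id) != 0)
		dev_warn(&lld->pdev->dev,
			"megaraid cmm: adapter was not registered\n");
}
#endif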
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1147)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1148) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1149) * mraid_mm_free_adp_resources - Free adapter softstate
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1150) * @adp : Adapter softstate
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1151) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1152) static void
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1153) mraid_mm_free_adp_resources(mraid_mmadp_t *adp)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1154) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1155) uioc_t *kioc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1156) int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1157)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1158) mraid_mm_teardown_dma_pools(adp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1159)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1160) for (i = 0; i < adp->max_kioc; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1161)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1162) kioc = adp->kioc_list + i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1163)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1164) dma_pool_free(adp->pthru_dma_pool, kioc->pthru32,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1165) kioc->pthru32_h);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1166) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1167)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1168) kfree(adp->kioc_list);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1169) kfree(adp->mbox_list);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1170)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1171) dma_pool_destroy(adp->pthru_dma_pool);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1172)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1173)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1174) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1175) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1176)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1177)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1178) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1179) * mraid_mm_teardown_dma_pools - Free all per adapter dma buffers
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1180) * @adp : Adapter softstate
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1181) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1182) static void
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1183) mraid_mm_teardown_dma_pools(mraid_mmadp_t *adp)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1184) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1185) int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1186) mm_dmapool_t *pool;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1187)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1188) for (i = 0; i < MAX_DMA_POOLS; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1189)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1190) pool = &adp->dma_pool_list[i];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1191)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1192) if (pool->handle) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1193)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1194) if (pool->vaddr)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1195) dma_pool_free(pool->handle, pool->vaddr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1196) pool->paddr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1197)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1198) dma_pool_destroy(pool->handle);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1199) pool->handle = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1200) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1201) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1202)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1203) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1204) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1205)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1206) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1207) * mraid_mm_init - Module entry point
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1208) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1209) static int __init
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1210) mraid_mm_init(void)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1211) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1212) int err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1213)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1214) // Announce the driver version
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1215) con_log(CL_ANN, (KERN_INFO "megaraid cmm: %s %s\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1216) LSI_COMMON_MOD_VERSION, LSI_COMMON_MOD_EXT_VERSION));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1217)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1218) err = misc_register(&megaraid_mm_dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1219) if (err < 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1220) con_log(CL_ANN, ("megaraid cmm: cannot register misc device\n"));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1221) return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1222) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1223)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1224) init_waitqueue_head(&wait_q);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1225)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1226) INIT_LIST_HEAD(&adapters_list_g);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1227)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1228) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1229) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1230)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1231)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1232) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1233) * mraid_mm_exit - Module exit point
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1234) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1235) static void __exit
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1236) mraid_mm_exit(void)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1237) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1238) 	con_log(CL_DLEVEL1, ("exiting common mod\n"));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1239)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1240) misc_deregister(&megaraid_mm_dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1241) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1242)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1243) module_init(mraid_mm_init);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1244) module_exit(mraid_mm_exit);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1245)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1246) /* vi: set ts=8 sw=8 tw=78: */