Orange Pi5 kernel

Deprecated Linux kernel 5.10.110 for OrangePi 5/5B/5+ boards

3 Commits   0 Branches   0 Tags
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   1) // SPDX-License-Identifier: GPL-2.0
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   2) /* Copyright(c) 2019 Intel Corporation. All rights rsvd. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   3) #include <linux/init.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   4) #include <linux/kernel.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   5) #include <linux/module.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   6) #include <linux/pci.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   7) #include <linux/device.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   8) #include <linux/sched/task.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   9) #include <linux/intel-svm.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  10) #include <linux/io-64-nonatomic-lo-hi.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  11) #include <linux/cdev.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  12) #include <linux/fs.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  13) #include <linux/poll.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  14) #include <uapi/linux/idxd.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  15) #include "registers.h"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  16) #include "idxd.h"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  17) 
/*
 * Per accelerator-type character device context: the chrdev region base
 * returned by alloc_chrdev_region() and the IDA handing out per-wq minors.
 */
struct idxd_cdev_context {
	const char *name;	/* device type name, e.g. "dsa" */
	dev_t devt;		/* base dev_t of the registered chrdev region */
	struct ida minor_ida;	/* allocator for cdev minor numbers */
};
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  23) 
/*
 * ictx is an array based off of accelerator types. enum idxd_type
 * is used as index. Entries without an initializer stay zeroed.
 */
static struct idxd_cdev_context ictx[IDXD_TYPE_MAX] = {
	{ .name = "dsa" },
};
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  31) 
/* Per-open-file state, stored in file->private_data by idxd_cdev_open(). */
struct idxd_user_context {
	struct idxd_wq *wq;		/* wq this file descriptor holds a ref on */
	struct task_struct *task;	/* NOTE(review): never assigned in this file */
	unsigned int flags;		/* NOTE(review): never assigned in this file */
};
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  37) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  38) static void idxd_cdev_dev_release(struct device *dev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  39) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  40) 	struct idxd_cdev *idxd_cdev = container_of(dev, struct idxd_cdev, dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  41) 	struct idxd_cdev_context *cdev_ctx;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  42) 	struct idxd_wq *wq = idxd_cdev->wq;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  43) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  44) 	cdev_ctx = &ictx[wq->idxd->type];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  45) 	ida_simple_remove(&cdev_ctx->minor_ida, idxd_cdev->minor);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  46) 	kfree(idxd_cdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  47) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  48) 
/* Device type so the driver core calls our release on last put_device(). */
static struct device_type idxd_cdev_device_type = {
	.name = "idxd_cdev",
	.release = idxd_cdev_dev_release,
};
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  53) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  54) static inline struct idxd_cdev *inode_idxd_cdev(struct inode *inode)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  55) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  56) 	struct cdev *cdev = inode->i_cdev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  57) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  58) 	return container_of(cdev, struct idxd_cdev, cdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  59) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  60) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  61) static inline struct idxd_wq *inode_wq(struct inode *inode)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  62) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  63) 	struct idxd_cdev *idxd_cdev = inode_idxd_cdev(inode);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  64) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  65) 	return idxd_cdev->wq;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  66) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  67) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  68) static int idxd_cdev_open(struct inode *inode, struct file *filp)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  69) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  70) 	struct idxd_user_context *ctx;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  71) 	struct idxd_device *idxd;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  72) 	struct idxd_wq *wq;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  73) 	struct device *dev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  74) 	int rc = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  75) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  76) 	wq = inode_wq(inode);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  77) 	idxd = wq->idxd;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  78) 	dev = &idxd->pdev->dev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  79) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  80) 	dev_dbg(dev, "%s called: %d\n", __func__, idxd_wq_refcount(wq));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  81) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  82) 	ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  83) 	if (!ctx)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  84) 		return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  85) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  86) 	mutex_lock(&wq->wq_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  87) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  88) 	if (idxd_wq_refcount(wq) > 0 && wq_dedicated(wq)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  89) 		rc = -EBUSY;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  90) 		goto failed;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  91) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  92) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  93) 	ctx->wq = wq;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  94) 	filp->private_data = ctx;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  95) 	idxd_wq_get(wq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  96) 	mutex_unlock(&wq->wq_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  97) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  98) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  99)  failed:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 100) 	mutex_unlock(&wq->wq_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 101) 	kfree(ctx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 102) 	return rc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 103) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 104) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 105) static int idxd_cdev_release(struct inode *node, struct file *filep)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 106) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 107) 	struct idxd_user_context *ctx = filep->private_data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 108) 	struct idxd_wq *wq = ctx->wq;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 109) 	struct idxd_device *idxd = wq->idxd;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 110) 	struct device *dev = &idxd->pdev->dev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 111) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 112) 	dev_dbg(dev, "%s called\n", __func__);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 113) 	filep->private_data = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 114) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 115) 	/* Wait for in-flight operations to complete. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 116) 	idxd_wq_drain(wq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 117) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 118) 	kfree(ctx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 119) 	mutex_lock(&wq->wq_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 120) 	idxd_wq_put(wq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 121) 	mutex_unlock(&wq->wq_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 122) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 123) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 124) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 125) static int check_vma(struct idxd_wq *wq, struct vm_area_struct *vma,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 126) 		     const char *func)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 127) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 128) 	struct device *dev = &wq->idxd->pdev->dev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 129) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 130) 	if ((vma->vm_end - vma->vm_start) > PAGE_SIZE) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 131) 		dev_info_ratelimited(dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 132) 				     "%s: %s: mapping too large: %lu\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 133) 				     current->comm, func,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 134) 				     vma->vm_end - vma->vm_start);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 135) 		return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 136) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 137) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 138) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 139) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 140) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 141) static int idxd_cdev_mmap(struct file *filp, struct vm_area_struct *vma)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 142) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 143) 	struct idxd_user_context *ctx = filp->private_data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 144) 	struct idxd_wq *wq = ctx->wq;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 145) 	struct idxd_device *idxd = wq->idxd;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 146) 	struct pci_dev *pdev = idxd->pdev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 147) 	phys_addr_t base = pci_resource_start(pdev, IDXD_WQ_BAR);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 148) 	unsigned long pfn;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 149) 	int rc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 150) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 151) 	dev_dbg(&pdev->dev, "%s called\n", __func__);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 152) 	rc = check_vma(wq, vma, __func__);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 153) 	if (rc < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 154) 		return rc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 155) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 156) 	vma->vm_flags |= VM_DONTCOPY;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 157) 	pfn = (base + idxd_get_wq_portal_full_offset(wq->id,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 158) 				IDXD_PORTAL_LIMITED)) >> PAGE_SHIFT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 159) 	vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 160) 	vma->vm_private_data = ctx;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 161) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 162) 	return io_remap_pfn_range(vma, vma->vm_start, pfn, PAGE_SIZE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 163) 			vma->vm_page_prot);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 164) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 165) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 166) static __poll_t idxd_cdev_poll(struct file *filp,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 167) 			       struct poll_table_struct *wait)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 168) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 169) 	struct idxd_user_context *ctx = filp->private_data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 170) 	struct idxd_wq *wq = ctx->wq;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 171) 	struct idxd_device *idxd = wq->idxd;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 172) 	unsigned long flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 173) 	__poll_t out = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 174) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 175) 	poll_wait(filp, &wq->err_queue, wait);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 176) 	spin_lock_irqsave(&idxd->dev_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 177) 	if (idxd->sw_err.valid)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 178) 		out = EPOLLIN | EPOLLRDNORM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 179) 	spin_unlock_irqrestore(&idxd->dev_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 180) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 181) 	return out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 182) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 183) 
/* File operations backing each idxd user character device node. */
static const struct file_operations idxd_cdev_fops = {
	.owner = THIS_MODULE,
	.open = idxd_cdev_open,
	.release = idxd_cdev_release,
	.mmap = idxd_cdev_mmap,
	.poll = idxd_cdev_poll,
};
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 191) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 192) int idxd_cdev_get_major(struct idxd_device *idxd)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 193) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 194) 	return MAJOR(ictx[idxd->type].devt);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 195) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 196) 
/*
 * Allocate and register the user character device for a work queue.
 *
 * On success wq->idxd_cdev points at the new idxd_cdev; its memory and
 * minor number are released by idxd_cdev_dev_release() when the last
 * reference to the embedded struct device is dropped.
 * Returns 0 on success or a negative errno.
 */
int idxd_wq_add_cdev(struct idxd_wq *wq)
{
	struct idxd_device *idxd = wq->idxd;
	struct idxd_cdev *idxd_cdev;
	struct cdev *cdev;
	struct device *dev;
	struct idxd_cdev_context *cdev_ctx;
	int rc, minor;

	idxd_cdev = kzalloc(sizeof(*idxd_cdev), GFP_KERNEL);
	if (!idxd_cdev)
		return -ENOMEM;

	idxd_cdev->wq = wq;
	cdev = &idxd_cdev->cdev;
	dev = &idxd_cdev->dev;
	cdev_ctx = &ictx[wq->idxd->type];
	minor = ida_simple_get(&cdev_ctx->minor_ida, 0, MINORMASK, GFP_KERNEL);
	if (minor < 0) {
		/* Before device_initialize(): plain kfree() is still correct. */
		kfree(idxd_cdev);
		return minor;
	}
	idxd_cdev->minor = minor;

	/*
	 * From here on the device refcount owns idxd_cdev; error paths
	 * must use put_device() (which ends in idxd_cdev_dev_release()),
	 * never kfree().
	 */
	device_initialize(dev);
	dev->parent = &wq->conf_dev;
	dev->bus = idxd_get_bus_type(idxd);
	dev->type = &idxd_cdev_device_type;
	dev->devt = MKDEV(MAJOR(cdev_ctx->devt), minor);

	rc = dev_set_name(dev, "%s/wq%u.%u", idxd_get_dev_name(idxd),
			  idxd->id, wq->id);
	if (rc < 0)
		goto err;

	wq->idxd_cdev = idxd_cdev;
	cdev_init(cdev, &idxd_cdev_fops);
	rc = cdev_device_add(cdev, dev);
	if (rc) {
		dev_dbg(&wq->idxd->pdev->dev, "cdev_add failed: %d\n", rc);
		goto err;
	}

	return 0;

 err:
	/* Release frees the minor and the idxd_cdev allocation. */
	put_device(dev);
	wq->idxd_cdev = NULL;
	return rc;
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 247) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 248) void idxd_wq_del_cdev(struct idxd_wq *wq)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 249) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 250) 	struct idxd_cdev *idxd_cdev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 251) 	struct idxd_cdev_context *cdev_ctx;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 252) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 253) 	cdev_ctx = &ictx[wq->idxd->type];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 254) 	idxd_cdev = wq->idxd_cdev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 255) 	wq->idxd_cdev = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 256) 	cdev_device_del(&idxd_cdev->cdev, &idxd_cdev->dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 257) 	put_device(&idxd_cdev->dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 258) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 259) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 260) int idxd_cdev_register(void)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 261) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 262) 	int rc, i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 263) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 264) 	for (i = 0; i < IDXD_TYPE_MAX; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 265) 		ida_init(&ictx[i].minor_ida);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 266) 		rc = alloc_chrdev_region(&ictx[i].devt, 0, MINORMASK,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 267) 					 ictx[i].name);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 268) 		if (rc)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 269) 			return rc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 270) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 271) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 272) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 273) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 274) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 275) void idxd_cdev_remove(void)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 276) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 277) 	int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 278) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 279) 	for (i = 0; i < IDXD_TYPE_MAX; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 280) 		unregister_chrdev_region(ictx[i].devt, MINORMASK);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 281) 		ida_destroy(&ictx[i].minor_ida);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 282) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 283) }