Orange Pi 5 kernel

Deprecated Linux kernel 5.10.110 sources for the Orange Pi 5 / 5B / 5 Plus boards

3 Commits   0 Branches   0 Tags
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300    1) // SPDX-License-Identifier: GPL-2.0-or-later
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300    2) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300    3)  * CXL Flash Device Driver
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300    4)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300    5)  * Written by: Matthew R. Ochs <mrochs@linux.vnet.ibm.com>, IBM Corporation
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300    6)  *             Uma Krishnan <ukrishn@linux.vnet.ibm.com>, IBM Corporation
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300    7)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300    8)  * Copyright (C) 2018 IBM Corporation
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300    9)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   10) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   11) #include <linux/file.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   12) #include <linux/idr.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   13) #include <linux/module.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   14) #include <linux/mount.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   15) #include <linux/pseudo_fs.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   16) #include <linux/poll.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   17) #include <linux/sched/signal.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   18) #include <linux/interrupt.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   19) #include <asm/xive.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   20) #include <misc/ocxl.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   21) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   22) #include <uapi/misc/cxl.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   23) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   24) #include "backend.h"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   25) #include "ocxl_hw.h"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   26) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   27) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   28)  * Pseudo-filesystem to allocate inodes.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   29)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   30) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   31) #define OCXLFLASH_FS_MAGIC      0x1697698f
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   32) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   33) static int ocxlflash_fs_cnt;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   34) static struct vfsmount *ocxlflash_vfs_mount;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   35) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   36) static int ocxlflash_fs_init_fs_context(struct fs_context *fc)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   37) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   38) 	return init_pseudo(fc, OCXLFLASH_FS_MAGIC) ? 0 : -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   39) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   40) 
/* Pseudo filesystem type used only to mint anonymous inodes/files */
static struct file_system_type ocxlflash_fs_type = {
	.name		= "ocxlflash",
	.owner		= THIS_MODULE,
	.init_fs_context = ocxlflash_fs_init_fs_context,
	.kill_sb	= kill_anon_super,	/* nothing persistent to tear down */
};
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   47) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   48) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   49)  * ocxlflash_release_mapping() - release the memory mapping
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   50)  * @ctx:	Context whose mapping is to be released.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   51)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   52) static void ocxlflash_release_mapping(struct ocxlflash_context *ctx)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   53) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   54) 	if (ctx->mapping)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   55) 		simple_release_fs(&ocxlflash_vfs_mount, &ocxlflash_fs_cnt);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   56) 	ctx->mapping = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   57) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   58) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   59) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   60)  * ocxlflash_getfile() - allocate pseudo filesystem, inode, and the file
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   61)  * @dev:	Generic device of the host.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   62)  * @name:	Name of the pseudo filesystem.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   63)  * @fops:	File operations.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   64)  * @priv:	Private data.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   65)  * @flags:	Flags for the file.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   66)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   67)  * Return: pointer to the file on success, ERR_PTR on failure
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   68)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   69) static struct file *ocxlflash_getfile(struct device *dev, const char *name,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   70) 				      const struct file_operations *fops,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   71) 				      void *priv, int flags)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   72) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   73) 	struct file *file;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   74) 	struct inode *inode;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   75) 	int rc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   76) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   77) 	if (fops->owner && !try_module_get(fops->owner)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   78) 		dev_err(dev, "%s: Owner does not exist\n", __func__);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   79) 		rc = -ENOENT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   80) 		goto err1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   81) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   82) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   83) 	rc = simple_pin_fs(&ocxlflash_fs_type, &ocxlflash_vfs_mount,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   84) 			   &ocxlflash_fs_cnt);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   85) 	if (unlikely(rc < 0)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   86) 		dev_err(dev, "%s: Cannot mount ocxlflash pseudofs rc=%d\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   87) 			__func__, rc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   88) 		goto err2;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   89) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   90) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   91) 	inode = alloc_anon_inode(ocxlflash_vfs_mount->mnt_sb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   92) 	if (IS_ERR(inode)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   93) 		rc = PTR_ERR(inode);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   94) 		dev_err(dev, "%s: alloc_anon_inode failed rc=%d\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   95) 			__func__, rc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   96) 		goto err3;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   97) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   98) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   99) 	file = alloc_file_pseudo(inode, ocxlflash_vfs_mount, name,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  100) 				 flags & (O_ACCMODE | O_NONBLOCK), fops);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  101) 	if (IS_ERR(file)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  102) 		rc = PTR_ERR(file);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  103) 		dev_err(dev, "%s: alloc_file failed rc=%d\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  104) 			__func__, rc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  105) 		goto err4;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  106) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  107) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  108) 	file->private_data = priv;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  109) out:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  110) 	return file;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  111) err4:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  112) 	iput(inode);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  113) err3:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  114) 	simple_release_fs(&ocxlflash_vfs_mount, &ocxlflash_fs_cnt);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  115) err2:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  116) 	module_put(fops->owner);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  117) err1:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  118) 	file = ERR_PTR(rc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  119) 	goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  120) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  121) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  122) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  123)  * ocxlflash_psa_map() - map the process specific MMIO space
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  124)  * @ctx_cookie:	Adapter context for which the mapping needs to be done.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  125)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  126)  * Return: MMIO pointer of the mapped region
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  127)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  128) static void __iomem *ocxlflash_psa_map(void *ctx_cookie)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  129) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  130) 	struct ocxlflash_context *ctx = ctx_cookie;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  131) 	struct device *dev = ctx->hw_afu->dev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  132) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  133) 	mutex_lock(&ctx->state_mutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  134) 	if (ctx->state != STARTED) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  135) 		dev_err(dev, "%s: Context not started, state=%d\n", __func__,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  136) 			ctx->state);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  137) 		mutex_unlock(&ctx->state_mutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  138) 		return NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  139) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  140) 	mutex_unlock(&ctx->state_mutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  141) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  142) 	return ioremap(ctx->psn_phys, ctx->psn_size);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  143) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  144) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  145) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  146)  * ocxlflash_psa_unmap() - unmap the process specific MMIO space
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  147)  * @addr:	MMIO pointer to unmap.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  148)  */
static void ocxlflash_psa_unmap(void __iomem *addr)
{
	/*
	 * Straight pass-through to iounmap(); addr is expected to come
	 * from a successful ocxlflash_psa_map() call.
	 */
	iounmap(addr);
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  153) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  154) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  155)  * ocxlflash_process_element() - get process element of the adapter context
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  156)  * @ctx_cookie:	Adapter context associated with the process element.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  157)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  158)  * Return: process element of the adapter context
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  159)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  160) static int ocxlflash_process_element(void *ctx_cookie)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  161) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  162) 	struct ocxlflash_context *ctx = ctx_cookie;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  163) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  164) 	return ctx->pe;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  165) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  166) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  167) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  168)  * afu_map_irq() - map the interrupt of the adapter context
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  169)  * @flags:	Flags.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  170)  * @ctx:	Adapter context.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  171)  * @num:	Per-context AFU interrupt number.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  172)  * @handler:	Interrupt handler to register.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  173)  * @cookie:	Interrupt handler private data.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  174)  * @name:	Name of the interrupt.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  175)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  176)  * Return: 0 on success, -errno on failure
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  177)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  178) static int afu_map_irq(u64 flags, struct ocxlflash_context *ctx, int num,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  179) 		       irq_handler_t handler, void *cookie, char *name)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  180) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  181) 	struct ocxl_hw_afu *afu = ctx->hw_afu;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  182) 	struct device *dev = afu->dev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  183) 	struct ocxlflash_irqs *irq;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  184) 	struct xive_irq_data *xd;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  185) 	u32 virq;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  186) 	int rc = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  187) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  188) 	if (num < 0 || num >= ctx->num_irqs) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  189) 		dev_err(dev, "%s: Interrupt %d not allocated\n", __func__, num);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  190) 		rc = -ENOENT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  191) 		goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  192) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  193) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  194) 	irq = &ctx->irqs[num];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  195) 	virq = irq_create_mapping(NULL, irq->hwirq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  196) 	if (unlikely(!virq)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  197) 		dev_err(dev, "%s: irq_create_mapping failed\n", __func__);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  198) 		rc = -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  199) 		goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  200) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  201) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  202) 	rc = request_irq(virq, handler, 0, name, cookie);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  203) 	if (unlikely(rc)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  204) 		dev_err(dev, "%s: request_irq failed rc=%d\n", __func__, rc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  205) 		goto err1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  206) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  207) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  208) 	xd = irq_get_handler_data(virq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  209) 	if (unlikely(!xd)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  210) 		dev_err(dev, "%s: Can't get interrupt data\n", __func__);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  211) 		rc = -ENXIO;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  212) 		goto err2;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  213) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  214) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  215) 	irq->virq = virq;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  216) 	irq->vtrig = xd->trig_mmio;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  217) out:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  218) 	return rc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  219) err2:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  220) 	free_irq(virq, cookie);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  221) err1:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  222) 	irq_dispose_mapping(virq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  223) 	goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  224) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  225) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  226) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  227)  * ocxlflash_map_afu_irq() - map the interrupt of the adapter context
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  228)  * @ctx_cookie:	Adapter context.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  229)  * @num:	Per-context AFU interrupt number.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  230)  * @handler:	Interrupt handler to register.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  231)  * @cookie:	Interrupt handler private data.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  232)  * @name:	Name of the interrupt.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  233)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  234)  * Return: 0 on success, -errno on failure
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  235)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  236) static int ocxlflash_map_afu_irq(void *ctx_cookie, int num,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  237) 				 irq_handler_t handler, void *cookie,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  238) 				 char *name)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  239) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  240) 	return afu_map_irq(0, ctx_cookie, num, handler, cookie, name);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  241) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  242) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  243) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  244)  * afu_unmap_irq() - unmap the interrupt
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  245)  * @flags:	Flags.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  246)  * @ctx:	Adapter context.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  247)  * @num:	Per-context AFU interrupt number.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  248)  * @cookie:	Interrupt handler private data.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  249)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  250) static void afu_unmap_irq(u64 flags, struct ocxlflash_context *ctx, int num,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  251) 			  void *cookie)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  252) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  253) 	struct ocxl_hw_afu *afu = ctx->hw_afu;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  254) 	struct device *dev = afu->dev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  255) 	struct ocxlflash_irqs *irq;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  256) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  257) 	if (num < 0 || num >= ctx->num_irqs) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  258) 		dev_err(dev, "%s: Interrupt %d not allocated\n", __func__, num);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  259) 		return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  260) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  261) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  262) 	irq = &ctx->irqs[num];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  263) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  264) 	if (irq_find_mapping(NULL, irq->hwirq)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  265) 		free_irq(irq->virq, cookie);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  266) 		irq_dispose_mapping(irq->virq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  267) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  268) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  269) 	memset(irq, 0, sizeof(*irq));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  270) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  271) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  272) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  273)  * ocxlflash_unmap_afu_irq() - unmap the interrupt
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  274)  * @ctx_cookie:	Adapter context.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  275)  * @num:	Per-context AFU interrupt number.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  276)  * @cookie:	Interrupt handler private data.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  277)  */
static void ocxlflash_unmap_afu_irq(void *ctx_cookie, int num, void *cookie)
{
	/*
	 * Thin fop adapter around afu_unmap_irq(). Note: the original
	 * wrote "return afu_unmap_irq(...)", which returns an expression
	 * from a void function — a C11 6.8.6.4p1 constraint violation
	 * that GCC only accepts as an extension. Call it as a plain
	 * statement instead; behavior is unchanged.
	 */
	afu_unmap_irq(0, ctx_cookie, num, cookie);
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  282) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  283) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  284)  * ocxlflash_get_irq_objhndl() - get the object handle for an interrupt
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  285)  * @ctx_cookie:	Context associated with the interrupt.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  286)  * @irq:	Interrupt number.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  287)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  288)  * Return: effective address of the mapped region
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  289)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  290) static u64 ocxlflash_get_irq_objhndl(void *ctx_cookie, int irq)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  291) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  292) 	struct ocxlflash_context *ctx = ctx_cookie;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  293) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  294) 	if (irq < 0 || irq >= ctx->num_irqs)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  295) 		return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  296) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  297) 	return (__force u64)ctx->irqs[irq].vtrig;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  298) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  299) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  300) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  301)  * ocxlflash_xsl_fault() - callback when translation error is triggered
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  302)  * @data:	Private data provided at callback registration, the context.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  303)  * @addr:	Address that triggered the error.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  304)  * @dsisr:	Value of dsisr register.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  305)  */
static void ocxlflash_xsl_fault(void *data, u64 addr, u64 dsisr)
{
	struct ocxlflash_context *ctx = data;

	/*
	 * Record the fault details under ctx->slock so consumers see a
	 * complete (addr, dsisr) pair; pending_fault is set last inside
	 * the critical section to flag the record as valid.
	 */
	spin_lock(&ctx->slock);
	ctx->fault_addr = addr;
	ctx->fault_dsisr = dsisr;
	ctx->pending_fault = true;
	spin_unlock(&ctx->slock);

	/* Wake sleepers on the context wait queue — presumably the
	 * poll/read file ops elsewhere in this file; confirm. */
	wake_up_all(&ctx->wq);
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  318) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  319) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  320)  * start_context() - local routine to start a context
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  321)  * @ctx:	Adapter context to be started.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  322)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  323)  * Assign the context specific MMIO space, add and enable the PE.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  324)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  325)  * Return: 0 on success, -errno on failure
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  326)  */
static int start_context(struct ocxlflash_context *ctx)
{
	struct ocxl_hw_afu *afu = ctx->hw_afu;
	struct ocxl_afu_config *acfg = &afu->acfg;
	void *link_token = afu->link_token;
	struct device *dev = afu->dev;
	bool master = ctx->master;
	struct mm_struct *mm;
	int rc = 0;
	u32 pid;

	/* State transitions are serialized on state_mutex; only an
	 * OPENED context may be started. */
	mutex_lock(&ctx->state_mutex);
	if (ctx->state != OPENED) {
		dev_err(dev, "%s: Context state invalid, state=%d\n",
			__func__, ctx->state);
		rc = -EINVAL;
		goto out;
	}

	/* Master contexts get the global MMIO window; others get their
	 * per-process slice, indexed by PE within the stride. */
	if (master) {
		ctx->psn_size = acfg->global_mmio_size;
		ctx->psn_phys = afu->gmmio_phys;
	} else {
		ctx->psn_size = acfg->pp_mmio_stride;
		ctx->psn_phys = afu->ppmmio_phys + (ctx->pe * ctx->psn_size);
	}

	/* pid and mm not set for master contexts */
	if (master) {
		pid = 0;
		mm = NULL;
	} else {
		/* NOTE(review): current->mm is dereferenced without a
		 * NULL check — assumes this is never reached from a
		 * kernel thread; confirm against callers. */
		pid = current->mm->context.id;
		mm = current->mm;
	}

	/* Register the PE with the OCXL link; translation faults are
	 * delivered to ocxlflash_xsl_fault() with ctx as private data. */
	rc = ocxl_link_add_pe(link_token, ctx->pe, pid, 0, 0, mm,
			      ocxlflash_xsl_fault, ctx);
	if (unlikely(rc)) {
		dev_err(dev, "%s: ocxl_link_add_pe failed rc=%d\n",
			__func__, rc);
		goto out;
	}

	ctx->state = STARTED;
out:
	mutex_unlock(&ctx->state_mutex);
	return rc;
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  376) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  377) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  378)  * ocxlflash_start_context() - start a kernel context
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  379)  * @ctx_cookie:	Adapter context to be started.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  380)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  381)  * Return: 0 on success, -errno on failure
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  382)  */
static int ocxlflash_start_context(void *ctx_cookie)
{
	/* Cookie-typed fop adapter around the local start routine */
	return start_context((struct ocxlflash_context *)ctx_cookie);
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  389) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  390) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  391)  * ocxlflash_stop_context() - stop a context
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  392)  * @ctx_cookie:	Adapter context to be stopped.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  393)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  394)  * Return: 0 on success, -errno on failure
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  395)  */
static int ocxlflash_stop_context(void *ctx_cookie)
{
	struct ocxlflash_context *ctx = ctx_cookie;
	struct ocxl_hw_afu *afu = ctx->hw_afu;
	struct ocxl_afu_config *acfg = &afu->acfg;
	struct pci_dev *pdev = afu->pdev;
	struct device *dev = afu->dev;
	enum ocxlflash_ctx_state state;
	int rc = 0;

	/* Mark the context CLOSED first so no new users start it; the
	 * prior state decides whether hardware teardown is needed. */
	mutex_lock(&ctx->state_mutex);
	state = ctx->state;
	ctx->state = CLOSED;
	mutex_unlock(&ctx->state_mutex);
	if (state != STARTED)
		goto out;

	/* Ask the AFU to quiesce the PASID before removing the PE */
	rc = ocxl_config_terminate_pasid(pdev, acfg->dvsec_afu_control_pos,
					 ctx->pe);
	if (unlikely(rc)) {
		dev_err(dev, "%s: ocxl_config_terminate_pasid failed rc=%d\n",
			__func__, rc);
		/* If EBUSY, PE could be referenced in future by the AFU */
		if (rc == -EBUSY)
			goto out;
	}

	/* Safe to drop the PE from the link once terminate succeeded
	 * (or failed with anything other than -EBUSY). */
	rc = ocxl_link_remove_pe(afu->link_token, ctx->pe);
	if (unlikely(rc)) {
		dev_err(dev, "%s: ocxl_link_remove_pe failed rc=%d\n",
			__func__, rc);
		goto out;
	}
out:
	return rc;
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  432) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  433) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  434)  * ocxlflash_afu_reset() - reset the AFU
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  435)  * @ctx_cookie:	Adapter context.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  436)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  437) static int ocxlflash_afu_reset(void *ctx_cookie)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  438) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  439) 	struct ocxlflash_context *ctx = ctx_cookie;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  440) 	struct device *dev = ctx->hw_afu->dev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  441) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  442) 	/* Pending implementation from OCXL transport services */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  443) 	dev_err_once(dev, "%s: afu_reset() fop not supported\n", __func__);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  444) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  445) 	/* Silently return success until it is implemented */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  446) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  447) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  448) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  449) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  450)  * ocxlflash_set_master() - sets the context as master
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  451)  * @ctx_cookie:	Adapter context to set as master.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  452)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  453) static void ocxlflash_set_master(void *ctx_cookie)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  454) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  455) 	struct ocxlflash_context *ctx = ctx_cookie;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  456) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  457) 	ctx->master = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  458) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  459) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  460) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  461)  * ocxlflash_get_context() - obtains the context associated with the host
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  462)  * @pdev:	PCI device associated with the host.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  463)  * @afu_cookie:	Hardware AFU associated with the host.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  464)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  465)  * Return: returns the pointer to host adapter context
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  466)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  467) static void *ocxlflash_get_context(struct pci_dev *pdev, void *afu_cookie)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  468) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  469) 	struct ocxl_hw_afu *afu = afu_cookie;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  470) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  471) 	return afu->ocxl_ctx;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  472) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  473) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  474) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  475)  * ocxlflash_dev_context_init() - allocate and initialize an adapter context
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  476)  * @pdev:	PCI device associated with the host.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  477)  * @afu_cookie:	Hardware AFU associated with the host.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  478)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  479)  * Return: returns the adapter context on success, ERR_PTR on failure
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  480)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  481) static void *ocxlflash_dev_context_init(struct pci_dev *pdev, void *afu_cookie)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  482) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  483) 	struct ocxl_hw_afu *afu = afu_cookie;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  484) 	struct device *dev = afu->dev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  485) 	struct ocxlflash_context *ctx;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  486) 	int rc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  487) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  488) 	ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  489) 	if (unlikely(!ctx)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  490) 		dev_err(dev, "%s: Context allocation failed\n", __func__);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  491) 		rc = -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  492) 		goto err1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  493) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  494) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  495) 	idr_preload(GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  496) 	rc = idr_alloc(&afu->idr, ctx, 0, afu->max_pasid, GFP_NOWAIT);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  497) 	idr_preload_end();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  498) 	if (unlikely(rc < 0)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  499) 		dev_err(dev, "%s: idr_alloc failed rc=%d\n", __func__, rc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  500) 		goto err2;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  501) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  502) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  503) 	spin_lock_init(&ctx->slock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  504) 	init_waitqueue_head(&ctx->wq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  505) 	mutex_init(&ctx->state_mutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  506) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  507) 	ctx->state = OPENED;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  508) 	ctx->pe = rc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  509) 	ctx->master = false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  510) 	ctx->mapping = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  511) 	ctx->hw_afu = afu;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  512) 	ctx->irq_bitmap = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  513) 	ctx->pending_irq = false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  514) 	ctx->pending_fault = false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  515) out:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  516) 	return ctx;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  517) err2:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  518) 	kfree(ctx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  519) err1:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  520) 	ctx = ERR_PTR(rc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  521) 	goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  522) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  523) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  524) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  525)  * ocxlflash_release_context() - releases an adapter context
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  526)  * @ctx_cookie:	Adapter context to be released.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  527)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  528)  * Return: 0 on success, -errno on failure
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  529)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  530) static int ocxlflash_release_context(void *ctx_cookie)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  531) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  532) 	struct ocxlflash_context *ctx = ctx_cookie;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  533) 	struct device *dev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  534) 	int rc = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  535) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  536) 	if (!ctx)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  537) 		goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  538) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  539) 	dev = ctx->hw_afu->dev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  540) 	mutex_lock(&ctx->state_mutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  541) 	if (ctx->state >= STARTED) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  542) 		dev_err(dev, "%s: Context in use, state=%d\n", __func__,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  543) 			ctx->state);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  544) 		mutex_unlock(&ctx->state_mutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  545) 		rc = -EBUSY;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  546) 		goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  547) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  548) 	mutex_unlock(&ctx->state_mutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  549) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  550) 	idr_remove(&ctx->hw_afu->idr, ctx->pe);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  551) 	ocxlflash_release_mapping(ctx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  552) 	kfree(ctx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  553) out:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  554) 	return rc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  555) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  556) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  557) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  558)  * ocxlflash_perst_reloads_same_image() - sets the image reload policy
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  559)  * @afu_cookie:	Hardware AFU associated with the host.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  560)  * @image:	Whether to load the same image on PERST.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  561)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  562) static void ocxlflash_perst_reloads_same_image(void *afu_cookie, bool image)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  563) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  564) 	struct ocxl_hw_afu *afu = afu_cookie;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  565) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  566) 	afu->perst_same_image = image;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  567) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  568) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  569) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  570)  * ocxlflash_read_adapter_vpd() - reads the adapter VPD
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  571)  * @pdev:	PCI device associated with the host.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  572)  * @buf:	Buffer to get the VPD data.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  573)  * @count:	Size of buffer (maximum bytes that can be read).
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  574)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  575)  * Return: size of VPD on success, -errno on failure
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  576)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  577) static ssize_t ocxlflash_read_adapter_vpd(struct pci_dev *pdev, void *buf,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  578) 					  size_t count)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  579) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  580) 	return pci_read_vpd(pdev, 0, count, buf);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  581) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  582) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  583) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  584)  * free_afu_irqs() - internal service to free interrupts
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  585)  * @ctx:	Adapter context.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  586)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  587) static void free_afu_irqs(struct ocxlflash_context *ctx)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  588) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  589) 	struct ocxl_hw_afu *afu = ctx->hw_afu;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  590) 	struct device *dev = afu->dev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  591) 	int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  592) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  593) 	if (!ctx->irqs) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  594) 		dev_err(dev, "%s: Interrupts not allocated\n", __func__);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  595) 		return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  596) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  597) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  598) 	for (i = ctx->num_irqs; i >= 0; i--)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  599) 		ocxl_link_free_irq(afu->link_token, ctx->irqs[i].hwirq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  600) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  601) 	kfree(ctx->irqs);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  602) 	ctx->irqs = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  603) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  604) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  605) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  606)  * alloc_afu_irqs() - internal service to allocate interrupts
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  607)  * @ctx:	Context associated with the request.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  608)  * @num:	Number of interrupts requested.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  609)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  610)  * Return: 0 on success, -errno on failure
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  611)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  612) static int alloc_afu_irqs(struct ocxlflash_context *ctx, int num)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  613) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  614) 	struct ocxl_hw_afu *afu = ctx->hw_afu;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  615) 	struct device *dev = afu->dev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  616) 	struct ocxlflash_irqs *irqs;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  617) 	int rc = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  618) 	int hwirq;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  619) 	int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  620) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  621) 	if (ctx->irqs) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  622) 		dev_err(dev, "%s: Interrupts already allocated\n", __func__);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  623) 		rc = -EEXIST;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  624) 		goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  625) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  626) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  627) 	if (num > OCXL_MAX_IRQS) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  628) 		dev_err(dev, "%s: Too many interrupts num=%d\n", __func__, num);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  629) 		rc = -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  630) 		goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  631) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  632) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  633) 	irqs = kcalloc(num, sizeof(*irqs), GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  634) 	if (unlikely(!irqs)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  635) 		dev_err(dev, "%s: Context irqs allocation failed\n", __func__);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  636) 		rc = -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  637) 		goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  638) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  639) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  640) 	for (i = 0; i < num; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  641) 		rc = ocxl_link_irq_alloc(afu->link_token, &hwirq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  642) 		if (unlikely(rc)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  643) 			dev_err(dev, "%s: ocxl_link_irq_alloc failed rc=%d\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  644) 				__func__, rc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  645) 			goto err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  646) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  647) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  648) 		irqs[i].hwirq = hwirq;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  649) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  650) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  651) 	ctx->irqs = irqs;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  652) 	ctx->num_irqs = num;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  653) out:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  654) 	return rc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  655) err:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  656) 	for (i = i-1; i >= 0; i--)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  657) 		ocxl_link_free_irq(afu->link_token, irqs[i].hwirq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  658) 	kfree(irqs);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  659) 	goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  660) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  661) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  662) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  663)  * ocxlflash_allocate_afu_irqs() - allocates the requested number of interrupts
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  664)  * @ctx_cookie:	Context associated with the request.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  665)  * @num:	Number of interrupts requested.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  666)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  667)  * Return: 0 on success, -errno on failure
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  668)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  669) static int ocxlflash_allocate_afu_irqs(void *ctx_cookie, int num)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  670) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  671) 	return alloc_afu_irqs(ctx_cookie, num);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  672) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  673) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  674) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  675)  * ocxlflash_free_afu_irqs() - frees the interrupts of an adapter context
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  676)  * @ctx_cookie:	Adapter context.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  677)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  678) static void ocxlflash_free_afu_irqs(void *ctx_cookie)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  679) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  680) 	free_afu_irqs(ctx_cookie);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  681) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  682) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  683) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  684)  * ocxlflash_unconfig_afu() - unconfigure the AFU
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  685)  * @afu: AFU associated with the host.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  686)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  687) static void ocxlflash_unconfig_afu(struct ocxl_hw_afu *afu)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  688) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  689) 	if (afu->gmmio_virt) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  690) 		iounmap(afu->gmmio_virt);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  691) 		afu->gmmio_virt = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  692) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  693) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  694) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  695) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  696)  * ocxlflash_destroy_afu() - destroy the AFU structure
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  697)  * @afu_cookie:	AFU to be freed.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  698)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  699) static void ocxlflash_destroy_afu(void *afu_cookie)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  700) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  701) 	struct ocxl_hw_afu *afu = afu_cookie;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  702) 	int pos;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  703) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  704) 	if (!afu)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  705) 		return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  706) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  707) 	ocxlflash_release_context(afu->ocxl_ctx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  708) 	idr_destroy(&afu->idr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  709) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  710) 	/* Disable the AFU */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  711) 	pos = afu->acfg.dvsec_afu_control_pos;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  712) 	ocxl_config_set_afu_state(afu->pdev, pos, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  713) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  714) 	ocxlflash_unconfig_afu(afu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  715) 	kfree(afu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  716) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  717) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  718) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  719)  * ocxlflash_config_fn() - configure the host function
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  720)  * @pdev:	PCI device associated with the host.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  721)  * @afu:	AFU associated with the host.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  722)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  723)  * Return: 0 on success, -errno on failure
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  724)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  725) static int ocxlflash_config_fn(struct pci_dev *pdev, struct ocxl_hw_afu *afu)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  726) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  727) 	struct ocxl_fn_config *fcfg = &afu->fcfg;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  728) 	struct device *dev = &pdev->dev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  729) 	u16 base, enabled, supported;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  730) 	int rc = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  731) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  732) 	/* Read DVSEC config of the function */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  733) 	rc = ocxl_config_read_function(pdev, fcfg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  734) 	if (unlikely(rc)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  735) 		dev_err(dev, "%s: ocxl_config_read_function failed rc=%d\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  736) 			__func__, rc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  737) 		goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  738) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  739) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  740) 	/* Check if function has AFUs defined, only 1 per function supported */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  741) 	if (fcfg->max_afu_index >= 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  742) 		afu->is_present = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  743) 		if (fcfg->max_afu_index != 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  744) 			dev_warn(dev, "%s: Unexpected AFU index value %d\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  745) 				 __func__, fcfg->max_afu_index);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  746) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  747) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  748) 	rc = ocxl_config_get_actag_info(pdev, &base, &enabled, &supported);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  749) 	if (unlikely(rc)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  750) 		dev_err(dev, "%s: ocxl_config_get_actag_info failed rc=%d\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  751) 			__func__, rc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  752) 		goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  753) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  754) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  755) 	afu->fn_actag_base = base;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  756) 	afu->fn_actag_enabled = enabled;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  757) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  758) 	ocxl_config_set_actag(pdev, fcfg->dvsec_function_pos, base, enabled);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  759) 	dev_dbg(dev, "%s: Function acTag range base=%u enabled=%u\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  760) 		__func__, base, enabled);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  761) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  762) 	rc = ocxl_link_setup(pdev, 0, &afu->link_token);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  763) 	if (unlikely(rc)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  764) 		dev_err(dev, "%s: ocxl_link_setup failed rc=%d\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  765) 			__func__, rc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  766) 		goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  767) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  768) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  769) 	rc = ocxl_config_set_TL(pdev, fcfg->dvsec_tl_pos);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  770) 	if (unlikely(rc)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  771) 		dev_err(dev, "%s: ocxl_config_set_TL failed rc=%d\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  772) 			__func__, rc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  773) 		goto err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  774) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  775) out:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  776) 	return rc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  777) err:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  778) 	ocxl_link_release(pdev, afu->link_token);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  779) 	goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  780) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  781) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  782) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  783)  * ocxlflash_unconfig_fn() - unconfigure the host function
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  784)  * @pdev:	PCI device associated with the host.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  785)  * @afu:	AFU associated with the host.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  786)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  787) static void ocxlflash_unconfig_fn(struct pci_dev *pdev, struct ocxl_hw_afu *afu)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  788) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  789) 	ocxl_link_release(pdev, afu->link_token);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  790) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  791) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  792) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  793)  * ocxlflash_map_mmio() - map the AFU MMIO space
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  794)  * @afu: AFU associated with the host.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  795)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  796)  * Return: 0 on success, -errno on failure
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  797)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  798) static int ocxlflash_map_mmio(struct ocxl_hw_afu *afu)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  799) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  800) 	struct ocxl_afu_config *acfg = &afu->acfg;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  801) 	struct pci_dev *pdev = afu->pdev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  802) 	struct device *dev = afu->dev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  803) 	phys_addr_t gmmio, ppmmio;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  804) 	int rc = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  805) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  806) 	rc = pci_request_region(pdev, acfg->global_mmio_bar, "ocxlflash");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  807) 	if (unlikely(rc)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  808) 		dev_err(dev, "%s: pci_request_region for global failed rc=%d\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  809) 			__func__, rc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  810) 		goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  811) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  812) 	gmmio = pci_resource_start(pdev, acfg->global_mmio_bar);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  813) 	gmmio += acfg->global_mmio_offset;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  814) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  815) 	rc = pci_request_region(pdev, acfg->pp_mmio_bar, "ocxlflash");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  816) 	if (unlikely(rc)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  817) 		dev_err(dev, "%s: pci_request_region for pp bar failed rc=%d\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  818) 			__func__, rc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  819) 		goto err1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  820) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  821) 	ppmmio = pci_resource_start(pdev, acfg->pp_mmio_bar);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  822) 	ppmmio += acfg->pp_mmio_offset;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  823) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  824) 	afu->gmmio_virt = ioremap(gmmio, acfg->global_mmio_size);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  825) 	if (unlikely(!afu->gmmio_virt)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  826) 		dev_err(dev, "%s: MMIO mapping failed\n", __func__);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  827) 		rc = -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  828) 		goto err2;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  829) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  830) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  831) 	afu->gmmio_phys = gmmio;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  832) 	afu->ppmmio_phys = ppmmio;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  833) out:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  834) 	return rc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  835) err2:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  836) 	pci_release_region(pdev, acfg->pp_mmio_bar);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  837) err1:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  838) 	pci_release_region(pdev, acfg->global_mmio_bar);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  839) 	goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  840) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  841) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  842) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  843)  * ocxlflash_config_afu() - configure the host AFU
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  844)  * @pdev:	PCI device associated with the host.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  845)  * @afu:	AFU associated with the host.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  846)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  847)  * Must be called _after_ host function configuration.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  848)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  849)  * Return: 0 on success, -errno on failure
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  850)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  851) static int ocxlflash_config_afu(struct pci_dev *pdev, struct ocxl_hw_afu *afu)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  852) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  853) 	struct ocxl_afu_config *acfg = &afu->acfg;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  854) 	struct ocxl_fn_config *fcfg = &afu->fcfg;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  855) 	struct device *dev = &pdev->dev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  856) 	int count;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  857) 	int base;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  858) 	int pos;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  859) 	int rc = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  860) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  861) 	/* This HW AFU function does not have any AFUs defined */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  862) 	if (!afu->is_present)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  863) 		goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  864) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  865) 	/* Read AFU config at index 0 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  866) 	rc = ocxl_config_read_afu(pdev, fcfg, acfg, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  867) 	if (unlikely(rc)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  868) 		dev_err(dev, "%s: ocxl_config_read_afu failed rc=%d\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  869) 			__func__, rc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  870) 		goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  871) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  872) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  873) 	/* Only one AFU per function is supported, so actag_base is same */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  874) 	base = afu->fn_actag_base;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  875) 	count = min_t(int, acfg->actag_supported, afu->fn_actag_enabled);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  876) 	pos = acfg->dvsec_afu_control_pos;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  877) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  878) 	ocxl_config_set_afu_actag(pdev, pos, base, count);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  879) 	dev_dbg(dev, "%s: acTag base=%d enabled=%d\n", __func__, base, count);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  880) 	afu->afu_actag_base = base;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  881) 	afu->afu_actag_enabled = count;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  882) 	afu->max_pasid = 1 << acfg->pasid_supported_log;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  883) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  884) 	ocxl_config_set_afu_pasid(pdev, pos, 0, acfg->pasid_supported_log);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  885) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  886) 	rc = ocxlflash_map_mmio(afu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  887) 	if (unlikely(rc)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  888) 		dev_err(dev, "%s: ocxlflash_map_mmio failed rc=%d\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  889) 			__func__, rc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  890) 		goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  891) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  892) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  893) 	/* Enable the AFU */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  894) 	ocxl_config_set_afu_state(pdev, acfg->dvsec_afu_control_pos, 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  895) out:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  896) 	return rc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  897) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  898) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  899) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  900)  * ocxlflash_create_afu() - create the AFU for OCXL
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  901)  * @pdev:	PCI device associated with the host.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  902)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  903)  * Return: AFU on success, NULL on failure
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  904)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  905) static void *ocxlflash_create_afu(struct pci_dev *pdev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  906) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  907) 	struct device *dev = &pdev->dev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  908) 	struct ocxlflash_context *ctx;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  909) 	struct ocxl_hw_afu *afu;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  910) 	int rc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  911) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  912) 	afu = kzalloc(sizeof(*afu), GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  913) 	if (unlikely(!afu)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  914) 		dev_err(dev, "%s: HW AFU allocation failed\n", __func__);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  915) 		goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  916) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  917) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  918) 	afu->pdev = pdev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  919) 	afu->dev = dev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  920) 	idr_init(&afu->idr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  921) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  922) 	rc = ocxlflash_config_fn(pdev, afu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  923) 	if (unlikely(rc)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  924) 		dev_err(dev, "%s: Function configuration failed rc=%d\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  925) 			__func__, rc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  926) 		goto err1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  927) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  928) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  929) 	rc = ocxlflash_config_afu(pdev, afu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  930) 	if (unlikely(rc)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  931) 		dev_err(dev, "%s: AFU configuration failed rc=%d\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  932) 			__func__, rc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  933) 		goto err2;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  934) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  935) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  936) 	ctx = ocxlflash_dev_context_init(pdev, afu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  937) 	if (IS_ERR(ctx)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  938) 		rc = PTR_ERR(ctx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  939) 		dev_err(dev, "%s: ocxlflash_dev_context_init failed rc=%d\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  940) 			__func__, rc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  941) 		goto err3;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  942) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  943) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  944) 	afu->ocxl_ctx = ctx;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  945) out:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  946) 	return afu;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  947) err3:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  948) 	ocxlflash_unconfig_afu(afu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  949) err2:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  950) 	ocxlflash_unconfig_fn(pdev, afu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  951) err1:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  952) 	idr_destroy(&afu->idr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  953) 	kfree(afu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  954) 	afu = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  955) 	goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  956) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  957) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  958) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  959)  * ctx_event_pending() - check for any event pending on the context
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  960)  * @ctx:	Context to be checked.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  961)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  962)  * Return: true if there is an event pending, false if none pending
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  963)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  964) static inline bool ctx_event_pending(struct ocxlflash_context *ctx)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  965) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  966) 	if (ctx->pending_irq || ctx->pending_fault)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  967) 		return true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  968) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  969) 	return false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  970) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  971) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  972) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  973)  * afu_poll() - poll the AFU for events on the context
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  974)  * @file:	File associated with the adapter context.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  975)  * @poll:	Poll structure from the user.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  976)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  977)  * Return: poll mask
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  978)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  979) static unsigned int afu_poll(struct file *file, struct poll_table_struct *poll)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  980) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  981) 	struct ocxlflash_context *ctx = file->private_data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  982) 	struct device *dev = ctx->hw_afu->dev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  983) 	ulong lock_flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  984) 	int mask = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  985) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  986) 	poll_wait(file, &ctx->wq, poll);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  987) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  988) 	spin_lock_irqsave(&ctx->slock, lock_flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  989) 	if (ctx_event_pending(ctx))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  990) 		mask |= POLLIN | POLLRDNORM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  991) 	else if (ctx->state == CLOSED)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  992) 		mask |= POLLERR;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  993) 	spin_unlock_irqrestore(&ctx->slock, lock_flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  994) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  995) 	dev_dbg(dev, "%s: Poll wait completed for pe %i mask %i\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  996) 		__func__, ctx->pe, mask);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  997) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  998) 	return mask;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  999) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1000) 
/**
 * afu_read() - perform a read on the context for any event
 * @file:	File associated with the adapter context.
 * @buf:	Buffer to receive the data.
 * @count:	Size of buffer (maximum bytes that can be read).
 * @off:	Offset.
 *
 * Blocks (unless O_NONBLOCK) until an AFU interrupt or a data storage
 * fault is pending on the context, or the context is closed, then copies
 * a struct cxl_event describing it to userspace. Interrupt events take
 * priority over fault events.
 *
 * Return: size of the data read on success, -errno on failure
 */
static ssize_t afu_read(struct file *file, char __user *buf, size_t count,
			loff_t *off)
{
	struct ocxlflash_context *ctx = file->private_data;
	struct device *dev = ctx->hw_afu->dev;
	struct cxl_event event;
	ulong lock_flags;
	ssize_t esize;
	ssize_t rc;
	int bit;
	DEFINE_WAIT(event_wait);

	/* The event stream is not seekable; only offset 0 is valid */
	if (*off != 0) {
		dev_err(dev, "%s: Non-zero offset not supported, off=%lld\n",
			__func__, *off);
		rc = -EINVAL;
		goto out;
	}

	spin_lock_irqsave(&ctx->slock, lock_flags);

	/*
	 * Wait for an event or for the context to close. The lock is
	 * dropped around schedule() so the interrupt handler can flag
	 * events; the condition is re-checked with the lock held.
	 */
	for (;;) {
		prepare_to_wait(&ctx->wq, &event_wait, TASK_INTERRUPTIBLE);

		if (ctx_event_pending(ctx) || (ctx->state == CLOSED))
			break;

		if (file->f_flags & O_NONBLOCK) {
			dev_err(dev, "%s: File cannot be blocked on I/O\n",
				__func__);
			rc = -EAGAIN;
			goto err;
		}

		if (signal_pending(current)) {
			dev_err(dev, "%s: Signal pending on the process\n",
				__func__);
			rc = -ERESTARTSYS;
			goto err;
		}

		spin_unlock_irqrestore(&ctx->slock, lock_flags);
		schedule();
		spin_lock_irqsave(&ctx->slock, lock_flags);
	}

	finish_wait(&ctx->wq, &event_wait);

	/* Build the event for userspace while still holding the lock */
	memset(&event, 0, sizeof(event));
	event.header.process_element = ctx->pe;
	event.header.size = sizeof(struct cxl_event_header);
	if (ctx->pending_irq) {
		esize = sizeof(struct cxl_event_afu_interrupt);
		event.header.size += esize;
		event.header.type = CXL_EVENT_AFU_INTERRUPT;

		/* Report the lowest pending IRQ, 1-based for userspace */
		bit = find_first_bit(&ctx->irq_bitmap, ctx->num_irqs);
		clear_bit(bit, &ctx->irq_bitmap);
		event.irq.irq = bit + 1;
		/* Clear pending_irq only once all flagged IRQs are drained */
		if (bitmap_empty(&ctx->irq_bitmap, ctx->num_irqs))
			ctx->pending_irq = false;
	} else if (ctx->pending_fault) {
		event.header.size += sizeof(struct cxl_event_data_storage);
		event.header.type = CXL_EVENT_DATA_STORAGE;
		event.fault.addr = ctx->fault_addr;
		event.fault.dsisr = ctx->fault_dsisr;
		ctx->pending_fault = false;
	}

	spin_unlock_irqrestore(&ctx->slock, lock_flags);

	if (copy_to_user(buf, &event, event.header.size)) {
		dev_err(dev, "%s: copy_to_user failed\n", __func__);
		rc = -EFAULT;
		goto out;
	}

	rc = event.header.size;
out:
	return rc;
err:
	finish_wait(&ctx->wq, &event_wait);
	spin_unlock_irqrestore(&ctx->slock, lock_flags);
	goto out;
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1095) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1096) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1097)  * afu_release() - release and free the context
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1098)  * @inode:	File inode pointer.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1099)  * @file:	File associated with the context.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1100)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1101)  * Return: 0 on success, -errno on failure
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1102)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1103) static int afu_release(struct inode *inode, struct file *file)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1104) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1105) 	struct ocxlflash_context *ctx = file->private_data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1106) 	int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1107) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1108) 	/* Unmap and free the interrupts associated with the context */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1109) 	for (i = ctx->num_irqs; i >= 0; i--)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1110) 		afu_unmap_irq(0, ctx, i, ctx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1111) 	free_afu_irqs(ctx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1112) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1113) 	return ocxlflash_release_context(ctx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1114) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1115) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1116) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1117)  * ocxlflash_mmap_fault() - mmap fault handler
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1118)  * @vmf:	VM fault associated with current fault.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1119)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1120)  * Return: 0 on success, -errno on failure
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1121)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1122) static vm_fault_t ocxlflash_mmap_fault(struct vm_fault *vmf)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1123) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1124) 	struct vm_area_struct *vma = vmf->vma;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1125) 	struct ocxlflash_context *ctx = vma->vm_file->private_data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1126) 	struct device *dev = ctx->hw_afu->dev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1127) 	u64 mmio_area, offset;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1128) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1129) 	offset = vmf->pgoff << PAGE_SHIFT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1130) 	if (offset >= ctx->psn_size)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1131) 		return VM_FAULT_SIGBUS;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1132) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1133) 	mutex_lock(&ctx->state_mutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1134) 	if (ctx->state != STARTED) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1135) 		dev_err(dev, "%s: Context not started, state=%d\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1136) 			__func__, ctx->state);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1137) 		mutex_unlock(&ctx->state_mutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1138) 		return VM_FAULT_SIGBUS;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1139) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1140) 	mutex_unlock(&ctx->state_mutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1141) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1142) 	mmio_area = ctx->psn_phys;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1143) 	mmio_area += offset;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1144) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1145) 	return vmf_insert_pfn(vma, vmf->address, mmio_area >> PAGE_SHIFT);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1146) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1147) 
/* VM operations for the context MMIO mapping; pages are faulted in lazily */
static const struct vm_operations_struct ocxlflash_vmops = {
	.fault = ocxlflash_mmap_fault,
};
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1151) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1152) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1153)  * afu_mmap() - map the fault handler operations
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1154)  * @file:	File associated with the context.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1155)  * @vma:	VM area associated with mapping.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1156)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1157)  * Return: 0 on success, -errno on failure
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1158)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1159) static int afu_mmap(struct file *file, struct vm_area_struct *vma)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1160) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1161) 	struct ocxlflash_context *ctx = file->private_data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1162) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1163) 	if ((vma_pages(vma) + vma->vm_pgoff) >
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1164) 	    (ctx->psn_size >> PAGE_SHIFT))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1165) 		return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1166) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1167) 	vma->vm_flags |= VM_IO | VM_PFNMAP;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1168) 	vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1169) 	vma->vm_ops = &ocxlflash_vmops;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1170) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1171) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1172) 
/* Default file operations for an adapter context fd */
static const struct file_operations ocxl_afu_fops = {
	.owner		= THIS_MODULE,
	.poll		= afu_poll,
	.read		= afu_read,
	.release	= afu_release,
	.mmap		= afu_mmap,
};
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1180) 
/* Fill in any fop the caller left NULL with the matching default */
#define PATCH_FOPS(NAME)						\
	do { if (!fops->NAME) fops->NAME = ocxl_afu_fops.NAME; } while (0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1183) 
/**
 * ocxlflash_get_fd() - get file descriptor for an adapter context
 * @ctx_cookie:	Adapter context.
 * @fops:	File operations to be associated.
 * @fd:		File descriptor to be returned back.
 *
 * Reserves an fd, binds the context to a new file using either the
 * caller's (patched) fops or the defaults, and hands both back. The fd
 * is reserved but not installed here; on failure it is returned to the
 * pool.
 *
 * Return: pointer to the file on success, ERR_PTR on failure
 */
static struct file *ocxlflash_get_fd(void *ctx_cookie,
				     struct file_operations *fops, int *fd)
{
	struct ocxlflash_context *ctx = ctx_cookie;
	struct device *dev = ctx->hw_afu->dev;
	struct file *file;
	int flags, fdtmp;
	int rc = 0;
	char *name = NULL;

	/* Only allow one fd per context */
	if (ctx->mapping) {
		dev_err(dev, "%s: Context is already mapped to an fd\n",
			__func__);
		rc = -EEXIST;
		goto err1;
	}

	flags = O_RDWR | O_CLOEXEC;

	/* This code is similar to anon_inode_getfd() */
	rc = get_unused_fd_flags(flags);
	if (unlikely(rc < 0)) {
		dev_err(dev, "%s: get_unused_fd_flags failed rc=%d\n",
			__func__, rc);
		goto err1;
	}
	fdtmp = rc;

	/* Patch the file ops that are not defined */
	if (fops) {
		PATCH_FOPS(poll);
		PATCH_FOPS(read);
		PATCH_FOPS(release);
		PATCH_FOPS(mmap);
	} else /* Use default ops */
		fops = (struct file_operations *)&ocxl_afu_fops;

	/*
	 * NOTE(review): kasprintf() returns NULL on allocation failure;
	 * presumably ocxlflash_getfile() tolerates a NULL name - confirm.
	 */
	name = kasprintf(GFP_KERNEL, "ocxlflash:%d", ctx->pe);
	file = ocxlflash_getfile(dev, name, fops, ctx, flags);
	kfree(name);
	if (IS_ERR(file)) {
		rc = PTR_ERR(file);
		dev_err(dev, "%s: ocxlflash_getfile failed rc=%d\n",
			__func__, rc);
		goto err2;
	}

	ctx->mapping = file->f_mapping;
	*fd = fdtmp;
out:
	return file;
err2:
	/* The fd was only reserved, never installed - release it */
	put_unused_fd(fdtmp);
err1:
	file = ERR_PTR(rc);
	goto out;
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1250) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1251) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1252)  * ocxlflash_fops_get_context() - get the context associated with the file
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1253)  * @file:	File associated with the adapter context.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1254)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1255)  * Return: pointer to the context
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1256)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1257) static void *ocxlflash_fops_get_context(struct file *file)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1258) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1259) 	return file->private_data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1260) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1261) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1262) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1263)  * ocxlflash_afu_irq() - interrupt handler for user contexts
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1264)  * @irq:	Interrupt number.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1265)  * @data:	Private data provided at interrupt registration, the context.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1266)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1267)  * Return: Always return IRQ_HANDLED.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1268)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1269) static irqreturn_t ocxlflash_afu_irq(int irq, void *data)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1270) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1271) 	struct ocxlflash_context *ctx = data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1272) 	struct device *dev = ctx->hw_afu->dev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1273) 	int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1274) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1275) 	dev_dbg(dev, "%s: Interrupt raised for pe %i virq %i\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1276) 		__func__, ctx->pe, irq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1277) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1278) 	for (i = 0; i < ctx->num_irqs; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1279) 		if (ctx->irqs[i].virq == irq)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1280) 			break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1281) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1282) 	if (unlikely(i >= ctx->num_irqs)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1283) 		dev_err(dev, "%s: Received AFU IRQ out of range\n", __func__);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1284) 		goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1285) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1286) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1287) 	spin_lock(&ctx->slock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1288) 	set_bit(i - 1, &ctx->irq_bitmap);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1289) 	ctx->pending_irq = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1290) 	spin_unlock(&ctx->slock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1291) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1292) 	wake_up_all(&ctx->wq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1293) out:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1294) 	return IRQ_HANDLED;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1295) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1296) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1297) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1298)  * ocxlflash_start_work() - start a user context
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1299)  * @ctx_cookie:	Context to be started.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1300)  * @num_irqs:	Number of interrupts requested.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1301)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1302)  * Return: 0 on success, -errno on failure
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1303)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1304) static int ocxlflash_start_work(void *ctx_cookie, u64 num_irqs)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1305) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1306) 	struct ocxlflash_context *ctx = ctx_cookie;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1307) 	struct ocxl_hw_afu *afu = ctx->hw_afu;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1308) 	struct device *dev = afu->dev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1309) 	char *name;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1310) 	int rc = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1311) 	int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1312) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1313) 	rc = alloc_afu_irqs(ctx, num_irqs);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1314) 	if (unlikely(rc < 0)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1315) 		dev_err(dev, "%s: alloc_afu_irqs failed rc=%d\n", __func__, rc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1316) 		goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1317) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1318) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1319) 	for (i = 0; i < num_irqs; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1320) 		name = kasprintf(GFP_KERNEL, "ocxlflash-%s-pe%i-%i",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1321) 				 dev_name(dev), ctx->pe, i);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1322) 		rc = afu_map_irq(0, ctx, i, ocxlflash_afu_irq, ctx, name);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1323) 		kfree(name);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1324) 		if (unlikely(rc < 0)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1325) 			dev_err(dev, "%s: afu_map_irq failed rc=%d\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1326) 				__func__, rc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1327) 			goto err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1328) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1329) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1330) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1331) 	rc = start_context(ctx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1332) 	if (unlikely(rc)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1333) 		dev_err(dev, "%s: start_context failed rc=%d\n", __func__, rc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1334) 		goto err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1335) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1336) out:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1337) 	return rc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1338) err:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1339) 	for (i = i-1; i >= 0; i--)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1340) 		afu_unmap_irq(0, ctx, i, ctx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1341) 	free_afu_irqs(ctx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1342) 	goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1343) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1344) 
/**
 * ocxlflash_fd_mmap() - mmap handler for adapter file descriptor
 * @file:	File installed with adapter file descriptor.
 * @vma:	VM area associated with mapping.
 *
 * Thin wrapper that delegates to afu_mmap().
 *
 * Return: 0 on success, -errno on failure
 */
static int ocxlflash_fd_mmap(struct file *file, struct vm_area_struct *vma)
{
	return afu_mmap(file, vma);
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1356) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1357) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1358)  * ocxlflash_fd_release() - release the context associated with the file
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1359)  * @inode:	File inode pointer.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1360)  * @file:	File associated with the adapter context.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1361)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1362)  * Return: 0 on success, -errno on failure
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1363)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1364) static int ocxlflash_fd_release(struct inode *inode, struct file *file)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1365) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1366) 	return afu_release(inode, file);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1367) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1368) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1369) /* Backend ops to ocxlflash services */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1370) const struct cxlflash_backend_ops cxlflash_ocxl_ops = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1371) 	.module			= THIS_MODULE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1372) 	.psa_map		= ocxlflash_psa_map,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1373) 	.psa_unmap		= ocxlflash_psa_unmap,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1374) 	.process_element	= ocxlflash_process_element,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1375) 	.map_afu_irq		= ocxlflash_map_afu_irq,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1376) 	.unmap_afu_irq		= ocxlflash_unmap_afu_irq,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1377) 	.get_irq_objhndl	= ocxlflash_get_irq_objhndl,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1378) 	.start_context		= ocxlflash_start_context,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1379) 	.stop_context		= ocxlflash_stop_context,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1380) 	.afu_reset		= ocxlflash_afu_reset,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1381) 	.set_master		= ocxlflash_set_master,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1382) 	.get_context		= ocxlflash_get_context,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1383) 	.dev_context_init	= ocxlflash_dev_context_init,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1384) 	.release_context	= ocxlflash_release_context,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1385) 	.perst_reloads_same_image = ocxlflash_perst_reloads_same_image,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1386) 	.read_adapter_vpd	= ocxlflash_read_adapter_vpd,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1387) 	.allocate_afu_irqs	= ocxlflash_allocate_afu_irqs,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1388) 	.free_afu_irqs		= ocxlflash_free_afu_irqs,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1389) 	.create_afu		= ocxlflash_create_afu,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1390) 	.destroy_afu		= ocxlflash_destroy_afu,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1391) 	.get_fd			= ocxlflash_get_fd,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1392) 	.fops_get_context	= ocxlflash_fops_get_context,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1393) 	.start_work		= ocxlflash_start_work,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1394) 	.fd_mmap		= ocxlflash_fd_mmap,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1395) 	.fd_release		= ocxlflash_fd_release,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1396) };