Orange Pi5 kernel

Deprecated Linux kernel 5.10.110 for OrangePi 5/5B/5+ boards

3 Commits   0 Branches   0 Tags
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   1) // SPDX-License-Identifier: GPL-2.0
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   2) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   3)  * Test driver to test endpoint functionality
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   4)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   5)  * Copyright (C) 2017 Texas Instruments
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   6)  * Author: Kishon Vijay Abraham I <kishon@ti.com>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   7)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   8) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   9) #include <linux/crc32.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  10) #include <linux/delay.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  11) #include <linux/dmaengine.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  12) #include <linux/io.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  13) #include <linux/module.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  14) #include <linux/slab.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  15) #include <linux/pci_ids.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  16) #include <linux/random.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  17) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  18) #include <linux/pci-epc.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  19) #include <linux/pci-epf.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  20) #include <linux/pci_regs.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  21) 
/* Host-selected interrupt mechanism, written into reg->irq_type */
#define IRQ_TYPE_LEGACY			0
#define IRQ_TYPE_MSI			1
#define IRQ_TYPE_MSIX			2

/* Commands the host writes to reg->command for the EP to execute */
#define COMMAND_RAISE_LEGACY_IRQ	BIT(0)
#define COMMAND_RAISE_MSI_IRQ		BIT(1)
#define COMMAND_RAISE_MSIX_IRQ		BIT(2)
#define COMMAND_READ			BIT(3)
#define COMMAND_WRITE			BIT(4)
#define COMMAND_COPY			BIT(5)

/* Result bits the EP sets in reg->status for the host to poll */
#define STATUS_READ_SUCCESS		BIT(0)
#define STATUS_READ_FAIL		BIT(1)
#define STATUS_WRITE_SUCCESS		BIT(2)
#define STATUS_WRITE_FAIL		BIT(3)
#define STATUS_COPY_SUCCESS		BIT(4)
#define STATUS_COPY_FAIL		BIT(5)
#define STATUS_IRQ_RAISED		BIT(6)
#define STATUS_SRC_ADDR_INVALID		BIT(7)
#define STATUS_DST_ADDR_INVALID		BIT(8)

/* Bit in reg->flags: host requests the transfer be done via the DMA engine */
#define FLAG_USE_DMA			BIT(0)

/* Command-handler polling interval, in msecs_to_jiffies() units */
#define TIMER_RESOLUTION		1

/* Workqueue on which the delayed command handler is scheduled */
static struct workqueue_struct *kpcitest_workqueue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  48) 
/* Per-function state for one EPF test device instance */
struct pci_epf_test {
	void			*reg[PCI_STD_NUM_BARS];	/* kernel va of each BAR's backing memory */
	struct pci_epf		*epf;			/* owning endpoint function */
	enum pci_barno		test_reg_bar;		/* BAR that holds struct pci_epf_test_reg */
	size_t			msix_table_offset;	/* offset of the MSI-X table within its BAR */
	struct delayed_work	cmd_handler;		/* polls reg->command on kpcitest_workqueue */
	struct dma_chan		*dma_chan;		/* memcpy-capable channel, NULL-ed on cleanup */
	struct completion	transfer_complete;	/* signalled by the DMA callback */
	bool			dma_supported;		/* set when a DMA channel was acquired */
	const struct pci_epc_features *epc_features;	/* capabilities of the underlying EPC */
};
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  60) 
/*
 * Register block shared with the host-side pci_endpoint_test driver via the
 * test BAR.  Layout is ABI with the host driver, hence __packed; do not
 * reorder or resize fields.
 */
struct pci_epf_test_reg {
	u32	magic;		/* identification value checked by the host */
	u32	command;	/* COMMAND_* request written by the host */
	u32	status;		/* STATUS_* result bits set by the EP */
	u64	src_addr;	/* host (RC) physical source address */
	u64	dst_addr;	/* host (RC) physical destination address */
	u32	size;		/* transfer size in bytes */
	u32	checksum;	/* CRC32 of the payload for verification */
	u32	irq_type;	/* IRQ_TYPE_* selected by the host */
	u32	irq_number;	/* which MSI/MSI-X vector to raise */
	u32	flags;		/* FLAG_* modifiers, e.g. FLAG_USE_DMA */
} __packed;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  73) 
/*
 * Default PCI config-space header for the test function.  PCI_ANY_ID lets
 * the vendor/device IDs be overridden via configfs before binding.
 */
static struct pci_epf_header test_header = {
	.vendorid	= PCI_ANY_ID,
	.deviceid	= PCI_ANY_ID,
	.baseclass_code = PCI_CLASS_OTHERS,
	.interrupt_pin	= PCI_INTERRUPT_INTA,
};

/* Default size (bytes) of each BAR, indexed by BAR number */
static size_t bar_size[] = { 512, 512, 1024, 16384, 131072, 1048576 };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  82) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  83) static void pci_epf_test_dma_callback(void *param)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  84) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  85) 	struct pci_epf_test *epf_test = param;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  86) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  87) 	complete(&epf_test->transfer_complete);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  88) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  89) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  90) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  91)  * pci_epf_test_data_transfer() - Function that uses dmaengine API to transfer
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  92)  *				  data between PCIe EP and remote PCIe RC
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  93)  * @epf_test: the EPF test device that performs the data transfer operation
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  94)  * @dma_dst: The destination address of the data transfer. It can be a physical
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  95)  *	     address given by pci_epc_mem_alloc_addr or DMA mapping APIs.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  96)  * @dma_src: The source address of the data transfer. It can be a physical
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  97)  *	     address given by pci_epc_mem_alloc_addr or DMA mapping APIs.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  98)  * @len: The size of the data transfer
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  99)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 100)  * Function that uses dmaengine API to transfer data between PCIe EP and remote
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 101)  * PCIe RC. The source and destination address can be a physical address given
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 102)  * by pci_epc_mem_alloc_addr or the one obtained using DMA mapping APIs.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 103)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 104)  * The function returns '0' on success and negative value on failure.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 105)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 106) static int pci_epf_test_data_transfer(struct pci_epf_test *epf_test,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 107) 				      dma_addr_t dma_dst, dma_addr_t dma_src,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 108) 				      size_t len)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 109) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 110) 	enum dma_ctrl_flags flags = DMA_CTRL_ACK | DMA_PREP_INTERRUPT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 111) 	struct dma_chan *chan = epf_test->dma_chan;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 112) 	struct pci_epf *epf = epf_test->epf;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 113) 	struct dma_async_tx_descriptor *tx;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 114) 	struct device *dev = &epf->dev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 115) 	dma_cookie_t cookie;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 116) 	int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 117) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 118) 	if (IS_ERR_OR_NULL(chan)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 119) 		dev_err(dev, "Invalid DMA memcpy channel\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 120) 		return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 121) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 122) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 123) 	tx = dmaengine_prep_dma_memcpy(chan, dma_dst, dma_src, len, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 124) 	if (!tx) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 125) 		dev_err(dev, "Failed to prepare DMA memcpy\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 126) 		return -EIO;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 127) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 128) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 129) 	tx->callback = pci_epf_test_dma_callback;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 130) 	tx->callback_param = epf_test;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 131) 	cookie = tx->tx_submit(tx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 132) 	reinit_completion(&epf_test->transfer_complete);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 133) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 134) 	ret = dma_submit_error(cookie);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 135) 	if (ret) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 136) 		dev_err(dev, "Failed to do DMA tx_submit %d\n", cookie);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 137) 		return -EIO;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 138) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 139) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 140) 	dma_async_issue_pending(chan);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 141) 	ret = wait_for_completion_interruptible(&epf_test->transfer_complete);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 142) 	if (ret < 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 143) 		dmaengine_terminate_sync(chan);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 144) 		dev_err(dev, "DMA wait_for_completion_timeout\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 145) 		return -ETIMEDOUT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 146) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 147) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 148) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 149) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 150) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 151) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 152)  * pci_epf_test_init_dma_chan() - Function to initialize EPF test DMA channel
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 153)  * @epf_test: the EPF test device that performs data transfer operation
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 154)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 155)  * Function to initialize EPF test DMA channel.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 156)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 157) static int pci_epf_test_init_dma_chan(struct pci_epf_test *epf_test)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 158) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 159) 	struct pci_epf *epf = epf_test->epf;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 160) 	struct device *dev = &epf->dev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 161) 	struct dma_chan *dma_chan;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 162) 	dma_cap_mask_t mask;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 163) 	int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 164) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 165) 	dma_cap_zero(mask);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 166) 	dma_cap_set(DMA_MEMCPY, mask);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 167) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 168) 	dma_chan = dma_request_chan_by_mask(&mask);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 169) 	if (IS_ERR(dma_chan)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 170) 		ret = PTR_ERR(dma_chan);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 171) 		if (ret != -EPROBE_DEFER)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 172) 			dev_err(dev, "Failed to get DMA channel\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 173) 		return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 174) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 175) 	init_completion(&epf_test->transfer_complete);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 176) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 177) 	epf_test->dma_chan = dma_chan;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 178) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 179) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 180) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 181) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 182) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 183)  * pci_epf_test_clean_dma_chan() - Function to cleanup EPF test DMA channel
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 184)  * @epf_test: the EPF test device that performs data transfer operation
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 185)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 186)  * Helper to cleanup EPF test DMA channel.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 187)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 188) static void pci_epf_test_clean_dma_chan(struct pci_epf_test *epf_test)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 189) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 190) 	if (!epf_test->dma_supported)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 191) 		return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 192) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 193) 	dma_release_channel(epf_test->dma_chan);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 194) 	epf_test->dma_chan = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 195) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 196) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 197) static void pci_epf_test_print_rate(const char *ops, u64 size,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 198) 				    struct timespec64 *start,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 199) 				    struct timespec64 *end, bool dma)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 200) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 201) 	struct timespec64 ts;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 202) 	u64 rate, ns;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 203) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 204) 	ts = timespec64_sub(*end, *start);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 205) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 206) 	/* convert both size (stored in 'rate') and time in terms of 'ns' */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 207) 	ns = timespec64_to_ns(&ts);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 208) 	rate = size * NSEC_PER_SEC;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 209) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 210) 	/* Divide both size (stored in 'rate') and ns by a common factor */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 211) 	while (ns > UINT_MAX) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 212) 		rate >>= 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 213) 		ns >>= 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 214) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 215) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 216) 	if (!ns)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 217) 		return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 218) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 219) 	/* calculate the rate */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 220) 	do_div(rate, (uint32_t)ns);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 221) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 222) 	pr_info("\n%s => Size: %llu bytes\t DMA: %s\t Time: %llu.%09u seconds\t"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 223) 		"Rate: %llu KB/s\n", ops, size, dma ? "YES" : "NO",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 224) 		(u64)ts.tv_sec, (u32)ts.tv_nsec, rate / 1024);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 225) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 226) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 227) static int pci_epf_test_copy(struct pci_epf_test *epf_test)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 228) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 229) 	int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 230) 	bool use_dma;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 231) 	void __iomem *src_addr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 232) 	void __iomem *dst_addr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 233) 	phys_addr_t src_phys_addr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 234) 	phys_addr_t dst_phys_addr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 235) 	struct timespec64 start, end;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 236) 	struct pci_epf *epf = epf_test->epf;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 237) 	struct device *dev = &epf->dev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 238) 	struct pci_epc *epc = epf->epc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 239) 	enum pci_barno test_reg_bar = epf_test->test_reg_bar;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 240) 	struct pci_epf_test_reg *reg = epf_test->reg[test_reg_bar];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 241) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 242) 	src_addr = pci_epc_mem_alloc_addr(epc, &src_phys_addr, reg->size);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 243) 	if (!src_addr) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 244) 		dev_err(dev, "Failed to allocate source address\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 245) 		reg->status = STATUS_SRC_ADDR_INVALID;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 246) 		ret = -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 247) 		goto err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 248) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 249) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 250) 	ret = pci_epc_map_addr(epc, epf->func_no, src_phys_addr, reg->src_addr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 251) 			       reg->size);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 252) 	if (ret) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 253) 		dev_err(dev, "Failed to map source address\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 254) 		reg->status = STATUS_SRC_ADDR_INVALID;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 255) 		goto err_src_addr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 256) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 257) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 258) 	dst_addr = pci_epc_mem_alloc_addr(epc, &dst_phys_addr, reg->size);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 259) 	if (!dst_addr) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 260) 		dev_err(dev, "Failed to allocate destination address\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 261) 		reg->status = STATUS_DST_ADDR_INVALID;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 262) 		ret = -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 263) 		goto err_src_map_addr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 264) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 265) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 266) 	ret = pci_epc_map_addr(epc, epf->func_no, dst_phys_addr, reg->dst_addr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 267) 			       reg->size);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 268) 	if (ret) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 269) 		dev_err(dev, "Failed to map destination address\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 270) 		reg->status = STATUS_DST_ADDR_INVALID;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 271) 		goto err_dst_addr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 272) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 273) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 274) 	ktime_get_ts64(&start);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 275) 	use_dma = !!(reg->flags & FLAG_USE_DMA);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 276) 	if (use_dma) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 277) 		if (!epf_test->dma_supported) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 278) 			dev_err(dev, "Cannot transfer data using DMA\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 279) 			ret = -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 280) 			goto err_map_addr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 281) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 282) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 283) 		ret = pci_epf_test_data_transfer(epf_test, dst_phys_addr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 284) 						 src_phys_addr, reg->size);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 285) 		if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 286) 			dev_err(dev, "Data transfer failed\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 287) 	} else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 288) 		memcpy(dst_addr, src_addr, reg->size);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 289) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 290) 	ktime_get_ts64(&end);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 291) 	pci_epf_test_print_rate("COPY", reg->size, &start, &end, use_dma);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 292) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 293) err_map_addr:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 294) 	pci_epc_unmap_addr(epc, epf->func_no, dst_phys_addr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 295) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 296) err_dst_addr:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 297) 	pci_epc_mem_free_addr(epc, dst_phys_addr, dst_addr, reg->size);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 298) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 299) err_src_map_addr:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 300) 	pci_epc_unmap_addr(epc, epf->func_no, src_phys_addr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 301) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 302) err_src_addr:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 303) 	pci_epc_mem_free_addr(epc, src_phys_addr, src_addr, reg->size);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 304) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 305) err:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 306) 	return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 307) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 308) 
/*
 * pci_epf_test_read() - handle COMMAND_READ from the host: pull reg->size
 * bytes from the host (RC) buffer at reg->src_addr into a local kernel
 * buffer, via the DMA channel or a CPU memcpy_fromio(), then verify the
 * data against the CRC32 the host stored in reg->checksum.
 *
 * Returns 0 on success, a negative errno on allocation/mapping/transfer
 * failure, or -EIO on checksum mismatch.  The caller translates the result
 * into reg->status bits.
 */
static int pci_epf_test_read(struct pci_epf_test *epf_test)
{
	int ret;
	void __iomem *src_addr;
	void *buf;
	u32 crc32;
	bool use_dma;
	phys_addr_t phys_addr;
	phys_addr_t dst_phys_addr;
	struct timespec64 start, end;
	struct pci_epf *epf = epf_test->epf;
	struct device *dev = &epf->dev;
	struct pci_epc *epc = epf->epc;
	/* DMA mapping must be done against the EPC's parent (the real HW device) */
	struct device *dma_dev = epf->epc->dev.parent;
	enum pci_barno test_reg_bar = epf_test->test_reg_bar;
	struct pci_epf_test_reg *reg = epf_test->reg[test_reg_bar];

	/* Outbound window through which the RC source buffer is accessed */
	src_addr = pci_epc_mem_alloc_addr(epc, &phys_addr, reg->size);
	if (!src_addr) {
		dev_err(dev, "Failed to allocate address\n");
		reg->status = STATUS_SRC_ADDR_INVALID;
		ret = -ENOMEM;
		goto err;
	}

	ret = pci_epc_map_addr(epc, epf->func_no, phys_addr, reg->src_addr,
			       reg->size);
	if (ret) {
		dev_err(dev, "Failed to map address\n");
		reg->status = STATUS_SRC_ADDR_INVALID;
		goto err_addr;
	}

	/* Local destination buffer the host data is read into */
	buf = kzalloc(reg->size, GFP_KERNEL);
	if (!buf) {
		ret = -ENOMEM;
		goto err_map_addr;
	}

	use_dma = !!(reg->flags & FLAG_USE_DMA);
	if (use_dma) {
		if (!epf_test->dma_supported) {
			dev_err(dev, "Cannot transfer data using DMA\n");
			ret = -EINVAL;
			goto err_dma_map;
		}

		/* Map buf for device writes; unmapped below before the CRC check */
		dst_phys_addr = dma_map_single(dma_dev, buf, reg->size,
					       DMA_FROM_DEVICE);
		if (dma_mapping_error(dma_dev, dst_phys_addr)) {
			dev_err(dev, "Failed to map destination buffer addr\n");
			ret = -ENOMEM;
			goto err_dma_map;
		}

		ktime_get_ts64(&start);
		ret = pci_epf_test_data_transfer(epf_test, dst_phys_addr,
						 phys_addr, reg->size);
		if (ret)
			dev_err(dev, "Data transfer failed\n");
		ktime_get_ts64(&end);

		dma_unmap_single(dma_dev, dst_phys_addr, reg->size,
				 DMA_FROM_DEVICE);
	} else {
		/* CPU path: read through the __iomem window with the io accessor */
		ktime_get_ts64(&start);
		memcpy_fromio(buf, src_addr, reg->size);
		ktime_get_ts64(&end);
	}

	pci_epf_test_print_rate("READ", reg->size, &start, &end, use_dma);

	/* Verify payload integrity against the CRC the host computed */
	crc32 = crc32_le(~0, buf, reg->size);
	if (crc32 != reg->checksum)
		ret = -EIO;

err_dma_map:
	kfree(buf);

err_map_addr:
	pci_epc_unmap_addr(epc, epf->func_no, phys_addr);

err_addr:
	pci_epc_mem_free_addr(epc, phys_addr, src_addr, reg->size);

err:
	return ret;
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 397) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 398) static int pci_epf_test_write(struct pci_epf_test *epf_test)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 399) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 400) 	int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 401) 	void __iomem *dst_addr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 402) 	void *buf;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 403) 	bool use_dma;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 404) 	phys_addr_t phys_addr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 405) 	phys_addr_t src_phys_addr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 406) 	struct timespec64 start, end;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 407) 	struct pci_epf *epf = epf_test->epf;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 408) 	struct device *dev = &epf->dev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 409) 	struct pci_epc *epc = epf->epc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 410) 	struct device *dma_dev = epf->epc->dev.parent;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 411) 	enum pci_barno test_reg_bar = epf_test->test_reg_bar;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 412) 	struct pci_epf_test_reg *reg = epf_test->reg[test_reg_bar];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 413) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 414) 	dst_addr = pci_epc_mem_alloc_addr(epc, &phys_addr, reg->size);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 415) 	if (!dst_addr) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 416) 		dev_err(dev, "Failed to allocate address\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 417) 		reg->status = STATUS_DST_ADDR_INVALID;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 418) 		ret = -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 419) 		goto err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 420) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 421) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 422) 	ret = pci_epc_map_addr(epc, epf->func_no, phys_addr, reg->dst_addr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 423) 			       reg->size);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 424) 	if (ret) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 425) 		dev_err(dev, "Failed to map address\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 426) 		reg->status = STATUS_DST_ADDR_INVALID;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 427) 		goto err_addr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 428) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 429) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 430) 	buf = kzalloc(reg->size, GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 431) 	if (!buf) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 432) 		ret = -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 433) 		goto err_map_addr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 434) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 435) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 436) 	get_random_bytes(buf, reg->size);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 437) 	reg->checksum = crc32_le(~0, buf, reg->size);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 438) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 439) 	use_dma = !!(reg->flags & FLAG_USE_DMA);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 440) 	if (use_dma) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 441) 		if (!epf_test->dma_supported) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 442) 			dev_err(dev, "Cannot transfer data using DMA\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 443) 			ret = -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 444) 			goto err_map_addr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 445) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 446) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 447) 		src_phys_addr = dma_map_single(dma_dev, buf, reg->size,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 448) 					       DMA_TO_DEVICE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 449) 		if (dma_mapping_error(dma_dev, src_phys_addr)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 450) 			dev_err(dev, "Failed to map source buffer addr\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 451) 			ret = -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 452) 			goto err_dma_map;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 453) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 454) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 455) 		ktime_get_ts64(&start);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 456) 		ret = pci_epf_test_data_transfer(epf_test, phys_addr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 457) 						 src_phys_addr, reg->size);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 458) 		if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 459) 			dev_err(dev, "Data transfer failed\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 460) 		ktime_get_ts64(&end);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 461) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 462) 		dma_unmap_single(dma_dev, src_phys_addr, reg->size,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 463) 				 DMA_TO_DEVICE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 464) 	} else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 465) 		ktime_get_ts64(&start);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 466) 		memcpy_toio(dst_addr, buf, reg->size);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 467) 		ktime_get_ts64(&end);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 468) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 469) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 470) 	pci_epf_test_print_rate("WRITE", reg->size, &start, &end, use_dma);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 471) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 472) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 473) 	 * wait 1ms inorder for the write to complete. Without this delay L3
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 474) 	 * error in observed in the host system.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 475) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 476) 	usleep_range(1000, 2000);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 477) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 478) err_dma_map:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 479) 	kfree(buf);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 480) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 481) err_map_addr:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 482) 	pci_epc_unmap_addr(epc, epf->func_no, phys_addr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 483) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 484) err_addr:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 485) 	pci_epc_mem_free_addr(epc, phys_addr, dst_addr, reg->size);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 486) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 487) err:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 488) 	return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 489) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 490) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 491) static void pci_epf_test_raise_irq(struct pci_epf_test *epf_test, u8 irq_type,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 492) 				   u16 irq)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 493) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 494) 	struct pci_epf *epf = epf_test->epf;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 495) 	struct device *dev = &epf->dev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 496) 	struct pci_epc *epc = epf->epc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 497) 	enum pci_barno test_reg_bar = epf_test->test_reg_bar;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 498) 	struct pci_epf_test_reg *reg = epf_test->reg[test_reg_bar];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 499) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 500) 	reg->status |= STATUS_IRQ_RAISED;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 501) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 502) 	switch (irq_type) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 503) 	case IRQ_TYPE_LEGACY:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 504) 		pci_epc_raise_irq(epc, epf->func_no, PCI_EPC_IRQ_LEGACY, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 505) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 506) 	case IRQ_TYPE_MSI:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 507) 		pci_epc_raise_irq(epc, epf->func_no, PCI_EPC_IRQ_MSI, irq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 508) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 509) 	case IRQ_TYPE_MSIX:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 510) 		pci_epc_raise_irq(epc, epf->func_no, PCI_EPC_IRQ_MSIX, irq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 511) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 512) 	default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 513) 		dev_err(dev, "Failed to raise IRQ, unknown type\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 514) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 515) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 516) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 517) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 518) static void pci_epf_test_cmd_handler(struct work_struct *work)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 519) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 520) 	int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 521) 	int count;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 522) 	u32 command;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 523) 	struct pci_epf_test *epf_test = container_of(work, struct pci_epf_test,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 524) 						     cmd_handler.work);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 525) 	struct pci_epf *epf = epf_test->epf;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 526) 	struct device *dev = &epf->dev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 527) 	struct pci_epc *epc = epf->epc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 528) 	enum pci_barno test_reg_bar = epf_test->test_reg_bar;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 529) 	struct pci_epf_test_reg *reg = epf_test->reg[test_reg_bar];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 530) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 531) 	command = reg->command;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 532) 	if (!command)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 533) 		goto reset_handler;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 534) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 535) 	reg->command = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 536) 	reg->status = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 537) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 538) 	if (reg->irq_type > IRQ_TYPE_MSIX) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 539) 		dev_err(dev, "Failed to detect IRQ type\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 540) 		goto reset_handler;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 541) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 542) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 543) 	if (command & COMMAND_RAISE_LEGACY_IRQ) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 544) 		reg->status = STATUS_IRQ_RAISED;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 545) 		pci_epc_raise_irq(epc, epf->func_no, PCI_EPC_IRQ_LEGACY, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 546) 		goto reset_handler;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 547) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 548) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 549) 	if (command & COMMAND_WRITE) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 550) 		ret = pci_epf_test_write(epf_test);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 551) 		if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 552) 			reg->status |= STATUS_WRITE_FAIL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 553) 		else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 554) 			reg->status |= STATUS_WRITE_SUCCESS;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 555) 		pci_epf_test_raise_irq(epf_test, reg->irq_type,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 556) 				       reg->irq_number);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 557) 		goto reset_handler;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 558) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 559) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 560) 	if (command & COMMAND_READ) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 561) 		ret = pci_epf_test_read(epf_test);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 562) 		if (!ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 563) 			reg->status |= STATUS_READ_SUCCESS;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 564) 		else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 565) 			reg->status |= STATUS_READ_FAIL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 566) 		pci_epf_test_raise_irq(epf_test, reg->irq_type,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 567) 				       reg->irq_number);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 568) 		goto reset_handler;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 569) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 570) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 571) 	if (command & COMMAND_COPY) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 572) 		ret = pci_epf_test_copy(epf_test);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 573) 		if (!ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 574) 			reg->status |= STATUS_COPY_SUCCESS;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 575) 		else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 576) 			reg->status |= STATUS_COPY_FAIL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 577) 		pci_epf_test_raise_irq(epf_test, reg->irq_type,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 578) 				       reg->irq_number);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 579) 		goto reset_handler;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 580) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 581) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 582) 	if (command & COMMAND_RAISE_MSI_IRQ) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 583) 		count = pci_epc_get_msi(epc, epf->func_no);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 584) 		if (reg->irq_number > count || count <= 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 585) 			goto reset_handler;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 586) 		reg->status = STATUS_IRQ_RAISED;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 587) 		pci_epc_raise_irq(epc, epf->func_no, PCI_EPC_IRQ_MSI,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 588) 				  reg->irq_number);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 589) 		goto reset_handler;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 590) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 591) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 592) 	if (command & COMMAND_RAISE_MSIX_IRQ) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 593) 		count = pci_epc_get_msix(epc, epf->func_no);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 594) 		if (reg->irq_number > count || count <= 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 595) 			goto reset_handler;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 596) 		reg->status = STATUS_IRQ_RAISED;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 597) 		pci_epc_raise_irq(epc, epf->func_no, PCI_EPC_IRQ_MSIX,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 598) 				  reg->irq_number);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 599) 		goto reset_handler;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 600) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 601) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 602) reset_handler:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 603) 	queue_delayed_work(kpcitest_workqueue, &epf_test->cmd_handler,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 604) 			   msecs_to_jiffies(1));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 605) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 606) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 607) static void pci_epf_test_unbind(struct pci_epf *epf)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 608) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 609) 	struct pci_epf_test *epf_test = epf_get_drvdata(epf);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 610) 	struct pci_epc *epc = epf->epc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 611) 	struct pci_epf_bar *epf_bar;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 612) 	int bar;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 613) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 614) 	cancel_delayed_work(&epf_test->cmd_handler);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 615) 	pci_epf_test_clean_dma_chan(epf_test);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 616) 	pci_epc_stop(epc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 617) 	for (bar = 0; bar < PCI_STD_NUM_BARS; bar++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 618) 		epf_bar = &epf->bar[bar];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 619) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 620) 		if (epf_test->reg[bar]) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 621) 			pci_epc_clear_bar(epc, epf->func_no, epf_bar);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 622) 			pci_epf_free_space(epf, epf_test->reg[bar], bar);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 623) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 624) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 625) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 626) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 627) static int pci_epf_test_set_bar(struct pci_epf *epf)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 628) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 629) 	int bar, add;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 630) 	int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 631) 	struct pci_epf_bar *epf_bar;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 632) 	struct pci_epc *epc = epf->epc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 633) 	struct device *dev = &epf->dev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 634) 	struct pci_epf_test *epf_test = epf_get_drvdata(epf);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 635) 	enum pci_barno test_reg_bar = epf_test->test_reg_bar;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 636) 	const struct pci_epc_features *epc_features;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 637) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 638) 	epc_features = epf_test->epc_features;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 639) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 640) 	for (bar = 0; bar < PCI_STD_NUM_BARS; bar += add) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 641) 		epf_bar = &epf->bar[bar];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 642) 		/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 643) 		 * pci_epc_set_bar() sets PCI_BASE_ADDRESS_MEM_TYPE_64
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 644) 		 * if the specific implementation required a 64-bit BAR,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 645) 		 * even if we only requested a 32-bit BAR.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 646) 		 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 647) 		add = (epf_bar->flags & PCI_BASE_ADDRESS_MEM_TYPE_64) ? 2 : 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 648) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 649) 		if (!!(epc_features->reserved_bar & (1 << bar)))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 650) 			continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 651) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 652) 		ret = pci_epc_set_bar(epc, epf->func_no, epf_bar);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 653) 		if (ret) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 654) 			pci_epf_free_space(epf, epf_test->reg[bar], bar);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 655) 			dev_err(dev, "Failed to set BAR%d\n", bar);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 656) 			if (bar == test_reg_bar)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 657) 				return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 658) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 659) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 660) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 661) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 662) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 663) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 664) static int pci_epf_test_core_init(struct pci_epf *epf)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 665) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 666) 	struct pci_epf_test *epf_test = epf_get_drvdata(epf);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 667) 	struct pci_epf_header *header = epf->header;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 668) 	const struct pci_epc_features *epc_features;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 669) 	struct pci_epc *epc = epf->epc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 670) 	struct device *dev = &epf->dev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 671) 	bool msix_capable = false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 672) 	bool msi_capable = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 673) 	int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 674) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 675) 	epc_features = pci_epc_get_features(epc, epf->func_no);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 676) 	if (epc_features) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 677) 		msix_capable = epc_features->msix_capable;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 678) 		msi_capable = epc_features->msi_capable;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 679) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 680) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 681) 	ret = pci_epc_write_header(epc, epf->func_no, header);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 682) 	if (ret) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 683) 		dev_err(dev, "Configuration header write failed\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 684) 		return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 685) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 686) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 687) 	ret = pci_epf_test_set_bar(epf);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 688) 	if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 689) 		return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 690) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 691) 	if (msi_capable) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 692) 		ret = pci_epc_set_msi(epc, epf->func_no, epf->msi_interrupts);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 693) 		if (ret) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 694) 			dev_err(dev, "MSI configuration failed\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 695) 			return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 696) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 697) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 698) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 699) 	if (msix_capable) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 700) 		ret = pci_epc_set_msix(epc, epf->func_no, epf->msix_interrupts,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 701) 				       epf_test->test_reg_bar,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 702) 				       epf_test->msix_table_offset);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 703) 		if (ret) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 704) 			dev_err(dev, "MSI-X configuration failed\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 705) 			return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 706) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 707) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 708) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 709) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 710) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 711) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 712) static int pci_epf_test_notifier(struct notifier_block *nb, unsigned long val,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 713) 				 void *data)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 714) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 715) 	struct pci_epf *epf = container_of(nb, struct pci_epf, nb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 716) 	struct pci_epf_test *epf_test = epf_get_drvdata(epf);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 717) 	int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 718) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 719) 	switch (val) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 720) 	case CORE_INIT:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 721) 		ret = pci_epf_test_core_init(epf);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 722) 		if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 723) 			return NOTIFY_BAD;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 724) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 725) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 726) 	case LINK_UP:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 727) 		queue_delayed_work(kpcitest_workqueue, &epf_test->cmd_handler,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 728) 				   msecs_to_jiffies(1));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 729) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 730) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 731) 	default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 732) 		dev_err(&epf->dev, "Invalid EPF test notifier event\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 733) 		return NOTIFY_BAD;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 734) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 735) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 736) 	return NOTIFY_OK;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 737) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 738) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 739) static int pci_epf_test_alloc_space(struct pci_epf *epf)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 740) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 741) 	struct pci_epf_test *epf_test = epf_get_drvdata(epf);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 742) 	struct device *dev = &epf->dev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 743) 	struct pci_epf_bar *epf_bar;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 744) 	size_t msix_table_size = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 745) 	size_t test_reg_bar_size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 746) 	size_t pba_size = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 747) 	bool msix_capable;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 748) 	void *base;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 749) 	int bar, add;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 750) 	enum pci_barno test_reg_bar = epf_test->test_reg_bar;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 751) 	const struct pci_epc_features *epc_features;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 752) 	size_t test_reg_size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 753) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 754) 	epc_features = epf_test->epc_features;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 755) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 756) 	test_reg_bar_size = ALIGN(sizeof(struct pci_epf_test_reg), 128);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 757) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 758) 	msix_capable = epc_features->msix_capable;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 759) 	if (msix_capable) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 760) 		msix_table_size = PCI_MSIX_ENTRY_SIZE * epf->msix_interrupts;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 761) 		epf_test->msix_table_offset = test_reg_bar_size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 762) 		/* Align to QWORD or 8 Bytes */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 763) 		pba_size = ALIGN(DIV_ROUND_UP(epf->msix_interrupts, 8), 8);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 764) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 765) 	test_reg_size = test_reg_bar_size + msix_table_size + pba_size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 766) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 767) 	if (epc_features->bar_fixed_size[test_reg_bar]) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 768) 		if (test_reg_size > bar_size[test_reg_bar])
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 769) 			return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 770) 		test_reg_size = bar_size[test_reg_bar];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 771) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 772) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 773) 	base = pci_epf_alloc_space(epf, test_reg_size, test_reg_bar,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 774) 				   epc_features->align);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 775) 	if (!base) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 776) 		dev_err(dev, "Failed to allocated register space\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 777) 		return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 778) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 779) 	epf_test->reg[test_reg_bar] = base;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 780) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 781) 	for (bar = 0; bar < PCI_STD_NUM_BARS; bar += add) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 782) 		epf_bar = &epf->bar[bar];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 783) 		add = (epf_bar->flags & PCI_BASE_ADDRESS_MEM_TYPE_64) ? 2 : 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 784) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 785) 		if (bar == test_reg_bar)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 786) 			continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 787) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 788) 		if (!!(epc_features->reserved_bar & (1 << bar)))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 789) 			continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 790) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 791) 		base = pci_epf_alloc_space(epf, bar_size[bar], bar,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 792) 					   epc_features->align);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 793) 		if (!base)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 794) 			dev_err(dev, "Failed to allocate space for BAR%d\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 795) 				bar);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 796) 		epf_test->reg[bar] = base;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 797) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 798) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 799) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 800) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 801) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 802) static void pci_epf_configure_bar(struct pci_epf *epf,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 803) 				  const struct pci_epc_features *epc_features)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 804) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 805) 	struct pci_epf_bar *epf_bar;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 806) 	bool bar_fixed_64bit;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 807) 	int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 808) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 809) 	for (i = 0; i < PCI_STD_NUM_BARS; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 810) 		epf_bar = &epf->bar[i];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 811) 		bar_fixed_64bit = !!(epc_features->bar_fixed_64bit & (1 << i));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 812) 		if (bar_fixed_64bit)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 813) 			epf_bar->flags |= PCI_BASE_ADDRESS_MEM_TYPE_64;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 814) 		if (epc_features->bar_fixed_size[i])
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 815) 			bar_size[i] = epc_features->bar_fixed_size[i];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 816) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 817) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 818) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 819) static int pci_epf_test_bind(struct pci_epf *epf)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 820) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 821) 	int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 822) 	struct pci_epf_test *epf_test = epf_get_drvdata(epf);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 823) 	const struct pci_epc_features *epc_features;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 824) 	enum pci_barno test_reg_bar = BAR_0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 825) 	struct pci_epc *epc = epf->epc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 826) 	bool linkup_notifier = false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 827) 	bool core_init_notifier = false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 828) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 829) 	if (WARN_ON_ONCE(!epc))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 830) 		return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 831) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 832) 	epc_features = pci_epc_get_features(epc, epf->func_no);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 833) 	if (!epc_features) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 834) 		dev_err(&epf->dev, "epc_features not implemented\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 835) 		return -EOPNOTSUPP;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 836) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 837) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 838) 	linkup_notifier = epc_features->linkup_notifier;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 839) 	core_init_notifier = epc_features->core_init_notifier;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 840) 	test_reg_bar = pci_epc_get_first_free_bar(epc_features);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 841) 	if (test_reg_bar < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 842) 		return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 843) 	pci_epf_configure_bar(epf, epc_features);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 844) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 845) 	epf_test->test_reg_bar = test_reg_bar;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 846) 	epf_test->epc_features = epc_features;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 847) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 848) 	ret = pci_epf_test_alloc_space(epf);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 849) 	if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 850) 		return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 851) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 852) 	if (!core_init_notifier) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 853) 		ret = pci_epf_test_core_init(epf);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 854) 		if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 855) 			return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 856) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 857) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 858) 	epf_test->dma_supported = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 859) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 860) 	ret = pci_epf_test_init_dma_chan(epf_test);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 861) 	if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 862) 		epf_test->dma_supported = false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 863) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 864) 	if (linkup_notifier) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 865) 		epf->nb.notifier_call = pci_epf_test_notifier;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 866) 		pci_epc_register_notifier(epc, &epf->nb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 867) 	} else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 868) 		queue_work(kpcitest_workqueue, &epf_test->cmd_handler.work);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 869) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 870) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 871) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 872) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 873) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 874) static const struct pci_epf_device_id pci_epf_test_ids[] = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 875) 	{
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 876) 		.name = "pci_epf_test",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 877) 	},
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 878) 	{},
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 879) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 880) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 881) static int pci_epf_test_probe(struct pci_epf *epf)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 882) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 883) 	struct pci_epf_test *epf_test;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 884) 	struct device *dev = &epf->dev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 885) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 886) 	epf_test = devm_kzalloc(dev, sizeof(*epf_test), GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 887) 	if (!epf_test)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 888) 		return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 889) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 890) 	epf->header = &test_header;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 891) 	epf_test->epf = epf;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 892) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 893) 	INIT_DELAYED_WORK(&epf_test->cmd_handler, pci_epf_test_cmd_handler);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 894) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 895) 	epf_set_drvdata(epf, epf_test);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 896) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 897) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 898) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 899) static struct pci_epf_ops ops = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 900) 	.unbind	= pci_epf_test_unbind,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 901) 	.bind	= pci_epf_test_bind,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 902) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 903) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 904) static struct pci_epf_driver test_driver = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 905) 	.driver.name	= "pci_epf_test",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 906) 	.probe		= pci_epf_test_probe,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 907) 	.id_table	= pci_epf_test_ids,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 908) 	.ops		= &ops,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 909) 	.owner		= THIS_MODULE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 910) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 911) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 912) static int __init pci_epf_test_init(void)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 913) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 914) 	int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 915) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 916) 	kpcitest_workqueue = alloc_workqueue("kpcitest",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 917) 					     WQ_MEM_RECLAIM | WQ_HIGHPRI, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 918) 	if (!kpcitest_workqueue) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 919) 		pr_err("Failed to allocate the kpcitest work queue\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 920) 		return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 921) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 922) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 923) 	ret = pci_epf_register_driver(&test_driver);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 924) 	if (ret) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 925) 		destroy_workqueue(kpcitest_workqueue);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 926) 		pr_err("Failed to register pci epf test driver --> %d\n", ret);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 927) 		return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 928) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 929) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 930) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 931) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 932) module_init(pci_epf_test_init);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 933) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 934) static void __exit pci_epf_test_exit(void)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 935) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 936) 	if (kpcitest_workqueue)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 937) 		destroy_workqueue(kpcitest_workqueue);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 938) 	pci_epf_unregister_driver(&test_driver);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 939) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 940) module_exit(pci_epf_test_exit);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 941) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 942) MODULE_DESCRIPTION("PCI EPF TEST DRIVER");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 943) MODULE_AUTHOR("Kishon Vijay Abraham I <kishon@ti.com>");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 944) MODULE_LICENSE("GPL v2");