Orange Pi 5 kernel

Deprecated Linux kernel 5.10.110 sources for the Orange Pi 5 / 5B / 5 Plus boards

drivers/block/mtip32xx/mtip32xx.c:
// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Driver for the Micron P320 SSD
 *   Copyright (C) 2011 Micron Technology, Inc.
 *
 * Portions of this code were derived from works subjected to the
 * following copyright:
 *    Copyright (C) 2009 Integrated Device Technology, Inc.
 */

#include <linux/pci.h>
#include <linux/interrupt.h>
#include <linux/ata.h>
#include <linux/delay.h>
#include <linux/hdreg.h>
#include <linux/uaccess.h>
#include <linux/random.h>
#include <linux/smp.h>
#include <linux/compat.h>
#include <linux/fs.h>
#include <linux/module.h>
#include <linux/genhd.h>
#include <linux/blkdev.h>
#include <linux/blk-mq.h>
#include <linux/bio.h>
#include <linux/dma-mapping.h>
#include <linux/idr.h>
#include <linux/kthread.h>
#include <../drivers/ata/ahci.h>
#include <linux/export.h>
#include <linux/debugfs.h>
#include <linux/prefetch.h>
#include <linux/numa.h>
#include "mtip32xx.h"

#define HW_CMD_SLOT_SZ		(MTIP_MAX_COMMAND_SLOTS * 32)
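/* each AHCI command header (command list entry) is 32 bytes, hence the "* 32" above */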

/* DMA region containing RX Fis, Identify, RLE10, and SMART buffers */
#define AHCI_RX_FIS_SZ          0x100
#define AHCI_RX_FIS_OFFSET      0x0
#define AHCI_IDFY_SZ            ATA_SECT_SIZE
#define AHCI_IDFY_OFFSET        0x400
#define AHCI_SECTBUF_SZ         ATA_SECT_SIZE
#define AHCI_SECTBUF_OFFSET     0x800
#define AHCI_SMARTBUF_SZ        ATA_SECT_SIZE
#define AHCI_SMARTBUF_OFFSET    0xC00
/* 0x100 + 0x200 + 0x200 + 0x200 is smaller than 4k but we pad it out */
#define BLOCK_DMA_ALLOC_SZ      4096

/* DMA region containing command table (should be 8192 bytes) */
#define AHCI_CMD_SLOT_SZ        sizeof(struct mtip_cmd_hdr)
#define AHCI_CMD_TBL_SZ         (MTIP_MAX_COMMAND_SLOTS * AHCI_CMD_SLOT_SZ)
#define AHCI_CMD_TBL_OFFSET     0x0

/* DMA region per command (contains header and SGL) */
#define AHCI_CMD_TBL_HDR_SZ     0x80
#define AHCI_CMD_TBL_HDR_OFFSET 0x0
#define AHCI_CMD_TBL_SGL_SZ     (MTIP_MAX_SG * sizeof(struct mtip_cmd_sg))
#define AHCI_CMD_TBL_SGL_OFFSET AHCI_CMD_TBL_HDR_SZ
#define CMD_DMA_ALLOC_SZ        (AHCI_CMD_TBL_SGL_SZ + AHCI_CMD_TBL_HDR_SZ)


#define HOST_CAP_NZDMA		(1 << 19)
#define HOST_HSORG		0xFC
#define HSORG_DISABLE_SLOTGRP_INTR (1<<24)
#define HSORG_DISABLE_SLOTGRP_PXIS (1<<16)
#define HSORG_HWREV		0xFF00
#define HSORG_STYLE		0x8
#define HSORG_SLOTGROUPS	0x7
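/*
 * HOST_HSORG (0xFC) lies in the AHCI vendor-specific host register range;
 * the masks above select its hardware-revision, style and slot-group-count
 * fields and the slot-group interrupt/PxIS disable bits.
 */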

#define PORT_COMMAND_ISSUE	0x38
#define PORT_SDBV		0x7C

#define PORT_OFFSET		0x100
#define PORT_MEM_SIZE		0x80

#define PORT_IRQ_ERR \
	(PORT_IRQ_HBUS_ERR | PORT_IRQ_IF_ERR | PORT_IRQ_CONNECT | \
	 PORT_IRQ_PHYRDY | PORT_IRQ_UNK_FIS | PORT_IRQ_BAD_PMP | \
	 PORT_IRQ_TF_ERR | PORT_IRQ_HBUS_DATA_ERR | PORT_IRQ_IF_NONFATAL | \
	 PORT_IRQ_OVERFLOW)
#define PORT_IRQ_LEGACY \
	(PORT_IRQ_PIOS_FIS | PORT_IRQ_D2H_REG_FIS)
#define PORT_IRQ_HANDLED \
	(PORT_IRQ_SDB_FIS | PORT_IRQ_LEGACY | \
	 PORT_IRQ_TF_ERR | PORT_IRQ_IF_ERR | \
	 PORT_IRQ_CONNECT | PORT_IRQ_PHYRDY)
#define DEF_PORT_IRQ \
	(PORT_IRQ_ERR | PORT_IRQ_LEGACY | PORT_IRQ_SDB_FIS)
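/* interrupt sources (re)enabled in PORT_IRQ_MASK by mtip_init_port() */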

/* product numbers */
#define MTIP_PRODUCT_UNKNOWN	0x00
#define MTIP_PRODUCT_ASICFPGA	0x11

/* Device instance number, incremented each time a device is probed. */
static int instance;

static struct list_head online_list;
static struct list_head removing_list;
static spinlock_t dev_lock;

/*
 * Global variable used to hold the major block device number
 * allocated in mtip_init().
 */
static int mtip_major;
static struct dentry *dfs_parent;
static struct dentry *dfs_device_status;

static u32 cpu_use[NR_CPUS];

static DEFINE_IDA(rssd_index_ida);

static int mtip_block_initialize(struct driver_data *dd);

#ifdef CONFIG_COMPAT
struct mtip_compat_ide_task_request_s {
	__u8		io_ports[8];
	__u8		hob_ports[8];
	ide_reg_valid_t	out_flags;
	ide_reg_valid_t	in_flags;
	int		data_phase;
	int		req_cmd;
	compat_ulong_t	out_size;
	compat_ulong_t	in_size;
};
#endif

/*
 * mtip_check_surprise_removal is called when the card may have been
 * removed from the system; it reads the vendor ID from PCI
 * configuration space to detect the removal.
 *
 * @pdev Pointer to the pci_dev structure.
 *
 * return value
 *	 true if device removed, else false
 */
static bool mtip_check_surprise_removal(struct pci_dev *pdev)
{
	u16 vendor_id = 0;
	struct driver_data *dd = pci_get_drvdata(pdev);

	if (dd->sr)
		return true;

	/* Read the vendor ID from the configuration space */
	pci_read_config_word(pdev, 0x00, &vendor_id);
	if (vendor_id == 0xFFFF) {
		dd->sr = true;
		if (dd->queue)
			blk_queue_flag_set(QUEUE_FLAG_DEAD, dd->queue);
		else
			dev_warn(&dd->pdev->dev,
				"%s: dd->queue is NULL\n", __func__);
		return true; /* device removed */
	}

	return false; /* device present */
}

static struct mtip_cmd *mtip_cmd_from_tag(struct driver_data *dd,
					  unsigned int tag)
{
	struct blk_mq_hw_ctx *hctx = dd->queue->queue_hw_ctx[0];

	return blk_mq_rq_to_pdu(blk_mq_tag_to_rq(hctx->tags, tag));
}

/*
 * Reset the HBA (without sleeping)
 *
 * @dd Pointer to the driver data structure.
 *
 * return value
 *	0	The reset was successful.
 *	-1	The HBA Reset bit did not clear.
 */
static int mtip_hba_reset(struct driver_data *dd)
{
	unsigned long timeout;

	/* Set the reset bit */
	writel(HOST_RESET, dd->mmio + HOST_CTL);

	/* Flush */
	readl(dd->mmio + HOST_CTL);

	/*
	 * Spin for up to 10 seconds waiting for reset acknowledgement. Spec
	 * is 1 sec but in LUN failure conditions, up to 10 secs are required
	 */
	timeout = jiffies + msecs_to_jiffies(10000);
	do {
		mdelay(10);
		if (test_bit(MTIP_DDF_REMOVE_PENDING_BIT, &dd->dd_flag))
			return -1;

	} while ((readl(dd->mmio + HOST_CTL) & HOST_RESET)
		 && time_before(jiffies, timeout));

	if (readl(dd->mmio + HOST_CTL) & HOST_RESET)
		return -1;

	return 0;
}

/*
 * Issue a command to the hardware.
 *
 * Set the appropriate bit in the s_active and Command Issue hardware
 * registers, causing hardware command processing to begin.
 *
 * @port Pointer to the port structure.
 * @tag  The tag of the command to be issued.
 *
 * return value
 *      None
 */
static inline void mtip_issue_ncq_command(struct mtip_port *port, int tag)
{
	int group = tag >> 5;

	/* guard SACT and CI registers */
	spin_lock(&port->cmd_issue_lock[group]);
	writel((1 << MTIP_TAG_BIT(tag)),
			port->s_active[MTIP_TAG_INDEX(tag)]);
	writel((1 << MTIP_TAG_BIT(tag)),
			port->cmd_issue[MTIP_TAG_INDEX(tag)]);
	spin_unlock(&port->cmd_issue_lock[group]);
}

/*
 * Enable/disable the reception of FIS
 *
 * @port   Pointer to the port data structure
 * @enable 1 to enable, 0 to disable
 *
 * return value
 *	Previous state: 1 enabled, 0 disabled
 */
static int mtip_enable_fis(struct mtip_port *port, int enable)
{
	u32 tmp;

	/* enable FIS reception */
	tmp = readl(port->mmio + PORT_CMD);
	if (enable)
		writel(tmp | PORT_CMD_FIS_RX, port->mmio + PORT_CMD);
	else
		writel(tmp & ~PORT_CMD_FIS_RX, port->mmio + PORT_CMD);

	/* Flush */
	readl(port->mmio + PORT_CMD);

	return (((tmp & PORT_CMD_FIS_RX) == PORT_CMD_FIS_RX));
}

/*
 * Enable/disable the DMA engine
 *
 * @port   Pointer to the port data structure
 * @enable 1 to enable, 0 to disable
 *
 * return value
 *	Previous state: 1 enabled, 0 disabled.
 */
static int mtip_enable_engine(struct mtip_port *port, int enable)
{
	u32 tmp;

	/* enable/disable the DMA start bit */
	tmp = readl(port->mmio + PORT_CMD);
	if (enable)
		writel(tmp | PORT_CMD_START, port->mmio + PORT_CMD);
	else
		writel(tmp & ~PORT_CMD_START, port->mmio + PORT_CMD);

	readl(port->mmio + PORT_CMD);
	return (((tmp & PORT_CMD_START) == PORT_CMD_START));
}

/*
 * Enables the port DMA engine and FIS reception.
 *
 * return value
 *	None
 */
static inline void mtip_start_port(struct mtip_port *port)
{
	/* Enable FIS reception */
	mtip_enable_fis(port, 1);

	/* Enable the DMA engine */
	mtip_enable_engine(port, 1);
}

/*
 * Deinitialize a port by disabling port interrupts, the DMA engine,
 * and FIS reception.
 *
 * @port Pointer to the port structure
 *
 * return value
 *	None
 */
static inline void mtip_deinit_port(struct mtip_port *port)
{
	/* Disable interrupts on this port */
	writel(0, port->mmio + PORT_IRQ_MASK);

	/* Disable the DMA engine */
	mtip_enable_engine(port, 0);

	/* Disable FIS reception */
	mtip_enable_fis(port, 0);
}

/*
 * Initialize a port.
 *
 * This function deinitializes the port by calling mtip_deinit_port() and
 * then initializes it by setting the command header and RX FIS addresses,
 * clearing the SError register and any pending port interrupts before
 * re-enabling the default set of port interrupts.
 *
 * @port Pointer to the port structure.
 *
 * return value
 *	None
 */
static void mtip_init_port(struct mtip_port *port)
{
	int i;
	mtip_deinit_port(port);

	/* Program the command list base and FIS base addresses */
	if (readl(port->dd->mmio + HOST_CAP) & HOST_CAP_64) {
		writel((port->command_list_dma >> 16) >> 16,
			 port->mmio + PORT_LST_ADDR_HI);
		writel((port->rxfis_dma >> 16) >> 16,
			 port->mmio + PORT_FIS_ADDR_HI);
		set_bit(MTIP_PF_HOST_CAP_64, &port->flags);
	}

	writel(port->command_list_dma & 0xFFFFFFFF,
			port->mmio + PORT_LST_ADDR);
	writel(port->rxfis_dma & 0xFFFFFFFF, port->mmio + PORT_FIS_ADDR);

	/* Clear SError */
	writel(readl(port->mmio + PORT_SCR_ERR), port->mmio + PORT_SCR_ERR);

	/* reset the completed registers.*/
	for (i = 0; i < port->dd->slot_groups; i++)
		writel(0xFFFFFFFF, port->completed[i]);

	/* Clear any pending interrupts for this port */
	writel(readl(port->mmio + PORT_IRQ_STAT), port->mmio + PORT_IRQ_STAT);

	/* Clear any pending interrupts on the HBA. */
	writel(readl(port->dd->mmio + HOST_IRQ_STAT),
					port->dd->mmio + HOST_IRQ_STAT);

	/* Enable port interrupts */
	writel(DEF_PORT_IRQ, port->mmio + PORT_IRQ_MASK);
}

/*
 * Restart a port
 *
 * @port Pointer to the port data structure.
 *
 * return value
 *	None
 */
static void mtip_restart_port(struct mtip_port *port)
{
	unsigned long timeout;

	/* Disable the DMA engine */
	mtip_enable_engine(port, 0);

	/* Chip quirk: wait up to 500ms for PxCMD.CR == 0 */
	timeout = jiffies + msecs_to_jiffies(500);
	while ((readl(port->mmio + PORT_CMD) & PORT_CMD_LIST_ON)
		 && time_before(jiffies, timeout))
		;

	if (test_bit(MTIP_DDF_REMOVE_PENDING_BIT, &port->dd->dd_flag))
		return;

	/*
	 * Chip quirk: escalate to hba reset if
	 * PxCMD.CR not clear after 500 ms
	 */
	if (readl(port->mmio + PORT_CMD) & PORT_CMD_LIST_ON) {
		dev_warn(&port->dd->pdev->dev,
			"PxCMD.CR not clear, escalating reset\n");

		if (mtip_hba_reset(port->dd))
			dev_err(&port->dd->pdev->dev,
				"HBA reset escalation failed.\n");

		/* 30 ms delay before com reset to quiesce chip */
		mdelay(30);
	}

	dev_warn(&port->dd->pdev->dev, "Issuing COM reset\n");

	/* Set PxSCTL.DET */
	writel(readl(port->mmio + PORT_SCR_CTL) |
			 1, port->mmio + PORT_SCR_CTL);
	readl(port->mmio + PORT_SCR_CTL);

	/* Wait 1 ms to quiesce chip function */
	timeout = jiffies + msecs_to_jiffies(1);
	while (time_before(jiffies, timeout))
		;

	if (test_bit(MTIP_DDF_REMOVE_PENDING_BIT, &port->dd->dd_flag))
		return;

	/* Clear PxSCTL.DET */
	writel(readl(port->mmio + PORT_SCR_CTL) & ~1,
			 port->mmio + PORT_SCR_CTL);
	readl(port->mmio + PORT_SCR_CTL);

	/* Wait 500 ms for bit 0 of PORT_SCR_STS to be set */
	timeout = jiffies + msecs_to_jiffies(500);
	while (((readl(port->mmio + PORT_SCR_STAT) & 0x01) == 0)
			 && time_before(jiffies, timeout))
		;

	if (test_bit(MTIP_DDF_REMOVE_PENDING_BIT, &port->dd->dd_flag))
		return;

	if ((readl(port->mmio + PORT_SCR_STAT) & 0x01) == 0)
		dev_warn(&port->dd->pdev->dev,
			"COM reset failed\n");

	mtip_init_port(port);
	mtip_start_port(port);

}

static int mtip_device_reset(struct driver_data *dd)
{
	int rv = 0;

	if (mtip_check_surprise_removal(dd->pdev))
		return 0;

	if (mtip_hba_reset(dd) < 0)
		rv = -EFAULT;

	mdelay(1);
	mtip_init_port(dd->port);
	mtip_start_port(dd->port);

	/* Enable interrupts on the HBA. */
	writel(readl(dd->mmio + HOST_CTL) | HOST_IRQ_EN,
					dd->mmio + HOST_CTL);
	return rv;
}

/*
 * Helper function for tag logging
 */
static void print_tags(struct driver_data *dd,
			char *msg,
			unsigned long *tagbits,
			int cnt)
{
	unsigned char tagmap[128];
	int group, tagmap_len = 0;

	memset(tagmap, 0, sizeof(tagmap));
	for (group = SLOTBITS_IN_LONGS; group > 0; group--)
		tagmap_len += sprintf(tagmap + tagmap_len, "%016lX ",
						tagbits[group-1]);
	dev_warn(&dd->pdev->dev,
			"%d command(s) %s: tagmap [%s]", cnt, msg, tagmap);
}

static int mtip_read_log_page(struct mtip_port *port, u8 page, u16 *buffer,
				dma_addr_t buffer_dma, unsigned int sectors);
static int mtip_get_smart_attr(struct mtip_port *port, unsigned int id,
						struct smart_attr *attrib);

static void mtip_complete_command(struct mtip_cmd *cmd, blk_status_t status)
{
	struct request *req = blk_mq_rq_from_pdu(cmd);

	cmd->status = status;
	if (likely(!blk_should_fake_timeout(req->q)))
		blk_mq_complete_request(req);
}

/*
 * Handle an error.
 *
 * @dd Pointer to the DRIVER_DATA structure.
 *
 * return value
 *	None
 */
static void mtip_handle_tfe(struct driver_data *dd)
{
	int group, tag, bit, reissue, rv;
	struct mtip_port *port;
	struct mtip_cmd  *cmd;
	u32 completed;
	struct host_to_dev_fis *fis;
	unsigned long tagaccum[SLOTBITS_IN_LONGS];
	unsigned int cmd_cnt = 0;
	unsigned char *buf;
	char *fail_reason = NULL;
	int fail_all_ncq_write = 0, fail_all_ncq_cmds = 0;

	dev_warn(&dd->pdev->dev, "Taskfile error\n");

	port = dd->port;

	if (test_bit(MTIP_PF_IC_ACTIVE_BIT, &port->flags)) {
		cmd = mtip_cmd_from_tag(dd, MTIP_TAG_INTERNAL);
		dbg_printk(MTIP_DRV_NAME " TFE for the internal command\n");
		mtip_complete_command(cmd, BLK_STS_IOERR);
		return;
	}

	/* clear the tag accumulator */
	memset(tagaccum, 0, SLOTBITS_IN_LONGS * sizeof(long));

	/* Loop through all the groups */
	for (group = 0; group < dd->slot_groups; group++) {
		completed = readl(port->completed[group]);

		dev_warn(&dd->pdev->dev, "g=%u, comp=%x\n", group, completed);

		/* clear completed status register in the hardware.*/
		writel(completed, port->completed[group]);

		/* Process successfully completed commands */
		for (bit = 0; bit < 32 && completed; bit++) {
			if (!(completed & (1<<bit)))
				continue;
			tag = (group << 5) + bit;

			/* Skip the internal command slot */
			if (tag == MTIP_TAG_INTERNAL)
				continue;

			cmd = mtip_cmd_from_tag(dd, tag);
			mtip_complete_command(cmd, 0);
			set_bit(tag, tagaccum);
			cmd_cnt++;
		}
	}

	print_tags(dd, "completed (TFE)", tagaccum, cmd_cnt);

	/* Restart the port */
	mdelay(20);
	mtip_restart_port(port);

	/* Trying to determine the cause of the error */
	rv = mtip_read_log_page(dd->port, ATA_LOG_SATA_NCQ,
				dd->port->log_buf,
				dd->port->log_buf_dma, 1);
	if (rv) {
		dev_warn(&dd->pdev->dev,
			"Error in READ LOG EXT (10h) command\n");
		/* non-critical error, don't fail the load */
	} else {
		buf = (unsigned char *)dd->port->log_buf;
		if (buf[259] & 0x1) {
			dev_info(&dd->pdev->dev,
				"Write protect bit is set.\n");
			set_bit(MTIP_DDF_WRITE_PROTECT_BIT, &dd->dd_flag);
			fail_all_ncq_write = 1;
			fail_reason = "write protect";
		}
		if (buf[288] == 0xF7) {
			dev_info(&dd->pdev->dev,
				"Exceeded Tmax, drive in thermal shutdown.\n");
			set_bit(MTIP_DDF_OVER_TEMP_BIT, &dd->dd_flag);
			fail_all_ncq_cmds = 1;
			fail_reason = "thermal shutdown";
		}
		if (buf[288] == 0xBF) {
			set_bit(MTIP_DDF_REBUILD_FAILED_BIT, &dd->dd_flag);
			dev_info(&dd->pdev->dev,
				"Drive indicates rebuild has failed. Secure erase required.\n");
			fail_all_ncq_cmds = 1;
			fail_reason = "rebuild failed";
		}
	}

	/* clear the tag accumulator */
	memset(tagaccum, 0, SLOTBITS_IN_LONGS * sizeof(long));

	/* Loop through all the groups */
	for (group = 0; group < dd->slot_groups; group++) {
		for (bit = 0; bit < 32; bit++) {
			reissue = 1;
			tag = (group << 5) + bit;
			cmd = mtip_cmd_from_tag(dd, tag);

			fis = (struct host_to_dev_fis *)cmd->command;

			/* Should re-issue? */
			if (tag == MTIP_TAG_INTERNAL ||
			    fis->command == ATA_CMD_SET_FEATURES)
				reissue = 0;
			else {
				if (fail_all_ncq_cmds ||
					(fail_all_ncq_write &&
					fis->command == ATA_CMD_FPDMA_WRITE)) {
					dev_warn(&dd->pdev->dev,
					"  Fail: %s w/tag %d [%s].\n",
					fis->command == ATA_CMD_FPDMA_WRITE ?
						"write" : "read",
					tag,
					fail_reason != NULL ?
						fail_reason : "unknown");
					mtip_complete_command(cmd, BLK_STS_MEDIUM);
					continue;
				}
			}

			/*
			 * First check if this command has
			 *  exceeded its retries.
			 */
			if (reissue && (cmd->retries-- > 0)) {

				set_bit(tag, tagaccum);

				/* Re-issue the command. */
				mtip_issue_ncq_command(port, tag);

				continue;
			}

			/* Retire a command that will not be reissued */
			dev_warn(&port->dd->pdev->dev,
				"retiring tag %d\n", tag);

			mtip_complete_command(cmd, BLK_STS_IOERR);
		}
	}
	print_tags(dd, "reissued (TFE)", tagaccum, cmd_cnt);
}

/*
 * Handle a set device bits interrupt
 */
static inline void mtip_workq_sdbfx(struct mtip_port *port, int group,
							u32 completed)
{
	struct driver_data *dd = port->dd;
	int tag, bit;
	struct mtip_cmd *command;

	if (!completed) {
		WARN_ON_ONCE(!completed);
		return;
	}
	/* clear completed status register in the hardware.*/
	writel(completed, port->completed[group]);

	/* Process completed commands. */
	for (bit = 0; (bit < 32) && completed; bit++) {
		if (completed & 0x01) {
			tag = (group << 5) | bit;

			/* skip internal command slot. */
			if (unlikely(tag == MTIP_TAG_INTERNAL))
				continue;

			command = mtip_cmd_from_tag(dd, tag);
			mtip_complete_command(command, 0);
		}
		completed >>= 1;
	}

	/* If last, re-enable interrupts */
	if (atomic_dec_return(&dd->irq_workers_active) == 0)
		writel(0xffffffff, dd->mmio + HOST_IRQ_STAT);
}

/*
 * Process legacy pio and d2h interrupts
 */
static inline void mtip_process_legacy(struct driver_data *dd, u32 port_stat)
{
	struct mtip_port *port = dd->port;
	struct mtip_cmd *cmd = mtip_cmd_from_tag(dd, MTIP_TAG_INTERNAL);

	if (test_bit(MTIP_PF_IC_ACTIVE_BIT, &port->flags) && cmd) {
		int group = MTIP_TAG_INDEX(MTIP_TAG_INTERNAL);
		int status = readl(port->cmd_issue[group]);

		if (!(status & (1 << MTIP_TAG_BIT(MTIP_TAG_INTERNAL))))
			mtip_complete_command(cmd, 0);
	}
}

/*
 * Demux and handle errors
 */
static inline void mtip_process_errors(struct driver_data *dd, u32 port_stat)
{
	if (unlikely(port_stat & PORT_IRQ_CONNECT)) {
		dev_warn(&dd->pdev->dev,
			"Clearing PxSERR.DIAG.x\n");
		writel((1 << 26), dd->port->mmio + PORT_SCR_ERR);
	}

	if (unlikely(port_stat & PORT_IRQ_PHYRDY)) {
		dev_warn(&dd->pdev->dev,
			"Clearing PxSERR.DIAG.n\n");
		writel((1 << 16), dd->port->mmio + PORT_SCR_ERR);
	}

	if (unlikely(port_stat & ~PORT_IRQ_HANDLED)) {
		dev_warn(&dd->pdev->dev,
			"Port stat errors %x unhandled\n",
			(port_stat & ~PORT_IRQ_HANDLED));
		if (mtip_check_surprise_removal(dd->pdev))
			return;
	}
	if (likely(port_stat & (PORT_IRQ_TF_ERR | PORT_IRQ_IF_ERR))) {
		set_bit(MTIP_PF_EH_ACTIVE_BIT, &dd->port->flags);
		wake_up_interruptible(&dd->port->svc_wait);
	}
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  738) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  739) static inline irqreturn_t mtip_handle_irq(struct driver_data *data)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  740) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  741) 	struct driver_data *dd = (struct driver_data *) data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  742) 	struct mtip_port *port = dd->port;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  743) 	u32 hba_stat, port_stat;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  744) 	int rv = IRQ_NONE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  745) 	int do_irq_enable = 1, i, workers;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  746) 	struct mtip_work *twork;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  747) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  748) 	hba_stat = readl(dd->mmio + HOST_IRQ_STAT);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  749) 	if (hba_stat) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  750) 		rv = IRQ_HANDLED;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  751) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  752) 		/* Acknowledge the interrupt status on the port.*/
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  753) 		port_stat = readl(port->mmio + PORT_IRQ_STAT);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  754) 		if (unlikely(port_stat == 0xFFFFFFFF)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  755) 			mtip_check_surprise_removal(dd->pdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  756) 			return IRQ_HANDLED;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  757) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  758) 		writel(port_stat, port->mmio + PORT_IRQ_STAT);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  759) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  760) 		/* Demux port status */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  761) 		if (likely(port_stat & PORT_IRQ_SDB_FIS)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  762) 			do_irq_enable = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  763) 			WARN_ON_ONCE(atomic_read(&dd->irq_workers_active) != 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  764) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  765) 			/* Start at 1: group zero is always local? */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  766) 			for (i = 0, workers = 0; i < MTIP_MAX_SLOT_GROUPS;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  767) 									i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  768) 				twork = &dd->work[i];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  769) 				twork->completed = readl(port->completed[i]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  770) 				if (twork->completed)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  771) 					workers++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  772) 			}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  773) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  774) 			atomic_set(&dd->irq_workers_active, workers);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  775) 			if (workers) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  776) 				for (i = 1; i < MTIP_MAX_SLOT_GROUPS; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  777) 					twork = &dd->work[i];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  778) 					if (twork->completed)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  779) 						queue_work_on(
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  780) 							twork->cpu_binding,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  781) 							dd->isr_workq,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  782) 							&twork->work);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  783) 				}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  784) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  785) 				if (likely(dd->work[0].completed))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  786) 					mtip_workq_sdbfx(port, 0,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  787) 							dd->work[0].completed);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  788) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  789) 			} else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  790) 				/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  791) 				 * Chip quirk: SDB interrupt but nothing
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  792) 				 * to complete
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  793) 				 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  794) 				do_irq_enable = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  795) 			}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  796) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  797) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  798) 		if (unlikely(port_stat & PORT_IRQ_ERR)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  799) 			if (unlikely(mtip_check_surprise_removal(dd->pdev))) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  800) 				/* don't proceed further */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  801) 				return IRQ_HANDLED;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  802) 			}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  803) 			if (test_bit(MTIP_DDF_REMOVE_PENDING_BIT,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  804) 							&dd->dd_flag))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  805) 				return rv;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  806) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  807) 			mtip_process_errors(dd, port_stat & PORT_IRQ_ERR);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  808) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  809) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  810) 		if (unlikely(port_stat & PORT_IRQ_LEGACY))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  811) 			mtip_process_legacy(dd, port_stat & PORT_IRQ_LEGACY);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  812) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  813) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  814) 	/* acknowledge interrupt */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  815) 	if (unlikely(do_irq_enable))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  816) 		writel(hba_stat, dd->mmio + HOST_IRQ_STAT);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  817) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  818) 	return rv;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  819) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  820) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  821) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  822)  * HBA interrupt subroutine.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  823)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  824)  * @irq		IRQ number.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  825)  * @instance	Pointer to the driver data structure.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  826)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  827)  * return value
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  828)  *	IRQ_HANDLED	An HBA interrupt was pending and handled.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  829)  *	IRQ_NONE	This interrupt was not for the HBA.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  830)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  831) static irqreturn_t mtip_irq_handler(int irq, void *instance)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  832) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  833) 	struct driver_data *dd = instance;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  834) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  835) 	return mtip_handle_irq(dd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  836) }
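/*
 * Illustrative sketch only (the actual registration is outside this
 * excerpt, so the flags and name below are assumptions): mtip_irq_handler()
 * is the entry point handed to the IRQ core, e.g. something like
 *
 *	rv = request_irq(dd->pdev->irq, mtip_irq_handler, IRQF_SHARED,
 *			 "mtip32xx", dd);
 *
 * The void *instance the IRQ core passes back is the dev_id pointer
 * supplied at registration, which is why the handler simply forwards it
 * to mtip_handle_irq().
 */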
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  837) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  838) static void mtip_issue_non_ncq_command(struct mtip_port *port, int tag)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  839) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  840) 	writel(1 << MTIP_TAG_BIT(tag), port->cmd_issue[MTIP_TAG_INDEX(tag)]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  841) }
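/*
 * The tag-to-register mapping above assumes the usual mtip32xx.h encoding
 * of 32 command slots per slot group, i.e. roughly
 * MTIP_TAG_INDEX(tag) == tag / 32 and MTIP_TAG_BIT(tag) == tag % 32
 * (the macro definitions live outside this excerpt).  For example, tag 37
 * would set bit 5 in the Command Issue register of slot group 1.
 */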
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  842) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  843) static bool mtip_pause_ncq(struct mtip_port *port,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  844) 				struct host_to_dev_fis *fis)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  845) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  846) 	unsigned long task_file_data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  847) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  848) 	task_file_data = readl(port->mmio+PORT_TFDATA);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  849) 	if (task_file_data & 1)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  850) 		return false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  851) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  852) 	if (fis->command == ATA_CMD_SEC_ERASE_PREP) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  853) 		port->ic_pause_timer = jiffies;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  854) 		return true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  855) 	} else if ((fis->command == ATA_CMD_DOWNLOAD_MICRO) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  856) 					(fis->features == 0x03)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  857) 		set_bit(MTIP_PF_DM_ACTIVE_BIT, &port->flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  858) 		port->ic_pause_timer = jiffies;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  859) 		return true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  860) 	} else if ((fis->command == ATA_CMD_SEC_ERASE_UNIT) ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  861) 		((fis->command == 0xFC) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  862) 			(fis->features == 0x27 || fis->features == 0x72 ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  863) 			 fis->features == 0x62 || fis->features == 0x26))) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  864) 		clear_bit(MTIP_DDF_SEC_LOCK_BIT, &port->dd->dd_flag);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  865) 		clear_bit(MTIP_DDF_REBUILD_FAILED_BIT, &port->dd->dd_flag);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  866) 		/* COMRESET after secure erase or low-level format */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  867) 		mtip_restart_port(port);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  868) 		clear_bit(MTIP_PF_SE_ACTIVE_BIT, &port->flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  869) 		return false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  870) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  871) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  872) 	return false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  873) }
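/*
 * Note on the return value: "true" means the caller should leave NCQ
 * paused (ic_pause_timer records when the pause started), which is why
 * mtip_exec_internal_command() below returns without waking the service
 * thread when this function returns true; "false" means normal operation
 * can resume immediately.
 */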
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  874) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  875) static bool mtip_commands_active(struct mtip_port *port)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  876) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  877) 	unsigned int active;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  878) 	unsigned int n;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  879) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  880) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  881) 	 * Ignore s_active bit 0 of array element 0.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  882) 	 * This bit will always be set
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  883) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  884) 	active = readl(port->s_active[0]) & 0xFFFFFFFE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  885) 	for (n = 1; n < port->dd->slot_groups; n++)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  886) 		active |= readl(port->s_active[n]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  887) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  888) 	return active != 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  889) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  890) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  891) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  892)  * Wait for port to quiesce
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  893)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  894)  * @port    Pointer to port data structure
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  895)  * @timeout Max duration to wait (ms)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  896)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  897)  * return value
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  898)  *	0	Success
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  899)  *	-EBUSY  Commands still active
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  900)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  901) static int mtip_quiesce_io(struct mtip_port *port, unsigned long timeout)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  902) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  903) 	unsigned long to;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  904) 	bool active = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  905) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  906) 	blk_mq_quiesce_queue(port->dd->queue);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  907) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  908) 	to = jiffies + msecs_to_jiffies(timeout);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  909) 	do {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  910) 		if (test_bit(MTIP_PF_SVC_THD_ACTIVE_BIT, &port->flags) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  911) 			test_bit(MTIP_PF_ISSUE_CMDS_BIT, &port->flags)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  912) 			msleep(20);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  913) 			continue; /* svc thd is actively issuing commands */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  914) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  915) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  916) 		msleep(100);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  917) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  918) 		if (mtip_check_surprise_removal(port->dd->pdev))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  919) 			goto err_fault;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  920) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  921) 		active = mtip_commands_active(port);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  922) 		if (!active)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  923) 			break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  924) 	} while (time_before(jiffies, to));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  925) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  926) 	blk_mq_unquiesce_queue(port->dd->queue);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  927) 	return active ? -EBUSY : 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  928) err_fault:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  929) 	blk_mq_unquiesce_queue(port->dd->queue);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  930) 	return -EFAULT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  931) }
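/*
 * Typical use, as in mtip_exec_internal_command() below, is
 * mtip_quiesce_io(port, MTIP_QUIESCE_IO_TIMEOUT_MS): the blk-mq queue is
 * quiesced, the per-group SActive bitmaps are polled in 100 ms steps until
 * they drain or the timeout expires, and the queue is always unquiesced
 * again before returning.
 */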
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  932) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  933) struct mtip_int_cmd {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  934) 	int fis_len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  935) 	dma_addr_t buffer;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  936) 	int buf_len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  937) 	u32 opts;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  938) };
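/*
 * These fields mirror the buffer/length/option arguments of
 * mtip_exec_internal_command() below; a pointer to the on-stack instance
 * is stashed in the request's driver PDU (int_cmd->icmd) so the code that
 * actually builds and issues the command header (not part of this excerpt)
 * can retrieve them when the reserved request is executed.
 */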
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  939) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  940) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  941)  * Execute an internal command and wait for the completion.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  942)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  943)  * @port    Pointer to the port data structure.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  944)  * @fis     Pointer to the FIS that describes the command.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  945)  * @fis_len  Length of the FIS in DWORDS (4-byte words).
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  946)  * @buffer  DMA accessible for command data.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  947)  * @buf_len  Length, in bytes, of the data buffer.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  948)  * @opts    Command header options, excluding the FIS length
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  949)  *             and the number of PRD entries.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  950)  * @timeout Time in ms to wait for the command to complete.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  951)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  952)  * return value
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  953)  *	0	 Command completed successfully.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  954)  *	-EFAULT  The buffer address is not correctly aligned.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  955)  *	-EBUSY   Internal command or other IO in progress.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  956)  *	-EAGAIN  Time out waiting for command to complete.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  957)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  958) static int mtip_exec_internal_command(struct mtip_port *port,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  959) 					struct host_to_dev_fis *fis,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  960) 					int fis_len,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  961) 					dma_addr_t buffer,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  962) 					int buf_len,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  963) 					u32 opts,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  964) 					unsigned long timeout)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  965) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  966) 	struct mtip_cmd *int_cmd;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  967) 	struct driver_data *dd = port->dd;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  968) 	struct request *rq;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  969) 	struct mtip_int_cmd icmd = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  970) 		.fis_len = fis_len,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  971) 		.buffer = buffer,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  972) 		.buf_len = buf_len,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  973) 		.opts = opts
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  974) 	};
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  975) 	int rv = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  976) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  977) 	/* Make sure the buffer is 8-byte aligned. This is ASIC-specific. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  978) 	if (buffer & 0x00000007) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  979) 		dev_err(&dd->pdev->dev, "SG buffer is not 8 byte aligned\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  980) 		return -EFAULT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  981) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  982) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  983) 	if (mtip_check_surprise_removal(dd->pdev))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  984) 		return -EFAULT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  985) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  986) 	rq = blk_mq_alloc_request(dd->queue, REQ_OP_DRV_IN, BLK_MQ_REQ_RESERVED);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  987) 	if (IS_ERR(rq)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  988) 		dbg_printk(MTIP_DRV_NAME "Unable to allocate tag for PIO cmd\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  989) 		return -EFAULT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  990) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  991) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  992) 	set_bit(MTIP_PF_IC_ACTIVE_BIT, &port->flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  993) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  994) 	if (fis->command == ATA_CMD_SEC_ERASE_PREP)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  995) 		set_bit(MTIP_PF_SE_ACTIVE_BIT, &port->flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  996) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  997) 	clear_bit(MTIP_PF_DM_ACTIVE_BIT, &port->flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  998) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  999) 	if (fis->command != ATA_CMD_STANDBYNOW1) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1000) 		/* Wait for outstanding I/O to complete if the command is not atomic */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1001) 		if (mtip_quiesce_io(port, MTIP_QUIESCE_IO_TIMEOUT_MS) < 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1002) 			dev_warn(&dd->pdev->dev, "Failed to quiesce IO\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1003) 			blk_mq_free_request(rq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1004) 			clear_bit(MTIP_PF_IC_ACTIVE_BIT, &port->flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1005) 			wake_up_interruptible(&port->svc_wait);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1006) 			return -EBUSY;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1007) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1008) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1009) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1010) 	/* Copy the command to the command table */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1011) 	int_cmd = blk_mq_rq_to_pdu(rq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1012) 	int_cmd->icmd = &icmd;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1013) 	memcpy(int_cmd->command, fis, fis_len*4);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1014) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1015) 	rq->timeout = timeout;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1016) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1017) 	/* insert request and run queue */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1018) 	blk_execute_rq(rq->q, NULL, rq, true);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1019) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1020) 	if (int_cmd->status) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1021) 		dev_err(&dd->pdev->dev, "Internal command [%02X] failed %d\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1022) 				fis->command, int_cmd->status);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1023) 		rv = -EIO;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1024) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1025) 		if (mtip_check_surprise_removal(dd->pdev) ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1026) 			test_bit(MTIP_DDF_REMOVE_PENDING_BIT,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1027) 					&dd->dd_flag)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1028) 			dev_err(&dd->pdev->dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1029) 				"Internal command [%02X] wait returned due to SR\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1030) 				fis->command);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1031) 			rv = -ENXIO;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1032) 			goto exec_ic_exit;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1033) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1034) 		mtip_device_reset(dd); /* recover from timeout issue */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1035) 		rv = -EAGAIN;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1036) 		goto exec_ic_exit;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1037) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1038) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1039) 	if (readl(port->cmd_issue[MTIP_TAG_INDEX(MTIP_TAG_INTERNAL)])
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1040) 			& (1 << MTIP_TAG_BIT(MTIP_TAG_INTERNAL))) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1041) 		rv = -ENXIO;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1042) 		if (!test_bit(MTIP_DDF_REMOVE_PENDING_BIT, &dd->dd_flag)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1043) 			mtip_device_reset(dd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1044) 			rv = -EAGAIN;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1045) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1046) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1047) exec_ic_exit:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1048) 	/* Clear the allocated and active bits for the internal command. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1049) 	blk_mq_free_request(rq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1050) 	clear_bit(MTIP_PF_IC_ACTIVE_BIT, &port->flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1051) 	if (rv >= 0 && mtip_pause_ncq(port, fis)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1052) 		/* NCQ paused */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1053) 		return rv;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1054) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1055) 	wake_up_interruptible(&port->svc_wait);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1056) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1057) 	return rv;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1058) }
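/*
 * Callers follow a common pattern, e.g. mtip_get_identify() and
 * mtip_read_log_page() below: build a 5-dword register H2D FIS
 * (type 0x27, opts 1 << 7), hand in a DMA-able buffer plus its length,
 * and pass a timeout in milliseconds, often obtained from
 * mtip_set_timeout().
 */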
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1059) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1060) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1061)  * Byte-swap ATA ID strings.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1062)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1063)  * ATA identify data contains strings in byte-swapped 16-bit words.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1064)  * They must be swapped (on all architectures) to be usable as C strings.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1065)  * This function swaps bytes in-place.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1066)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1067)  * @buf The buffer location of the string
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1068)  * @len The number of bytes to swap
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1069)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1070)  * return value
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1071)  *	None
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1072)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1073) static inline void ata_swap_string(u16 *buf, unsigned int len)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1074) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1075) 	int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1076) 	for (i = 0; i < (len/2); i++)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1077) 		be16_to_cpus(&buf[i]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1078) }
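/*
 * be16_to_cpus() swaps the two bytes of each 16-bit word in place on
 * little-endian hosts (and is a no-op on big-endian ones), so a character
 * pair stored in the identify buffer as '3','P' comes out as 'P','3' and
 * the field can then be used as a normal C string.
 */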
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1079) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1080) static void mtip_set_timeout(struct driver_data *dd,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1081) 					struct host_to_dev_fis *fis,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1082) 					unsigned int *timeout, u8 erasemode)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1083) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1084) 	switch (fis->command) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1085) 	case ATA_CMD_DOWNLOAD_MICRO:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1086) 		*timeout = 120000; /* 2 minutes */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1087) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1088) 	case ATA_CMD_SEC_ERASE_UNIT:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1089) 	case 0xFC:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1090) 		if (erasemode)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1091) 			*timeout = ((*(dd->port->identify + 90) * 2) * 60000);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1092) 		else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1093) 			*timeout = ((*(dd->port->identify + 89) * 2) * 60000);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1094) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1095) 	case ATA_CMD_STANDBYNOW1:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1096) 		*timeout = 120000;  /* 2 minutes */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1097) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1098) 	case 0xF7:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1099) 	case 0xFA:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1100) 		*timeout = 60000;  /* 60 seconds */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1101) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1102) 	case ATA_CMD_SMART:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1103) 		*timeout = 15000;  /* 15 seconds */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1104) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1105) 	default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1106) 		*timeout = MTIP_IOCTL_CMD_TIMEOUT_MS;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1107) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1108) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1109) }
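/*
 * For the secure-erase cases the timeout comes from identify word 89
 * (normal) or 90 (enhanced), which the ATA spec defines in units of two
 * minutes - hence the "* 2 * 60000" above.  Worked example: if word 89
 * reads 30, the computed timeout is 30 * 2 * 60000 = 3,600,000 ms,
 * i.e. one hour.
 */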
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1110) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1111) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1112)  * Request the device identity information.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1113)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1114)  * If a user space buffer is not specified, i.e. is NULL, the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1115)  * identify information is still read from the drive and placed
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1116)  * into the identify data buffer (@e port->identify) in the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1117)  * port data structure.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1118)  * When the identify buffer contains valid identify information @e
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1119)  * port->identify_valid is non-zero.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1120)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1121)  * @port	 Pointer to the port structure.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1122)  * @user_buffer  A user space buffer where the identify data should be
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1123)  *                    copied.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1124)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1125)  * return value
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1126)  *	0	Command completed successfully.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1127)  *	-EFAULT An error occurred while copying data to the user buffer.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1128)  *	-1	Command failed.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1129)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1130) static int mtip_get_identify(struct mtip_port *port, void __user *user_buffer)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1131) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1132) 	int rv = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1133) 	struct host_to_dev_fis fis;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1134) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1135) 	if (test_bit(MTIP_DDF_REMOVE_PENDING_BIT, &port->dd->dd_flag))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1136) 		return -EFAULT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1137) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1138) 	/* Build the FIS. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1139) 	memset(&fis, 0, sizeof(struct host_to_dev_fis));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1140) 	fis.type	= 0x27;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1141) 	fis.opts	= 1 << 7;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1142) 	fis.command	= ATA_CMD_ID_ATA;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1143) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1144) 	/* Set the identify information as invalid. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1145) 	port->identify_valid = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1146) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1147) 	/* Clear the identify information. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1148) 	memset(port->identify, 0, sizeof(u16) * ATA_ID_WORDS);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1149) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1150) 	/* Execute the command. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1151) 	if (mtip_exec_internal_command(port,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1152) 				&fis,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1153) 				5,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1154) 				port->identify_dma,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1155) 				sizeof(u16) * ATA_ID_WORDS,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1156) 				0,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1157) 				MTIP_INT_CMD_TIMEOUT_MS)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1158) 				< 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1159) 		rv = -1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1160) 		goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1161) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1162) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1163) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1164) 	 * Perform any necessary byte-swapping.  Yes, the kernel does in fact
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1165) 	 * perform field-sensitive swapping on the string fields.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1166) 	 * See the kernel use of ata_id_string() for proof of this.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1167) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1168) #ifdef __LITTLE_ENDIAN
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1169) 	ata_swap_string(port->identify + 27, 40);  /* model string*/
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1170) 	ata_swap_string(port->identify + 23, 8);   /* firmware string*/
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1171) 	ata_swap_string(port->identify + 10, 20);  /* serial# string*/
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1172) #else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1173) 	{
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1174) 		int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1175) 		for (i = 0; i < ATA_ID_WORDS; i++)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1176) 			port->identify[i] = le16_to_cpu(port->identify[i]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1177) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1178) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1179) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1180) 	/* Check security locked state */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1181) 	if (port->identify[128] & 0x4)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1182) 		set_bit(MTIP_DDF_SEC_LOCK_BIT, &port->dd->dd_flag);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1183) 	else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1184) 		clear_bit(MTIP_DDF_SEC_LOCK_BIT, &port->dd->dd_flag);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1185) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1186) 	/* Set the identify buffer as valid. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1187) 	port->identify_valid = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1188) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1189) 	if (user_buffer) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1190) 		if (copy_to_user(
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1191) 			user_buffer,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1192) 			port->identify,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1193) 			ATA_ID_WORDS * sizeof(u16))) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1194) 			rv = -EFAULT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1195) 			goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1196) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1197) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1198) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1199) out:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1200) 	return rv;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1201) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1202) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1203) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1204)  * Issue a standby immediate command to the device.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1205)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1206)  * @port Pointer to the port structure.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1207)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1208)  * return value
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1209)  *	0	Command was executed successfully.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1210)  *	-1	An error occurred while executing the command.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1211)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1212) static int mtip_standby_immediate(struct mtip_port *port)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1213) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1214) 	int rv;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1215) 	struct host_to_dev_fis	fis;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1216) 	unsigned long start;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1217) 	unsigned int timeout;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1218) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1219) 	/* Build the FIS. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1220) 	memset(&fis, 0, sizeof(struct host_to_dev_fis));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1221) 	fis.type	= 0x27;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1222) 	fis.opts	= 1 << 7;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1223) 	fis.command	= ATA_CMD_STANDBYNOW1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1224) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1225) 	mtip_set_timeout(port->dd, &fis, &timeout, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1226) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1227) 	start = jiffies;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1228) 	rv = mtip_exec_internal_command(port,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1229) 					&fis,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1230) 					5,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1231) 					0,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1232) 					0,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1233) 					0,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1234) 					timeout);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1235) 	dbg_printk(MTIP_DRV_NAME "Time taken to complete standby cmd: %d ms\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1236) 			jiffies_to_msecs(jiffies - start));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1237) 	if (rv)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1238) 		dev_warn(&port->dd->pdev->dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1239) 			"STANDBY IMMEDIATE command failed.\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1240) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1241) 	return rv;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1242) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1243) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1244) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1245)  * Issue a READ LOG EXT command to the device.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1246)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1247)  * @port	pointer to the port structure.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1248)  * @page	page number to fetch
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1249)  * @buffer	pointer to buffer
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1250)  * @buffer_dma	dma address corresponding to @buffer
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1251)  * @sectors	page length to fetch, in sectors
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1252)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1253)  * return value
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1254)  *	@rv	return value from mtip_exec_internal_command()
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1255)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1256) static int mtip_read_log_page(struct mtip_port *port, u8 page, u16 *buffer,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1257) 				dma_addr_t buffer_dma, unsigned int sectors)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1258) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1259) 	struct host_to_dev_fis fis;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1260) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1261) 	memset(&fis, 0, sizeof(struct host_to_dev_fis));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1262) 	fis.type	= 0x27;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1263) 	fis.opts	= 1 << 7;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1264) 	fis.command	= ATA_CMD_READ_LOG_EXT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1265) 	fis.sect_count	= sectors & 0xFF;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1266) 	fis.sect_cnt_ex	= (sectors >> 8) & 0xFF;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1267) 	fis.lba_low	= page;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1268) 	fis.lba_mid	= 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1269) 	fis.device	= ATA_DEVICE_OBS;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1270) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1271) 	memset(buffer, 0, sectors * ATA_SECT_SIZE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1272) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1273) 	return mtip_exec_internal_command(port,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1274) 					&fis,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1275) 					5,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1276) 					buffer_dma,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1277) 					sectors * ATA_SECT_SIZE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1278) 					0,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1279) 					MTIP_INT_CMD_TIMEOUT_MS);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1280) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1281) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1282) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1283)  * Issue a SMART READ DATA command to the device.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1284)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1285)  * @port	pointer to the port structure.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1286)  * @buffer	pointer to buffer
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1287)  * @buffer_dma	dma address corresponding to @buffer
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1288)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1289)  * return value
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1290)  *	@rv	return value from mtip_exec_internal_command()
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1291)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1292) static int mtip_get_smart_data(struct mtip_port *port, u8 *buffer,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1293) 					dma_addr_t buffer_dma)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1294) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1295) 	struct host_to_dev_fis fis;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1296) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1297) 	memset(&fis, 0, sizeof(struct host_to_dev_fis));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1298) 	fis.type	= 0x27;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1299) 	fis.opts	= 1 << 7;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1300) 	fis.command	= ATA_CMD_SMART;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1301) 	fis.features	= 0xD0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1302) 	fis.sect_count	= 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1303) 	fis.lba_mid	= 0x4F;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1304) 	fis.lba_hi	= 0xC2;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1305) 	fis.device	= ATA_DEVICE_OBS;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1306) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1307) 	return mtip_exec_internal_command(port,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1308) 					&fis,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1309) 					5,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1310) 					buffer_dma,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1311) 					ATA_SECT_SIZE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1312) 					0,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1313) 					15000);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1314) }
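/*
 * The taskfile values above are the standard SMART READ DATA signature:
 * feature 0xD0 selects the sub-command and LBA mid/high must hold the
 * 0x4F/0xC2 key, with one 512-byte sector of attribute data returned
 * into @buffer.
 */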
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1315) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1316) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1317)  * Get the value of a smart attribute
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1318)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1319)  * @port	pointer to the port structure
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1320)  * @id		attribute number
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1321)  * @attrib	pointer to return attrib information corresponding to @id
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1322)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1323)  * return value
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1324)  *	-EINVAL	NULL buffer passed or unsupported attribute @id.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1325)  *	-EPERM	Identify data not valid, SMART not supported or not enabled
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1326)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1327) static int mtip_get_smart_attr(struct mtip_port *port, unsigned int id,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1328) 						struct smart_attr *attrib)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1329) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1330) 	int rv, i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1331) 	struct smart_attr *pattr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1332) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1333) 	if (!attrib)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1334) 		return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1335) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1336) 	if (!port->identify_valid) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1337) 		dev_warn(&port->dd->pdev->dev, "IDENTIFY DATA not valid\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1338) 		return -EPERM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1339) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1340) 	if (!(port->identify[82] & 0x1)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1341) 		dev_warn(&port->dd->pdev->dev, "SMART not supported\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1342) 		return -EPERM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1343) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1344) 	if (!(port->identify[85] & 0x1)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1345) 		dev_warn(&port->dd->pdev->dev, "SMART not enabled\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1346) 		return -EPERM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1347) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1348) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1349) 	memset(port->smart_buf, 0, ATA_SECT_SIZE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1350) 	rv = mtip_get_smart_data(port, port->smart_buf, port->smart_buf_dma);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1351) 	if (rv) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1352) 		dev_warn(&port->dd->pdev->dev, "Failed to get SMART data\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1353) 		return rv;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1354) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1355) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1356) 	pattr = (struct smart_attr *)(port->smart_buf + 2);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1357) 	for (i = 0; i < 29; i++, pattr++)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1358) 		if (pattr->attr_id == id) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1359) 			memcpy(attrib, pattr, sizeof(struct smart_attr));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1360) 			break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1361) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1362) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1363) 	if (i == 29) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1364) 		dev_warn(&port->dd->pdev->dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1365) 			"Query for invalid SMART attribute ID\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1366) 		rv = -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1367) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1368) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1369) 	return rv;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1370) }
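/*
 * A minimal usage sketch (the attribute ID is only an illustration, since
 * SMART attribute numbering is vendor-defined, and the struct smart_attr
 * field names other than attr_id are assumptions - its layout lives in
 * mtip32xx.h, outside this excerpt):
 *
 *	struct smart_attr attr;
 *
 *	if (!mtip_get_smart_attr(dd->port, 194, &attr))
 *		dev_info(&dd->pdev->dev, "SMART attr %u read\n", attr.attr_id);
 */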
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1371) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1372) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1373)  * Get the drive capacity.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1374)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1375)  * @dd      Pointer to the device data structure.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1376)  * @sectors Pointer to the variable that will receive the sector count.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1377)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1378)  * return value
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1379)  *	1 Capacity was returned successfully.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1380)  *	0 The identify information is invalid.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1381)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1382) static bool mtip_hw_get_capacity(struct driver_data *dd, sector_t *sectors)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1383) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1384) 	struct mtip_port *port = dd->port;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1385) 	u64 total, raw0, raw1, raw2, raw3;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1386) 	raw0 = port->identify[100];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1387) 	raw1 = port->identify[101];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1388) 	raw2 = port->identify[102];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1389) 	raw3 = port->identify[103];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1390) 	total = raw0 | raw1<<16 | raw2<<32 | raw3<<48;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1391) 	*sectors = total;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1392) 	return (bool) !!port->identify_valid;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1393) }
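/*
 * Identify words 100-103 hold the 48-bit count of user-addressable
 * sectors, assembled least-significant word first above.  Example: a
 * drive reporting 1,000,215,216 sectors corresponds to
 * 1,000,215,216 * 512 bytes, i.e. roughly 512 GB.
 */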
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1394) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1395) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1396)  * Display the identify command data.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1397)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1398)  * @port Pointer to the port data structure.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1399)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1400)  * return value
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1401)  *	None
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1402)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1403) static void mtip_dump_identify(struct mtip_port *port)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1404) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1405) 	sector_t sectors;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1406) 	unsigned short revid;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1407) 	char cbuf[42];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1408) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1409) 	if (!port->identify_valid)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1410) 		return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1411) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1412) 	strlcpy(cbuf, (char *)(port->identify+10), 21);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1413) 	dev_info(&port->dd->pdev->dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1414) 		"Serial No.: %s\n", cbuf);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1415) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1416) 	strlcpy(cbuf, (char *)(port->identify+23), 9);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1417) 	dev_info(&port->dd->pdev->dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1418) 		"Firmware Ver.: %s\n", cbuf);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1419) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1420) 	strlcpy(cbuf, (char *)(port->identify+27), 41);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1421) 	dev_info(&port->dd->pdev->dev, "Model: %s\n", cbuf);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1422) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1423) 	dev_info(&port->dd->pdev->dev, "Security: %04x %s\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1424) 		port->identify[128],
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1425) 		port->identify[128] & 0x4 ? "(LOCKED)" : "");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1426) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1427) 	if (mtip_hw_get_capacity(port->dd, &sectors))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1428) 		dev_info(&port->dd->pdev->dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1429) 			"Capacity: %llu sectors (%llu MB)\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1430) 			 (u64)sectors,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1431) 			 ((u64)sectors) * ATA_SECT_SIZE >> 20);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1432) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1433) 	pci_read_config_word(port->dd->pdev, PCI_REVISION_ID, &revid);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1434) 	switch (revid & 0xFF) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1435) 	case 0x1:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1436) 		strlcpy(cbuf, "A0", 3);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1437) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1438) 	case 0x3:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1439) 		strlcpy(cbuf, "A2", 3);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1440) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1441) 	default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1442) 		strlcpy(cbuf, "?", 2);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1443) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1444) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1445) 	dev_info(&port->dd->pdev->dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1446) 		"Card Type: %s\n", cbuf);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1447) }
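/*
 * The copy lengths above follow the fixed ATA identify string fields:
 * serial number is 20 characters at words 10-19, firmware revision is
 * 8 characters at words 23-26, and model number is 40 characters at
 * words 27-46; the extra byte in each strlcpy() size is for the
 * terminating NUL.
 */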
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1448) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1449) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1450)  * Map the command's scatter list into the command table.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1451)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1452)  * @command Pointer to the command.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1453)  * @nents Number of scatter list entries.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1454)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1455)  * return value
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1456)  *	None
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1457)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1458) static inline void fill_command_sg(struct driver_data *dd,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1459) 				struct mtip_cmd *command,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1460) 				int nents)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1461) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1462) 	int n;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1463) 	unsigned int dma_len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1464) 	struct mtip_cmd_sg *command_sg;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1465) 	struct scatterlist *sg;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1466) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1467) 	command_sg = command->command + AHCI_CMD_TBL_HDR_SZ;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1468) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1469) 	for_each_sg(command->sg, sg, nents, n) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1470) 		dma_len = sg_dma_len(sg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1471) 		if (dma_len > 0x400000)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1472) 			dev_err(&dd->pdev->dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1473) 				"DMA segment length truncated\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1474) 		command_sg->info = cpu_to_le32((dma_len-1) & 0x3FFFFF);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1475) 		command_sg->dba	=  cpu_to_le32(sg_dma_address(sg));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1476) 		command_sg->dba_upper =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1477) 			cpu_to_le32((sg_dma_address(sg) >> 16) >> 16);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1478) 		command_sg++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1479) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1480) }
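/*
 * Each mtip_cmd_sg entry is an AHCI-style PRD: dba/dba_upper carry the low
 * and high 32 bits of the segment's DMA address, and info holds
 * (length - 1) masked to 22 bits, which is why segments above 4 MB
 * (0x400000) trigger the truncation warning above.  Example: an 8192-byte
 * segment is encoded as info = 0x1FFF.
 */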
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1481) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1482) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1483)  * @brief Execute a drive command.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1484)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1485)  * return value 0 The command completed successfully.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1486)  * return value -1 An error occurred while executing the command.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1487)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1488) static int exec_drive_task(struct mtip_port *port, u8 *command)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1489) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1490) 	struct host_to_dev_fis	fis;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1491) 	struct host_to_dev_fis *reply = (port->rxfis + RX_FIS_D2H_REG);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1492) 	unsigned int to;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1493) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1494) 	/* Build the FIS. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1495) 	memset(&fis, 0, sizeof(struct host_to_dev_fis));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1496) 	fis.type	= 0x27;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1497) 	fis.opts	= 1 << 7;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1498) 	fis.command	= command[0];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1499) 	fis.features	= command[1];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1500) 	fis.sect_count	= command[2];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1501) 	fis.sector	= command[3];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1502) 	fis.cyl_low	= command[4];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1503) 	fis.cyl_hi	= command[5];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1504) 	fis.device	= command[6] & ~0x10; /* Clear the dev bit*/
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1505) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1506) 	mtip_set_timeout(port->dd, &fis, &to, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1507) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1508) 	dbg_printk(MTIP_DRV_NAME " %s: User Command: cmd %x, feat %x, nsect %x, sect %x, lcyl %x, hcyl %x, sel %x\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1509) 		__func__,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1510) 		command[0],
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1511) 		command[1],
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1512) 		command[2],
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1513) 		command[3],
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1514) 		command[4],
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1515) 		command[5],
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1516) 		command[6]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1517) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1518) 	/* Execute the command. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1519) 	if (mtip_exec_internal_command(port,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1520) 				 &fis,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1521) 				 5,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1522) 				 0,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1523) 				 0,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1524) 				 0,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1525) 				 to) < 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1526) 		return -1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1527) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1528) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1529) 	command[0] = reply->command; /* Status*/
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1530) 	command[1] = reply->features; /* Error*/
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1531) 	command[4] = reply->cyl_low;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1532) 	command[5] = reply->cyl_hi;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1533) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1534) 	dbg_printk(MTIP_DRV_NAME " %s: Completion Status: stat %x, err %x , cyl_lo %x cyl_hi %x\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1535) 		__func__,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1536) 		command[0],
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1537) 		command[1],
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1538) 		command[4],
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1539) 		command[5]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1540) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1541) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1542) }
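/*
 * The seven-byte command[] layout used here (command, feature, nsect,
 * sector, low cyl, high cyl, select) mirrors the classic HDIO_DRIVE_TASK
 * ioctl argument block, and the status/error/cylinder bytes are written
 * back into the same array on completion; the ioctl plumbing that calls
 * this helper is outside this excerpt.
 */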
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1543) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1544) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1545)  * @brief Execute a drive command.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1546)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1547)  * @param port Pointer to the port data structure.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1548)  * @param command Pointer to the user specified command parameters.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1549)  * @param user_buffer Pointer to the user space buffer where read sector
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1550)  *                   data should be copied.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1551)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1552)  * return value 0 The command completed successfully.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1553)  * return value -EFAULT An error occurred while copying the completion
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1554)  *                 data to the user space buffer.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1555)  * return value -1 An error occurred while executing the command.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1556)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1557) static int exec_drive_command(struct mtip_port *port, u8 *command,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1558) 				void __user *user_buffer)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1559) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1560) 	struct host_to_dev_fis	fis;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1561) 	struct host_to_dev_fis *reply;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1562) 	u8 *buf = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1563) 	dma_addr_t dma_addr = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1564) 	int rv = 0, xfer_sz = command[3];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1565) 	unsigned int to;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1566) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1567) 	if (xfer_sz) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1568) 		if (!user_buffer)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1569) 			return -EFAULT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1570) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1571) 		buf = dma_alloc_coherent(&port->dd->pdev->dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1572) 				ATA_SECT_SIZE * xfer_sz,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1573) 				&dma_addr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1574) 				GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1575) 		if (!buf) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1576) 			dev_err(&port->dd->pdev->dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1577) 				"Memory allocation failed (%d bytes)\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1578) 				ATA_SECT_SIZE * xfer_sz);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1579) 			return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1580) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1581) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1582) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1583) 	/* Build the FIS. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1584) 	memset(&fis, 0, sizeof(struct host_to_dev_fis));
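	/*
	 * 0x27 is the Register FIS - Host to Device type; bit 7 of the
	 * opts byte is the 'C' bit, marking this FIS as a new command.
	 */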
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1585) 	fis.type	= 0x27;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1586) 	fis.opts	= 1 << 7;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1587) 	fis.command	= command[0];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1588) 	fis.features	= command[2];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1589) 	fis.sect_count	= command[3];
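	/*
	 * SMART commands require the signature values 0x4F and 0xC2 in
	 * the LBA mid/high (cylinder low/high) registers.
	 */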
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1590) 	if (fis.command == ATA_CMD_SMART) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1591) 		fis.sector	= command[1];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1592) 		fis.cyl_low	= 0x4F;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1593) 		fis.cyl_hi	= 0xC2;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1594) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1595) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1596) 	mtip_set_timeout(port->dd, &fis, &to, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1597) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1598) 	if (xfer_sz)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1599) 		reply = (port->rxfis + RX_FIS_PIO_SETUP);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1600) 	else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1601) 		reply = (port->rxfis + RX_FIS_D2H_REG);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1602) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1603) 	dbg_printk(MTIP_DRV_NAME
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1604) 		" %s: User Command: cmd %x, sect %x, "
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1605) 		"feat %x, sectcnt %x\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1606) 		__func__,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1607) 		command[0],
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1608) 		command[1],
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1609) 		command[2],
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1610) 		command[3]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1611) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1612) 	/* Execute the command. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1613) 	if (mtip_exec_internal_command(port,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1614) 				&fis,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1615) 				 5,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1616) 				 (xfer_sz ? dma_addr : 0),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1617) 				 (xfer_sz ? ATA_SECT_SIZE * xfer_sz : 0),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1618) 				 0,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1619) 				 to)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1620) 				 < 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1621) 		rv = -EFAULT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1622) 		goto exit_drive_command;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1623) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1624) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1625) 	/* Collect the completion status. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1626) 	command[0] = reply->command; /* Status */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1627) 	command[1] = reply->features; /* Error */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1628) 	command[2] = reply->sect_count;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1629) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1630) 	dbg_printk(MTIP_DRV_NAME
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1631) 		" %s: Completion Status: stat %x, "
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1632) 		"err %x, nsect %x\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1633) 		__func__,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1634) 		command[0],
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1635) 		command[1],
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1636) 		command[2]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1637) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1638) 	if (xfer_sz) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1639) 		if (copy_to_user(user_buffer,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1640) 				 buf,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1641) 				 ATA_SECT_SIZE * command[3])) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1642) 			rv = -EFAULT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1643) 			goto exit_drive_command;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1644) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1645) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1646) exit_drive_command:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1647) 	if (buf)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1648) 		dma_free_coherent(&port->dd->pdev->dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1649) 				ATA_SECT_SIZE * xfer_sz, buf, dma_addr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1650) 	return rv;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1651) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1652) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1653) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1654)  *  Indicates whether a command has a single sector payload.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1655)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1656)  *  @command The command opcode sent to the device.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1657)  *  @features The features register value accompanying the command.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1658)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1659)  *  return value
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1660)  *	1	command is one that always has a single sector payload,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1661)  *		regardless of the value in the Sector Count field.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1662)  *      0       otherwise
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1663)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1664)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1665) static unsigned int implicit_sector(unsigned char command,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1666) 				    unsigned char features)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1667) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1668) 	unsigned int rv = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1669) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1670) 	/* list of commands that have an implicit sector count of 1 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1671) 	switch (command) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1672) 	case ATA_CMD_SEC_SET_PASS:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1673) 	case ATA_CMD_SEC_UNLOCK:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1674) 	case ATA_CMD_SEC_ERASE_PREP:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1675) 	case ATA_CMD_SEC_ERASE_UNIT:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1676) 	case ATA_CMD_SEC_FREEZE_LOCK:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1677) 	case ATA_CMD_SEC_DISABLE_PASS:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1678) 	case ATA_CMD_PMP_READ:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1679) 	case ATA_CMD_PMP_WRITE:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1680) 		rv = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1681) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1682) 	case ATA_CMD_SET_MAX:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1683) 		if (features == ATA_SET_MAX_UNLOCK)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1684) 			rv = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1685) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1686) 	case ATA_CMD_SMART:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1687) 		if ((features == ATA_SMART_READ_VALUES) ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1688) 				(features == ATA_SMART_READ_THRESHOLDS))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1689) 			rv = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1690) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1691) 	case ATA_CMD_CONF_OVERLAY:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1692) 		if ((features == ATA_DCO_IDENTIFY) ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1693) 				(features == ATA_DCO_SET))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1694) 			rv = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1695) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1696) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1697) 	return rv;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1698) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1699) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1700) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1701)  * Executes a taskfile
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1702)  * See ide_taskfile_ioctl() for derivation
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1703)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1704) static int exec_drive_taskfile(struct driver_data *dd,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1705) 			       void __user *buf,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1706) 			       ide_task_request_t *req_task,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1707) 			       int outtotal)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1708) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1709) 	struct host_to_dev_fis	fis;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1710) 	struct host_to_dev_fis *reply;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1711) 	u8 *outbuf = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1712) 	u8 *inbuf = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1713) 	dma_addr_t outbuf_dma = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1714) 	dma_addr_t inbuf_dma = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1715) 	dma_addr_t dma_buffer = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1716) 	int err = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1717) 	unsigned int taskin = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1718) 	unsigned int taskout = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1719) 	u8 nsect = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1720) 	unsigned int timeout;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1721) 	unsigned int force_single_sector;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1722) 	unsigned int transfer_size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1723) 	unsigned long task_file_data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1724) 	int intotal = outtotal + req_task->out_size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1725) 	int erasemode = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1726) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1727) 	taskout = req_task->out_size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1728) 	taskin = req_task->in_size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1729) 	/* 130560 = 512 * 0xFF */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1730) 	if (taskin > 130560 || taskout > 130560)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1731) 		return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1732) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1733) 	if (taskout) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1734) 		outbuf = memdup_user(buf + outtotal, taskout);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1735) 		if (IS_ERR(outbuf))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1736) 			return PTR_ERR(outbuf);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1737) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1738) 		outbuf_dma = dma_map_single(&dd->pdev->dev, outbuf,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1739) 					    taskout, DMA_TO_DEVICE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1740) 		if (dma_mapping_error(&dd->pdev->dev, outbuf_dma)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1741) 			err = -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1742) 			goto abort;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1743) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1744) 		dma_buffer = outbuf_dma;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1745) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1746) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1747) 	if (taskin) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1748) 		inbuf = memdup_user(buf + intotal, taskin);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1749) 		if (IS_ERR(inbuf)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1750) 			err = PTR_ERR(inbuf);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1751) 			inbuf = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1752) 			goto abort;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1753) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1754) 		inbuf_dma = dma_map_single(&dd->pdev->dev, inbuf,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1755) 					   taskin, DMA_FROM_DEVICE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1756) 		if (dma_mapping_error(&dd->pdev->dev, inbuf_dma)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1757) 			err = -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1758) 			goto abort;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1759) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1760) 		dma_buffer = inbuf_dma;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1761) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1762) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1763) 	/* only supports PIO and non-data commands from this ioctl. */
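	/*
	 * PIO transfers report their ending status through the PIO Setup
	 * FIS; non-data commands report through the D2H Register FIS.
	 */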
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1764) 	switch (req_task->data_phase) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1765) 	case TASKFILE_OUT:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1766) 		nsect = taskout / ATA_SECT_SIZE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1767) 		reply = (dd->port->rxfis + RX_FIS_PIO_SETUP);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1768) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1769) 	case TASKFILE_IN:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1770) 		reply = (dd->port->rxfis + RX_FIS_PIO_SETUP);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1771) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1772) 	case TASKFILE_NO_DATA:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1773) 		reply = (dd->port->rxfis + RX_FIS_D2H_REG);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1774) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1775) 	default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1776) 		err = -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1777) 		goto abort;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1778) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1779) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1780) 	/* Build the FIS. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1781) 	memset(&fis, 0, sizeof(struct host_to_dev_fis));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1782) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1783) 	fis.type	= 0x27;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1784) 	fis.opts	= 1 << 7;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1785) 	fis.command	= req_task->io_ports[7];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1786) 	fis.features	= req_task->io_ports[1];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1787) 	fis.sect_count	= req_task->io_ports[2];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1788) 	fis.lba_low	= req_task->io_ports[3];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1789) 	fis.lba_mid	= req_task->io_ports[4];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1790) 	fis.lba_hi	= req_task->io_ports[5];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1791) 	/* Clear the dev bit */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1792) 	fis.device	= req_task->io_ports[6] & ~0x10;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1793) 
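	/*
	 * Bit 0 of out_flags requests an extended (48-bit/HOB) taskfile,
	 * in which case the hob_ports values are copied into the extended
	 * fields of the FIS.
	 */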
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1794) 	if ((req_task->in_flags.all == 0) && (req_task->out_flags.all & 1)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1795) 		req_task->in_flags.all	=
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1796) 			IDE_TASKFILE_STD_IN_FLAGS |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1797) 			(IDE_HOB_STD_IN_FLAGS << 8);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1798) 		fis.lba_low_ex		= req_task->hob_ports[3];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1799) 		fis.lba_mid_ex		= req_task->hob_ports[4];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1800) 		fis.lba_hi_ex		= req_task->hob_ports[5];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1801) 		fis.features_ex		= req_task->hob_ports[1];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1802) 		fis.sect_cnt_ex		= req_task->hob_ports[2];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1803) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1804) 	} else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1805) 		req_task->in_flags.all = IDE_TASKFILE_STD_IN_FLAGS;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1806) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1807) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1808) 	force_single_sector = implicit_sector(fis.command, fis.features);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1809) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1810) 	if ((taskin || taskout) && (!fis.sect_count)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1811) 		if (nsect)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1812) 			fis.sect_count = nsect;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1813) 		else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1814) 			if (!force_single_sector) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1815) 				dev_warn(&dd->pdev->dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1816) 					"data movement but "
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1817) 					"sect_count is 0\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1818) 				err = -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1819) 				goto abort;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1820) 			}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1821) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1822) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1823) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1824) 	dbg_printk(MTIP_DRV_NAME
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1825) 		" %s: cmd %x, feat %x, nsect %x,"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1826) 		" sect/lbal %x, lcyl/lbam %x, hcyl/lbah %x,"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1827) 		" head/dev %x\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1828) 		__func__,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1829) 		fis.command,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1830) 		fis.features,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1831) 		fis.sect_count,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1832) 		fis.lba_low,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1833) 		fis.lba_mid,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1834) 		fis.lba_hi,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1835) 		fis.device);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1836) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1837) 	/* Check for erase mode support during secure erase. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1838) 	if ((fis.command == ATA_CMD_SEC_ERASE_UNIT) && outbuf &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1839) 					(outbuf[0] & MTIP_SEC_ERASE_MODE)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1840) 		erasemode = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1841) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1842) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1843) 	mtip_set_timeout(dd, &fis, &timeout, erasemode);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1844) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1845) 	/* Determine the correct transfer size. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1846) 	if (force_single_sector)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1847) 		transfer_size = ATA_SECT_SIZE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1848) 	else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1849) 		transfer_size = ATA_SECT_SIZE * fis.sect_count;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1850) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1851) 	/* Execute the command. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1852) 	if (mtip_exec_internal_command(dd->port,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1853) 				 &fis,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1854) 				 5,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1855) 				 dma_buffer,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1856) 				 transfer_size,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1857) 				 0,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1858) 				 timeout) < 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1859) 		err = -EIO;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1860) 		goto abort;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1861) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1862) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1863) 	task_file_data = readl(dd->port->mmio+PORT_TFDATA);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1864) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1865) 	if ((req_task->data_phase == TASKFILE_IN) && !(task_file_data & 1)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1866) 		reply = dd->port->rxfis + RX_FIS_PIO_SETUP;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1867) 		req_task->io_ports[7] = reply->control;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1868) 	} else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1869) 		reply = dd->port->rxfis + RX_FIS_D2H_REG;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1870) 		req_task->io_ports[7] = reply->command;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1871) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1872) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1873) 	/* Reclaim the DMA buffers. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1874) 	if (inbuf_dma)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1875) 		dma_unmap_single(&dd->pdev->dev, inbuf_dma, taskin,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1876) 				 DMA_FROM_DEVICE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1877) 	if (outbuf_dma)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1878) 		dma_unmap_single(&dd->pdev->dev, outbuf_dma, taskout,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1879) 				 DMA_TO_DEVICE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1880) 	inbuf_dma  = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1881) 	outbuf_dma = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1882) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1883) 	/* Return the ATA registers to the caller. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1884) 	req_task->io_ports[1] = reply->features;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1885) 	req_task->io_ports[2] = reply->sect_count;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1886) 	req_task->io_ports[3] = reply->lba_low;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1887) 	req_task->io_ports[4] = reply->lba_mid;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1888) 	req_task->io_ports[5] = reply->lba_hi;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1889) 	req_task->io_ports[6] = reply->device;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1890) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1891) 	if (req_task->out_flags.all & 1)  {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1892) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1893) 		req_task->hob_ports[3] = reply->lba_low_ex;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1894) 		req_task->hob_ports[4] = reply->lba_mid_ex;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1895) 		req_task->hob_ports[5] = reply->lba_hi_ex;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1896) 		req_task->hob_ports[1] = reply->features_ex;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1897) 		req_task->hob_ports[2] = reply->sect_cnt_ex;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1898) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1899) 	dbg_printk(MTIP_DRV_NAME
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1900) 		" %s: Completion: stat %x, "
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1901) 		"err %x, sect_cnt %x, lbalo %x, "
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1902) 		"lbamid %x, lbahi %x, dev %x\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1903) 		__func__,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1904) 		req_task->io_ports[7],
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1905) 		req_task->io_ports[1],
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1906) 		req_task->io_ports[2],
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1907) 		req_task->io_ports[3],
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1908) 		req_task->io_ports[4],
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1909) 		req_task->io_ports[5],
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1910) 		req_task->io_ports[6]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1911) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1912) 	if (taskout) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1913) 		if (copy_to_user(buf + outtotal, outbuf, taskout)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1914) 			err = -EFAULT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1915) 			goto abort;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1916) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1917) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1918) 	if (taskin) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1919) 		if (copy_to_user(buf + intotal, inbuf, taskin)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1920) 			err = -EFAULT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1921) 			goto abort;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1922) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1923) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1924) abort:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1925) 	if (inbuf_dma)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1926) 		dma_unmap_single(&dd->pdev->dev, inbuf_dma, taskin,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1927) 				 DMA_FROM_DEVICE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1928) 	if (outbuf_dma)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1929) 		dma_unmap_single(&dd->pdev->dev, outbuf_dma, taskout,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1930) 				 DMA_TO_DEVICE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1931) 	kfree(outbuf);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1932) 	kfree(inbuf);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1933) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1934) 	return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1935) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1936) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1937) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1938)  * Handle IOCTL calls from the Block Layer.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1939)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1940)  * This function is called by the Block Layer when it receives an IOCTL
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1941)  * command that it does not understand. If the IOCTL command is not
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1942)  * supported, this function returns -EINVAL.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1943)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1944)  * @dd  Pointer to the driver data structure.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1945)  * @cmd IOCTL command passed from the Block Layer.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1946)  * @arg IOCTL argument passed from the Block Layer.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1947)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1948)  * return value
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1949)  *	0	The IOCTL completed successfully.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1950)  *	-EINVAL The specified command is not supported.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1951)  *	-EFAULT An error occurred copying data to a user space buffer.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1952)  *	-EIO	An error occurred while executing the command.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1953)  */
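/*
 * A minimal, untested sketch of how user space typically reaches the
 * HDIO_DRIVE_CMD path below (here a SMART READ DATA; "fd" is assumed to
 * be an open handle on the block device):
 *
 *	unsigned char args[4 + 512] = { 0xB0, 0, 0xD0, 1 };
 *	if (ioctl(fd, HDIO_DRIVE_CMD, args) == 0) {
 *		// args[0..2] now hold status/error/count and
 *		// args[4..515] the 512-byte SMART data sector
 *	}
 */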
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1954) static int mtip_hw_ioctl(struct driver_data *dd, unsigned int cmd,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1955) 			 unsigned long arg)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1956) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1957) 	switch (cmd) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1958) 	case HDIO_GET_IDENTITY:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1959) 	{
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1960) 		if (copy_to_user((void __user *)arg, dd->port->identify,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1961) 						sizeof(u16) * ATA_ID_WORDS))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1962) 			return -EFAULT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1963) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1964) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1965) 	case HDIO_DRIVE_CMD:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1966) 	{
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1967) 		u8 drive_command[4];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1968) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1969) 		/* Copy the user command info to our buffer. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1970) 		if (copy_from_user(drive_command,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1971) 					 (void __user *) arg,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1972) 					 sizeof(drive_command)))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1973) 			return -EFAULT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1974) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1975) 		/* Execute the drive command. */
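		/*
		 * Any sector data returned by the command is copied to
		 * arg + 4, immediately after the 4-byte argument block.
		 */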
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1976) 		if (exec_drive_command(dd->port,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1977) 					 drive_command,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1978) 					 (void __user *) (arg+4)))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1979) 			return -EIO;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1980) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1981) 		/* Copy the status back to the user's buffer. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1982) 		if (copy_to_user((void __user *) arg,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1983) 					 drive_command,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1984) 					 sizeof(drive_command)))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1985) 			return -EFAULT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1986) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1987) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1988) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1989) 	case HDIO_DRIVE_TASK:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1990) 	{
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1991) 		u8 drive_command[7];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1992) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1993) 		/* Copy the user command info to our buffer. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1994) 		if (copy_from_user(drive_command,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1995) 					 (void __user *) arg,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1996) 					 sizeof(drive_command)))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1997) 			return -EFAULT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1998) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1999) 		/* Execute the drive command. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2000) 		if (exec_drive_task(dd->port, drive_command))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2001) 			return -EIO;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2002) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2003) 		/* Copy the status back to the user's buffer. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2004) 		if (copy_to_user((void __user *) arg,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2005) 					 drive_command,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2006) 					 sizeof(drive_command)))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2007) 			return -EFAULT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2008) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2009) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2010) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2011) 	case HDIO_DRIVE_TASKFILE: {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2012) 		ide_task_request_t req_task;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2013) 		int ret, outtotal;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2014) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2015) 		if (copy_from_user(&req_task, (void __user *) arg,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2016) 					sizeof(req_task)))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2017) 			return -EFAULT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2018) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2019) 		outtotal = sizeof(req_task);
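		/*
		 * The data payload, if any, follows the ide_task_request_t
		 * structure in the user buffer, hence the sizeof() offset.
		 */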
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2020) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2021) 		ret = exec_drive_taskfile(dd, (void __user *) arg,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2022) 						&req_task, outtotal);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2023) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2024) 		if (copy_to_user((void __user *) arg, &req_task,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2025) 							sizeof(req_task)))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2026) 			return -EFAULT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2027) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2028) 		return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2029) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2030) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2031) 	default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2032) 		return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2033) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2034) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2035) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2036) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2037) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2038)  * Submit an I/O to the hardware.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2039)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2040)  * This function is called by the block layer to issue an I/O request
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2041)  * to the device. The request's scatter list is mapped for DMA, an NCQ
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2042)  * read or write FIS is built for the request, and the command is then
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2043)  * issued to the hardware, or deferred if an internal command or error
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2044)  * handling has paused I/O.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2045)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2046)  * @dd      Pointer to the driver data structure.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2047)  * @rq      Pointer to the block layer request being issued.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2048)  * @command Pointer to the mtip_cmd structure backing this request.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2049)  * @hctx    Pointer to the hardware queue context the request was
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2050)  *	    submitted on.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2051)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2052)  * return value
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2053)  *	None
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2054)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2057) static void mtip_hw_submit_io(struct driver_data *dd, struct request *rq,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2058) 			      struct mtip_cmd *command,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2059) 			      struct blk_mq_hw_ctx *hctx)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2060) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2061) 	struct mtip_cmd_hdr *hdr =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2062) 		dd->port->command_list + sizeof(struct mtip_cmd_hdr) * rq->tag;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2063) 	struct host_to_dev_fis	*fis;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2064) 	struct mtip_port *port = dd->port;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2065) 	int dma_dir = rq_data_dir(rq) == READ ? DMA_FROM_DEVICE : DMA_TO_DEVICE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2066) 	u64 start = blk_rq_pos(rq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2067) 	unsigned int nsect = blk_rq_sectors(rq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2068) 	unsigned int nents;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2069) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2070) 	/* Map the scatter list for DMA access */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2071) 	nents = blk_rq_map_sg(hctx->queue, rq, command->sg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2072) 	nents = dma_map_sg(&dd->pdev->dev, command->sg, nents, dma_dir);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2073) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2074) 	prefetch(&port->flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2075) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2076) 	command->scatter_ents = nents;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2077) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2078) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2079) 	 * The number of retries for this command before it is
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2080) 	 * reported as a failure to the upper layers.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2081) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2082) 	command->retries = MTIP_MAX_RETRIES;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2083) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2084) 	/* Fill out fis */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2085) 	fis = command->command;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2086) 	fis->type        = 0x27;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2087) 	fis->opts        = 1 << 7;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2088) 	if (dma_dir == DMA_FROM_DEVICE)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2089) 		fis->command = ATA_CMD_FPDMA_READ;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2090) 	else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2091) 		fis->command = ATA_CMD_FPDMA_WRITE;
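	/*
	 * Split the 48-bit starting LBA across the standard and extended
	 * LBA fields of the FIS.
	 */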
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2092) 	fis->lba_low     = start & 0xFF;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2093) 	fis->lba_mid     = (start >> 8) & 0xFF;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2094) 	fis->lba_hi      = (start >> 16) & 0xFF;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2095) 	fis->lba_low_ex  = (start >> 24) & 0xFF;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2096) 	fis->lba_mid_ex  = (start >> 32) & 0xFF;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2097) 	fis->lba_hi_ex   = (start >> 40) & 0xFF;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2098) 	fis->device	 = 1 << 6;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2099) 	fis->features    = nsect & 0xFF;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2100) 	fis->features_ex = (nsect >> 8) & 0xFF;
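	/*
	 * For FPDMA (NCQ) commands the sector count travels in the
	 * features fields above; the queue tag is placed in bits 7:3 of
	 * the sector count field instead.
	 */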
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2101) 	fis->sect_count  = ((rq->tag << 3) | (rq->tag >> 5));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2102) 	fis->sect_cnt_ex = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2103) 	fis->control     = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2104) 	fis->res2        = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2105) 	fis->res3        = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2106) 	fill_command_sg(dd, command, nents);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2107) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2108) 	if (unlikely(command->unaligned))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2109) 		fis->device |= 1 << 7;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2110) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2111) 	/* Populate the command header */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2112) 	hdr->ctba = cpu_to_le32(command->command_dma & 0xFFFFFFFF);
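	/*
	 * The upper 32 bits of the command table address are written only
	 * when the host supports 64-bit addressing; the double 16-bit
	 * shift avoids a shift-width warning when dma_addr_t is 32 bits.
	 */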
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2113) 	if (test_bit(MTIP_PF_HOST_CAP_64, &dd->port->flags))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2114) 		hdr->ctbau = cpu_to_le32((command->command_dma >> 16) >> 16);
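	/*
	 * opts (command header DW0): PRDT entry count in bits 31:16, the
	 * command FIS length (5 dwords) in bits 4:0, plus the prefetch flag.
	 */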
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2115) 	hdr->opts = cpu_to_le32((nents << 16) | 5 | AHCI_CMD_PREFETCH);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2116) 	hdr->byte_count = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2117) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2118) 	command->direction = dma_dir;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2119) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2120) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2121) 	 * Defer this command (queue it for later issue) if an internal
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2122) 	 * command is in progress or error handling is active.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2123) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2124) 	if (unlikely(port->flags & MTIP_PF_PAUSE_IO)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2125) 		set_bit(rq->tag, port->cmds_to_issue);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2126) 		set_bit(MTIP_PF_ISSUE_CMDS_BIT, &port->flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2127) 		return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2128) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2129) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2130) 	/* Issue the command to the hardware */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2131) 	mtip_issue_ncq_command(port, rq->tag);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2132) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2133) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2134) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2135)  * Sysfs status dump.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2136)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2137)  * @dev  Pointer to the device structure, passed by the kernel.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2138)  * @attr Pointer to the device_attribute structure passed by the kernel.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2139)  * @buf  Pointer to the char buffer that will receive the stats info.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2140)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2141)  * return value
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2142)  *	The size, in bytes, of the data copied into buf.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2143)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2144) static ssize_t mtip_hw_show_status(struct device *dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2145) 				struct device_attribute *attr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2146) 				char *buf)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2147) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2148) 	struct driver_data *dd = dev_to_disk(dev)->private_data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2149) 	int size = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2150) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2151) 	if (test_bit(MTIP_DDF_OVER_TEMP_BIT, &dd->dd_flag))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2152) 		size += sprintf(buf, "%s", "thermal_shutdown\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2153) 	else if (test_bit(MTIP_DDF_WRITE_PROTECT_BIT, &dd->dd_flag))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2154) 		size += sprintf(buf, "%s", "write_protect\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2155) 	else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2156) 		size += sprintf(buf, "%s", "online\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2157) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2158) 	return size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2159) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2160) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2161) static DEVICE_ATTR(status, 0444, mtip_hw_show_status, NULL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2162) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2163) /* debugfs entries */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2164) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2165) static ssize_t show_device_status(struct device_driver *drv, char *buf)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2166) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2167) 	int size = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2168) 	struct driver_data *dd, *tmp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2169) 	unsigned long flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2170) 	char id_buf[42];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2171) 	u16 status = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2172) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2173) 	spin_lock_irqsave(&dev_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2174) 	size += sprintf(&buf[size], "Devices Present:\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2175) 	list_for_each_entry_safe(dd, tmp, &online_list, online_list) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2176) 		if (dd->pdev) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2177) 			if (dd->port &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2178) 			    dd->port->identify &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2179) 			    dd->port->identify_valid) {
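				/*
				 * Identify words 10-19 hold the ASCII serial
				 * number; word 141 is a vendor-specific word
				 * used below as the FTL rebuild percentage.
				 */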
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2180) 				strlcpy(id_buf,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2181) 					(char *) (dd->port->identify + 10), 21);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2182) 				status = *(dd->port->identify + 141);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2183) 			} else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2184) 				memset(id_buf, 0, 42);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2185) 				status = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2186) 			}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2187) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2188) 			if (dd->port &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2189) 			    test_bit(MTIP_PF_REBUILD_BIT, &dd->port->flags)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2190) 				size += sprintf(&buf[size],
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2191) 					" device %s %s (ftl rebuild %d %%)\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2192) 					dev_name(&dd->pdev->dev),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2193) 					id_buf,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2194) 					status);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2195) 			} else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2196) 				size += sprintf(&buf[size],
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2197) 					" device %s %s\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2198) 					dev_name(&dd->pdev->dev),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2199) 					id_buf);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2200) 			}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2201) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2202) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2203) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2204) 	size += sprintf(&buf[size], "Devices Being Removed:\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2205) 	list_for_each_entry_safe(dd, tmp, &removing_list, remove_list) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2206) 		if (dd->pdev) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2207) 			if (dd->port &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2208) 			    dd->port->identify &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2209) 			    dd->port->identify_valid) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2210) 				strlcpy(id_buf,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2211) 					(char *) (dd->port->identify+10), 21);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2212) 				status = *(dd->port->identify + 141);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2213) 			} else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2214) 				memset(id_buf, 0, 42);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2215) 				status = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2216) 			}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2217) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2218) 			if (dd->port &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2219) 			    test_bit(MTIP_PF_REBUILD_BIT, &dd->port->flags)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2220) 				size += sprintf(&buf[size],
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2221) 					" device %s %s (ftl rebuild %d %%)\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2222) 					dev_name(&dd->pdev->dev),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2223) 					id_buf,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2224) 					status);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2225) 			} else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2226) 				size += sprintf(&buf[size],
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2227) 					" device %s %s\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2228) 					dev_name(&dd->pdev->dev),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2229) 					id_buf);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2230) 			}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2231) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2232) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2233) 	spin_unlock_irqrestore(&dev_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2234) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2235) 	return size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2236) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2237) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2238) static ssize_t mtip_hw_read_device_status(struct file *f, char __user *ubuf,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2239) 						size_t len, loff_t *offset)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2240) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2241) 	struct driver_data *dd =  (struct driver_data *)f->private_data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2242) 	int size = *offset;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2243) 	char *buf;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2244) 	int rv = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2245) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2246) 	if (!len || *offset)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2247) 		return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2248) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2249) 	buf = kzalloc(MTIP_DFS_MAX_BUF_SIZE, GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2250) 	if (!buf) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2251) 		dev_err(&dd->pdev->dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2252) 			"Memory allocation: status buffer\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2253) 		return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2254) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2255) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2256) 	size += show_device_status(NULL, buf);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2257) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2258) 	*offset = size <= len ? size : len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2259) 	size = copy_to_user(ubuf, buf, *offset);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2260) 	if (size)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2261) 		rv = -EFAULT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2262) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2263) 	kfree(buf);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2264) 	return rv ? rv : *offset;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2265) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2266) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2267) static ssize_t mtip_hw_read_registers(struct file *f, char __user *ubuf,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2268) 				  size_t len, loff_t *offset)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2269) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2270) 	struct driver_data *dd =  (struct driver_data *)f->private_data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2271) 	char *buf;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2272) 	u32 group_allocated;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2273) 	int size = *offset;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2274) 	int n, rv = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2275) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2276) 	if (!len || size)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2277) 		return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2278) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2279) 	buf = kzalloc(MTIP_DFS_MAX_BUF_SIZE, GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2280) 	if (!buf) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2281) 		dev_err(&dd->pdev->dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2282) 			"Memory allocation: register buffer\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2283) 		return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2284) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2285) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2286) 	size += sprintf(&buf[size], "H/ S ACTive      : [ 0x");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2287) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2288) 	for (n = dd->slot_groups-1; n >= 0; n--)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2289) 		size += sprintf(&buf[size], "%08X ",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2290) 					 readl(dd->port->s_active[n]));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2291) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2292) 	size += sprintf(&buf[size], "]\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2293) 	size += sprintf(&buf[size], "H/ Command Issue : [ 0x");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2294) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2295) 	for (n = dd->slot_groups-1; n >= 0; n--)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2296) 		size += sprintf(&buf[size], "%08X ",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2297) 					readl(dd->port->cmd_issue[n]));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2298) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2299) 	size += sprintf(&buf[size], "]\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2300) 	size += sprintf(&buf[size], "H/ Completed     : [ 0x");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2301) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2302) 	for (n = dd->slot_groups-1; n >= 0; n--)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2303) 		size += sprintf(&buf[size], "%08X ",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2304) 				readl(dd->port->completed[n]));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2305) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2306) 	size += sprintf(&buf[size], "]\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2307) 	size += sprintf(&buf[size], "H/ PORT IRQ STAT : [ 0x%08X ]\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2308) 				readl(dd->port->mmio + PORT_IRQ_STAT));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2309) 	size += sprintf(&buf[size], "H/ HOST IRQ STAT : [ 0x%08X ]\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2310) 				readl(dd->mmio + HOST_IRQ_STAT));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2311) 	size += sprintf(&buf[size], "\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2312) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2313) 	size += sprintf(&buf[size], "L/ Commands in Q : [ 0x");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2314) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2315) 	for (n = dd->slot_groups-1; n >= 0; n--) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2316) 		if (sizeof(long) > sizeof(u32))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2317) 			group_allocated =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2318) 				dd->port->cmds_to_issue[n/2] >> (32*(n&1));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2319) 		else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2320) 			group_allocated = dd->port->cmds_to_issue[n];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2321) 		size += sprintf(&buf[size], "%08X ", group_allocated);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2322) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2323) 	size += sprintf(&buf[size], "]\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2324) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2325) 	*offset = size <= len ? size : len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2326) 	size = copy_to_user(ubuf, buf, *offset);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2327) 	if (size)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2328) 		rv = -EFAULT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2329) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2330) 	kfree(buf);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2331) 	return rv ? rv : *offset;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2332) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2333) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2334) static ssize_t mtip_hw_read_flags(struct file *f, char __user *ubuf,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2335) 				  size_t len, loff_t *offset)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2336) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2337) 	struct driver_data *dd =  (struct driver_data *)f->private_data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2338) 	char *buf;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2339) 	int size = *offset;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2340) 	int rv = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2341) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2342) 	if (!len || size)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2343) 		return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2344) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2345) 	buf = kzalloc(MTIP_DFS_MAX_BUF_SIZE, GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2346) 	if (!buf) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2347) 		dev_err(&dd->pdev->dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2348) 			"Memory allocation: flag buffer\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2349) 		return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2350) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2351) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2352) 	size += sprintf(&buf[size], "Flag-port : [ %08lX ]\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2353) 							dd->port->flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2354) 	size += sprintf(&buf[size], "Flag-dd   : [ %08lX ]\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2355) 							dd->dd_flag);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2356) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2357) 	*offset = size <= len ? size : len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2358) 	size = copy_to_user(ubuf, buf, *offset);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2359) 	if (size)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2360) 		rv = -EFAULT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2361) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2362) 	kfree(buf);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2363) 	return rv ? rv : *offset;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2364) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2365) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2366) static const struct file_operations mtip_device_status_fops = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2367) 	.owner  = THIS_MODULE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2368) 	.open   = simple_open,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2369) 	.read   = mtip_hw_read_device_status,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2370) 	.llseek = no_llseek,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2371) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2372) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2373) static const struct file_operations mtip_regs_fops = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2374) 	.owner  = THIS_MODULE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2375) 	.open   = simple_open,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2376) 	.read   = mtip_hw_read_registers,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2377) 	.llseek = no_llseek,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2378) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2379) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2380) static const struct file_operations mtip_flags_fops = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2381) 	.owner  = THIS_MODULE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2382) 	.open   = simple_open,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2383) 	.read   = mtip_hw_read_flags,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2384) 	.llseek = no_llseek,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2385) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2386) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2387) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2388)  * Create the sysfs related attributes.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2389)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2390)  * @dd   Pointer to the driver data structure.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2391)  * @kobj Pointer to the kobj for the block device.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2392)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2393)  * return value
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2394)  *	0	Operation completed successfully.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2395)  *	-EINVAL Invalid parameter.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2396)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2397) static int mtip_hw_sysfs_init(struct driver_data *dd, struct kobject *kobj)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2398) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2399) 	if (!kobj || !dd)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2400) 		return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2401) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2402) 	if (sysfs_create_file(kobj, &dev_attr_status.attr))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2403) 		dev_warn(&dd->pdev->dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2404) 			"Error creating 'status' sysfs entry\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2405) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2406) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2407) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2408) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2409)  * Remove the sysfs related attributes.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2410)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2411)  * @dd   Pointer to the driver data structure.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2412)  * @kobj Pointer to the kobj for the block device.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2413)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2414)  * return value
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2415)  *	0	Operation completed successfully.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2416)  *	-EINVAL Invalid parameter.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2417)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2418) static int mtip_hw_sysfs_exit(struct driver_data *dd, struct kobject *kobj)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2419) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2420) 	if (!kobj || !dd)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2421) 		return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2422) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2423) 	sysfs_remove_file(kobj, &dev_attr_status.attr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2424) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2425) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2426) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2427) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2428) static int mtip_hw_debugfs_init(struct driver_data *dd)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2429) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2430) 	if (!dfs_parent)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2431) 		return -1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2432) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2433) 	dd->dfs_node = debugfs_create_dir(dd->disk->disk_name, dfs_parent);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2434) 	if (IS_ERR_OR_NULL(dd->dfs_node)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2435) 		dev_warn(&dd->pdev->dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2436) 			"Error creating node %s under debugfs\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2437) 						dd->disk->disk_name);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2438) 		dd->dfs_node = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2439) 		return -1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2440) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2441) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2442) 	debugfs_create_file("flags", 0444, dd->dfs_node, dd, &mtip_flags_fops);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2443) 	debugfs_create_file("registers", 0444, dd->dfs_node, dd,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2444) 			    &mtip_regs_fops);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2445) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2446) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2447) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2448) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2449) static void mtip_hw_debugfs_exit(struct driver_data *dd)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2450) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2451) 	debugfs_remove_recursive(dd->dfs_node);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2452) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2453) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2454) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2455)  * Perform any init/resume time hardware setup
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2456)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2457)  * @dd Pointer to the driver data structure.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2458)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2459)  * return value
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2460)  *	None
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2461)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2462) static inline void hba_setup(struct driver_data *dd)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2463) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2464) 	u32 hwdata;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2465) 	hwdata = readl(dd->mmio + HOST_HSORG);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2466) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2467) 	/* interrupt bug workaround: use only 1 IS bit.*/
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2468) 	writel(hwdata |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2469) 		HSORG_DISABLE_SLOTGRP_INTR |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2470) 		HSORG_DISABLE_SLOTGRP_PXIS,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2471) 		dd->mmio + HOST_HSORG);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2472) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2473) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2474) static int mtip_device_unaligned_constrained(struct driver_data *dd)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2475) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2476) 	return (dd->pdev->device == P420M_DEVICE_ID ? 1 : 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2477) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2478) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2479) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2480)  * Detect the details of the product, and store anything needed
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2481)  * into the driver data structure.  This includes product type and
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2482)  * version and number of slot groups.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2483)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2484)  * @dd Pointer to the driver data structure.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2485)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2486)  * return value
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2487)  *	None
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2488)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2489) static void mtip_detect_product(struct driver_data *dd)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2490) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2491) 	u32 hwdata;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2492) 	unsigned int rev, slotgroups;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2493) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2494) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2495) 	 * HBA base + 0xFC [15:0] - vendor-specific hardware interface
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2496) 	 * info register:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2497) 	 * [15:8] hardware/software interface rev#
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2498) 	 * [   3] asic-style interface
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2499) 	 * [ 2:0] number of slot groups, minus 1 (only valid for asic-style).
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2500) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2501) 	hwdata = readl(dd->mmio + HOST_HSORG);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2502) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2503) 	dd->product_type = MTIP_PRODUCT_UNKNOWN;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2504) 	dd->slot_groups = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2505) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2506) 	if (hwdata & 0x8) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2507) 		dd->product_type = MTIP_PRODUCT_ASICFPGA;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2508) 		rev = (hwdata & HSORG_HWREV) >> 8;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2509) 		slotgroups = (hwdata & HSORG_SLOTGROUPS) + 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2510) 		dev_info(&dd->pdev->dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2511) 			"ASIC-FPGA design, HS rev 0x%x, "
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2512) 			"%i slot groups [%i slots]\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2513) 			 rev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2514) 			 slotgroups,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2515) 			 slotgroups * 32);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2516) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2517) 		if (slotgroups > MTIP_MAX_SLOT_GROUPS) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2518) 			dev_warn(&dd->pdev->dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2519) 				"Warning: driver only supports "
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2520) 				"%i slot groups.\n", MTIP_MAX_SLOT_GROUPS);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2521) 			slotgroups = MTIP_MAX_SLOT_GROUPS;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2522) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2523) 		dd->slot_groups = slotgroups;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2524) 		return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2525) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2526) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2527) 	dev_warn(&dd->pdev->dev, "Unrecognized product id\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2528) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2529) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2530) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2531)  * Blocking wait for FTL rebuild to complete
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2532)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2533)  * @dd Pointer to the driver data structure.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2534)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2535)  * return value
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2536)  *	0	FTL rebuild completed successfully
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2537)  *	-EFAULT FTL rebuild error/timeout/interruption
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2538)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2539) static int mtip_ftl_rebuild_poll(struct driver_data *dd)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2540) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2541) 	unsigned long timeout, cnt = 0, start;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2542) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2543) 	dev_warn(&dd->pdev->dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2544) 		"FTL rebuild in progress. Polling for completion.\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2545) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2546) 	start = jiffies;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2547) 	timeout = jiffies + msecs_to_jiffies(MTIP_FTL_REBUILD_TIMEOUT_MS);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2548) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2549) 	do {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2550) 		if (unlikely(test_bit(MTIP_DDF_REMOVE_PENDING_BIT,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2551) 				&dd->dd_flag)))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2552) 			return -EFAULT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2553) 		if (mtip_check_surprise_removal(dd->pdev))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2554) 			return -EFAULT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2555) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2556) 		if (mtip_get_identify(dd->port, NULL) < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2557) 			return -EFAULT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2558) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2559) 		if (*(dd->port->identify + MTIP_FTL_REBUILD_OFFSET) ==
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2560) 			MTIP_FTL_REBUILD_MAGIC) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2561) 			ssleep(1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2562) 			/* Print message every 3 minutes */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2563) 			if (cnt++ >= 180) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2564) 				dev_warn(&dd->pdev->dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2565) 				"FTL rebuild in progress (%d secs).\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2566) 				jiffies_to_msecs(jiffies - start) / 1000);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2567) 				cnt = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2568) 			}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2569) 		} else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2570) 			dev_warn(&dd->pdev->dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2571) 				"FTL rebuild complete (%d secs).\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2572) 			jiffies_to_msecs(jiffies - start) / 1000);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2573) 			mtip_block_initialize(dd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2574) 			return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2575) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2576) 	} while (time_before(jiffies, timeout));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2577) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2578) 	/* Check for timeout */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2579) 	dev_err(&dd->pdev->dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2580) 		"Timed out waiting for FTL rebuild to complete (%d secs).\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2581) 		jiffies_to_msecs(jiffies - start) / 1000);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2582) 	return -EFAULT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2583) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2584) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2585) static void mtip_softirq_done_fn(struct request *rq)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2586) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2587) 	struct mtip_cmd *cmd = blk_mq_rq_to_pdu(rq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2588) 	struct driver_data *dd = rq->q->queuedata;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2589) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2590) 	/* Unmap the DMA scatter list entries */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2591) 	dma_unmap_sg(&dd->pdev->dev, cmd->sg, cmd->scatter_ents,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2592) 							cmd->direction);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2593) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2594) 	if (unlikely(cmd->unaligned))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2595) 		atomic_inc(&dd->port->cmd_slot_unal);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2596) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2597) 	blk_mq_end_request(rq, cmd->status);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2598) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2599) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2600) static bool mtip_abort_cmd(struct request *req, void *data, bool reserved)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2601) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2602) 	struct mtip_cmd *cmd = blk_mq_rq_to_pdu(req);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2603) 	struct driver_data *dd = data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2604) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2605) 	dbg_printk(MTIP_DRV_NAME " Aborting request, tag = %d\n", req->tag);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2606) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2607) 	clear_bit(req->tag, dd->port->cmds_to_issue);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2608) 	cmd->status = BLK_STS_IOERR;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2609) 	mtip_softirq_done_fn(req);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2610) 	return true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2611) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2612) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2613) static bool mtip_queue_cmd(struct request *req, void *data, bool reserved)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2614) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2615) 	struct driver_data *dd = data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2616) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2617) 	set_bit(req->tag, dd->port->cmds_to_issue);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2618) 	blk_abort_request(req);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2619) 	return true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2620) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2621) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2622) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2623)  * service thread to issue queued commands
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2624)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2625)  * @data Pointer to the driver data structure.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2626)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2627)  * return value
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2628)  *	0
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2629)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2630) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2631) static int mtip_service_thread(void *data)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2632) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2633) 	struct driver_data *dd = (struct driver_data *)data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2634) 	unsigned long slot, slot_start, slot_wrap, to;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2635) 	unsigned int num_cmd_slots = dd->slot_groups * 32;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2636) 	struct mtip_port *port = dd->port;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2637) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2638) 	while (1) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2639) 		if (kthread_should_stop() ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2640) 			test_bit(MTIP_PF_SVC_THD_STOP_BIT, &port->flags))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2641) 			goto st_out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2642) 		clear_bit(MTIP_PF_SVC_THD_ACTIVE_BIT, &port->flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2643) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2644) 		/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2645) 		 * the condition is to check that neither an internal
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2646) 		 * command is in progress nor error handling is active
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2647) 		 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2648) 		wait_event_interruptible(port->svc_wait, (port->flags) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2649) 			(port->flags & MTIP_PF_SVC_THD_WORK));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2650) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2651) 		if (kthread_should_stop() ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2652) 			test_bit(MTIP_PF_SVC_THD_STOP_BIT, &port->flags))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2653) 			goto st_out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2654) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2655) 		if (unlikely(test_bit(MTIP_DDF_REMOVE_PENDING_BIT,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2656) 				&dd->dd_flag)))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2657) 			goto st_out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2658) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2659) 		set_bit(MTIP_PF_SVC_THD_ACTIVE_BIT, &port->flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2660) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2661) restart_eh:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2662) 		/* Demux bits: start with error handling */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2663) 		if (test_bit(MTIP_PF_EH_ACTIVE_BIT, &port->flags)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2664) 			mtip_handle_tfe(dd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2665) 			clear_bit(MTIP_PF_EH_ACTIVE_BIT, &port->flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2666) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2667) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2668) 		if (test_bit(MTIP_PF_EH_ACTIVE_BIT, &port->flags))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2669) 			goto restart_eh;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2670) 
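		/*
		 * Command timeout recovery: wait up to ~5 seconds for the
		 * completion workers to drain, quiesce the queue, mark every
		 * outstanding tag for reissue, then reset the device; the
		 * outstanding commands are aborted only if that reset fails.
		 */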
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2671) 		if (test_bit(MTIP_PF_TO_ACTIVE_BIT, &port->flags)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2672) 			to = jiffies + msecs_to_jiffies(5000);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2673) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2674) 			do {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2675) 				mdelay(100);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2676) 			} while (atomic_read(&dd->irq_workers_active) != 0 &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2677) 				time_before(jiffies, to));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2678) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2679) 			if (atomic_read(&dd->irq_workers_active) != 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2680) 				dev_warn(&dd->pdev->dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2681) 					"Completion workers still active!");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2682) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2683) 			blk_mq_quiesce_queue(dd->queue);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2684) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2685) 			blk_mq_tagset_busy_iter(&dd->tags, mtip_queue_cmd, dd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2686) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2687) 			set_bit(MTIP_PF_ISSUE_CMDS_BIT, &dd->port->flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2688) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2689) 			if (mtip_device_reset(dd))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2690) 				blk_mq_tagset_busy_iter(&dd->tags,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2691) 							mtip_abort_cmd, dd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2692) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2693) 			clear_bit(MTIP_PF_TO_ACTIVE_BIT, &dd->port->flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2694) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2695) 			blk_mq_unquiesce_queue(dd->queue);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2696) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2697) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2698) 		if (test_bit(MTIP_PF_ISSUE_CMDS_BIT, &port->flags)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2699) 			slot = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2700) 			/* used to restrict the scan to one pass over the bitmap */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2701) 			slot_start = num_cmd_slots;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2702) 			slot_wrap = 0;
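			/*
			 * Scan cmds_to_issue for pending tags, wrapping once
			 * around the bitmap. The scan starts at slot 1; tag 0
			 * is reserved for the driver's internal commands.
			 */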
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2703) 			while (1) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2704) 				slot = find_next_bit(port->cmds_to_issue,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2705) 						num_cmd_slots, slot);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2706) 				if (slot_wrap == 1) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2707) 					if ((slot_start >= slot) ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2708) 						(slot >= num_cmd_slots))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2709) 						break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2710) 				}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2711) 				if (unlikely(slot_start == num_cmd_slots))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2712) 					slot_start = slot;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2713) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2714) 				if (unlikely(slot == num_cmd_slots)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2715) 					slot = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2716) 					slot_wrap = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2717) 					continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2718) 				}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2719) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2720) 				/* Issue the command to the hardware */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2721) 				mtip_issue_ncq_command(port, slot);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2722) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2723) 				clear_bit(slot, port->cmds_to_issue);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2724) 			}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2725) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2726) 			clear_bit(MTIP_PF_ISSUE_CMDS_BIT, &port->flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2727) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2728) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2729) 		if (test_bit(MTIP_PF_REBUILD_BIT, &port->flags)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2730) 			if (mtip_ftl_rebuild_poll(dd) == 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2731) 				clear_bit(MTIP_PF_REBUILD_BIT, &port->flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2732) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2733) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2734) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2735) st_out:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2736) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2737) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2738) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2739) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2740)  * DMA region teardown
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2741)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2742)  * @dd Pointer to driver_data structure
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2743)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2744)  * return value
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2745)  *      None
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2746)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2747) static void mtip_dma_free(struct driver_data *dd)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2748) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2749) 	struct mtip_port *port = dd->port;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2750) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2751) 	if (port->block1)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2752) 		dma_free_coherent(&dd->pdev->dev, BLOCK_DMA_ALLOC_SZ,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2753) 					port->block1, port->block1_dma);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2754) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2755) 	if (port->command_list) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2756) 		dma_free_coherent(&dd->pdev->dev, AHCI_CMD_TBL_SZ,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2757) 				port->command_list, port->command_list_dma);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2758) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2759) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2760) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2761) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2762)  * DMA region setup
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2763)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2764)  * @dd Pointer to driver_data structure
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2765)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2766)  * return value
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2767)  *      -ENOMEM Not enough free DMA region space to initialize driver
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2768)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2769) static int mtip_dma_alloc(struct driver_data *dd)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2770) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2771) 	struct mtip_port *port = dd->port;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2772) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2773) 	/* Allocate DMA memory for RX FIS, Identify, and Sector Buffer */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2774) 	port->block1 =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2775) 		dma_alloc_coherent(&dd->pdev->dev, BLOCK_DMA_ALLOC_SZ,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2776) 					&port->block1_dma, GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2777) 	if (!port->block1)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2778) 		return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2779) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2780) 	/* Allocate dma memory for command list */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2781) 	port->command_list =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2782) 		dma_alloc_coherent(&dd->pdev->dev, AHCI_CMD_TBL_SZ,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2783) 					&port->command_list_dma, GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2784) 	if (!port->command_list) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2785) 		dma_free_coherent(&dd->pdev->dev, BLOCK_DMA_ALLOC_SZ,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2786) 					port->block1, port->block1_dma);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2787) 		port->block1 = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2788) 		port->block1_dma = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2789) 		return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2790) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2791) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2792) 	/* Setup all pointers into first DMA region */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2793) 	port->rxfis         = port->block1 + AHCI_RX_FIS_OFFSET;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2794) 	port->rxfis_dma     = port->block1_dma + AHCI_RX_FIS_OFFSET;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2795) 	port->identify      = port->block1 + AHCI_IDFY_OFFSET;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2796) 	port->identify_dma  = port->block1_dma + AHCI_IDFY_OFFSET;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2797) 	port->log_buf       = port->block1 + AHCI_SECTBUF_OFFSET;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2798) 	port->log_buf_dma   = port->block1_dma + AHCI_SECTBUF_OFFSET;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2799) 	port->smart_buf     = port->block1 + AHCI_SMARTBUF_OFFSET;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2800) 	port->smart_buf_dma = port->block1_dma + AHCI_SMARTBUF_OFFSET;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2801) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2802) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2803) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2804) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2805) static int mtip_hw_get_identify(struct driver_data *dd)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2806) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2807) 	struct smart_attr attr242;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2808) 	unsigned char *buf;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2809) 	int rv;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2810) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2811) 	if (mtip_get_identify(dd->port, NULL) < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2812) 		return -EFAULT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2813) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2814) 	if (*(dd->port->identify + MTIP_FTL_REBUILD_OFFSET) ==
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2815) 		MTIP_FTL_REBUILD_MAGIC) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2816) 		set_bit(MTIP_PF_REBUILD_BIT, &dd->port->flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2817) 		return MTIP_FTL_REBUILD_MAGIC;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2818) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2819) 	mtip_dump_identify(dd->port);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2820) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2821) 	/* check write protect, over temp and rebuild statuses */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2822) 	rv = mtip_read_log_page(dd->port, ATA_LOG_SATA_NCQ,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2823) 				dd->port->log_buf,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2824) 				dd->port->log_buf_dma, 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2825) 	if (rv) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2826) 		dev_warn(&dd->pdev->dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2827) 			"Error in READ LOG EXT (10h) command\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2828) 		/* non-critical error, don't fail the load */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2829) 	} else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2830) 		buf = (unsigned char *)dd->port->log_buf;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2831) 		if (buf[259] & 0x1) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2832) 			dev_info(&dd->pdev->dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2833) 				"Write protect bit is set.\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2834) 			set_bit(MTIP_DDF_WRITE_PROTECT_BIT, &dd->dd_flag);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2835) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2836) 		if (buf[288] == 0xF7) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2837) 			dev_info(&dd->pdev->dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2838) 				"Exceeded Tmax, drive in thermal shutdown.\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2839) 			set_bit(MTIP_DDF_OVER_TEMP_BIT, &dd->dd_flag);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2840) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2841) 		if (buf[288] == 0xBF) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2842) 			dev_info(&dd->pdev->dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2843) 				"Drive indicates rebuild has failed.\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2844) 			set_bit(MTIP_DDF_REBUILD_FAILED_BIT, &dd->dd_flag);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2845) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2846) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2847) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2848) 	/* get write protect progress */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2849) 	memset(&attr242, 0, sizeof(struct smart_attr));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2850) 	if (mtip_get_smart_attr(dd->port, 242, &attr242))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2851) 		dev_warn(&dd->pdev->dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2852) 				"Unable to check write protect progress\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2853) 	else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2854) 		dev_info(&dd->pdev->dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2855) 				"Write protect progress: %u%% (%u blocks)\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2856) 				attr242.cur, le32_to_cpu(attr242.data));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2857) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2858) 	return rv;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2859) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2860) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2861) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2862)  * Called once for each card.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2863)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2864)  * @dd Pointer to the driver data structure.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2865)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2866)  * return value
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2867)  *	0 on success, else an error code.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2868)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2869) static int mtip_hw_init(struct driver_data *dd)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2870) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2871) 	int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2872) 	int rv;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2873) 	unsigned long timeout, timetaken;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2874) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2875) 	dd->mmio = pcim_iomap_table(dd->pdev)[MTIP_ABAR];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2876) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2877) 	mtip_detect_product(dd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2878) 	if (dd->product_type == MTIP_PRODUCT_UNKNOWN) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2879) 		rv = -EIO;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2880) 		goto out1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2881) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2882) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2883) 	hba_setup(dd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2884) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2885) 	dd->port = kzalloc_node(sizeof(struct mtip_port), GFP_KERNEL,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2886) 				dd->numa_node);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2887) 	if (!dd->port) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2888) 		dev_err(&dd->pdev->dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2889) 			"Memory allocation: port structure\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2890) 		return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2891) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2892) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2893) 	/* Continue workqueue setup */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2894) 	for (i = 0; i < MTIP_MAX_SLOT_GROUPS; i++)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2895) 		dd->work[i].port = dd->port;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2896) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2897) 	/* Enable unaligned IO constraints for some devices */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2898) 	if (mtip_device_unaligned_constrained(dd))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2899) 		dd->unal_qdepth = MTIP_MAX_UNALIGNED_SLOTS;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2900) 	else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2901) 		dd->unal_qdepth = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2902) 
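	/*
	 * cmd_slot_unal is the budget of unaligned commands allowed in
	 * flight at once; it is consumed as unaligned I/O is issued and
	 * replenished in mtip_softirq_done_fn() when such a command
	 * completes.
	 */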
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2903) 	atomic_set(&dd->port->cmd_slot_unal, dd->unal_qdepth);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2904) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2905) 	/* Spinlock to prevent concurrent issue */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2906) 	for (i = 0; i < MTIP_MAX_SLOT_GROUPS; i++)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2907) 		spin_lock_init(&dd->port->cmd_issue_lock[i]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2908) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2909) 	/* Set the port mmio base address. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2910) 	dd->port->mmio	= dd->mmio + PORT_OFFSET;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2911) 	dd->port->dd	= dd;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2912) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2913) 	/* DMA allocations */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2914) 	rv = mtip_dma_alloc(dd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2915) 	if (rv < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2916) 		goto out1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2917) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2918) 	/* Setup the pointers to the extended s_active and CI registers. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2919) 	for (i = 0; i < dd->slot_groups; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2920) 		dd->port->s_active[i] =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2921) 			dd->port->mmio + i*0x80 + PORT_SCR_ACT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2922) 		dd->port->cmd_issue[i] =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2923) 			dd->port->mmio + i*0x80 + PORT_COMMAND_ISSUE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2924) 		dd->port->completed[i] =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2925) 			dd->port->mmio + i*0x80 + PORT_SDBV;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2926) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2927) 
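	/*
	 * Wait up to 30 seconds for the SATA link to come up: SStatus
	 * DET (bits [3:0]) must read 3h, i.e. device detected and PHY
	 * communication established.
	 */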
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2928) 	timetaken = jiffies;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2929) 	timeout = jiffies + msecs_to_jiffies(30000);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2930) 	while (((readl(dd->port->mmio + PORT_SCR_STAT) & 0x0F) != 0x03) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2931) 		 time_before(jiffies, timeout)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2932) 		mdelay(100);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2933) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2934) 	if (unlikely(mtip_check_surprise_removal(dd->pdev))) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2935) 		timetaken = jiffies - timetaken;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2936) 		dev_warn(&dd->pdev->dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2937) 			"Surprise removal detected at %u ms\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2938) 			jiffies_to_msecs(timetaken));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2939) 		rv = -ENODEV;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2940) 		goto out2;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2941) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2942) 	if (unlikely(test_bit(MTIP_DDF_REMOVE_PENDING_BIT, &dd->dd_flag))) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2943) 		timetaken = jiffies - timetaken;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2944) 		dev_warn(&dd->pdev->dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2945) 			"Removal detected at %u ms\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2946) 			jiffies_to_msecs(timetaken));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2947) 		rv = -EFAULT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2948) 		goto out2;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2949) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2950) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2951) 	/* Conditionally reset the HBA. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2952) 	if (!(readl(dd->mmio + HOST_CAP) & HOST_CAP_NZDMA)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2953) 		if (mtip_hba_reset(dd) < 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2954) 			dev_err(&dd->pdev->dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2955) 				"Card did not reset within timeout\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2956) 			rv = -EIO;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2957) 			goto out2;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2958) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2959) 	} else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2960) 		/* Clear any pending interrupts on the HBA */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2961) 		writel(readl(dd->mmio + HOST_IRQ_STAT),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2962) 			dd->mmio + HOST_IRQ_STAT);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2963) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2964) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2965) 	mtip_init_port(dd->port);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2966) 	mtip_start_port(dd->port);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2967) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2968) 	/* Setup the ISR and enable interrupts. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2969) 	rv = request_irq(dd->pdev->irq, mtip_irq_handler, IRQF_SHARED,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2970) 			 dev_driver_string(&dd->pdev->dev), dd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2971) 	if (rv) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2972) 		dev_err(&dd->pdev->dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2973) 			"Unable to allocate IRQ %d\n", dd->pdev->irq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2974) 		goto out2;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2975) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2976) 	irq_set_affinity_hint(dd->pdev->irq, get_cpu_mask(dd->isr_binding));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2977) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2978) 	/* Enable interrupts on the HBA. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2979) 	writel(readl(dd->mmio + HOST_CTL) | HOST_IRQ_EN,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2980) 					dd->mmio + HOST_CTL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2981) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2982) 	init_waitqueue_head(&dd->port->svc_wait);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2983) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2984) 	if (test_bit(MTIP_DDF_REMOVE_PENDING_BIT, &dd->dd_flag)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2985) 		rv = -EFAULT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2986) 		goto out3;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2987) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2988) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2989) 	return rv;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2990) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2991) out3:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2992) 	/* Disable interrupts on the HBA. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2993) 	writel(readl(dd->mmio + HOST_CTL) & ~HOST_IRQ_EN,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2994) 			dd->mmio + HOST_CTL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2995) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2996) 	/* Release the IRQ. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2997) 	irq_set_affinity_hint(dd->pdev->irq, NULL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2998) 	free_irq(dd->pdev->irq, dd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2999) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3000) out2:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3001) 	mtip_deinit_port(dd->port);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3002) 	mtip_dma_free(dd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3003) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3004) out1:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3005) 	/* Free the memory allocated for the port structure. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3006) 	kfree(dd->port);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3007) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3008) 	return rv;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3009) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3010) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3011) static int mtip_standby_drive(struct driver_data *dd)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3012) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3013) 	int rv = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3014) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3015) 	if (dd->sr || !dd->port)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3016) 		return -ENODEV;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3017) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3018) 	 * Send standby immediate (E0h) to the drive so that it
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3019) 	 * saves its state.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3020) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3021) 	if (!test_bit(MTIP_PF_REBUILD_BIT, &dd->port->flags) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3022) 	    !test_bit(MTIP_DDF_REBUILD_FAILED_BIT, &dd->dd_flag) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3023) 	    !test_bit(MTIP_DDF_SEC_LOCK_BIT, &dd->dd_flag)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3024) 		rv = mtip_standby_immediate(dd->port);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3025) 		if (rv)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3026) 			dev_warn(&dd->pdev->dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3027) 				"STANDBY IMMEDIATE failed\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3028) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3029) 	return rv;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3030) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3031) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3032) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3033)  * Called to deinitialize an interface.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3034)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3035)  * @dd Pointer to the driver data structure.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3036)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3037)  * return value
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3038)  *	0
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3039)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3040) static int mtip_hw_exit(struct driver_data *dd)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3041) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3042) 	if (!dd->sr) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3043) 		/* de-initialize the port. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3044) 		mtip_deinit_port(dd->port);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3045) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3046) 		/* Disable interrupts on the HBA. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3047) 		writel(readl(dd->mmio + HOST_CTL) & ~HOST_IRQ_EN,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3048) 				dd->mmio + HOST_CTL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3049) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3050) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3051) 	/* Release the IRQ. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3052) 	irq_set_affinity_hint(dd->pdev->irq, NULL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3053) 	free_irq(dd->pdev->irq, dd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3054) 	msleep(1000);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3055) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3056) 	/* Free dma regions */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3057) 	mtip_dma_free(dd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3058) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3059) 	/* Free the memory allocated for the port structure. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3060) 	kfree(dd->port);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3061) 	dd->port = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3062) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3063) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3064) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3065) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3066) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3067)  * Issue a Standby Immediate command to the device.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3068)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3069)  * This function is called by the Block Layer just before the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3070)  * system powers off during a shutdown.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3071)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3072)  * @dd Pointer to the driver data structure.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3073)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3074)  * return value
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3075)  *	0
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3076)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3077) static int mtip_hw_shutdown(struct driver_data *dd)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3078) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3079) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3080) 	 * Send standby immediate (E0h) to the drive so that it
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3081) 	 * saves its state.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3082) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3083) 	mtip_standby_drive(dd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3084) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3085) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3086) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3087) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3088) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3089)  * Suspend function
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3090)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3091)  * This function is called by the Block Layer just before the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3092)  * system hibernates.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3093)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3094)  * @dd Pointer to the driver data structure.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3095)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3096)  * return value
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3097)  *	0	Suspend was successful
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3098)  *	-EFAULT Suspend was not successful
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3099)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3100) static int mtip_hw_suspend(struct driver_data *dd)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3101) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3102) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3103) 	 * Send standby immediate (E0h) to the drive
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3104) 	 * so that it saves its state.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3105) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3106) 	if (mtip_standby_drive(dd) != 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3107) 		dev_err(&dd->pdev->dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3108) 			"Failed standby-immediate command\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3109) 		return -EFAULT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3110) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3111) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3112) 	/* Disable interrupts on the HBA.*/
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3113) 	writel(readl(dd->mmio + HOST_CTL) & ~HOST_IRQ_EN,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3114) 			dd->mmio + HOST_CTL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3115) 	mtip_deinit_port(dd->port);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3116) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3117) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3118) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3119) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3120) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3121)  * Resume function
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3122)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3123)  * This function is called by the Block Layer as the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3124)  * system resumes.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3125)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3126)  * @dd Pointer to the driver data structure.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3127)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3128)  * return value
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3129)  *	0	Resume was successful
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3130)  *      -EFAULT Resume was not successful
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3131)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3132) static int mtip_hw_resume(struct driver_data *dd)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3133) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3134) 	/* Perform any needed hardware setup steps */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3135) 	hba_setup(dd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3136) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3137) 	/* Reset the HBA */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3138) 	if (mtip_hba_reset(dd) != 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3139) 		dev_err(&dd->pdev->dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3140) 			"Unable to reset the HBA\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3141) 		return -EFAULT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3142) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3143) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3144) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3145) 	 * Enable the port, DMA engine, and FIS-reception-specific
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3146) 	 * hardware in the controller.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3147) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3148) 	mtip_init_port(dd->port);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3149) 	mtip_start_port(dd->port);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3150) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3151) 	/* Enable interrupts on the HBA.*/
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3152) 	writel(readl(dd->mmio + HOST_CTL) | HOST_IRQ_EN,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3153) 			dd->mmio + HOST_CTL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3154) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3155) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3156) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3157) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3158) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3159)  * Helper function for reusing disk name
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3160)  * upon hot insertion.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3161)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3162) static int rssd_disk_name_format(char *prefix,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3163) 				 int index,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3164) 				 char *buf,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3165) 				 int buflen)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3166) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3167) 	const int base = 'z' - 'a' + 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3168) 	char *begin = buf + strlen(prefix);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3169) 	char *end = buf + buflen;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3170) 	char *p;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3171) 	int unit;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3172) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3173) 	p = end - 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3174) 	*p = '\0';
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3175) 	unit = base;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3176) 	do {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3177) 		if (p == begin)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3178) 			return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3179) 		*--p = 'a' + (index % unit);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3180) 		index = (index / unit) - 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3181) 	} while (index >= 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3182) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3183) 	memmove(begin, p, end - p);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3184) 	memcpy(buf, prefix, strlen(prefix));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3185) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3186) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3187) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3188) 
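/*
 * The loop above is a bijective base-26 encoding (essentially the same
 * scheme the sd driver uses for sdX names), so with the "rssd" prefix an
 * index of 0 formats as "rssda", 25 as "rssdz", 26 as "rssdaa" and 27 as
 * "rssdab".
 */
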
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3189) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3190)  * Block layer IOCTL handler.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3191)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3192)  * @dev Pointer to the block_device structure.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3193)  * @mode ignored
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3194)  * @cmd IOCTL command passed from the user application.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3195)  * @arg Argument passed from the user application.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3196)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3197)  * return value
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3198)  *	0        IOCTL completed successfully.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3199)  *	-ENOTTY  IOCTL not supported or invalid driver data
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3200)  *                 structure pointer.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3201)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3202) static int mtip_block_ioctl(struct block_device *dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3203) 			    fmode_t mode,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3204) 			    unsigned cmd,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3205) 			    unsigned long arg)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3206) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3207) 	struct driver_data *dd = dev->bd_disk->private_data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3208) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3209) 	if (!capable(CAP_SYS_ADMIN))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3210) 		return -EACCES;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3211) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3212) 	if (!dd)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3213) 		return -ENOTTY;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3214) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3215) 	if (unlikely(test_bit(MTIP_DDF_REMOVE_PENDING_BIT, &dd->dd_flag)))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3216) 		return -ENOTTY;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3217) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3218) 	switch (cmd) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3219) 	case BLKFLSBUF:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3220) 		return -ENOTTY;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3221) 	default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3222) 		return mtip_hw_ioctl(dd, cmd, arg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3223) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3224) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3225) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3226) #ifdef CONFIG_COMPAT
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3227) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3228)  * Block layer compat IOCTL handler.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3229)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3230)  * @dev Pointer to the block_device structure.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3231)  * @mode ignored
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3232)  * @cmd IOCTL command passed from the user application.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3233)  * @arg Argument passed from the user application.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3234)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3235)  * return value
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3236)  *	0        IOCTL completed successfully.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3237)  *	-ENOTTY  IOCTL not supported or invalid driver data
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3238)  *                 structure pointer.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3239)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3240) static int mtip_block_compat_ioctl(struct block_device *dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3241) 			    fmode_t mode,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3242) 			    unsigned cmd,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3243) 			    unsigned long arg)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3244) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3245) 	struct driver_data *dd = dev->bd_disk->private_data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3246) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3247) 	if (!capable(CAP_SYS_ADMIN))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3248) 		return -EACCES;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3249) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3250) 	if (!dd)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3251) 		return -ENOTTY;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3252) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3253) 	if (unlikely(test_bit(MTIP_DDF_REMOVE_PENDING_BIT, &dd->dd_flag)))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3254) 		return -ENOTTY;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3255) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3256) 	switch (cmd) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3257) 	case BLKFLSBUF:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3258) 		return -ENOTTY;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3259) 	case HDIO_DRIVE_TASKFILE: {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3260) 		struct mtip_compat_ide_task_request_s __user *compat_req_task;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3261) 		ide_task_request_t req_task;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3262) 		int compat_tasksize, outtotal, ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3263) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3264) 		compat_tasksize =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3265) 			sizeof(struct mtip_compat_ide_task_request_s);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3266) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3267) 		compat_req_task =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3268) 			(struct mtip_compat_ide_task_request_s __user *) arg;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3269) 
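		/*
		 * Only the fixed-size head of the request is bulk-copied here;
		 * the trailing out_size/in_size fields are moved separately
		 * with get_user()/put_user() below, presumably because they
		 * are compat_long_t in the 32-bit layout but unsigned long in
		 * the native ide_task_request_t.
		 */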
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3270) 		if (copy_from_user(&req_task, (void __user *) arg,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3271) 			compat_tasksize - (2 * sizeof(compat_long_t))))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3272) 			return -EFAULT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3273) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3274) 		if (get_user(req_task.out_size, &compat_req_task->out_size))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3275) 			return -EFAULT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3276) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3277) 		if (get_user(req_task.in_size, &compat_req_task->in_size))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3278) 			return -EFAULT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3279) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3280) 		outtotal = sizeof(struct mtip_compat_ide_task_request_s);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3281) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3282) 		ret = exec_drive_taskfile(dd, (void __user *) arg,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3283) 						&req_task, outtotal);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3284) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3285) 		if (copy_to_user((void __user *) arg, &req_task,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3286) 				compat_tasksize -
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3287) 				(2 * sizeof(compat_long_t))))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3288) 			return -EFAULT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3289) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3290) 		if (put_user(req_task.out_size, &compat_req_task->out_size))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3291) 			return -EFAULT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3292) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3293) 		if (put_user(req_task.in_size, &compat_req_task->in_size))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3294) 			return -EFAULT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3295) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3296) 		return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3297) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3298) 	default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3299) 		return mtip_hw_ioctl(dd, cmd, arg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3300) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3301) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3302) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3303) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3304) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3305)  * Obtain the geometry of the device.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3306)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3307)  * You may think that this function is obsolete, but some applications,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3308)  * fdisk for example, still use CHS values. This function describes the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3309)  * device as having 224 heads and 56 sectors per track. These values are
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3310)  * chosen so that each cylinder is aligned on a 4KB boundary. Since a
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3311)  * partition is described in terms of a start and end cylinder, this means
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3312)  * that each partition is also 4KB aligned. Non-aligned partitions adversely
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3313)  * affect performance.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3314)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3315)  * @dev Pointer to the block_device structure.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3316)  * @geo Pointer to a hd_geometry structure.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3317)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3318)  * return value
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3319)  *	0       Operation completed successfully.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3320)  *	-ENOTTY An error occurred while reading the drive capacity.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3321)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3322) static int mtip_block_getgeo(struct block_device *dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3323) 				struct hd_geometry *geo)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3324) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3325) 	struct driver_data *dd = dev->bd_disk->private_data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3326) 	sector_t capacity;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3327) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3328) 	if (!dd)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3329) 		return -ENOTTY;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3330) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3331) 	if (!(mtip_hw_get_capacity(dd, &capacity))) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3332) 		dev_warn(&dd->pdev->dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3333) 			"Could not get drive capacity.\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3334) 		return -ENOTTY;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3335) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3336) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3337) 	geo->heads = 224;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3338) 	geo->sectors = 56;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3339) 	sector_div(capacity, (geo->heads * geo->sectors));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3340) 	geo->cylinders = capacity;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3341) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3342) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3343) 
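/*
 * Worked example of the 4KB claim above: 224 heads * 56 sectors/track =
 * 12544 sectors per cylinder, and 12544 * 512 bytes = 6,422,528 bytes =
 * 1568 * 4096, so every cylinder boundary (and therefore every partition
 * that starts and ends on a cylinder) lands on a 4KB multiple.
 */
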
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3344) static int mtip_block_open(struct block_device *dev, fmode_t mode)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3345) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3346) 	struct driver_data *dd;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3347) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3348) 	if (dev && dev->bd_disk) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3349) 		dd = (struct driver_data *) dev->bd_disk->private_data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3350) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3351) 		if (dd) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3352) 			if (test_bit(MTIP_DDF_REMOVAL_BIT,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3353) 							&dd->dd_flag)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3354) 				return -ENODEV;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3355) 			}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3356) 			return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3357) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3358) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3359) 	return -ENODEV;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3360) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3361) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3362) static void mtip_block_release(struct gendisk *disk, fmode_t mode)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3363) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3364) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3365) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3366) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3367)  * Block device operation function.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3368)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3369)  * This structure contains pointers to the functions required by the block
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3370)  * layer.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3371)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3372) static const struct block_device_operations mtip_block_ops = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3373) 	.open		= mtip_block_open,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3374) 	.release	= mtip_block_release,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3375) 	.ioctl		= mtip_block_ioctl,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3376) #ifdef CONFIG_COMPAT
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3377) 	.compat_ioctl	= mtip_block_compat_ioctl,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3378) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3379) 	.getgeo		= mtip_block_getgeo,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3380) 	.owner		= THIS_MODULE
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3381) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3382) 
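/*
 * For reference, the .getgeo hook above is what services HDIO_GETGEO when
 * user space queries the geometry of the block node. A minimal sketch (the
 * device path is illustrative):
 *
 *	struct hd_geometry geo;
 *	int fd = open("/dev/rssda", O_RDONLY);
 *
 *	if (fd >= 0 && ioctl(fd, HDIO_GETGEO, &geo) == 0)
 *		printf("%d heads, %d sectors/track, %d cylinders\n",
 *		       geo.heads, geo.sectors, geo.cylinders);
 */
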
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3383) static inline bool is_se_active(struct driver_data *dd)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3384) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3385) 	if (unlikely(test_bit(MTIP_PF_SE_ACTIVE_BIT, &dd->port->flags))) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3386) 		if (dd->port->ic_pause_timer) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3387) 			unsigned long to = dd->port->ic_pause_timer +
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3388) 							msecs_to_jiffies(1000);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3389) 			if (time_after(jiffies, to)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3390) 				clear_bit(MTIP_PF_SE_ACTIVE_BIT,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3391) 							&dd->port->flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3392) 				clear_bit(MTIP_DDF_SEC_LOCK_BIT, &dd->dd_flag);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3393) 				dd->port->ic_pause_timer = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3394) 				wake_up_interruptible(&dd->port->svc_wait);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3395) 				return false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3396) 			}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3397) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3398) 		return true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3399) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3400) 	return false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3401) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3402) 
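/*
 * is_se_active() above pauses I/O while what appears to be a security-erase
 * style internal command is in flight (MTIP_PF_SE_ACTIVE_BIT); once jiffies
 * passes ic_pause_timer + 1000ms it force-clears that bit together with
 * MTIP_DDF_SEC_LOCK_BIT, wakes the service thread and lets I/O resume.
 */
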
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3403) static inline bool is_stopped(struct driver_data *dd, struct request *rq)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3404) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3405) 	if (likely(!(dd->dd_flag & MTIP_DDF_STOP_IO)))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3406) 		return false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3407) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3408) 	if (test_bit(MTIP_DDF_REMOVE_PENDING_BIT, &dd->dd_flag))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3409) 		return true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3410) 	if (test_bit(MTIP_DDF_OVER_TEMP_BIT, &dd->dd_flag))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3411) 		return true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3412) 	if (test_bit(MTIP_DDF_WRITE_PROTECT_BIT, &dd->dd_flag) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3413) 	    rq_data_dir(rq))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3414) 		return true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3415) 	if (test_bit(MTIP_DDF_SEC_LOCK_BIT, &dd->dd_flag))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3416) 		return true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3417) 	if (test_bit(MTIP_DDF_REBUILD_FAILED_BIT, &dd->dd_flag))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3418) 		return true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3419) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3420) 	return false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3421) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3422) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3423) static bool mtip_check_unal_depth(struct blk_mq_hw_ctx *hctx,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3424) 				  struct request *rq)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3425) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3426) 	struct driver_data *dd = hctx->queue->queuedata;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3427) 	struct mtip_cmd *cmd = blk_mq_rq_to_pdu(rq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3428) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3429) 	if (rq_data_dir(rq) == READ || !dd->unal_qdepth)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3430) 		return false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3431) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3432) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3433) 	 * If unaligned depth must be limited on this controller, mark it
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3434) 	 * as unaligned if the IO isn't on a 4k boundary (start or length).
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3435) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3436) 	if (blk_rq_sectors(rq) <= 64) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3437) 		if ((blk_rq_pos(rq) & 7) || (blk_rq_sectors(rq) & 7))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3438) 			cmd->unaligned = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3439) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3440) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3441) 	if (cmd->unaligned && atomic_dec_if_positive(&dd->port->cmd_slot_unal) >= 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3442) 		return true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3443) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3444) 	return false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3445) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3446) 
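/*
 * Notes on the test above: 64 sectors is 32KB, and the "& 7" masks check
 * that both the starting LBA and the transfer length are multiples of
 * 8 sectors (4KB). Small writes that fail either check draw a slot from
 * port->cmd_slot_unal, which appears to bound the number of unaligned
 * writes in flight by dd->unal_qdepth.
 */
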
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3447) static blk_status_t mtip_issue_reserved_cmd(struct blk_mq_hw_ctx *hctx,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3448) 		struct request *rq)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3449) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3450) 	struct driver_data *dd = hctx->queue->queuedata;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3451) 	struct mtip_cmd *cmd = blk_mq_rq_to_pdu(rq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3452) 	struct mtip_int_cmd *icmd = cmd->icmd;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3453) 	struct mtip_cmd_hdr *hdr =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3454) 		dd->port->command_list + sizeof(struct mtip_cmd_hdr) * rq->tag;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3455) 	struct mtip_cmd_sg *command_sg;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3456) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3457) 	if (mtip_commands_active(dd->port))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3458) 		return BLK_STS_DEV_RESOURCE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3459) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3460) 	hdr->ctba = cpu_to_le32(cmd->command_dma & 0xFFFFFFFF);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3461) 	if (test_bit(MTIP_PF_HOST_CAP_64, &dd->port->flags))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3462) 		hdr->ctbau = cpu_to_le32((cmd->command_dma >> 16) >> 16);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3463) 	/* Populate the SG list */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3464) 	hdr->opts = cpu_to_le32(icmd->opts | icmd->fis_len);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3465) 	if (icmd->buf_len) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3466) 		command_sg = cmd->command + AHCI_CMD_TBL_HDR_SZ;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3467) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3468) 		command_sg->info = cpu_to_le32((icmd->buf_len-1) & 0x3FFFFF);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3469) 		command_sg->dba	= cpu_to_le32(icmd->buffer & 0xFFFFFFFF);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3470) 		command_sg->dba_upper =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3471) 			cpu_to_le32((icmd->buffer >> 16) >> 16);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3472) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3473) 		hdr->opts |= cpu_to_le32((1 << 16));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3474) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3475) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3476) 	/* Populate the command header */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3477) 	hdr->byte_count = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3478) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3479) 	blk_mq_start_request(rq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3480) 	mtip_issue_non_ncq_command(dd->port, rq->tag);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3481) 	return BLK_STS_OK;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3482) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3483) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3484) static blk_status_t mtip_queue_rq(struct blk_mq_hw_ctx *hctx,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3485) 			 const struct blk_mq_queue_data *bd)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3486) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3487) 	struct driver_data *dd = hctx->queue->queuedata;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3488) 	struct request *rq = bd->rq;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3489) 	struct mtip_cmd *cmd = blk_mq_rq_to_pdu(rq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3490) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3491) 	if (blk_rq_is_passthrough(rq))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3492) 		return mtip_issue_reserved_cmd(hctx, rq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3493) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3494) 	if (unlikely(mtip_check_unal_depth(hctx, rq)))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3495) 		return BLK_STS_DEV_RESOURCE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3496) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3497) 	if (is_se_active(dd) || is_stopped(dd, rq))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3498) 		return BLK_STS_IOERR;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3499) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3500) 	blk_mq_start_request(rq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3501) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3502) 	mtip_hw_submit_io(dd, rq, cmd, hctx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3503) 	return BLK_STS_OK;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3504) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3505) 
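/*
 * Dispatch order above: passthrough (reserved-tag) requests are routed to
 * mtip_issue_reserved_cmd(), everything else is first subject to the
 * unaligned-write throttle, then to the security/stop checks, and only then
 * handed to mtip_hw_submit_io().
 */
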
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3506) static void mtip_free_cmd(struct blk_mq_tag_set *set, struct request *rq,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3507) 			  unsigned int hctx_idx)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3508) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3509) 	struct driver_data *dd = set->driver_data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3510) 	struct mtip_cmd *cmd = blk_mq_rq_to_pdu(rq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3511) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3512) 	if (!cmd->command)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3513) 		return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3514) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3515) 	dma_free_coherent(&dd->pdev->dev, CMD_DMA_ALLOC_SZ, cmd->command,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3516) 			  cmd->command_dma);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3517) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3518) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3519) static int mtip_init_cmd(struct blk_mq_tag_set *set, struct request *rq,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3520) 			 unsigned int hctx_idx, unsigned int numa_node)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3521) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3522) 	struct driver_data *dd = set->driver_data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3523) 	struct mtip_cmd *cmd = blk_mq_rq_to_pdu(rq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3524) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3525) 	cmd->command = dma_alloc_coherent(&dd->pdev->dev, CMD_DMA_ALLOC_SZ,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3526) 			&cmd->command_dma, GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3527) 	if (!cmd->command)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3528) 		return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3529) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3530) 	sg_init_table(cmd->sg, MTIP_MAX_SG);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3531) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3532) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3533) 
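/*
 * Each request's PDU carries a CMD_DMA_ALLOC_SZ coherent buffer: the command
 * table whose bus address (cmd->command_dma) is programmed into the command
 * header's ctba/ctbau fields at issue time (see mtip_issue_reserved_cmd()
 * above), with the scatter list placed AHCI_CMD_TBL_HDR_SZ bytes into the
 * same buffer. mtip_free_cmd() releases it when the tag set is torn down.
 */
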
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3534) static enum blk_eh_timer_return mtip_cmd_timeout(struct request *req,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3535) 								bool reserved)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3536) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3537) 	struct driver_data *dd = req->q->queuedata;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3538) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3539) 	if (reserved) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3540) 		struct mtip_cmd *cmd = blk_mq_rq_to_pdu(req);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3541) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3542) 		cmd->status = BLK_STS_TIMEOUT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3543) 		blk_mq_complete_request(req);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3544) 		return BLK_EH_DONE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3545) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3546) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3547) 	if (test_bit(req->tag, dd->port->cmds_to_issue))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3548) 		goto exit_handler;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3549) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3550) 	if (test_and_set_bit(MTIP_PF_TO_ACTIVE_BIT, &dd->port->flags))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3551) 		goto exit_handler;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3552) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3553) 	wake_up_interruptible(&dd->port->svc_wait);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3554) exit_handler:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3555) 	return BLK_EH_RESET_TIMER;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3556) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3557) 
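/*
 * Timeout policy, as implemented above: reserved (internal/passthrough)
 * commands are failed immediately with BLK_STS_TIMEOUT, while regular
 * commands (unless still parked in cmds_to_issue) only set
 * MTIP_PF_TO_ACTIVE_BIT to poke the service thread and have their timer
 * re-armed, leaving the actual error recovery to that thread.
 */
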
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3558) static const struct blk_mq_ops mtip_mq_ops = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3559) 	.queue_rq	= mtip_queue_rq,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3560) 	.init_request	= mtip_init_cmd,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3561) 	.exit_request	= mtip_free_cmd,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3562) 	.complete	= mtip_softirq_done_fn,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3563) 	.timeout        = mtip_cmd_timeout,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3564) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3565) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3566) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3567)  * Block layer initialization function.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3568)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3569)  * This function is called once by the PCI layer for each P320
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3570)  * device that is connected to the system.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3571)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3572)  * @dd Pointer to the driver data structure.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3573)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3574)  * return value
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3575)  *	0 on success else an error code.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3576)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3577) static int mtip_block_initialize(struct driver_data *dd)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3578) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3579) 	int rv = 0, wait_for_rebuild = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3580) 	sector_t capacity;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3581) 	unsigned int index = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3582) 	struct kobject *kobj;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3583) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3584) 	if (dd->disk)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3585) 		goto skip_create_disk; /* hw init done, before rebuild */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3586) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3587) 	if (mtip_hw_init(dd)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3588) 		rv = -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3589) 		goto protocol_init_error;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3590) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3591) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3592) 	dd->disk = alloc_disk_node(MTIP_MAX_MINORS, dd->numa_node);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3593) 	if (dd->disk  == NULL) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3594) 		dev_err(&dd->pdev->dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3595) 			"Unable to allocate gendisk structure\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3596) 		rv = -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3597) 		goto alloc_disk_error;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3598) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3599) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3600) 	rv = ida_alloc(&rssd_index_ida, GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3601) 	if (rv < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3602) 		goto ida_get_error;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3603) 	index = rv;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3604) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3605) 	rv = rssd_disk_name_format("rssd",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3606) 				index,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3607) 				dd->disk->disk_name,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3608) 				DISK_NAME_LEN);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3609) 	if (rv)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3610) 		goto disk_index_error;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3611) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3612) 	dd->disk->major		= dd->major;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3613) 	dd->disk->first_minor	= index * MTIP_MAX_MINORS;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3614) 	dd->disk->minors 	= MTIP_MAX_MINORS;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3615) 	dd->disk->fops		= &mtip_block_ops;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3616) 	dd->disk->private_data	= dd;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3617) 	dd->index		= index;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3618) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3619) 	mtip_hw_debugfs_init(dd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3620) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3621) 	memset(&dd->tags, 0, sizeof(dd->tags));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3622) 	dd->tags.ops = &mtip_mq_ops;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3623) 	dd->tags.nr_hw_queues = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3624) 	dd->tags.queue_depth = MTIP_MAX_COMMAND_SLOTS;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3625) 	dd->tags.reserved_tags = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3626) 	dd->tags.cmd_size = sizeof(struct mtip_cmd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3627) 	dd->tags.numa_node = dd->numa_node;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3628) 	dd->tags.flags = BLK_MQ_F_SHOULD_MERGE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3629) 	dd->tags.driver_data = dd;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3630) 	dd->tags.timeout = MTIP_NCQ_CMD_TIMEOUT_MS;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3631) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3632) 	rv = blk_mq_alloc_tag_set(&dd->tags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3633) 	if (rv) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3634) 		dev_err(&dd->pdev->dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3635) 			"Unable to allocate request queue\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3636) 		goto block_queue_alloc_tag_error;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3637) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3638) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3639) 	/* Allocate the request queue. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3640) 	dd->queue = blk_mq_init_queue(&dd->tags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3641) 	if (IS_ERR(dd->queue)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3642) 		dev_err(&dd->pdev->dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3643) 			"Unable to allocate request queue\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3644) 		rv = -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3645) 		goto block_queue_alloc_init_error;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3646) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3647) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3648) 	dd->disk->queue		= dd->queue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3649) 	dd->queue->queuedata	= dd;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3650) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3651) skip_create_disk:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3652) 	/* Initialize the protocol layer. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3653) 	wait_for_rebuild = mtip_hw_get_identify(dd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3654) 	if (wait_for_rebuild < 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3655) 		dev_err(&dd->pdev->dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3656) 			"Protocol layer initialization failed\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3657) 		rv = -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3658) 		goto init_hw_cmds_error;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3659) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3660) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3661) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3662) 	 * if rebuild pending, start the service thread, and delay the block
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3663) 	 * queue creation and device_add_disk()
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3664) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3665) 	if (wait_for_rebuild == MTIP_FTL_REBUILD_MAGIC)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3666) 		goto start_service_thread;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3667) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3668) 	/* Set device limits. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3669) 	blk_queue_flag_set(QUEUE_FLAG_NONROT, dd->queue);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3670) 	blk_queue_flag_clear(QUEUE_FLAG_ADD_RANDOM, dd->queue);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3671) 	blk_queue_max_segments(dd->queue, MTIP_MAX_SG);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3672) 	blk_queue_physical_block_size(dd->queue, 4096);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3673) 	blk_queue_max_hw_sectors(dd->queue, 0xffff);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3674) 	blk_queue_max_segment_size(dd->queue, 0x400000);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3675) 	dma_set_max_seg_size(&dd->pdev->dev, 0x400000);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3676) 	blk_queue_io_min(dd->queue, 4096);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3677) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3678) 	/* Set the capacity of the device in 512 byte sectors. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3679) 	if (!(mtip_hw_get_capacity(dd, &capacity))) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3680) 		dev_warn(&dd->pdev->dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3681) 			"Could not read drive capacity\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3682) 		rv = -EIO;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3683) 		goto read_capacity_error;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3684) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3685) 	set_capacity(dd->disk, capacity);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3686) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3687) 	/* Enable the block device and add it to /dev */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3688) 	device_add_disk(&dd->pdev->dev, dd->disk, NULL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3689) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3690) 	dd->bdev = bdget_disk(dd->disk, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3691) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3692) 	 * Now that the disk is active, initialize any sysfs attributes
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3693) 	 * managed by the protocol layer.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3694) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3695) 	kobj = kobject_get(&disk_to_dev(dd->disk)->kobj);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3696) 	if (kobj) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3697) 		mtip_hw_sysfs_init(dd, kobj);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3698) 		kobject_put(kobj);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3699) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3700) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3701) 	if (dd->mtip_svc_handler) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3702) 		set_bit(MTIP_DDF_INIT_DONE_BIT, &dd->dd_flag);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3703) 		return rv; /* service thread created for handling rebuild */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3704) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3705) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3706) start_service_thread:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3707) 	dd->mtip_svc_handler = kthread_create_on_node(mtip_service_thread,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3708) 						dd, dd->numa_node,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3709) 						"mtip_svc_thd_%02d", index);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3710) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3711) 	if (IS_ERR(dd->mtip_svc_handler)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3712) 		dev_err(&dd->pdev->dev, "service thread failed to start\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3713) 		dd->mtip_svc_handler = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3714) 		rv = -EFAULT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3715) 		goto kthread_run_error;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3716) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3717) 	wake_up_process(dd->mtip_svc_handler);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3718) 	if (wait_for_rebuild == MTIP_FTL_REBUILD_MAGIC)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3719) 		rv = wait_for_rebuild;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3720) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3721) 	return rv;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3722) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3723) kthread_run_error:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3724) 	bdput(dd->bdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3725) 	dd->bdev = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3726) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3727) 	/* Delete our gendisk. This also removes the device from /dev */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3728) 	del_gendisk(dd->disk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3729) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3730) read_capacity_error:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3731) init_hw_cmds_error:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3732) 	blk_cleanup_queue(dd->queue);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3733) block_queue_alloc_init_error:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3734) 	blk_mq_free_tag_set(&dd->tags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3735) block_queue_alloc_tag_error:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3736) 	mtip_hw_debugfs_exit(dd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3737) disk_index_error:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3738) 	ida_free(&rssd_index_ida, index);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3739) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3740) ida_get_error:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3741) 	put_disk(dd->disk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3742) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3743) alloc_disk_error:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3744) 	mtip_hw_exit(dd); /* De-initialize the protocol layer. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3745) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3746) protocol_init_error:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3747) 	return rv;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3748) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3749) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3750) static bool mtip_no_dev_cleanup(struct request *rq, void *data, bool reserv)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3751) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3752) 	struct mtip_cmd *cmd = blk_mq_rq_to_pdu(rq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3753) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3754) 	cmd->status = BLK_STS_IOERR;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3755) 	blk_mq_complete_request(rq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3756) 	return true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3757) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3758) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3759) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3760)  * Block layer deinitialization function.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3761)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3762)  * Called by the PCI layer as each P320 device is removed.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3763)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3764)  * @dd Pointer to the driver data structure.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3765)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3766)  * return value
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3767)  *	0
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3768)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3769) static int mtip_block_remove(struct driver_data *dd)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3770) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3771) 	struct kobject *kobj;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3772) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3773) 	mtip_hw_debugfs_exit(dd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3774) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3775) 	if (dd->mtip_svc_handler) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3776) 		set_bit(MTIP_PF_SVC_THD_STOP_BIT, &dd->port->flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3777) 		wake_up_interruptible(&dd->port->svc_wait);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3778) 		kthread_stop(dd->mtip_svc_handler);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3779) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3780) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3781) 	/* Clean up the sysfs attributes, if created */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3782) 	if (test_bit(MTIP_DDF_INIT_DONE_BIT, &dd->dd_flag)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3783) 		kobj = kobject_get(&disk_to_dev(dd->disk)->kobj);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3784) 		if (kobj) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3785) 			mtip_hw_sysfs_exit(dd, kobj);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3786) 			kobject_put(kobj);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3787) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3788) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3789) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3790) 	if (!dd->sr) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3791) 		/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3792) 		 * Explicitly wait here for IOs to quiesce,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3793) 		 * as mtip_standby_drive usually won't wait for IOs.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3794) 		 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3795) 		if (!mtip_quiesce_io(dd->port, MTIP_QUIESCE_IO_TIMEOUT_MS))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3796) 			mtip_standby_drive(dd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3797) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3798) 	else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3799) 		dev_info(&dd->pdev->dev, "device %s surprise removal\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3800) 						dd->disk->disk_name);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3801) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3802) 	blk_freeze_queue_start(dd->queue);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3803) 	blk_mq_quiesce_queue(dd->queue);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3804) 	blk_mq_tagset_busy_iter(&dd->tags, mtip_no_dev_cleanup, dd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3805) 	blk_mq_unquiesce_queue(dd->queue);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3806) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3807) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3808) 	 * Delete our gendisk structure. This also removes the device
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3809) 	 * from /dev
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3810) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3811) 	if (dd->bdev) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3812) 		bdput(dd->bdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3813) 		dd->bdev = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3814) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3815) 	if (dd->disk) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3816) 		if (test_bit(MTIP_DDF_INIT_DONE_BIT, &dd->dd_flag))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3817) 			del_gendisk(dd->disk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3818) 		if (dd->disk->queue) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3819) 			blk_cleanup_queue(dd->queue);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3820) 			blk_mq_free_tag_set(&dd->tags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3821) 			dd->queue = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3822) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3823) 		put_disk(dd->disk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3824) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3825) 	dd->disk  = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3826) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3827) 	ida_free(&rssd_index_ida, dd->index);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3828) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3829) 	/* De-initialize the protocol layer. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3830) 	mtip_hw_exit(dd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3831) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3832) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3833) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3834) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3835) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3836)  * Function called by the PCI layer just before the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3837)  * machine shuts down.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3838)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3839)  * If a protocol layer shutdown function is present it will be called
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3840)  * by this function.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3841)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3842)  * @dd Pointer to the driver data structure.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3843)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3844)  * return value
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3845)  *	0
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3846)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3847) static int mtip_block_shutdown(struct driver_data *dd)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3848) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3849) 	mtip_hw_shutdown(dd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3850) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3851) 	/* Delete our gendisk structure, and cleanup the blk queue. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3852) 	if (dd->disk) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3853) 		dev_info(&dd->pdev->dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3854) 			"Shutting down %s ...\n", dd->disk->disk_name);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3855) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3856) 		if (test_bit(MTIP_DDF_INIT_DONE_BIT, &dd->dd_flag))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3857) 			del_gendisk(dd->disk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3858) 		if (dd->disk->queue) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3859) 			blk_cleanup_queue(dd->queue);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3860) 			blk_mq_free_tag_set(&dd->tags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3861) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3862) 		put_disk(dd->disk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3863) 		dd->disk  = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3864) 		dd->queue = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3865) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3866) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3867) 	ida_free(&rssd_index_ida, dd->index);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3868) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3869) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3870) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3871) static int mtip_block_suspend(struct driver_data *dd)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3872) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3873) 	dev_info(&dd->pdev->dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3874) 		"Suspending %s ...\n", dd->disk->disk_name);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3875) 	mtip_hw_suspend(dd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3876) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3877) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3878) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3879) static int mtip_block_resume(struct driver_data *dd)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3880) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3881) 	dev_info(&dd->pdev->dev, "Resuming %s ...\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3882) 		dd->disk->disk_name);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3883) 	mtip_hw_resume(dd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3884) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3885) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3886) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3887) static void drop_cpu(int cpu)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3888) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3889) 	cpu_use[cpu]--;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3890) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3891) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3892) static int get_least_used_cpu_on_node(int node)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3893) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3894) 	int cpu, least_used_cpu, least_cnt;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3895) 	const struct cpumask *node_mask;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3896) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3897) 	node_mask = cpumask_of_node(node);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3898) 	least_used_cpu = cpumask_first(node_mask);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3899) 	least_cnt = cpu_use[least_used_cpu];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3900) 	cpu = least_used_cpu;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3901) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3902) 	for_each_cpu(cpu, node_mask) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3903) 		if (cpu_use[cpu] < least_cnt) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3904) 			least_used_cpu = cpu;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3905) 			least_cnt = cpu_use[cpu];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3906) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3907) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3908) 	cpu_use[least_used_cpu]++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3909) 	return least_used_cpu;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3910) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3911) 
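/*
 * Example: if a node's CPUs currently have cpu_use[] counts of {2, 1, 3},
 * the scan above picks the second CPU, bumps its count to 2 and returns it;
 * ties keep the first (lowest-numbered) CPU on the node.
 */
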
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3912) /* Helper for selecting a node in round robin mode */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3913) static inline int mtip_get_next_rr_node(void)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3914) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3915) 	static int next_node = NUMA_NO_NODE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3916) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3917) 	if (next_node == NUMA_NO_NODE) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3918) 		next_node = first_online_node;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3919) 		return next_node;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3920) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3921) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3922) 	next_node = next_online_node(next_node);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3923) 	if (next_node == MAX_NUMNODES)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3924) 		next_node = first_online_node;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3925) 	return next_node;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3926) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3927) 
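/*
 * The first caller gets first_online_node; each later call advances to the
 * next online node and wraps around after the last one, so devices probed
 * without a usable NUMA hint are spread round-robin across online nodes.
 */
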
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3928) static DEFINE_HANDLER(0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3929) static DEFINE_HANDLER(1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3930) static DEFINE_HANDLER(2);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3931) static DEFINE_HANDLER(3);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3932) static DEFINE_HANDLER(4);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3933) static DEFINE_HANDLER(5);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3934) static DEFINE_HANDLER(6);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3935) static DEFINE_HANDLER(7);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3936) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3937) static void mtip_disable_link_opts(struct driver_data *dd, struct pci_dev *pdev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3938) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3939) 	int pos;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3940) 	unsigned short pcie_dev_ctrl;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3941) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3942) 	pos = pci_find_capability(pdev, PCI_CAP_ID_EXP);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3943) 	if (pos) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3944) 		pci_read_config_word(pdev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3945) 			pos + PCI_EXP_DEVCTL,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3946) 			&pcie_dev_ctrl);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3947) 		if (pcie_dev_ctrl & (1 << 11) ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3948) 		    pcie_dev_ctrl & (1 << 4)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3949) 			dev_info(&dd->pdev->dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3950) 				"Disabling ERO/No-Snoop on bridge device %04x:%04x\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3951) 					pdev->vendor, pdev->device);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3952) 			pcie_dev_ctrl &= ~(PCI_EXP_DEVCTL_NOSNOOP_EN |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3953) 						PCI_EXP_DEVCTL_RELAX_EN);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3954) 			pci_write_config_word(pdev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3955) 				pos + PCI_EXP_DEVCTL,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3956) 				pcie_dev_ctrl);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3957) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3958) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3959) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3960) 
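/*
 * The raw bit tests in mtip_disable_link_opts() above match the masks it
 * clears: bit 4 of the PCIe Device Control register is Relaxed Ordering
 * Enable (PCI_EXP_DEVCTL_RELAX_EN, 0x0010) and bit 11 is No Snoop Enable
 * (PCI_EXP_DEVCTL_NOSNOOP_EN, 0x0800).
 */
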
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3961) static void mtip_fix_ero_nosnoop(struct driver_data *dd, struct pci_dev *pdev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3962) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3963) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3964) 	 * This workaround is specific to AMD/ATI chipsets with a PCI upstream
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3965) 	 * device whose device id is of the form 0x5aXX.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3966) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3967) 	if (pdev->bus && pdev->bus->self) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3968) 		if (pdev->bus->self->vendor == PCI_VENDOR_ID_ATI &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3969) 		    ((pdev->bus->self->device & 0xff00) == 0x5a00)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3970) 			mtip_disable_link_opts(dd, pdev->bus->self);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3971) 		} else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3972) 			/* Check further up the topology */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3973) 			struct pci_dev *parent_dev = pdev->bus->self;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3974) 			if (parent_dev->bus &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3975) 				parent_dev->bus->parent &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3976) 				parent_dev->bus->parent->self &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3977) 				parent_dev->bus->parent->self->vendor ==
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3978) 					 PCI_VENDOR_ID_ATI &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3979) 				(parent_dev->bus->parent->self->device &
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3980) 					0xff00) == 0x5a00) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3981) 				mtip_disable_link_opts(dd,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3982) 					parent_dev->bus->parent->self);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3983) 			}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3984) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3985) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3986) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3987) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3988) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3989)  * Called for each supported PCI device detected.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3990)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3991)  * This function allocates the private data structure, enables the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3992)  * PCI device and then calls the block layer initialization function.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3993)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3994)  * return value
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3995)  *	0 on success else an error code.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3996)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3997) static int mtip_pci_probe(struct pci_dev *pdev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3998) 			const struct pci_device_id *ent)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3999) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4000) 	int rv = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4001) 	struct driver_data *dd = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4002) 	char cpu_list[256];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4003) 	const struct cpumask *node_mask;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4004) 	int cpu, i = 0, j = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4005) 	int my_node = NUMA_NO_NODE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4006) 	unsigned long flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4007) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4008) 	/* Allocate memory for this device's private data. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4009) 	my_node = pcibus_to_node(pdev->bus);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4010) 	if (my_node != NUMA_NO_NODE) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4011) 		if (!node_online(my_node))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4012) 			my_node = mtip_get_next_rr_node();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4013) 	} else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4014) 		dev_info(&pdev->dev, "Kernel not reporting proximity, choosing a node\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4015) 		my_node = mtip_get_next_rr_node();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4016) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4017) 	dev_info(&pdev->dev, "NUMA node %d (closest: %d,%d, probe on %d:%d)\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4018) 		my_node, pcibus_to_node(pdev->bus), dev_to_node(&pdev->dev),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4019) 		cpu_to_node(raw_smp_processor_id()), raw_smp_processor_id());
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4020) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4021) 	dd = kzalloc_node(sizeof(struct driver_data), GFP_KERNEL, my_node);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4022) 	if (dd == NULL) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4023) 		dev_err(&pdev->dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4024) 			"Unable to allocate memory for driver data\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4025) 		return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4026) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4027) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4028) 	/* Attach the private data to this PCI device.  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4029) 	pci_set_drvdata(pdev, dd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4030) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4031) 	rv = pcim_enable_device(pdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4032) 	if (rv < 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4033) 		dev_err(&pdev->dev, "Unable to enable device\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4034) 		goto iomap_err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4035) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4036) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4037) 	/* Map BAR5 to memory. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4038) 	rv = pcim_iomap_regions(pdev, 1 << MTIP_ABAR, MTIP_DRV_NAME);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4039) 	if (rv < 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4040) 		dev_err(&pdev->dev, "Unable to map regions\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4041) 		goto iomap_err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4042) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4043) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4044) 	rv = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4045) 	if (rv) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4046) 		dev_warn(&pdev->dev, "64-bit DMA enable failed\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4047) 		goto setmask_err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4048) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4049) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4050) 	/* Copy the info we may need later into the private data structure. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4051) 	dd->major	= mtip_major;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4052) 	dd->instance	= instance;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4053) 	dd->pdev	= pdev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4054) 	dd->numa_node	= my_node;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4055) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4056) 	INIT_LIST_HEAD(&dd->online_list);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4057) 	INIT_LIST_HEAD(&dd->remove_list);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4058) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4059) 	memset(dd->workq_name, 0, 32);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4060) 	snprintf(dd->workq_name, 31, "mtipq%d", dd->instance);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4061) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4062) 	dd->isr_workq = create_workqueue(dd->workq_name);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4063) 	if (!dd->isr_workq) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4064) 		dev_warn(&pdev->dev, "Can't create wq %d\n", dd->instance);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4065) 		rv = -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4066) 		goto setmask_err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4067) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4068) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4069) 	memset(cpu_list, 0, sizeof(cpu_list));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4070) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4071) 	node_mask = cpumask_of_node(dd->numa_node);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4072) 	if (!cpumask_empty(node_mask)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4073) 		for_each_cpu(cpu, node_mask)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4074) 		{
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4075) 			snprintf(&cpu_list[j], 256 - j, "%d ", cpu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4076) 			j = strlen(cpu_list);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4077) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4078) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4079) 		dev_info(&pdev->dev, "Node %d on package %d has %d cpu(s): %s\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4080) 			dd->numa_node,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4081) 			topology_physical_package_id(cpumask_first(node_mask)),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4082) 			nr_cpus_node(dd->numa_node),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4083) 			cpu_list);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4084) 	} else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4085) 		dev_dbg(&pdev->dev, "mtip32xx: node_mask empty\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4086) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4087) 	dd->isr_binding = get_least_used_cpu_on_node(dd->numa_node);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4088) 	dev_info(&pdev->dev, "Initial IRQ binding node:cpu %d:%d\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4089) 		cpu_to_node(dd->isr_binding), dd->isr_binding);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4090) 
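	/*
	 * Spread the eight slot-group service contexts over three CPUs on
	 * the local NUMA node: groups 0 and 3 share the ISR CPU, groups
	 * 1, 4 and 7 share a second CPU, and groups 2, 5 and 6 a third.
	 */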
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4091) 	/* first worker context always runs in ISR */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4092) 	dd->work[0].cpu_binding = dd->isr_binding;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4093) 	dd->work[1].cpu_binding = get_least_used_cpu_on_node(dd->numa_node);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4094) 	dd->work[2].cpu_binding = get_least_used_cpu_on_node(dd->numa_node);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4095) 	dd->work[3].cpu_binding = dd->work[0].cpu_binding;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4096) 	dd->work[4].cpu_binding = dd->work[1].cpu_binding;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4097) 	dd->work[5].cpu_binding = dd->work[2].cpu_binding;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4098) 	dd->work[6].cpu_binding = dd->work[2].cpu_binding;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4099) 	dd->work[7].cpu_binding = dd->work[1].cpu_binding;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4100) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4101) 	/* Log the bindings */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4102) 	for_each_present_cpu(cpu) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4103) 		memset(cpu_list, 0, sizeof(cpu_list));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4104) 		for (i = 0, j = 0; i < MTIP_MAX_SLOT_GROUPS; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4105) 			if (dd->work[i].cpu_binding == cpu) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4106) 				snprintf(&cpu_list[j], 256 - j, "%d ", i);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4107) 				j = strlen(cpu_list);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4108) 			}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4109) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4110) 		if (j)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4111) 			dev_info(&pdev->dev, "CPU %d: WQs %s\n", cpu, cpu_list);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4112) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4113) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4114) 	INIT_WORK(&dd->work[0].work, mtip_workq_sdbf0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4115) 	INIT_WORK(&dd->work[1].work, mtip_workq_sdbf1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4116) 	INIT_WORK(&dd->work[2].work, mtip_workq_sdbf2);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4117) 	INIT_WORK(&dd->work[3].work, mtip_workq_sdbf3);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4118) 	INIT_WORK(&dd->work[4].work, mtip_workq_sdbf4);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4119) 	INIT_WORK(&dd->work[5].work, mtip_workq_sdbf5);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4120) 	INIT_WORK(&dd->work[6].work, mtip_workq_sdbf6);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4121) 	INIT_WORK(&dd->work[7].work, mtip_workq_sdbf7);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4122) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4123) 	pci_set_master(pdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4124) 	rv = pci_enable_msi(pdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4125) 	if (rv) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4126) 		dev_warn(&pdev->dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4127) 			"Unable to enable MSI interrupt.\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4128) 		goto msi_initialize_err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4129) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4130) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4131) 	mtip_fix_ero_nosnoop(dd, pdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4132) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4133) 	/* Initialize the block layer. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4134) 	rv = mtip_block_initialize(dd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4135) 	if (rv < 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4136) 		dev_err(&pdev->dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4137) 			"Unable to initialize block layer\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4138) 		goto block_initialize_err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4139) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4140) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4141) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4142) 	 * Increment the instance count so that each device has a unique
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4143) 	 * instance number.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4144) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4145) 	instance++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4146) 	if (rv != MTIP_FTL_REBUILD_MAGIC)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4147) 		set_bit(MTIP_DDF_INIT_DONE_BIT, &dd->dd_flag);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4148) 	else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4149) 		rv = 0; /* device in rebuild state, return 0 from probe */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4150) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4151) 	/* Add to online list even if in ftl rebuild */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4152) 	spin_lock_irqsave(&dev_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4153) 	list_add(&dd->online_list, &online_list);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4154) 	spin_unlock_irqrestore(&dev_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4155) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4156) 	goto done;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4157) 
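	/*
	 * Error unwind: the labels below undo the setup steps in reverse
	 * order, each path falling through to release everything acquired
	 * before the point of failure.
	 */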
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4158) block_initialize_err:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4159) 	pci_disable_msi(pdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4160) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4161) msi_initialize_err:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4162) 	if (dd->isr_workq) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4163) 		flush_workqueue(dd->isr_workq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4164) 		destroy_workqueue(dd->isr_workq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4165) 		drop_cpu(dd->work[0].cpu_binding);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4166) 		drop_cpu(dd->work[1].cpu_binding);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4167) 		drop_cpu(dd->work[2].cpu_binding);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4168) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4169) setmask_err:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4170) 	pcim_iounmap_regions(pdev, 1 << MTIP_ABAR);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4171) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4172) iomap_err:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4173) 	kfree(dd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4174) 	pci_set_drvdata(pdev, NULL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4175) 	return rv;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4176) done:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4177) 	return rv;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4178) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4179) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4180) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4181)  * Called for each probed device when the device is removed or the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4182)  * driver is unloaded.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4183)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4184)  * return value
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4185)  *	None
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4186)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4187) static void mtip_pci_remove(struct pci_dev *pdev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4188) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4189) 	struct driver_data *dd = pci_get_drvdata(pdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4190) 	unsigned long flags, to;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4191) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4192) 	set_bit(MTIP_DDF_REMOVAL_BIT, &dd->dd_flag);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4193) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4194) 	spin_lock_irqsave(&dev_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4195) 	list_del_init(&dd->online_list);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4196) 	list_add(&dd->remove_list, &removing_list);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4197) 	spin_unlock_irqrestore(&dev_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4198) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4199) 	mtip_check_surprise_removal(pdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4200) 	synchronize_irq(dd->pdev->irq);
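	/* synchronize_irq() waits for any in-flight interrupt handler to complete. */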
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4201) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4202) 	/* Spin until workers are done */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4203) 	to = jiffies + msecs_to_jiffies(4000);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4204) 	do {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4205) 		msleep(20);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4206) 	} while (atomic_read(&dd->irq_workers_active) != 0 &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4207) 		time_before(jiffies, to));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4208) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4209) 	if (!dd->sr)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4210) 		fsync_bdev(dd->bdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4211) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4212) 	if (atomic_read(&dd->irq_workers_active) != 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4213) 		dev_warn(&dd->pdev->dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4214) 			"Completion workers still active!\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4215) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4216) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4217) 	blk_set_queue_dying(dd->queue);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4218) 	set_bit(MTIP_DDF_REMOVE_PENDING_BIT, &dd->dd_flag);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4219) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4220) 	/* Clean up the block layer. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4221) 	mtip_block_remove(dd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4222) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4223) 	if (dd->isr_workq) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4224) 		flush_workqueue(dd->isr_workq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4225) 		destroy_workqueue(dd->isr_workq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4226) 		drop_cpu(dd->work[0].cpu_binding);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4227) 		drop_cpu(dd->work[1].cpu_binding);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4228) 		drop_cpu(dd->work[2].cpu_binding);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4229) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4230) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4231) 	pci_disable_msi(pdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4232) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4233) 	spin_lock_irqsave(&dev_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4234) 	list_del_init(&dd->remove_list);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4235) 	spin_unlock_irqrestore(&dev_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4236) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4237) 	kfree(dd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4238) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4239) 	pcim_iounmap_regions(pdev, 1 << MTIP_ABAR);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4240) 	pci_set_drvdata(pdev, NULL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4241) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4242) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4243) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4244)  * Called for each probed device when the device is suspended.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4245)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4246)  * return value
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4247)  *	0  Success
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4248)  *	<0 Error
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4249)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4250) static int mtip_pci_suspend(struct pci_dev *pdev, pm_message_t mesg)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4251) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4252) 	int rv = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4253) 	struct driver_data *dd = pci_get_drvdata(pdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4254) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4255) 	if (!dd) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4256) 		dev_err(&pdev->dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4257) 			"Driver private datastructure is NULL\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4258) 		return -EFAULT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4259) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4260) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4261) 	set_bit(MTIP_DDF_RESUME_BIT, &dd->dd_flag);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4262) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4263) 	/* Disable ports & interrupts then send standby immediate */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4264) 	rv = mtip_block_suspend(dd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4265) 	if (rv < 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4266) 		dev_err(&pdev->dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4267) 			"Failed to suspend controller\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4268) 		return rv;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4269) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4270) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4271) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4272) 	 * Save the PCI config space to the pdev structure and
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4273) 	 * disable the device.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4274) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4275) 	pci_save_state(pdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4276) 	pci_disable_device(pdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4277) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4278) 	/* Move to low power state. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4279) 	pci_set_power_state(pdev, PCI_D3hot);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4280) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4281) 	return rv;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4282) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4283) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4284) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4285)  * Called for each probed device when the device is resumed.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4286)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4287)  * return value
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4288)  *      0  Success
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4289)  *      <0 Error
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4290)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4291) static int mtip_pci_resume(struct pci_dev *pdev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4292) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4293) 	int rv = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4294) 	struct driver_data *dd;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4295) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4296) 	dd = pci_get_drvdata(pdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4297) 	if (!dd) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4298) 		dev_err(&pdev->dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4299) 			"Driver private datastructure is NULL\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4300) 		return -EFAULT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4301) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4302) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4303) 	/* Move the device to the active state. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4304) 	pci_set_power_state(pdev, PCI_D0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4305) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4306) 	/* Restore PCI configuration space */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4307) 	pci_restore_state(pdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4308) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4309) 	/* Enable the PCI device. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4310) 	rv = pcim_enable_device(pdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4311) 	if (rv < 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4312) 		dev_err(&pdev->dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4313) 			"Failed to enable card during resume\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4314) 		goto err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4315) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4316) 	pci_set_master(pdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4317) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4318) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4319) 	 * Calls the hbaReset, initPort, and startPort functions,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4320) 	 * then enables interrupts.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4321) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4322) 	rv = mtip_block_resume(dd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4323) 	if (rv < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4324) 		dev_err(&pdev->dev, "Unable to resume\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4325) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4326) err:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4327) 	clear_bit(MTIP_DDF_RESUME_BIT, &dd->dd_flag);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4328) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4329) 	return rv;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4330) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4331) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4332) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4333)  * Shutdown routine
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4334)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4335)  * return value
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4336)  *      None
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4337)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4338) static void mtip_pci_shutdown(struct pci_dev *pdev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4339) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4340) 	struct driver_data *dd = pci_get_drvdata(pdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4341) 	if (dd)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4342) 		mtip_block_shutdown(dd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4343) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4344) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4345) /* Table of device IDs supported by this driver. */
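/*
 * Each PCI_DEVICE() entry matches on vendor and device ID only (any
 * subvendor/subdevice); the { 0 } entry terminates the table, and
 * MODULE_DEVICE_TABLE() below exports it for module autoloading.
 */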
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4346) static const struct pci_device_id mtip_pci_tbl[] = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4347) 	{ PCI_DEVICE(PCI_VENDOR_ID_MICRON, P320H_DEVICE_ID) },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4348) 	{ PCI_DEVICE(PCI_VENDOR_ID_MICRON, P320M_DEVICE_ID) },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4349) 	{ PCI_DEVICE(PCI_VENDOR_ID_MICRON, P320S_DEVICE_ID) },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4350) 	{ PCI_DEVICE(PCI_VENDOR_ID_MICRON, P325M_DEVICE_ID) },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4351) 	{ PCI_DEVICE(PCI_VENDOR_ID_MICRON, P420H_DEVICE_ID) },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4352) 	{ PCI_DEVICE(PCI_VENDOR_ID_MICRON, P420M_DEVICE_ID) },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4353) 	{ PCI_DEVICE(PCI_VENDOR_ID_MICRON, P425M_DEVICE_ID) },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4354) 	{ 0 }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4355) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4356) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4357) /* Structure that describes the PCI driver functions. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4358) static struct pci_driver mtip_pci_driver = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4359) 	.name			= MTIP_DRV_NAME,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4360) 	.id_table		= mtip_pci_tbl,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4361) 	.probe			= mtip_pci_probe,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4362) 	.remove			= mtip_pci_remove,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4363) 	.suspend		= mtip_pci_suspend,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4364) 	.resume			= mtip_pci_resume,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4365) 	.shutdown		= mtip_pci_shutdown,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4366) };
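/*
 * Note: .suspend/.resume above are the legacy PCI power-management
 * callbacks, which this 5.10-based kernel still supports; newer code
 * would typically provide a struct dev_pm_ops via .driver.pm instead.
 */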
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4367) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4368) MODULE_DEVICE_TABLE(pci, mtip_pci_tbl);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4369) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4370) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4371)  * Module initialization function.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4372)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4373)  * Called once when the module is loaded. This function allocates a major
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4374)  * block device number to the Cyclone devices and registers the PCI layer
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4375)  * of the driver.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4376)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4377)  * Return value
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4378)  *      0 on success else error code.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4379)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4380) static int __init mtip_init(void)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4381) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4382) 	int error;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4383) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4384) 	pr_info(MTIP_DRV_NAME " Version " MTIP_DRV_VERSION "\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4385) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4386) 	spin_lock_init(&dev_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4387) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4388) 	INIT_LIST_HEAD(&online_list);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4389) 	INIT_LIST_HEAD(&removing_list);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4390) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4391) 	/* Allocate a major block device number to use with this driver. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4392) 	error = register_blkdev(0, MTIP_DRV_NAME);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4393) 	if (error <= 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4394) 		pr_err("Unable to register block device (%d)\n", error);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4396) 		return -EBUSY;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4397) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4398) 	mtip_major = error;
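	/*
	 * Passing 0 to register_blkdev() requests a dynamically allocated
	 * major number, returned as a positive value on success; 0 or a
	 * negative value indicates failure, hence the check above.
	 */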
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4399) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4400) 	dfs_parent = debugfs_create_dir("rssd", NULL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4401) 	if (IS_ERR_OR_NULL(dfs_parent)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4402) 		pr_warn("Error creating debugfs parent\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4403) 		dfs_parent = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4404) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4405) 	if (dfs_parent) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4406) 		dfs_device_status = debugfs_create_file("device_status",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4407) 					0444, dfs_parent, NULL,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4408) 					&mtip_device_status_fops);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4409) 		if (IS_ERR_OR_NULL(dfs_device_status)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4410) 			pr_err("Error creating device_status node\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4411) 			dfs_device_status = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4412) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4413) 	}
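	/* debugfs setup is best-effort: failures above are logged but are not fatal. */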
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4414) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4415) 	/* Register our PCI operations. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4416) 	error = pci_register_driver(&mtip_pci_driver);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4417) 	if (error) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4418) 		debugfs_remove(dfs_parent);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4419) 		unregister_blkdev(mtip_major, MTIP_DRV_NAME);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4420) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4421) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4422) 	return error;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4423) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4424) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4425) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4426)  * Module de-initialization function.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4427)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4428)  * Called once when the module is unloaded. This function deallocates
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4429)  * the major block device number allocated by mtip_init() and
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4430)  * unregisters the PCI layer of the driver.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4431)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4432)  * Return value
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4433)  *      none
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4434)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4435) static void __exit mtip_exit(void)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4436) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4437) 	/* Release the allocated major block device number. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4438) 	unregister_blkdev(mtip_major, MTIP_DRV_NAME);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4439) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4440) 	/* Unregister the PCI driver. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4441) 	pci_unregister_driver(&mtip_pci_driver);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4442) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4443) 	debugfs_remove_recursive(dfs_parent);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4444) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4445) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4446) MODULE_AUTHOR("Micron Technology, Inc");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4447) MODULE_DESCRIPTION("Micron RealSSD PCIe Block Driver");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4448) MODULE_LICENSE("GPL");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4449) MODULE_VERSION(MTIP_DRV_VERSION);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4450) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4451) module_init(mtip_init);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4452) module_exit(mtip_exit);