Orange Pi5 kernel

Deprecated Linux kernel 5.10.110 for the Orange Pi 5 / 5B / 5 Plus boards

3 Commits   0 Branches   0 Tags
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   1) // SPDX-License-Identifier: GPL-2.0
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   2) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   3)  * bsg.c - block layer implementation of the sg v4 interface
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   4)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   5) #include <linux/module.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   6) #include <linux/init.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   7) #include <linux/file.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   8) #include <linux/blkdev.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   9) #include <linux/cdev.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  10) #include <linux/jiffies.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  11) #include <linux/percpu.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  12) #include <linux/idr.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  13) #include <linux/bsg.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  14) #include <linux/slab.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  15) #include <linux/pm_runtime.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  16) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  17) #include <scsi/scsi.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  18) #include <scsi/scsi_ioctl.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  19) #include <scsi/scsi_cmnd.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  20) #include <scsi/scsi_device.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  21) #include <scsi/scsi_driver.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  22) #include <scsi/sg.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  23) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  24) #define BSG_DESCRIPTION	"Block layer SCSI generic (bsg) driver"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  25) #define BSG_VERSION	"0.4"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  26) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  27) #define bsg_dbg(bd, fmt, ...) \
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  28) 	pr_debug("%s: " fmt, (bd)->name, ##__VA_ARGS__)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  29) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  30) struct bsg_device {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  31) 	struct request_queue *queue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  32) 	spinlock_t lock;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  33) 	struct hlist_node dev_list;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  34) 	refcount_t ref_count;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  35) 	char name[20];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  36) 	int max_queue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  37) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  38) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  39) #define BSG_DEFAULT_CMDS	64
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  40) #define BSG_MAX_DEVS		32768
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  41) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  42) static DEFINE_MUTEX(bsg_mutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  43) static DEFINE_IDR(bsg_minor_idr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  44) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  45) #define BSG_LIST_ARRAY_SIZE	8
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  46) static struct hlist_head bsg_device_list[BSG_LIST_ARRAY_SIZE];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  47) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  48) static struct class *bsg_class;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  49) static int bsg_major;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  50) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  51) static inline struct hlist_head *bsg_dev_idx_hash(int index)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  52) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  53) 	return &bsg_device_list[index & (BSG_LIST_ARRAY_SIZE - 1)];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  54) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  55) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  56) #define uptr64(val) ((void __user *)(uintptr_t)(val))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  57) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  58) static int bsg_scsi_check_proto(struct sg_io_v4 *hdr)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  59) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  60) 	if (hdr->protocol != BSG_PROTOCOL_SCSI  ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  61) 	    hdr->subprotocol != BSG_SUB_PROTOCOL_SCSI_CMD)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  62) 		return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  63) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  64) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  65) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  66) static int bsg_scsi_fill_hdr(struct request *rq, struct sg_io_v4 *hdr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  67) 		fmode_t mode)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  68) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  69) 	struct scsi_request *sreq = scsi_req(rq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  70) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  71) 	if (hdr->dout_xfer_len && hdr->din_xfer_len) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  72) 		pr_warn_once("BIDI support in bsg has been removed.\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  73) 		return -EOPNOTSUPP;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  74) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  75) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  76) 	sreq->cmd_len = hdr->request_len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  77) 	if (sreq->cmd_len > BLK_MAX_CDB) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  78) 		sreq->cmd = kzalloc(sreq->cmd_len, GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  79) 		if (!sreq->cmd)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  80) 			return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  81) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  82) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  83) 	if (copy_from_user(sreq->cmd, uptr64(hdr->request), sreq->cmd_len))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  84) 		return -EFAULT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  85) 	if (blk_verify_command(sreq->cmd, mode))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  86) 		return -EPERM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  87) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  88) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  89) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  90) static int bsg_scsi_complete_rq(struct request *rq, struct sg_io_v4 *hdr)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  91) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  92) 	struct scsi_request *sreq = scsi_req(rq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  93) 	int ret = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  94) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  95) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  96) 	 * fill in all the output members
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  97) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  98) 	hdr->device_status = sreq->result & 0xff;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  99) 	hdr->transport_status = host_byte(sreq->result);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 100) 	hdr->driver_status = driver_byte(sreq->result);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 101) 	hdr->info = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 102) 	if (hdr->device_status || hdr->transport_status || hdr->driver_status)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 103) 		hdr->info |= SG_INFO_CHECK;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 104) 	hdr->response_len = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 105) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 106) 	if (sreq->sense_len && hdr->response) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 107) 		int len = min_t(unsigned int, hdr->max_response_len,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 108) 					sreq->sense_len);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 109) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 110) 		if (copy_to_user(uptr64(hdr->response), sreq->sense, len))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 111) 			ret = -EFAULT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 112) 		else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 113) 			hdr->response_len = len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 114) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 115) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 116) 	if (rq_data_dir(rq) == READ)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 117) 		hdr->din_resid = sreq->resid_len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 118) 	else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 119) 		hdr->dout_resid = sreq->resid_len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 120) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 121) 	return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 122) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 123) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 124) static void bsg_scsi_free_rq(struct request *rq)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 125) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 126) 	scsi_req_free_cmd(scsi_req(rq));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 127) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 128) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 129) static const struct bsg_ops bsg_scsi_ops = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 130) 	.check_proto		= bsg_scsi_check_proto,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 131) 	.fill_hdr		= bsg_scsi_fill_hdr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 132) 	.complete_rq		= bsg_scsi_complete_rq,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 133) 	.free_rq		= bsg_scsi_free_rq,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 134) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 135) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 136) static int bsg_sg_io(struct request_queue *q, fmode_t mode, void __user *uarg)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 137) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 138) 	struct request *rq;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 139) 	struct bio *bio;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 140) 	struct sg_io_v4 hdr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 141) 	int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 142) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 143) 	if (copy_from_user(&hdr, uarg, sizeof(hdr)))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 144) 		return -EFAULT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 145) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 146) 	if (!q->bsg_dev.class_dev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 147) 		return -ENXIO;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 148) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 149) 	if (hdr.guard != 'Q')
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 150) 		return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 151) 	ret = q->bsg_dev.ops->check_proto(&hdr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 152) 	if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 153) 		return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 154) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 155) 	rq = blk_get_request(q, hdr.dout_xfer_len ?
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 156) 			REQ_OP_SCSI_OUT : REQ_OP_SCSI_IN, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 157) 	if (IS_ERR(rq))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 158) 		return PTR_ERR(rq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 159) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 160) 	ret = q->bsg_dev.ops->fill_hdr(rq, &hdr, mode);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 161) 	if (ret) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 162) 		blk_put_request(rq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 163) 		return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 164) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 165) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 166) 	rq->timeout = msecs_to_jiffies(hdr.timeout);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 167) 	if (!rq->timeout)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 168) 		rq->timeout = q->sg_timeout;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 169) 	if (!rq->timeout)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 170) 		rq->timeout = BLK_DEFAULT_SG_TIMEOUT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 171) 	if (rq->timeout < BLK_MIN_SG_TIMEOUT)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 172) 		rq->timeout = BLK_MIN_SG_TIMEOUT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 173) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 174) 	if (hdr.dout_xfer_len) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 175) 		ret = blk_rq_map_user(q, rq, NULL, uptr64(hdr.dout_xferp),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 176) 				hdr.dout_xfer_len, GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 177) 	} else if (hdr.din_xfer_len) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 178) 		ret = blk_rq_map_user(q, rq, NULL, uptr64(hdr.din_xferp),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 179) 				hdr.din_xfer_len, GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 180) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 181) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 182) 	if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 183) 		goto out_free_rq;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 184) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 185) 	bio = rq->bio;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 186) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 187) 	blk_execute_rq(q, NULL, rq, !(hdr.flags & BSG_FLAG_Q_AT_TAIL));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 188) 	ret = rq->q->bsg_dev.ops->complete_rq(rq, &hdr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 189) 	blk_rq_unmap_user(bio);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 190) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 191) out_free_rq:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 192) 	rq->q->bsg_dev.ops->free_rq(rq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 193) 	blk_put_request(rq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 194) 	if (!ret && copy_to_user(uarg, &hdr, sizeof(hdr)))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 195) 		return -EFAULT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 196) 	return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 197) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 198) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 199) static struct bsg_device *bsg_alloc_device(void)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 200) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 201) 	struct bsg_device *bd;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 202) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 203) 	bd = kzalloc(sizeof(struct bsg_device), GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 204) 	if (unlikely(!bd))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 205) 		return NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 206) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 207) 	spin_lock_init(&bd->lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 208) 	bd->max_queue = BSG_DEFAULT_CMDS;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 209) 	INIT_HLIST_NODE(&bd->dev_list);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 210) 	return bd;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 211) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 212) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 213) static int bsg_put_device(struct bsg_device *bd)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 214) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 215) 	struct request_queue *q = bd->queue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 216) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 217) 	mutex_lock(&bsg_mutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 218) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 219) 	if (!refcount_dec_and_test(&bd->ref_count)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 220) 		mutex_unlock(&bsg_mutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 221) 		return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 222) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 223) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 224) 	hlist_del(&bd->dev_list);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 225) 	mutex_unlock(&bsg_mutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 226) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 227) 	bsg_dbg(bd, "tearing down\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 228) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 229) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 230) 	 * close can always block
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 231) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 232) 	kfree(bd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 233) 	blk_put_queue(q);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 234) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 235) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 236) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 237) static struct bsg_device *bsg_add_device(struct inode *inode,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 238) 					 struct request_queue *rq,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 239) 					 struct file *file)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 240) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 241) 	struct bsg_device *bd;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 242) 	unsigned char buf[32];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 243) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 244) 	lockdep_assert_held(&bsg_mutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 245) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 246) 	if (!blk_get_queue(rq))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 247) 		return ERR_PTR(-ENXIO);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 248) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 249) 	bd = bsg_alloc_device();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 250) 	if (!bd) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 251) 		blk_put_queue(rq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 252) 		return ERR_PTR(-ENOMEM);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 253) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 254) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 255) 	bd->queue = rq;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 256) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 257) 	refcount_set(&bd->ref_count, 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 258) 	hlist_add_head(&bd->dev_list, bsg_dev_idx_hash(iminor(inode)));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 259) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 260) 	strncpy(bd->name, dev_name(rq->bsg_dev.class_dev), sizeof(bd->name) - 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 261) 	bsg_dbg(bd, "bound to <%s>, max queue %d\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 262) 		format_dev_t(buf, inode->i_rdev), bd->max_queue);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 263) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 264) 	return bd;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 265) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 266) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 267) static struct bsg_device *__bsg_get_device(int minor, struct request_queue *q)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 268) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 269) 	struct bsg_device *bd;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 270) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 271) 	lockdep_assert_held(&bsg_mutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 272) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 273) 	hlist_for_each_entry(bd, bsg_dev_idx_hash(minor), dev_list) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 274) 		if (bd->queue == q) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 275) 			refcount_inc(&bd->ref_count);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 276) 			goto found;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 277) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 278) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 279) 	bd = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 280) found:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 281) 	return bd;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 282) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 283) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 284) static struct bsg_device *bsg_get_device(struct inode *inode, struct file *file)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 285) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 286) 	struct bsg_device *bd;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 287) 	struct bsg_class_device *bcd;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 288) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 289) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 290) 	 * find the class device
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 291) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 292) 	mutex_lock(&bsg_mutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 293) 	bcd = idr_find(&bsg_minor_idr, iminor(inode));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 294) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 295) 	if (!bcd) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 296) 		bd = ERR_PTR(-ENODEV);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 297) 		goto out_unlock;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 298) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 299) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 300) 	bd = __bsg_get_device(iminor(inode), bcd->queue);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 301) 	if (!bd)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 302) 		bd = bsg_add_device(inode, bcd->queue, file);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 303) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 304) out_unlock:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 305) 	mutex_unlock(&bsg_mutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 306) 	return bd;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 307) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 308) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 309) static int bsg_open(struct inode *inode, struct file *file)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 310) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 311) 	struct bsg_device *bd;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 312) 	struct bsg_class_device *bcd;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 313) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 314) 	bd = bsg_get_device(inode, file);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 315) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 316) 	if (IS_ERR(bd))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 317) 		return PTR_ERR(bd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 318) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 319) 	bcd = &bd->queue->bsg_dev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 320) 	pm_runtime_get_sync(bcd->class_dev->parent);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 321) 	file->private_data = bd;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 322) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 323) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 324) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 325) static int bsg_release(struct inode *inode, struct file *file)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 326) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 327) 	struct bsg_device *bd = file->private_data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 328) 	struct bsg_class_device *bcd;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 329) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 330) 	file->private_data = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 331) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 332) 	bcd = &bd->queue->bsg_dev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 333) 	pm_runtime_put_sync(bcd->class_dev->parent);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 334) 	return bsg_put_device(bd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 335) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 336) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 337) static int bsg_get_command_q(struct bsg_device *bd, int __user *uarg)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 338) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 339) 	return put_user(bd->max_queue, uarg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 340) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 341) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 342) static int bsg_set_command_q(struct bsg_device *bd, int __user *uarg)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 343) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 344) 	int queue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 345) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 346) 	if (get_user(queue, uarg))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 347) 		return -EFAULT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 348) 	if (queue < 1)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 349) 		return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 350) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 351) 	spin_lock_irq(&bd->lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 352) 	bd->max_queue = queue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 353) 	spin_unlock_irq(&bd->lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 354) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 355) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 356) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 357) static long bsg_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 358) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 359) 	struct bsg_device *bd = file->private_data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 360) 	void __user *uarg = (void __user *) arg;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 361) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 362) 	switch (cmd) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 363) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 364) 	 * Our own ioctls
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 365) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 366) 	case SG_GET_COMMAND_Q:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 367) 		return bsg_get_command_q(bd, uarg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 368) 	case SG_SET_COMMAND_Q:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 369) 		return bsg_set_command_q(bd, uarg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 370) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 371) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 372) 	 * SCSI/sg ioctls
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 373) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 374) 	case SG_GET_VERSION_NUM:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 375) 	case SCSI_IOCTL_GET_IDLUN:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 376) 	case SCSI_IOCTL_GET_BUS_NUMBER:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 377) 	case SG_SET_TIMEOUT:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 378) 	case SG_GET_TIMEOUT:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 379) 	case SG_GET_RESERVED_SIZE:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 380) 	case SG_SET_RESERVED_SIZE:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 381) 	case SG_EMULATED_HOST:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 382) 		return scsi_cmd_ioctl(bd->queue, NULL, file->f_mode, cmd, uarg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 383) 	case SG_IO:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 384) 		return bsg_sg_io(bd->queue, file->f_mode, uarg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 385) 	case SCSI_IOCTL_SEND_COMMAND:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 386) 		pr_warn_ratelimited("%s: calling unsupported SCSI_IOCTL_SEND_COMMAND\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 387) 				current->comm);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 388) 		return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 389) 	default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 390) 		return -ENOTTY;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 391) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 392) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 393) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 394) static const struct file_operations bsg_fops = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 395) 	.open		=	bsg_open,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 396) 	.release	=	bsg_release,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 397) 	.unlocked_ioctl	=	bsg_ioctl,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 398) 	.compat_ioctl	=	compat_ptr_ioctl,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 399) 	.owner		=	THIS_MODULE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 400) 	.llseek		=	default_llseek,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 401) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 402) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 403) void bsg_unregister_queue(struct request_queue *q)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 404) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 405) 	struct bsg_class_device *bcd = &q->bsg_dev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 406) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 407) 	if (!bcd->class_dev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 408) 		return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 409) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 410) 	mutex_lock(&bsg_mutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 411) 	idr_remove(&bsg_minor_idr, bcd->minor);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 412) 	if (q->kobj.sd)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 413) 		sysfs_remove_link(&q->kobj, "bsg");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 414) 	device_unregister(bcd->class_dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 415) 	bcd->class_dev = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 416) 	mutex_unlock(&bsg_mutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 417) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 418) EXPORT_SYMBOL_GPL(bsg_unregister_queue);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 419) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 420) int bsg_register_queue(struct request_queue *q, struct device *parent,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 421) 		const char *name, const struct bsg_ops *ops)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 422) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 423) 	struct bsg_class_device *bcd;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 424) 	dev_t dev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 425) 	int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 426) 	struct device *class_dev = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 427) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 428) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 429) 	 * we need a proper transport to send commands, not a stacked device
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 430) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 431) 	if (!queue_is_mq(q))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 432) 		return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 433) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 434) 	bcd = &q->bsg_dev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 435) 	memset(bcd, 0, sizeof(*bcd));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 436) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 437) 	mutex_lock(&bsg_mutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 438) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 439) 	ret = idr_alloc(&bsg_minor_idr, bcd, 0, BSG_MAX_DEVS, GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 440) 	if (ret < 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 441) 		if (ret == -ENOSPC) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 442) 			printk(KERN_ERR "bsg: too many bsg devices\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 443) 			ret = -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 444) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 445) 		goto unlock;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 446) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 447) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 448) 	bcd->minor = ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 449) 	bcd->queue = q;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 450) 	bcd->ops = ops;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 451) 	dev = MKDEV(bsg_major, bcd->minor);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 452) 	class_dev = device_create(bsg_class, parent, dev, NULL, "%s", name);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 453) 	if (IS_ERR(class_dev)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 454) 		ret = PTR_ERR(class_dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 455) 		goto idr_remove;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 456) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 457) 	bcd->class_dev = class_dev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 458) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 459) 	if (q->kobj.sd) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 460) 		ret = sysfs_create_link(&q->kobj, &bcd->class_dev->kobj, "bsg");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 461) 		if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 462) 			goto unregister_class_dev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 463) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 464) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 465) 	mutex_unlock(&bsg_mutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 466) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 467) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 468) unregister_class_dev:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 469) 	device_unregister(class_dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 470) idr_remove:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 471) 	idr_remove(&bsg_minor_idr, bcd->minor);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 472) unlock:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 473) 	mutex_unlock(&bsg_mutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 474) 	return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 475) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 476) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 477) int bsg_scsi_register_queue(struct request_queue *q, struct device *parent)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 478) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 479) 	if (!blk_queue_scsi_passthrough(q)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 480) 		WARN_ONCE(true, "Attempt to register a non-SCSI queue\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 481) 		return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 482) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 483) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 484) 	return bsg_register_queue(q, parent, dev_name(parent), &bsg_scsi_ops);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 485) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 486) EXPORT_SYMBOL_GPL(bsg_scsi_register_queue);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 487) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 488) static struct cdev bsg_cdev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 489) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 490) static char *bsg_devnode(struct device *dev, umode_t *mode)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 491) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 492) 	return kasprintf(GFP_KERNEL, "bsg/%s", dev_name(dev));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 493) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 494) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 495) static int __init bsg_init(void)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 496) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 497) 	int ret, i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 498) 	dev_t devid;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 499) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 500) 	for (i = 0; i < BSG_LIST_ARRAY_SIZE; i++)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 501) 		INIT_HLIST_HEAD(&bsg_device_list[i]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 502) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 503) 	bsg_class = class_create(THIS_MODULE, "bsg");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 504) 	if (IS_ERR(bsg_class))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 505) 		return PTR_ERR(bsg_class);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 506) 	bsg_class->devnode = bsg_devnode;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 507) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 508) 	ret = alloc_chrdev_region(&devid, 0, BSG_MAX_DEVS, "bsg");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 509) 	if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 510) 		goto destroy_bsg_class;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 511) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 512) 	bsg_major = MAJOR(devid);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 513) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 514) 	cdev_init(&bsg_cdev, &bsg_fops);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 515) 	ret = cdev_add(&bsg_cdev, MKDEV(bsg_major, 0), BSG_MAX_DEVS);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 516) 	if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 517) 		goto unregister_chrdev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 518) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 519) 	printk(KERN_INFO BSG_DESCRIPTION " version " BSG_VERSION
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 520) 	       " loaded (major %d)\n", bsg_major);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 521) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 522) unregister_chrdev:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 523) 	unregister_chrdev_region(MKDEV(bsg_major, 0), BSG_MAX_DEVS);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 524) destroy_bsg_class:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 525) 	class_destroy(bsg_class);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 526) 	return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 527) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 528) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 529) MODULE_AUTHOR("Jens Axboe");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 530) MODULE_DESCRIPTION(BSG_DESCRIPTION);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 531) MODULE_LICENSE("GPL");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 532) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 533) device_initcall(bsg_init);