// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * BSG helper library
 *
 * Copyright (C) 2008 James Smart, Emulex Corporation
 * Copyright (C) 2011 Red Hat, Inc. All rights reserved.
 * Copyright (C) 2011 Mike Christie
 */
#include <linux/slab.h>
#include <linux/blk-mq.h>
#include <linux/delay.h>
#include <linux/scatterlist.h>
#include <linux/bsg-lib.h>
#include <linux/export.h>
#include <scsi/scsi_cmnd.h>
#include <scsi/sg.h>

#define uptr64(val) ((void __user *)(uintptr_t)(val))

/*
 * One bsg_set is allocated per bsg queue; it bundles the blk-mq tag set
 * with the LLD's job and timeout handlers.
 */
struct bsg_set {
	struct blk_mq_tag_set	tag_set;
	bsg_job_fn		*job_fn;
	bsg_timeout_fn		*timeout_fn;
};

static int bsg_transport_check_proto(struct sg_io_v4 *hdr)
{
	if (hdr->protocol != BSG_PROTOCOL_SCSI ||
	    hdr->subprotocol != BSG_SUB_PROTOCOL_SCSI_TRANSPORT)
		return -EINVAL;
	if (!capable(CAP_SYS_RAWIO))
		return -EPERM;
	return 0;
}
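
/*
 * For reference, a userspace sketch that passes the check above (the
 * constants come from the uapi bsg/sg headers; bsg_fd is a hypothetical
 * open file descriptor on a /dev/bsg node, not part of this library):
 *
 *	struct sg_io_v4 hdr = {
 *		.guard		= 'Q',
 *		.protocol	= BSG_PROTOCOL_SCSI,
 *		.subprotocol	= BSG_SUB_PROTOCOL_SCSI_TRANSPORT,
 *		...
 *	};
 *	ioctl(bsg_fd, SG_IO, &hdr);
 *
 * The caller also needs CAP_SYS_RAWIO.
 */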

static int bsg_transport_fill_hdr(struct request *rq, struct sg_io_v4 *hdr,
		fmode_t mode)
{
	struct bsg_job *job = blk_mq_rq_to_pdu(rq);
	int ret;

	job->request_len = hdr->request_len;
	job->request = memdup_user(uptr64(hdr->request), hdr->request_len);
	if (IS_ERR(job->request))
		return PTR_ERR(job->request);

	if (hdr->dout_xfer_len && hdr->din_xfer_len) {
		/* bidirectional command: a second request maps the din side */
		job->bidi_rq = blk_get_request(rq->q, REQ_OP_SCSI_IN, 0);
		if (IS_ERR(job->bidi_rq)) {
			ret = PTR_ERR(job->bidi_rq);
			goto out;
		}

		ret = blk_rq_map_user(rq->q, job->bidi_rq, NULL,
				uptr64(hdr->din_xferp), hdr->din_xfer_len,
				GFP_KERNEL);
		if (ret)
			goto out_free_bidi_rq;

		job->bidi_bio = job->bidi_rq->bio;
	} else {
		job->bidi_rq = NULL;
		job->bidi_bio = NULL;
	}

	return 0;

out_free_bidi_rq:
	if (job->bidi_rq)
		blk_put_request(job->bidi_rq);
out:
	kfree(job->request);
	return ret;
}

static int bsg_transport_complete_rq(struct request *rq, struct sg_io_v4 *hdr)
{
	struct bsg_job *job = blk_mq_rq_to_pdu(rq);
	int ret = 0;

	/*
	 * The assignments below don't make much sense, but are kept for
	 * bug by bug backwards compatibility:
	 */
	hdr->device_status = job->result & 0xff;
	hdr->transport_status = host_byte(job->result);
	hdr->driver_status = driver_byte(job->result);
	hdr->info = 0;
	if (hdr->device_status || hdr->transport_status || hdr->driver_status)
		hdr->info |= SG_INFO_CHECK;
	hdr->response_len = 0;

	if (job->result < 0) {
		/* we're only returning the result field in the reply */
		job->reply_len = sizeof(u32);
		ret = job->result;
	}

	if (job->reply_len && hdr->response) {
		int len = min(hdr->max_response_len, job->reply_len);

		if (copy_to_user(uptr64(hdr->response), job->reply, len))
			ret = -EFAULT;
		else
			hdr->response_len = len;
	}

	/* we assume all request payload was transferred, residual == 0 */
	hdr->dout_resid = 0;

	if (job->bidi_rq) {
		unsigned int rsp_len = job->reply_payload.payload_len;

		if (WARN_ON(job->reply_payload_rcv_len > rsp_len))
			hdr->din_resid = 0;
		else
			hdr->din_resid = rsp_len - job->reply_payload_rcv_len;
	} else {
		hdr->din_resid = 0;
	}

	return ret;
}

static void bsg_transport_free_rq(struct request *rq)
{
	struct bsg_job *job = blk_mq_rq_to_pdu(rq);

	if (job->bidi_rq) {
		blk_rq_unmap_user(job->bidi_bio);
		blk_put_request(job->bidi_rq);
	}

	kfree(job->request);
}

static const struct bsg_ops bsg_transport_ops = {
	.check_proto		= bsg_transport_check_proto,
	.fill_hdr		= bsg_transport_fill_hdr,
	.complete_rq		= bsg_transport_complete_rq,
	.free_rq		= bsg_transport_free_rq,
};

/**
 * bsg_teardown_job - routine to tear down a bsg job
 * @kref: kref inside bsg_job that is to be torn down
 */
static void bsg_teardown_job(struct kref *kref)
{
	struct bsg_job *job = container_of(kref, struct bsg_job, kref);
	struct request *rq = blk_mq_rq_from_pdu(job);

	put_device(job->dev);	/* release reference for the request */

	kfree(job->request_payload.sg_list);
	kfree(job->reply_payload.sg_list);

	blk_mq_end_request(rq, BLK_STS_OK);
}

void bsg_job_put(struct bsg_job *job)
{
	kref_put(&job->kref, bsg_teardown_job);
}
EXPORT_SYMBOL_GPL(bsg_job_put);

int bsg_job_get(struct bsg_job *job)
{
	return kref_get_unless_zero(&job->kref);
}
EXPORT_SYMBOL_GPL(bsg_job_get);
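
/*
 * Illustrative pattern (not from this file): an LLD that touches a job from
 * an asynchronous context, e.g. an abort handler, can pin it with the kref:
 *
 *	if (bsg_job_get(job)) {
 *		... use job ...
 *		bsg_job_put(job);
 *	}
 *
 * bsg_job_get() returns 0 when the job is already being torn down.
 */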

/**
 * bsg_job_done - completion routine for bsg requests
 * @job: bsg_job that is complete
 * @result: job reply result
 * @reply_payload_rcv_len: length of payload received
 *
 * The LLD should call this when the bsg job has completed.
 */
void bsg_job_done(struct bsg_job *job, int result,
		  unsigned int reply_payload_rcv_len)
{
	struct request *rq = blk_mq_rq_from_pdu(job);

	job->result = result;
	job->reply_payload_rcv_len = reply_payload_rcv_len;
	if (likely(!blk_should_fake_timeout(rq->q)))
		blk_mq_complete_request(rq);
}
EXPORT_SYMBOL_GPL(bsg_job_done);
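
/*
 * A minimal sketch of an LLD job handler (example_job_fn is hypothetical):
 * it consumes job->request, signals completion with bsg_job_done(), and
 * returns 0 so bsg_queue_rq() reports BLK_STS_OK:
 *
 *	static int example_job_fn(struct bsg_job *job)
 *	{
 *		... process job->request and job->request_payload ...
 *		bsg_job_done(job, 0, 0);	(result 0, no din payload)
 *		return 0;
 *	}
 */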

/**
 * bsg_complete - softirq done routine for destroying the bsg requests
 * @rq: BSG request that holds the job to be destroyed
 */
static void bsg_complete(struct request *rq)
{
	struct bsg_job *job = blk_mq_rq_to_pdu(rq);

	bsg_job_put(job);
}

static int bsg_map_buffer(struct bsg_buffer *buf, struct request *req)
{
	size_t sz = (sizeof(struct scatterlist) * req->nr_phys_segments);

	BUG_ON(!req->nr_phys_segments);

	buf->sg_list = kmalloc(sz, GFP_KERNEL);
	if (!buf->sg_list)
		return -ENOMEM;
	sg_init_table(buf->sg_list, req->nr_phys_segments);
	buf->sg_cnt = blk_rq_map_sg(req->q, req, buf->sg_list);
	buf->payload_len = blk_rq_bytes(req);
	return 0;
}
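
/*
 * An LLD consumes the mapped payload through job->request_payload (and
 * job->reply_payload for bidirectional commands), walking the scatterlist
 * roughly like this (sketch only; do_something() is a placeholder):
 *
 *	struct scatterlist *sg;
 *	int i;
 *
 *	for_each_sg(job->request_payload.sg_list, sg,
 *		    job->request_payload.sg_cnt, i)
 *		do_something(sg_virt(sg), sg->length);
 */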

/**
 * bsg_prepare_job - create the bsg_job structure for the bsg request
 * @dev: device that is being sent the bsg request
 * @req: BSG request that needs a job structure
 */
static bool bsg_prepare_job(struct device *dev, struct request *req)
{
	struct bsg_job *job = blk_mq_rq_to_pdu(req);
	int ret;

	job->timeout = req->timeout;

	if (req->bio) {
		ret = bsg_map_buffer(&job->request_payload, req);
		if (ret)
			goto failjob_rls_job;
	}
	if (job->bidi_rq) {
		ret = bsg_map_buffer(&job->reply_payload, job->bidi_rq);
		if (ret)
			goto failjob_rls_rqst_payload;
	}
	job->dev = dev;
	/* take a reference for the request */
	get_device(job->dev);
	kref_init(&job->kref);
	return true;

failjob_rls_rqst_payload:
	kfree(job->request_payload.sg_list);
failjob_rls_job:
	job->result = -ENOMEM;
	return false;
}

/**
 * bsg_queue_rq - generic handler for bsg requests
 * @hctx: hardware queue
 * @bd: queue data
 *
 * On error the LLD's job_fn should return a -Exyz error value that will be
 * stored in job->result.
 *
 * Drivers/subsystems should pass this to the queue init function.
 */
static blk_status_t bsg_queue_rq(struct blk_mq_hw_ctx *hctx,
				 const struct blk_mq_queue_data *bd)
{
	struct request_queue *q = hctx->queue;
	struct device *dev = q->queuedata;
	struct request *req = bd->rq;
	struct bsg_set *bset =
		container_of(q->tag_set, struct bsg_set, tag_set);
	blk_status_t sts = BLK_STS_IOERR;
	int ret;

	blk_mq_start_request(req);

	if (!get_device(dev))
		return BLK_STS_IOERR;

	if (!bsg_prepare_job(dev, req))
		goto out;

	ret = bset->job_fn(blk_mq_rq_to_pdu(req));
	if (!ret)
		sts = BLK_STS_OK;

out:
	put_device(dev);
	return sts;
}

/* called right after the request is allocated for the request_queue */
static int bsg_init_rq(struct blk_mq_tag_set *set, struct request *req,
		       unsigned int hctx_idx, unsigned int numa_node)
{
	struct bsg_job *job = blk_mq_rq_to_pdu(req);

	job->reply = kzalloc(SCSI_SENSE_BUFFERSIZE, GFP_KERNEL);
	if (!job->reply)
		return -ENOMEM;
	return 0;
}

/* called right before the request is given to the request_queue user */
static void bsg_initialize_rq(struct request *req)
{
	struct bsg_job *job = blk_mq_rq_to_pdu(req);
	void *reply = job->reply;

	memset(job, 0, sizeof(*job));
	job->reply = reply;
	job->reply_len = SCSI_SENSE_BUFFERSIZE;
	job->dd_data = job + 1;
}

static void bsg_exit_rq(struct blk_mq_tag_set *set, struct request *req,
			unsigned int hctx_idx)
{
	struct bsg_job *job = blk_mq_rq_to_pdu(req);

	kfree(job->reply);
}

void bsg_remove_queue(struct request_queue *q)
{
	if (q) {
		struct bsg_set *bset =
			container_of(q->tag_set, struct bsg_set, tag_set);

		bsg_unregister_queue(q);
		blk_cleanup_queue(q);
		blk_mq_free_tag_set(&bset->tag_set);
		kfree(bset);
	}
}
EXPORT_SYMBOL_GPL(bsg_remove_queue);

static enum blk_eh_timer_return bsg_timeout(struct request *rq, bool reserved)
{
	struct bsg_set *bset =
		container_of(rq->q->tag_set, struct bsg_set, tag_set);

	if (!bset->timeout_fn)
		return BLK_EH_DONE;
	return bset->timeout_fn(rq);
}

static const struct blk_mq_ops bsg_mq_ops = {
	.queue_rq		= bsg_queue_rq,
	.init_request		= bsg_init_rq,
	.exit_request		= bsg_exit_rq,
	.initialize_rq_fn	= bsg_initialize_rq,
	.complete		= bsg_complete,
	.timeout		= bsg_timeout,
};

/**
 * bsg_setup_queue - Create and add the bsg hooks so we can receive requests
 * @dev: device to attach bsg device to
 * @name: name to give the bsg device
 * @job_fn: bsg job handler
 * @timeout: timeout handler function pointer
 * @dd_job_size: size of LLD data needed for each job
 */
struct request_queue *bsg_setup_queue(struct device *dev, const char *name,
		bsg_job_fn *job_fn, bsg_timeout_fn *timeout, int dd_job_size)
{
	struct bsg_set *bset;
	struct blk_mq_tag_set *set;
	struct request_queue *q;
	int ret = -ENOMEM;

	bset = kzalloc(sizeof(*bset), GFP_KERNEL);
	if (!bset)
		return ERR_PTR(-ENOMEM);

	bset->job_fn = job_fn;
	bset->timeout_fn = timeout;

	set = &bset->tag_set;
	set->ops = &bsg_mq_ops;
	set->nr_hw_queues = 1;
	set->queue_depth = 128;
	set->numa_node = NUMA_NO_NODE;
	set->cmd_size = sizeof(struct bsg_job) + dd_job_size;
	set->flags = BLK_MQ_F_NO_SCHED | BLK_MQ_F_BLOCKING;
	if (blk_mq_alloc_tag_set(set))
		goto out_tag_set;

	q = blk_mq_init_queue(set);
	if (IS_ERR(q)) {
		ret = PTR_ERR(q);
		goto out_queue;
	}

	q->queuedata = dev;
	blk_queue_rq_timeout(q, BLK_DEFAULT_SG_TIMEOUT);

	ret = bsg_register_queue(q, dev, name, &bsg_transport_ops);
	if (ret) {
		printk(KERN_ERR "%s: bsg interface failed to initialize - register queue\n",
		       dev->kobj.name);
		goto out_cleanup_queue;
	}

	return q;
out_cleanup_queue:
	blk_cleanup_queue(q);
out_queue:
	blk_mq_free_tag_set(set);
out_tag_set:
	kfree(bset);
	return ERR_PTR(ret);
}
EXPORT_SYMBOL_GPL(bsg_setup_queue);
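
/*
 * Typical transport usage, sketched with hypothetical names (example_job_fn,
 * example_timeout_fn, and dd_size are placeholders, not part of this
 * library):
 *
 *	q = bsg_setup_queue(dev, dev_name(dev), example_job_fn,
 *			    example_timeout_fn, dd_size);
 *	if (IS_ERR(q))
 *		return PTR_ERR(q);
 *
 * and bsg_remove_queue(q) on teardown.
 */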