// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * BSG helper library
 *
 * Copyright (C) 2008 James Smart, Emulex Corporation
 * Copyright (C) 2011 Red Hat, Inc. All rights reserved.
 * Copyright (C) 2011 Mike Christie
 */
#include <linux/bsg.h>
#include <linux/slab.h>
#include <linux/blk-mq.h>
#include <linux/delay.h>
#include <linux/scatterlist.h>
#include <linux/bsg-lib.h>
#include <linux/export.h>
#include <scsi/scsi_cmnd.h>
#include <scsi/sg.h>

#define uptr64(val) ((void __user *)(uintptr_t)(val))

struct bsg_set {
	struct blk_mq_tag_set tag_set;
	struct bsg_device *bd;
	bsg_job_fn *job_fn;
	bsg_timeout_fn *timeout_fn;
};

static int bsg_transport_sg_io_fn(struct request_queue *q, struct sg_io_v4 *hdr,
		fmode_t mode, unsigned int timeout)
{
	struct bsg_job *job;
	struct request *rq;
	struct bio *bio;
	int ret;

	if (hdr->protocol != BSG_PROTOCOL_SCSI ||
	    hdr->subprotocol != BSG_SUB_PROTOCOL_SCSI_TRANSPORT)
		return -EINVAL;
	if (!capable(CAP_SYS_RAWIO))
		return -EPERM;

	rq = blk_get_request(q, hdr->dout_xfer_len ?
			     REQ_OP_DRV_OUT : REQ_OP_DRV_IN, 0);
	if (IS_ERR(rq))
		return PTR_ERR(rq);
	rq->timeout = timeout;

	job = blk_mq_rq_to_pdu(rq);
	job->request_len = hdr->request_len;
	job->request = memdup_user(uptr64(hdr->request), hdr->request_len);
	if (IS_ERR(job->request)) {
		ret = PTR_ERR(job->request);
		goto out_put_request;
	}

	if (hdr->dout_xfer_len && hdr->din_xfer_len) {
		job->bidi_rq = blk_get_request(rq->q, REQ_OP_DRV_IN, 0);
		if (IS_ERR(job->bidi_rq)) {
			ret = PTR_ERR(job->bidi_rq);
			goto out_free_job_request;
		}

		ret = blk_rq_map_user(rq->q, job->bidi_rq, NULL,
				uptr64(hdr->din_xferp), hdr->din_xfer_len,
				GFP_KERNEL);
		if (ret)
			goto out_free_bidi_rq;

		job->bidi_bio = job->bidi_rq->bio;
	} else {
		job->bidi_rq = NULL;
		job->bidi_bio = NULL;
	}

	ret = 0;
	if (hdr->dout_xfer_len) {
		ret = blk_rq_map_user(rq->q, rq, NULL, uptr64(hdr->dout_xferp),
				hdr->dout_xfer_len, GFP_KERNEL);
	} else if (hdr->din_xfer_len) {
		ret = blk_rq_map_user(rq->q, rq, NULL, uptr64(hdr->din_xferp),
				hdr->din_xfer_len, GFP_KERNEL);
	}

	if (ret)
		goto out_unmap_bidi_rq;

	bio = rq->bio;
	blk_execute_rq(NULL, rq, !(hdr->flags & BSG_FLAG_Q_AT_TAIL));

	/*
	 * The assignments below don't make much sense, but are kept for
	 * bug by bug backwards compatibility:
	 */
	hdr->device_status = job->result & 0xff;
	hdr->transport_status = host_byte(job->result);
	hdr->driver_status = 0;
	hdr->info = 0;
	if (hdr->device_status || hdr->transport_status || hdr->driver_status)
		hdr->info |= SG_INFO_CHECK;
	hdr->response_len = 0;

	if (job->result < 0) {
		/* we're only returning the result field in the reply */
		job->reply_len = sizeof(u32);
		ret = job->result;
	}

	if (job->reply_len && hdr->response) {
		int len = min(hdr->max_response_len, job->reply_len);

		if (copy_to_user(uptr64(hdr->response), job->reply, len))
			ret = -EFAULT;
		else
			hdr->response_len = len;
	}

	/* we assume all request payload was transferred, residual == 0 */
	hdr->dout_resid = 0;

	if (job->bidi_rq) {
		unsigned int rsp_len = job->reply_payload.payload_len;

		if (WARN_ON(job->reply_payload_rcv_len > rsp_len))
			hdr->din_resid = 0;
		else
			hdr->din_resid = rsp_len - job->reply_payload_rcv_len;
	} else {
		hdr->din_resid = 0;
	}

	blk_rq_unmap_user(bio);
out_unmap_bidi_rq:
	if (job->bidi_rq)
		blk_rq_unmap_user(job->bidi_bio);
out_free_bidi_rq:
	if (job->bidi_rq)
		blk_put_request(job->bidi_rq);
out_free_job_request:
	kfree(job->request);
out_put_request:
	blk_put_request(rq);
	return ret;
}
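
/*
 * Example (illustrative only, not compiled here): a minimal userspace
 * sketch of driving the handler above through a bsg node with the v4
 * sg_io interface. The device path, request buffer contents, and sizes
 * are hypothetical; error handling is trimmed for brevity. Note that
 * the handler requires CAP_SYS_RAWIO.
 *
 *	#include <fcntl.h>
 *	#include <stdint.h>
 *	#include <sys/ioctl.h>
 *	#include <scsi/sg.h>
 *	#include <linux/bsg.h>
 *
 *	int send_transport_request(void)
 *	{
 *		unsigned char req[64] = { };	// transport-defined request
 *		unsigned char rsp[96];
 *		struct sg_io_v4 hdr = { };
 *		int fd = open("/dev/bsg/fc_host0", O_RDWR);	// hypothetical node
 *
 *		hdr.guard = 'Q';		// required for the v4 interface
 *		hdr.protocol = BSG_PROTOCOL_SCSI;
 *		hdr.subprotocol = BSG_SUB_PROTOCOL_SCSI_TRANSPORT;
 *		hdr.request = (uintptr_t)req;
 *		hdr.request_len = sizeof(req);
 *		hdr.response = (uintptr_t)rsp;
 *		hdr.max_response_len = sizeof(rsp);
 *
 *		return ioctl(fd, SG_IO, &hdr);	// reaches bsg_transport_sg_io_fn()
 *	}
 */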

/**
 * bsg_teardown_job - routine to tear down a bsg job
 * @kref: kref inside bsg_job that is to be torn down
 */
static void bsg_teardown_job(struct kref *kref)
{
	struct bsg_job *job = container_of(kref, struct bsg_job, kref);
	struct request *rq = blk_mq_rq_from_pdu(job);

	put_device(job->dev);	/* release reference for the request */

	kfree(job->request_payload.sg_list);
	kfree(job->reply_payload.sg_list);

	blk_mq_end_request(rq, BLK_STS_OK);
}

void bsg_job_put(struct bsg_job *job)
{
	kref_put(&job->kref, bsg_teardown_job);
}
EXPORT_SYMBOL_GPL(bsg_job_put);

int bsg_job_get(struct bsg_job *job)
{
	return kref_get_unless_zero(&job->kref);
}
EXPORT_SYMBOL_GPL(bsg_job_get);

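/*
 * A minimal sketch (assumed LLD code, not taken from a real driver) of
 * why the kref accessors above exist: an LLD that finishes jobs from
 * another context takes a reference before handing the job off, so the
 * job cannot be torn down underneath it, and drops the reference when
 * that context is done. All lld_* names are hypothetical.
 *
 *	static void lld_deferred_done(struct lld_event *evt)
 *	{
 *		struct bsg_job *job = evt->job;
 *
 *		bsg_job_done(job, evt->status, evt->rcv_len);
 *		bsg_job_put(job);	// drop the reference taken below
 *	}
 *
 *	static int lld_defer_job(struct bsg_job *job)
 *	{
 *		if (!bsg_job_get(job))	// job is already being torn down
 *			return -ENXIO;
 *		return lld_queue_fw_event(job);	// hypothetical async helper
 *	}
 */
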
/**
 * bsg_job_done - completion routine for bsg requests
 * @job: bsg_job that is complete
 * @result: job reply result
 * @reply_payload_rcv_len: length of payload received
 *
 * The LLD should call this when the bsg job has completed.
 */
void bsg_job_done(struct bsg_job *job, int result,
		  unsigned int reply_payload_rcv_len)
{
	struct request *rq = blk_mq_rq_from_pdu(job);

	job->result = result;
	job->reply_payload_rcv_len = reply_payload_rcv_len;
	if (likely(!blk_should_fake_timeout(rq->q)))
		blk_mq_complete_request(rq);
}
EXPORT_SYMBOL_GPL(bsg_job_done);
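
/*
 * A minimal job handler sketch (hypothetical LLD, hand-rolled here to
 * illustrate the contract): the handler consumes job->request, fills in
 * job->reply and the reply payload, and signals completion with
 * bsg_job_done(). lld_send_to_hw() is a made-up helper assumed to wait
 * for the hardware before returning.
 *
 *	static int lld_bsg_job_fn(struct bsg_job *job)
 *	{
 *		int rc = lld_send_to_hw(job);	// hypothetical helper
 *
 *		if (rc)
 *			return rc;	// bsg_queue_rq() fails the request
 *
 *		bsg_job_done(job, 0, job->reply_payload.payload_len);
 *		return 0;
 *	}
 */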

/**
 * bsg_complete - softirq done routine for destroying the bsg requests
 * @rq: BSG request that holds the job to be destroyed
 */
static void bsg_complete(struct request *rq)
{
	struct bsg_job *job = blk_mq_rq_to_pdu(rq);

	bsg_job_put(job);
}

static int bsg_map_buffer(struct bsg_buffer *buf, struct request *req)
{
	size_t sz = (sizeof(struct scatterlist) * req->nr_phys_segments);

	BUG_ON(!req->nr_phys_segments);

	buf->sg_list = kmalloc(sz, GFP_KERNEL);
	if (!buf->sg_list)
		return -ENOMEM;
	sg_init_table(buf->sg_list, req->nr_phys_segments);
	buf->sg_cnt = blk_rq_map_sg(req->q, req, buf->sg_list);
	buf->payload_len = blk_rq_bytes(req);
	return 0;
}

/**
 * bsg_prepare_job - create the bsg_job structure for the bsg request
 * @dev: device that is being sent the bsg request
 * @req: BSG request that needs a job structure
 */
static bool bsg_prepare_job(struct device *dev, struct request *req)
{
	struct bsg_job *job = blk_mq_rq_to_pdu(req);
	int ret;

	job->timeout = req->timeout;

	if (req->bio) {
		ret = bsg_map_buffer(&job->request_payload, req);
		if (ret)
			goto failjob_rls_job;
	}
	if (job->bidi_rq) {
		ret = bsg_map_buffer(&job->reply_payload, job->bidi_rq);
		if (ret)
			goto failjob_rls_rqst_payload;
	}
	job->dev = dev;
	/* take a reference for the request */
	get_device(job->dev);
	kref_init(&job->kref);
	return true;

failjob_rls_rqst_payload:
	kfree(job->request_payload.sg_list);
failjob_rls_job:
	job->result = -ENOMEM;
	return false;
}

/**
 * bsg_queue_rq - generic handler for bsg requests
 * @hctx: hardware queue
 * @bd: queue data
 *
 * On error the LLD's job_fn should return a -Exyz error value; a
 * nonzero return fails the request with BLK_STS_IOERR.
 *
 * bsg_setup_queue() installs this as the blk-mq ->queue_rq handler.
 */
static blk_status_t bsg_queue_rq(struct blk_mq_hw_ctx *hctx,
				 const struct blk_mq_queue_data *bd)
{
	struct request_queue *q = hctx->queue;
	struct device *dev = q->queuedata;
	struct request *req = bd->rq;
	struct bsg_set *bset =
		container_of(q->tag_set, struct bsg_set, tag_set);
	blk_status_t sts = BLK_STS_IOERR;
	int ret;

	blk_mq_start_request(req);

	if (!get_device(dev))
		return BLK_STS_IOERR;

	if (!bsg_prepare_job(dev, req))
		goto out;

	ret = bset->job_fn(blk_mq_rq_to_pdu(req));
	if (!ret)
		sts = BLK_STS_OK;

out:
	put_device(dev);
	return sts;
}

/* called right after the request is allocated for the request_queue */
static int bsg_init_rq(struct blk_mq_tag_set *set, struct request *req,
		       unsigned int hctx_idx, unsigned int numa_node)
{
	struct bsg_job *job = blk_mq_rq_to_pdu(req);

	job->reply = kzalloc(SCSI_SENSE_BUFFERSIZE, GFP_KERNEL);
	if (!job->reply)
		return -ENOMEM;
	return 0;
}

/* called right before the request is given to the request_queue user */
static void bsg_initialize_rq(struct request *req)
{
	struct bsg_job *job = blk_mq_rq_to_pdu(req);
	void *reply = job->reply;

	memset(job, 0, sizeof(*job));
	job->reply = reply;
	job->reply_len = SCSI_SENSE_BUFFERSIZE;
	job->dd_data = job + 1;
}

static void bsg_exit_rq(struct blk_mq_tag_set *set, struct request *req,
			unsigned int hctx_idx)
{
	struct bsg_job *job = blk_mq_rq_to_pdu(req);

	kfree(job->reply);
}

void bsg_remove_queue(struct request_queue *q)
{
	if (q) {
		struct bsg_set *bset =
			container_of(q->tag_set, struct bsg_set, tag_set);

		bsg_unregister_queue(bset->bd);
		blk_cleanup_queue(q);
		blk_mq_free_tag_set(&bset->tag_set);
		kfree(bset);
	}
}
EXPORT_SYMBOL_GPL(bsg_remove_queue);

static enum blk_eh_timer_return bsg_timeout(struct request *rq, bool reserved)
{
	struct bsg_set *bset =
		container_of(rq->q->tag_set, struct bsg_set, tag_set);

	if (!bset->timeout_fn)
		return BLK_EH_DONE;
	return bset->timeout_fn(rq);
}

static const struct blk_mq_ops bsg_mq_ops = {
	.queue_rq		= bsg_queue_rq,
	.init_request		= bsg_init_rq,
	.exit_request		= bsg_exit_rq,
	.initialize_rq_fn	= bsg_initialize_rq,
	.complete		= bsg_complete,
	.timeout		= bsg_timeout,
};

/**
 * bsg_setup_queue - Create and add the bsg hooks so we can receive requests
 * @dev: device to attach bsg device to
 * @name: name to give the bsg device
 * @job_fn: bsg job handler
 * @timeout: timeout handler function pointer
 * @dd_job_size: size of LLD data needed for each job
 */
struct request_queue *bsg_setup_queue(struct device *dev, const char *name,
		bsg_job_fn *job_fn, bsg_timeout_fn *timeout, int dd_job_size)
{
	struct bsg_set *bset;
	struct blk_mq_tag_set *set;
	struct request_queue *q;
	int ret = -ENOMEM;

	bset = kzalloc(sizeof(*bset), GFP_KERNEL);
	if (!bset)
		return ERR_PTR(-ENOMEM);

	bset->job_fn = job_fn;
	bset->timeout_fn = timeout;

	set = &bset->tag_set;
	set->ops = &bsg_mq_ops;
	set->nr_hw_queues = 1;
	set->queue_depth = 128;
	set->numa_node = NUMA_NO_NODE;
	set->cmd_size = sizeof(struct bsg_job) + dd_job_size;
	set->flags = BLK_MQ_F_NO_SCHED | BLK_MQ_F_BLOCKING;
	if (blk_mq_alloc_tag_set(set))
		goto out_tag_set;

	q = blk_mq_init_queue(set);
	if (IS_ERR(q)) {
		ret = PTR_ERR(q);
		goto out_queue;
	}

	q->queuedata = dev;
	blk_queue_rq_timeout(q, BLK_DEFAULT_SG_TIMEOUT);

	bset->bd = bsg_register_queue(q, dev, name, bsg_transport_sg_io_fn);
	if (IS_ERR(bset->bd)) {
		ret = PTR_ERR(bset->bd);
		goto out_cleanup_queue;
	}

	return q;
out_cleanup_queue:
	blk_cleanup_queue(q);
out_queue:
	blk_mq_free_tag_set(set);
out_tag_set:
	kfree(bset);
	return ERR_PTR(ret);
}
EXPORT_SYMBOL_GPL(bsg_setup_queue);
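
/*
 * A minimal setup/teardown sketch (hypothetical transport-class code;
 * everything except the bsg-lib calls is made up): create the queue
 * once per device and pair it with bsg_remove_queue() when the device
 * goes away. bsg_remove_queue() tolerates a NULL queue.
 *
 *	struct request_queue *q;
 *
 *	q = bsg_setup_queue(dev, dev_name(dev), lld_bsg_job_fn,
 *			    lld_bsg_timeout_fn, sizeof(struct lld_job_data));
 *	if (IS_ERR(q))
 *		return PTR_ERR(q);
 *	...
 *	bsg_remove_queue(q);	// unregisters the bsg device, frees the set
 */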