/*
 * bsg.c - block layer implementation of the sg v4 interface
 *
 * Copyright (C) 2004 Jens Axboe <axboe@suse.de> SUSE Labs
 * Copyright (C) 2004 Peter M. Jones <pjones@redhat.com>
 *
 * This file is subject to the terms and conditions of the GNU General Public
 * License version 2.  See the file "COPYING" in the main directory of this
 * archive for more details.
 *
 */
#include <linux/module.h>
#include <linux/init.h>
#include <linux/file.h>
#include <linux/blkdev.h>
#include <linux/poll.h>
#include <linux/cdev.h>
#include <linux/jiffies.h>
#include <linux/percpu.h>
#include <linux/uio.h>
#include <linux/idr.h>
#include <linux/bsg.h>
#include <linux/slab.h>

#include <scsi/scsi.h>
#include <scsi/scsi_ioctl.h>
#include <scsi/scsi_cmnd.h>
#include <scsi/scsi_device.h>
#include <scsi/scsi_driver.h>
#include <scsi/sg.h>

#define BSG_DESCRIPTION	"Block layer SCSI generic (bsg) driver"
#define BSG_VERSION	"0.4"

#define bsg_dbg(bd, fmt, ...) \
	pr_debug("%s: " fmt, (bd)->name, ##__VA_ARGS__)

struct bsg_device {
	struct request_queue *queue;
	spinlock_t lock;
	struct list_head busy_list;
	struct list_head done_list;
	struct hlist_node dev_list;
	atomic_t ref_count;
	int queued_cmds;
	int done_cmds;
	wait_queue_head_t wq_done;
	wait_queue_head_t wq_free;
	char name[20];
	int max_queue;
	unsigned long flags;
};

enum {
	BSG_F_BLOCK	= 1,
};

#define BSG_DEFAULT_CMDS	64
#define BSG_MAX_DEVS		32768

static DEFINE_MUTEX(bsg_mutex);
static DEFINE_IDR(bsg_minor_idr);

#define BSG_LIST_ARRAY_SIZE	8
static struct hlist_head bsg_device_list[BSG_LIST_ARRAY_SIZE];

static struct class *bsg_class;
static int bsg_major;

static struct kmem_cache *bsg_cmd_cachep;

/*
 * our internal command type
 */
struct bsg_command {
	struct bsg_device *bd;
	struct list_head list;
	struct request *rq;
	struct bio *bio;
	struct bio *bidi_bio;
	int err;
	struct sg_io_v4 hdr;
};

static void bsg_free_command(struct bsg_command *bc)
{
	struct bsg_device *bd = bc->bd;
	unsigned long flags;

	kmem_cache_free(bsg_cmd_cachep, bc);

	spin_lock_irqsave(&bd->lock, flags);
	bd->queued_cmds--;
	spin_unlock_irqrestore(&bd->lock, flags);

	wake_up(&bd->wq_free);
}
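
/*
 * Reserve a command slot under bd->lock (bounded by bd->max_queue) and
 * allocate the bsg_command itself with the lock dropped, since the
 * GFP_KERNEL allocation may sleep.
 */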
static struct bsg_command *bsg_alloc_command(struct bsg_device *bd)
{
	struct bsg_command *bc = ERR_PTR(-EINVAL);

	spin_lock_irq(&bd->lock);

	if (bd->queued_cmds >= bd->max_queue)
		goto out;

	bd->queued_cmds++;
	spin_unlock_irq(&bd->lock);

	bc = kmem_cache_zalloc(bsg_cmd_cachep, GFP_KERNEL);
	if (unlikely(!bc)) {
		spin_lock_irq(&bd->lock);
		bd->queued_cmds--;
		bc = ERR_PTR(-ENOMEM);
		goto out;
	}

	bc->bd = bd;
	INIT_LIST_HEAD(&bc->list);
	bsg_dbg(bd, "returning free cmd %p\n", bc);
	return bc;
out:
	spin_unlock_irq(&bd->lock);
	return bc;
}

static inline struct hlist_head *bsg_dev_idx_hash(int index)
{
	return &bsg_device_list[index & (BSG_LIST_ARRAY_SIZE - 1)];
}
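
/*
 * struct sg_io_v4 carries user-space pointers as 64-bit integers; uptr64()
 * converts them back into user pointers for copy_from_user()/copy_to_user()
 * and blk_rq_map_user().
 */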
#define uptr64(val) ((void __user *)(uintptr_t)(val))

static int bsg_scsi_check_proto(struct sg_io_v4 *hdr)
{
	if (hdr->protocol != BSG_PROTOCOL_SCSI ||
	    hdr->subprotocol != BSG_SUB_PROTOCOL_SCSI_CMD)
		return -EINVAL;
	return 0;
}

static int bsg_scsi_fill_hdr(struct request *rq, struct sg_io_v4 *hdr,
		fmode_t mode)
{
	struct scsi_request *sreq = scsi_req(rq);

	sreq->cmd_len = hdr->request_len;
	if (sreq->cmd_len > BLK_MAX_CDB) {
		sreq->cmd = kzalloc(sreq->cmd_len, GFP_KERNEL);
		if (!sreq->cmd)
			return -ENOMEM;
	}

	if (copy_from_user(sreq->cmd, uptr64(hdr->request), sreq->cmd_len))
		return -EFAULT;
	if (blk_verify_command(sreq->cmd, mode))
		return -EPERM;
	return 0;
}

static int bsg_scsi_complete_rq(struct request *rq, struct sg_io_v4 *hdr)
{
	struct scsi_request *sreq = scsi_req(rq);
	int ret = 0;

	/*
	 * fill in all the output members
	 */
	hdr->device_status = sreq->result & 0xff;
	hdr->transport_status = host_byte(sreq->result);
	hdr->driver_status = driver_byte(sreq->result);
	hdr->info = 0;
	if (hdr->device_status || hdr->transport_status || hdr->driver_status)
		hdr->info |= SG_INFO_CHECK;
	hdr->response_len = 0;

	if (sreq->sense_len && hdr->response) {
		int len = min_t(unsigned int, hdr->max_response_len,
					sreq->sense_len);

		if (copy_to_user(uptr64(hdr->response), sreq->sense, len))
			ret = -EFAULT;
		else
			hdr->response_len = len;
	}

	if (rq->next_rq) {
		hdr->dout_resid = sreq->resid_len;
		hdr->din_resid = scsi_req(rq->next_rq)->resid_len;
	} else if (rq_data_dir(rq) == READ) {
		hdr->din_resid = sreq->resid_len;
	} else {
		hdr->dout_resid = sreq->resid_len;
	}

	return ret;
}

static void bsg_scsi_free_rq(struct request *rq)
{
	scsi_req_free_cmd(scsi_req(rq));
}

static const struct bsg_ops bsg_scsi_ops = {
	.check_proto		= bsg_scsi_check_proto,
	.fill_hdr		= bsg_scsi_fill_hdr,
	.complete_rq		= bsg_scsi_complete_rq,
	.free_rq		= bsg_scsi_free_rq,
};
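
/*
 * A rough sketch of how a user-space caller might fill in struct sg_io_v4
 * for the SCSI protocol handled by bsg_scsi_ops above; the CDB, buffer
 * sizes and timeout below are only illustrative:
 *
 *	struct sg_io_v4 hdr = { 0 };
 *	unsigned char cdb[6] = { 0 };		(a zeroed CDB is TEST UNIT READY)
 *	unsigned char sense[32];
 *
 *	hdr.guard = 'Q';
 *	hdr.protocol = BSG_PROTOCOL_SCSI;
 *	hdr.subprotocol = BSG_SUB_PROTOCOL_SCSI_CMD;
 *	hdr.request_len = sizeof(cdb);
 *	hdr.request = (__u64)(uintptr_t)cdb;
 *	hdr.max_response_len = sizeof(sense);
 *	hdr.response = (__u64)(uintptr_t)sense;
 *	hdr.timeout = 10000;			(milliseconds)
 *
 * The header is then either submitted with the SG_IO ioctl on a /dev/bsg
 * node, or written to the node and reaped back with read(2).  bsg_map_hdr()
 * below validates the guard and protocol fields.
 */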
static struct request *
bsg_map_hdr(struct request_queue *q, struct sg_io_v4 *hdr, fmode_t mode)
{
	struct request *rq, *next_rq = NULL;
	int ret;

	if (!q->bsg_dev.class_dev)
		return ERR_PTR(-ENXIO);

	if (hdr->guard != 'Q')
		return ERR_PTR(-EINVAL);

	ret = q->bsg_dev.ops->check_proto(hdr);
	if (ret)
		return ERR_PTR(ret);

	rq = blk_get_request(q, hdr->dout_xfer_len ?
			REQ_OP_SCSI_OUT : REQ_OP_SCSI_IN, 0);
	if (IS_ERR(rq))
		return rq;

	ret = q->bsg_dev.ops->fill_hdr(rq, hdr, mode);
	if (ret)
		goto out;

	rq->timeout = msecs_to_jiffies(hdr->timeout);
	if (!rq->timeout)
		rq->timeout = q->sg_timeout;
	if (!rq->timeout)
		rq->timeout = BLK_DEFAULT_SG_TIMEOUT;
	if (rq->timeout < BLK_MIN_SG_TIMEOUT)
		rq->timeout = BLK_MIN_SG_TIMEOUT;

	if (hdr->dout_xfer_len && hdr->din_xfer_len) {
		if (!test_bit(QUEUE_FLAG_BIDI, &q->queue_flags)) {
			ret = -EOPNOTSUPP;
			goto out;
		}

		next_rq = blk_get_request(q, REQ_OP_SCSI_IN, 0);
		if (IS_ERR(next_rq)) {
			ret = PTR_ERR(next_rq);
			goto out;
		}

		rq->next_rq = next_rq;
		ret = blk_rq_map_user(q, next_rq, NULL, uptr64(hdr->din_xferp),
				      hdr->din_xfer_len, GFP_KERNEL);
		if (ret)
			goto out_free_nextrq;
	}

	if (hdr->dout_xfer_len) {
		ret = blk_rq_map_user(q, rq, NULL, uptr64(hdr->dout_xferp),
				hdr->dout_xfer_len, GFP_KERNEL);
	} else if (hdr->din_xfer_len) {
		ret = blk_rq_map_user(q, rq, NULL, uptr64(hdr->din_xferp),
				hdr->din_xfer_len, GFP_KERNEL);
	} else {
		ret = blk_rq_map_user(q, rq, NULL, NULL, 0, GFP_KERNEL);
	}

	if (ret)
		goto out_unmap_nextrq;
	return rq;

out_unmap_nextrq:
	if (rq->next_rq)
		blk_rq_unmap_user(rq->next_rq->bio);
out_free_nextrq:
	if (rq->next_rq)
		blk_put_request(rq->next_rq);
out:
	q->bsg_dev.ops->free_rq(rq);
	blk_put_request(rq);
	return ERR_PTR(ret);
}

/*
 * async completion call-back from the block layer, when scsi/ide/whatever
 * calls end_that_request_last() on a request
 */
static void bsg_rq_end_io(struct request *rq, blk_status_t status)
{
	struct bsg_command *bc = rq->end_io_data;
	struct bsg_device *bd = bc->bd;
	unsigned long flags;

	bsg_dbg(bd, "finished rq %p bc %p, bio %p\n",
		rq, bc, bc->bio);

	bc->hdr.duration = jiffies_to_msecs(jiffies - bc->hdr.duration);

	spin_lock_irqsave(&bd->lock, flags);
	list_move_tail(&bc->list, &bd->done_list);
	bd->done_cmds++;
	spin_unlock_irqrestore(&bd->lock, flags);

	wake_up(&bd->wq_done);
}

/*
 * do final setup of a 'bc' and submit the matching 'rq' to the block
 * layer for io
 */
static void bsg_add_command(struct bsg_device *bd, struct request_queue *q,
			    struct bsg_command *bc, struct request *rq)
{
	int at_head = (0 == (bc->hdr.flags & BSG_FLAG_Q_AT_TAIL));

	/*
	 * add bc command to busy queue and submit rq for io
	 */
	bc->rq = rq;
	bc->bio = rq->bio;
	if (rq->next_rq)
		bc->bidi_bio = rq->next_rq->bio;
	bc->hdr.duration = jiffies;
	spin_lock_irq(&bd->lock);
	list_add_tail(&bc->list, &bd->busy_list);
	spin_unlock_irq(&bd->lock);

	bsg_dbg(bd, "queueing rq %p, bc %p\n", rq, bc);

	rq->end_io_data = bc;
	blk_execute_rq_nowait(q, NULL, rq, at_head, bsg_rq_end_io);
}

static struct bsg_command *bsg_next_done_cmd(struct bsg_device *bd)
{
	struct bsg_command *bc = NULL;

	spin_lock_irq(&bd->lock);
	if (bd->done_cmds) {
		bc = list_first_entry(&bd->done_list, struct bsg_command, list);
		list_del(&bc->list);
		bd->done_cmds--;
	}
	spin_unlock_irq(&bd->lock);

	return bc;
}

/*
 * Get a finished command from the done list
 */
static struct bsg_command *bsg_get_done_cmd(struct bsg_device *bd)
{
	struct bsg_command *bc;
	int ret;

	do {
		bc = bsg_next_done_cmd(bd);
		if (bc)
			break;

		if (!test_bit(BSG_F_BLOCK, &bd->flags)) {
			bc = ERR_PTR(-EAGAIN);
			break;
		}

		ret = wait_event_interruptible(bd->wq_done, bd->done_cmds);
		if (ret) {
			bc = ERR_PTR(-ERESTARTSYS);
			break;
		}
	} while (1);

	bsg_dbg(bd, "returning done %p\n", bc);

	return bc;
}
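
/*
 * Copy status, sense data and residual counts back into the user header and
 * tear down the request and its mappings; shared by the SG_IO ioctl path and
 * the read(2) completion path.
 */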
static int blk_complete_sgv4_hdr_rq(struct request *rq, struct sg_io_v4 *hdr,
				    struct bio *bio, struct bio *bidi_bio)
{
	int ret;

	ret = rq->q->bsg_dev.ops->complete_rq(rq, hdr);

	if (rq->next_rq) {
		blk_rq_unmap_user(bidi_bio);
		blk_put_request(rq->next_rq);
	}

	blk_rq_unmap_user(bio);
	rq->q->bsg_dev.ops->free_rq(rq);
	blk_put_request(rq);
	return ret;
}

static bool bsg_complete(struct bsg_device *bd)
{
	bool ret = false;
	bool spin;

	do {
		spin_lock_irq(&bd->lock);

		BUG_ON(bd->done_cmds > bd->queued_cmds);

		/*
		 * All commands consumed.
		 */
		if (bd->done_cmds == bd->queued_cmds)
			ret = true;

		spin = !test_bit(BSG_F_BLOCK, &bd->flags);

		spin_unlock_irq(&bd->lock);
	} while (!ret && spin);

	return ret;
}

static int bsg_complete_all_commands(struct bsg_device *bd)
{
	struct bsg_command *bc;
	int ret, tret;

	bsg_dbg(bd, "entered\n");

	/*
	 * wait for all commands to complete
	 */
	io_wait_event(bd->wq_done, bsg_complete(bd));

	/*
	 * discard done commands
	 */
	ret = 0;
	do {
		spin_lock_irq(&bd->lock);
		if (!bd->queued_cmds) {
			spin_unlock_irq(&bd->lock);
			break;
		}
		spin_unlock_irq(&bd->lock);

		bc = bsg_get_done_cmd(bd);
		if (IS_ERR(bc))
			break;

		tret = blk_complete_sgv4_hdr_rq(bc->rq, &bc->hdr, bc->bio,
						bc->bidi_bio);
		if (!ret)
			ret = tret;

		bsg_free_command(bc);
	} while (1);

	return ret;
}

static int
__bsg_read(char __user *buf, size_t count, struct bsg_device *bd,
	   const struct iovec *iov, ssize_t *bytes_read)
{
	struct bsg_command *bc;
	int nr_commands, ret;

	if (count % sizeof(struct sg_io_v4))
		return -EINVAL;

	ret = 0;
	nr_commands = count / sizeof(struct sg_io_v4);
	while (nr_commands) {
		bc = bsg_get_done_cmd(bd);
		if (IS_ERR(bc)) {
			ret = PTR_ERR(bc);
			break;
		}

		/*
		 * this is the only case where we need to copy data back
		 * after completing the request. so do that here,
		 * bsg_complete_work() cannot do that for us
		 */
		ret = blk_complete_sgv4_hdr_rq(bc->rq, &bc->hdr, bc->bio,
					       bc->bidi_bio);

		if (copy_to_user(buf, &bc->hdr, sizeof(bc->hdr)))
			ret = -EFAULT;

		bsg_free_command(bc);

		if (ret)
			break;

		buf += sizeof(struct sg_io_v4);
		*bytes_read += sizeof(struct sg_io_v4);
		nr_commands--;
	}

	return ret;
}

static inline void bsg_set_block(struct bsg_device *bd, struct file *file)
{
	if (file->f_flags & O_NONBLOCK)
		clear_bit(BSG_F_BLOCK, &bd->flags);
	else
		set_bit(BSG_F_BLOCK, &bd->flags);
}

/*
 * Check if the error is a "real" error that we should return.
 */
static inline int err_block_err(int ret)
{
	if (ret && ret != -ENOSPC && ret != -ENODATA && ret != -EAGAIN)
		return 1;

	return 0;
}

static ssize_t
bsg_read(struct file *file, char __user *buf, size_t count, loff_t *ppos)
{
	struct bsg_device *bd = file->private_data;
	int ret;
	ssize_t bytes_read;

	bsg_dbg(bd, "read %zd bytes\n", count);

	bsg_set_block(bd, file);

	bytes_read = 0;
	ret = __bsg_read(buf, count, bd, NULL, &bytes_read);
	*ppos = bytes_read;

	if (!bytes_read || err_block_err(ret))
		bytes_read = ret;

	return bytes_read;
}
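
/*
 * write(2) queues one request per struct sg_io_v4 in the user buffer; the
 * completions are reaped later via read(2).
 */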
static int __bsg_write(struct bsg_device *bd, const char __user *buf,
		       size_t count, ssize_t *bytes_written, fmode_t mode)
{
	struct bsg_command *bc;
	struct request *rq;
	int ret, nr_commands;

	if (count % sizeof(struct sg_io_v4))
		return -EINVAL;

	nr_commands = count / sizeof(struct sg_io_v4);
	rq = NULL;
	bc = NULL;
	ret = 0;
	while (nr_commands) {
		struct request_queue *q = bd->queue;

		bc = bsg_alloc_command(bd);
		if (IS_ERR(bc)) {
			ret = PTR_ERR(bc);
			bc = NULL;
			break;
		}

		if (copy_from_user(&bc->hdr, buf, sizeof(bc->hdr))) {
			ret = -EFAULT;
			break;
		}

		/*
		 * get a request, fill in the blanks, and add to request queue
		 */
		rq = bsg_map_hdr(bd->queue, &bc->hdr, mode);
		if (IS_ERR(rq)) {
			ret = PTR_ERR(rq);
			rq = NULL;
			break;
		}

		bsg_add_command(bd, q, bc, rq);
		bc = NULL;
		rq = NULL;
		nr_commands--;
		buf += sizeof(struct sg_io_v4);
		*bytes_written += sizeof(struct sg_io_v4);
	}

	if (bc)
		bsg_free_command(bc);

	return ret;
}

static ssize_t
bsg_write(struct file *file, const char __user *buf, size_t count, loff_t *ppos)
{
	struct bsg_device *bd = file->private_data;
	ssize_t bytes_written;
	int ret;

	bsg_dbg(bd, "write %zd bytes\n", count);

	if (unlikely(uaccess_kernel()))
		return -EINVAL;

	bsg_set_block(bd, file);

	bytes_written = 0;
	ret = __bsg_write(bd, buf, count, &bytes_written, file->f_mode);

	*ppos = bytes_written;

	/*
	 * return bytes written on non-fatal errors
	 */
	if (!bytes_written || err_block_err(ret))
		bytes_written = ret;

	bsg_dbg(bd, "returning %zd\n", bytes_written);
	return bytes_written;
}

static struct bsg_device *bsg_alloc_device(void)
{
	struct bsg_device *bd;

	bd = kzalloc(sizeof(struct bsg_device), GFP_KERNEL);
	if (unlikely(!bd))
		return NULL;

	spin_lock_init(&bd->lock);

	bd->max_queue = BSG_DEFAULT_CMDS;

	INIT_LIST_HEAD(&bd->busy_list);
	INIT_LIST_HEAD(&bd->done_list);
	INIT_HLIST_NODE(&bd->dev_list);

	init_waitqueue_head(&bd->wq_free);
	init_waitqueue_head(&bd->wq_done);
	return bd;
}

static int bsg_put_device(struct bsg_device *bd)
{
	int ret = 0, do_free;
	struct request_queue *q = bd->queue;

	mutex_lock(&bsg_mutex);

	do_free = atomic_dec_and_test(&bd->ref_count);
	if (!do_free) {
		mutex_unlock(&bsg_mutex);
		goto out;
	}

	hlist_del(&bd->dev_list);
	mutex_unlock(&bsg_mutex);

	bsg_dbg(bd, "tearing down\n");

	/*
	 * close can always block
	 */
	set_bit(BSG_F_BLOCK, &bd->flags);

	/*
	 * correct error detection baddies here again. it's the responsibility
	 * of the app to properly reap commands before close() if it wants
	 * fool-proof error detection
	 */
	ret = bsg_complete_all_commands(bd);

	kfree(bd);
out:
	if (do_free)
		blk_put_queue(q);
	return ret;
}

static struct bsg_device *bsg_add_device(struct inode *inode,
					 struct request_queue *rq,
					 struct file *file)
{
	struct bsg_device *bd;
	unsigned char buf[32];

	if (!blk_get_queue(rq))
		return ERR_PTR(-ENXIO);

	bd = bsg_alloc_device();
	if (!bd) {
		blk_put_queue(rq);
		return ERR_PTR(-ENOMEM);
	}

	bd->queue = rq;

	bsg_set_block(bd, file);

	atomic_set(&bd->ref_count, 1);
	mutex_lock(&bsg_mutex);
	hlist_add_head(&bd->dev_list, bsg_dev_idx_hash(iminor(inode)));

	strncpy(bd->name, dev_name(rq->bsg_dev.class_dev), sizeof(bd->name) - 1);
	bsg_dbg(bd, "bound to <%s>, max queue %d\n",
		format_dev_t(buf, inode->i_rdev), bd->max_queue);

	mutex_unlock(&bsg_mutex);
	return bd;
}

static struct bsg_device *__bsg_get_device(int minor, struct request_queue *q)
{
	struct bsg_device *bd;

	mutex_lock(&bsg_mutex);

	hlist_for_each_entry(bd, bsg_dev_idx_hash(minor), dev_list) {
		if (bd->queue == q) {
			atomic_inc(&bd->ref_count);
			goto found;
		}
	}
	bd = NULL;
found:
	mutex_unlock(&bsg_mutex);
	return bd;
}

static struct bsg_device *bsg_get_device(struct inode *inode, struct file *file)
{
	struct bsg_device *bd;
	struct bsg_class_device *bcd;

	/*
	 * find the class device
	 */
	mutex_lock(&bsg_mutex);
	bcd = idr_find(&bsg_minor_idr, iminor(inode));
	mutex_unlock(&bsg_mutex);

	if (!bcd)
		return ERR_PTR(-ENODEV);

	bd = __bsg_get_device(iminor(inode), bcd->queue);
	if (bd)
		return bd;

	bd = bsg_add_device(inode, bcd->queue, file);

	return bd;
}

static int bsg_open(struct inode *inode, struct file *file)
{
	struct bsg_device *bd;

	bd = bsg_get_device(inode, file);

	if (IS_ERR(bd))
		return PTR_ERR(bd);

	file->private_data = bd;
	return 0;
}

static int bsg_release(struct inode *inode, struct file *file)
{
	struct bsg_device *bd = file->private_data;

	file->private_data = NULL;
	return bsg_put_device(bd);
}
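
/*
 * Report EPOLLIN when a completed command is waiting on the done list and
 * EPOLLOUT while there is still room to queue another command.
 */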
static __poll_t bsg_poll(struct file *file, poll_table *wait)
{
	struct bsg_device *bd = file->private_data;
	__poll_t mask = 0;

	poll_wait(file, &bd->wq_done, wait);
	poll_wait(file, &bd->wq_free, wait);

	spin_lock_irq(&bd->lock);
	if (!list_empty(&bd->done_list))
		mask |= EPOLLIN | EPOLLRDNORM;
	if (bd->queued_cmds < bd->max_queue)
		mask |= EPOLLOUT;
	spin_unlock_irq(&bd->lock);

	return mask;
}

static long bsg_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
{
	struct bsg_device *bd = file->private_data;
	int __user *uarg = (int __user *) arg;
	int ret;

	switch (cmd) {
	/*
	 * our own ioctls
	 */
	case SG_GET_COMMAND_Q:
		return put_user(bd->max_queue, uarg);
	case SG_SET_COMMAND_Q: {
		int queue;

		if (get_user(queue, uarg))
			return -EFAULT;
		if (queue < 1)
			return -EINVAL;

		spin_lock_irq(&bd->lock);
		bd->max_queue = queue;
		spin_unlock_irq(&bd->lock);
		return 0;
	}

	/*
	 * SCSI/sg ioctls
	 */
	case SG_GET_VERSION_NUM:
	case SCSI_IOCTL_GET_IDLUN:
	case SCSI_IOCTL_GET_BUS_NUMBER:
	case SG_SET_TIMEOUT:
	case SG_GET_TIMEOUT:
	case SG_GET_RESERVED_SIZE:
	case SG_SET_RESERVED_SIZE:
	case SG_EMULATED_HOST:
	case SCSI_IOCTL_SEND_COMMAND: {
		void __user *uarg = (void __user *) arg;
		return scsi_cmd_ioctl(bd->queue, NULL, file->f_mode, cmd, uarg);
	}
	case SG_IO: {
		struct request *rq;
		struct bio *bio, *bidi_bio = NULL;
		struct sg_io_v4 hdr;
		int at_head;

		if (copy_from_user(&hdr, uarg, sizeof(hdr)))
			return -EFAULT;

		rq = bsg_map_hdr(bd->queue, &hdr, file->f_mode);
		if (IS_ERR(rq))
			return PTR_ERR(rq);

		bio = rq->bio;
		if (rq->next_rq)
			bidi_bio = rq->next_rq->bio;

		at_head = (0 == (hdr.flags & BSG_FLAG_Q_AT_TAIL));
		blk_execute_rq(bd->queue, NULL, rq, at_head);
		ret = blk_complete_sgv4_hdr_rq(rq, &hdr, bio, bidi_bio);

		if (copy_to_user(uarg, &hdr, sizeof(hdr)))
			return -EFAULT;

		return ret;
	}
	default:
		return -ENOTTY;
	}
}

static const struct file_operations bsg_fops = {
	.read		=	bsg_read,
	.write		=	bsg_write,
	.poll		=	bsg_poll,
	.open		=	bsg_open,
	.release	=	bsg_release,
	.unlocked_ioctl	=	bsg_ioctl,
	.owner		=	THIS_MODULE,
	.llseek		=	default_llseek,
};

void bsg_unregister_queue(struct request_queue *q)
{
	struct bsg_class_device *bcd = &q->bsg_dev;

	if (!bcd->class_dev)
		return;

	mutex_lock(&bsg_mutex);
	idr_remove(&bsg_minor_idr, bcd->minor);
	if (q->kobj.sd)
		sysfs_remove_link(&q->kobj, "bsg");
	device_unregister(bcd->class_dev);
	bcd->class_dev = NULL;
	mutex_unlock(&bsg_mutex);
}
EXPORT_SYMBOL_GPL(bsg_unregister_queue);
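
/*
 * Register a request queue with the bsg framework: allocate a minor from
 * bsg_minor_idr, create the /dev/bsg/<name> device node and link it from
 * the queue's sysfs directory.
 */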
int bsg_register_queue(struct request_queue *q, struct device *parent,
		const char *name, const struct bsg_ops *ops)
{
	struct bsg_class_device *bcd;
	dev_t dev;
	int ret;
	struct device *class_dev = NULL;

	/*
	 * we need a proper transport to send commands, not a stacked device
	 */
	if (!queue_is_rq_based(q))
		return 0;

	bcd = &q->bsg_dev;
	memset(bcd, 0, sizeof(*bcd));

	mutex_lock(&bsg_mutex);

	ret = idr_alloc(&bsg_minor_idr, bcd, 0, BSG_MAX_DEVS, GFP_KERNEL);
	if (ret < 0) {
		if (ret == -ENOSPC) {
			printk(KERN_ERR "bsg: too many bsg devices\n");
			ret = -EINVAL;
		}
		goto unlock;
	}

	bcd->minor = ret;
	bcd->queue = q;
	bcd->ops = ops;
	dev = MKDEV(bsg_major, bcd->minor);
	class_dev = device_create(bsg_class, parent, dev, NULL, "%s", name);
	if (IS_ERR(class_dev)) {
		ret = PTR_ERR(class_dev);
		goto idr_remove;
	}
	bcd->class_dev = class_dev;

	if (q->kobj.sd) {
		ret = sysfs_create_link(&q->kobj, &bcd->class_dev->kobj, "bsg");
		if (ret)
			goto unregister_class_dev;
	}

	mutex_unlock(&bsg_mutex);
	return 0;

unregister_class_dev:
	device_unregister(class_dev);
idr_remove:
	idr_remove(&bsg_minor_idr, bcd->minor);
unlock:
	mutex_unlock(&bsg_mutex);
	return ret;
}

int bsg_scsi_register_queue(struct request_queue *q, struct device *parent)
{
	if (!blk_queue_scsi_passthrough(q)) {
		WARN_ONCE(true, "Attempt to register a non-SCSI queue\n");
		return -EINVAL;
	}

	return bsg_register_queue(q, parent, dev_name(parent), &bsg_scsi_ops);
}
EXPORT_SYMBOL_GPL(bsg_scsi_register_queue);

static struct cdev bsg_cdev;

static char *bsg_devnode(struct device *dev, umode_t *mode)
{
	return kasprintf(GFP_KERNEL, "bsg/%s", dev_name(dev));
}

static int __init bsg_init(void)
{
	int ret, i;
	dev_t devid;

	bsg_cmd_cachep = kmem_cache_create("bsg_cmd",
				sizeof(struct bsg_command), 0, 0, NULL);
	if (!bsg_cmd_cachep) {
		printk(KERN_ERR "bsg: failed creating slab cache\n");
		return -ENOMEM;
	}

	for (i = 0; i < BSG_LIST_ARRAY_SIZE; i++)
		INIT_HLIST_HEAD(&bsg_device_list[i]);

	bsg_class = class_create(THIS_MODULE, "bsg");
	if (IS_ERR(bsg_class)) {
		ret = PTR_ERR(bsg_class);
		goto destroy_kmemcache;
	}
	bsg_class->devnode = bsg_devnode;

	ret = alloc_chrdev_region(&devid, 0, BSG_MAX_DEVS, "bsg");
	if (ret)
		goto destroy_bsg_class;

	bsg_major = MAJOR(devid);

	cdev_init(&bsg_cdev, &bsg_fops);
	ret = cdev_add(&bsg_cdev, MKDEV(bsg_major, 0), BSG_MAX_DEVS);
	if (ret)
		goto unregister_chrdev;

	printk(KERN_INFO BSG_DESCRIPTION " version " BSG_VERSION
	       " loaded (major %d)\n", bsg_major);
	return 0;
unregister_chrdev:
	unregister_chrdev_region(MKDEV(bsg_major, 0), BSG_MAX_DEVS);
destroy_bsg_class:
	class_destroy(bsg_class);
destroy_kmemcache:
	kmem_cache_destroy(bsg_cmd_cachep);
	return ret;
}

MODULE_AUTHOR("Jens Axboe");
MODULE_DESCRIPTION(BSG_DESCRIPTION);
MODULE_LICENSE("GPL");

device_initcall(bsg_init);