/*
 * Copyright (c) 2005 Cisco Systems.  All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/module.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/err.h>
#include <linux/string.h>
#include <linux/parser.h>
#include <linux/random.h>
#include <linux/jiffies.h>

#include <linux/atomic.h>

#include <scsi/scsi.h>
#include <scsi/scsi_device.h>
#include <scsi/scsi_dbg.h>
#include <scsi/scsi_tcq.h>
#include <scsi/srp.h>
#include <scsi/scsi_transport_srp.h>

#include "ib_srp.h"

#define DRV_NAME	"ib_srp"
#define PFX		DRV_NAME ": "
#define DRV_VERSION	"1.0"
#define DRV_RELDATE	"July 1, 2013"

MODULE_AUTHOR("Roland Dreier");
MODULE_DESCRIPTION("InfiniBand SCSI RDMA Protocol initiator "
		   "v" DRV_VERSION " (" DRV_RELDATE ")");
MODULE_LICENSE("Dual BSD/GPL");

static unsigned int srp_sg_tablesize;
static unsigned int cmd_sg_entries;
static unsigned int indirect_sg_entries;
static bool allow_ext_sg;
static bool prefer_fr;
static bool register_always;
static int topspin_workarounds = 1;

module_param(srp_sg_tablesize, uint, 0444);
MODULE_PARM_DESC(srp_sg_tablesize, "Deprecated name for cmd_sg_entries");

module_param(cmd_sg_entries, uint, 0444);
MODULE_PARM_DESC(cmd_sg_entries,
		 "Default number of gather/scatter entries in the SRP command (default is 12, max 255)");

module_param(indirect_sg_entries, uint, 0444);
MODULE_PARM_DESC(indirect_sg_entries,
		 "Default max number of gather/scatter entries (default is 12, max is " __stringify(SCSI_MAX_SG_CHAIN_SEGMENTS) ")");

module_param(allow_ext_sg, bool, 0444);
MODULE_PARM_DESC(allow_ext_sg,
		 "Default behavior when there are more than cmd_sg_entries S/G entries after mapping; fails the request when false (default false)");

module_param(topspin_workarounds, int, 0444);
MODULE_PARM_DESC(topspin_workarounds,
		 "Enable workarounds for Topspin/Cisco SRP target bugs if != 0");

module_param(prefer_fr, bool, 0444);
MODULE_PARM_DESC(prefer_fr,
		 "Whether to use fast registration if both FMR and fast registration are supported");

module_param(register_always, bool, 0444);
MODULE_PARM_DESC(register_always,
		 "Use memory registration even for contiguous memory regions");

static struct kernel_param_ops srp_tmo_ops;

static int srp_reconnect_delay = 10;
module_param_cb(reconnect_delay, &srp_tmo_ops, &srp_reconnect_delay,
		S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(reconnect_delay, "Time between successive reconnect attempts");

static int srp_fast_io_fail_tmo = 15;
module_param_cb(fast_io_fail_tmo, &srp_tmo_ops, &srp_fast_io_fail_tmo,
		S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(fast_io_fail_tmo,
		 "Number of seconds between the observation of a transport"
		 " layer error and failing all I/O. \"off\" means that this"
		 " functionality is disabled.");

static int srp_dev_loss_tmo = 600;
module_param_cb(dev_loss_tmo, &srp_tmo_ops, &srp_dev_loss_tmo,
		S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(dev_loss_tmo,
		 "Maximum number of seconds that the SRP transport should"
		 " insulate transport layer errors. After this time has been"
		 " exceeded the SCSI host is removed. Should be"
		 " between 1 and " __stringify(SCSI_DEVICE_BLOCK_MAX_TIMEOUT)
		 " if fast_io_fail_tmo has not been set. \"off\" means that"
		 " this functionality is disabled.");

static void srp_add_one(struct ib_device *device);
static void srp_remove_one(struct ib_device *device);
static void srp_recv_completion(struct ib_cq *cq, void *ch_ptr);
static void srp_send_completion(struct ib_cq *cq, void *ch_ptr);
static int srp_cm_handler(struct ib_cm_id *cm_id, struct ib_cm_event *event);

static struct scsi_transport_template *ib_srp_transport_template;
static struct workqueue_struct *srp_remove_wq;

static struct ib_client srp_client = {
	.name   = "srp",
	.add    = srp_add_one,
	.remove = srp_remove_one
};

static struct ib_sa_client srp_sa_client;

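/*
 * srp_tmo_get() - 'get' handler for the reconnect_delay, fast_io_fail_tmo
 * and dev_loss_tmo module parameters. Prints the timeout in seconds, or
 * "off" if the stored value is negative (timeout disabled).
 */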
static int srp_tmo_get(char *buffer, const struct kernel_param *kp)
{
	int tmo = *(int *)kp->arg;

	if (tmo >= 0)
		return sprintf(buffer, "%d", tmo);
	else
		return sprintf(buffer, "off");
}

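/*
 * srp_tmo_set() - 'set' handler for the three timeout module parameters.
 * Accepts either "off" or an integer number of seconds, and validates the
 * new value against the other two timeouts via srp_tmo_valid() before
 * storing it.
 */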
static int srp_tmo_set(const char *val, const struct kernel_param *kp)
{
	int tmo, res;

	if (strncmp(val, "off", 3) != 0) {
		res = kstrtoint(val, 0, &tmo);
		if (res)
			goto out;
	} else {
		tmo = -1;
	}
	if (kp->arg == &srp_reconnect_delay)
		res = srp_tmo_valid(tmo, srp_fast_io_fail_tmo,
				    srp_dev_loss_tmo);
	else if (kp->arg == &srp_fast_io_fail_tmo)
		res = srp_tmo_valid(srp_reconnect_delay, tmo, srp_dev_loss_tmo);
	else
		res = srp_tmo_valid(srp_reconnect_delay, srp_fast_io_fail_tmo,
				    tmo);
	if (res)
		goto out;
	*(int *)kp->arg = tmo;

out:
	return res;
}

static struct kernel_param_ops srp_tmo_ops = {
	.get = srp_tmo_get,
	.set = srp_tmo_set,
};

static inline struct srp_target_port *host_to_target(struct Scsi_Host *host)
{
	return (struct srp_target_port *) host->hostdata;
}

static const char *srp_target_info(struct Scsi_Host *host)
{
	return host_to_target(host)->target_name;
}

static int srp_target_is_topspin(struct srp_target_port *target)
{
	static const u8 topspin_oui[3] = { 0x00, 0x05, 0xad };
	static const u8 cisco_oui[3]   = { 0x00, 0x1b, 0x0d };

	return topspin_workarounds &&
		(!memcmp(&target->ioc_guid, topspin_oui, sizeof topspin_oui) ||
		 !memcmp(&target->ioc_guid, cisco_oui, sizeof cisco_oui));
}

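/*
 * srp_alloc_iu() - allocate an information unit (IU) buffer of @size bytes
 * and DMA-map it for @direction. Returns NULL if allocation or mapping
 * fails.
 */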
static struct srp_iu *srp_alloc_iu(struct srp_host *host, size_t size,
				   gfp_t gfp_mask,
				   enum dma_data_direction direction)
{
	struct srp_iu *iu;

	iu = kmalloc(sizeof *iu, gfp_mask);
	if (!iu)
		goto out;

	iu->buf = kzalloc(size, gfp_mask);
	if (!iu->buf)
		goto out_free_iu;

	iu->dma = ib_dma_map_single(host->srp_dev->dev, iu->buf, size,
				    direction);
	if (ib_dma_mapping_error(host->srp_dev->dev, iu->dma))
		goto out_free_buf;

	iu->size      = size;
	iu->direction = direction;

	return iu;

out_free_buf:
	kfree(iu->buf);
out_free_iu:
	kfree(iu);
out:
	return NULL;
}

static void srp_free_iu(struct srp_host *host, struct srp_iu *iu)
{
	if (!iu)
		return;

	ib_dma_unmap_single(host->srp_dev->dev, iu->dma, iu->size,
			    iu->direction);
	kfree(iu->buf);
	kfree(iu);
}

static void srp_qp_event(struct ib_event *event, void *context)
{
	pr_debug("QP event %d\n", event->event);
}

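/*
 * srp_init_qp() - transition a freshly created QP to the INIT state and
 * program its P_Key index, access flags and port number.
 */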
static int srp_init_qp(struct srp_target_port *target,
		       struct ib_qp *qp)
{
	struct ib_qp_attr *attr;
	int ret;

	attr = kmalloc(sizeof *attr, GFP_KERNEL);
	if (!attr)
		return -ENOMEM;

	ret = ib_find_pkey(target->srp_host->srp_dev->dev,
			   target->srp_host->port,
			   be16_to_cpu(target->pkey),
			   &attr->pkey_index);
	if (ret)
		goto out;

	attr->qp_state        = IB_QPS_INIT;
	attr->qp_access_flags = (IB_ACCESS_REMOTE_READ |
				 IB_ACCESS_REMOTE_WRITE);
	attr->port_num        = target->srp_host->port;

	ret = ib_modify_qp(qp, attr,
			   IB_QP_STATE		|
			   IB_QP_PKEY_INDEX	|
			   IB_QP_ACCESS_FLAGS	|
			   IB_QP_PORT);

out:
	kfree(attr);
	return ret;
}

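/*
 * srp_new_cm_id() - allocate a new IB CM ID for @ch, destroying any
 * previous one, and (re)initialize the path record fields from the target
 * port parameters.
 */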
static int srp_new_cm_id(struct srp_rdma_ch *ch)
{
	struct srp_target_port *target = ch->target;
	struct ib_cm_id *new_cm_id;

	new_cm_id = ib_create_cm_id(target->srp_host->srp_dev->dev,
				    srp_cm_handler, ch);
	if (IS_ERR(new_cm_id))
		return PTR_ERR(new_cm_id);

	if (ch->cm_id)
		ib_destroy_cm_id(ch->cm_id);
	ch->cm_id = new_cm_id;
	ch->path.sgid = target->sgid;
	ch->path.dgid = target->orig_dgid;
	ch->path.pkey = target->pkey;
	ch->path.service_id = target->service_id;

	return 0;
}

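/*
 * srp_alloc_fmr_pool() - create an FMR pool sized to the SCSI host queue
 * depth, with a dirty watermark of a quarter of the pool size.
 */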
static struct ib_fmr_pool *srp_alloc_fmr_pool(struct srp_target_port *target)
{
	struct srp_device *dev = target->srp_host->srp_dev;
	struct ib_fmr_pool_param fmr_param;

	memset(&fmr_param, 0, sizeof(fmr_param));
	fmr_param.pool_size	    = target->scsi_host->can_queue;
	fmr_param.dirty_watermark   = fmr_param.pool_size / 4;
	fmr_param.cache		    = 1;
	fmr_param.max_pages_per_fmr = dev->max_pages_per_mr;
	fmr_param.page_shift	    = ilog2(dev->mr_page_size);
	fmr_param.access	    = (IB_ACCESS_LOCAL_WRITE |
				       IB_ACCESS_REMOTE_WRITE |
				       IB_ACCESS_REMOTE_READ);

	return ib_create_fmr_pool(dev->pd, &fmr_param);
}

/**
 * srp_destroy_fr_pool() - free the resources owned by a pool
 * @pool: Fast registration pool to be destroyed.
 */
static void srp_destroy_fr_pool(struct srp_fr_pool *pool)
{
	int i;
	struct srp_fr_desc *d;

	if (!pool)
		return;

	for (i = 0, d = &pool->desc[0]; i < pool->size; i++, d++) {
		if (d->frpl)
			ib_free_fast_reg_page_list(d->frpl);
		if (d->mr)
			ib_dereg_mr(d->mr);
	}
	kfree(pool);
}

/**
 * srp_create_fr_pool() - allocate and initialize a pool for fast registration
 * @device:            IB device to allocate fast registration descriptors for.
 * @pd:                Protection domain associated with the FR descriptors.
 * @pool_size:         Number of descriptors to allocate.
 * @max_page_list_len: Maximum fast registration work request page list length.
 */
static struct srp_fr_pool *srp_create_fr_pool(struct ib_device *device,
					      struct ib_pd *pd, int pool_size,
					      int max_page_list_len)
{
	struct srp_fr_pool *pool;
	struct srp_fr_desc *d;
	struct ib_mr *mr;
	struct ib_fast_reg_page_list *frpl;
	int i, ret = -EINVAL;

	if (pool_size <= 0)
		goto err;
	ret = -ENOMEM;
	pool = kzalloc(sizeof(struct srp_fr_pool) +
		       pool_size * sizeof(struct srp_fr_desc), GFP_KERNEL);
	if (!pool)
		goto err;
	pool->size = pool_size;
	pool->max_page_list_len = max_page_list_len;
	spin_lock_init(&pool->lock);
	INIT_LIST_HEAD(&pool->free_list);

	for (i = 0, d = &pool->desc[0]; i < pool->size; i++, d++) {
		mr = ib_alloc_fast_reg_mr(pd, max_page_list_len);
		if (IS_ERR(mr)) {
			ret = PTR_ERR(mr);
			goto destroy_pool;
		}
		d->mr = mr;
		frpl = ib_alloc_fast_reg_page_list(device, max_page_list_len);
		if (IS_ERR(frpl)) {
			ret = PTR_ERR(frpl);
			goto destroy_pool;
		}
		d->frpl = frpl;
		list_add_tail(&d->entry, &pool->free_list);
	}

out:
	return pool;

destroy_pool:
	srp_destroy_fr_pool(pool);

err:
	pool = ERR_PTR(ret);
	goto out;
}

/**
 * srp_fr_pool_get() - obtain a descriptor suitable for fast registration
 * @pool: Pool to obtain descriptor from.
 */
static struct srp_fr_desc *srp_fr_pool_get(struct srp_fr_pool *pool)
{
	struct srp_fr_desc *d = NULL;
	unsigned long flags;

	spin_lock_irqsave(&pool->lock, flags);
	if (!list_empty(&pool->free_list)) {
		d = list_first_entry(&pool->free_list, typeof(*d), entry);
		list_del(&d->entry);
	}
	spin_unlock_irqrestore(&pool->lock, flags);

	return d;
}

/**
 * srp_fr_pool_put() - put an FR descriptor back in the free list
 * @pool: Pool the descriptor was allocated from.
 * @desc: Pointer to an array of fast registration descriptor pointers.
 * @n:    Number of descriptors to put back.
 *
 * Note: The caller must already have queued an invalidation request for
 * desc->mr->rkey before calling this function.
 */
static void srp_fr_pool_put(struct srp_fr_pool *pool, struct srp_fr_desc **desc,
			    int n)
{
	unsigned long flags;
	int i;

	spin_lock_irqsave(&pool->lock, flags);
	for (i = 0; i < n; i++)
		list_add(&desc[i]->entry, &pool->free_list);
	spin_unlock_irqrestore(&pool->lock, flags);
}

static struct srp_fr_pool *srp_alloc_fr_pool(struct srp_target_port *target)
{
	struct srp_device *dev = target->srp_host->srp_dev;

	return srp_create_fr_pool(dev->dev, dev->pd,
				  target->scsi_host->can_queue,
				  dev->max_pages_per_mr);
}

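/*
 * srp_create_ch_ib() - create the completion queues, QP and memory
 * registration pool (FR or FMR, depending on device capabilities) for an
 * RDMA channel, replacing any resources already attached to @ch.
 */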
static int srp_create_ch_ib(struct srp_rdma_ch *ch)
{
	struct srp_target_port *target = ch->target;
	struct srp_device *dev = target->srp_host->srp_dev;
	struct ib_qp_init_attr *init_attr;
	struct ib_cq *recv_cq, *send_cq;
	struct ib_qp *qp;
	struct ib_fmr_pool *fmr_pool = NULL;
	struct srp_fr_pool *fr_pool = NULL;
	const int m = 1 + dev->use_fast_reg;
	int ret;

	init_attr = kzalloc(sizeof *init_attr, GFP_KERNEL);
	if (!init_attr)
		return -ENOMEM;

	recv_cq = ib_create_cq(dev->dev, srp_recv_completion, NULL, ch,
			       target->queue_size, ch->comp_vector);
	if (IS_ERR(recv_cq)) {
		ret = PTR_ERR(recv_cq);
		goto err;
	}

	send_cq = ib_create_cq(dev->dev, srp_send_completion, NULL, ch,
			       m * target->queue_size, ch->comp_vector);
	if (IS_ERR(send_cq)) {
		ret = PTR_ERR(send_cq);
		goto err_recv_cq;
	}

	ib_req_notify_cq(recv_cq, IB_CQ_NEXT_COMP);

	init_attr->event_handler       = srp_qp_event;
	init_attr->cap.max_send_wr     = m * target->queue_size;
	init_attr->cap.max_recv_wr     = target->queue_size;
	init_attr->cap.max_recv_sge    = 1;
	init_attr->cap.max_send_sge    = 1;
	init_attr->sq_sig_type         = IB_SIGNAL_REQ_WR;
	init_attr->qp_type             = IB_QPT_RC;
	init_attr->send_cq             = send_cq;
	init_attr->recv_cq             = recv_cq;

	qp = ib_create_qp(dev->pd, init_attr);
	if (IS_ERR(qp)) {
		ret = PTR_ERR(qp);
		goto err_send_cq;
	}

	ret = srp_init_qp(target, qp);
	if (ret)
		goto err_qp;

	if (dev->use_fast_reg && dev->has_fr) {
		fr_pool = srp_alloc_fr_pool(target);
		if (IS_ERR(fr_pool)) {
			ret = PTR_ERR(fr_pool);
			shost_printk(KERN_WARNING, target->scsi_host, PFX
				     "FR pool allocation failed (%d)\n", ret);
			goto err_qp;
		}
		if (ch->fr_pool)
			srp_destroy_fr_pool(ch->fr_pool);
		ch->fr_pool = fr_pool;
	} else if (!dev->use_fast_reg && dev->has_fmr) {
		fmr_pool = srp_alloc_fmr_pool(target);
		if (IS_ERR(fmr_pool)) {
			ret = PTR_ERR(fmr_pool);
			shost_printk(KERN_WARNING, target->scsi_host, PFX
				     "FMR pool allocation failed (%d)\n", ret);
			goto err_qp;
		}
		if (ch->fmr_pool)
			ib_destroy_fmr_pool(ch->fmr_pool);
		ch->fmr_pool = fmr_pool;
	}

	if (ch->qp)
		ib_destroy_qp(ch->qp);
	if (ch->recv_cq)
		ib_destroy_cq(ch->recv_cq);
	if (ch->send_cq)
		ib_destroy_cq(ch->send_cq);

	ch->qp = qp;
	ch->recv_cq = recv_cq;
	ch->send_cq = send_cq;

	kfree(init_attr);
	return 0;

err_qp:
	ib_destroy_qp(qp);

err_send_cq:
	ib_destroy_cq(send_cq);

err_recv_cq:
	ib_destroy_cq(recv_cq);

err:
	kfree(init_attr);
	return ret;
}

/*
 * Note: this function may be called without srp_alloc_iu_bufs() having been
 * invoked. Hence the ch->[rt]x_ring checks.
 */
static void srp_free_ch_ib(struct srp_target_port *target,
			   struct srp_rdma_ch *ch)
{
	struct srp_device *dev = target->srp_host->srp_dev;
	int i;

	if (ch->cm_id) {
		ib_destroy_cm_id(ch->cm_id);
		ch->cm_id = NULL;
	}

	if (dev->use_fast_reg) {
		if (ch->fr_pool)
			srp_destroy_fr_pool(ch->fr_pool);
	} else {
		if (ch->fmr_pool)
			ib_destroy_fmr_pool(ch->fmr_pool);
	}
	ib_destroy_qp(ch->qp);
	ib_destroy_cq(ch->send_cq);
	ib_destroy_cq(ch->recv_cq);

	ch->qp = NULL;
	ch->send_cq = ch->recv_cq = NULL;

	if (ch->rx_ring) {
		for (i = 0; i < target->queue_size; ++i)
			srp_free_iu(target->srp_host, ch->rx_ring[i]);
		kfree(ch->rx_ring);
		ch->rx_ring = NULL;
	}
	if (ch->tx_ring) {
		for (i = 0; i < target->queue_size; ++i)
			srp_free_iu(target->srp_host, ch->tx_ring[i]);
		kfree(ch->tx_ring);
		ch->tx_ring = NULL;
	}
}

static void srp_path_rec_completion(int status,
				    struct ib_sa_path_rec *pathrec,
				    void *ch_ptr)
{
	struct srp_rdma_ch *ch = ch_ptr;
	struct srp_target_port *target = ch->target;

	ch->status = status;
	if (status)
		shost_printk(KERN_ERR, target->scsi_host,
			     PFX "Got failed path rec status %d\n", status);
	else
		ch->path = *pathrec;
	complete(&ch->done);
}

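/*
 * srp_lookup_path() - issue an SA path record query for the channel and
 * wait (interruptibly) for srp_path_rec_completion() to fill in ch->path.
 */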
static int srp_lookup_path(struct srp_rdma_ch *ch)
{
	struct srp_target_port *target = ch->target;
	int ret;

	ch->path.numb_path = 1;

	init_completion(&ch->done);

	ch->path_query_id = ib_sa_path_rec_get(&srp_sa_client,
					       target->srp_host->srp_dev->dev,
					       target->srp_host->port,
					       &ch->path,
					       IB_SA_PATH_REC_SERVICE_ID |
					       IB_SA_PATH_REC_DGID	 |
					       IB_SA_PATH_REC_SGID	 |
					       IB_SA_PATH_REC_NUMB_PATH	 |
					       IB_SA_PATH_REC_PKEY,
					       SRP_PATH_REC_TIMEOUT_MS,
					       GFP_KERNEL,
					       srp_path_rec_completion,
					       ch, &ch->path_query);
	if (ch->path_query_id < 0)
		return ch->path_query_id;

	ret = wait_for_completion_interruptible(&ch->done);
	if (ret < 0)
		return ret;

	if (ch->status < 0)
		shost_printk(KERN_WARNING, target->scsi_host,
			     PFX "Path record query failed\n");

	return ch->status;
}

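/*
 * srp_send_req() - build an SRP_LOGIN_REQ and send it inside an IB CM REQ
 * for the channel, including the Topspin/Cisco initiator port ID
 * workaround when needed.
 */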
static int srp_send_req(struct srp_rdma_ch *ch)
{
	struct srp_target_port *target = ch->target;
	struct {
		struct ib_cm_req_param param;
		struct srp_login_req   priv;
	} *req = NULL;
	int status;

	req = kzalloc(sizeof *req, GFP_KERNEL);
	if (!req)
		return -ENOMEM;

	req->param.primary_path		      = &ch->path;
	req->param.alternate_path	      = NULL;
	req->param.service_id		      = target->service_id;
	req->param.qp_num		      = ch->qp->qp_num;
	req->param.qp_type		      = ch->qp->qp_type;
	req->param.private_data		      = &req->priv;
	req->param.private_data_len	      = sizeof req->priv;
	req->param.flow_control		      = 1;

	get_random_bytes(&req->param.starting_psn, 4);
	req->param.starting_psn		     &= 0xffffff;

	/*
	 * Pick some arbitrary defaults here; we could make these
	 * module parameters if anyone cared about setting them.
	 */
	req->param.responder_resources	      = 4;
	req->param.remote_cm_response_timeout = 20;
	req->param.local_cm_response_timeout  = 20;
	req->param.retry_count                = target->tl_retry_count;
	req->param.rnr_retry_count	      = 7;
	req->param.max_cm_retries	      = 15;

	req->priv.opcode	= SRP_LOGIN_REQ;
	req->priv.tag		= 0;
	req->priv.req_it_iu_len = cpu_to_be32(target->max_iu_len);
	req->priv.req_buf_fmt	= cpu_to_be16(SRP_BUF_FORMAT_DIRECT |
					      SRP_BUF_FORMAT_INDIRECT);
	/*
	 * In the published SRP specification (draft rev. 16a), the
	 * port identifier format is 8 bytes of ID extension followed
	 * by 8 bytes of GUID.  Older drafts put the two halves in the
	 * opposite order, so that the GUID comes first.
	 *
	 * Targets conforming to these obsolete drafts can be
	 * recognized by the I/O Class they report.
	 */
	if (target->io_class == SRP_REV10_IB_IO_CLASS) {
		memcpy(req->priv.initiator_port_id,
		       &target->sgid.global.interface_id, 8);
		memcpy(req->priv.initiator_port_id + 8,
		       &target->initiator_ext, 8);
		memcpy(req->priv.target_port_id,     &target->ioc_guid, 8);
		memcpy(req->priv.target_port_id + 8, &target->id_ext, 8);
	} else {
		memcpy(req->priv.initiator_port_id,
		       &target->initiator_ext, 8);
		memcpy(req->priv.initiator_port_id + 8,
		       &target->sgid.global.interface_id, 8);
		memcpy(req->priv.target_port_id,     &target->id_ext, 8);
		memcpy(req->priv.target_port_id + 8, &target->ioc_guid, 8);
	}

	/*
	 * Topspin/Cisco SRP targets will reject our login unless we
	 * zero out the first 8 bytes of our initiator port ID and set
	 * the second 8 bytes to the local node GUID.
	 */
	if (srp_target_is_topspin(target)) {
		shost_printk(KERN_DEBUG, target->scsi_host,
			     PFX "Topspin/Cisco initiator port ID workaround "
			     "activated for target GUID %016llx\n",
			     (unsigned long long) be64_to_cpu(target->ioc_guid));
		memset(req->priv.initiator_port_id, 0, 8);
		memcpy(req->priv.initiator_port_id + 8,
		       &target->srp_host->srp_dev->dev->node_guid, 8);
	}

	status = ib_send_cm_req(ch->cm_id, &req->param);

	kfree(req);

	return status;
}

static bool srp_queue_remove_work(struct srp_target_port *target)
{
	bool changed = false;

	spin_lock_irq(&target->lock);
	if (target->state != SRP_TARGET_REMOVED) {
		target->state = SRP_TARGET_REMOVED;
		changed = true;
	}
	spin_unlock_irq(&target->lock);

	if (changed)
		queue_work(srp_remove_wq, &target->remove_work);

	return changed;
}

static bool srp_change_conn_state(struct srp_target_port *target,
				  bool connected)
{
	bool changed = false;

	spin_lock_irq(&target->lock);
	if (target->connected != connected) {
		target->connected = connected;
		changed = true;
	}
	spin_unlock_irq(&target->lock);

	return changed;
}

static void srp_disconnect_target(struct srp_target_port *target)
{
	struct srp_rdma_ch *ch = &target->ch;

	if (srp_change_conn_state(target, false)) {
		/* XXX should send SRP_I_LOGOUT request */

		if (ib_send_cm_dreq(ch->cm_id, NULL, 0)) {
			shost_printk(KERN_DEBUG, target->scsi_host,
				     PFX "Sending CM DREQ failed\n");
		}
	}
}

static void srp_free_req_data(struct srp_target_port *target,
			      struct srp_rdma_ch *ch)
{
	struct srp_device *dev = target->srp_host->srp_dev;
	struct ib_device *ibdev = dev->dev;
	struct srp_request *req;
	int i;

	if (!ch->req_ring)
		return;

	for (i = 0; i < target->req_ring_size; ++i) {
		req = &ch->req_ring[i];
		if (dev->use_fast_reg)
			kfree(req->fr_list);
		else
			kfree(req->fmr_list);
		kfree(req->map_page);
		if (req->indirect_dma_addr) {
			ib_dma_unmap_single(ibdev, req->indirect_dma_addr,
					    target->indirect_size,
					    DMA_TO_DEVICE);
		}
		kfree(req->indirect_desc);
	}

	kfree(ch->req_ring);
	ch->req_ring = NULL;
}

static int srp_alloc_req_data(struct srp_rdma_ch *ch)
{
	struct srp_target_port *target = ch->target;
	struct srp_device *srp_dev = target->srp_host->srp_dev;
	struct ib_device *ibdev = srp_dev->dev;
	struct srp_request *req;
	void *mr_list;
	dma_addr_t dma_addr;
	int i, ret = -ENOMEM;

	INIT_LIST_HEAD(&ch->free_reqs);

	ch->req_ring = kcalloc(target->req_ring_size, sizeof(*ch->req_ring),
			       GFP_KERNEL);
	if (!ch->req_ring)
		goto out;

	for (i = 0; i < target->req_ring_size; ++i) {
		req = &ch->req_ring[i];
		mr_list = kmalloc(target->cmd_sg_cnt * sizeof(void *),
				  GFP_KERNEL);
		if (!mr_list)
			goto out;
		if (srp_dev->use_fast_reg)
			req->fr_list = mr_list;
		else
			req->fmr_list = mr_list;
		req->map_page = kmalloc(srp_dev->max_pages_per_mr *
					sizeof(void *), GFP_KERNEL);
		if (!req->map_page)
			goto out;
		req->indirect_desc = kmalloc(target->indirect_size, GFP_KERNEL);
		if (!req->indirect_desc)
			goto out;

		dma_addr = ib_dma_map_single(ibdev, req->indirect_desc,
					     target->indirect_size,
					     DMA_TO_DEVICE);
		if (ib_dma_mapping_error(ibdev, dma_addr))
			goto out;

		req->indirect_dma_addr = dma_addr;
		req->index = i;
		list_add_tail(&req->list, &ch->free_reqs);
	}
	ret = 0;

out:
	return ret;
}

/**
 * srp_del_scsi_host_attr() - Remove attributes defined in the host template.
 * @shost: SCSI host whose attributes to remove from sysfs.
 *
 * Note: Any attributes defined in the host template and that did not exist
 * before invocation of this function will be ignored.
 */
static void srp_del_scsi_host_attr(struct Scsi_Host *shost)
{
	struct device_attribute **attr;

	for (attr = shost->hostt->shost_attrs; attr && *attr; ++attr)
		device_remove_file(&shost->shost_dev, *attr);
}

static void srp_remove_target(struct srp_target_port *target)
{
	struct srp_rdma_ch *ch = &target->ch;

	WARN_ON_ONCE(target->state != SRP_TARGET_REMOVED);

	srp_del_scsi_host_attr(target->scsi_host);
	srp_rport_get(target->rport);
	srp_remove_host(target->scsi_host);
	scsi_remove_host(target->scsi_host);
	srp_stop_rport_timers(target->rport);
	srp_disconnect_target(target);
	srp_free_ch_ib(target, ch);
	cancel_work_sync(&target->tl_err_work);
	srp_rport_put(target->rport);
	srp_free_req_data(target, ch);

	spin_lock(&target->srp_host->target_lock);
	list_del(&target->list);
	spin_unlock(&target->srp_host->target_lock);

	scsi_host_put(target->scsi_host);
}

static void srp_remove_work(struct work_struct *work)
{
	struct srp_target_port *target =
		container_of(work, struct srp_target_port, remove_work);

	WARN_ON_ONCE(target->state != SRP_TARGET_REMOVED);

	srp_remove_target(target);
}

static void srp_rport_delete(struct srp_rport *rport)
{
	struct srp_target_port *target = rport->lld_data;

	srp_queue_remove_work(target);
}

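/*
 * srp_connect_ch() - resolve the path and log in to the target, retrying as
 * long as the CM handler reports a port or LID/QP redirect. A stale
 * connection reject terminates the loop with -ECONNRESET.
 */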
static int srp_connect_ch(struct srp_rdma_ch *ch)
{
	struct srp_target_port *target = ch->target;
	int ret;

	WARN_ON_ONCE(target->connected);

	target->qp_in_error = false;

	ret = srp_lookup_path(ch);
	if (ret)
		return ret;

	while (1) {
		init_completion(&ch->done);
		ret = srp_send_req(ch);
		if (ret)
			return ret;
		ret = wait_for_completion_interruptible(&ch->done);
		if (ret < 0)
			return ret;

		/*
		 * The CM event handling code will set status to
		 * SRP_PORT_REDIRECT if we get a port redirect REJ
		 * back, or SRP_DLID_REDIRECT if we get a lid/qp
		 * redirect REJ back.
		 */
		switch (ch->status) {
		case 0:
			srp_change_conn_state(target, true);
			return 0;

		case SRP_PORT_REDIRECT:
			ret = srp_lookup_path(ch);
			if (ret)
				return ret;
			break;

		case SRP_DLID_REDIRECT:
			break;

		case SRP_STALE_CONN:
			shost_printk(KERN_ERR, target->scsi_host, PFX
				     "giving up on stale connection\n");
			ch->status = -ECONNRESET;
			return ch->status;

		default:
			return ch->status;
		}
	}
}

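/*
 * srp_inv_rkey() - post a local invalidation work request for @rkey on the
 * channel's QP.
 */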
static int srp_inv_rkey(struct srp_rdma_ch *ch, u32 rkey)
{
	struct ib_send_wr *bad_wr;
	struct ib_send_wr wr = {
		.opcode		    = IB_WR_LOCAL_INV,
		.wr_id		    = LOCAL_INV_WR_ID_MASK,
		.next		    = NULL,
		.num_sge	    = 0,
		.send_flags	    = 0,
		.ex.invalidate_rkey = rkey,
	};

	return ib_post_send(ch->qp, &wr, &bad_wr);
}

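/*
 * srp_unmap_data() - release the memory registrations (FR invalidation or
 * FMR unmap) associated with a request and DMA-unmap its scatterlist.
 */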
static void srp_unmap_data(struct scsi_cmnd *scmnd,
			   struct srp_rdma_ch *ch,
			   struct srp_request *req)
{
	struct srp_target_port *target = ch->target;
	struct srp_device *dev = target->srp_host->srp_dev;
	struct ib_device *ibdev = dev->dev;
	int i, res;

	if (!scsi_sglist(scmnd) ||
	    (scmnd->sc_data_direction != DMA_TO_DEVICE &&
	     scmnd->sc_data_direction != DMA_FROM_DEVICE))
		return;

	if (dev->use_fast_reg) {
		struct srp_fr_desc **pfr;

		for (i = req->nmdesc, pfr = req->fr_list; i > 0; i--, pfr++) {
			res = srp_inv_rkey(ch, (*pfr)->mr->rkey);
			if (res < 0) {
				shost_printk(KERN_ERR, target->scsi_host, PFX
					     "Queueing INV WR for rkey %#x failed (%d)\n",
					     (*pfr)->mr->rkey, res);
				queue_work(system_long_wq,
					   &target->tl_err_work);
			}
		}
		if (req->nmdesc)
			srp_fr_pool_put(ch->fr_pool, req->fr_list,
					req->nmdesc);
	} else {
		struct ib_pool_fmr **pfmr;

		for (i = req->nmdesc, pfmr = req->fmr_list; i > 0; i--, pfmr++)
			ib_fmr_pool_unmap(*pfmr);
	}

	ib_dma_unmap_sg(ibdev, scsi_sglist(scmnd), scsi_sg_count(scmnd),
			scmnd->sc_data_direction);
}

/**
 * srp_claim_req - Take ownership of the scmnd associated with a request.
 * @ch: SRP RDMA channel.
 * @req: SRP request.
 * @sdev: If not NULL, only take ownership for this SCSI device.
 * @scmnd: If NULL, take ownership of @req->scmnd. If not NULL, only take
 *         ownership of @req->scmnd if it equals @scmnd.
 *
 * Return value:
 * Either NULL or a pointer to the SCSI command the caller became owner of.
 */
static struct scsi_cmnd *srp_claim_req(struct srp_rdma_ch *ch,
				       struct srp_request *req,
				       struct scsi_device *sdev,
				       struct scsi_cmnd *scmnd)
{
	unsigned long flags;

	spin_lock_irqsave(&ch->lock, flags);
	if (req->scmnd &&
	    (!sdev || req->scmnd->device == sdev) &&
	    (!scmnd || req->scmnd == scmnd)) {
		scmnd = req->scmnd;
		req->scmnd = NULL;
	} else {
		scmnd = NULL;
	}
	spin_unlock_irqrestore(&ch->lock, flags);

	return scmnd;
}

/**
 * srp_free_req() - Unmap data and add request to the free request list.
 * @ch: SRP RDMA channel.
 * @req: Request to be freed.
 * @scmnd: SCSI command associated with @req.
 * @req_lim_delta: Amount to be added to @target->req_lim.
 */
static void srp_free_req(struct srp_rdma_ch *ch, struct srp_request *req,
			 struct scsi_cmnd *scmnd, s32 req_lim_delta)
{
	unsigned long flags;

	srp_unmap_data(scmnd, ch, req);

	spin_lock_irqsave(&ch->lock, flags);
	ch->req_lim += req_lim_delta;
	list_add_tail(&req->list, &ch->free_reqs);
	spin_unlock_irqrestore(&ch->lock, flags);
}

static void srp_finish_req(struct srp_rdma_ch *ch, struct srp_request *req,
			   struct scsi_device *sdev, int result)
{
	struct scsi_cmnd *scmnd = srp_claim_req(ch, req, sdev, NULL);

	if (scmnd) {
		srp_free_req(ch, req, scmnd, 0);
		scmnd->result = result;
		scmnd->scsi_done(scmnd);
	}
}

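/*
 * srp_terminate_io() - finish all outstanding requests for @rport with
 * DID_TRANSPORT_FAILFAST as the SCSI result.
 */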
static void srp_terminate_io(struct srp_rport *rport)
{
	struct srp_target_port *target = rport->lld_data;
	struct srp_rdma_ch *ch = &target->ch;
	struct Scsi_Host *shost = target->scsi_host;
	struct scsi_device *sdev;
	int i;

	/*
	 * Invoking srp_terminate_io() while srp_queuecommand() is running
	 * is not safe. Hence the warning statement below.
	 */
	shost_for_each_device(sdev, shost)
		WARN_ON_ONCE(sdev->request_queue->request_fn_active);

	for (i = 0; i < target->req_ring_size; ++i) {
		struct srp_request *req = &ch->req_ring[i];

		srp_finish_req(ch, req, NULL, DID_TRANSPORT_FAILFAST << 16);
	}
}

/*
 * It is up to the caller to ensure that srp_rport_reconnect() calls are
 * serialized and that no concurrent srp_queuecommand(), srp_abort(),
 * srp_reset_device() or srp_reset_host() calls will occur while this function
 * is in progress. One way to realize that is not to call this function
 * directly but to call srp_reconnect_rport() instead since that last function
 * serializes calls of this function via rport->mutex and also blocks
 * srp_queuecommand() calls before invoking this function.
 */
static int srp_rport_reconnect(struct srp_rport *rport)
{
	struct srp_target_port *target = rport->lld_data;
	struct srp_rdma_ch *ch = &target->ch;
	int i, ret;

	srp_disconnect_target(target);

	if (target->state == SRP_TARGET_SCANNING)
		return -ENODEV;

	/*
	 * Now get a new local CM ID so that we avoid confusing the target in
	 * case things are really fouled up. Doing so also ensures that all CM
	 * callbacks will have finished before a new QP is allocated.
	 */
	ret = srp_new_cm_id(ch);

	for (i = 0; i < target->req_ring_size; ++i) {
		struct srp_request *req = &ch->req_ring[i];

		srp_finish_req(ch, req, NULL, DID_RESET << 16);
	}

	/*
	 * Whether or not creating a new CM ID succeeded, create a new
	 * QP. This guarantees that all callback functions for the old QP have
	 * finished before any send requests are posted on the new QP.
	 */
	ret += srp_create_ch_ib(ch);

	INIT_LIST_HEAD(&ch->free_tx);
	for (i = 0; i < target->queue_size; ++i)
		list_add(&ch->tx_ring[i]->list, &ch->free_tx);

	if (ret == 0)
		ret = srp_connect_ch(ch);

	if (ret == 0)
		shost_printk(KERN_INFO, target->scsi_host,
			     PFX "reconnect succeeded\n");

	return ret;
}

static void srp_map_desc(struct srp_map_state *state, dma_addr_t dma_addr,
			 unsigned int dma_len, u32 rkey)
{
	struct srp_direct_buf *desc = state->desc;

	desc->va = cpu_to_be64(dma_addr);
	desc->key = cpu_to_be32(rkey);
	desc->len = cpu_to_be32(dma_len);

	state->total_len += dma_len;
	state->desc++;
	state->ndesc++;
}

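/*
 * srp_map_finish_fmr() - map the pages accumulated in @state through the
 * channel's FMR pool and emit a single memory descriptor for the mapping.
 */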
static int srp_map_finish_fmr(struct srp_map_state *state,
			      struct srp_rdma_ch *ch)
{
	struct ib_pool_fmr *fmr;
	u64 io_addr = 0;

	fmr = ib_fmr_pool_map_phys(ch->fmr_pool, state->pages,
				   state->npages, io_addr);
	if (IS_ERR(fmr))
		return PTR_ERR(fmr);

	*state->next_fmr++ = fmr;
	state->nmdesc++;

	srp_map_desc(state, 0, state->dma_len, fmr->fmr->rkey);

	return 0;
}

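/*
 * srp_map_finish_fr() - grab a descriptor from the FR pool, post a fast
 * registration work request for the pages accumulated in @state and emit a
 * memory descriptor that uses the newly updated rkey.
 */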
static int srp_map_finish_fr(struct srp_map_state *state,
			     struct srp_rdma_ch *ch)
{
	struct srp_target_port *target = ch->target;
	struct srp_device *dev = target->srp_host->srp_dev;
	struct ib_send_wr *bad_wr;
	struct ib_send_wr wr;
	struct srp_fr_desc *desc;
	u32 rkey;

	desc = srp_fr_pool_get(ch->fr_pool);
	if (!desc)
		return -ENOMEM;

	rkey = ib_inc_rkey(desc->mr->rkey);
	ib_update_fast_reg_key(desc->mr, rkey);

	memcpy(desc->frpl->page_list, state->pages,
	       sizeof(state->pages[0]) * state->npages);

	memset(&wr, 0, sizeof(wr));
	wr.opcode = IB_WR_FAST_REG_MR;
	wr.wr_id = FAST_REG_WR_ID_MASK;
	wr.wr.fast_reg.iova_start = state->base_dma_addr;
	wr.wr.fast_reg.page_list = desc->frpl;
	wr.wr.fast_reg.page_list_len = state->npages;
	wr.wr.fast_reg.page_shift = ilog2(dev->mr_page_size);
	wr.wr.fast_reg.length = state->dma_len;
	wr.wr.fast_reg.access_flags = (IB_ACCESS_LOCAL_WRITE |
				       IB_ACCESS_REMOTE_READ |
				       IB_ACCESS_REMOTE_WRITE);
	wr.wr.fast_reg.rkey = desc->mr->lkey;

	*state->next_fr++ = desc;
	state->nmdesc++;

	srp_map_desc(state, state->base_dma_addr, state->dma_len,
		     desc->mr->rkey);

	return ib_post_send(ch->qp, &wr, &bad_wr);
}

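/*
 * srp_finish_mapping() - close out the pages gathered in @state: use the
 * target rkey directly for a single page unless register_always is set,
 * otherwise register the pages via fast registration or FMR.
 */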
Bart Van Assche539dde62014-05-20 15:05:46 +02001246static int srp_finish_mapping(struct srp_map_state *state,
Bart Van Assche509c07b2014-10-30 14:48:30 +01001247 struct srp_rdma_ch *ch)
Bart Van Assche539dde62014-05-20 15:05:46 +02001248{
Bart Van Assche509c07b2014-10-30 14:48:30 +01001249 struct srp_target_port *target = ch->target;
Bart Van Assche539dde62014-05-20 15:05:46 +02001250 int ret = 0;
1251
1252 if (state->npages == 0)
1253 return 0;
1254
Bart Van Asscheb1b88542014-05-20 15:06:41 +02001255 if (state->npages == 1 && !register_always)
Bart Van Assche52ede082014-05-20 15:07:45 +02001256 srp_map_desc(state, state->base_dma_addr, state->dma_len,
Bart Van Assche539dde62014-05-20 15:05:46 +02001257 target->rkey);
1258 else
Bart Van Assche5cfb1782014-05-20 15:08:34 +02001259 ret = target->srp_host->srp_dev->use_fast_reg ?
Bart Van Assche509c07b2014-10-30 14:48:30 +01001260 srp_map_finish_fr(state, ch) :
1261 srp_map_finish_fmr(state, ch);
Bart Van Assche539dde62014-05-20 15:05:46 +02001262
1263 if (ret == 0) {
1264 state->npages = 0;
Bart Van Assche52ede082014-05-20 15:07:45 +02001265 state->dma_len = 0;
Bart Van Assche539dde62014-05-20 15:05:46 +02001266 }
1267
1268 return ret;
1269}
1270
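/*
 * Remember the first scatterlist entry that has not been mapped via memory
 * registration yet, so that mapping can be restarted from that point if
 * registration fails.
 */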
David Dillow8f26c9f2011-01-14 19:45:50 -05001271static void srp_map_update_start(struct srp_map_state *state,
1272 struct scatterlist *sg, int sg_index,
1273 dma_addr_t dma_addr)
1274{
1275 state->unmapped_sg = sg;
1276 state->unmapped_index = sg_index;
1277 state->unmapped_addr = dma_addr;
1278}
1279
1280static int srp_map_sg_entry(struct srp_map_state *state,
Bart Van Assche509c07b2014-10-30 14:48:30 +01001281 struct srp_rdma_ch *ch,
David Dillow8f26c9f2011-01-14 19:45:50 -05001282 struct scatterlist *sg, int sg_index,
Bart Van Assche5cfb1782014-05-20 15:08:34 +02001283 bool use_mr)
David Dillow8f26c9f2011-01-14 19:45:50 -05001284{
Bart Van Assche509c07b2014-10-30 14:48:30 +01001285 struct srp_target_port *target = ch->target;
Greg Kroah-Hartman05321932008-03-06 00:13:36 +01001286 struct srp_device *dev = target->srp_host->srp_dev;
Ralph Campbell85507bc2006-12-12 14:30:55 -08001287 struct ib_device *ibdev = dev->dev;
David Dillow8f26c9f2011-01-14 19:45:50 -05001288 dma_addr_t dma_addr = ib_sg_dma_address(ibdev, sg);
1289 unsigned int dma_len = ib_sg_dma_len(ibdev, sg);
1290 unsigned int len;
1291 int ret;
Roland Dreierf5358a12006-06-17 20:37:29 -07001292
David Dillow8f26c9f2011-01-14 19:45:50 -05001293 if (!dma_len)
1294 return 0;
Roland Dreierf5358a12006-06-17 20:37:29 -07001295
Bart Van Assche5cfb1782014-05-20 15:08:34 +02001296 if (!use_mr) {
1297 /*
1298 * Once we're in direct map mode for a request, we don't
1299 * go back to FMR or FR mode, so no need to update anything
David Dillow8f26c9f2011-01-14 19:45:50 -05001300 * other than the descriptor.
1301 */
1302 srp_map_desc(state, dma_addr, dma_len, target->rkey);
1303 return 0;
1304 }
Ishai Rabinovitz559ce8f2006-08-03 10:35:43 -07001305
Bart Van Assche5cfb1782014-05-20 15:08:34 +02001306 /*
1307 * Since not all RDMA HW drivers support non-zero page offsets for
1308 * FMR, if we start at an offset into a page, don't merge into the
1309 * current FMR mapping. Finish it out, and use the kernel's MR for
1310 * this sg entry.
David Dillow8f26c9f2011-01-14 19:45:50 -05001311 */
Bart Van Assche5cfb1782014-05-20 15:08:34 +02001312 if ((!dev->use_fast_reg && dma_addr & ~dev->mr_page_mask) ||
1313 dma_len > dev->mr_max_size) {
Bart Van Assche509c07b2014-10-30 14:48:30 +01001314 ret = srp_finish_mapping(state, ch);
David Dillow8f26c9f2011-01-14 19:45:50 -05001315 if (ret)
1316 return ret;
Ralph Campbell85507bc2006-12-12 14:30:55 -08001317
David Dillow8f26c9f2011-01-14 19:45:50 -05001318 srp_map_desc(state, dma_addr, dma_len, target->rkey);
1319 srp_map_update_start(state, NULL, 0, 0);
1320 return 0;
1321 }
1322
Bart Van Assche5cfb1782014-05-20 15:08:34 +02001323 /*
1324 * If this is the first sg that will be mapped via FMR or via FR, save
1325 * our position. We need to know the first unmapped entry, its index,
1326 * and the first unmapped address within that entry to be able to
1327 * restart mapping after an error.
David Dillow8f26c9f2011-01-14 19:45:50 -05001328 */
1329 if (!state->unmapped_sg)
1330 srp_map_update_start(state, sg, sg_index, dma_addr);
1331
1332 while (dma_len) {
Bart Van Assche5cfb1782014-05-20 15:08:34 +02001333 unsigned offset = dma_addr & ~dev->mr_page_mask;
1334 if (state->npages == dev->max_pages_per_mr || offset != 0) {
Bart Van Assche509c07b2014-10-30 14:48:30 +01001335 ret = srp_finish_mapping(state, ch);
David Dillow8f26c9f2011-01-14 19:45:50 -05001336 if (ret)
1337 return ret;
1338
1339 srp_map_update_start(state, sg, sg_index, dma_addr);
Roland Dreierf5358a12006-06-17 20:37:29 -07001340 }
1341
Bart Van Assche5cfb1782014-05-20 15:08:34 +02001342 len = min_t(unsigned int, dma_len, dev->mr_page_size - offset);
David Dillow8f26c9f2011-01-14 19:45:50 -05001343
1344 if (!state->npages)
1345 state->base_dma_addr = dma_addr;
Bart Van Assche5cfb1782014-05-20 15:08:34 +02001346 state->pages[state->npages++] = dma_addr & dev->mr_page_mask;
Bart Van Assche52ede082014-05-20 15:07:45 +02001347 state->dma_len += len;
David Dillow8f26c9f2011-01-14 19:45:50 -05001348 dma_addr += len;
1349 dma_len -= len;
Roland Dreierf5358a12006-06-17 20:37:29 -07001350 }
1351
Bart Van Assche5cfb1782014-05-20 15:08:34 +02001352 /*
1353 * If the last entry of the MR wasn't a full page, then we need to
David Dillow8f26c9f2011-01-14 19:45:50 -05001354 * close it out and start a new one -- we can only merge at page
1355	 * boundaries.

1356 */
Roland Dreierf5358a12006-06-17 20:37:29 -07001357 ret = 0;
Bart Van Assche52ede082014-05-20 15:07:45 +02001358 if (len != dev->mr_page_size) {
Bart Van Assche509c07b2014-10-30 14:48:30 +01001359 ret = srp_finish_mapping(state, ch);
David Dillow8f26c9f2011-01-14 19:45:50 -05001360 if (!ret)
1361 srp_map_update_start(state, NULL, 0, 0);
1362 }
Roland Dreierf5358a12006-06-17 20:37:29 -07001363 return ret;
1364}
1365
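/*
 * Map all scatterlist entries of a request. If registering memory for an
 * entry fails, backtrack to the first unmapped entry and map the remainder
 * of the scatterlist without memory registration, using one descriptor per
 * entry and the rkey of the DMA MR.
 */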
Bart Van Assche509c07b2014-10-30 14:48:30 +01001366static int srp_map_sg(struct srp_map_state *state, struct srp_rdma_ch *ch,
1367 struct srp_request *req, struct scatterlist *scat,
1368 int count)
Bart Van Assche76bc1e12014-05-20 15:05:21 +02001369{
Bart Van Assche509c07b2014-10-30 14:48:30 +01001370 struct srp_target_port *target = ch->target;
Bart Van Assche76bc1e12014-05-20 15:05:21 +02001371 struct srp_device *dev = target->srp_host->srp_dev;
1372 struct ib_device *ibdev = dev->dev;
1373 struct scatterlist *sg;
Bart Van Assche5cfb1782014-05-20 15:08:34 +02001374 int i;
1375 bool use_mr;
Bart Van Assche76bc1e12014-05-20 15:05:21 +02001376
1377 state->desc = req->indirect_desc;
1378 state->pages = req->map_page;
Bart Van Assche5cfb1782014-05-20 15:08:34 +02001379 if (dev->use_fast_reg) {
1380 state->next_fr = req->fr_list;
Bart Van Assche509c07b2014-10-30 14:48:30 +01001381 use_mr = !!ch->fr_pool;
Bart Van Assche5cfb1782014-05-20 15:08:34 +02001382 } else {
1383 state->next_fmr = req->fmr_list;
Bart Van Assche509c07b2014-10-30 14:48:30 +01001384 use_mr = !!ch->fmr_pool;
Bart Van Assche5cfb1782014-05-20 15:08:34 +02001385 }
Bart Van Assche76bc1e12014-05-20 15:05:21 +02001386
1387 for_each_sg(scat, sg, count, i) {
Bart Van Assche509c07b2014-10-30 14:48:30 +01001388 if (srp_map_sg_entry(state, ch, sg, i, use_mr)) {
Bart Van Assche5cfb1782014-05-20 15:08:34 +02001389 /*
1390 * Memory registration failed, so backtrack to the
1391 * first unmapped entry and continue on without using
1392 * memory registration.
Bart Van Assche76bc1e12014-05-20 15:05:21 +02001393 */
1394 dma_addr_t dma_addr;
1395 unsigned int dma_len;
1396
1397backtrack:
1398 sg = state->unmapped_sg;
1399 i = state->unmapped_index;
1400
1401 dma_addr = ib_sg_dma_address(ibdev, sg);
1402 dma_len = ib_sg_dma_len(ibdev, sg);
1403 dma_len -= (state->unmapped_addr - dma_addr);
1404 dma_addr = state->unmapped_addr;
Bart Van Assche5cfb1782014-05-20 15:08:34 +02001405 use_mr = false;
Bart Van Assche76bc1e12014-05-20 15:05:21 +02001406 srp_map_desc(state, dma_addr, dma_len, target->rkey);
1407 }
1408 }
1409
Bart Van Assche509c07b2014-10-30 14:48:30 +01001410 if (use_mr && srp_finish_mapping(state, ch))
Bart Van Assche76bc1e12014-05-20 15:05:21 +02001411 goto backtrack;
1412
Bart Van Assche52ede082014-05-20 15:07:45 +02001413 req->nmdesc = state->nmdesc;
Bart Van Assche5cfb1782014-05-20 15:08:34 +02001414
1415 return 0;
Bart Van Assche76bc1e12014-05-20 15:05:21 +02001416}
1417
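/*
 * Build the data buffer descriptor of an SRP_CMD information unit: either a
 * single direct descriptor or an indirect descriptor table, depending on how
 * many memory descriptors are needed. Returns the total length of the
 * SRP_CMD information unit or a negative error code.
 */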
Bart Van Assche509c07b2014-10-30 14:48:30 +01001418static int srp_map_data(struct scsi_cmnd *scmnd, struct srp_rdma_ch *ch,
Roland Dreieraef9ec32005-11-02 14:07:13 -08001419 struct srp_request *req)
1420{
Bart Van Assche509c07b2014-10-30 14:48:30 +01001421 struct srp_target_port *target = ch->target;
Bart Van Assche76bc1e12014-05-20 15:05:21 +02001422 struct scatterlist *scat;
Roland Dreieraef9ec32005-11-02 14:07:13 -08001423 struct srp_cmd *cmd = req->cmd->buf;
Bart Van Assche76bc1e12014-05-20 15:05:21 +02001424 int len, nents, count;
Ralph Campbell85507bc2006-12-12 14:30:55 -08001425 struct srp_device *dev;
1426 struct ib_device *ibdev;
David Dillow8f26c9f2011-01-14 19:45:50 -05001427 struct srp_map_state state;
1428 struct srp_indirect_buf *indirect_hdr;
David Dillow8f26c9f2011-01-14 19:45:50 -05001429 u32 table_len;
1430 u8 fmt;
Roland Dreieraef9ec32005-11-02 14:07:13 -08001431
FUJITA Tomonoribb350d12007-05-26 02:28:25 +09001432 if (!scsi_sglist(scmnd) || scmnd->sc_data_direction == DMA_NONE)
Roland Dreieraef9ec32005-11-02 14:07:13 -08001433 return sizeof (struct srp_cmd);
1434
1435 if (scmnd->sc_data_direction != DMA_FROM_DEVICE &&
1436 scmnd->sc_data_direction != DMA_TO_DEVICE) {
David Dillow7aa54bd2008-01-07 18:23:41 -05001437 shost_printk(KERN_WARNING, target->scsi_host,
1438 PFX "Unhandled data direction %d\n",
1439 scmnd->sc_data_direction);
Roland Dreieraef9ec32005-11-02 14:07:13 -08001440 return -EINVAL;
1441 }
1442
FUJITA Tomonoribb350d12007-05-26 02:28:25 +09001443 nents = scsi_sg_count(scmnd);
1444 scat = scsi_sglist(scmnd);
Roland Dreiercf368712006-03-24 15:47:26 -08001445
Greg Kroah-Hartman05321932008-03-06 00:13:36 +01001446 dev = target->srp_host->srp_dev;
Ralph Campbell85507bc2006-12-12 14:30:55 -08001447 ibdev = dev->dev;
1448
1449 count = ib_dma_map_sg(ibdev, scat, nents, scmnd->sc_data_direction);
David Dillow8f26c9f2011-01-14 19:45:50 -05001450 if (unlikely(count == 0))
1451 return -EIO;
Roland Dreierf5358a12006-06-17 20:37:29 -07001452
1453 fmt = SRP_DATA_DESC_DIRECT;
1454 len = sizeof (struct srp_cmd) + sizeof (struct srp_direct_buf);
Roland Dreiercf368712006-03-24 15:47:26 -08001455
Bart Van Asscheb1b88542014-05-20 15:06:41 +02001456 if (count == 1 && !register_always) {
Roland Dreierf5358a12006-06-17 20:37:29 -07001457 /*
1458 * The midlayer only generated a single gather/scatter
1459 * entry, or DMA mapping coalesced everything to a
1460 * single entry. So a direct descriptor along with
1461 * the DMA MR suffices.
1462 */
Roland Dreieraef9ec32005-11-02 14:07:13 -08001463 struct srp_direct_buf *buf = (void *) cmd->add_data;
Roland Dreieraef9ec32005-11-02 14:07:13 -08001464
Ralph Campbell85507bc2006-12-12 14:30:55 -08001465 buf->va = cpu_to_be64(ib_sg_dma_address(ibdev, scat));
David Dillow9af76272010-11-26 15:34:46 -05001466 buf->key = cpu_to_be32(target->rkey);
Ralph Campbell85507bc2006-12-12 14:30:55 -08001467 buf->len = cpu_to_be32(ib_sg_dma_len(ibdev, scat));
Roland Dreiercf368712006-03-24 15:47:26 -08001468
Bart Van Assche52ede082014-05-20 15:07:45 +02001469 req->nmdesc = 0;
David Dillow8f26c9f2011-01-14 19:45:50 -05001470 goto map_complete;
Roland Dreieraef9ec32005-11-02 14:07:13 -08001471 }
1472
Bart Van Assche5cfb1782014-05-20 15:08:34 +02001473 /*
1474 * We have more than one scatter/gather entry, so build our indirect
1475 * descriptor table, trying to merge as many entries as we can.
David Dillow8f26c9f2011-01-14 19:45:50 -05001476 */
1477 indirect_hdr = (void *) cmd->add_data;
1478
David Dillowc07d4242011-01-16 13:57:10 -05001479 ib_dma_sync_single_for_cpu(ibdev, req->indirect_dma_addr,
1480 target->indirect_size, DMA_TO_DEVICE);
1481
David Dillow8f26c9f2011-01-14 19:45:50 -05001482 memset(&state, 0, sizeof(state));
Bart Van Assche509c07b2014-10-30 14:48:30 +01001483 srp_map_sg(&state, ch, req, scat, count);
David Dillow8f26c9f2011-01-14 19:45:50 -05001484
David Dillowc07d4242011-01-16 13:57:10 -05001485 /* We've mapped the request, now pull as much of the indirect
1486 * descriptor table as we can into the command buffer. If this
1487 * target is not using an external indirect table, we are
1488 * guaranteed to fit into the command, as the SCSI layer won't
1489 * give us more S/G entries than we allow.
David Dillow8f26c9f2011-01-14 19:45:50 -05001490 */
David Dillow8f26c9f2011-01-14 19:45:50 -05001491 if (state.ndesc == 1) {
Bart Van Assche5cfb1782014-05-20 15:08:34 +02001492 /*
1493 * Memory registration collapsed the sg-list into one entry,
David Dillow8f26c9f2011-01-14 19:45:50 -05001494 * so use a direct descriptor.
1495 */
1496 struct srp_direct_buf *buf = (void *) cmd->add_data;
1497
David Dillowc07d4242011-01-16 13:57:10 -05001498 *buf = req->indirect_desc[0];
David Dillow8f26c9f2011-01-14 19:45:50 -05001499 goto map_complete;
1500 }
1501
David Dillowc07d4242011-01-16 13:57:10 -05001502 if (unlikely(target->cmd_sg_cnt < state.ndesc &&
1503 !target->allow_ext_sg)) {
1504 shost_printk(KERN_ERR, target->scsi_host,
1505 "Could not fit S/G list into SRP_CMD\n");
1506 return -EIO;
1507 }
1508
1509 count = min(state.ndesc, target->cmd_sg_cnt);
David Dillow8f26c9f2011-01-14 19:45:50 -05001510 table_len = state.ndesc * sizeof (struct srp_direct_buf);
1511
1512 fmt = SRP_DATA_DESC_INDIRECT;
1513 len = sizeof(struct srp_cmd) + sizeof (struct srp_indirect_buf);
David Dillowc07d4242011-01-16 13:57:10 -05001514 len += count * sizeof (struct srp_direct_buf);
David Dillow8f26c9f2011-01-14 19:45:50 -05001515
David Dillowc07d4242011-01-16 13:57:10 -05001516 memcpy(indirect_hdr->desc_list, req->indirect_desc,
1517 count * sizeof (struct srp_direct_buf));
David Dillow8f26c9f2011-01-14 19:45:50 -05001518
David Dillowc07d4242011-01-16 13:57:10 -05001519 indirect_hdr->table_desc.va = cpu_to_be64(req->indirect_dma_addr);
David Dillow8f26c9f2011-01-14 19:45:50 -05001520 indirect_hdr->table_desc.key = cpu_to_be32(target->rkey);
1521 indirect_hdr->table_desc.len = cpu_to_be32(table_len);
1522 indirect_hdr->len = cpu_to_be32(state.total_len);
1523
1524 if (scmnd->sc_data_direction == DMA_TO_DEVICE)
David Dillowc07d4242011-01-16 13:57:10 -05001525 cmd->data_out_desc_cnt = count;
David Dillow8f26c9f2011-01-14 19:45:50 -05001526 else
David Dillowc07d4242011-01-16 13:57:10 -05001527 cmd->data_in_desc_cnt = count;
1528
1529 ib_dma_sync_single_for_device(ibdev, req->indirect_dma_addr, table_len,
1530 DMA_TO_DEVICE);
David Dillow8f26c9f2011-01-14 19:45:50 -05001531
1532map_complete:
Roland Dreieraef9ec32005-11-02 14:07:13 -08001533 if (scmnd->sc_data_direction == DMA_TO_DEVICE)
1534 cmd->buf_fmt = fmt << 4;
1535 else
1536 cmd->buf_fmt = fmt;
1537
Roland Dreieraef9ec32005-11-02 14:07:13 -08001538 return len;
1539}
1540
David Dillow05a1d752010-10-08 14:48:14 -04001541/*
Bart Van Assche76c75b22010-11-26 14:37:47 -05001542 * Return an IU, and possibly a credit, to the free pool
1543 */
Bart Van Assche509c07b2014-10-30 14:48:30 +01001544static void srp_put_tx_iu(struct srp_rdma_ch *ch, struct srp_iu *iu,
Bart Van Assche76c75b22010-11-26 14:37:47 -05001545 enum srp_iu_type iu_type)
1546{
1547 unsigned long flags;
1548
Bart Van Assche509c07b2014-10-30 14:48:30 +01001549 spin_lock_irqsave(&ch->lock, flags);
1550 list_add(&iu->list, &ch->free_tx);
Bart Van Assche76c75b22010-11-26 14:37:47 -05001551 if (iu_type != SRP_IU_RSP)
Bart Van Assche509c07b2014-10-30 14:48:30 +01001552 ++ch->req_lim;
1553 spin_unlock_irqrestore(&ch->lock, flags);
Bart Van Assche76c75b22010-11-26 14:37:47 -05001554}
1555
1556/*
Bart Van Assche509c07b2014-10-30 14:48:30 +01001557 * Must be called with ch->lock held to protect req_lim and free_tx.
Bart Van Asschee9684672010-11-26 15:08:38 -05001558 * If IU is not sent, it must be returned using srp_put_tx_iu().
David Dillow05a1d752010-10-08 14:48:14 -04001559 *
1560 * Note:
1561 * An upper limit for the number of allocated information units for each
1562 * request type is:
1563 * - SRP_IU_CMD: SRP_CMD_SQ_SIZE, since the SCSI mid-layer never queues
1564 * more than Scsi_Host.can_queue requests.
1565 * - SRP_IU_TSK_MGMT: SRP_TSK_MGMT_SQ_SIZE.
1566 * - SRP_IU_RSP: 1, since a conforming SRP target never sends more than
1567 * one unanswered SRP request to an initiator.
1568 */
Bart Van Assche509c07b2014-10-30 14:48:30 +01001569static struct srp_iu *__srp_get_tx_iu(struct srp_rdma_ch *ch,
David Dillow05a1d752010-10-08 14:48:14 -04001570 enum srp_iu_type iu_type)
1571{
Bart Van Assche509c07b2014-10-30 14:48:30 +01001572 struct srp_target_port *target = ch->target;
David Dillow05a1d752010-10-08 14:48:14 -04001573 s32 rsv = (iu_type == SRP_IU_TSK_MGMT) ? 0 : SRP_TSK_MGMT_SQ_SIZE;
1574 struct srp_iu *iu;
1575
Bart Van Assche509c07b2014-10-30 14:48:30 +01001576 srp_send_completion(ch->send_cq, ch);
David Dillow05a1d752010-10-08 14:48:14 -04001577
Bart Van Assche509c07b2014-10-30 14:48:30 +01001578 if (list_empty(&ch->free_tx))
David Dillow05a1d752010-10-08 14:48:14 -04001579 return NULL;
1580
1581 /* Initiator responses to target requests do not consume credits */
Bart Van Assche76c75b22010-11-26 14:37:47 -05001582 if (iu_type != SRP_IU_RSP) {
Bart Van Assche509c07b2014-10-30 14:48:30 +01001583 if (ch->req_lim <= rsv) {
Bart Van Assche76c75b22010-11-26 14:37:47 -05001584 ++target->zero_req_lim;
1585 return NULL;
1586 }
1587
Bart Van Assche509c07b2014-10-30 14:48:30 +01001588 --ch->req_lim;
David Dillow05a1d752010-10-08 14:48:14 -04001589 }
1590
Bart Van Assche509c07b2014-10-30 14:48:30 +01001591 iu = list_first_entry(&ch->free_tx, struct srp_iu, list);
Bart Van Assche76c75b22010-11-26 14:37:47 -05001592 list_del(&iu->list);
David Dillow05a1d752010-10-08 14:48:14 -04001593 return iu;
1594}
1595
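/*
 * Post a send work request for @iu on the channel's queue pair. The IU
 * pointer is stored in the work request ID so that the send completion
 * handler can return the IU to the free_tx list.
 */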
Bart Van Assche509c07b2014-10-30 14:48:30 +01001596static int srp_post_send(struct srp_rdma_ch *ch, struct srp_iu *iu, int len)
David Dillow05a1d752010-10-08 14:48:14 -04001597{
Bart Van Assche509c07b2014-10-30 14:48:30 +01001598 struct srp_target_port *target = ch->target;
David Dillow05a1d752010-10-08 14:48:14 -04001599 struct ib_sge list;
1600 struct ib_send_wr wr, *bad_wr;
David Dillow05a1d752010-10-08 14:48:14 -04001601
1602 list.addr = iu->dma;
1603 list.length = len;
David Dillow9af76272010-11-26 15:34:46 -05001604 list.lkey = target->lkey;
David Dillow05a1d752010-10-08 14:48:14 -04001605
1606 wr.next = NULL;
Bart Van Asschedcb4cb82010-11-26 13:22:48 -05001607 wr.wr_id = (uintptr_t) iu;
David Dillow05a1d752010-10-08 14:48:14 -04001608 wr.sg_list = &list;
1609 wr.num_sge = 1;
1610 wr.opcode = IB_WR_SEND;
1611 wr.send_flags = IB_SEND_SIGNALED;
1612
Bart Van Assche509c07b2014-10-30 14:48:30 +01001613 return ib_post_send(ch->qp, &wr, &bad_wr);
David Dillow05a1d752010-10-08 14:48:14 -04001614}
1615
Bart Van Assche509c07b2014-10-30 14:48:30 +01001616static int srp_post_recv(struct srp_rdma_ch *ch, struct srp_iu *iu)
Bart Van Asschec996bb42010-07-30 10:59:05 +00001617{
Bart Van Assche509c07b2014-10-30 14:48:30 +01001618 struct srp_target_port *target = ch->target;
Bart Van Asschec996bb42010-07-30 10:59:05 +00001619 struct ib_recv_wr wr, *bad_wr;
Bart Van Asschedcb4cb82010-11-26 13:22:48 -05001620 struct ib_sge list;
Bart Van Asschec996bb42010-07-30 10:59:05 +00001621
1622 list.addr = iu->dma;
1623 list.length = iu->size;
David Dillow9af76272010-11-26 15:34:46 -05001624 list.lkey = target->lkey;
Bart Van Asschec996bb42010-07-30 10:59:05 +00001625
1626 wr.next = NULL;
Bart Van Asschedcb4cb82010-11-26 13:22:48 -05001627 wr.wr_id = (uintptr_t) iu;
Bart Van Asschec996bb42010-07-30 10:59:05 +00001628 wr.sg_list = &list;
1629 wr.num_sge = 1;
1630
Bart Van Assche509c07b2014-10-30 14:48:30 +01001631 return ib_post_recv(ch->qp, &wr, &bad_wr);
Bart Van Asschec996bb42010-07-30 10:59:05 +00001632}
1633
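/*
 * Process an SRP_RSP information unit: task management responses complete
 * ch->tsk_mgmt_done, while command responses are matched to the originating
 * request through the tag, after which sense data and residual counts are
 * copied and the request is freed.
 */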
Bart Van Assche509c07b2014-10-30 14:48:30 +01001634static void srp_process_rsp(struct srp_rdma_ch *ch, struct srp_rsp *rsp)
Roland Dreieraef9ec32005-11-02 14:07:13 -08001635{
Bart Van Assche509c07b2014-10-30 14:48:30 +01001636 struct srp_target_port *target = ch->target;
Roland Dreieraef9ec32005-11-02 14:07:13 -08001637 struct srp_request *req;
1638 struct scsi_cmnd *scmnd;
1639 unsigned long flags;
Roland Dreieraef9ec32005-11-02 14:07:13 -08001640
Roland Dreieraef9ec32005-11-02 14:07:13 -08001641 if (unlikely(rsp->tag & SRP_TAG_TSK_MGMT)) {
Bart Van Assche509c07b2014-10-30 14:48:30 +01001642 spin_lock_irqsave(&ch->lock, flags);
1643 ch->req_lim += be32_to_cpu(rsp->req_lim_delta);
1644 spin_unlock_irqrestore(&ch->lock, flags);
Bart Van Assche94a91742010-11-26 14:50:09 -05001645
Bart Van Assche509c07b2014-10-30 14:48:30 +01001646 ch->tsk_mgmt_status = -1;
David Dillowf8b6e312010-11-26 13:02:21 -05001647 if (be32_to_cpu(rsp->resp_data_len) >= 4)
Bart Van Assche509c07b2014-10-30 14:48:30 +01001648 ch->tsk_mgmt_status = rsp->data[3];
1649 complete(&ch->tsk_mgmt_done);
Roland Dreieraef9ec32005-11-02 14:07:13 -08001650 } else {
Bart Van Assche509c07b2014-10-30 14:48:30 +01001651 req = &ch->req_ring[rsp->tag];
1652 scmnd = srp_claim_req(ch, req, NULL, NULL);
Bart Van Assche22032992012-08-14 13:18:53 +00001653 if (!scmnd) {
David Dillow7aa54bd2008-01-07 18:23:41 -05001654 shost_printk(KERN_ERR, target->scsi_host,
1655 "Null scmnd for RSP w/tag %016llx\n",
1656 (unsigned long long) rsp->tag);
Bart Van Assche22032992012-08-14 13:18:53 +00001657
Bart Van Assche509c07b2014-10-30 14:48:30 +01001658 spin_lock_irqsave(&ch->lock, flags);
1659 ch->req_lim += be32_to_cpu(rsp->req_lim_delta);
1660 spin_unlock_irqrestore(&ch->lock, flags);
Bart Van Assche22032992012-08-14 13:18:53 +00001661
1662 return;
1663 }
Roland Dreieraef9ec32005-11-02 14:07:13 -08001664 scmnd->result = rsp->status;
1665
1666 if (rsp->flags & SRP_RSP_FLAG_SNSVALID) {
1667 memcpy(scmnd->sense_buffer, rsp->data +
1668 be32_to_cpu(rsp->resp_data_len),
1669 min_t(int, be32_to_cpu(rsp->sense_data_len),
1670 SCSI_SENSE_BUFFERSIZE));
1671 }
1672
Bart Van Asschee7145312014-07-09 15:57:51 +02001673 if (unlikely(rsp->flags & SRP_RSP_FLAG_DIUNDER))
FUJITA Tomonoribb350d12007-05-26 02:28:25 +09001674 scsi_set_resid(scmnd, be32_to_cpu(rsp->data_in_res_cnt));
Bart Van Asschee7145312014-07-09 15:57:51 +02001675 else if (unlikely(rsp->flags & SRP_RSP_FLAG_DIOVER))
1676 scsi_set_resid(scmnd, -be32_to_cpu(rsp->data_in_res_cnt));
1677 else if (unlikely(rsp->flags & SRP_RSP_FLAG_DOUNDER))
1678 scsi_set_resid(scmnd, be32_to_cpu(rsp->data_out_res_cnt));
1679 else if (unlikely(rsp->flags & SRP_RSP_FLAG_DOOVER))
1680 scsi_set_resid(scmnd, -be32_to_cpu(rsp->data_out_res_cnt));
Roland Dreieraef9ec32005-11-02 14:07:13 -08001681
Bart Van Assche509c07b2014-10-30 14:48:30 +01001682 srp_free_req(ch, req, scmnd,
Bart Van Assche22032992012-08-14 13:18:53 +00001683 be32_to_cpu(rsp->req_lim_delta));
1684
David Dillowf8b6e312010-11-26 13:02:21 -05001685 scmnd->host_scribble = NULL;
1686 scmnd->scsi_done(scmnd);
Roland Dreieraef9ec32005-11-02 14:07:13 -08001687 }
Roland Dreieraef9ec32005-11-02 14:07:13 -08001688}
1689
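/*
 * Send a response IU (e.g. SRP_CRED_RSP or SRP_AER_RSP) of @len bytes back
 * to the target after adding @req_delta to the request limit.
 */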
Bart Van Assche509c07b2014-10-30 14:48:30 +01001690static int srp_response_common(struct srp_rdma_ch *ch, s32 req_delta,
David Dillowbb125882010-10-08 14:40:47 -04001691 void *rsp, int len)
1692{
Bart Van Assche509c07b2014-10-30 14:48:30 +01001693 struct srp_target_port *target = ch->target;
Bart Van Assche76c75b22010-11-26 14:37:47 -05001694 struct ib_device *dev = target->srp_host->srp_dev->dev;
David Dillowbb125882010-10-08 14:40:47 -04001695 unsigned long flags;
1696 struct srp_iu *iu;
Bart Van Assche76c75b22010-11-26 14:37:47 -05001697 int err;
David Dillowbb125882010-10-08 14:40:47 -04001698
Bart Van Assche509c07b2014-10-30 14:48:30 +01001699 spin_lock_irqsave(&ch->lock, flags);
1700 ch->req_lim += req_delta;
1701 iu = __srp_get_tx_iu(ch, SRP_IU_RSP);
1702 spin_unlock_irqrestore(&ch->lock, flags);
Bart Van Assche76c75b22010-11-26 14:37:47 -05001703
David Dillowbb125882010-10-08 14:40:47 -04001704 if (!iu) {
1705 shost_printk(KERN_ERR, target->scsi_host, PFX
1706 "no IU available to send response\n");
Bart Van Assche76c75b22010-11-26 14:37:47 -05001707 return 1;
David Dillowbb125882010-10-08 14:40:47 -04001708 }
1709
1710 ib_dma_sync_single_for_cpu(dev, iu->dma, len, DMA_TO_DEVICE);
1711 memcpy(iu->buf, rsp, len);
1712 ib_dma_sync_single_for_device(dev, iu->dma, len, DMA_TO_DEVICE);
1713
Bart Van Assche509c07b2014-10-30 14:48:30 +01001714 err = srp_post_send(ch, iu, len);
Bart Van Assche76c75b22010-11-26 14:37:47 -05001715 if (err) {
David Dillowbb125882010-10-08 14:40:47 -04001716 shost_printk(KERN_ERR, target->scsi_host, PFX
1717 "unable to post response: %d\n", err);
Bart Van Assche509c07b2014-10-30 14:48:30 +01001718 srp_put_tx_iu(ch, iu, SRP_IU_RSP);
Bart Van Assche76c75b22010-11-26 14:37:47 -05001719 }
David Dillowbb125882010-10-08 14:40:47 -04001720
David Dillowbb125882010-10-08 14:40:47 -04001721 return err;
1722}
1723
Bart Van Assche509c07b2014-10-30 14:48:30 +01001724static void srp_process_cred_req(struct srp_rdma_ch *ch,
David Dillowbb125882010-10-08 14:40:47 -04001725 struct srp_cred_req *req)
1726{
1727 struct srp_cred_rsp rsp = {
1728 .opcode = SRP_CRED_RSP,
1729 .tag = req->tag,
1730 };
1731 s32 delta = be32_to_cpu(req->req_lim_delta);
1732
Bart Van Assche509c07b2014-10-30 14:48:30 +01001733 if (srp_response_common(ch, delta, &rsp, sizeof(rsp)))
1734 shost_printk(KERN_ERR, ch->target->scsi_host, PFX
David Dillowbb125882010-10-08 14:40:47 -04001735 "problems processing SRP_CRED_REQ\n");
1736}
1737
Bart Van Assche509c07b2014-10-30 14:48:30 +01001738static void srp_process_aer_req(struct srp_rdma_ch *ch,
David Dillowbb125882010-10-08 14:40:47 -04001739 struct srp_aer_req *req)
1740{
Bart Van Assche509c07b2014-10-30 14:48:30 +01001741 struct srp_target_port *target = ch->target;
David Dillowbb125882010-10-08 14:40:47 -04001742 struct srp_aer_rsp rsp = {
1743 .opcode = SRP_AER_RSP,
1744 .tag = req->tag,
1745 };
1746 s32 delta = be32_to_cpu(req->req_lim_delta);
1747
1748 shost_printk(KERN_ERR, target->scsi_host, PFX
1749 "ignoring AER for LUN %llu\n", be64_to_cpu(req->lun));
1750
Bart Van Assche509c07b2014-10-30 14:48:30 +01001751 if (srp_response_common(ch, delta, &rsp, sizeof(rsp)))
David Dillowbb125882010-10-08 14:40:47 -04001752 shost_printk(KERN_ERR, target->scsi_host, PFX
1753 "problems processing SRP_AER_REQ\n");
1754}
1755
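/*
 * Dispatch a received information unit based on its SRP opcode and repost
 * the receive buffer afterwards.
 */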
Bart Van Assche509c07b2014-10-30 14:48:30 +01001756static void srp_handle_recv(struct srp_rdma_ch *ch, struct ib_wc *wc)
Roland Dreieraef9ec32005-11-02 14:07:13 -08001757{
Bart Van Assche509c07b2014-10-30 14:48:30 +01001758 struct srp_target_port *target = ch->target;
Bart Van Asschedcb4cb82010-11-26 13:22:48 -05001759 struct ib_device *dev = target->srp_host->srp_dev->dev;
Roland Dreier737b94e2011-05-23 11:30:04 -07001760 struct srp_iu *iu = (struct srp_iu *) (uintptr_t) wc->wr_id;
Bart Van Asschec996bb42010-07-30 10:59:05 +00001761 int res;
Roland Dreieraef9ec32005-11-02 14:07:13 -08001762 u8 opcode;
1763
Bart Van Assche509c07b2014-10-30 14:48:30 +01001764 ib_dma_sync_single_for_cpu(dev, iu->dma, ch->max_ti_iu_len,
Ralph Campbell85507bc2006-12-12 14:30:55 -08001765 DMA_FROM_DEVICE);
Roland Dreieraef9ec32005-11-02 14:07:13 -08001766
1767 opcode = *(u8 *) iu->buf;
1768
1769 if (0) {
David Dillow7aa54bd2008-01-07 18:23:41 -05001770 shost_printk(KERN_ERR, target->scsi_host,
1771 PFX "recv completion, opcode 0x%02x\n", opcode);
Bart Van Assche7a700812010-07-29 15:56:37 +00001772 print_hex_dump(KERN_ERR, "", DUMP_PREFIX_OFFSET, 8, 1,
1773 iu->buf, wc->byte_len, true);
Roland Dreieraef9ec32005-11-02 14:07:13 -08001774 }
1775
1776 switch (opcode) {
1777 case SRP_RSP:
Bart Van Assche509c07b2014-10-30 14:48:30 +01001778 srp_process_rsp(ch, iu->buf);
Roland Dreieraef9ec32005-11-02 14:07:13 -08001779 break;
1780
David Dillowbb125882010-10-08 14:40:47 -04001781 case SRP_CRED_REQ:
Bart Van Assche509c07b2014-10-30 14:48:30 +01001782 srp_process_cred_req(ch, iu->buf);
David Dillowbb125882010-10-08 14:40:47 -04001783 break;
1784
1785 case SRP_AER_REQ:
Bart Van Assche509c07b2014-10-30 14:48:30 +01001786 srp_process_aer_req(ch, iu->buf);
David Dillowbb125882010-10-08 14:40:47 -04001787 break;
1788
Roland Dreieraef9ec32005-11-02 14:07:13 -08001789 case SRP_T_LOGOUT:
1790 /* XXX Handle target logout */
David Dillow7aa54bd2008-01-07 18:23:41 -05001791 shost_printk(KERN_WARNING, target->scsi_host,
1792 PFX "Got target logout request\n");
Roland Dreieraef9ec32005-11-02 14:07:13 -08001793 break;
1794
1795 default:
David Dillow7aa54bd2008-01-07 18:23:41 -05001796 shost_printk(KERN_WARNING, target->scsi_host,
1797 PFX "Unhandled SRP opcode 0x%02x\n", opcode);
Roland Dreieraef9ec32005-11-02 14:07:13 -08001798 break;
1799 }
1800
Bart Van Assche509c07b2014-10-30 14:48:30 +01001801 ib_dma_sync_single_for_device(dev, iu->dma, ch->max_ti_iu_len,
Ralph Campbell85507bc2006-12-12 14:30:55 -08001802 DMA_FROM_DEVICE);
Bart Van Asschec996bb42010-07-30 10:59:05 +00001803
Bart Van Assche509c07b2014-10-30 14:48:30 +01001804 res = srp_post_recv(ch, iu);
Bart Van Asschec996bb42010-07-30 10:59:05 +00001805 if (res != 0)
1806 shost_printk(KERN_ERR, target->scsi_host,
1807 PFX "Recv failed with error code %d\n", res);
Roland Dreieraef9ec32005-11-02 14:07:13 -08001808}
1809
Bart Van Asschec1120f82013-10-26 14:35:08 +02001810/**
1811 * srp_tl_err_work() - handle a transport layer error
Bart Van Asscheaf246632014-05-20 15:04:21 +02001812 * @work: Work structure embedded in an SRP target port.
Bart Van Asschec1120f82013-10-26 14:35:08 +02001813 *
1814 * Note: This function may get invoked before the rport has been created,
1815 * hence the target->rport test.
1816 */
1817static void srp_tl_err_work(struct work_struct *work)
1818{
1819 struct srp_target_port *target;
1820
1821 target = container_of(work, struct srp_target_port, tl_err_work);
1822 if (target->rport)
1823 srp_start_tl_fail_timers(target->rport);
1824}
1825
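/*
 * Report a failed work request. The wr_id bits tell LOCAL_INV and
 * FAST_REG_MR work requests apart from regular send and receive completions,
 * and the first error on a connected channel schedules the transport layer
 * error handling work.
 */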
Bart Van Assche5cfb1782014-05-20 15:08:34 +02001826static void srp_handle_qp_err(u64 wr_id, enum ib_wc_status wc_status,
1827 bool send_err, struct srp_target_port *target)
Bart Van Assche948d1e82011-09-03 09:25:42 +02001828{
Bart Van Assche294c8752011-12-25 12:18:12 +00001829 if (target->connected && !target->qp_in_error) {
Bart Van Assche5cfb1782014-05-20 15:08:34 +02001830 if (wr_id & LOCAL_INV_WR_ID_MASK) {
1831 shost_printk(KERN_ERR, target->scsi_host, PFX
1832 "LOCAL_INV failed with status %d\n",
1833 wc_status);
1834 } else if (wr_id & FAST_REG_WR_ID_MASK) {
1835 shost_printk(KERN_ERR, target->scsi_host, PFX
1836 "FAST_REG_MR failed status %d\n",
1837 wc_status);
1838 } else {
1839 shost_printk(KERN_ERR, target->scsi_host,
1840 PFX "failed %s status %d for iu %p\n",
1841 send_err ? "send" : "receive",
1842 wc_status, (void *)(uintptr_t)wr_id);
1843 }
Bart Van Asschec1120f82013-10-26 14:35:08 +02001844 queue_work(system_long_wq, &target->tl_err_work);
Bart Van Assche4f0af692012-11-26 11:16:40 +01001845 }
Bart Van Assche948d1e82011-09-03 09:25:42 +02001846 target->qp_in_error = true;
1847}
1848
Bart Van Assche509c07b2014-10-30 14:48:30 +01001849static void srp_recv_completion(struct ib_cq *cq, void *ch_ptr)
Roland Dreieraef9ec32005-11-02 14:07:13 -08001850{
Bart Van Assche509c07b2014-10-30 14:48:30 +01001851 struct srp_rdma_ch *ch = ch_ptr;
Roland Dreieraef9ec32005-11-02 14:07:13 -08001852 struct ib_wc wc;
Roland Dreieraef9ec32005-11-02 14:07:13 -08001853
1854 ib_req_notify_cq(cq, IB_CQ_NEXT_COMP);
1855 while (ib_poll_cq(cq, 1, &wc) > 0) {
Bart Van Assche948d1e82011-09-03 09:25:42 +02001856 if (likely(wc.status == IB_WC_SUCCESS)) {
Bart Van Assche509c07b2014-10-30 14:48:30 +01001857 srp_handle_recv(ch, &wc);
Bart Van Assche948d1e82011-09-03 09:25:42 +02001858 } else {
Bart Van Assche509c07b2014-10-30 14:48:30 +01001859 srp_handle_qp_err(wc.wr_id, wc.status, false,
1860 ch->target);
Roland Dreieraef9ec32005-11-02 14:07:13 -08001861 }
Bart Van Assche9c03dc92010-02-02 19:23:54 +00001862 }
1863}
1864
Bart Van Assche509c07b2014-10-30 14:48:30 +01001865static void srp_send_completion(struct ib_cq *cq, void *ch_ptr)
Bart Van Assche9c03dc92010-02-02 19:23:54 +00001866{
Bart Van Assche509c07b2014-10-30 14:48:30 +01001867 struct srp_rdma_ch *ch = ch_ptr;
Bart Van Assche9c03dc92010-02-02 19:23:54 +00001868 struct ib_wc wc;
Bart Van Asschedcb4cb82010-11-26 13:22:48 -05001869 struct srp_iu *iu;
Bart Van Assche9c03dc92010-02-02 19:23:54 +00001870
1871 while (ib_poll_cq(cq, 1, &wc) > 0) {
Bart Van Assche948d1e82011-09-03 09:25:42 +02001872 if (likely(wc.status == IB_WC_SUCCESS)) {
1873 iu = (struct srp_iu *) (uintptr_t) wc.wr_id;
Bart Van Assche509c07b2014-10-30 14:48:30 +01001874 list_add(&iu->list, &ch->free_tx);
Bart Van Assche948d1e82011-09-03 09:25:42 +02001875 } else {
Bart Van Assche509c07b2014-10-30 14:48:30 +01001876 srp_handle_qp_err(wc.wr_id, wc.status, true,
1877 ch->target);
Bart Van Assche9c03dc92010-02-02 19:23:54 +00001878 }
Roland Dreieraef9ec32005-11-02 14:07:13 -08001879 }
1880}
1881
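/*
 * Queue a SCSI command: take a free request and TX information unit, build
 * the SRP_CMD, map the data buffer and post the IU on the RDMA channel.
 */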
Bart Van Assche76c75b22010-11-26 14:37:47 -05001882static int srp_queuecommand(struct Scsi_Host *shost, struct scsi_cmnd *scmnd)
Roland Dreieraef9ec32005-11-02 14:07:13 -08001883{
Bart Van Assche76c75b22010-11-26 14:37:47 -05001884 struct srp_target_port *target = host_to_target(shost);
Bart Van Asschea95cadb2013-10-26 14:37:17 +02001885 struct srp_rport *rport = target->rport;
Bart Van Assche509c07b2014-10-30 14:48:30 +01001886 struct srp_rdma_ch *ch;
Roland Dreieraef9ec32005-11-02 14:07:13 -08001887 struct srp_request *req;
1888 struct srp_iu *iu;
1889 struct srp_cmd *cmd;
Ralph Campbell85507bc2006-12-12 14:30:55 -08001890 struct ib_device *dev;
Bart Van Assche76c75b22010-11-26 14:37:47 -05001891 unsigned long flags;
Bart Van Assched1b42892014-05-20 15:07:20 +02001892 int len, ret;
Bart Van Asschea95cadb2013-10-26 14:37:17 +02001893 const bool in_scsi_eh = !in_interrupt() && current == shost->ehandler;
1894
1895 /*
1896 * The SCSI EH thread is the only context from which srp_queuecommand()
1897 * can get invoked for blocked devices (SDEV_BLOCK /
1898 * SDEV_CREATED_BLOCK). Avoid racing with srp_reconnect_rport() by
1899 * locking the rport mutex if invoked from inside the SCSI EH.
1900 */
1901 if (in_scsi_eh)
1902 mutex_lock(&rport->mutex);
Roland Dreieraef9ec32005-11-02 14:07:13 -08001903
Bart Van Assched1b42892014-05-20 15:07:20 +02001904 scmnd->result = srp_chkready(target->rport);
1905 if (unlikely(scmnd->result))
1906 goto err;
Bart Van Assche2ce19e72013-02-21 17:20:00 +00001907
Bart Van Assche509c07b2014-10-30 14:48:30 +01001908 ch = &target->ch;
1909
1910 spin_lock_irqsave(&ch->lock, flags);
1911 iu = __srp_get_tx_iu(ch, SRP_IU_CMD);
Roland Dreieraef9ec32005-11-02 14:07:13 -08001912 if (!iu)
Bart Van Assche695b8342011-01-13 19:02:25 +00001913 goto err_unlock;
1914
Bart Van Assche509c07b2014-10-30 14:48:30 +01001915 req = list_first_entry(&ch->free_reqs, struct srp_request, list);
Bart Van Assche695b8342011-01-13 19:02:25 +00001916 list_del(&req->list);
Bart Van Assche509c07b2014-10-30 14:48:30 +01001917 spin_unlock_irqrestore(&ch->lock, flags);
Roland Dreieraef9ec32005-11-02 14:07:13 -08001918
Greg Kroah-Hartman05321932008-03-06 00:13:36 +01001919 dev = target->srp_host->srp_dev->dev;
David Dillow49248642011-01-14 18:23:24 -05001920 ib_dma_sync_single_for_cpu(dev, iu->dma, target->max_iu_len,
Ralph Campbell85507bc2006-12-12 14:30:55 -08001921 DMA_TO_DEVICE);
Roland Dreieraef9ec32005-11-02 14:07:13 -08001922
David Dillowf8b6e312010-11-26 13:02:21 -05001923 scmnd->host_scribble = (void *) req;
Roland Dreieraef9ec32005-11-02 14:07:13 -08001924
1925 cmd = iu->buf;
1926 memset(cmd, 0, sizeof *cmd);
1927
1928 cmd->opcode = SRP_CMD;
1929 cmd->lun = cpu_to_be64((u64) scmnd->device->lun << 48);
Roland Dreierd945e1d2006-05-09 10:50:28 -07001930 cmd->tag = req->index;
Roland Dreieraef9ec32005-11-02 14:07:13 -08001931 memcpy(cmd->cdb, scmnd->cmnd, scmnd->cmd_len);
1932
Roland Dreieraef9ec32005-11-02 14:07:13 -08001933 req->scmnd = scmnd;
1934 req->cmd = iu;
Roland Dreieraef9ec32005-11-02 14:07:13 -08001935
Bart Van Assche509c07b2014-10-30 14:48:30 +01001936 len = srp_map_data(scmnd, ch, req);
Roland Dreieraef9ec32005-11-02 14:07:13 -08001937 if (len < 0) {
David Dillow7aa54bd2008-01-07 18:23:41 -05001938 shost_printk(KERN_ERR, target->scsi_host,
Bart Van Assched1b42892014-05-20 15:07:20 +02001939 PFX "Failed to map data (%d)\n", len);
1940 /*
1941 * If we ran out of memory descriptors (-ENOMEM) because an
1942 * application is queuing many requests with more than
Bart Van Assche52ede082014-05-20 15:07:45 +02001943 * max_pages_per_mr sg-list elements, tell the SCSI mid-layer
Bart Van Assched1b42892014-05-20 15:07:20 +02001944 * to reduce queue depth temporarily.
1945 */
1946 scmnd->result = len == -ENOMEM ?
1947 DID_OK << 16 | QUEUE_FULL << 1 : DID_ERROR << 16;
Bart Van Assche76c75b22010-11-26 14:37:47 -05001948 goto err_iu;
Roland Dreieraef9ec32005-11-02 14:07:13 -08001949 }
1950
David Dillow49248642011-01-14 18:23:24 -05001951 ib_dma_sync_single_for_device(dev, iu->dma, target->max_iu_len,
Ralph Campbell85507bc2006-12-12 14:30:55 -08001952 DMA_TO_DEVICE);
Roland Dreieraef9ec32005-11-02 14:07:13 -08001953
Bart Van Assche509c07b2014-10-30 14:48:30 +01001954 if (srp_post_send(ch, iu, len)) {
David Dillow7aa54bd2008-01-07 18:23:41 -05001955 shost_printk(KERN_ERR, target->scsi_host, PFX "Send failed\n");
Roland Dreieraef9ec32005-11-02 14:07:13 -08001956 goto err_unmap;
1957 }
1958
Bart Van Assched1b42892014-05-20 15:07:20 +02001959 ret = 0;
1960
Bart Van Asschea95cadb2013-10-26 14:37:17 +02001961unlock_rport:
1962 if (in_scsi_eh)
1963 mutex_unlock(&rport->mutex);
1964
Bart Van Assched1b42892014-05-20 15:07:20 +02001965 return ret;
Roland Dreieraef9ec32005-11-02 14:07:13 -08001966
1967err_unmap:
Bart Van Assche509c07b2014-10-30 14:48:30 +01001968 srp_unmap_data(scmnd, ch, req);
Roland Dreieraef9ec32005-11-02 14:07:13 -08001969
Bart Van Assche76c75b22010-11-26 14:37:47 -05001970err_iu:
Bart Van Assche509c07b2014-10-30 14:48:30 +01001971 srp_put_tx_iu(ch, iu, SRP_IU_CMD);
Bart Van Assche76c75b22010-11-26 14:37:47 -05001972
Bart Van Assche024ca902014-05-20 15:03:49 +02001973 /*
1974	 * Make sure that the loops that iterate over the request ring
1975	 * never encounter a dangling SCSI command pointer.
1976 */
1977 req->scmnd = NULL;
1978
Bart Van Assche509c07b2014-10-30 14:48:30 +01001979 spin_lock_irqsave(&ch->lock, flags);
1980 list_add(&req->list, &ch->free_reqs);
Bart Van Assche695b8342011-01-13 19:02:25 +00001981
1982err_unlock:
Bart Van Assche509c07b2014-10-30 14:48:30 +01001983 spin_unlock_irqrestore(&ch->lock, flags);
Bart Van Assche76c75b22010-11-26 14:37:47 -05001984
Bart Van Assched1b42892014-05-20 15:07:20 +02001985err:
1986 if (scmnd->result) {
1987 scmnd->scsi_done(scmnd);
1988 ret = 0;
1989 } else {
1990 ret = SCSI_MLQUEUE_HOST_BUSY;
1991 }
Bart Van Asschea95cadb2013-10-26 14:37:17 +02001992
Bart Van Assched1b42892014-05-20 15:07:20 +02001993 goto unlock_rport;
Roland Dreieraef9ec32005-11-02 14:07:13 -08001994}
1995
Bart Van Assche4d73f952013-10-26 14:40:37 +02001996/*
1997 * Note: the resources allocated in this function are freed in
Bart Van Assche509c07b2014-10-30 14:48:30 +01001998 * srp_free_ch_ib().
Bart Van Assche4d73f952013-10-26 14:40:37 +02001999 */
Bart Van Assche509c07b2014-10-30 14:48:30 +01002000static int srp_alloc_iu_bufs(struct srp_rdma_ch *ch)
Roland Dreieraef9ec32005-11-02 14:07:13 -08002001{
Bart Van Assche509c07b2014-10-30 14:48:30 +01002002 struct srp_target_port *target = ch->target;
Roland Dreieraef9ec32005-11-02 14:07:13 -08002003 int i;
2004
Bart Van Assche509c07b2014-10-30 14:48:30 +01002005 ch->rx_ring = kcalloc(target->queue_size, sizeof(*ch->rx_ring),
2006 GFP_KERNEL);
2007 if (!ch->rx_ring)
Bart Van Assche4d73f952013-10-26 14:40:37 +02002008 goto err_no_ring;
Bart Van Assche509c07b2014-10-30 14:48:30 +01002009 ch->tx_ring = kcalloc(target->queue_size, sizeof(*ch->tx_ring),
2010 GFP_KERNEL);
2011 if (!ch->tx_ring)
Bart Van Assche4d73f952013-10-26 14:40:37 +02002012 goto err_no_ring;
2013
2014 for (i = 0; i < target->queue_size; ++i) {
Bart Van Assche509c07b2014-10-30 14:48:30 +01002015 ch->rx_ring[i] = srp_alloc_iu(target->srp_host,
2016 ch->max_ti_iu_len,
2017 GFP_KERNEL, DMA_FROM_DEVICE);
2018 if (!ch->rx_ring[i])
Roland Dreieraef9ec32005-11-02 14:07:13 -08002019 goto err;
2020 }
2021
Bart Van Assche4d73f952013-10-26 14:40:37 +02002022 for (i = 0; i < target->queue_size; ++i) {
Bart Van Assche509c07b2014-10-30 14:48:30 +01002023 ch->tx_ring[i] = srp_alloc_iu(target->srp_host,
2024 target->max_iu_len,
2025 GFP_KERNEL, DMA_TO_DEVICE);
2026 if (!ch->tx_ring[i])
Roland Dreieraef9ec32005-11-02 14:07:13 -08002027 goto err;
Bart Van Asschedcb4cb82010-11-26 13:22:48 -05002028
Bart Van Assche509c07b2014-10-30 14:48:30 +01002029 list_add(&ch->tx_ring[i]->list, &ch->free_tx);
Roland Dreieraef9ec32005-11-02 14:07:13 -08002030 }
2031
2032 return 0;
2033
2034err:
Bart Van Assche4d73f952013-10-26 14:40:37 +02002035 for (i = 0; i < target->queue_size; ++i) {
Bart Van Assche509c07b2014-10-30 14:48:30 +01002036 srp_free_iu(target->srp_host, ch->rx_ring[i]);
2037 srp_free_iu(target->srp_host, ch->tx_ring[i]);
Roland Dreieraef9ec32005-11-02 14:07:13 -08002038 }
2039
Bart Van Assche4d73f952013-10-26 14:40:37 +02002040
2041err_no_ring:
Bart Van Assche509c07b2014-10-30 14:48:30 +01002042 kfree(ch->tx_ring);
2043 ch->tx_ring = NULL;
2044 kfree(ch->rx_ring);
2045 ch->rx_ring = NULL;
Roland Dreieraef9ec32005-11-02 14:07:13 -08002046
2047 return -ENOMEM;
2048}
2049
Bart Van Asschec9b03c12011-09-03 09:34:48 +02002050static uint32_t srp_compute_rq_tmo(struct ib_qp_attr *qp_attr, int attr_mask)
2051{
2052 uint64_t T_tr_ns, max_compl_time_ms;
2053 uint32_t rq_tmo_jiffies;
2054
2055 /*
2056 * According to section 11.2.4.2 in the IBTA spec (Modify Queue Pair,
2057 * table 91), both the QP timeout and the retry count have to be set
2058	 * for RC QPs during the RTR to RTS transition.
2059 */
2060 WARN_ON_ONCE((attr_mask & (IB_QP_TIMEOUT | IB_QP_RETRY_CNT)) !=
2061 (IB_QP_TIMEOUT | IB_QP_RETRY_CNT));
2062
2063 /*
2064 * Set target->rq_tmo_jiffies to one second more than the largest time
2065 * it can take before an error completion is generated. See also
2066 * C9-140..142 in the IBTA spec for more information about how to
2067 * convert the QP Local ACK Timeout value to nanoseconds.
2068 */
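	/*
	 * Example: qp_attr->timeout == 19 and retry_cnt == 7 give
	 * T_tr_ns ~= 2.15 s, max_compl_time_ms ~= 60100 and hence a
	 * timeout of roughly 61 seconds.
	 */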
2069 T_tr_ns = 4096 * (1ULL << qp_attr->timeout);
2070 max_compl_time_ms = qp_attr->retry_cnt * 4 * T_tr_ns;
2071 do_div(max_compl_time_ms, NSEC_PER_MSEC);
2072 rq_tmo_jiffies = msecs_to_jiffies(max_compl_time_ms + 1000);
2073
2074 return rq_tmo_jiffies;
2075}
2076
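/*
 * Handle a connection manager REP: record the limits negotiated in the
 * SRP_LOGIN_RSP, allocate the IU rings if necessary, move the QP through the
 * RTR and RTS states, post the initial receive buffers and send an RTU.
 */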
David Dillow961e0be2011-01-14 17:32:07 -05002077static void srp_cm_rep_handler(struct ib_cm_id *cm_id,
2078 struct srp_login_rsp *lrsp,
Bart Van Assche509c07b2014-10-30 14:48:30 +01002079 struct srp_rdma_ch *ch)
David Dillow961e0be2011-01-14 17:32:07 -05002080{
Bart Van Assche509c07b2014-10-30 14:48:30 +01002081 struct srp_target_port *target = ch->target;
David Dillow961e0be2011-01-14 17:32:07 -05002082 struct ib_qp_attr *qp_attr = NULL;
2083 int attr_mask = 0;
2084 int ret;
2085 int i;
2086
2087 if (lrsp->opcode == SRP_LOGIN_RSP) {
Bart Van Assche509c07b2014-10-30 14:48:30 +01002088 ch->max_ti_iu_len = be32_to_cpu(lrsp->max_ti_iu_len);
2089 ch->req_lim = be32_to_cpu(lrsp->req_lim_delta);
David Dillow961e0be2011-01-14 17:32:07 -05002090
2091 /*
2092 * Reserve credits for task management so we don't
2093 * bounce requests back to the SCSI mid-layer.
2094 */
2095 target->scsi_host->can_queue
Bart Van Assche509c07b2014-10-30 14:48:30 +01002096 = min(ch->req_lim - SRP_TSK_MGMT_SQ_SIZE,
David Dillow961e0be2011-01-14 17:32:07 -05002097 target->scsi_host->can_queue);
Bart Van Assche4d73f952013-10-26 14:40:37 +02002098 target->scsi_host->cmd_per_lun
2099 = min_t(int, target->scsi_host->can_queue,
2100 target->scsi_host->cmd_per_lun);
David Dillow961e0be2011-01-14 17:32:07 -05002101 } else {
2102 shost_printk(KERN_WARNING, target->scsi_host,
2103 PFX "Unhandled RSP opcode %#x\n", lrsp->opcode);
2104 ret = -ECONNRESET;
2105 goto error;
2106 }
2107
Bart Van Assche509c07b2014-10-30 14:48:30 +01002108 if (!ch->rx_ring) {
2109 ret = srp_alloc_iu_bufs(ch);
David Dillow961e0be2011-01-14 17:32:07 -05002110 if (ret)
2111 goto error;
2112 }
2113
2114 ret = -ENOMEM;
2115 qp_attr = kmalloc(sizeof *qp_attr, GFP_KERNEL);
2116 if (!qp_attr)
2117 goto error;
2118
2119 qp_attr->qp_state = IB_QPS_RTR;
2120 ret = ib_cm_init_qp_attr(cm_id, qp_attr, &attr_mask);
2121 if (ret)
2122 goto error_free;
2123
Bart Van Assche509c07b2014-10-30 14:48:30 +01002124 ret = ib_modify_qp(ch->qp, qp_attr, attr_mask);
David Dillow961e0be2011-01-14 17:32:07 -05002125 if (ret)
2126 goto error_free;
2127
Bart Van Assche4d73f952013-10-26 14:40:37 +02002128 for (i = 0; i < target->queue_size; i++) {
Bart Van Assche509c07b2014-10-30 14:48:30 +01002129 struct srp_iu *iu = ch->rx_ring[i];
2130
2131 ret = srp_post_recv(ch, iu);
David Dillow961e0be2011-01-14 17:32:07 -05002132 if (ret)
2133 goto error_free;
2134 }
2135
2136 qp_attr->qp_state = IB_QPS_RTS;
2137 ret = ib_cm_init_qp_attr(cm_id, qp_attr, &attr_mask);
2138 if (ret)
2139 goto error_free;
2140
Bart Van Asschec9b03c12011-09-03 09:34:48 +02002141 target->rq_tmo_jiffies = srp_compute_rq_tmo(qp_attr, attr_mask);
2142
Bart Van Assche509c07b2014-10-30 14:48:30 +01002143 ret = ib_modify_qp(ch->qp, qp_attr, attr_mask);
David Dillow961e0be2011-01-14 17:32:07 -05002144 if (ret)
2145 goto error_free;
2146
2147 ret = ib_send_cm_rtu(cm_id, NULL, 0);
2148
2149error_free:
2150 kfree(qp_attr);
2151
2152error:
Bart Van Assche509c07b2014-10-30 14:48:30 +01002153 ch->status = ret;
David Dillow961e0be2011-01-14 17:32:07 -05002154}
2155
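/*
 * Translate a connection manager REJ event into a channel status: record
 * redirect information for CM and port redirects, decode SRP_LOGIN_REJ
 * reject reasons, report stale connections and map everything else to
 * -ECONNRESET.
 */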
Roland Dreieraef9ec32005-11-02 14:07:13 -08002156static void srp_cm_rej_handler(struct ib_cm_id *cm_id,
2157 struct ib_cm_event *event,
Bart Van Assche509c07b2014-10-30 14:48:30 +01002158 struct srp_rdma_ch *ch)
Roland Dreieraef9ec32005-11-02 14:07:13 -08002159{
Bart Van Assche509c07b2014-10-30 14:48:30 +01002160 struct srp_target_port *target = ch->target;
David Dillow7aa54bd2008-01-07 18:23:41 -05002161 struct Scsi_Host *shost = target->scsi_host;
Roland Dreieraef9ec32005-11-02 14:07:13 -08002162 struct ib_class_port_info *cpi;
2163 int opcode;
2164
2165 switch (event->param.rej_rcvd.reason) {
2166 case IB_CM_REJ_PORT_CM_REDIRECT:
2167 cpi = event->param.rej_rcvd.ari;
Bart Van Assche509c07b2014-10-30 14:48:30 +01002168 ch->path.dlid = cpi->redirect_lid;
2169 ch->path.pkey = cpi->redirect_pkey;
Roland Dreieraef9ec32005-11-02 14:07:13 -08002170 cm_id->remote_cm_qpn = be32_to_cpu(cpi->redirect_qp) & 0x00ffffff;
Bart Van Assche509c07b2014-10-30 14:48:30 +01002171 memcpy(ch->path.dgid.raw, cpi->redirect_gid, 16);
Roland Dreieraef9ec32005-11-02 14:07:13 -08002172
Bart Van Assche509c07b2014-10-30 14:48:30 +01002173 ch->status = ch->path.dlid ?
Roland Dreieraef9ec32005-11-02 14:07:13 -08002174 SRP_DLID_REDIRECT : SRP_PORT_REDIRECT;
2175 break;
2176
2177 case IB_CM_REJ_PORT_REDIRECT:
Roland Dreier5d7cbfd2007-08-03 10:45:18 -07002178 if (srp_target_is_topspin(target)) {
Roland Dreieraef9ec32005-11-02 14:07:13 -08002179 /*
2180 * Topspin/Cisco SRP gateways incorrectly send
2181 * reject reason code 25 when they mean 24
2182 * (port redirect).
2183 */
Bart Van Assche509c07b2014-10-30 14:48:30 +01002184 memcpy(ch->path.dgid.raw,
Roland Dreieraef9ec32005-11-02 14:07:13 -08002185 event->param.rej_rcvd.ari, 16);
2186
David Dillow7aa54bd2008-01-07 18:23:41 -05002187 shost_printk(KERN_DEBUG, shost,
2188 PFX "Topspin/Cisco redirect to target port GID %016llx%016llx\n",
Bart Van Assche509c07b2014-10-30 14:48:30 +01002189 be64_to_cpu(ch->path.dgid.global.subnet_prefix),
2190 be64_to_cpu(ch->path.dgid.global.interface_id));
Roland Dreieraef9ec32005-11-02 14:07:13 -08002191
Bart Van Assche509c07b2014-10-30 14:48:30 +01002192 ch->status = SRP_PORT_REDIRECT;
Roland Dreieraef9ec32005-11-02 14:07:13 -08002193 } else {
David Dillow7aa54bd2008-01-07 18:23:41 -05002194 shost_printk(KERN_WARNING, shost,
2195 " REJ reason: IB_CM_REJ_PORT_REDIRECT\n");
Bart Van Assche509c07b2014-10-30 14:48:30 +01002196 ch->status = -ECONNRESET;
Roland Dreieraef9ec32005-11-02 14:07:13 -08002197 }
2198 break;
2199
2200 case IB_CM_REJ_DUPLICATE_LOCAL_COMM_ID:
David Dillow7aa54bd2008-01-07 18:23:41 -05002201 shost_printk(KERN_WARNING, shost,
2202 " REJ reason: IB_CM_REJ_DUPLICATE_LOCAL_COMM_ID\n");
Bart Van Assche509c07b2014-10-30 14:48:30 +01002203 ch->status = -ECONNRESET;
Roland Dreieraef9ec32005-11-02 14:07:13 -08002204 break;
2205
2206 case IB_CM_REJ_CONSUMER_DEFINED:
2207 opcode = *(u8 *) event->private_data;
2208 if (opcode == SRP_LOGIN_REJ) {
2209 struct srp_login_rej *rej = event->private_data;
2210 u32 reason = be32_to_cpu(rej->reason);
2211
2212 if (reason == SRP_LOGIN_REJ_REQ_IT_IU_LENGTH_TOO_LARGE)
David Dillow7aa54bd2008-01-07 18:23:41 -05002213 shost_printk(KERN_WARNING, shost,
2214 PFX "SRP_LOGIN_REJ: requested max_it_iu_len too large\n");
Roland Dreieraef9ec32005-11-02 14:07:13 -08002215 else
Bart Van Asschee7ffde02014-03-14 13:52:21 +01002216 shost_printk(KERN_WARNING, shost, PFX
2217 "SRP LOGIN from %pI6 to %pI6 REJECTED, reason 0x%08x\n",
Bart Van Assche747fe002014-10-30 14:48:05 +01002218 target->sgid.raw,
2219 target->orig_dgid.raw, reason);
Roland Dreieraef9ec32005-11-02 14:07:13 -08002220 } else
David Dillow7aa54bd2008-01-07 18:23:41 -05002221 shost_printk(KERN_WARNING, shost,
2222 " REJ reason: IB_CM_REJ_CONSUMER_DEFINED,"
2223 " opcode 0x%02x\n", opcode);
Bart Van Assche509c07b2014-10-30 14:48:30 +01002224 ch->status = -ECONNRESET;
Roland Dreieraef9ec32005-11-02 14:07:13 -08002225 break;
2226
David Dillow9fe4bcf2008-01-08 17:08:52 -05002227 case IB_CM_REJ_STALE_CONN:
2228 shost_printk(KERN_WARNING, shost, " REJ reason: stale connection\n");
Bart Van Assche509c07b2014-10-30 14:48:30 +01002229 ch->status = SRP_STALE_CONN;
David Dillow9fe4bcf2008-01-08 17:08:52 -05002230 break;
2231
Roland Dreieraef9ec32005-11-02 14:07:13 -08002232 default:
David Dillow7aa54bd2008-01-07 18:23:41 -05002233 shost_printk(KERN_WARNING, shost, " REJ reason 0x%x\n",
2234 event->param.rej_rcvd.reason);
Bart Van Assche509c07b2014-10-30 14:48:30 +01002235 ch->status = -ECONNRESET;
Roland Dreieraef9ec32005-11-02 14:07:13 -08002236 }
2237}
2238
2239static int srp_cm_handler(struct ib_cm_id *cm_id, struct ib_cm_event *event)
2240{
Bart Van Assche509c07b2014-10-30 14:48:30 +01002241 struct srp_rdma_ch *ch = cm_id->context;
2242 struct srp_target_port *target = ch->target;
Roland Dreieraef9ec32005-11-02 14:07:13 -08002243 int comp = 0;
Roland Dreieraef9ec32005-11-02 14:07:13 -08002244
2245 switch (event->event) {
2246 case IB_CM_REQ_ERROR:
David Dillow7aa54bd2008-01-07 18:23:41 -05002247 shost_printk(KERN_DEBUG, target->scsi_host,
2248 PFX "Sending CM REQ failed\n");
Roland Dreieraef9ec32005-11-02 14:07:13 -08002249 comp = 1;
Bart Van Assche509c07b2014-10-30 14:48:30 +01002250 ch->status = -ECONNRESET;
Roland Dreieraef9ec32005-11-02 14:07:13 -08002251 break;
2252
2253 case IB_CM_REP_RECEIVED:
2254 comp = 1;
Bart Van Assche509c07b2014-10-30 14:48:30 +01002255 srp_cm_rep_handler(cm_id, event->private_data, ch);
Roland Dreieraef9ec32005-11-02 14:07:13 -08002256 break;
2257
2258 case IB_CM_REJ_RECEIVED:
David Dillow7aa54bd2008-01-07 18:23:41 -05002259 shost_printk(KERN_DEBUG, target->scsi_host, PFX "REJ received\n");
Roland Dreieraef9ec32005-11-02 14:07:13 -08002260 comp = 1;
2261
Bart Van Assche509c07b2014-10-30 14:48:30 +01002262 srp_cm_rej_handler(cm_id, event, ch);
Roland Dreieraef9ec32005-11-02 14:07:13 -08002263 break;
2264
Ishai Rabinovitzb7ac4ab2006-06-17 20:37:32 -07002265 case IB_CM_DREQ_RECEIVED:
David Dillow7aa54bd2008-01-07 18:23:41 -05002266 shost_printk(KERN_WARNING, target->scsi_host,
2267 PFX "DREQ received - connection closed\n");
Bart Van Assche294c8752011-12-25 12:18:12 +00002268 srp_change_conn_state(target, false);
Ishai Rabinovitzb7ac4ab2006-06-17 20:37:32 -07002269 if (ib_send_cm_drep(cm_id, NULL, 0))
David Dillow7aa54bd2008-01-07 18:23:41 -05002270 shost_printk(KERN_ERR, target->scsi_host,
2271 PFX "Sending CM DREP failed\n");
Bart Van Asschec1120f82013-10-26 14:35:08 +02002272 queue_work(system_long_wq, &target->tl_err_work);
Roland Dreieraef9ec32005-11-02 14:07:13 -08002273 break;
2274
2275 case IB_CM_TIMEWAIT_EXIT:
David Dillow7aa54bd2008-01-07 18:23:41 -05002276 shost_printk(KERN_ERR, target->scsi_host,
2277 PFX "connection closed\n");
Bart Van Asscheac72d762014-03-14 13:53:40 +01002278 comp = 1;
Roland Dreieraef9ec32005-11-02 14:07:13 -08002279
Bart Van Assche509c07b2014-10-30 14:48:30 +01002280 ch->status = 0;
Roland Dreieraef9ec32005-11-02 14:07:13 -08002281 break;
2282
Ishai Rabinovitzb7ac4ab2006-06-17 20:37:32 -07002283 case IB_CM_MRA_RECEIVED:
2284 case IB_CM_DREQ_ERROR:
2285 case IB_CM_DREP_RECEIVED:
2286 break;
2287
Roland Dreieraef9ec32005-11-02 14:07:13 -08002288 default:
David Dillow7aa54bd2008-01-07 18:23:41 -05002289 shost_printk(KERN_WARNING, target->scsi_host,
2290 PFX "Unhandled CM event %d\n", event->event);
Roland Dreieraef9ec32005-11-02 14:07:13 -08002291 break;
2292 }
2293
2294 if (comp)
Bart Van Assche509c07b2014-10-30 14:48:30 +01002295 complete(&ch->done);
Roland Dreieraef9ec32005-11-02 14:07:13 -08002296
Roland Dreieraef9ec32005-11-02 14:07:13 -08002297 return 0;
2298}
2299
Jack Wang71444b92013-11-07 11:37:37 +01002300/**
Jack Wang71444b92013-11-07 11:37:37 +01002301 * srp_change_queue_depth - setting device queue depth
2302 * @sdev: scsi device struct
2303 * @qdepth: requested queue depth
2304 * @reason: SCSI_QDEPTH_DEFAULT/SCSI_QDEPTH_QFULL/SCSI_QDEPTH_RAMP_UP
2305 * (see include/scsi/scsi_host.h for definition)
2306 *
2307 * Returns queue depth.
2308 */
2309static int
2310srp_change_queue_depth(struct scsi_device *sdev, int qdepth, int reason)
2311{
2312 struct Scsi_Host *shost = sdev->host;
2313 int max_depth;
2314 if (reason == SCSI_QDEPTH_DEFAULT || reason == SCSI_QDEPTH_RAMP_UP) {
2315 max_depth = shost->can_queue;
2316 if (!sdev->tagged_supported)
2317 max_depth = 1;
2318 if (qdepth > max_depth)
2319 qdepth = max_depth;
Christoph Hellwigc8b09f62014-11-03 20:15:14 +01002320 scsi_adjust_queue_depth(sdev, qdepth);
Jack Wang71444b92013-11-07 11:37:37 +01002321 } else if (reason == SCSI_QDEPTH_QFULL)
2322 scsi_track_queue_full(sdev, qdepth);
2323 else
2324 return -EOPNOTSUPP;
2325
2326 return sdev->queue_depth;
2327}
2328
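/*
 * Build and send an SRP_TSK_MGMT information unit for @func and wait up to
 * SRP_ABORT_TIMEOUT_MS for the target to answer via ch->tsk_mgmt_done.
 */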
Bart Van Assche509c07b2014-10-30 14:48:30 +01002329static int srp_send_tsk_mgmt(struct srp_rdma_ch *ch, u64 req_tag,
2330 unsigned int lun, u8 func)
Roland Dreieraef9ec32005-11-02 14:07:13 -08002331{
Bart Van Assche509c07b2014-10-30 14:48:30 +01002332 struct srp_target_port *target = ch->target;
Bart Van Asschea95cadb2013-10-26 14:37:17 +02002333 struct srp_rport *rport = target->rport;
David Dillow19081f32010-10-18 08:54:49 -04002334 struct ib_device *dev = target->srp_host->srp_dev->dev;
Roland Dreieraef9ec32005-11-02 14:07:13 -08002335 struct srp_iu *iu;
2336 struct srp_tsk_mgmt *tsk_mgmt;
Roland Dreieraef9ec32005-11-02 14:07:13 -08002337
Bart Van Assche3780d1f2013-02-21 17:18:00 +00002338 if (!target->connected || target->qp_in_error)
2339 return -1;
2340
Bart Van Assche509c07b2014-10-30 14:48:30 +01002341 init_completion(&ch->tsk_mgmt_done);
Roland Dreieraef9ec32005-11-02 14:07:13 -08002342
Bart Van Asschea95cadb2013-10-26 14:37:17 +02002343 /*
Bart Van Assche509c07b2014-10-30 14:48:30 +01002344	 * Lock the rport mutex to prevent srp_create_ch_ib() from being
Bart Van Asschea95cadb2013-10-26 14:37:17 +02002345	 * invoked while a task management function is being sent.
2346 */
2347 mutex_lock(&rport->mutex);
Bart Van Assche509c07b2014-10-30 14:48:30 +01002348 spin_lock_irq(&ch->lock);
2349 iu = __srp_get_tx_iu(ch, SRP_IU_TSK_MGMT);
2350 spin_unlock_irq(&ch->lock);
Bart Van Assche76c75b22010-11-26 14:37:47 -05002351
Bart Van Asschea95cadb2013-10-26 14:37:17 +02002352 if (!iu) {
2353 mutex_unlock(&rport->mutex);
2354
Bart Van Assche76c75b22010-11-26 14:37:47 -05002355 return -1;
Bart Van Asschea95cadb2013-10-26 14:37:17 +02002356 }
Roland Dreieraef9ec32005-11-02 14:07:13 -08002357
David Dillow19081f32010-10-18 08:54:49 -04002358 ib_dma_sync_single_for_cpu(dev, iu->dma, sizeof *tsk_mgmt,
2359 DMA_TO_DEVICE);
Roland Dreieraef9ec32005-11-02 14:07:13 -08002360 tsk_mgmt = iu->buf;
2361 memset(tsk_mgmt, 0, sizeof *tsk_mgmt);
2362
2363 tsk_mgmt->opcode = SRP_TSK_MGMT;
David Dillowf8b6e312010-11-26 13:02:21 -05002364 tsk_mgmt->lun = cpu_to_be64((u64) lun << 48);
2365 tsk_mgmt->tag = req_tag | SRP_TAG_TSK_MGMT;
Roland Dreieraef9ec32005-11-02 14:07:13 -08002366 tsk_mgmt->tsk_mgmt_func = func;
David Dillowf8b6e312010-11-26 13:02:21 -05002367 tsk_mgmt->task_tag = req_tag;
Roland Dreieraef9ec32005-11-02 14:07:13 -08002368
David Dillow19081f32010-10-18 08:54:49 -04002369 ib_dma_sync_single_for_device(dev, iu->dma, sizeof *tsk_mgmt,
2370 DMA_TO_DEVICE);
Bart Van Assche509c07b2014-10-30 14:48:30 +01002371 if (srp_post_send(ch, iu, sizeof(*tsk_mgmt))) {
2372 srp_put_tx_iu(ch, iu, SRP_IU_TSK_MGMT);
Bart Van Asschea95cadb2013-10-26 14:37:17 +02002373 mutex_unlock(&rport->mutex);
2374
Bart Van Assche76c75b22010-11-26 14:37:47 -05002375 return -1;
2376 }
Bart Van Asschea95cadb2013-10-26 14:37:17 +02002377 mutex_unlock(&rport->mutex);
Roland Dreierd945e1d2006-05-09 10:50:28 -07002378
Bart Van Assche509c07b2014-10-30 14:48:30 +01002379 if (!wait_for_completion_timeout(&ch->tsk_mgmt_done,
Roland Dreieraef9ec32005-11-02 14:07:13 -08002380 msecs_to_jiffies(SRP_ABORT_TIMEOUT_MS)))
Roland Dreierd945e1d2006-05-09 10:50:28 -07002381 return -1;
Roland Dreieraef9ec32005-11-02 14:07:13 -08002382
Roland Dreierd945e1d2006-05-09 10:50:28 -07002383 return 0;
Roland Dreierd945e1d2006-05-09 10:50:28 -07002384}
2385
Roland Dreieraef9ec32005-11-02 14:07:13 -08002386static int srp_abort(struct scsi_cmnd *scmnd)
2387{
Roland Dreierd945e1d2006-05-09 10:50:28 -07002388 struct srp_target_port *target = host_to_target(scmnd->device->host);
David Dillowf8b6e312010-11-26 13:02:21 -05002389 struct srp_request *req = (struct srp_request *) scmnd->host_scribble;
Bart Van Assche509c07b2014-10-30 14:48:30 +01002390 struct srp_rdma_ch *ch;
Bart Van Assche086f44f2013-06-12 15:23:04 +02002391 int ret;
Roland Dreierd945e1d2006-05-09 10:50:28 -07002392
David Dillow7aa54bd2008-01-07 18:23:41 -05002393	shost_printk(KERN_ERR, target->scsi_host, PFX "SRP abort called\n");
Roland Dreieraef9ec32005-11-02 14:07:13 -08002394
Bart Van Assche509c07b2014-10-30 14:48:30 +01002395 ch = &target->ch;
2396 if (!req || !srp_claim_req(ch, req, NULL, scmnd))
Bart Van Assche99b66972013-10-10 13:52:33 +02002397 return SUCCESS;
Bart Van Assche509c07b2014-10-30 14:48:30 +01002398 if (srp_send_tsk_mgmt(ch, req->index, scmnd->device->lun,
Bart Van Assche80d5e8a2013-07-10 17:36:35 +02002399 SRP_TSK_ABORT_TASK) == 0)
Bart Van Assche086f44f2013-06-12 15:23:04 +02002400 ret = SUCCESS;
Bart Van Asscheed9b2262013-10-26 14:34:27 +02002401 else if (target->rport->state == SRP_RPORT_LOST)
Bart Van Assche99e1c132013-06-28 14:49:58 +02002402 ret = FAST_IO_FAIL;
Bart Van Assche086f44f2013-06-12 15:23:04 +02002403 else
2404 ret = FAILED;
Bart Van Assche509c07b2014-10-30 14:48:30 +01002405 srp_free_req(ch, req, scmnd, 0);
Bart Van Assche22032992012-08-14 13:18:53 +00002406 scmnd->result = DID_ABORT << 16;
Bart Van Assched8536672012-08-24 10:29:11 +00002407 scmnd->scsi_done(scmnd);
Roland Dreierd945e1d2006-05-09 10:50:28 -07002408
Bart Van Assche086f44f2013-06-12 15:23:04 +02002409 return ret;
Roland Dreieraef9ec32005-11-02 14:07:13 -08002410}
2411
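/*
 * SCSI EH device reset handler: send a LUN RESET task management function
 * and, if it succeeds, finish all outstanding requests for this SCSI
 * device with DID_RESET.
 */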
2412static int srp_reset_device(struct scsi_cmnd *scmnd)
2413{
Roland Dreierd945e1d2006-05-09 10:50:28 -07002414 struct srp_target_port *target = host_to_target(scmnd->device->host);
Bart Van Assche509c07b2014-10-30 14:48:30 +01002415 struct srp_rdma_ch *ch = &target->ch;
Bart Van Assche536ae142010-11-26 13:58:27 -05002416 int i;
Roland Dreierd945e1d2006-05-09 10:50:28 -07002417
David Dillow7aa54bd2008-01-07 18:23:41 -05002418	shost_printk(KERN_ERR, target->scsi_host, PFX "SRP reset_device called\n");
Roland Dreieraef9ec32005-11-02 14:07:13 -08002419
Bart Van Assche509c07b2014-10-30 14:48:30 +01002420 if (srp_send_tsk_mgmt(ch, SRP_TAG_NO_REQ, scmnd->device->lun,
David Dillowf8b6e312010-11-26 13:02:21 -05002421 SRP_TSK_LUN_RESET))
Roland Dreierd945e1d2006-05-09 10:50:28 -07002422 return FAILED;
Bart Van Assche509c07b2014-10-30 14:48:30 +01002423 if (ch->tsk_mgmt_status)
Roland Dreierd945e1d2006-05-09 10:50:28 -07002424 return FAILED;
2425
Bart Van Assche4d73f952013-10-26 14:40:37 +02002426 for (i = 0; i < target->req_ring_size; ++i) {
Bart Van Assche509c07b2014-10-30 14:48:30 +01002427 struct srp_request *req = &ch->req_ring[i];
2428
2429 srp_finish_req(ch, req, scmnd->device, DID_RESET << 16);
Bart Van Assche536ae142010-11-26 13:58:27 -05002430 }
Roland Dreierd945e1d2006-05-09 10:50:28 -07002431
Roland Dreierd945e1d2006-05-09 10:50:28 -07002432 return SUCCESS;
Roland Dreieraef9ec32005-11-02 14:07:13 -08002433}
2434
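/*
 * SCSI EH host reset handler: a host reset is implemented by reconnecting
 * the rport; SUCCESS is returned only if the reconnect succeeds.
 */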
2435static int srp_reset_host(struct scsi_cmnd *scmnd)
2436{
2437 struct srp_target_port *target = host_to_target(scmnd->device->host);
Roland Dreieraef9ec32005-11-02 14:07:13 -08002438
David Dillow7aa54bd2008-01-07 18:23:41 -05002439 shost_printk(KERN_ERR, target->scsi_host, PFX "SRP reset_host called\n");
Roland Dreieraef9ec32005-11-02 14:07:13 -08002440
Bart Van Asscheed9b2262013-10-26 14:34:27 +02002441 return srp_reconnect_rport(target->rport) == 0 ? SUCCESS : FAILED;
Roland Dreieraef9ec32005-11-02 14:07:13 -08002442}
2443
Bart Van Asschec9b03c12011-09-03 09:34:48 +02002444static int srp_slave_configure(struct scsi_device *sdev)
2445{
2446 struct Scsi_Host *shost = sdev->host;
2447 struct srp_target_port *target = host_to_target(shost);
2448 struct request_queue *q = sdev->request_queue;
2449 unsigned long timeout;
2450
2451 if (sdev->type == TYPE_DISK) {
2452 timeout = max_t(unsigned, 30 * HZ, target->rq_tmo_jiffies);
2453 blk_queue_rq_timeout(q, timeout);
2454 }
2455
2456 return 0;
2457}
2458
Tony Jonesee959b02008-02-22 00:13:36 +01002459static ssize_t show_id_ext(struct device *dev, struct device_attribute *attr,
2460 char *buf)
Roland Dreier6ecb0c82006-03-20 10:08:23 -08002461{
Tony Jonesee959b02008-02-22 00:13:36 +01002462 struct srp_target_port *target = host_to_target(class_to_shost(dev));
Roland Dreier6ecb0c82006-03-20 10:08:23 -08002463
Roland Dreier6ecb0c82006-03-20 10:08:23 -08002464 return sprintf(buf, "0x%016llx\n",
2465 (unsigned long long) be64_to_cpu(target->id_ext));
2466}
2467
Tony Jonesee959b02008-02-22 00:13:36 +01002468static ssize_t show_ioc_guid(struct device *dev, struct device_attribute *attr,
2469 char *buf)
Roland Dreier6ecb0c82006-03-20 10:08:23 -08002470{
Tony Jonesee959b02008-02-22 00:13:36 +01002471 struct srp_target_port *target = host_to_target(class_to_shost(dev));
Roland Dreier6ecb0c82006-03-20 10:08:23 -08002472
Roland Dreier6ecb0c82006-03-20 10:08:23 -08002473 return sprintf(buf, "0x%016llx\n",
2474 (unsigned long long) be64_to_cpu(target->ioc_guid));
2475}
2476
Tony Jonesee959b02008-02-22 00:13:36 +01002477static ssize_t show_service_id(struct device *dev,
2478 struct device_attribute *attr, char *buf)
Roland Dreier6ecb0c82006-03-20 10:08:23 -08002479{
Tony Jonesee959b02008-02-22 00:13:36 +01002480 struct srp_target_port *target = host_to_target(class_to_shost(dev));
Roland Dreier6ecb0c82006-03-20 10:08:23 -08002481
Roland Dreier6ecb0c82006-03-20 10:08:23 -08002482 return sprintf(buf, "0x%016llx\n",
2483 (unsigned long long) be64_to_cpu(target->service_id));
2484}
2485
Tony Jonesee959b02008-02-22 00:13:36 +01002486static ssize_t show_pkey(struct device *dev, struct device_attribute *attr,
2487 char *buf)
Roland Dreier6ecb0c82006-03-20 10:08:23 -08002488{
Tony Jonesee959b02008-02-22 00:13:36 +01002489 struct srp_target_port *target = host_to_target(class_to_shost(dev));
Roland Dreier6ecb0c82006-03-20 10:08:23 -08002490
Bart Van Assche747fe002014-10-30 14:48:05 +01002491 return sprintf(buf, "0x%04x\n", be16_to_cpu(target->pkey));
Roland Dreier6ecb0c82006-03-20 10:08:23 -08002492}
2493
Bart Van Assche848b3082013-10-26 14:38:12 +02002494static ssize_t show_sgid(struct device *dev, struct device_attribute *attr,
2495 char *buf)
2496{
2497 struct srp_target_port *target = host_to_target(class_to_shost(dev));
2498
Bart Van Assche747fe002014-10-30 14:48:05 +01002499 return sprintf(buf, "%pI6\n", target->sgid.raw);
Bart Van Assche848b3082013-10-26 14:38:12 +02002500}
2501
Tony Jonesee959b02008-02-22 00:13:36 +01002502static ssize_t show_dgid(struct device *dev, struct device_attribute *attr,
2503 char *buf)
Roland Dreier6ecb0c82006-03-20 10:08:23 -08002504{
Tony Jonesee959b02008-02-22 00:13:36 +01002505 struct srp_target_port *target = host_to_target(class_to_shost(dev));
Bart Van Assche509c07b2014-10-30 14:48:30 +01002506 struct srp_rdma_ch *ch = &target->ch;
Roland Dreier6ecb0c82006-03-20 10:08:23 -08002507
Bart Van Assche509c07b2014-10-30 14:48:30 +01002508 return sprintf(buf, "%pI6\n", ch->path.dgid.raw);
Roland Dreier6ecb0c82006-03-20 10:08:23 -08002509}
2510
Tony Jonesee959b02008-02-22 00:13:36 +01002511static ssize_t show_orig_dgid(struct device *dev,
2512 struct device_attribute *attr, char *buf)
Ishai Rabinovitz3633b3d2007-05-06 21:18:11 -07002513{
Tony Jonesee959b02008-02-22 00:13:36 +01002514 struct srp_target_port *target = host_to_target(class_to_shost(dev));
Ishai Rabinovitz3633b3d2007-05-06 21:18:11 -07002515
Bart Van Assche747fe002014-10-30 14:48:05 +01002516 return sprintf(buf, "%pI6\n", target->orig_dgid.raw);
Ishai Rabinovitz3633b3d2007-05-06 21:18:11 -07002517}
2518
Bart Van Assche89de7482010-08-03 14:08:45 +00002519static ssize_t show_req_lim(struct device *dev,
2520 struct device_attribute *attr, char *buf)
2521{
2522 struct srp_target_port *target = host_to_target(class_to_shost(dev));
2523
Bart Van Assche509c07b2014-10-30 14:48:30 +01002524 return sprintf(buf, "%d\n", target->ch.req_lim);
Bart Van Assche89de7482010-08-03 14:08:45 +00002525}
2526
Tony Jonesee959b02008-02-22 00:13:36 +01002527static ssize_t show_zero_req_lim(struct device *dev,
2528 struct device_attribute *attr, char *buf)
Roland Dreier6bfa24f2006-06-17 20:37:33 -07002529{
Tony Jonesee959b02008-02-22 00:13:36 +01002530 struct srp_target_port *target = host_to_target(class_to_shost(dev));
Roland Dreier6bfa24f2006-06-17 20:37:33 -07002531
Roland Dreier6bfa24f2006-06-17 20:37:33 -07002532 return sprintf(buf, "%d\n", target->zero_req_lim);
2533}
2534
Tony Jonesee959b02008-02-22 00:13:36 +01002535static ssize_t show_local_ib_port(struct device *dev,
2536 struct device_attribute *attr, char *buf)
Ishai Rabinovitzded7f1a2006-08-15 17:34:52 +03002537{
Tony Jonesee959b02008-02-22 00:13:36 +01002538 struct srp_target_port *target = host_to_target(class_to_shost(dev));
Ishai Rabinovitzded7f1a2006-08-15 17:34:52 +03002539
2540 return sprintf(buf, "%d\n", target->srp_host->port);
2541}
2542
Tony Jonesee959b02008-02-22 00:13:36 +01002543static ssize_t show_local_ib_device(struct device *dev,
2544 struct device_attribute *attr, char *buf)
Ishai Rabinovitzded7f1a2006-08-15 17:34:52 +03002545{
Tony Jonesee959b02008-02-22 00:13:36 +01002546 struct srp_target_port *target = host_to_target(class_to_shost(dev));
Ishai Rabinovitzded7f1a2006-08-15 17:34:52 +03002547
Greg Kroah-Hartman05321932008-03-06 00:13:36 +01002548 return sprintf(buf, "%s\n", target->srp_host->srp_dev->dev->name);
Ishai Rabinovitzded7f1a2006-08-15 17:34:52 +03002549}
2550
Bart Van Assche4b5e5f42013-06-28 14:57:42 +02002551static ssize_t show_comp_vector(struct device *dev,
2552 struct device_attribute *attr, char *buf)
2553{
2554 struct srp_target_port *target = host_to_target(class_to_shost(dev));
2555
2556 return sprintf(buf, "%d\n", target->comp_vector);
2557}
2558
Vu Pham7bb312e2013-10-26 14:31:27 +02002559static ssize_t show_tl_retry_count(struct device *dev,
2560 struct device_attribute *attr, char *buf)
2561{
2562 struct srp_target_port *target = host_to_target(class_to_shost(dev));
2563
2564 return sprintf(buf, "%d\n", target->tl_retry_count);
2565}
2566
David Dillow49248642011-01-14 18:23:24 -05002567static ssize_t show_cmd_sg_entries(struct device *dev,
2568 struct device_attribute *attr, char *buf)
2569{
2570 struct srp_target_port *target = host_to_target(class_to_shost(dev));
2571
2572 return sprintf(buf, "%u\n", target->cmd_sg_cnt);
2573}
2574
David Dillowc07d4242011-01-16 13:57:10 -05002575static ssize_t show_allow_ext_sg(struct device *dev,
2576 struct device_attribute *attr, char *buf)
2577{
2578 struct srp_target_port *target = host_to_target(class_to_shost(dev));
2579
2580 return sprintf(buf, "%s\n", target->allow_ext_sg ? "true" : "false");
2581}
2582
Tony Jonesee959b02008-02-22 00:13:36 +01002583static DEVICE_ATTR(id_ext, S_IRUGO, show_id_ext, NULL);
2584static DEVICE_ATTR(ioc_guid, S_IRUGO, show_ioc_guid, NULL);
2585static DEVICE_ATTR(service_id, S_IRUGO, show_service_id, NULL);
2586static DEVICE_ATTR(pkey, S_IRUGO, show_pkey, NULL);
Bart Van Assche848b3082013-10-26 14:38:12 +02002587static DEVICE_ATTR(sgid, S_IRUGO, show_sgid, NULL);
Tony Jonesee959b02008-02-22 00:13:36 +01002588static DEVICE_ATTR(dgid, S_IRUGO, show_dgid, NULL);
2589static DEVICE_ATTR(orig_dgid, S_IRUGO, show_orig_dgid, NULL);
Bart Van Assche89de7482010-08-03 14:08:45 +00002590static DEVICE_ATTR(req_lim, S_IRUGO, show_req_lim, NULL);
Tony Jonesee959b02008-02-22 00:13:36 +01002591static DEVICE_ATTR(zero_req_lim, S_IRUGO, show_zero_req_lim, NULL);
2592static DEVICE_ATTR(local_ib_port, S_IRUGO, show_local_ib_port, NULL);
2593static DEVICE_ATTR(local_ib_device, S_IRUGO, show_local_ib_device, NULL);
Bart Van Assche4b5e5f42013-06-28 14:57:42 +02002594static DEVICE_ATTR(comp_vector, S_IRUGO, show_comp_vector, NULL);
Vu Pham7bb312e2013-10-26 14:31:27 +02002595static DEVICE_ATTR(tl_retry_count, S_IRUGO, show_tl_retry_count, NULL);
David Dillow49248642011-01-14 18:23:24 -05002596static DEVICE_ATTR(cmd_sg_entries, S_IRUGO, show_cmd_sg_entries, NULL);
David Dillowc07d4242011-01-16 13:57:10 -05002597static DEVICE_ATTR(allow_ext_sg, S_IRUGO, show_allow_ext_sg, NULL);
Roland Dreier6ecb0c82006-03-20 10:08:23 -08002598
Tony Jonesee959b02008-02-22 00:13:36 +01002599static struct device_attribute *srp_host_attrs[] = {
2600 &dev_attr_id_ext,
2601 &dev_attr_ioc_guid,
2602 &dev_attr_service_id,
2603 &dev_attr_pkey,
Bart Van Assche848b3082013-10-26 14:38:12 +02002604 &dev_attr_sgid,
Tony Jonesee959b02008-02-22 00:13:36 +01002605 &dev_attr_dgid,
2606 &dev_attr_orig_dgid,
Bart Van Assche89de7482010-08-03 14:08:45 +00002607 &dev_attr_req_lim,
Tony Jonesee959b02008-02-22 00:13:36 +01002608 &dev_attr_zero_req_lim,
2609 &dev_attr_local_ib_port,
2610 &dev_attr_local_ib_device,
Bart Van Assche4b5e5f42013-06-28 14:57:42 +02002611 &dev_attr_comp_vector,
Vu Pham7bb312e2013-10-26 14:31:27 +02002612 &dev_attr_tl_retry_count,
David Dillow49248642011-01-14 18:23:24 -05002613 &dev_attr_cmd_sg_entries,
David Dillowc07d4242011-01-16 13:57:10 -05002614 &dev_attr_allow_ext_sg,
Roland Dreier6ecb0c82006-03-20 10:08:23 -08002615 NULL
2616};
2617
Roland Dreieraef9ec32005-11-02 14:07:13 -08002618static struct scsi_host_template srp_template = {
2619 .module = THIS_MODULE,
Roland Dreierb7f008f2007-05-06 21:18:11 -07002620 .name = "InfiniBand SRP initiator",
2621 .proc_name = DRV_NAME,
Bart Van Asschec9b03c12011-09-03 09:34:48 +02002622 .slave_configure = srp_slave_configure,
Roland Dreieraef9ec32005-11-02 14:07:13 -08002623 .info = srp_target_info,
2624 .queuecommand = srp_queuecommand,
Jack Wang71444b92013-11-07 11:37:37 +01002625 .change_queue_depth = srp_change_queue_depth,
Christoph Hellwiga62182f2014-10-02 14:39:55 +02002626 .change_queue_type = scsi_change_queue_type,
Roland Dreieraef9ec32005-11-02 14:07:13 -08002627 .eh_abort_handler = srp_abort,
2628 .eh_device_reset_handler = srp_reset_device,
2629 .eh_host_reset_handler = srp_reset_host,
Bart Van Assche2742c1d2013-06-12 15:24:25 +02002630 .skip_settle_delay = true,
David Dillow49248642011-01-14 18:23:24 -05002631 .sg_tablesize = SRP_DEF_SG_TABLESIZE,
Bart Van Assche4d73f952013-10-26 14:40:37 +02002632 .can_queue = SRP_DEFAULT_CMD_SQ_SIZE,
Roland Dreieraef9ec32005-11-02 14:07:13 -08002633 .this_id = -1,
Bart Van Assche4d73f952013-10-26 14:40:37 +02002634 .cmd_per_lun = SRP_DEFAULT_CMD_SQ_SIZE,
Roland Dreier6ecb0c82006-03-20 10:08:23 -08002635 .use_clustering = ENABLE_CLUSTERING,
2636 .shost_attrs = srp_host_attrs
Roland Dreieraef9ec32005-11-02 14:07:13 -08002637};
2638
Bart Van Assche34aa6542014-10-30 14:47:22 +01002639static int srp_sdev_count(struct Scsi_Host *host)
2640{
2641 struct scsi_device *sdev;
2642 int c = 0;
2643
2644 shost_for_each_device(sdev, host)
2645 c++;
2646
2647 return c;
2648}
2649
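/*
 * Register the SCSI host, add the rport and scan for LUNs. The target
 * stays in SRP_TARGET_SCANNING while scsi_scan_target() runs; if the
 * connection is lost during the scan the SCSI host is removed again,
 * otherwise the state is switched to SRP_TARGET_LIVE.
 */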
Roland Dreieraef9ec32005-11-02 14:07:13 -08002650static int srp_add_target(struct srp_host *host, struct srp_target_port *target)
2651{
FUJITA Tomonori32368222007-06-27 16:33:12 +09002652 struct srp_rport_identifiers ids;
2653 struct srp_rport *rport;
2654
Bart Van Assche34aa6542014-10-30 14:47:22 +01002655 target->state = SRP_TARGET_SCANNING;
Roland Dreieraef9ec32005-11-02 14:07:13 -08002656 sprintf(target->target_name, "SRP.T10:%016llX",
2657 (unsigned long long) be64_to_cpu(target->id_ext));
2658
Greg Kroah-Hartman05321932008-03-06 00:13:36 +01002659 if (scsi_add_host(target->scsi_host, host->srp_dev->dev->dma_device))
Roland Dreieraef9ec32005-11-02 14:07:13 -08002660 return -ENODEV;
2661
FUJITA Tomonori32368222007-06-27 16:33:12 +09002662 memcpy(ids.port_id, &target->id_ext, 8);
2663 memcpy(ids.port_id + 8, &target->ioc_guid, 8);
FUJITA Tomonoriaebd5e42007-07-11 15:08:15 +09002664 ids.roles = SRP_RPORT_ROLE_TARGET;
FUJITA Tomonori32368222007-06-27 16:33:12 +09002665 rport = srp_rport_add(target->scsi_host, &ids);
2666 if (IS_ERR(rport)) {
2667 scsi_remove_host(target->scsi_host);
2668 return PTR_ERR(rport);
2669 }
2670
Bart Van Asschedc1bdbd2011-09-16 20:41:13 +02002671 rport->lld_data = target;
Bart Van Assche9dd69a62013-10-26 14:32:30 +02002672 target->rport = rport;
Bart Van Asschedc1bdbd2011-09-16 20:41:13 +02002673
Matthew Wilcoxb3589fd2006-06-17 20:37:30 -07002674 spin_lock(&host->target_lock);
Roland Dreieraef9ec32005-11-02 14:07:13 -08002675 list_add_tail(&target->list, &host->target_list);
Matthew Wilcoxb3589fd2006-06-17 20:37:30 -07002676 spin_unlock(&host->target_lock);
Roland Dreieraef9ec32005-11-02 14:07:13 -08002677
Roland Dreieraef9ec32005-11-02 14:07:13 -08002678 scsi_scan_target(&target->scsi_host->shost_gendev,
Matthew Wilcox1962a4a2006-06-17 20:37:30 -07002679 0, target->scsi_id, SCAN_WILD_CARD, 0);
Roland Dreieraef9ec32005-11-02 14:07:13 -08002680
Bart Van Assche34aa6542014-10-30 14:47:22 +01002681 if (!target->connected || target->qp_in_error) {
2682 shost_printk(KERN_INFO, target->scsi_host,
2683 PFX "SCSI scan failed - removing SCSI host\n");
2684 srp_queue_remove_work(target);
2685 goto out;
2686 }
2687
2688	pr_debug("%s: SCSI scan succeeded - detected %d LUNs\n",
2689 dev_name(&target->scsi_host->shost_gendev),
2690 srp_sdev_count(target->scsi_host));
2691
2692 spin_lock_irq(&target->lock);
2693 if (target->state == SRP_TARGET_SCANNING)
2694 target->state = SRP_TARGET_LIVE;
2695 spin_unlock_irq(&target->lock);
2696
2697out:
Roland Dreieraef9ec32005-11-02 14:07:13 -08002698 return 0;
2699}
2700
Tony Jonesee959b02008-02-22 00:13:36 +01002701static void srp_release_dev(struct device *dev)
Roland Dreieraef9ec32005-11-02 14:07:13 -08002702{
2703 struct srp_host *host =
Tony Jonesee959b02008-02-22 00:13:36 +01002704 container_of(dev, struct srp_host, dev);
Roland Dreieraef9ec32005-11-02 14:07:13 -08002705
2706 complete(&host->released);
2707}
2708
2709static struct class srp_class = {
2710 .name = "infiniband_srp",
Tony Jonesee959b02008-02-22 00:13:36 +01002711 .dev_release = srp_release_dev
Roland Dreieraef9ec32005-11-02 14:07:13 -08002712};
2713
Bart Van Assche96fc2482013-06-28 14:51:26 +02002714/**
2715 * srp_conn_unique() - check whether the connection to a target is unique
Bart Van Asscheaf246632014-05-20 15:04:21 +02002716 * @host: SRP host.
2717 * @target: SRP target port.
Bart Van Assche96fc2482013-06-28 14:51:26 +02002718 */
2719static bool srp_conn_unique(struct srp_host *host,
2720 struct srp_target_port *target)
2721{
2722 struct srp_target_port *t;
2723 bool ret = false;
2724
2725 if (target->state == SRP_TARGET_REMOVED)
2726 goto out;
2727
2728 ret = true;
2729
2730 spin_lock(&host->target_lock);
2731 list_for_each_entry(t, &host->target_list, list) {
2732 if (t != target &&
2733 target->id_ext == t->id_ext &&
2734 target->ioc_guid == t->ioc_guid &&
2735 target->initiator_ext == t->initiator_ext) {
2736 ret = false;
2737 break;
2738 }
2739 }
2740 spin_unlock(&host->target_lock);
2741
2742out:
2743 return ret;
2744}
2745
Roland Dreieraef9ec32005-11-02 14:07:13 -08002746/*
2747 * Target ports are added by writing
2748 *
2749 * id_ext=<SRP ID ext>,ioc_guid=<SRP IOC GUID>,dgid=<dest GID>,
2750 * pkey=<P_Key>,service_id=<service ID>
2751 *
2752 * to the add_target sysfs attribute.
2753 */
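/*
 * For example (the HCA name, port number and all identifiers below are
 * purely illustrative), a target port could be created with:
 *
 *   echo id_ext=200100e08b000001,ioc_guid=00117500000000aa,\
 *   dgid=fe800000000000000002c90300000001,pkey=ffff,service_id=0002c90300000001 \
 *     > /sys/class/infiniband_srp/srp-mlx4_0-1/add_target
 */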
2754enum {
2755 SRP_OPT_ERR = 0,
2756 SRP_OPT_ID_EXT = 1 << 0,
2757 SRP_OPT_IOC_GUID = 1 << 1,
2758 SRP_OPT_DGID = 1 << 2,
2759 SRP_OPT_PKEY = 1 << 3,
2760 SRP_OPT_SERVICE_ID = 1 << 4,
2761 SRP_OPT_MAX_SECT = 1 << 5,
Vu Pham52fb2b502006-06-17 20:37:31 -07002762 SRP_OPT_MAX_CMD_PER_LUN = 1 << 6,
Ramachandra K0c0450db2006-06-17 20:37:38 -07002763 SRP_OPT_IO_CLASS = 1 << 7,
Ishai Rabinovitz01cb9bc2006-10-04 15:28:56 +02002764 SRP_OPT_INITIATOR_EXT = 1 << 8,
David Dillow49248642011-01-14 18:23:24 -05002765 SRP_OPT_CMD_SG_ENTRIES = 1 << 9,
David Dillowc07d4242011-01-16 13:57:10 -05002766 SRP_OPT_ALLOW_EXT_SG = 1 << 10,
2767 SRP_OPT_SG_TABLESIZE = 1 << 11,
Bart Van Assche4b5e5f42013-06-28 14:57:42 +02002768 SRP_OPT_COMP_VECTOR = 1 << 12,
Vu Pham7bb312e2013-10-26 14:31:27 +02002769 SRP_OPT_TL_RETRY_COUNT = 1 << 13,
Bart Van Assche4d73f952013-10-26 14:40:37 +02002770 SRP_OPT_QUEUE_SIZE = 1 << 14,
Roland Dreieraef9ec32005-11-02 14:07:13 -08002771 SRP_OPT_ALL = (SRP_OPT_ID_EXT |
2772 SRP_OPT_IOC_GUID |
2773 SRP_OPT_DGID |
2774 SRP_OPT_PKEY |
2775 SRP_OPT_SERVICE_ID),
2776};
2777
Steven Whitehousea447c092008-10-13 10:46:57 +01002778static const match_table_t srp_opt_tokens = {
Vu Pham52fb2b502006-06-17 20:37:31 -07002779 { SRP_OPT_ID_EXT, "id_ext=%s" },
2780 { SRP_OPT_IOC_GUID, "ioc_guid=%s" },
2781 { SRP_OPT_DGID, "dgid=%s" },
2782 { SRP_OPT_PKEY, "pkey=%x" },
2783 { SRP_OPT_SERVICE_ID, "service_id=%s" },
2784 { SRP_OPT_MAX_SECT, "max_sect=%d" },
2785 { SRP_OPT_MAX_CMD_PER_LUN, "max_cmd_per_lun=%d" },
Ramachandra K0c0450db2006-06-17 20:37:38 -07002786 { SRP_OPT_IO_CLASS, "io_class=%x" },
Ishai Rabinovitz01cb9bc2006-10-04 15:28:56 +02002787 { SRP_OPT_INITIATOR_EXT, "initiator_ext=%s" },
David Dillow49248642011-01-14 18:23:24 -05002788 { SRP_OPT_CMD_SG_ENTRIES, "cmd_sg_entries=%u" },
David Dillowc07d4242011-01-16 13:57:10 -05002789 { SRP_OPT_ALLOW_EXT_SG, "allow_ext_sg=%u" },
2790 { SRP_OPT_SG_TABLESIZE, "sg_tablesize=%u" },
Bart Van Assche4b5e5f42013-06-28 14:57:42 +02002791 { SRP_OPT_COMP_VECTOR, "comp_vector=%u" },
Vu Pham7bb312e2013-10-26 14:31:27 +02002792 { SRP_OPT_TL_RETRY_COUNT, "tl_retry_count=%u" },
Bart Van Assche4d73f952013-10-26 14:40:37 +02002793 { SRP_OPT_QUEUE_SIZE, "queue_size=%d" },
Vu Pham52fb2b502006-06-17 20:37:31 -07002794 { SRP_OPT_ERR, NULL }
Roland Dreieraef9ec32005-11-02 14:07:13 -08002795};
2796
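/*
 * Parse the comma-separated option string written to add_target into
 * @target. An unknown option aborts the parse; afterwards -EINVAL is
 * returned and a warning is printed if any mandatory option (see
 * SRP_OPT_ALL) is missing.
 */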
2797static int srp_parse_options(const char *buf, struct srp_target_port *target)
2798{
2799 char *options, *sep_opt;
2800 char *p;
2801 char dgid[3];
2802 substring_t args[MAX_OPT_ARGS];
2803 int opt_mask = 0;
2804 int token;
2805 int ret = -EINVAL;
2806 int i;
2807
2808 options = kstrdup(buf, GFP_KERNEL);
2809 if (!options)
2810 return -ENOMEM;
2811
2812 sep_opt = options;
2813 while ((p = strsep(&sep_opt, ",")) != NULL) {
2814 if (!*p)
2815 continue;
2816
2817 token = match_token(p, srp_opt_tokens, args);
2818 opt_mask |= token;
2819
2820 switch (token) {
2821 case SRP_OPT_ID_EXT:
2822 p = match_strdup(args);
Ishai Rabinovitza20f3a62007-01-16 17:20:25 +02002823 if (!p) {
2824 ret = -ENOMEM;
2825 goto out;
2826 }
Roland Dreieraef9ec32005-11-02 14:07:13 -08002827 target->id_ext = cpu_to_be64(simple_strtoull(p, NULL, 16));
2828 kfree(p);
2829 break;
2830
2831 case SRP_OPT_IOC_GUID:
2832 p = match_strdup(args);
Ishai Rabinovitza20f3a62007-01-16 17:20:25 +02002833 if (!p) {
2834 ret = -ENOMEM;
2835 goto out;
2836 }
Roland Dreieraef9ec32005-11-02 14:07:13 -08002837 target->ioc_guid = cpu_to_be64(simple_strtoull(p, NULL, 16));
2838 kfree(p);
2839 break;
2840
2841 case SRP_OPT_DGID:
2842 p = match_strdup(args);
Ishai Rabinovitza20f3a62007-01-16 17:20:25 +02002843 if (!p) {
2844 ret = -ENOMEM;
2845 goto out;
2846 }
Roland Dreieraef9ec32005-11-02 14:07:13 -08002847 if (strlen(p) != 32) {
Bart Van Asschee0bda7d2012-01-14 12:39:44 +00002848 pr_warn("bad dest GID parameter '%s'\n", p);
Roland Dreierce1823f2006-04-03 09:31:04 -07002849 kfree(p);
Roland Dreieraef9ec32005-11-02 14:07:13 -08002850 goto out;
2851 }
2852
2853 for (i = 0; i < 16; ++i) {
Bart Van Assche747fe002014-10-30 14:48:05 +01002854 strlcpy(dgid, p + i * 2, sizeof(dgid));
2855 if (sscanf(dgid, "%hhx",
2856 &target->orig_dgid.raw[i]) < 1) {
2857 ret = -EINVAL;
2858 kfree(p);
2859 goto out;
2860 }
Roland Dreieraef9ec32005-11-02 14:07:13 -08002861 }
Roland Dreierbf17c1c2006-03-20 10:08:25 -08002862 kfree(p);
Roland Dreieraef9ec32005-11-02 14:07:13 -08002863 break;
2864
2865 case SRP_OPT_PKEY:
2866 if (match_hex(args, &token)) {
Bart Van Asschee0bda7d2012-01-14 12:39:44 +00002867 pr_warn("bad P_Key parameter '%s'\n", p);
Roland Dreieraef9ec32005-11-02 14:07:13 -08002868 goto out;
2869 }
Bart Van Assche747fe002014-10-30 14:48:05 +01002870 target->pkey = cpu_to_be16(token);
Roland Dreieraef9ec32005-11-02 14:07:13 -08002871 break;
2872
2873 case SRP_OPT_SERVICE_ID:
2874 p = match_strdup(args);
Ishai Rabinovitza20f3a62007-01-16 17:20:25 +02002875 if (!p) {
2876 ret = -ENOMEM;
2877 goto out;
2878 }
Roland Dreieraef9ec32005-11-02 14:07:13 -08002879 target->service_id = cpu_to_be64(simple_strtoull(p, NULL, 16));
2880 kfree(p);
2881 break;
2882
2883 case SRP_OPT_MAX_SECT:
2884 if (match_int(args, &token)) {
Bart Van Asschee0bda7d2012-01-14 12:39:44 +00002885 pr_warn("bad max sect parameter '%s'\n", p);
Roland Dreieraef9ec32005-11-02 14:07:13 -08002886 goto out;
2887 }
2888 target->scsi_host->max_sectors = token;
2889 break;
2890
Bart Van Assche4d73f952013-10-26 14:40:37 +02002891 case SRP_OPT_QUEUE_SIZE:
2892 if (match_int(args, &token) || token < 1) {
2893 pr_warn("bad queue_size parameter '%s'\n", p);
2894 goto out;
2895 }
2896 target->scsi_host->can_queue = token;
2897 target->queue_size = token + SRP_RSP_SQ_SIZE +
2898 SRP_TSK_MGMT_SQ_SIZE;
2899 if (!(opt_mask & SRP_OPT_MAX_CMD_PER_LUN))
2900 target->scsi_host->cmd_per_lun = token;
2901 break;
2902
Vu Pham52fb2b502006-06-17 20:37:31 -07002903 case SRP_OPT_MAX_CMD_PER_LUN:
Bart Van Assche4d73f952013-10-26 14:40:37 +02002904 if (match_int(args, &token) || token < 1) {
Bart Van Asschee0bda7d2012-01-14 12:39:44 +00002905 pr_warn("bad max cmd_per_lun parameter '%s'\n",
2906 p);
Vu Pham52fb2b502006-06-17 20:37:31 -07002907 goto out;
2908 }
Bart Van Assche4d73f952013-10-26 14:40:37 +02002909 target->scsi_host->cmd_per_lun = token;
Vu Pham52fb2b502006-06-17 20:37:31 -07002910 break;
2911
Ramachandra K0c0450db2006-06-17 20:37:38 -07002912 case SRP_OPT_IO_CLASS:
2913 if (match_hex(args, &token)) {
Bart Van Asschee0bda7d2012-01-14 12:39:44 +00002914 pr_warn("bad IO class parameter '%s'\n", p);
Ramachandra K0c0450db2006-06-17 20:37:38 -07002915 goto out;
2916 }
2917 if (token != SRP_REV10_IB_IO_CLASS &&
2918 token != SRP_REV16A_IB_IO_CLASS) {
Bart Van Asschee0bda7d2012-01-14 12:39:44 +00002919 pr_warn("unknown IO class parameter value %x specified (use %x or %x).\n",
2920 token, SRP_REV10_IB_IO_CLASS,
2921 SRP_REV16A_IB_IO_CLASS);
Ramachandra K0c0450db2006-06-17 20:37:38 -07002922 goto out;
2923 }
2924 target->io_class = token;
2925 break;
2926
Ishai Rabinovitz01cb9bc2006-10-04 15:28:56 +02002927 case SRP_OPT_INITIATOR_EXT:
2928 p = match_strdup(args);
Ishai Rabinovitza20f3a62007-01-16 17:20:25 +02002929 if (!p) {
2930 ret = -ENOMEM;
2931 goto out;
2932 }
Ishai Rabinovitz01cb9bc2006-10-04 15:28:56 +02002933 target->initiator_ext = cpu_to_be64(simple_strtoull(p, NULL, 16));
2934 kfree(p);
2935 break;
2936
David Dillow49248642011-01-14 18:23:24 -05002937 case SRP_OPT_CMD_SG_ENTRIES:
2938 if (match_int(args, &token) || token < 1 || token > 255) {
Bart Van Asschee0bda7d2012-01-14 12:39:44 +00002939 pr_warn("bad max cmd_sg_entries parameter '%s'\n",
2940 p);
David Dillow49248642011-01-14 18:23:24 -05002941 goto out;
2942 }
2943 target->cmd_sg_cnt = token;
2944 break;
2945
David Dillowc07d4242011-01-16 13:57:10 -05002946 case SRP_OPT_ALLOW_EXT_SG:
2947 if (match_int(args, &token)) {
Bart Van Asschee0bda7d2012-01-14 12:39:44 +00002948 pr_warn("bad allow_ext_sg parameter '%s'\n", p);
David Dillowc07d4242011-01-16 13:57:10 -05002949 goto out;
2950 }
2951 target->allow_ext_sg = !!token;
2952 break;
2953
2954 case SRP_OPT_SG_TABLESIZE:
2955 if (match_int(args, &token) || token < 1 ||
2956 token > SCSI_MAX_SG_CHAIN_SEGMENTS) {
Bart Van Asschee0bda7d2012-01-14 12:39:44 +00002957 pr_warn("bad max sg_tablesize parameter '%s'\n",
2958 p);
David Dillowc07d4242011-01-16 13:57:10 -05002959 goto out;
2960 }
2961 target->sg_tablesize = token;
2962 break;
2963
Bart Van Assche4b5e5f42013-06-28 14:57:42 +02002964 case SRP_OPT_COMP_VECTOR:
2965 if (match_int(args, &token) || token < 0) {
2966 pr_warn("bad comp_vector parameter '%s'\n", p);
2967 goto out;
2968 }
2969 target->comp_vector = token;
2970 break;
2971
Vu Pham7bb312e2013-10-26 14:31:27 +02002972 case SRP_OPT_TL_RETRY_COUNT:
2973 if (match_int(args, &token) || token < 2 || token > 7) {
2974 pr_warn("bad tl_retry_count parameter '%s' (must be a number between 2 and 7)\n",
2975 p);
2976 goto out;
2977 }
2978 target->tl_retry_count = token;
2979 break;
2980
Roland Dreieraef9ec32005-11-02 14:07:13 -08002981 default:
Bart Van Asschee0bda7d2012-01-14 12:39:44 +00002982 pr_warn("unknown parameter or missing value '%s' in target creation request\n",
2983 p);
Roland Dreieraef9ec32005-11-02 14:07:13 -08002984 goto out;
2985 }
2986 }
2987
2988 if ((opt_mask & SRP_OPT_ALL) == SRP_OPT_ALL)
2989 ret = 0;
2990 else
2991 for (i = 0; i < ARRAY_SIZE(srp_opt_tokens); ++i)
2992 if ((srp_opt_tokens[i].token & SRP_OPT_ALL) &&
2993 !(srp_opt_tokens[i].token & opt_mask))
Bart Van Asschee0bda7d2012-01-14 12:39:44 +00002994 pr_warn("target creation request is missing parameter '%s'\n",
2995 srp_opt_tokens[i].pattern);
Roland Dreieraef9ec32005-11-02 14:07:13 -08002996
Bart Van Assche4d73f952013-10-26 14:40:37 +02002997 if (target->scsi_host->cmd_per_lun > target->scsi_host->can_queue
2998 && (opt_mask & SRP_OPT_MAX_CMD_PER_LUN))
2999 pr_warn("cmd_per_lun = %d > queue_size = %d\n",
3000 target->scsi_host->cmd_per_lun,
3001 target->scsi_host->can_queue);
3002
Roland Dreieraef9ec32005-11-02 14:07:13 -08003003out:
3004 kfree(options);
3005 return ret;
3006}
3007
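/*
 * Handler for writes to the add_target sysfs attribute: allocate a SCSI
 * host and target port, parse the option string, verify that no connection
 * to the same target port exists yet, set up the RDMA channel (request
 * ring, QP/CQs and CM ID), connect, and finally register the SCSI host and
 * scan for LUNs via srp_add_target().
 */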
Tony Jonesee959b02008-02-22 00:13:36 +01003008static ssize_t srp_create_target(struct device *dev,
3009 struct device_attribute *attr,
Roland Dreieraef9ec32005-11-02 14:07:13 -08003010 const char *buf, size_t count)
3011{
3012 struct srp_host *host =
Tony Jonesee959b02008-02-22 00:13:36 +01003013 container_of(dev, struct srp_host, dev);
Roland Dreieraef9ec32005-11-02 14:07:13 -08003014 struct Scsi_Host *target_host;
3015 struct srp_target_port *target;
Bart Van Assche509c07b2014-10-30 14:48:30 +01003016 struct srp_rdma_ch *ch;
Bart Van Assched1b42892014-05-20 15:07:20 +02003017 struct srp_device *srp_dev = host->srp_dev;
3018 struct ib_device *ibdev = srp_dev->dev;
Bart Van Asscheb81d00b2013-10-26 14:38:47 +02003019 int ret;
Roland Dreieraef9ec32005-11-02 14:07:13 -08003020
3021 target_host = scsi_host_alloc(&srp_template,
3022 sizeof (struct srp_target_port));
3023 if (!target_host)
3024 return -ENOMEM;
3025
David Dillow49248642011-01-14 18:23:24 -05003026 target_host->transportt = ib_srp_transport_template;
Bart Van Asschefd1b6c42011-07-13 09:19:16 -07003027 target_host->max_channel = 0;
3028 target_host->max_id = 1;
Arne Redlich3c8edf02006-11-15 12:43:00 +01003029 target_host->max_lun = SRP_MAX_LUN;
3030 target_host->max_cmd_len = sizeof ((struct srp_cmd *) (void *) 0L)->cdb;
Roland Dreier5f068992005-11-11 14:06:01 -08003031
Roland Dreieraef9ec32005-11-02 14:07:13 -08003032 target = host_to_target(target_host);
Roland Dreieraef9ec32005-11-02 14:07:13 -08003033
David Dillow49248642011-01-14 18:23:24 -05003034 target->io_class = SRP_REV16A_IB_IO_CLASS;
3035 target->scsi_host = target_host;
3036 target->srp_host = host;
3037 target->lkey = host->srp_dev->mr->lkey;
3038 target->rkey = host->srp_dev->mr->rkey;
3039 target->cmd_sg_cnt = cmd_sg_entries;
David Dillowc07d4242011-01-16 13:57:10 -05003040 target->sg_tablesize = indirect_sg_entries ? : cmd_sg_entries;
3041 target->allow_ext_sg = allow_ext_sg;
Vu Pham7bb312e2013-10-26 14:31:27 +02003042 target->tl_retry_count = 7;
Bart Van Assche4d73f952013-10-26 14:40:37 +02003043 target->queue_size = SRP_DEFAULT_QUEUE_SIZE;
Roland Dreieraef9ec32005-11-02 14:07:13 -08003044
Bart Van Assche34aa6542014-10-30 14:47:22 +01003045 /*
3046	 * Prevent the SCSI host from being removed by srp_remove_target()
3047	 * before this function returns.
3048 */
3049 scsi_host_get(target->scsi_host);
3050
Bart Van Assche2d7091b2014-03-14 13:52:45 +01003051 mutex_lock(&host->add_target_mutex);
3052
Roland Dreieraef9ec32005-11-02 14:07:13 -08003053 ret = srp_parse_options(buf, target);
3054 if (ret)
3055 goto err;
3056
Bart Van Assche4d73f952013-10-26 14:40:37 +02003057 target->req_ring_size = target->queue_size - SRP_TSK_MGMT_SQ_SIZE;
3058
Bart Van Assche96fc2482013-06-28 14:51:26 +02003059 if (!srp_conn_unique(target->srp_host, target)) {
3060 shost_printk(KERN_INFO, target->scsi_host,
3061 PFX "Already connected to target port with id_ext=%016llx;ioc_guid=%016llx;initiator_ext=%016llx\n",
3062 be64_to_cpu(target->id_ext),
3063 be64_to_cpu(target->ioc_guid),
3064 be64_to_cpu(target->initiator_ext));
3065 ret = -EEXIST;
3066 goto err;
3067 }
3068
Bart Van Assche5cfb1782014-05-20 15:08:34 +02003069 if (!srp_dev->has_fmr && !srp_dev->has_fr && !target->allow_ext_sg &&
Bart Van Assched1b42892014-05-20 15:07:20 +02003070 target->cmd_sg_cnt < target->sg_tablesize) {
Bart Van Assche5cfb1782014-05-20 15:08:34 +02003071 pr_warn("No MR pool and no external indirect descriptors, limiting sg_tablesize to cmd_sg_cnt\n");
David Dillowc07d4242011-01-16 13:57:10 -05003072 target->sg_tablesize = target->cmd_sg_cnt;
3073 }
3074
3075 target_host->sg_tablesize = target->sg_tablesize;
3076 target->indirect_size = target->sg_tablesize *
3077 sizeof (struct srp_direct_buf);
David Dillow49248642011-01-14 18:23:24 -05003078 target->max_iu_len = sizeof (struct srp_cmd) +
3079 sizeof (struct srp_indirect_buf) +
3080 target->cmd_sg_cnt * sizeof (struct srp_direct_buf);
3081
Bart Van Asschec1120f82013-10-26 14:35:08 +02003082 INIT_WORK(&target->tl_err_work, srp_tl_err_work);
Bart Van Asscheef6c49d2011-12-26 16:49:18 +00003083 INIT_WORK(&target->remove_work, srp_remove_work);
David Dillow8f26c9f2011-01-14 19:45:50 -05003084 spin_lock_init(&target->lock);
Bart Van Assche509c07b2014-10-30 14:48:30 +01003085 ch = &target->ch;
3086 ch->target = target;
3087 ch->comp_vector = target->comp_vector;
3088 spin_lock_init(&ch->lock);
3089 INIT_LIST_HEAD(&ch->free_tx);
3090 ret = srp_alloc_req_data(ch);
Bart Van Asscheb81d00b2013-10-26 14:38:47 +02003091 if (ret)
3092 goto err_free_mem;
David Dillow8f26c9f2011-01-14 19:45:50 -05003093
Bart Van Assche747fe002014-10-30 14:48:05 +01003094 ret = ib_query_gid(ibdev, host->port, 0, &target->sgid);
Sagi Grimberg2088ca62014-03-14 13:51:58 +01003095 if (ret)
3096 goto err_free_mem;
Roland Dreieraef9ec32005-11-02 14:07:13 -08003097
Bart Van Assche509c07b2014-10-30 14:48:30 +01003098 ret = srp_create_ch_ib(ch);
Roland Dreieraef9ec32005-11-02 14:07:13 -08003099 if (ret)
David Dillow8f26c9f2011-01-14 19:45:50 -05003100 goto err_free_mem;
Roland Dreieraef9ec32005-11-02 14:07:13 -08003101
Bart Van Assche509c07b2014-10-30 14:48:30 +01003102 ret = srp_new_cm_id(ch);
David Dillow9fe4bcf2008-01-08 17:08:52 -05003103 if (ret)
David Dillow8f26c9f2011-01-14 19:45:50 -05003104 goto err_free_ib;
Roland Dreieraef9ec32005-11-02 14:07:13 -08003105
Bart Van Assche509c07b2014-10-30 14:48:30 +01003106 ret = srp_connect_ch(ch);
Roland Dreieraef9ec32005-11-02 14:07:13 -08003107 if (ret) {
David Dillow7aa54bd2008-01-07 18:23:41 -05003108 shost_printk(KERN_ERR, target->scsi_host,
3109 PFX "Connection failed\n");
Bart Van Assche394c5952014-10-30 14:46:27 +01003110 goto err_free_ib;
Roland Dreieraef9ec32005-11-02 14:07:13 -08003111 }
3112
3113 ret = srp_add_target(host, target);
3114 if (ret)
3115 goto err_disconnect;
3116
Bart Van Assche34aa6542014-10-30 14:47:22 +01003117 if (target->state != SRP_TARGET_REMOVED) {
3118 shost_printk(KERN_DEBUG, target->scsi_host, PFX
3119 "new target: id_ext %016llx ioc_guid %016llx pkey %04x service_id %016llx sgid %pI6 dgid %pI6\n",
3120 be64_to_cpu(target->id_ext),
3121 be64_to_cpu(target->ioc_guid),
Bart Van Assche747fe002014-10-30 14:48:05 +01003122 be16_to_cpu(target->pkey),
Bart Van Assche34aa6542014-10-30 14:47:22 +01003123 be64_to_cpu(target->service_id),
Bart Van Assche747fe002014-10-30 14:48:05 +01003124 target->sgid.raw, target->orig_dgid.raw);
Bart Van Assche34aa6542014-10-30 14:47:22 +01003125 }
Bart Van Asschee7ffde02014-03-14 13:52:21 +01003126
Bart Van Assche2d7091b2014-03-14 13:52:45 +01003127 ret = count;
3128
3129out:
3130 mutex_unlock(&host->add_target_mutex);
Bart Van Assche34aa6542014-10-30 14:47:22 +01003131
3132 scsi_host_put(target->scsi_host);
3133
Bart Van Assche2d7091b2014-03-14 13:52:45 +01003134 return ret;
Roland Dreieraef9ec32005-11-02 14:07:13 -08003135
3136err_disconnect:
3137 srp_disconnect_target(target);
3138
David Dillow8f26c9f2011-01-14 19:45:50 -05003139err_free_ib:
Bart Van Assche509c07b2014-10-30 14:48:30 +01003140 srp_free_ch_ib(target, ch);
Roland Dreieraef9ec32005-11-02 14:07:13 -08003141
David Dillow8f26c9f2011-01-14 19:45:50 -05003142err_free_mem:
Bart Van Assche509c07b2014-10-30 14:48:30 +01003143 srp_free_req_data(target, ch);
David Dillow8f26c9f2011-01-14 19:45:50 -05003144
Roland Dreieraef9ec32005-11-02 14:07:13 -08003145err:
3146 scsi_host_put(target_host);
Bart Van Assche2d7091b2014-03-14 13:52:45 +01003147 goto out;
Roland Dreieraef9ec32005-11-02 14:07:13 -08003148}
3149
Tony Jonesee959b02008-02-22 00:13:36 +01003150static DEVICE_ATTR(add_target, S_IWUSR, NULL, srp_create_target);
Roland Dreieraef9ec32005-11-02 14:07:13 -08003151
Tony Jonesee959b02008-02-22 00:13:36 +01003152static ssize_t show_ibdev(struct device *dev, struct device_attribute *attr,
3153 char *buf)
Roland Dreieraef9ec32005-11-02 14:07:13 -08003154{
Tony Jonesee959b02008-02-22 00:13:36 +01003155 struct srp_host *host = container_of(dev, struct srp_host, dev);
Roland Dreieraef9ec32005-11-02 14:07:13 -08003156
Greg Kroah-Hartman05321932008-03-06 00:13:36 +01003157 return sprintf(buf, "%s\n", host->srp_dev->dev->name);
Roland Dreieraef9ec32005-11-02 14:07:13 -08003158}
3159
Tony Jonesee959b02008-02-22 00:13:36 +01003160static DEVICE_ATTR(ibdev, S_IRUGO, show_ibdev, NULL);
Roland Dreieraef9ec32005-11-02 14:07:13 -08003161
Tony Jonesee959b02008-02-22 00:13:36 +01003162static ssize_t show_port(struct device *dev, struct device_attribute *attr,
3163 char *buf)
Roland Dreieraef9ec32005-11-02 14:07:13 -08003164{
Tony Jonesee959b02008-02-22 00:13:36 +01003165 struct srp_host *host = container_of(dev, struct srp_host, dev);
Roland Dreieraef9ec32005-11-02 14:07:13 -08003166
3167 return sprintf(buf, "%d\n", host->port);
3168}
3169
Tony Jonesee959b02008-02-22 00:13:36 +01003170static DEVICE_ATTR(port, S_IRUGO, show_port, NULL);
Roland Dreieraef9ec32005-11-02 14:07:13 -08003171
Roland Dreierf5358a12006-06-17 20:37:29 -07003172static struct srp_host *srp_add_port(struct srp_device *device, u8 port)
Roland Dreieraef9ec32005-11-02 14:07:13 -08003173{
3174 struct srp_host *host;
3175
3176 host = kzalloc(sizeof *host, GFP_KERNEL);
3177 if (!host)
3178 return NULL;
3179
3180 INIT_LIST_HEAD(&host->target_list);
Matthew Wilcoxb3589fd2006-06-17 20:37:30 -07003181 spin_lock_init(&host->target_lock);
Roland Dreieraef9ec32005-11-02 14:07:13 -08003182 init_completion(&host->released);
Bart Van Assche2d7091b2014-03-14 13:52:45 +01003183 mutex_init(&host->add_target_mutex);
Greg Kroah-Hartman05321932008-03-06 00:13:36 +01003184 host->srp_dev = device;
Roland Dreieraef9ec32005-11-02 14:07:13 -08003185 host->port = port;
3186
Tony Jonesee959b02008-02-22 00:13:36 +01003187 host->dev.class = &srp_class;
3188 host->dev.parent = device->dev->dma_device;
Kay Sieversd927e382009-01-06 10:44:39 -08003189 dev_set_name(&host->dev, "srp-%s-%d", device->dev->name, port);
Roland Dreieraef9ec32005-11-02 14:07:13 -08003190
Tony Jonesee959b02008-02-22 00:13:36 +01003191 if (device_register(&host->dev))
Roland Dreierf5358a12006-06-17 20:37:29 -07003192 goto free_host;
Tony Jonesee959b02008-02-22 00:13:36 +01003193 if (device_create_file(&host->dev, &dev_attr_add_target))
Roland Dreieraef9ec32005-11-02 14:07:13 -08003194 goto err_class;
Tony Jonesee959b02008-02-22 00:13:36 +01003195 if (device_create_file(&host->dev, &dev_attr_ibdev))
Roland Dreieraef9ec32005-11-02 14:07:13 -08003196 goto err_class;
Tony Jonesee959b02008-02-22 00:13:36 +01003197 if (device_create_file(&host->dev, &dev_attr_port))
Roland Dreieraef9ec32005-11-02 14:07:13 -08003198 goto err_class;
3199
3200 return host;
3201
3202err_class:
Tony Jonesee959b02008-02-22 00:13:36 +01003203 device_unregister(&host->dev);
Roland Dreieraef9ec32005-11-02 14:07:13 -08003204
Roland Dreierf5358a12006-06-17 20:37:29 -07003205free_host:
Roland Dreieraef9ec32005-11-02 14:07:13 -08003206 kfree(host);
3207
3208 return NULL;
3209}
3210
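/*
 * Per-device initialization, invoked by the IB core for every HCA: query
 * the device attributes, detect FMR and fast registration support, derive
 * the memory registration page size and the maximum number of pages per
 * MR, allocate a PD and a DMA MR, and register an srp_host for each port.
 */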
3211static void srp_add_one(struct ib_device *device)
3212{
Roland Dreierf5358a12006-06-17 20:37:29 -07003213 struct srp_device *srp_dev;
3214 struct ib_device_attr *dev_attr;
Roland Dreieraef9ec32005-11-02 14:07:13 -08003215 struct srp_host *host;
Bart Van Assche52ede082014-05-20 15:07:45 +02003216 int mr_page_shift, s, e, p;
3217 u64 max_pages_per_mr;
Roland Dreieraef9ec32005-11-02 14:07:13 -08003218
Roland Dreierf5358a12006-06-17 20:37:29 -07003219 dev_attr = kmalloc(sizeof *dev_attr, GFP_KERNEL);
3220 if (!dev_attr)
Sean Heftycf311cd2006-01-10 07:39:34 -08003221 return;
Roland Dreieraef9ec32005-11-02 14:07:13 -08003222
Roland Dreierf5358a12006-06-17 20:37:29 -07003223 if (ib_query_device(device, dev_attr)) {
Bart Van Asschee0bda7d2012-01-14 12:39:44 +00003224 pr_warn("Query device failed for %s\n", device->name);
Roland Dreierf5358a12006-06-17 20:37:29 -07003225 goto free_attr;
3226 }
3227
3228 srp_dev = kmalloc(sizeof *srp_dev, GFP_KERNEL);
3229 if (!srp_dev)
3230 goto free_attr;
3231
Bart Van Assched1b42892014-05-20 15:07:20 +02003232 srp_dev->has_fmr = (device->alloc_fmr && device->dealloc_fmr &&
3233 device->map_phys_fmr && device->unmap_fmr);
Bart Van Assche5cfb1782014-05-20 15:08:34 +02003234 srp_dev->has_fr = (dev_attr->device_cap_flags &
3235 IB_DEVICE_MEM_MGT_EXTENSIONS);
3236 if (!srp_dev->has_fmr && !srp_dev->has_fr)
3237 dev_warn(&device->dev, "neither FMR nor FR is supported\n");
3238
3239 srp_dev->use_fast_reg = (srp_dev->has_fr &&
3240 (!srp_dev->has_fmr || prefer_fr));
Bart Van Assched1b42892014-05-20 15:07:20 +02003241
Roland Dreierf5358a12006-06-17 20:37:29 -07003242 /*
3243 * Use the smallest page size supported by the HCA, down to a
David Dillow8f26c9f2011-01-14 19:45:50 -05003244 * minimum of 4096 bytes. We're unlikely to build large sglists
3245 * out of smaller entries.
Roland Dreierf5358a12006-06-17 20:37:29 -07003246 */
Bart Van Assche52ede082014-05-20 15:07:45 +02003247 mr_page_shift = max(12, ffs(dev_attr->page_size_cap) - 1);
3248 srp_dev->mr_page_size = 1 << mr_page_shift;
3249 srp_dev->mr_page_mask = ~((u64) srp_dev->mr_page_size - 1);
3250 max_pages_per_mr = dev_attr->max_mr_size;
3251 do_div(max_pages_per_mr, srp_dev->mr_page_size);
3252 srp_dev->max_pages_per_mr = min_t(u64, SRP_MAX_PAGES_PER_MR,
3253 max_pages_per_mr);
Bart Van Assche5cfb1782014-05-20 15:08:34 +02003254 if (srp_dev->use_fast_reg) {
3255 srp_dev->max_pages_per_mr =
3256 min_t(u32, srp_dev->max_pages_per_mr,
3257 dev_attr->max_fast_reg_page_list_len);
3258 }
Bart Van Assche52ede082014-05-20 15:07:45 +02003259 srp_dev->mr_max_size = srp_dev->mr_page_size *
3260 srp_dev->max_pages_per_mr;
Bart Van Assche5cfb1782014-05-20 15:08:34 +02003261 pr_debug("%s: mr_page_shift = %d, dev_attr->max_mr_size = %#llx, dev_attr->max_fast_reg_page_list_len = %u, max_pages_per_mr = %d, mr_max_size = %#x\n",
Bart Van Assche52ede082014-05-20 15:07:45 +02003262 device->name, mr_page_shift, dev_attr->max_mr_size,
Bart Van Assche5cfb1782014-05-20 15:08:34 +02003263 dev_attr->max_fast_reg_page_list_len,
Bart Van Assche52ede082014-05-20 15:07:45 +02003264 srp_dev->max_pages_per_mr, srp_dev->mr_max_size);
Roland Dreierf5358a12006-06-17 20:37:29 -07003265
3266 INIT_LIST_HEAD(&srp_dev->dev_list);
3267
3268 srp_dev->dev = device;
3269 srp_dev->pd = ib_alloc_pd(device);
3270 if (IS_ERR(srp_dev->pd))
3271 goto free_dev;
3272
3273 srp_dev->mr = ib_get_dma_mr(srp_dev->pd,
3274 IB_ACCESS_LOCAL_WRITE |
3275 IB_ACCESS_REMOTE_READ |
3276 IB_ACCESS_REMOTE_WRITE);
3277 if (IS_ERR(srp_dev->mr))
3278 goto err_pd;
3279
Tom Tucker07ebafb2006-08-03 16:02:42 -05003280 if (device->node_type == RDMA_NODE_IB_SWITCH) {
Roland Dreieraef9ec32005-11-02 14:07:13 -08003281 s = 0;
3282 e = 0;
3283 } else {
3284 s = 1;
3285 e = device->phys_port_cnt;
3286 }
3287
3288 for (p = s; p <= e; ++p) {
Roland Dreierf5358a12006-06-17 20:37:29 -07003289 host = srp_add_port(srp_dev, p);
Roland Dreieraef9ec32005-11-02 14:07:13 -08003290 if (host)
Roland Dreierf5358a12006-06-17 20:37:29 -07003291 list_add_tail(&host->list, &srp_dev->dev_list);
Roland Dreieraef9ec32005-11-02 14:07:13 -08003292 }
3293
Roland Dreierf5358a12006-06-17 20:37:29 -07003294 ib_set_client_data(device, &srp_client, srp_dev);
3295
3296 goto free_attr;
3297
3298err_pd:
3299 ib_dealloc_pd(srp_dev->pd);
3300
3301free_dev:
3302 kfree(srp_dev);
3303
3304free_attr:
3305 kfree(dev_attr);
Roland Dreieraef9ec32005-11-02 14:07:13 -08003306}
3307
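/*
 * Per-device teardown: unregister the sysfs port devices (so that no new
 * target ports can be created), queue removal of all existing target
 * ports, wait for the removal and transport-error work to finish, and
 * release the DMA MR and PD.
 */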
3308static void srp_remove_one(struct ib_device *device)
3309{
Roland Dreierf5358a12006-06-17 20:37:29 -07003310 struct srp_device *srp_dev;
Roland Dreieraef9ec32005-11-02 14:07:13 -08003311 struct srp_host *host, *tmp_host;
Bart Van Asscheef6c49d2011-12-26 16:49:18 +00003312 struct srp_target_port *target;
Roland Dreieraef9ec32005-11-02 14:07:13 -08003313
Roland Dreierf5358a12006-06-17 20:37:29 -07003314 srp_dev = ib_get_client_data(device, &srp_client);
Dotan Barak1fe0cb82013-06-12 15:20:36 +02003315 if (!srp_dev)
3316 return;
Roland Dreieraef9ec32005-11-02 14:07:13 -08003317
Roland Dreierf5358a12006-06-17 20:37:29 -07003318 list_for_each_entry_safe(host, tmp_host, &srp_dev->dev_list, list) {
Tony Jonesee959b02008-02-22 00:13:36 +01003319 device_unregister(&host->dev);
Roland Dreieraef9ec32005-11-02 14:07:13 -08003320 /*
3321 * Wait for the sysfs entry to go away, so that no new
3322 * target ports can be created.
3323 */
3324 wait_for_completion(&host->released);
3325
3326 /*
Bart Van Asscheef6c49d2011-12-26 16:49:18 +00003327 * Remove all target ports.
Roland Dreieraef9ec32005-11-02 14:07:13 -08003328 */
Matthew Wilcoxb3589fd2006-06-17 20:37:30 -07003329 spin_lock(&host->target_lock);
Bart Van Asscheef6c49d2011-12-26 16:49:18 +00003330 list_for_each_entry(target, &host->target_list, list)
3331 srp_queue_remove_work(target);
Matthew Wilcoxb3589fd2006-06-17 20:37:30 -07003332 spin_unlock(&host->target_lock);
Roland Dreieraef9ec32005-11-02 14:07:13 -08003333
3334 /*
Bart Van Asschebcc05912014-07-09 15:57:26 +02003335 * Wait for tl_err and target port removal tasks.
Roland Dreieraef9ec32005-11-02 14:07:13 -08003336 */
Bart Van Asscheef6c49d2011-12-26 16:49:18 +00003337 flush_workqueue(system_long_wq);
Bart Van Asschebcc05912014-07-09 15:57:26 +02003338 flush_workqueue(srp_remove_wq);
Roland Dreieraef9ec32005-11-02 14:07:13 -08003339
Roland Dreieraef9ec32005-11-02 14:07:13 -08003340 kfree(host);
3341 }
3342
Roland Dreierf5358a12006-06-17 20:37:29 -07003343 ib_dereg_mr(srp_dev->mr);
3344 ib_dealloc_pd(srp_dev->pd);
3345
3346 kfree(srp_dev);
Roland Dreieraef9ec32005-11-02 14:07:13 -08003347}
3348
FUJITA Tomonori32368222007-06-27 16:33:12 +09003349static struct srp_function_template ib_srp_transport_functions = {
Bart Van Asscheed9b2262013-10-26 14:34:27 +02003350 .has_rport_state = true,
3351 .reset_timer_if_blocked = true,
Bart Van Asschea95cadb2013-10-26 14:37:17 +02003352 .reconnect_delay = &srp_reconnect_delay,
Bart Van Asscheed9b2262013-10-26 14:34:27 +02003353 .fast_io_fail_tmo = &srp_fast_io_fail_tmo,
3354 .dev_loss_tmo = &srp_dev_loss_tmo,
3355 .reconnect = srp_rport_reconnect,
Bart Van Asschedc1bdbd2011-09-16 20:41:13 +02003356 .rport_delete = srp_rport_delete,
Bart Van Asscheed9b2262013-10-26 14:34:27 +02003357 .terminate_rport_io = srp_terminate_io,
FUJITA Tomonori32368222007-06-27 16:33:12 +09003358};
3359
Roland Dreieraef9ec32005-11-02 14:07:13 -08003360static int __init srp_init_module(void)
3361{
3362 int ret;
3363
Bart Van Asschedcb4cb82010-11-26 13:22:48 -05003364 BUILD_BUG_ON(FIELD_SIZEOF(struct ib_wc, wr_id) < sizeof(void *));
Bart Van Asschedd5e6e32010-08-30 19:27:20 +00003365
David Dillow49248642011-01-14 18:23:24 -05003366 if (srp_sg_tablesize) {
Bart Van Asschee0bda7d2012-01-14 12:39:44 +00003367 pr_warn("srp_sg_tablesize is deprecated, please use cmd_sg_entries\n");
David Dillow49248642011-01-14 18:23:24 -05003368 if (!cmd_sg_entries)
3369 cmd_sg_entries = srp_sg_tablesize;
3370 }
3371
3372 if (!cmd_sg_entries)
3373 cmd_sg_entries = SRP_DEF_SG_TABLESIZE;
3374
3375 if (cmd_sg_entries > 255) {
Bart Van Asschee0bda7d2012-01-14 12:39:44 +00003376 pr_warn("Clamping cmd_sg_entries to 255\n");
David Dillow49248642011-01-14 18:23:24 -05003377 cmd_sg_entries = 255;
David Dillow1e89a192008-04-16 21:01:12 -07003378 }
3379
David Dillowc07d4242011-01-16 13:57:10 -05003380 if (!indirect_sg_entries)
3381 indirect_sg_entries = cmd_sg_entries;
3382 else if (indirect_sg_entries < cmd_sg_entries) {
Bart Van Asschee0bda7d2012-01-14 12:39:44 +00003383 pr_warn("Bumping up indirect_sg_entries to match cmd_sg_entries (%u)\n",
3384 cmd_sg_entries);
David Dillowc07d4242011-01-16 13:57:10 -05003385 indirect_sg_entries = cmd_sg_entries;
3386 }
3387
Bart Van Asschebcc05912014-07-09 15:57:26 +02003388 srp_remove_wq = create_workqueue("srp_remove");
Wei Yongjunda05be22014-08-14 08:56:22 +08003389 if (!srp_remove_wq) {
3390 ret = -ENOMEM;
Bart Van Asschebcc05912014-07-09 15:57:26 +02003391 goto out;
3392 }
3393
3394 ret = -ENOMEM;
FUJITA Tomonori32368222007-06-27 16:33:12 +09003395 ib_srp_transport_template =
3396 srp_attach_transport(&ib_srp_transport_functions);
3397 if (!ib_srp_transport_template)
Bart Van Asschebcc05912014-07-09 15:57:26 +02003398 goto destroy_wq;
FUJITA Tomonori32368222007-06-27 16:33:12 +09003399
Roland Dreieraef9ec32005-11-02 14:07:13 -08003400 ret = class_register(&srp_class);
3401 if (ret) {
Bart Van Asschee0bda7d2012-01-14 12:39:44 +00003402 pr_err("couldn't register class infiniband_srp\n");
Bart Van Asschebcc05912014-07-09 15:57:26 +02003403 goto release_tr;
Roland Dreieraef9ec32005-11-02 14:07:13 -08003404 }
3405
Michael S. Tsirkinc1a0b232006-08-21 16:40:12 -07003406 ib_sa_register_client(&srp_sa_client);
3407
Roland Dreieraef9ec32005-11-02 14:07:13 -08003408 ret = ib_register_client(&srp_client);
3409 if (ret) {
Bart Van Asschee0bda7d2012-01-14 12:39:44 +00003410 pr_err("couldn't register IB client\n");
Bart Van Asschebcc05912014-07-09 15:57:26 +02003411 goto unreg_sa;
Roland Dreieraef9ec32005-11-02 14:07:13 -08003412 }
3413
Bart Van Asschebcc05912014-07-09 15:57:26 +02003414out:
3415 return ret;
3416
3417unreg_sa:
3418 ib_sa_unregister_client(&srp_sa_client);
3419 class_unregister(&srp_class);
3420
3421release_tr:
3422 srp_release_transport(ib_srp_transport_template);
3423
3424destroy_wq:
3425 destroy_workqueue(srp_remove_wq);
3426 goto out;
Roland Dreieraef9ec32005-11-02 14:07:13 -08003427}
3428
3429static void __exit srp_cleanup_module(void)
3430{
3431 ib_unregister_client(&srp_client);
Michael S. Tsirkinc1a0b232006-08-21 16:40:12 -07003432 ib_sa_unregister_client(&srp_sa_client);
Roland Dreieraef9ec32005-11-02 14:07:13 -08003433 class_unregister(&srp_class);
FUJITA Tomonori32368222007-06-27 16:33:12 +09003434 srp_release_transport(ib_srp_transport_template);
Bart Van Asschebcc05912014-07-09 15:57:26 +02003435 destroy_workqueue(srp_remove_wq);
Roland Dreieraef9ec32005-11-02 14:07:13 -08003436}
3437
3438module_init(srp_init_module);
3439module_exit(srp_cleanup_module);