/*
 * Copyright (c) 2005 Cisco Systems.  All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#define pr_fmt(fmt) PFX fmt

#include <linux/module.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/err.h>
#include <linux/string.h>
#include <linux/parser.h>
#include <linux/random.h>
#include <linux/jiffies.h>

#include <linux/atomic.h>

#include <scsi/scsi.h>
#include <scsi/scsi_device.h>
#include <scsi/scsi_dbg.h>
#include <scsi/scsi_tcq.h>
#include <scsi/srp.h>
#include <scsi/scsi_transport_srp.h>

#include "ib_srp.h"

#define DRV_NAME	"ib_srp"
#define PFX		DRV_NAME ": "
#define DRV_VERSION	"1.0"
#define DRV_RELDATE	"July 1, 2013"

MODULE_AUTHOR("Roland Dreier");
MODULE_DESCRIPTION("InfiniBand SCSI RDMA Protocol initiator "
		   "v" DRV_VERSION " (" DRV_RELDATE ")");
MODULE_LICENSE("Dual BSD/GPL");

static unsigned int srp_sg_tablesize;
static unsigned int cmd_sg_entries;
static unsigned int indirect_sg_entries;
static bool allow_ext_sg;
static int topspin_workarounds = 1;

module_param(srp_sg_tablesize, uint, 0444);
MODULE_PARM_DESC(srp_sg_tablesize, "Deprecated name for cmd_sg_entries");

module_param(cmd_sg_entries, uint, 0444);
MODULE_PARM_DESC(cmd_sg_entries,
		 "Default number of gather/scatter entries in the SRP command (default is 12, max 255)");

module_param(indirect_sg_entries, uint, 0444);
MODULE_PARM_DESC(indirect_sg_entries,
		 "Default max number of gather/scatter entries (default is 12, max is " __stringify(SCSI_MAX_SG_CHAIN_SEGMENTS) ")");

module_param(allow_ext_sg, bool, 0444);
MODULE_PARM_DESC(allow_ext_sg,
		 "Default behavior when there are more than cmd_sg_entries S/G entries after mapping; fails the request when false (default false)");

module_param(topspin_workarounds, int, 0444);
MODULE_PARM_DESC(topspin_workarounds,
		 "Enable workarounds for Topspin/Cisco SRP target bugs if != 0");

static struct kernel_param_ops srp_tmo_ops;

static int srp_reconnect_delay = 10;
module_param_cb(reconnect_delay, &srp_tmo_ops, &srp_reconnect_delay,
		S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(reconnect_delay, "Time between successive reconnect attempts");

static int srp_fast_io_fail_tmo = 15;
module_param_cb(fast_io_fail_tmo, &srp_tmo_ops, &srp_fast_io_fail_tmo,
		S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(fast_io_fail_tmo,
		 "Number of seconds between the observation of a transport"
		 " layer error and failing all I/O. \"off\" means that this"
		 " functionality is disabled.");

static int srp_dev_loss_tmo = 600;
module_param_cb(dev_loss_tmo, &srp_tmo_ops, &srp_dev_loss_tmo,
		S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(dev_loss_tmo,
		 "Maximum number of seconds that the SRP transport should"
		 " insulate transport layer errors. After this time has been"
		 " exceeded the SCSI host is removed. Should be"
		 " between 1 and " __stringify(SCSI_DEVICE_BLOCK_MAX_TIMEOUT)
		 " if fast_io_fail_tmo has not been set. \"off\" means that"
		 " this functionality is disabled.");

static void srp_add_one(struct ib_device *device);
static void srp_remove_one(struct ib_device *device);
static void srp_recv_completion(struct ib_cq *cq, void *target_ptr);
static void srp_send_completion(struct ib_cq *cq, void *target_ptr);
static int srp_cm_handler(struct ib_cm_id *cm_id, struct ib_cm_event *event);

static struct scsi_transport_template *ib_srp_transport_template;

static struct ib_client srp_client = {
	.name   = "srp",
	.add    = srp_add_one,
	.remove = srp_remove_one
};

static struct ib_sa_client srp_sa_client;

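/*
 * Getter/setter pair for the reconnect_delay, fast_io_fail_tmo and
 * dev_loss_tmo module parameters: a negative value is shown and parsed
 * as "off", and srp_tmo_set() rejects any combination of the three
 * timeouts that srp_tmo_valid() considers inconsistent.
 */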
static int srp_tmo_get(char *buffer, const struct kernel_param *kp)
{
	int tmo = *(int *)kp->arg;

	if (tmo >= 0)
		return sprintf(buffer, "%d", tmo);
	else
		return sprintf(buffer, "off");
}

static int srp_tmo_set(const char *val, const struct kernel_param *kp)
{
	int tmo, res;

	if (strncmp(val, "off", 3) != 0) {
		res = kstrtoint(val, 0, &tmo);
		if (res)
			goto out;
	} else {
		tmo = -1;
	}
	if (kp->arg == &srp_reconnect_delay)
		res = srp_tmo_valid(tmo, srp_fast_io_fail_tmo,
				    srp_dev_loss_tmo);
	else if (kp->arg == &srp_fast_io_fail_tmo)
		res = srp_tmo_valid(srp_reconnect_delay, tmo, srp_dev_loss_tmo);
	else
		res = srp_tmo_valid(srp_reconnect_delay, srp_fast_io_fail_tmo,
				    tmo);
	if (res)
		goto out;
	*(int *)kp->arg = tmo;

out:
	return res;
}

static struct kernel_param_ops srp_tmo_ops = {
	.get = srp_tmo_get,
	.set = srp_tmo_set,
};

static inline struct srp_target_port *host_to_target(struct Scsi_Host *host)
{
	return (struct srp_target_port *) host->hostdata;
}

static const char *srp_target_info(struct Scsi_Host *host)
{
	return host_to_target(host)->target_name;
}

static int srp_target_is_topspin(struct srp_target_port *target)
{
	static const u8 topspin_oui[3] = { 0x00, 0x05, 0xad };
	static const u8 cisco_oui[3]   = { 0x00, 0x1b, 0x0d };

	return topspin_workarounds &&
		(!memcmp(&target->ioc_guid, topspin_oui, sizeof topspin_oui) ||
		 !memcmp(&target->ioc_guid, cisco_oui, sizeof cisco_oui));
}

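/*
 * Allocate an information unit (IU) of @size bytes, backed by a zeroed
 * buffer that is DMA-mapped in the given direction. Returns NULL on
 * allocation or mapping failure.
 */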
static struct srp_iu *srp_alloc_iu(struct srp_host *host, size_t size,
				   gfp_t gfp_mask,
				   enum dma_data_direction direction)
{
	struct srp_iu *iu;

	iu = kmalloc(sizeof *iu, gfp_mask);
	if (!iu)
		goto out;

	iu->buf = kzalloc(size, gfp_mask);
	if (!iu->buf)
		goto out_free_iu;

	iu->dma = ib_dma_map_single(host->srp_dev->dev, iu->buf, size,
				    direction);
	if (ib_dma_mapping_error(host->srp_dev->dev, iu->dma))
		goto out_free_buf;

	iu->size      = size;
	iu->direction = direction;

	return iu;

out_free_buf:
	kfree(iu->buf);
out_free_iu:
	kfree(iu);
out:
	return NULL;
}

static void srp_free_iu(struct srp_host *host, struct srp_iu *iu)
{
	if (!iu)
		return;

	ib_dma_unmap_single(host->srp_dev->dev, iu->dma, iu->size,
			    iu->direction);
	kfree(iu->buf);
	kfree(iu);
}

static void srp_qp_event(struct ib_event *event, void *context)
{
	pr_debug("QP event %d\n", event->event);
}

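/*
 * Transition a freshly created QP to the INIT state: look up the P_Key
 * index for the target's partition and set the access flags and port
 * number.
 */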
static int srp_init_qp(struct srp_target_port *target,
		       struct ib_qp *qp)
{
	struct ib_qp_attr *attr;
	int ret;

	attr = kmalloc(sizeof *attr, GFP_KERNEL);
	if (!attr)
		return -ENOMEM;

	ret = ib_find_pkey(target->srp_host->srp_dev->dev,
			   target->srp_host->port,
			   be16_to_cpu(target->path.pkey),
			   &attr->pkey_index);
	if (ret)
		goto out;

	attr->qp_state        = IB_QPS_INIT;
	attr->qp_access_flags = (IB_ACCESS_REMOTE_READ |
				 IB_ACCESS_REMOTE_WRITE);
	attr->port_num        = target->srp_host->port;

	ret = ib_modify_qp(qp, attr,
			   IB_QP_STATE		|
			   IB_QP_PKEY_INDEX	|
			   IB_QP_ACCESS_FLAGS	|
			   IB_QP_PORT);

out:
	kfree(attr);
	return ret;
}

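/*
 * Create a new CM ID for the target, destroying and replacing the old
 * one if present. Used at login time and when recovering from a stale
 * connection.
 */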
static int srp_new_cm_id(struct srp_target_port *target)
{
	struct ib_cm_id *new_cm_id;

	new_cm_id = ib_create_cm_id(target->srp_host->srp_dev->dev,
				    srp_cm_handler, target);
	if (IS_ERR(new_cm_id))
		return PTR_ERR(new_cm_id);

	if (target->cm_id)
		ib_destroy_cm_id(target->cm_id);
	target->cm_id = new_cm_id;

	return 0;
}

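/*
 * Allocate the RDMA resources for a target port: one receive and one
 * send completion queue plus a reliable connected (RC) QP. Any
 * previously allocated QP and CQs are destroyed only after their
 * replacements have been created successfully.
 */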
static int srp_create_target_ib(struct srp_target_port *target)
{
	struct ib_qp_init_attr *init_attr;
	struct ib_cq *recv_cq, *send_cq;
	struct ib_qp *qp;
	int ret;

	init_attr = kzalloc(sizeof *init_attr, GFP_KERNEL);
	if (!init_attr)
		return -ENOMEM;

	recv_cq = ib_create_cq(target->srp_host->srp_dev->dev,
			       srp_recv_completion, NULL, target,
			       target->queue_size, target->comp_vector);
	if (IS_ERR(recv_cq)) {
		ret = PTR_ERR(recv_cq);
		goto err;
	}

	send_cq = ib_create_cq(target->srp_host->srp_dev->dev,
			       srp_send_completion, NULL, target,
			       target->queue_size, target->comp_vector);
	if (IS_ERR(send_cq)) {
		ret = PTR_ERR(send_cq);
		goto err_recv_cq;
	}

	ib_req_notify_cq(recv_cq, IB_CQ_NEXT_COMP);

	init_attr->event_handler       = srp_qp_event;
	init_attr->cap.max_send_wr     = target->queue_size;
	init_attr->cap.max_recv_wr     = target->queue_size;
	init_attr->cap.max_recv_sge    = 1;
	init_attr->cap.max_send_sge    = 1;
	init_attr->sq_sig_type         = IB_SIGNAL_ALL_WR;
	init_attr->qp_type             = IB_QPT_RC;
	init_attr->send_cq             = send_cq;
	init_attr->recv_cq             = recv_cq;

	qp = ib_create_qp(target->srp_host->srp_dev->pd, init_attr);
	if (IS_ERR(qp)) {
		ret = PTR_ERR(qp);
		goto err_send_cq;
	}

	ret = srp_init_qp(target, qp);
	if (ret)
		goto err_qp;

	if (target->qp)
		ib_destroy_qp(target->qp);
	if (target->recv_cq)
		ib_destroy_cq(target->recv_cq);
	if (target->send_cq)
		ib_destroy_cq(target->send_cq);

	target->qp = qp;
	target->recv_cq = recv_cq;
	target->send_cq = send_cq;

	kfree(init_attr);
	return 0;

err_qp:
	ib_destroy_qp(qp);

err_send_cq:
	ib_destroy_cq(send_cq);

err_recv_cq:
	ib_destroy_cq(recv_cq);

err:
	kfree(init_attr);
	return ret;
}

/*
 * Note: this function may be called without srp_alloc_iu_bufs() having been
 * invoked. Hence the target->[rt]x_ring checks.
 */
static void srp_free_target_ib(struct srp_target_port *target)
{
	int i;

	ib_destroy_qp(target->qp);
	ib_destroy_cq(target->send_cq);
	ib_destroy_cq(target->recv_cq);

	target->qp = NULL;
	target->send_cq = target->recv_cq = NULL;

	if (target->rx_ring) {
		for (i = 0; i < target->queue_size; ++i)
			srp_free_iu(target->srp_host, target->rx_ring[i]);
		kfree(target->rx_ring);
		target->rx_ring = NULL;
	}
	if (target->tx_ring) {
		for (i = 0; i < target->queue_size; ++i)
			srp_free_iu(target->srp_host, target->tx_ring[i]);
		kfree(target->tx_ring);
		target->tx_ring = NULL;
	}
}

static void srp_path_rec_completion(int status,
				    struct ib_sa_path_rec *pathrec,
				    void *target_ptr)
{
	struct srp_target_port *target = target_ptr;

	target->status = status;
	if (status)
		shost_printk(KERN_ERR, target->scsi_host,
			     PFX "Got failed path rec status %d\n", status);
	else
		target->path = *pathrec;
	complete(&target->done);
}

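/*
 * Issue a path record query to the subnet administrator for the
 * target's GIDs, P_Key and service ID, and wait for the asynchronous
 * completion. Returns the query status.
 */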
static int srp_lookup_path(struct srp_target_port *target)
{
	target->path.numb_path = 1;

	init_completion(&target->done);

	target->path_query_id = ib_sa_path_rec_get(&srp_sa_client,
						   target->srp_host->srp_dev->dev,
						   target->srp_host->port,
						   &target->path,
						   IB_SA_PATH_REC_SERVICE_ID	|
						   IB_SA_PATH_REC_DGID		|
						   IB_SA_PATH_REC_SGID		|
						   IB_SA_PATH_REC_NUMB_PATH	|
						   IB_SA_PATH_REC_PKEY,
						   SRP_PATH_REC_TIMEOUT_MS,
						   GFP_KERNEL,
						   srp_path_rec_completion,
						   target, &target->path_query);
	if (target->path_query_id < 0)
		return target->path_query_id;

	wait_for_completion(&target->done);

	if (target->status < 0)
		shost_printk(KERN_WARNING, target->scsi_host,
			     PFX "Path record query failed\n");

	return target->status;
}

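/*
 * Build and send an SRP_LOGIN_REQ inside a CM REQ. The initiator and
 * target port identifiers are laid out according to the I/O class the
 * target reports, and the Topspin/Cisco initiator port ID workaround
 * is applied when needed.
 */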
static int srp_send_req(struct srp_target_port *target)
{
	struct {
		struct ib_cm_req_param param;
		struct srp_login_req   priv;
	} *req = NULL;
	int status;

	req = kzalloc(sizeof *req, GFP_KERNEL);
	if (!req)
		return -ENOMEM;

	req->param.primary_path		      = &target->path;
	req->param.alternate_path	      = NULL;
	req->param.service_id		      = target->service_id;
	req->param.qp_num		      = target->qp->qp_num;
	req->param.qp_type		      = target->qp->qp_type;
	req->param.private_data		      = &req->priv;
	req->param.private_data_len	      = sizeof req->priv;
	req->param.flow_control		      = 1;

	get_random_bytes(&req->param.starting_psn, 4);
	req->param.starting_psn	     	     &= 0xffffff;

	/*
	 * Pick some arbitrary defaults here; we could make these
	 * module parameters if anyone cared about setting them.
	 */
	req->param.responder_resources	      = 4;
	req->param.remote_cm_response_timeout = 20;
	req->param.local_cm_response_timeout  = 20;
	req->param.retry_count                = target->tl_retry_count;
	req->param.rnr_retry_count	      = 7;
	req->param.max_cm_retries	      = 15;

	req->priv.opcode	= SRP_LOGIN_REQ;
	req->priv.tag		= 0;
	req->priv.req_it_iu_len = cpu_to_be32(target->max_iu_len);
	req->priv.req_buf_fmt	= cpu_to_be16(SRP_BUF_FORMAT_DIRECT |
					      SRP_BUF_FORMAT_INDIRECT);
	/*
	 * In the published SRP specification (draft rev. 16a), the
	 * port identifier format is 8 bytes of ID extension followed
	 * by 8 bytes of GUID.  Older drafts put the two halves in the
	 * opposite order, so that the GUID comes first.
	 *
	 * Targets conforming to these obsolete drafts can be
	 * recognized by the I/O Class they report.
	 */
	if (target->io_class == SRP_REV10_IB_IO_CLASS) {
		memcpy(req->priv.initiator_port_id,
		       &target->path.sgid.global.interface_id, 8);
		memcpy(req->priv.initiator_port_id + 8,
		       &target->initiator_ext, 8);
		memcpy(req->priv.target_port_id,     &target->ioc_guid, 8);
		memcpy(req->priv.target_port_id + 8, &target->id_ext, 8);
	} else {
		memcpy(req->priv.initiator_port_id,
		       &target->initiator_ext, 8);
		memcpy(req->priv.initiator_port_id + 8,
		       &target->path.sgid.global.interface_id, 8);
		memcpy(req->priv.target_port_id,     &target->id_ext, 8);
		memcpy(req->priv.target_port_id + 8, &target->ioc_guid, 8);
	}

	/*
	 * Topspin/Cisco SRP targets will reject our login unless we
	 * zero out the first 8 bytes of our initiator port ID and set
	 * the second 8 bytes to the local node GUID.
	 */
	if (srp_target_is_topspin(target)) {
		shost_printk(KERN_DEBUG, target->scsi_host,
			     PFX "Topspin/Cisco initiator port ID workaround "
			     "activated for target GUID %016llx\n",
			     (unsigned long long) be64_to_cpu(target->ioc_guid));
		memset(req->priv.initiator_port_id, 0, 8);
		memcpy(req->priv.initiator_port_id + 8,
		       &target->srp_host->srp_dev->dev->node_guid, 8);
	}

	status = ib_send_cm_req(target->cm_id, &req->param);

	kfree(req);

	return status;
}

static bool srp_queue_remove_work(struct srp_target_port *target)
{
	bool changed = false;

	spin_lock_irq(&target->lock);
	if (target->state != SRP_TARGET_REMOVED) {
		target->state = SRP_TARGET_REMOVED;
		changed = true;
	}
	spin_unlock_irq(&target->lock);

	if (changed)
		queue_work(system_long_wq, &target->remove_work);

	return changed;
}

static bool srp_change_conn_state(struct srp_target_port *target,
				  bool connected)
{
	bool changed = false;

	spin_lock_irq(&target->lock);
	if (target->connected != connected) {
		target->connected = connected;
		changed = true;
	}
	spin_unlock_irq(&target->lock);

	return changed;
}

static void srp_disconnect_target(struct srp_target_port *target)
{
	if (srp_change_conn_state(target, false)) {
		/* XXX should send SRP_I_LOGOUT request */

		if (ib_send_cm_dreq(target->cm_id, NULL, 0)) {
			shost_printk(KERN_DEBUG, target->scsi_host,
				     PFX "Sending CM DREQ failed\n");
		}
	}
}

static void srp_free_req_data(struct srp_target_port *target)
{
	struct ib_device *ibdev = target->srp_host->srp_dev->dev;
	struct srp_request *req;
	int i;

	if (!target->req_ring)
		return;

	for (i = 0; i < target->req_ring_size; ++i) {
		req = &target->req_ring[i];
		kfree(req->fmr_list);
		kfree(req->map_page);
		if (req->indirect_dma_addr) {
			ib_dma_unmap_single(ibdev, req->indirect_dma_addr,
					    target->indirect_size,
					    DMA_TO_DEVICE);
		}
		kfree(req->indirect_desc);
	}

	kfree(target->req_ring);
	target->req_ring = NULL;
}

static int srp_alloc_req_data(struct srp_target_port *target)
{
	struct srp_device *srp_dev = target->srp_host->srp_dev;
	struct ib_device *ibdev = srp_dev->dev;
	struct srp_request *req;
	dma_addr_t dma_addr;
	int i, ret = -ENOMEM;

	INIT_LIST_HEAD(&target->free_reqs);

	target->req_ring = kzalloc(target->req_ring_size *
				   sizeof(*target->req_ring), GFP_KERNEL);
	if (!target->req_ring)
		goto out;

	for (i = 0; i < target->req_ring_size; ++i) {
		req = &target->req_ring[i];
		req->fmr_list = kmalloc(target->cmd_sg_cnt * sizeof(void *),
					GFP_KERNEL);
		req->map_page = kmalloc(SRP_FMR_SIZE * sizeof(void *),
					GFP_KERNEL);
		req->indirect_desc = kmalloc(target->indirect_size, GFP_KERNEL);
		if (!req->fmr_list || !req->map_page || !req->indirect_desc)
			goto out;

		dma_addr = ib_dma_map_single(ibdev, req->indirect_desc,
					     target->indirect_size,
					     DMA_TO_DEVICE);
		if (ib_dma_mapping_error(ibdev, dma_addr))
			goto out;

		req->indirect_dma_addr = dma_addr;
		req->index = i;
		list_add_tail(&req->list, &target->free_reqs);
	}
	ret = 0;

out:
	return ret;
}

/**
 * srp_del_scsi_host_attr() - Remove attributes defined in the host template.
 * @shost: SCSI host whose attributes to remove from sysfs.
 *
 * Note: Any attributes defined in the host template and that did not exist
 * before invocation of this function will be ignored.
 */
static void srp_del_scsi_host_attr(struct Scsi_Host *shost)
{
	struct device_attribute **attr;

	for (attr = shost->hostt->shost_attrs; attr && *attr; ++attr)
		device_remove_file(&shost->shost_dev, *attr);
}

static void srp_remove_target(struct srp_target_port *target)
{
	WARN_ON_ONCE(target->state != SRP_TARGET_REMOVED);

	srp_del_scsi_host_attr(target->scsi_host);
	srp_rport_get(target->rport);
	srp_remove_host(target->scsi_host);
	scsi_remove_host(target->scsi_host);
	srp_disconnect_target(target);
	ib_destroy_cm_id(target->cm_id);
	srp_free_target_ib(target);
	cancel_work_sync(&target->tl_err_work);
	srp_rport_put(target->rport);
	srp_free_req_data(target);
	scsi_host_put(target->scsi_host);
}

static void srp_remove_work(struct work_struct *work)
{
	struct srp_target_port *target =
		container_of(work, struct srp_target_port, remove_work);

	WARN_ON_ONCE(target->state != SRP_TARGET_REMOVED);

	srp_remove_target(target);

	spin_lock(&target->srp_host->target_lock);
	list_del(&target->list);
	spin_unlock(&target->srp_host->target_lock);
}

static void srp_rport_delete(struct srp_rport *rport)
{
	struct srp_target_port *target = rport->lld_data;

	srp_queue_remove_work(target);
}

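/*
 * Perform the SRP login sequence: look up a path, send the login
 * request, and handle port/DLID redirects and stale connections by
 * retrying. At most three stale-connection retries are attempted.
 */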
static int srp_connect_target(struct srp_target_port *target)
{
	int retries = 3;
	int ret;

	WARN_ON_ONCE(target->connected);

	target->qp_in_error = false;

	ret = srp_lookup_path(target);
	if (ret)
		return ret;

	while (1) {
		init_completion(&target->done);
		ret = srp_send_req(target);
		if (ret)
			return ret;
		wait_for_completion(&target->done);

		/*
		 * The CM event handling code will set status to
		 * SRP_PORT_REDIRECT if we get a port redirect REJ
		 * back, or SRP_DLID_REDIRECT if we get a lid/qp
		 * redirect REJ back.
		 */
		switch (target->status) {
		case 0:
			srp_change_conn_state(target, true);
			return 0;

		case SRP_PORT_REDIRECT:
			ret = srp_lookup_path(target);
			if (ret)
				return ret;
			break;

		case SRP_DLID_REDIRECT:
			break;

		case SRP_STALE_CONN:
			/* Our current CM id was stale, and is now in timewait.
			 * Try to reconnect with a new one.
			 */
			if (!retries-- || srp_new_cm_id(target)) {
				shost_printk(KERN_ERR, target->scsi_host, PFX
					     "giving up on stale connection\n");
				target->status = -ECONNRESET;
				return target->status;
			}

			shost_printk(KERN_ERR, target->scsi_host, PFX
				     "retrying stale connection\n");
			break;

		default:
			return target->status;
		}
	}
}

static void srp_unmap_data(struct scsi_cmnd *scmnd,
			   struct srp_target_port *target,
			   struct srp_request *req)
{
	struct ib_device *ibdev = target->srp_host->srp_dev->dev;
	struct ib_pool_fmr **pfmr;

	if (!scsi_sglist(scmnd) ||
	    (scmnd->sc_data_direction != DMA_TO_DEVICE &&
	     scmnd->sc_data_direction != DMA_FROM_DEVICE))
		return;

	pfmr = req->fmr_list;
	while (req->nfmr--)
		ib_fmr_pool_unmap(*pfmr++);

	ib_dma_unmap_sg(ibdev, scsi_sglist(scmnd), scsi_sg_count(scmnd),
			scmnd->sc_data_direction);
}

/**
 * srp_claim_req - Take ownership of the scmnd associated with a request.
 * @target: SRP target port.
 * @req: SRP request.
 * @scmnd: If NULL, take ownership of @req->scmnd. If not NULL, only take
 *         ownership of @req->scmnd if it equals @scmnd.
 *
 * Return value:
 * Either NULL or a pointer to the SCSI command the caller became owner of.
 */
static struct scsi_cmnd *srp_claim_req(struct srp_target_port *target,
				       struct srp_request *req,
				       struct scsi_cmnd *scmnd)
{
	unsigned long flags;

	spin_lock_irqsave(&target->lock, flags);
	if (!scmnd) {
		scmnd = req->scmnd;
		req->scmnd = NULL;
	} else if (req->scmnd == scmnd) {
		req->scmnd = NULL;
	} else {
		scmnd = NULL;
	}
	spin_unlock_irqrestore(&target->lock, flags);

	return scmnd;
}

/**
 * srp_free_req() - Unmap data and add request to the free request list.
 */
static void srp_free_req(struct srp_target_port *target,
			 struct srp_request *req, struct scsi_cmnd *scmnd,
			 s32 req_lim_delta)
{
	unsigned long flags;

	srp_unmap_data(scmnd, target, req);

	spin_lock_irqsave(&target->lock, flags);
	target->req_lim += req_lim_delta;
	list_add_tail(&req->list, &target->free_reqs);
	spin_unlock_irqrestore(&target->lock, flags);
}

static void srp_finish_req(struct srp_target_port *target,
			   struct srp_request *req, int result)
{
	struct scsi_cmnd *scmnd = srp_claim_req(target, req, NULL);

	if (scmnd) {
		srp_free_req(target, req, scmnd, 0);
		scmnd->result = result;
		scmnd->scsi_done(scmnd);
	}
}

static void srp_terminate_io(struct srp_rport *rport)
{
	struct srp_target_port *target = rport->lld_data;
	int i;

	for (i = 0; i < target->req_ring_size; ++i) {
		struct srp_request *req = &target->req_ring[i];
		srp_finish_req(target, req, DID_TRANSPORT_FAILFAST << 16);
	}
}

/*
 * It is up to the caller to ensure that srp_rport_reconnect() calls are
 * serialized and that no concurrent srp_queuecommand(), srp_abort(),
 * srp_reset_device() or srp_reset_host() calls will occur while this function
 * is in progress. One way to realize that is not to call this function
 * directly but to call srp_reconnect_rport() instead since that last function
 * serializes calls of this function via rport->mutex and also blocks
 * srp_queuecommand() calls before invoking this function.
 */
static int srp_rport_reconnect(struct srp_rport *rport)
{
	struct srp_target_port *target = rport->lld_data;
	int i, ret;

	srp_disconnect_target(target);
	/*
	 * Now get a new local CM ID so that we avoid confusing the target in
	 * case things are really fouled up. Doing so also ensures that all CM
	 * callbacks will have finished before a new QP is allocated.
	 */
	ret = srp_new_cm_id(target);
	/*
	 * Whether or not creating a new CM ID succeeded, create a new
	 * QP. This guarantees that all completion callback function
	 * invocations have finished before request resetting starts.
	 */
	if (ret == 0)
		ret = srp_create_target_ib(target);
	else
		srp_create_target_ib(target);

	for (i = 0; i < target->req_ring_size; ++i) {
		struct srp_request *req = &target->req_ring[i];
		srp_finish_req(target, req, DID_RESET << 16);
	}

	INIT_LIST_HEAD(&target->free_tx);
	for (i = 0; i < target->queue_size; ++i)
		list_add(&target->tx_ring[i]->list, &target->free_tx);

	if (ret == 0)
		ret = srp_connect_target(target);

	if (ret == 0)
		shost_printk(KERN_INFO, target->scsi_host,
			     PFX "reconnect succeeded\n");

	return ret;
}

static void srp_map_desc(struct srp_map_state *state, dma_addr_t dma_addr,
			 unsigned int dma_len, u32 rkey)
{
	struct srp_direct_buf *desc = state->desc;

	desc->va = cpu_to_be64(dma_addr);
	desc->key = cpu_to_be32(rkey);
	desc->len = cpu_to_be32(dma_len);

	state->total_len += dma_len;
	state->desc++;
	state->ndesc++;
}

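/*
 * Flush the pages accumulated in @state into a descriptor: a single
 * page becomes a direct descriptor using the global rkey, while
 * multiple pages are mapped through the FMR pool.
 */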
static int srp_map_finish_fmr(struct srp_map_state *state,
			      struct srp_target_port *target)
{
	struct srp_device *dev = target->srp_host->srp_dev;
	struct ib_pool_fmr *fmr;
	u64 io_addr = 0;

	if (!state->npages)
		return 0;

	if (state->npages == 1) {
		srp_map_desc(state, state->base_dma_addr, state->fmr_len,
			     target->rkey);
		state->npages = state->fmr_len = 0;
		return 0;
	}

	fmr = ib_fmr_pool_map_phys(dev->fmr_pool, state->pages,
				   state->npages, io_addr);
	if (IS_ERR(fmr))
		return PTR_ERR(fmr);

	*state->next_fmr++ = fmr;
	state->nfmr++;

	srp_map_desc(state, 0, state->fmr_len, fmr->fmr->rkey);
	state->npages = state->fmr_len = 0;
	return 0;
}

static void srp_map_update_start(struct srp_map_state *state,
				 struct scatterlist *sg, int sg_index,
				 dma_addr_t dma_addr)
{
	state->unmapped_sg = sg;
	state->unmapped_index = sg_index;
	state->unmapped_addr = dma_addr;
}

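/*
 * Map a single scatterlist entry. In FMR mode the entry is split into
 * FMR-page-sized chunks and accumulated in @state; entries that are
 * not aligned to the FMR page size are emitted as direct descriptors
 * instead.
 */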
static int srp_map_sg_entry(struct srp_map_state *state,
			    struct srp_target_port *target,
			    struct scatterlist *sg, int sg_index,
			    int use_fmr)
{
	struct srp_device *dev = target->srp_host->srp_dev;
	struct ib_device *ibdev = dev->dev;
	dma_addr_t dma_addr = ib_sg_dma_address(ibdev, sg);
	unsigned int dma_len = ib_sg_dma_len(ibdev, sg);
	unsigned int len;
	int ret;

	if (!dma_len)
		return 0;

	if (use_fmr == SRP_MAP_NO_FMR) {
		/* Once we're in direct map mode for a request, we don't
		 * go back to FMR mode, so no need to update anything
		 * other than the descriptor.
		 */
		srp_map_desc(state, dma_addr, dma_len, target->rkey);
		return 0;
	}

	/* If we start at an offset into the FMR page, don't merge into
	 * the current FMR. Finish it out, and use the kernel's MR for this
	 * sg entry. This is to avoid potential bugs on some SRP targets
	 * that were never quite defined, but went away when the initiator
	 * avoided using FMR on such page fragments.
	 */
	if (dma_addr & ~dev->fmr_page_mask || dma_len > dev->fmr_max_size) {
		ret = srp_map_finish_fmr(state, target);
		if (ret)
			return ret;

		srp_map_desc(state, dma_addr, dma_len, target->rkey);
		srp_map_update_start(state, NULL, 0, 0);
		return 0;
	}

	/* If this is the first sg to go into the FMR, save our position.
	 * We need to know the first unmapped entry, its index, and the
	 * first unmapped address within that entry to be able to restart
	 * mapping after an error.
	 */
	if (!state->unmapped_sg)
		srp_map_update_start(state, sg, sg_index, dma_addr);

	while (dma_len) {
		if (state->npages == SRP_FMR_SIZE) {
			ret = srp_map_finish_fmr(state, target);
			if (ret)
				return ret;

			srp_map_update_start(state, sg, sg_index, dma_addr);
		}

		len = min_t(unsigned int, dma_len, dev->fmr_page_size);

		if (!state->npages)
			state->base_dma_addr = dma_addr;
		state->pages[state->npages++] = dma_addr;
		state->fmr_len += len;
		dma_addr += len;
		dma_len -= len;
	}

	/* If the last entry of the FMR wasn't a full page, then we need to
	 * close it out and start a new one -- we can only merge at page
	 * boundaries.
	 */
	ret = 0;
	if (len != dev->fmr_page_size) {
		ret = srp_map_finish_fmr(state, target);
		if (!ret)
			srp_map_update_start(state, NULL, 0, 0);
	}
	return ret;
}

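/*
 * Map the data buffer of a SCSI command and fill in the SRP_CMD data
 * descriptors. Returns the length of the resulting SRP IU on success
 * or a negative error code.
 */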
static int srp_map_data(struct scsi_cmnd *scmnd, struct srp_target_port *target,
			struct srp_request *req)
{
	struct scatterlist *scat, *sg;
	struct srp_cmd *cmd = req->cmd->buf;
	int i, len, nents, count, use_fmr;
	struct srp_device *dev;
	struct ib_device *ibdev;
	struct srp_map_state state;
	struct srp_indirect_buf *indirect_hdr;
	u32 table_len;
	u8 fmt;

	if (!scsi_sglist(scmnd) || scmnd->sc_data_direction == DMA_NONE)
		return sizeof (struct srp_cmd);

	if (scmnd->sc_data_direction != DMA_FROM_DEVICE &&
	    scmnd->sc_data_direction != DMA_TO_DEVICE) {
		shost_printk(KERN_WARNING, target->scsi_host,
			     PFX "Unhandled data direction %d\n",
			     scmnd->sc_data_direction);
		return -EINVAL;
	}

	nents = scsi_sg_count(scmnd);
	scat  = scsi_sglist(scmnd);

	dev = target->srp_host->srp_dev;
	ibdev = dev->dev;

	count = ib_dma_map_sg(ibdev, scat, nents, scmnd->sc_data_direction);
	if (unlikely(count == 0))
		return -EIO;

	fmt = SRP_DATA_DESC_DIRECT;
	len = sizeof (struct srp_cmd) +	sizeof (struct srp_direct_buf);

	if (count == 1) {
		/*
		 * The midlayer only generated a single gather/scatter
		 * entry, or DMA mapping coalesced everything to a
		 * single entry.  So a direct descriptor along with
		 * the DMA MR suffices.
		 */
		struct srp_direct_buf *buf = (void *) cmd->add_data;

		buf->va  = cpu_to_be64(ib_sg_dma_address(ibdev, scat));
		buf->key = cpu_to_be32(target->rkey);
		buf->len = cpu_to_be32(ib_sg_dma_len(ibdev, scat));

		req->nfmr = 0;
		goto map_complete;
	}

	/* We have more than one scatter/gather entry, so build our indirect
	 * descriptor table, trying to merge as many entries with FMR as we
	 * can.
	 */
	indirect_hdr = (void *) cmd->add_data;

	ib_dma_sync_single_for_cpu(ibdev, req->indirect_dma_addr,
				   target->indirect_size, DMA_TO_DEVICE);

	memset(&state, 0, sizeof(state));
	state.desc	= req->indirect_desc;
	state.pages	= req->map_page;
	state.next_fmr	= req->fmr_list;

	use_fmr = dev->fmr_pool ? SRP_MAP_ALLOW_FMR : SRP_MAP_NO_FMR;

	for_each_sg(scat, sg, count, i) {
		if (srp_map_sg_entry(&state, target, sg, i, use_fmr)) {
			/* FMR mapping failed, so backtrack to the first
			 * unmapped entry and continue on without using FMR.
			 */
			dma_addr_t dma_addr;
			unsigned int dma_len;

backtrack:
			sg = state.unmapped_sg;
			i = state.unmapped_index;

			dma_addr = ib_sg_dma_address(ibdev, sg);
			dma_len = ib_sg_dma_len(ibdev, sg);
			dma_len -= (state.unmapped_addr - dma_addr);
			dma_addr = state.unmapped_addr;
			use_fmr = SRP_MAP_NO_FMR;
			srp_map_desc(&state, dma_addr, dma_len, target->rkey);
		}
	}

	if (use_fmr == SRP_MAP_ALLOW_FMR && srp_map_finish_fmr(&state, target))
		goto backtrack;

	/* We've mapped the request, now pull as much of the indirect
	 * descriptor table as we can into the command buffer. If this
	 * target is not using an external indirect table, we are
	 * guaranteed to fit into the command, as the SCSI layer won't
	 * give us more S/G entries than we allow.
	 */
	req->nfmr = state.nfmr;
	if (state.ndesc == 1) {
		/* FMR mapping was able to collapse this to one entry,
		 * so use a direct descriptor.
		 */
		struct srp_direct_buf *buf = (void *) cmd->add_data;

		*buf = req->indirect_desc[0];
		goto map_complete;
	}

	if (unlikely(target->cmd_sg_cnt < state.ndesc &&
		     !target->allow_ext_sg)) {
		shost_printk(KERN_ERR, target->scsi_host,
			     "Could not fit S/G list into SRP_CMD\n");
		return -EIO;
	}

	count = min(state.ndesc, target->cmd_sg_cnt);
	table_len = state.ndesc * sizeof (struct srp_direct_buf);

	fmt = SRP_DATA_DESC_INDIRECT;
	len = sizeof(struct srp_cmd) + sizeof (struct srp_indirect_buf);
	len += count * sizeof (struct srp_direct_buf);

	memcpy(indirect_hdr->desc_list, req->indirect_desc,
	       count * sizeof (struct srp_direct_buf));

	indirect_hdr->table_desc.va = cpu_to_be64(req->indirect_dma_addr);
	indirect_hdr->table_desc.key = cpu_to_be32(target->rkey);
	indirect_hdr->table_desc.len = cpu_to_be32(table_len);
	indirect_hdr->len = cpu_to_be32(state.total_len);

	if (scmnd->sc_data_direction == DMA_TO_DEVICE)
		cmd->data_out_desc_cnt = count;
	else
		cmd->data_in_desc_cnt = count;

	ib_dma_sync_single_for_device(ibdev, req->indirect_dma_addr, table_len,
				      DMA_TO_DEVICE);

map_complete:
	if (scmnd->sc_data_direction == DMA_TO_DEVICE)
		cmd->buf_fmt = fmt << 4;
	else
		cmd->buf_fmt = fmt;

	return len;
}

/*
 * Return an IU and possible credit to the free pool
 */
static void srp_put_tx_iu(struct srp_target_port *target, struct srp_iu *iu,
			  enum srp_iu_type iu_type)
{
	unsigned long flags;

	spin_lock_irqsave(&target->lock, flags);
	list_add(&iu->list, &target->free_tx);
	if (iu_type != SRP_IU_RSP)
		++target->req_lim;
	spin_unlock_irqrestore(&target->lock, flags);
}

/*
 * Must be called with target->lock held to protect req_lim and free_tx.
 * If IU is not sent, it must be returned using srp_put_tx_iu().
 *
 * Note:
 * An upper limit for the number of allocated information units for each
 * request type is:
 * - SRP_IU_CMD: SRP_CMD_SQ_SIZE, since the SCSI mid-layer never queues
 *   more than Scsi_Host.can_queue requests.
 * - SRP_IU_TSK_MGMT: SRP_TSK_MGMT_SQ_SIZE.
 * - SRP_IU_RSP: 1, since a conforming SRP target never sends more than
 *   one unanswered SRP request to an initiator.
 */
static struct srp_iu *__srp_get_tx_iu(struct srp_target_port *target,
				      enum srp_iu_type iu_type)
{
	s32 rsv = (iu_type == SRP_IU_TSK_MGMT) ? 0 : SRP_TSK_MGMT_SQ_SIZE;
	struct srp_iu *iu;

	srp_send_completion(target->send_cq, target);

	if (list_empty(&target->free_tx))
		return NULL;

	/* Initiator responses to target requests do not consume credits */
	if (iu_type != SRP_IU_RSP) {
		if (target->req_lim <= rsv) {
			++target->zero_req_lim;
			return NULL;
		}

		--target->req_lim;
	}

	iu = list_first_entry(&target->free_tx, struct srp_iu, list);
	list_del(&iu->list);
	return iu;
}

static int srp_post_send(struct srp_target_port *target,
			 struct srp_iu *iu, int len)
{
	struct ib_sge list;
	struct ib_send_wr wr, *bad_wr;

	list.addr   = iu->dma;
	list.length = len;
	list.lkey   = target->lkey;

	wr.next       = NULL;
	wr.wr_id      = (uintptr_t) iu;
	wr.sg_list    = &list;
	wr.num_sge    = 1;
	wr.opcode     = IB_WR_SEND;
	wr.send_flags = IB_SEND_SIGNALED;

	return ib_post_send(target->qp, &wr, &bad_wr);
}

static int srp_post_recv(struct srp_target_port *target, struct srp_iu *iu)
{
	struct ib_recv_wr wr, *bad_wr;
	struct ib_sge list;

	list.addr   = iu->dma;
	list.length = iu->size;
	list.lkey   = target->lkey;

	wr.next     = NULL;
	wr.wr_id    = (uintptr_t) iu;
	wr.sg_list  = &list;
	wr.num_sge  = 1;

	return ib_post_recv(target->qp, &wr, &bad_wr);
}

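/*
 * Handle an SRP_RSP information unit: either complete a task
 * management request or complete the SCSI command identified by the
 * response tag, copying over sense data and residual counts.
 */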
static void srp_process_rsp(struct srp_target_port *target, struct srp_rsp *rsp)
{
	struct srp_request *req;
	struct scsi_cmnd *scmnd;
	unsigned long flags;

	if (unlikely(rsp->tag & SRP_TAG_TSK_MGMT)) {
		spin_lock_irqsave(&target->lock, flags);
		target->req_lim += be32_to_cpu(rsp->req_lim_delta);
		spin_unlock_irqrestore(&target->lock, flags);

		target->tsk_mgmt_status = -1;
		if (be32_to_cpu(rsp->resp_data_len) >= 4)
			target->tsk_mgmt_status = rsp->data[3];
		complete(&target->tsk_mgmt_done);
	} else {
		req = &target->req_ring[rsp->tag];
		scmnd = srp_claim_req(target, req, NULL);
		if (!scmnd) {
			shost_printk(KERN_ERR, target->scsi_host,
				     "Null scmnd for RSP w/tag %016llx\n",
				     (unsigned long long) rsp->tag);

			spin_lock_irqsave(&target->lock, flags);
			target->req_lim += be32_to_cpu(rsp->req_lim_delta);
			spin_unlock_irqrestore(&target->lock, flags);

			return;
		}
		scmnd->result = rsp->status;

		if (rsp->flags & SRP_RSP_FLAG_SNSVALID) {
			memcpy(scmnd->sense_buffer, rsp->data +
			       be32_to_cpu(rsp->resp_data_len),
			       min_t(int, be32_to_cpu(rsp->sense_data_len),
				     SCSI_SENSE_BUFFERSIZE));
		}

		if (rsp->flags & (SRP_RSP_FLAG_DOOVER | SRP_RSP_FLAG_DOUNDER))
			scsi_set_resid(scmnd, be32_to_cpu(rsp->data_out_res_cnt));
		else if (rsp->flags & (SRP_RSP_FLAG_DIOVER | SRP_RSP_FLAG_DIUNDER))
			scsi_set_resid(scmnd, be32_to_cpu(rsp->data_in_res_cnt));

		srp_free_req(target, req, scmnd,
			     be32_to_cpu(rsp->req_lim_delta));

		scmnd->host_scribble = NULL;
		scmnd->scsi_done(scmnd);
	}
}

static int srp_response_common(struct srp_target_port *target, s32 req_delta,
			       void *rsp, int len)
{
	struct ib_device *dev = target->srp_host->srp_dev->dev;
	unsigned long flags;
	struct srp_iu *iu;
	int err;

	spin_lock_irqsave(&target->lock, flags);
	target->req_lim += req_delta;
	iu = __srp_get_tx_iu(target, SRP_IU_RSP);
	spin_unlock_irqrestore(&target->lock, flags);

	if (!iu) {
		shost_printk(KERN_ERR, target->scsi_host, PFX
			     "no IU available to send response\n");
		return 1;
	}

	ib_dma_sync_single_for_cpu(dev, iu->dma, len, DMA_TO_DEVICE);
	memcpy(iu->buf, rsp, len);
	ib_dma_sync_single_for_device(dev, iu->dma, len, DMA_TO_DEVICE);

	err = srp_post_send(target, iu, len);
	if (err) {
		shost_printk(KERN_ERR, target->scsi_host, PFX
			     "unable to post response: %d\n", err);
		srp_put_tx_iu(target, iu, SRP_IU_RSP);
	}

	return err;
}

static void srp_process_cred_req(struct srp_target_port *target,
				 struct srp_cred_req *req)
{
	struct srp_cred_rsp rsp = {
		.opcode = SRP_CRED_RSP,
		.tag = req->tag,
	};
	s32 delta = be32_to_cpu(req->req_lim_delta);

	if (srp_response_common(target, delta, &rsp, sizeof rsp))
		shost_printk(KERN_ERR, target->scsi_host, PFX
			     "problems processing SRP_CRED_REQ\n");
}

static void srp_process_aer_req(struct srp_target_port *target,
				struct srp_aer_req *req)
{
	struct srp_aer_rsp rsp = {
		.opcode = SRP_AER_RSP,
		.tag = req->tag,
	};
	s32 delta = be32_to_cpu(req->req_lim_delta);

	shost_printk(KERN_ERR, target->scsi_host, PFX
		     "ignoring AER for LUN %llu\n", be64_to_cpu(req->lun));

	if (srp_response_common(target, delta, &rsp, sizeof rsp))
		shost_printk(KERN_ERR, target->scsi_host, PFX
			     "problems processing SRP_AER_REQ\n");
}

Roland Dreieraef9ec32005-11-02 14:07:13 -08001383static void srp_handle_recv(struct srp_target_port *target, struct ib_wc *wc)
1384{
Bart Van Asschedcb4cb82010-11-26 13:22:48 -05001385 struct ib_device *dev = target->srp_host->srp_dev->dev;
Roland Dreier737b94e2011-05-23 11:30:04 -07001386 struct srp_iu *iu = (struct srp_iu *) (uintptr_t) wc->wr_id;
Bart Van Asschec996bb42010-07-30 10:59:05 +00001387 int res;
Roland Dreieraef9ec32005-11-02 14:07:13 -08001388 u8 opcode;
1389
Ralph Campbell85507bc2006-12-12 14:30:55 -08001390 ib_dma_sync_single_for_cpu(dev, iu->dma, target->max_ti_iu_len,
1391 DMA_FROM_DEVICE);
Roland Dreieraef9ec32005-11-02 14:07:13 -08001392
1393 opcode = *(u8 *) iu->buf;
1394
1395 if (0) {
David Dillow7aa54bd2008-01-07 18:23:41 -05001396 shost_printk(KERN_ERR, target->scsi_host,
1397 PFX "recv completion, opcode 0x%02x\n", opcode);
Bart Van Assche7a700812010-07-29 15:56:37 +00001398 print_hex_dump(KERN_ERR, "", DUMP_PREFIX_OFFSET, 8, 1,
1399 iu->buf, wc->byte_len, true);
Roland Dreieraef9ec32005-11-02 14:07:13 -08001400 }
1401
1402 switch (opcode) {
1403 case SRP_RSP:
1404 srp_process_rsp(target, iu->buf);
1405 break;
1406
David Dillowbb125882010-10-08 14:40:47 -04001407 case SRP_CRED_REQ:
1408 srp_process_cred_req(target, iu->buf);
1409 break;
1410
1411 case SRP_AER_REQ:
1412 srp_process_aer_req(target, iu->buf);
1413 break;
1414
Roland Dreieraef9ec32005-11-02 14:07:13 -08001415 case SRP_T_LOGOUT:
1416 /* XXX Handle target logout */
David Dillow7aa54bd2008-01-07 18:23:41 -05001417 shost_printk(KERN_WARNING, target->scsi_host,
1418 PFX "Got target logout request\n");
Roland Dreieraef9ec32005-11-02 14:07:13 -08001419 break;
1420
1421 default:
David Dillow7aa54bd2008-01-07 18:23:41 -05001422 shost_printk(KERN_WARNING, target->scsi_host,
1423 PFX "Unhandled SRP opcode 0x%02x\n", opcode);
Roland Dreieraef9ec32005-11-02 14:07:13 -08001424 break;
1425 }
1426
Ralph Campbell85507bc2006-12-12 14:30:55 -08001427 ib_dma_sync_single_for_device(dev, iu->dma, target->max_ti_iu_len,
1428 DMA_FROM_DEVICE);
Bart Van Asschec996bb42010-07-30 10:59:05 +00001429
Bart Van Asschedcb4cb82010-11-26 13:22:48 -05001430 res = srp_post_recv(target, iu);
Bart Van Asschec996bb42010-07-30 10:59:05 +00001431 if (res != 0)
1432 shost_printk(KERN_ERR, target->scsi_host,
1433 PFX "Recv failed with error code %d\n", res);
Roland Dreieraef9ec32005-11-02 14:07:13 -08001434}
1435
Bart Van Asschec1120f82013-10-26 14:35:08 +02001436/**
 1437 * srp_tl_err_work() - handle a transport layer error
 * @work: work structure embedded in the srp_target_port being handled.
 1438 *
1439 * Note: This function may get invoked before the rport has been created,
1440 * hence the target->rport test.
1441 */
1442static void srp_tl_err_work(struct work_struct *work)
1443{
1444 struct srp_target_port *target;
1445
1446 target = container_of(work, struct srp_target_port, tl_err_work);
1447 if (target->rport)
1448 srp_start_tl_fail_timers(target->rport);
1449}
1450
Bart Van Assche948d1e82011-09-03 09:25:42 +02001451static void srp_handle_qp_err(enum ib_wc_status wc_status,
1452 enum ib_wc_opcode wc_opcode,
1453 struct srp_target_port *target)
1454{
Bart Van Assche294c8752011-12-25 12:18:12 +00001455 if (target->connected && !target->qp_in_error) {
Bart Van Assche4f0af692012-11-26 11:16:40 +01001456 shost_printk(KERN_ERR, target->scsi_host,
1457 PFX "failed %s status %d\n",
1458 wc_opcode & IB_WC_RECV ? "receive" : "send",
1459 wc_status);
Bart Van Asschec1120f82013-10-26 14:35:08 +02001460 queue_work(system_long_wq, &target->tl_err_work);
Bart Van Assche4f0af692012-11-26 11:16:40 +01001461 }
Bart Van Assche948d1e82011-09-03 09:25:42 +02001462 target->qp_in_error = true;
1463}
1464
Bart Van Assche9c03dc92010-02-02 19:23:54 +00001465static void srp_recv_completion(struct ib_cq *cq, void *target_ptr)
Roland Dreieraef9ec32005-11-02 14:07:13 -08001466{
1467 struct srp_target_port *target = target_ptr;
1468 struct ib_wc wc;
Roland Dreieraef9ec32005-11-02 14:07:13 -08001469
1470 ib_req_notify_cq(cq, IB_CQ_NEXT_COMP);
1471 while (ib_poll_cq(cq, 1, &wc) > 0) {
Bart Van Assche948d1e82011-09-03 09:25:42 +02001472 if (likely(wc.status == IB_WC_SUCCESS)) {
1473 srp_handle_recv(target, &wc);
1474 } else {
1475 srp_handle_qp_err(wc.status, wc.opcode, target);
Roland Dreieraef9ec32005-11-02 14:07:13 -08001476 }
Bart Van Assche9c03dc92010-02-02 19:23:54 +00001477 }
1478}
1479
1480static void srp_send_completion(struct ib_cq *cq, void *target_ptr)
1481{
1482 struct srp_target_port *target = target_ptr;
1483 struct ib_wc wc;
Bart Van Asschedcb4cb82010-11-26 13:22:48 -05001484 struct srp_iu *iu;
Bart Van Assche9c03dc92010-02-02 19:23:54 +00001485
1486 while (ib_poll_cq(cq, 1, &wc) > 0) {
Bart Van Assche948d1e82011-09-03 09:25:42 +02001487 if (likely(wc.status == IB_WC_SUCCESS)) {
1488 iu = (struct srp_iu *) (uintptr_t) wc.wr_id;
1489 list_add(&iu->list, &target->free_tx);
1490 } else {
1491 srp_handle_qp_err(wc.status, wc.opcode, target);
Bart Van Assche9c03dc92010-02-02 19:23:54 +00001492 }
Roland Dreieraef9ec32005-11-02 14:07:13 -08001493 }
1494}
1495
Bart Van Assche76c75b22010-11-26 14:37:47 -05001496static int srp_queuecommand(struct Scsi_Host *shost, struct scsi_cmnd *scmnd)
Roland Dreieraef9ec32005-11-02 14:07:13 -08001497{
Bart Van Assche76c75b22010-11-26 14:37:47 -05001498 struct srp_target_port *target = host_to_target(shost);
Bart Van Asschea95cadb2013-10-26 14:37:17 +02001499 struct srp_rport *rport = target->rport;
Roland Dreieraef9ec32005-11-02 14:07:13 -08001500 struct srp_request *req;
1501 struct srp_iu *iu;
1502 struct srp_cmd *cmd;
Ralph Campbell85507bc2006-12-12 14:30:55 -08001503 struct ib_device *dev;
Bart Van Assche76c75b22010-11-26 14:37:47 -05001504 unsigned long flags;
Bart Van Asscheed9b2262013-10-26 14:34:27 +02001505 int len, result;
Bart Van Asschea95cadb2013-10-26 14:37:17 +02001506 const bool in_scsi_eh = !in_interrupt() && current == shost->ehandler;
1507
1508 /*
1509 * The SCSI EH thread is the only context from which srp_queuecommand()
1510 * can get invoked for blocked devices (SDEV_BLOCK /
1511 * SDEV_CREATED_BLOCK). Avoid racing with srp_reconnect_rport() by
1512 * locking the rport mutex if invoked from inside the SCSI EH.
1513 */
1514 if (in_scsi_eh)
1515 mutex_lock(&rport->mutex);
Roland Dreieraef9ec32005-11-02 14:07:13 -08001516
Bart Van Asscheed9b2262013-10-26 14:34:27 +02001517 result = srp_chkready(target->rport);
1518 if (unlikely(result)) {
1519 scmnd->result = result;
Bart Van Assche2ce19e72013-02-21 17:20:00 +00001520 scmnd->scsi_done(scmnd);
Bart Van Asschea95cadb2013-10-26 14:37:17 +02001521 goto unlock_rport;
Bart Van Assche2ce19e72013-02-21 17:20:00 +00001522 }
1523
Bart Van Asschee9684672010-11-26 15:08:38 -05001524 spin_lock_irqsave(&target->lock, flags);
David Dillowbb125882010-10-08 14:40:47 -04001525 iu = __srp_get_tx_iu(target, SRP_IU_CMD);
Roland Dreieraef9ec32005-11-02 14:07:13 -08001526 if (!iu)
Bart Van Assche695b8342011-01-13 19:02:25 +00001527 goto err_unlock;
1528
1529 req = list_first_entry(&target->free_reqs, struct srp_request, list);
1530 list_del(&req->list);
1531 spin_unlock_irqrestore(&target->lock, flags);
Roland Dreieraef9ec32005-11-02 14:07:13 -08001532
Greg Kroah-Hartman05321932008-03-06 00:13:36 +01001533 dev = target->srp_host->srp_dev->dev;
David Dillow49248642011-01-14 18:23:24 -05001534 ib_dma_sync_single_for_cpu(dev, iu->dma, target->max_iu_len,
Ralph Campbell85507bc2006-12-12 14:30:55 -08001535 DMA_TO_DEVICE);
Roland Dreieraef9ec32005-11-02 14:07:13 -08001536
Roland Dreieraef9ec32005-11-02 14:07:13 -08001537 scmnd->result = 0;
David Dillowf8b6e312010-11-26 13:02:21 -05001538 scmnd->host_scribble = (void *) req;
Roland Dreieraef9ec32005-11-02 14:07:13 -08001539
1540 cmd = iu->buf;
1541 memset(cmd, 0, sizeof *cmd);
1542
1543 cmd->opcode = SRP_CMD;
1544 cmd->lun = cpu_to_be64((u64) scmnd->device->lun << 48);
Roland Dreierd945e1d2006-05-09 10:50:28 -07001545 cmd->tag = req->index;
Roland Dreieraef9ec32005-11-02 14:07:13 -08001546 memcpy(cmd->cdb, scmnd->cmnd, scmnd->cmd_len);
1547
Roland Dreieraef9ec32005-11-02 14:07:13 -08001548 req->scmnd = scmnd;
1549 req->cmd = iu;
Roland Dreieraef9ec32005-11-02 14:07:13 -08001550
1551 len = srp_map_data(scmnd, target, req);
1552 if (len < 0) {
David Dillow7aa54bd2008-01-07 18:23:41 -05001553 shost_printk(KERN_ERR, target->scsi_host,
1554 PFX "Failed to map data\n");
Bart Van Assche76c75b22010-11-26 14:37:47 -05001555 goto err_iu;
Roland Dreieraef9ec32005-11-02 14:07:13 -08001556 }
1557
David Dillow49248642011-01-14 18:23:24 -05001558 ib_dma_sync_single_for_device(dev, iu->dma, target->max_iu_len,
Ralph Campbell85507bc2006-12-12 14:30:55 -08001559 DMA_TO_DEVICE);
Roland Dreieraef9ec32005-11-02 14:07:13 -08001560
Bart Van Assche76c75b22010-11-26 14:37:47 -05001561 if (srp_post_send(target, iu, len)) {
David Dillow7aa54bd2008-01-07 18:23:41 -05001562 shost_printk(KERN_ERR, target->scsi_host, PFX "Send failed\n");
Roland Dreieraef9ec32005-11-02 14:07:13 -08001563 goto err_unmap;
1564 }
1565
Bart Van Asschea95cadb2013-10-26 14:37:17 +02001566unlock_rport:
1567 if (in_scsi_eh)
1568 mutex_unlock(&rport->mutex);
1569
Roland Dreieraef9ec32005-11-02 14:07:13 -08001570 return 0;
1571
1572err_unmap:
1573 srp_unmap_data(scmnd, target, req);
1574
Bart Van Assche76c75b22010-11-26 14:37:47 -05001575err_iu:
1576 srp_put_tx_iu(target, iu, SRP_IU_CMD);
1577
Bart Van Asschee9684672010-11-26 15:08:38 -05001578 spin_lock_irqsave(&target->lock, flags);
Bart Van Assche76c75b22010-11-26 14:37:47 -05001579 list_add(&req->list, &target->free_reqs);
Bart Van Assche695b8342011-01-13 19:02:25 +00001580
1581err_unlock:
Bart Van Asschee9684672010-11-26 15:08:38 -05001582 spin_unlock_irqrestore(&target->lock, flags);
Bart Van Assche76c75b22010-11-26 14:37:47 -05001583
Bart Van Asschea95cadb2013-10-26 14:37:17 +02001584 if (in_scsi_eh)
1585 mutex_unlock(&rport->mutex);
1586
Roland Dreieraef9ec32005-11-02 14:07:13 -08001587 return SCSI_MLQUEUE_HOST_BUSY;
1588}
1589
Bart Van Assche4d73f952013-10-26 14:40:37 +02001590/*
1591 * Note: the resources allocated in this function are freed in
1592 * srp_free_target_ib().
1593 */
Roland Dreieraef9ec32005-11-02 14:07:13 -08001594static int srp_alloc_iu_bufs(struct srp_target_port *target)
1595{
1596 int i;
1597
Bart Van Assche4d73f952013-10-26 14:40:37 +02001598 target->rx_ring = kzalloc(target->queue_size * sizeof(*target->rx_ring),
1599 GFP_KERNEL);
1600 if (!target->rx_ring)
1601 goto err_no_ring;
1602 target->tx_ring = kzalloc(target->queue_size * sizeof(*target->tx_ring),
1603 GFP_KERNEL);
1604 if (!target->tx_ring)
1605 goto err_no_ring;
1606
1607 for (i = 0; i < target->queue_size; ++i) {
Roland Dreieraef9ec32005-11-02 14:07:13 -08001608 target->rx_ring[i] = srp_alloc_iu(target->srp_host,
1609 target->max_ti_iu_len,
1610 GFP_KERNEL, DMA_FROM_DEVICE);
1611 if (!target->rx_ring[i])
1612 goto err;
1613 }
1614
Bart Van Assche4d73f952013-10-26 14:40:37 +02001615 for (i = 0; i < target->queue_size; ++i) {
Roland Dreieraef9ec32005-11-02 14:07:13 -08001616 target->tx_ring[i] = srp_alloc_iu(target->srp_host,
David Dillow49248642011-01-14 18:23:24 -05001617 target->max_iu_len,
Roland Dreieraef9ec32005-11-02 14:07:13 -08001618 GFP_KERNEL, DMA_TO_DEVICE);
1619 if (!target->tx_ring[i])
1620 goto err;
Bart Van Asschedcb4cb82010-11-26 13:22:48 -05001621
1622 list_add(&target->tx_ring[i]->list, &target->free_tx);
Roland Dreieraef9ec32005-11-02 14:07:13 -08001623 }
1624
1625 return 0;
1626
1627err:
Bart Van Assche4d73f952013-10-26 14:40:37 +02001628 for (i = 0; i < target->queue_size; ++i) {
Roland Dreieraef9ec32005-11-02 14:07:13 -08001629 srp_free_iu(target->srp_host, target->rx_ring[i]);
Bart Van Assche4d73f952013-10-26 14:40:37 +02001630 srp_free_iu(target->srp_host, target->tx_ring[i]);
Roland Dreieraef9ec32005-11-02 14:07:13 -08001631 }
 1632
1634err_no_ring:
1635 kfree(target->tx_ring);
1636 target->tx_ring = NULL;
1637 kfree(target->rx_ring);
1638 target->rx_ring = NULL;
Roland Dreieraef9ec32005-11-02 14:07:13 -08001639
1640 return -ENOMEM;
1641}
1642
Bart Van Asschec9b03c12011-09-03 09:34:48 +02001643static uint32_t srp_compute_rq_tmo(struct ib_qp_attr *qp_attr, int attr_mask)
1644{
1645 uint64_t T_tr_ns, max_compl_time_ms;
1646 uint32_t rq_tmo_jiffies;
1647
1648 /*
1649 * According to section 11.2.4.2 in the IBTA spec (Modify Queue Pair,
1650 * table 91), both the QP timeout and the retry count have to be set
1651 * for RC QP's during the RTR to RTS transition.
1652 */
1653 WARN_ON_ONCE((attr_mask & (IB_QP_TIMEOUT | IB_QP_RETRY_CNT)) !=
1654 (IB_QP_TIMEOUT | IB_QP_RETRY_CNT));
1655
1656 /*
1657 * Set target->rq_tmo_jiffies to one second more than the largest time
1658 * it can take before an error completion is generated. See also
1659 * C9-140..142 in the IBTA spec for more information about how to
1660 * convert the QP Local ACK Timeout value to nanoseconds.
1661 */
1662 T_tr_ns = 4096 * (1ULL << qp_attr->timeout);
1663 max_compl_time_ms = qp_attr->retry_cnt * 4 * T_tr_ns;
1664 do_div(max_compl_time_ms, NSEC_PER_MSEC);
1665 rq_tmo_jiffies = msecs_to_jiffies(max_compl_time_ms + 1000);
1666
1667 return rq_tmo_jiffies;
1668}
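
/*
 * Worked example for srp_compute_rq_tmo() (illustrative values, not taken
 * from any particular HCA): with qp_attr->timeout = 14 and
 * qp_attr->retry_cnt = 7,
 *
 *   T_tr_ns        = 4096 * 2^14 ns                  ~= 67.1 ms
 *   max_compl_time = 7 * 4 * 67.1 ms                 ~= 1.88 s
 *   rq_tmo_jiffies = msecs_to_jiffies(1879 + 1000)   ~= 2.9 s
 *
 * i.e. the block layer request timeout ends up about one second above the
 * worst-case time needed for an error completion to be generated.
 */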
1669
David Dillow961e0be2011-01-14 17:32:07 -05001670static void srp_cm_rep_handler(struct ib_cm_id *cm_id,
1671 struct srp_login_rsp *lrsp,
1672 struct srp_target_port *target)
1673{
1674 struct ib_qp_attr *qp_attr = NULL;
1675 int attr_mask = 0;
1676 int ret;
1677 int i;
1678
1679 if (lrsp->opcode == SRP_LOGIN_RSP) {
1680 target->max_ti_iu_len = be32_to_cpu(lrsp->max_ti_iu_len);
1681 target->req_lim = be32_to_cpu(lrsp->req_lim_delta);
1682
1683 /*
1684 * Reserve credits for task management so we don't
1685 * bounce requests back to the SCSI mid-layer.
1686 */
1687 target->scsi_host->can_queue
1688 = min(target->req_lim - SRP_TSK_MGMT_SQ_SIZE,
1689 target->scsi_host->can_queue);
Bart Van Assche4d73f952013-10-26 14:40:37 +02001690 target->scsi_host->cmd_per_lun
1691 = min_t(int, target->scsi_host->can_queue,
1692 target->scsi_host->cmd_per_lun);
David Dillow961e0be2011-01-14 17:32:07 -05001693 } else {
1694 shost_printk(KERN_WARNING, target->scsi_host,
1695 PFX "Unhandled RSP opcode %#x\n", lrsp->opcode);
1696 ret = -ECONNRESET;
1697 goto error;
1698 }
1699
Bart Van Assche4d73f952013-10-26 14:40:37 +02001700 if (!target->rx_ring) {
David Dillow961e0be2011-01-14 17:32:07 -05001701 ret = srp_alloc_iu_bufs(target);
1702 if (ret)
1703 goto error;
1704 }
1705
1706 ret = -ENOMEM;
1707 qp_attr = kmalloc(sizeof *qp_attr, GFP_KERNEL);
1708 if (!qp_attr)
1709 goto error;
1710
1711 qp_attr->qp_state = IB_QPS_RTR;
1712 ret = ib_cm_init_qp_attr(cm_id, qp_attr, &attr_mask);
1713 if (ret)
1714 goto error_free;
1715
1716 ret = ib_modify_qp(target->qp, qp_attr, attr_mask);
1717 if (ret)
1718 goto error_free;
1719
Bart Van Assche4d73f952013-10-26 14:40:37 +02001720 for (i = 0; i < target->queue_size; i++) {
David Dillow961e0be2011-01-14 17:32:07 -05001721 struct srp_iu *iu = target->rx_ring[i];
1722 ret = srp_post_recv(target, iu);
1723 if (ret)
1724 goto error_free;
1725 }
1726
1727 qp_attr->qp_state = IB_QPS_RTS;
1728 ret = ib_cm_init_qp_attr(cm_id, qp_attr, &attr_mask);
1729 if (ret)
1730 goto error_free;
1731
Bart Van Asschec9b03c12011-09-03 09:34:48 +02001732 target->rq_tmo_jiffies = srp_compute_rq_tmo(qp_attr, attr_mask);
1733
David Dillow961e0be2011-01-14 17:32:07 -05001734 ret = ib_modify_qp(target->qp, qp_attr, attr_mask);
1735 if (ret)
1736 goto error_free;
1737
1738 ret = ib_send_cm_rtu(cm_id, NULL, 0);
1739
1740error_free:
1741 kfree(qp_attr);
1742
1743error:
1744 target->status = ret;
1745}
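
/*
 * Illustration of the credit reservation in srp_cm_rep_handler() above
 * (assumed numbers): if the login response grants req_lim = 64 and
 * SRP_TSK_MGMT_SQ_SIZE is 1, can_queue is clamped to at most 63 so that
 * one request limit credit always remains available for task management
 * functions such as ABORT TASK and LUN RESET.
 */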
1746
Roland Dreieraef9ec32005-11-02 14:07:13 -08001747static void srp_cm_rej_handler(struct ib_cm_id *cm_id,
1748 struct ib_cm_event *event,
1749 struct srp_target_port *target)
1750{
David Dillow7aa54bd2008-01-07 18:23:41 -05001751 struct Scsi_Host *shost = target->scsi_host;
Roland Dreieraef9ec32005-11-02 14:07:13 -08001752 struct ib_class_port_info *cpi;
1753 int opcode;
1754
1755 switch (event->param.rej_rcvd.reason) {
1756 case IB_CM_REJ_PORT_CM_REDIRECT:
1757 cpi = event->param.rej_rcvd.ari;
1758 target->path.dlid = cpi->redirect_lid;
1759 target->path.pkey = cpi->redirect_pkey;
1760 cm_id->remote_cm_qpn = be32_to_cpu(cpi->redirect_qp) & 0x00ffffff;
1761 memcpy(target->path.dgid.raw, cpi->redirect_gid, 16);
1762
1763 target->status = target->path.dlid ?
1764 SRP_DLID_REDIRECT : SRP_PORT_REDIRECT;
1765 break;
1766
1767 case IB_CM_REJ_PORT_REDIRECT:
Roland Dreier5d7cbfd2007-08-03 10:45:18 -07001768 if (srp_target_is_topspin(target)) {
Roland Dreieraef9ec32005-11-02 14:07:13 -08001769 /*
1770 * Topspin/Cisco SRP gateways incorrectly send
1771 * reject reason code 25 when they mean 24
1772 * (port redirect).
1773 */
1774 memcpy(target->path.dgid.raw,
1775 event->param.rej_rcvd.ari, 16);
1776
David Dillow7aa54bd2008-01-07 18:23:41 -05001777 shost_printk(KERN_DEBUG, shost,
1778 PFX "Topspin/Cisco redirect to target port GID %016llx%016llx\n",
1779 (unsigned long long) be64_to_cpu(target->path.dgid.global.subnet_prefix),
1780 (unsigned long long) be64_to_cpu(target->path.dgid.global.interface_id));
Roland Dreieraef9ec32005-11-02 14:07:13 -08001781
1782 target->status = SRP_PORT_REDIRECT;
1783 } else {
David Dillow7aa54bd2008-01-07 18:23:41 -05001784 shost_printk(KERN_WARNING, shost,
1785 " REJ reason: IB_CM_REJ_PORT_REDIRECT\n");
Roland Dreieraef9ec32005-11-02 14:07:13 -08001786 target->status = -ECONNRESET;
1787 }
1788 break;
1789
1790 case IB_CM_REJ_DUPLICATE_LOCAL_COMM_ID:
David Dillow7aa54bd2008-01-07 18:23:41 -05001791 shost_printk(KERN_WARNING, shost,
1792 " REJ reason: IB_CM_REJ_DUPLICATE_LOCAL_COMM_ID\n");
Roland Dreieraef9ec32005-11-02 14:07:13 -08001793 target->status = -ECONNRESET;
1794 break;
1795
1796 case IB_CM_REJ_CONSUMER_DEFINED:
1797 opcode = *(u8 *) event->private_data;
1798 if (opcode == SRP_LOGIN_REJ) {
1799 struct srp_login_rej *rej = event->private_data;
1800 u32 reason = be32_to_cpu(rej->reason);
1801
1802 if (reason == SRP_LOGIN_REJ_REQ_IT_IU_LENGTH_TOO_LARGE)
David Dillow7aa54bd2008-01-07 18:23:41 -05001803 shost_printk(KERN_WARNING, shost,
1804 PFX "SRP_LOGIN_REJ: requested max_it_iu_len too large\n");
Roland Dreieraef9ec32005-11-02 14:07:13 -08001805 else
David Dillow7aa54bd2008-01-07 18:23:41 -05001806 shost_printk(KERN_WARNING, shost,
1807 PFX "SRP LOGIN REJECTED, reason 0x%08x\n", reason);
Roland Dreieraef9ec32005-11-02 14:07:13 -08001808 } else
David Dillow7aa54bd2008-01-07 18:23:41 -05001809 shost_printk(KERN_WARNING, shost,
1810 " REJ reason: IB_CM_REJ_CONSUMER_DEFINED,"
1811 " opcode 0x%02x\n", opcode);
Roland Dreieraef9ec32005-11-02 14:07:13 -08001812 target->status = -ECONNRESET;
1813 break;
1814
David Dillow9fe4bcf2008-01-08 17:08:52 -05001815 case IB_CM_REJ_STALE_CONN:
1816 shost_printk(KERN_WARNING, shost, " REJ reason: stale connection\n");
1817 target->status = SRP_STALE_CONN;
1818 break;
1819
Roland Dreieraef9ec32005-11-02 14:07:13 -08001820 default:
David Dillow7aa54bd2008-01-07 18:23:41 -05001821 shost_printk(KERN_WARNING, shost, " REJ reason 0x%x\n",
1822 event->param.rej_rcvd.reason);
Roland Dreieraef9ec32005-11-02 14:07:13 -08001823 target->status = -ECONNRESET;
1824 }
1825}
1826
1827static int srp_cm_handler(struct ib_cm_id *cm_id, struct ib_cm_event *event)
1828{
1829 struct srp_target_port *target = cm_id->context;
Roland Dreieraef9ec32005-11-02 14:07:13 -08001830 int comp = 0;
Roland Dreieraef9ec32005-11-02 14:07:13 -08001831
1832 switch (event->event) {
1833 case IB_CM_REQ_ERROR:
David Dillow7aa54bd2008-01-07 18:23:41 -05001834 shost_printk(KERN_DEBUG, target->scsi_host,
1835 PFX "Sending CM REQ failed\n");
Roland Dreieraef9ec32005-11-02 14:07:13 -08001836 comp = 1;
1837 target->status = -ECONNRESET;
1838 break;
1839
1840 case IB_CM_REP_RECEIVED:
1841 comp = 1;
David Dillow961e0be2011-01-14 17:32:07 -05001842 srp_cm_rep_handler(cm_id, event->private_data, target);
Roland Dreieraef9ec32005-11-02 14:07:13 -08001843 break;
1844
1845 case IB_CM_REJ_RECEIVED:
David Dillow7aa54bd2008-01-07 18:23:41 -05001846 shost_printk(KERN_DEBUG, target->scsi_host, PFX "REJ received\n");
Roland Dreieraef9ec32005-11-02 14:07:13 -08001847 comp = 1;
1848
1849 srp_cm_rej_handler(cm_id, event, target);
1850 break;
1851
Ishai Rabinovitzb7ac4ab2006-06-17 20:37:32 -07001852 case IB_CM_DREQ_RECEIVED:
David Dillow7aa54bd2008-01-07 18:23:41 -05001853 shost_printk(KERN_WARNING, target->scsi_host,
1854 PFX "DREQ received - connection closed\n");
Bart Van Assche294c8752011-12-25 12:18:12 +00001855 srp_change_conn_state(target, false);
Ishai Rabinovitzb7ac4ab2006-06-17 20:37:32 -07001856 if (ib_send_cm_drep(cm_id, NULL, 0))
David Dillow7aa54bd2008-01-07 18:23:41 -05001857 shost_printk(KERN_ERR, target->scsi_host,
1858 PFX "Sending CM DREP failed\n");
Bart Van Asschec1120f82013-10-26 14:35:08 +02001859 queue_work(system_long_wq, &target->tl_err_work);
Roland Dreieraef9ec32005-11-02 14:07:13 -08001860 break;
1861
1862 case IB_CM_TIMEWAIT_EXIT:
David Dillow7aa54bd2008-01-07 18:23:41 -05001863 shost_printk(KERN_ERR, target->scsi_host,
1864 PFX "connection closed\n");
Roland Dreieraef9ec32005-11-02 14:07:13 -08001865
Roland Dreieraef9ec32005-11-02 14:07:13 -08001866 target->status = 0;
1867 break;
1868
Ishai Rabinovitzb7ac4ab2006-06-17 20:37:32 -07001869 case IB_CM_MRA_RECEIVED:
1870 case IB_CM_DREQ_ERROR:
1871 case IB_CM_DREP_RECEIVED:
1872 break;
1873
Roland Dreieraef9ec32005-11-02 14:07:13 -08001874 default:
David Dillow7aa54bd2008-01-07 18:23:41 -05001875 shost_printk(KERN_WARNING, target->scsi_host,
1876 PFX "Unhandled CM event %d\n", event->event);
Roland Dreieraef9ec32005-11-02 14:07:13 -08001877 break;
1878 }
1879
1880 if (comp)
1881 complete(&target->done);
1882
Roland Dreieraef9ec32005-11-02 14:07:13 -08001883 return 0;
1884}
1885
Jack Wang71444b92013-11-07 11:37:37 +01001886/**
 1887 * srp_change_queue_type - change the device queue tag type
1888 * @sdev: scsi device struct
1889 * @tag_type: requested tag type
1890 *
1891 * Returns queue tag type.
1892 */
1893static int
1894srp_change_queue_type(struct scsi_device *sdev, int tag_type)
1895{
1896 if (sdev->tagged_supported) {
1897 scsi_set_tag_type(sdev, tag_type);
1898 if (tag_type)
1899 scsi_activate_tcq(sdev, sdev->queue_depth);
1900 else
1901 scsi_deactivate_tcq(sdev, sdev->queue_depth);
1902 } else
1903 tag_type = 0;
1904
1905 return tag_type;
1906}
1907
1908/**
 1909 * srp_change_queue_depth - set the device queue depth
1910 * @sdev: scsi device struct
1911 * @qdepth: requested queue depth
1912 * @reason: SCSI_QDEPTH_DEFAULT/SCSI_QDEPTH_QFULL/SCSI_QDEPTH_RAMP_UP
1913 * (see include/scsi/scsi_host.h for definition)
1914 *
1915 * Returns queue depth.
1916 */
1917static int
1918srp_change_queue_depth(struct scsi_device *sdev, int qdepth, int reason)
1919{
1920 struct Scsi_Host *shost = sdev->host;
 1921	int max_depth;

1922 if (reason == SCSI_QDEPTH_DEFAULT || reason == SCSI_QDEPTH_RAMP_UP) {
1923 max_depth = shost->can_queue;
1924 if (!sdev->tagged_supported)
1925 max_depth = 1;
1926 if (qdepth > max_depth)
1927 qdepth = max_depth;
1928 scsi_adjust_queue_depth(sdev, scsi_get_tag_type(sdev), qdepth);
1929 } else if (reason == SCSI_QDEPTH_QFULL)
1930 scsi_track_queue_full(sdev, qdepth);
1931 else
1932 return -EOPNOTSUPP;
1933
1934 return sdev->queue_depth;
1935}
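
/*
 * Example of the behavior above (hypothetical values): with
 * shost->can_queue == 62, a SCSI_QDEPTH_RAMP_UP request for a depth of 128
 * is capped at 62, while a SCSI_QDEPTH_QFULL event hands the requested
 * depth to scsi_track_queue_full(), which adjusts the queue depth itself
 * once QUEUE FULL conditions persist.
 */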
1936
Roland Dreierd945e1d2006-05-09 10:50:28 -07001937static int srp_send_tsk_mgmt(struct srp_target_port *target,
David Dillowf8b6e312010-11-26 13:02:21 -05001938 u64 req_tag, unsigned int lun, u8 func)
Roland Dreieraef9ec32005-11-02 14:07:13 -08001939{
Bart Van Asschea95cadb2013-10-26 14:37:17 +02001940 struct srp_rport *rport = target->rport;
David Dillow19081f32010-10-18 08:54:49 -04001941 struct ib_device *dev = target->srp_host->srp_dev->dev;
Roland Dreieraef9ec32005-11-02 14:07:13 -08001942 struct srp_iu *iu;
1943 struct srp_tsk_mgmt *tsk_mgmt;
Roland Dreieraef9ec32005-11-02 14:07:13 -08001944
Bart Van Assche3780d1f2013-02-21 17:18:00 +00001945 if (!target->connected || target->qp_in_error)
1946 return -1;
1947
David Dillowf8b6e312010-11-26 13:02:21 -05001948 init_completion(&target->tsk_mgmt_done);
Roland Dreieraef9ec32005-11-02 14:07:13 -08001949
Bart Van Asschea95cadb2013-10-26 14:37:17 +02001950 /*
 1951	 * Lock the rport mutex to prevent srp_create_target_ib() from being
 1952	 * invoked while a task management function is being sent.
1953 */
1954 mutex_lock(&rport->mutex);
Bart Van Asschee9684672010-11-26 15:08:38 -05001955 spin_lock_irq(&target->lock);
David Dillowbb125882010-10-08 14:40:47 -04001956 iu = __srp_get_tx_iu(target, SRP_IU_TSK_MGMT);
Bart Van Asschee9684672010-11-26 15:08:38 -05001957 spin_unlock_irq(&target->lock);
Bart Van Assche76c75b22010-11-26 14:37:47 -05001958
Bart Van Asschea95cadb2013-10-26 14:37:17 +02001959 if (!iu) {
1960 mutex_unlock(&rport->mutex);
1961
Bart Van Assche76c75b22010-11-26 14:37:47 -05001962 return -1;
Bart Van Asschea95cadb2013-10-26 14:37:17 +02001963 }
Roland Dreieraef9ec32005-11-02 14:07:13 -08001964
David Dillow19081f32010-10-18 08:54:49 -04001965 ib_dma_sync_single_for_cpu(dev, iu->dma, sizeof *tsk_mgmt,
1966 DMA_TO_DEVICE);
Roland Dreieraef9ec32005-11-02 14:07:13 -08001967 tsk_mgmt = iu->buf;
1968 memset(tsk_mgmt, 0, sizeof *tsk_mgmt);
1969
1970 tsk_mgmt->opcode = SRP_TSK_MGMT;
David Dillowf8b6e312010-11-26 13:02:21 -05001971 tsk_mgmt->lun = cpu_to_be64((u64) lun << 48);
1972 tsk_mgmt->tag = req_tag | SRP_TAG_TSK_MGMT;
Roland Dreieraef9ec32005-11-02 14:07:13 -08001973 tsk_mgmt->tsk_mgmt_func = func;
David Dillowf8b6e312010-11-26 13:02:21 -05001974 tsk_mgmt->task_tag = req_tag;
Roland Dreieraef9ec32005-11-02 14:07:13 -08001975
David Dillow19081f32010-10-18 08:54:49 -04001976 ib_dma_sync_single_for_device(dev, iu->dma, sizeof *tsk_mgmt,
1977 DMA_TO_DEVICE);
Bart Van Assche76c75b22010-11-26 14:37:47 -05001978 if (srp_post_send(target, iu, sizeof *tsk_mgmt)) {
1979 srp_put_tx_iu(target, iu, SRP_IU_TSK_MGMT);
Bart Van Asschea95cadb2013-10-26 14:37:17 +02001980 mutex_unlock(&rport->mutex);
1981
Bart Van Assche76c75b22010-11-26 14:37:47 -05001982 return -1;
1983 }
Bart Van Asschea95cadb2013-10-26 14:37:17 +02001984 mutex_unlock(&rport->mutex);
Roland Dreierd945e1d2006-05-09 10:50:28 -07001985
David Dillowf8b6e312010-11-26 13:02:21 -05001986 if (!wait_for_completion_timeout(&target->tsk_mgmt_done,
Roland Dreieraef9ec32005-11-02 14:07:13 -08001987 msecs_to_jiffies(SRP_ABORT_TIMEOUT_MS)))
Roland Dreierd945e1d2006-05-09 10:50:28 -07001988 return -1;
Roland Dreieraef9ec32005-11-02 14:07:13 -08001989
Roland Dreierd945e1d2006-05-09 10:50:28 -07001990 return 0;
Roland Dreierd945e1d2006-05-09 10:50:28 -07001991}
1992
Roland Dreieraef9ec32005-11-02 14:07:13 -08001993static int srp_abort(struct scsi_cmnd *scmnd)
1994{
Roland Dreierd945e1d2006-05-09 10:50:28 -07001995 struct srp_target_port *target = host_to_target(scmnd->device->host);
David Dillowf8b6e312010-11-26 13:02:21 -05001996 struct srp_request *req = (struct srp_request *) scmnd->host_scribble;
Bart Van Assche086f44f2013-06-12 15:23:04 +02001997 int ret;
Roland Dreierd945e1d2006-05-09 10:50:28 -07001998
David Dillow7aa54bd2008-01-07 18:23:41 -05001999	shost_printk(KERN_ERR, target->scsi_host, PFX "SRP abort called\n");
Roland Dreieraef9ec32005-11-02 14:07:13 -08002000
Bart Van Asschec7c4e7f2013-02-21 17:19:04 +00002001 if (!req || !srp_claim_req(target, req, scmnd))
Ishai Rabinovitz1033ff62007-01-16 17:26:22 +02002002 return FAILED;
Bart Van Assche086f44f2013-06-12 15:23:04 +02002003 if (srp_send_tsk_mgmt(target, req->index, scmnd->device->lun,
Bart Van Assche80d5e8a2013-07-10 17:36:35 +02002004 SRP_TSK_ABORT_TASK) == 0)
Bart Van Assche086f44f2013-06-12 15:23:04 +02002005 ret = SUCCESS;
Bart Van Asscheed9b2262013-10-26 14:34:27 +02002006 else if (target->rport->state == SRP_RPORT_LOST)
Bart Van Assche99e1c132013-06-28 14:49:58 +02002007 ret = FAST_IO_FAIL;
Bart Van Assche086f44f2013-06-12 15:23:04 +02002008 else
2009 ret = FAILED;
Bart Van Assche22032992012-08-14 13:18:53 +00002010 srp_free_req(target, req, scmnd, 0);
2011 scmnd->result = DID_ABORT << 16;
Bart Van Assched8536672012-08-24 10:29:11 +00002012 scmnd->scsi_done(scmnd);
Roland Dreierd945e1d2006-05-09 10:50:28 -07002013
Bart Van Assche086f44f2013-06-12 15:23:04 +02002014 return ret;
Roland Dreieraef9ec32005-11-02 14:07:13 -08002015}
2016
2017static int srp_reset_device(struct scsi_cmnd *scmnd)
2018{
Roland Dreierd945e1d2006-05-09 10:50:28 -07002019 struct srp_target_port *target = host_to_target(scmnd->device->host);
Bart Van Assche536ae142010-11-26 13:58:27 -05002020 int i;
Roland Dreierd945e1d2006-05-09 10:50:28 -07002021
David Dillow7aa54bd2008-01-07 18:23:41 -05002022	shost_printk(KERN_ERR, target->scsi_host, PFX "SRP reset_device called\n");
Roland Dreieraef9ec32005-11-02 14:07:13 -08002023
David Dillowf8b6e312010-11-26 13:02:21 -05002024 if (srp_send_tsk_mgmt(target, SRP_TAG_NO_REQ, scmnd->device->lun,
2025 SRP_TSK_LUN_RESET))
Roland Dreierd945e1d2006-05-09 10:50:28 -07002026 return FAILED;
David Dillowf8b6e312010-11-26 13:02:21 -05002027 if (target->tsk_mgmt_status)
Roland Dreierd945e1d2006-05-09 10:50:28 -07002028 return FAILED;
2029
Bart Van Assche4d73f952013-10-26 14:40:37 +02002030 for (i = 0; i < target->req_ring_size; ++i) {
Bart Van Assche536ae142010-11-26 13:58:27 -05002031 struct srp_request *req = &target->req_ring[i];
David Dillowf8b6e312010-11-26 13:02:21 -05002032 if (req->scmnd && req->scmnd->device == scmnd->device)
Bart Van Asscheed9b2262013-10-26 14:34:27 +02002033 srp_finish_req(target, req, DID_RESET << 16);
Bart Van Assche536ae142010-11-26 13:58:27 -05002034 }
Roland Dreierd945e1d2006-05-09 10:50:28 -07002035
Roland Dreierd945e1d2006-05-09 10:50:28 -07002036 return SUCCESS;
Roland Dreieraef9ec32005-11-02 14:07:13 -08002037}
2038
2039static int srp_reset_host(struct scsi_cmnd *scmnd)
2040{
2041 struct srp_target_port *target = host_to_target(scmnd->device->host);
Roland Dreieraef9ec32005-11-02 14:07:13 -08002042
David Dillow7aa54bd2008-01-07 18:23:41 -05002043 shost_printk(KERN_ERR, target->scsi_host, PFX "SRP reset_host called\n");
Roland Dreieraef9ec32005-11-02 14:07:13 -08002044
Bart Van Asscheed9b2262013-10-26 14:34:27 +02002045 return srp_reconnect_rport(target->rport) == 0 ? SUCCESS : FAILED;
Roland Dreieraef9ec32005-11-02 14:07:13 -08002046}
2047
Bart Van Asschec9b03c12011-09-03 09:34:48 +02002048static int srp_slave_configure(struct scsi_device *sdev)
2049{
2050 struct Scsi_Host *shost = sdev->host;
2051 struct srp_target_port *target = host_to_target(shost);
2052 struct request_queue *q = sdev->request_queue;
2053 unsigned long timeout;
2054
2055 if (sdev->type == TYPE_DISK) {
2056 timeout = max_t(unsigned, 30 * HZ, target->rq_tmo_jiffies);
2057 blk_queue_rq_timeout(q, timeout);
2058 }
2059
2060 return 0;
2061}
2062
Tony Jonesee959b02008-02-22 00:13:36 +01002063static ssize_t show_id_ext(struct device *dev, struct device_attribute *attr,
2064 char *buf)
Roland Dreier6ecb0c82006-03-20 10:08:23 -08002065{
Tony Jonesee959b02008-02-22 00:13:36 +01002066 struct srp_target_port *target = host_to_target(class_to_shost(dev));
Roland Dreier6ecb0c82006-03-20 10:08:23 -08002067
Roland Dreier6ecb0c82006-03-20 10:08:23 -08002068 return sprintf(buf, "0x%016llx\n",
2069 (unsigned long long) be64_to_cpu(target->id_ext));
2070}
2071
Tony Jonesee959b02008-02-22 00:13:36 +01002072static ssize_t show_ioc_guid(struct device *dev, struct device_attribute *attr,
2073 char *buf)
Roland Dreier6ecb0c82006-03-20 10:08:23 -08002074{
Tony Jonesee959b02008-02-22 00:13:36 +01002075 struct srp_target_port *target = host_to_target(class_to_shost(dev));
Roland Dreier6ecb0c82006-03-20 10:08:23 -08002076
Roland Dreier6ecb0c82006-03-20 10:08:23 -08002077 return sprintf(buf, "0x%016llx\n",
2078 (unsigned long long) be64_to_cpu(target->ioc_guid));
2079}
2080
Tony Jonesee959b02008-02-22 00:13:36 +01002081static ssize_t show_service_id(struct device *dev,
2082 struct device_attribute *attr, char *buf)
Roland Dreier6ecb0c82006-03-20 10:08:23 -08002083{
Tony Jonesee959b02008-02-22 00:13:36 +01002084 struct srp_target_port *target = host_to_target(class_to_shost(dev));
Roland Dreier6ecb0c82006-03-20 10:08:23 -08002085
Roland Dreier6ecb0c82006-03-20 10:08:23 -08002086 return sprintf(buf, "0x%016llx\n",
2087 (unsigned long long) be64_to_cpu(target->service_id));
2088}
2089
Tony Jonesee959b02008-02-22 00:13:36 +01002090static ssize_t show_pkey(struct device *dev, struct device_attribute *attr,
2091 char *buf)
Roland Dreier6ecb0c82006-03-20 10:08:23 -08002092{
Tony Jonesee959b02008-02-22 00:13:36 +01002093 struct srp_target_port *target = host_to_target(class_to_shost(dev));
Roland Dreier6ecb0c82006-03-20 10:08:23 -08002094
Roland Dreier6ecb0c82006-03-20 10:08:23 -08002095 return sprintf(buf, "0x%04x\n", be16_to_cpu(target->path.pkey));
2096}
2097
Bart Van Assche848b3082013-10-26 14:38:12 +02002098static ssize_t show_sgid(struct device *dev, struct device_attribute *attr,
2099 char *buf)
2100{
2101 struct srp_target_port *target = host_to_target(class_to_shost(dev));
2102
2103 return sprintf(buf, "%pI6\n", target->path.sgid.raw);
2104}
2105
Tony Jonesee959b02008-02-22 00:13:36 +01002106static ssize_t show_dgid(struct device *dev, struct device_attribute *attr,
2107 char *buf)
Roland Dreier6ecb0c82006-03-20 10:08:23 -08002108{
Tony Jonesee959b02008-02-22 00:13:36 +01002109 struct srp_target_port *target = host_to_target(class_to_shost(dev));
Roland Dreier6ecb0c82006-03-20 10:08:23 -08002110
Harvey Harrison5b095d9892008-10-29 12:52:50 -07002111 return sprintf(buf, "%pI6\n", target->path.dgid.raw);
Roland Dreier6ecb0c82006-03-20 10:08:23 -08002112}
2113
Tony Jonesee959b02008-02-22 00:13:36 +01002114static ssize_t show_orig_dgid(struct device *dev,
2115 struct device_attribute *attr, char *buf)
Ishai Rabinovitz3633b3d2007-05-06 21:18:11 -07002116{
Tony Jonesee959b02008-02-22 00:13:36 +01002117 struct srp_target_port *target = host_to_target(class_to_shost(dev));
Ishai Rabinovitz3633b3d2007-05-06 21:18:11 -07002118
Harvey Harrison5b095d9892008-10-29 12:52:50 -07002119 return sprintf(buf, "%pI6\n", target->orig_dgid);
Ishai Rabinovitz3633b3d2007-05-06 21:18:11 -07002120}
2121
Bart Van Assche89de7482010-08-03 14:08:45 +00002122static ssize_t show_req_lim(struct device *dev,
2123 struct device_attribute *attr, char *buf)
2124{
2125 struct srp_target_port *target = host_to_target(class_to_shost(dev));
2126
Bart Van Assche89de7482010-08-03 14:08:45 +00002127 return sprintf(buf, "%d\n", target->req_lim);
2128}
2129
Tony Jonesee959b02008-02-22 00:13:36 +01002130static ssize_t show_zero_req_lim(struct device *dev,
2131 struct device_attribute *attr, char *buf)
Roland Dreier6bfa24f2006-06-17 20:37:33 -07002132{
Tony Jonesee959b02008-02-22 00:13:36 +01002133 struct srp_target_port *target = host_to_target(class_to_shost(dev));
Roland Dreier6bfa24f2006-06-17 20:37:33 -07002134
Roland Dreier6bfa24f2006-06-17 20:37:33 -07002135 return sprintf(buf, "%d\n", target->zero_req_lim);
2136}
2137
Tony Jonesee959b02008-02-22 00:13:36 +01002138static ssize_t show_local_ib_port(struct device *dev,
2139 struct device_attribute *attr, char *buf)
Ishai Rabinovitzded7f1a2006-08-15 17:34:52 +03002140{
Tony Jonesee959b02008-02-22 00:13:36 +01002141 struct srp_target_port *target = host_to_target(class_to_shost(dev));
Ishai Rabinovitzded7f1a2006-08-15 17:34:52 +03002142
2143 return sprintf(buf, "%d\n", target->srp_host->port);
2144}
2145
Tony Jonesee959b02008-02-22 00:13:36 +01002146static ssize_t show_local_ib_device(struct device *dev,
2147 struct device_attribute *attr, char *buf)
Ishai Rabinovitzded7f1a2006-08-15 17:34:52 +03002148{
Tony Jonesee959b02008-02-22 00:13:36 +01002149 struct srp_target_port *target = host_to_target(class_to_shost(dev));
Ishai Rabinovitzded7f1a2006-08-15 17:34:52 +03002150
Greg Kroah-Hartman05321932008-03-06 00:13:36 +01002151 return sprintf(buf, "%s\n", target->srp_host->srp_dev->dev->name);
Ishai Rabinovitzded7f1a2006-08-15 17:34:52 +03002152}
2153
Bart Van Assche4b5e5f42013-06-28 14:57:42 +02002154static ssize_t show_comp_vector(struct device *dev,
2155 struct device_attribute *attr, char *buf)
2156{
2157 struct srp_target_port *target = host_to_target(class_to_shost(dev));
2158
2159 return sprintf(buf, "%d\n", target->comp_vector);
2160}
2161
Vu Pham7bb312e2013-10-26 14:31:27 +02002162static ssize_t show_tl_retry_count(struct device *dev,
2163 struct device_attribute *attr, char *buf)
2164{
2165 struct srp_target_port *target = host_to_target(class_to_shost(dev));
2166
2167 return sprintf(buf, "%d\n", target->tl_retry_count);
2168}
2169
David Dillow49248642011-01-14 18:23:24 -05002170static ssize_t show_cmd_sg_entries(struct device *dev,
2171 struct device_attribute *attr, char *buf)
2172{
2173 struct srp_target_port *target = host_to_target(class_to_shost(dev));
2174
2175 return sprintf(buf, "%u\n", target->cmd_sg_cnt);
2176}
2177
David Dillowc07d4242011-01-16 13:57:10 -05002178static ssize_t show_allow_ext_sg(struct device *dev,
2179 struct device_attribute *attr, char *buf)
2180{
2181 struct srp_target_port *target = host_to_target(class_to_shost(dev));
2182
2183 return sprintf(buf, "%s\n", target->allow_ext_sg ? "true" : "false");
2184}
2185
Tony Jonesee959b02008-02-22 00:13:36 +01002186static DEVICE_ATTR(id_ext, S_IRUGO, show_id_ext, NULL);
2187static DEVICE_ATTR(ioc_guid, S_IRUGO, show_ioc_guid, NULL);
2188static DEVICE_ATTR(service_id, S_IRUGO, show_service_id, NULL);
2189static DEVICE_ATTR(pkey, S_IRUGO, show_pkey, NULL);
Bart Van Assche848b3082013-10-26 14:38:12 +02002190static DEVICE_ATTR(sgid, S_IRUGO, show_sgid, NULL);
Tony Jonesee959b02008-02-22 00:13:36 +01002191static DEVICE_ATTR(dgid, S_IRUGO, show_dgid, NULL);
2192static DEVICE_ATTR(orig_dgid, S_IRUGO, show_orig_dgid, NULL);
Bart Van Assche89de7482010-08-03 14:08:45 +00002193static DEVICE_ATTR(req_lim, S_IRUGO, show_req_lim, NULL);
Tony Jonesee959b02008-02-22 00:13:36 +01002194static DEVICE_ATTR(zero_req_lim, S_IRUGO, show_zero_req_lim, NULL);
2195static DEVICE_ATTR(local_ib_port, S_IRUGO, show_local_ib_port, NULL);
2196static DEVICE_ATTR(local_ib_device, S_IRUGO, show_local_ib_device, NULL);
Bart Van Assche4b5e5f42013-06-28 14:57:42 +02002197static DEVICE_ATTR(comp_vector, S_IRUGO, show_comp_vector, NULL);
Vu Pham7bb312e2013-10-26 14:31:27 +02002198static DEVICE_ATTR(tl_retry_count, S_IRUGO, show_tl_retry_count, NULL);
David Dillow49248642011-01-14 18:23:24 -05002199static DEVICE_ATTR(cmd_sg_entries, S_IRUGO, show_cmd_sg_entries, NULL);
David Dillowc07d4242011-01-16 13:57:10 -05002200static DEVICE_ATTR(allow_ext_sg, S_IRUGO, show_allow_ext_sg, NULL);
Roland Dreier6ecb0c82006-03-20 10:08:23 -08002201
Tony Jonesee959b02008-02-22 00:13:36 +01002202static struct device_attribute *srp_host_attrs[] = {
2203 &dev_attr_id_ext,
2204 &dev_attr_ioc_guid,
2205 &dev_attr_service_id,
2206 &dev_attr_pkey,
Bart Van Assche848b3082013-10-26 14:38:12 +02002207 &dev_attr_sgid,
Tony Jonesee959b02008-02-22 00:13:36 +01002208 &dev_attr_dgid,
2209 &dev_attr_orig_dgid,
Bart Van Assche89de7482010-08-03 14:08:45 +00002210 &dev_attr_req_lim,
Tony Jonesee959b02008-02-22 00:13:36 +01002211 &dev_attr_zero_req_lim,
2212 &dev_attr_local_ib_port,
2213 &dev_attr_local_ib_device,
Bart Van Assche4b5e5f42013-06-28 14:57:42 +02002214 &dev_attr_comp_vector,
Vu Pham7bb312e2013-10-26 14:31:27 +02002215 &dev_attr_tl_retry_count,
David Dillow49248642011-01-14 18:23:24 -05002216 &dev_attr_cmd_sg_entries,
David Dillowc07d4242011-01-16 13:57:10 -05002217 &dev_attr_allow_ext_sg,
Roland Dreier6ecb0c82006-03-20 10:08:23 -08002218 NULL
2219};
2220
Roland Dreieraef9ec32005-11-02 14:07:13 -08002221static struct scsi_host_template srp_template = {
2222 .module = THIS_MODULE,
Roland Dreierb7f008f2007-05-06 21:18:11 -07002223 .name = "InfiniBand SRP initiator",
2224 .proc_name = DRV_NAME,
Bart Van Asschec9b03c12011-09-03 09:34:48 +02002225 .slave_configure = srp_slave_configure,
Roland Dreieraef9ec32005-11-02 14:07:13 -08002226 .info = srp_target_info,
2227 .queuecommand = srp_queuecommand,
Jack Wang71444b92013-11-07 11:37:37 +01002228 .change_queue_depth = srp_change_queue_depth,
2229 .change_queue_type = srp_change_queue_type,
Roland Dreieraef9ec32005-11-02 14:07:13 -08002230 .eh_abort_handler = srp_abort,
2231 .eh_device_reset_handler = srp_reset_device,
2232 .eh_host_reset_handler = srp_reset_host,
Bart Van Assche2742c1d2013-06-12 15:24:25 +02002233 .skip_settle_delay = true,
David Dillow49248642011-01-14 18:23:24 -05002234 .sg_tablesize = SRP_DEF_SG_TABLESIZE,
Bart Van Assche4d73f952013-10-26 14:40:37 +02002235 .can_queue = SRP_DEFAULT_CMD_SQ_SIZE,
Roland Dreieraef9ec32005-11-02 14:07:13 -08002236 .this_id = -1,
Bart Van Assche4d73f952013-10-26 14:40:37 +02002237 .cmd_per_lun = SRP_DEFAULT_CMD_SQ_SIZE,
Roland Dreier6ecb0c82006-03-20 10:08:23 -08002238 .use_clustering = ENABLE_CLUSTERING,
2239 .shost_attrs = srp_host_attrs
Roland Dreieraef9ec32005-11-02 14:07:13 -08002240};
2241
2242static int srp_add_target(struct srp_host *host, struct srp_target_port *target)
2243{
FUJITA Tomonori32368222007-06-27 16:33:12 +09002244 struct srp_rport_identifiers ids;
2245 struct srp_rport *rport;
2246
Roland Dreieraef9ec32005-11-02 14:07:13 -08002247 sprintf(target->target_name, "SRP.T10:%016llX",
2248 (unsigned long long) be64_to_cpu(target->id_ext));
2249
Greg Kroah-Hartman05321932008-03-06 00:13:36 +01002250 if (scsi_add_host(target->scsi_host, host->srp_dev->dev->dma_device))
Roland Dreieraef9ec32005-11-02 14:07:13 -08002251 return -ENODEV;
2252
FUJITA Tomonori32368222007-06-27 16:33:12 +09002253 memcpy(ids.port_id, &target->id_ext, 8);
2254 memcpy(ids.port_id + 8, &target->ioc_guid, 8);
FUJITA Tomonoriaebd5e42007-07-11 15:08:15 +09002255 ids.roles = SRP_RPORT_ROLE_TARGET;
FUJITA Tomonori32368222007-06-27 16:33:12 +09002256 rport = srp_rport_add(target->scsi_host, &ids);
2257 if (IS_ERR(rport)) {
2258 scsi_remove_host(target->scsi_host);
2259 return PTR_ERR(rport);
2260 }
2261
Bart Van Asschedc1bdbd2011-09-16 20:41:13 +02002262 rport->lld_data = target;
Bart Van Assche9dd69a62013-10-26 14:32:30 +02002263 target->rport = rport;
Bart Van Asschedc1bdbd2011-09-16 20:41:13 +02002264
Matthew Wilcoxb3589fd2006-06-17 20:37:30 -07002265 spin_lock(&host->target_lock);
Roland Dreieraef9ec32005-11-02 14:07:13 -08002266 list_add_tail(&target->list, &host->target_list);
Matthew Wilcoxb3589fd2006-06-17 20:37:30 -07002267 spin_unlock(&host->target_lock);
Roland Dreieraef9ec32005-11-02 14:07:13 -08002268
2269 target->state = SRP_TARGET_LIVE;
2270
Roland Dreieraef9ec32005-11-02 14:07:13 -08002271 scsi_scan_target(&target->scsi_host->shost_gendev,
Matthew Wilcox1962a4a2006-06-17 20:37:30 -07002272 0, target->scsi_id, SCAN_WILD_CARD, 0);
Roland Dreieraef9ec32005-11-02 14:07:13 -08002273
2274 return 0;
2275}
2276
Tony Jonesee959b02008-02-22 00:13:36 +01002277static void srp_release_dev(struct device *dev)
Roland Dreieraef9ec32005-11-02 14:07:13 -08002278{
2279 struct srp_host *host =
Tony Jonesee959b02008-02-22 00:13:36 +01002280 container_of(dev, struct srp_host, dev);
Roland Dreieraef9ec32005-11-02 14:07:13 -08002281
2282 complete(&host->released);
2283}
2284
2285static struct class srp_class = {
2286 .name = "infiniband_srp",
Tony Jonesee959b02008-02-22 00:13:36 +01002287 .dev_release = srp_release_dev
Roland Dreieraef9ec32005-11-02 14:07:13 -08002288};
2289
Bart Van Assche96fc2482013-06-28 14:51:26 +02002290/**
 2291 * srp_conn_unique() - check whether the connection to a target is unique
 * @host: SRP host to search for duplicate connections on.
 * @target: SRP target port that is about to log in.
2292 */
2293static bool srp_conn_unique(struct srp_host *host,
2294 struct srp_target_port *target)
2295{
2296 struct srp_target_port *t;
2297 bool ret = false;
2298
2299 if (target->state == SRP_TARGET_REMOVED)
2300 goto out;
2301
2302 ret = true;
2303
2304 spin_lock(&host->target_lock);
2305 list_for_each_entry(t, &host->target_list, list) {
2306 if (t != target &&
2307 target->id_ext == t->id_ext &&
2308 target->ioc_guid == t->ioc_guid &&
2309 target->initiator_ext == t->initiator_ext) {
2310 ret = false;
2311 break;
2312 }
2313 }
2314 spin_unlock(&host->target_lock);
2315
2316out:
2317 return ret;
2318}
2319
Roland Dreieraef9ec32005-11-02 14:07:13 -08002320/*
2321 * Target ports are added by writing
2322 *
2323 * id_ext=<SRP ID ext>,ioc_guid=<SRP IOC GUID>,dgid=<dest GID>,
2324 * pkey=<P_Key>,service_id=<service ID>
2325 *
2326 * to the add_target sysfs attribute.
2327 */
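
/*
 * Example (hypothetical identifiers, shown only to illustrate the format;
 * the sysfs directory name follows the usual "srp-<device>-<port>" naming
 * of SRP hosts):
 *
 *   echo id_ext=200100e08b000000,ioc_guid=0002c90200402bd4,\
 *   dgid=fe800000000000000002c90200402bd5,pkey=ffff,\
 *   service_id=0002c90200402bd4 \
 *   > /sys/class/infiniband_srp/srp-mlx4_0-1/add_target
 *
 * (one logical shell command line)
 */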
2328enum {
2329 SRP_OPT_ERR = 0,
2330 SRP_OPT_ID_EXT = 1 << 0,
2331 SRP_OPT_IOC_GUID = 1 << 1,
2332 SRP_OPT_DGID = 1 << 2,
2333 SRP_OPT_PKEY = 1 << 3,
2334 SRP_OPT_SERVICE_ID = 1 << 4,
2335 SRP_OPT_MAX_SECT = 1 << 5,
Vu Pham52fb2b502006-06-17 20:37:31 -07002336 SRP_OPT_MAX_CMD_PER_LUN = 1 << 6,
Ramachandra K0c0450db2006-06-17 20:37:38 -07002337 SRP_OPT_IO_CLASS = 1 << 7,
Ishai Rabinovitz01cb9bc2006-10-04 15:28:56 +02002338 SRP_OPT_INITIATOR_EXT = 1 << 8,
David Dillow49248642011-01-14 18:23:24 -05002339 SRP_OPT_CMD_SG_ENTRIES = 1 << 9,
David Dillowc07d4242011-01-16 13:57:10 -05002340 SRP_OPT_ALLOW_EXT_SG = 1 << 10,
2341 SRP_OPT_SG_TABLESIZE = 1 << 11,
Bart Van Assche4b5e5f42013-06-28 14:57:42 +02002342 SRP_OPT_COMP_VECTOR = 1 << 12,
Vu Pham7bb312e2013-10-26 14:31:27 +02002343 SRP_OPT_TL_RETRY_COUNT = 1 << 13,
Bart Van Assche4d73f952013-10-26 14:40:37 +02002344 SRP_OPT_QUEUE_SIZE = 1 << 14,
Roland Dreieraef9ec32005-11-02 14:07:13 -08002345 SRP_OPT_ALL = (SRP_OPT_ID_EXT |
2346 SRP_OPT_IOC_GUID |
2347 SRP_OPT_DGID |
2348 SRP_OPT_PKEY |
2349 SRP_OPT_SERVICE_ID),
2350};
2351
Steven Whitehousea447c092008-10-13 10:46:57 +01002352static const match_table_t srp_opt_tokens = {
Vu Pham52fb2b502006-06-17 20:37:31 -07002353 { SRP_OPT_ID_EXT, "id_ext=%s" },
2354 { SRP_OPT_IOC_GUID, "ioc_guid=%s" },
2355 { SRP_OPT_DGID, "dgid=%s" },
2356 { SRP_OPT_PKEY, "pkey=%x" },
2357 { SRP_OPT_SERVICE_ID, "service_id=%s" },
2358 { SRP_OPT_MAX_SECT, "max_sect=%d" },
2359 { SRP_OPT_MAX_CMD_PER_LUN, "max_cmd_per_lun=%d" },
Ramachandra K0c0450db2006-06-17 20:37:38 -07002360 { SRP_OPT_IO_CLASS, "io_class=%x" },
Ishai Rabinovitz01cb9bc2006-10-04 15:28:56 +02002361 { SRP_OPT_INITIATOR_EXT, "initiator_ext=%s" },
David Dillow49248642011-01-14 18:23:24 -05002362 { SRP_OPT_CMD_SG_ENTRIES, "cmd_sg_entries=%u" },
David Dillowc07d4242011-01-16 13:57:10 -05002363 { SRP_OPT_ALLOW_EXT_SG, "allow_ext_sg=%u" },
2364 { SRP_OPT_SG_TABLESIZE, "sg_tablesize=%u" },
Bart Van Assche4b5e5f42013-06-28 14:57:42 +02002365 { SRP_OPT_COMP_VECTOR, "comp_vector=%u" },
Vu Pham7bb312e2013-10-26 14:31:27 +02002366 { SRP_OPT_TL_RETRY_COUNT, "tl_retry_count=%u" },
Bart Van Assche4d73f952013-10-26 14:40:37 +02002367 { SRP_OPT_QUEUE_SIZE, "queue_size=%d" },
Vu Pham52fb2b502006-06-17 20:37:31 -07002368 { SRP_OPT_ERR, NULL }
Roland Dreieraef9ec32005-11-02 14:07:13 -08002369};
2370
2371static int srp_parse_options(const char *buf, struct srp_target_port *target)
2372{
2373 char *options, *sep_opt;
2374 char *p;
2375 char dgid[3];
2376 substring_t args[MAX_OPT_ARGS];
2377 int opt_mask = 0;
2378 int token;
2379 int ret = -EINVAL;
2380 int i;
2381
2382 options = kstrdup(buf, GFP_KERNEL);
2383 if (!options)
2384 return -ENOMEM;
2385
2386 sep_opt = options;
2387 while ((p = strsep(&sep_opt, ",")) != NULL) {
2388 if (!*p)
2389 continue;
2390
2391 token = match_token(p, srp_opt_tokens, args);
2392 opt_mask |= token;
2393
2394 switch (token) {
2395 case SRP_OPT_ID_EXT:
2396 p = match_strdup(args);
Ishai Rabinovitza20f3a62007-01-16 17:20:25 +02002397 if (!p) {
2398 ret = -ENOMEM;
2399 goto out;
2400 }
Roland Dreieraef9ec32005-11-02 14:07:13 -08002401 target->id_ext = cpu_to_be64(simple_strtoull(p, NULL, 16));
2402 kfree(p);
2403 break;
2404
2405 case SRP_OPT_IOC_GUID:
2406 p = match_strdup(args);
Ishai Rabinovitza20f3a62007-01-16 17:20:25 +02002407 if (!p) {
2408 ret = -ENOMEM;
2409 goto out;
2410 }
Roland Dreieraef9ec32005-11-02 14:07:13 -08002411 target->ioc_guid = cpu_to_be64(simple_strtoull(p, NULL, 16));
2412 kfree(p);
2413 break;
2414
2415 case SRP_OPT_DGID:
2416 p = match_strdup(args);
Ishai Rabinovitza20f3a62007-01-16 17:20:25 +02002417 if (!p) {
2418 ret = -ENOMEM;
2419 goto out;
2420 }
Roland Dreieraef9ec32005-11-02 14:07:13 -08002421 if (strlen(p) != 32) {
Bart Van Asschee0bda7d2012-01-14 12:39:44 +00002422 pr_warn("bad dest GID parameter '%s'\n", p);
Roland Dreierce1823f2006-04-03 09:31:04 -07002423 kfree(p);
Roland Dreieraef9ec32005-11-02 14:07:13 -08002424 goto out;
2425 }
2426
2427 for (i = 0; i < 16; ++i) {
2428 strlcpy(dgid, p + i * 2, 3);
2429 target->path.dgid.raw[i] = simple_strtoul(dgid, NULL, 16);
2430 }
Roland Dreierbf17c1c2006-03-20 10:08:25 -08002431 kfree(p);
Ishai Rabinovitz3633b3d2007-05-06 21:18:11 -07002432 memcpy(target->orig_dgid, target->path.dgid.raw, 16);
Roland Dreieraef9ec32005-11-02 14:07:13 -08002433 break;
2434
2435 case SRP_OPT_PKEY:
2436 if (match_hex(args, &token)) {
Bart Van Asschee0bda7d2012-01-14 12:39:44 +00002437 pr_warn("bad P_Key parameter '%s'\n", p);
Roland Dreieraef9ec32005-11-02 14:07:13 -08002438 goto out;
2439 }
2440 target->path.pkey = cpu_to_be16(token);
2441 break;
2442
2443 case SRP_OPT_SERVICE_ID:
2444 p = match_strdup(args);
Ishai Rabinovitza20f3a62007-01-16 17:20:25 +02002445 if (!p) {
2446 ret = -ENOMEM;
2447 goto out;
2448 }
Roland Dreieraef9ec32005-11-02 14:07:13 -08002449 target->service_id = cpu_to_be64(simple_strtoull(p, NULL, 16));
Sean Hefty247e0202007-08-08 15:51:18 -07002450 target->path.service_id = target->service_id;
Roland Dreieraef9ec32005-11-02 14:07:13 -08002451 kfree(p);
2452 break;
2453
2454 case SRP_OPT_MAX_SECT:
2455 if (match_int(args, &token)) {
Bart Van Asschee0bda7d2012-01-14 12:39:44 +00002456 pr_warn("bad max sect parameter '%s'\n", p);
Roland Dreieraef9ec32005-11-02 14:07:13 -08002457 goto out;
2458 }
2459 target->scsi_host->max_sectors = token;
2460 break;
2461
Bart Van Assche4d73f952013-10-26 14:40:37 +02002462 case SRP_OPT_QUEUE_SIZE:
2463 if (match_int(args, &token) || token < 1) {
2464 pr_warn("bad queue_size parameter '%s'\n", p);
2465 goto out;
2466 }
2467 target->scsi_host->can_queue = token;
2468 target->queue_size = token + SRP_RSP_SQ_SIZE +
2469 SRP_TSK_MGMT_SQ_SIZE;
2470 if (!(opt_mask & SRP_OPT_MAX_CMD_PER_LUN))
2471 target->scsi_host->cmd_per_lun = token;
2472 break;
2473
Vu Pham52fb2b502006-06-17 20:37:31 -07002474 case SRP_OPT_MAX_CMD_PER_LUN:
Bart Van Assche4d73f952013-10-26 14:40:37 +02002475 if (match_int(args, &token) || token < 1) {
Bart Van Asschee0bda7d2012-01-14 12:39:44 +00002476 pr_warn("bad max cmd_per_lun parameter '%s'\n",
2477 p);
Vu Pham52fb2b502006-06-17 20:37:31 -07002478 goto out;
2479 }
Bart Van Assche4d73f952013-10-26 14:40:37 +02002480 target->scsi_host->cmd_per_lun = token;
Vu Pham52fb2b502006-06-17 20:37:31 -07002481 break;
2482
Ramachandra K0c0450db2006-06-17 20:37:38 -07002483 case SRP_OPT_IO_CLASS:
2484 if (match_hex(args, &token)) {
Bart Van Asschee0bda7d2012-01-14 12:39:44 +00002485 pr_warn("bad IO class parameter '%s'\n", p);
Ramachandra K0c0450db2006-06-17 20:37:38 -07002486 goto out;
2487 }
2488 if (token != SRP_REV10_IB_IO_CLASS &&
2489 token != SRP_REV16A_IB_IO_CLASS) {
Bart Van Asschee0bda7d2012-01-14 12:39:44 +00002490 pr_warn("unknown IO class parameter value %x specified (use %x or %x).\n",
2491 token, SRP_REV10_IB_IO_CLASS,
2492 SRP_REV16A_IB_IO_CLASS);
Ramachandra K0c0450db2006-06-17 20:37:38 -07002493 goto out;
2494 }
2495 target->io_class = token;
2496 break;
2497
Ishai Rabinovitz01cb9bc2006-10-04 15:28:56 +02002498 case SRP_OPT_INITIATOR_EXT:
2499 p = match_strdup(args);
Ishai Rabinovitza20f3a62007-01-16 17:20:25 +02002500 if (!p) {
2501 ret = -ENOMEM;
2502 goto out;
2503 }
Ishai Rabinovitz01cb9bc2006-10-04 15:28:56 +02002504 target->initiator_ext = cpu_to_be64(simple_strtoull(p, NULL, 16));
2505 kfree(p);
2506 break;
2507
David Dillow49248642011-01-14 18:23:24 -05002508 case SRP_OPT_CMD_SG_ENTRIES:
2509 if (match_int(args, &token) || token < 1 || token > 255) {
Bart Van Asschee0bda7d2012-01-14 12:39:44 +00002510 pr_warn("bad max cmd_sg_entries parameter '%s'\n",
2511 p);
David Dillow49248642011-01-14 18:23:24 -05002512 goto out;
2513 }
2514 target->cmd_sg_cnt = token;
2515 break;
2516
David Dillowc07d4242011-01-16 13:57:10 -05002517 case SRP_OPT_ALLOW_EXT_SG:
2518 if (match_int(args, &token)) {
Bart Van Asschee0bda7d2012-01-14 12:39:44 +00002519 pr_warn("bad allow_ext_sg parameter '%s'\n", p);
David Dillowc07d4242011-01-16 13:57:10 -05002520 goto out;
2521 }
2522 target->allow_ext_sg = !!token;
2523 break;
2524
2525 case SRP_OPT_SG_TABLESIZE:
2526 if (match_int(args, &token) || token < 1 ||
2527 token > SCSI_MAX_SG_CHAIN_SEGMENTS) {
Bart Van Asschee0bda7d2012-01-14 12:39:44 +00002528 pr_warn("bad max sg_tablesize parameter '%s'\n",
2529 p);
David Dillowc07d4242011-01-16 13:57:10 -05002530 goto out;
2531 }
2532 target->sg_tablesize = token;
2533 break;
2534
Bart Van Assche4b5e5f42013-06-28 14:57:42 +02002535 case SRP_OPT_COMP_VECTOR:
2536 if (match_int(args, &token) || token < 0) {
2537 pr_warn("bad comp_vector parameter '%s'\n", p);
2538 goto out;
2539 }
2540 target->comp_vector = token;
2541 break;
2542
Vu Pham7bb312e2013-10-26 14:31:27 +02002543 case SRP_OPT_TL_RETRY_COUNT:
2544 if (match_int(args, &token) || token < 2 || token > 7) {
2545 pr_warn("bad tl_retry_count parameter '%s' (must be a number between 2 and 7)\n",
2546 p);
2547 goto out;
2548 }
2549 target->tl_retry_count = token;
2550 break;
2551
Roland Dreieraef9ec32005-11-02 14:07:13 -08002552 default:
Bart Van Asschee0bda7d2012-01-14 12:39:44 +00002553 pr_warn("unknown parameter or missing value '%s' in target creation request\n",
2554 p);
Roland Dreieraef9ec32005-11-02 14:07:13 -08002555 goto out;
2556 }
2557 }
2558
2559 if ((opt_mask & SRP_OPT_ALL) == SRP_OPT_ALL)
2560 ret = 0;
2561 else
2562 for (i = 0; i < ARRAY_SIZE(srp_opt_tokens); ++i)
2563 if ((srp_opt_tokens[i].token & SRP_OPT_ALL) &&
2564 !(srp_opt_tokens[i].token & opt_mask))
Bart Van Asschee0bda7d2012-01-14 12:39:44 +00002565 pr_warn("target creation request is missing parameter '%s'\n",
2566 srp_opt_tokens[i].pattern);
Roland Dreieraef9ec32005-11-02 14:07:13 -08002567
Bart Van Assche4d73f952013-10-26 14:40:37 +02002568 if (target->scsi_host->cmd_per_lun > target->scsi_host->can_queue
2569 && (opt_mask & SRP_OPT_MAX_CMD_PER_LUN))
2570 pr_warn("cmd_per_lun = %d > queue_size = %d\n",
2571 target->scsi_host->cmd_per_lun,
2572 target->scsi_host->can_queue);
2573
Roland Dreieraef9ec32005-11-02 14:07:13 -08002574out:
2575 kfree(options);
2576 return ret;
2577}
2578
static ssize_t srp_create_target(struct device *dev,
				 struct device_attribute *attr,
				 const char *buf, size_t count)
{
	struct srp_host *host =
		container_of(dev, struct srp_host, dev);
	struct Scsi_Host *target_host;
	struct srp_target_port *target;
	struct ib_device *ibdev = host->srp_dev->dev;
	int ret;

	target_host = scsi_host_alloc(&srp_template,
				      sizeof (struct srp_target_port));
	if (!target_host)
		return -ENOMEM;

	target_host->transportt  = ib_srp_transport_template;
	target_host->max_channel = 0;
	target_host->max_id      = 1;
	target_host->max_lun     = SRP_MAX_LUN;
	target_host->max_cmd_len = sizeof ((struct srp_cmd *) (void *) 0L)->cdb;

	target = host_to_target(target_host);

	target->io_class	= SRP_REV16A_IB_IO_CLASS;
	target->scsi_host	= target_host;
	target->srp_host	= host;
	target->lkey		= host->srp_dev->mr->lkey;
	target->rkey		= host->srp_dev->mr->rkey;
	target->cmd_sg_cnt	= cmd_sg_entries;
	target->sg_tablesize	= indirect_sg_entries ? : cmd_sg_entries;
	target->allow_ext_sg	= allow_ext_sg;
	target->tl_retry_count	= 7;
	target->queue_size	= SRP_DEFAULT_QUEUE_SIZE;

	ret = srp_parse_options(buf, target);
	if (ret)
		goto err;

	target->req_ring_size = target->queue_size - SRP_TSK_MGMT_SQ_SIZE;

	if (!srp_conn_unique(target->srp_host, target)) {
		shost_printk(KERN_INFO, target->scsi_host,
			     PFX "Already connected to target port with id_ext=%016llx;ioc_guid=%016llx;initiator_ext=%016llx\n",
			     be64_to_cpu(target->id_ext),
			     be64_to_cpu(target->ioc_guid),
			     be64_to_cpu(target->initiator_ext));
		ret = -EEXIST;
		goto err;
	}

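	/*
	 * Without an FMR pool, large scatterlists can only be handled via
	 * external indirect descriptors; if those are disabled as well,
	 * sg_tablesize must not exceed cmd_sg_cnt.
	 */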
	if (!host->srp_dev->fmr_pool && !target->allow_ext_sg &&
	    target->cmd_sg_cnt < target->sg_tablesize) {
		pr_warn("No FMR pool and no external indirect descriptors, limiting sg_tablesize to cmd_sg_cnt\n");
		target->sg_tablesize = target->cmd_sg_cnt;
	}

	target_host->sg_tablesize = target->sg_tablesize;
	target->indirect_size = target->sg_tablesize *
				sizeof (struct srp_direct_buf);
	target->max_iu_len = sizeof (struct srp_cmd) +
			     sizeof (struct srp_indirect_buf) +
			     target->cmd_sg_cnt * sizeof (struct srp_direct_buf);

	INIT_WORK(&target->tl_err_work, srp_tl_err_work);
	INIT_WORK(&target->remove_work, srp_remove_work);
	spin_lock_init(&target->lock);
	INIT_LIST_HEAD(&target->free_tx);
	ret = srp_alloc_req_data(target);
	if (ret)
		goto err_free_mem;

	ib_query_gid(ibdev, host->port, 0, &target->path.sgid);

	shost_printk(KERN_DEBUG, target->scsi_host, PFX
		     "new target: id_ext %016llx ioc_guid %016llx pkey %04x "
		     "service_id %016llx dgid %pI6\n",
		     (unsigned long long) be64_to_cpu(target->id_ext),
		     (unsigned long long) be64_to_cpu(target->ioc_guid),
		     be16_to_cpu(target->path.pkey),
		     (unsigned long long) be64_to_cpu(target->service_id),
		     target->path.dgid.raw);

	ret = srp_create_target_ib(target);
	if (ret)
		goto err_free_mem;

	ret = srp_new_cm_id(target);
	if (ret)
		goto err_free_ib;

	ret = srp_connect_target(target);
	if (ret) {
		shost_printk(KERN_ERR, target->scsi_host,
			     PFX "Connection failed\n");
		goto err_cm_id;
	}

	ret = srp_add_target(host, target);
	if (ret)
		goto err_disconnect;

	return count;

err_disconnect:
	srp_disconnect_target(target);

err_cm_id:
	ib_destroy_cm_id(target->cm_id);

err_free_ib:
	srp_free_target_ib(target);

err_free_mem:
	srp_free_req_data(target);

err:
	scsi_host_put(target_host);

	return ret;
}

static DEVICE_ATTR(add_target, S_IWUSR, NULL, srp_create_target);

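/* Read-only sysfs attributes identifying the HCA and port behind this host. */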
static ssize_t show_ibdev(struct device *dev, struct device_attribute *attr,
			  char *buf)
{
	struct srp_host *host = container_of(dev, struct srp_host, dev);

	return sprintf(buf, "%s\n", host->srp_dev->dev->name);
}

static DEVICE_ATTR(ibdev, S_IRUGO, show_ibdev, NULL);

static ssize_t show_port(struct device *dev, struct device_attribute *attr,
			 char *buf)
{
	struct srp_host *host = container_of(dev, struct srp_host, dev);

	return sprintf(buf, "%d\n", host->port);
}

static DEVICE_ATTR(port, S_IRUGO, show_port, NULL);

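/*
 * Register one srp_host per HCA port and create its sysfs attributes
 * (add_target, ibdev and port). Returns NULL on failure.
 */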
static struct srp_host *srp_add_port(struct srp_device *device, u8 port)
{
	struct srp_host *host;

	host = kzalloc(sizeof *host, GFP_KERNEL);
	if (!host)
		return NULL;

	INIT_LIST_HEAD(&host->target_list);
	spin_lock_init(&host->target_lock);
	init_completion(&host->released);
	host->srp_dev = device;
	host->port = port;

	host->dev.class = &srp_class;
	host->dev.parent = device->dev->dma_device;
	dev_set_name(&host->dev, "srp-%s-%d", device->dev->name, port);

	if (device_register(&host->dev))
		goto free_host;
	if (device_create_file(&host->dev, &dev_attr_add_target))
		goto err_class;
	if (device_create_file(&host->dev, &dev_attr_ibdev))
		goto err_class;
	if (device_create_file(&host->dev, &dev_attr_port))
		goto err_class;

	return host;

err_class:
	device_unregister(&host->dev);

free_host:
	kfree(host);

	return NULL;
}

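/*
 * Callback invoked by the IB core for each new RDMA device: set up a
 * protection domain, a DMA memory region and, if possible, an FMR pool,
 * then add an srp_host for every port of the device.
 */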
static void srp_add_one(struct ib_device *device)
{
	struct srp_device *srp_dev;
	struct ib_device_attr *dev_attr;
	struct ib_fmr_pool_param fmr_param;
	struct srp_host *host;
	int max_pages_per_fmr, fmr_page_shift, s, e, p;

	dev_attr = kmalloc(sizeof *dev_attr, GFP_KERNEL);
	if (!dev_attr)
		return;

	if (ib_query_device(device, dev_attr)) {
		pr_warn("Query device failed for %s\n", device->name);
		goto free_attr;
	}

	srp_dev = kmalloc(sizeof *srp_dev, GFP_KERNEL);
	if (!srp_dev)
		goto free_attr;

	/*
	 * Use the smallest page size supported by the HCA, down to a
	 * minimum of 4096 bytes. We're unlikely to build large sglists
	 * out of smaller entries.
	 */
	fmr_page_shift		= max(12, ffs(dev_attr->page_size_cap) - 1);
	srp_dev->fmr_page_size	= 1 << fmr_page_shift;
	srp_dev->fmr_page_mask	= ~((u64) srp_dev->fmr_page_size - 1);
	srp_dev->fmr_max_size	= srp_dev->fmr_page_size * SRP_FMR_SIZE;

	INIT_LIST_HEAD(&srp_dev->dev_list);

	srp_dev->dev = device;
	srp_dev->pd  = ib_alloc_pd(device);
	if (IS_ERR(srp_dev->pd))
		goto free_dev;

	srp_dev->mr = ib_get_dma_mr(srp_dev->pd,
				    IB_ACCESS_LOCAL_WRITE |
				    IB_ACCESS_REMOTE_READ |
				    IB_ACCESS_REMOTE_WRITE);
	if (IS_ERR(srp_dev->mr))
		goto err_pd;

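	/*
	 * Create the FMR pool. If the HCA cannot support the preferred
	 * number of pages per FMR, retry with progressively smaller pool
	 * entries (halving the maximum mapping size each time) until
	 * creation succeeds or the minimum size is reached.
	 */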
	for (max_pages_per_fmr = SRP_FMR_SIZE;
			max_pages_per_fmr >= SRP_FMR_MIN_SIZE;
			max_pages_per_fmr /= 2, srp_dev->fmr_max_size /= 2) {
		memset(&fmr_param, 0, sizeof fmr_param);
		fmr_param.pool_size	    = SRP_FMR_POOL_SIZE;
		fmr_param.dirty_watermark   = SRP_FMR_DIRTY_SIZE;
		fmr_param.cache		    = 1;
		fmr_param.max_pages_per_fmr = max_pages_per_fmr;
		fmr_param.page_shift	    = fmr_page_shift;
		fmr_param.access	    = (IB_ACCESS_LOCAL_WRITE |
					       IB_ACCESS_REMOTE_WRITE |
					       IB_ACCESS_REMOTE_READ);

		srp_dev->fmr_pool = ib_create_fmr_pool(srp_dev->pd, &fmr_param);
		if (!IS_ERR(srp_dev->fmr_pool))
			break;
	}

	if (IS_ERR(srp_dev->fmr_pool))
		srp_dev->fmr_pool = NULL;

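	/*
	 * A switch exposes only its management port, numbered 0; HCAs
	 * number their physical ports starting at 1.
	 */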
	if (device->node_type == RDMA_NODE_IB_SWITCH) {
		s = 0;
		e = 0;
	} else {
		s = 1;
		e = device->phys_port_cnt;
	}

	for (p = s; p <= e; ++p) {
		host = srp_add_port(srp_dev, p);
		if (host)
			list_add_tail(&host->list, &srp_dev->dev_list);
	}

	ib_set_client_data(device, &srp_client, srp_dev);

	goto free_attr;

err_pd:
	ib_dealloc_pd(srp_dev->pd);

free_dev:
	kfree(srp_dev);

free_attr:
	kfree(dev_attr);
}

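/*
 * Callback invoked by the IB core when an RDMA device goes away: tear down
 * every srp_host and target port created for the device, then release the
 * FMR pool, memory region and protection domain.
 */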
static void srp_remove_one(struct ib_device *device)
{
	struct srp_device *srp_dev;
	struct srp_host *host, *tmp_host;
	struct srp_target_port *target;

	srp_dev = ib_get_client_data(device, &srp_client);
	if (!srp_dev)
		return;

	list_for_each_entry_safe(host, tmp_host, &srp_dev->dev_list, list) {
		device_unregister(&host->dev);
		/*
		 * Wait for the sysfs entry to go away, so that no new
		 * target ports can be created.
		 */
		wait_for_completion(&host->released);

		/*
		 * Remove all target ports.
		 */
		spin_lock(&host->target_lock);
		list_for_each_entry(target, &host->target_list, list)
			srp_queue_remove_work(target);
		spin_unlock(&host->target_lock);

		/*
		 * Wait for target port removal tasks.
		 */
		flush_workqueue(system_long_wq);

		kfree(host);
	}

	if (srp_dev->fmr_pool)
		ib_destroy_fmr_pool(srp_dev->fmr_pool);
	ib_dereg_mr(srp_dev->mr);
	ib_dealloc_pd(srp_dev->pd);

	kfree(srp_dev);
}

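/*
 * Callbacks and timeout parameters handed to the SCSI SRP transport class;
 * they let the transport layer drive reconnect, fast I/O failure and
 * device-loss handling for this initiator.
 */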
static struct srp_function_template ib_srp_transport_functions = {
	.has_rport_state	 = true,
	.reset_timer_if_blocked	 = true,
	.reconnect_delay	 = &srp_reconnect_delay,
	.fast_io_fail_tmo	 = &srp_fast_io_fail_tmo,
	.dev_loss_tmo		 = &srp_dev_loss_tmo,
	.reconnect		 = srp_rport_reconnect,
	.rport_delete		 = srp_rport_delete,
	.terminate_rport_io	 = srp_terminate_io,
};

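/*
 * Module initialization: validate and reconcile the module parameters,
 * register the SRP transport template, the sysfs class and the SA client,
 * and finally register the IB client that triggers srp_add_one() for each
 * HCA in the system.
 */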
static int __init srp_init_module(void)
{
	int ret;

	BUILD_BUG_ON(FIELD_SIZEOF(struct ib_wc, wr_id) < sizeof(void *));

	if (srp_sg_tablesize) {
		pr_warn("srp_sg_tablesize is deprecated, please use cmd_sg_entries\n");
		if (!cmd_sg_entries)
			cmd_sg_entries = srp_sg_tablesize;
	}

	if (!cmd_sg_entries)
		cmd_sg_entries = SRP_DEF_SG_TABLESIZE;

	if (cmd_sg_entries > 255) {
		pr_warn("Clamping cmd_sg_entries to 255\n");
		cmd_sg_entries = 255;
	}

	if (!indirect_sg_entries)
		indirect_sg_entries = cmd_sg_entries;
	else if (indirect_sg_entries < cmd_sg_entries) {
		pr_warn("Bumping up indirect_sg_entries to match cmd_sg_entries (%u)\n",
			cmd_sg_entries);
		indirect_sg_entries = cmd_sg_entries;
	}

	ib_srp_transport_template =
		srp_attach_transport(&ib_srp_transport_functions);
	if (!ib_srp_transport_template)
		return -ENOMEM;

	ret = class_register(&srp_class);
	if (ret) {
		pr_err("couldn't register class infiniband_srp\n");
		srp_release_transport(ib_srp_transport_template);
		return ret;
	}

	ib_sa_register_client(&srp_sa_client);

	ret = ib_register_client(&srp_client);
	if (ret) {
		pr_err("couldn't register IB client\n");
		srp_release_transport(ib_srp_transport_template);
		ib_sa_unregister_client(&srp_sa_client);
		class_unregister(&srp_class);
		return ret;
	}

	return 0;
}

static void __exit srp_cleanup_module(void)
{
	ib_unregister_client(&srp_client);
	ib_sa_unregister_client(&srp_sa_client);
	class_unregister(&srp_class);
	srp_release_transport(ib_srp_transport_template);
}

module_init(srp_init_module);
module_exit(srp_cleanup_module);