/*******************************************************************************
 * This file contains iSCSI extensions for RDMA (iSER) Verbs
 *
 * (c) Copyright 2013 RisingTide Systems LLC.
 *
 * Nicholas A. Bellinger <nab@linux-iscsi.org>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 ****************************************************************************/

#include <linux/string.h>
#include <linux/module.h>
#include <linux/scatterlist.h>
#include <linux/socket.h>
#include <linux/in.h>
#include <linux/in6.h>
#include <rdma/ib_verbs.h>
#include <rdma/rdma_cm.h>
#include <target/target_core_base.h>
#include <target/target_core_fabric.h>
#include <target/iscsi/iscsi_transport.h>

#include "isert_proto.h"
#include "ib_isert.h"

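/*
 * A single CQ can be shared by up to ISERT_MAX_CONN queue pairs, so the
 * RX/TX completion queues are sized for the worst case: the per-QP
 * maximum of outstanding recv/send work requests times the connection
 * limit.
 */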
#define ISERT_MAX_CONN 8
#define ISER_MAX_RX_CQ_LEN (ISERT_QP_MAX_RECV_DTOS * ISERT_MAX_CONN)
#define ISER_MAX_TX_CQ_LEN (ISERT_QP_MAX_REQ_DTOS * ISERT_MAX_CONN)

static DEFINE_MUTEX(device_list_mutex);
static LIST_HEAD(device_list);
static struct workqueue_struct *isert_rx_wq;
static struct workqueue_struct *isert_comp_wq;

static void
isert_qp_event_callback(struct ib_event *e, void *context)
{
    struct isert_conn *isert_conn = (struct isert_conn *)context;

    pr_err("isert_qp_event_callback event: %d\n", e->event);
    switch (e->event) {
    case IB_EVENT_COMM_EST:
        rdma_notify(isert_conn->conn_cm_id, IB_EVENT_COMM_EST);
        break;
    case IB_EVENT_QP_LAST_WQE_REACHED:
        pr_warn("Reached TX IB_EVENT_QP_LAST_WQE_REACHED:\n");
        break;
    default:
        break;
    }
}

static int
isert_query_device(struct ib_device *ib_dev, struct ib_device_attr *devattr)
{
    int ret;

    ret = ib_query_device(ib_dev, devattr);
    if (ret) {
        pr_err("ib_query_device() failed: %d\n", ret);
        return ret;
    }
    pr_debug("devattr->max_sge: %d\n", devattr->max_sge);
    pr_debug("devattr->max_sge_rd: %d\n", devattr->max_sge_rd);

    return 0;
}

static int
isert_conn_setup_qp(struct isert_conn *isert_conn, struct rdma_cm_id *cma_id)
{
    struct isert_device *device = isert_conn->conn_device;
    struct ib_qp_init_attr attr;
    struct ib_device_attr devattr;
    int ret, index, min_index = 0;

    memset(&devattr, 0, sizeof(struct ib_device_attr));
    ret = isert_query_device(cma_id->device, &devattr);
    if (ret)
        return ret;

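    /*
     * Spread QPs across the device's completion queues: pick the CQ
     * index currently serving the fewest active QPs. The counters are
     * shared by all connections on the device, hence device_list_mutex.
     */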
    mutex_lock(&device_list_mutex);
    for (index = 0; index < device->cqs_used; index++)
        if (device->cq_active_qps[index] <
            device->cq_active_qps[min_index])
            min_index = index;
    device->cq_active_qps[min_index]++;
    pr_debug("isert_conn_setup_qp: Using min_index: %d\n", min_index);
    mutex_unlock(&device_list_mutex);

    memset(&attr, 0, sizeof(struct ib_qp_init_attr));
    attr.event_handler = isert_qp_event_callback;
    attr.qp_context = isert_conn;
    attr.send_cq = device->dev_tx_cq[min_index];
    attr.recv_cq = device->dev_rx_cq[min_index];
    attr.cap.max_send_wr = ISERT_QP_MAX_REQ_DTOS;
    attr.cap.max_recv_wr = ISERT_QP_MAX_RECV_DTOS;
    /*
     * FIXME: Use devattr.max_sge - 2 for max_send_sge as
     * work-around for RDMA_READ..
     */
    attr.cap.max_send_sge = devattr.max_sge - 2;
    isert_conn->max_sge = attr.cap.max_send_sge;

    attr.cap.max_recv_sge = 1;
    attr.sq_sig_type = IB_SIGNAL_REQ_WR;
    attr.qp_type = IB_QPT_RC;

    pr_debug("isert_conn_setup_qp cma_id->device: %p\n",
             cma_id->device);
    pr_debug("isert_conn_setup_qp conn_pd->device: %p\n",
             isert_conn->conn_pd->device);

    ret = rdma_create_qp(cma_id, isert_conn->conn_pd, &attr);
    if (ret) {
        pr_err("rdma_create_qp() failed: %d\n", ret);
        return ret;
    }
    isert_conn->conn_qp = cma_id->qp;
    pr_debug("rdma_create_qp() returned success >>>>>>>>>>>>>>>>>>>>>>>>>.\n");

    return 0;
}

static void
isert_cq_event_callback(struct ib_event *e, void *context)
{
    pr_debug("isert_cq_event_callback event: %d\n", e->event);
}

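/*
 * Allocate the receive descriptor ring: ISERT_QP_MAX_RECV_DTOS entries,
 * each DMA-mapped once up front and handed to the HCA as a single SGE of
 * ISER_RX_PAYLOAD_SIZE bytes. On a mapping failure everything mapped so
 * far is unwound.
 */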
static int
isert_alloc_rx_descriptors(struct isert_conn *isert_conn)
{
    struct ib_device *ib_dev = isert_conn->conn_cm_id->device;
    struct iser_rx_desc *rx_desc;
    struct ib_sge *rx_sg;
    u64 dma_addr;
    int i, j;

    isert_conn->conn_rx_descs = kzalloc(ISERT_QP_MAX_RECV_DTOS *
                    sizeof(struct iser_rx_desc), GFP_KERNEL);
    if (!isert_conn->conn_rx_descs)
        goto fail;

    rx_desc = isert_conn->conn_rx_descs;

    for (i = 0; i < ISERT_QP_MAX_RECV_DTOS; i++, rx_desc++) {
        dma_addr = ib_dma_map_single(ib_dev, (void *)rx_desc,
                    ISER_RX_PAYLOAD_SIZE, DMA_FROM_DEVICE);
        if (ib_dma_mapping_error(ib_dev, dma_addr))
            goto dma_map_fail;

        rx_desc->dma_addr = dma_addr;

        rx_sg = &rx_desc->rx_sg;
        rx_sg->addr = rx_desc->dma_addr;
        rx_sg->length = ISER_RX_PAYLOAD_SIZE;
        rx_sg->lkey = isert_conn->conn_mr->lkey;
    }

    isert_conn->conn_rx_desc_head = 0;
    return 0;

dma_map_fail:
    rx_desc = isert_conn->conn_rx_descs;
    for (j = 0; j < i; j++, rx_desc++) {
        ib_dma_unmap_single(ib_dev, rx_desc->dma_addr,
                            ISER_RX_PAYLOAD_SIZE, DMA_FROM_DEVICE);
    }
    kfree(isert_conn->conn_rx_descs);
    isert_conn->conn_rx_descs = NULL;
fail:
    return -ENOMEM;
}

static void
isert_free_rx_descriptors(struct isert_conn *isert_conn)
{
    struct ib_device *ib_dev = isert_conn->conn_cm_id->device;
    struct iser_rx_desc *rx_desc;
    int i;

    if (!isert_conn->conn_rx_descs)
        return;

    rx_desc = isert_conn->conn_rx_descs;
    for (i = 0; i < ISERT_QP_MAX_RECV_DTOS; i++, rx_desc++) {
        ib_dma_unmap_single(ib_dev, rx_desc->dma_addr,
                            ISER_RX_PAYLOAD_SIZE, DMA_FROM_DEVICE);
    }

    kfree(isert_conn->conn_rx_descs);
    isert_conn->conn_rx_descs = NULL;
}

static void isert_cq_tx_callback(struct ib_cq *, void *);
static void isert_cq_rx_callback(struct ib_cq *, void *);

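/*
 * Per ib_device resources: one RX/TX completion queue pair per completion
 * vector (bounded by the number of online CPUs and ISERT_MAX_CQ), plus a
 * protection domain and a local-write DMA MR shared by all connections on
 * the device.
 */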
static int
isert_create_device_ib_res(struct isert_device *device)
{
    struct ib_device *ib_dev = device->ib_device;
    struct isert_cq_desc *cq_desc;
    int ret = 0, i, j;

    device->cqs_used = min_t(int, num_online_cpus(),
                             device->ib_device->num_comp_vectors);
    device->cqs_used = min(ISERT_MAX_CQ, device->cqs_used);
    pr_debug("Using %d CQs, device %s supports %d vectors\n",
             device->cqs_used, device->ib_device->name,
             device->ib_device->num_comp_vectors);
    device->cq_desc = kzalloc(sizeof(struct isert_cq_desc) *
                              device->cqs_used, GFP_KERNEL);
    if (!device->cq_desc) {
        pr_err("Unable to allocate device->cq_desc\n");
        return -ENOMEM;
    }
    cq_desc = device->cq_desc;

    device->dev_pd = ib_alloc_pd(ib_dev);
    if (IS_ERR(device->dev_pd)) {
        ret = PTR_ERR(device->dev_pd);
        pr_err("ib_alloc_pd failed for dev_pd: %d\n", ret);
        goto out_cq_desc;
    }

    for (i = 0; i < device->cqs_used; i++) {
        cq_desc[i].device = device;
        cq_desc[i].cq_index = i;

        device->dev_rx_cq[i] = ib_create_cq(device->ib_device,
                        isert_cq_rx_callback,
                        isert_cq_event_callback,
                        (void *)&cq_desc[i],
                        ISER_MAX_RX_CQ_LEN, i);
        if (IS_ERR(device->dev_rx_cq[i])) {
            ret = PTR_ERR(device->dev_rx_cq[i]);
            device->dev_rx_cq[i] = NULL;
            goto out_cq;
        }

        device->dev_tx_cq[i] = ib_create_cq(device->ib_device,
                        isert_cq_tx_callback,
                        isert_cq_event_callback,
                        (void *)&cq_desc[i],
                        ISER_MAX_TX_CQ_LEN, i);
        if (IS_ERR(device->dev_tx_cq[i])) {
            ret = PTR_ERR(device->dev_tx_cq[i]);
            device->dev_tx_cq[i] = NULL;
            goto out_cq;
        }

        ret = ib_req_notify_cq(device->dev_rx_cq[i], IB_CQ_NEXT_COMP);
        if (ret)
            goto out_cq;

        ret = ib_req_notify_cq(device->dev_tx_cq[i], IB_CQ_NEXT_COMP);
        if (ret)
            goto out_cq;
    }

    device->dev_mr = ib_get_dma_mr(device->dev_pd, IB_ACCESS_LOCAL_WRITE);
    if (IS_ERR(device->dev_mr)) {
        ret = PTR_ERR(device->dev_mr);
        pr_err("ib_get_dma_mr failed for dev_mr: %d\n", ret);
        goto out_cq;
    }

    return 0;

out_cq:
    for (j = 0; j <= i && j < device->cqs_used; j++) {
        cq_desc = &device->cq_desc[j];

        if (device->dev_rx_cq[j]) {
            cancel_work_sync(&cq_desc->cq_rx_work);
            ib_destroy_cq(device->dev_rx_cq[j]);
        }
        if (device->dev_tx_cq[j]) {
            cancel_work_sync(&cq_desc->cq_tx_work);
            ib_destroy_cq(device->dev_tx_cq[j]);
        }
    }
    ib_dealloc_pd(device->dev_pd);

out_cq_desc:
    kfree(device->cq_desc);

    return ret;
}

static void
isert_free_device_ib_res(struct isert_device *device)
{
    struct isert_cq_desc *cq_desc;
    int i;

    for (i = 0; i < device->cqs_used; i++) {
        cq_desc = &device->cq_desc[i];

        cancel_work_sync(&cq_desc->cq_rx_work);
        cancel_work_sync(&cq_desc->cq_tx_work);
        ib_destroy_cq(device->dev_rx_cq[i]);
        ib_destroy_cq(device->dev_tx_cq[i]);
        device->dev_rx_cq[i] = NULL;
        device->dev_tx_cq[i] = NULL;
    }

    ib_dereg_mr(device->dev_mr);
    ib_dealloc_pd(device->dev_pd);
    kfree(device->cq_desc);
}

static void
isert_device_try_release(struct isert_device *device)
{
    mutex_lock(&device_list_mutex);
    device->refcount--;
    if (!device->refcount) {
        isert_free_device_ib_res(device);
        list_del(&device->dev_node);
        kfree(device);
    }
    mutex_unlock(&device_list_mutex);
}

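/*
 * Find (or create) the isert_device backing a new connection, keyed by
 * the ib_device node GUID. The reference taken under device_list_mutex
 * pairs with isert_device_try_release().
 */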
static struct isert_device *
isert_device_find_by_ib_dev(struct rdma_cm_id *cma_id)
{
    struct isert_device *device;
    int ret;

    mutex_lock(&device_list_mutex);
    list_for_each_entry(device, &device_list, dev_node) {
        if (device->ib_device->node_guid == cma_id->device->node_guid) {
            device->refcount++;
            mutex_unlock(&device_list_mutex);
            return device;
        }
    }

    device = kzalloc(sizeof(struct isert_device), GFP_KERNEL);
    if (!device) {
        mutex_unlock(&device_list_mutex);
        return ERR_PTR(-ENOMEM);
    }

    INIT_LIST_HEAD(&device->dev_node);

    device->ib_device = cma_id->device;
    ret = isert_create_device_ib_res(device);
    if (ret) {
        kfree(device);
        mutex_unlock(&device_list_mutex);
        return ERR_PTR(ret);
    }

    device->refcount++;
    list_add_tail(&device->dev_node, &device_list);
    mutex_unlock(&device_list_mutex);

    return device;
}

static int
isert_connect_request(struct rdma_cm_id *cma_id, struct rdma_cm_event *event)
{
    struct iscsi_np *np = cma_id->context;
    struct isert_np *isert_np = np->np_context;
    struct isert_conn *isert_conn;
    struct isert_device *device;
    struct ib_device *ib_dev = cma_id->device;
    int ret = 0;

    pr_debug("Entering isert_connect_request cma_id: %p, context: %p\n",
             cma_id, cma_id->context);

    isert_conn = kzalloc(sizeof(struct isert_conn), GFP_KERNEL);
    if (!isert_conn) {
        pr_err("Unable to allocate isert_conn\n");
        return -ENOMEM;
    }
    isert_conn->state = ISER_CONN_INIT;
    INIT_LIST_HEAD(&isert_conn->conn_accept_node);
    init_completion(&isert_conn->conn_login_comp);
    init_waitqueue_head(&isert_conn->conn_wait);
    init_waitqueue_head(&isert_conn->conn_wait_comp_err);
    kref_init(&isert_conn->conn_kref);
    kref_get(&isert_conn->conn_kref);
    mutex_init(&isert_conn->conn_mutex);

    cma_id->context = isert_conn;
    isert_conn->conn_cm_id = cma_id;
    isert_conn->responder_resources = event->param.conn.responder_resources;
    isert_conn->initiator_depth = event->param.conn.initiator_depth;
    pr_debug("Using responder_resources: %u initiator_depth: %u\n",
             isert_conn->responder_resources, isert_conn->initiator_depth);

    isert_conn->login_buf = kzalloc(ISCSI_DEF_MAX_RECV_SEG_LEN +
                    ISER_RX_LOGIN_SIZE, GFP_KERNEL);
    if (!isert_conn->login_buf) {
        pr_err("Unable to allocate isert_conn->login_buf\n");
        ret = -ENOMEM;
        goto out;
    }

    isert_conn->login_req_buf = isert_conn->login_buf;
    isert_conn->login_rsp_buf = isert_conn->login_buf +
                    ISCSI_DEF_MAX_RECV_SEG_LEN;
    pr_debug("Set login_buf: %p login_req_buf: %p login_rsp_buf: %p\n",
             isert_conn->login_buf, isert_conn->login_req_buf,
             isert_conn->login_rsp_buf);

    isert_conn->login_req_dma = ib_dma_map_single(ib_dev,
                (void *)isert_conn->login_req_buf,
                ISCSI_DEF_MAX_RECV_SEG_LEN, DMA_FROM_DEVICE);

    ret = ib_dma_mapping_error(ib_dev, isert_conn->login_req_dma);
    if (ret) {
        pr_err("ib_dma_mapping_error failed for login_req_dma: %d\n",
               ret);
        isert_conn->login_req_dma = 0;
        goto out_login_buf;
    }

    isert_conn->login_rsp_dma = ib_dma_map_single(ib_dev,
                (void *)isert_conn->login_rsp_buf,
                ISER_RX_LOGIN_SIZE, DMA_TO_DEVICE);

    ret = ib_dma_mapping_error(ib_dev, isert_conn->login_rsp_dma);
    if (ret) {
        pr_err("ib_dma_mapping_error failed for login_rsp_dma: %d\n",
               ret);
        isert_conn->login_rsp_dma = 0;
        goto out_req_dma_map;
    }

    device = isert_device_find_by_ib_dev(cma_id);
    if (IS_ERR(device)) {
        ret = PTR_ERR(device);
        goto out_rsp_dma_map;
    }

    isert_conn->conn_device = device;
    isert_conn->conn_pd = device->dev_pd;
    isert_conn->conn_mr = device->dev_mr;

    ret = isert_conn_setup_qp(isert_conn, cma_id);
    if (ret)
        goto out_conn_dev;

    mutex_lock(&isert_np->np_accept_mutex);
    list_add_tail(&isert_conn->conn_accept_node, &isert_np->np_accept_list);
    mutex_unlock(&isert_np->np_accept_mutex);

    pr_debug("isert_connect_request() waking up np_accept_wq: %p\n", np);
    wake_up(&isert_np->np_accept_wq);
    return 0;

out_conn_dev:
    isert_device_try_release(device);
out_rsp_dma_map:
    ib_dma_unmap_single(ib_dev, isert_conn->login_rsp_dma,
                        ISER_RX_LOGIN_SIZE, DMA_TO_DEVICE);
out_req_dma_map:
    ib_dma_unmap_single(ib_dev, isert_conn->login_req_dma,
                        ISCSI_DEF_MAX_RECV_SEG_LEN, DMA_FROM_DEVICE);
out_login_buf:
    kfree(isert_conn->login_buf);
out:
    kfree(isert_conn);
    return ret;
}

static void
isert_connect_release(struct isert_conn *isert_conn)
{
    struct ib_device *ib_dev = isert_conn->conn_cm_id->device;
    struct isert_device *device = isert_conn->conn_device;
    int cq_index;

    pr_debug("Entering isert_connect_release(): >>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>\n");

    if (isert_conn->conn_qp) {
        cq_index = ((struct isert_cq_desc *)
            isert_conn->conn_qp->recv_cq->cq_context)->cq_index;
        pr_debug("isert_connect_release: cq_index: %d\n", cq_index);
        isert_conn->conn_device->cq_active_qps[cq_index]--;

        rdma_destroy_qp(isert_conn->conn_cm_id);
    }

    isert_free_rx_descriptors(isert_conn);
    rdma_destroy_id(isert_conn->conn_cm_id);

    if (isert_conn->login_buf) {
        ib_dma_unmap_single(ib_dev, isert_conn->login_rsp_dma,
                            ISER_RX_LOGIN_SIZE, DMA_TO_DEVICE);
        ib_dma_unmap_single(ib_dev, isert_conn->login_req_dma,
                            ISCSI_DEF_MAX_RECV_SEG_LEN,
                            DMA_FROM_DEVICE);
        kfree(isert_conn->login_buf);
    }
    kfree(isert_conn);

    if (device)
        isert_device_try_release(device);

    pr_debug("Leaving isert_connect_release >>>>>>>>>>>>\n");
}

static void
isert_connected_handler(struct rdma_cm_id *cma_id)
{
    return;
}

static void
isert_release_conn_kref(struct kref *kref)
{
    struct isert_conn *isert_conn = container_of(kref,
                struct isert_conn, conn_kref);

    pr_debug("Calling isert_connect_release for final kref %s/%d\n",
             current->comm, current->pid);

    isert_connect_release(isert_conn);
}

static void
isert_put_conn(struct isert_conn *isert_conn)
{
    kref_put(&isert_conn->conn_kref, isert_release_conn_kref);
}

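/*
 * Deferred disconnect handling: scheduled from the RDMA CM event handler
 * so that rdma_disconnect() and connection reinstatement never run inside
 * the CM callback itself. On exit it wakes any waiter on conn_wait and
 * drops this work's reference on the connection.
 */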
static void
isert_disconnect_work(struct work_struct *work)
{
    struct isert_conn *isert_conn = container_of(work,
                struct isert_conn, conn_logout_work);

    pr_debug("isert_disconnect_work(): >>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>\n");
    mutex_lock(&isert_conn->conn_mutex);
    isert_conn->state = ISER_CONN_DOWN;

    if (isert_conn->post_recv_buf_count == 0 &&
        atomic_read(&isert_conn->post_send_buf_count) == 0) {
        pr_debug("Calling wake_up(&isert_conn->conn_wait);\n");
        mutex_unlock(&isert_conn->conn_mutex);
        goto wake_up;
    }
    if (!isert_conn->conn_cm_id) {
        mutex_unlock(&isert_conn->conn_mutex);
        isert_put_conn(isert_conn);
        return;
    }
    if (!isert_conn->logout_posted) {
        pr_debug("Calling rdma_disconnect for !logout_posted from"
                 " isert_disconnect_work\n");
        rdma_disconnect(isert_conn->conn_cm_id);
        mutex_unlock(&isert_conn->conn_mutex);
        iscsit_cause_connection_reinstatement(isert_conn->conn, 0);
        goto wake_up;
    }
    mutex_unlock(&isert_conn->conn_mutex);

wake_up:
    wake_up(&isert_conn->conn_wait);
    isert_put_conn(isert_conn);
}

static void
isert_disconnected_handler(struct rdma_cm_id *cma_id)
{
    struct isert_conn *isert_conn = (struct isert_conn *)cma_id->context;

    INIT_WORK(&isert_conn->conn_logout_work, isert_disconnect_work);
    schedule_work(&isert_conn->conn_logout_work);
}

static int
isert_cma_handler(struct rdma_cm_id *cma_id, struct rdma_cm_event *event)
{
    int ret = 0;

    pr_debug("isert_cma_handler: event %d status %d conn %p id %p\n",
             event->event, event->status, cma_id->context, cma_id);

    switch (event->event) {
    case RDMA_CM_EVENT_CONNECT_REQUEST:
        pr_debug("RDMA_CM_EVENT_CONNECT_REQUEST: >>>>>>>>>>>>>>>\n");
        ret = isert_connect_request(cma_id, event);
        break;
    case RDMA_CM_EVENT_ESTABLISHED:
        pr_debug("RDMA_CM_EVENT_ESTABLISHED >>>>>>>>>>>>>>\n");
        isert_connected_handler(cma_id);
        break;
    case RDMA_CM_EVENT_DISCONNECTED:
        pr_debug("RDMA_CM_EVENT_DISCONNECTED: >>>>>>>>>>>>>>\n");
        isert_disconnected_handler(cma_id);
        break;
    case RDMA_CM_EVENT_DEVICE_REMOVAL:
    case RDMA_CM_EVENT_ADDR_CHANGE:
        break;
    case RDMA_CM_EVENT_CONNECT_ERROR:
    default:
        pr_err("Unknown RDMA CMA event: %d\n", event->event);
        break;
    }

    if (ret != 0) {
        pr_err("isert_cma_handler failed RDMA_CM_EVENT: 0x%08x %d\n",
               event->event, ret);
        dump_stack();
    }

    return ret;
}

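/*
 * Post "count" receive work requests, consuming ring slots starting at
 * conn_rx_desc_head. The WRs are chained via rx_wr->next and posted with
 * a single ib_post_recv(); the head advance relies on
 * ISERT_QP_MAX_RECV_DTOS being a power of two so that the AND acts as a
 * ring-wrap mask.
 */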
static int
isert_post_recv(struct isert_conn *isert_conn, u32 count)
{
    struct ib_recv_wr *rx_wr, *rx_wr_failed;
    int i, ret;
    unsigned int rx_head = isert_conn->conn_rx_desc_head;
    struct iser_rx_desc *rx_desc;

    for (rx_wr = isert_conn->conn_rx_wr, i = 0; i < count; i++, rx_wr++) {
        rx_desc = &isert_conn->conn_rx_descs[rx_head];
        rx_wr->wr_id = (unsigned long)rx_desc;
        rx_wr->sg_list = &rx_desc->rx_sg;
        rx_wr->num_sge = 1;
        rx_wr->next = rx_wr + 1;
        rx_head = (rx_head + 1) & (ISERT_QP_MAX_RECV_DTOS - 1);
    }

    rx_wr--;
    rx_wr->next = NULL; /* mark end of work requests list */

    isert_conn->post_recv_buf_count += count;
    ret = ib_post_recv(isert_conn->conn_qp, isert_conn->conn_rx_wr,
                       &rx_wr_failed);
    if (ret) {
        pr_err("ib_post_recv() failed with ret: %d\n", ret);
        isert_conn->post_recv_buf_count -= count;
    } else {
        pr_debug("isert_post_recv(): Posted %d RX buffers\n", count);
        isert_conn->conn_rx_desc_head = rx_head;
    }
    return ret;
}

static int
isert_post_send(struct isert_conn *isert_conn, struct iser_tx_desc *tx_desc)
{
    struct ib_device *ib_dev = isert_conn->conn_cm_id->device;
    struct ib_send_wr send_wr, *send_wr_failed;
    int ret;

    ib_dma_sync_single_for_device(ib_dev, tx_desc->dma_addr,
                                  ISER_HEADERS_LEN, DMA_TO_DEVICE);

    send_wr.next = NULL;
    send_wr.wr_id = (unsigned long)tx_desc;
    send_wr.sg_list = tx_desc->tx_sg;
    send_wr.num_sge = tx_desc->num_sge;
    send_wr.opcode = IB_WR_SEND;
    send_wr.send_flags = IB_SEND_SIGNALED;

    atomic_inc(&isert_conn->post_send_buf_count);

    ret = ib_post_send(isert_conn->conn_qp, &send_wr, &send_wr_failed);
    if (ret) {
        pr_err("ib_post_send() failed, ret: %d\n", ret);
        atomic_dec(&isert_conn->post_send_buf_count);
    }

    return ret;
}

static void
isert_create_send_desc(struct isert_conn *isert_conn,
                       struct isert_cmd *isert_cmd,
                       struct iser_tx_desc *tx_desc)
{
    struct ib_device *ib_dev = isert_conn->conn_cm_id->device;

    ib_dma_sync_single_for_cpu(ib_dev, tx_desc->dma_addr,
                               ISER_HEADERS_LEN, DMA_TO_DEVICE);

    memset(&tx_desc->iser_header, 0, sizeof(struct iser_hdr));
    tx_desc->iser_header.flags = ISER_VER;

    tx_desc->num_sge = 1;
    tx_desc->isert_cmd = isert_cmd;

    if (tx_desc->tx_sg[0].lkey != isert_conn->conn_mr->lkey) {
        tx_desc->tx_sg[0].lkey = isert_conn->conn_mr->lkey;
        pr_debug("tx_desc %p lkey mismatch, fixing\n", tx_desc);
    }
}

static int
isert_init_tx_hdrs(struct isert_conn *isert_conn,
                   struct iser_tx_desc *tx_desc)
{
    struct ib_device *ib_dev = isert_conn->conn_cm_id->device;
    u64 dma_addr;

    dma_addr = ib_dma_map_single(ib_dev, (void *)tx_desc,
                                 ISER_HEADERS_LEN, DMA_TO_DEVICE);
    if (ib_dma_mapping_error(ib_dev, dma_addr)) {
        pr_err("ib_dma_mapping_error() failed\n");
        return -ENOMEM;
    }

    tx_desc->dma_addr = dma_addr;
    tx_desc->tx_sg[0].addr = tx_desc->dma_addr;
    tx_desc->tx_sg[0].length = ISER_HEADERS_LEN;
    tx_desc->tx_sg[0].lkey = isert_conn->conn_mr->lkey;

    pr_debug("isert_init_tx_hdrs: Setup tx_sg[0].addr: 0x%llx length: %u"
             " lkey: 0x%08x\n", tx_desc->tx_sg[0].addr,
             tx_desc->tx_sg[0].length, tx_desc->tx_sg[0].lkey);

    return 0;
}

static void
isert_init_send_wr(struct isert_cmd *isert_cmd, struct ib_send_wr *send_wr)
{
    isert_cmd->rdma_wr.iser_ib_op = ISER_IB_SEND;
    send_wr->wr_id = (unsigned long)&isert_cmd->tx_desc;
    send_wr->opcode = IB_WR_SEND;
    send_wr->send_flags = IB_SEND_SIGNALED;
    send_wr->sg_list = &isert_cmd->tx_desc.tx_sg[0];
    send_wr->num_sge = isert_cmd->tx_desc.num_sge;
}

static int
isert_rdma_post_recvl(struct isert_conn *isert_conn)
{
    struct ib_recv_wr rx_wr, *rx_wr_fail;
    struct ib_sge sge;
    int ret;

    memset(&sge, 0, sizeof(struct ib_sge));
    sge.addr = isert_conn->login_req_dma;
    sge.length = ISER_RX_LOGIN_SIZE;
    sge.lkey = isert_conn->conn_mr->lkey;

    pr_debug("Setup sge: addr: %llx length: %d 0x%08x\n",
             sge.addr, sge.length, sge.lkey);

    memset(&rx_wr, 0, sizeof(struct ib_recv_wr));
    rx_wr.wr_id = (unsigned long)isert_conn->login_req_buf;
    rx_wr.sg_list = &sge;
    rx_wr.num_sge = 1;

    isert_conn->post_recv_buf_count++;
    ret = ib_post_recv(isert_conn->conn_qp, &rx_wr, &rx_wr_fail);
    if (ret) {
        pr_err("ib_post_recv() failed: %d\n", ret);
        isert_conn->post_recv_buf_count--;
    }

    pr_debug("ib_post_recv(): returned success >>>>>>>>>>>>>>>>>>>>>>>>\n");
    return ret;
}

static int
isert_put_login_tx(struct iscsi_conn *conn, struct iscsi_login *login,
                   u32 length)
{
    struct isert_conn *isert_conn = conn->context;
    struct ib_device *ib_dev = isert_conn->conn_cm_id->device;
    struct iser_tx_desc *tx_desc = &isert_conn->conn_login_tx_desc;
    int ret;

    isert_create_send_desc(isert_conn, NULL, tx_desc);

    memcpy(&tx_desc->iscsi_header, &login->rsp[0],
           sizeof(struct iscsi_hdr));

    isert_init_tx_hdrs(isert_conn, tx_desc);

    if (length > 0) {
        struct ib_sge *tx_dsg = &tx_desc->tx_sg[1];

        ib_dma_sync_single_for_cpu(ib_dev, isert_conn->login_rsp_dma,
                                   length, DMA_TO_DEVICE);

        memcpy(isert_conn->login_rsp_buf, login->rsp_buf, length);

        ib_dma_sync_single_for_device(ib_dev, isert_conn->login_rsp_dma,
                                      length, DMA_TO_DEVICE);

        tx_dsg->addr = isert_conn->login_rsp_dma;
        tx_dsg->length = length;
        tx_dsg->lkey = isert_conn->conn_mr->lkey;
        tx_desc->num_sge = 2;
    }
    if (!login->login_failed) {
        if (login->login_complete) {
            ret = isert_alloc_rx_descriptors(isert_conn);
            if (ret)
                return ret;

            ret = isert_post_recv(isert_conn, ISERT_MIN_POSTED_RX);
            if (ret)
                return ret;

            isert_conn->state = ISER_CONN_UP;
            goto post_send;
        }

        ret = isert_rdma_post_recvl(isert_conn);
        if (ret)
            return ret;
    }
post_send:
    ret = isert_post_send(isert_conn, tx_desc);
    if (ret)
        return ret;

    return 0;
}

static void
isert_rx_login_req(struct iser_rx_desc *rx_desc, int rx_buflen,
                   struct isert_conn *isert_conn)
{
    struct iscsi_conn *conn = isert_conn->conn;
    struct iscsi_login *login = conn->conn_login;
    int size;

    if (!login) {
        pr_err("conn->conn_login is NULL\n");
        dump_stack();
        return;
    }

    if (login->first_request) {
        struct iscsi_login_req *login_req =
            (struct iscsi_login_req *)&rx_desc->iscsi_header;
        /*
         * Setup the initial iscsi_login values from the leading
         * login request PDU.
         */
        login->leading_connection = (!login_req->tsih) ? 1 : 0;
        login->current_stage =
            (login_req->flags & ISCSI_FLAG_LOGIN_CURRENT_STAGE_MASK)
            >> 2;
        login->version_min = login_req->min_version;
        login->version_max = login_req->max_version;
        memcpy(login->isid, login_req->isid, 6);
        login->cmd_sn = be32_to_cpu(login_req->cmdsn);
        login->init_task_tag = login_req->itt;
        login->initial_exp_statsn = be32_to_cpu(login_req->exp_statsn);
        login->cid = be16_to_cpu(login_req->cid);
        login->tsih = be16_to_cpu(login_req->tsih);
    }

    memcpy(&login->req[0], (void *)&rx_desc->iscsi_header, ISCSI_HDR_LEN);

    size = min(rx_buflen, MAX_KEY_VALUE_PAIRS);
    pr_debug("Using login payload size: %d, rx_buflen: %d MAX_KEY_VALUE_PAIRS: %d\n",
             size, rx_buflen, MAX_KEY_VALUE_PAIRS);
    memcpy(login->req_buf, &rx_desc->data[0], size);

    if (login->first_request) {
        complete(&isert_conn->conn_login_comp);
        return;
    }
    schedule_delayed_work(&conn->login_work, 0);
}

static struct iscsi_cmd
*isert_allocate_cmd(struct iscsi_conn *conn, gfp_t gfp)
{
    struct isert_conn *isert_conn = (struct isert_conn *)conn->context;
    struct isert_cmd *isert_cmd;
    struct iscsi_cmd *cmd;

    cmd = iscsit_allocate_cmd(conn, gfp);
    if (!cmd) {
        pr_err("Unable to allocate iscsi_cmd + isert_cmd\n");
        return NULL;
    }
    isert_cmd = iscsit_priv_cmd(cmd);
    isert_cmd->conn = isert_conn;
    isert_cmd->iscsi_cmd = cmd;

    return cmd;
}

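/*
 * Immediate data arrives in the same iSER RX descriptor as the SCSI
 * command PDU, so it is copied straight out of rx_desc->data[] into the
 * se_cmd scatterlist before the command is sequenced; no RDMA READ is
 * needed for the first burst.
 */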
static int
isert_handle_scsi_cmd(struct isert_conn *isert_conn,
                      struct isert_cmd *isert_cmd, struct iscsi_cmd *cmd,
                      struct iser_rx_desc *rx_desc, unsigned char *buf)
{
    struct iscsi_conn *conn = isert_conn->conn;
    struct iscsi_scsi_req *hdr = (struct iscsi_scsi_req *)buf;
    struct scatterlist *sg;
    int imm_data, imm_data_len, unsol_data, sg_nents, rc;
    bool dump_payload = false;

    rc = iscsit_setup_scsi_cmd(conn, cmd, buf);
    if (rc < 0)
        return rc;

    imm_data = cmd->immediate_data;
    imm_data_len = cmd->first_burst_len;
    unsol_data = cmd->unsolicited_data;

    rc = iscsit_process_scsi_cmd(conn, cmd, hdr);
    if (rc < 0) {
        return 0;
    } else if (rc > 0) {
        dump_payload = true;
        goto sequence_cmd;
    }

    if (!imm_data)
        return 0;

    sg = &cmd->se_cmd.t_data_sg[0];
    sg_nents = max(1UL, DIV_ROUND_UP(imm_data_len, PAGE_SIZE));

    pr_debug("Copying Immediate SG: %p sg_nents: %u from %p imm_data_len: %d\n",
             sg, sg_nents, &rx_desc->data[0], imm_data_len);

    sg_copy_from_buffer(sg, sg_nents, &rx_desc->data[0], imm_data_len);

    cmd->write_data_done += imm_data_len;

    if (cmd->write_data_done == cmd->se_cmd.data_length) {
        spin_lock_bh(&cmd->istate_lock);
        cmd->cmd_flags |= ICF_GOT_LAST_DATAOUT;
        cmd->i_state = ISTATE_RECEIVED_LAST_DATAOUT;
        spin_unlock_bh(&cmd->istate_lock);
    }

sequence_cmd:
    rc = iscsit_sequence_cmd(conn, cmd, buf, hdr->cmdsn);

    if (!rc && !dump_payload && unsol_data)
        iscsit_set_unsoliticed_dataout(cmd);

    return 0;
}

static int
isert_handle_iscsi_dataout(struct isert_conn *isert_conn,
                           struct iser_rx_desc *rx_desc, unsigned char *buf)
{
    struct scatterlist *sg_start;
    struct iscsi_conn *conn = isert_conn->conn;
    struct iscsi_cmd *cmd = NULL;
    struct iscsi_data *hdr = (struct iscsi_data *)buf;
    u32 unsol_data_len = ntoh24(hdr->dlength);
    int rc, sg_nents, sg_off, page_off;

    rc = iscsit_check_dataout_hdr(conn, buf, &cmd);
    if (rc < 0)
        return rc;
    else if (!cmd)
        return 0;
    /*
     * FIXME: Unexpected unsolicited_data out
     */
    if (!cmd->unsolicited_data) {
        pr_err("Received unexpected solicited data payload\n");
        dump_stack();
        return -1;
    }

    pr_debug("Unsolicited DataOut unsol_data_len: %u, write_data_done: %u, data_length: %u\n",
             unsol_data_len, cmd->write_data_done, cmd->se_cmd.data_length);

    sg_off = cmd->write_data_done / PAGE_SIZE;
    sg_start = &cmd->se_cmd.t_data_sg[sg_off];
    sg_nents = max(1UL, DIV_ROUND_UP(unsol_data_len, PAGE_SIZE));
    page_off = cmd->write_data_done % PAGE_SIZE;
    /*
     * FIXME: Non page-aligned unsolicited_data out
     */
    if (page_off) {
        pr_err("Received unexpected non-page aligned data payload\n");
        dump_stack();
        return -1;
    }
    pr_debug("Copying DataOut: sg_start: %p, sg_off: %u sg_nents: %u from %p %u\n",
             sg_start, sg_off, sg_nents, &rx_desc->data[0], unsol_data_len);

    sg_copy_from_buffer(sg_start, sg_nents, &rx_desc->data[0],
                        unsol_data_len);

    rc = iscsit_check_dataout_payload(cmd, hdr, false);
    if (rc < 0)
        return rc;

    return 0;
}

static int
isert_handle_nop_out(struct isert_conn *isert_conn, struct isert_cmd *isert_cmd,
                     struct iscsi_cmd *cmd, struct iser_rx_desc *rx_desc,
                     unsigned char *buf)
{
    struct iscsi_conn *conn = isert_conn->conn;
    struct iscsi_nopout *hdr = (struct iscsi_nopout *)buf;
    int rc;

    rc = iscsit_setup_nop_out(conn, cmd, hdr);
    if (rc < 0)
        return rc;
    /*
     * FIXME: Add support for NOPOUT payload using unsolicited RDMA payload
     */

    return iscsit_process_nop_out(conn, cmd, hdr);
}

static int
isert_handle_text_cmd(struct isert_conn *isert_conn, struct isert_cmd *isert_cmd,
                      struct iscsi_cmd *cmd, struct iser_rx_desc *rx_desc,
                      struct iscsi_text *hdr)
{
    struct iscsi_conn *conn = isert_conn->conn;
    u32 payload_length = ntoh24(hdr->dlength);
    int rc;
    unsigned char *text_in;

    rc = iscsit_setup_text_cmd(conn, cmd, hdr);
    if (rc < 0)
        return rc;

    text_in = kzalloc(payload_length, GFP_KERNEL);
    if (!text_in) {
        pr_err("Unable to allocate text_in of payload_length: %u\n",
               payload_length);
        return -ENOMEM;
    }
    cmd->text_in_ptr = text_in;

    memcpy(cmd->text_in_ptr, &rx_desc->data[0], payload_length);

    return iscsit_process_text_cmd(conn, cmd, hdr);
}

static int
isert_rx_opcode(struct isert_conn *isert_conn, struct iser_rx_desc *rx_desc,
                uint32_t read_stag, uint64_t read_va,
                uint32_t write_stag, uint64_t write_va)
{
    struct iscsi_hdr *hdr = &rx_desc->iscsi_header;
    struct iscsi_conn *conn = isert_conn->conn;
    struct iscsi_session *sess = conn->sess;
    struct iscsi_cmd *cmd;
    struct isert_cmd *isert_cmd;
    int ret = -EINVAL;
    u8 opcode = (hdr->opcode & ISCSI_OPCODE_MASK);

    /*
     * Discovery sessions only permit Text and Logout requests, so
     * compare opcodes for equality rather than testing individual bits.
     */
    if (sess->sess_ops->SessionType &&
        (opcode != ISCSI_OP_TEXT && opcode != ISCSI_OP_LOGOUT)) {
        pr_err("Got illegal opcode: 0x%02x in SessionType=Discovery,"
               " ignoring\n", opcode);
        return 0;
    }

    switch (opcode) {
    case ISCSI_OP_SCSI_CMD:
        cmd = isert_allocate_cmd(conn, GFP_KERNEL);
        if (!cmd)
            break;

        isert_cmd = iscsit_priv_cmd(cmd);
        isert_cmd->read_stag = read_stag;
        isert_cmd->read_va = read_va;
        isert_cmd->write_stag = write_stag;
        isert_cmd->write_va = write_va;

        ret = isert_handle_scsi_cmd(isert_conn, isert_cmd, cmd,
                                    rx_desc, (unsigned char *)hdr);
        break;
    case ISCSI_OP_NOOP_OUT:
        cmd = isert_allocate_cmd(conn, GFP_KERNEL);
        if (!cmd)
            break;

        isert_cmd = iscsit_priv_cmd(cmd);
        ret = isert_handle_nop_out(isert_conn, isert_cmd, cmd,
                                   rx_desc, (unsigned char *)hdr);
        break;
    case ISCSI_OP_SCSI_DATA_OUT:
        ret = isert_handle_iscsi_dataout(isert_conn, rx_desc,
                                         (unsigned char *)hdr);
        break;
    case ISCSI_OP_SCSI_TMFUNC:
        cmd = isert_allocate_cmd(conn, GFP_KERNEL);
        if (!cmd)
            break;

        ret = iscsit_handle_task_mgt_cmd(conn, cmd,
                                         (unsigned char *)hdr);
        break;
    case ISCSI_OP_LOGOUT:
        cmd = isert_allocate_cmd(conn, GFP_KERNEL);
        if (!cmd)
            break;

        ret = iscsit_handle_logout_cmd(conn, cmd, (unsigned char *)hdr);
        if (ret > 0)
            wait_for_completion_timeout(&conn->conn_logout_comp,
                                        SECONDS_FOR_LOGOUT_COMP *
                                        HZ);
        break;
    case ISCSI_OP_TEXT:
        cmd = isert_allocate_cmd(conn, GFP_KERNEL);
        if (!cmd)
            break;

        isert_cmd = iscsit_priv_cmd(cmd);
        ret = isert_handle_text_cmd(isert_conn, isert_cmd, cmd,
                                    rx_desc, (struct iscsi_text *)hdr);
        break;
    default:
        pr_err("Got unknown iSCSI OpCode: 0x%02x\n", opcode);
        dump_stack();
        break;
    }

    return ret;
}

static void
isert_rx_do_work(struct iser_rx_desc *rx_desc, struct isert_conn *isert_conn)
{
    struct iser_hdr *iser_hdr = &rx_desc->iser_header;
    uint64_t read_va = 0, write_va = 0;
    uint32_t read_stag = 0, write_stag = 0;
    int rc;

    switch (iser_hdr->flags & 0xF0) {
    case ISCSI_CTRL:
        if (iser_hdr->flags & ISER_RSV) {
            read_stag = be32_to_cpu(iser_hdr->read_stag);
            read_va = be64_to_cpu(iser_hdr->read_va);
            pr_debug("ISER_RSV: read_stag: 0x%08x read_va: 0x%16llx\n",
                     read_stag, (unsigned long long)read_va);
        }
        if (iser_hdr->flags & ISER_WSV) {
            write_stag = be32_to_cpu(iser_hdr->write_stag);
            write_va = be64_to_cpu(iser_hdr->write_va);
            pr_debug("ISER_WSV: write_stag: 0x%08x write_va: 0x%16llx\n",
                     write_stag, (unsigned long long)write_va);
        }

        pr_debug("ISER ISCSI_CTRL PDU\n");
        break;
    case ISER_HELLO:
        pr_err("iSER Hello message\n");
        break;
    default:
        pr_warn("Unknown iSER hdr flags: 0x%02x\n", iser_hdr->flags);
        break;
    }

    rc = isert_rx_opcode(isert_conn, rx_desc,
                         read_stag, read_va, write_stag, write_va);
}

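/*
 * Dispatch one completed receive. The login buffer is special-cased (it
 * is posted separately and is not part of the RX ring); for ring
 * descriptors, fresh recv WRs are re-posted in batches of at least
 * ISERT_MIN_POSTED_RX once enough have been consumed.
 */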
static void
isert_rx_completion(struct iser_rx_desc *desc, struct isert_conn *isert_conn,
                    unsigned long xfer_len)
{
    struct ib_device *ib_dev = isert_conn->conn_cm_id->device;
    struct iscsi_hdr *hdr;
    u64 rx_dma;
    int rx_buflen, outstanding;

    if ((char *)desc == isert_conn->login_req_buf) {
        rx_dma = isert_conn->login_req_dma;
        rx_buflen = ISER_RX_LOGIN_SIZE;
        pr_debug("ISER login_buf: Using rx_dma: 0x%llx, rx_buflen: %d\n",
                 rx_dma, rx_buflen);
    } else {
        rx_dma = desc->dma_addr;
        rx_buflen = ISER_RX_PAYLOAD_SIZE;
        pr_debug("ISER req_buf: Using rx_dma: 0x%llx, rx_buflen: %d\n",
                 rx_dma, rx_buflen);
    }

    ib_dma_sync_single_for_cpu(ib_dev, rx_dma, rx_buflen, DMA_FROM_DEVICE);

    hdr = &desc->iscsi_header;
    pr_debug("iSCSI opcode: 0x%02x, ITT: 0x%08x, flags: 0x%02x dlen: %d\n",
             hdr->opcode, hdr->itt, hdr->flags,
             (int)(xfer_len - ISER_HEADERS_LEN));

    if ((char *)desc == isert_conn->login_req_buf)
        isert_rx_login_req(desc, xfer_len - ISER_HEADERS_LEN,
                           isert_conn);
    else
        isert_rx_do_work(desc, isert_conn);

    ib_dma_sync_single_for_device(ib_dev, rx_dma, rx_buflen,
                                  DMA_FROM_DEVICE);

    isert_conn->post_recv_buf_count--;
    pr_debug("iSERT: Decremented post_recv_buf_count: %d\n",
             isert_conn->post_recv_buf_count);

    if ((char *)desc == isert_conn->login_req_buf)
        return;

    outstanding = isert_conn->post_recv_buf_count;
    if (outstanding + ISERT_MIN_POSTED_RX <= ISERT_QP_MAX_RECV_DTOS) {
        int err, count = min(ISERT_QP_MAX_RECV_DTOS - outstanding,
                             ISERT_MIN_POSTED_RX);
        err = isert_post_recv(isert_conn, count);
        if (err) {
            pr_err("isert_post_recv() count: %d failed, %d\n",
                   count, err);
        }
    }
}

static void
isert_unmap_cmd(struct isert_cmd *isert_cmd, struct isert_conn *isert_conn)
{
    struct isert_rdma_wr *wr = &isert_cmd->rdma_wr;
    struct ib_device *ib_dev = isert_conn->conn_cm_id->device;

    pr_debug("isert_unmap_cmd >>>>>>>>>>>>>>>>>>>>>>>\n");

    if (wr->sge) {
        ib_dma_unmap_sg(ib_dev, wr->sge, wr->num_sge, DMA_TO_DEVICE);
        wr->sge = NULL;
    }

    kfree(wr->send_wr);
    wr->send_wr = NULL;

    kfree(isert_cmd->ib_sge);
    isert_cmd->ib_sge = NULL;
}

static void
isert_put_cmd(struct isert_cmd *isert_cmd)
{
    struct iscsi_cmd *cmd = isert_cmd->iscsi_cmd;
    struct isert_conn *isert_conn = isert_cmd->conn;
    struct iscsi_conn *conn = isert_conn->conn;

    pr_debug("Entering isert_put_cmd: %p\n", isert_cmd);

    switch (cmd->iscsi_opcode) {
    case ISCSI_OP_SCSI_CMD:
        spin_lock_bh(&conn->cmd_lock);
        if (!list_empty(&cmd->i_conn_node))
            list_del(&cmd->i_conn_node);
        spin_unlock_bh(&conn->cmd_lock);

        if (cmd->data_direction == DMA_TO_DEVICE)
            iscsit_stop_dataout_timer(cmd);

        isert_unmap_cmd(isert_cmd, isert_conn);
        transport_generic_free_cmd(&cmd->se_cmd, 0);
        break;
    case ISCSI_OP_SCSI_TMFUNC:
        spin_lock_bh(&conn->cmd_lock);
        if (!list_empty(&cmd->i_conn_node))
            list_del(&cmd->i_conn_node);
        spin_unlock_bh(&conn->cmd_lock);

        transport_generic_free_cmd(&cmd->se_cmd, 0);
        break;
    case ISCSI_OP_REJECT:
    case ISCSI_OP_NOOP_OUT:
    case ISCSI_OP_TEXT:
        spin_lock_bh(&conn->cmd_lock);
        if (!list_empty(&cmd->i_conn_node))
            list_del(&cmd->i_conn_node);
        spin_unlock_bh(&conn->cmd_lock);

        /*
         * Handle special case for REJECT when iscsi_add_reject*() has
         * overwritten the original iscsi_opcode assignment, and the
         * associated cmd->se_cmd needs to be released.
         */
        if (cmd->se_cmd.se_tfo != NULL) {
            pr_debug("Calling transport_generic_free_cmd from"
                     " isert_put_cmd for 0x%02x\n",
                     cmd->iscsi_opcode);
            transport_generic_free_cmd(&cmd->se_cmd, 0);
            break;
        }
        /*
         * Fall-through
         */
    default:
        iscsit_release_cmd(cmd);
        break;
    }
}

static void
isert_unmap_tx_desc(struct iser_tx_desc *tx_desc, struct ib_device *ib_dev)
{
    if (tx_desc->dma_addr != 0) {
        pr_debug("Calling ib_dma_unmap_single for tx_desc->dma_addr\n");
        ib_dma_unmap_single(ib_dev, tx_desc->dma_addr,
                            ISER_HEADERS_LEN, DMA_TO_DEVICE);
        tx_desc->dma_addr = 0;
    }
}

static void
isert_completion_put(struct iser_tx_desc *tx_desc, struct isert_cmd *isert_cmd,
                     struct ib_device *ib_dev)
{
    if (isert_cmd->pdu_buf_dma != 0) {
        pr_debug("Calling ib_dma_unmap_single for isert_cmd->pdu_buf_dma\n");
        ib_dma_unmap_single(ib_dev, isert_cmd->pdu_buf_dma,
                            isert_cmd->pdu_buf_len, DMA_TO_DEVICE);
        isert_cmd->pdu_buf_dma = 0;
    }

    isert_unmap_tx_desc(tx_desc, ib_dev);
    isert_put_cmd(isert_cmd);
}

static void
isert_completion_rdma_read(struct iser_tx_desc *tx_desc,
                           struct isert_cmd *isert_cmd)
{
    struct isert_rdma_wr *wr = &isert_cmd->rdma_wr;
    struct iscsi_cmd *cmd = isert_cmd->iscsi_cmd;
    struct se_cmd *se_cmd = &cmd->se_cmd;
    struct ib_device *ib_dev = isert_cmd->conn->conn_cm_id->device;

    iscsit_stop_dataout_timer(cmd);

    if (wr->sge) {
        pr_debug("isert_do_rdma_read_comp: Unmapping wr->sge from t_data_sg\n");
        ib_dma_unmap_sg(ib_dev, wr->sge, wr->num_sge, DMA_TO_DEVICE);
        wr->sge = NULL;
    }

    if (isert_cmd->ib_sge) {
        pr_debug("isert_do_rdma_read_comp: Freeing isert_cmd->ib_sge\n");
        kfree(isert_cmd->ib_sge);
        isert_cmd->ib_sge = NULL;
    }

    cmd->write_data_done = se_cmd->data_length;

    pr_debug("isert_do_rdma_read_comp, calling target_execute_cmd\n");
    spin_lock_bh(&cmd->istate_lock);
    cmd->cmd_flags |= ICF_GOT_LAST_DATAOUT;
    cmd->i_state = ISTATE_RECEIVED_LAST_DATAOUT;
    spin_unlock_bh(&cmd->istate_lock);

    target_execute_cmd(se_cmd);
}

static void
isert_do_control_comp(struct work_struct *work)
{
    struct isert_cmd *isert_cmd = container_of(work,
                struct isert_cmd, comp_work);
    struct isert_conn *isert_conn = isert_cmd->conn;
    struct ib_device *ib_dev = isert_conn->conn_cm_id->device;
    struct iscsi_cmd *cmd = isert_cmd->iscsi_cmd;

    switch (cmd->i_state) {
    case ISTATE_SEND_TASKMGTRSP:
        pr_debug("Calling iscsit_tmr_post_handler >>>>>>>>>>>>>>>>>\n");

        atomic_dec(&isert_conn->post_send_buf_count);
        iscsit_tmr_post_handler(cmd, cmd->conn);

        cmd->i_state = ISTATE_SENT_STATUS;
        isert_completion_put(&isert_cmd->tx_desc, isert_cmd, ib_dev);
        break;
    case ISTATE_SEND_REJECT:
        pr_debug("Got isert_do_control_comp ISTATE_SEND_REJECT: >>>\n");
        atomic_dec(&isert_conn->post_send_buf_count);

        cmd->i_state = ISTATE_SENT_STATUS;
        isert_completion_put(&isert_cmd->tx_desc, isert_cmd, ib_dev);
        break;
    case ISTATE_SEND_LOGOUTRSP:
        pr_debug("Calling iscsit_logout_post_handler >>>>>>>>>>>>>>\n");
        /*
         * Call atomic_dec(&isert_conn->post_send_buf_count)
         * from isert_free_conn()
         */
        isert_conn->logout_posted = true;
        iscsit_logout_post_handler(cmd, cmd->conn);
        break;
    case ISTATE_SEND_TEXTRSP:
        atomic_dec(&isert_conn->post_send_buf_count);
        cmd->i_state = ISTATE_SENT_STATUS;
        isert_completion_put(&isert_cmd->tx_desc, isert_cmd, ib_dev);
        break;
    default:
        pr_err("Unknown do_control_comp i_state %d\n", cmd->i_state);
        dump_stack();
        break;
    }
}

static void
isert_response_completion(struct iser_tx_desc *tx_desc,
                          struct isert_cmd *isert_cmd,
                          struct isert_conn *isert_conn,
                          struct ib_device *ib_dev)
{
    struct iscsi_cmd *cmd = isert_cmd->iscsi_cmd;

    if (cmd->i_state == ISTATE_SEND_TASKMGTRSP ||
        cmd->i_state == ISTATE_SEND_LOGOUTRSP ||
        cmd->i_state == ISTATE_SEND_REJECT ||
        cmd->i_state == ISTATE_SEND_TEXTRSP) {
        isert_unmap_tx_desc(tx_desc, ib_dev);

        INIT_WORK(&isert_cmd->comp_work, isert_do_control_comp);
        queue_work(isert_comp_wq, &isert_cmd->comp_work);
        return;
    }
    atomic_dec(&isert_conn->post_send_buf_count);

    cmd->i_state = ISTATE_SENT_STATUS;
    isert_completion_put(tx_desc, isert_cmd, ib_dev);
}

static void
isert_send_completion(struct iser_tx_desc *tx_desc,
                      struct isert_conn *isert_conn)
{
    struct ib_device *ib_dev = isert_conn->conn_cm_id->device;
    struct isert_cmd *isert_cmd = tx_desc->isert_cmd;
    struct isert_rdma_wr *wr;

    if (!isert_cmd) {
        atomic_dec(&isert_conn->post_send_buf_count);
        isert_unmap_tx_desc(tx_desc, ib_dev);
        return;
    }
    wr = &isert_cmd->rdma_wr;

    switch (wr->iser_ib_op) {
    case ISER_IB_RECV:
        pr_err("isert_send_completion: Got ISER_IB_RECV\n");
        dump_stack();
        break;
    case ISER_IB_SEND:
        pr_debug("isert_send_completion: Got ISER_IB_SEND\n");
        isert_response_completion(tx_desc, isert_cmd,
                                  isert_conn, ib_dev);
        break;
    case ISER_IB_RDMA_WRITE:
        pr_err("isert_send_completion: Got ISER_IB_RDMA_WRITE\n");
        dump_stack();
        break;
    case ISER_IB_RDMA_READ:
        pr_debug("isert_send_completion: Got ISER_IB_RDMA_READ:\n");

        atomic_dec(&isert_conn->post_send_buf_count);
        isert_completion_rdma_read(tx_desc, isert_cmd);
        break;
    default:
        pr_err("Unknown wr->iser_ib_op: 0x%02x\n", wr->iser_ib_op);
        dump_stack();
        break;
    }
}

static void
isert_cq_comp_err(struct iser_tx_desc *tx_desc, struct isert_conn *isert_conn)
{
    struct ib_device *ib_dev = isert_conn->conn_cm_id->device;

    if (tx_desc) {
        struct isert_cmd *isert_cmd = tx_desc->isert_cmd;

        if (!isert_cmd)
            isert_unmap_tx_desc(tx_desc, ib_dev);
        else
            isert_completion_put(tx_desc, isert_cmd, ib_dev);
    }

    if (isert_conn->post_recv_buf_count == 0 &&
        atomic_read(&isert_conn->post_send_buf_count) == 0) {
        pr_debug("isert_cq_comp_err >>>>>>>>>>>>>>>>>>>>>>>>>>>>>>\n");
        pr_debug("Calling wake_up from isert_cq_comp_err\n");

        mutex_lock(&isert_conn->conn_mutex);
        if (isert_conn->state != ISER_CONN_DOWN)
            isert_conn->state = ISER_CONN_TERMINATING;
        mutex_unlock(&isert_conn->conn_mutex);

        wake_up(&isert_conn->conn_wait_comp_err);
    }
}

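/*
 * Completion processing runs in workqueue context: the CQ callbacks below
 * only queue cq_tx_work/cq_rx_work, and the work functions drain the CQ
 * with ib_poll_cq() before re-arming notification via ib_req_notify_cq().
 */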
static void
isert_cq_tx_work(struct work_struct *work)
{
    struct isert_cq_desc *cq_desc = container_of(work,
                struct isert_cq_desc, cq_tx_work);
    struct isert_device *device = cq_desc->device;
    int cq_index = cq_desc->cq_index;
    struct ib_cq *tx_cq = device->dev_tx_cq[cq_index];
    struct isert_conn *isert_conn;
    struct iser_tx_desc *tx_desc;
    struct ib_wc wc;

    while (ib_poll_cq(tx_cq, 1, &wc) == 1) {
        tx_desc = (struct iser_tx_desc *)(unsigned long)wc.wr_id;
        isert_conn = wc.qp->qp_context;

        if (wc.status == IB_WC_SUCCESS) {
            isert_send_completion(tx_desc, isert_conn);
        } else {
            pr_debug("TX wc.status != IB_WC_SUCCESS >>>>>>>>>>>>>>\n");
            pr_debug("TX wc.status: 0x%08x\n", wc.status);
            pr_debug("TX wc.vendor_err: 0x%08x\n", wc.vendor_err);
            atomic_dec(&isert_conn->post_send_buf_count);
            isert_cq_comp_err(tx_desc, isert_conn);
        }
    }

    ib_req_notify_cq(tx_cq, IB_CQ_NEXT_COMP);
}

static void
isert_cq_tx_callback(struct ib_cq *cq, void *context)
{
    struct isert_cq_desc *cq_desc = (struct isert_cq_desc *)context;

    INIT_WORK(&cq_desc->cq_tx_work, isert_cq_tx_work);
    queue_work(isert_comp_wq, &cq_desc->cq_tx_work);
}

static void
isert_cq_rx_work(struct work_struct *work)
{
    struct isert_cq_desc *cq_desc = container_of(work,
                struct isert_cq_desc, cq_rx_work);
    struct isert_device *device = cq_desc->device;
    int cq_index = cq_desc->cq_index;
    struct ib_cq *rx_cq = device->dev_rx_cq[cq_index];
    struct isert_conn *isert_conn;
    struct iser_rx_desc *rx_desc;
    struct ib_wc wc;
    unsigned long xfer_len;

    while (ib_poll_cq(rx_cq, 1, &wc) == 1) {
        rx_desc = (struct iser_rx_desc *)(unsigned long)wc.wr_id;
        isert_conn = wc.qp->qp_context;

        if (wc.status == IB_WC_SUCCESS) {
            xfer_len = (unsigned long)wc.byte_len;
            isert_rx_completion(rx_desc, isert_conn, xfer_len);
        } else {
            pr_debug("RX wc.status != IB_WC_SUCCESS >>>>>>>>>>>>>>\n");
            if (wc.status != IB_WC_WR_FLUSH_ERR) {
                pr_debug("RX wc.status: 0x%08x\n", wc.status);
                pr_debug("RX wc.vendor_err: 0x%08x\n",
                         wc.vendor_err);
            }
            isert_conn->post_recv_buf_count--;
            isert_cq_comp_err(NULL, isert_conn);
        }
    }

    ib_req_notify_cq(rx_cq, IB_CQ_NEXT_COMP);
}

static void
isert_cq_rx_callback(struct ib_cq *cq, void *context)
{
    struct isert_cq_desc *cq_desc = (struct isert_cq_desc *)context;

    INIT_WORK(&cq_desc->cq_rx_work, isert_cq_rx_work);
    queue_work(isert_rx_wq, &cq_desc->cq_rx_work);
}

static int
isert_post_response(struct isert_conn *isert_conn, struct isert_cmd *isert_cmd)
{
    struct ib_send_wr *wr_failed;
    int ret;

    atomic_inc(&isert_conn->post_send_buf_count);

    ret = ib_post_send(isert_conn->conn_qp, &isert_cmd->tx_desc.send_wr,
                       &wr_failed);
    if (ret) {
        pr_err("ib_post_send failed with %d\n", ret);
        atomic_dec(&isert_conn->post_send_buf_count);
        return ret;
    }
    return ret;
}

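/*
 * SCSI responses that carry sense data use a second SGE: the sense buffer
 * is prefixed with its big-endian length, padded to a 4-byte boundary,
 * DMA-mapped as pdu_buf_dma, and sent together with the iSCSI header in a
 * single IB_WR_SEND.
 */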
1611static int
1612isert_put_response(struct iscsi_conn *conn, struct iscsi_cmd *cmd)
1613{
Nicholas Bellingerd703ce22013-08-17 14:27:56 -07001614 struct isert_cmd *isert_cmd = iscsit_priv_cmd(cmd);
Nicholas Bellingerb8d26b32013-03-07 00:56:19 -08001615 struct isert_conn *isert_conn = (struct isert_conn *)conn->context;
1616 struct ib_send_wr *send_wr = &isert_cmd->tx_desc.send_wr;
1617 struct iscsi_scsi_rsp *hdr = (struct iscsi_scsi_rsp *)
1618 &isert_cmd->tx_desc.iscsi_header;
1619
1620 isert_create_send_desc(isert_conn, isert_cmd, &isert_cmd->tx_desc);
1621 iscsit_build_rsp_pdu(cmd, conn, true, hdr);
1622 isert_init_tx_hdrs(isert_conn, &isert_cmd->tx_desc);
1623 /*
1624 * Attach SENSE DATA payload to iSCSI Response PDU
1625 */
1626 if (cmd->se_cmd.sense_buffer &&
1627 ((cmd->se_cmd.se_cmd_flags & SCF_TRANSPORT_TASK_SENSE) ||
1628 (cmd->se_cmd.se_cmd_flags & SCF_EMULATED_TASK_SENSE))) {
1629 struct ib_device *ib_dev = isert_conn->conn_cm_id->device;
1630 struct ib_sge *tx_dsg = &isert_cmd->tx_desc.tx_sg[1];
Nicholas Bellingerdbbc5d12013-07-03 19:39:37 -07001631 u32 padding, pdu_len;
Nicholas Bellingerb8d26b32013-03-07 00:56:19 -08001632
1633 put_unaligned_be16(cmd->se_cmd.scsi_sense_length,
1634 cmd->sense_buffer);
1635 cmd->se_cmd.scsi_sense_length += sizeof(__be16);
1636
1637 padding = -(cmd->se_cmd.scsi_sense_length) & 3;
1638 hton24(hdr->dlength, (u32)cmd->se_cmd.scsi_sense_length);
Nicholas Bellingerdbbc5d12013-07-03 19:39:37 -07001639 pdu_len = cmd->se_cmd.scsi_sense_length + padding;
Nicholas Bellingerb8d26b32013-03-07 00:56:19 -08001640
Nicholas Bellingerdbbc5d12013-07-03 19:39:37 -07001641 isert_cmd->pdu_buf_dma = ib_dma_map_single(ib_dev,
1642 (void *)cmd->sense_buffer, pdu_len,
Nicholas Bellingerb8d26b32013-03-07 00:56:19 -08001643 DMA_TO_DEVICE);
1644
Nicholas Bellingerdbbc5d12013-07-03 19:39:37 -07001645 isert_cmd->pdu_buf_len = pdu_len;
1646 tx_dsg->addr = isert_cmd->pdu_buf_dma;
1647 tx_dsg->length = pdu_len;
Nicholas Bellingerb8d26b32013-03-07 00:56:19 -08001648 tx_dsg->lkey = isert_conn->conn_mr->lkey;
1649 isert_cmd->tx_desc.num_sge = 2;
1650 }
1651
1652 isert_init_send_wr(isert_cmd, send_wr);
1653
1654 pr_debug("Posting SCSI Response IB_WR_SEND >>>>>>>>>>>>>>>>>>>>>>\n");
1655
1656 return isert_post_response(isert_conn, isert_cmd);
1657}
1658
static int
isert_put_nopin(struct iscsi_cmd *cmd, struct iscsi_conn *conn,
		bool nopout_response)
{
	struct isert_cmd *isert_cmd = iscsit_priv_cmd(cmd);
	struct isert_conn *isert_conn = (struct isert_conn *)conn->context;
	struct ib_send_wr *send_wr = &isert_cmd->tx_desc.send_wr;

	isert_create_send_desc(isert_conn, isert_cmd, &isert_cmd->tx_desc);
	iscsit_build_nopin_rsp(cmd, conn, (struct iscsi_nopin *)
			       &isert_cmd->tx_desc.iscsi_header,
			       nopout_response);
	isert_init_tx_hdrs(isert_conn, &isert_cmd->tx_desc);
	isert_init_send_wr(isert_cmd, send_wr);

	pr_debug("Posting NOPIN Response IB_WR_SEND >>>>>>>>>>>>>>>>>>>>>>\n");

	return isert_post_response(isert_conn, isert_cmd);
}

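/*
 * Build and post a Logout Response PDU.
 */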
static int
isert_put_logout_rsp(struct iscsi_cmd *cmd, struct iscsi_conn *conn)
{
	struct isert_cmd *isert_cmd = iscsit_priv_cmd(cmd);
	struct isert_conn *isert_conn = (struct isert_conn *)conn->context;
	struct ib_send_wr *send_wr = &isert_cmd->tx_desc.send_wr;

	isert_create_send_desc(isert_conn, isert_cmd, &isert_cmd->tx_desc);
	iscsit_build_logout_rsp(cmd, conn, (struct iscsi_logout_rsp *)
				&isert_cmd->tx_desc.iscsi_header);
	isert_init_tx_hdrs(isert_conn, &isert_cmd->tx_desc);
	isert_init_send_wr(isert_cmd, send_wr);

	pr_debug("Posting Logout Response IB_WR_SEND >>>>>>>>>>>>>>>>>>>>>>\n");

	return isert_post_response(isert_conn, isert_cmd);
}

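/*
 * Build and post a Task Management Response PDU.
 */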
static int
isert_put_tm_rsp(struct iscsi_cmd *cmd, struct iscsi_conn *conn)
{
	struct isert_cmd *isert_cmd = iscsit_priv_cmd(cmd);
	struct isert_conn *isert_conn = (struct isert_conn *)conn->context;
	struct ib_send_wr *send_wr = &isert_cmd->tx_desc.send_wr;

	isert_create_send_desc(isert_conn, isert_cmd, &isert_cmd->tx_desc);
	iscsit_build_task_mgt_rsp(cmd, conn, (struct iscsi_tm_rsp *)
				  &isert_cmd->tx_desc.iscsi_header);
	isert_init_tx_hdrs(isert_conn, &isert_cmd->tx_desc);
	isert_init_send_wr(isert_cmd, send_wr);

	pr_debug("Posting Task Management Response IB_WR_SEND >>>>>>>>>>>>>>>>>>>>>>\n");

	return isert_post_response(isert_conn, isert_cmd);
}

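/*
 * Build and post a Reject PDU, attaching the header of the rejected
 * PDU from cmd->buf_ptr as the ISCSI_HDR_LEN data segment.
 */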
static int
isert_put_reject(struct iscsi_cmd *cmd, struct iscsi_conn *conn)
{
	struct isert_cmd *isert_cmd = iscsit_priv_cmd(cmd);
	struct isert_conn *isert_conn = (struct isert_conn *)conn->context;
	struct ib_send_wr *send_wr = &isert_cmd->tx_desc.send_wr;
	struct ib_device *ib_dev = isert_conn->conn_cm_id->device;
	struct ib_sge *tx_dsg = &isert_cmd->tx_desc.tx_sg[1];
	struct iscsi_reject *hdr =
		(struct iscsi_reject *)&isert_cmd->tx_desc.iscsi_header;

	isert_create_send_desc(isert_conn, isert_cmd, &isert_cmd->tx_desc);
	iscsit_build_reject(cmd, conn, hdr);
	isert_init_tx_hdrs(isert_conn, &isert_cmd->tx_desc);

	hton24(hdr->dlength, ISCSI_HDR_LEN);
	isert_cmd->pdu_buf_dma = ib_dma_map_single(ib_dev,
			(void *)cmd->buf_ptr, ISCSI_HDR_LEN,
			DMA_TO_DEVICE);
	isert_cmd->pdu_buf_len = ISCSI_HDR_LEN;
	tx_dsg->addr = isert_cmd->pdu_buf_dma;
	tx_dsg->length = ISCSI_HDR_LEN;
	tx_dsg->lkey = isert_conn->conn_mr->lkey;
	isert_cmd->tx_desc.num_sge = 2;

	isert_init_send_wr(isert_cmd, send_wr);

	pr_debug("Posting Reject IB_WR_SEND >>>>>>>>>>>>>>>>>>>>>>\n");

	return isert_post_response(isert_conn, isert_cmd);
}

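/*
 * Build and post a Text Response PDU.  iscsit_build_text_rsp() returns
 * the text payload length; a non-zero payload in cmd->buf_ptr is
 * DMA-mapped and attached as a second SGE.
 */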
static int
isert_put_text_rsp(struct iscsi_cmd *cmd, struct iscsi_conn *conn)
{
	struct isert_cmd *isert_cmd = iscsit_priv_cmd(cmd);
	struct isert_conn *isert_conn = (struct isert_conn *)conn->context;
	struct ib_send_wr *send_wr = &isert_cmd->tx_desc.send_wr;
	struct iscsi_text_rsp *hdr =
		(struct iscsi_text_rsp *)&isert_cmd->tx_desc.iscsi_header;
	u32 txt_rsp_len;
	int rc;

	isert_create_send_desc(isert_conn, isert_cmd, &isert_cmd->tx_desc);
	rc = iscsit_build_text_rsp(cmd, conn, hdr);
	if (rc < 0)
		return rc;

	txt_rsp_len = rc;
	isert_init_tx_hdrs(isert_conn, &isert_cmd->tx_desc);

	if (txt_rsp_len) {
		struct ib_device *ib_dev = isert_conn->conn_cm_id->device;
		struct ib_sge *tx_dsg = &isert_cmd->tx_desc.tx_sg[1];
		void *txt_rsp_buf = cmd->buf_ptr;

		isert_cmd->pdu_buf_dma = ib_dma_map_single(ib_dev,
				txt_rsp_buf, txt_rsp_len, DMA_TO_DEVICE);

		isert_cmd->pdu_buf_len = txt_rsp_len;
		tx_dsg->addr = isert_cmd->pdu_buf_dma;
		tx_dsg->length = txt_rsp_len;
		tx_dsg->lkey = isert_conn->conn_mr->lkey;
		isert_cmd->tx_desc.num_sge = 2;
	}
	isert_init_send_wr(isert_cmd, send_wr);

	pr_debug("Posting Text Response IB_WR_SEND >>>>>>>>>>>>>>>>>>>>>>\n");

	return isert_post_response(isert_conn, isert_cmd);
}

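/*
 * Fill a single RDMA work request with up to max_sge entries from the
 * command's TCM scatterlist, starting at the given byte offset into the
 * payload.  Returns the number of SGEs consumed so the caller can
 * advance its ib_sge cursor.
 */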
static int
isert_build_rdma_wr(struct isert_conn *isert_conn, struct isert_cmd *isert_cmd,
		    struct ib_sge *ib_sge, struct ib_send_wr *send_wr,
		    u32 data_left, u32 offset)
{
	struct iscsi_cmd *cmd = isert_cmd->iscsi_cmd;
	struct scatterlist *sg_start, *tmp_sg;
	struct ib_device *ib_dev = isert_conn->conn_cm_id->device;
	u32 sg_off, page_off;
	int i = 0, sg_nents;

	sg_off = offset / PAGE_SIZE;
	sg_start = &cmd->se_cmd.t_data_sg[sg_off];
	sg_nents = min(cmd->se_cmd.t_data_nents - sg_off, isert_conn->max_sge);
	page_off = offset % PAGE_SIZE;

	send_wr->sg_list = ib_sge;
	send_wr->num_sge = sg_nents;
	send_wr->wr_id = (unsigned long)&isert_cmd->tx_desc;
	/*
	 * Map each TCM scatterlist entry into an ib_sge dma_addr/length pair.
	 */
	for_each_sg(sg_start, tmp_sg, sg_nents, i) {
		pr_debug("ISER RDMA from SGL dma_addr: 0x%16llx dma_len: %u, page_off: %u\n",
			 (unsigned long long)tmp_sg->dma_address,
			 tmp_sg->length, page_off);

		ib_sge->addr = ib_sg_dma_address(ib_dev, tmp_sg) + page_off;
		ib_sge->length = min_t(u32, data_left,
				       ib_sg_dma_len(ib_dev, tmp_sg) - page_off);
		ib_sge->lkey = isert_conn->conn_mr->lkey;

		pr_debug("RDMA ib_sge: addr: 0x%16llx length: %u\n",
			 ib_sge->addr, ib_sge->length);
		page_off = 0;
		data_left -= ib_sge->length;
		ib_sge++;
		pr_debug("Incrementing ib_sge pointer to %p\n", ib_sge);
	}

	pr_debug("Set outgoing sg_list: %p num_sg: %u from TCM SGLs\n",
		 send_wr->sg_list, send_wr->num_sge);

	return sg_nents;
}

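/*
 * queue_data_in callback: DMA-map the command's scatterlist, carve it
 * into a chain of IB_WR_RDMA_WRITE work requests, and post the chain
 * with the SCSI Response PDU linked as the final IB_WR_SEND.
 */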
static int
isert_put_datain(struct iscsi_conn *conn, struct iscsi_cmd *cmd)
{
	struct se_cmd *se_cmd = &cmd->se_cmd;
	struct isert_cmd *isert_cmd = iscsit_priv_cmd(cmd);
	struct isert_rdma_wr *wr = &isert_cmd->rdma_wr;
	struct isert_conn *isert_conn = (struct isert_conn *)conn->context;
	struct ib_send_wr *wr_failed, *send_wr;
	struct ib_device *ib_dev = isert_conn->conn_cm_id->device;
	struct ib_sge *ib_sge;
	struct scatterlist *sg;
	u32 offset = 0, data_len, data_left, rdma_write_max;
	int rc, ret = 0, count, sg_nents, i, ib_sge_cnt;

	pr_debug("RDMA_WRITE: data_length: %u\n", se_cmd->data_length);

	sg = &se_cmd->t_data_sg[0];
	sg_nents = se_cmd->t_data_nents;

	count = ib_dma_map_sg(ib_dev, sg, sg_nents, DMA_TO_DEVICE);
	if (unlikely(!count)) {
		pr_err("Unable to map put_datain SGs\n");
		return -EINVAL;
	}
	wr->sge = sg;
	wr->num_sge = sg_nents;
	pr_debug("Mapped IB count: %u sg: %p sg_nents: %u for RDMA_WRITE\n",
		 count, sg, sg_nents);

	ib_sge = kzalloc(sizeof(struct ib_sge) * sg_nents, GFP_KERNEL);
	if (!ib_sge) {
		pr_warn("Unable to allocate datain ib_sge\n");
		ret = -ENOMEM;
		goto unmap_sg;
	}
	isert_cmd->ib_sge = ib_sge;

	pr_debug("Allocated ib_sge: %p from t_data_ents: %d for RDMA_WRITE\n",
		 ib_sge, se_cmd->t_data_nents);

	wr->send_wr_num = DIV_ROUND_UP(sg_nents, isert_conn->max_sge);
	wr->send_wr = kzalloc(sizeof(struct ib_send_wr) * wr->send_wr_num,
			      GFP_KERNEL);
	if (!wr->send_wr) {
		pr_err("Unable to allocate wr->send_wr\n");
		ret = -ENOMEM;
		goto unmap_sg;
	}
	pr_debug("Allocated wr->send_wr: %p wr->send_wr_num: %u\n",
		 wr->send_wr, wr->send_wr_num);

	iscsit_increment_maxcmdsn(cmd, conn->sess);
	cmd->stat_sn = conn->stat_sn++;

	wr->isert_cmd = isert_cmd;
	rdma_write_max = isert_conn->max_sge * PAGE_SIZE;
	data_left = se_cmd->data_length;

	for (i = 0; i < wr->send_wr_num; i++) {
		send_wr = &isert_cmd->rdma_wr.send_wr[i];
		data_len = min(data_left, rdma_write_max);

		send_wr->opcode = IB_WR_RDMA_WRITE;
		send_wr->send_flags = 0;
		send_wr->wr.rdma.remote_addr = isert_cmd->read_va + offset;
		send_wr->wr.rdma.rkey = isert_cmd->read_stag;

		ib_sge_cnt = isert_build_rdma_wr(isert_conn, isert_cmd, ib_sge,
						 send_wr, data_len, offset);
		ib_sge += ib_sge_cnt;

		if (i + 1 == wr->send_wr_num)
			send_wr->next = &isert_cmd->tx_desc.send_wr;
		else
			send_wr->next = &wr->send_wr[i + 1];

		offset += data_len;
		data_left -= data_len;
	}
	/*
	 * Build isert_cmd->tx_desc for the iSCSI response PDU and attach it
	 * as the final work request in the chain above.
	 */
	isert_create_send_desc(isert_conn, isert_cmd, &isert_cmd->tx_desc);
	iscsit_build_rsp_pdu(cmd, conn, false, (struct iscsi_scsi_rsp *)
			     &isert_cmd->tx_desc.iscsi_header);
	isert_init_tx_hdrs(isert_conn, &isert_cmd->tx_desc);
	isert_init_send_wr(isert_cmd, &isert_cmd->tx_desc.send_wr);

	atomic_inc(&isert_conn->post_send_buf_count);

	rc = ib_post_send(isert_conn->conn_qp, wr->send_wr, &wr_failed);
	if (rc) {
		pr_warn("ib_post_send() failed for IB_WR_RDMA_WRITE\n");
		atomic_dec(&isert_conn->post_send_buf_count);
	}
	pr_debug("Posted RDMA_WRITE + Response for iSER Data READ\n");
	return 1;

unmap_sg:
	ib_dma_unmap_sg(ib_dev, sg, sg_nents, DMA_TO_DEVICE);
	return ret;
}

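/*
 * get_dataout callback: DMA-map the not-yet-received WRITE payload from
 * write_data_done onward and post a chain of IB_WR_RDMA_READ work
 * requests to pull it from the initiator; only the last WR is signaled.
 */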
static int
isert_get_dataout(struct iscsi_conn *conn, struct iscsi_cmd *cmd, bool recovery)
{
	struct se_cmd *se_cmd = &cmd->se_cmd;
	struct isert_cmd *isert_cmd = iscsit_priv_cmd(cmd);
	struct isert_rdma_wr *wr = &isert_cmd->rdma_wr;
	struct isert_conn *isert_conn = (struct isert_conn *)conn->context;
	struct ib_send_wr *wr_failed, *send_wr;
	struct ib_sge *ib_sge;
	struct ib_device *ib_dev = isert_conn->conn_cm_id->device;
	struct scatterlist *sg_start;
	u32 sg_off, sg_nents, page_off, va_offset = 0;
	u32 offset = 0, data_len, data_left, rdma_write_max;
	int rc, ret = 0, count, i, ib_sge_cnt;

	pr_debug("RDMA_READ: data_length: %u write_data_done: %u\n",
		 se_cmd->data_length, cmd->write_data_done);

	sg_off = cmd->write_data_done / PAGE_SIZE;
	sg_start = &cmd->se_cmd.t_data_sg[sg_off];
	page_off = cmd->write_data_done % PAGE_SIZE;

	pr_debug("RDMA_READ: sg_off: %d, sg_start: %p page_off: %d\n",
		 sg_off, sg_start, page_off);

	data_left = se_cmd->data_length - cmd->write_data_done;
	sg_nents = se_cmd->t_data_nents - sg_off;

	pr_debug("RDMA_READ: data_left: %d, sg_nents: %d\n",
		 data_left, sg_nents);

	count = ib_dma_map_sg(ib_dev, sg_start, sg_nents, DMA_FROM_DEVICE);
	if (unlikely(!count)) {
		pr_err("Unable to map get_dataout SGs\n");
		return -EINVAL;
	}
	wr->sge = sg_start;
	wr->num_sge = sg_nents;
	pr_debug("Mapped IB count: %u sg_start: %p sg_nents: %u for RDMA_READ\n",
		 count, sg_start, sg_nents);

	ib_sge = kzalloc(sizeof(struct ib_sge) * sg_nents, GFP_KERNEL);
	if (!ib_sge) {
		pr_warn("Unable to allocate dataout ib_sge\n");
		ret = -ENOMEM;
		goto unmap_sg;
	}
	isert_cmd->ib_sge = ib_sge;

	pr_debug("Using ib_sge: %p from sg_ents: %d for RDMA_READ\n",
		 ib_sge, sg_nents);

	wr->send_wr_num = DIV_ROUND_UP(sg_nents, isert_conn->max_sge);
	wr->send_wr = kzalloc(sizeof(struct ib_send_wr) * wr->send_wr_num,
			      GFP_KERNEL);
	if (!wr->send_wr) {
		pr_err("Unable to allocate wr->send_wr\n");
		ret = -ENOMEM;
		goto unmap_sg;
	}
	pr_debug("Allocated wr->send_wr: %p wr->send_wr_num: %u\n",
		 wr->send_wr, wr->send_wr_num);

	isert_cmd->tx_desc.isert_cmd = isert_cmd;

	wr->iser_ib_op = ISER_IB_RDMA_READ;
	wr->isert_cmd = isert_cmd;
	rdma_write_max = isert_conn->max_sge * PAGE_SIZE;
	offset = cmd->write_data_done;

	for (i = 0; i < wr->send_wr_num; i++) {
		send_wr = &isert_cmd->rdma_wr.send_wr[i];
		data_len = min(data_left, rdma_write_max);

		send_wr->opcode = IB_WR_RDMA_READ;
		send_wr->wr.rdma.remote_addr = isert_cmd->write_va + va_offset;
		send_wr->wr.rdma.rkey = isert_cmd->write_stag;

		ib_sge_cnt = isert_build_rdma_wr(isert_conn, isert_cmd, ib_sge,
						 send_wr, data_len, offset);
		ib_sge += ib_sge_cnt;

		if (i + 1 == wr->send_wr_num)
			send_wr->send_flags = IB_SEND_SIGNALED;
		else
			send_wr->next = &wr->send_wr[i + 1];

		offset += data_len;
		va_offset += data_len;
		data_left -= data_len;
	}

	atomic_inc(&isert_conn->post_send_buf_count);

	rc = ib_post_send(isert_conn->conn_qp, wr->send_wr, &wr_failed);
	if (rc) {
		pr_warn("ib_post_send() failed for IB_WR_RDMA_READ\n");
		atomic_dec(&isert_conn->post_send_buf_count);
	}
	pr_debug("Posted RDMA_READ memory for ISER Data WRITE\n");
	return 0;

unmap_sg:
	ib_dma_unmap_sg(ib_dev, sg_start, sg_nents, DMA_FROM_DEVICE);
	return ret;
}

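/*
 * Immediate-queue callback: only NOPIN-want-response is expected here;
 * any other state is rejected with -EINVAL.
 */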
static int
isert_immediate_queue(struct iscsi_conn *conn, struct iscsi_cmd *cmd, int state)
{
	int ret;

	switch (state) {
	case ISTATE_SEND_NOPIN_WANT_RESPONSE:
		ret = isert_put_nopin(cmd, conn, false);
		break;
	default:
		pr_err("Unknown immediate state: 0x%02x\n", state);
		ret = -EINVAL;
		break;
	}

	return ret;
}

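/*
 * Response-queue callback: dispatch the queued response to the matching
 * isert_put_*() helper based on the command state.
 */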
static int
isert_response_queue(struct iscsi_conn *conn, struct iscsi_cmd *cmd, int state)
{
	int ret;

	switch (state) {
	case ISTATE_SEND_LOGOUTRSP:
		ret = isert_put_logout_rsp(cmd, conn);
		if (!ret) {
			pr_debug("Returning iSER Logout -EAGAIN\n");
			ret = -EAGAIN;
		}
		break;
	case ISTATE_SEND_NOPIN:
		ret = isert_put_nopin(cmd, conn, true);
		break;
	case ISTATE_SEND_TASKMGTRSP:
		ret = isert_put_tm_rsp(cmd, conn);
		break;
	case ISTATE_SEND_REJECT:
		ret = isert_put_reject(cmd, conn);
		break;
	case ISTATE_SEND_TEXTRSP:
		ret = isert_put_text_rsp(cmd, conn);
		break;
	case ISTATE_SEND_STATUS:
		/*
		 * Special case for sending non-GOOD SCSI status from TX
		 * thread context during pre se_cmd execution failure.
		 */
		ret = isert_put_response(conn, cmd);
		break;
	default:
		pr_err("Unknown response state: 0x%02x\n", state);
		ret = -EINVAL;
		break;
	}

	return ret;
}

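/*
 * Network portal setup: allocate an isert_np, create and bind an RDMA
 * CM listener for the portal address, and start listening with a
 * backlog of ISERT_RDMA_LISTEN_BACKLOG.
 */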
static int
isert_setup_np(struct iscsi_np *np,
	       struct __kernel_sockaddr_storage *ksockaddr)
{
	struct isert_np *isert_np;
	struct rdma_cm_id *isert_lid;
	struct sockaddr *sa;
	int ret;

	isert_np = kzalloc(sizeof(struct isert_np), GFP_KERNEL);
	if (!isert_np) {
		pr_err("Unable to allocate struct isert_np\n");
		return -ENOMEM;
	}
	init_waitqueue_head(&isert_np->np_accept_wq);
	mutex_init(&isert_np->np_accept_mutex);
	INIT_LIST_HEAD(&isert_np->np_accept_list);
	init_completion(&isert_np->np_login_comp);

	sa = (struct sockaddr *)ksockaddr;
	pr_debug("ksockaddr: %p, sa: %p\n", ksockaddr, sa);
	/*
	 * Set up np->np_sockaddr from the sockaddr passed in by the
	 * iscsi_target_configfs.c code.
	 */
	memcpy(&np->np_sockaddr, ksockaddr,
	       sizeof(struct __kernel_sockaddr_storage));

	isert_lid = rdma_create_id(isert_cma_handler, np, RDMA_PS_TCP,
				   IB_QPT_RC);
	if (IS_ERR(isert_lid)) {
		pr_err("rdma_create_id() for isert_listen_handler failed: %ld\n",
		       PTR_ERR(isert_lid));
		ret = PTR_ERR(isert_lid);
		goto out;
	}

	ret = rdma_bind_addr(isert_lid, sa);
	if (ret) {
		pr_err("rdma_bind_addr() for isert_lid failed: %d\n", ret);
		goto out_lid;
	}

	ret = rdma_listen(isert_lid, ISERT_RDMA_LISTEN_BACKLOG);
	if (ret) {
		pr_err("rdma_listen() for isert_lid failed: %d\n", ret);
		goto out_lid;
	}

	isert_np->np_cm_id = isert_lid;
	np->np_context = isert_np;
	pr_debug("Setup isert_lid->context: %p\n", isert_lid->context);

	return 0;

out_lid:
	rdma_destroy_id(isert_lid);
out:
	kfree(isert_np);
	return ret;
}

static int
isert_check_accept_queue(struct isert_np *isert_np)
{
	int empty;

	mutex_lock(&isert_np->np_accept_mutex);
	empty = list_empty(&isert_np->np_accept_list);
	mutex_unlock(&isert_np->np_accept_mutex);

	return empty;
}

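/*
 * Accept the pending RDMA CM connection request using the responder
 * resources and initiator depth saved on the isert_conn.
 */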
static int
isert_rdma_accept(struct isert_conn *isert_conn)
{
	struct rdma_cm_id *cm_id = isert_conn->conn_cm_id;
	struct rdma_conn_param cp;
	int ret;

	memset(&cp, 0, sizeof(struct rdma_conn_param));
	cp.responder_resources = isert_conn->responder_resources;
	cp.initiator_depth = isert_conn->initiator_depth;
	cp.retry_count = 7;
	cp.rnr_retry_count = 7;

	pr_debug("Before rdma_accept >>>>>>>>>>>>>>>>>>>>.\n");

	ret = rdma_accept(cm_id, &cp);
	if (ret) {
		pr_err("rdma_accept() failed with: %d\n", ret);
		return ret;
	}

	pr_debug("After rdma_accept >>>>>>>>>>>>>>>>>>>>>.\n");

	return 0;
}

static int
isert_get_login_rx(struct iscsi_conn *conn, struct iscsi_login *login)
{
	struct isert_conn *isert_conn = (struct isert_conn *)conn->context;
	int ret;

	pr_debug("isert_get_login_rx before conn_login_comp conn: %p\n", conn);
	/*
	 * For login requests after the first PDU, isert_rx_login_req() will
	 * kick schedule_delayed_work(&conn->login_work) as the packet is
	 * received, which turns this callback from iscsi_target_do_login_rx()
	 * into a NOP.
	 */
	if (!login->first_request)
		return 0;

	ret = wait_for_completion_interruptible(&isert_conn->conn_login_comp);
	if (ret)
		return ret;

	pr_debug("isert_get_login_rx processing login->req: %p\n", login->req);
	return 0;
}

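/*
 * Record the initiator (login) and target (local) address and port of
 * the accepted connection from the RDMA CM route, for IPv4 and IPv6.
 */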
static void
isert_set_conn_info(struct iscsi_np *np, struct iscsi_conn *conn,
		    struct isert_conn *isert_conn)
{
	struct rdma_cm_id *cm_id = isert_conn->conn_cm_id;
	struct rdma_route *cm_route = &cm_id->route;
	struct sockaddr_in *sock_in;
	struct sockaddr_in6 *sock_in6;

	conn->login_family = np->np_sockaddr.ss_family;

	if (np->np_sockaddr.ss_family == AF_INET6) {
		sock_in6 = (struct sockaddr_in6 *)&cm_route->addr.dst_addr;
		snprintf(conn->login_ip, sizeof(conn->login_ip), "%pI6c",
			 &sock_in6->sin6_addr.in6_u);
		conn->login_port = ntohs(sock_in6->sin6_port);

		sock_in6 = (struct sockaddr_in6 *)&cm_route->addr.src_addr;
		snprintf(conn->local_ip, sizeof(conn->local_ip), "%pI6c",
			 &sock_in6->sin6_addr.in6_u);
		conn->local_port = ntohs(sock_in6->sin6_port);
	} else {
		sock_in = (struct sockaddr_in *)&cm_route->addr.dst_addr;
		snprintf(conn->login_ip, sizeof(conn->login_ip), "%pI4",
			 &sock_in->sin_addr.s_addr);
		conn->login_port = ntohs(sock_in->sin_port);

		sock_in = (struct sockaddr_in *)&cm_route->addr.src_addr;
		snprintf(conn->local_ip, sizeof(conn->local_ip), "%pI4",
			 &sock_in->sin_addr.s_addr);
		conn->local_port = ntohs(sock_in->sin_port);
	}
}

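/*
 * Network portal accept: wait for an isert_conn on the accept list,
 * post the initial login RECV, and answer the RDMA CM request.  Gives
 * up with -ENODEV after repeated empty wakeups or on a portal thread
 * reset.
 */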
static int
isert_accept_np(struct iscsi_np *np, struct iscsi_conn *conn)
{
	struct isert_np *isert_np = (struct isert_np *)np->np_context;
	struct isert_conn *isert_conn;
	int max_accept = 0, ret;

accept_wait:
	ret = wait_event_interruptible(isert_np->np_accept_wq,
				       !isert_check_accept_queue(isert_np) ||
				       np->np_thread_state == ISCSI_NP_THREAD_RESET);
	if (max_accept > 5)
		return -ENODEV;

	spin_lock_bh(&np->np_thread_lock);
	if (np->np_thread_state == ISCSI_NP_THREAD_RESET) {
		spin_unlock_bh(&np->np_thread_lock);
		pr_err("ISCSI_NP_THREAD_RESET for isert_accept_np\n");
		return -ENODEV;
	}
	spin_unlock_bh(&np->np_thread_lock);

	mutex_lock(&isert_np->np_accept_mutex);
	if (list_empty(&isert_np->np_accept_list)) {
		mutex_unlock(&isert_np->np_accept_mutex);
		max_accept++;
		goto accept_wait;
	}
	isert_conn = list_first_entry(&isert_np->np_accept_list,
				      struct isert_conn, conn_accept_node);
	list_del_init(&isert_conn->conn_accept_node);
	mutex_unlock(&isert_np->np_accept_mutex);

	conn->context = isert_conn;
	isert_conn->conn = conn;
	max_accept = 0;

	ret = isert_rdma_post_recvl(isert_conn);
	if (ret)
		return ret;

	ret = isert_rdma_accept(isert_conn);
	if (ret)
		return ret;

	isert_set_conn_info(np, conn, isert_conn);

	pr_debug("Processing isert_accept_np: isert_conn: %p\n", isert_conn);
	return 0;
}

static void
isert_free_np(struct iscsi_np *np)
{
	struct isert_np *isert_np = (struct isert_np *)np->np_context;

	rdma_destroy_id(isert_np->np_cm_id);

	np->np_context = NULL;
	kfree(isert_np);
}

static int isert_check_state(struct isert_conn *isert_conn, int state)
{
	int ret;

	mutex_lock(&isert_conn->conn_mutex);
	ret = (isert_conn->state == state);
	mutex_unlock(&isert_conn->conn_mutex);

	return ret;
}

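/*
 * Connection teardown: drop the logout reference if one was posted,
 * disconnect the CM ID if the connection is not already down, then wait
 * for the connection to reach ISER_CONN_DOWN before dropping the final
 * reference.
 */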
static void isert_free_conn(struct iscsi_conn *conn)
{
	struct isert_conn *isert_conn = conn->context;

	pr_debug("isert_free_conn: Starting\n");
	/*
	 * Decrement post_send_buf_count for the special case when called
	 * from isert_do_control_comp() -> iscsit_logout_post_handler()
	 */
	mutex_lock(&isert_conn->conn_mutex);
	if (isert_conn->logout_posted)
		atomic_dec(&isert_conn->post_send_buf_count);

	if (isert_conn->conn_cm_id && isert_conn->state != ISER_CONN_DOWN) {
		pr_debug("Calling rdma_disconnect from isert_free_conn\n");
		rdma_disconnect(isert_conn->conn_cm_id);
	}
	/*
	 * Only wait for conn_wait_comp_err if the isert_conn made it
	 * into full feature phase.
	 */
	if (isert_conn->state == ISER_CONN_UP) {
		pr_debug("isert_free_conn: Before wait_event comp_err %d\n",
			 isert_conn->state);
		mutex_unlock(&isert_conn->conn_mutex);

		wait_event(isert_conn->conn_wait_comp_err,
			   (isert_check_state(isert_conn, ISER_CONN_TERMINATING)));

		wait_event(isert_conn->conn_wait,
			   (isert_check_state(isert_conn, ISER_CONN_DOWN)));

		isert_put_conn(isert_conn);
		return;
	}
	if (isert_conn->state == ISER_CONN_INIT) {
		mutex_unlock(&isert_conn->conn_mutex);
		isert_put_conn(isert_conn);
		return;
	}
	pr_debug("isert_free_conn: wait_event conn_wait %d\n",
		 isert_conn->state);
	mutex_unlock(&isert_conn->conn_mutex);

	wait_event(isert_conn->conn_wait,
		   (isert_check_state(isert_conn, ISER_CONN_DOWN)));

	isert_put_conn(isert_conn);
}

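/*
 * iscsit_transport registration; priv_size makes the core allocate a
 * struct isert_cmd alongside each iscsi_cmd, retrieved above via
 * iscsit_priv_cmd().
 */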
static struct iscsit_transport iser_target_transport = {
	.name = "IB/iSER",
	.transport_type = ISCSI_INFINIBAND,
	.priv_size = sizeof(struct isert_cmd),
	.owner = THIS_MODULE,
	.iscsit_setup_np = isert_setup_np,
	.iscsit_accept_np = isert_accept_np,
	.iscsit_free_np = isert_free_np,
	.iscsit_free_conn = isert_free_conn,
	.iscsit_get_login_rx = isert_get_login_rx,
	.iscsit_put_login_tx = isert_put_login_tx,
	.iscsit_immediate_queue = isert_immediate_queue,
	.iscsit_response_queue = isert_response_queue,
	.iscsit_get_dataout = isert_get_dataout,
	.iscsit_queue_data_in = isert_put_datain,
	.iscsit_queue_status = isert_put_response,
};

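/*
 * Module init: create the RX and completion workqueues, then register
 * the transport with the iSCSI target core.
 */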
static int __init isert_init(void)
{
	int ret;

	isert_rx_wq = alloc_workqueue("isert_rx_wq", 0, 0);
	if (!isert_rx_wq) {
		pr_err("Unable to allocate isert_rx_wq\n");
		return -ENOMEM;
	}

	isert_comp_wq = alloc_workqueue("isert_comp_wq", 0, 0);
	if (!isert_comp_wq) {
		pr_err("Unable to allocate isert_comp_wq\n");
		ret = -ENOMEM;
		goto destroy_rx_wq;
	}

	iscsit_register_transport(&iser_target_transport);
	pr_debug("iSER_TARGET[0] - Loaded iser_target_transport\n");
	return 0;

destroy_rx_wq:
	destroy_workqueue(isert_rx_wq);
	return ret;
}

static void __exit isert_exit(void)
{
	destroy_workqueue(isert_comp_wq);
	destroy_workqueue(isert_rx_wq);
	iscsit_unregister_transport(&iser_target_transport);
	pr_debug("iSER_TARGET[0] - Released iser_target_transport\n");
}

MODULE_DESCRIPTION("iSER-Target for mainline target infrastructure");
MODULE_VERSION("0.1");
MODULE_AUTHOR("nab@Linux-iSCSI.org");
MODULE_LICENSE("GPL");

module_init(isert_init);
module_exit(isert_exit);