// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Copyright (C) 2017, Microsoft Corporation.
 * Copyright (C) 2018, LG Electronics.
 *
 * Author(s): Long Li <longli@microsoft.com>,
 *	      Hyunchul Lee <hyc.lee@gmail.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See
 * the GNU General Public License for more details.
 */

#define SUBMOD_NAME	"smb_direct"

#include <linux/kthread.h>
#include <linux/list.h>
#include <linux/mempool.h>
#include <linux/highmem.h>
#include <linux/scatterlist.h>
#include <rdma/ib_verbs.h>
#include <rdma/rdma_cm.h>
#include <rdma/rw.h>

#include "glob.h"
#include "connection.h"
#include "smb_common.h"
#include "smbstatus.h"
#include "transport_rdma.h"

#define SMB_DIRECT_PORT_IWARP		5445
#define SMB_DIRECT_PORT_INFINIBAND	445

#define SMB_DIRECT_VERSION_LE		cpu_to_le16(0x0100)

/* SMB_DIRECT negotiation timeout in seconds */
#define SMB_DIRECT_NEGOTIATE_TIMEOUT		120

#define SMB_DIRECT_MAX_SEND_SGES		8
#define SMB_DIRECT_MAX_RECV_SGES		1

/*
 * Default maximum number of RDMA read/write operations outstanding on this
 * connection. This value may be lowered during QP creation to match the
 * hardware's limits.
 */
#define SMB_DIRECT_CM_INITIATOR_DEPTH		8

/* Maximum number of retries on data transfer operations */
#define SMB_DIRECT_CM_RETRY			6
/* No need to retry on Receiver Not Ready since SMB_DIRECT manages credits */
#define SMB_DIRECT_CM_RNR_RETRY		0

/*
 * User configurable initial values per SMB_DIRECT transport connection
 * as defined in [MS-SMBD] 3.1.1.1.
 * Those may change after an SMB_DIRECT negotiation.
 */

/* Use port 445 as the SMB Direct port by default */
static int smb_direct_port = SMB_DIRECT_PORT_INFINIBAND;

/* The maximum number of credits the local peer grants to the remote peer */
static int smb_direct_receive_credit_max = 255;

/* The number of send credits the local peer requests from the remote peer */
static int smb_direct_send_credit_target = 255;

/* The maximum single message size that can be sent to the remote peer */
static int smb_direct_max_send_size = 8192;

/* The maximum fragmented upper-layer payload receive size supported */
static int smb_direct_max_fragmented_recv_size = 1024 * 1024;

/* The maximum single-message size which can be received */
static int smb_direct_max_receive_size = 8192;

static int smb_direct_max_read_write_size = 1024 * 1024;

static int smb_direct_max_outstanding_rw_ops = 8;

static LIST_HEAD(smb_direct_device_list);
static DEFINE_RWLOCK(smb_direct_device_lock);

struct smb_direct_device {
	struct ib_device	*ib_dev;
	struct list_head	list;
};

static struct smb_direct_listener {
	struct rdma_cm_id	*cm_id;
} smb_direct_listener;

static struct workqueue_struct *smb_direct_wq;

enum smb_direct_status {
	SMB_DIRECT_CS_NEW = 0,
	SMB_DIRECT_CS_CONNECTED,
	SMB_DIRECT_CS_DISCONNECTING,
	SMB_DIRECT_CS_DISCONNECTED,
};

struct smb_direct_transport {
	struct ksmbd_transport	transport;

	enum smb_direct_status	status;
	bool			full_packet_received;
	wait_queue_head_t	wait_status;

	struct rdma_cm_id	*cm_id;
	struct ib_cq		*send_cq;
	struct ib_cq		*recv_cq;
	struct ib_pd		*pd;
	struct ib_qp		*qp;

	int			max_send_size;
	int			max_recv_size;
	int			max_fragmented_send_size;
	int			max_fragmented_recv_size;
	int			max_rdma_rw_size;

	spinlock_t		reassembly_queue_lock;
	struct list_head	reassembly_queue;
	int			reassembly_data_length;
	int			reassembly_queue_length;
	int			first_entry_offset;
	wait_queue_head_t	wait_reassembly_queue;

	spinlock_t		receive_credit_lock;
	int			recv_credits;
	int			count_avail_recvmsg;
	int			recv_credit_max;
	int			recv_credit_target;

	spinlock_t		recvmsg_queue_lock;
	struct list_head	recvmsg_queue;

	spinlock_t		empty_recvmsg_queue_lock;
	struct list_head	empty_recvmsg_queue;

	int			send_credit_target;
	atomic_t		send_credits;
	spinlock_t		lock_new_recv_credits;
	int			new_recv_credits;
	atomic_t		rw_avail_ops;

	wait_queue_head_t	wait_send_credits;
	wait_queue_head_t	wait_rw_avail_ops;

	mempool_t		*sendmsg_mempool;
	struct kmem_cache	*sendmsg_cache;
	mempool_t		*recvmsg_mempool;
	struct kmem_cache	*recvmsg_cache;

	wait_queue_head_t	wait_send_payload_pending;
	atomic_t		send_payload_pending;
	wait_queue_head_t	wait_send_pending;
	atomic_t		send_pending;

	struct delayed_work	post_recv_credits_work;
	struct work_struct	send_immediate_work;
	struct work_struct	disconnect_work;

	bool			negotiation_requested;
};

#define KSMBD_TRANS(t) ((struct ksmbd_transport *)&((t)->transport))

enum {
	SMB_DIRECT_MSG_NEGOTIATE_REQ = 0,
	SMB_DIRECT_MSG_DATA_TRANSFER
};

static struct ksmbd_transport_ops ksmbd_smb_direct_transport_ops;

struct smb_direct_send_ctx {
	struct list_head	msg_list;
	int			wr_cnt;
	bool			need_invalidate_rkey;
	unsigned int		remote_key;
};

struct smb_direct_sendmsg {
	struct smb_direct_transport	*transport;
	struct ib_send_wr	wr;
	struct list_head	list;
	int			num_sge;
	struct ib_sge		sge[SMB_DIRECT_MAX_SEND_SGES];
	struct ib_cqe		cqe;
	u8			packet[];
};

struct smb_direct_recvmsg {
	struct smb_direct_transport	*transport;
	struct list_head	list;
	int			type;
	struct ib_sge		sge;
	struct ib_cqe		cqe;
	bool			first_segment;
	u8			packet[];
};

struct smb_direct_rdma_rw_msg {
	struct smb_direct_transport	*t;
	struct ib_cqe		cqe;
	struct completion	*completion;
	struct rdma_rw_ctx	rw_ctx;
	struct sg_table		sgt;
	struct scatterlist	sg_list[0];
};

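/*
 * Return the number of pages spanned by the buffer [buf, buf + size),
 * accounting for the offset of @buf within its first page.
 */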
static inline int get_buf_page_count(void *buf, int size)
{
	return DIV_ROUND_UP((uintptr_t)buf + size, PAGE_SIZE) -
		(uintptr_t)buf / PAGE_SIZE;
}

static void smb_direct_destroy_pools(struct smb_direct_transport *transport);
static void smb_direct_post_recv_credits(struct work_struct *work);
static int smb_direct_post_send_data(struct smb_direct_transport *t,
				     struct smb_direct_send_ctx *send_ctx,
				     struct kvec *iov, int niov,
				     int remaining_data_length);

static inline struct smb_direct_transport *
smb_trans_direct_transfort(struct ksmbd_transport *t)
{
	return container_of(t, struct smb_direct_transport, transport);
}

static inline void
*smb_direct_recvmsg_payload(struct smb_direct_recvmsg *recvmsg)
{
	return (void *)recvmsg->packet;
}

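/*
 * Receive buffers are reposted in batches: only once the outstanding receive
 * credits have fallen to an eighth of the maximum, and enough free receive
 * messages have accumulated to cover at least a quarter of those credits.
 */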
static inline bool is_receive_credit_post_required(int receive_credits,
						   int avail_recvmsg_count)
{
	return receive_credits <= (smb_direct_receive_credit_max >> 3) &&
		avail_recvmsg_count >= (receive_credits >> 2);
}

static struct
smb_direct_recvmsg *get_free_recvmsg(struct smb_direct_transport *t)
{
	struct smb_direct_recvmsg *recvmsg = NULL;

	spin_lock(&t->recvmsg_queue_lock);
	if (!list_empty(&t->recvmsg_queue)) {
		recvmsg = list_first_entry(&t->recvmsg_queue,
					   struct smb_direct_recvmsg,
					   list);
		list_del(&recvmsg->list);
	}
	spin_unlock(&t->recvmsg_queue_lock);
	return recvmsg;
}

static void put_recvmsg(struct smb_direct_transport *t,
			struct smb_direct_recvmsg *recvmsg)
{
	ib_dma_unmap_single(t->cm_id->device, recvmsg->sge.addr,
			    recvmsg->sge.length, DMA_FROM_DEVICE);

	spin_lock(&t->recvmsg_queue_lock);
	list_add(&recvmsg->list, &t->recvmsg_queue);
	spin_unlock(&t->recvmsg_queue_lock);
}

static struct
smb_direct_recvmsg *get_empty_recvmsg(struct smb_direct_transport *t)
{
	struct smb_direct_recvmsg *recvmsg = NULL;

	spin_lock(&t->empty_recvmsg_queue_lock);
	if (!list_empty(&t->empty_recvmsg_queue)) {
		recvmsg = list_first_entry(&t->empty_recvmsg_queue,
					   struct smb_direct_recvmsg, list);
		list_del(&recvmsg->list);
	}
	spin_unlock(&t->empty_recvmsg_queue_lock);
	return recvmsg;
}

static void put_empty_recvmsg(struct smb_direct_transport *t,
			      struct smb_direct_recvmsg *recvmsg)
{
	ib_dma_unmap_single(t->cm_id->device, recvmsg->sge.addr,
			    recvmsg->sge.length, DMA_FROM_DEVICE);

	spin_lock(&t->empty_recvmsg_queue_lock);
	list_add_tail(&recvmsg->list, &t->empty_recvmsg_queue);
	spin_unlock(&t->empty_recvmsg_queue_lock);
}

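/*
 * Append a received message to the reassembly queue and publish the new data
 * length to the lock-free reader in smb_direct_read().
 */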
static void enqueue_reassembly(struct smb_direct_transport *t,
			       struct smb_direct_recvmsg *recvmsg,
			       int data_length)
{
	spin_lock(&t->reassembly_queue_lock);
	list_add_tail(&recvmsg->list, &t->reassembly_queue);
	t->reassembly_queue_length++;
	/*
	 * Make sure reassembly_data_length is updated after list and
	 * reassembly_queue_length are updated. On the dequeue side
	 * reassembly_data_length is checked without a lock to determine
	 * if reassembly_queue_length and the list are up to date.
	 */
	virt_wmb();
	t->reassembly_data_length += data_length;
	spin_unlock(&t->reassembly_queue_lock);
}

static struct smb_direct_recvmsg *get_first_reassembly(struct smb_direct_transport *t)
{
	if (!list_empty(&t->reassembly_queue))
		return list_first_entry(&t->reassembly_queue,
				struct smb_direct_recvmsg, list);
	else
		return NULL;
}

static void smb_direct_disconnect_rdma_work(struct work_struct *work)
{
	struct smb_direct_transport *t =
		container_of(work, struct smb_direct_transport,
			     disconnect_work);

	if (t->status == SMB_DIRECT_CS_CONNECTED) {
		t->status = SMB_DIRECT_CS_DISCONNECTING;
		rdma_disconnect(t->cm_id);
	}
}

static void
smb_direct_disconnect_rdma_connection(struct smb_direct_transport *t)
{
	if (t->status == SMB_DIRECT_CS_CONNECTED)
		queue_work(smb_direct_wq, &t->disconnect_work);
}

static void smb_direct_send_immediate_work(struct work_struct *work)
{
	struct smb_direct_transport *t = container_of(work,
			struct smb_direct_transport, send_immediate_work);

	if (t->status != SMB_DIRECT_CS_CONNECTED)
		return;

	smb_direct_post_send_data(t, NULL, NULL, 0, 0);
}

static struct smb_direct_transport *alloc_transport(struct rdma_cm_id *cm_id)
{
	struct smb_direct_transport *t;
	struct ksmbd_conn *conn;

	t = kzalloc(sizeof(*t), GFP_KERNEL);
	if (!t)
		return NULL;

	t->cm_id = cm_id;
	cm_id->context = t;

	t->status = SMB_DIRECT_CS_NEW;
	init_waitqueue_head(&t->wait_status);

	spin_lock_init(&t->reassembly_queue_lock);
	INIT_LIST_HEAD(&t->reassembly_queue);
	t->reassembly_data_length = 0;
	t->reassembly_queue_length = 0;
	init_waitqueue_head(&t->wait_reassembly_queue);
	init_waitqueue_head(&t->wait_send_credits);
	init_waitqueue_head(&t->wait_rw_avail_ops);

	spin_lock_init(&t->receive_credit_lock);
	spin_lock_init(&t->recvmsg_queue_lock);
	INIT_LIST_HEAD(&t->recvmsg_queue);

	spin_lock_init(&t->empty_recvmsg_queue_lock);
	INIT_LIST_HEAD(&t->empty_recvmsg_queue);

	init_waitqueue_head(&t->wait_send_payload_pending);
	atomic_set(&t->send_payload_pending, 0);
	init_waitqueue_head(&t->wait_send_pending);
	atomic_set(&t->send_pending, 0);

	spin_lock_init(&t->lock_new_recv_credits);

	INIT_DELAYED_WORK(&t->post_recv_credits_work,
			  smb_direct_post_recv_credits);
	INIT_WORK(&t->send_immediate_work, smb_direct_send_immediate_work);
	INIT_WORK(&t->disconnect_work, smb_direct_disconnect_rdma_work);

	conn = ksmbd_conn_alloc();
	if (!conn)
		goto err;
	conn->transport = KSMBD_TRANS(t);
	KSMBD_TRANS(t)->conn = conn;
	KSMBD_TRANS(t)->ops = &ksmbd_smb_direct_transport_ops;
	return t;
err:
	kfree(t);
	return NULL;
}

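/*
 * Tear down a transport: wait for all posted sends to complete, stop the
 * pending work items, drain and destroy the QP, and release whatever is
 * still sitting in the reassembly queue before freeing the message pools.
 */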
static void free_transport(struct smb_direct_transport *t)
{
	struct smb_direct_recvmsg *recvmsg;

	wake_up_interruptible(&t->wait_send_credits);

	ksmbd_debug(RDMA, "wait for all send posted to IB to finish\n");
	wait_event(t->wait_send_payload_pending,
		   atomic_read(&t->send_payload_pending) == 0);
	wait_event(t->wait_send_pending,
		   atomic_read(&t->send_pending) == 0);

	cancel_work_sync(&t->disconnect_work);
	cancel_delayed_work_sync(&t->post_recv_credits_work);
	cancel_work_sync(&t->send_immediate_work);

	if (t->qp) {
		ib_drain_qp(t->qp);
		ib_destroy_qp(t->qp);
	}

	ksmbd_debug(RDMA, "drain the reassembly queue\n");
	do {
		spin_lock(&t->reassembly_queue_lock);
		recvmsg = get_first_reassembly(t);
		if (recvmsg) {
			list_del(&recvmsg->list);
			spin_unlock(&t->reassembly_queue_lock);
			put_recvmsg(t, recvmsg);
		} else {
			spin_unlock(&t->reassembly_queue_lock);
		}
	} while (recvmsg);
	t->reassembly_data_length = 0;

	if (t->send_cq)
		ib_free_cq(t->send_cq);
	if (t->recv_cq)
		ib_free_cq(t->recv_cq);
	if (t->pd)
		ib_dealloc_pd(t->pd);
	if (t->cm_id)
		rdma_destroy_id(t->cm_id);

	smb_direct_destroy_pools(t);
	ksmbd_conn_free(KSMBD_TRANS(t)->conn);
	kfree(t);
}

static struct smb_direct_sendmsg
*smb_direct_alloc_sendmsg(struct smb_direct_transport *t)
{
	struct smb_direct_sendmsg *msg;

	msg = mempool_alloc(t->sendmsg_mempool, GFP_KERNEL);
	if (!msg)
		return ERR_PTR(-ENOMEM);
	msg->transport = t;
	INIT_LIST_HEAD(&msg->list);
	msg->num_sge = 0;
	return msg;
}

static void smb_direct_free_sendmsg(struct smb_direct_transport *t,
				    struct smb_direct_sendmsg *msg)
{
	int i;

	if (msg->num_sge > 0) {
		ib_dma_unmap_single(t->cm_id->device,
				    msg->sge[0].addr, msg->sge[0].length,
				    DMA_TO_DEVICE);
		for (i = 1; i < msg->num_sge; i++)
			ib_dma_unmap_page(t->cm_id->device,
					  msg->sge[i].addr, msg->sge[i].length,
					  DMA_TO_DEVICE);
	}
	mempool_free(msg, t->sendmsg_mempool);
}

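/*
 * Sanity-check a received packet. Negotiate requests must offer protocol
 * version 1.0 and sane credit and size values; data-transfer packets are
 * only logged here.
 */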
static int smb_direct_check_recvmsg(struct smb_direct_recvmsg *recvmsg)
{
	switch (recvmsg->type) {
	case SMB_DIRECT_MSG_DATA_TRANSFER: {
		struct smb_direct_data_transfer *req =
			(struct smb_direct_data_transfer *)recvmsg->packet;
		struct smb2_hdr *hdr = (struct smb2_hdr *)(recvmsg->packet
				+ le32_to_cpu(req->data_offset));
		ksmbd_debug(RDMA,
			    "CreditGranted: %u, CreditRequested: %u, DataLength: %u, RemainingDataLength: %u, SMB: %x, Command: %u\n",
			    le16_to_cpu(req->credits_granted),
			    le16_to_cpu(req->credits_requested),
			    req->data_length, req->remaining_data_length,
			    hdr->ProtocolId, hdr->Command);
		break;
	}
	case SMB_DIRECT_MSG_NEGOTIATE_REQ: {
		struct smb_direct_negotiate_req *req =
			(struct smb_direct_negotiate_req *)recvmsg->packet;
		ksmbd_debug(RDMA,
			    "MinVersion: %u, MaxVersion: %u, CreditRequested: %u, MaxSendSize: %u, MaxRecvSize: %u, MaxFragmentedSize: %u\n",
			    le16_to_cpu(req->min_version),
			    le16_to_cpu(req->max_version),
			    le16_to_cpu(req->credits_requested),
			    le32_to_cpu(req->preferred_send_size),
			    le32_to_cpu(req->max_receive_size),
			    le32_to_cpu(req->max_fragmented_size));
		if (le16_to_cpu(req->min_version) > 0x0100 ||
		    le16_to_cpu(req->max_version) < 0x0100)
			return -EOPNOTSUPP;
		if (le16_to_cpu(req->credits_requested) <= 0 ||
		    le32_to_cpu(req->max_receive_size) <= 128 ||
		    le32_to_cpu(req->max_fragmented_size) <=
		    128 * 1024)
			return -ECONNABORTED;

		break;
	}
	default:
		return -EINVAL;
	}
	return 0;
}

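/*
 * Receive completion handler. A validated negotiate request wakes up the
 * negotiation waiter; a data-transfer packet is queued for reassembly and
 * the credit accounting is refreshed from the peer's header fields.
 */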
static void recv_done(struct ib_cq *cq, struct ib_wc *wc)
{
	struct smb_direct_recvmsg *recvmsg;
	struct smb_direct_transport *t;

	recvmsg = container_of(wc->wr_cqe, struct smb_direct_recvmsg, cqe);
	t = recvmsg->transport;

	if (wc->status != IB_WC_SUCCESS || wc->opcode != IB_WC_RECV) {
		if (wc->status != IB_WC_WR_FLUSH_ERR) {
			pr_err("Recv error. status='%s (%d)' opcode=%d\n",
			       ib_wc_status_msg(wc->status), wc->status,
			       wc->opcode);
			smb_direct_disconnect_rdma_connection(t);
		}
		put_empty_recvmsg(t, recvmsg);
		return;
	}

	ksmbd_debug(RDMA, "Recv completed. status='%s (%d)', opcode=%d\n",
		    ib_wc_status_msg(wc->status), wc->status,
		    wc->opcode);

	ib_dma_sync_single_for_cpu(wc->qp->device, recvmsg->sge.addr,
				   recvmsg->sge.length, DMA_FROM_DEVICE);

	switch (recvmsg->type) {
	case SMB_DIRECT_MSG_NEGOTIATE_REQ:
		if (wc->byte_len < sizeof(struct smb_direct_negotiate_req)) {
			put_empty_recvmsg(t, recvmsg);
			return;
		}
		t->negotiation_requested = true;
		t->full_packet_received = true;
		wake_up_interruptible(&t->wait_status);
		break;
	case SMB_DIRECT_MSG_DATA_TRANSFER: {
		struct smb_direct_data_transfer *data_transfer =
			(struct smb_direct_data_transfer *)recvmsg->packet;
		unsigned int data_length;
		int avail_recvmsg_count, receive_credits;

		if (wc->byte_len <
		    offsetof(struct smb_direct_data_transfer, padding)) {
			put_empty_recvmsg(t, recvmsg);
			return;
		}

		data_length = le32_to_cpu(data_transfer->data_length);
		if (data_length) {
			if (wc->byte_len < sizeof(struct smb_direct_data_transfer) +
			    (u64)data_length) {
				put_empty_recvmsg(t, recvmsg);
				return;
			}

			if (t->full_packet_received)
				recvmsg->first_segment = true;

			if (le32_to_cpu(data_transfer->remaining_data_length))
				t->full_packet_received = false;
			else
				t->full_packet_received = true;

			enqueue_reassembly(t, recvmsg, (int)data_length);
			wake_up_interruptible(&t->wait_reassembly_queue);

			spin_lock(&t->receive_credit_lock);
			receive_credits = --(t->recv_credits);
			avail_recvmsg_count = t->count_avail_recvmsg;
			spin_unlock(&t->receive_credit_lock);
		} else {
			put_empty_recvmsg(t, recvmsg);

			spin_lock(&t->receive_credit_lock);
			receive_credits = --(t->recv_credits);
			avail_recvmsg_count = ++(t->count_avail_recvmsg);
			spin_unlock(&t->receive_credit_lock);
		}

		t->recv_credit_target =
				le16_to_cpu(data_transfer->credits_requested);
		atomic_add(le16_to_cpu(data_transfer->credits_granted),
			   &t->send_credits);

		if (le16_to_cpu(data_transfer->flags) &
		    SMB_DIRECT_RESPONSE_REQUESTED)
			queue_work(smb_direct_wq, &t->send_immediate_work);

		if (atomic_read(&t->send_credits) > 0)
			wake_up_interruptible(&t->wait_send_credits);

		if (is_receive_credit_post_required(receive_credits, avail_recvmsg_count))
			mod_delayed_work(smb_direct_wq,
					 &t->post_recv_credits_work, 0);
		break;
	}
	default:
		break;
	}
}

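/* DMA-map a receive message and post it to the receive queue. */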
static int smb_direct_post_recv(struct smb_direct_transport *t,
				struct smb_direct_recvmsg *recvmsg)
{
	struct ib_recv_wr wr;
	int ret;

	recvmsg->sge.addr = ib_dma_map_single(t->cm_id->device,
					      recvmsg->packet, t->max_recv_size,
					      DMA_FROM_DEVICE);
	ret = ib_dma_mapping_error(t->cm_id->device, recvmsg->sge.addr);
	if (ret)
		return ret;
	recvmsg->sge.length = t->max_recv_size;
	recvmsg->sge.lkey = t->pd->local_dma_lkey;
	recvmsg->cqe.done = recv_done;

	wr.wr_cqe = &recvmsg->cqe;
	wr.next = NULL;
	wr.sg_list = &recvmsg->sge;
	wr.num_sge = 1;

	ret = ib_post_recv(t->qp, &wr, NULL);
	if (ret) {
		pr_err("Can't post recv: %d\n", ret);
		ib_dma_unmap_single(t->cm_id->device,
				    recvmsg->sge.addr, recvmsg->sge.length,
				    DMA_FROM_DEVICE);
		smb_direct_disconnect_rdma_connection(t);
		return ret;
	}
	return ret;
}

static int smb_direct_read(struct ksmbd_transport *t, char *buf,
			   unsigned int size)
{
	struct smb_direct_recvmsg *recvmsg;
	struct smb_direct_data_transfer *data_transfer;
	int to_copy, to_read, data_read, offset;
	u32 data_length, remaining_data_length, data_offset;
	int rc;
	struct smb_direct_transport *st = smb_trans_direct_transfort(t);

again:
	if (st->status != SMB_DIRECT_CS_CONNECTED) {
		pr_err("disconnected\n");
		return -ENOTCONN;
	}

	/*
	 * No need to hold the reassembly queue lock all the time as we are
	 * the only one reading from the front of the queue. The transport
	 * may add more entries to the back of the queue at the same time.
	 */
	if (st->reassembly_data_length >= size) {
		int queue_length;
		int queue_removed = 0;

		/*
		 * Need to make sure reassembly_data_length is read before
		 * reading reassembly_queue_length and calling
		 * get_first_reassembly. This call is lock free as we never
		 * read the end of the queue, which is being updated in
		 * SOFTIRQ context as more data is received.
		 */
		virt_rmb();
		queue_length = st->reassembly_queue_length;
		data_read = 0;
		to_read = size;
		offset = st->first_entry_offset;
		while (data_read < size) {
			recvmsg = get_first_reassembly(st);
			data_transfer = smb_direct_recvmsg_payload(recvmsg);
			data_length = le32_to_cpu(data_transfer->data_length);
			remaining_data_length =
				le32_to_cpu(data_transfer->remaining_data_length);
			data_offset = le32_to_cpu(data_transfer->data_offset);

			/*
			 * The upper layer expects RFC1002 length at the
			 * beginning of the payload. Return it to indicate
			 * the total length of the packet. This minimizes the
			 * change to the upper-layer packet processing logic,
			 * and will eventually be removed when an intermediate
			 * transport layer is added.
			 */
			if (recvmsg->first_segment && size == 4) {
				unsigned int rfc1002_len =
					data_length + remaining_data_length;
				*((__be32 *)buf) = cpu_to_be32(rfc1002_len);
				data_read = 4;
				recvmsg->first_segment = false;
				ksmbd_debug(RDMA,
					    "returning rfc1002 length %d\n",
					    rfc1002_len);
				goto read_rfc1002_done;
			}

			to_copy = min_t(int, data_length - offset, to_read);
			memcpy(buf + data_read, (char *)data_transfer + data_offset + offset,
			       to_copy);

			/* move on to the next buffer? */
			if (to_copy == data_length - offset) {
				queue_length--;
				/*
				 * No need to lock if we are not at the
				 * end of the queue
				 */
				if (queue_length) {
					list_del(&recvmsg->list);
				} else {
					spin_lock_irq(&st->reassembly_queue_lock);
					list_del(&recvmsg->list);
					spin_unlock_irq(&st->reassembly_queue_lock);
				}
				queue_removed++;
				put_recvmsg(st, recvmsg);
				offset = 0;
			} else {
				offset += to_copy;
			}

			to_read -= to_copy;
			data_read += to_copy;
		}

		spin_lock_irq(&st->reassembly_queue_lock);
		st->reassembly_data_length -= data_read;
		st->reassembly_queue_length -= queue_removed;
		spin_unlock_irq(&st->reassembly_queue_lock);

		spin_lock(&st->receive_credit_lock);
		st->count_avail_recvmsg += queue_removed;
		if (is_receive_credit_post_required(st->recv_credits, st->count_avail_recvmsg)) {
			spin_unlock(&st->receive_credit_lock);
			mod_delayed_work(smb_direct_wq,
					 &st->post_recv_credits_work, 0);
		} else {
			spin_unlock(&st->receive_credit_lock);
		}

		st->first_entry_offset = offset;
		ksmbd_debug(RDMA,
			    "returning to thread data_read=%d reassembly_data_length=%d first_entry_offset=%d\n",
			    data_read, st->reassembly_data_length,
			    st->first_entry_offset);
read_rfc1002_done:
		return data_read;
	}

	ksmbd_debug(RDMA, "wait_event on more data\n");
	rc = wait_event_interruptible(st->wait_reassembly_queue,
				      st->reassembly_data_length >= size ||
				      st->status != SMB_DIRECT_CS_CONNECTED);
	if (rc)
		return -EINTR;

	goto again;
}

static void smb_direct_post_recv_credits(struct work_struct *work)
{
	struct smb_direct_transport *t = container_of(work,
		struct smb_direct_transport, post_recv_credits_work.work);
	struct smb_direct_recvmsg *recvmsg;
	int receive_credits, credits = 0;
	int ret;
	int use_free = 1;

	spin_lock(&t->receive_credit_lock);
	receive_credits = t->recv_credits;
	spin_unlock(&t->receive_credit_lock);

	if (receive_credits < t->recv_credit_target) {
		while (true) {
			if (use_free)
				recvmsg = get_free_recvmsg(t);
			else
				recvmsg = get_empty_recvmsg(t);
			if (!recvmsg) {
				if (use_free) {
					use_free = 0;
					continue;
				} else {
					break;
				}
			}

			recvmsg->type = SMB_DIRECT_MSG_DATA_TRANSFER;
			recvmsg->first_segment = false;

			ret = smb_direct_post_recv(t, recvmsg);
			if (ret) {
				pr_err("Can't post recv: %d\n", ret);
				put_recvmsg(t, recvmsg);
				break;
			}
			credits++;
		}
	}

	spin_lock(&t->receive_credit_lock);
	t->recv_credits += credits;
	t->count_avail_recvmsg -= credits;
	spin_unlock(&t->receive_credit_lock);

	spin_lock(&t->lock_new_recv_credits);
	t->new_recv_credits += credits;
	spin_unlock(&t->lock_new_recv_credits);

	if (credits)
		queue_work(smb_direct_wq, &t->send_immediate_work);
}

static void send_done(struct ib_cq *cq, struct ib_wc *wc)
{
	struct smb_direct_sendmsg *sendmsg, *sibling;
	struct smb_direct_transport *t;
	struct list_head *pos, *prev, *end;

	sendmsg = container_of(wc->wr_cqe, struct smb_direct_sendmsg, cqe);
	t = sendmsg->transport;

	ksmbd_debug(RDMA, "Send completed. status='%s (%d)', opcode=%d\n",
		    ib_wc_status_msg(wc->status), wc->status,
		    wc->opcode);

	if (wc->status != IB_WC_SUCCESS || wc->opcode != IB_WC_SEND) {
		pr_err("Send error. status='%s (%d)', opcode=%d\n",
		       ib_wc_status_msg(wc->status), wc->status,
		       wc->opcode);
		smb_direct_disconnect_rdma_connection(t);
	}

	if (sendmsg->num_sge > 1) {
		if (atomic_dec_and_test(&t->send_payload_pending))
			wake_up(&t->wait_send_payload_pending);
	} else {
		if (atomic_dec_and_test(&t->send_pending))
			wake_up(&t->wait_send_pending);
	}

	/*
	 * Iterate and free the list of messages in reverse; the list's
	 * head is invalid.
	 */
	for (pos = &sendmsg->list, prev = pos->prev, end = sendmsg->list.next;
	     prev != end; pos = prev, prev = prev->prev) {
		sibling = container_of(pos, struct smb_direct_sendmsg, list);
		smb_direct_free_sendmsg(t, sibling);
	}

	sibling = container_of(pos, struct smb_direct_sendmsg, list);
	smb_direct_free_sendmsg(t, sibling);
}

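/*
 * Atomically take the receive credits accumulated since the last send so
 * they can be granted to the peer in the next outgoing packet header.
 */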
static int manage_credits_prior_sending(struct smb_direct_transport *t)
{
	int new_credits;

	spin_lock(&t->lock_new_recv_credits);
	new_credits = t->new_recv_credits;
	t->new_recv_credits = 0;
	spin_unlock(&t->lock_new_recv_credits);

	return new_credits;
}

static int smb_direct_post_send(struct smb_direct_transport *t,
				struct ib_send_wr *wr)
{
	int ret;

	if (wr->num_sge > 1)
		atomic_inc(&t->send_payload_pending);
	else
		atomic_inc(&t->send_pending);

	ret = ib_post_send(t->qp, wr, NULL);
	if (ret) {
		pr_err("failed to post send: %d\n", ret);
		if (wr->num_sge > 1) {
			if (atomic_dec_and_test(&t->send_payload_pending))
				wake_up(&t->wait_send_payload_pending);
		} else {
			if (atomic_dec_and_test(&t->send_pending))
				wake_up(&t->wait_send_pending);
		}
		smb_direct_disconnect_rdma_connection(t);
	}
	return ret;
}

static void smb_direct_send_ctx_init(struct smb_direct_transport *t,
				     struct smb_direct_send_ctx *send_ctx,
				     bool need_invalidate_rkey,
				     unsigned int remote_key)
{
	INIT_LIST_HEAD(&send_ctx->msg_list);
	send_ctx->wr_cnt = 0;
	send_ctx->need_invalidate_rkey = need_invalidate_rkey;
	send_ctx->remote_key = remote_key;
}

static int smb_direct_flush_send_list(struct smb_direct_transport *t,
				      struct smb_direct_send_ctx *send_ctx,
				      bool is_last)
{
	struct smb_direct_sendmsg *first, *last;
	int ret;

	if (list_empty(&send_ctx->msg_list))
		return 0;

	first = list_first_entry(&send_ctx->msg_list,
				 struct smb_direct_sendmsg,
				 list);
	last = list_last_entry(&send_ctx->msg_list,
			       struct smb_direct_sendmsg,
			       list);

	last->wr.send_flags = IB_SEND_SIGNALED;
	last->wr.wr_cqe = &last->cqe;
	if (is_last && send_ctx->need_invalidate_rkey) {
		last->wr.opcode = IB_WR_SEND_WITH_INV;
		last->wr.ex.invalidate_rkey = send_ctx->remote_key;
	}

	ret = smb_direct_post_send(t, &first->wr);
	if (!ret) {
		smb_direct_send_ctx_init(t, send_ctx,
					 send_ctx->need_invalidate_rkey,
					 send_ctx->remote_key);
	} else {
		atomic_add(send_ctx->wr_cnt, &t->send_credits);
		wake_up(&t->wait_send_credits);
		list_for_each_entry_safe(first, last, &send_ctx->msg_list,
					 list) {
			smb_direct_free_sendmsg(t, first);
		}
	}
	return ret;
}

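/*
 * Reserve one unit from @credits, sleeping until either a unit becomes
 * available or the connection is torn down.
 */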
static int wait_for_credits(struct smb_direct_transport *t,
			    wait_queue_head_t *waitq, atomic_t *credits)
{
	int ret;

	do {
		if (atomic_dec_return(credits) >= 0)
			return 0;

		atomic_inc(credits);
		ret = wait_event_interruptible(*waitq,
					       atomic_read(credits) > 0 ||
					       t->status != SMB_DIRECT_CS_CONNECTED);

		if (t->status != SMB_DIRECT_CS_CONNECTED)
			return -ENOTCONN;
		else if (ret < 0)
			return ret;
	} while (true);
}

static int wait_for_send_credits(struct smb_direct_transport *t,
				 struct smb_direct_send_ctx *send_ctx)
{
	int ret;

	if (send_ctx &&
	    (send_ctx->wr_cnt >= 16 || atomic_read(&t->send_credits) <= 1)) {
		ret = smb_direct_flush_send_list(t, send_ctx, false);
		if (ret)
			return ret;
	}

	return wait_for_credits(t, &t->wait_send_credits, &t->send_credits);
}

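/*
 * Allocate a send message and fill in the SMB_DIRECT data-transfer header
 * for a payload of @size bytes, then DMA-map the header as the first SGE.
 */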
static int smb_direct_create_header(struct smb_direct_transport *t,
				    int size, int remaining_data_length,
				    struct smb_direct_sendmsg **sendmsg_out)
{
	struct smb_direct_sendmsg *sendmsg;
	struct smb_direct_data_transfer *packet;
	int header_length;
	int ret;

	sendmsg = smb_direct_alloc_sendmsg(t);
	if (IS_ERR(sendmsg))
		return PTR_ERR(sendmsg);

	/* Fill in the packet header */
	packet = (struct smb_direct_data_transfer *)sendmsg->packet;
	packet->credits_requested = cpu_to_le16(t->send_credit_target);
	packet->credits_granted = cpu_to_le16(manage_credits_prior_sending(t));

	packet->flags = 0;
	packet->reserved = 0;
	if (!size)
		packet->data_offset = 0;
	else
		packet->data_offset = cpu_to_le32(24);
	packet->data_length = cpu_to_le32(size);
	packet->remaining_data_length = cpu_to_le32(remaining_data_length);
	packet->padding = 0;

	ksmbd_debug(RDMA,
		    "credits_requested=%d credits_granted=%d data_offset=%d data_length=%d remaining_data_length=%d\n",
		    le16_to_cpu(packet->credits_requested),
		    le16_to_cpu(packet->credits_granted),
		    le32_to_cpu(packet->data_offset),
		    le32_to_cpu(packet->data_length),
		    le32_to_cpu(packet->remaining_data_length));

	/* Map the packet to DMA */
	header_length = sizeof(struct smb_direct_data_transfer);
	/* If this is a packet without payload, don't send padding */
	if (!size)
		header_length =
			offsetof(struct smb_direct_data_transfer, padding);

	sendmsg->sge[0].addr = ib_dma_map_single(t->cm_id->device,
						 (void *)packet,
						 header_length,
						 DMA_TO_DEVICE);
	ret = ib_dma_mapping_error(t->cm_id->device, sendmsg->sge[0].addr);
	if (ret) {
		smb_direct_free_sendmsg(t, sendmsg);
		return ret;
	}

	sendmsg->num_sge = 1;
	sendmsg->sge[0].length = header_length;
	sendmsg->sge[0].lkey = t->pd->local_dma_lkey;

	*sendmsg_out = sendmsg;
	return 0;
}

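/*
 * Build a scatterlist over the pages backing [buf, buf + size). Returns the
 * number of entries used, or -EINVAL if @nentries is too small for the span.
 */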
static int get_sg_list(void *buf, int size, struct scatterlist *sg_list, int nentries)
{
	bool high = is_vmalloc_addr(buf);
	struct page *page;
	int offset, len;
	int i = 0;

	if (nentries < get_buf_page_count(buf, size))
		return -EINVAL;

	offset = offset_in_page(buf);
	buf -= offset;
	while (size > 0) {
		len = min_t(int, PAGE_SIZE - offset, size);
		if (high)
			page = vmalloc_to_page(buf);
		else
			page = kmap_to_page(buf);

		if (!sg_list)
			return -EINVAL;
		sg_set_page(sg_list, page, len, offset);
		sg_list = sg_next(sg_list);

		buf += PAGE_SIZE;
		size -= len;
		offset = 0;
		i++;
	}
	return i;
}

static int get_mapped_sg_list(struct ib_device *device, void *buf, int size,
			      struct scatterlist *sg_list, int nentries,
			      enum dma_data_direction dir)
{
	int npages;

	npages = get_sg_list(buf, size, sg_list, nentries);
	if (npages <= 0)
		return -EINVAL;
	return ib_dma_map_sg(device, sg_list, npages, dir);
}

static int post_sendmsg(struct smb_direct_transport *t,
			struct smb_direct_send_ctx *send_ctx,
			struct smb_direct_sendmsg *msg)
{
	int i;

	for (i = 0; i < msg->num_sge; i++)
		ib_dma_sync_single_for_device(t->cm_id->device,
					      msg->sge[i].addr, msg->sge[i].length,
					      DMA_TO_DEVICE);

	msg->cqe.done = send_done;
	msg->wr.opcode = IB_WR_SEND;
	msg->wr.sg_list = &msg->sge[0];
	msg->wr.num_sge = msg->num_sge;
	msg->wr.next = NULL;

	if (send_ctx) {
		msg->wr.wr_cqe = NULL;
		msg->wr.send_flags = 0;
		if (!list_empty(&send_ctx->msg_list)) {
			struct smb_direct_sendmsg *last;

			last = list_last_entry(&send_ctx->msg_list,
					       struct smb_direct_sendmsg,
					       list);
			last->wr.next = &msg->wr;
		}
		list_add_tail(&msg->list, &send_ctx->msg_list);
		send_ctx->wr_cnt++;
		return 0;
	}

	msg->wr.wr_cqe = &msg->cqe;
	msg->wr.send_flags = IB_SEND_SIGNALED;
	return smb_direct_post_send(t, &msg->wr);
}

static int smb_direct_post_send_data(struct smb_direct_transport *t,
				     struct smb_direct_send_ctx *send_ctx,
				     struct kvec *iov, int niov,
				     int remaining_data_length)
{
	int i, j, ret;
	struct smb_direct_sendmsg *msg;
	int data_length;
	struct scatterlist sg[SMB_DIRECT_MAX_SEND_SGES - 1];

	ret = wait_for_send_credits(t, send_ctx);
	if (ret)
		return ret;

	data_length = 0;
	for (i = 0; i < niov; i++)
		data_length += iov[i].iov_len;

	ret = smb_direct_create_header(t, data_length, remaining_data_length,
				       &msg);
	if (ret) {
		atomic_inc(&t->send_credits);
		return ret;
	}

	for (i = 0; i < niov; i++) {
		struct ib_sge *sge;
		int sg_cnt;

		sg_init_table(sg, SMB_DIRECT_MAX_SEND_SGES - 1);
		sg_cnt = get_mapped_sg_list(t->cm_id->device,
					    iov[i].iov_base, iov[i].iov_len,
					    sg, SMB_DIRECT_MAX_SEND_SGES - 1,
					    DMA_TO_DEVICE);
		if (sg_cnt <= 0) {
			pr_err("failed to map buffer\n");
			ret = -ENOMEM;
			goto err;
		} else if (sg_cnt + msg->num_sge > SMB_DIRECT_MAX_SEND_SGES) {
			pr_err("buffer not fitted into sges\n");
			ret = -E2BIG;
			ib_dma_unmap_sg(t->cm_id->device, sg, sg_cnt,
					DMA_TO_DEVICE);
			goto err;
		}

		for (j = 0; j < sg_cnt; j++) {
			sge = &msg->sge[msg->num_sge];
			sge->addr = sg_dma_address(&sg[j]);
			sge->length = sg_dma_len(&sg[j]);
			sge->lkey = t->pd->local_dma_lkey;
			msg->num_sge++;
		}
	}

	ret = post_sendmsg(t, send_ctx, msg);
	if (ret)
		goto err;
	return 0;
err:
	smb_direct_free_sendmsg(t, msg);
	atomic_inc(&t->send_credits);
	return ret;
}

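/*
 * Send an SMB response. The RFC1002 length header is stripped and the
 * remaining iovecs are packed into SMB_DIRECT data-transfer messages of at
 * most max_send_size bytes each, chained through @send_ctx where possible.
 */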
static int smb_direct_writev(struct ksmbd_transport *t,
			     struct kvec *iov, int niovs, int buflen,
			     bool need_invalidate, unsigned int remote_key)
{
	struct smb_direct_transport *st = smb_trans_direct_transfort(t);
	int remaining_data_length;
	int start, i, j;
	int max_iov_size = st->max_send_size -
			sizeof(struct smb_direct_data_transfer);
	int ret;
	struct kvec vec;
	struct smb_direct_send_ctx send_ctx;

	if (st->status != SMB_DIRECT_CS_CONNECTED)
		return -ENOTCONN;

	//FIXME: skip RFC1002 header..
	buflen -= 4;
	iov[0].iov_base += 4;
	iov[0].iov_len -= 4;

	remaining_data_length = buflen;
	ksmbd_debug(RDMA, "Sending smb (RDMA): smb_len=%u\n", buflen);

	smb_direct_send_ctx_init(st, &send_ctx, need_invalidate, remote_key);
	start = i = 0;
	buflen = 0;
	while (true) {
		buflen += iov[i].iov_len;
		if (buflen > max_iov_size) {
			if (i > start) {
				remaining_data_length -=
					(buflen - iov[i].iov_len);
				ret = smb_direct_post_send_data(st, &send_ctx,
								&iov[start], i - start,
								remaining_data_length);
				if (ret)
					goto done;
			} else {
				/* iov[start] is too big, break it */
				int nvec = (buflen + max_iov_size - 1) /
						max_iov_size;

				for (j = 0; j < nvec; j++) {
					vec.iov_base =
						(char *)iov[start].iov_base +
						j * max_iov_size;
					vec.iov_len =
						min_t(int, max_iov_size,
						      buflen - max_iov_size * j);
					remaining_data_length -= vec.iov_len;
					ret = smb_direct_post_send_data(st, &send_ctx, &vec, 1,
									remaining_data_length);
					if (ret)
						goto done;
				}
				i++;
				if (i == niovs)
					break;
			}
			start = i;
			buflen = 0;
		} else {
			i++;
			if (i == niovs) {
				/* send out all remaining vecs */
				remaining_data_length -= buflen;
				ret = smb_direct_post_send_data(st, &send_ctx,
								&iov[start], i - start,
								remaining_data_length);
				if (ret)
					goto done;
				break;
			}
		}
	}

done:
	ret = smb_direct_flush_send_list(st, &send_ctx, true);

	/*
	 * As an optimization, we don't wait for individual I/O to finish
	 * before sending the next one. Send them all and wait for the
	 * pending send count to reach zero, which means all the I/Os have
	 * gone out and we are good to return.
	 */

	wait_event(st->wait_send_payload_pending,
		   atomic_read(&st->send_payload_pending) == 0);
	return ret;
}


static void read_write_done(struct ib_cq *cq, struct ib_wc *wc,
			    enum dma_data_direction dir)
{
	struct smb_direct_rdma_rw_msg *msg = container_of(wc->wr_cqe,
							  struct smb_direct_rdma_rw_msg, cqe);
	struct smb_direct_transport *t = msg->t;

	if (wc->status != IB_WC_SUCCESS) {
		pr_err("read/write error. opcode = %d, status = %s(%d)\n",
		       wc->opcode, ib_wc_status_msg(wc->status), wc->status);
		smb_direct_disconnect_rdma_connection(t);
	}

	if (atomic_inc_return(&t->rw_avail_ops) > 0)
		wake_up(&t->wait_rw_avail_ops);

	rdma_rw_ctx_destroy(&msg->rw_ctx, t->qp, t->qp->port,
			    msg->sg_list, msg->sgt.nents, dir);
	sg_free_table_chained(&msg->sgt, SG_CHUNK_SIZE);
	complete(msg->completion);
	kfree(msg);
}

static void read_done(struct ib_cq *cq, struct ib_wc *wc)
{
	read_write_done(cq, wc, DMA_FROM_DEVICE);
}

static void write_done(struct ib_cq *cq, struct ib_wc *wc)
{
	read_write_done(cq, wc, DMA_TO_DEVICE);
}

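/*
 * Perform a single RDMA read or write against the buffer the client
 * advertised via @remote_key and @remote_offset, blocking until the
 * operation completes.
 */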
1352static int smb_direct_rdma_xmit(struct smb_direct_transport *t, void *buf,
Namjae Jeon070fb212021-05-26 17:57:12 +09001353 int buf_len, u32 remote_key, u64 remote_offset,
1354 u32 remote_len, bool is_read)
Namjae Jeon0626e662021-03-16 13:07:11 +09001355{
1356 struct smb_direct_rdma_rw_msg *msg;
1357 int ret;
1358 DECLARE_COMPLETION_ONSTACK(completion);
1359 struct ib_send_wr *first_wr = NULL;
1360
1361 ret = wait_for_credits(t, &t->wait_rw_avail_ops, &t->rw_avail_ops);
1362 if (ret < 0)
1363 return ret;
1364
1365 /* TODO: mempool */
1366 msg = kmalloc(offsetof(struct smb_direct_rdma_rw_msg, sg_list) +
Namjae Jeon070fb212021-05-26 17:57:12 +09001367 sizeof(struct scatterlist) * SG_CHUNK_SIZE, GFP_KERNEL);
Namjae Jeon0626e662021-03-16 13:07:11 +09001368 if (!msg) {
1369 atomic_inc(&t->rw_avail_ops);
1370 return -ENOMEM;
1371 }
1372
1373 msg->sgt.sgl = &msg->sg_list[0];
1374 ret = sg_alloc_table_chained(&msg->sgt,
Hyunchul Lee8ad8dc32021-06-25 07:02:07 +09001375 get_buf_page_count(buf, buf_len),
Namjae Jeon070fb212021-05-26 17:57:12 +09001376 msg->sg_list, SG_CHUNK_SIZE);
Namjae Jeon0626e662021-03-16 13:07:11 +09001377 if (ret) {
1378 atomic_inc(&t->rw_avail_ops);
1379 kfree(msg);
1380 return -ENOMEM;
1381 }
1382
1383 ret = get_sg_list(buf, buf_len, msg->sgt.sgl, msg->sgt.orig_nents);
1384 if (ret <= 0) {
Namjae Jeonbde16942021-06-28 15:23:19 +09001385 pr_err("failed to get pages\n");
Namjae Jeon0626e662021-03-16 13:07:11 +09001386 goto err;
1387 }
1388
1389 ret = rdma_rw_ctx_init(&msg->rw_ctx, t->qp, t->qp->port,
Hyunchul Lee8ad8dc32021-06-25 07:02:07 +09001390 msg->sg_list, get_buf_page_count(buf, buf_len),
Namjae Jeon070fb212021-05-26 17:57:12 +09001391 0, remote_offset, remote_key,
1392 is_read ? DMA_FROM_DEVICE : DMA_TO_DEVICE);
Namjae Jeon0626e662021-03-16 13:07:11 +09001393 if (ret < 0) {
Namjae Jeonbde16942021-06-28 15:23:19 +09001394 pr_err("failed to init rdma_rw_ctx: %d\n", ret);
Namjae Jeon0626e662021-03-16 13:07:11 +09001395 goto err;
1396 }
1397
1398 msg->t = t;
1399 msg->cqe.done = is_read ? read_done : write_done;
1400 msg->completion = &completion;
1401 first_wr = rdma_rw_ctx_wrs(&msg->rw_ctx, t->qp, t->qp->port,
Namjae Jeon070fb212021-05-26 17:57:12 +09001402 &msg->cqe, NULL);
Namjae Jeon0626e662021-03-16 13:07:11 +09001403
1404 ret = ib_post_send(t->qp, first_wr, NULL);
1405 if (ret) {
Namjae Jeonbde16942021-06-28 15:23:19 +09001406 pr_err("failed to post send wr: %d\n", ret);
Namjae Jeon0626e662021-03-16 13:07:11 +09001407 goto err;
1408 }
1409
1410 wait_for_completion(&completion);
1411 return 0;
1412
1413err:
1414 atomic_inc(&t->rw_avail_ops);
1415 if (first_wr)
1416 rdma_rw_ctx_destroy(&msg->rw_ctx, t->qp, t->qp->port,
Namjae Jeon070fb212021-05-26 17:57:12 +09001417 msg->sg_list, msg->sgt.nents,
1418 is_read ? DMA_FROM_DEVICE : DMA_TO_DEVICE);
Namjae Jeon0626e662021-03-16 13:07:11 +09001419 sg_free_table_chained(&msg->sgt, SG_CHUNK_SIZE);
1420 kfree(msg);
1421 return ret;
Namjae Jeon0626e662021-03-16 13:07:11 +09001422}
1423
Namjae Jeon64b39f42021-03-30 14:25:35 +09001424static int smb_direct_rdma_write(struct ksmbd_transport *t, void *buf,
Namjae Jeon070fb212021-05-26 17:57:12 +09001425 unsigned int buflen, u32 remote_key,
1426 u64 remote_offset, u32 remote_len)
Namjae Jeon0626e662021-03-16 13:07:11 +09001427{
Namjae Jeon02d4b4a2021-06-25 13:43:01 +09001428 return smb_direct_rdma_xmit(smb_trans_direct_transfort(t), buf, buflen,
Namjae Jeon070fb212021-05-26 17:57:12 +09001429 remote_key, remote_offset,
1430 remote_len, false);
Namjae Jeon0626e662021-03-16 13:07:11 +09001431}
1432
Namjae Jeon64b39f42021-03-30 14:25:35 +09001433static int smb_direct_rdma_read(struct ksmbd_transport *t, void *buf,
Namjae Jeon070fb212021-05-26 17:57:12 +09001434 unsigned int buflen, u32 remote_key,
1435 u64 remote_offset, u32 remote_len)
Namjae Jeon0626e662021-03-16 13:07:11 +09001436{
Namjae Jeon02d4b4a2021-06-25 13:43:01 +09001437 return smb_direct_rdma_xmit(smb_trans_direct_transfort(t), buf, buflen,
Namjae Jeon070fb212021-05-26 17:57:12 +09001438 remote_key, remote_offset,
1439 remote_len, true);
Namjae Jeon0626e662021-03-16 13:07:11 +09001440}
1441
1442static void smb_direct_disconnect(struct ksmbd_transport *t)
1443{
Namjae Jeon02d4b4a2021-06-25 13:43:01 +09001444 struct smb_direct_transport *st = smb_trans_direct_transfort(t);
Namjae Jeon0626e662021-03-16 13:07:11 +09001445
1446 ksmbd_debug(RDMA, "Disconnecting cm_id=%p\n", st->cm_id);
1447
Hyunchul Lee323b1ea2021-08-12 10:23:08 +09001448 smb_direct_disconnect_rdma_work(&st->disconnect_work);
Namjae Jeon0626e662021-03-16 13:07:11 +09001449 wait_event_interruptible(st->wait_status,
Namjae Jeon070fb212021-05-26 17:57:12 +09001450 st->status == SMB_DIRECT_CS_DISCONNECTED);
Namjae Jeon0626e662021-03-16 13:07:11 +09001451 free_transport(st);
1452}
1453
1454static int smb_direct_cm_handler(struct rdma_cm_id *cm_id,
Namjae Jeon070fb212021-05-26 17:57:12 +09001455 struct rdma_cm_event *event)
Namjae Jeon0626e662021-03-16 13:07:11 +09001456{
1457 struct smb_direct_transport *t = cm_id->context;
1458
1459 ksmbd_debug(RDMA, "RDMA CM event. cm_id=%p event=%s (%d)\n",
Namjae Jeon070fb212021-05-26 17:57:12 +09001460 cm_id, rdma_event_msg(event->event), event->event);
Namjae Jeon0626e662021-03-16 13:07:11 +09001461
1462 switch (event->event) {
1463 case RDMA_CM_EVENT_ESTABLISHED: {
1464 t->status = SMB_DIRECT_CS_CONNECTED;
1465 wake_up_interruptible(&t->wait_status);
1466 break;
1467 }
1468 case RDMA_CM_EVENT_DEVICE_REMOVAL:
1469 case RDMA_CM_EVENT_DISCONNECTED: {
1470 t->status = SMB_DIRECT_CS_DISCONNECTED;
1471 wake_up_interruptible(&t->wait_status);
1472 wake_up_interruptible(&t->wait_reassembly_queue);
1473 wake_up(&t->wait_send_credits);
1474 break;
1475 }
1476 case RDMA_CM_EVENT_CONNECT_ERROR: {
1477 t->status = SMB_DIRECT_CS_DISCONNECTED;
1478 wake_up_interruptible(&t->wait_status);
1479 break;
1480 }
1481 default:
Namjae Jeonbde16942021-06-28 15:23:19 +09001482 pr_err("Unexpected RDMA CM event. cm_id=%p, event=%s (%d)\n",
1483 cm_id, rdma_event_msg(event->event),
1484 event->event);
Namjae Jeon0626e662021-03-16 13:07:11 +09001485 break;
1486 }
1487 return 0;
1488}
1489
1490static void smb_direct_qpair_handler(struct ib_event *event, void *context)
1491{
1492 struct smb_direct_transport *t = context;
1493
1494 ksmbd_debug(RDMA, "Received QP event. cm_id=%p, event=%s (%d)\n",
Namjae Jeon070fb212021-05-26 17:57:12 +09001495 t->cm_id, ib_event_msg(event->event), event->event);
Namjae Jeon0626e662021-03-16 13:07:11 +09001496
1497 switch (event->event) {
1498 case IB_EVENT_CQ_ERR:
1499 case IB_EVENT_QP_FATAL:
1500 smb_direct_disconnect_rdma_connection(t);
1501 break;
1502 default:
1503 break;
1504 }
1505}
1506
static int smb_direct_send_negotiate_response(struct smb_direct_transport *t,
					      int failed)
{
	struct smb_direct_sendmsg *sendmsg;
	struct smb_direct_negotiate_resp *resp;
	int ret;

	sendmsg = smb_direct_alloc_sendmsg(t);
	if (IS_ERR(sendmsg))
		return -ENOMEM;

	resp = (struct smb_direct_negotiate_resp *)sendmsg->packet;
	if (failed) {
		memset(resp, 0, sizeof(*resp));
		resp->min_version = cpu_to_le16(0x0100);
		resp->max_version = cpu_to_le16(0x0100);
		resp->status = STATUS_NOT_SUPPORTED;
	} else {
		resp->status = STATUS_SUCCESS;
		resp->min_version = SMB_DIRECT_VERSION_LE;
		resp->max_version = SMB_DIRECT_VERSION_LE;
		resp->negotiated_version = SMB_DIRECT_VERSION_LE;
		resp->reserved = 0;
		resp->credits_requested =
				cpu_to_le16(t->send_credit_target);
		resp->credits_granted = cpu_to_le16(manage_credits_prior_sending(t));
		resp->max_readwrite_size = cpu_to_le32(t->max_rdma_rw_size);
		resp->preferred_send_size = cpu_to_le32(t->max_send_size);
		resp->max_receive_size = cpu_to_le32(t->max_recv_size);
		resp->max_fragmented_size =
				cpu_to_le32(t->max_fragmented_recv_size);
	}

	sendmsg->sge[0].addr = ib_dma_map_single(t->cm_id->device,
						 (void *)resp, sizeof(*resp),
						 DMA_TO_DEVICE);
	ret = ib_dma_mapping_error(t->cm_id->device, sendmsg->sge[0].addr);
	if (ret) {
		smb_direct_free_sendmsg(t, sendmsg);
		return ret;
	}

	sendmsg->num_sge = 1;
	sendmsg->sge[0].length = sizeof(*resp);
	sendmsg->sge[0].lkey = t->pd->local_dma_lkey;

	ret = post_sendmsg(t, NULL, sendmsg);
	if (ret) {
		smb_direct_free_sendmsg(t, sendmsg);
		return ret;
	}

	wait_event(t->wait_send_pending,
		   atomic_read(&t->send_pending) == 0);
	return 0;
}

static int smb_direct_accept_client(struct smb_direct_transport *t)
{
	struct rdma_conn_param conn_param;
	struct ib_port_immutable port_immutable;
	u32 ird_ord_hdr[2];
	int ret;

	memset(&conn_param, 0, sizeof(conn_param));
	conn_param.initiator_depth = min_t(u8, t->cm_id->device->attrs.max_qp_rd_atom,
					   SMB_DIRECT_CM_INITIATOR_DEPTH);
	conn_param.responder_resources = 0;

	t->cm_id->device->ops.get_port_immutable(t->cm_id->device,
						 t->cm_id->port_num,
						 &port_immutable);
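	/*
	 * For iWARP, the IRD/ORD values are not exchanged in-band as they
	 * are on InfiniBand, so they are advertised to the peer in the
	 * private data of the accept (the Linux SMB client's smbdirect
	 * code does the same on connect).
	 */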
	if (port_immutable.core_cap_flags & RDMA_CORE_PORT_IWARP) {
		ird_ord_hdr[0] = conn_param.responder_resources;
		ird_ord_hdr[1] = 1;
		conn_param.private_data = ird_ord_hdr;
		conn_param.private_data_len = sizeof(ird_ord_hdr);
	} else {
		conn_param.private_data = NULL;
		conn_param.private_data_len = 0;
	}
	conn_param.retry_count = SMB_DIRECT_CM_RETRY;
	conn_param.rnr_retry_count = SMB_DIRECT_CM_RNR_RETRY;
	conn_param.flow_control = 0;

	ret = rdma_accept(t->cm_id, &conn_param);
	if (ret) {
		pr_err("error at rdma_accept: %d\n", ret);
		return ret;
	}

	wait_event_interruptible(t->wait_status,
				 t->status != SMB_DIRECT_CS_NEW);
	if (t->status != SMB_DIRECT_CS_CONNECTED)
		return -ENOTCONN;
	return 0;
}

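/*
 * Server-side SMB_DIRECT negotiation: a receive for the client's
 * negotiate request is posted before the connection is accepted, then
 * the handshake waits (up to SMB_DIRECT_NEGOTIATE_TIMEOUT seconds) for
 * the request and sends the response.
 */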
static int smb_direct_negotiate(struct smb_direct_transport *t)
{
	int ret;
	struct smb_direct_recvmsg *recvmsg;
	struct smb_direct_negotiate_req *req;

	recvmsg = get_free_recvmsg(t);
	if (!recvmsg)
		return -ENOMEM;
	recvmsg->type = SMB_DIRECT_MSG_NEGOTIATE_REQ;

	ret = smb_direct_post_recv(t, recvmsg);
	if (ret) {
		pr_err("Can't post recv: %d\n", ret);
		goto out;
	}

	t->negotiation_requested = false;
	ret = smb_direct_accept_client(t);
	if (ret) {
		pr_err("Can't accept client\n");
		goto out;
	}

	smb_direct_post_recv_credits(&t->post_recv_credits_work.work);

	ksmbd_debug(RDMA, "Waiting for SMB_DIRECT negotiate request\n");
	ret = wait_event_interruptible_timeout(t->wait_status,
					       t->negotiation_requested ||
					       t->status == SMB_DIRECT_CS_DISCONNECTED,
					       SMB_DIRECT_NEGOTIATE_TIMEOUT * HZ);
	if (ret <= 0 || t->status == SMB_DIRECT_CS_DISCONNECTED) {
		ret = ret < 0 ? ret : -ETIMEDOUT;
		goto out;
	}

	ret = smb_direct_check_recvmsg(recvmsg);
	if (ret == -ECONNABORTED)
		goto out;

	req = (struct smb_direct_negotiate_req *)recvmsg->packet;
	t->max_recv_size = min_t(int, t->max_recv_size,
				 le32_to_cpu(req->preferred_send_size));
	t->max_send_size = min_t(int, t->max_send_size,
				 le32_to_cpu(req->max_receive_size));
	t->max_fragmented_send_size =
			le32_to_cpu(req->max_fragmented_size);

	ret = smb_direct_send_negotiate_response(t, ret);
out:
	if (recvmsg)
		put_recvmsg(t, recvmsg);
	return ret;
}

static int smb_direct_init_params(struct smb_direct_transport *t,
				  struct ib_qp_cap *cap)
{
	struct ib_device *device = t->cm_id->device;
	int max_send_sges, max_pages, max_rw_wrs, max_send_wrs;

	/* Two more SGEs are needed: one because the SMB_DIRECT header is
	 * mapped separately, and one because a send buffer may not be
	 * page aligned.
	 */
	t->max_send_size = smb_direct_max_send_size;
	max_send_sges = DIV_ROUND_UP(t->max_send_size, PAGE_SIZE) + 2;
	if (max_send_sges > SMB_DIRECT_MAX_SEND_SGES) {
		pr_err("max_send_size %d is too large\n", t->max_send_size);
		return -EINVAL;
	}
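
	/*
	 * Illustratively, with the default 8192-byte max send size and
	 * 4 KiB pages: DIV_ROUND_UP(8192, 4096) + 2 = 4 SGEs, well within
	 * SMB_DIRECT_MAX_SEND_SGES (8).
	 */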

	/*
	 * Allow smb_direct_max_outstanding_rw_ops in-flight RDMA
	 * read/write operations. The HCA guarantees at least max_send_sge
	 * SGEs for an RDMA read/write work request, and if memory
	 * registration is used, reg_mr and local_inv WRs are needed for
	 * each read/write as well.
	 */
	t->max_rdma_rw_size = smb_direct_max_read_write_size;
	max_pages = DIV_ROUND_UP(t->max_rdma_rw_size, PAGE_SIZE) + 1;
	max_rw_wrs = DIV_ROUND_UP(max_pages, SMB_DIRECT_MAX_SEND_SGES);
	max_rw_wrs += rdma_rw_mr_factor(device, t->cm_id->port_num,
					max_pages) * 2;
	max_rw_wrs *= smb_direct_max_outstanding_rw_ops;
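
	/*
	 * Worked example for the defaults (1 MiB max read/write, 4 KiB
	 * pages, 8 outstanding ops): max_pages = 257, so the SGE-only
	 * term is DIV_ROUND_UP(257, 8) = 33 WRs per operation, plus a
	 * device-dependent number of reg_mr/local_inv WRs from
	 * rdma_rw_mr_factor(), all multiplied by the 8 outstanding ops.
	 */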

	max_send_wrs = smb_direct_send_credit_target + max_rw_wrs;
	if (max_send_wrs > device->attrs.max_cqe ||
	    max_send_wrs > device->attrs.max_qp_wr) {
		pr_err("consider lowering send_credit_target = %d, or max_outstanding_rw_ops = %d\n",
		       smb_direct_send_credit_target,
		       smb_direct_max_outstanding_rw_ops);
		pr_err("Possible CQE overrun, device reporting max_cqe %d max_qp_wr %d\n",
		       device->attrs.max_cqe, device->attrs.max_qp_wr);
		return -EINVAL;
	}

	if (smb_direct_receive_credit_max > device->attrs.max_cqe ||
	    smb_direct_receive_credit_max > device->attrs.max_qp_wr) {
		pr_err("consider lowering receive_credit_max = %d\n",
		       smb_direct_receive_credit_max);
		pr_err("Possible CQE overrun, device reporting max_cqe %d max_qp_wr %d\n",
		       device->attrs.max_cqe, device->attrs.max_qp_wr);
		return -EINVAL;
	}

	if (device->attrs.max_send_sge < SMB_DIRECT_MAX_SEND_SGES) {
		pr_err("warning: device max_send_sge = %d too small\n",
		       device->attrs.max_send_sge);
		return -EINVAL;
	}
	if (device->attrs.max_recv_sge < SMB_DIRECT_MAX_RECV_SGES) {
		pr_err("warning: device max_recv_sge = %d too small\n",
		       device->attrs.max_recv_sge);
		return -EINVAL;
	}

	t->recv_credits = 0;
	t->count_avail_recvmsg = 0;

	t->recv_credit_max = smb_direct_receive_credit_max;
	t->recv_credit_target = 10;
	t->new_recv_credits = 0;

	t->send_credit_target = smb_direct_send_credit_target;
	atomic_set(&t->send_credits, 0);
	atomic_set(&t->rw_avail_ops, smb_direct_max_outstanding_rw_ops);

	t->max_send_size = smb_direct_max_send_size;
	t->max_recv_size = smb_direct_max_receive_size;
	t->max_fragmented_recv_size = smb_direct_max_fragmented_recv_size;

	cap->max_send_wr = max_send_wrs;
	cap->max_recv_wr = t->recv_credit_max;
	cap->max_send_sge = SMB_DIRECT_MAX_SEND_SGES;
	cap->max_recv_sge = SMB_DIRECT_MAX_RECV_SGES;
	cap->max_inline_data = 0;
	cap->max_rdma_ctxs = 0;
	return 0;
}

static void smb_direct_destroy_pools(struct smb_direct_transport *t)
{
	struct smb_direct_recvmsg *recvmsg;

	while ((recvmsg = get_free_recvmsg(t)))
		mempool_free(recvmsg, t->recvmsg_mempool);
	while ((recvmsg = get_empty_recvmsg(t)))
		mempool_free(recvmsg, t->recvmsg_mempool);

	mempool_destroy(t->recvmsg_mempool);
	t->recvmsg_mempool = NULL;

	kmem_cache_destroy(t->recvmsg_cache);
	t->recvmsg_cache = NULL;

	mempool_destroy(t->sendmsg_mempool);
	t->sendmsg_mempool = NULL;

	kmem_cache_destroy(t->sendmsg_cache);
	t->sendmsg_cache = NULL;
}

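/*
 * Pre-allocate the send/receive message pools. The mempools are sized
 * to the credit limits, so that a message backed by an already-granted
 * credit can still be allocated under memory pressure.
 */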
static int smb_direct_create_pools(struct smb_direct_transport *t)
{
	char name[80];
	int i;
	struct smb_direct_recvmsg *recvmsg;

	snprintf(name, sizeof(name), "smb_direct_rqst_pool_%p", t);
	t->sendmsg_cache = kmem_cache_create(name,
					     sizeof(struct smb_direct_sendmsg) +
					      sizeof(struct smb_direct_negotiate_resp),
					     0, SLAB_HWCACHE_ALIGN, NULL);
	if (!t->sendmsg_cache)
		return -ENOMEM;

	t->sendmsg_mempool = mempool_create(t->send_credit_target,
					    mempool_alloc_slab, mempool_free_slab,
					    t->sendmsg_cache);
	if (!t->sendmsg_mempool)
		goto err;

	snprintf(name, sizeof(name), "smb_direct_resp_%p", t);
	t->recvmsg_cache = kmem_cache_create(name,
					     sizeof(struct smb_direct_recvmsg) +
					      t->max_recv_size,
					     0, SLAB_HWCACHE_ALIGN, NULL);
	if (!t->recvmsg_cache)
		goto err;

	t->recvmsg_mempool =
		mempool_create(t->recv_credit_max, mempool_alloc_slab,
			       mempool_free_slab, t->recvmsg_cache);
	if (!t->recvmsg_mempool)
		goto err;

	INIT_LIST_HEAD(&t->recvmsg_queue);

	for (i = 0; i < t->recv_credit_max; i++) {
		recvmsg = mempool_alloc(t->recvmsg_mempool, GFP_KERNEL);
		if (!recvmsg)
			goto err;
		recvmsg->transport = t;
		list_add(&recvmsg->list, &t->recvmsg_queue);
	}
	t->count_avail_recvmsg = t->recv_credit_max;

	return 0;
err:
	smb_direct_destroy_pools(t);
	return -ENOMEM;
}

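/*
 * Create the PD, the send/receive completion queues, and the RC QP.
 * Both CQs use IB_POLL_WORKQUEUE, so completion handlers run in
 * workqueue context and may sleep.
 */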
static int smb_direct_create_qpair(struct smb_direct_transport *t,
				   struct ib_qp_cap *cap)
{
	int ret;
	struct ib_qp_init_attr qp_attr;

	t->pd = ib_alloc_pd(t->cm_id->device, 0);
	if (IS_ERR(t->pd)) {
		pr_err("Can't create RDMA PD\n");
		ret = PTR_ERR(t->pd);
		t->pd = NULL;
		return ret;
	}

	t->send_cq = ib_alloc_cq(t->cm_id->device, t,
				 t->send_credit_target, 0, IB_POLL_WORKQUEUE);
	if (IS_ERR(t->send_cq)) {
		pr_err("Can't create RDMA send CQ\n");
		ret = PTR_ERR(t->send_cq);
		t->send_cq = NULL;
		goto err;
	}

	t->recv_cq = ib_alloc_cq(t->cm_id->device, t,
				 cap->max_send_wr + cap->max_rdma_ctxs,
				 0, IB_POLL_WORKQUEUE);
	if (IS_ERR(t->recv_cq)) {
		pr_err("Can't create RDMA recv CQ\n");
		ret = PTR_ERR(t->recv_cq);
		t->recv_cq = NULL;
		goto err;
	}

	memset(&qp_attr, 0, sizeof(qp_attr));
	qp_attr.event_handler = smb_direct_qpair_handler;
	qp_attr.qp_context = t;
	qp_attr.cap = *cap;
	qp_attr.sq_sig_type = IB_SIGNAL_REQ_WR;
	qp_attr.qp_type = IB_QPT_RC;
	qp_attr.send_cq = t->send_cq;
	qp_attr.recv_cq = t->recv_cq;
	qp_attr.port_num = ~0;

	ret = rdma_create_qp(t->cm_id, t->pd, &qp_attr);
	if (ret) {
		pr_err("Can't create RDMA QP: %d\n", ret);
		goto err;
	}

	t->qp = t->cm_id->qp;
	t->cm_id->event_handler = smb_direct_cm_handler;

	return 0;
err:
	if (t->qp) {
		ib_destroy_qp(t->qp);
		t->qp = NULL;
	}
	if (t->recv_cq) {
		ib_destroy_cq(t->recv_cq);
		t->recv_cq = NULL;
	}
	if (t->send_cq) {
		ib_destroy_cq(t->send_cq);
		t->send_cq = NULL;
	}
	if (t->pd) {
		ib_dealloc_pd(t->pd);
		t->pd = NULL;
	}
	return ret;
}

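/*
 * Bring a new connection up to the point where SMB traffic can flow:
 * compute the transport parameters, pre-allocate the message pools,
 * create the PD/CQs/QP, then run SMB_DIRECT negotiation with the
 * client.
 */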
static int smb_direct_prepare(struct ksmbd_transport *t)
{
	struct smb_direct_transport *st = smb_trans_direct_transfort(t);
	int ret;
	struct ib_qp_cap qp_cap;

	ret = smb_direct_init_params(st, &qp_cap);
	if (ret) {
		pr_err("Can't configure RDMA parameters\n");
		return ret;
	}

	ret = smb_direct_create_pools(st);
	if (ret) {
		pr_err("Can't init RDMA pool: %d\n", ret);
		return ret;
	}

	ret = smb_direct_create_qpair(st, &qp_cap);
	if (ret) {
		pr_err("Can't accept RDMA client: %d\n", ret);
		return ret;
	}

	ret = smb_direct_negotiate(st);
	if (ret) {
		pr_err("Can't negotiate: %d\n", ret);
		return ret;
	}

	st->status = SMB_DIRECT_CS_CONNECTED;
	return 0;
}

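/*
 * RDMA reads/writes of upper-layer payloads rely on fast registration
 * work requests (FRWR), so the device must support memory management
 * extensions and report a non-zero fast-registration page-list length.
 */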
static bool rdma_frwr_is_supported(struct ib_device_attr *attrs)
{
	if (!(attrs->device_cap_flags & IB_DEVICE_MEM_MGT_EXTENSIONS))
		return false;
	if (attrs->max_fast_reg_page_list_len == 0)
		return false;
	return true;
}

static int smb_direct_handle_connect_request(struct rdma_cm_id *new_cm_id)
{
	struct smb_direct_transport *t;

	if (!rdma_frwr_is_supported(&new_cm_id->device->attrs)) {
		ksmbd_debug(RDMA,
			    "Fast Registration Work Requests are not supported. device capabilities=%llx\n",
			    new_cm_id->device->attrs.device_cap_flags);
		return -EPROTONOSUPPORT;
	}

	t = alloc_transport(new_cm_id);
	if (!t)
		return -ENOMEM;

	KSMBD_TRANS(t)->handler = kthread_run(ksmbd_conn_handler_loop,
					      KSMBD_TRANS(t)->conn, "ksmbd:r%u",
					      smb_direct_port);
	if (IS_ERR(KSMBD_TRANS(t)->handler)) {
		int ret = PTR_ERR(KSMBD_TRANS(t)->handler);

		pr_err("Can't start thread\n");
		free_transport(t);
		return ret;
	}

	return 0;
}

static int smb_direct_listen_handler(struct rdma_cm_id *cm_id,
				     struct rdma_cm_event *event)
{
	switch (event->event) {
	case RDMA_CM_EVENT_CONNECT_REQUEST: {
		int ret = smb_direct_handle_connect_request(cm_id);

		if (ret) {
			pr_err("Can't create transport: %d\n", ret);
			return ret;
		}

		ksmbd_debug(RDMA, "Received connection request. cm_id=%p\n",
			    cm_id);
		break;
	}
	default:
		pr_err("Unexpected listen event. cm_id=%p, event=%s (%d)\n",
		       cm_id, rdma_event_msg(event->event), event->event);
		break;
	}
	return 0;
}

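/*
 * Set up the RDMA CM listener. The create_id/bind_addr/listen sequence
 * mirrors socket()/bind()/listen() for TCP; incoming connection
 * requests are delivered to smb_direct_listen_handler().
 */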
static int smb_direct_listen(int port)
{
	int ret;
	struct rdma_cm_id *cm_id;
	struct sockaddr_in sin = {
		.sin_family = AF_INET,
		.sin_addr.s_addr = htonl(INADDR_ANY),
		.sin_port = htons(port),
	};

	cm_id = rdma_create_id(&init_net, smb_direct_listen_handler,
			       &smb_direct_listener, RDMA_PS_TCP, IB_QPT_RC);
	if (IS_ERR(cm_id)) {
		pr_err("Can't create cm id: %ld\n", PTR_ERR(cm_id));
		return PTR_ERR(cm_id);
	}

	ret = rdma_bind_addr(cm_id, (struct sockaddr *)&sin);
	if (ret) {
		pr_err("Can't bind: %d\n", ret);
		goto err;
	}

	smb_direct_listener.cm_id = cm_id;

	ret = rdma_listen(cm_id, 10);
	if (ret) {
		pr_err("Can't listen: %d\n", ret);
		goto err;
	}
	return 0;
err:
	smb_direct_listener.cm_id = NULL;
	rdma_destroy_id(cm_id);
	return ret;
}

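/*
 * The IB core invokes .add/.remove for every RDMA device as it comes
 * and goes. ksmbd keeps its own list of usable devices so that
 * ksmbd_rdma_capable_netdev() can match net_devices against them.
 */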
static int smb_direct_ib_client_add(struct ib_device *ib_dev)
{
	struct smb_direct_device *smb_dev;

	/* Use port 5445 if the device type is iWARP (not InfiniBand) */
	if (ib_dev->node_type != RDMA_NODE_IB_CA)
		smb_direct_port = SMB_DIRECT_PORT_IWARP;

	if (!ib_dev->ops.get_netdev ||
	    !rdma_frwr_is_supported(&ib_dev->attrs))
		return 0;

	smb_dev = kzalloc(sizeof(*smb_dev), GFP_KERNEL);
	if (!smb_dev)
		return -ENOMEM;
	smb_dev->ib_dev = ib_dev;

	write_lock(&smb_direct_device_lock);
	list_add(&smb_dev->list, &smb_direct_device_list);
	write_unlock(&smb_direct_device_lock);

	ksmbd_debug(RDMA, "ib device added: name %s\n", ib_dev->name);
	return 0;
}

static void smb_direct_ib_client_remove(struct ib_device *ib_dev,
					void *client_data)
{
	struct smb_direct_device *smb_dev, *tmp;

	write_lock(&smb_direct_device_lock);
	list_for_each_entry_safe(smb_dev, tmp, &smb_direct_device_list, list) {
		if (smb_dev->ib_dev == ib_dev) {
			list_del(&smb_dev->list);
			kfree(smb_dev);
			break;
		}
	}
	write_unlock(&smb_direct_device_lock);
}

static struct ib_client smb_direct_ib_client = {
	.name = "ksmbd_smb_direct_ib",
	.add = smb_direct_ib_client_add,
	.remove = smb_direct_ib_client_remove,
};

int ksmbd_rdma_init(void)
{
	int ret;

	smb_direct_listener.cm_id = NULL;

	ret = ib_register_client(&smb_direct_ib_client);
	if (ret) {
		pr_err("failed to ib_register_client\n");
		return ret;
	}

	/* When a client runs out of send credits, the server grants new
	 * credits by sending a packet through this high-priority queue.
	 * This avoids a situation where a client cannot send packets for
	 * lack of credits.
	 */
	smb_direct_wq = alloc_workqueue("ksmbd-smb_direct-wq",
					WQ_HIGHPRI | WQ_MEM_RECLAIM, 0);
	if (!smb_direct_wq)
		return -ENOMEM;

	ret = smb_direct_listen(smb_direct_port);
	if (ret) {
		destroy_workqueue(smb_direct_wq);
		smb_direct_wq = NULL;
		pr_err("Can't listen: %d\n", ret);
		return ret;
	}

	ksmbd_debug(RDMA, "init RDMA listener. cm_id=%p\n",
		    smb_direct_listener.cm_id);
	return 0;
}

void ksmbd_rdma_destroy(void)
{
	if (!smb_direct_listener.cm_id)
		return;

	ib_unregister_client(&smb_direct_ib_client);
	rdma_destroy_id(smb_direct_listener.cm_id);

	smb_direct_listener.cm_id = NULL;

	if (smb_direct_wq) {
		destroy_workqueue(smb_direct_wq);
		smb_direct_wq = NULL;
	}
}

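/*
 * A netdev is considered RDMA-capable if one of the registered RDMA
 * devices exposes it as a port or, failing that, if the IB core can
 * map the netdev to an RDMA device that supports FRWR.
 */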
bool ksmbd_rdma_capable_netdev(struct net_device *netdev)
{
	struct smb_direct_device *smb_dev;
	int i;
	bool rdma_capable = false;

	read_lock(&smb_direct_device_lock);
	list_for_each_entry(smb_dev, &smb_direct_device_list, list) {
		for (i = 0; i < smb_dev->ib_dev->phys_port_cnt; i++) {
			struct net_device *ndev;

			ndev = smb_dev->ib_dev->ops.get_netdev(smb_dev->ib_dev,
							       i + 1);
			if (!ndev)
				continue;

			if (ndev == netdev) {
				dev_put(ndev);
				rdma_capable = true;
				goto out;
			}
			dev_put(ndev);
		}
	}
out:
	read_unlock(&smb_direct_device_lock);

	if (!rdma_capable) {
		struct ib_device *ibdev;

		ibdev = ib_device_get_by_netdev(netdev, RDMA_DRIVER_UNKNOWN);
		if (ibdev) {
			if (rdma_frwr_is_supported(&ibdev->attrs))
				rdma_capable = true;
			ib_device_put(ibdev);
		}
	}

	return rdma_capable;
}

static struct ksmbd_transport_ops ksmbd_smb_direct_transport_ops = {
	.prepare = smb_direct_prepare,
	.disconnect = smb_direct_disconnect,
	.writev = smb_direct_writev,
	.read = smb_direct_read,
	.rdma_read = smb_direct_rdma_read,
	.rdma_write = smb_direct_rdma_write,
};