// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Copyright (C) 2017, Microsoft Corporation.
 * Copyright (C) 2018, LG Electronics.
 *
 * Author(s): Long Li <longli@microsoft.com>,
 *	      Hyunchul Lee <hyc.lee@gmail.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See
 * the GNU General Public License for more details.
 */

#define SUBMOD_NAME	"smb_direct"

#include <linux/kthread.h>
#include <linux/list.h>
#include <linux/mempool.h>
#include <linux/highmem.h>
#include <linux/scatterlist.h>
#include <rdma/ib_verbs.h>
#include <rdma/rdma_cm.h>
#include <rdma/rw.h>

#include "glob.h"
#include "connection.h"
#include "smb_common.h"
#include "smbstatus.h"
#include "transport_rdma.h"

#define SMB_DIRECT_PORT_IWARP		5445
#define SMB_DIRECT_PORT_INFINIBAND	445

#define SMB_DIRECT_VERSION_LE		cpu_to_le16(0x0100)

/* SMB_DIRECT negotiation timeout in seconds */
#define SMB_DIRECT_NEGOTIATE_TIMEOUT		120

#define SMB_DIRECT_MAX_SEND_SGES		8
#define SMB_DIRECT_MAX_RECV_SGES		1

/*
 * Default maximum number of outstanding RDMA read/write operations on this
 * connection. This value may be lowered during QP creation to respect the
 * hardware limit.
 */
#define SMB_DIRECT_CM_INITIATOR_DEPTH		8

/* Maximum number of retries on data transfer operations */
#define SMB_DIRECT_CM_RETRY			6
/* No need to retry on Receiver Not Ready since SMB_DIRECT manages credits */
#define SMB_DIRECT_CM_RNR_RETRY		0

/*
 * User configurable initial values per SMB_DIRECT transport connection
 * as defined in [MS-SMBD] 3.1.1.1.
 * These may change after SMB_DIRECT negotiation.
 */

/* Use port 445 as the SMB Direct port by default */
static int smb_direct_port = SMB_DIRECT_PORT_INFINIBAND;

/* The local peer's maximum number of credits to grant to the peer */
static int smb_direct_receive_credit_max = 255;

/* The number of send credits to request from the remote peer */
static int smb_direct_send_credit_target = 255;

/* The maximum single message size that can be sent to the remote peer */
static int smb_direct_max_send_size = 8192;

/* The maximum fragmented upper-layer payload receive size supported */
static int smb_direct_max_fragmented_recv_size = 1024 * 1024;

/* The maximum single-message size which can be received */
static int smb_direct_max_receive_size = 8192;

static int smb_direct_max_read_write_size = 524224;

static int smb_direct_max_outstanding_rw_ops = 8;

static LIST_HEAD(smb_direct_device_list);
static DEFINE_RWLOCK(smb_direct_device_lock);

struct smb_direct_device {
	struct ib_device	*ib_dev;
	struct list_head	list;
};

static struct smb_direct_listener {
	struct rdma_cm_id	*cm_id;
} smb_direct_listener;

static struct workqueue_struct *smb_direct_wq;

enum smb_direct_status {
	SMB_DIRECT_CS_NEW = 0,
	SMB_DIRECT_CS_CONNECTED,
	SMB_DIRECT_CS_DISCONNECTING,
	SMB_DIRECT_CS_DISCONNECTED,
};

struct smb_direct_transport {
	struct ksmbd_transport	transport;

	enum smb_direct_status	status;
	bool			full_packet_received;
	wait_queue_head_t	wait_status;

	struct rdma_cm_id	*cm_id;
	struct ib_cq		*send_cq;
	struct ib_cq		*recv_cq;
	struct ib_pd		*pd;
	struct ib_qp		*qp;

	int			max_send_size;
	int			max_recv_size;
	int			max_fragmented_send_size;
	int			max_fragmented_recv_size;
	int			max_rdma_rw_size;

	spinlock_t		reassembly_queue_lock;
	struct list_head	reassembly_queue;
	int			reassembly_data_length;
	int			reassembly_queue_length;
	int			first_entry_offset;
	wait_queue_head_t	wait_reassembly_queue;

	spinlock_t		receive_credit_lock;
	int			recv_credits;
	int			count_avail_recvmsg;
	int			recv_credit_max;
	int			recv_credit_target;

	spinlock_t		recvmsg_queue_lock;
	struct list_head	recvmsg_queue;

	spinlock_t		empty_recvmsg_queue_lock;
	struct list_head	empty_recvmsg_queue;

	int			send_credit_target;
	atomic_t		send_credits;
	spinlock_t		lock_new_recv_credits;
	int			new_recv_credits;
	atomic_t		rw_avail_ops;

	wait_queue_head_t	wait_send_credits;
	wait_queue_head_t	wait_rw_avail_ops;

	mempool_t		*sendmsg_mempool;
	struct kmem_cache	*sendmsg_cache;
	mempool_t		*recvmsg_mempool;
	struct kmem_cache	*recvmsg_cache;

	wait_queue_head_t	wait_send_payload_pending;
	atomic_t		send_payload_pending;
	wait_queue_head_t	wait_send_pending;
	atomic_t		send_pending;

	struct delayed_work	post_recv_credits_work;
	struct work_struct	send_immediate_work;
	struct work_struct	disconnect_work;

	bool			negotiation_requested;
};

#define KSMBD_TRANS(t) ((struct ksmbd_transport *)&((t)->transport))

enum {
	SMB_DIRECT_MSG_NEGOTIATE_REQ = 0,
	SMB_DIRECT_MSG_DATA_TRANSFER
};

static struct ksmbd_transport_ops ksmbd_smb_direct_transport_ops;

struct smb_direct_send_ctx {
	struct list_head	msg_list;
	int			wr_cnt;
	bool			need_invalidate_rkey;
	unsigned int		remote_key;
};

struct smb_direct_sendmsg {
	struct smb_direct_transport	*transport;
	struct ib_send_wr	wr;
	struct list_head	list;
	int			num_sge;
	struct ib_sge		sge[SMB_DIRECT_MAX_SEND_SGES];
	struct ib_cqe		cqe;
	u8			packet[];
};

struct smb_direct_recvmsg {
	struct smb_direct_transport	*transport;
	struct list_head	list;
	int			type;
	struct ib_sge		sge;
	struct ib_cqe		cqe;
	bool			first_segment;
	u8			packet[];
};

struct smb_direct_rdma_rw_msg {
	struct smb_direct_transport	*t;
	struct ib_cqe		cqe;
	struct completion	*completion;
	struct rdma_rw_ctx	rw_ctx;
	struct sg_table		sgt;
	struct scatterlist	sg_list[0];
};

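/* Number of pages touched by a buffer that may start at any offset within a page. */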
static inline int get_buf_page_count(void *buf, int size)
{
	return DIV_ROUND_UP((uintptr_t)buf + size, PAGE_SIZE) -
		(uintptr_t)buf / PAGE_SIZE;
}

static void smb_direct_destroy_pools(struct smb_direct_transport *transport);
static void smb_direct_post_recv_credits(struct work_struct *work);
static int smb_direct_post_send_data(struct smb_direct_transport *t,
				     struct smb_direct_send_ctx *send_ctx,
				     struct kvec *iov, int niov,
				     int remaining_data_length);

static inline struct smb_direct_transport *
smb_trans_direct_transfort(struct ksmbd_transport *t)
{
	return container_of(t, struct smb_direct_transport, transport);
}

static inline void
*smb_direct_recvmsg_payload(struct smb_direct_recvmsg *recvmsg)
{
	return (void *)recvmsg->packet;
}

static inline bool is_receive_credit_post_required(int receive_credits,
						   int avail_recvmsg_count)
{
	return receive_credits <= (smb_direct_receive_credit_max >> 3) &&
		avail_recvmsg_count >= (receive_credits >> 2);
}

static struct
smb_direct_recvmsg *get_free_recvmsg(struct smb_direct_transport *t)
{
	struct smb_direct_recvmsg *recvmsg = NULL;

	spin_lock(&t->recvmsg_queue_lock);
	if (!list_empty(&t->recvmsg_queue)) {
		recvmsg = list_first_entry(&t->recvmsg_queue,
					   struct smb_direct_recvmsg,
					   list);
		list_del(&recvmsg->list);
	}
	spin_unlock(&t->recvmsg_queue_lock);
	return recvmsg;
}

static void put_recvmsg(struct smb_direct_transport *t,
			struct smb_direct_recvmsg *recvmsg)
{
	ib_dma_unmap_single(t->cm_id->device, recvmsg->sge.addr,
			    recvmsg->sge.length, DMA_FROM_DEVICE);

	spin_lock(&t->recvmsg_queue_lock);
	list_add(&recvmsg->list, &t->recvmsg_queue);
	spin_unlock(&t->recvmsg_queue_lock);
}

static struct
smb_direct_recvmsg *get_empty_recvmsg(struct smb_direct_transport *t)
{
	struct smb_direct_recvmsg *recvmsg = NULL;

	spin_lock(&t->empty_recvmsg_queue_lock);
	if (!list_empty(&t->empty_recvmsg_queue)) {
		recvmsg = list_first_entry(&t->empty_recvmsg_queue,
					   struct smb_direct_recvmsg, list);
		list_del(&recvmsg->list);
	}
	spin_unlock(&t->empty_recvmsg_queue_lock);
	return recvmsg;
}

static void put_empty_recvmsg(struct smb_direct_transport *t,
			      struct smb_direct_recvmsg *recvmsg)
{
	ib_dma_unmap_single(t->cm_id->device, recvmsg->sge.addr,
			    recvmsg->sge.length, DMA_FROM_DEVICE);

	spin_lock(&t->empty_recvmsg_queue_lock);
	list_add_tail(&recvmsg->list, &t->empty_recvmsg_queue);
	spin_unlock(&t->empty_recvmsg_queue_lock);
}

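/*
 * Queue a received message for reassembly and publish the updated
 * reassembly_data_length; the read side checks that counter locklessly.
 */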
static void enqueue_reassembly(struct smb_direct_transport *t,
			       struct smb_direct_recvmsg *recvmsg,
			       int data_length)
{
	spin_lock(&t->reassembly_queue_lock);
	list_add_tail(&recvmsg->list, &t->reassembly_queue);
	t->reassembly_queue_length++;
	/*
	 * Make sure reassembly_data_length is updated after list and
	 * reassembly_queue_length are updated. On the dequeue side
	 * reassembly_data_length is checked without a lock to determine
	 * if reassembly_queue_length and list are up to date.
	 */
	virt_wmb();
	t->reassembly_data_length += data_length;
	spin_unlock(&t->reassembly_queue_lock);
}

static struct smb_direct_recvmsg *get_first_reassembly(struct smb_direct_transport *t)
{
	if (!list_empty(&t->reassembly_queue))
		return list_first_entry(&t->reassembly_queue,
				struct smb_direct_recvmsg, list);
	else
		return NULL;
}

static void smb_direct_disconnect_rdma_work(struct work_struct *work)
{
	struct smb_direct_transport *t =
		container_of(work, struct smb_direct_transport,
			     disconnect_work);

	if (t->status == SMB_DIRECT_CS_CONNECTED) {
		t->status = SMB_DIRECT_CS_DISCONNECTING;
		rdma_disconnect(t->cm_id);
	}
}

static void
smb_direct_disconnect_rdma_connection(struct smb_direct_transport *t)
{
	if (t->status == SMB_DIRECT_CS_CONNECTED)
		queue_work(smb_direct_wq, &t->disconnect_work);
}

static void smb_direct_send_immediate_work(struct work_struct *work)
{
	struct smb_direct_transport *t = container_of(work,
			struct smb_direct_transport, send_immediate_work);

	if (t->status != SMB_DIRECT_CS_CONNECTED)
		return;

	smb_direct_post_send_data(t, NULL, NULL, 0, 0);
}

static struct smb_direct_transport *alloc_transport(struct rdma_cm_id *cm_id)
{
	struct smb_direct_transport *t;
	struct ksmbd_conn *conn;

	t = kzalloc(sizeof(*t), GFP_KERNEL);
	if (!t)
		return NULL;

	t->cm_id = cm_id;
	cm_id->context = t;

	t->status = SMB_DIRECT_CS_NEW;
	init_waitqueue_head(&t->wait_status);

	spin_lock_init(&t->reassembly_queue_lock);
	INIT_LIST_HEAD(&t->reassembly_queue);
	t->reassembly_data_length = 0;
	t->reassembly_queue_length = 0;
	init_waitqueue_head(&t->wait_reassembly_queue);
	init_waitqueue_head(&t->wait_send_credits);
	init_waitqueue_head(&t->wait_rw_avail_ops);

	spin_lock_init(&t->receive_credit_lock);
	spin_lock_init(&t->recvmsg_queue_lock);
	INIT_LIST_HEAD(&t->recvmsg_queue);

	spin_lock_init(&t->empty_recvmsg_queue_lock);
	INIT_LIST_HEAD(&t->empty_recvmsg_queue);

	init_waitqueue_head(&t->wait_send_payload_pending);
	atomic_set(&t->send_payload_pending, 0);
	init_waitqueue_head(&t->wait_send_pending);
	atomic_set(&t->send_pending, 0);

	spin_lock_init(&t->lock_new_recv_credits);

	INIT_DELAYED_WORK(&t->post_recv_credits_work,
			  smb_direct_post_recv_credits);
	INIT_WORK(&t->send_immediate_work, smb_direct_send_immediate_work);
	INIT_WORK(&t->disconnect_work, smb_direct_disconnect_rdma_work);

	conn = ksmbd_conn_alloc();
	if (!conn)
		goto err;
	conn->transport = KSMBD_TRANS(t);
	KSMBD_TRANS(t)->conn = conn;
	KSMBD_TRANS(t)->ops = &ksmbd_smb_direct_transport_ops;
	return t;
err:
	kfree(t);
	return NULL;
}

static void free_transport(struct smb_direct_transport *t)
{
	struct smb_direct_recvmsg *recvmsg;

	wake_up_interruptible(&t->wait_send_credits);

	ksmbd_debug(RDMA, "wait for all send posted to IB to finish\n");
	wait_event(t->wait_send_payload_pending,
		   atomic_read(&t->send_payload_pending) == 0);
	wait_event(t->wait_send_pending,
		   atomic_read(&t->send_pending) == 0);

	cancel_work_sync(&t->disconnect_work);
	cancel_delayed_work_sync(&t->post_recv_credits_work);
	cancel_work_sync(&t->send_immediate_work);

	if (t->qp) {
		ib_drain_qp(t->qp);
		ib_mr_pool_destroy(t->qp, &t->qp->rdma_mrs);
		ib_destroy_qp(t->qp);
	}

	ksmbd_debug(RDMA, "drain the reassembly queue\n");
	do {
		spin_lock(&t->reassembly_queue_lock);
		recvmsg = get_first_reassembly(t);
		if (recvmsg) {
			list_del(&recvmsg->list);
			spin_unlock(&t->reassembly_queue_lock);
			put_recvmsg(t, recvmsg);
		} else {
			spin_unlock(&t->reassembly_queue_lock);
		}
	} while (recvmsg);
	t->reassembly_data_length = 0;

	if (t->send_cq)
		ib_free_cq(t->send_cq);
	if (t->recv_cq)
		ib_free_cq(t->recv_cq);
	if (t->pd)
		ib_dealloc_pd(t->pd);
	if (t->cm_id)
		rdma_destroy_id(t->cm_id);

	smb_direct_destroy_pools(t);
	ksmbd_conn_free(KSMBD_TRANS(t)->conn);
	kfree(t);
}

static struct smb_direct_sendmsg
*smb_direct_alloc_sendmsg(struct smb_direct_transport *t)
{
	struct smb_direct_sendmsg *msg;

	msg = mempool_alloc(t->sendmsg_mempool, GFP_KERNEL);
	if (!msg)
		return ERR_PTR(-ENOMEM);
	msg->transport = t;
	INIT_LIST_HEAD(&msg->list);
	msg->num_sge = 0;
	return msg;
}

static void smb_direct_free_sendmsg(struct smb_direct_transport *t,
				    struct smb_direct_sendmsg *msg)
{
	int i;

	if (msg->num_sge > 0) {
		ib_dma_unmap_single(t->cm_id->device,
				    msg->sge[0].addr, msg->sge[0].length,
				    DMA_TO_DEVICE);
		for (i = 1; i < msg->num_sge; i++)
			ib_dma_unmap_page(t->cm_id->device,
					  msg->sge[i].addr, msg->sge[i].length,
					  DMA_TO_DEVICE);
	}
	mempool_free(msg, t->sendmsg_mempool);
}

static int smb_direct_check_recvmsg(struct smb_direct_recvmsg *recvmsg)
{
	switch (recvmsg->type) {
	case SMB_DIRECT_MSG_DATA_TRANSFER: {
		struct smb_direct_data_transfer *req =
			(struct smb_direct_data_transfer *)recvmsg->packet;
		struct smb2_hdr *hdr = (struct smb2_hdr *)(recvmsg->packet
				+ le32_to_cpu(req->data_offset));
		ksmbd_debug(RDMA,
			    "CreditGranted: %u, CreditRequested: %u, DataLength: %u, RemainingDataLength: %u, SMB: %x, Command: %u\n",
			    le16_to_cpu(req->credits_granted),
			    le16_to_cpu(req->credits_requested),
			    req->data_length, req->remaining_data_length,
			    hdr->ProtocolId, hdr->Command);
		break;
	}
	case SMB_DIRECT_MSG_NEGOTIATE_REQ: {
		struct smb_direct_negotiate_req *req =
			(struct smb_direct_negotiate_req *)recvmsg->packet;
		ksmbd_debug(RDMA,
			    "MinVersion: %u, MaxVersion: %u, CreditRequested: %u, MaxSendSize: %u, MaxRecvSize: %u, MaxFragmentedSize: %u\n",
			    le16_to_cpu(req->min_version),
			    le16_to_cpu(req->max_version),
			    le16_to_cpu(req->credits_requested),
			    le32_to_cpu(req->preferred_send_size),
			    le32_to_cpu(req->max_receive_size),
			    le32_to_cpu(req->max_fragmented_size));
		if (le16_to_cpu(req->min_version) > 0x0100 ||
		    le16_to_cpu(req->max_version) < 0x0100)
			return -EOPNOTSUPP;
		if (le16_to_cpu(req->credits_requested) <= 0 ||
		    le32_to_cpu(req->max_receive_size) <= 128 ||
		    le32_to_cpu(req->max_fragmented_size) <=
		    128 * 1024)
			return -ECONNABORTED;

		break;
	}
	default:
		return -EINVAL;
	}
	return 0;
}

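/*
 * Receive completion handler: validates the received SMB Direct message,
 * hands any payload to the reassembly queue and updates credit accounting.
 */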
static void recv_done(struct ib_cq *cq, struct ib_wc *wc)
{
	struct smb_direct_recvmsg *recvmsg;
	struct smb_direct_transport *t;

	recvmsg = container_of(wc->wr_cqe, struct smb_direct_recvmsg, cqe);
	t = recvmsg->transport;

	if (wc->status != IB_WC_SUCCESS || wc->opcode != IB_WC_RECV) {
		if (wc->status != IB_WC_WR_FLUSH_ERR) {
			pr_err("Recv error. status='%s (%d)' opcode=%d\n",
			       ib_wc_status_msg(wc->status), wc->status,
			       wc->opcode);
			smb_direct_disconnect_rdma_connection(t);
		}
		put_empty_recvmsg(t, recvmsg);
		return;
	}

	ksmbd_debug(RDMA, "Recv completed. status='%s (%d)', opcode=%d\n",
		    ib_wc_status_msg(wc->status), wc->status,
		    wc->opcode);

	ib_dma_sync_single_for_cpu(wc->qp->device, recvmsg->sge.addr,
				   recvmsg->sge.length, DMA_FROM_DEVICE);

	switch (recvmsg->type) {
	case SMB_DIRECT_MSG_NEGOTIATE_REQ:
		if (wc->byte_len < sizeof(struct smb_direct_negotiate_req)) {
			put_empty_recvmsg(t, recvmsg);
			return;
		}
		t->negotiation_requested = true;
		t->full_packet_received = true;
		enqueue_reassembly(t, recvmsg, 0);
		wake_up_interruptible(&t->wait_status);
		break;
	case SMB_DIRECT_MSG_DATA_TRANSFER: {
		struct smb_direct_data_transfer *data_transfer =
			(struct smb_direct_data_transfer *)recvmsg->packet;
		unsigned int data_length;
		int avail_recvmsg_count, receive_credits;

		if (wc->byte_len <
		    offsetof(struct smb_direct_data_transfer, padding)) {
			put_empty_recvmsg(t, recvmsg);
			return;
		}

		data_length = le32_to_cpu(data_transfer->data_length);
		if (data_length) {
			if (wc->byte_len < sizeof(struct smb_direct_data_transfer) +
			    (u64)data_length) {
				put_empty_recvmsg(t, recvmsg);
				return;
			}

			if (t->full_packet_received)
				recvmsg->first_segment = true;

			if (le32_to_cpu(data_transfer->remaining_data_length))
				t->full_packet_received = false;
			else
				t->full_packet_received = true;

			enqueue_reassembly(t, recvmsg, (int)data_length);
			wake_up_interruptible(&t->wait_reassembly_queue);

			spin_lock(&t->receive_credit_lock);
			receive_credits = --(t->recv_credits);
			avail_recvmsg_count = t->count_avail_recvmsg;
			spin_unlock(&t->receive_credit_lock);
		} else {
			put_empty_recvmsg(t, recvmsg);

			spin_lock(&t->receive_credit_lock);
			receive_credits = --(t->recv_credits);
			avail_recvmsg_count = ++(t->count_avail_recvmsg);
			spin_unlock(&t->receive_credit_lock);
		}

		t->recv_credit_target =
				le16_to_cpu(data_transfer->credits_requested);
		atomic_add(le16_to_cpu(data_transfer->credits_granted),
			   &t->send_credits);

		if (le16_to_cpu(data_transfer->flags) &
		    SMB_DIRECT_RESPONSE_REQUESTED)
			queue_work(smb_direct_wq, &t->send_immediate_work);

		if (atomic_read(&t->send_credits) > 0)
			wake_up_interruptible(&t->wait_send_credits);

		if (is_receive_credit_post_required(receive_credits, avail_recvmsg_count))
			mod_delayed_work(smb_direct_wq,
					 &t->post_recv_credits_work, 0);
		break;
	}
	default:
		break;
	}
}

static int smb_direct_post_recv(struct smb_direct_transport *t,
				struct smb_direct_recvmsg *recvmsg)
{
	struct ib_recv_wr wr;
	int ret;

	recvmsg->sge.addr = ib_dma_map_single(t->cm_id->device,
					      recvmsg->packet, t->max_recv_size,
					      DMA_FROM_DEVICE);
	ret = ib_dma_mapping_error(t->cm_id->device, recvmsg->sge.addr);
	if (ret)
		return ret;
	recvmsg->sge.length = t->max_recv_size;
	recvmsg->sge.lkey = t->pd->local_dma_lkey;
	recvmsg->cqe.done = recv_done;

	wr.wr_cqe = &recvmsg->cqe;
	wr.next = NULL;
	wr.sg_list = &recvmsg->sge;
	wr.num_sge = 1;

	ret = ib_post_recv(t->qp, &wr, NULL);
	if (ret) {
		pr_err("Can't post recv: %d\n", ret);
		ib_dma_unmap_single(t->cm_id->device,
				    recvmsg->sge.addr, recvmsg->sge.length,
				    DMA_FROM_DEVICE);
		smb_direct_disconnect_rdma_connection(t);
		return ret;
	}
	return ret;
}

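/*
 * Upper-layer read path: copy already-reassembled data into @buf, returning
 * receive buffers as they are drained, or wait until enough data arrives.
 */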
static int smb_direct_read(struct ksmbd_transport *t, char *buf,
			   unsigned int size)
{
	struct smb_direct_recvmsg *recvmsg;
	struct smb_direct_data_transfer *data_transfer;
	int to_copy, to_read, data_read, offset;
	u32 data_length, remaining_data_length, data_offset;
	int rc;
	struct smb_direct_transport *st = smb_trans_direct_transfort(t);

again:
	if (st->status != SMB_DIRECT_CS_CONNECTED) {
		pr_err("disconnected\n");
		return -ENOTCONN;
	}

	/*
	 * No need to hold the reassembly queue lock all the time as we are
	 * the only one reading from the front of the queue. The transport
	 * may add more entries to the back of the queue at the same time.
	 */
	if (st->reassembly_data_length >= size) {
		int queue_length;
		int queue_removed = 0;

		/*
		 * Need to make sure reassembly_data_length is read before
		 * reading reassembly_queue_length and calling
		 * get_first_reassembly. This call is lock free
		 * as we never read at the end of the queue, which is being
		 * updated in SOFTIRQ as more data is received.
		 */
		virt_rmb();
		queue_length = st->reassembly_queue_length;
		data_read = 0;
		to_read = size;
		offset = st->first_entry_offset;
		while (data_read < size) {
			recvmsg = get_first_reassembly(st);
			data_transfer = smb_direct_recvmsg_payload(recvmsg);
			data_length = le32_to_cpu(data_transfer->data_length);
			remaining_data_length =
				le32_to_cpu(data_transfer->remaining_data_length);
			data_offset = le32_to_cpu(data_transfer->data_offset);

			/*
			 * The upper layer expects RFC1002 length at the
			 * beginning of the payload. Return it to indicate
			 * the total length of the packet. This minimizes the
			 * change to upper layer packet processing logic. It
			 * will eventually be removed when an intermediate
			 * transport layer is added.
			 */
			if (recvmsg->first_segment && size == 4) {
				unsigned int rfc1002_len =
					data_length + remaining_data_length;
				*((__be32 *)buf) = cpu_to_be32(rfc1002_len);
				data_read = 4;
				recvmsg->first_segment = false;
				ksmbd_debug(RDMA,
					    "returning rfc1002 length %d\n",
					    rfc1002_len);
				goto read_rfc1002_done;
			}

			to_copy = min_t(int, data_length - offset, to_read);
			memcpy(buf + data_read, (char *)data_transfer + data_offset + offset,
			       to_copy);

			/* move on to the next buffer? */
			if (to_copy == data_length - offset) {
				queue_length--;
				/*
				 * No need to lock if we are not at the
				 * end of the queue
				 */
				if (queue_length) {
					list_del(&recvmsg->list);
				} else {
					spin_lock_irq(&st->reassembly_queue_lock);
					list_del(&recvmsg->list);
					spin_unlock_irq(&st->reassembly_queue_lock);
				}
				queue_removed++;
				put_recvmsg(st, recvmsg);
				offset = 0;
			} else {
				offset += to_copy;
			}

			to_read -= to_copy;
			data_read += to_copy;
		}

		spin_lock_irq(&st->reassembly_queue_lock);
		st->reassembly_data_length -= data_read;
		st->reassembly_queue_length -= queue_removed;
		spin_unlock_irq(&st->reassembly_queue_lock);

		spin_lock(&st->receive_credit_lock);
		st->count_avail_recvmsg += queue_removed;
		if (is_receive_credit_post_required(st->recv_credits, st->count_avail_recvmsg)) {
			spin_unlock(&st->receive_credit_lock);
			mod_delayed_work(smb_direct_wq,
					 &st->post_recv_credits_work, 0);
		} else {
			spin_unlock(&st->receive_credit_lock);
		}

		st->first_entry_offset = offset;
		ksmbd_debug(RDMA,
			    "returning to thread data_read=%d reassembly_data_length=%d first_entry_offset=%d\n",
			    data_read, st->reassembly_data_length,
			    st->first_entry_offset);
read_rfc1002_done:
		return data_read;
	}

	ksmbd_debug(RDMA, "wait_event on more data\n");
	rc = wait_event_interruptible(st->wait_reassembly_queue,
				      st->reassembly_data_length >= size ||
				      st->status != SMB_DIRECT_CS_CONNECTED);
	if (rc)
		return -EINTR;

	goto again;
}

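/* Repost receive buffers until the peer's requested credit target is met. */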
static void smb_direct_post_recv_credits(struct work_struct *work)
{
	struct smb_direct_transport *t = container_of(work,
			struct smb_direct_transport, post_recv_credits_work.work);
	struct smb_direct_recvmsg *recvmsg;
	int receive_credits, credits = 0;
	int ret;
	int use_free = 1;

	spin_lock(&t->receive_credit_lock);
	receive_credits = t->recv_credits;
	spin_unlock(&t->receive_credit_lock);

	if (receive_credits < t->recv_credit_target) {
		while (true) {
			if (use_free)
				recvmsg = get_free_recvmsg(t);
			else
				recvmsg = get_empty_recvmsg(t);
			if (!recvmsg) {
				if (use_free) {
					use_free = 0;
					continue;
				} else {
					break;
				}
			}

			recvmsg->type = SMB_DIRECT_MSG_DATA_TRANSFER;
			recvmsg->first_segment = false;

			ret = smb_direct_post_recv(t, recvmsg);
			if (ret) {
				pr_err("Can't post recv: %d\n", ret);
				put_recvmsg(t, recvmsg);
				break;
			}
			credits++;
		}
	}

	spin_lock(&t->receive_credit_lock);
	t->recv_credits += credits;
	t->count_avail_recvmsg -= credits;
	spin_unlock(&t->receive_credit_lock);

	spin_lock(&t->lock_new_recv_credits);
	t->new_recv_credits += credits;
	spin_unlock(&t->lock_new_recv_credits);

	if (credits)
		queue_work(smb_direct_wq, &t->send_immediate_work);
}

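/*
 * Send completion handler: drops the pending-send count and frees the
 * chain of send messages that was posted as a single signaled request.
 */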
static void send_done(struct ib_cq *cq, struct ib_wc *wc)
{
	struct smb_direct_sendmsg *sendmsg, *sibling;
	struct smb_direct_transport *t;
	struct list_head *pos, *prev, *end;

	sendmsg = container_of(wc->wr_cqe, struct smb_direct_sendmsg, cqe);
	t = sendmsg->transport;

	ksmbd_debug(RDMA, "Send completed. status='%s (%d)', opcode=%d\n",
		    ib_wc_status_msg(wc->status), wc->status,
		    wc->opcode);

	if (wc->status != IB_WC_SUCCESS || wc->opcode != IB_WC_SEND) {
		pr_err("Send error. status='%s (%d)', opcode=%d\n",
		       ib_wc_status_msg(wc->status), wc->status,
		       wc->opcode);
		smb_direct_disconnect_rdma_connection(t);
	}

	if (sendmsg->num_sge > 1) {
		if (atomic_dec_and_test(&t->send_payload_pending))
			wake_up(&t->wait_send_payload_pending);
	} else {
		if (atomic_dec_and_test(&t->send_pending))
			wake_up(&t->wait_send_pending);
	}

	/* iterate and free the list of messages in reverse. the list's head
	 * is invalid.
	 */
	for (pos = &sendmsg->list, prev = pos->prev, end = sendmsg->list.next;
	     prev != end; pos = prev, prev = prev->prev) {
		sibling = container_of(pos, struct smb_direct_sendmsg, list);
		smb_direct_free_sendmsg(t, sibling);
	}

	sibling = container_of(pos, struct smb_direct_sendmsg, list);
	smb_direct_free_sendmsg(t, sibling);
}

static int manage_credits_prior_sending(struct smb_direct_transport *t)
{
	int new_credits;

	spin_lock(&t->lock_new_recv_credits);
	new_credits = t->new_recv_credits;
	t->new_recv_credits = 0;
	spin_unlock(&t->lock_new_recv_credits);

	return new_credits;
}

static int smb_direct_post_send(struct smb_direct_transport *t,
				struct ib_send_wr *wr)
{
	int ret;

	if (wr->num_sge > 1)
		atomic_inc(&t->send_payload_pending);
	else
		atomic_inc(&t->send_pending);

	ret = ib_post_send(t->qp, wr, NULL);
	if (ret) {
		pr_err("failed to post send: %d\n", ret);
		if (wr->num_sge > 1) {
			if (atomic_dec_and_test(&t->send_payload_pending))
				wake_up(&t->wait_send_payload_pending);
		} else {
			if (atomic_dec_and_test(&t->send_pending))
				wake_up(&t->wait_send_pending);
		}
		smb_direct_disconnect_rdma_connection(t);
	}
	return ret;
}

static void smb_direct_send_ctx_init(struct smb_direct_transport *t,
				     struct smb_direct_send_ctx *send_ctx,
				     bool need_invalidate_rkey,
				     unsigned int remote_key)
{
	INIT_LIST_HEAD(&send_ctx->msg_list);
	send_ctx->wr_cnt = 0;
	send_ctx->need_invalidate_rkey = need_invalidate_rkey;
	send_ctx->remote_key = remote_key;
}

static int smb_direct_flush_send_list(struct smb_direct_transport *t,
				      struct smb_direct_send_ctx *send_ctx,
				      bool is_last)
{
	struct smb_direct_sendmsg *first, *last;
	int ret;

	if (list_empty(&send_ctx->msg_list))
		return 0;

	first = list_first_entry(&send_ctx->msg_list,
				 struct smb_direct_sendmsg,
				 list);
	last = list_last_entry(&send_ctx->msg_list,
			       struct smb_direct_sendmsg,
			       list);

	last->wr.send_flags = IB_SEND_SIGNALED;
	last->wr.wr_cqe = &last->cqe;
	if (is_last && send_ctx->need_invalidate_rkey) {
		last->wr.opcode = IB_WR_SEND_WITH_INV;
		last->wr.ex.invalidate_rkey = send_ctx->remote_key;
	}

	ret = smb_direct_post_send(t, &first->wr);
	if (!ret) {
		smb_direct_send_ctx_init(t, send_ctx,
					 send_ctx->need_invalidate_rkey,
					 send_ctx->remote_key);
	} else {
		atomic_add(send_ctx->wr_cnt, &t->send_credits);
		wake_up(&t->wait_send_credits);
		list_for_each_entry_safe(first, last, &send_ctx->msg_list,
					 list) {
			smb_direct_free_sendmsg(t, first);
		}
	}
	return ret;
}

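/* Take one credit, or sleep until a credit is available or the transport drops. */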
static int wait_for_credits(struct smb_direct_transport *t,
			    wait_queue_head_t *waitq, atomic_t *credits)
{
	int ret;

	do {
		if (atomic_dec_return(credits) >= 0)
			return 0;

		atomic_inc(credits);
		ret = wait_event_interruptible(*waitq,
					       atomic_read(credits) > 0 ||
					       t->status != SMB_DIRECT_CS_CONNECTED);

		if (t->status != SMB_DIRECT_CS_CONNECTED)
			return -ENOTCONN;
		else if (ret < 0)
			return ret;
	} while (true);
}

static int wait_for_send_credits(struct smb_direct_transport *t,
				 struct smb_direct_send_ctx *send_ctx)
{
	int ret;

	if (send_ctx &&
	    (send_ctx->wr_cnt >= 16 || atomic_read(&t->send_credits) <= 1)) {
		ret = smb_direct_flush_send_list(t, send_ctx, false);
		if (ret)
			return ret;
	}

	return wait_for_credits(t, &t->wait_send_credits, &t->send_credits);
}

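/* Allocate a send message and fill in the SMB Direct data transfer header. */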
static int smb_direct_create_header(struct smb_direct_transport *t,
				    int size, int remaining_data_length,
				    struct smb_direct_sendmsg **sendmsg_out)
{
	struct smb_direct_sendmsg *sendmsg;
	struct smb_direct_data_transfer *packet;
	int header_length;
	int ret;

	sendmsg = smb_direct_alloc_sendmsg(t);
	if (IS_ERR(sendmsg))
		return PTR_ERR(sendmsg);

	/* Fill in the packet header */
	packet = (struct smb_direct_data_transfer *)sendmsg->packet;
	packet->credits_requested = cpu_to_le16(t->send_credit_target);
	packet->credits_granted = cpu_to_le16(manage_credits_prior_sending(t));

	packet->flags = 0;
	packet->reserved = 0;
	if (!size)
		packet->data_offset = 0;
	else
		packet->data_offset = cpu_to_le32(24);
	packet->data_length = cpu_to_le32(size);
	packet->remaining_data_length = cpu_to_le32(remaining_data_length);
	packet->padding = 0;

	ksmbd_debug(RDMA,
		    "credits_requested=%d credits_granted=%d data_offset=%d data_length=%d remaining_data_length=%d\n",
		    le16_to_cpu(packet->credits_requested),
		    le16_to_cpu(packet->credits_granted),
		    le32_to_cpu(packet->data_offset),
		    le32_to_cpu(packet->data_length),
		    le32_to_cpu(packet->remaining_data_length));

	/* Map the packet to DMA */
	header_length = sizeof(struct smb_direct_data_transfer);
	/* If this is a packet without payload, don't send padding */
	if (!size)
		header_length =
			offsetof(struct smb_direct_data_transfer, padding);

	sendmsg->sge[0].addr = ib_dma_map_single(t->cm_id->device,
						 (void *)packet,
						 header_length,
						 DMA_TO_DEVICE);
	ret = ib_dma_mapping_error(t->cm_id->device, sendmsg->sge[0].addr);
	if (ret) {
		smb_direct_free_sendmsg(t, sendmsg);
		return ret;
	}

	sendmsg->num_sge = 1;
	sendmsg->sge[0].length = header_length;
	sendmsg->sge[0].lkey = t->pd->local_dma_lkey;

	*sendmsg_out = sendmsg;
	return 0;
}

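/* Build a scatterlist for a kernel buffer that may be vmalloc- or kmap-backed. */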
static int get_sg_list(void *buf, int size, struct scatterlist *sg_list, int nentries)
{
	bool high = is_vmalloc_addr(buf);
	struct page *page;
	int offset, len;
	int i = 0;

	if (nentries < get_buf_page_count(buf, size))
		return -EINVAL;

	offset = offset_in_page(buf);
	buf -= offset;
	while (size > 0) {
		len = min_t(int, PAGE_SIZE - offset, size);
		if (high)
			page = vmalloc_to_page(buf);
		else
			page = kmap_to_page(buf);

		if (!sg_list)
			return -EINVAL;
		sg_set_page(sg_list, page, len, offset);
		sg_list = sg_next(sg_list);

		buf += PAGE_SIZE;
		size -= len;
		offset = 0;
		i++;
	}
	return i;
}

static int get_mapped_sg_list(struct ib_device *device, void *buf, int size,
			      struct scatterlist *sg_list, int nentries,
			      enum dma_data_direction dir)
{
	int npages;

	npages = get_sg_list(buf, size, sg_list, nentries);
	if (npages <= 0)
		return -EINVAL;
	return ib_dma_map_sg(device, sg_list, npages, dir);
}

static int post_sendmsg(struct smb_direct_transport *t,
			struct smb_direct_send_ctx *send_ctx,
			struct smb_direct_sendmsg *msg)
{
	int i;

	for (i = 0; i < msg->num_sge; i++)
		ib_dma_sync_single_for_device(t->cm_id->device,
					      msg->sge[i].addr, msg->sge[i].length,
					      DMA_TO_DEVICE);

	msg->cqe.done = send_done;
	msg->wr.opcode = IB_WR_SEND;
	msg->wr.sg_list = &msg->sge[0];
	msg->wr.num_sge = msg->num_sge;
	msg->wr.next = NULL;

	if (send_ctx) {
		msg->wr.wr_cqe = NULL;
		msg->wr.send_flags = 0;
		if (!list_empty(&send_ctx->msg_list)) {
			struct smb_direct_sendmsg *last;

			last = list_last_entry(&send_ctx->msg_list,
					       struct smb_direct_sendmsg,
					       list);
			last->wr.next = &msg->wr;
		}
		list_add_tail(&msg->list, &send_ctx->msg_list);
		send_ctx->wr_cnt++;
		return 0;
	}

	msg->wr.wr_cqe = &msg->cqe;
	msg->wr.send_flags = IB_SEND_SIGNALED;
	return smb_direct_post_send(t, &msg->wr);
}

static int smb_direct_post_send_data(struct smb_direct_transport *t,
				     struct smb_direct_send_ctx *send_ctx,
				     struct kvec *iov, int niov,
				     int remaining_data_length)
{
	int i, j, ret;
	struct smb_direct_sendmsg *msg;
	int data_length;
	struct scatterlist sg[SMB_DIRECT_MAX_SEND_SGES - 1];

	ret = wait_for_send_credits(t, send_ctx);
	if (ret)
		return ret;

	data_length = 0;
	for (i = 0; i < niov; i++)
		data_length += iov[i].iov_len;

	ret = smb_direct_create_header(t, data_length, remaining_data_length,
				       &msg);
	if (ret) {
		atomic_inc(&t->send_credits);
		return ret;
	}

	for (i = 0; i < niov; i++) {
		struct ib_sge *sge;
		int sg_cnt;

		sg_init_table(sg, SMB_DIRECT_MAX_SEND_SGES - 1);
		sg_cnt = get_mapped_sg_list(t->cm_id->device,
					    iov[i].iov_base, iov[i].iov_len,
					    sg, SMB_DIRECT_MAX_SEND_SGES - 1,
					    DMA_TO_DEVICE);
		if (sg_cnt <= 0) {
			pr_err("failed to map buffer\n");
			ret = -ENOMEM;
			goto err;
		} else if (sg_cnt + msg->num_sge > SMB_DIRECT_MAX_SEND_SGES) {
			pr_err("buffer not fitted into sges\n");
			ret = -E2BIG;
			ib_dma_unmap_sg(t->cm_id->device, sg, sg_cnt,
					DMA_TO_DEVICE);
			goto err;
		}

		for (j = 0; j < sg_cnt; j++) {
			sge = &msg->sge[msg->num_sge];
			sge->addr = sg_dma_address(&sg[j]);
			sge->length = sg_dma_len(&sg[j]);
			sge->lkey = t->pd->local_dma_lkey;
			msg->num_sge++;
		}
	}

	ret = post_sendmsg(t, send_ctx, msg);
	if (ret)
		goto err;
	return 0;
err:
	smb_direct_free_sendmsg(t, msg);
	atomic_inc(&t->send_credits);
	return ret;
}

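/*
 * Send an upper-layer PDU, splitting the iovec into SMB Direct messages
 * no larger than the negotiated max_send_size.
 */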
static int smb_direct_writev(struct ksmbd_transport *t,
			     struct kvec *iov, int niovs, int buflen,
			     bool need_invalidate, unsigned int remote_key)
{
	struct smb_direct_transport *st = smb_trans_direct_transfort(t);
	int remaining_data_length;
	int start, i, j;
	int max_iov_size = st->max_send_size -
			sizeof(struct smb_direct_data_transfer);
	int ret;
	struct kvec vec;
	struct smb_direct_send_ctx send_ctx;

	if (st->status != SMB_DIRECT_CS_CONNECTED)
		return -ENOTCONN;

	//FIXME: skip RFC1002 header..
	buflen -= 4;
	iov[0].iov_base += 4;
	iov[0].iov_len -= 4;

	remaining_data_length = buflen;
	ksmbd_debug(RDMA, "Sending smb (RDMA): smb_len=%u\n", buflen);

	smb_direct_send_ctx_init(st, &send_ctx, need_invalidate, remote_key);
	start = i = 0;
	buflen = 0;
	while (true) {
		buflen += iov[i].iov_len;
		if (buflen > max_iov_size) {
			if (i > start) {
				remaining_data_length -=
					(buflen - iov[i].iov_len);
				ret = smb_direct_post_send_data(st, &send_ctx,
								&iov[start], i - start,
								remaining_data_length);
				if (ret)
					goto done;
			} else {
				/* iov[start] is too big, break it */
				int nvec = (buflen + max_iov_size - 1) /
						max_iov_size;

				for (j = 0; j < nvec; j++) {
					vec.iov_base =
						(char *)iov[start].iov_base +
						j * max_iov_size;
					vec.iov_len =
						min_t(int, max_iov_size,
						      buflen - max_iov_size * j);
					remaining_data_length -= vec.iov_len;
					ret = smb_direct_post_send_data(st, &send_ctx, &vec, 1,
									remaining_data_length);
					if (ret)
						goto done;
				}
				i++;
				if (i == niovs)
					break;
			}
			start = i;
			buflen = 0;
		} else {
			i++;
			if (i == niovs) {
				/* send out all remaining vecs */
				remaining_data_length -= buflen;
				ret = smb_direct_post_send_data(st, &send_ctx,
								&iov[start], i - start,
								remaining_data_length);
				if (ret)
					goto done;
				break;
			}
		}
	}

done:
	ret = smb_direct_flush_send_list(st, &send_ctx, true);

	/*
	 * As an optimization, we don't wait for individual I/O to finish
	 * before sending the next one.
	 * Send them all and wait for the pending send count to reach 0,
	 * which means all the I/Os have been posted and we are good to return.
	 */

	wait_event(st->wait_send_payload_pending,
		   atomic_read(&st->send_payload_pending) == 0);
	return ret;
}

static void read_write_done(struct ib_cq *cq, struct ib_wc *wc,
			    enum dma_data_direction dir)
{
	struct smb_direct_rdma_rw_msg *msg = container_of(wc->wr_cqe,
							  struct smb_direct_rdma_rw_msg, cqe);
	struct smb_direct_transport *t = msg->t;

	if (wc->status != IB_WC_SUCCESS) {
		pr_err("read/write error. opcode = %d, status = %s(%d)\n",
		       wc->opcode, ib_wc_status_msg(wc->status), wc->status);
		smb_direct_disconnect_rdma_connection(t);
	}

	if (atomic_inc_return(&t->rw_avail_ops) > 0)
		wake_up(&t->wait_rw_avail_ops);

	rdma_rw_ctx_destroy(&msg->rw_ctx, t->qp, t->qp->port,
			    msg->sg_list, msg->sgt.nents, dir);
	sg_free_table_chained(&msg->sgt, SG_CHUNK_SIZE);
	complete(msg->completion);
	kfree(msg);
}

static void read_done(struct ib_cq *cq, struct ib_wc *wc)
{
	read_write_done(cq, wc, DMA_FROM_DEVICE);
}

static void write_done(struct ib_cq *cq, struct ib_wc *wc)
{
	read_write_done(cq, wc, DMA_TO_DEVICE);
}

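/*
 * Perform a single RDMA read or write of the peer-described buffer using
 * the rdma_rw API, waiting for its completion before returning.
 */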
static int smb_direct_rdma_xmit(struct smb_direct_transport *t, void *buf,
				int buf_len, u32 remote_key, u64 remote_offset,
				u32 remote_len, bool is_read)
{
	struct smb_direct_rdma_rw_msg *msg;
	int ret;
	DECLARE_COMPLETION_ONSTACK(completion);
	struct ib_send_wr *first_wr = NULL;

	ret = wait_for_credits(t, &t->wait_rw_avail_ops, &t->rw_avail_ops);
	if (ret < 0)
		return ret;

	/* TODO: mempool */
	msg = kmalloc(offsetof(struct smb_direct_rdma_rw_msg, sg_list) +
		      sizeof(struct scatterlist) * SG_CHUNK_SIZE, GFP_KERNEL);
	if (!msg) {
		atomic_inc(&t->rw_avail_ops);
		return -ENOMEM;
	}

	msg->sgt.sgl = &msg->sg_list[0];
	ret = sg_alloc_table_chained(&msg->sgt,
				     get_buf_page_count(buf, buf_len),
				     msg->sg_list, SG_CHUNK_SIZE);
	if (ret) {
		atomic_inc(&t->rw_avail_ops);
		kfree(msg);
		return -ENOMEM;
	}

	ret = get_sg_list(buf, buf_len, msg->sgt.sgl, msg->sgt.orig_nents);
	if (ret <= 0) {
		pr_err("failed to get pages\n");
		goto err;
	}

	ret = rdma_rw_ctx_init(&msg->rw_ctx, t->qp, t->qp->port,
			       msg->sg_list, get_buf_page_count(buf, buf_len),
			       0, remote_offset, remote_key,
			       is_read ? DMA_FROM_DEVICE : DMA_TO_DEVICE);
	if (ret < 0) {
		pr_err("failed to init rdma_rw_ctx: %d\n", ret);
		goto err;
	}

	msg->t = t;
	msg->cqe.done = is_read ? read_done : write_done;
	msg->completion = &completion;
	first_wr = rdma_rw_ctx_wrs(&msg->rw_ctx, t->qp, t->qp->port,
				   &msg->cqe, NULL);

	ret = ib_post_send(t->qp, first_wr, NULL);
	if (ret) {
		pr_err("failed to post send wr: %d\n", ret);
		goto err;
	}

	wait_for_completion(&completion);
	return 0;

err:
	atomic_inc(&t->rw_avail_ops);
	if (first_wr)
		rdma_rw_ctx_destroy(&msg->rw_ctx, t->qp, t->qp->port,
				    msg->sg_list, msg->sgt.nents,
				    is_read ? DMA_FROM_DEVICE : DMA_TO_DEVICE);
	sg_free_table_chained(&msg->sgt, SG_CHUNK_SIZE);
	kfree(msg);
	return ret;
}

static int smb_direct_rdma_write(struct ksmbd_transport *t, void *buf,
				 unsigned int buflen, u32 remote_key,
				 u64 remote_offset, u32 remote_len)
{
	return smb_direct_rdma_xmit(smb_trans_direct_transfort(t), buf, buflen,
				    remote_key, remote_offset,
				    remote_len, false);
}

static int smb_direct_rdma_read(struct ksmbd_transport *t, void *buf,
				unsigned int buflen, u32 remote_key,
				u64 remote_offset, u32 remote_len)
{
	return smb_direct_rdma_xmit(smb_trans_direct_transfort(t), buf, buflen,
				    remote_key, remote_offset,
				    remote_len, true);
}

static void smb_direct_disconnect(struct ksmbd_transport *t)
{
	struct smb_direct_transport *st = smb_trans_direct_transfort(t);

	ksmbd_debug(RDMA, "Disconnecting cm_id=%p\n", st->cm_id);

	smb_direct_disconnect_rdma_work(&st->disconnect_work);
	wait_event_interruptible(st->wait_status,
				 st->status == SMB_DIRECT_CS_DISCONNECTED);
	free_transport(st);
}

static void smb_direct_shutdown(struct ksmbd_transport *t)
{
	struct smb_direct_transport *st = smb_trans_direct_transfort(t);

	ksmbd_debug(RDMA, "smb-direct shutdown cm_id=%p\n", st->cm_id);

	smb_direct_disconnect_rdma_work(&st->disconnect_work);
}

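/* RDMA CM event handler: tracks connection state transitions for the transport. */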
static int smb_direct_cm_handler(struct rdma_cm_id *cm_id,
				 struct rdma_cm_event *event)
{
	struct smb_direct_transport *t = cm_id->context;

	ksmbd_debug(RDMA, "RDMA CM event. cm_id=%p event=%s (%d)\n",
		    cm_id, rdma_event_msg(event->event), event->event);

	switch (event->event) {
	case RDMA_CM_EVENT_ESTABLISHED: {
		t->status = SMB_DIRECT_CS_CONNECTED;
		wake_up_interruptible(&t->wait_status);
		break;
	}
	case RDMA_CM_EVENT_DEVICE_REMOVAL:
	case RDMA_CM_EVENT_DISCONNECTED: {
		t->status = SMB_DIRECT_CS_DISCONNECTED;
		wake_up_interruptible(&t->wait_status);
		wake_up_interruptible(&t->wait_reassembly_queue);
		wake_up(&t->wait_send_credits);
		break;
	}
	case RDMA_CM_EVENT_CONNECT_ERROR: {
		t->status = SMB_DIRECT_CS_DISCONNECTED;
		wake_up_interruptible(&t->wait_status);
		break;
	}
	default:
		pr_err("Unexpected RDMA CM event. cm_id=%p, event=%s (%d)\n",
		       cm_id, rdma_event_msg(event->event),
		       event->event);
		break;
	}
	return 0;
}

static void smb_direct_qpair_handler(struct ib_event *event, void *context)
{
	struct smb_direct_transport *t = context;

	ksmbd_debug(RDMA, "Received QP event. cm_id=%p, event=%s (%d)\n",
		    t->cm_id, ib_event_msg(event->event), event->event);

	switch (event->event) {
	case IB_EVENT_CQ_ERR:
	case IB_EVENT_QP_FATAL:
		smb_direct_disconnect_rdma_connection(t);
		break;
	default:
		break;
	}
}

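/*
 * Post the SMB Direct negotiate response and wait for the send to complete.
 * On failure, a minimal response carrying STATUS_NOT_SUPPORTED is sent.
 */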
1518static int smb_direct_send_negotiate_response(struct smb_direct_transport *t,
Namjae Jeon070fb212021-05-26 17:57:12 +09001519 int failed)
Namjae Jeon0626e662021-03-16 13:07:11 +09001520{
1521 struct smb_direct_sendmsg *sendmsg;
1522 struct smb_direct_negotiate_resp *resp;
1523 int ret;
1524
1525 sendmsg = smb_direct_alloc_sendmsg(t);
1526 if (IS_ERR(sendmsg))
1527 return -ENOMEM;
1528
1529 resp = (struct smb_direct_negotiate_resp *)sendmsg->packet;
1530 if (failed) {
1531 memset(resp, 0, sizeof(*resp));
 1532		resp->min_version = SMB_DIRECT_VERSION_LE;
 1533		resp->max_version = SMB_DIRECT_VERSION_LE;
1534 resp->status = STATUS_NOT_SUPPORTED;
1535 } else {
1536 resp->status = STATUS_SUCCESS;
1537 resp->min_version = SMB_DIRECT_VERSION_LE;
1538 resp->max_version = SMB_DIRECT_VERSION_LE;
1539 resp->negotiated_version = SMB_DIRECT_VERSION_LE;
1540 resp->reserved = 0;
1541 resp->credits_requested =
1542 cpu_to_le16(t->send_credit_target);
Namjae Jeon64b39f42021-03-30 14:25:35 +09001543 resp->credits_granted = cpu_to_le16(manage_credits_prior_sending(t));
Namjae Jeon0626e662021-03-16 13:07:11 +09001544 resp->max_readwrite_size = cpu_to_le32(t->max_rdma_rw_size);
1545 resp->preferred_send_size = cpu_to_le32(t->max_send_size);
1546 resp->max_receive_size = cpu_to_le32(t->max_recv_size);
1547 resp->max_fragmented_size =
1548 cpu_to_le32(t->max_fragmented_recv_size);
1549 }
1550
1551 sendmsg->sge[0].addr = ib_dma_map_single(t->cm_id->device,
Namjae Jeon070fb212021-05-26 17:57:12 +09001552 (void *)resp, sizeof(*resp),
1553 DMA_TO_DEVICE);
1554 ret = ib_dma_mapping_error(t->cm_id->device, sendmsg->sge[0].addr);
Namjae Jeon0626e662021-03-16 13:07:11 +09001555 if (ret) {
1556 smb_direct_free_sendmsg(t, sendmsg);
1557 return ret;
1558 }
1559
1560 sendmsg->num_sge = 1;
1561 sendmsg->sge[0].length = sizeof(*resp);
1562 sendmsg->sge[0].lkey = t->pd->local_dma_lkey;
1563
1564 ret = post_sendmsg(t, NULL, sendmsg);
1565 if (ret) {
1566 smb_direct_free_sendmsg(t, sendmsg);
1567 return ret;
1568 }
1569
1570 wait_event(t->wait_send_pending,
Namjae Jeon070fb212021-05-26 17:57:12 +09001571 atomic_read(&t->send_pending) == 0);
Namjae Jeon0626e662021-03-16 13:07:11 +09001572 return 0;
1573}
1574
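/* Accept the incoming RDMA connection. iWARP ports carry the
 * responder resources / IRD values in the connect private data
 * (ird_ord_hdr).
 */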
1575static int smb_direct_accept_client(struct smb_direct_transport *t)
1576{
1577 struct rdma_conn_param conn_param;
1578 struct ib_port_immutable port_immutable;
1579 u32 ird_ord_hdr[2];
1580 int ret;
1581
1582 memset(&conn_param, 0, sizeof(conn_param));
Namjae Jeon070fb212021-05-26 17:57:12 +09001583 conn_param.initiator_depth = min_t(u8, t->cm_id->device->attrs.max_qp_rd_atom,
1584 SMB_DIRECT_CM_INITIATOR_DEPTH);
Namjae Jeon0626e662021-03-16 13:07:11 +09001585 conn_param.responder_resources = 0;
1586
1587 t->cm_id->device->ops.get_port_immutable(t->cm_id->device,
Namjae Jeon070fb212021-05-26 17:57:12 +09001588 t->cm_id->port_num,
1589 &port_immutable);
Namjae Jeon0626e662021-03-16 13:07:11 +09001590 if (port_immutable.core_cap_flags & RDMA_CORE_PORT_IWARP) {
1591 ird_ord_hdr[0] = conn_param.responder_resources;
1592 ird_ord_hdr[1] = 1;
1593 conn_param.private_data = ird_ord_hdr;
1594 conn_param.private_data_len = sizeof(ird_ord_hdr);
1595 } else {
1596 conn_param.private_data = NULL;
1597 conn_param.private_data_len = 0;
1598 }
1599 conn_param.retry_count = SMB_DIRECT_CM_RETRY;
1600 conn_param.rnr_retry_count = SMB_DIRECT_CM_RNR_RETRY;
1601 conn_param.flow_control = 0;
1602
1603 ret = rdma_accept(t->cm_id, &conn_param);
1604 if (ret) {
Namjae Jeonbde16942021-06-28 15:23:19 +09001605 pr_err("error at rdma_accept: %d\n", ret);
Namjae Jeon0626e662021-03-16 13:07:11 +09001606 return ret;
1607 }
Namjae Jeon0626e662021-03-16 13:07:11 +09001608 return 0;
1609}
1610
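/* Post a receive buffer for the client's NEGOTIATE_REQ, accept the
 * connection, and pre-post the initial receive credits.
 */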
Hyunchul Lee99b76502022-01-04 14:56:26 +09001611static int smb_direct_prepare_negotiation(struct smb_direct_transport *t)
Namjae Jeon0626e662021-03-16 13:07:11 +09001612{
1613 int ret;
1614 struct smb_direct_recvmsg *recvmsg;
Namjae Jeon0626e662021-03-16 13:07:11 +09001615
1616 recvmsg = get_free_recvmsg(t);
1617 if (!recvmsg)
1618 return -ENOMEM;
1619 recvmsg->type = SMB_DIRECT_MSG_NEGOTIATE_REQ;
1620
1621 ret = smb_direct_post_recv(t, recvmsg);
1622 if (ret) {
Namjae Jeonbde16942021-06-28 15:23:19 +09001623 pr_err("Can't post recv: %d\n", ret);
Hyunchul Lee99b76502022-01-04 14:56:26 +09001624 goto out_err;
Namjae Jeon0626e662021-03-16 13:07:11 +09001625 }
1626
1627 t->negotiation_requested = false;
1628 ret = smb_direct_accept_client(t);
1629 if (ret) {
Namjae Jeonbde16942021-06-28 15:23:19 +09001630 pr_err("Can't accept client\n");
Hyunchul Lee99b76502022-01-04 14:56:26 +09001631 goto out_err;
Namjae Jeon0626e662021-03-16 13:07:11 +09001632 }
1633
1634 smb_direct_post_recv_credits(&t->post_recv_credits_work.work);
Hyunchul Lee99b76502022-01-04 14:56:26 +09001635 return 0;
1636out_err:
1637 put_recvmsg(t, recvmsg);
Namjae Jeon0626e662021-03-16 13:07:11 +09001638 return ret;
1639}
1640
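/* Derive the QP capabilities (work request and SGE counts) from the
 * module-level defaults, validate them against the device limits, and
 * initialize the per-connection credit counters.
 */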
1641static int smb_direct_init_params(struct smb_direct_transport *t,
Namjae Jeon070fb212021-05-26 17:57:12 +09001642 struct ib_qp_cap *cap)
Namjae Jeon0626e662021-03-16 13:07:11 +09001643{
1644 struct ib_device *device = t->cm_id->device;
1645 int max_send_sges, max_pages, max_rw_wrs, max_send_wrs;
1646
 1647	/* Two extra SGEs are needed: one for the separately mapped SMB_DIRECT
 1648	 * header and one because the send buffer may not be page aligned.
 1649	 */
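	/* For illustration only (assumed values): with 4 KiB pages and an
	 * 8 KiB maximum send size this is DIV_ROUND_UP(8192, 4096) + 2 = 4
	 * SGEs.
	 */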
1650 t->max_send_size = smb_direct_max_send_size;
1651 max_send_sges = DIV_ROUND_UP(t->max_send_size, PAGE_SIZE) + 2;
1652 if (max_send_sges > SMB_DIRECT_MAX_SEND_SGES) {
Namjae Jeonbde16942021-06-28 15:23:19 +09001653 pr_err("max_send_size %d is too large\n", t->max_send_size);
Namjae Jeon0626e662021-03-16 13:07:11 +09001654 return -EINVAL;
1655 }
1656
 1657	/*
 1658	 * Allow smb_direct_max_outstanding_rw_ops in-flight RDMA
 1659	 * reads/writes. The HCA guarantees at least max_send_sge SGEs for
 1660	 * an RDMA read/write work request, and if memory registration is
 1661	 * used, reg_mr and local_inv WRs are needed for each read/write.
 1662	 */
1663 t->max_rdma_rw_size = smb_direct_max_read_write_size;
1664 max_pages = DIV_ROUND_UP(t->max_rdma_rw_size, PAGE_SIZE) + 1;
1665 max_rw_wrs = DIV_ROUND_UP(max_pages, SMB_DIRECT_MAX_SEND_SGES);
1666 max_rw_wrs += rdma_rw_mr_factor(device, t->cm_id->port_num,
1667 max_pages) * 2;
1668 max_rw_wrs *= smb_direct_max_outstanding_rw_ops;
1669
1670 max_send_wrs = smb_direct_send_credit_target + max_rw_wrs;
1671 if (max_send_wrs > device->attrs.max_cqe ||
Namjae Jeon64b39f42021-03-30 14:25:35 +09001672 max_send_wrs > device->attrs.max_qp_wr) {
Namjae Jeonbde16942021-06-28 15:23:19 +09001673 pr_err("consider lowering send_credit_target = %d, or max_outstanding_rw_ops = %d\n",
1674 smb_direct_send_credit_target,
1675 smb_direct_max_outstanding_rw_ops);
1676 pr_err("Possible CQE overrun, device reporting max_cqe %d max_qp_wr %d\n",
1677 device->attrs.max_cqe, device->attrs.max_qp_wr);
Namjae Jeon0626e662021-03-16 13:07:11 +09001678 return -EINVAL;
1679 }
1680
1681 if (smb_direct_receive_credit_max > device->attrs.max_cqe ||
1682 smb_direct_receive_credit_max > device->attrs.max_qp_wr) {
Namjae Jeonbde16942021-06-28 15:23:19 +09001683 pr_err("consider lowering receive_credit_max = %d\n",
1684 smb_direct_receive_credit_max);
 1685		pr_err("Possible CQE overrun, device reporting max_cqe %d max_qp_wr %d\n",
1686 device->attrs.max_cqe, device->attrs.max_qp_wr);
Namjae Jeon0626e662021-03-16 13:07:11 +09001687 return -EINVAL;
1688 }
1689
1690 if (device->attrs.max_send_sge < SMB_DIRECT_MAX_SEND_SGES) {
Namjae Jeonbde16942021-06-28 15:23:19 +09001691		pr_err("device max_send_sge = %d is too small\n",
1692 device->attrs.max_send_sge);
Namjae Jeon0626e662021-03-16 13:07:11 +09001693 return -EINVAL;
1694 }
1695 if (device->attrs.max_recv_sge < SMB_DIRECT_MAX_RECV_SGES) {
Namjae Jeonbde16942021-06-28 15:23:19 +09001696		pr_err("device max_recv_sge = %d is too small\n",
1697 device->attrs.max_recv_sge);
Namjae Jeon0626e662021-03-16 13:07:11 +09001698 return -EINVAL;
1699 }
1700
1701 t->recv_credits = 0;
1702 t->count_avail_recvmsg = 0;
1703
1704 t->recv_credit_max = smb_direct_receive_credit_max;
1705 t->recv_credit_target = 10;
1706 t->new_recv_credits = 0;
1707
1708 t->send_credit_target = smb_direct_send_credit_target;
1709 atomic_set(&t->send_credits, 0);
1710 atomic_set(&t->rw_avail_ops, smb_direct_max_outstanding_rw_ops);
1711
1712 t->max_send_size = smb_direct_max_send_size;
1713 t->max_recv_size = smb_direct_max_receive_size;
1714 t->max_fragmented_recv_size = smb_direct_max_fragmented_recv_size;
1715
1716 cap->max_send_wr = max_send_wrs;
1717 cap->max_recv_wr = t->recv_credit_max;
1718 cap->max_send_sge = SMB_DIRECT_MAX_SEND_SGES;
1719 cap->max_recv_sge = SMB_DIRECT_MAX_RECV_SGES;
1720 cap->max_inline_data = 0;
Hyunchul Leec9f18922022-01-07 14:45:30 +09001721 cap->max_rdma_ctxs =
1722 rdma_rw_mr_factor(device, t->cm_id->port_num, max_pages) *
1723 smb_direct_max_outstanding_rw_ops;
Namjae Jeon0626e662021-03-16 13:07:11 +09001724 return 0;
1725}
1726
1727static void smb_direct_destroy_pools(struct smb_direct_transport *t)
1728{
1729 struct smb_direct_recvmsg *recvmsg;
1730
1731 while ((recvmsg = get_free_recvmsg(t)))
1732 mempool_free(recvmsg, t->recvmsg_mempool);
1733 while ((recvmsg = get_empty_recvmsg(t)))
1734 mempool_free(recvmsg, t->recvmsg_mempool);
1735
1736 mempool_destroy(t->recvmsg_mempool);
1737 t->recvmsg_mempool = NULL;
1738
1739 kmem_cache_destroy(t->recvmsg_cache);
1740 t->recvmsg_cache = NULL;
1741
1742 mempool_destroy(t->sendmsg_mempool);
1743 t->sendmsg_mempool = NULL;
1744
1745 kmem_cache_destroy(t->sendmsg_cache);
1746 t->sendmsg_cache = NULL;
1747}
1748
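/* Create the per-connection slab caches and mempools for send and
 * receive messages, and pre-allocate recv_credit_max receive buffers.
 */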
1749static int smb_direct_create_pools(struct smb_direct_transport *t)
1750{
1751 char name[80];
1752 int i;
1753 struct smb_direct_recvmsg *recvmsg;
1754
1755 snprintf(name, sizeof(name), "smb_direct_rqst_pool_%p", t);
1756 t->sendmsg_cache = kmem_cache_create(name,
Namjae Jeon070fb212021-05-26 17:57:12 +09001757 sizeof(struct smb_direct_sendmsg) +
1758 sizeof(struct smb_direct_negotiate_resp),
1759 0, SLAB_HWCACHE_ALIGN, NULL);
Namjae Jeon0626e662021-03-16 13:07:11 +09001760 if (!t->sendmsg_cache)
1761 return -ENOMEM;
1762
1763 t->sendmsg_mempool = mempool_create(t->send_credit_target,
Namjae Jeon070fb212021-05-26 17:57:12 +09001764 mempool_alloc_slab, mempool_free_slab,
1765 t->sendmsg_cache);
Namjae Jeon0626e662021-03-16 13:07:11 +09001766 if (!t->sendmsg_mempool)
1767 goto err;
1768
1769 snprintf(name, sizeof(name), "smb_direct_resp_%p", t);
1770 t->recvmsg_cache = kmem_cache_create(name,
Namjae Jeon070fb212021-05-26 17:57:12 +09001771 sizeof(struct smb_direct_recvmsg) +
1772 t->max_recv_size,
1773 0, SLAB_HWCACHE_ALIGN, NULL);
Namjae Jeon0626e662021-03-16 13:07:11 +09001774 if (!t->recvmsg_cache)
1775 goto err;
1776
1777 t->recvmsg_mempool =
1778 mempool_create(t->recv_credit_max, mempool_alloc_slab,
Namjae Jeon070fb212021-05-26 17:57:12 +09001779 mempool_free_slab, t->recvmsg_cache);
Namjae Jeon0626e662021-03-16 13:07:11 +09001780 if (!t->recvmsg_mempool)
1781 goto err;
1782
1783 INIT_LIST_HEAD(&t->recvmsg_queue);
1784
1785 for (i = 0; i < t->recv_credit_max; i++) {
1786 recvmsg = mempool_alloc(t->recvmsg_mempool, GFP_KERNEL);
1787 if (!recvmsg)
1788 goto err;
1789 recvmsg->transport = t;
1790 list_add(&recvmsg->list, &t->recvmsg_queue);
1791 }
1792 t->count_avail_recvmsg = t->recv_credit_max;
1793
1794 return 0;
1795err:
1796 smb_direct_destroy_pools(t);
1797 return -ENOMEM;
1798}
1799
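/* Allocate the PD, the send/recv completion queues and the RC QP.
 * When a full-size RDMA read/write needs more pages than
 * attrs.max_sgl_rd allows, an MR pool is initialized so the transfers
 * can use fast-registration (FRWR) MRs.
 */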
1800static int smb_direct_create_qpair(struct smb_direct_transport *t,
Namjae Jeon070fb212021-05-26 17:57:12 +09001801 struct ib_qp_cap *cap)
Namjae Jeon0626e662021-03-16 13:07:11 +09001802{
1803 int ret;
1804 struct ib_qp_init_attr qp_attr;
Hyunchul Leec9f18922022-01-07 14:45:30 +09001805 int pages_per_rw;
Namjae Jeon0626e662021-03-16 13:07:11 +09001806
1807 t->pd = ib_alloc_pd(t->cm_id->device, 0);
1808 if (IS_ERR(t->pd)) {
Namjae Jeonbde16942021-06-28 15:23:19 +09001809 pr_err("Can't create RDMA PD\n");
Namjae Jeon0626e662021-03-16 13:07:11 +09001810 ret = PTR_ERR(t->pd);
1811 t->pd = NULL;
1812 return ret;
1813 }
1814
1815 t->send_cq = ib_alloc_cq(t->cm_id->device, t,
Namjae Jeon070fb212021-05-26 17:57:12 +09001816 t->send_credit_target, 0, IB_POLL_WORKQUEUE);
Namjae Jeon0626e662021-03-16 13:07:11 +09001817 if (IS_ERR(t->send_cq)) {
Namjae Jeonbde16942021-06-28 15:23:19 +09001818 pr_err("Can't create RDMA send CQ\n");
Namjae Jeon0626e662021-03-16 13:07:11 +09001819 ret = PTR_ERR(t->send_cq);
1820 t->send_cq = NULL;
1821 goto err;
1822 }
1823
1824 t->recv_cq = ib_alloc_cq(t->cm_id->device, t,
Namjae Jeon070fb212021-05-26 17:57:12 +09001825 cap->max_send_wr + cap->max_rdma_ctxs,
1826 0, IB_POLL_WORKQUEUE);
Namjae Jeon0626e662021-03-16 13:07:11 +09001827 if (IS_ERR(t->recv_cq)) {
Namjae Jeonbde16942021-06-28 15:23:19 +09001828 pr_err("Can't create RDMA recv CQ\n");
Namjae Jeon0626e662021-03-16 13:07:11 +09001829 ret = PTR_ERR(t->recv_cq);
1830 t->recv_cq = NULL;
1831 goto err;
1832 }
1833
1834 memset(&qp_attr, 0, sizeof(qp_attr));
1835 qp_attr.event_handler = smb_direct_qpair_handler;
1836 qp_attr.qp_context = t;
1837 qp_attr.cap = *cap;
1838 qp_attr.sq_sig_type = IB_SIGNAL_REQ_WR;
1839 qp_attr.qp_type = IB_QPT_RC;
1840 qp_attr.send_cq = t->send_cq;
1841 qp_attr.recv_cq = t->recv_cq;
1842 qp_attr.port_num = ~0;
1843
1844 ret = rdma_create_qp(t->cm_id, t->pd, &qp_attr);
1845 if (ret) {
Namjae Jeonbde16942021-06-28 15:23:19 +09001846 pr_err("Can't create RDMA QP: %d\n", ret);
Namjae Jeon0626e662021-03-16 13:07:11 +09001847 goto err;
1848 }
1849
1850 t->qp = t->cm_id->qp;
1851 t->cm_id->event_handler = smb_direct_cm_handler;
1852
Hyunchul Leec9f18922022-01-07 14:45:30 +09001853 pages_per_rw = DIV_ROUND_UP(t->max_rdma_rw_size, PAGE_SIZE) + 1;
1854 if (pages_per_rw > t->cm_id->device->attrs.max_sgl_rd) {
1855 int pages_per_mr, mr_count;
1856
1857 pages_per_mr = min_t(int, pages_per_rw,
1858 t->cm_id->device->attrs.max_fast_reg_page_list_len);
1859 mr_count = DIV_ROUND_UP(pages_per_rw, pages_per_mr) *
1860 atomic_read(&t->rw_avail_ops);
1861 ret = ib_mr_pool_init(t->qp, &t->qp->rdma_mrs, mr_count,
1862 IB_MR_TYPE_MEM_REG, pages_per_mr, 0);
1863 if (ret) {
1864 pr_err("failed to init mr pool count %d pages %d\n",
1865 mr_count, pages_per_mr);
1866 goto err;
1867 }
1868 }
1869
Namjae Jeon0626e662021-03-16 13:07:11 +09001870 return 0;
1871err:
1872 if (t->qp) {
1873 ib_destroy_qp(t->qp);
1874 t->qp = NULL;
1875 }
1876 if (t->recv_cq) {
1877 ib_destroy_cq(t->recv_cq);
1878 t->recv_cq = NULL;
1879 }
1880 if (t->send_cq) {
1881 ib_destroy_cq(t->send_cq);
1882 t->send_cq = NULL;
1883 }
1884 if (t->pd) {
1885 ib_dealloc_pd(t->pd);
1886 t->pd = NULL;
1887 }
1888 return ret;
1889}
1890
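/* ksmbd_transport_ops.prepare: wait for the client's negotiate request
 * (bounded by SMB_DIRECT_NEGOTIATE_TIMEOUT), clamp the send and
 * receive sizes against the values the client advertised, and send the
 * negotiate response.
 */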
1891static int smb_direct_prepare(struct ksmbd_transport *t)
1892{
Namjae Jeon02d4b4a2021-06-25 13:43:01 +09001893 struct smb_direct_transport *st = smb_trans_direct_transfort(t);
Hyunchul Lee99b76502022-01-04 14:56:26 +09001894 struct smb_direct_recvmsg *recvmsg;
1895 struct smb_direct_negotiate_req *req;
1896 int ret;
1897
1898 ksmbd_debug(RDMA, "Waiting for SMB_DIRECT negotiate request\n");
1899 ret = wait_event_interruptible_timeout(st->wait_status,
1900 st->negotiation_requested ||
1901 st->status == SMB_DIRECT_CS_DISCONNECTED,
1902 SMB_DIRECT_NEGOTIATE_TIMEOUT * HZ);
1903 if (ret <= 0 || st->status == SMB_DIRECT_CS_DISCONNECTED)
1904 return ret < 0 ? ret : -ETIMEDOUT;
1905
1906 recvmsg = get_first_reassembly(st);
1907 if (!recvmsg)
1908 return -ECONNABORTED;
1909
1910 ret = smb_direct_check_recvmsg(recvmsg);
1911 if (ret == -ECONNABORTED)
1912 goto out;
1913
1914 req = (struct smb_direct_negotiate_req *)recvmsg->packet;
1915 st->max_recv_size = min_t(int, st->max_recv_size,
1916 le32_to_cpu(req->preferred_send_size));
1917 st->max_send_size = min_t(int, st->max_send_size,
1918 le32_to_cpu(req->max_receive_size));
1919 st->max_fragmented_send_size =
Hyunchul Lee4d02c4f2022-01-07 14:45:31 +09001920 le32_to_cpu(req->max_fragmented_size);
1921 st->max_fragmented_recv_size =
1922 (st->recv_credit_max * st->max_recv_size) / 2;
Hyunchul Lee99b76502022-01-04 14:56:26 +09001923
1924 ret = smb_direct_send_negotiate_response(st, ret);
1925out:
1926 spin_lock_irq(&st->reassembly_queue_lock);
1927 st->reassembly_queue_length--;
1928 list_del(&recvmsg->list);
1929 spin_unlock_irq(&st->reassembly_queue_lock);
1930 put_recvmsg(st, recvmsg);
1931
1932 return ret;
1933}
1934
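/* Per-connection bring-up: init parameters, create the message pools
 * and the QP, then prepare for SMB_DIRECT negotiation.
 */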
1935static int smb_direct_connect(struct smb_direct_transport *st)
1936{
Namjae Jeon0626e662021-03-16 13:07:11 +09001937 int ret;
1938 struct ib_qp_cap qp_cap;
1939
1940 ret = smb_direct_init_params(st, &qp_cap);
1941 if (ret) {
Namjae Jeonbde16942021-06-28 15:23:19 +09001942 pr_err("Can't configure RDMA parameters\n");
Namjae Jeon0626e662021-03-16 13:07:11 +09001943 return ret;
1944 }
1945
1946 ret = smb_direct_create_pools(st);
1947 if (ret) {
Namjae Jeonbde16942021-06-28 15:23:19 +09001948 pr_err("Can't init RDMA pool: %d\n", ret);
Namjae Jeon0626e662021-03-16 13:07:11 +09001949 return ret;
1950 }
1951
1952 ret = smb_direct_create_qpair(st, &qp_cap);
1953 if (ret) {
Namjae Jeonbde16942021-06-28 15:23:19 +09001954		pr_err("Can't create RDMA qpair: %d\n", ret);
Namjae Jeon0626e662021-03-16 13:07:11 +09001955 return ret;
1956 }
1957
Hyunchul Lee99b76502022-01-04 14:56:26 +09001958 ret = smb_direct_prepare_negotiation(st);
Namjae Jeon0626e662021-03-16 13:07:11 +09001959 if (ret) {
Namjae Jeonbde16942021-06-28 15:23:19 +09001960 pr_err("Can't negotiate: %d\n", ret);
Namjae Jeon0626e662021-03-16 13:07:11 +09001961 return ret;
1962 }
Namjae Jeon0626e662021-03-16 13:07:11 +09001963 return 0;
1964}
1965
1966static bool rdma_frwr_is_supported(struct ib_device_attr *attrs)
1967{
1968 if (!(attrs->device_cap_flags & IB_DEVICE_MEM_MGT_EXTENSIONS))
1969 return false;
1970 if (attrs->max_fast_reg_page_list_len == 0)
1971 return false;
1972 return true;
1973}
1974
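/* Handle an incoming RDMA_CM_EVENT_CONNECT_REQUEST: require FRWR
 * support, allocate a transport, connect it, and start the
 * per-connection handler thread.
 */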
1975static int smb_direct_handle_connect_request(struct rdma_cm_id *new_cm_id)
1976{
1977 struct smb_direct_transport *t;
Hyunchul Lee99b76502022-01-04 14:56:26 +09001978 int ret;
Namjae Jeon0626e662021-03-16 13:07:11 +09001979
1980 if (!rdma_frwr_is_supported(&new_cm_id->device->attrs)) {
1981 ksmbd_debug(RDMA,
Namjae Jeon070fb212021-05-26 17:57:12 +09001982			    "Fast Registration Work Requests are not supported. device capabilities=%llx\n",
1983 new_cm_id->device->attrs.device_cap_flags);
Namjae Jeon0626e662021-03-16 13:07:11 +09001984 return -EPROTONOSUPPORT;
1985 }
1986
1987 t = alloc_transport(new_cm_id);
1988 if (!t)
1989 return -ENOMEM;
1990
Hyunchul Lee99b76502022-01-04 14:56:26 +09001991 ret = smb_direct_connect(t);
1992 if (ret)
1993 goto out_err;
1994
Namjae Jeon0626e662021-03-16 13:07:11 +09001995 KSMBD_TRANS(t)->handler = kthread_run(ksmbd_conn_handler_loop,
Namjae Jeon070fb212021-05-26 17:57:12 +09001996 KSMBD_TRANS(t)->conn, "ksmbd:r%u",
Namjae Jeoncb097b32021-12-29 23:02:50 +09001997 smb_direct_port);
Namjae Jeon0626e662021-03-16 13:07:11 +09001998 if (IS_ERR(KSMBD_TRANS(t)->handler)) {
Hyunchul Lee99b76502022-01-04 14:56:26 +09001999 ret = PTR_ERR(KSMBD_TRANS(t)->handler);
Namjae Jeonbde16942021-06-28 15:23:19 +09002000 pr_err("Can't start thread\n");
Hyunchul Lee99b76502022-01-04 14:56:26 +09002001 goto out_err;
Namjae Jeon0626e662021-03-16 13:07:11 +09002002 }
2003
2004 return 0;
Hyunchul Lee99b76502022-01-04 14:56:26 +09002005out_err:
2006 free_transport(t);
2007 return ret;
Namjae Jeon0626e662021-03-16 13:07:11 +09002008}
2009
2010static int smb_direct_listen_handler(struct rdma_cm_id *cm_id,
Namjae Jeon070fb212021-05-26 17:57:12 +09002011 struct rdma_cm_event *event)
Namjae Jeon0626e662021-03-16 13:07:11 +09002012{
2013 switch (event->event) {
2014 case RDMA_CM_EVENT_CONNECT_REQUEST: {
2015 int ret = smb_direct_handle_connect_request(cm_id);
2016
2017 if (ret) {
Namjae Jeonbde16942021-06-28 15:23:19 +09002018 pr_err("Can't create transport: %d\n", ret);
Namjae Jeon0626e662021-03-16 13:07:11 +09002019 return ret;
2020 }
2021
2022 ksmbd_debug(RDMA, "Received connection request. cm_id=%p\n",
Namjae Jeon070fb212021-05-26 17:57:12 +09002023 cm_id);
Namjae Jeon0626e662021-03-16 13:07:11 +09002024 break;
2025 }
2026 default:
Namjae Jeonbde16942021-06-28 15:23:19 +09002027 pr_err("Unexpected listen event. cm_id=%p, event=%s (%d)\n",
2028 cm_id, rdma_event_msg(event->event), event->event);
Namjae Jeon0626e662021-03-16 13:07:11 +09002029 break;
2030 }
2031 return 0;
2032}
2033
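/* Create a listening RDMA CM ID bound to INADDR_ANY on the given port
 * (TCP port space) with a backlog of 10.
 */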
2034static int smb_direct_listen(int port)
2035{
2036 int ret;
2037 struct rdma_cm_id *cm_id;
2038 struct sockaddr_in sin = {
2039 .sin_family = AF_INET,
2040 .sin_addr.s_addr = htonl(INADDR_ANY),
2041 .sin_port = htons(port),
2042 };
2043
2044 cm_id = rdma_create_id(&init_net, smb_direct_listen_handler,
Namjae Jeon070fb212021-05-26 17:57:12 +09002045 &smb_direct_listener, RDMA_PS_TCP, IB_QPT_RC);
Namjae Jeon0626e662021-03-16 13:07:11 +09002046 if (IS_ERR(cm_id)) {
Namjae Jeonbde16942021-06-28 15:23:19 +09002047 pr_err("Can't create cm id: %ld\n", PTR_ERR(cm_id));
Namjae Jeon0626e662021-03-16 13:07:11 +09002048 return PTR_ERR(cm_id);
2049 }
2050
2051 ret = rdma_bind_addr(cm_id, (struct sockaddr *)&sin);
2052 if (ret) {
Namjae Jeonbde16942021-06-28 15:23:19 +09002053 pr_err("Can't bind: %d\n", ret);
Namjae Jeon0626e662021-03-16 13:07:11 +09002054 goto err;
2055 }
2056
2057 smb_direct_listener.cm_id = cm_id;
2058
2059 ret = rdma_listen(cm_id, 10);
2060 if (ret) {
Namjae Jeonbde16942021-06-28 15:23:19 +09002061 pr_err("Can't listen: %d\n", ret);
Namjae Jeon0626e662021-03-16 13:07:11 +09002062 goto err;
2063 }
2064 return 0;
2065err:
2066 smb_direct_listener.cm_id = NULL;
2067 rdma_destroy_id(cm_id);
2068 return ret;
2069}
2070
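/* IB client hook: track RDMA devices that can back an SMB Direct
 * connection so ksmbd_rdma_capable_netdev() can match net devices.
 * Devices without get_netdev or FRWR support are not tracked.
 */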
Hyunchul Lee31928a02021-12-29 23:02:15 +09002071static int smb_direct_ib_client_add(struct ib_device *ib_dev)
2072{
2073 struct smb_direct_device *smb_dev;
2074
Namjae Jeoncb097b32021-12-29 23:02:50 +09002075	/* Use port 5445 if the device type is iWARP (not InfiniBand) */
2076 if (ib_dev->node_type != RDMA_NODE_IB_CA)
2077 smb_direct_port = SMB_DIRECT_PORT_IWARP;
2078
Hyunchul Lee31928a02021-12-29 23:02:15 +09002079 if (!ib_dev->ops.get_netdev ||
2080 !rdma_frwr_is_supported(&ib_dev->attrs))
2081 return 0;
2082
2083 smb_dev = kzalloc(sizeof(*smb_dev), GFP_KERNEL);
2084 if (!smb_dev)
2085 return -ENOMEM;
2086 smb_dev->ib_dev = ib_dev;
2087
2088 write_lock(&smb_direct_device_lock);
2089 list_add(&smb_dev->list, &smb_direct_device_list);
2090 write_unlock(&smb_direct_device_lock);
2091
2092 ksmbd_debug(RDMA, "ib device added: name %s\n", ib_dev->name);
2093 return 0;
2094}
2095
2096static void smb_direct_ib_client_remove(struct ib_device *ib_dev,
2097 void *client_data)
2098{
2099 struct smb_direct_device *smb_dev, *tmp;
2100
2101 write_lock(&smb_direct_device_lock);
2102 list_for_each_entry_safe(smb_dev, tmp, &smb_direct_device_list, list) {
2103 if (smb_dev->ib_dev == ib_dev) {
2104 list_del(&smb_dev->list);
2105 kfree(smb_dev);
2106 break;
2107 }
2108 }
2109 write_unlock(&smb_direct_device_lock);
2110}
2111
2112static struct ib_client smb_direct_ib_client = {
2113 .name = "ksmbd_smb_direct_ib",
2114 .add = smb_direct_ib_client_add,
2115 .remove = smb_direct_ib_client_remove,
2116};
2117
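/* Module-level setup: register the IB client for device tracking,
 * allocate the high-priority workqueue used by the transport (e.g. to
 * grant receive credits), and start listening on smb_direct_port.
 */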
Namjae Jeon0626e662021-03-16 13:07:11 +09002118int ksmbd_rdma_init(void)
2119{
2120 int ret;
2121
2122 smb_direct_listener.cm_id = NULL;
2123
Hyunchul Lee31928a02021-12-29 23:02:15 +09002124 ret = ib_register_client(&smb_direct_ib_client);
2125 if (ret) {
 2126		pr_err("failed to register the IB client: %d\n", ret);
2127 return ret;
2128 }
2129
Namjae Jeon0626e662021-03-16 13:07:11 +09002130	/* When a client runs out of send credits, the server grants new
 2131	 * credits by sending a packet on this workqueue. This prevents
 2132	 * a client from being unable to send packets for lack of
 2133	 * credits.
 2134	 */
2135 smb_direct_wq = alloc_workqueue("ksmbd-smb_direct-wq",
Namjae Jeon070fb212021-05-26 17:57:12 +09002136 WQ_HIGHPRI | WQ_MEM_RECLAIM, 0);
Namjae Jeon0626e662021-03-16 13:07:11 +09002137 if (!smb_direct_wq)
2138 return -ENOMEM;
2139
Namjae Jeoncb097b32021-12-29 23:02:50 +09002140 ret = smb_direct_listen(smb_direct_port);
Namjae Jeon0626e662021-03-16 13:07:11 +09002141 if (ret) {
2142 destroy_workqueue(smb_direct_wq);
2143 smb_direct_wq = NULL;
Namjae Jeonbde16942021-06-28 15:23:19 +09002144 pr_err("Can't listen: %d\n", ret);
Namjae Jeon0626e662021-03-16 13:07:11 +09002145 return ret;
2146 }
2147
2148 ksmbd_debug(RDMA, "init RDMA listener. cm_id=%p\n",
Namjae Jeon070fb212021-05-26 17:57:12 +09002149 smb_direct_listener.cm_id);
Namjae Jeon0626e662021-03-16 13:07:11 +09002150 return 0;
2151}
2152
Hyunchul Lee31928a02021-12-29 23:02:15 +09002153void ksmbd_rdma_destroy(void)
Namjae Jeon0626e662021-03-16 13:07:11 +09002154{
Hyunchul Lee31928a02021-12-29 23:02:15 +09002155 if (!smb_direct_listener.cm_id)
2156 return;
2157
2158 ib_unregister_client(&smb_direct_ib_client);
2159 rdma_destroy_id(smb_direct_listener.cm_id);
2160
Namjae Jeon0626e662021-03-16 13:07:11 +09002161 smb_direct_listener.cm_id = NULL;
2162
2163 if (smb_direct_wq) {
Namjae Jeon0626e662021-03-16 13:07:11 +09002164 destroy_workqueue(smb_direct_wq);
2165 smb_direct_wq = NULL;
2166 }
Namjae Jeon0626e662021-03-16 13:07:11 +09002167}
2168
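/* Return true if @netdev is backed by an RDMA-capable device: first
 * check the tracked device list, then fall back to
 * ib_device_get_by_netdev() and an FRWR capability check.
 */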
Hyunchul Lee03d8d4f2021-07-13 16:09:34 +09002169bool ksmbd_rdma_capable_netdev(struct net_device *netdev)
2170{
Hyunchul Lee31928a02021-12-29 23:02:15 +09002171 struct smb_direct_device *smb_dev;
2172 int i;
Hyunchul Lee03d8d4f2021-07-13 16:09:34 +09002173 bool rdma_capable = false;
2174
Hyunchul Lee31928a02021-12-29 23:02:15 +09002175 read_lock(&smb_direct_device_lock);
2176 list_for_each_entry(smb_dev, &smb_direct_device_list, list) {
2177 for (i = 0; i < smb_dev->ib_dev->phys_port_cnt; i++) {
2178 struct net_device *ndev;
2179
2180 ndev = smb_dev->ib_dev->ops.get_netdev(smb_dev->ib_dev,
2181 i + 1);
2182 if (!ndev)
2183 continue;
2184
2185 if (ndev == netdev) {
2186 dev_put(ndev);
2187 rdma_capable = true;
2188 goto out;
2189 }
2190 dev_put(ndev);
2191 }
Hyunchul Lee03d8d4f2021-07-13 16:09:34 +09002192 }
Hyunchul Lee31928a02021-12-29 23:02:15 +09002193out:
2194 read_unlock(&smb_direct_device_lock);
2195
 2196	if (!rdma_capable) {
2197 struct ib_device *ibdev;
2198
2199 ibdev = ib_device_get_by_netdev(netdev, RDMA_DRIVER_UNKNOWN);
2200 if (ibdev) {
2201 if (rdma_frwr_is_supported(&ibdev->attrs))
2202 rdma_capable = true;
2203 ib_device_put(ibdev);
2204 }
2205 }
2206
Hyunchul Lee03d8d4f2021-07-13 16:09:34 +09002207 return rdma_capable;
2208}
2209
Namjae Jeon0626e662021-03-16 13:07:11 +09002210static struct ksmbd_transport_ops ksmbd_smb_direct_transport_ops = {
2211 .prepare = smb_direct_prepare,
2212 .disconnect = smb_direct_disconnect,
Yufan Chen136dff32022-01-09 11:34:16 +09002213 .shutdown = smb_direct_shutdown,
Namjae Jeon0626e662021-03-16 13:07:11 +09002214 .writev = smb_direct_writev,
2215 .read = smb_direct_read,
2216 .rdma_read = smb_direct_rdma_read,
2217 .rdma_write = smb_direct_rdma_write,
2218};