// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Copyright (C) 2017, Microsoft Corporation.
 * Copyright (C) 2018, LG Electronics.
 *
 * Author(s): Long Li <longli@microsoft.com>,
 *	      Hyunchul Lee <hyc.lee@gmail.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See
 * the GNU General Public License for more details.
 */

#define SUBMOD_NAME	"smb_direct"

#include <linux/kthread.h>
#include <linux/list.h>
#include <linux/mempool.h>
#include <linux/highmem.h>
#include <linux/scatterlist.h>
#include <rdma/ib_verbs.h>
#include <rdma/rdma_cm.h>
#include <rdma/rw.h>

#include "glob.h"
#include "connection.h"
#include "smb_common.h"
#include "smbstatus.h"
#include "transport_rdma.h"

#define SMB_DIRECT_PORT_IWARP		5445
#define SMB_DIRECT_PORT_INFINIBAND	445

#define SMB_DIRECT_VERSION_LE		cpu_to_le16(0x0100)

/* SMB_DIRECT negotiation timeout in seconds */
#define SMB_DIRECT_NEGOTIATE_TIMEOUT		120

#define SMB_DIRECT_MAX_SEND_SGES		8
#define SMB_DIRECT_MAX_RECV_SGES		1

/*
 * Default maximum number of outstanding RDMA read/write operations on
 * this connection. This value may be decreased during QP creation,
 * based on hardware limits.
 */
#define SMB_DIRECT_CM_INITIATOR_DEPTH		8

/* Maximum number of retries on data transfer operations */
#define SMB_DIRECT_CM_RETRY			6
/* No need to retry on Receiver Not Ready since SMB_DIRECT manages credits */
#define SMB_DIRECT_CM_RNR_RETRY		0

/*
 * User-configurable initial values per SMB_DIRECT transport connection
 * as defined in [MS-SMBD] 3.1.1.1.
 * These may change after SMB_DIRECT negotiation.
 */

/* Use port 445 as the SMB Direct port by default */
static int smb_direct_port = SMB_DIRECT_PORT_INFINIBAND;

/* The local peer's maximum number of credits to grant to the remote peer */
static int smb_direct_receive_credit_max = 255;

/* The number of credits the remote peer requests from the local peer */
static int smb_direct_send_credit_target = 255;

/* The maximum size of a single message that can be sent to the remote peer */
static int smb_direct_max_send_size = 8192;

/* The maximum fragmented upper-layer payload receive size supported */
static int smb_direct_max_fragmented_recv_size = 1024 * 1024;

/* The maximum size of a single message that can be received */
static int smb_direct_max_receive_size = 8192;

static int smb_direct_max_read_write_size = 1024 * 1024;

static int smb_direct_max_outstanding_rw_ops = 8;

static LIST_HEAD(smb_direct_device_list);
static DEFINE_RWLOCK(smb_direct_device_lock);

struct smb_direct_device {
	struct ib_device	*ib_dev;
	struct list_head	list;
};

static struct smb_direct_listener {
	struct rdma_cm_id	*cm_id;
} smb_direct_listener;

static struct workqueue_struct *smb_direct_wq;

enum smb_direct_status {
	SMB_DIRECT_CS_NEW = 0,
	SMB_DIRECT_CS_CONNECTED,
	SMB_DIRECT_CS_DISCONNECTING,
	SMB_DIRECT_CS_DISCONNECTED,
};

struct smb_direct_transport {
	struct ksmbd_transport	transport;

	enum smb_direct_status	status;
	bool			full_packet_received;
	wait_queue_head_t	wait_status;

	struct rdma_cm_id	*cm_id;
	struct ib_cq		*send_cq;
	struct ib_cq		*recv_cq;
	struct ib_pd		*pd;
	struct ib_qp		*qp;

	int			max_send_size;
	int			max_recv_size;
	int			max_fragmented_send_size;
	int			max_fragmented_recv_size;
	int			max_rdma_rw_size;

	spinlock_t		reassembly_queue_lock;
	struct list_head	reassembly_queue;
	int			reassembly_data_length;
	int			reassembly_queue_length;
	int			first_entry_offset;
	wait_queue_head_t	wait_reassembly_queue;

	spinlock_t		receive_credit_lock;
	int			recv_credits;
	int			count_avail_recvmsg;
	int			recv_credit_max;
	int			recv_credit_target;

	spinlock_t		recvmsg_queue_lock;
	struct list_head	recvmsg_queue;

	spinlock_t		empty_recvmsg_queue_lock;
	struct list_head	empty_recvmsg_queue;

	int			send_credit_target;
	atomic_t		send_credits;
	spinlock_t		lock_new_recv_credits;
	int			new_recv_credits;
	atomic_t		rw_avail_ops;

	wait_queue_head_t	wait_send_credits;
	wait_queue_head_t	wait_rw_avail_ops;

	mempool_t		*sendmsg_mempool;
	struct kmem_cache	*sendmsg_cache;
	mempool_t		*recvmsg_mempool;
	struct kmem_cache	*recvmsg_cache;

	wait_queue_head_t	wait_send_payload_pending;
	atomic_t		send_payload_pending;
	wait_queue_head_t	wait_send_pending;
	atomic_t		send_pending;

	struct delayed_work	post_recv_credits_work;
	struct work_struct	send_immediate_work;
	struct work_struct	disconnect_work;

	bool			negotiation_requested;
};

#define KSMBD_TRANS(t)	((struct ksmbd_transport *)&((t)->transport))

enum {
	SMB_DIRECT_MSG_NEGOTIATE_REQ = 0,
	SMB_DIRECT_MSG_DATA_TRANSFER
};

static struct ksmbd_transport_ops ksmbd_smb_direct_transport_ops;

struct smb_direct_send_ctx {
	struct list_head	msg_list;
	int			wr_cnt;
	bool			need_invalidate_rkey;
	unsigned int		remote_key;
};

struct smb_direct_sendmsg {
	struct smb_direct_transport	*transport;
	struct ib_send_wr	wr;
	struct list_head	list;
	int			num_sge;
	struct ib_sge		sge[SMB_DIRECT_MAX_SEND_SGES];
	struct ib_cqe		cqe;
	u8			packet[];
};

struct smb_direct_recvmsg {
	struct smb_direct_transport	*transport;
	struct list_head	list;
	int			type;
	struct ib_sge		sge;
	struct ib_cqe		cqe;
	bool			first_segment;
	u8			packet[];
};

struct smb_direct_rdma_rw_msg {
	struct smb_direct_transport	*t;
	struct ib_cqe		cqe;
	struct completion	*completion;
	struct rdma_rw_ctx	rw_ctx;
	struct sg_table		sgt;
	struct scatterlist	sg_list[];
};

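/*
 * Return the number of pages spanned by the buffer [buf, buf + size),
 * accounting for buf's offset within its first page.
 */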
static inline int get_buf_page_count(void *buf, int size)
{
	return DIV_ROUND_UP((uintptr_t)buf + size, PAGE_SIZE) -
		(uintptr_t)buf / PAGE_SIZE;
}

static void smb_direct_destroy_pools(struct smb_direct_transport *transport);
static void smb_direct_post_recv_credits(struct work_struct *work);
static int smb_direct_post_send_data(struct smb_direct_transport *t,
				     struct smb_direct_send_ctx *send_ctx,
				     struct kvec *iov, int niov,
				     int remaining_data_length);

static inline struct smb_direct_transport *
smb_trans_direct_transfort(struct ksmbd_transport *t)
{
	return container_of(t, struct smb_direct_transport, transport);
}

static inline void
*smb_direct_recvmsg_payload(struct smb_direct_recvmsg *recvmsg)
{
	return (void *)recvmsg->packet;
}

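/*
 * Receive buffers need to be reposted once the granted receive credits
 * drop to an eighth of the maximum, provided at least a quarter of the
 * outstanding credits can be backed by available receive messages.
 */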
static inline bool is_receive_credit_post_required(int receive_credits,
						   int avail_recvmsg_count)
{
	return receive_credits <= (smb_direct_receive_credit_max >> 3) &&
		avail_recvmsg_count >= (receive_credits >> 2);
}

static struct
smb_direct_recvmsg *get_free_recvmsg(struct smb_direct_transport *t)
{
	struct smb_direct_recvmsg *recvmsg = NULL;

	spin_lock(&t->recvmsg_queue_lock);
	if (!list_empty(&t->recvmsg_queue)) {
		recvmsg = list_first_entry(&t->recvmsg_queue,
					   struct smb_direct_recvmsg,
					   list);
		list_del(&recvmsg->list);
	}
	spin_unlock(&t->recvmsg_queue_lock);
	return recvmsg;
}

static void put_recvmsg(struct smb_direct_transport *t,
			struct smb_direct_recvmsg *recvmsg)
{
	ib_dma_unmap_single(t->cm_id->device, recvmsg->sge.addr,
			    recvmsg->sge.length, DMA_FROM_DEVICE);

	spin_lock(&t->recvmsg_queue_lock);
	list_add(&recvmsg->list, &t->recvmsg_queue);
	spin_unlock(&t->recvmsg_queue_lock);
}

static struct
smb_direct_recvmsg *get_empty_recvmsg(struct smb_direct_transport *t)
{
	struct smb_direct_recvmsg *recvmsg = NULL;

	spin_lock(&t->empty_recvmsg_queue_lock);
	if (!list_empty(&t->empty_recvmsg_queue)) {
		recvmsg = list_first_entry(&t->empty_recvmsg_queue,
					   struct smb_direct_recvmsg, list);
		list_del(&recvmsg->list);
	}
	spin_unlock(&t->empty_recvmsg_queue_lock);
	return recvmsg;
}

static void put_empty_recvmsg(struct smb_direct_transport *t,
			      struct smb_direct_recvmsg *recvmsg)
{
	ib_dma_unmap_single(t->cm_id->device, recvmsg->sge.addr,
			    recvmsg->sge.length, DMA_FROM_DEVICE);

	spin_lock(&t->empty_recvmsg_queue_lock);
	list_add_tail(&recvmsg->list, &t->empty_recvmsg_queue);
	spin_unlock(&t->empty_recvmsg_queue_lock);
}

static void enqueue_reassembly(struct smb_direct_transport *t,
			       struct smb_direct_recvmsg *recvmsg,
			       int data_length)
{
	spin_lock(&t->reassembly_queue_lock);
	list_add_tail(&recvmsg->list, &t->reassembly_queue);
	t->reassembly_queue_length++;
	/*
	 * Make sure reassembly_data_length is updated after the list and
	 * reassembly_queue_length are updated. On the dequeue side
	 * reassembly_data_length is checked without a lock to determine
	 * if reassembly_queue_length and the list are up to date.
	 */
	virt_wmb();
	t->reassembly_data_length += data_length;
	spin_unlock(&t->reassembly_queue_lock);
}

static struct smb_direct_recvmsg *get_first_reassembly(struct smb_direct_transport *t)
{
	if (!list_empty(&t->reassembly_queue))
		return list_first_entry(&t->reassembly_queue,
				struct smb_direct_recvmsg, list);
	else
		return NULL;
}

static void smb_direct_disconnect_rdma_work(struct work_struct *work)
{
	struct smb_direct_transport *t =
		container_of(work, struct smb_direct_transport,
			     disconnect_work);

	if (t->status == SMB_DIRECT_CS_CONNECTED) {
		t->status = SMB_DIRECT_CS_DISCONNECTING;
		rdma_disconnect(t->cm_id);
	}
}

static void
smb_direct_disconnect_rdma_connection(struct smb_direct_transport *t)
{
	if (t->status == SMB_DIRECT_CS_CONNECTED)
		queue_work(smb_direct_wq, &t->disconnect_work);
}

static void smb_direct_send_immediate_work(struct work_struct *work)
{
	struct smb_direct_transport *t = container_of(work,
			struct smb_direct_transport, send_immediate_work);

	if (t->status != SMB_DIRECT_CS_CONNECTED)
		return;

	smb_direct_post_send_data(t, NULL, NULL, 0, 0);
}

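/*
 * Allocate and initialize a transport for a freshly accepted RDMA
 * connection and bind it to a newly allocated ksmbd connection.
 */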
static struct smb_direct_transport *alloc_transport(struct rdma_cm_id *cm_id)
{
	struct smb_direct_transport *t;
	struct ksmbd_conn *conn;

	t = kzalloc(sizeof(*t), GFP_KERNEL);
	if (!t)
		return NULL;

	t->cm_id = cm_id;
	cm_id->context = t;

	t->status = SMB_DIRECT_CS_NEW;
	init_waitqueue_head(&t->wait_status);

	spin_lock_init(&t->reassembly_queue_lock);
	INIT_LIST_HEAD(&t->reassembly_queue);
	t->reassembly_data_length = 0;
	t->reassembly_queue_length = 0;
	init_waitqueue_head(&t->wait_reassembly_queue);
	init_waitqueue_head(&t->wait_send_credits);
	init_waitqueue_head(&t->wait_rw_avail_ops);

	spin_lock_init(&t->receive_credit_lock);
	spin_lock_init(&t->recvmsg_queue_lock);
	INIT_LIST_HEAD(&t->recvmsg_queue);

	spin_lock_init(&t->empty_recvmsg_queue_lock);
	INIT_LIST_HEAD(&t->empty_recvmsg_queue);

	init_waitqueue_head(&t->wait_send_payload_pending);
	atomic_set(&t->send_payload_pending, 0);
	init_waitqueue_head(&t->wait_send_pending);
	atomic_set(&t->send_pending, 0);

	spin_lock_init(&t->lock_new_recv_credits);

	INIT_DELAYED_WORK(&t->post_recv_credits_work,
			  smb_direct_post_recv_credits);
	INIT_WORK(&t->send_immediate_work, smb_direct_send_immediate_work);
	INIT_WORK(&t->disconnect_work, smb_direct_disconnect_rdma_work);

	conn = ksmbd_conn_alloc();
	if (!conn)
		goto err;
	conn->transport = KSMBD_TRANS(t);
	KSMBD_TRANS(t)->conn = conn;
	KSMBD_TRANS(t)->ops = &ksmbd_smb_direct_transport_ops;
	return t;
err:
	kfree(t);
	return NULL;
}

static void free_transport(struct smb_direct_transport *t)
{
	struct smb_direct_recvmsg *recvmsg;

	wake_up_interruptible(&t->wait_send_credits);

	ksmbd_debug(RDMA, "wait for all send posted to IB to finish\n");
	wait_event(t->wait_send_payload_pending,
		   atomic_read(&t->send_payload_pending) == 0);
	wait_event(t->wait_send_pending,
		   atomic_read(&t->send_pending) == 0);

	cancel_work_sync(&t->disconnect_work);
	cancel_delayed_work_sync(&t->post_recv_credits_work);
	cancel_work_sync(&t->send_immediate_work);

	if (t->qp) {
		ib_drain_qp(t->qp);
		ib_destroy_qp(t->qp);
	}

	ksmbd_debug(RDMA, "drain the reassembly queue\n");
	do {
		spin_lock(&t->reassembly_queue_lock);
		recvmsg = get_first_reassembly(t);
		if (recvmsg) {
			list_del(&recvmsg->list);
			spin_unlock(&t->reassembly_queue_lock);
			put_recvmsg(t, recvmsg);
		} else {
			spin_unlock(&t->reassembly_queue_lock);
		}
	} while (recvmsg);
	t->reassembly_data_length = 0;

	if (t->send_cq)
		ib_free_cq(t->send_cq);
	if (t->recv_cq)
		ib_free_cq(t->recv_cq);
	if (t->pd)
		ib_dealloc_pd(t->pd);
	if (t->cm_id)
		rdma_destroy_id(t->cm_id);

	smb_direct_destroy_pools(t);
	ksmbd_conn_free(KSMBD_TRANS(t)->conn);
	kfree(t);
}

static struct smb_direct_sendmsg
*smb_direct_alloc_sendmsg(struct smb_direct_transport *t)
{
	struct smb_direct_sendmsg *msg;

	msg = mempool_alloc(t->sendmsg_mempool, GFP_KERNEL);
	if (!msg)
		return ERR_PTR(-ENOMEM);
	msg->transport = t;
	INIT_LIST_HEAD(&msg->list);
	msg->num_sge = 0;
	return msg;
}

static void smb_direct_free_sendmsg(struct smb_direct_transport *t,
				    struct smb_direct_sendmsg *msg)
{
	int i;

	if (msg->num_sge > 0) {
		ib_dma_unmap_single(t->cm_id->device,
				    msg->sge[0].addr, msg->sge[0].length,
				    DMA_TO_DEVICE);
		for (i = 1; i < msg->num_sge; i++)
			ib_dma_unmap_page(t->cm_id->device,
					  msg->sge[i].addr, msg->sge[i].length,
					  DMA_TO_DEVICE);
	}
	mempool_free(msg, t->sendmsg_mempool);
}

static int smb_direct_check_recvmsg(struct smb_direct_recvmsg *recvmsg)
{
	switch (recvmsg->type) {
	case SMB_DIRECT_MSG_DATA_TRANSFER: {
		struct smb_direct_data_transfer *req =
			(struct smb_direct_data_transfer *)recvmsg->packet;
		struct smb2_hdr *hdr = (struct smb2_hdr *)(recvmsg->packet
				+ le32_to_cpu(req->data_offset));
		ksmbd_debug(RDMA,
			    "CreditGranted: %u, CreditRequested: %u, DataLength: %u, RemainingDataLength: %u, SMB: %x, Command: %u\n",
			    le16_to_cpu(req->credits_granted),
			    le16_to_cpu(req->credits_requested),
			    req->data_length, req->remaining_data_length,
			    hdr->ProtocolId, hdr->Command);
		break;
	}
	case SMB_DIRECT_MSG_NEGOTIATE_REQ: {
		struct smb_direct_negotiate_req *req =
			(struct smb_direct_negotiate_req *)recvmsg->packet;
		ksmbd_debug(RDMA,
			    "MinVersion: %u, MaxVersion: %u, CreditRequested: %u, MaxSendSize: %u, MaxRecvSize: %u, MaxFragmentedSize: %u\n",
			    le16_to_cpu(req->min_version),
			    le16_to_cpu(req->max_version),
			    le16_to_cpu(req->credits_requested),
			    le32_to_cpu(req->preferred_send_size),
			    le32_to_cpu(req->max_receive_size),
			    le32_to_cpu(req->max_fragmented_size));
		if (le16_to_cpu(req->min_version) > 0x0100 ||
		    le16_to_cpu(req->max_version) < 0x0100)
			return -EOPNOTSUPP;
		if (le16_to_cpu(req->credits_requested) <= 0 ||
		    le32_to_cpu(req->max_receive_size) <= 128 ||
		    le32_to_cpu(req->max_fragmented_size) <=
		    128 * 1024)
			return -ECONNABORTED;

		break;
	}
	default:
		return -EINVAL;
	}
	return 0;
}

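/*
 * Receive completion handler: validates the message length, wakes the
 * negotiation waiter for NEGOTIATE_REQ messages, and for data-transfer
 * messages queues the payload for reassembly and updates credit state.
 */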
static void recv_done(struct ib_cq *cq, struct ib_wc *wc)
{
	struct smb_direct_recvmsg *recvmsg;
	struct smb_direct_transport *t;

	recvmsg = container_of(wc->wr_cqe, struct smb_direct_recvmsg, cqe);
	t = recvmsg->transport;

	if (wc->status != IB_WC_SUCCESS || wc->opcode != IB_WC_RECV) {
		if (wc->status != IB_WC_WR_FLUSH_ERR) {
			pr_err("Recv error. status='%s (%d)' opcode=%d\n",
			       ib_wc_status_msg(wc->status), wc->status,
			       wc->opcode);
			smb_direct_disconnect_rdma_connection(t);
		}
		put_empty_recvmsg(t, recvmsg);
		return;
	}

	ksmbd_debug(RDMA, "Recv completed. status='%s (%d)', opcode=%d\n",
		    ib_wc_status_msg(wc->status), wc->status,
		    wc->opcode);

	ib_dma_sync_single_for_cpu(wc->qp->device, recvmsg->sge.addr,
				   recvmsg->sge.length, DMA_FROM_DEVICE);

	switch (recvmsg->type) {
	case SMB_DIRECT_MSG_NEGOTIATE_REQ:
		if (wc->byte_len < sizeof(struct smb_direct_negotiate_req)) {
			put_empty_recvmsg(t, recvmsg);
			return;
		}
		t->negotiation_requested = true;
		t->full_packet_received = true;
		wake_up_interruptible(&t->wait_status);
		break;
	case SMB_DIRECT_MSG_DATA_TRANSFER: {
		struct smb_direct_data_transfer *data_transfer =
			(struct smb_direct_data_transfer *)recvmsg->packet;
		unsigned int data_length;
		int avail_recvmsg_count, receive_credits;

		if (wc->byte_len <
		    offsetof(struct smb_direct_data_transfer, padding)) {
			put_empty_recvmsg(t, recvmsg);
			return;
		}

		data_length = le32_to_cpu(data_transfer->data_length);
		if (data_length) {
			if (wc->byte_len < sizeof(struct smb_direct_data_transfer) +
			    (u64)data_length) {
				put_empty_recvmsg(t, recvmsg);
				return;
			}

			if (t->full_packet_received)
				recvmsg->first_segment = true;

			if (le32_to_cpu(data_transfer->remaining_data_length))
				t->full_packet_received = false;
			else
				t->full_packet_received = true;

			enqueue_reassembly(t, recvmsg, (int)data_length);
			wake_up_interruptible(&t->wait_reassembly_queue);

			spin_lock(&t->receive_credit_lock);
			receive_credits = --(t->recv_credits);
			avail_recvmsg_count = t->count_avail_recvmsg;
			spin_unlock(&t->receive_credit_lock);
		} else {
			put_empty_recvmsg(t, recvmsg);

			spin_lock(&t->receive_credit_lock);
			receive_credits = --(t->recv_credits);
			avail_recvmsg_count = ++(t->count_avail_recvmsg);
			spin_unlock(&t->receive_credit_lock);
		}

		t->recv_credit_target =
			le16_to_cpu(data_transfer->credits_requested);
		atomic_add(le16_to_cpu(data_transfer->credits_granted),
			   &t->send_credits);

		if (le16_to_cpu(data_transfer->flags) &
		    SMB_DIRECT_RESPONSE_REQUESTED)
			queue_work(smb_direct_wq, &t->send_immediate_work);

		if (atomic_read(&t->send_credits) > 0)
			wake_up_interruptible(&t->wait_send_credits);

		if (is_receive_credit_post_required(receive_credits, avail_recvmsg_count))
			mod_delayed_work(smb_direct_wq,
					 &t->post_recv_credits_work, 0);
		break;
	}
	default:
		break;
	}
}

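/* DMA-map a receive buffer and post it to the receive queue. */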
static int smb_direct_post_recv(struct smb_direct_transport *t,
				struct smb_direct_recvmsg *recvmsg)
{
	struct ib_recv_wr wr;
	int ret;

	recvmsg->sge.addr = ib_dma_map_single(t->cm_id->device,
					      recvmsg->packet, t->max_recv_size,
					      DMA_FROM_DEVICE);
	ret = ib_dma_mapping_error(t->cm_id->device, recvmsg->sge.addr);
	if (ret)
		return ret;
	recvmsg->sge.length = t->max_recv_size;
	recvmsg->sge.lkey = t->pd->local_dma_lkey;
	recvmsg->cqe.done = recv_done;

	wr.wr_cqe = &recvmsg->cqe;
	wr.next = NULL;
	wr.sg_list = &recvmsg->sge;
	wr.num_sge = 1;

	ret = ib_post_recv(t->qp, &wr, NULL);
	if (ret) {
		pr_err("Can't post recv: %d\n", ret);
		ib_dma_unmap_single(t->cm_id->device,
				    recvmsg->sge.addr, recvmsg->sge.length,
				    DMA_FROM_DEVICE);
		smb_direct_disconnect_rdma_connection(t);
		return ret;
	}
	return ret;
}

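/*
 * Copy up to @size bytes of reassembled payload into @buf, consuming
 * entries from the reassembly queue. Blocks until enough data has
 * arrived or the connection is torn down.
 */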
static int smb_direct_read(struct ksmbd_transport *t, char *buf,
			   unsigned int size)
{
	struct smb_direct_recvmsg *recvmsg;
	struct smb_direct_data_transfer *data_transfer;
	int to_copy, to_read, data_read, offset;
	u32 data_length, remaining_data_length, data_offset;
	int rc;
	struct smb_direct_transport *st = smb_trans_direct_transfort(t);

again:
	if (st->status != SMB_DIRECT_CS_CONNECTED) {
		pr_err("disconnected\n");
		return -ENOTCONN;
	}

	/*
	 * No need to hold the reassembly queue lock all the time as we are
	 * the only one reading from the front of the queue. The transport
	 * may add more entries to the back of the queue at the same time
	 */
	if (st->reassembly_data_length >= size) {
		int queue_length;
		int queue_removed = 0;

		/*
		 * Need to make sure reassembly_data_length is read before
		 * reading reassembly_queue_length and calling
		 * get_first_reassembly. This call is lock free as we never
		 * read entries at the end of the queue, which are being
		 * updated in SOFTIRQ context as more data is received.
		 */
		virt_rmb();
		queue_length = st->reassembly_queue_length;
		data_read = 0;
		to_read = size;
		offset = st->first_entry_offset;
		while (data_read < size) {
			recvmsg = get_first_reassembly(st);
			data_transfer = smb_direct_recvmsg_payload(recvmsg);
			data_length = le32_to_cpu(data_transfer->data_length);
			remaining_data_length =
				le32_to_cpu(data_transfer->remaining_data_length);
			data_offset = le32_to_cpu(data_transfer->data_offset);

			/*
			 * The upper layer expects an RFC1002 length at the
			 * beginning of the payload. Return it to indicate
			 * the total length of the packet. This minimizes the
			 * changes to the upper-layer packet processing logic
			 * and will eventually be removed when an intermediate
			 * transport layer is added.
			 */
			if (recvmsg->first_segment && size == 4) {
				unsigned int rfc1002_len =
					data_length + remaining_data_length;
				*((__be32 *)buf) = cpu_to_be32(rfc1002_len);
				data_read = 4;
				recvmsg->first_segment = false;
				ksmbd_debug(RDMA,
					    "returning rfc1002 length %d\n",
					    rfc1002_len);
				goto read_rfc1002_done;
			}

			to_copy = min_t(int, data_length - offset, to_read);
			memcpy(buf + data_read, (char *)data_transfer + data_offset + offset,
			       to_copy);

			/* move on to the next buffer? */
			if (to_copy == data_length - offset) {
				queue_length--;
				/*
				 * No need to lock if we are not at the
				 * end of the queue
				 */
				if (queue_length) {
					list_del(&recvmsg->list);
				} else {
					spin_lock_irq(&st->reassembly_queue_lock);
					list_del(&recvmsg->list);
					spin_unlock_irq(&st->reassembly_queue_lock);
				}
				queue_removed++;
				put_recvmsg(st, recvmsg);
				offset = 0;
			} else {
				offset += to_copy;
			}

			to_read -= to_copy;
			data_read += to_copy;
		}

		spin_lock_irq(&st->reassembly_queue_lock);
		st->reassembly_data_length -= data_read;
		st->reassembly_queue_length -= queue_removed;
		spin_unlock_irq(&st->reassembly_queue_lock);

		spin_lock(&st->receive_credit_lock);
		st->count_avail_recvmsg += queue_removed;
		if (is_receive_credit_post_required(st->recv_credits, st->count_avail_recvmsg)) {
			spin_unlock(&st->receive_credit_lock);
			mod_delayed_work(smb_direct_wq,
					 &st->post_recv_credits_work, 0);
		} else {
			spin_unlock(&st->receive_credit_lock);
		}

		st->first_entry_offset = offset;
		ksmbd_debug(RDMA,
			    "returning to thread data_read=%d reassembly_data_length=%d first_entry_offset=%d\n",
			    data_read, st->reassembly_data_length,
			    st->first_entry_offset);
read_rfc1002_done:
		return data_read;
	}

	ksmbd_debug(RDMA, "wait_event on more data\n");
	rc = wait_event_interruptible(st->wait_reassembly_queue,
				      st->reassembly_data_length >= size ||
				      st->status != SMB_DIRECT_CS_CONNECTED);
	if (rc)
		return -EINTR;

	goto again;
}

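/*
 * Work item that reposts receive buffers, preferring the free recvmsg
 * pool over the empty queue, until the peer's credit target is met,
 * then schedules an immediate send to grant the new credits.
 */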
static void smb_direct_post_recv_credits(struct work_struct *work)
{
	struct smb_direct_transport *t = container_of(work,
		struct smb_direct_transport, post_recv_credits_work.work);
	struct smb_direct_recvmsg *recvmsg;
	int receive_credits, credits = 0;
	int ret;
	int use_free = 1;

	spin_lock(&t->receive_credit_lock);
	receive_credits = t->recv_credits;
	spin_unlock(&t->receive_credit_lock);

	if (receive_credits < t->recv_credit_target) {
		while (true) {
			if (use_free)
				recvmsg = get_free_recvmsg(t);
			else
				recvmsg = get_empty_recvmsg(t);
			if (!recvmsg) {
				if (use_free) {
					use_free = 0;
					continue;
				}
				break;
			}

			recvmsg->type = SMB_DIRECT_MSG_DATA_TRANSFER;
			recvmsg->first_segment = false;

			ret = smb_direct_post_recv(t, recvmsg);
			if (ret) {
				pr_err("Can't post recv: %d\n", ret);
				put_recvmsg(t, recvmsg);
				break;
			}
			credits++;
		}
	}

	spin_lock(&t->receive_credit_lock);
	t->recv_credits += credits;
	t->count_avail_recvmsg -= credits;
	spin_unlock(&t->receive_credit_lock);

	spin_lock(&t->lock_new_recv_credits);
	t->new_recv_credits += credits;
	spin_unlock(&t->lock_new_recv_credits);

	if (credits)
		queue_work(smb_direct_wq, &t->send_immediate_work);
}

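/*
 * Send completion handler: accounts for the completed send and frees
 * every batched send message chained to this work request.
 */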
static void send_done(struct ib_cq *cq, struct ib_wc *wc)
{
	struct smb_direct_sendmsg *sendmsg, *sibling;
	struct smb_direct_transport *t;
	struct list_head *pos, *prev, *end;

	sendmsg = container_of(wc->wr_cqe, struct smb_direct_sendmsg, cqe);
	t = sendmsg->transport;

	ksmbd_debug(RDMA, "Send completed. status='%s (%d)', opcode=%d\n",
		    ib_wc_status_msg(wc->status), wc->status,
		    wc->opcode);

	if (wc->status != IB_WC_SUCCESS || wc->opcode != IB_WC_SEND) {
		pr_err("Send error. status='%s (%d)', opcode=%d\n",
		       ib_wc_status_msg(wc->status), wc->status,
		       wc->opcode);
		smb_direct_disconnect_rdma_connection(t);
	}

	if (sendmsg->num_sge > 1) {
		if (atomic_dec_and_test(&t->send_payload_pending))
			wake_up(&t->wait_send_payload_pending);
	} else {
		if (atomic_dec_and_test(&t->send_pending))
			wake_up(&t->wait_send_pending);
	}

	/*
	 * Iterate and free the list of messages in reverse; the list's
	 * head is invalid.
	 */
	for (pos = &sendmsg->list, prev = pos->prev, end = sendmsg->list.next;
	     prev != end; pos = prev, prev = prev->prev) {
		sibling = container_of(pos, struct smb_direct_sendmsg, list);
		smb_direct_free_sendmsg(t, sibling);
	}

	sibling = container_of(pos, struct smb_direct_sendmsg, list);
	smb_direct_free_sendmsg(t, sibling);
}

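/*
 * Atomically take the receive credits accumulated since the last send
 * so they can be granted to the peer in an outgoing packet header.
 */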
static int manage_credits_prior_sending(struct smb_direct_transport *t)
{
	int new_credits;

	spin_lock(&t->lock_new_recv_credits);
	new_credits = t->new_recv_credits;
	t->new_recv_credits = 0;
	spin_unlock(&t->lock_new_recv_credits);

	return new_credits;
}

static int smb_direct_post_send(struct smb_direct_transport *t,
				struct ib_send_wr *wr)
{
	int ret;

	if (wr->num_sge > 1)
		atomic_inc(&t->send_payload_pending);
	else
		atomic_inc(&t->send_pending);

	ret = ib_post_send(t->qp, wr, NULL);
	if (ret) {
		pr_err("failed to post send: %d\n", ret);
		if (wr->num_sge > 1) {
			if (atomic_dec_and_test(&t->send_payload_pending))
				wake_up(&t->wait_send_payload_pending);
		} else {
			if (atomic_dec_and_test(&t->send_pending))
				wake_up(&t->wait_send_pending);
		}
		smb_direct_disconnect_rdma_connection(t);
	}
	return ret;
}

static void smb_direct_send_ctx_init(struct smb_direct_transport *t,
				     struct smb_direct_send_ctx *send_ctx,
				     bool need_invalidate_rkey,
				     unsigned int remote_key)
{
	INIT_LIST_HEAD(&send_ctx->msg_list);
	send_ctx->wr_cnt = 0;
	send_ctx->need_invalidate_rkey = need_invalidate_rkey;
	send_ctx->remote_key = remote_key;
}

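/*
 * Post the chain of work requests batched in @send_ctx, signaling only
 * the last one. On failure the consumed credits are returned and the
 * batched messages are freed.
 */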
static int smb_direct_flush_send_list(struct smb_direct_transport *t,
				      struct smb_direct_send_ctx *send_ctx,
				      bool is_last)
{
	struct smb_direct_sendmsg *first, *last;
	int ret;

	if (list_empty(&send_ctx->msg_list))
		return 0;

	first = list_first_entry(&send_ctx->msg_list,
				 struct smb_direct_sendmsg,
				 list);
	last = list_last_entry(&send_ctx->msg_list,
			       struct smb_direct_sendmsg,
			       list);

	last->wr.send_flags = IB_SEND_SIGNALED;
	last->wr.wr_cqe = &last->cqe;
	if (is_last && send_ctx->need_invalidate_rkey) {
		last->wr.opcode = IB_WR_SEND_WITH_INV;
		last->wr.ex.invalidate_rkey = send_ctx->remote_key;
	}

	ret = smb_direct_post_send(t, &first->wr);
	if (!ret) {
		smb_direct_send_ctx_init(t, send_ctx,
					 send_ctx->need_invalidate_rkey,
					 send_ctx->remote_key);
	} else {
		atomic_add(send_ctx->wr_cnt, &t->send_credits);
		wake_up(&t->wait_send_credits);
		list_for_each_entry_safe(first, last, &send_ctx->msg_list,
					 list) {
			smb_direct_free_sendmsg(t, first);
		}
	}
	return ret;
}

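/*
 * Consume one credit, sleeping until one becomes available or the
 * connection is torn down.
 */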
static int wait_for_credits(struct smb_direct_transport *t,
			    wait_queue_head_t *waitq, atomic_t *credits)
{
	int ret;

	do {
		if (atomic_dec_return(credits) >= 0)
			return 0;

		atomic_inc(credits);
		ret = wait_event_interruptible(*waitq,
					       atomic_read(credits) > 0 ||
					       t->status != SMB_DIRECT_CS_CONNECTED);

		if (t->status != SMB_DIRECT_CS_CONNECTED)
			return -ENOTCONN;
		else if (ret < 0)
			return ret;
	} while (true);
}

static int wait_for_send_credits(struct smb_direct_transport *t,
				 struct smb_direct_send_ctx *send_ctx)
{
	int ret;

	if (send_ctx &&
	    (send_ctx->wr_cnt >= 16 || atomic_read(&t->send_credits) <= 1)) {
		ret = smb_direct_flush_send_list(t, send_ctx, false);
		if (ret)
			return ret;
	}

	return wait_for_credits(t, &t->wait_send_credits, &t->send_credits);
}

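/*
 * Allocate a send message, fill in its SMB_DIRECT data-transfer header
 * for a payload of @size bytes, and DMA-map the header as the first SGE.
 */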
static int smb_direct_create_header(struct smb_direct_transport *t,
				    int size, int remaining_data_length,
				    struct smb_direct_sendmsg **sendmsg_out)
{
	struct smb_direct_sendmsg *sendmsg;
	struct smb_direct_data_transfer *packet;
	int header_length;
	int ret;

	sendmsg = smb_direct_alloc_sendmsg(t);
	if (IS_ERR(sendmsg))
		return PTR_ERR(sendmsg);

	/* Fill in the packet header */
	packet = (struct smb_direct_data_transfer *)sendmsg->packet;
	packet->credits_requested = cpu_to_le16(t->send_credit_target);
	packet->credits_granted = cpu_to_le16(manage_credits_prior_sending(t));

	packet->flags = 0;
	packet->reserved = 0;
	if (!size)
		packet->data_offset = 0;
	else
		packet->data_offset = cpu_to_le32(24);
	packet->data_length = cpu_to_le32(size);
	packet->remaining_data_length = cpu_to_le32(remaining_data_length);
	packet->padding = 0;

	ksmbd_debug(RDMA,
		    "credits_requested=%d credits_granted=%d data_offset=%d data_length=%d remaining_data_length=%d\n",
		    le16_to_cpu(packet->credits_requested),
		    le16_to_cpu(packet->credits_granted),
		    le32_to_cpu(packet->data_offset),
		    le32_to_cpu(packet->data_length),
		    le32_to_cpu(packet->remaining_data_length));

	/* Map the packet to DMA */
	header_length = sizeof(struct smb_direct_data_transfer);
	/* If this is a packet without payload, don't send padding */
	if (!size)
		header_length =
			offsetof(struct smb_direct_data_transfer, padding);

	sendmsg->sge[0].addr = ib_dma_map_single(t->cm_id->device,
						 (void *)packet,
						 header_length,
						 DMA_TO_DEVICE);
	ret = ib_dma_mapping_error(t->cm_id->device, sendmsg->sge[0].addr);
	if (ret) {
		smb_direct_free_sendmsg(t, sendmsg);
		return ret;
	}

	sendmsg->num_sge = 1;
	sendmsg->sge[0].length = header_length;
	sendmsg->sge[0].lkey = t->pd->local_dma_lkey;

	*sendmsg_out = sendmsg;
	return 0;
}

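/*
 * Build a scatterlist over a kernel buffer that may live in the vmalloc
 * or kmap area; returns the number of entries used, or -EINVAL if
 * @nentries is too small.
 */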
static int get_sg_list(void *buf, int size, struct scatterlist *sg_list, int nentries)
{
	bool high = is_vmalloc_addr(buf);
	struct page *page;
	int offset, len;
	int i = 0;

	if (nentries < get_buf_page_count(buf, size))
		return -EINVAL;

	offset = offset_in_page(buf);
	buf -= offset;
	while (size > 0) {
		len = min_t(int, PAGE_SIZE - offset, size);
		if (high)
			page = vmalloc_to_page(buf);
		else
			page = kmap_to_page(buf);

		if (!sg_list)
			return -EINVAL;
		sg_set_page(sg_list, page, len, offset);
		sg_list = sg_next(sg_list);

		buf += PAGE_SIZE;
		size -= len;
		offset = 0;
		i++;
	}
	return i;
}

static int get_mapped_sg_list(struct ib_device *device, void *buf, int size,
			      struct scatterlist *sg_list, int nentries,
			      enum dma_data_direction dir)
{
	int npages;

	npages = get_sg_list(buf, size, sg_list, nentries);
	if (npages <= 0)
		return -EINVAL;
	return ib_dma_map_sg(device, sg_list, npages, dir);
}

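/*
 * Either append the message to the send context's batch, chaining its
 * WR to the previous one, or post it immediately as a signaled send
 * when no context is given.
 */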
static int post_sendmsg(struct smb_direct_transport *t,
			struct smb_direct_send_ctx *send_ctx,
			struct smb_direct_sendmsg *msg)
{
	int i;

	for (i = 0; i < msg->num_sge; i++)
		ib_dma_sync_single_for_device(t->cm_id->device,
					      msg->sge[i].addr, msg->sge[i].length,
					      DMA_TO_DEVICE);

	msg->cqe.done = send_done;
	msg->wr.opcode = IB_WR_SEND;
	msg->wr.sg_list = &msg->sge[0];
	msg->wr.num_sge = msg->num_sge;
	msg->wr.next = NULL;

	if (send_ctx) {
		msg->wr.wr_cqe = NULL;
		msg->wr.send_flags = 0;
		if (!list_empty(&send_ctx->msg_list)) {
			struct smb_direct_sendmsg *last;

			last = list_last_entry(&send_ctx->msg_list,
					       struct smb_direct_sendmsg,
					       list);
			last->wr.next = &msg->wr;
		}
		list_add_tail(&msg->list, &send_ctx->msg_list);
		send_ctx->wr_cnt++;
		return 0;
	}

	msg->wr.wr_cqe = &msg->cqe;
	msg->wr.send_flags = IB_SEND_SIGNALED;
	return smb_direct_post_send(t, &msg->wr);
}

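/*
 * Consume one send credit, build a data-transfer header for the iovec,
 * map each vector into SGEs, and post (or batch) the resulting message.
 */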
static int smb_direct_post_send_data(struct smb_direct_transport *t,
				     struct smb_direct_send_ctx *send_ctx,
				     struct kvec *iov, int niov,
				     int remaining_data_length)
{
	int i, j, ret;
	struct smb_direct_sendmsg *msg;
	int data_length;
	struct scatterlist sg[SMB_DIRECT_MAX_SEND_SGES - 1];

	ret = wait_for_send_credits(t, send_ctx);
	if (ret)
		return ret;

	data_length = 0;
	for (i = 0; i < niov; i++)
		data_length += iov[i].iov_len;

	ret = smb_direct_create_header(t, data_length, remaining_data_length,
				       &msg);
	if (ret) {
		atomic_inc(&t->send_credits);
		return ret;
	}

	for (i = 0; i < niov; i++) {
		struct ib_sge *sge;
		int sg_cnt;

		sg_init_table(sg, SMB_DIRECT_MAX_SEND_SGES - 1);
		sg_cnt = get_mapped_sg_list(t->cm_id->device,
					    iov[i].iov_base, iov[i].iov_len,
					    sg, SMB_DIRECT_MAX_SEND_SGES - 1,
					    DMA_TO_DEVICE);
		if (sg_cnt <= 0) {
			pr_err("failed to map buffer\n");
			ret = -ENOMEM;
			goto err;
		} else if (sg_cnt + msg->num_sge > SMB_DIRECT_MAX_SEND_SGES) {
			pr_err("buffer not fitted into sges\n");
			ret = -E2BIG;
			ib_dma_unmap_sg(t->cm_id->device, sg, sg_cnt,
					DMA_TO_DEVICE);
			goto err;
		}

		for (j = 0; j < sg_cnt; j++) {
			sge = &msg->sge[msg->num_sge];
			sge->addr = sg_dma_address(&sg[j]);
			sge->length = sg_dma_len(&sg[j]);
			sge->lkey = t->pd->local_dma_lkey;
			msg->num_sge++;
		}
	}

	ret = post_sendmsg(t, send_ctx, msg);
	if (ret)
		goto err;
	return 0;
err:
	smb_direct_free_sendmsg(t, msg);
	atomic_inc(&t->send_credits);
	return ret;
}

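/*
 * Transport writev: strips the RFC1002 header and packs the iovec into
 * SMB_DIRECT data-transfer messages no larger than max_send_size,
 * splitting oversized vectors as needed.
 */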
| 1227 | static int smb_direct_writev(struct ksmbd_transport *t, |
Namjae Jeon | 070fb21 | 2021-05-26 17:57:12 +0900 | [diff] [blame] | 1228 | struct kvec *iov, int niovs, int buflen, |
| 1229 | bool need_invalidate, unsigned int remote_key) |
Namjae Jeon | 0626e66 | 2021-03-16 13:07:11 +0900 | [diff] [blame] | 1230 | { |
Namjae Jeon | 02d4b4a | 2021-06-25 13:43:01 +0900 | [diff] [blame] | 1231 | struct smb_direct_transport *st = smb_trans_direct_transfort(t); |
Namjae Jeon | 0626e66 | 2021-03-16 13:07:11 +0900 | [diff] [blame] | 1232 | int remaining_data_length; |
| 1233 | int start, i, j; |
| 1234 | int max_iov_size = st->max_send_size - |
| 1235 | sizeof(struct smb_direct_data_transfer); |
| 1236 | int ret; |
| 1237 | struct kvec vec; |
| 1238 | struct smb_direct_send_ctx send_ctx; |
| 1239 | |
Namjae Jeon | b8fc94c | 2021-07-07 14:56:44 +0900 | [diff] [blame] | 1240 | if (st->status != SMB_DIRECT_CS_CONNECTED) |
| 1241 | return -ENOTCONN; |
Namjae Jeon | 0626e66 | 2021-03-16 13:07:11 +0900 | [diff] [blame] | 1242 | |
| 1243 | /* FIXME: skip the RFC1002 header */
| 1244 | buflen -= 4; |
| 1245 | iov[0].iov_base += 4; |
| 1246 | iov[0].iov_len -= 4; |
| 1247 | |
| 1248 | remaining_data_length = buflen; |
| 1249 | ksmbd_debug(RDMA, "Sending smb (RDMA): smb_len=%u\n", buflen); |
| 1250 | |
| 1251 | smb_direct_send_ctx_init(st, &send_ctx, need_invalidate, remote_key); |
| 1252 | start = i = 0; |
| 1253 | buflen = 0; |
| 1254 | while (true) { |
| 1255 | buflen += iov[i].iov_len; |
| 1256 | if (buflen > max_iov_size) { |
| 1257 | if (i > start) { |
| 1258 | remaining_data_length -= |
Namjae Jeon | 64b39f4 | 2021-03-30 14:25:35 +0900 | [diff] [blame] | 1259 | (buflen - iov[i].iov_len); |
Namjae Jeon | 0626e66 | 2021-03-16 13:07:11 +0900 | [diff] [blame] | 1260 | ret = smb_direct_post_send_data(st, &send_ctx, |
Namjae Jeon | 070fb21 | 2021-05-26 17:57:12 +0900 | [diff] [blame] | 1261 | &iov[start], i - start, |
| 1262 | remaining_data_length); |
Namjae Jeon | 0626e66 | 2021-03-16 13:07:11 +0900 | [diff] [blame] | 1263 | if (ret) |
| 1264 | goto done; |
| 1265 | } else { |
| 1266 | /* iov[start] is too big, break it */ |
Namjae Jeon | 64b39f4 | 2021-03-30 14:25:35 +0900 | [diff] [blame] | 1267 | int nvec = (buflen + max_iov_size - 1) / |
Namjae Jeon | 0626e66 | 2021-03-16 13:07:11 +0900 | [diff] [blame] | 1268 | max_iov_size; |
| 1269 | |
| 1270 | for (j = 0; j < nvec; j++) { |
| 1271 | vec.iov_base = |
| 1272 | (char *)iov[start].iov_base + |
Namjae Jeon | 64b39f4 | 2021-03-30 14:25:35 +0900 | [diff] [blame] | 1273 | j * max_iov_size; |
Namjae Jeon | 0626e66 | 2021-03-16 13:07:11 +0900 | [diff] [blame] | 1274 | vec.iov_len = |
| 1275 | min_t(int, max_iov_size, |
Namjae Jeon | 070fb21 | 2021-05-26 17:57:12 +0900 | [diff] [blame] | 1276 | buflen - max_iov_size * j); |
Namjae Jeon | 0626e66 | 2021-03-16 13:07:11 +0900 | [diff] [blame] | 1277 | remaining_data_length -= vec.iov_len; |
Namjae Jeon | 070fb21 | 2021-05-26 17:57:12 +0900 | [diff] [blame] | 1278 | ret = smb_direct_post_send_data(st, &send_ctx, &vec, 1, |
| 1279 | remaining_data_length); |
Namjae Jeon | 0626e66 | 2021-03-16 13:07:11 +0900 | [diff] [blame] | 1280 | if (ret) |
| 1281 | goto done; |
| 1282 | } |
| 1283 | i++; |
| 1284 | if (i == niovs) |
| 1285 | break; |
| 1286 | } |
| 1287 | start = i; |
| 1288 | buflen = 0; |
| 1289 | } else { |
| 1290 | i++; |
| 1291 | if (i == niovs) { |
| 1292 | /* send out all remaining vecs */ |
| 1293 | remaining_data_length -= buflen; |
| 1294 | ret = smb_direct_post_send_data(st, &send_ctx, |
Namjae Jeon | 070fb21 | 2021-05-26 17:57:12 +0900 | [diff] [blame] | 1295 | &iov[start], i - start, |
| 1296 | remaining_data_length); |
Namjae Jeon | 0626e66 | 2021-03-16 13:07:11 +0900 | [diff] [blame] | 1297 | if (ret) |
| 1298 | goto done; |
| 1299 | break; |
| 1300 | } |
| 1301 | } |
| 1302 | } |
| 1303 | |
| 1304 | done: |
| 1305 | ret = smb_direct_flush_send_list(st, &send_ctx, true); |
| 1306 | |
| 1307 | /*
| 1308 | * As an optimization, we don't wait for each individual I/O to finish
| 1309 | * before sending the next one.
| 1310 | * Send them all, then wait for the pending send count to reach 0,
| 1311 | * which means all the I/Os have gone out and we are good to return.
| 1312 | */
| 1313 | |
| 1314 | wait_event(st->wait_send_payload_pending, |
Namjae Jeon | 070fb21 | 2021-05-26 17:57:12 +0900 | [diff] [blame] | 1315 | atomic_read(&st->send_payload_pending) == 0); |
Namjae Jeon | 0626e66 | 2021-03-16 13:07:11 +0900 | [diff] [blame] | 1316 | return ret; |
| 1317 | } |
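/*
 * Worked example for the chunking loop above, with illustrative numbers:
 * assuming max_send_size = 8192 and a 24-byte
 * struct smb_direct_data_transfer header, max_iov_size is 8168. A single
 * 20000-byte kvec is then broken into nvec = DIV_ROUND_UP(20000, 8168)
 * = 3 fragments of 8168, 8168 and 3664 bytes, and remaining_data_length
 * shrinks with each post so the peer always sees how much payload is
 * still outstanding.
 */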
| 1318 | |
| 1319 | static void read_write_done(struct ib_cq *cq, struct ib_wc *wc, |
Namjae Jeon | 070fb21 | 2021-05-26 17:57:12 +0900 | [diff] [blame] | 1320 | enum dma_data_direction dir) |
Namjae Jeon | 0626e66 | 2021-03-16 13:07:11 +0900 | [diff] [blame] | 1321 | { |
| 1322 | struct smb_direct_rdma_rw_msg *msg = container_of(wc->wr_cqe, |
Namjae Jeon | 070fb21 | 2021-05-26 17:57:12 +0900 | [diff] [blame] | 1323 | struct smb_direct_rdma_rw_msg, cqe); |
Namjae Jeon | 0626e66 | 2021-03-16 13:07:11 +0900 | [diff] [blame] | 1324 | struct smb_direct_transport *t = msg->t; |
| 1325 | |
| 1326 | if (wc->status != IB_WC_SUCCESS) { |
Namjae Jeon | bde1694 | 2021-06-28 15:23:19 +0900 | [diff] [blame] | 1327 | pr_err("read/write error. opcode = %d, status = %s(%d)\n", |
| 1328 | wc->opcode, ib_wc_status_msg(wc->status), wc->status); |
Namjae Jeon | 0626e66 | 2021-03-16 13:07:11 +0900 | [diff] [blame] | 1329 | smb_direct_disconnect_rdma_connection(t); |
| 1330 | } |
| 1331 | |
| 1332 | if (atomic_inc_return(&t->rw_avail_ops) > 0) |
| 1333 | wake_up(&t->wait_rw_avail_ops); |
| 1334 | |
| 1335 | rdma_rw_ctx_destroy(&msg->rw_ctx, t->qp, t->qp->port, |
Namjae Jeon | 070fb21 | 2021-05-26 17:57:12 +0900 | [diff] [blame] | 1336 | msg->sg_list, msg->sgt.nents, dir); |
Namjae Jeon | 0626e66 | 2021-03-16 13:07:11 +0900 | [diff] [blame] | 1337 | sg_free_table_chained(&msg->sgt, SG_CHUNK_SIZE); |
| 1338 | complete(msg->completion); |
| 1339 | kfree(msg); |
| 1340 | } |
| 1341 | |
| 1342 | static void read_done(struct ib_cq *cq, struct ib_wc *wc) |
| 1343 | { |
| 1344 | read_write_done(cq, wc, DMA_FROM_DEVICE); |
| 1345 | } |
| 1346 | |
| 1347 | static void write_done(struct ib_cq *cq, struct ib_wc *wc) |
| 1348 | { |
| 1349 | read_write_done(cq, wc, DMA_TO_DEVICE); |
| 1350 | } |
| 1351 | |
| 1352 | static int smb_direct_rdma_xmit(struct smb_direct_transport *t, void *buf, |
Namjae Jeon | 070fb21 | 2021-05-26 17:57:12 +0900 | [diff] [blame] | 1353 | int buf_len, u32 remote_key, u64 remote_offset, |
| 1354 | u32 remote_len, bool is_read) |
Namjae Jeon | 0626e66 | 2021-03-16 13:07:11 +0900 | [diff] [blame] | 1355 | { |
| 1356 | struct smb_direct_rdma_rw_msg *msg; |
| 1357 | int ret; |
| 1358 | DECLARE_COMPLETION_ONSTACK(completion); |
| 1359 | struct ib_send_wr *first_wr = NULL; |
| 1360 | |
| 1361 | ret = wait_for_credits(t, &t->wait_rw_avail_ops, &t->rw_avail_ops); |
| 1362 | if (ret < 0) |
| 1363 | return ret; |
| 1364 | |
| 1365 | /* TODO: mempool */ |
| 1366 | msg = kmalloc(offsetof(struct smb_direct_rdma_rw_msg, sg_list) + |
Namjae Jeon | 070fb21 | 2021-05-26 17:57:12 +0900 | [diff] [blame] | 1367 | sizeof(struct scatterlist) * SG_CHUNK_SIZE, GFP_KERNEL); |
Namjae Jeon | 0626e66 | 2021-03-16 13:07:11 +0900 | [diff] [blame] | 1368 | if (!msg) { |
| 1369 | atomic_inc(&t->rw_avail_ops); |
| 1370 | return -ENOMEM; |
| 1371 | } |
| 1372 | |
| 1373 | msg->sgt.sgl = &msg->sg_list[0]; |
| 1374 | ret = sg_alloc_table_chained(&msg->sgt, |
Hyunchul Lee | 8ad8dc3 | 2021-06-25 07:02:07 +0900 | [diff] [blame] | 1375 | get_buf_page_count(buf, buf_len), |
Namjae Jeon | 070fb21 | 2021-05-26 17:57:12 +0900 | [diff] [blame] | 1376 | msg->sg_list, SG_CHUNK_SIZE); |
Namjae Jeon | 0626e66 | 2021-03-16 13:07:11 +0900 | [diff] [blame] | 1377 | if (ret) { |
| 1378 | atomic_inc(&t->rw_avail_ops); |
| 1379 | kfree(msg); |
| 1380 | return -ENOMEM; |
| 1381 | } |
| 1382 | |
| 1383 | ret = get_sg_list(buf, buf_len, msg->sgt.sgl, msg->sgt.orig_nents); |
| 1384 | if (ret <= 0) { |
Namjae Jeon | bde1694 | 2021-06-28 15:23:19 +0900 | [diff] [blame] | 1385 | pr_err("failed to get pages\n"); |
Namjae Jeon | 0626e66 | 2021-03-16 13:07:11 +0900 | [diff] [blame] | 1386 | goto err; |
| 1387 | } |
| 1388 | |
| 1389 | ret = rdma_rw_ctx_init(&msg->rw_ctx, t->qp, t->qp->port, |
Hyunchul Lee | 8ad8dc3 | 2021-06-25 07:02:07 +0900 | [diff] [blame] | 1390 | msg->sg_list, get_buf_page_count(buf, buf_len), |
Namjae Jeon | 070fb21 | 2021-05-26 17:57:12 +0900 | [diff] [blame] | 1391 | 0, remote_offset, remote_key, |
| 1392 | is_read ? DMA_FROM_DEVICE : DMA_TO_DEVICE); |
Namjae Jeon | 0626e66 | 2021-03-16 13:07:11 +0900 | [diff] [blame] | 1393 | if (ret < 0) { |
Namjae Jeon | bde1694 | 2021-06-28 15:23:19 +0900 | [diff] [blame] | 1394 | pr_err("failed to init rdma_rw_ctx: %d\n", ret); |
Namjae Jeon | 0626e66 | 2021-03-16 13:07:11 +0900 | [diff] [blame] | 1395 | goto err; |
| 1396 | } |
| 1397 | |
| 1398 | msg->t = t; |
| 1399 | msg->cqe.done = is_read ? read_done : write_done; |
| 1400 | msg->completion = &completion; |
| 1401 | first_wr = rdma_rw_ctx_wrs(&msg->rw_ctx, t->qp, t->qp->port, |
Namjae Jeon | 070fb21 | 2021-05-26 17:57:12 +0900 | [diff] [blame] | 1402 | &msg->cqe, NULL); |
Namjae Jeon | 0626e66 | 2021-03-16 13:07:11 +0900 | [diff] [blame] | 1403 | |
| 1404 | ret = ib_post_send(t->qp, first_wr, NULL); |
| 1405 | if (ret) { |
Namjae Jeon | bde1694 | 2021-06-28 15:23:19 +0900 | [diff] [blame] | 1406 | pr_err("failed to post send wr: %d\n", ret); |
Namjae Jeon | 0626e66 | 2021-03-16 13:07:11 +0900 | [diff] [blame] | 1407 | goto err; |
| 1408 | } |
| 1409 | |
| 1410 | wait_for_completion(&completion); |
| 1411 | return 0; |
| 1412 | |
| 1413 | err: |
| 1414 | atomic_inc(&t->rw_avail_ops); |
| 1415 | if (first_wr) |
| 1416 | rdma_rw_ctx_destroy(&msg->rw_ctx, t->qp, t->qp->port, |
Namjae Jeon | 070fb21 | 2021-05-26 17:57:12 +0900 | [diff] [blame] | 1417 | msg->sg_list, msg->sgt.nents, |
| 1418 | is_read ? DMA_FROM_DEVICE : DMA_TO_DEVICE); |
Namjae Jeon | 0626e66 | 2021-03-16 13:07:11 +0900 | [diff] [blame] | 1419 | sg_free_table_chained(&msg->sgt, SG_CHUNK_SIZE); |
| 1420 | kfree(msg); |
| 1421 | return ret; |
Namjae Jeon | 0626e66 | 2021-03-16 13:07:11 +0900 | [diff] [blame] | 1422 | } |
| 1423 | |
Namjae Jeon | 64b39f4 | 2021-03-30 14:25:35 +0900 | [diff] [blame] | 1424 | static int smb_direct_rdma_write(struct ksmbd_transport *t, void *buf, |
Namjae Jeon | 070fb21 | 2021-05-26 17:57:12 +0900 | [diff] [blame] | 1425 | unsigned int buflen, u32 remote_key, |
| 1426 | u64 remote_offset, u32 remote_len) |
Namjae Jeon | 0626e66 | 2021-03-16 13:07:11 +0900 | [diff] [blame] | 1427 | { |
Namjae Jeon | 02d4b4a | 2021-06-25 13:43:01 +0900 | [diff] [blame] | 1428 | return smb_direct_rdma_xmit(smb_trans_direct_transfort(t), buf, buflen, |
Namjae Jeon | 070fb21 | 2021-05-26 17:57:12 +0900 | [diff] [blame] | 1429 | remote_key, remote_offset, |
| 1430 | remote_len, false); |
Namjae Jeon | 0626e66 | 2021-03-16 13:07:11 +0900 | [diff] [blame] | 1431 | } |
| 1432 | |
Namjae Jeon | 64b39f4 | 2021-03-30 14:25:35 +0900 | [diff] [blame] | 1433 | static int smb_direct_rdma_read(struct ksmbd_transport *t, void *buf, |
Namjae Jeon | 070fb21 | 2021-05-26 17:57:12 +0900 | [diff] [blame] | 1434 | unsigned int buflen, u32 remote_key, |
| 1435 | u64 remote_offset, u32 remote_len) |
Namjae Jeon | 0626e66 | 2021-03-16 13:07:11 +0900 | [diff] [blame] | 1436 | { |
Namjae Jeon | 02d4b4a | 2021-06-25 13:43:01 +0900 | [diff] [blame] | 1437 | return smb_direct_rdma_xmit(smb_trans_direct_transfort(t), buf, buflen, |
Namjae Jeon | 070fb21 | 2021-05-26 17:57:12 +0900 | [diff] [blame] | 1438 | remote_key, remote_offset, |
| 1439 | remote_len, true); |
Namjae Jeon | 0626e66 | 2021-03-16 13:07:11 +0900 | [diff] [blame] | 1440 | } |
| 1441 | |
| 1442 | static void smb_direct_disconnect(struct ksmbd_transport *t) |
| 1443 | { |
Namjae Jeon | 02d4b4a | 2021-06-25 13:43:01 +0900 | [diff] [blame] | 1444 | struct smb_direct_transport *st = smb_trans_direct_transfort(t); |
Namjae Jeon | 0626e66 | 2021-03-16 13:07:11 +0900 | [diff] [blame] | 1445 | |
| 1446 | ksmbd_debug(RDMA, "Disconnecting cm_id=%p\n", st->cm_id); |
| 1447 | |
Hyunchul Lee | 323b1ea | 2021-08-12 10:23:08 +0900 | [diff] [blame] | 1448 | smb_direct_disconnect_rdma_work(&st->disconnect_work); |
Namjae Jeon | 0626e66 | 2021-03-16 13:07:11 +0900 | [diff] [blame] | 1449 | wait_event_interruptible(st->wait_status, |
Namjae Jeon | 070fb21 | 2021-05-26 17:57:12 +0900 | [diff] [blame] | 1450 | st->status == SMB_DIRECT_CS_DISCONNECTED); |
Namjae Jeon | 0626e66 | 2021-03-16 13:07:11 +0900 | [diff] [blame] | 1451 | free_transport(st); |
| 1452 | } |
| 1453 | |
| 1454 | static int smb_direct_cm_handler(struct rdma_cm_id *cm_id, |
Namjae Jeon | 070fb21 | 2021-05-26 17:57:12 +0900 | [diff] [blame] | 1455 | struct rdma_cm_event *event) |
Namjae Jeon | 0626e66 | 2021-03-16 13:07:11 +0900 | [diff] [blame] | 1456 | { |
| 1457 | struct smb_direct_transport *t = cm_id->context; |
| 1458 | |
| 1459 | ksmbd_debug(RDMA, "RDMA CM event. cm_id=%p event=%s (%d)\n", |
Namjae Jeon | 070fb21 | 2021-05-26 17:57:12 +0900 | [diff] [blame] | 1460 | cm_id, rdma_event_msg(event->event), event->event); |
Namjae Jeon | 0626e66 | 2021-03-16 13:07:11 +0900 | [diff] [blame] | 1461 | |
| 1462 | switch (event->event) { |
| 1463 | case RDMA_CM_EVENT_ESTABLISHED: { |
| 1464 | t->status = SMB_DIRECT_CS_CONNECTED; |
| 1465 | wake_up_interruptible(&t->wait_status); |
| 1466 | break; |
| 1467 | } |
| 1468 | case RDMA_CM_EVENT_DEVICE_REMOVAL: |
| 1469 | case RDMA_CM_EVENT_DISCONNECTED: { |
| 1470 | t->status = SMB_DIRECT_CS_DISCONNECTED; |
| 1471 | wake_up_interruptible(&t->wait_status); |
| 1472 | wake_up_interruptible(&t->wait_reassembly_queue); |
| 1473 | wake_up(&t->wait_send_credits); |
| 1474 | break; |
| 1475 | } |
| 1476 | case RDMA_CM_EVENT_CONNECT_ERROR: { |
| 1477 | t->status = SMB_DIRECT_CS_DISCONNECTED; |
| 1478 | wake_up_interruptible(&t->wait_status); |
| 1479 | break; |
| 1480 | } |
| 1481 | default: |
Namjae Jeon | bde1694 | 2021-06-28 15:23:19 +0900 | [diff] [blame] | 1482 | pr_err("Unexpected RDMA CM event. cm_id=%p, event=%s (%d)\n", |
| 1483 | cm_id, rdma_event_msg(event->event), |
| 1484 | event->event); |
Namjae Jeon | 0626e66 | 2021-03-16 13:07:11 +0900 | [diff] [blame] | 1485 | break; |
| 1486 | } |
| 1487 | return 0; |
| 1488 | } |
| 1489 | |
| 1490 | static void smb_direct_qpair_handler(struct ib_event *event, void *context) |
| 1491 | { |
| 1492 | struct smb_direct_transport *t = context; |
| 1493 | |
| 1494 | ksmbd_debug(RDMA, "Received QP event. cm_id=%p, event=%s (%d)\n", |
Namjae Jeon | 070fb21 | 2021-05-26 17:57:12 +0900 | [diff] [blame] | 1495 | t->cm_id, ib_event_msg(event->event), event->event); |
Namjae Jeon | 0626e66 | 2021-03-16 13:07:11 +0900 | [diff] [blame] | 1496 | |
| 1497 | switch (event->event) { |
| 1498 | case IB_EVENT_CQ_ERR: |
| 1499 | case IB_EVENT_QP_FATAL: |
| 1500 | smb_direct_disconnect_rdma_connection(t); |
| 1501 | break; |
| 1502 | default: |
| 1503 | break; |
| 1504 | } |
| 1505 | } |
| 1506 | |
| 1507 | static int smb_direct_send_negotiate_response(struct smb_direct_transport *t, |
Namjae Jeon | 070fb21 | 2021-05-26 17:57:12 +0900 | [diff] [blame] | 1508 | int failed) |
Namjae Jeon | 0626e66 | 2021-03-16 13:07:11 +0900 | [diff] [blame] | 1509 | { |
| 1510 | struct smb_direct_sendmsg *sendmsg; |
| 1511 | struct smb_direct_negotiate_resp *resp; |
| 1512 | int ret; |
| 1513 | |
| 1514 | sendmsg = smb_direct_alloc_sendmsg(t); |
| 1515 | if (IS_ERR(sendmsg)) |
| 1516 | return -ENOMEM; |
| 1517 | |
| 1518 | resp = (struct smb_direct_negotiate_resp *)sendmsg->packet; |
| 1519 | if (failed) { |
| 1520 | memset(resp, 0, sizeof(*resp)); |
| 1521 | resp->min_version = cpu_to_le16(0x0100); |
| 1522 | resp->max_version = cpu_to_le16(0x0100); |
| 1523 | resp->status = STATUS_NOT_SUPPORTED; |
| 1524 | } else { |
| 1525 | resp->status = STATUS_SUCCESS; |
| 1526 | resp->min_version = SMB_DIRECT_VERSION_LE; |
| 1527 | resp->max_version = SMB_DIRECT_VERSION_LE; |
| 1528 | resp->negotiated_version = SMB_DIRECT_VERSION_LE; |
| 1529 | resp->reserved = 0; |
| 1530 | resp->credits_requested = |
| 1531 | cpu_to_le16(t->send_credit_target); |
Namjae Jeon | 64b39f4 | 2021-03-30 14:25:35 +0900 | [diff] [blame] | 1532 | resp->credits_granted = cpu_to_le16(manage_credits_prior_sending(t)); |
Namjae Jeon | 0626e66 | 2021-03-16 13:07:11 +0900 | [diff] [blame] | 1533 | resp->max_readwrite_size = cpu_to_le32(t->max_rdma_rw_size); |
| 1534 | resp->preferred_send_size = cpu_to_le32(t->max_send_size); |
| 1535 | resp->max_receive_size = cpu_to_le32(t->max_recv_size); |
| 1536 | resp->max_fragmented_size = |
| 1537 | cpu_to_le32(t->max_fragmented_recv_size); |
| 1538 | } |
| 1539 | |
| 1540 | sendmsg->sge[0].addr = ib_dma_map_single(t->cm_id->device, |
Namjae Jeon | 070fb21 | 2021-05-26 17:57:12 +0900 | [diff] [blame] | 1541 | (void *)resp, sizeof(*resp), |
| 1542 | DMA_TO_DEVICE); |
| 1543 | ret = ib_dma_mapping_error(t->cm_id->device, sendmsg->sge[0].addr); |
Namjae Jeon | 0626e66 | 2021-03-16 13:07:11 +0900 | [diff] [blame] | 1544 | if (ret) { |
| 1545 | smb_direct_free_sendmsg(t, sendmsg); |
| 1546 | return ret; |
| 1547 | } |
| 1548 | |
| 1549 | sendmsg->num_sge = 1; |
| 1550 | sendmsg->sge[0].length = sizeof(*resp); |
| 1551 | sendmsg->sge[0].lkey = t->pd->local_dma_lkey; |
| 1552 | |
| 1553 | ret = post_sendmsg(t, NULL, sendmsg); |
| 1554 | if (ret) { |
| 1555 | smb_direct_free_sendmsg(t, sendmsg); |
| 1556 | return ret; |
| 1557 | } |
| 1558 | |
| 1559 | wait_event(t->wait_send_pending, |
Namjae Jeon | 070fb21 | 2021-05-26 17:57:12 +0900 | [diff] [blame] | 1560 | atomic_read(&t->send_pending) == 0); |
Namjae Jeon | 0626e66 | 2021-03-16 13:07:11 +0900 | [diff] [blame] | 1561 | return 0; |
| 1562 | } |
| 1563 | |
| 1564 | static int smb_direct_accept_client(struct smb_direct_transport *t) |
| 1565 | { |
| 1566 | struct rdma_conn_param conn_param; |
| 1567 | struct ib_port_immutable port_immutable; |
| 1568 | u32 ird_ord_hdr[2]; |
| 1569 | int ret; |
| 1570 | |
| 1571 | memset(&conn_param, 0, sizeof(conn_param)); |
Namjae Jeon | 070fb21 | 2021-05-26 17:57:12 +0900 | [diff] [blame] | 1572 | conn_param.initiator_depth = min_t(u8, t->cm_id->device->attrs.max_qp_rd_atom, |
| 1573 | SMB_DIRECT_CM_INITIATOR_DEPTH); |
Namjae Jeon | 0626e66 | 2021-03-16 13:07:11 +0900 | [diff] [blame] | 1574 | conn_param.responder_resources = 0; |
| 1575 | |
| 1576 | t->cm_id->device->ops.get_port_immutable(t->cm_id->device, |
Namjae Jeon | 070fb21 | 2021-05-26 17:57:12 +0900 | [diff] [blame] | 1577 | t->cm_id->port_num, |
| 1578 | &port_immutable); |
Namjae Jeon | 0626e66 | 2021-03-16 13:07:11 +0900 | [diff] [blame] | 1579 | if (port_immutable.core_cap_flags & RDMA_CORE_PORT_IWARP) { |
| 1580 | ird_ord_hdr[0] = conn_param.responder_resources; |
| 1581 | ird_ord_hdr[1] = 1; |
| 1582 | conn_param.private_data = ird_ord_hdr; |
| 1583 | conn_param.private_data_len = sizeof(ird_ord_hdr); |
| 1584 | } else { |
| 1585 | conn_param.private_data = NULL; |
| 1586 | conn_param.private_data_len = 0; |
| 1587 | } |
| 1588 | conn_param.retry_count = SMB_DIRECT_CM_RETRY; |
| 1589 | conn_param.rnr_retry_count = SMB_DIRECT_CM_RNR_RETRY; |
| 1590 | conn_param.flow_control = 0; |
| 1591 | |
| 1592 | ret = rdma_accept(t->cm_id, &conn_param); |
| 1593 | if (ret) { |
Namjae Jeon | bde1694 | 2021-06-28 15:23:19 +0900 | [diff] [blame] | 1594 | pr_err("error at rdma_accept: %d\n", ret); |
Namjae Jeon | 0626e66 | 2021-03-16 13:07:11 +0900 | [diff] [blame] | 1595 | return ret; |
| 1596 | } |
| 1597 | |
| 1598 | wait_event_interruptible(t->wait_status, |
| 1599 | t->status != SMB_DIRECT_CS_NEW); |
| 1600 | if (t->status != SMB_DIRECT_CS_CONNECTED) |
| 1601 | return -ENOTCONN; |
| 1602 | return 0; |
| 1603 | } |
| 1604 | |
| 1605 | static int smb_direct_negotiate(struct smb_direct_transport *t) |
| 1606 | { |
| 1607 | int ret; |
| 1608 | struct smb_direct_recvmsg *recvmsg; |
| 1609 | struct smb_direct_negotiate_req *req; |
| 1610 | |
| 1611 | recvmsg = get_free_recvmsg(t); |
| 1612 | if (!recvmsg) |
| 1613 | return -ENOMEM; |
| 1614 | recvmsg->type = SMB_DIRECT_MSG_NEGOTIATE_REQ; |
| 1615 | |
| 1616 | ret = smb_direct_post_recv(t, recvmsg); |
| 1617 | if (ret) { |
Namjae Jeon | bde1694 | 2021-06-28 15:23:19 +0900 | [diff] [blame] | 1618 | pr_err("Can't post recv: %d\n", ret); |
Namjae Jeon | 0626e66 | 2021-03-16 13:07:11 +0900 | [diff] [blame] | 1619 | goto out; |
| 1620 | } |
| 1621 | |
| 1622 | t->negotiation_requested = false; |
| 1623 | ret = smb_direct_accept_client(t); |
| 1624 | if (ret) { |
Namjae Jeon | bde1694 | 2021-06-28 15:23:19 +0900 | [diff] [blame] | 1625 | pr_err("Can't accept client\n"); |
Namjae Jeon | 0626e66 | 2021-03-16 13:07:11 +0900 | [diff] [blame] | 1626 | goto out; |
| 1627 | } |
| 1628 | |
| 1629 | smb_direct_post_recv_credits(&t->post_recv_credits_work.work); |
| 1630 | |
| 1631 | ksmbd_debug(RDMA, "Waiting for SMB_DIRECT negotiate request\n"); |
| 1632 | ret = wait_event_interruptible_timeout(t->wait_status, |
Namjae Jeon | 070fb21 | 2021-05-26 17:57:12 +0900 | [diff] [blame] | 1633 | t->negotiation_requested || |
| 1634 | t->status == SMB_DIRECT_CS_DISCONNECTED, |
| 1635 | SMB_DIRECT_NEGOTIATE_TIMEOUT * HZ); |
Namjae Jeon | 0626e66 | 2021-03-16 13:07:11 +0900 | [diff] [blame] | 1636 | if (ret <= 0 || t->status == SMB_DIRECT_CS_DISCONNECTED) { |
| 1637 | ret = ret < 0 ? ret : -ETIMEDOUT; |
| 1638 | goto out; |
| 1639 | } |
| 1640 | |
| 1641 | ret = smb_direct_check_recvmsg(recvmsg); |
| 1642 | if (ret == -ECONNABORTED) |
| 1643 | goto out; |
| 1644 | |
| 1645 | req = (struct smb_direct_negotiate_req *)recvmsg->packet; |
| 1646 | t->max_recv_size = min_t(int, t->max_recv_size, |
Namjae Jeon | 070fb21 | 2021-05-26 17:57:12 +0900 | [diff] [blame] | 1647 | le32_to_cpu(req->preferred_send_size)); |
Namjae Jeon | 0626e66 | 2021-03-16 13:07:11 +0900 | [diff] [blame] | 1648 | t->max_send_size = min_t(int, t->max_send_size, |
Namjae Jeon | 070fb21 | 2021-05-26 17:57:12 +0900 | [diff] [blame] | 1649 | le32_to_cpu(req->max_receive_size)); |
Namjae Jeon | 0626e66 | 2021-03-16 13:07:11 +0900 | [diff] [blame] | 1650 | t->max_fragmented_send_size = |
| 1651 | le32_to_cpu(req->max_fragmented_size); |
| 1652 | |
| 1653 | ret = smb_direct_send_negotiate_response(t, ret); |
| 1654 | out: |
| 1655 | if (recvmsg) |
| 1656 | put_recvmsg(t, recvmsg); |
| 1657 | return ret; |
| 1658 | } |
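/*
 * Illustrative negotiation outcome (hypothetical values): if the local
 * defaults are max_recv_size = max_send_size = 8192 while the client
 * sends preferred_send_size = 1364 and max_receive_size = 8192, the
 * min_t() calls above settle on max_recv_size = 1364 and
 * max_send_size = 8192, so neither peer is ever sent a message larger
 * than it advertised it can receive.
 */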
| 1659 | |
| 1660 | static int smb_direct_init_params(struct smb_direct_transport *t, |
Namjae Jeon | 070fb21 | 2021-05-26 17:57:12 +0900 | [diff] [blame] | 1661 | struct ib_qp_cap *cap) |
Namjae Jeon | 0626e66 | 2021-03-16 13:07:11 +0900 | [diff] [blame] | 1662 | { |
| 1663 | struct ib_device *device = t->cm_id->device; |
| 1664 | int max_send_sges, max_pages, max_rw_wrs, max_send_wrs; |
| 1665 | |
| 1666 | /* Need 2 more sges: one because an SMB_DIRECT header will be mapped,
| 1667 | * and one because a send buffer may not be page aligned.
| 1668 | */
| 1669 | t->max_send_size = smb_direct_max_send_size; |
| 1670 | max_send_sges = DIV_ROUND_UP(t->max_send_size, PAGE_SIZE) + 2; |
| 1671 | if (max_send_sges > SMB_DIRECT_MAX_SEND_SGES) { |
Namjae Jeon | bde1694 | 2021-06-28 15:23:19 +0900 | [diff] [blame] | 1672 | pr_err("max_send_size %d is too large\n", t->max_send_size); |
Namjae Jeon | 0626e66 | 2021-03-16 13:07:11 +0900 | [diff] [blame] | 1673 | return -EINVAL; |
| 1674 | } |
| 1675 | |
| 1676 | /*
| 1677 | * Allow smb_direct_max_outstanding_rw_ops in-flight RDMA
| 1678 | * reads/writes. The HCA guarantees at least max_send_sge sges for
| 1679 | * an RDMA read/write work request, and if memory registration is used,
| 1680 | * we need reg_mr and local_inv WRs for each read/write.
| 1681 | */
| 1682 | t->max_rdma_rw_size = smb_direct_max_read_write_size; |
| 1683 | max_pages = DIV_ROUND_UP(t->max_rdma_rw_size, PAGE_SIZE) + 1; |
| 1684 | max_rw_wrs = DIV_ROUND_UP(max_pages, SMB_DIRECT_MAX_SEND_SGES); |
| 1685 | max_rw_wrs += rdma_rw_mr_factor(device, t->cm_id->port_num, |
| 1686 | max_pages) * 2; |
| 1687 | max_rw_wrs *= smb_direct_max_outstanding_rw_ops; |
| 1688 | |
| 1689 | max_send_wrs = smb_direct_send_credit_target + max_rw_wrs; |
| 1690 | if (max_send_wrs > device->attrs.max_cqe || |
Namjae Jeon | 64b39f4 | 2021-03-30 14:25:35 +0900 | [diff] [blame] | 1691 | max_send_wrs > device->attrs.max_qp_wr) { |
Namjae Jeon | bde1694 | 2021-06-28 15:23:19 +0900 | [diff] [blame] | 1692 | pr_err("consider lowering send_credit_target = %d, or max_outstanding_rw_ops = %d\n", |
| 1693 | smb_direct_send_credit_target, |
| 1694 | smb_direct_max_outstanding_rw_ops); |
| 1695 | pr_err("Possible CQE overrun, device reporting max_cqe %d max_qp_wr %d\n", |
| 1696 | device->attrs.max_cqe, device->attrs.max_qp_wr); |
Namjae Jeon | 0626e66 | 2021-03-16 13:07:11 +0900 | [diff] [blame] | 1697 | return -EINVAL; |
| 1698 | } |
| 1699 | |
| 1700 | if (smb_direct_receive_credit_max > device->attrs.max_cqe || |
| 1701 | smb_direct_receive_credit_max > device->attrs.max_qp_wr) { |
Namjae Jeon | bde1694 | 2021-06-28 15:23:19 +0900 | [diff] [blame] | 1702 | pr_err("consider lowering receive_credit_max = %d\n", |
| 1703 | smb_direct_receive_credit_max); |
| 1704 | pr_err("Possible CQE overrun, device reporting max_cqe %d max_qp_wr %d\n",
| 1705 | device->attrs.max_cqe, device->attrs.max_qp_wr); |
Namjae Jeon | 0626e66 | 2021-03-16 13:07:11 +0900 | [diff] [blame] | 1706 | return -EINVAL; |
| 1707 | } |
| 1708 | |
| 1709 | if (device->attrs.max_send_sge < SMB_DIRECT_MAX_SEND_SGES) { |
Namjae Jeon | bde1694 | 2021-06-28 15:23:19 +0900 | [diff] [blame] | 1710 | pr_err("warning: device max_send_sge = %d too small\n", |
| 1711 | device->attrs.max_send_sge); |
Namjae Jeon | 0626e66 | 2021-03-16 13:07:11 +0900 | [diff] [blame] | 1712 | return -EINVAL; |
| 1713 | } |
| 1714 | if (device->attrs.max_recv_sge < SMB_DIRECT_MAX_RECV_SGES) { |
Namjae Jeon | bde1694 | 2021-06-28 15:23:19 +0900 | [diff] [blame] | 1715 | pr_err("warning: device max_recv_sge = %d too small\n", |
| 1716 | device->attrs.max_recv_sge); |
Namjae Jeon | 0626e66 | 2021-03-16 13:07:11 +0900 | [diff] [blame] | 1717 | return -EINVAL; |
| 1718 | } |
| 1719 | |
| 1720 | t->recv_credits = 0; |
| 1721 | t->count_avail_recvmsg = 0; |
| 1722 | |
| 1723 | t->recv_credit_max = smb_direct_receive_credit_max; |
| 1724 | t->recv_credit_target = 10; |
| 1725 | t->new_recv_credits = 0; |
| 1726 | |
| 1727 | t->send_credit_target = smb_direct_send_credit_target; |
| 1728 | atomic_set(&t->send_credits, 0); |
| 1729 | atomic_set(&t->rw_avail_ops, smb_direct_max_outstanding_rw_ops); |
| 1730 | |
| 1731 | t->max_send_size = smb_direct_max_send_size; |
| 1732 | t->max_recv_size = smb_direct_max_receive_size; |
| 1733 | t->max_fragmented_recv_size = smb_direct_max_fragmented_recv_size; |
| 1734 | |
| 1735 | cap->max_send_wr = max_send_wrs; |
| 1736 | cap->max_recv_wr = t->recv_credit_max; |
| 1737 | cap->max_send_sge = SMB_DIRECT_MAX_SEND_SGES; |
| 1738 | cap->max_recv_sge = SMB_DIRECT_MAX_RECV_SGES; |
| 1739 | cap->max_inline_data = 0; |
| 1740 | cap->max_rdma_ctxs = 0; |
| 1741 | return 0; |
| 1742 | } |
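/*
 * Sketch of the work-request budget computed above, with assumed numbers:
 * for max_rdma_rw_size = 1 MiB and 4 KiB pages, max_pages is
 * 256 + 1 = 257 (the extra page covers an unaligned start). Packing
 * those into 8-sge WRs gives DIV_ROUND_UP(257, 8) = 33 WRs, plus twice
 * rdma_rw_mr_factor() worth of reg_mr/local_inv WRs, all multiplied by
 * smb_direct_max_outstanding_rw_ops; the result is added to
 * send_credit_target when sizing the send queue and CQs.
 */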
| 1743 | |
| 1744 | static void smb_direct_destroy_pools(struct smb_direct_transport *t) |
| 1745 | { |
| 1746 | struct smb_direct_recvmsg *recvmsg; |
| 1747 | |
| 1748 | while ((recvmsg = get_free_recvmsg(t))) |
| 1749 | mempool_free(recvmsg, t->recvmsg_mempool); |
| 1750 | while ((recvmsg = get_empty_recvmsg(t))) |
| 1751 | mempool_free(recvmsg, t->recvmsg_mempool); |
| 1752 | |
| 1753 | mempool_destroy(t->recvmsg_mempool); |
| 1754 | t->recvmsg_mempool = NULL; |
| 1755 | |
| 1756 | kmem_cache_destroy(t->recvmsg_cache); |
| 1757 | t->recvmsg_cache = NULL; |
| 1758 | |
| 1759 | mempool_destroy(t->sendmsg_mempool); |
| 1760 | t->sendmsg_mempool = NULL; |
| 1761 | |
| 1762 | kmem_cache_destroy(t->sendmsg_cache); |
| 1763 | t->sendmsg_cache = NULL; |
| 1764 | } |
| 1765 | |
| 1766 | static int smb_direct_create_pools(struct smb_direct_transport *t) |
| 1767 | { |
| 1768 | char name[80]; |
| 1769 | int i; |
| 1770 | struct smb_direct_recvmsg *recvmsg; |
| 1771 | |
| 1772 | snprintf(name, sizeof(name), "smb_direct_rqst_pool_%p", t); |
| 1773 | t->sendmsg_cache = kmem_cache_create(name, |
Namjae Jeon | 070fb21 | 2021-05-26 17:57:12 +0900 | [diff] [blame] | 1774 | sizeof(struct smb_direct_sendmsg) + |
| 1775 | sizeof(struct smb_direct_negotiate_resp), |
| 1776 | 0, SLAB_HWCACHE_ALIGN, NULL); |
Namjae Jeon | 0626e66 | 2021-03-16 13:07:11 +0900 | [diff] [blame] | 1777 | if (!t->sendmsg_cache) |
| 1778 | return -ENOMEM; |
| 1779 | |
| 1780 | t->sendmsg_mempool = mempool_create(t->send_credit_target, |
Namjae Jeon | 070fb21 | 2021-05-26 17:57:12 +0900 | [diff] [blame] | 1781 | mempool_alloc_slab, mempool_free_slab, |
| 1782 | t->sendmsg_cache); |
Namjae Jeon | 0626e66 | 2021-03-16 13:07:11 +0900 | [diff] [blame] | 1783 | if (!t->sendmsg_mempool) |
| 1784 | goto err; |
| 1785 | |
| 1786 | snprintf(name, sizeof(name), "smb_direct_resp_%p", t); |
| 1787 | t->recvmsg_cache = kmem_cache_create(name, |
Namjae Jeon | 070fb21 | 2021-05-26 17:57:12 +0900 | [diff] [blame] | 1788 | sizeof(struct smb_direct_recvmsg) + |
| 1789 | t->max_recv_size, |
| 1790 | 0, SLAB_HWCACHE_ALIGN, NULL); |
Namjae Jeon | 0626e66 | 2021-03-16 13:07:11 +0900 | [diff] [blame] | 1791 | if (!t->recvmsg_cache) |
| 1792 | goto err; |
| 1793 | |
| 1794 | t->recvmsg_mempool = |
| 1795 | mempool_create(t->recv_credit_max, mempool_alloc_slab, |
Namjae Jeon | 070fb21 | 2021-05-26 17:57:12 +0900 | [diff] [blame] | 1796 | mempool_free_slab, t->recvmsg_cache); |
Namjae Jeon | 0626e66 | 2021-03-16 13:07:11 +0900 | [diff] [blame] | 1797 | if (!t->recvmsg_mempool) |
| 1798 | goto err; |
| 1799 | |
| 1800 | INIT_LIST_HEAD(&t->recvmsg_queue); |
| 1801 | |
| 1802 | for (i = 0; i < t->recv_credit_max; i++) { |
| 1803 | recvmsg = mempool_alloc(t->recvmsg_mempool, GFP_KERNEL); |
| 1804 | if (!recvmsg) |
| 1805 | goto err; |
| 1806 | recvmsg->transport = t; |
| 1807 | list_add(&recvmsg->list, &t->recvmsg_queue); |
| 1808 | } |
| 1809 | t->count_avail_recvmsg = t->recv_credit_max; |
| 1810 | |
| 1811 | return 0; |
| 1812 | err: |
| 1813 | smb_direct_destroy_pools(t); |
| 1814 | return -ENOMEM; |
| 1815 | } |
| 1816 | |
| 1817 | static int smb_direct_create_qpair(struct smb_direct_transport *t, |
Namjae Jeon | 070fb21 | 2021-05-26 17:57:12 +0900 | [diff] [blame] | 1818 | struct ib_qp_cap *cap) |
Namjae Jeon | 0626e66 | 2021-03-16 13:07:11 +0900 | [diff] [blame] | 1819 | { |
| 1820 | int ret; |
| 1821 | struct ib_qp_init_attr qp_attr; |
| 1822 | |
| 1823 | t->pd = ib_alloc_pd(t->cm_id->device, 0); |
| 1824 | if (IS_ERR(t->pd)) { |
Namjae Jeon | bde1694 | 2021-06-28 15:23:19 +0900 | [diff] [blame] | 1825 | pr_err("Can't create RDMA PD\n"); |
Namjae Jeon | 0626e66 | 2021-03-16 13:07:11 +0900 | [diff] [blame] | 1826 | ret = PTR_ERR(t->pd); |
| 1827 | t->pd = NULL; |
| 1828 | return ret; |
| 1829 | } |
| 1830 | |
| 1831 | t->send_cq = ib_alloc_cq(t->cm_id->device, t, |
Namjae Jeon | 070fb21 | 2021-05-26 17:57:12 +0900 | [diff] [blame] | 1832 | t->send_credit_target, 0, IB_POLL_WORKQUEUE); |
Namjae Jeon | 0626e66 | 2021-03-16 13:07:11 +0900 | [diff] [blame] | 1833 | if (IS_ERR(t->send_cq)) { |
Namjae Jeon | bde1694 | 2021-06-28 15:23:19 +0900 | [diff] [blame] | 1834 | pr_err("Can't create RDMA send CQ\n"); |
Namjae Jeon | 0626e66 | 2021-03-16 13:07:11 +0900 | [diff] [blame] | 1835 | ret = PTR_ERR(t->send_cq); |
| 1836 | t->send_cq = NULL; |
| 1837 | goto err; |
| 1838 | } |
| 1839 | |
| 1840 | t->recv_cq = ib_alloc_cq(t->cm_id->device, t, |
Namjae Jeon | 070fb21 | 2021-05-26 17:57:12 +0900 | [diff] [blame] | 1841 | cap->max_send_wr + cap->max_rdma_ctxs, |
| 1842 | 0, IB_POLL_WORKQUEUE); |
Namjae Jeon | 0626e66 | 2021-03-16 13:07:11 +0900 | [diff] [blame] | 1843 | if (IS_ERR(t->recv_cq)) { |
Namjae Jeon | bde1694 | 2021-06-28 15:23:19 +0900 | [diff] [blame] | 1844 | pr_err("Can't create RDMA recv CQ\n"); |
Namjae Jeon | 0626e66 | 2021-03-16 13:07:11 +0900 | [diff] [blame] | 1845 | ret = PTR_ERR(t->recv_cq); |
| 1846 | t->recv_cq = NULL; |
| 1847 | goto err; |
| 1848 | } |
| 1849 | |
| 1850 | memset(&qp_attr, 0, sizeof(qp_attr)); |
| 1851 | qp_attr.event_handler = smb_direct_qpair_handler; |
| 1852 | qp_attr.qp_context = t; |
| 1853 | qp_attr.cap = *cap; |
| 1854 | qp_attr.sq_sig_type = IB_SIGNAL_REQ_WR; |
| 1855 | qp_attr.qp_type = IB_QPT_RC; |
| 1856 | qp_attr.send_cq = t->send_cq; |
| 1857 | qp_attr.recv_cq = t->recv_cq; |
| 1858 | qp_attr.port_num = ~0; |
| 1859 | |
| 1860 | ret = rdma_create_qp(t->cm_id, t->pd, &qp_attr); |
| 1861 | if (ret) { |
Namjae Jeon | bde1694 | 2021-06-28 15:23:19 +0900 | [diff] [blame] | 1862 | pr_err("Can't create RDMA QP: %d\n", ret); |
Namjae Jeon | 0626e66 | 2021-03-16 13:07:11 +0900 | [diff] [blame] | 1863 | goto err; |
| 1864 | } |
| 1865 | |
| 1866 | t->qp = t->cm_id->qp; |
| 1867 | t->cm_id->event_handler = smb_direct_cm_handler; |
| 1868 | |
| 1869 | return 0; |
| 1870 | err: |
| 1871 | if (t->qp) { |
| 1872 | ib_destroy_qp(t->qp); |
| 1873 | t->qp = NULL; |
| 1874 | } |
| 1875 | if (t->recv_cq) { |
| 1876 | ib_destroy_cq(t->recv_cq); |
| 1877 | t->recv_cq = NULL; |
| 1878 | } |
| 1879 | if (t->send_cq) { |
| 1880 | ib_destroy_cq(t->send_cq); |
| 1881 | t->send_cq = NULL; |
| 1882 | } |
| 1883 | if (t->pd) { |
| 1884 | ib_dealloc_pd(t->pd); |
| 1885 | t->pd = NULL; |
| 1886 | } |
| 1887 | return ret; |
| 1888 | } |
| 1889 | |
| 1890 | static int smb_direct_prepare(struct ksmbd_transport *t) |
| 1891 | { |
Namjae Jeon | 02d4b4a | 2021-06-25 13:43:01 +0900 | [diff] [blame] | 1892 | struct smb_direct_transport *st = smb_trans_direct_transfort(t); |
Namjae Jeon | 0626e66 | 2021-03-16 13:07:11 +0900 | [diff] [blame] | 1893 | int ret; |
| 1894 | struct ib_qp_cap qp_cap; |
| 1895 | |
| 1896 | ret = smb_direct_init_params(st, &qp_cap); |
| 1897 | if (ret) { |
Namjae Jeon | bde1694 | 2021-06-28 15:23:19 +0900 | [diff] [blame] | 1898 | pr_err("Can't configure RDMA parameters\n"); |
Namjae Jeon | 0626e66 | 2021-03-16 13:07:11 +0900 | [diff] [blame] | 1899 | return ret; |
| 1900 | } |
| 1901 | |
| 1902 | ret = smb_direct_create_pools(st); |
| 1903 | if (ret) { |
Namjae Jeon | bde1694 | 2021-06-28 15:23:19 +0900 | [diff] [blame] | 1904 | pr_err("Can't init RDMA pool: %d\n", ret); |
Namjae Jeon | 0626e66 | 2021-03-16 13:07:11 +0900 | [diff] [blame] | 1905 | return ret; |
| 1906 | } |
| 1907 | |
| 1908 | ret = smb_direct_create_qpair(st, &qp_cap); |
| 1909 | if (ret) { |
Namjae Jeon | bde1694 | 2021-06-28 15:23:19 +0900 | [diff] [blame] | 1910 | pr_err("Can't accept RDMA client: %d\n", ret); |
Namjae Jeon | 0626e66 | 2021-03-16 13:07:11 +0900 | [diff] [blame] | 1911 | return ret; |
| 1912 | } |
| 1913 | |
| 1914 | ret = smb_direct_negotiate(st); |
| 1915 | if (ret) { |
Namjae Jeon | bde1694 | 2021-06-28 15:23:19 +0900 | [diff] [blame] | 1916 | pr_err("Can't negotiate: %d\n", ret); |
Namjae Jeon | 0626e66 | 2021-03-16 13:07:11 +0900 | [diff] [blame] | 1917 | return ret; |
| 1918 | } |
| 1919 | |
| 1920 | st->status = SMB_DIRECT_CS_CONNECTED; |
| 1921 | return 0; |
| 1922 | } |
| 1923 | |
| 1924 | static bool rdma_frwr_is_supported(struct ib_device_attr *attrs) |
| 1925 | { |
| 1926 | if (!(attrs->device_cap_flags & IB_DEVICE_MEM_MGT_EXTENSIONS)) |
| 1927 | return false; |
| 1928 | if (attrs->max_fast_reg_page_list_len == 0) |
| 1929 | return false; |
| 1930 | return true; |
| 1931 | } |
| 1932 | |
| 1933 | static int smb_direct_handle_connect_request(struct rdma_cm_id *new_cm_id) |
| 1934 | { |
| 1935 | struct smb_direct_transport *t; |
| 1936 | |
| 1937 | if (!rdma_frwr_is_supported(&new_cm_id->device->attrs)) { |
| 1938 | ksmbd_debug(RDMA, |
Namjae Jeon | 070fb21 | 2021-05-26 17:57:12 +0900 | [diff] [blame] | 1939 | "Fast Registration Work Requests are not supported. device capabilities=%llx\n",
| 1940 | new_cm_id->device->attrs.device_cap_flags); |
Namjae Jeon | 0626e66 | 2021-03-16 13:07:11 +0900 | [diff] [blame] | 1941 | return -EPROTONOSUPPORT; |
| 1942 | } |
| 1943 | |
| 1944 | t = alloc_transport(new_cm_id); |
| 1945 | if (!t) |
| 1946 | return -ENOMEM; |
| 1947 | |
| 1948 | KSMBD_TRANS(t)->handler = kthread_run(ksmbd_conn_handler_loop, |
Namjae Jeon | 070fb21 | 2021-05-26 17:57:12 +0900 | [diff] [blame] | 1949 | KSMBD_TRANS(t)->conn, "ksmbd:r%u", |
Namjae Jeon | cb097b3 | 2021-12-29 23:02:50 +0900 | [diff] [blame^] | 1950 | smb_direct_port); |
Namjae Jeon | 0626e66 | 2021-03-16 13:07:11 +0900 | [diff] [blame] | 1951 | if (IS_ERR(KSMBD_TRANS(t)->handler)) { |
| 1952 | int ret = PTR_ERR(KSMBD_TRANS(t)->handler); |
| 1953 | |
Namjae Jeon | bde1694 | 2021-06-28 15:23:19 +0900 | [diff] [blame] | 1954 | pr_err("Can't start thread\n"); |
Namjae Jeon | 0626e66 | 2021-03-16 13:07:11 +0900 | [diff] [blame] | 1955 | free_transport(t); |
| 1956 | return ret; |
| 1957 | } |
| 1958 | |
| 1959 | return 0; |
| 1960 | } |
| 1961 | |
| 1962 | static int smb_direct_listen_handler(struct rdma_cm_id *cm_id, |
Namjae Jeon | 070fb21 | 2021-05-26 17:57:12 +0900 | [diff] [blame] | 1963 | struct rdma_cm_event *event) |
Namjae Jeon | 0626e66 | 2021-03-16 13:07:11 +0900 | [diff] [blame] | 1964 | { |
| 1965 | switch (event->event) { |
| 1966 | case RDMA_CM_EVENT_CONNECT_REQUEST: { |
| 1967 | int ret = smb_direct_handle_connect_request(cm_id); |
| 1968 | |
| 1969 | if (ret) { |
Namjae Jeon | bde1694 | 2021-06-28 15:23:19 +0900 | [diff] [blame] | 1970 | pr_err("Can't create transport: %d\n", ret); |
Namjae Jeon | 0626e66 | 2021-03-16 13:07:11 +0900 | [diff] [blame] | 1971 | return ret; |
| 1972 | } |
| 1973 | |
| 1974 | ksmbd_debug(RDMA, "Received connection request. cm_id=%p\n", |
Namjae Jeon | 070fb21 | 2021-05-26 17:57:12 +0900 | [diff] [blame] | 1975 | cm_id); |
Namjae Jeon | 0626e66 | 2021-03-16 13:07:11 +0900 | [diff] [blame] | 1976 | break; |
| 1977 | } |
| 1978 | default: |
Namjae Jeon | bde1694 | 2021-06-28 15:23:19 +0900 | [diff] [blame] | 1979 | pr_err("Unexpected listen event. cm_id=%p, event=%s (%d)\n", |
| 1980 | cm_id, rdma_event_msg(event->event), event->event); |
Namjae Jeon | 0626e66 | 2021-03-16 13:07:11 +0900 | [diff] [blame] | 1981 | break; |
| 1982 | } |
| 1983 | return 0; |
| 1984 | } |
| 1985 | |
| 1986 | static int smb_direct_listen(int port) |
| 1987 | { |
| 1988 | int ret; |
| 1989 | struct rdma_cm_id *cm_id; |
| 1990 | struct sockaddr_in sin = { |
| 1991 | .sin_family = AF_INET, |
| 1992 | .sin_addr.s_addr = htonl(INADDR_ANY), |
| 1993 | .sin_port = htons(port), |
| 1994 | }; |
| 1995 | |
| 1996 | cm_id = rdma_create_id(&init_net, smb_direct_listen_handler, |
Namjae Jeon | 070fb21 | 2021-05-26 17:57:12 +0900 | [diff] [blame] | 1997 | &smb_direct_listener, RDMA_PS_TCP, IB_QPT_RC); |
Namjae Jeon | 0626e66 | 2021-03-16 13:07:11 +0900 | [diff] [blame] | 1998 | if (IS_ERR(cm_id)) { |
Namjae Jeon | bde1694 | 2021-06-28 15:23:19 +0900 | [diff] [blame] | 1999 | pr_err("Can't create cm id: %ld\n", PTR_ERR(cm_id)); |
Namjae Jeon | 0626e66 | 2021-03-16 13:07:11 +0900 | [diff] [blame] | 2000 | return PTR_ERR(cm_id); |
| 2001 | } |
| 2002 | |
| 2003 | ret = rdma_bind_addr(cm_id, (struct sockaddr *)&sin); |
| 2004 | if (ret) { |
Namjae Jeon | bde1694 | 2021-06-28 15:23:19 +0900 | [diff] [blame] | 2005 | pr_err("Can't bind: %d\n", ret); |
Namjae Jeon | 0626e66 | 2021-03-16 13:07:11 +0900 | [diff] [blame] | 2006 | goto err; |
| 2007 | } |
| 2008 | |
| 2009 | smb_direct_listener.cm_id = cm_id; |
| 2010 | |
| 2011 | ret = rdma_listen(cm_id, 10); |
| 2012 | if (ret) { |
Namjae Jeon | bde1694 | 2021-06-28 15:23:19 +0900 | [diff] [blame] | 2013 | pr_err("Can't listen: %d\n", ret); |
Namjae Jeon | 0626e66 | 2021-03-16 13:07:11 +0900 | [diff] [blame] | 2014 | goto err; |
| 2015 | } |
| 2016 | return 0; |
| 2017 | err: |
| 2018 | smb_direct_listener.cm_id = NULL; |
| 2019 | rdma_destroy_id(cm_id); |
| 2020 | return ret; |
| 2021 | } |
| 2022 | |
Hyunchul Lee | 31928a0 | 2021-12-29 23:02:15 +0900 | [diff] [blame] | 2023 | static int smb_direct_ib_client_add(struct ib_device *ib_dev) |
| 2024 | { |
| 2025 | struct smb_direct_device *smb_dev; |
| 2026 | |
Namjae Jeon | cb097b3 | 2021-12-29 23:02:50 +0900 | [diff] [blame^] | 2027 | /* Use port 5445 if the device type is iWARP (not InfiniBand) */
| 2028 | if (ib_dev->node_type != RDMA_NODE_IB_CA) |
| 2029 | smb_direct_port = SMB_DIRECT_PORT_IWARP; |
| 2030 | |
Hyunchul Lee | 31928a0 | 2021-12-29 23:02:15 +0900 | [diff] [blame] | 2031 | if (!ib_dev->ops.get_netdev || |
| 2032 | !rdma_frwr_is_supported(&ib_dev->attrs)) |
| 2033 | return 0; |
| 2034 | |
| 2035 | smb_dev = kzalloc(sizeof(*smb_dev), GFP_KERNEL); |
| 2036 | if (!smb_dev) |
| 2037 | return -ENOMEM; |
| 2038 | smb_dev->ib_dev = ib_dev; |
| 2039 | |
| 2040 | write_lock(&smb_direct_device_lock); |
| 2041 | list_add(&smb_dev->list, &smb_direct_device_list); |
| 2042 | write_unlock(&smb_direct_device_lock); |
| 2043 | |
| 2044 | ksmbd_debug(RDMA, "ib device added: name %s\n", ib_dev->name); |
| 2045 | return 0; |
| 2046 | } |
| 2047 | |
| 2048 | static void smb_direct_ib_client_remove(struct ib_device *ib_dev, |
| 2049 | void *client_data) |
| 2050 | { |
| 2051 | struct smb_direct_device *smb_dev, *tmp; |
| 2052 | |
| 2053 | write_lock(&smb_direct_device_lock); |
| 2054 | list_for_each_entry_safe(smb_dev, tmp, &smb_direct_device_list, list) { |
| 2055 | if (smb_dev->ib_dev == ib_dev) { |
| 2056 | list_del(&smb_dev->list); |
| 2057 | kfree(smb_dev); |
| 2058 | break; |
| 2059 | } |
| 2060 | } |
| 2061 | write_unlock(&smb_direct_device_lock); |
| 2062 | } |
| 2063 | |
| 2064 | static struct ib_client smb_direct_ib_client = { |
| 2065 | .name = "ksmbd_smb_direct_ib", |
| 2066 | .add = smb_direct_ib_client_add, |
| 2067 | .remove = smb_direct_ib_client_remove, |
| 2068 | }; |
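/*
 * Registering this client (see ksmbd_rdma_init() below) makes the IB
 * core call .add for every RDMA device already present and for each
 * device hot-plugged later, while .remove runs on unplug or
 * unregistration; the device list maintained by these callbacks is what
 * ksmbd_rdma_capable_netdev() walks to match a netdev to an adapter.
 */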
| 2069 | |
Namjae Jeon | 0626e66 | 2021-03-16 13:07:11 +0900 | [diff] [blame] | 2070 | int ksmbd_rdma_init(void) |
| 2071 | { |
| 2072 | int ret; |
| 2073 | |
| 2074 | smb_direct_listener.cm_id = NULL; |
| 2075 | |
Hyunchul Lee | 31928a0 | 2021-12-29 23:02:15 +0900 | [diff] [blame] | 2076 | ret = ib_register_client(&smb_direct_ib_client); |
| 2077 | if (ret) { |
| 2078 | pr_err("failed to ib_register_client\n"); |
| 2079 | return ret; |
| 2080 | } |
| 2081 | |
Namjae Jeon | 0626e66 | 2021-03-16 13:07:11 +0900 | [diff] [blame] | 2082 | /* When a client is running out of send credits, the credits are
| 2083 | * granted by the server sending a packet over this queue.
| 2084 | * This avoids the situation where a client cannot send packets
| 2085 | * for lack of credits.
| 2086 | */
| 2087 | smb_direct_wq = alloc_workqueue("ksmbd-smb_direct-wq", |
Namjae Jeon | 070fb21 | 2021-05-26 17:57:12 +0900 | [diff] [blame] | 2088 | WQ_HIGHPRI | WQ_MEM_RECLAIM, 0); |
Namjae Jeon | 0626e66 | 2021-03-16 13:07:11 +0900 | [diff] [blame] | 2089 | if (!smb_direct_wq) |
| 2090 | return -ENOMEM; |
| 2091 | |
Namjae Jeon | cb097b3 | 2021-12-29 23:02:50 +0900 | [diff] [blame^] | 2092 | ret = smb_direct_listen(smb_direct_port); |
Namjae Jeon | 0626e66 | 2021-03-16 13:07:11 +0900 | [diff] [blame] | 2093 | if (ret) { |
| 2094 | destroy_workqueue(smb_direct_wq); |
| 2095 | smb_direct_wq = NULL; |
Namjae Jeon | bde1694 | 2021-06-28 15:23:19 +0900 | [diff] [blame] | 2096 | pr_err("Can't listen: %d\n", ret); |
Namjae Jeon | 0626e66 | 2021-03-16 13:07:11 +0900 | [diff] [blame] | 2097 | return ret; |
| 2098 | } |
| 2099 | |
| 2100 | ksmbd_debug(RDMA, "init RDMA listener. cm_id=%p\n", |
Namjae Jeon | 070fb21 | 2021-05-26 17:57:12 +0900 | [diff] [blame] | 2101 | smb_direct_listener.cm_id); |
Namjae Jeon | 0626e66 | 2021-03-16 13:07:11 +0900 | [diff] [blame] | 2102 | return 0; |
| 2103 | } |
| 2104 | |
Hyunchul Lee | 31928a0 | 2021-12-29 23:02:15 +0900 | [diff] [blame] | 2105 | void ksmbd_rdma_destroy(void) |
Namjae Jeon | 0626e66 | 2021-03-16 13:07:11 +0900 | [diff] [blame] | 2106 | { |
Hyunchul Lee | 31928a0 | 2021-12-29 23:02:15 +0900 | [diff] [blame] | 2107 | if (!smb_direct_listener.cm_id) |
| 2108 | return; |
| 2109 | |
| 2110 | ib_unregister_client(&smb_direct_ib_client); |
| 2111 | rdma_destroy_id(smb_direct_listener.cm_id); |
| 2112 | |
Namjae Jeon | 0626e66 | 2021-03-16 13:07:11 +0900 | [diff] [blame] | 2113 | smb_direct_listener.cm_id = NULL; |
| 2114 | |
| 2115 | if (smb_direct_wq) { |
Namjae Jeon | 0626e66 | 2021-03-16 13:07:11 +0900 | [diff] [blame] | 2116 | destroy_workqueue(smb_direct_wq); |
| 2117 | smb_direct_wq = NULL; |
| 2118 | } |
Namjae Jeon | 0626e66 | 2021-03-16 13:07:11 +0900 | [diff] [blame] | 2119 | } |
| 2120 | |
Hyunchul Lee | 03d8d4f | 2021-07-13 16:09:34 +0900 | [diff] [blame] | 2121 | bool ksmbd_rdma_capable_netdev(struct net_device *netdev) |
| 2122 | { |
Hyunchul Lee | 31928a0 | 2021-12-29 23:02:15 +0900 | [diff] [blame] | 2123 | struct smb_direct_device *smb_dev; |
| 2124 | int i; |
Hyunchul Lee | 03d8d4f | 2021-07-13 16:09:34 +0900 | [diff] [blame] | 2125 | bool rdma_capable = false; |
| 2126 | |
Hyunchul Lee | 31928a0 | 2021-12-29 23:02:15 +0900 | [diff] [blame] | 2127 | read_lock(&smb_direct_device_lock); |
| 2128 | list_for_each_entry(smb_dev, &smb_direct_device_list, list) { |
| 2129 | for (i = 0; i < smb_dev->ib_dev->phys_port_cnt; i++) { |
| 2130 | struct net_device *ndev; |
| 2131 | |
| 2132 | ndev = smb_dev->ib_dev->ops.get_netdev(smb_dev->ib_dev, |
| 2133 | i + 1); |
| 2134 | if (!ndev) |
| 2135 | continue; |
| 2136 | |
| 2137 | if (ndev == netdev) { |
| 2138 | dev_put(ndev); |
| 2139 | rdma_capable = true; |
| 2140 | goto out; |
| 2141 | } |
| 2142 | dev_put(ndev); |
| 2143 | } |
Hyunchul Lee | 03d8d4f | 2021-07-13 16:09:34 +0900 | [diff] [blame] | 2144 | } |
Hyunchul Lee | 31928a0 | 2021-12-29 23:02:15 +0900 | [diff] [blame] | 2145 | out: |
| 2146 | read_unlock(&smb_direct_device_lock); |
| 2147 | |
| 2148 | if (!rdma_capable) {
| 2149 | struct ib_device *ibdev; |
| 2150 | |
| 2151 | ibdev = ib_device_get_by_netdev(netdev, RDMA_DRIVER_UNKNOWN); |
| 2152 | if (ibdev) { |
| 2153 | if (rdma_frwr_is_supported(&ibdev->attrs)) |
| 2154 | rdma_capable = true; |
| 2155 | ib_device_put(ibdev); |
| 2156 | } |
| 2157 | } |
| 2158 | |
Hyunchul Lee | 03d8d4f | 2021-07-13 16:09:34 +0900 | [diff] [blame] | 2159 | return rdma_capable; |
| 2160 | } |
| 2161 | |
Namjae Jeon | 0626e66 | 2021-03-16 13:07:11 +0900 | [diff] [blame] | 2162 | static struct ksmbd_transport_ops ksmbd_smb_direct_transport_ops = { |
| 2163 | .prepare = smb_direct_prepare, |
| 2164 | .disconnect = smb_direct_disconnect, |
| 2165 | .writev = smb_direct_writev, |
| 2166 | .read = smb_direct_read, |
| 2167 | .rdma_read = smb_direct_rdma_read, |
| 2168 | .rdma_write = smb_direct_rdma_write, |
| 2169 | }; |