blob: d658c6e8263afd967daa722680c9b1b6ab35e62d
Sagi Grimberg872d26a2018-12-03 17:52:15 -08001// SPDX-License-Identifier: GPL-2.0
2/*
3 * NVMe over Fabrics TCP target.
4 * Copyright (c) 2018 Lightbits Labs. All rights reserved.
5 */
6#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
7#include <linux/module.h>
8#include <linux/init.h>
9#include <linux/slab.h>
10#include <linux/err.h>
11#include <linux/nvme-tcp.h>
12#include <net/sock.h>
13#include <net/tcp.h>
14#include <linux/inet.h>
15#include <linux/llist.h>
16#include <crypto/hash.h>
17
18#include "nvmet.h"
19
20#define NVMET_TCP_DEF_INLINE_DATA_SIZE (4 * PAGE_SIZE)
21
Wunderlich, Mark43cc6682020-01-16 00:46:16 +000022/* Define the socket priority to use for connections where it is desirable
23 * that the NIC consider performing optimized packet processing or filtering.
24 * A non-zero value is sufficient to indicate general consideration of any
25 * possible optimization. Making it a module param allows for alternative
26 * values that may be unique for some NIC implementations.
27 */
28static int so_priority;
29module_param(so_priority, int, 0644);
30MODULE_PARM_DESC(so_priority, "nvmet tcp socket optimize priority");
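/*
 * Illustrative usage (not part of this file): because the parameter is
 * created with mode 0644, it can be set at load time, e.g.
 * "modprobe nvmet-tcp so_priority=6", or adjusted later through
 * /sys/module/nvmet_tcp/parameters/so_priority.
 */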
31
Sagi Grimberg872d26a2018-12-03 17:52:15 -080032#define NVMET_TCP_RECV_BUDGET 8
33#define NVMET_TCP_SEND_BUDGET 8
34#define NVMET_TCP_IO_WORK_BUDGET 64
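/*
 * The budgets above bound how much work nvmet_tcp_io_work() performs per
 * invocation: up to NVMET_TCP_RECV_BUDGET receive operations and up to
 * NVMET_TCP_SEND_BUDGET send operations per loop pass, with the work item
 * requeueing itself once NVMET_TCP_IO_WORK_BUDGET total operations have
 * been consumed while work is still pending.
 */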
35
36enum nvmet_tcp_send_state {
37 NVMET_TCP_SEND_DATA_PDU,
38 NVMET_TCP_SEND_DATA,
39 NVMET_TCP_SEND_R2T,
40 NVMET_TCP_SEND_DDGST,
41 NVMET_TCP_SEND_RESPONSE
42};
43
44enum nvmet_tcp_recv_state {
45 NVMET_TCP_RECV_PDU,
46 NVMET_TCP_RECV_DATA,
47 NVMET_TCP_RECV_DDGST,
48 NVMET_TCP_RECV_ERR,
49};
50
51enum {
52 NVMET_TCP_F_INIT_FAILED = (1 << 0),
53};
54
55struct nvmet_tcp_cmd {
56 struct nvmet_tcp_queue *queue;
57 struct nvmet_req req;
58
59 struct nvme_tcp_cmd_pdu *cmd_pdu;
60 struct nvme_tcp_rsp_pdu *rsp_pdu;
61 struct nvme_tcp_data_pdu *data_pdu;
62 struct nvme_tcp_r2t_pdu *r2t_pdu;
63
64 u32 rbytes_done;
65 u32 wbytes_done;
66
67 u32 pdu_len;
68 u32 pdu_recv;
69 int sg_idx;
70 int nr_mapped;
71 struct msghdr recv_msg;
72 struct kvec *iov;
73 u32 flags;
74
75 struct list_head entry;
76 struct llist_node lentry;
77
78 /* send state */
79 u32 offset;
80 struct scatterlist *cur_sg;
81 enum nvmet_tcp_send_state state;
82
83 __le32 exp_ddgst;
84 __le32 recv_ddgst;
85};
86
87enum nvmet_tcp_queue_state {
88 NVMET_TCP_Q_CONNECTING,
89 NVMET_TCP_Q_LIVE,
90 NVMET_TCP_Q_DISCONNECTING,
91};
92
93struct nvmet_tcp_queue {
94 struct socket *sock;
95 struct nvmet_tcp_port *port;
96 struct work_struct io_work;
Sagi Grimberg872d26a2018-12-03 17:52:15 -080097 struct nvmet_cq nvme_cq;
98 struct nvmet_sq nvme_sq;
99
100 /* send state */
101 struct nvmet_tcp_cmd *cmds;
102 unsigned int nr_cmds;
103 struct list_head free_list;
104 struct llist_head resp_list;
105 struct list_head resp_send_list;
106 int send_list_len;
107 struct nvmet_tcp_cmd *snd_cmd;
108
109 /* recv state */
110 int offset;
111 int left;
112 enum nvmet_tcp_recv_state rcv_state;
113 struct nvmet_tcp_cmd *cmd;
114 union nvme_tcp_pdu pdu;
115
116 /* digest state */
117 bool hdr_digest;
118 bool data_digest;
119 struct ahash_request *snd_hash;
120 struct ahash_request *rcv_hash;
121
122 spinlock_t state_lock;
123 enum nvmet_tcp_queue_state state;
124
125 struct sockaddr_storage sockaddr;
126 struct sockaddr_storage sockaddr_peer;
127 struct work_struct release_work;
128
129 int idx;
130 struct list_head queue_list;
131
132 struct nvmet_tcp_cmd connect;
133
134 struct page_frag_cache pf_cache;
135
136 void (*data_ready)(struct sock *);
137 void (*state_change)(struct sock *);
138 void (*write_space)(struct sock *);
139};
140
141struct nvmet_tcp_port {
142 struct socket *sock;
143 struct work_struct accept_work;
144 struct nvmet_port *nport;
145 struct sockaddr_storage addr;
Sagi Grimberg872d26a2018-12-03 17:52:15 -0800146 void (*data_ready)(struct sock *);
147};
148
149static DEFINE_IDA(nvmet_tcp_queue_ida);
150static LIST_HEAD(nvmet_tcp_queue_list);
151static DEFINE_MUTEX(nvmet_tcp_queue_mutex);
152
153static struct workqueue_struct *nvmet_tcp_wq;
Max Gurtovoya40aae62020-06-01 20:05:20 +0300154static const struct nvmet_fabrics_ops nvmet_tcp_ops;
Sagi Grimberg872d26a2018-12-03 17:52:15 -0800155static void nvmet_tcp_free_cmd(struct nvmet_tcp_cmd *c);
156static void nvmet_tcp_finish_cmd(struct nvmet_tcp_cmd *cmd);
157
158static inline u16 nvmet_tcp_cmd_tag(struct nvmet_tcp_queue *queue,
159 struct nvmet_tcp_cmd *cmd)
160{
Ziye Yanga6ce7d72020-08-22 00:48:10 +0800161 if (unlikely(!queue->nr_cmds)) {
162 /* We didn't allocate cmds yet, send 0xffff */
163 return USHRT_MAX;
164 }
165
Sagi Grimberg872d26a2018-12-03 17:52:15 -0800166 return cmd - queue->cmds;
167}
168
169static inline bool nvmet_tcp_has_data_in(struct nvmet_tcp_cmd *cmd)
170{
171 return nvme_is_write(cmd->req.cmd) &&
172 cmd->rbytes_done < cmd->req.transfer_len;
173}
174
175static inline bool nvmet_tcp_need_data_in(struct nvmet_tcp_cmd *cmd)
176{
Max Gurtovoyfc6c9732019-04-08 18:39:59 +0300177 return nvmet_tcp_has_data_in(cmd) && !cmd->req.cqe->status;
Sagi Grimberg872d26a2018-12-03 17:52:15 -0800178}
179
180static inline bool nvmet_tcp_need_data_out(struct nvmet_tcp_cmd *cmd)
181{
182 return !nvme_is_write(cmd->req.cmd) &&
183 cmd->req.transfer_len > 0 &&
Max Gurtovoyfc6c9732019-04-08 18:39:59 +0300184 !cmd->req.cqe->status;
Sagi Grimberg872d26a2018-12-03 17:52:15 -0800185}
186
187static inline bool nvmet_tcp_has_inline_data(struct nvmet_tcp_cmd *cmd)
188{
189 return nvme_is_write(cmd->req.cmd) && cmd->pdu_len &&
190 !cmd->rbytes_done;
191}
192
193static inline struct nvmet_tcp_cmd *
194nvmet_tcp_get_cmd(struct nvmet_tcp_queue *queue)
195{
196 struct nvmet_tcp_cmd *cmd;
197
198 cmd = list_first_entry_or_null(&queue->free_list,
199 struct nvmet_tcp_cmd, entry);
200 if (!cmd)
201 return NULL;
202 list_del_init(&cmd->entry);
203
204 cmd->rbytes_done = cmd->wbytes_done = 0;
205 cmd->pdu_len = 0;
206 cmd->pdu_recv = 0;
207 cmd->iov = NULL;
208 cmd->flags = 0;
209 return cmd;
210}
211
212static inline void nvmet_tcp_put_cmd(struct nvmet_tcp_cmd *cmd)
213{
214 if (unlikely(cmd == &cmd->queue->connect))
215 return;
216
217 list_add_tail(&cmd->entry, &cmd->queue->free_list);
218}
219
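/*
 * Run per-queue work on the CPU that last handled receive processing for
 * this socket (sk_incoming_cpu), keeping command handling cache-local to
 * the NIC's receive path.
 */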
Mark Wunderlichf7790e52020-08-28 01:00:53 +0000220static inline int queue_cpu(struct nvmet_tcp_queue *queue)
221{
222 return queue->sock->sk->sk_incoming_cpu;
223}
224
Sagi Grimberg872d26a2018-12-03 17:52:15 -0800225static inline u8 nvmet_tcp_hdgst_len(struct nvmet_tcp_queue *queue)
226{
227 return queue->hdr_digest ? NVME_TCP_DIGEST_LENGTH : 0;
228}
229
230static inline u8 nvmet_tcp_ddgst_len(struct nvmet_tcp_queue *queue)
231{
232 return queue->data_digest ? NVME_TCP_DIGEST_LENGTH : 0;
233}
234
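/*
 * Compute the CRC32C header digest over the first @len bytes of @pdu; the
 * 32-bit result is stored immediately after the covered bytes (pdu + len).
 */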
235static inline void nvmet_tcp_hdgst(struct ahash_request *hash,
236 void *pdu, size_t len)
237{
238 struct scatterlist sg;
239
240 sg_init_one(&sg, pdu, len);
241 ahash_request_set_crypt(hash, &sg, pdu + len, len);
242 crypto_ahash_digest(hash);
243}
244
245static int nvmet_tcp_verify_hdgst(struct nvmet_tcp_queue *queue,
246 void *pdu, size_t len)
247{
248 struct nvme_tcp_hdr *hdr = pdu;
249 __le32 recv_digest;
250 __le32 exp_digest;
251
252 if (unlikely(!(hdr->flags & NVME_TCP_F_HDGST))) {
253 pr_err("queue %d: header digest enabled but no header digest\n",
254 queue->idx);
255 return -EPROTO;
256 }
257
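	/*
	 * Save the digest received on the wire, recompute it in place
	 * (nvmet_tcp_hdgst() stores its result right after the header) and
	 * compare the two.
	 */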
258 recv_digest = *(__le32 *)(pdu + hdr->hlen);
259 nvmet_tcp_hdgst(queue->rcv_hash, pdu, len);
260 exp_digest = *(__le32 *)(pdu + hdr->hlen);
261 if (recv_digest != exp_digest) {
262 pr_err("queue %d: header digest error: recv %#x expected %#x\n",
263 queue->idx, le32_to_cpu(recv_digest),
264 le32_to_cpu(exp_digest));
265 return -EPROTO;
266 }
267
268 return 0;
269}
270
271static int nvmet_tcp_check_ddgst(struct nvmet_tcp_queue *queue, void *pdu)
272{
273 struct nvme_tcp_hdr *hdr = pdu;
274 u8 digest_len = nvmet_tcp_hdgst_len(queue);
275 u32 len;
276
277 len = le32_to_cpu(hdr->plen) - hdr->hlen -
278 (hdr->flags & NVME_TCP_F_HDGST ? digest_len : 0);
279
280 if (unlikely(len && !(hdr->flags & NVME_TCP_F_DDGST))) {
281 pr_err("queue %d: data digest flag is cleared\n", queue->idx);
282 return -EPROTO;
283 }
284
285 return 0;
286}
287
288static void nvmet_tcp_unmap_pdu_iovec(struct nvmet_tcp_cmd *cmd)
289{
290 struct scatterlist *sg;
291 int i;
292
293 sg = &cmd->req.sg[cmd->sg_idx];
294
295 for (i = 0; i < cmd->nr_mapped; i++)
296 kunmap(sg_page(&sg[i]));
297}
298
299static void nvmet_tcp_map_pdu_iovec(struct nvmet_tcp_cmd *cmd)
300{
301 struct kvec *iov = cmd->iov;
302 struct scatterlist *sg;
303 u32 length, offset, sg_offset;
304
305 length = cmd->pdu_len;
306 cmd->nr_mapped = DIV_ROUND_UP(length, PAGE_SIZE);
307 offset = cmd->rbytes_done;
Sagi Grimbergcb8563f2021-02-03 01:20:25 -0800308 cmd->sg_idx = offset / PAGE_SIZE;
Sagi Grimberg872d26a2018-12-03 17:52:15 -0800309 sg_offset = offset % PAGE_SIZE;
310 sg = &cmd->req.sg[cmd->sg_idx];
311
312 while (length) {
313 u32 iov_len = min_t(u32, length, sg->length - sg_offset);
314
315 iov->iov_base = kmap(sg_page(sg)) + sg->offset + sg_offset;
316 iov->iov_len = iov_len;
317
318 length -= iov_len;
319 sg = sg_next(sg);
320 iov++;
Sagi Grimbergcb8563f2021-02-03 01:20:25 -0800321 sg_offset = 0;
Sagi Grimberg872d26a2018-12-03 17:52:15 -0800322 }
323
324 iov_iter_kvec(&cmd->recv_msg.msg_iter, READ, cmd->iov,
325 cmd->nr_mapped, cmd->pdu_len);
326}
327
328static void nvmet_tcp_fatal_error(struct nvmet_tcp_queue *queue)
329{
330 queue->rcv_state = NVMET_TCP_RECV_ERR;
331 if (queue->nvme_sq.ctrl)
332 nvmet_ctrl_fatal_error(queue->nvme_sq.ctrl);
333 else
334 kernel_sock_shutdown(queue->sock, SHUT_RDWR);
335}
336
Sagi Grimberg0236d342020-05-18 10:47:48 -0700337static void nvmet_tcp_socket_error(struct nvmet_tcp_queue *queue, int status)
338{
339 if (status == -EPIPE || status == -ECONNRESET)
340 kernel_sock_shutdown(queue->sock, SHUT_RDWR);
341 else
342 nvmet_tcp_fatal_error(queue);
343}
344
Sagi Grimberg872d26a2018-12-03 17:52:15 -0800345static int nvmet_tcp_map_data(struct nvmet_tcp_cmd *cmd)
346{
347 struct nvme_sgl_desc *sgl = &cmd->req.cmd->common.dptr.sgl;
348 u32 len = le32_to_cpu(sgl->length);
349
Logan Gunthorpee0bace72019-10-23 10:35:39 -0600350 if (!len)
Sagi Grimberg872d26a2018-12-03 17:52:15 -0800351 return 0;
352
353 if (sgl->type == ((NVME_SGL_FMT_DATA_DESC << 4) |
354 NVME_SGL_FMT_OFFSET)) {
355 if (!nvme_is_write(cmd->req.cmd))
356 return NVME_SC_INVALID_FIELD | NVME_SC_DNR;
357
358 if (len > cmd->req.port->inline_data_size)
359 return NVME_SC_SGL_INVALID_OFFSET | NVME_SC_DNR;
360 cmd->pdu_len = len;
361 }
362 cmd->req.transfer_len += len;
363
364 cmd->req.sg = sgl_alloc(len, GFP_KERNEL, &cmd->req.sg_cnt);
365 if (!cmd->req.sg)
366 return NVME_SC_INTERNAL;
367 cmd->cur_sg = cmd->req.sg;
368
369 if (nvmet_tcp_has_data_in(cmd)) {
370 cmd->iov = kmalloc_array(cmd->req.sg_cnt,
371 sizeof(*cmd->iov), GFP_KERNEL);
372 if (!cmd->iov)
373 goto err;
374 }
375
376 return 0;
377err:
Sagi Grimberg30f27d52019-09-13 10:36:40 -0700378 sgl_free(cmd->req.sg);
Sagi Grimberg872d26a2018-12-03 17:52:15 -0800379 return NVME_SC_INTERNAL;
380}
381
Sagi Grimbergfda871c2021-02-03 15:00:01 -0800382static void nvmet_tcp_send_ddgst(struct ahash_request *hash,
Sagi Grimberg872d26a2018-12-03 17:52:15 -0800383 struct nvmet_tcp_cmd *cmd)
384{
385 ahash_request_set_crypt(hash, cmd->req.sg,
386 (void *)&cmd->exp_ddgst, cmd->req.transfer_len);
387 crypto_ahash_digest(hash);
388}
389
Sagi Grimbergfda871c2021-02-03 15:00:01 -0800390static void nvmet_tcp_recv_ddgst(struct ahash_request *hash,
391 struct nvmet_tcp_cmd *cmd)
392{
393 struct scatterlist sg;
394 struct kvec *iov;
395 int i;
396
397 crypto_ahash_init(hash);
398 for (i = 0, iov = cmd->iov; i < cmd->nr_mapped; i++, iov++) {
399 sg_init_one(&sg, iov->iov_base, iov->iov_len);
400 ahash_request_set_crypt(hash, &sg, NULL, iov->iov_len);
401 crypto_ahash_update(hash);
402 }
403 ahash_request_set_crypt(hash, NULL, (void *)&cmd->exp_ddgst, 0);
404 crypto_ahash_final(hash);
405}
406
Sagi Grimberg872d26a2018-12-03 17:52:15 -0800407static void nvmet_setup_c2h_data_pdu(struct nvmet_tcp_cmd *cmd)
408{
409 struct nvme_tcp_data_pdu *pdu = cmd->data_pdu;
410 struct nvmet_tcp_queue *queue = cmd->queue;
411 u8 hdgst = nvmet_tcp_hdgst_len(cmd->queue);
412 u8 ddgst = nvmet_tcp_ddgst_len(cmd->queue);
413
414 cmd->offset = 0;
415 cmd->state = NVMET_TCP_SEND_DATA_PDU;
416
417 pdu->hdr.type = nvme_tcp_c2h_data;
Sagi Grimberg70583292019-03-08 15:41:21 -0800418 pdu->hdr.flags = NVME_TCP_F_DATA_LAST | (queue->nvme_sq.sqhd_disabled ?
419 NVME_TCP_F_DATA_SUCCESS : 0);
Sagi Grimberg872d26a2018-12-03 17:52:15 -0800420 pdu->hdr.hlen = sizeof(*pdu);
421 pdu->hdr.pdo = pdu->hdr.hlen + hdgst;
422 pdu->hdr.plen =
423 cpu_to_le32(pdu->hdr.hlen + hdgst +
424 cmd->req.transfer_len + ddgst);
Max Gurtovoyfc6c9732019-04-08 18:39:59 +0300425 pdu->command_id = cmd->req.cqe->command_id;
Sagi Grimberg872d26a2018-12-03 17:52:15 -0800426 pdu->data_length = cpu_to_le32(cmd->req.transfer_len);
427 pdu->data_offset = cpu_to_le32(cmd->wbytes_done);
428
429 if (queue->data_digest) {
430 pdu->hdr.flags |= NVME_TCP_F_DDGST;
Sagi Grimbergfda871c2021-02-03 15:00:01 -0800431 nvmet_tcp_send_ddgst(queue->snd_hash, cmd);
Sagi Grimberg872d26a2018-12-03 17:52:15 -0800432 }
433
434 if (cmd->queue->hdr_digest) {
435 pdu->hdr.flags |= NVME_TCP_F_HDGST;
436 nvmet_tcp_hdgst(queue->snd_hash, pdu, sizeof(*pdu));
437 }
438}
439
440static void nvmet_setup_r2t_pdu(struct nvmet_tcp_cmd *cmd)
441{
442 struct nvme_tcp_r2t_pdu *pdu = cmd->r2t_pdu;
443 struct nvmet_tcp_queue *queue = cmd->queue;
444 u8 hdgst = nvmet_tcp_hdgst_len(cmd->queue);
445
446 cmd->offset = 0;
447 cmd->state = NVMET_TCP_SEND_R2T;
448
449 pdu->hdr.type = nvme_tcp_r2t;
450 pdu->hdr.flags = 0;
451 pdu->hdr.hlen = sizeof(*pdu);
452 pdu->hdr.pdo = 0;
453 pdu->hdr.plen = cpu_to_le32(pdu->hdr.hlen + hdgst);
454
455 pdu->command_id = cmd->req.cmd->common.command_id;
456 pdu->ttag = nvmet_tcp_cmd_tag(cmd->queue, cmd);
457 pdu->r2t_length = cpu_to_le32(cmd->req.transfer_len - cmd->rbytes_done);
458 pdu->r2t_offset = cpu_to_le32(cmd->rbytes_done);
459 if (cmd->queue->hdr_digest) {
460 pdu->hdr.flags |= NVME_TCP_F_HDGST;
461 nvmet_tcp_hdgst(queue->snd_hash, pdu, sizeof(*pdu));
462 }
463}
464
465static void nvmet_setup_response_pdu(struct nvmet_tcp_cmd *cmd)
466{
467 struct nvme_tcp_rsp_pdu *pdu = cmd->rsp_pdu;
468 struct nvmet_tcp_queue *queue = cmd->queue;
469 u8 hdgst = nvmet_tcp_hdgst_len(cmd->queue);
470
471 cmd->offset = 0;
472 cmd->state = NVMET_TCP_SEND_RESPONSE;
473
474 pdu->hdr.type = nvme_tcp_rsp;
475 pdu->hdr.flags = 0;
476 pdu->hdr.hlen = sizeof(*pdu);
477 pdu->hdr.pdo = 0;
478 pdu->hdr.plen = cpu_to_le32(pdu->hdr.hlen + hdgst);
479 if (cmd->queue->hdr_digest) {
480 pdu->hdr.flags |= NVME_TCP_F_HDGST;
481 nvmet_tcp_hdgst(queue->snd_hash, pdu, sizeof(*pdu));
482 }
483}
484
485static void nvmet_tcp_process_resp_list(struct nvmet_tcp_queue *queue)
486{
487 struct llist_node *node;
Sagi Grimbergb8a12e92020-06-24 12:27:16 -0700488 struct nvmet_tcp_cmd *cmd;
Sagi Grimberg872d26a2018-12-03 17:52:15 -0800489
Sagi Grimbergb8a12e92020-06-24 12:27:16 -0700490 for (node = llist_del_all(&queue->resp_list); node; node = node->next) {
491 cmd = llist_entry(node, struct nvmet_tcp_cmd, lentry);
Sagi Grimberg872d26a2018-12-03 17:52:15 -0800492 list_add(&cmd->entry, &queue->resp_send_list);
Sagi Grimberg872d26a2018-12-03 17:52:15 -0800493 queue->send_list_len++;
494 }
495}
496
497static struct nvmet_tcp_cmd *nvmet_tcp_fetch_cmd(struct nvmet_tcp_queue *queue)
498{
499 queue->snd_cmd = list_first_entry_or_null(&queue->resp_send_list,
500 struct nvmet_tcp_cmd, entry);
501 if (!queue->snd_cmd) {
502 nvmet_tcp_process_resp_list(queue);
503 queue->snd_cmd =
504 list_first_entry_or_null(&queue->resp_send_list,
505 struct nvmet_tcp_cmd, entry);
506 if (unlikely(!queue->snd_cmd))
507 return NULL;
508 }
509
510 list_del_init(&queue->snd_cmd->entry);
511 queue->send_list_len--;
512
513 if (nvmet_tcp_need_data_out(queue->snd_cmd))
514 nvmet_setup_c2h_data_pdu(queue->snd_cmd);
515 else if (nvmet_tcp_need_data_in(queue->snd_cmd))
516 nvmet_setup_r2t_pdu(queue->snd_cmd);
517 else
518 nvmet_setup_response_pdu(queue->snd_cmd);
519
520 return queue->snd_cmd;
521}
522
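/*
 * Completion path entry point: responses are added to a lock-free llist
 * (safe from any context) and io_work is kicked on the queue's CPU; the
 * send path later splices them onto resp_send_list.
 */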
523static void nvmet_tcp_queue_response(struct nvmet_req *req)
524{
525 struct nvmet_tcp_cmd *cmd =
526 container_of(req, struct nvmet_tcp_cmd, req);
527 struct nvmet_tcp_queue *queue = cmd->queue;
528
529 llist_add(&cmd->lentry, &queue->resp_list);
Mark Wunderlichf7790e52020-08-28 01:00:53 +0000530 queue_work_on(queue_cpu(queue), nvmet_tcp_wq, &cmd->queue->io_work);
Sagi Grimberg872d26a2018-12-03 17:52:15 -0800531}
532
533static int nvmet_try_send_data_pdu(struct nvmet_tcp_cmd *cmd)
534{
535 u8 hdgst = nvmet_tcp_hdgst_len(cmd->queue);
536 int left = sizeof(*cmd->data_pdu) - cmd->offset + hdgst;
537 int ret;
538
539 ret = kernel_sendpage(cmd->queue->sock, virt_to_page(cmd->data_pdu),
540 offset_in_page(cmd->data_pdu) + cmd->offset,
Sagi Grimberg4eea8042020-05-04 22:20:02 -0700541 left, MSG_DONTWAIT | MSG_MORE | MSG_SENDPAGE_NOTLAST);
Sagi Grimberg872d26a2018-12-03 17:52:15 -0800542 if (ret <= 0)
543 return ret;
544
545 cmd->offset += ret;
546 left -= ret;
547
548 if (left)
549 return -EAGAIN;
550
551 cmd->state = NVMET_TCP_SEND_DATA;
552 cmd->offset = 0;
553 return 1;
554}
555
Sagi Grimberg98fd5c72020-03-12 16:06:38 -0700556static int nvmet_try_send_data(struct nvmet_tcp_cmd *cmd, bool last_in_batch)
Sagi Grimberg872d26a2018-12-03 17:52:15 -0800557{
558 struct nvmet_tcp_queue *queue = cmd->queue;
559 int ret;
560
561 while (cmd->cur_sg) {
562 struct page *page = sg_page(cmd->cur_sg);
563 u32 left = cmd->cur_sg->length - cmd->offset;
Sagi Grimberg98fd5c72020-03-12 16:06:38 -0700564 int flags = MSG_DONTWAIT;
565
566 if ((!last_in_batch && cmd->queue->send_list_len) ||
567 cmd->wbytes_done + left < cmd->req.transfer_len ||
568 queue->data_digest || !queue->nvme_sq.sqhd_disabled)
Sagi Grimberg4eea8042020-05-04 22:20:02 -0700569 flags |= MSG_MORE | MSG_SENDPAGE_NOTLAST;
Sagi Grimberg872d26a2018-12-03 17:52:15 -0800570
571 ret = kernel_sendpage(cmd->queue->sock, page, cmd->offset,
Sagi Grimberg98fd5c72020-03-12 16:06:38 -0700572 left, flags);
Sagi Grimberg872d26a2018-12-03 17:52:15 -0800573 if (ret <= 0)
574 return ret;
575
576 cmd->offset += ret;
577 cmd->wbytes_done += ret;
578
579 /* Done with sg? */
580 if (cmd->offset == cmd->cur_sg->length) {
581 cmd->cur_sg = sg_next(cmd->cur_sg);
582 cmd->offset = 0;
583 }
584 }
585
586 if (queue->data_digest) {
587 cmd->state = NVMET_TCP_SEND_DDGST;
588 cmd->offset = 0;
589 } else {
Sagi Grimberg70583292019-03-08 15:41:21 -0800590 if (queue->nvme_sq.sqhd_disabled) {
591 cmd->queue->snd_cmd = NULL;
592 nvmet_tcp_put_cmd(cmd);
593 } else {
594 nvmet_setup_response_pdu(cmd);
595 }
Sagi Grimberg872d26a2018-12-03 17:52:15 -0800596 }
Sagi Grimberg70583292019-03-08 15:41:21 -0800597
598 if (queue->nvme_sq.sqhd_disabled) {
599 kfree(cmd->iov);
Sagi Grimberg30f27d52019-09-13 10:36:40 -0700600 sgl_free(cmd->req.sg);
Sagi Grimberg70583292019-03-08 15:41:21 -0800601 }
602
Sagi Grimberg872d26a2018-12-03 17:52:15 -0800603 return 1;
604
605}
606
607static int nvmet_try_send_response(struct nvmet_tcp_cmd *cmd,
608 bool last_in_batch)
609{
610 u8 hdgst = nvmet_tcp_hdgst_len(cmd->queue);
611 int left = sizeof(*cmd->rsp_pdu) - cmd->offset + hdgst;
612 int flags = MSG_DONTWAIT;
613 int ret;
614
615 if (!last_in_batch && cmd->queue->send_list_len)
Sagi Grimberg4eea8042020-05-04 22:20:02 -0700616 flags |= MSG_MORE | MSG_SENDPAGE_NOTLAST;
Sagi Grimberg872d26a2018-12-03 17:52:15 -0800617 else
618 flags |= MSG_EOR;
619
620 ret = kernel_sendpage(cmd->queue->sock, virt_to_page(cmd->rsp_pdu),
621 offset_in_page(cmd->rsp_pdu) + cmd->offset, left, flags);
622 if (ret <= 0)
623 return ret;
624 cmd->offset += ret;
625 left -= ret;
626
627 if (left)
628 return -EAGAIN;
629
630 kfree(cmd->iov);
Sagi Grimberg30f27d52019-09-13 10:36:40 -0700631 sgl_free(cmd->req.sg);
Sagi Grimberg872d26a2018-12-03 17:52:15 -0800632 cmd->queue->snd_cmd = NULL;
633 nvmet_tcp_put_cmd(cmd);
634 return 1;
635}
636
637static int nvmet_try_send_r2t(struct nvmet_tcp_cmd *cmd, bool last_in_batch)
638{
639 u8 hdgst = nvmet_tcp_hdgst_len(cmd->queue);
640 int left = sizeof(*cmd->r2t_pdu) - cmd->offset + hdgst;
641 int flags = MSG_DONTWAIT;
642 int ret;
643
644 if (!last_in_batch && cmd->queue->send_list_len)
Sagi Grimberg4eea8042020-05-04 22:20:02 -0700645 flags |= MSG_MORE | MSG_SENDPAGE_NOTLAST;
Sagi Grimberg872d26a2018-12-03 17:52:15 -0800646 else
647 flags |= MSG_EOR;
648
649 ret = kernel_sendpage(cmd->queue->sock, virt_to_page(cmd->r2t_pdu),
650 offset_in_page(cmd->r2t_pdu) + cmd->offset, left, flags);
651 if (ret <= 0)
652 return ret;
653 cmd->offset += ret;
654 left -= ret;
655
656 if (left)
657 return -EAGAIN;
658
659 cmd->queue->snd_cmd = NULL;
660 return 1;
661}
662
Sagi Grimberge90d1722020-03-12 16:06:39 -0700663static int nvmet_try_send_ddgst(struct nvmet_tcp_cmd *cmd, bool last_in_batch)
Sagi Grimberg872d26a2018-12-03 17:52:15 -0800664{
665 struct nvmet_tcp_queue *queue = cmd->queue;
666 struct msghdr msg = { .msg_flags = MSG_DONTWAIT };
667 struct kvec iov = {
668 .iov_base = &cmd->exp_ddgst + cmd->offset,
669 .iov_len = NVME_TCP_DIGEST_LENGTH - cmd->offset
670 };
671 int ret;
672
Sagi Grimberge90d1722020-03-12 16:06:39 -0700673 if (!last_in_batch && cmd->queue->send_list_len)
674 msg.msg_flags |= MSG_MORE;
Sagi Grimbergf381ab12020-05-12 18:01:43 -0700675 else
676 msg.msg_flags |= MSG_EOR;
Sagi Grimberge90d1722020-03-12 16:06:39 -0700677
Sagi Grimberg872d26a2018-12-03 17:52:15 -0800678 ret = kernel_sendmsg(queue->sock, &msg, &iov, 1, iov.iov_len);
679 if (unlikely(ret <= 0))
680 return ret;
681
682 cmd->offset += ret;
Sagi Grimberg70583292019-03-08 15:41:21 -0800683
684 if (queue->nvme_sq.sqhd_disabled) {
685 cmd->queue->snd_cmd = NULL;
686 nvmet_tcp_put_cmd(cmd);
687 } else {
688 nvmet_setup_response_pdu(cmd);
689 }
Sagi Grimberg872d26a2018-12-03 17:52:15 -0800690 return 1;
691}
692
693static int nvmet_tcp_try_send_one(struct nvmet_tcp_queue *queue,
694 bool last_in_batch)
695{
696 struct nvmet_tcp_cmd *cmd = queue->snd_cmd;
697 int ret = 0;
698
699 if (!cmd || queue->state == NVMET_TCP_Q_DISCONNECTING) {
700 cmd = nvmet_tcp_fetch_cmd(queue);
701 if (unlikely(!cmd))
702 return 0;
703 }
704
705 if (cmd->state == NVMET_TCP_SEND_DATA_PDU) {
706 ret = nvmet_try_send_data_pdu(cmd);
707 if (ret <= 0)
708 goto done_send;
709 }
710
711 if (cmd->state == NVMET_TCP_SEND_DATA) {
Sagi Grimberg98fd5c72020-03-12 16:06:38 -0700712 ret = nvmet_try_send_data(cmd, last_in_batch);
Sagi Grimberg872d26a2018-12-03 17:52:15 -0800713 if (ret <= 0)
714 goto done_send;
715 }
716
717 if (cmd->state == NVMET_TCP_SEND_DDGST) {
Sagi Grimberge90d1722020-03-12 16:06:39 -0700718 ret = nvmet_try_send_ddgst(cmd, last_in_batch);
Sagi Grimberg872d26a2018-12-03 17:52:15 -0800719 if (ret <= 0)
720 goto done_send;
721 }
722
723 if (cmd->state == NVMET_TCP_SEND_R2T) {
724 ret = nvmet_try_send_r2t(cmd, last_in_batch);
725 if (ret <= 0)
726 goto done_send;
727 }
728
729 if (cmd->state == NVMET_TCP_SEND_RESPONSE)
730 ret = nvmet_try_send_response(cmd, last_in_batch);
731
732done_send:
733 if (ret < 0) {
734 if (ret == -EAGAIN)
735 return 0;
736 return ret;
737 }
738
739 return 1;
740}
741
742static int nvmet_tcp_try_send(struct nvmet_tcp_queue *queue,
743 int budget, int *sends)
744{
745 int i, ret = 0;
746
747 for (i = 0; i < budget; i++) {
748 ret = nvmet_tcp_try_send_one(queue, i == budget - 1);
Sagi Grimberg0236d342020-05-18 10:47:48 -0700749 if (unlikely(ret < 0)) {
750 nvmet_tcp_socket_error(queue, ret);
751 goto done;
752 } else if (ret == 0) {
Sagi Grimberg872d26a2018-12-03 17:52:15 -0800753 break;
Sagi Grimberg0236d342020-05-18 10:47:48 -0700754 }
Sagi Grimberg872d26a2018-12-03 17:52:15 -0800755 (*sends)++;
756 }
Sagi Grimberg0236d342020-05-18 10:47:48 -0700757done:
Sagi Grimberg872d26a2018-12-03 17:52:15 -0800758 return ret;
759}
760
761static void nvmet_prepare_receive_pdu(struct nvmet_tcp_queue *queue)
762{
763 queue->offset = 0;
764 queue->left = sizeof(struct nvme_tcp_hdr);
765 queue->cmd = NULL;
766 queue->rcv_state = NVMET_TCP_RECV_PDU;
767}
768
769static void nvmet_tcp_free_crypto(struct nvmet_tcp_queue *queue)
770{
771 struct crypto_ahash *tfm = crypto_ahash_reqtfm(queue->rcv_hash);
772
773 ahash_request_free(queue->rcv_hash);
774 ahash_request_free(queue->snd_hash);
775 crypto_free_ahash(tfm);
776}
777
778static int nvmet_tcp_alloc_crypto(struct nvmet_tcp_queue *queue)
779{
780 struct crypto_ahash *tfm;
781
782 tfm = crypto_alloc_ahash("crc32c", 0, CRYPTO_ALG_ASYNC);
783 if (IS_ERR(tfm))
784 return PTR_ERR(tfm);
785
786 queue->snd_hash = ahash_request_alloc(tfm, GFP_KERNEL);
787 if (!queue->snd_hash)
788 goto free_tfm;
789 ahash_request_set_callback(queue->snd_hash, 0, NULL, NULL);
790
791 queue->rcv_hash = ahash_request_alloc(tfm, GFP_KERNEL);
792 if (!queue->rcv_hash)
793 goto free_snd_hash;
794 ahash_request_set_callback(queue->rcv_hash, 0, NULL, NULL);
795
796 return 0;
797free_snd_hash:
798 ahash_request_free(queue->snd_hash);
799free_tfm:
800 crypto_free_ahash(tfm);
801 return -ENOMEM;
802}
803
804
805static int nvmet_tcp_handle_icreq(struct nvmet_tcp_queue *queue)
806{
807 struct nvme_tcp_icreq_pdu *icreq = &queue->pdu.icreq;
808 struct nvme_tcp_icresp_pdu *icresp = &queue->pdu.icresp;
809 struct msghdr msg = {};
810 struct kvec iov;
811 int ret;
812
813 if (le32_to_cpu(icreq->hdr.plen) != sizeof(struct nvme_tcp_icreq_pdu)) {
814 pr_err("bad nvme-tcp pdu length (%d)\n",
815 le32_to_cpu(icreq->hdr.plen));
816 nvmet_tcp_fatal_error(queue);
		return -EPROTO;
817 }
818
819 if (icreq->pfv != NVME_TCP_PFV_1_0) {
820 pr_err("queue %d: bad pfv %d\n", queue->idx, icreq->pfv);
821 return -EPROTO;
822 }
823
824 if (icreq->hpda != 0) {
825 pr_err("queue %d: unsupported hpda %d\n", queue->idx,
826 icreq->hpda);
827 return -EPROTO;
828 }
829
Sagi Grimberg872d26a2018-12-03 17:52:15 -0800830 queue->hdr_digest = !!(icreq->digest & NVME_TCP_HDR_DIGEST_ENABLE);
831 queue->data_digest = !!(icreq->digest & NVME_TCP_DATA_DIGEST_ENABLE);
832 if (queue->hdr_digest || queue->data_digest) {
833 ret = nvmet_tcp_alloc_crypto(queue);
834 if (ret)
835 return ret;
836 }
837
838 memset(icresp, 0, sizeof(*icresp));
839 icresp->hdr.type = nvme_tcp_icresp;
840 icresp->hdr.hlen = sizeof(*icresp);
841 icresp->hdr.pdo = 0;
842 icresp->hdr.plen = cpu_to_le32(icresp->hdr.hlen);
843 icresp->pfv = cpu_to_le16(NVME_TCP_PFV_1_0);
Sagi Grimberg9cda34e2020-02-25 16:42:27 -0800844 icresp->maxdata = cpu_to_le32(0x400000); /* 4M arbitrary limit */
Sagi Grimberg872d26a2018-12-03 17:52:15 -0800845 icresp->cpda = 0;
846 if (queue->hdr_digest)
847 icresp->digest |= NVME_TCP_HDR_DIGEST_ENABLE;
848 if (queue->data_digest)
849 icresp->digest |= NVME_TCP_DATA_DIGEST_ENABLE;
850
851 iov.iov_base = icresp;
852 iov.iov_len = sizeof(*icresp);
853 ret = kernel_sendmsg(queue->sock, &msg, &iov, 1, iov.iov_len);
854 if (ret < 0)
855 goto free_crypto;
856
857 queue->state = NVMET_TCP_Q_LIVE;
858 nvmet_prepare_receive_pdu(queue);
859 return 0;
860free_crypto:
861 if (queue->hdr_digest || queue->data_digest)
862 nvmet_tcp_free_crypto(queue);
863 return ret;
864}
865
866static void nvmet_tcp_handle_req_failure(struct nvmet_tcp_queue *queue,
867 struct nvmet_tcp_cmd *cmd, struct nvmet_req *req)
868{
Logan Gunthorpec73eebc2019-10-23 10:35:40 -0600869 size_t data_len = le32_to_cpu(req->cmd->common.dptr.sgl.length);
Sagi Grimberg872d26a2018-12-03 17:52:15 -0800870 int ret;
871
Sagi Grimberg872d26a2018-12-03 17:52:15 -0800872 if (!nvme_is_write(cmd->req.cmd) ||
Logan Gunthorpec73eebc2019-10-23 10:35:40 -0600873 data_len > cmd->req.port->inline_data_size) {
Sagi Grimberg872d26a2018-12-03 17:52:15 -0800874 nvmet_prepare_receive_pdu(queue);
875 return;
876 }
877
878 ret = nvmet_tcp_map_data(cmd);
879 if (unlikely(ret)) {
880 pr_err("queue %d: failed to map data\n", queue->idx);
881 nvmet_tcp_fatal_error(queue);
882 return;
883 }
884
885 queue->rcv_state = NVMET_TCP_RECV_DATA;
886 nvmet_tcp_map_pdu_iovec(cmd);
887 cmd->flags |= NVMET_TCP_F_INIT_FAILED;
888}
889
890static int nvmet_tcp_handle_h2c_data_pdu(struct nvmet_tcp_queue *queue)
891{
892 struct nvme_tcp_data_pdu *data = &queue->pdu.data;
893 struct nvmet_tcp_cmd *cmd;
894
Ziye Yanga6ce7d72020-08-22 00:48:10 +0800895 if (likely(queue->nr_cmds))
896 cmd = &queue->cmds[data->ttag];
897 else
898 cmd = &queue->connect;
Sagi Grimberg872d26a2018-12-03 17:52:15 -0800899
900 if (le32_to_cpu(data->data_offset) != cmd->rbytes_done) {
901 pr_err("ttag %u unexpected data offset %u (expected %u)\n",
902 data->ttag, le32_to_cpu(data->data_offset),
903 cmd->rbytes_done);
904 /* FIXME: use path and transport errors */
905 nvmet_req_complete(&cmd->req,
906 NVME_SC_INVALID_FIELD | NVME_SC_DNR);
907 return -EPROTO;
908 }
909
910 cmd->pdu_len = le32_to_cpu(data->data_length);
911 cmd->pdu_recv = 0;
912 nvmet_tcp_map_pdu_iovec(cmd);
913 queue->cmd = cmd;
914 queue->rcv_state = NVMET_TCP_RECV_DATA;
915
916 return 0;
917}
918
919static int nvmet_tcp_done_recv_pdu(struct nvmet_tcp_queue *queue)
920{
921 struct nvme_tcp_hdr *hdr = &queue->pdu.cmd.hdr;
922 struct nvme_command *nvme_cmd = &queue->pdu.cmd.cmd;
923 struct nvmet_req *req;
924 int ret;
925
926 if (unlikely(queue->state == NVMET_TCP_Q_CONNECTING)) {
927 if (hdr->type != nvme_tcp_icreq) {
928 pr_err("unexpected pdu type (%d) before icreq\n",
929 hdr->type);
930 nvmet_tcp_fatal_error(queue);
931 return -EPROTO;
932 }
933 return nvmet_tcp_handle_icreq(queue);
934 }
935
936 if (hdr->type == nvme_tcp_h2c_data) {
937 ret = nvmet_tcp_handle_h2c_data_pdu(queue);
938 if (unlikely(ret))
939 return ret;
940 return 0;
941 }
942
943 queue->cmd = nvmet_tcp_get_cmd(queue);
944 if (unlikely(!queue->cmd)) {
945 /* This should never happen */
946 pr_err("queue %d: out of commands (%d) send_list_len: %d, opcode: %d",
947 queue->idx, queue->nr_cmds, queue->send_list_len,
948 nvme_cmd->common.opcode);
949 nvmet_tcp_fatal_error(queue);
950 return -ENOMEM;
951 }
952
953 req = &queue->cmd->req;
954 memcpy(req->cmd, nvme_cmd, sizeof(*nvme_cmd));
955
956 if (unlikely(!nvmet_req_init(req, &queue->nvme_cq,
957 &queue->nvme_sq, &nvmet_tcp_ops))) {
958 pr_err("failed cmd %p id %d opcode %d, data_len: %d\n",
959 req->cmd, req->cmd->common.command_id,
960 req->cmd->common.opcode,
961 le32_to_cpu(req->cmd->common.dptr.sgl.length));
962
963 nvmet_tcp_handle_req_failure(queue, queue->cmd, req);
964 return -EAGAIN;
965 }
966
967 ret = nvmet_tcp_map_data(queue->cmd);
968 if (unlikely(ret)) {
969 pr_err("queue %d: failed to map data\n", queue->idx);
970 if (nvmet_tcp_has_inline_data(queue->cmd))
971 nvmet_tcp_fatal_error(queue);
972 else
973 nvmet_req_complete(req, ret);
974 ret = -EAGAIN;
975 goto out;
976 }
977
978 if (nvmet_tcp_need_data_in(queue->cmd)) {
979 if (nvmet_tcp_has_inline_data(queue->cmd)) {
980 queue->rcv_state = NVMET_TCP_RECV_DATA;
981 nvmet_tcp_map_pdu_iovec(queue->cmd);
982 return 0;
983 }
984 /* send back R2T */
985 nvmet_tcp_queue_response(&queue->cmd->req);
986 goto out;
987 }
988
Christoph Hellwigbe3f3112019-10-23 10:35:45 -0600989 queue->cmd->req.execute(&queue->cmd->req);
Sagi Grimberg872d26a2018-12-03 17:52:15 -0800990out:
991 nvmet_prepare_receive_pdu(queue);
992 return ret;
993}
994
995static const u8 nvme_tcp_pdu_sizes[] = {
996 [nvme_tcp_icreq] = sizeof(struct nvme_tcp_icreq_pdu),
997 [nvme_tcp_cmd] = sizeof(struct nvme_tcp_cmd_pdu),
998 [nvme_tcp_h2c_data] = sizeof(struct nvme_tcp_data_pdu),
999};
1000
1001static inline u8 nvmet_tcp_pdu_size(u8 type)
1002{
1003 size_t idx = type;
1004
1005 return (idx < ARRAY_SIZE(nvme_tcp_pdu_sizes) &&
1006 nvme_tcp_pdu_sizes[idx]) ?
1007 nvme_tcp_pdu_sizes[idx] : 0;
1008}
1009
1010static inline bool nvmet_tcp_pdu_valid(u8 type)
1011{
1012 switch (type) {
1013 case nvme_tcp_icreq:
1014 case nvme_tcp_cmd:
1015 case nvme_tcp_h2c_data:
1016 /* fallthru */
1017 return true;
1018 }
1019
1020 return false;
1021}
1022
1023static int nvmet_tcp_try_recv_pdu(struct nvmet_tcp_queue *queue)
1024{
1025 struct nvme_tcp_hdr *hdr = &queue->pdu.cmd.hdr;
1026 int len;
1027 struct kvec iov;
1028 struct msghdr msg = { .msg_flags = MSG_DONTWAIT };
1029
1030recv:
1031 iov.iov_base = (void *)&queue->pdu + queue->offset;
1032 iov.iov_len = queue->left;
1033 len = kernel_recvmsg(queue->sock, &msg, &iov, 1,
1034 iov.iov_len, msg.msg_flags);
1035 if (unlikely(len < 0))
1036 return len;
1037
1038 queue->offset += len;
1039 queue->left -= len;
1040 if (queue->left)
1041 return -EAGAIN;
1042
1043 if (queue->offset == sizeof(struct nvme_tcp_hdr)) {
1044 u8 hdgst = nvmet_tcp_hdgst_len(queue);
1045
1046 if (unlikely(!nvmet_tcp_pdu_valid(hdr->type))) {
1047 pr_err("unexpected pdu type %d\n", hdr->type);
1048 nvmet_tcp_fatal_error(queue);
1049 return -EIO;
1050 }
1051
1052 if (unlikely(hdr->hlen != nvmet_tcp_pdu_size(hdr->type))) {
1053 pr_err("pdu %d bad hlen %d\n", hdr->type, hdr->hlen);
1054 return -EIO;
1055 }
1056
1057 queue->left = hdr->hlen - queue->offset + hdgst;
1058 goto recv;
1059 }
1060
1061 if (queue->hdr_digest &&
1062 nvmet_tcp_verify_hdgst(queue, &queue->pdu, queue->offset)) {
1063 nvmet_tcp_fatal_error(queue); /* fatal */
1064 return -EPROTO;
1065 }
1066
1067 if (queue->data_digest &&
1068 nvmet_tcp_check_ddgst(queue, &queue->pdu)) {
1069 nvmet_tcp_fatal_error(queue); /* fatal */
1070 return -EPROTO;
1071 }
1072
1073 return nvmet_tcp_done_recv_pdu(queue);
1074}
1075
1076static void nvmet_tcp_prep_recv_ddgst(struct nvmet_tcp_cmd *cmd)
1077{
1078 struct nvmet_tcp_queue *queue = cmd->queue;
1079
Sagi Grimbergfda871c2021-02-03 15:00:01 -08001080 nvmet_tcp_recv_ddgst(queue->rcv_hash, cmd);
Sagi Grimberg872d26a2018-12-03 17:52:15 -08001081 queue->offset = 0;
1082 queue->left = NVME_TCP_DIGEST_LENGTH;
1083 queue->rcv_state = NVMET_TCP_RECV_DDGST;
1084}
1085
1086static int nvmet_tcp_try_recv_data(struct nvmet_tcp_queue *queue)
1087{
1088 struct nvmet_tcp_cmd *cmd = queue->cmd;
1089 int ret;
1090
1091 while (msg_data_left(&cmd->recv_msg)) {
1092 ret = sock_recvmsg(cmd->queue->sock, &cmd->recv_msg,
1093 cmd->recv_msg.msg_flags);
1094 if (ret <= 0)
1095 return ret;
1096
1097 cmd->pdu_recv += ret;
1098 cmd->rbytes_done += ret;
1099 }
1100
Elad Grupibac04452021-03-16 17:44:25 +02001101 nvmet_tcp_unmap_pdu_iovec(cmd);
Sagi Grimbergfda871c2021-02-03 15:00:01 -08001102 if (queue->data_digest) {
1103 nvmet_tcp_prep_recv_ddgst(cmd);
1104 return 0;
1105 }
Sagi Grimberg872d26a2018-12-03 17:52:15 -08001106
1107 if (!(cmd->flags & NVMET_TCP_F_INIT_FAILED) &&
1108 cmd->rbytes_done == cmd->req.transfer_len) {
Christoph Hellwigbe3f3112019-10-23 10:35:45 -06001109 cmd->req.execute(&cmd->req);
Sagi Grimberg872d26a2018-12-03 17:52:15 -08001110 }
1111
1112 nvmet_prepare_receive_pdu(queue);
1113 return 0;
1114}
1115
1116static int nvmet_tcp_try_recv_ddgst(struct nvmet_tcp_queue *queue)
1117{
1118 struct nvmet_tcp_cmd *cmd = queue->cmd;
1119 int ret;
1120 struct msghdr msg = { .msg_flags = MSG_DONTWAIT };
1121 struct kvec iov = {
1122 .iov_base = (void *)&cmd->recv_ddgst + queue->offset,
1123 .iov_len = queue->left
1124 };
1125
1126 ret = kernel_recvmsg(queue->sock, &msg, &iov, 1,
1127 iov.iov_len, msg.msg_flags);
1128 if (unlikely(ret < 0))
1129 return ret;
1130
1131 queue->offset += ret;
1132 queue->left -= ret;
1133 if (queue->left)
1134 return -EAGAIN;
1135
1136 if (queue->data_digest && cmd->exp_ddgst != cmd->recv_ddgst) {
1137 pr_err("queue %d: cmd %d pdu (%d) data digest error: recv %#x expected %#x\n",
1138 queue->idx, cmd->req.cmd->common.command_id,
1139 queue->pdu.cmd.hdr.type, le32_to_cpu(cmd->recv_ddgst),
1140 le32_to_cpu(cmd->exp_ddgst));
1141 nvmet_tcp_finish_cmd(cmd);
1142 nvmet_tcp_fatal_error(queue);
1143 ret = -EPROTO;
1144 goto out;
1145 }
1146
1147 if (!(cmd->flags & NVMET_TCP_F_INIT_FAILED) &&
1148 cmd->rbytes_done == cmd->req.transfer_len)
Christoph Hellwigbe3f3112019-10-23 10:35:45 -06001149 cmd->req.execute(&cmd->req);
Sagi Grimberg872d26a2018-12-03 17:52:15 -08001150 ret = 0;
1151out:
1152 nvmet_prepare_receive_pdu(queue);
1153 return ret;
1154}
1155
1156static int nvmet_tcp_try_recv_one(struct nvmet_tcp_queue *queue)
1157{
Sagi Grimbergfb865852019-01-09 14:56:32 -08001158 int result = 0;
Sagi Grimberg872d26a2018-12-03 17:52:15 -08001159
1160 if (unlikely(queue->rcv_state == NVMET_TCP_RECV_ERR))
1161 return 0;
1162
1163 if (queue->rcv_state == NVMET_TCP_RECV_PDU) {
1164 result = nvmet_tcp_try_recv_pdu(queue);
1165 if (result != 0)
1166 goto done_recv;
1167 }
1168
1169 if (queue->rcv_state == NVMET_TCP_RECV_DATA) {
1170 result = nvmet_tcp_try_recv_data(queue);
1171 if (result != 0)
1172 goto done_recv;
1173 }
1174
1175 if (queue->rcv_state == NVMET_TCP_RECV_DDGST) {
1176 result = nvmet_tcp_try_recv_ddgst(queue);
1177 if (result != 0)
1178 goto done_recv;
1179 }
1180
1181done_recv:
1182 if (result < 0) {
1183 if (result == -EAGAIN)
1184 return 0;
1185 return result;
1186 }
1187 return 1;
1188}
1189
1190static int nvmet_tcp_try_recv(struct nvmet_tcp_queue *queue,
1191 int budget, int *recvs)
1192{
1193 int i, ret = 0;
1194
1195 for (i = 0; i < budget; i++) {
1196 ret = nvmet_tcp_try_recv_one(queue);
Sagi Grimberg0236d342020-05-18 10:47:48 -07001197 if (unlikely(ret < 0)) {
1198 nvmet_tcp_socket_error(queue, ret);
1199 goto done;
1200 } else if (ret == 0) {
Sagi Grimberg872d26a2018-12-03 17:52:15 -08001201 break;
Sagi Grimberg0236d342020-05-18 10:47:48 -07001202 }
Sagi Grimberg872d26a2018-12-03 17:52:15 -08001203 (*recvs)++;
1204 }
Sagi Grimberg0236d342020-05-18 10:47:48 -07001205done:
Sagi Grimberg872d26a2018-12-03 17:52:15 -08001206 return ret;
1207}
1208
1209static void nvmet_tcp_schedule_release_queue(struct nvmet_tcp_queue *queue)
1210{
1211 spin_lock(&queue->state_lock);
1212 if (queue->state != NVMET_TCP_Q_DISCONNECTING) {
1213 queue->state = NVMET_TCP_Q_DISCONNECTING;
1214 schedule_work(&queue->release_work);
1215 }
1216 spin_unlock(&queue->state_lock);
1217}
1218
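/*
 * Main per-queue I/O loop: alternate between receiving PDUs/data and
 * sending queued responses until a full pass makes no progress or the
 * overall budget is exhausted; if work is still pending, requeue on the
 * same CPU.
 */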
1219static void nvmet_tcp_io_work(struct work_struct *w)
1220{
1221 struct nvmet_tcp_queue *queue =
1222 container_of(w, struct nvmet_tcp_queue, io_work);
1223 bool pending;
1224 int ret, ops = 0;
1225
1226 do {
1227 pending = false;
1228
1229 ret = nvmet_tcp_try_recv(queue, NVMET_TCP_RECV_BUDGET, &ops);
Sagi Grimberg0236d342020-05-18 10:47:48 -07001230 if (ret > 0)
Sagi Grimberg872d26a2018-12-03 17:52:15 -08001231 pending = true;
Sagi Grimberg0236d342020-05-18 10:47:48 -07001232 else if (ret < 0)
Sagi Grimberg872d26a2018-12-03 17:52:15 -08001233 return;
Sagi Grimberg872d26a2018-12-03 17:52:15 -08001234
1235 ret = nvmet_tcp_try_send(queue, NVMET_TCP_SEND_BUDGET, &ops);
Sagi Grimberg0236d342020-05-18 10:47:48 -07001236 if (ret > 0)
Sagi Grimberg872d26a2018-12-03 17:52:15 -08001237 pending = true;
Sagi Grimberg0236d342020-05-18 10:47:48 -07001238 else if (ret < 0)
Sagi Grimberg872d26a2018-12-03 17:52:15 -08001239 return;
Sagi Grimberg872d26a2018-12-03 17:52:15 -08001240
1241 } while (pending && ops < NVMET_TCP_IO_WORK_BUDGET);
1242
1243 /*
1244 * We exhausted our budget, requeue ourselves
1245 */
1246 if (pending)
Mark Wunderlichf7790e52020-08-28 01:00:53 +00001247 queue_work_on(queue_cpu(queue), nvmet_tcp_wq, &queue->io_work);
Sagi Grimberg872d26a2018-12-03 17:52:15 -08001248}
1249
1250static int nvmet_tcp_alloc_cmd(struct nvmet_tcp_queue *queue,
1251 struct nvmet_tcp_cmd *c)
1252{
1253 u8 hdgst = nvmet_tcp_hdgst_len(queue);
1254
1255 c->queue = queue;
1256 c->req.port = queue->port->nport;
1257
1258 c->cmd_pdu = page_frag_alloc(&queue->pf_cache,
1259 sizeof(*c->cmd_pdu) + hdgst, GFP_KERNEL | __GFP_ZERO);
1260 if (!c->cmd_pdu)
1261 return -ENOMEM;
1262 c->req.cmd = &c->cmd_pdu->cmd;
1263
1264 c->rsp_pdu = page_frag_alloc(&queue->pf_cache,
1265 sizeof(*c->rsp_pdu) + hdgst, GFP_KERNEL | __GFP_ZERO);
1266 if (!c->rsp_pdu)
1267 goto out_free_cmd;
Max Gurtovoyfc6c9732019-04-08 18:39:59 +03001268 c->req.cqe = &c->rsp_pdu->cqe;
Sagi Grimberg872d26a2018-12-03 17:52:15 -08001269
1270 c->data_pdu = page_frag_alloc(&queue->pf_cache,
1271 sizeof(*c->data_pdu) + hdgst, GFP_KERNEL | __GFP_ZERO);
1272 if (!c->data_pdu)
1273 goto out_free_rsp;
1274
1275 c->r2t_pdu = page_frag_alloc(&queue->pf_cache,
1276 sizeof(*c->r2t_pdu) + hdgst, GFP_KERNEL | __GFP_ZERO);
1277 if (!c->r2t_pdu)
1278 goto out_free_data;
1279
1280 c->recv_msg.msg_flags = MSG_DONTWAIT | MSG_NOSIGNAL;
1281
1282 list_add_tail(&c->entry, &queue->free_list);
1283
1284 return 0;
1285out_free_data:
1286 page_frag_free(c->data_pdu);
1287out_free_rsp:
1288 page_frag_free(c->rsp_pdu);
1289out_free_cmd:
1290 page_frag_free(c->cmd_pdu);
1291 return -ENOMEM;
1292}
1293
1294static void nvmet_tcp_free_cmd(struct nvmet_tcp_cmd *c)
1295{
1296 page_frag_free(c->r2t_pdu);
1297 page_frag_free(c->data_pdu);
1298 page_frag_free(c->rsp_pdu);
1299 page_frag_free(c->cmd_pdu);
1300}
1301
1302static int nvmet_tcp_alloc_cmds(struct nvmet_tcp_queue *queue)
1303{
1304 struct nvmet_tcp_cmd *cmds;
1305 int i, ret = -EINVAL, nr_cmds = queue->nr_cmds;
1306
1307 cmds = kcalloc(nr_cmds, sizeof(struct nvmet_tcp_cmd), GFP_KERNEL);
1308 if (!cmds)
1309 goto out;
1310
1311 for (i = 0; i < nr_cmds; i++) {
1312 ret = nvmet_tcp_alloc_cmd(queue, cmds + i);
1313 if (ret)
1314 goto out_free;
1315 }
1316
1317 queue->cmds = cmds;
1318
1319 return 0;
1320out_free:
1321 while (--i >= 0)
1322 nvmet_tcp_free_cmd(cmds + i);
1323 kfree(cmds);
1324out:
1325 return ret;
1326}
1327
1328static void nvmet_tcp_free_cmds(struct nvmet_tcp_queue *queue)
1329{
1330 struct nvmet_tcp_cmd *cmds = queue->cmds;
1331 int i;
1332
1333 for (i = 0; i < queue->nr_cmds; i++)
1334 nvmet_tcp_free_cmd(cmds + i);
1335
1336 nvmet_tcp_free_cmd(&queue->connect);
1337 kfree(cmds);
1338}
1339
1340static void nvmet_tcp_restore_socket_callbacks(struct nvmet_tcp_queue *queue)
1341{
1342 struct socket *sock = queue->sock;
1343
1344 write_lock_bh(&sock->sk->sk_callback_lock);
1345 sock->sk->sk_data_ready = queue->data_ready;
1346 sock->sk->sk_state_change = queue->state_change;
1347 sock->sk->sk_write_space = queue->write_space;
1348 sock->sk->sk_user_data = NULL;
1349 write_unlock_bh(&sock->sk->sk_callback_lock);
1350}
1351
1352static void nvmet_tcp_finish_cmd(struct nvmet_tcp_cmd *cmd)
1353{
1354 nvmet_req_uninit(&cmd->req);
1355 nvmet_tcp_unmap_pdu_iovec(cmd);
Sagi Grimberg35d1a932019-08-02 20:29:11 -07001356 kfree(cmd->iov);
Sagi Grimberg30f27d52019-09-13 10:36:40 -07001357 sgl_free(cmd->req.sg);
Sagi Grimberg872d26a2018-12-03 17:52:15 -08001358}
1359
1360static void nvmet_tcp_uninit_data_in_cmds(struct nvmet_tcp_queue *queue)
1361{
1362 struct nvmet_tcp_cmd *cmd = queue->cmds;
1363 int i;
1364
1365 for (i = 0; i < queue->nr_cmds; i++, cmd++) {
1366 if (nvmet_tcp_need_data_in(cmd))
1367 nvmet_tcp_finish_cmd(cmd);
1368 }
1369
1370 if (!queue->nr_cmds && nvmet_tcp_need_data_in(&queue->connect)) {
1371 /* failed in connect */
1372 nvmet_tcp_finish_cmd(&queue->connect);
1373 }
1374}
1375
1376static void nvmet_tcp_release_queue_work(struct work_struct *w)
1377{
1378 struct nvmet_tcp_queue *queue =
1379 container_of(w, struct nvmet_tcp_queue, release_work);
1380
1381 mutex_lock(&nvmet_tcp_queue_mutex);
1382 list_del_init(&queue->queue_list);
1383 mutex_unlock(&nvmet_tcp_queue_mutex);
1384
1385 nvmet_tcp_restore_socket_callbacks(queue);
1386 flush_work(&queue->io_work);
1387
1388 nvmet_tcp_uninit_data_in_cmds(queue);
1389 nvmet_sq_destroy(&queue->nvme_sq);
1390 cancel_work_sync(&queue->io_work);
1391 sock_release(queue->sock);
1392 nvmet_tcp_free_cmds(queue);
1393 if (queue->hdr_digest || queue->data_digest)
1394 nvmet_tcp_free_crypto(queue);
1395 ida_simple_remove(&nvmet_tcp_queue_ida, queue->idx);
1396
1397 kfree(queue);
1398}
1399
1400static void nvmet_tcp_data_ready(struct sock *sk)
1401{
1402 struct nvmet_tcp_queue *queue;
1403
1404 read_lock_bh(&sk->sk_callback_lock);
1405 queue = sk->sk_user_data;
1406 if (likely(queue))
Mark Wunderlichf7790e52020-08-28 01:00:53 +00001407 queue_work_on(queue_cpu(queue), nvmet_tcp_wq, &queue->io_work);
Sagi Grimberg872d26a2018-12-03 17:52:15 -08001408 read_unlock_bh(&sk->sk_callback_lock);
1409}
1410
1411static void nvmet_tcp_write_space(struct sock *sk)
1412{
1413 struct nvmet_tcp_queue *queue;
1414
1415 read_lock_bh(&sk->sk_callback_lock);
1416 queue = sk->sk_user_data;
1417 if (unlikely(!queue))
1418 goto out;
1419
1420 if (unlikely(queue->state == NVMET_TCP_Q_CONNECTING)) {
1421 queue->write_space(sk);
1422 goto out;
1423 }
1424
1425 if (sk_stream_is_writeable(sk)) {
1426 clear_bit(SOCK_NOSPACE, &sk->sk_socket->flags);
Mark Wunderlichf7790e52020-08-28 01:00:53 +00001427 queue_work_on(queue_cpu(queue), nvmet_tcp_wq, &queue->io_work);
Sagi Grimberg872d26a2018-12-03 17:52:15 -08001428 }
1429out:
1430 read_unlock_bh(&sk->sk_callback_lock);
1431}
1432
1433static void nvmet_tcp_state_change(struct sock *sk)
1434{
1435 struct nvmet_tcp_queue *queue;
1436
1437 write_lock_bh(&sk->sk_callback_lock);
1438 queue = sk->sk_user_data;
1439 if (!queue)
1440 goto done;
1441
1442 switch (sk->sk_state) {
1443 case TCP_FIN_WAIT1:
1444 case TCP_CLOSE_WAIT:
1445 case TCP_CLOSE:
1446 /* FALLTHRU */
1447 sk->sk_user_data = NULL;
1448 nvmet_tcp_schedule_release_queue(queue);
1449 break;
1450 default:
1451 pr_warn("queue %d unhandled state %d\n",
1452 queue->idx, sk->sk_state);
1453 }
1454done:
1455 write_unlock_bh(&sk->sk_callback_lock);
1456}
1457
1458static int nvmet_tcp_set_queue_sock(struct nvmet_tcp_queue *queue)
1459{
1460 struct socket *sock = queue->sock;
Israel Rukshin89275a92019-08-18 12:08:55 +03001461 struct inet_sock *inet = inet_sk(sock->sk);
Sagi Grimberg872d26a2018-12-03 17:52:15 -08001462 int ret;
1463
1464 ret = kernel_getsockname(sock,
1465 (struct sockaddr *)&queue->sockaddr);
1466 if (ret < 0)
1467 return ret;
1468
1469 ret = kernel_getpeername(sock,
1470 (struct sockaddr *)&queue->sockaddr_peer);
1471 if (ret < 0)
1472 return ret;
1473
1474 /*
1475 * Cleanup whatever is sitting in the TCP transmit queue on socket
1476 * close. This is done to prevent stale data from being sent should
1477 * the network connection be restored before TCP times out.
1478 */
Christoph Hellwigc4335942020-05-28 07:12:10 +02001479 sock_no_linger(sock->sk);
Sagi Grimberg872d26a2018-12-03 17:52:15 -08001480
Christoph Hellwig6e434962020-05-28 07:12:11 +02001481 if (so_priority > 0)
1482 sock_set_priority(sock->sk, so_priority);
Wunderlich, Mark43cc6682020-01-16 00:46:16 +00001483
Israel Rukshin89275a92019-08-18 12:08:55 +03001484 /* Set socket type of service */
Christoph Hellwig6ebf71b2020-05-28 07:12:26 +02001485 if (inet->rcv_tos > 0)
1486 ip_sock_set_tos(sock->sk, inet->rcv_tos);
Israel Rukshin89275a92019-08-18 12:08:55 +03001487
Sagi Grimberg0fbcfb02021-02-05 11:47:25 -08001488 ret = 0;
Sagi Grimberg872d26a2018-12-03 17:52:15 -08001489 write_lock_bh(&sock->sk->sk_callback_lock);
Sagi Grimberg0fbcfb02021-02-05 11:47:25 -08001490 if (sock->sk->sk_state != TCP_ESTABLISHED) {
1491 /*
1492 * If the socket is already closing, don't even start
1493 * consuming it
1494 */
1495 ret = -ENOTCONN;
1496 } else {
1497 sock->sk->sk_user_data = queue;
1498 queue->data_ready = sock->sk->sk_data_ready;
1499 sock->sk->sk_data_ready = nvmet_tcp_data_ready;
1500 queue->state_change = sock->sk->sk_state_change;
1501 sock->sk->sk_state_change = nvmet_tcp_state_change;
1502 queue->write_space = sock->sk->sk_write_space;
1503 sock->sk->sk_write_space = nvmet_tcp_write_space;
1504 queue_work_on(queue_cpu(queue), nvmet_tcp_wq, &queue->io_work);
1505 }
Sagi Grimberg872d26a2018-12-03 17:52:15 -08001506 write_unlock_bh(&sock->sk->sk_callback_lock);
1507
Sagi Grimberg0fbcfb02021-02-05 11:47:25 -08001508 return ret;
Sagi Grimberg872d26a2018-12-03 17:52:15 -08001509}
1510
1511static int nvmet_tcp_alloc_queue(struct nvmet_tcp_port *port,
1512 struct socket *newsock)
1513{
1514 struct nvmet_tcp_queue *queue;
1515 int ret;
1516
1517 queue = kzalloc(sizeof(*queue), GFP_KERNEL);
1518 if (!queue)
1519 return -ENOMEM;
1520
1521 INIT_WORK(&queue->release_work, nvmet_tcp_release_queue_work);
1522 INIT_WORK(&queue->io_work, nvmet_tcp_io_work);
1523 queue->sock = newsock;
1524 queue->port = port;
1525 queue->nr_cmds = 0;
1526 spin_lock_init(&queue->state_lock);
1527 queue->state = NVMET_TCP_Q_CONNECTING;
1528 INIT_LIST_HEAD(&queue->free_list);
1529 init_llist_head(&queue->resp_list);
1530 INIT_LIST_HEAD(&queue->resp_send_list);
1531
1532 queue->idx = ida_simple_get(&nvmet_tcp_queue_ida, 0, 0, GFP_KERNEL);
1533 if (queue->idx < 0) {
1534 ret = queue->idx;
1535 goto out_free_queue;
1536 }
1537
1538 ret = nvmet_tcp_alloc_cmd(queue, &queue->connect);
1539 if (ret)
1540 goto out_ida_remove;
1541
1542 ret = nvmet_sq_init(&queue->nvme_sq);
1543 if (ret)
1544 goto out_free_connect;
1545
Sagi Grimberg872d26a2018-12-03 17:52:15 -08001546 nvmet_prepare_receive_pdu(queue);
1547
1548 mutex_lock(&nvmet_tcp_queue_mutex);
1549 list_add_tail(&queue->queue_list, &nvmet_tcp_queue_list);
1550 mutex_unlock(&nvmet_tcp_queue_mutex);
1551
1552 ret = nvmet_tcp_set_queue_sock(queue);
1553 if (ret)
1554 goto out_destroy_sq;
1555
Sagi Grimberg872d26a2018-12-03 17:52:15 -08001556 return 0;
1557out_destroy_sq:
1558 mutex_lock(&nvmet_tcp_queue_mutex);
1559 list_del_init(&queue->queue_list);
1560 mutex_unlock(&nvmet_tcp_queue_mutex);
1561 nvmet_sq_destroy(&queue->nvme_sq);
1562out_free_connect:
1563 nvmet_tcp_free_cmd(&queue->connect);
1564out_ida_remove:
1565 ida_simple_remove(&nvmet_tcp_queue_ida, queue->idx);
1566out_free_queue:
1567 kfree(queue);
1568 return ret;
1569}
1570
1571static void nvmet_tcp_accept_work(struct work_struct *w)
1572{
1573 struct nvmet_tcp_port *port =
1574 container_of(w, struct nvmet_tcp_port, accept_work);
1575 struct socket *newsock;
1576 int ret;
1577
1578 while (true) {
1579 ret = kernel_accept(port->sock, &newsock, O_NONBLOCK);
1580 if (ret < 0) {
1581 if (ret != -EAGAIN)
1582 pr_warn("failed to accept err=%d\n", ret);
1583 return;
1584 }
1585 ret = nvmet_tcp_alloc_queue(port, newsock);
1586 if (ret) {
1587 pr_err("failed to allocate queue\n");
1588 sock_release(newsock);
1589 }
1590 }
1591}
1592
1593static void nvmet_tcp_listen_data_ready(struct sock *sk)
1594{
1595 struct nvmet_tcp_port *port;
1596
1597 read_lock_bh(&sk->sk_callback_lock);
1598 port = sk->sk_user_data;
1599 if (!port)
1600 goto out;
1601
1602 if (sk->sk_state == TCP_LISTEN)
1603 schedule_work(&port->accept_work);
1604out:
1605 read_unlock_bh(&sk->sk_callback_lock);
1606}
1607
1608static int nvmet_tcp_add_port(struct nvmet_port *nport)
1609{
1610 struct nvmet_tcp_port *port;
1611 __kernel_sa_family_t af;
Christoph Hellwig12abc5e2020-05-28 07:12:19 +02001612 int ret;
Sagi Grimberg872d26a2018-12-03 17:52:15 -08001613
1614 port = kzalloc(sizeof(*port), GFP_KERNEL);
1615 if (!port)
1616 return -ENOMEM;
1617
1618 switch (nport->disc_addr.adrfam) {
1619 case NVMF_ADDR_FAMILY_IP4:
1620 af = AF_INET;
1621 break;
1622 case NVMF_ADDR_FAMILY_IP6:
1623 af = AF_INET6;
1624 break;
1625 default:
1626 pr_err("address family %d not supported\n",
1627 nport->disc_addr.adrfam);
1628 ret = -EINVAL;
1629 goto err_port;
1630 }
1631
1632 ret = inet_pton_with_scope(&init_net, af, nport->disc_addr.traddr,
1633 nport->disc_addr.trsvcid, &port->addr);
1634 if (ret) {
1635 pr_err("malformed ip/port passed: %s:%s\n",
1636 nport->disc_addr.traddr, nport->disc_addr.trsvcid);
1637 goto err_port;
1638 }
1639
1640 port->nport = nport;
Sagi Grimberg872d26a2018-12-03 17:52:15 -08001641 INIT_WORK(&port->accept_work, nvmet_tcp_accept_work);
1642 if (port->nport->inline_data_size < 0)
1643 port->nport->inline_data_size = NVMET_TCP_DEF_INLINE_DATA_SIZE;
1644
1645 ret = sock_create(port->addr.ss_family, SOCK_STREAM,
1646 IPPROTO_TCP, &port->sock);
1647 if (ret) {
1648 pr_err("failed to create a socket\n");
1649 goto err_port;
1650 }
1651
1652 port->sock->sk->sk_user_data = port;
1653 port->data_ready = port->sock->sk->sk_data_ready;
1654 port->sock->sk->sk_data_ready = nvmet_tcp_listen_data_ready;
Christoph Hellwigb58f0e82020-05-28 07:12:09 +02001655 sock_set_reuseaddr(port->sock->sk);
Christoph Hellwig12abc5e2020-05-28 07:12:19 +02001656 tcp_sock_set_nodelay(port->sock->sk);
Christoph Hellwig6e434962020-05-28 07:12:11 +02001657 if (so_priority > 0)
1658 sock_set_priority(port->sock->sk, so_priority);
Wunderlich, Mark43cc6682020-01-16 00:46:16 +00001659
Sagi Grimberg872d26a2018-12-03 17:52:15 -08001660 ret = kernel_bind(port->sock, (struct sockaddr *)&port->addr,
1661 sizeof(port->addr));
1662 if (ret) {
1663 pr_err("failed to bind port socket %d\n", ret);
1664 goto err_sock;
1665 }
1666
1667 ret = kernel_listen(port->sock, 128);
1668 if (ret) {
1669 pr_err("failed to listen %d on port sock\n", ret);
1670 goto err_sock;
1671 }
1672
1673 nport->priv = port;
1674 pr_info("enabling port %d (%pISpc)\n",
1675 le16_to_cpu(nport->disc_addr.portid), &port->addr);
1676
1677 return 0;
1678
1679err_sock:
1680 sock_release(port->sock);
1681err_port:
1682 kfree(port);
1683 return ret;
1684}
1685
1686static void nvmet_tcp_remove_port(struct nvmet_port *nport)
1687{
1688 struct nvmet_tcp_port *port = nport->priv;
1689
1690 write_lock_bh(&port->sock->sk->sk_callback_lock);
1691 port->sock->sk->sk_data_ready = port->data_ready;
1692 port->sock->sk->sk_user_data = NULL;
1693 write_unlock_bh(&port->sock->sk->sk_callback_lock);
1694 cancel_work_sync(&port->accept_work);
1695
1696 sock_release(port->sock);
1697 kfree(port);
1698}
1699
1700static void nvmet_tcp_delete_ctrl(struct nvmet_ctrl *ctrl)
1701{
1702 struct nvmet_tcp_queue *queue;
1703
1704 mutex_lock(&nvmet_tcp_queue_mutex);
1705 list_for_each_entry(queue, &nvmet_tcp_queue_list, queue_list)
1706 if (queue->nvme_sq.ctrl == ctrl)
1707 kernel_sock_shutdown(queue->sock, SHUT_RDWR);
1708 mutex_unlock(&nvmet_tcp_queue_mutex);
1709}
1710
1711static u16 nvmet_tcp_install_queue(struct nvmet_sq *sq)
1712{
1713 struct nvmet_tcp_queue *queue =
1714 container_of(sq, struct nvmet_tcp_queue, nvme_sq);
1715
1716 if (sq->qid == 0) {
1717 /* Let inflight controller teardown complete */
1718 flush_scheduled_work();
1719 }
1720
1721 queue->nr_cmds = sq->size * 2;
1722 if (nvmet_tcp_alloc_cmds(queue))
1723 return NVME_SC_INTERNAL;
1724 return 0;
1725}
1726
1727static void nvmet_tcp_disc_port_addr(struct nvmet_req *req,
1728 struct nvmet_port *nport, char *traddr)
1729{
1730 struct nvmet_tcp_port *port = nport->priv;
1731
1732 if (inet_addr_is_any((struct sockaddr *)&port->addr)) {
1733 struct nvmet_tcp_cmd *cmd =
1734 container_of(req, struct nvmet_tcp_cmd, req);
1735 struct nvmet_tcp_queue *queue = cmd->queue;
1736
1737 sprintf(traddr, "%pISc", (struct sockaddr *)&queue->sockaddr);
1738 } else {
1739 memcpy(traddr, nport->disc_addr.traddr, NVMF_TRADDR_SIZE);
1740 }
1741}
1742
Max Gurtovoya40aae62020-06-01 20:05:20 +03001743static const struct nvmet_fabrics_ops nvmet_tcp_ops = {
Sagi Grimberg872d26a2018-12-03 17:52:15 -08001744 .owner = THIS_MODULE,
1745 .type = NVMF_TRTYPE_TCP,
1746 .msdbd = 1,
Sagi Grimberg872d26a2018-12-03 17:52:15 -08001747 .add_port = nvmet_tcp_add_port,
1748 .remove_port = nvmet_tcp_remove_port,
1749 .queue_response = nvmet_tcp_queue_response,
1750 .delete_ctrl = nvmet_tcp_delete_ctrl,
1751 .install_queue = nvmet_tcp_install_queue,
1752 .disc_traddr = nvmet_tcp_disc_port_addr,
1753};
1754
1755static int __init nvmet_tcp_init(void)
1756{
1757 int ret;
1758
1759 nvmet_tcp_wq = alloc_workqueue("nvmet_tcp_wq", WQ_HIGHPRI, 0);
1760 if (!nvmet_tcp_wq)
1761 return -ENOMEM;
1762
1763 ret = nvmet_register_transport(&nvmet_tcp_ops);
1764 if (ret)
1765 goto err;
1766
1767 return 0;
1768err:
1769 destroy_workqueue(nvmet_tcp_wq);
1770 return ret;
1771}
1772
1773static void __exit nvmet_tcp_exit(void)
1774{
1775 struct nvmet_tcp_queue *queue;
1776
1777 nvmet_unregister_transport(&nvmet_tcp_ops);
1778
1779 flush_scheduled_work();
1780 mutex_lock(&nvmet_tcp_queue_mutex);
1781 list_for_each_entry(queue, &nvmet_tcp_queue_list, queue_list)
1782 kernel_sock_shutdown(queue->sock, SHUT_RDWR);
1783 mutex_unlock(&nvmet_tcp_queue_mutex);
1784 flush_scheduled_work();
1785
1786 destroy_workqueue(nvmet_tcp_wq);
1787}
1788
1789module_init(nvmet_tcp_init);
1790module_exit(nvmet_tcp_exit);
1791
1792MODULE_LICENSE("GPL v2");
1793MODULE_ALIAS("nvmet-transport-3"); /* 3 == NVMF_TRTYPE_TCP */