1// SPDX-License-Identifier: GPL-2.0
2/*
3 * NVMe over Fabrics TCP target.
4 * Copyright (c) 2018 Lightbits Labs. All rights reserved.
5 */
6#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
7#include <linux/module.h>
8#include <linux/init.h>
9#include <linux/slab.h>
10#include <linux/err.h>
11#include <linux/nvme-tcp.h>
12#include <net/sock.h>
13#include <net/tcp.h>
14#include <linux/inet.h>
15#include <linux/llist.h>
16#include <crypto/hash.h>
17
18#include "nvmet.h"
19
20#define NVMET_TCP_DEF_INLINE_DATA_SIZE (4 * PAGE_SIZE)
21
22/* Define the socket priority to use for connections where it is desirable
23 * that the NIC consider performing optimized packet processing or filtering.
24 * A non-zero value is sufficient to indicate general consideration of any
25 * possible optimization. Making it a module param allows for alternative
26 * values that may be unique to some NIC implementations.
27 */
28static int so_priority;
29module_param(so_priority, int, 0644);
30MODULE_PARM_DESC(so_priority, "nvmet tcp socket optimize priority");
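/*
 * Example (illustrative): the priority can be given at load time, e.g.
 * "modprobe nvmet-tcp so_priority=4", or adjusted later through
 * /sys/module/nvmet_tcp/parameters/so_priority, since the parameter is
 * created with 0644 permissions. A changed value only applies to ports
 * added and connections accepted after the change, as the code below reads
 * it when a port is added or a queue socket is accepted.
 */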
31
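/*
 * The budgets below bound how many receive and send operations are done
 * per pass of the io_work loop; NVMET_TCP_IO_WORK_BUDGET caps the total
 * per worker invocation, after which io_work requeues itself if work
 * remains, so one busy queue cannot monopolize the workqueue.
 */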
32#define NVMET_TCP_RECV_BUDGET 8
33#define NVMET_TCP_SEND_BUDGET 8
34#define NVMET_TCP_IO_WORK_BUDGET 64
35
36enum nvmet_tcp_send_state {
37 NVMET_TCP_SEND_DATA_PDU,
38 NVMET_TCP_SEND_DATA,
39 NVMET_TCP_SEND_R2T,
40 NVMET_TCP_SEND_DDGST,
41 NVMET_TCP_SEND_RESPONSE
42};
43
44enum nvmet_tcp_recv_state {
45 NVMET_TCP_RECV_PDU,
46 NVMET_TCP_RECV_DATA,
47 NVMET_TCP_RECV_DDGST,
48 NVMET_TCP_RECV_ERR,
49};
50
51enum {
52 NVMET_TCP_F_INIT_FAILED = (1 << 0),
53};
54
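/*
 * Per-command context. Each command tracks its nvmet request, the
 * preallocated PDUs used to answer it (C2H data, R2T, response), receive
 * and send progress counters, and the kvec used to receive inline or H2C
 * data directly into the request's scatterlist pages.
 */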
55struct nvmet_tcp_cmd {
56 struct nvmet_tcp_queue *queue;
57 struct nvmet_req req;
58
59 struct nvme_tcp_cmd_pdu *cmd_pdu;
60 struct nvme_tcp_rsp_pdu *rsp_pdu;
61 struct nvme_tcp_data_pdu *data_pdu;
62 struct nvme_tcp_r2t_pdu *r2t_pdu;
63
64 u32 rbytes_done;
65 u32 wbytes_done;
66
67 u32 pdu_len;
68 u32 pdu_recv;
69 int sg_idx;
70 int nr_mapped;
71 struct msghdr recv_msg;
72 struct kvec *iov;
73 u32 flags;
74
75 struct list_head entry;
76 struct llist_node lentry;
77
78 /* send state */
79 u32 offset;
80 struct scatterlist *cur_sg;
81 enum nvmet_tcp_send_state state;
82
83 __le32 exp_ddgst;
84 __le32 recv_ddgst;
85};
86
87enum nvmet_tcp_queue_state {
88 NVMET_TCP_Q_CONNECTING,
89 NVMET_TCP_Q_LIVE,
90 NVMET_TCP_Q_DISCONNECTING,
91};
92
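/*
 * Per-connection context: one instance per accepted TCP socket. It owns
 * the nvmet submission/completion queues, the command pool, the send-side
 * response lists, the receive state machine, the optional CRC32C digest
 * requests, and the saved socket callbacks that are restored on release.
 */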
93struct nvmet_tcp_queue {
94 struct socket *sock;
95 struct nvmet_tcp_port *port;
96 struct work_struct io_work;
97 int cpu;
98 struct nvmet_cq nvme_cq;
99 struct nvmet_sq nvme_sq;
100
101 /* send state */
102 struct nvmet_tcp_cmd *cmds;
103 unsigned int nr_cmds;
104 struct list_head free_list;
105 struct llist_head resp_list;
106 struct list_head resp_send_list;
107 int send_list_len;
108 struct nvmet_tcp_cmd *snd_cmd;
109
110 /* recv state */
111 int offset;
112 int left;
113 enum nvmet_tcp_recv_state rcv_state;
114 struct nvmet_tcp_cmd *cmd;
115 union nvme_tcp_pdu pdu;
116
117 /* digest state */
118 bool hdr_digest;
119 bool data_digest;
120 struct ahash_request *snd_hash;
121 struct ahash_request *rcv_hash;
122
123 spinlock_t state_lock;
124 enum nvmet_tcp_queue_state state;
125
126 struct sockaddr_storage sockaddr;
127 struct sockaddr_storage sockaddr_peer;
128 struct work_struct release_work;
129
130 int idx;
131 struct list_head queue_list;
132
133 struct nvmet_tcp_cmd connect;
134
135 struct page_frag_cache pf_cache;
136
137 void (*data_ready)(struct sock *);
138 void (*state_change)(struct sock *);
139 void (*write_space)(struct sock *);
140};
141
142struct nvmet_tcp_port {
143 struct socket *sock;
144 struct work_struct accept_work;
145 struct nvmet_port *nport;
146 struct sockaddr_storage addr;
147 int last_cpu;
148 void (*data_ready)(struct sock *);
149};
150
151static DEFINE_IDA(nvmet_tcp_queue_ida);
152static LIST_HEAD(nvmet_tcp_queue_list);
153static DEFINE_MUTEX(nvmet_tcp_queue_mutex);
154
155static struct workqueue_struct *nvmet_tcp_wq;
156static struct nvmet_fabrics_ops nvmet_tcp_ops;
157static void nvmet_tcp_free_cmd(struct nvmet_tcp_cmd *c);
158static void nvmet_tcp_finish_cmd(struct nvmet_tcp_cmd *cmd);
159
160static inline u16 nvmet_tcp_cmd_tag(struct nvmet_tcp_queue *queue,
161 struct nvmet_tcp_cmd *cmd)
162{
163 return cmd - queue->cmds;
164}
165
166static inline bool nvmet_tcp_has_data_in(struct nvmet_tcp_cmd *cmd)
167{
168 return nvme_is_write(cmd->req.cmd) &&
169 cmd->rbytes_done < cmd->req.transfer_len;
170}
171
172static inline bool nvmet_tcp_need_data_in(struct nvmet_tcp_cmd *cmd)
173{
174 return nvmet_tcp_has_data_in(cmd) && !cmd->req.cqe->status;
175}
176
177static inline bool nvmet_tcp_need_data_out(struct nvmet_tcp_cmd *cmd)
178{
179 return !nvme_is_write(cmd->req.cmd) &&
180 cmd->req.transfer_len > 0 &&
181 !cmd->req.cqe->status;
182}
183
184static inline bool nvmet_tcp_has_inline_data(struct nvmet_tcp_cmd *cmd)
185{
186 return nvme_is_write(cmd->req.cmd) && cmd->pdu_len &&
187 !cmd->rbytes_done;
188}
189
190static inline struct nvmet_tcp_cmd *
191nvmet_tcp_get_cmd(struct nvmet_tcp_queue *queue)
192{
193 struct nvmet_tcp_cmd *cmd;
194
195 cmd = list_first_entry_or_null(&queue->free_list,
196 struct nvmet_tcp_cmd, entry);
197 if (!cmd)
198 return NULL;
199 list_del_init(&cmd->entry);
200
201 cmd->rbytes_done = cmd->wbytes_done = 0;
202 cmd->pdu_len = 0;
203 cmd->pdu_recv = 0;
204 cmd->iov = NULL;
205 cmd->flags = 0;
206 return cmd;
207}
208
209static inline void nvmet_tcp_put_cmd(struct nvmet_tcp_cmd *cmd)
210{
211 if (unlikely(cmd == &cmd->queue->connect))
212 return;
213
214 list_add_tail(&cmd->entry, &cmd->queue->free_list);
215}
216
217static inline u8 nvmet_tcp_hdgst_len(struct nvmet_tcp_queue *queue)
218{
219 return queue->hdr_digest ? NVME_TCP_DIGEST_LENGTH : 0;
220}
221
222static inline u8 nvmet_tcp_ddgst_len(struct nvmet_tcp_queue *queue)
223{
224 return queue->data_digest ? NVME_TCP_DIGEST_LENGTH : 0;
225}
226
227static inline void nvmet_tcp_hdgst(struct ahash_request *hash,
228 void *pdu, size_t len)
229{
230 struct scatterlist sg;
231
232 sg_init_one(&sg, pdu, len);
233 ahash_request_set_crypt(hash, &sg, pdu + len, len);
234 crypto_ahash_digest(hash);
235}
236
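/*
 * Header digest verification: save the CRC32C that follows the PDU header,
 * recompute it over the received header (which overwrites the on-wire
 * value in place), then compare the two.
 */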
237static int nvmet_tcp_verify_hdgst(struct nvmet_tcp_queue *queue,
238 void *pdu, size_t len)
239{
240 struct nvme_tcp_hdr *hdr = pdu;
241 __le32 recv_digest;
242 __le32 exp_digest;
243
244 if (unlikely(!(hdr->flags & NVME_TCP_F_HDGST))) {
245 pr_err("queue %d: header digest enabled but no header digest\n",
246 queue->idx);
247 return -EPROTO;
248 }
249
250 recv_digest = *(__le32 *)(pdu + hdr->hlen);
251 nvmet_tcp_hdgst(queue->rcv_hash, pdu, len);
252 exp_digest = *(__le32 *)(pdu + hdr->hlen);
253 if (recv_digest != exp_digest) {
254 pr_err("queue %d: header digest error: recv %#x expected %#x\n",
255 queue->idx, le32_to_cpu(recv_digest),
256 le32_to_cpu(exp_digest));
257 return -EPROTO;
258 }
259
260 return 0;
261}
262
263static int nvmet_tcp_check_ddgst(struct nvmet_tcp_queue *queue, void *pdu)
264{
265 struct nvme_tcp_hdr *hdr = pdu;
266 u8 digest_len = nvmet_tcp_hdgst_len(queue);
267 u32 len;
268
269 len = le32_to_cpu(hdr->plen) - hdr->hlen -
270 (hdr->flags & NVME_TCP_F_HDGST ? digest_len : 0);
271
272 if (unlikely(len && !(hdr->flags & NVME_TCP_F_DDGST))) {
273 pr_err("queue %d: data digest flag is cleared\n", queue->idx);
274 return -EPROTO;
275 }
276
277 return 0;
278}
279
280static void nvmet_tcp_unmap_pdu_iovec(struct nvmet_tcp_cmd *cmd)
281{
282 struct scatterlist *sg;
283 int i;
284
285 sg = &cmd->req.sg[cmd->sg_idx];
286
287 for (i = 0; i < cmd->nr_mapped; i++)
288 kunmap(sg_page(&sg[i]));
289}
290
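/*
 * Map the scatterlist pages backing the current PDU's data into a kvec,
 * starting at the command's current receive offset, and point recv_msg at
 * it so sock_recvmsg() copies the payload straight into the request's
 * buffers.
 */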
291static void nvmet_tcp_map_pdu_iovec(struct nvmet_tcp_cmd *cmd)
292{
293 struct kvec *iov = cmd->iov;
294 struct scatterlist *sg;
295 u32 length, offset, sg_offset;
296
297 length = cmd->pdu_len;
298 cmd->nr_mapped = DIV_ROUND_UP(length, PAGE_SIZE);
299 offset = cmd->rbytes_done;
300 cmd->sg_idx = DIV_ROUND_UP(offset, PAGE_SIZE);
301 sg_offset = offset % PAGE_SIZE;
302 sg = &cmd->req.sg[cmd->sg_idx];
303
304 while (length) {
305 u32 iov_len = min_t(u32, length, sg->length - sg_offset);
306
307 iov->iov_base = kmap(sg_page(sg)) + sg->offset + sg_offset;
308 iov->iov_len = iov_len;
309
310 length -= iov_len;
311 sg = sg_next(sg);
312 iov++;
313 }
314
315 iov_iter_kvec(&cmd->recv_msg.msg_iter, READ, cmd->iov,
316 cmd->nr_mapped, cmd->pdu_len);
317}
318
319static void nvmet_tcp_fatal_error(struct nvmet_tcp_queue *queue)
320{
321 queue->rcv_state = NVMET_TCP_RECV_ERR;
322 if (queue->nvme_sq.ctrl)
323 nvmet_ctrl_fatal_error(queue->nvme_sq.ctrl);
324 else
325 kernel_sock_shutdown(queue->sock, SHUT_RDWR);
326}
327
328static int nvmet_tcp_map_data(struct nvmet_tcp_cmd *cmd)
329{
330 struct nvme_sgl_desc *sgl = &cmd->req.cmd->common.dptr.sgl;
331 u32 len = le32_to_cpu(sgl->length);
332
333 if (!len)
334 return 0;
335
336 if (sgl->type == ((NVME_SGL_FMT_DATA_DESC << 4) |
337 NVME_SGL_FMT_OFFSET)) {
338 if (!nvme_is_write(cmd->req.cmd))
339 return NVME_SC_INVALID_FIELD | NVME_SC_DNR;
340
341 if (len > cmd->req.port->inline_data_size)
342 return NVME_SC_SGL_INVALID_OFFSET | NVME_SC_DNR;
343 cmd->pdu_len = len;
344 }
345 cmd->req.transfer_len += len;
346
347 cmd->req.sg = sgl_alloc(len, GFP_KERNEL, &cmd->req.sg_cnt);
348 if (!cmd->req.sg)
349 return NVME_SC_INTERNAL;
350 cmd->cur_sg = cmd->req.sg;
351
352 if (nvmet_tcp_has_data_in(cmd)) {
353 cmd->iov = kmalloc_array(cmd->req.sg_cnt,
354 sizeof(*cmd->iov), GFP_KERNEL);
355 if (!cmd->iov)
356 goto err;
357 }
358
359 return 0;
360err:
361 sgl_free(cmd->req.sg);
362 return NVME_SC_INTERNAL;
363}
364
365static void nvmet_tcp_ddgst(struct ahash_request *hash,
366 struct nvmet_tcp_cmd *cmd)
367{
368 ahash_request_set_crypt(hash, cmd->req.sg,
369 (void *)&cmd->exp_ddgst, cmd->req.transfer_len);
370 crypto_ahash_digest(hash);
371}
372
373static void nvmet_setup_c2h_data_pdu(struct nvmet_tcp_cmd *cmd)
374{
375 struct nvme_tcp_data_pdu *pdu = cmd->data_pdu;
376 struct nvmet_tcp_queue *queue = cmd->queue;
377 u8 hdgst = nvmet_tcp_hdgst_len(cmd->queue);
378 u8 ddgst = nvmet_tcp_ddgst_len(cmd->queue);
379
380 cmd->offset = 0;
381 cmd->state = NVMET_TCP_SEND_DATA_PDU;
382
383 pdu->hdr.type = nvme_tcp_c2h_data;
384 pdu->hdr.flags = NVME_TCP_F_DATA_LAST | (queue->nvme_sq.sqhd_disabled ?
385 NVME_TCP_F_DATA_SUCCESS : 0);
386 pdu->hdr.hlen = sizeof(*pdu);
387 pdu->hdr.pdo = pdu->hdr.hlen + hdgst;
388 pdu->hdr.plen =
389 cpu_to_le32(pdu->hdr.hlen + hdgst +
390 cmd->req.transfer_len + ddgst);
391 pdu->command_id = cmd->req.cqe->command_id;
392 pdu->data_length = cpu_to_le32(cmd->req.transfer_len);
393 pdu->data_offset = cpu_to_le32(cmd->wbytes_done);
394
395 if (queue->data_digest) {
396 pdu->hdr.flags |= NVME_TCP_F_DDGST;
397 nvmet_tcp_ddgst(queue->snd_hash, cmd);
398 }
399
400 if (cmd->queue->hdr_digest) {
401 pdu->hdr.flags |= NVME_TCP_F_HDGST;
402 nvmet_tcp_hdgst(queue->snd_hash, pdu, sizeof(*pdu));
403 }
404}
405
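/*
 * Prepare an R2T PDU asking the host to send the remainder of a write
 * command's data. The transfer tag is simply the command's index in the
 * queue's command array, which lets the H2C data handler find the command
 * again.
 */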
406static void nvmet_setup_r2t_pdu(struct nvmet_tcp_cmd *cmd)
407{
408 struct nvme_tcp_r2t_pdu *pdu = cmd->r2t_pdu;
409 struct nvmet_tcp_queue *queue = cmd->queue;
410 u8 hdgst = nvmet_tcp_hdgst_len(cmd->queue);
411
412 cmd->offset = 0;
413 cmd->state = NVMET_TCP_SEND_R2T;
414
415 pdu->hdr.type = nvme_tcp_r2t;
416 pdu->hdr.flags = 0;
417 pdu->hdr.hlen = sizeof(*pdu);
418 pdu->hdr.pdo = 0;
419 pdu->hdr.plen = cpu_to_le32(pdu->hdr.hlen + hdgst);
420
421 pdu->command_id = cmd->req.cmd->common.command_id;
422 pdu->ttag = nvmet_tcp_cmd_tag(cmd->queue, cmd);
423 pdu->r2t_length = cpu_to_le32(cmd->req.transfer_len - cmd->rbytes_done);
424 pdu->r2t_offset = cpu_to_le32(cmd->rbytes_done);
425 if (cmd->queue->hdr_digest) {
426 pdu->hdr.flags |= NVME_TCP_F_HDGST;
427 nvmet_tcp_hdgst(queue->snd_hash, pdu, sizeof(*pdu));
428 }
429}
430
431static void nvmet_setup_response_pdu(struct nvmet_tcp_cmd *cmd)
432{
433 struct nvme_tcp_rsp_pdu *pdu = cmd->rsp_pdu;
434 struct nvmet_tcp_queue *queue = cmd->queue;
435 u8 hdgst = nvmet_tcp_hdgst_len(cmd->queue);
436
437 cmd->offset = 0;
438 cmd->state = NVMET_TCP_SEND_RESPONSE;
439
440 pdu->hdr.type = nvme_tcp_rsp;
441 pdu->hdr.flags = 0;
442 pdu->hdr.hlen = sizeof(*pdu);
443 pdu->hdr.pdo = 0;
444 pdu->hdr.plen = cpu_to_le32(pdu->hdr.hlen + hdgst);
445 if (cmd->queue->hdr_digest) {
446 pdu->hdr.flags |= NVME_TCP_F_HDGST;
447 nvmet_tcp_hdgst(queue->snd_hash, pdu, sizeof(*pdu));
448 }
449}
450
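/*
 * Completions may be queued from any context onto the lock-free resp_list
 * (see nvmet_tcp_queue_response()); io_work drains that list here into the
 * ordered resp_send_list from which responses are actually transmitted.
 */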
451static void nvmet_tcp_process_resp_list(struct nvmet_tcp_queue *queue)
452{
453 struct llist_node *node;
454
455 node = llist_del_all(&queue->resp_list);
456 if (!node)
457 return;
458
459 while (node) {
460 struct nvmet_tcp_cmd *cmd = llist_entry(node,
461 struct nvmet_tcp_cmd, lentry);
462
463 list_add(&cmd->entry, &queue->resp_send_list);
464 node = node->next;
465 queue->send_list_len++;
466 }
467}
468
469static struct nvmet_tcp_cmd *nvmet_tcp_fetch_cmd(struct nvmet_tcp_queue *queue)
470{
471 queue->snd_cmd = list_first_entry_or_null(&queue->resp_send_list,
472 struct nvmet_tcp_cmd, entry);
473 if (!queue->snd_cmd) {
474 nvmet_tcp_process_resp_list(queue);
475 queue->snd_cmd =
476 list_first_entry_or_null(&queue->resp_send_list,
477 struct nvmet_tcp_cmd, entry);
478 if (unlikely(!queue->snd_cmd))
479 return NULL;
480 }
481
482 list_del_init(&queue->snd_cmd->entry);
483 queue->send_list_len--;
484
485 if (nvmet_tcp_need_data_out(queue->snd_cmd))
486 nvmet_setup_c2h_data_pdu(queue->snd_cmd);
487 else if (nvmet_tcp_need_data_in(queue->snd_cmd))
488 nvmet_setup_r2t_pdu(queue->snd_cmd);
489 else
490 nvmet_setup_response_pdu(queue->snd_cmd);
491
492 return queue->snd_cmd;
493}
494
495static void nvmet_tcp_queue_response(struct nvmet_req *req)
496{
497 struct nvmet_tcp_cmd *cmd =
498 container_of(req, struct nvmet_tcp_cmd, req);
499 struct nvmet_tcp_queue *queue = cmd->queue;
500
501 llist_add(&cmd->lentry, &queue->resp_list);
502 queue_work_on(cmd->queue->cpu, nvmet_tcp_wq, &cmd->queue->io_work);
503}
504
505static int nvmet_try_send_data_pdu(struct nvmet_tcp_cmd *cmd)
506{
507 u8 hdgst = nvmet_tcp_hdgst_len(cmd->queue);
508 int left = sizeof(*cmd->data_pdu) - cmd->offset + hdgst;
509 int ret;
510
511 ret = kernel_sendpage(cmd->queue->sock, virt_to_page(cmd->data_pdu),
512 offset_in_page(cmd->data_pdu) + cmd->offset,
513 left, MSG_DONTWAIT | MSG_MORE);
514 if (ret <= 0)
515 return ret;
516
517 cmd->offset += ret;
518 left -= ret;
519
520 if (left)
521 return -EAGAIN;
522
523 cmd->state = NVMET_TCP_SEND_DATA;
524 cmd->offset = 0;
525 return 1;
526}
527
528static int nvmet_try_send_data(struct nvmet_tcp_cmd *cmd)
529{
530 struct nvmet_tcp_queue *queue = cmd->queue;
531 int ret;
532
533 while (cmd->cur_sg) {
534 struct page *page = sg_page(cmd->cur_sg);
535 u32 left = cmd->cur_sg->length - cmd->offset;
536
537 ret = kernel_sendpage(cmd->queue->sock, page, cmd->offset,
538 left, MSG_DONTWAIT | MSG_MORE);
539 if (ret <= 0)
540 return ret;
541
542 cmd->offset += ret;
543 cmd->wbytes_done += ret;
544
545 /* Done with sg? */
546 if (cmd->offset == cmd->cur_sg->length) {
547 cmd->cur_sg = sg_next(cmd->cur_sg);
548 cmd->offset = 0;
549 }
550 }
551
552 if (queue->data_digest) {
553 cmd->state = NVMET_TCP_SEND_DDGST;
554 cmd->offset = 0;
555 } else {
556 if (queue->nvme_sq.sqhd_disabled) {
557 cmd->queue->snd_cmd = NULL;
558 nvmet_tcp_put_cmd(cmd);
559 } else {
560 nvmet_setup_response_pdu(cmd);
561 }
562 }
563
564 if (queue->nvme_sq.sqhd_disabled) {
565 kfree(cmd->iov);
566 sgl_free(cmd->req.sg);
567 }
568
569 return 1;
570
571}
572
573static int nvmet_try_send_response(struct nvmet_tcp_cmd *cmd,
574 bool last_in_batch)
575{
576 u8 hdgst = nvmet_tcp_hdgst_len(cmd->queue);
577 int left = sizeof(*cmd->rsp_pdu) - cmd->offset + hdgst;
578 int flags = MSG_DONTWAIT;
579 int ret;
580
581 if (!last_in_batch && cmd->queue->send_list_len)
582 flags |= MSG_MORE;
583 else
584 flags |= MSG_EOR;
585
586 ret = kernel_sendpage(cmd->queue->sock, virt_to_page(cmd->rsp_pdu),
587 offset_in_page(cmd->rsp_pdu) + cmd->offset, left, flags);
588 if (ret <= 0)
589 return ret;
590 cmd->offset += ret;
591 left -= ret;
592
593 if (left)
594 return -EAGAIN;
595
596 kfree(cmd->iov);
597 sgl_free(cmd->req.sg);
598 cmd->queue->snd_cmd = NULL;
599 nvmet_tcp_put_cmd(cmd);
600 return 1;
601}
602
603static int nvmet_try_send_r2t(struct nvmet_tcp_cmd *cmd, bool last_in_batch)
604{
605 u8 hdgst = nvmet_tcp_hdgst_len(cmd->queue);
606 int left = sizeof(*cmd->r2t_pdu) - cmd->offset + hdgst;
607 int flags = MSG_DONTWAIT;
608 int ret;
609
610 if (!last_in_batch && cmd->queue->send_list_len)
611 flags |= MSG_MORE;
612 else
613 flags |= MSG_EOR;
614
615 ret = kernel_sendpage(cmd->queue->sock, virt_to_page(cmd->r2t_pdu),
616 offset_in_page(cmd->r2t_pdu) + cmd->offset, left, flags);
617 if (ret <= 0)
618 return ret;
619 cmd->offset += ret;
620 left -= ret;
621
622 if (left)
623 return -EAGAIN;
624
625 cmd->queue->snd_cmd = NULL;
626 return 1;
627}
628
629static int nvmet_try_send_ddgst(struct nvmet_tcp_cmd *cmd, bool last_in_batch)
630{
631 struct nvmet_tcp_queue *queue = cmd->queue;
632 struct msghdr msg = { .msg_flags = MSG_DONTWAIT };
633 struct kvec iov = {
634 .iov_base = &cmd->exp_ddgst + cmd->offset,
635 .iov_len = NVME_TCP_DIGEST_LENGTH - cmd->offset
636 };
637 int ret;
638
639 if (!last_in_batch && cmd->queue->send_list_len)
640 msg.msg_flags |= MSG_MORE;
641
642 ret = kernel_sendmsg(queue->sock, &msg, &iov, 1, iov.iov_len);
643 if (unlikely(ret <= 0))
644 return ret;
645
646 cmd->offset += ret;
647
648 if (queue->nvme_sq.sqhd_disabled) {
649 cmd->queue->snd_cmd = NULL;
650 nvmet_tcp_put_cmd(cmd);
651 } else {
652 nvmet_setup_response_pdu(cmd);
653 }
654 return 1;
655}
656
657static int nvmet_tcp_try_send_one(struct nvmet_tcp_queue *queue,
658 bool last_in_batch)
659{
660 struct nvmet_tcp_cmd *cmd = queue->snd_cmd;
661 int ret = 0;
662
663 if (!cmd || queue->state == NVMET_TCP_Q_DISCONNECTING) {
664 cmd = nvmet_tcp_fetch_cmd(queue);
665 if (unlikely(!cmd))
666 return 0;
667 }
668
669 if (cmd->state == NVMET_TCP_SEND_DATA_PDU) {
670 ret = nvmet_try_send_data_pdu(cmd);
671 if (ret <= 0)
672 goto done_send;
673 }
674
675 if (cmd->state == NVMET_TCP_SEND_DATA) {
676 ret = nvmet_try_send_data(cmd);
677 if (ret <= 0)
678 goto done_send;
679 }
680
681 if (cmd->state == NVMET_TCP_SEND_DDGST) {
682 ret = nvmet_try_send_ddgst(cmd, last_in_batch);
683 if (ret <= 0)
684 goto done_send;
685 }
686
687 if (cmd->state == NVMET_TCP_SEND_R2T) {
688 ret = nvmet_try_send_r2t(cmd, last_in_batch);
689 if (ret <= 0)
690 goto done_send;
691 }
692
693 if (cmd->state == NVMET_TCP_SEND_RESPONSE)
694 ret = nvmet_try_send_response(cmd, last_in_batch);
695
696done_send:
697 if (ret < 0) {
698 if (ret == -EAGAIN)
699 return 0;
700 return ret;
701 }
702
703 return 1;
704}
705
706static int nvmet_tcp_try_send(struct nvmet_tcp_queue *queue,
707 int budget, int *sends)
708{
709 int i, ret = 0;
710
711 for (i = 0; i < budget; i++) {
712 ret = nvmet_tcp_try_send_one(queue, i == budget - 1);
713 if (ret <= 0)
714 break;
715 (*sends)++;
716 }
717
718 return ret;
719}
720
721static void nvmet_prepare_receive_pdu(struct nvmet_tcp_queue *queue)
722{
723 queue->offset = 0;
724 queue->left = sizeof(struct nvme_tcp_hdr);
725 queue->cmd = NULL;
726 queue->rcv_state = NVMET_TCP_RECV_PDU;
727}
728
729static void nvmet_tcp_free_crypto(struct nvmet_tcp_queue *queue)
730{
731 struct crypto_ahash *tfm = crypto_ahash_reqtfm(queue->rcv_hash);
732
733 ahash_request_free(queue->rcv_hash);
734 ahash_request_free(queue->snd_hash);
735 crypto_free_ahash(tfm);
736}
737
738static int nvmet_tcp_alloc_crypto(struct nvmet_tcp_queue *queue)
739{
740 struct crypto_ahash *tfm;
741
742 tfm = crypto_alloc_ahash("crc32c", 0, CRYPTO_ALG_ASYNC);
743 if (IS_ERR(tfm))
744 return PTR_ERR(tfm);
745
746 queue->snd_hash = ahash_request_alloc(tfm, GFP_KERNEL);
747 if (!queue->snd_hash)
748 goto free_tfm;
749 ahash_request_set_callback(queue->snd_hash, 0, NULL, NULL);
750
751 queue->rcv_hash = ahash_request_alloc(tfm, GFP_KERNEL);
752 if (!queue->rcv_hash)
753 goto free_snd_hash;
754 ahash_request_set_callback(queue->rcv_hash, 0, NULL, NULL);
755
756 return 0;
757free_snd_hash:
758 ahash_request_free(queue->snd_hash);
759free_tfm:
760 crypto_free_ahash(tfm);
761 return -ENOMEM;
762}
763
764
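/*
 * Connection setup: validate the host's ICReq (PDU length, PFV, HPDA),
 * record the negotiated header/data digests and allocate the CRC32C
 * transforms if needed, then answer with an ICResp and move the queue to
 * the LIVE state.
 */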
765static int nvmet_tcp_handle_icreq(struct nvmet_tcp_queue *queue)
766{
767 struct nvme_tcp_icreq_pdu *icreq = &queue->pdu.icreq;
768 struct nvme_tcp_icresp_pdu *icresp = &queue->pdu.icresp;
769 struct msghdr msg = {};
770 struct kvec iov;
771 int ret;
772
773 if (le32_to_cpu(icreq->hdr.plen) != sizeof(struct nvme_tcp_icreq_pdu)) {
774 pr_err("bad nvme-tcp pdu length (%d)\n",
775 le32_to_cpu(icreq->hdr.plen));
776 nvmet_tcp_fatal_error(queue);
 return -EPROTO;
777 }
778
779 if (icreq->pfv != NVME_TCP_PFV_1_0) {
780 pr_err("queue %d: bad pfv %d\n", queue->idx, icreq->pfv);
781 return -EPROTO;
782 }
783
784 if (icreq->hpda != 0) {
785 pr_err("queue %d: unsupported hpda %d\n", queue->idx,
786 icreq->hpda);
787 return -EPROTO;
788 }
789
790 queue->hdr_digest = !!(icreq->digest & NVME_TCP_HDR_DIGEST_ENABLE);
791 queue->data_digest = !!(icreq->digest & NVME_TCP_DATA_DIGEST_ENABLE);
792 if (queue->hdr_digest || queue->data_digest) {
793 ret = nvmet_tcp_alloc_crypto(queue);
794 if (ret)
795 return ret;
796 }
797
798 memset(icresp, 0, sizeof(*icresp));
799 icresp->hdr.type = nvme_tcp_icresp;
800 icresp->hdr.hlen = sizeof(*icresp);
801 icresp->hdr.pdo = 0;
802 icresp->hdr.plen = cpu_to_le32(icresp->hdr.hlen);
803 icresp->pfv = cpu_to_le16(NVME_TCP_PFV_1_0);
804 icresp->maxdata = cpu_to_le32(0x400000); /* 4M arbitrary limit */
805 icresp->cpda = 0;
806 if (queue->hdr_digest)
807 icresp->digest |= NVME_TCP_HDR_DIGEST_ENABLE;
808 if (queue->data_digest)
809 icresp->digest |= NVME_TCP_DATA_DIGEST_ENABLE;
810
811 iov.iov_base = icresp;
812 iov.iov_len = sizeof(*icresp);
813 ret = kernel_sendmsg(queue->sock, &msg, &iov, 1, iov.iov_len);
814 if (ret < 0)
815 goto free_crypto;
816
817 queue->state = NVMET_TCP_Q_LIVE;
818 nvmet_prepare_receive_pdu(queue);
819 return 0;
820free_crypto:
821 if (queue->hdr_digest || queue->data_digest)
822 nvmet_tcp_free_crypto(queue);
823 return ret;
824}
825
826static void nvmet_tcp_handle_req_failure(struct nvmet_tcp_queue *queue,
827 struct nvmet_tcp_cmd *cmd, struct nvmet_req *req)
828{
829 size_t data_len = le32_to_cpu(req->cmd->common.dptr.sgl.length);
830 int ret;
831
832 if (!nvme_is_write(cmd->req.cmd) ||
833 data_len > cmd->req.port->inline_data_size) {
834 nvmet_prepare_receive_pdu(queue);
835 return;
836 }
837
838 ret = nvmet_tcp_map_data(cmd);
839 if (unlikely(ret)) {
840 pr_err("queue %d: failed to map data\n", queue->idx);
841 nvmet_tcp_fatal_error(queue);
842 return;
843 }
844
845 queue->rcv_state = NVMET_TCP_RECV_DATA;
846 nvmet_tcp_map_pdu_iovec(cmd);
847 cmd->flags |= NVMET_TCP_F_INIT_FAILED;
848}
849
850static int nvmet_tcp_handle_h2c_data_pdu(struct nvmet_tcp_queue *queue)
851{
852 struct nvme_tcp_data_pdu *data = &queue->pdu.data;
853 struct nvmet_tcp_cmd *cmd;
854
855 cmd = &queue->cmds[data->ttag];
856
857 if (le32_to_cpu(data->data_offset) != cmd->rbytes_done) {
858 pr_err("ttag %u unexpected data offset %u (expected %u)\n",
859 data->ttag, le32_to_cpu(data->data_offset),
860 cmd->rbytes_done);
861 /* FIXME: use path and transport errors */
862 nvmet_req_complete(&cmd->req,
863 NVME_SC_INVALID_FIELD | NVME_SC_DNR);
864 return -EPROTO;
865 }
866
867 cmd->pdu_len = le32_to_cpu(data->data_length);
868 cmd->pdu_recv = 0;
869 nvmet_tcp_map_pdu_iovec(cmd);
870 queue->cmd = cmd;
871 queue->rcv_state = NVMET_TCP_RECV_DATA;
872
873 return 0;
874}
875
876static int nvmet_tcp_done_recv_pdu(struct nvmet_tcp_queue *queue)
877{
878 struct nvme_tcp_hdr *hdr = &queue->pdu.cmd.hdr;
879 struct nvme_command *nvme_cmd = &queue->pdu.cmd.cmd;
880 struct nvmet_req *req;
881 int ret;
882
883 if (unlikely(queue->state == NVMET_TCP_Q_CONNECTING)) {
884 if (hdr->type != nvme_tcp_icreq) {
885 pr_err("unexpected pdu type (%d) before icreq\n",
886 hdr->type);
887 nvmet_tcp_fatal_error(queue);
888 return -EPROTO;
889 }
890 return nvmet_tcp_handle_icreq(queue);
891 }
892
893 if (hdr->type == nvme_tcp_h2c_data) {
894 ret = nvmet_tcp_handle_h2c_data_pdu(queue);
895 if (unlikely(ret))
896 return ret;
897 return 0;
898 }
899
900 queue->cmd = nvmet_tcp_get_cmd(queue);
901 if (unlikely(!queue->cmd)) {
902 /* This should never happen */
903 pr_err("queue %d: out of commands (%d) send_list_len: %d, opcode: %d",
904 queue->idx, queue->nr_cmds, queue->send_list_len,
905 nvme_cmd->common.opcode);
906 nvmet_tcp_fatal_error(queue);
907 return -ENOMEM;
908 }
909
910 req = &queue->cmd->req;
911 memcpy(req->cmd, nvme_cmd, sizeof(*nvme_cmd));
912
913 if (unlikely(!nvmet_req_init(req, &queue->nvme_cq,
914 &queue->nvme_sq, &nvmet_tcp_ops))) {
915 pr_err("failed cmd %p id %d opcode %d, data_len: %d\n",
916 req->cmd, req->cmd->common.command_id,
917 req->cmd->common.opcode,
918 le32_to_cpu(req->cmd->common.dptr.sgl.length));
919
920 nvmet_tcp_handle_req_failure(queue, queue->cmd, req);
921 return -EAGAIN;
922 }
923
924 ret = nvmet_tcp_map_data(queue->cmd);
925 if (unlikely(ret)) {
926 pr_err("queue %d: failed to map data\n", queue->idx);
927 if (nvmet_tcp_has_inline_data(queue->cmd))
928 nvmet_tcp_fatal_error(queue);
929 else
930 nvmet_req_complete(req, ret);
931 ret = -EAGAIN;
932 goto out;
933 }
934
935 if (nvmet_tcp_need_data_in(queue->cmd)) {
936 if (nvmet_tcp_has_inline_data(queue->cmd)) {
937 queue->rcv_state = NVMET_TCP_RECV_DATA;
938 nvmet_tcp_map_pdu_iovec(queue->cmd);
939 return 0;
940 }
941 /* send back R2T */
942 nvmet_tcp_queue_response(&queue->cmd->req);
943 goto out;
944 }
945
946 queue->cmd->req.execute(&queue->cmd->req);
947out:
948 nvmet_prepare_receive_pdu(queue);
949 return ret;
950}
951
952static const u8 nvme_tcp_pdu_sizes[] = {
953 [nvme_tcp_icreq] = sizeof(struct nvme_tcp_icreq_pdu),
954 [nvme_tcp_cmd] = sizeof(struct nvme_tcp_cmd_pdu),
955 [nvme_tcp_h2c_data] = sizeof(struct nvme_tcp_data_pdu),
956};
957
958static inline u8 nvmet_tcp_pdu_size(u8 type)
959{
960 size_t idx = type;
961
962 return (idx < ARRAY_SIZE(nvme_tcp_pdu_sizes) &&
963 nvme_tcp_pdu_sizes[idx]) ?
964 nvme_tcp_pdu_sizes[idx] : 0;
965}
966
967static inline bool nvmet_tcp_pdu_valid(u8 type)
968{
969 switch (type) {
970 case nvme_tcp_icreq:
971 case nvme_tcp_cmd:
972 case nvme_tcp_h2c_data:
973 /* fallthru */
974 return true;
975 }
976
977 return false;
978}
979
980static int nvmet_tcp_try_recv_pdu(struct nvmet_tcp_queue *queue)
981{
982 struct nvme_tcp_hdr *hdr = &queue->pdu.cmd.hdr;
983 int len;
984 struct kvec iov;
985 struct msghdr msg = { .msg_flags = MSG_DONTWAIT };
986
987recv:
988 iov.iov_base = (void *)&queue->pdu + queue->offset;
989 iov.iov_len = queue->left;
990 len = kernel_recvmsg(queue->sock, &msg, &iov, 1,
991 iov.iov_len, msg.msg_flags);
992 if (unlikely(len < 0))
993 return len;
994
995 queue->offset += len;
996 queue->left -= len;
997 if (queue->left)
998 return -EAGAIN;
999
1000 if (queue->offset == sizeof(struct nvme_tcp_hdr)) {
1001 u8 hdgst = nvmet_tcp_hdgst_len(queue);
1002
1003 if (unlikely(!nvmet_tcp_pdu_valid(hdr->type))) {
1004 pr_err("unexpected pdu type %d\n", hdr->type);
1005 nvmet_tcp_fatal_error(queue);
1006 return -EIO;
1007 }
1008
1009 if (unlikely(hdr->hlen != nvmet_tcp_pdu_size(hdr->type))) {
1010 pr_err("pdu %d bad hlen %d\n", hdr->type, hdr->hlen);
1011 return -EIO;
1012 }
1013
1014 queue->left = hdr->hlen - queue->offset + hdgst;
1015 goto recv;
1016 }
1017
1018 if (queue->hdr_digest &&
1019 nvmet_tcp_verify_hdgst(queue, &queue->pdu, queue->offset)) {
1020 nvmet_tcp_fatal_error(queue); /* fatal */
1021 return -EPROTO;
1022 }
1023
1024 if (queue->data_digest &&
1025 nvmet_tcp_check_ddgst(queue, &queue->pdu)) {
1026 nvmet_tcp_fatal_error(queue); /* fatal */
1027 return -EPROTO;
1028 }
1029
1030 return nvmet_tcp_done_recv_pdu(queue);
1031}
1032
1033static void nvmet_tcp_prep_recv_ddgst(struct nvmet_tcp_cmd *cmd)
1034{
1035 struct nvmet_tcp_queue *queue = cmd->queue;
1036
1037 nvmet_tcp_ddgst(queue->rcv_hash, cmd);
1038 queue->offset = 0;
1039 queue->left = NVME_TCP_DIGEST_LENGTH;
1040 queue->rcv_state = NVMET_TCP_RECV_DDGST;
1041}
1042
1043static int nvmet_tcp_try_recv_data(struct nvmet_tcp_queue *queue)
1044{
1045 struct nvmet_tcp_cmd *cmd = queue->cmd;
1046 int ret;
1047
1048 while (msg_data_left(&cmd->recv_msg)) {
1049 ret = sock_recvmsg(cmd->queue->sock, &cmd->recv_msg,
1050 cmd->recv_msg.msg_flags);
1051 if (ret <= 0)
1052 return ret;
1053
1054 cmd->pdu_recv += ret;
1055 cmd->rbytes_done += ret;
1056 }
1057
1058 nvmet_tcp_unmap_pdu_iovec(cmd);
1059
1060 if (!(cmd->flags & NVMET_TCP_F_INIT_FAILED) &&
1061 cmd->rbytes_done == cmd->req.transfer_len) {
1062 if (queue->data_digest) {
1063 nvmet_tcp_prep_recv_ddgst(cmd);
1064 return 0;
1065 }
1066 cmd->req.execute(&cmd->req);
1067 }
1068
1069 nvmet_prepare_receive_pdu(queue);
1070 return 0;
1071}
1072
1073static int nvmet_tcp_try_recv_ddgst(struct nvmet_tcp_queue *queue)
1074{
1075 struct nvmet_tcp_cmd *cmd = queue->cmd;
1076 int ret;
1077 struct msghdr msg = { .msg_flags = MSG_DONTWAIT };
1078 struct kvec iov = {
1079 .iov_base = (void *)&cmd->recv_ddgst + queue->offset,
1080 .iov_len = queue->left
1081 };
1082
1083 ret = kernel_recvmsg(queue->sock, &msg, &iov, 1,
1084 iov.iov_len, msg.msg_flags);
1085 if (unlikely(ret < 0))
1086 return ret;
1087
1088 queue->offset += ret;
1089 queue->left -= ret;
1090 if (queue->left)
1091 return -EAGAIN;
1092
1093 if (queue->data_digest && cmd->exp_ddgst != cmd->recv_ddgst) {
1094 pr_err("queue %d: cmd %d pdu (%d) data digest error: recv %#x expected %#x\n",
1095 queue->idx, cmd->req.cmd->common.command_id,
1096 queue->pdu.cmd.hdr.type, le32_to_cpu(cmd->recv_ddgst),
1097 le32_to_cpu(cmd->exp_ddgst));
1098 nvmet_tcp_finish_cmd(cmd);
1099 nvmet_tcp_fatal_error(queue);
1100 ret = -EPROTO;
1101 goto out;
1102 }
1103
1104 if (!(cmd->flags & NVMET_TCP_F_INIT_FAILED) &&
1105 cmd->rbytes_done == cmd->req.transfer_len)
1106 cmd->req.execute(&cmd->req);
1107 ret = 0;
1108out:
1109 nvmet_prepare_receive_pdu(queue);
1110 return ret;
1111}
1112
1113static int nvmet_tcp_try_recv_one(struct nvmet_tcp_queue *queue)
1114{
1115 int result = 0;
1116
1117 if (unlikely(queue->rcv_state == NVMET_TCP_RECV_ERR))
1118 return 0;
1119
1120 if (queue->rcv_state == NVMET_TCP_RECV_PDU) {
1121 result = nvmet_tcp_try_recv_pdu(queue);
1122 if (result != 0)
1123 goto done_recv;
1124 }
1125
1126 if (queue->rcv_state == NVMET_TCP_RECV_DATA) {
1127 result = nvmet_tcp_try_recv_data(queue);
1128 if (result != 0)
1129 goto done_recv;
1130 }
1131
1132 if (queue->rcv_state == NVMET_TCP_RECV_DDGST) {
1133 result = nvmet_tcp_try_recv_ddgst(queue);
1134 if (result != 0)
1135 goto done_recv;
1136 }
1137
1138done_recv:
1139 if (result < 0) {
1140 if (result == -EAGAIN)
1141 return 0;
1142 return result;
1143 }
1144 return 1;
1145}
1146
1147static int nvmet_tcp_try_recv(struct nvmet_tcp_queue *queue,
1148 int budget, int *recvs)
1149{
1150 int i, ret = 0;
1151
1152 for (i = 0; i < budget; i++) {
1153 ret = nvmet_tcp_try_recv_one(queue);
1154 if (ret <= 0)
1155 break;
1156 (*recvs)++;
1157 }
1158
1159 return ret;
1160}
1161
1162static void nvmet_tcp_schedule_release_queue(struct nvmet_tcp_queue *queue)
1163{
1164 spin_lock(&queue->state_lock);
1165 if (queue->state != NVMET_TCP_Q_DISCONNECTING) {
1166 queue->state = NVMET_TCP_Q_DISCONNECTING;
1167 schedule_work(&queue->release_work);
1168 }
1169 spin_unlock(&queue->state_lock);
1170}
1171
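/*
 * Main per-queue worker: alternate bounded receive and send passes until
 * nothing is pending or NVMET_TCP_IO_WORK_BUDGET operations have been
 * consumed, then requeue itself if there is still work left to do.
 */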
1172static void nvmet_tcp_io_work(struct work_struct *w)
1173{
1174 struct nvmet_tcp_queue *queue =
1175 container_of(w, struct nvmet_tcp_queue, io_work);
1176 bool pending;
1177 int ret, ops = 0;
1178
1179 do {
1180 pending = false;
1181
1182 ret = nvmet_tcp_try_recv(queue, NVMET_TCP_RECV_BUDGET, &ops);
1183 if (ret > 0) {
1184 pending = true;
1185 } else if (ret < 0) {
1186 if (ret == -EPIPE || ret == -ECONNRESET)
1187 kernel_sock_shutdown(queue->sock, SHUT_RDWR);
1188 else
1189 nvmet_tcp_fatal_error(queue);
1190 return;
1191 }
1192
1193 ret = nvmet_tcp_try_send(queue, NVMET_TCP_SEND_BUDGET, &ops);
1194 if (ret > 0) {
1195 /* transmitted message/data */
1196 pending = true;
1197 } else if (ret < 0) {
1198 if (ret == -EPIPE || ret == -ECONNRESET)
1199 kernel_sock_shutdown(queue->sock, SHUT_RDWR);
1200 else
1201 nvmet_tcp_fatal_error(queue);
1202 return;
1203 }
1204
1205 } while (pending && ops < NVMET_TCP_IO_WORK_BUDGET);
1206
1207 /*
1208 * We exhausted our budget, requeue ourselves
1209 */
1210 if (pending)
1211 queue_work_on(queue->cpu, nvmet_tcp_wq, &queue->io_work);
1212}
1213
1214static int nvmet_tcp_alloc_cmd(struct nvmet_tcp_queue *queue,
1215 struct nvmet_tcp_cmd *c)
1216{
1217 u8 hdgst = nvmet_tcp_hdgst_len(queue);
1218
1219 c->queue = queue;
1220 c->req.port = queue->port->nport;
1221
1222 c->cmd_pdu = page_frag_alloc(&queue->pf_cache,
1223 sizeof(*c->cmd_pdu) + hdgst, GFP_KERNEL | __GFP_ZERO);
1224 if (!c->cmd_pdu)
1225 return -ENOMEM;
1226 c->req.cmd = &c->cmd_pdu->cmd;
1227
1228 c->rsp_pdu = page_frag_alloc(&queue->pf_cache,
1229 sizeof(*c->rsp_pdu) + hdgst, GFP_KERNEL | __GFP_ZERO);
1230 if (!c->rsp_pdu)
1231 goto out_free_cmd;
1232 c->req.cqe = &c->rsp_pdu->cqe;
1233
1234 c->data_pdu = page_frag_alloc(&queue->pf_cache,
1235 sizeof(*c->data_pdu) + hdgst, GFP_KERNEL | __GFP_ZERO);
1236 if (!c->data_pdu)
1237 goto out_free_rsp;
1238
1239 c->r2t_pdu = page_frag_alloc(&queue->pf_cache,
1240 sizeof(*c->r2t_pdu) + hdgst, GFP_KERNEL | __GFP_ZERO);
1241 if (!c->r2t_pdu)
1242 goto out_free_data;
1243
1244 c->recv_msg.msg_flags = MSG_DONTWAIT | MSG_NOSIGNAL;
1245
1246 list_add_tail(&c->entry, &queue->free_list);
1247
1248 return 0;
1249out_free_data:
1250 page_frag_free(c->data_pdu);
1251out_free_rsp:
1252 page_frag_free(c->rsp_pdu);
1253out_free_cmd:
1254 page_frag_free(c->cmd_pdu);
1255 return -ENOMEM;
1256}
1257
1258static void nvmet_tcp_free_cmd(struct nvmet_tcp_cmd *c)
1259{
1260 page_frag_free(c->r2t_pdu);
1261 page_frag_free(c->data_pdu);
1262 page_frag_free(c->rsp_pdu);
1263 page_frag_free(c->cmd_pdu);
1264}
1265
1266static int nvmet_tcp_alloc_cmds(struct nvmet_tcp_queue *queue)
1267{
1268 struct nvmet_tcp_cmd *cmds;
1269 int i, ret = -EINVAL, nr_cmds = queue->nr_cmds;
1270
1271 cmds = kcalloc(nr_cmds, sizeof(struct nvmet_tcp_cmd), GFP_KERNEL);
1272 if (!cmds)
1273 goto out;
1274
1275 for (i = 0; i < nr_cmds; i++) {
1276 ret = nvmet_tcp_alloc_cmd(queue, cmds + i);
1277 if (ret)
1278 goto out_free;
1279 }
1280
1281 queue->cmds = cmds;
1282
1283 return 0;
1284out_free:
1285 while (--i >= 0)
1286 nvmet_tcp_free_cmd(cmds + i);
1287 kfree(cmds);
1288out:
1289 return ret;
1290}
1291
1292static void nvmet_tcp_free_cmds(struct nvmet_tcp_queue *queue)
1293{
1294 struct nvmet_tcp_cmd *cmds = queue->cmds;
1295 int i;
1296
1297 for (i = 0; i < queue->nr_cmds; i++)
1298 nvmet_tcp_free_cmd(cmds + i);
1299
1300 nvmet_tcp_free_cmd(&queue->connect);
1301 kfree(cmds);
1302}
1303
1304static void nvmet_tcp_restore_socket_callbacks(struct nvmet_tcp_queue *queue)
1305{
1306 struct socket *sock = queue->sock;
1307
1308 write_lock_bh(&sock->sk->sk_callback_lock);
1309 sock->sk->sk_data_ready = queue->data_ready;
1310 sock->sk->sk_state_change = queue->state_change;
1311 sock->sk->sk_write_space = queue->write_space;
1312 sock->sk->sk_user_data = NULL;
1313 write_unlock_bh(&sock->sk->sk_callback_lock);
1314}
1315
1316static void nvmet_tcp_finish_cmd(struct nvmet_tcp_cmd *cmd)
1317{
1318 nvmet_req_uninit(&cmd->req);
1319 nvmet_tcp_unmap_pdu_iovec(cmd);
1320 kfree(cmd->iov);
1321 sgl_free(cmd->req.sg);
1322}
1323
1324static void nvmet_tcp_uninit_data_in_cmds(struct nvmet_tcp_queue *queue)
1325{
1326 struct nvmet_tcp_cmd *cmd = queue->cmds;
1327 int i;
1328
1329 for (i = 0; i < queue->nr_cmds; i++, cmd++) {
1330 if (nvmet_tcp_need_data_in(cmd))
1331 nvmet_tcp_finish_cmd(cmd);
1332 }
1333
1334 if (!queue->nr_cmds && nvmet_tcp_need_data_in(&queue->connect)) {
1335 /* failed in connect */
1336 nvmet_tcp_finish_cmd(&queue->connect);
1337 }
1338}
1339
1340static void nvmet_tcp_release_queue_work(struct work_struct *w)
1341{
1342 struct nvmet_tcp_queue *queue =
1343 container_of(w, struct nvmet_tcp_queue, release_work);
1344
1345 mutex_lock(&nvmet_tcp_queue_mutex);
1346 list_del_init(&queue->queue_list);
1347 mutex_unlock(&nvmet_tcp_queue_mutex);
1348
1349 nvmet_tcp_restore_socket_callbacks(queue);
1350 flush_work(&queue->io_work);
1351
1352 nvmet_tcp_uninit_data_in_cmds(queue);
1353 nvmet_sq_destroy(&queue->nvme_sq);
1354 cancel_work_sync(&queue->io_work);
1355 sock_release(queue->sock);
1356 nvmet_tcp_free_cmds(queue);
1357 if (queue->hdr_digest || queue->data_digest)
1358 nvmet_tcp_free_crypto(queue);
1359 ida_simple_remove(&nvmet_tcp_queue_ida, queue->idx);
1360
1361 kfree(queue);
1362}
1363
1364static void nvmet_tcp_data_ready(struct sock *sk)
1365{
1366 struct nvmet_tcp_queue *queue;
1367
1368 read_lock_bh(&sk->sk_callback_lock);
1369 queue = sk->sk_user_data;
1370 if (likely(queue))
1371 queue_work_on(queue->cpu, nvmet_tcp_wq, &queue->io_work);
1372 read_unlock_bh(&sk->sk_callback_lock);
1373}
1374
1375static void nvmet_tcp_write_space(struct sock *sk)
1376{
1377 struct nvmet_tcp_queue *queue;
1378
1379 read_lock_bh(&sk->sk_callback_lock);
1380 queue = sk->sk_user_data;
1381 if (unlikely(!queue))
1382 goto out;
1383
1384 if (unlikely(queue->state == NVMET_TCP_Q_CONNECTING)) {
1385 queue->write_space(sk);
1386 goto out;
1387 }
1388
1389 if (sk_stream_is_writeable(sk)) {
1390 clear_bit(SOCK_NOSPACE, &sk->sk_socket->flags);
1391 queue_work_on(queue->cpu, nvmet_tcp_wq, &queue->io_work);
1392 }
1393out:
1394 read_unlock_bh(&sk->sk_callback_lock);
1395}
1396
1397static void nvmet_tcp_state_change(struct sock *sk)
1398{
1399 struct nvmet_tcp_queue *queue;
1400
1401 write_lock_bh(&sk->sk_callback_lock);
1402 queue = sk->sk_user_data;
1403 if (!queue)
1404 goto done;
1405
1406 switch (sk->sk_state) {
1407 case TCP_FIN_WAIT1:
1408 case TCP_CLOSE_WAIT:
1409 case TCP_CLOSE:
1410 /* FALLTHRU */
1411 sk->sk_user_data = NULL;
1412 nvmet_tcp_schedule_release_queue(queue);
1413 break;
1414 default:
1415 pr_warn("queue %d unhandled state %d\n",
1416 queue->idx, sk->sk_state);
1417 }
1418done:
1419 write_unlock_bh(&sk->sk_callback_lock);
1420}
1421
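/*
 * Per-socket setup for an accepted connection: record the local and peer
 * addresses, apply socket options (SO_LINGER, and optionally SO_PRIORITY
 * and IP_TOS), and install the nvmet-tcp data_ready/state_change/
 * write_space callbacks while saving the originals for restoration.
 */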
1422static int nvmet_tcp_set_queue_sock(struct nvmet_tcp_queue *queue)
1423{
1424 struct socket *sock = queue->sock;
1425 struct inet_sock *inet = inet_sk(sock->sk);
1426 struct linger sol = { .l_onoff = 1, .l_linger = 0 };
1427 int ret;
1428
1429 ret = kernel_getsockname(sock,
1430 (struct sockaddr *)&queue->sockaddr);
1431 if (ret < 0)
1432 return ret;
1433
1434 ret = kernel_getpeername(sock,
1435 (struct sockaddr *)&queue->sockaddr_peer);
1436 if (ret < 0)
1437 return ret;
1438
1439 /*
1440 * Clean up whatever is sitting in the TCP transmit queue on socket
1441 * close. This is done to prevent stale data from being sent should
1442 * the network connection be restored before TCP times out.
1443 */
1444 ret = kernel_setsockopt(sock, SOL_SOCKET, SO_LINGER,
1445 (char *)&sol, sizeof(sol));
1446 if (ret)
1447 return ret;
1448
1449 if (so_priority > 0) {
1450 ret = kernel_setsockopt(sock, SOL_SOCKET, SO_PRIORITY,
1451 (char *)&so_priority, sizeof(so_priority));
1452 if (ret)
1453 return ret;
1454 }
1455
1456 /* Set socket type of service */
1457 if (inet->rcv_tos > 0) {
1458 int tos = inet->rcv_tos;
1459
1460 ret = kernel_setsockopt(sock, SOL_IP, IP_TOS,
1461 (char *)&tos, sizeof(tos));
1462 if (ret)
1463 return ret;
1464 }
1465
1466 write_lock_bh(&sock->sk->sk_callback_lock);
1467 sock->sk->sk_user_data = queue;
1468 queue->data_ready = sock->sk->sk_data_ready;
1469 sock->sk->sk_data_ready = nvmet_tcp_data_ready;
1470 queue->state_change = sock->sk->sk_state_change;
1471 sock->sk->sk_state_change = nvmet_tcp_state_change;
1472 queue->write_space = sock->sk->sk_write_space;
1473 sock->sk->sk_write_space = nvmet_tcp_write_space;
1474 write_unlock_bh(&sock->sk->sk_callback_lock);
1475
1476 return 0;
1477}
1478
1479static int nvmet_tcp_alloc_queue(struct nvmet_tcp_port *port,
1480 struct socket *newsock)
1481{
1482 struct nvmet_tcp_queue *queue;
1483 int ret;
1484
1485 queue = kzalloc(sizeof(*queue), GFP_KERNEL);
1486 if (!queue)
1487 return -ENOMEM;
1488
1489 INIT_WORK(&queue->release_work, nvmet_tcp_release_queue_work);
1490 INIT_WORK(&queue->io_work, nvmet_tcp_io_work);
1491 queue->sock = newsock;
1492 queue->port = port;
1493 queue->nr_cmds = 0;
1494 spin_lock_init(&queue->state_lock);
1495 queue->state = NVMET_TCP_Q_CONNECTING;
1496 INIT_LIST_HEAD(&queue->free_list);
1497 init_llist_head(&queue->resp_list);
1498 INIT_LIST_HEAD(&queue->resp_send_list);
1499
1500 queue->idx = ida_simple_get(&nvmet_tcp_queue_ida, 0, 0, GFP_KERNEL);
1501 if (queue->idx < 0) {
1502 ret = queue->idx;
1503 goto out_free_queue;
1504 }
1505
1506 ret = nvmet_tcp_alloc_cmd(queue, &queue->connect);
1507 if (ret)
1508 goto out_ida_remove;
1509
1510 ret = nvmet_sq_init(&queue->nvme_sq);
1511 if (ret)
1512 goto out_free_connect;
1513
1514 port->last_cpu = cpumask_next_wrap(port->last_cpu,
1515 cpu_online_mask, -1, false);
1516 queue->cpu = port->last_cpu;
1517 nvmet_prepare_receive_pdu(queue);
1518
1519 mutex_lock(&nvmet_tcp_queue_mutex);
1520 list_add_tail(&queue->queue_list, &nvmet_tcp_queue_list);
1521 mutex_unlock(&nvmet_tcp_queue_mutex);
1522
1523 ret = nvmet_tcp_set_queue_sock(queue);
1524 if (ret)
1525 goto out_destroy_sq;
1526
1527 queue_work_on(queue->cpu, nvmet_tcp_wq, &queue->io_work);
1528
1529 return 0;
1530out_destroy_sq:
1531 mutex_lock(&nvmet_tcp_queue_mutex);
1532 list_del_init(&queue->queue_list);
1533 mutex_unlock(&nvmet_tcp_queue_mutex);
1534 nvmet_sq_destroy(&queue->nvme_sq);
1535out_free_connect:
1536 nvmet_tcp_free_cmd(&queue->connect);
1537out_ida_remove:
1538 ida_simple_remove(&nvmet_tcp_queue_ida, queue->idx);
1539out_free_queue:
1540 kfree(queue);
1541 return ret;
1542}
1543
1544static void nvmet_tcp_accept_work(struct work_struct *w)
1545{
1546 struct nvmet_tcp_port *port =
1547 container_of(w, struct nvmet_tcp_port, accept_work);
1548 struct socket *newsock;
1549 int ret;
1550
1551 while (true) {
1552 ret = kernel_accept(port->sock, &newsock, O_NONBLOCK);
1553 if (ret < 0) {
1554 if (ret != -EAGAIN)
1555 pr_warn("failed to accept err=%d\n", ret);
1556 return;
1557 }
1558 ret = nvmet_tcp_alloc_queue(port, newsock);
1559 if (ret) {
1560 pr_err("failed to allocate queue\n");
1561 sock_release(newsock);
1562 }
1563 }
1564}
1565
1566static void nvmet_tcp_listen_data_ready(struct sock *sk)
1567{
1568 struct nvmet_tcp_port *port;
1569
1570 read_lock_bh(&sk->sk_callback_lock);
1571 port = sk->sk_user_data;
1572 if (!port)
1573 goto out;
1574
1575 if (sk->sk_state == TCP_LISTEN)
1576 schedule_work(&port->accept_work);
1577out:
1578 read_unlock_bh(&sk->sk_callback_lock);
1579}
1580
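/*
 * Port setup: resolve the configured traddr/trsvcid into a sockaddr,
 * create a listening TCP socket with TCP_NODELAY and SO_REUSEADDR (plus
 * SO_PRIORITY if requested), and hook its data_ready callback so incoming
 * connections schedule accept_work.
 *
 * Rough configfs example of enabling such a port (addresses and values are
 * illustrative):
 *   mkdir /sys/kernel/config/nvmet/ports/1
 *   echo tcp      > /sys/kernel/config/nvmet/ports/1/addr_trtype
 *   echo ipv4     > /sys/kernel/config/nvmet/ports/1/addr_adrfam
 *   echo 10.0.0.1 > /sys/kernel/config/nvmet/ports/1/addr_traddr
 *   echo 4420     > /sys/kernel/config/nvmet/ports/1/addr_trsvcid
 *   ln -s /sys/kernel/config/nvmet/subsystems/<subsys> \
 *         /sys/kernel/config/nvmet/ports/1/subsystems/<subsys>
 */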
1581static int nvmet_tcp_add_port(struct nvmet_port *nport)
1582{
1583 struct nvmet_tcp_port *port;
1584 __kernel_sa_family_t af;
1585 int opt, ret;
1586
1587 port = kzalloc(sizeof(*port), GFP_KERNEL);
1588 if (!port)
1589 return -ENOMEM;
1590
1591 switch (nport->disc_addr.adrfam) {
1592 case NVMF_ADDR_FAMILY_IP4:
1593 af = AF_INET;
1594 break;
1595 case NVMF_ADDR_FAMILY_IP6:
1596 af = AF_INET6;
1597 break;
1598 default:
1599 pr_err("address family %d not supported\n",
1600 nport->disc_addr.adrfam);
1601 ret = -EINVAL;
1602 goto err_port;
1603 }
1604
1605 ret = inet_pton_with_scope(&init_net, af, nport->disc_addr.traddr,
1606 nport->disc_addr.trsvcid, &port->addr);
1607 if (ret) {
1608 pr_err("malformed ip/port passed: %s:%s\n",
1609 nport->disc_addr.traddr, nport->disc_addr.trsvcid);
1610 goto err_port;
1611 }
1612
1613 port->nport = nport;
1614 port->last_cpu = -1;
1615 INIT_WORK(&port->accept_work, nvmet_tcp_accept_work);
1616 if (port->nport->inline_data_size < 0)
1617 port->nport->inline_data_size = NVMET_TCP_DEF_INLINE_DATA_SIZE;
1618
1619 ret = sock_create(port->addr.ss_family, SOCK_STREAM,
1620 IPPROTO_TCP, &port->sock);
1621 if (ret) {
1622 pr_err("failed to create a socket\n");
1623 goto err_port;
1624 }
1625
1626 port->sock->sk->sk_user_data = port;
1627 port->data_ready = port->sock->sk->sk_data_ready;
1628 port->sock->sk->sk_data_ready = nvmet_tcp_listen_data_ready;
1629
1630 opt = 1;
1631 ret = kernel_setsockopt(port->sock, IPPROTO_TCP,
1632 TCP_NODELAY, (char *)&opt, sizeof(opt));
1633 if (ret) {
1634 pr_err("failed to set TCP_NODELAY sock opt %d\n", ret);
1635 goto err_sock;
1636 }
1637
1638 ret = kernel_setsockopt(port->sock, SOL_SOCKET, SO_REUSEADDR,
1639 (char *)&opt, sizeof(opt));
1640 if (ret) {
1641 pr_err("failed to set SO_REUSEADDR sock opt %d\n", ret);
1642 goto err_sock;
1643 }
1644
1645 if (so_priority > 0) {
1646 ret = kernel_setsockopt(port->sock, SOL_SOCKET, SO_PRIORITY,
1647 (char *)&so_priority, sizeof(so_priority));
1648 if (ret) {
1649 pr_err("failed to set SO_PRIORITY sock opt %d\n", ret);
1650 goto err_sock;
1651 }
1652 }
1653
1654 ret = kernel_bind(port->sock, (struct sockaddr *)&port->addr,
1655 sizeof(port->addr));
1656 if (ret) {
1657 pr_err("failed to bind port socket %d\n", ret);
1658 goto err_sock;
1659 }
1660
1661 ret = kernel_listen(port->sock, 128);
1662 if (ret) {
1663 pr_err("failed to listen %d on port sock\n", ret);
1664 goto err_sock;
1665 }
1666
1667 nport->priv = port;
1668 pr_info("enabling port %d (%pISpc)\n",
1669 le16_to_cpu(nport->disc_addr.portid), &port->addr);
1670
1671 return 0;
1672
1673err_sock:
1674 sock_release(port->sock);
1675err_port:
1676 kfree(port);
1677 return ret;
1678}
1679
1680static void nvmet_tcp_remove_port(struct nvmet_port *nport)
1681{
1682 struct nvmet_tcp_port *port = nport->priv;
1683
1684 write_lock_bh(&port->sock->sk->sk_callback_lock);
1685 port->sock->sk->sk_data_ready = port->data_ready;
1686 port->sock->sk->sk_user_data = NULL;
1687 write_unlock_bh(&port->sock->sk->sk_callback_lock);
1688 cancel_work_sync(&port->accept_work);
1689
1690 sock_release(port->sock);
1691 kfree(port);
1692}
1693
1694static void nvmet_tcp_delete_ctrl(struct nvmet_ctrl *ctrl)
1695{
1696 struct nvmet_tcp_queue *queue;
1697
1698 mutex_lock(&nvmet_tcp_queue_mutex);
1699 list_for_each_entry(queue, &nvmet_tcp_queue_list, queue_list)
1700 if (queue->nvme_sq.ctrl == ctrl)
1701 kernel_sock_shutdown(queue->sock, SHUT_RDWR);
1702 mutex_unlock(&nvmet_tcp_queue_mutex);
1703}
1704
1705static u16 nvmet_tcp_install_queue(struct nvmet_sq *sq)
1706{
1707 struct nvmet_tcp_queue *queue =
1708 container_of(sq, struct nvmet_tcp_queue, nvme_sq);
1709
1710 if (sq->qid == 0) {
1711 /* Let inflight controller teardown complete */
1712 flush_scheduled_work();
1713 }
1714
1715 queue->nr_cmds = sq->size * 2;
1716 if (nvmet_tcp_alloc_cmds(queue))
1717 return NVME_SC_INTERNAL;
1718 return 0;
1719}
1720
1721static void nvmet_tcp_disc_port_addr(struct nvmet_req *req,
1722 struct nvmet_port *nport, char *traddr)
1723{
1724 struct nvmet_tcp_port *port = nport->priv;
1725
1726 if (inet_addr_is_any((struct sockaddr *)&port->addr)) {
1727 struct nvmet_tcp_cmd *cmd =
1728 container_of(req, struct nvmet_tcp_cmd, req);
1729 struct nvmet_tcp_queue *queue = cmd->queue;
1730
1731 sprintf(traddr, "%pISc", (struct sockaddr *)&queue->sockaddr);
1732 } else {
1733 memcpy(traddr, nport->disc_addr.traddr, NVMF_TRADDR_SIZE);
1734 }
1735}
1736
1737static struct nvmet_fabrics_ops nvmet_tcp_ops = {
1738 .owner = THIS_MODULE,
1739 .type = NVMF_TRTYPE_TCP,
1740 .msdbd = 1,
1741 .has_keyed_sgls = 0,
1742 .add_port = nvmet_tcp_add_port,
1743 .remove_port = nvmet_tcp_remove_port,
1744 .queue_response = nvmet_tcp_queue_response,
1745 .delete_ctrl = nvmet_tcp_delete_ctrl,
1746 .install_queue = nvmet_tcp_install_queue,
1747 .disc_traddr = nvmet_tcp_disc_port_addr,
1748};
1749
1750static int __init nvmet_tcp_init(void)
1751{
1752 int ret;
1753
1754 nvmet_tcp_wq = alloc_workqueue("nvmet_tcp_wq", WQ_HIGHPRI, 0);
1755 if (!nvmet_tcp_wq)
1756 return -ENOMEM;
1757
1758 ret = nvmet_register_transport(&nvmet_tcp_ops);
1759 if (ret)
1760 goto err;
1761
1762 return 0;
1763err:
1764 destroy_workqueue(nvmet_tcp_wq);
1765 return ret;
1766}
1767
1768static void __exit nvmet_tcp_exit(void)
1769{
1770 struct nvmet_tcp_queue *queue;
1771
1772 nvmet_unregister_transport(&nvmet_tcp_ops);
1773
1774 flush_scheduled_work();
1775 mutex_lock(&nvmet_tcp_queue_mutex);
1776 list_for_each_entry(queue, &nvmet_tcp_queue_list, queue_list)
1777 kernel_sock_shutdown(queue->sock, SHUT_RDWR);
1778 mutex_unlock(&nvmet_tcp_queue_mutex);
1779 flush_scheduled_work();
1780
1781 destroy_workqueue(nvmet_tcp_wq);
1782}
1783
1784module_init(nvmet_tcp_init);
1785module_exit(nvmet_tcp_exit);
1786
1787MODULE_LICENSE("GPL v2");
1788MODULE_ALIAS("nvmet-transport-3"); /* 3 == NVMF_TRTYPE_TCP */