// SPDX-License-Identifier: GPL-2.0
/*
 * NVMe over Fabrics TCP target.
 * Copyright (c) 2018 Lightbits Labs. All rights reserved.
 */
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/module.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/err.h>
#include <linux/nvme-tcp.h>
#include <net/sock.h>
#include <net/tcp.h>
#include <linux/inet.h>
#include <linux/llist.h>
#include <crypto/hash.h>

#include "nvmet.h"

#define NVMET_TCP_DEF_INLINE_DATA_SIZE (4 * PAGE_SIZE)

/* Define the socket priority to use for connections where it is desirable
 * that the NIC consider performing optimized packet processing or filtering.
 * A non-zero value is sufficient to indicate general consideration of any
 * possible optimization. Making it a module parameter allows for alternative
 * values that may be unique to some NIC implementations.
 */
static int so_priority;
module_param(so_priority, int, 0644);
MODULE_PARM_DESC(so_priority, "nvmet tcp socket optimize priority");

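/*
 * Work budgets: bound the number of receive and send operations that
 * io_work processes in one invocation, so that a single busy queue
 * cannot monopolize the workqueue; once the overall budget is spent,
 * io_work requeues itself.
 */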
#define NVMET_TCP_RECV_BUDGET 8
#define NVMET_TCP_SEND_BUDGET 8
#define NVMET_TCP_IO_WORK_BUDGET 64

enum nvmet_tcp_send_state {
        NVMET_TCP_SEND_DATA_PDU,
        NVMET_TCP_SEND_DATA,
        NVMET_TCP_SEND_R2T,
        NVMET_TCP_SEND_DDGST,
        NVMET_TCP_SEND_RESPONSE
};

enum nvmet_tcp_recv_state {
        NVMET_TCP_RECV_PDU,
        NVMET_TCP_RECV_DATA,
        NVMET_TCP_RECV_DDGST,
        NVMET_TCP_RECV_ERR,
};

enum {
        NVMET_TCP_F_INIT_FAILED = (1 << 0),
};

struct nvmet_tcp_cmd {
        struct nvmet_tcp_queue *queue;
        struct nvmet_req req;

        struct nvme_tcp_cmd_pdu *cmd_pdu;
        struct nvme_tcp_rsp_pdu *rsp_pdu;
        struct nvme_tcp_data_pdu *data_pdu;
        struct nvme_tcp_r2t_pdu *r2t_pdu;

        u32 rbytes_done;
        u32 wbytes_done;

        u32 pdu_len;
        u32 pdu_recv;
        int sg_idx;
        int nr_mapped;
        struct msghdr recv_msg;
        struct kvec *iov;
        u32 flags;

        struct list_head entry;
        struct llist_node lentry;

        /* send state */
        u32 offset;
        struct scatterlist *cur_sg;
        enum nvmet_tcp_send_state state;

        __le32 exp_ddgst;
        __le32 recv_ddgst;
};

enum nvmet_tcp_queue_state {
        NVMET_TCP_Q_CONNECTING,
        NVMET_TCP_Q_LIVE,
        NVMET_TCP_Q_DISCONNECTING,
};

struct nvmet_tcp_queue {
        struct socket *sock;
        struct nvmet_tcp_port *port;
        struct work_struct io_work;
        int cpu;
        struct nvmet_cq nvme_cq;
        struct nvmet_sq nvme_sq;

        /* send state */
        struct nvmet_tcp_cmd *cmds;
        unsigned int nr_cmds;
        struct list_head free_list;
        struct llist_head resp_list;
        struct list_head resp_send_list;
        int send_list_len;
        struct nvmet_tcp_cmd *snd_cmd;

        /* recv state */
        int offset;
        int left;
        enum nvmet_tcp_recv_state rcv_state;
        struct nvmet_tcp_cmd *cmd;
        union nvme_tcp_pdu pdu;

        /* digest state */
        bool hdr_digest;
        bool data_digest;
        struct ahash_request *snd_hash;
        struct ahash_request *rcv_hash;

        spinlock_t state_lock;
        enum nvmet_tcp_queue_state state;

        struct sockaddr_storage sockaddr;
        struct sockaddr_storage sockaddr_peer;
        struct work_struct release_work;

        int idx;
        struct list_head queue_list;

        struct nvmet_tcp_cmd connect;

        struct page_frag_cache pf_cache;

        void (*data_ready)(struct sock *);
        void (*state_change)(struct sock *);
        void (*write_space)(struct sock *);
};

struct nvmet_tcp_port {
        struct socket *sock;
        struct work_struct accept_work;
        struct nvmet_port *nport;
        struct sockaddr_storage addr;
        int last_cpu;
        void (*data_ready)(struct sock *);
};

static DEFINE_IDA(nvmet_tcp_queue_ida);
static LIST_HEAD(nvmet_tcp_queue_list);
static DEFINE_MUTEX(nvmet_tcp_queue_mutex);

static struct workqueue_struct *nvmet_tcp_wq;
static struct nvmet_fabrics_ops nvmet_tcp_ops;
static void nvmet_tcp_free_cmd(struct nvmet_tcp_cmd *c);
static void nvmet_tcp_finish_cmd(struct nvmet_tcp_cmd *cmd);

static inline u16 nvmet_tcp_cmd_tag(struct nvmet_tcp_queue *queue,
                struct nvmet_tcp_cmd *cmd)
{
        return cmd - queue->cmds;
}

static inline bool nvmet_tcp_has_data_in(struct nvmet_tcp_cmd *cmd)
{
        return nvme_is_write(cmd->req.cmd) &&
                cmd->rbytes_done < cmd->req.transfer_len;
}

static inline bool nvmet_tcp_need_data_in(struct nvmet_tcp_cmd *cmd)
{
        return nvmet_tcp_has_data_in(cmd) && !cmd->req.cqe->status;
}

static inline bool nvmet_tcp_need_data_out(struct nvmet_tcp_cmd *cmd)
{
        return !nvme_is_write(cmd->req.cmd) &&
                cmd->req.transfer_len > 0 &&
                !cmd->req.cqe->status;
}

static inline bool nvmet_tcp_has_inline_data(struct nvmet_tcp_cmd *cmd)
{
        return nvme_is_write(cmd->req.cmd) && cmd->pdu_len &&
                !cmd->rbytes_done;
}

static inline struct nvmet_tcp_cmd *
nvmet_tcp_get_cmd(struct nvmet_tcp_queue *queue)
{
        struct nvmet_tcp_cmd *cmd;

        cmd = list_first_entry_or_null(&queue->free_list,
                                struct nvmet_tcp_cmd, entry);
        if (!cmd)
                return NULL;
        list_del_init(&cmd->entry);

        cmd->rbytes_done = cmd->wbytes_done = 0;
        cmd->pdu_len = 0;
        cmd->pdu_recv = 0;
        cmd->iov = NULL;
        cmd->flags = 0;
        return cmd;
}

static inline void nvmet_tcp_put_cmd(struct nvmet_tcp_cmd *cmd)
{
        if (unlikely(cmd == &cmd->queue->connect))
                return;

        list_add_tail(&cmd->entry, &cmd->queue->free_list);
}

static inline u8 nvmet_tcp_hdgst_len(struct nvmet_tcp_queue *queue)
{
        return queue->hdr_digest ? NVME_TCP_DIGEST_LENGTH : 0;
}

static inline u8 nvmet_tcp_ddgst_len(struct nvmet_tcp_queue *queue)
{
        return queue->data_digest ? NVME_TCP_DIGEST_LENGTH : 0;
}

static inline void nvmet_tcp_hdgst(struct ahash_request *hash,
                void *pdu, size_t len)
{
        struct scatterlist sg;

        sg_init_one(&sg, pdu, len);
        ahash_request_set_crypt(hash, &sg, pdu + len, len);
        crypto_ahash_digest(hash);
}

static int nvmet_tcp_verify_hdgst(struct nvmet_tcp_queue *queue,
        void *pdu, size_t len)
{
        struct nvme_tcp_hdr *hdr = pdu;
        __le32 recv_digest;
        __le32 exp_digest;

        if (unlikely(!(hdr->flags & NVME_TCP_F_HDGST))) {
                pr_err("queue %d: header digest enabled but no header digest\n",
                        queue->idx);
                return -EPROTO;
        }

        recv_digest = *(__le32 *)(pdu + hdr->hlen);
        nvmet_tcp_hdgst(queue->rcv_hash, pdu, len);
        exp_digest = *(__le32 *)(pdu + hdr->hlen);
        if (recv_digest != exp_digest) {
                pr_err("queue %d: header digest error: recv %#x expected %#x\n",
                        queue->idx, le32_to_cpu(recv_digest),
                        le32_to_cpu(exp_digest));
                return -EPROTO;
        }

        return 0;
}

static int nvmet_tcp_check_ddgst(struct nvmet_tcp_queue *queue, void *pdu)
{
        struct nvme_tcp_hdr *hdr = pdu;
        u8 digest_len = nvmet_tcp_hdgst_len(queue);
        u32 len;

        len = le32_to_cpu(hdr->plen) - hdr->hlen -
                (hdr->flags & NVME_TCP_F_HDGST ? digest_len : 0);

        if (unlikely(len && !(hdr->flags & NVME_TCP_F_DDGST))) {
                pr_err("queue %d: data digest flag is cleared\n", queue->idx);
                return -EPROTO;
        }

        return 0;
}

static void nvmet_tcp_unmap_pdu_iovec(struct nvmet_tcp_cmd *cmd)
{
        struct scatterlist *sg;
        int i;

        sg = &cmd->req.sg[cmd->sg_idx];

        for (i = 0; i < cmd->nr_mapped; i++)
                kunmap(sg_page(&sg[i]));
}

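/*
 * Build a kvec over the command's scatterlist pages, starting at the
 * byte offset already received (rbytes_done), so inline or H2C data can
 * be received straight into the destination buffers via sock_recvmsg().
 */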
static void nvmet_tcp_map_pdu_iovec(struct nvmet_tcp_cmd *cmd)
{
        struct kvec *iov = cmd->iov;
        struct scatterlist *sg;
        u32 length, offset, sg_offset;

        length = cmd->pdu_len;
        cmd->nr_mapped = DIV_ROUND_UP(length, PAGE_SIZE);
        offset = cmd->rbytes_done;
        cmd->sg_idx = DIV_ROUND_UP(offset, PAGE_SIZE);
        sg_offset = offset % PAGE_SIZE;
        sg = &cmd->req.sg[cmd->sg_idx];

        while (length) {
                u32 iov_len = min_t(u32, length, sg->length - sg_offset);

                iov->iov_base = kmap(sg_page(sg)) + sg->offset + sg_offset;
                iov->iov_len = iov_len;

                length -= iov_len;
                sg = sg_next(sg);
                iov++;
                sg_offset = 0; /* only the first sg may start mid-page */
        }

        iov_iter_kvec(&cmd->recv_msg.msg_iter, READ, cmd->iov,
                cmd->nr_mapped, cmd->pdu_len);
}

static void nvmet_tcp_fatal_error(struct nvmet_tcp_queue *queue)
{
        queue->rcv_state = NVMET_TCP_RECV_ERR;
        if (queue->nvme_sq.ctrl)
                nvmet_ctrl_fatal_error(queue->nvme_sq.ctrl);
        else
                kernel_sock_shutdown(queue->sock, SHUT_RDWR);
}

static int nvmet_tcp_map_data(struct nvmet_tcp_cmd *cmd)
{
        struct nvme_sgl_desc *sgl = &cmd->req.cmd->common.dptr.sgl;
        u32 len = le32_to_cpu(sgl->length);

        if (!len)
                return 0;

        if (sgl->type == ((NVME_SGL_FMT_DATA_DESC << 4) |
                        NVME_SGL_FMT_OFFSET)) {
                if (!nvme_is_write(cmd->req.cmd))
                        return NVME_SC_INVALID_FIELD | NVME_SC_DNR;

                if (len > cmd->req.port->inline_data_size)
                        return NVME_SC_SGL_INVALID_OFFSET | NVME_SC_DNR;
                cmd->pdu_len = len;
        }
        cmd->req.transfer_len += len;

        cmd->req.sg = sgl_alloc(len, GFP_KERNEL, &cmd->req.sg_cnt);
        if (!cmd->req.sg)
                return NVME_SC_INTERNAL;
        cmd->cur_sg = cmd->req.sg;

        if (nvmet_tcp_has_data_in(cmd)) {
                cmd->iov = kmalloc_array(cmd->req.sg_cnt,
                                sizeof(*cmd->iov), GFP_KERNEL);
                if (!cmd->iov)
                        goto err;
        }

        return 0;
err:
        sgl_free(cmd->req.sg);
        return NVME_SC_INTERNAL;
}

static void nvmet_tcp_ddgst(struct ahash_request *hash,
                struct nvmet_tcp_cmd *cmd)
{
        ahash_request_set_crypt(hash, cmd->req.sg,
                (void *)&cmd->exp_ddgst, cmd->req.transfer_len);
        crypto_ahash_digest(hash);
}

static void nvmet_setup_c2h_data_pdu(struct nvmet_tcp_cmd *cmd)
{
        struct nvme_tcp_data_pdu *pdu = cmd->data_pdu;
        struct nvmet_tcp_queue *queue = cmd->queue;
        u8 hdgst = nvmet_tcp_hdgst_len(cmd->queue);
        u8 ddgst = nvmet_tcp_ddgst_len(cmd->queue);

        cmd->offset = 0;
        cmd->state = NVMET_TCP_SEND_DATA_PDU;

        pdu->hdr.type = nvme_tcp_c2h_data;
        pdu->hdr.flags = NVME_TCP_F_DATA_LAST | (queue->nvme_sq.sqhd_disabled ?
                                                NVME_TCP_F_DATA_SUCCESS : 0);
        pdu->hdr.hlen = sizeof(*pdu);
        pdu->hdr.pdo = pdu->hdr.hlen + hdgst;
        pdu->hdr.plen =
                cpu_to_le32(pdu->hdr.hlen + hdgst +
                                cmd->req.transfer_len + ddgst);
        pdu->command_id = cmd->req.cqe->command_id;
        pdu->data_length = cpu_to_le32(cmd->req.transfer_len);
        pdu->data_offset = cpu_to_le32(cmd->wbytes_done);

        if (queue->data_digest) {
                pdu->hdr.flags |= NVME_TCP_F_DDGST;
                nvmet_tcp_ddgst(queue->snd_hash, cmd);
        }

        if (cmd->queue->hdr_digest) {
                pdu->hdr.flags |= NVME_TCP_F_HDGST;
                nvmet_tcp_hdgst(queue->snd_hash, pdu, sizeof(*pdu));
        }
}

static void nvmet_setup_r2t_pdu(struct nvmet_tcp_cmd *cmd)
{
        struct nvme_tcp_r2t_pdu *pdu = cmd->r2t_pdu;
        struct nvmet_tcp_queue *queue = cmd->queue;
        u8 hdgst = nvmet_tcp_hdgst_len(cmd->queue);

        cmd->offset = 0;
        cmd->state = NVMET_TCP_SEND_R2T;

        pdu->hdr.type = nvme_tcp_r2t;
        pdu->hdr.flags = 0;
        pdu->hdr.hlen = sizeof(*pdu);
        pdu->hdr.pdo = 0;
        pdu->hdr.plen = cpu_to_le32(pdu->hdr.hlen + hdgst);

        pdu->command_id = cmd->req.cmd->common.command_id;
        pdu->ttag = nvmet_tcp_cmd_tag(cmd->queue, cmd);
        pdu->r2t_length = cpu_to_le32(cmd->req.transfer_len - cmd->rbytes_done);
        pdu->r2t_offset = cpu_to_le32(cmd->rbytes_done);
        if (cmd->queue->hdr_digest) {
                pdu->hdr.flags |= NVME_TCP_F_HDGST;
                nvmet_tcp_hdgst(queue->snd_hash, pdu, sizeof(*pdu));
        }
}

static void nvmet_setup_response_pdu(struct nvmet_tcp_cmd *cmd)
{
        struct nvme_tcp_rsp_pdu *pdu = cmd->rsp_pdu;
        struct nvmet_tcp_queue *queue = cmd->queue;
        u8 hdgst = nvmet_tcp_hdgst_len(cmd->queue);

        cmd->offset = 0;
        cmd->state = NVMET_TCP_SEND_RESPONSE;

        pdu->hdr.type = nvme_tcp_rsp;
        pdu->hdr.flags = 0;
        pdu->hdr.hlen = sizeof(*pdu);
        pdu->hdr.pdo = 0;
        pdu->hdr.plen = cpu_to_le32(pdu->hdr.hlen + hdgst);
        if (cmd->queue->hdr_digest) {
                pdu->hdr.flags |= NVME_TCP_F_HDGST;
                nvmet_tcp_hdgst(queue->snd_hash, pdu, sizeof(*pdu));
        }
}

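/*
 * Completions may be queued from arbitrary contexts on the lock-free
 * resp_list; drain them here, in io_work context, into the ordered
 * resp_send_list.
 */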
static void nvmet_tcp_process_resp_list(struct nvmet_tcp_queue *queue)
{
        struct llist_node *node;

        node = llist_del_all(&queue->resp_list);
        if (!node)
                return;

        while (node) {
                struct nvmet_tcp_cmd *cmd = llist_entry(node,
                                        struct nvmet_tcp_cmd, lentry);

                list_add(&cmd->entry, &queue->resp_send_list);
                node = node->next;
                queue->send_list_len++;
        }
}

static struct nvmet_tcp_cmd *nvmet_tcp_fetch_cmd(struct nvmet_tcp_queue *queue)
{
        queue->snd_cmd = list_first_entry_or_null(&queue->resp_send_list,
                                struct nvmet_tcp_cmd, entry);
        if (!queue->snd_cmd) {
                nvmet_tcp_process_resp_list(queue);
                queue->snd_cmd =
                        list_first_entry_or_null(&queue->resp_send_list,
                                        struct nvmet_tcp_cmd, entry);
                if (unlikely(!queue->snd_cmd))
                        return NULL;
        }

        list_del_init(&queue->snd_cmd->entry);
        queue->send_list_len--;

        if (nvmet_tcp_need_data_out(queue->snd_cmd))
                nvmet_setup_c2h_data_pdu(queue->snd_cmd);
        else if (nvmet_tcp_need_data_in(queue->snd_cmd))
                nvmet_setup_r2t_pdu(queue->snd_cmd);
        else
                nvmet_setup_response_pdu(queue->snd_cmd);

        return queue->snd_cmd;
}

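/* nvmet core completion callback: stage the response and kick io_work */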
static void nvmet_tcp_queue_response(struct nvmet_req *req)
{
        struct nvmet_tcp_cmd *cmd =
                container_of(req, struct nvmet_tcp_cmd, req);
        struct nvmet_tcp_queue *queue = cmd->queue;

        llist_add(&cmd->lentry, &queue->resp_list);
        queue_work_on(cmd->queue->cpu, nvmet_tcp_wq, &cmd->queue->io_work);
}

static int nvmet_try_send_data_pdu(struct nvmet_tcp_cmd *cmd)
{
        u8 hdgst = nvmet_tcp_hdgst_len(cmd->queue);
        int left = sizeof(*cmd->data_pdu) - cmd->offset + hdgst;
        int ret;

        ret = kernel_sendpage(cmd->queue->sock, virt_to_page(cmd->data_pdu),
                        offset_in_page(cmd->data_pdu) + cmd->offset,
                        left, MSG_DONTWAIT | MSG_MORE);
        if (ret <= 0)
                return ret;

        cmd->offset += ret;
        left -= ret;

        if (left)
                return -EAGAIN;

        cmd->state = NVMET_TCP_SEND_DATA;
        cmd->offset = 0;
        return 1;
}

static int nvmet_try_send_data(struct nvmet_tcp_cmd *cmd)
{
        struct nvmet_tcp_queue *queue = cmd->queue;
        int ret;

        while (cmd->cur_sg) {
                struct page *page = sg_page(cmd->cur_sg);
                u32 left = cmd->cur_sg->length - cmd->offset;

                ret = kernel_sendpage(cmd->queue->sock, page, cmd->offset,
                                        left, MSG_DONTWAIT | MSG_MORE);
                if (ret <= 0)
                        return ret;

                cmd->offset += ret;
                cmd->wbytes_done += ret;

                /* Done with sg? */
                if (cmd->offset == cmd->cur_sg->length) {
                        cmd->cur_sg = sg_next(cmd->cur_sg);
                        cmd->offset = 0;
                }
        }

        if (queue->data_digest) {
                cmd->state = NVMET_TCP_SEND_DDGST;
                cmd->offset = 0;
        } else {
                if (queue->nvme_sq.sqhd_disabled) {
                        cmd->queue->snd_cmd = NULL;
                        nvmet_tcp_put_cmd(cmd);
                } else {
                        nvmet_setup_response_pdu(cmd);
                }
        }

        if (queue->nvme_sq.sqhd_disabled) {
                kfree(cmd->iov);
                sgl_free(cmd->req.sg);
        }

        return 1;
}

static int nvmet_try_send_response(struct nvmet_tcp_cmd *cmd,
                bool last_in_batch)
{
        u8 hdgst = nvmet_tcp_hdgst_len(cmd->queue);
        int left = sizeof(*cmd->rsp_pdu) - cmd->offset + hdgst;
        int flags = MSG_DONTWAIT;
        int ret;

        if (!last_in_batch && cmd->queue->send_list_len)
                flags |= MSG_MORE;
        else
                flags |= MSG_EOR;

        ret = kernel_sendpage(cmd->queue->sock, virt_to_page(cmd->rsp_pdu),
                offset_in_page(cmd->rsp_pdu) + cmd->offset, left, flags);
        if (ret <= 0)
                return ret;
        cmd->offset += ret;
        left -= ret;

        if (left)
                return -EAGAIN;

        kfree(cmd->iov);
        sgl_free(cmd->req.sg);
        cmd->queue->snd_cmd = NULL;
        nvmet_tcp_put_cmd(cmd);
        return 1;
}

static int nvmet_try_send_r2t(struct nvmet_tcp_cmd *cmd, bool last_in_batch)
{
        u8 hdgst = nvmet_tcp_hdgst_len(cmd->queue);
        int left = sizeof(*cmd->r2t_pdu) - cmd->offset + hdgst;
        int flags = MSG_DONTWAIT;
        int ret;

        if (!last_in_batch && cmd->queue->send_list_len)
                flags |= MSG_MORE;
        else
                flags |= MSG_EOR;

        ret = kernel_sendpage(cmd->queue->sock, virt_to_page(cmd->r2t_pdu),
                offset_in_page(cmd->r2t_pdu) + cmd->offset, left, flags);
        if (ret <= 0)
                return ret;
        cmd->offset += ret;
        left -= ret;

        if (left)
                return -EAGAIN;

        cmd->queue->snd_cmd = NULL;
        return 1;
}

static int nvmet_try_send_ddgst(struct nvmet_tcp_cmd *cmd)
{
        struct nvmet_tcp_queue *queue = cmd->queue;
        struct msghdr msg = { .msg_flags = MSG_DONTWAIT };
        struct kvec iov = {
                /* cast to void * so offset advances in bytes, not __le32s */
                .iov_base = (void *)&cmd->exp_ddgst + cmd->offset,
                .iov_len = NVME_TCP_DIGEST_LENGTH - cmd->offset
        };
        int ret;

        ret = kernel_sendmsg(queue->sock, &msg, &iov, 1, iov.iov_len);
        if (unlikely(ret <= 0))
                return ret;

        cmd->offset += ret;

        if (queue->nvme_sq.sqhd_disabled) {
                cmd->queue->snd_cmd = NULL;
                nvmet_tcp_put_cmd(cmd);
        } else {
                nvmet_setup_response_pdu(cmd);
        }
        return 1;
}

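/*
 * Drive the current send command through its state machine
 * (DATA_PDU -> DATA -> DDGST -> RESPONSE, or R2T), fetching a new
 * command when none is in flight. Returns 1 on progress, 0 when the
 * socket would block, and a negative errno on failure.
 */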
static int nvmet_tcp_try_send_one(struct nvmet_tcp_queue *queue,
                bool last_in_batch)
{
        struct nvmet_tcp_cmd *cmd = queue->snd_cmd;
        int ret = 0;

        if (!cmd || queue->state == NVMET_TCP_Q_DISCONNECTING) {
                cmd = nvmet_tcp_fetch_cmd(queue);
                if (unlikely(!cmd))
                        return 0;
        }

        if (cmd->state == NVMET_TCP_SEND_DATA_PDU) {
                ret = nvmet_try_send_data_pdu(cmd);
                if (ret <= 0)
                        goto done_send;
        }

        if (cmd->state == NVMET_TCP_SEND_DATA) {
                ret = nvmet_try_send_data(cmd);
                if (ret <= 0)
                        goto done_send;
        }

        if (cmd->state == NVMET_TCP_SEND_DDGST) {
                ret = nvmet_try_send_ddgst(cmd);
                if (ret <= 0)
                        goto done_send;
        }

        if (cmd->state == NVMET_TCP_SEND_R2T) {
                ret = nvmet_try_send_r2t(cmd, last_in_batch);
                if (ret <= 0)
                        goto done_send;
        }

        if (cmd->state == NVMET_TCP_SEND_RESPONSE)
                ret = nvmet_try_send_response(cmd, last_in_batch);

done_send:
        if (ret < 0) {
                if (ret == -EAGAIN)
                        return 0;
                return ret;
        }

        return 1;
}

static int nvmet_tcp_try_send(struct nvmet_tcp_queue *queue,
                int budget, int *sends)
{
        int i, ret = 0;

        for (i = 0; i < budget; i++) {
                ret = nvmet_tcp_try_send_one(queue, i == budget - 1);
                if (ret <= 0)
                        break;
                (*sends)++;
        }

        return ret;
}

static void nvmet_prepare_receive_pdu(struct nvmet_tcp_queue *queue)
{
        queue->offset = 0;
        queue->left = sizeof(struct nvme_tcp_hdr);
        queue->cmd = NULL;
        queue->rcv_state = NVMET_TCP_RECV_PDU;
}

static void nvmet_tcp_free_crypto(struct nvmet_tcp_queue *queue)
{
        struct crypto_ahash *tfm = crypto_ahash_reqtfm(queue->rcv_hash);

        ahash_request_free(queue->rcv_hash);
        ahash_request_free(queue->snd_hash);
        crypto_free_ahash(tfm);
}

static int nvmet_tcp_alloc_crypto(struct nvmet_tcp_queue *queue)
{
        struct crypto_ahash *tfm;

        tfm = crypto_alloc_ahash("crc32c", 0, CRYPTO_ALG_ASYNC);
        if (IS_ERR(tfm))
                return PTR_ERR(tfm);

        queue->snd_hash = ahash_request_alloc(tfm, GFP_KERNEL);
        if (!queue->snd_hash)
                goto free_tfm;
        ahash_request_set_callback(queue->snd_hash, 0, NULL, NULL);

        queue->rcv_hash = ahash_request_alloc(tfm, GFP_KERNEL);
        if (!queue->rcv_hash)
                goto free_snd_hash;
        ahash_request_set_callback(queue->rcv_hash, 0, NULL, NULL);

        return 0;
free_snd_hash:
        ahash_request_free(queue->snd_hash);
free_tfm:
        crypto_free_ahash(tfm);
        return -ENOMEM;
}

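/*
 * Handle the connection-establishing ICReq PDU: validate it, negotiate
 * header/data digest usage, and answer with an ICResp before moving the
 * queue to the LIVE state.
 */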
static int nvmet_tcp_handle_icreq(struct nvmet_tcp_queue *queue)
{
        struct nvme_tcp_icreq_pdu *icreq = &queue->pdu.icreq;
        struct nvme_tcp_icresp_pdu *icresp = &queue->pdu.icresp;
        struct msghdr msg = {};
        struct kvec iov;
        int ret;

        if (le32_to_cpu(icreq->hdr.plen) != sizeof(struct nvme_tcp_icreq_pdu)) {
                pr_err("bad nvme-tcp pdu length (%d)\n",
                        le32_to_cpu(icreq->hdr.plen));
                nvmet_tcp_fatal_error(queue);
                return -EPROTO;
        }

        if (icreq->pfv != NVME_TCP_PFV_1_0) {
                pr_err("queue %d: bad pfv %d\n", queue->idx, icreq->pfv);
                return -EPROTO;
        }

        if (icreq->hpda != 0) {
                pr_err("queue %d: unsupported hpda %d\n", queue->idx,
                        icreq->hpda);
                return -EPROTO;
        }

        queue->hdr_digest = !!(icreq->digest & NVME_TCP_HDR_DIGEST_ENABLE);
        queue->data_digest = !!(icreq->digest & NVME_TCP_DATA_DIGEST_ENABLE);
        if (queue->hdr_digest || queue->data_digest) {
                ret = nvmet_tcp_alloc_crypto(queue);
                if (ret)
                        return ret;
        }

        memset(icresp, 0, sizeof(*icresp));
        icresp->hdr.type = nvme_tcp_icresp;
        icresp->hdr.hlen = sizeof(*icresp);
        icresp->hdr.pdo = 0;
        icresp->hdr.plen = cpu_to_le32(icresp->hdr.hlen);
        icresp->pfv = cpu_to_le16(NVME_TCP_PFV_1_0);
        icresp->maxdata = cpu_to_le32(0x400000); /* 4M arbitrary limit */
        icresp->cpda = 0;
        if (queue->hdr_digest)
                icresp->digest |= NVME_TCP_HDR_DIGEST_ENABLE;
        if (queue->data_digest)
                icresp->digest |= NVME_TCP_DATA_DIGEST_ENABLE;

        iov.iov_base = icresp;
        iov.iov_len = sizeof(*icresp);
        ret = kernel_sendmsg(queue->sock, &msg, &iov, 1, iov.iov_len);
        if (ret < 0)
                goto free_crypto;

        queue->state = NVMET_TCP_Q_LIVE;
        nvmet_prepare_receive_pdu(queue);
        return 0;
free_crypto:
        if (queue->hdr_digest || queue->data_digest)
                nvmet_tcp_free_crypto(queue);
        return ret;
}

static void nvmet_tcp_handle_req_failure(struct nvmet_tcp_queue *queue,
                struct nvmet_tcp_cmd *cmd, struct nvmet_req *req)
{
        size_t data_len = le32_to_cpu(req->cmd->common.dptr.sgl.length);
        int ret;

        if (!nvme_is_write(cmd->req.cmd) ||
            data_len > cmd->req.port->inline_data_size) {
                nvmet_prepare_receive_pdu(queue);
                return;
        }

        ret = nvmet_tcp_map_data(cmd);
        if (unlikely(ret)) {
                pr_err("queue %d: failed to map data\n", queue->idx);
                nvmet_tcp_fatal_error(queue);
                return;
        }

        queue->rcv_state = NVMET_TCP_RECV_DATA;
        nvmet_tcp_map_pdu_iovec(cmd);
        cmd->flags |= NVMET_TCP_F_INIT_FAILED;
}

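/*
 * An H2C data PDU carries write data solicited by a previously sent
 * R2T; its ttag identifies the command it belongs to, and its offset
 * must match what the command has already received.
 */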
static int nvmet_tcp_handle_h2c_data_pdu(struct nvmet_tcp_queue *queue)
{
        struct nvme_tcp_data_pdu *data = &queue->pdu.data;
        struct nvmet_tcp_cmd *cmd;

        cmd = &queue->cmds[data->ttag];

        if (le32_to_cpu(data->data_offset) != cmd->rbytes_done) {
                pr_err("ttag %u unexpected data offset %u (expected %u)\n",
                        data->ttag, le32_to_cpu(data->data_offset),
                        cmd->rbytes_done);
                /* FIXME: use path and transport errors */
                nvmet_req_complete(&cmd->req,
                        NVME_SC_INVALID_FIELD | NVME_SC_DNR);
                return -EPROTO;
        }

        cmd->pdu_len = le32_to_cpu(data->data_length);
        cmd->pdu_recv = 0;
        nvmet_tcp_map_pdu_iovec(cmd);
        queue->cmd = cmd;
        queue->rcv_state = NVMET_TCP_RECV_DATA;

        return 0;
}

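/*
 * A complete PDU header (and digest) has been received: dispatch on the
 * PDU type. ICReq is only legal while connecting, H2C data resumes an
 * existing command, and a command PDU allocates and initializes a new
 * command, which executes immediately unless it still needs data.
 */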
static int nvmet_tcp_done_recv_pdu(struct nvmet_tcp_queue *queue)
{
        struct nvme_tcp_hdr *hdr = &queue->pdu.cmd.hdr;
        struct nvme_command *nvme_cmd = &queue->pdu.cmd.cmd;
        struct nvmet_req *req;
        int ret;

        if (unlikely(queue->state == NVMET_TCP_Q_CONNECTING)) {
                if (hdr->type != nvme_tcp_icreq) {
                        pr_err("unexpected pdu type (%d) before icreq\n",
                                hdr->type);
                        nvmet_tcp_fatal_error(queue);
                        return -EPROTO;
                }
                return nvmet_tcp_handle_icreq(queue);
        }

        if (hdr->type == nvme_tcp_h2c_data) {
                ret = nvmet_tcp_handle_h2c_data_pdu(queue);
                if (unlikely(ret))
                        return ret;
                return 0;
        }

        queue->cmd = nvmet_tcp_get_cmd(queue);
        if (unlikely(!queue->cmd)) {
                /* This should never happen */
                pr_err("queue %d: out of commands (%d) send_list_len: %d, opcode: %d",
                        queue->idx, queue->nr_cmds, queue->send_list_len,
                        nvme_cmd->common.opcode);
                nvmet_tcp_fatal_error(queue);
                return -ENOMEM;
        }

        req = &queue->cmd->req;
        memcpy(req->cmd, nvme_cmd, sizeof(*nvme_cmd));

        if (unlikely(!nvmet_req_init(req, &queue->nvme_cq,
                        &queue->nvme_sq, &nvmet_tcp_ops))) {
                pr_err("failed cmd %p id %d opcode %d, data_len: %d\n",
                        req->cmd, req->cmd->common.command_id,
                        req->cmd->common.opcode,
                        le32_to_cpu(req->cmd->common.dptr.sgl.length));

                nvmet_tcp_handle_req_failure(queue, queue->cmd, req);
                return -EAGAIN;
        }

        ret = nvmet_tcp_map_data(queue->cmd);
        if (unlikely(ret)) {
                pr_err("queue %d: failed to map data\n", queue->idx);
                if (nvmet_tcp_has_inline_data(queue->cmd))
                        nvmet_tcp_fatal_error(queue);
                else
                        nvmet_req_complete(req, ret);
                ret = -EAGAIN;
                goto out;
        }

        if (nvmet_tcp_need_data_in(queue->cmd)) {
                if (nvmet_tcp_has_inline_data(queue->cmd)) {
                        queue->rcv_state = NVMET_TCP_RECV_DATA;
                        nvmet_tcp_map_pdu_iovec(queue->cmd);
                        return 0;
                }
                /* send back R2T */
                nvmet_tcp_queue_response(&queue->cmd->req);
                goto out;
        }

        queue->cmd->req.execute(&queue->cmd->req);
out:
        nvmet_prepare_receive_pdu(queue);
        return ret;
}

static const u8 nvme_tcp_pdu_sizes[] = {
        [nvme_tcp_icreq] = sizeof(struct nvme_tcp_icreq_pdu),
        [nvme_tcp_cmd] = sizeof(struct nvme_tcp_cmd_pdu),
        [nvme_tcp_h2c_data] = sizeof(struct nvme_tcp_data_pdu),
};

static inline u8 nvmet_tcp_pdu_size(u8 type)
{
        size_t idx = type;

        return (idx < ARRAY_SIZE(nvme_tcp_pdu_sizes) &&
                nvme_tcp_pdu_sizes[idx]) ?
                        nvme_tcp_pdu_sizes[idx] : 0;
}

static inline bool nvmet_tcp_pdu_valid(u8 type)
{
        switch (type) {
        case nvme_tcp_icreq:
        case nvme_tcp_cmd:
        case nvme_tcp_h2c_data:
                /* fallthru */
                return true;
        }

        return false;
}

static int nvmet_tcp_try_recv_pdu(struct nvmet_tcp_queue *queue)
{
        struct nvme_tcp_hdr *hdr = &queue->pdu.cmd.hdr;
        int len;
        struct kvec iov;
        struct msghdr msg = { .msg_flags = MSG_DONTWAIT };

recv:
        iov.iov_base = (void *)&queue->pdu + queue->offset;
        iov.iov_len = queue->left;
        len = kernel_recvmsg(queue->sock, &msg, &iov, 1,
                        iov.iov_len, msg.msg_flags);
        if (unlikely(len < 0))
                return len;

        queue->offset += len;
        queue->left -= len;
        if (queue->left)
                return -EAGAIN;

        if (queue->offset == sizeof(struct nvme_tcp_hdr)) {
                u8 hdgst = nvmet_tcp_hdgst_len(queue);

                if (unlikely(!nvmet_tcp_pdu_valid(hdr->type))) {
                        pr_err("unexpected pdu type %d\n", hdr->type);
                        nvmet_tcp_fatal_error(queue);
                        return -EIO;
                }

                if (unlikely(hdr->hlen != nvmet_tcp_pdu_size(hdr->type))) {
                        pr_err("pdu %d bad hlen %d\n", hdr->type, hdr->hlen);
                        return -EIO;
                }

                queue->left = hdr->hlen - queue->offset + hdgst;
                goto recv;
        }

        if (queue->hdr_digest &&
            nvmet_tcp_verify_hdgst(queue, &queue->pdu, queue->offset)) {
                nvmet_tcp_fatal_error(queue); /* fatal */
                return -EPROTO;
        }

        if (queue->data_digest &&
            nvmet_tcp_check_ddgst(queue, &queue->pdu)) {
                nvmet_tcp_fatal_error(queue); /* fatal */
                return -EPROTO;
        }

        return nvmet_tcp_done_recv_pdu(queue);
}

static void nvmet_tcp_prep_recv_ddgst(struct nvmet_tcp_cmd *cmd)
{
        struct nvmet_tcp_queue *queue = cmd->queue;

        nvmet_tcp_ddgst(queue->rcv_hash, cmd);
        queue->offset = 0;
        queue->left = NVME_TCP_DIGEST_LENGTH;
        queue->rcv_state = NVMET_TCP_RECV_DDGST;
}

static int nvmet_tcp_try_recv_data(struct nvmet_tcp_queue *queue)
{
        struct nvmet_tcp_cmd *cmd = queue->cmd;
        int ret;

        while (msg_data_left(&cmd->recv_msg)) {
                ret = sock_recvmsg(cmd->queue->sock, &cmd->recv_msg,
                        cmd->recv_msg.msg_flags);
                if (ret <= 0)
                        return ret;

                cmd->pdu_recv += ret;
                cmd->rbytes_done += ret;
        }

        nvmet_tcp_unmap_pdu_iovec(cmd);

        if (!(cmd->flags & NVMET_TCP_F_INIT_FAILED) &&
            cmd->rbytes_done == cmd->req.transfer_len) {
                if (queue->data_digest) {
                        nvmet_tcp_prep_recv_ddgst(cmd);
                        return 0;
                }
                cmd->req.execute(&cmd->req);
        }

        nvmet_prepare_receive_pdu(queue);
        return 0;
}

static int nvmet_tcp_try_recv_ddgst(struct nvmet_tcp_queue *queue)
{
        struct nvmet_tcp_cmd *cmd = queue->cmd;
        int ret;
        struct msghdr msg = { .msg_flags = MSG_DONTWAIT };
        struct kvec iov = {
                .iov_base = (void *)&cmd->recv_ddgst + queue->offset,
                .iov_len = queue->left
        };

        ret = kernel_recvmsg(queue->sock, &msg, &iov, 1,
                        iov.iov_len, msg.msg_flags);
        if (unlikely(ret < 0))
                return ret;

        queue->offset += ret;
        queue->left -= ret;
        if (queue->left)
                return -EAGAIN;

        if (queue->data_digest && cmd->exp_ddgst != cmd->recv_ddgst) {
                pr_err("queue %d: cmd %d pdu (%d) data digest error: recv %#x expected %#x\n",
                        queue->idx, cmd->req.cmd->common.command_id,
                        queue->pdu.cmd.hdr.type, le32_to_cpu(cmd->recv_ddgst),
                        le32_to_cpu(cmd->exp_ddgst));
                nvmet_tcp_finish_cmd(cmd);
                nvmet_tcp_fatal_error(queue);
                ret = -EPROTO;
                goto out;
        }

        if (!(cmd->flags & NVMET_TCP_F_INIT_FAILED) &&
            cmd->rbytes_done == cmd->req.transfer_len)
                cmd->req.execute(&cmd->req);
        ret = 0;
out:
        nvmet_prepare_receive_pdu(queue);
        return ret;
}

static int nvmet_tcp_try_recv_one(struct nvmet_tcp_queue *queue)
{
        int result = 0;

        if (unlikely(queue->rcv_state == NVMET_TCP_RECV_ERR))
                return 0;

        if (queue->rcv_state == NVMET_TCP_RECV_PDU) {
                result = nvmet_tcp_try_recv_pdu(queue);
                if (result != 0)
                        goto done_recv;
        }

        if (queue->rcv_state == NVMET_TCP_RECV_DATA) {
                result = nvmet_tcp_try_recv_data(queue);
                if (result != 0)
                        goto done_recv;
        }

        if (queue->rcv_state == NVMET_TCP_RECV_DDGST) {
                result = nvmet_tcp_try_recv_ddgst(queue);
                if (result != 0)
                        goto done_recv;
        }

done_recv:
        if (result < 0) {
                if (result == -EAGAIN)
                        return 0;
                return result;
        }
        return 1;
}

static int nvmet_tcp_try_recv(struct nvmet_tcp_queue *queue,
                int budget, int *recvs)
{
        int i, ret = 0;

        for (i = 0; i < budget; i++) {
                ret = nvmet_tcp_try_recv_one(queue);
                if (ret <= 0)
                        break;
                (*recvs)++;
        }

        return ret;
}

static void nvmet_tcp_schedule_release_queue(struct nvmet_tcp_queue *queue)
{
        spin_lock(&queue->state_lock);
        if (queue->state != NVMET_TCP_Q_DISCONNECTING) {
                queue->state = NVMET_TCP_Q_DISCONNECTING;
                schedule_work(&queue->release_work);
        }
        spin_unlock(&queue->state_lock);
}

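/*
 * Main per-queue work function: alternate bounded receive and send
 * passes over the socket until neither side makes progress or the
 * overall work budget is consumed.
 */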
static void nvmet_tcp_io_work(struct work_struct *w)
{
        struct nvmet_tcp_queue *queue =
                container_of(w, struct nvmet_tcp_queue, io_work);
        bool pending;
        int ret, ops = 0;

        do {
                pending = false;

                ret = nvmet_tcp_try_recv(queue, NVMET_TCP_RECV_BUDGET, &ops);
                if (ret > 0) {
                        pending = true;
                } else if (ret < 0) {
                        if (ret == -EPIPE || ret == -ECONNRESET)
                                kernel_sock_shutdown(queue->sock, SHUT_RDWR);
                        else
                                nvmet_tcp_fatal_error(queue);
                        return;
                }

                ret = nvmet_tcp_try_send(queue, NVMET_TCP_SEND_BUDGET, &ops);
                if (ret > 0) {
                        /* transmitted message/data */
                        pending = true;
                } else if (ret < 0) {
                        if (ret == -EPIPE || ret == -ECONNRESET)
                                kernel_sock_shutdown(queue->sock, SHUT_RDWR);
                        else
                                nvmet_tcp_fatal_error(queue);
                        return;
                }

        } while (pending && ops < NVMET_TCP_IO_WORK_BUDGET);

        /*
         * We exhausted our budget; requeue ourselves if work remains pending.
         */
        if (pending)
                queue_work_on(queue->cpu, nvmet_tcp_wq, &queue->io_work);
}

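/*
 * Allocate a command's four PDU buffers (command, response, C2H data,
 * R2T) from the queue's page-frag cache, each with room for an optional
 * header digest.
 */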
static int nvmet_tcp_alloc_cmd(struct nvmet_tcp_queue *queue,
                struct nvmet_tcp_cmd *c)
{
        u8 hdgst = nvmet_tcp_hdgst_len(queue);

        c->queue = queue;
        c->req.port = queue->port->nport;

        c->cmd_pdu = page_frag_alloc(&queue->pf_cache,
                        sizeof(*c->cmd_pdu) + hdgst, GFP_KERNEL | __GFP_ZERO);
        if (!c->cmd_pdu)
                return -ENOMEM;
        c->req.cmd = &c->cmd_pdu->cmd;

        c->rsp_pdu = page_frag_alloc(&queue->pf_cache,
                        sizeof(*c->rsp_pdu) + hdgst, GFP_KERNEL | __GFP_ZERO);
        if (!c->rsp_pdu)
                goto out_free_cmd;
        c->req.cqe = &c->rsp_pdu->cqe;

        c->data_pdu = page_frag_alloc(&queue->pf_cache,
                        sizeof(*c->data_pdu) + hdgst, GFP_KERNEL | __GFP_ZERO);
        if (!c->data_pdu)
                goto out_free_rsp;

        c->r2t_pdu = page_frag_alloc(&queue->pf_cache,
                        sizeof(*c->r2t_pdu) + hdgst, GFP_KERNEL | __GFP_ZERO);
        if (!c->r2t_pdu)
                goto out_free_data;

        c->recv_msg.msg_flags = MSG_DONTWAIT | MSG_NOSIGNAL;

        list_add_tail(&c->entry, &queue->free_list);

        return 0;
out_free_data:
        page_frag_free(c->data_pdu);
out_free_rsp:
        page_frag_free(c->rsp_pdu);
out_free_cmd:
        page_frag_free(c->cmd_pdu);
        return -ENOMEM;
}

static void nvmet_tcp_free_cmd(struct nvmet_tcp_cmd *c)
{
        page_frag_free(c->r2t_pdu);
        page_frag_free(c->data_pdu);
        page_frag_free(c->rsp_pdu);
        page_frag_free(c->cmd_pdu);
}

static int nvmet_tcp_alloc_cmds(struct nvmet_tcp_queue *queue)
{
        struct nvmet_tcp_cmd *cmds;
        int i, ret = -EINVAL, nr_cmds = queue->nr_cmds;

        cmds = kcalloc(nr_cmds, sizeof(struct nvmet_tcp_cmd), GFP_KERNEL);
        if (!cmds)
                goto out;

        for (i = 0; i < nr_cmds; i++) {
                ret = nvmet_tcp_alloc_cmd(queue, cmds + i);
                if (ret)
                        goto out_free;
        }

        queue->cmds = cmds;

        return 0;
out_free:
        while (--i >= 0)
                nvmet_tcp_free_cmd(cmds + i);
        kfree(cmds);
out:
        return ret;
}

static void nvmet_tcp_free_cmds(struct nvmet_tcp_queue *queue)
{
        struct nvmet_tcp_cmd *cmds = queue->cmds;
        int i;

        for (i = 0; i < queue->nr_cmds; i++)
                nvmet_tcp_free_cmd(cmds + i);

        nvmet_tcp_free_cmd(&queue->connect);
        kfree(cmds);
}

static void nvmet_tcp_restore_socket_callbacks(struct nvmet_tcp_queue *queue)
{
        struct socket *sock = queue->sock;

        write_lock_bh(&sock->sk->sk_callback_lock);
        sock->sk->sk_data_ready = queue->data_ready;
        sock->sk->sk_state_change = queue->state_change;
        sock->sk->sk_write_space = queue->write_space;
        sock->sk->sk_user_data = NULL;
        write_unlock_bh(&sock->sk->sk_callback_lock);
}

static void nvmet_tcp_finish_cmd(struct nvmet_tcp_cmd *cmd)
{
        nvmet_req_uninit(&cmd->req);
        nvmet_tcp_unmap_pdu_iovec(cmd);
        kfree(cmd->iov);
        sgl_free(cmd->req.sg);
}

static void nvmet_tcp_uninit_data_in_cmds(struct nvmet_tcp_queue *queue)
{
        struct nvmet_tcp_cmd *cmd = queue->cmds;
        int i;

        for (i = 0; i < queue->nr_cmds; i++, cmd++) {
                if (nvmet_tcp_need_data_in(cmd))
                        nvmet_tcp_finish_cmd(cmd);
        }

        if (!queue->nr_cmds && nvmet_tcp_need_data_in(&queue->connect)) {
                /* failed in connect */
                nvmet_tcp_finish_cmd(&queue->connect);
        }
}

static void nvmet_tcp_release_queue_work(struct work_struct *w)
{
        struct nvmet_tcp_queue *queue =
                container_of(w, struct nvmet_tcp_queue, release_work);

        mutex_lock(&nvmet_tcp_queue_mutex);
        list_del_init(&queue->queue_list);
        mutex_unlock(&nvmet_tcp_queue_mutex);

        nvmet_tcp_restore_socket_callbacks(queue);
        flush_work(&queue->io_work);

        nvmet_tcp_uninit_data_in_cmds(queue);
        nvmet_sq_destroy(&queue->nvme_sq);
        cancel_work_sync(&queue->io_work);
        sock_release(queue->sock);
        nvmet_tcp_free_cmds(queue);
        if (queue->hdr_digest || queue->data_digest)
                nvmet_tcp_free_crypto(queue);
        ida_simple_remove(&nvmet_tcp_queue_ida, queue->idx);

        kfree(queue);
}

static void nvmet_tcp_data_ready(struct sock *sk)
{
        struct nvmet_tcp_queue *queue;

        read_lock_bh(&sk->sk_callback_lock);
        queue = sk->sk_user_data;
        if (likely(queue))
                queue_work_on(queue->cpu, nvmet_tcp_wq, &queue->io_work);
        read_unlock_bh(&sk->sk_callback_lock);
}

static void nvmet_tcp_write_space(struct sock *sk)
{
        struct nvmet_tcp_queue *queue;

        read_lock_bh(&sk->sk_callback_lock);
        queue = sk->sk_user_data;
        if (unlikely(!queue))
                goto out;

        if (unlikely(queue->state == NVMET_TCP_Q_CONNECTING)) {
                queue->write_space(sk);
                goto out;
        }

        if (sk_stream_is_writeable(sk)) {
                clear_bit(SOCK_NOSPACE, &sk->sk_socket->flags);
                queue_work_on(queue->cpu, nvmet_tcp_wq, &queue->io_work);
        }
out:
        read_unlock_bh(&sk->sk_callback_lock);
}

static void nvmet_tcp_state_change(struct sock *sk)
{
        struct nvmet_tcp_queue *queue;

        write_lock_bh(&sk->sk_callback_lock);
        queue = sk->sk_user_data;
        if (!queue)
                goto done;

        switch (sk->sk_state) {
        case TCP_FIN_WAIT1:
        case TCP_CLOSE_WAIT:
        case TCP_CLOSE:
                /* FALLTHRU */
                sk->sk_user_data = NULL;
                nvmet_tcp_schedule_release_queue(queue);
                break;
        default:
                pr_warn("queue %d unhandled state %d\n",
                        queue->idx, sk->sk_state);
        }
done:
        write_unlock_bh(&sk->sk_callback_lock);
}

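/*
 * Take over an accepted socket: record the local and peer addresses,
 * apply socket options (linger, and optionally priority and type of
 * service), and install the nvmet-tcp socket callbacks.
 */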
static int nvmet_tcp_set_queue_sock(struct nvmet_tcp_queue *queue)
{
        struct socket *sock = queue->sock;
        struct inet_sock *inet = inet_sk(sock->sk);
        struct linger sol = { .l_onoff = 1, .l_linger = 0 };
        int ret;

        ret = kernel_getsockname(sock,
                (struct sockaddr *)&queue->sockaddr);
        if (ret < 0)
                return ret;

        ret = kernel_getpeername(sock,
                (struct sockaddr *)&queue->sockaddr_peer);
        if (ret < 0)
                return ret;

        /*
         * Cleanup whatever is sitting in the TCP transmit queue on socket
         * close. This is done to prevent stale data from being sent should
         * the network connection be restored before TCP times out.
         */
        ret = kernel_setsockopt(sock, SOL_SOCKET, SO_LINGER,
                        (char *)&sol, sizeof(sol));
        if (ret)
                return ret;

        if (so_priority > 0) {
                ret = kernel_setsockopt(sock, SOL_SOCKET, SO_PRIORITY,
                                (char *)&so_priority, sizeof(so_priority));
                if (ret)
                        return ret;
        }

        /* Set socket type of service */
        if (inet->rcv_tos > 0) {
                int tos = inet->rcv_tos;

                ret = kernel_setsockopt(sock, SOL_IP, IP_TOS,
                                (char *)&tos, sizeof(tos));
                if (ret)
                        return ret;
        }

        write_lock_bh(&sock->sk->sk_callback_lock);
        sock->sk->sk_user_data = queue;
        queue->data_ready = sock->sk->sk_data_ready;
        sock->sk->sk_data_ready = nvmet_tcp_data_ready;
        queue->state_change = sock->sk->sk_state_change;
        sock->sk->sk_state_change = nvmet_tcp_state_change;
        queue->write_space = sock->sk->sk_write_space;
        sock->sk->sk_write_space = nvmet_tcp_write_space;
        write_unlock_bh(&sock->sk->sk_callback_lock);

        return 0;
}

static int nvmet_tcp_alloc_queue(struct nvmet_tcp_port *port,
                struct socket *newsock)
{
        struct nvmet_tcp_queue *queue;
        int ret;

        queue = kzalloc(sizeof(*queue), GFP_KERNEL);
        if (!queue)
                return -ENOMEM;

        INIT_WORK(&queue->release_work, nvmet_tcp_release_queue_work);
        INIT_WORK(&queue->io_work, nvmet_tcp_io_work);
        queue->sock = newsock;
        queue->port = port;
        queue->nr_cmds = 0;
        spin_lock_init(&queue->state_lock);
        queue->state = NVMET_TCP_Q_CONNECTING;
        INIT_LIST_HEAD(&queue->free_list);
        init_llist_head(&queue->resp_list);
        INIT_LIST_HEAD(&queue->resp_send_list);

        queue->idx = ida_simple_get(&nvmet_tcp_queue_ida, 0, 0, GFP_KERNEL);
        if (queue->idx < 0) {
                ret = queue->idx;
                goto out_free_queue;
        }

        ret = nvmet_tcp_alloc_cmd(queue, &queue->connect);
        if (ret)
                goto out_ida_remove;

        ret = nvmet_sq_init(&queue->nvme_sq);
        if (ret)
                goto out_free_connect;

        port->last_cpu = cpumask_next_wrap(port->last_cpu,
                                cpu_online_mask, -1, false);
        queue->cpu = port->last_cpu;
        nvmet_prepare_receive_pdu(queue);

        mutex_lock(&nvmet_tcp_queue_mutex);
        list_add_tail(&queue->queue_list, &nvmet_tcp_queue_list);
        mutex_unlock(&nvmet_tcp_queue_mutex);

        ret = nvmet_tcp_set_queue_sock(queue);
        if (ret)
                goto out_destroy_sq;

        queue_work_on(queue->cpu, nvmet_tcp_wq, &queue->io_work);

        return 0;
out_destroy_sq:
        mutex_lock(&nvmet_tcp_queue_mutex);
        list_del_init(&queue->queue_list);
        mutex_unlock(&nvmet_tcp_queue_mutex);
        nvmet_sq_destroy(&queue->nvme_sq);
out_free_connect:
        nvmet_tcp_free_cmd(&queue->connect);
out_ida_remove:
        ida_simple_remove(&nvmet_tcp_queue_ida, queue->idx);
out_free_queue:
        kfree(queue);
        return ret;
}

static void nvmet_tcp_accept_work(struct work_struct *w)
{
        struct nvmet_tcp_port *port =
                container_of(w, struct nvmet_tcp_port, accept_work);
        struct socket *newsock;
        int ret;

        while (true) {
                ret = kernel_accept(port->sock, &newsock, O_NONBLOCK);
                if (ret < 0) {
                        if (ret != -EAGAIN)
                                pr_warn("failed to accept err=%d\n", ret);
                        return;
                }
                ret = nvmet_tcp_alloc_queue(port, newsock);
                if (ret) {
                        pr_err("failed to allocate queue\n");
                        sock_release(newsock);
                }
        }
}

static void nvmet_tcp_listen_data_ready(struct sock *sk)
{
        struct nvmet_tcp_port *port;

        read_lock_bh(&sk->sk_callback_lock);
        port = sk->sk_user_data;
        if (!port)
                goto out;

        if (sk->sk_state == TCP_LISTEN)
                schedule_work(&port->accept_work);
out:
        read_unlock_bh(&sk->sk_callback_lock);
}

static int nvmet_tcp_add_port(struct nvmet_port *nport)
{
        struct nvmet_tcp_port *port;
        __kernel_sa_family_t af;
        int opt, ret;

        port = kzalloc(sizeof(*port), GFP_KERNEL);
        if (!port)
                return -ENOMEM;

        switch (nport->disc_addr.adrfam) {
        case NVMF_ADDR_FAMILY_IP4:
                af = AF_INET;
                break;
        case NVMF_ADDR_FAMILY_IP6:
                af = AF_INET6;
                break;
        default:
                pr_err("address family %d not supported\n",
                                nport->disc_addr.adrfam);
                ret = -EINVAL;
                goto err_port;
        }

        ret = inet_pton_with_scope(&init_net, af, nport->disc_addr.traddr,
                        nport->disc_addr.trsvcid, &port->addr);
        if (ret) {
                pr_err("malformed ip/port passed: %s:%s\n",
                        nport->disc_addr.traddr, nport->disc_addr.trsvcid);
                goto err_port;
        }

        port->nport = nport;
        port->last_cpu = -1;
        INIT_WORK(&port->accept_work, nvmet_tcp_accept_work);
        if (port->nport->inline_data_size < 0)
                port->nport->inline_data_size = NVMET_TCP_DEF_INLINE_DATA_SIZE;

        ret = sock_create(port->addr.ss_family, SOCK_STREAM,
                                IPPROTO_TCP, &port->sock);
        if (ret) {
                pr_err("failed to create a socket\n");
                goto err_port;
        }

        port->sock->sk->sk_user_data = port;
        port->data_ready = port->sock->sk->sk_data_ready;
        port->sock->sk->sk_data_ready = nvmet_tcp_listen_data_ready;

        opt = 1;
        ret = kernel_setsockopt(port->sock, IPPROTO_TCP,
                        TCP_NODELAY, (char *)&opt, sizeof(opt));
        if (ret) {
                pr_err("failed to set TCP_NODELAY sock opt %d\n", ret);
                goto err_sock;
        }

        ret = kernel_setsockopt(port->sock, SOL_SOCKET, SO_REUSEADDR,
                        (char *)&opt, sizeof(opt));
        if (ret) {
                pr_err("failed to set SO_REUSEADDR sock opt %d\n", ret);
                goto err_sock;
        }

        if (so_priority > 0) {
                ret = kernel_setsockopt(port->sock, SOL_SOCKET, SO_PRIORITY,
                                (char *)&so_priority, sizeof(so_priority));
                if (ret) {
                        pr_err("failed to set SO_PRIORITY sock opt %d\n", ret);
                        goto err_sock;
                }
        }

        ret = kernel_bind(port->sock, (struct sockaddr *)&port->addr,
                        sizeof(port->addr));
        if (ret) {
                pr_err("failed to bind port socket %d\n", ret);
                goto err_sock;
        }

        ret = kernel_listen(port->sock, 128);
        if (ret) {
                pr_err("failed to listen %d on port sock\n", ret);
                goto err_sock;
        }

        nport->priv = port;
        pr_info("enabling port %d (%pISpc)\n",
                le16_to_cpu(nport->disc_addr.portid), &port->addr);

        return 0;

err_sock:
        sock_release(port->sock);
err_port:
        kfree(port);
        return ret;
}

static void nvmet_tcp_remove_port(struct nvmet_port *nport)
{
        struct nvmet_tcp_port *port = nport->priv;

        write_lock_bh(&port->sock->sk->sk_callback_lock);
        port->sock->sk->sk_data_ready = port->data_ready;
        port->sock->sk->sk_user_data = NULL;
        write_unlock_bh(&port->sock->sk->sk_callback_lock);
        cancel_work_sync(&port->accept_work);

        sock_release(port->sock);
        kfree(port);
}

static void nvmet_tcp_delete_ctrl(struct nvmet_ctrl *ctrl)
{
        struct nvmet_tcp_queue *queue;

        mutex_lock(&nvmet_tcp_queue_mutex);
        list_for_each_entry(queue, &nvmet_tcp_queue_list, queue_list)
                if (queue->nvme_sq.ctrl == ctrl)
                        kernel_sock_shutdown(queue->sock, SHUT_RDWR);
        mutex_unlock(&nvmet_tcp_queue_mutex);
}

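/*
 * Called when the connect command establishes the queue: size the
 * command pool at twice the negotiated SQ size and allocate it.
 */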
static u16 nvmet_tcp_install_queue(struct nvmet_sq *sq)
{
        struct nvmet_tcp_queue *queue =
                container_of(sq, struct nvmet_tcp_queue, nvme_sq);

        if (sq->qid == 0) {
                /* Let inflight controller teardown complete */
                flush_scheduled_work();
        }

        queue->nr_cmds = sq->size * 2;
        if (nvmet_tcp_alloc_cmds(queue))
                return NVME_SC_INTERNAL;
        return 0;
}

static void nvmet_tcp_disc_port_addr(struct nvmet_req *req,
                struct nvmet_port *nport, char *traddr)
{
        struct nvmet_tcp_port *port = nport->priv;

        if (inet_addr_is_any((struct sockaddr *)&port->addr)) {
                struct nvmet_tcp_cmd *cmd =
                        container_of(req, struct nvmet_tcp_cmd, req);
                struct nvmet_tcp_queue *queue = cmd->queue;

                sprintf(traddr, "%pISc", (struct sockaddr *)&queue->sockaddr);
        } else {
                memcpy(traddr, nport->disc_addr.traddr, NVMF_TRADDR_SIZE);
        }
}

static struct nvmet_fabrics_ops nvmet_tcp_ops = {
        .owner = THIS_MODULE,
        .type = NVMF_TRTYPE_TCP,
        .msdbd = 1,
        .has_keyed_sgls = 0,
        .add_port = nvmet_tcp_add_port,
        .remove_port = nvmet_tcp_remove_port,
        .queue_response = nvmet_tcp_queue_response,
        .delete_ctrl = nvmet_tcp_delete_ctrl,
        .install_queue = nvmet_tcp_install_queue,
        .disc_traddr = nvmet_tcp_disc_port_addr,
};

static int __init nvmet_tcp_init(void)
{
        int ret;

        nvmet_tcp_wq = alloc_workqueue("nvmet_tcp_wq", WQ_HIGHPRI, 0);
        if (!nvmet_tcp_wq)
                return -ENOMEM;

        ret = nvmet_register_transport(&nvmet_tcp_ops);
        if (ret)
                goto err;

        return 0;
err:
        destroy_workqueue(nvmet_tcp_wq);
        return ret;
}

static void __exit nvmet_tcp_exit(void)
{
        struct nvmet_tcp_queue *queue;

        nvmet_unregister_transport(&nvmet_tcp_ops);

        flush_scheduled_work();
        mutex_lock(&nvmet_tcp_queue_mutex);
        list_for_each_entry(queue, &nvmet_tcp_queue_list, queue_list)
                kernel_sock_shutdown(queue->sock, SHUT_RDWR);
        mutex_unlock(&nvmet_tcp_queue_mutex);
        flush_scheduled_work();

        destroy_workqueue(nvmet_tcp_wq);
}

module_init(nvmet_tcp_init);
module_exit(nvmet_tcp_exit);

MODULE_LICENSE("GPL v2");
MODULE_ALIAS("nvmet-transport-3"); /* 3 == NVMF_TRTYPE_TCP */