// SPDX-License-Identifier: GPL-2.0
/*
 * NVMe over Fabrics TCP target.
 * Copyright (c) 2018 Lightbits Labs. All rights reserved.
 */
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/module.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/err.h>
#include <linux/nvme-tcp.h>
#include <net/sock.h>
#include <net/tcp.h>
#include <linux/inet.h>
#include <linux/llist.h>
#include <crypto/hash.h>

#include "nvmet.h"

#define NVMET_TCP_DEF_INLINE_DATA_SIZE	(4 * PAGE_SIZE)

/* Define the socket priority to use for connections where it is desirable
 * that the NIC consider performing optimized packet processing or filtering.
 * A non-zero value is sufficient to indicate general consideration of any
 * possible optimization.  Making it a module param allows for alternative
 * values that may be unique for some NIC implementations.
 */
static int so_priority;
module_param(so_priority, int, 0644);
MODULE_PARM_DESC(so_priority, "nvmet tcp socket optimize priority");

#define NVMET_TCP_RECV_BUDGET		8
#define NVMET_TCP_SEND_BUDGET		8
#define NVMET_TCP_IO_WORK_BUDGET	64

enum nvmet_tcp_send_state {
	NVMET_TCP_SEND_DATA_PDU,
	NVMET_TCP_SEND_DATA,
	NVMET_TCP_SEND_R2T,
	NVMET_TCP_SEND_DDGST,
	NVMET_TCP_SEND_RESPONSE
};

enum nvmet_tcp_recv_state {
	NVMET_TCP_RECV_PDU,
	NVMET_TCP_RECV_DATA,
	NVMET_TCP_RECV_DDGST,
	NVMET_TCP_RECV_ERR,
};

enum {
	NVMET_TCP_F_INIT_FAILED = (1 << 0),
};

struct nvmet_tcp_cmd {
	struct nvmet_tcp_queue		*queue;
	struct nvmet_req		req;

	struct nvme_tcp_cmd_pdu		*cmd_pdu;
	struct nvme_tcp_rsp_pdu		*rsp_pdu;
	struct nvme_tcp_data_pdu	*data_pdu;
	struct nvme_tcp_r2t_pdu		*r2t_pdu;

	u32				rbytes_done;
	u32				wbytes_done;

	u32				pdu_len;
	u32				pdu_recv;
	int				sg_idx;
	int				nr_mapped;
	struct msghdr			recv_msg;
	struct kvec			*iov;
	u32				flags;

	struct list_head		entry;
	struct llist_node		lentry;

	/* send state */
	u32				offset;
	struct scatterlist		*cur_sg;
	enum nvmet_tcp_send_state	state;

	__le32				exp_ddgst;
	__le32				recv_ddgst;
};

enum nvmet_tcp_queue_state {
	NVMET_TCP_Q_CONNECTING,
	NVMET_TCP_Q_LIVE,
	NVMET_TCP_Q_DISCONNECTING,
};

struct nvmet_tcp_queue {
	struct socket		*sock;
	struct nvmet_tcp_port	*port;
	struct work_struct	io_work;
	struct nvmet_cq		nvme_cq;
	struct nvmet_sq		nvme_sq;

	/* send state */
	struct nvmet_tcp_cmd	*cmds;
	unsigned int		nr_cmds;
	struct list_head	free_list;
	struct llist_head	resp_list;
	struct list_head	resp_send_list;
	int			send_list_len;
	struct nvmet_tcp_cmd	*snd_cmd;

	/* recv state */
	int			offset;
	int			left;
	enum nvmet_tcp_recv_state rcv_state;
	struct nvmet_tcp_cmd	*cmd;
	union nvme_tcp_pdu	pdu;

	/* digest state */
	bool			hdr_digest;
	bool			data_digest;
	struct ahash_request	*snd_hash;
	struct ahash_request	*rcv_hash;

	spinlock_t		state_lock;
	enum nvmet_tcp_queue_state state;

	struct sockaddr_storage	sockaddr;
	struct sockaddr_storage	sockaddr_peer;
	struct work_struct	release_work;

	int			idx;
	struct list_head	queue_list;

	struct nvmet_tcp_cmd	connect;

	struct page_frag_cache	pf_cache;

	void (*data_ready)(struct sock *);
	void (*state_change)(struct sock *);
	void (*write_space)(struct sock *);
};

struct nvmet_tcp_port {
	struct socket		*sock;
	struct work_struct	accept_work;
	struct nvmet_port	*nport;
	struct sockaddr_storage addr;
	void (*data_ready)(struct sock *);
};

static DEFINE_IDA(nvmet_tcp_queue_ida);
static LIST_HEAD(nvmet_tcp_queue_list);
static DEFINE_MUTEX(nvmet_tcp_queue_mutex);

static struct workqueue_struct *nvmet_tcp_wq;
static const struct nvmet_fabrics_ops nvmet_tcp_ops;
static void nvmet_tcp_free_cmd(struct nvmet_tcp_cmd *c);
static void nvmet_tcp_finish_cmd(struct nvmet_tcp_cmd *cmd);

static inline u16 nvmet_tcp_cmd_tag(struct nvmet_tcp_queue *queue,
		struct nvmet_tcp_cmd *cmd)
{
	if (unlikely(!queue->nr_cmds)) {
		/* We didn't allocate cmds yet, send 0xffff */
		return USHRT_MAX;
	}

	return cmd - queue->cmds;
}

static inline bool nvmet_tcp_has_data_in(struct nvmet_tcp_cmd *cmd)
{
	return nvme_is_write(cmd->req.cmd) &&
		cmd->rbytes_done < cmd->req.transfer_len;
}

static inline bool nvmet_tcp_need_data_in(struct nvmet_tcp_cmd *cmd)
{
	return nvmet_tcp_has_data_in(cmd) && !cmd->req.cqe->status;
}

static inline bool nvmet_tcp_need_data_out(struct nvmet_tcp_cmd *cmd)
{
	return !nvme_is_write(cmd->req.cmd) &&
		cmd->req.transfer_len > 0 &&
		!cmd->req.cqe->status;
}

static inline bool nvmet_tcp_has_inline_data(struct nvmet_tcp_cmd *cmd)
{
	return nvme_is_write(cmd->req.cmd) && cmd->pdu_len &&
		!cmd->rbytes_done;
}

static inline struct nvmet_tcp_cmd *
nvmet_tcp_get_cmd(struct nvmet_tcp_queue *queue)
{
	struct nvmet_tcp_cmd *cmd;

	cmd = list_first_entry_or_null(&queue->free_list,
				struct nvmet_tcp_cmd, entry);
	if (!cmd)
		return NULL;
	list_del_init(&cmd->entry);

	cmd->rbytes_done = cmd->wbytes_done = 0;
	cmd->pdu_len = 0;
	cmd->pdu_recv = 0;
	cmd->iov = NULL;
	cmd->flags = 0;
	return cmd;
}

static inline void nvmet_tcp_put_cmd(struct nvmet_tcp_cmd *cmd)
{
	if (unlikely(cmd == &cmd->queue->connect))
		return;

	list_add_tail(&cmd->entry, &cmd->queue->free_list);
}

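/*
 * io_work is always kicked onto sk_incoming_cpu, i.e. the CPU on which the
 * network stack delivered the last packet for this connection, which keeps
 * queue processing cache-local to the softirq that feeds it.
 */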
static inline int queue_cpu(struct nvmet_tcp_queue *queue)
{
	return queue->sock->sk->sk_incoming_cpu;
}

static inline u8 nvmet_tcp_hdgst_len(struct nvmet_tcp_queue *queue)
{
	return queue->hdr_digest ? NVME_TCP_DIGEST_LENGTH : 0;
}

static inline u8 nvmet_tcp_ddgst_len(struct nvmet_tcp_queue *queue)
{
	return queue->data_digest ? NVME_TCP_DIGEST_LENGTH : 0;
}

static inline void nvmet_tcp_hdgst(struct ahash_request *hash,
		void *pdu, size_t len)
{
	struct scatterlist sg;

	sg_init_one(&sg, pdu, len);
	ahash_request_set_crypt(hash, &sg, pdu + len, len);
	crypto_ahash_digest(hash);
}

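/*
 * nvmet_tcp_hdgst() stores the computed crc32c at pdu + len, i.e. directly
 * over the received digest that trails the header, so the wire value must be
 * saved in recv_digest before the digest is recomputed in place.
 */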
static int nvmet_tcp_verify_hdgst(struct nvmet_tcp_queue *queue,
	void *pdu, size_t len)
{
	struct nvme_tcp_hdr *hdr = pdu;
	__le32 recv_digest;
	__le32 exp_digest;

	if (unlikely(!(hdr->flags & NVME_TCP_F_HDGST))) {
		pr_err("queue %d: header digest enabled but no header digest\n",
			queue->idx);
		return -EPROTO;
	}

	recv_digest = *(__le32 *)(pdu + hdr->hlen);
	nvmet_tcp_hdgst(queue->rcv_hash, pdu, len);
	exp_digest = *(__le32 *)(pdu + hdr->hlen);
	if (recv_digest != exp_digest) {
		pr_err("queue %d: header digest error: recv %#x expected %#x\n",
			queue->idx, le32_to_cpu(recv_digest),
			le32_to_cpu(exp_digest));
		return -EPROTO;
	}

	return 0;
}

static int nvmet_tcp_check_ddgst(struct nvmet_tcp_queue *queue, void *pdu)
{
	struct nvme_tcp_hdr *hdr = pdu;
	u8 digest_len = nvmet_tcp_hdgst_len(queue);
	u32 len;

	len = le32_to_cpu(hdr->plen) - hdr->hlen -
		(hdr->flags & NVME_TCP_F_HDGST ? digest_len : 0);

	if (unlikely(len && !(hdr->flags & NVME_TCP_F_DDGST))) {
		pr_err("queue %d: data digest flag is cleared\n", queue->idx);
		return -EPROTO;
	}

	return 0;
}

static void nvmet_tcp_unmap_pdu_iovec(struct nvmet_tcp_cmd *cmd)
{
	struct scatterlist *sg;
	int i;

	sg = &cmd->req.sg[cmd->sg_idx];

	for (i = 0; i < cmd->nr_mapped; i++)
		kunmap(sg_page(&sg[i]));
}

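/*
 * Build a kvec over the command's kmap'ed scatterlist pages, covering
 * pdu_len bytes starting at the current receive offset, so inline or H2C
 * data can be received straight into the request's buffers.
 */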
static void nvmet_tcp_map_pdu_iovec(struct nvmet_tcp_cmd *cmd)
{
	struct kvec *iov = cmd->iov;
	struct scatterlist *sg;
	u32 length, offset, sg_offset;

	length = cmd->pdu_len;
	cmd->nr_mapped = DIV_ROUND_UP(length, PAGE_SIZE);
	offset = cmd->rbytes_done;
	cmd->sg_idx = offset / PAGE_SIZE;
	sg_offset = offset % PAGE_SIZE;
	sg = &cmd->req.sg[cmd->sg_idx];

	while (length) {
		u32 iov_len = min_t(u32, length, sg->length - sg_offset);

		iov->iov_base = kmap(sg_page(sg)) + sg->offset + sg_offset;
		iov->iov_len = iov_len;

		length -= iov_len;
		sg = sg_next(sg);
		iov++;
		/* only the first element may start at an intra-page offset */
		sg_offset = 0;
	}

	iov_iter_kvec(&cmd->recv_msg.msg_iter, READ, cmd->iov,
		cmd->nr_mapped, cmd->pdu_len);
}

static void nvmet_tcp_fatal_error(struct nvmet_tcp_queue *queue)
{
	queue->rcv_state = NVMET_TCP_RECV_ERR;
	if (queue->nvme_sq.ctrl)
		nvmet_ctrl_fatal_error(queue->nvme_sq.ctrl);
	else
		kernel_sock_shutdown(queue->sock, SHUT_RDWR);
}

static void nvmet_tcp_socket_error(struct nvmet_tcp_queue *queue, int status)
{
	if (status == -EPIPE || status == -ECONNRESET)
		kernel_sock_shutdown(queue->sock, SHUT_RDWR);
	else
		nvmet_tcp_fatal_error(queue);
}

static int nvmet_tcp_map_data(struct nvmet_tcp_cmd *cmd)
{
	struct nvme_sgl_desc *sgl = &cmd->req.cmd->common.dptr.sgl;
	u32 len = le32_to_cpu(sgl->length);

	if (!len)
		return 0;

	if (sgl->type == ((NVME_SGL_FMT_DATA_DESC << 4) |
			  NVME_SGL_FMT_OFFSET)) {
		if (!nvme_is_write(cmd->req.cmd))
			return NVME_SC_INVALID_FIELD | NVME_SC_DNR;

		if (len > cmd->req.port->inline_data_size)
			return NVME_SC_SGL_INVALID_OFFSET | NVME_SC_DNR;
		cmd->pdu_len = len;
	}
	cmd->req.transfer_len += len;

	cmd->req.sg = sgl_alloc(len, GFP_KERNEL, &cmd->req.sg_cnt);
	if (!cmd->req.sg)
		return NVME_SC_INTERNAL;
	cmd->cur_sg = cmd->req.sg;

	if (nvmet_tcp_has_data_in(cmd)) {
		cmd->iov = kmalloc_array(cmd->req.sg_cnt,
				sizeof(*cmd->iov), GFP_KERNEL);
		if (!cmd->iov)
			goto err;
	}

	return 0;
err:
	sgl_free(cmd->req.sg);
	return NVME_SC_INTERNAL;
}

static void nvmet_tcp_ddgst(struct ahash_request *hash,
		struct nvmet_tcp_cmd *cmd)
{
	ahash_request_set_crypt(hash, cmd->req.sg,
		(void *)&cmd->exp_ddgst, cmd->req.transfer_len);
	crypto_ahash_digest(hash);
}

static void nvmet_setup_c2h_data_pdu(struct nvmet_tcp_cmd *cmd)
{
	struct nvme_tcp_data_pdu *pdu = cmd->data_pdu;
	struct nvmet_tcp_queue *queue = cmd->queue;
	u8 hdgst = nvmet_tcp_hdgst_len(cmd->queue);
	u8 ddgst = nvmet_tcp_ddgst_len(cmd->queue);

	cmd->offset = 0;
	cmd->state = NVMET_TCP_SEND_DATA_PDU;

	pdu->hdr.type = nvme_tcp_c2h_data;
	pdu->hdr.flags = NVME_TCP_F_DATA_LAST | (queue->nvme_sq.sqhd_disabled ?
						NVME_TCP_F_DATA_SUCCESS : 0);
	pdu->hdr.hlen = sizeof(*pdu);
	pdu->hdr.pdo = pdu->hdr.hlen + hdgst;
	pdu->hdr.plen =
		cpu_to_le32(pdu->hdr.hlen + hdgst +
				cmd->req.transfer_len + ddgst);
	pdu->command_id = cmd->req.cqe->command_id;
	pdu->data_length = cpu_to_le32(cmd->req.transfer_len);
	pdu->data_offset = cpu_to_le32(cmd->wbytes_done);

	if (queue->data_digest) {
		pdu->hdr.flags |= NVME_TCP_F_DDGST;
		nvmet_tcp_ddgst(queue->snd_hash, cmd);
	}

	if (cmd->queue->hdr_digest) {
		pdu->hdr.flags |= NVME_TCP_F_HDGST;
		nvmet_tcp_hdgst(queue->snd_hash, pdu, sizeof(*pdu));
	}
}

static void nvmet_setup_r2t_pdu(struct nvmet_tcp_cmd *cmd)
{
	struct nvme_tcp_r2t_pdu *pdu = cmd->r2t_pdu;
	struct nvmet_tcp_queue *queue = cmd->queue;
	u8 hdgst = nvmet_tcp_hdgst_len(cmd->queue);

	cmd->offset = 0;
	cmd->state = NVMET_TCP_SEND_R2T;

	pdu->hdr.type = nvme_tcp_r2t;
	pdu->hdr.flags = 0;
	pdu->hdr.hlen = sizeof(*pdu);
	pdu->hdr.pdo = 0;
	pdu->hdr.plen = cpu_to_le32(pdu->hdr.hlen + hdgst);

	pdu->command_id = cmd->req.cmd->common.command_id;
	pdu->ttag = nvmet_tcp_cmd_tag(cmd->queue, cmd);
	pdu->r2t_length = cpu_to_le32(cmd->req.transfer_len - cmd->rbytes_done);
	pdu->r2t_offset = cpu_to_le32(cmd->rbytes_done);
	if (cmd->queue->hdr_digest) {
		pdu->hdr.flags |= NVME_TCP_F_HDGST;
		nvmet_tcp_hdgst(queue->snd_hash, pdu, sizeof(*pdu));
	}
}

static void nvmet_setup_response_pdu(struct nvmet_tcp_cmd *cmd)
{
	struct nvme_tcp_rsp_pdu *pdu = cmd->rsp_pdu;
	struct nvmet_tcp_queue *queue = cmd->queue;
	u8 hdgst = nvmet_tcp_hdgst_len(cmd->queue);

	cmd->offset = 0;
	cmd->state = NVMET_TCP_SEND_RESPONSE;

	pdu->hdr.type = nvme_tcp_rsp;
	pdu->hdr.flags = 0;
	pdu->hdr.hlen = sizeof(*pdu);
	pdu->hdr.pdo = 0;
	pdu->hdr.plen = cpu_to_le32(pdu->hdr.hlen + hdgst);
	if (cmd->queue->hdr_digest) {
		pdu->hdr.flags |= NVME_TCP_F_HDGST;
		nvmet_tcp_hdgst(queue->snd_hash, pdu, sizeof(*pdu));
	}
}

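/*
 * Completions are queued on the lockless resp_list llist from arbitrary
 * contexts and drained here, from io_work, into the ordered resp_send_list
 * that the send path consumes.
 */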
static void nvmet_tcp_process_resp_list(struct nvmet_tcp_queue *queue)
{
	struct llist_node *node;
	struct nvmet_tcp_cmd *cmd;

	for (node = llist_del_all(&queue->resp_list); node; node = node->next) {
		cmd = llist_entry(node, struct nvmet_tcp_cmd, lentry);
		list_add(&cmd->entry, &queue->resp_send_list);
		queue->send_list_len++;
	}
}

static struct nvmet_tcp_cmd *nvmet_tcp_fetch_cmd(struct nvmet_tcp_queue *queue)
{
	queue->snd_cmd = list_first_entry_or_null(&queue->resp_send_list,
				struct nvmet_tcp_cmd, entry);
	if (!queue->snd_cmd) {
		nvmet_tcp_process_resp_list(queue);
		queue->snd_cmd =
			list_first_entry_or_null(&queue->resp_send_list,
					struct nvmet_tcp_cmd, entry);
		if (unlikely(!queue->snd_cmd))
			return NULL;
	}

	list_del_init(&queue->snd_cmd->entry);
	queue->send_list_len--;

	if (nvmet_tcp_need_data_out(queue->snd_cmd))
		nvmet_setup_c2h_data_pdu(queue->snd_cmd);
	else if (nvmet_tcp_need_data_in(queue->snd_cmd))
		nvmet_setup_r2t_pdu(queue->snd_cmd);
	else
		nvmet_setup_response_pdu(queue->snd_cmd);

	return queue->snd_cmd;
}

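/*
 * ->queue_response() entry point of nvmet_tcp_ops: may be called from any
 * context, so it only publishes the command on the llist and kicks io_work
 * on the queue's incoming CPU.
 */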
static void nvmet_tcp_queue_response(struct nvmet_req *req)
{
	struct nvmet_tcp_cmd *cmd =
		container_of(req, struct nvmet_tcp_cmd, req);
	struct nvmet_tcp_queue *queue = cmd->queue;

	llist_add(&cmd->lentry, &queue->resp_list);
	queue_work_on(queue_cpu(queue), nvmet_tcp_wq, &cmd->queue->io_work);
}

static int nvmet_try_send_data_pdu(struct nvmet_tcp_cmd *cmd)
{
	u8 hdgst = nvmet_tcp_hdgst_len(cmd->queue);
	int left = sizeof(*cmd->data_pdu) - cmd->offset + hdgst;
	int ret;

	ret = kernel_sendpage(cmd->queue->sock, virt_to_page(cmd->data_pdu),
			offset_in_page(cmd->data_pdu) + cmd->offset,
			left, MSG_DONTWAIT | MSG_MORE | MSG_SENDPAGE_NOTLAST);
	if (ret <= 0)
		return ret;

	cmd->offset += ret;
	left -= ret;

	if (left)
		return -EAGAIN;

	cmd->state = NVMET_TCP_SEND_DATA;
	cmd->offset = 0;
	return 1;
}

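/*
 * Keep MSG_MORE | MSG_SENDPAGE_NOTLAST set while more bytes of this transfer
 * follow (data digest, response capsule, or further commands in the batch);
 * only the very last fragment may push the segment out immediately.
 */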
static int nvmet_try_send_data(struct nvmet_tcp_cmd *cmd, bool last_in_batch)
{
	struct nvmet_tcp_queue *queue = cmd->queue;
	int ret;

	while (cmd->cur_sg) {
		struct page *page = sg_page(cmd->cur_sg);
		u32 left = cmd->cur_sg->length - cmd->offset;
		int flags = MSG_DONTWAIT;

		if ((!last_in_batch && cmd->queue->send_list_len) ||
		    cmd->wbytes_done + left < cmd->req.transfer_len ||
		    queue->data_digest || !queue->nvme_sq.sqhd_disabled)
			flags |= MSG_MORE | MSG_SENDPAGE_NOTLAST;

		ret = kernel_sendpage(cmd->queue->sock, page, cmd->offset,
					left, flags);
		if (ret <= 0)
			return ret;

		cmd->offset += ret;
		cmd->wbytes_done += ret;

		/* Done with sg? */
		if (cmd->offset == cmd->cur_sg->length) {
			cmd->cur_sg = sg_next(cmd->cur_sg);
			cmd->offset = 0;
		}
	}

	if (queue->data_digest) {
		cmd->state = NVMET_TCP_SEND_DDGST;
		cmd->offset = 0;
	} else {
		if (queue->nvme_sq.sqhd_disabled) {
			cmd->queue->snd_cmd = NULL;
			nvmet_tcp_put_cmd(cmd);
		} else {
			nvmet_setup_response_pdu(cmd);
		}
	}

	if (queue->nvme_sq.sqhd_disabled) {
		kfree(cmd->iov);
		sgl_free(cmd->req.sg);
	}

	return 1;
}

static int nvmet_try_send_response(struct nvmet_tcp_cmd *cmd,
		bool last_in_batch)
{
	u8 hdgst = nvmet_tcp_hdgst_len(cmd->queue);
	int left = sizeof(*cmd->rsp_pdu) - cmd->offset + hdgst;
	int flags = MSG_DONTWAIT;
	int ret;

	if (!last_in_batch && cmd->queue->send_list_len)
		flags |= MSG_MORE | MSG_SENDPAGE_NOTLAST;
	else
		flags |= MSG_EOR;

	ret = kernel_sendpage(cmd->queue->sock, virt_to_page(cmd->rsp_pdu),
		offset_in_page(cmd->rsp_pdu) + cmd->offset, left, flags);
	if (ret <= 0)
		return ret;
	cmd->offset += ret;
	left -= ret;

	if (left)
		return -EAGAIN;

	kfree(cmd->iov);
	sgl_free(cmd->req.sg);
	cmd->queue->snd_cmd = NULL;
	nvmet_tcp_put_cmd(cmd);
	return 1;
}

static int nvmet_try_send_r2t(struct nvmet_tcp_cmd *cmd, bool last_in_batch)
{
	u8 hdgst = nvmet_tcp_hdgst_len(cmd->queue);
	int left = sizeof(*cmd->r2t_pdu) - cmd->offset + hdgst;
	int flags = MSG_DONTWAIT;
	int ret;

	if (!last_in_batch && cmd->queue->send_list_len)
		flags |= MSG_MORE | MSG_SENDPAGE_NOTLAST;
	else
		flags |= MSG_EOR;

	ret = kernel_sendpage(cmd->queue->sock, virt_to_page(cmd->r2t_pdu),
		offset_in_page(cmd->r2t_pdu) + cmd->offset, left, flags);
	if (ret <= 0)
		return ret;
	cmd->offset += ret;
	left -= ret;

	if (left)
		return -EAGAIN;

	cmd->queue->snd_cmd = NULL;
	return 1;
}

static int nvmet_try_send_ddgst(struct nvmet_tcp_cmd *cmd, bool last_in_batch)
{
	struct nvmet_tcp_queue *queue = cmd->queue;
	struct msghdr msg = { .msg_flags = MSG_DONTWAIT };
	struct kvec iov = {
		.iov_base = (u8 *)&cmd->exp_ddgst + cmd->offset,
		.iov_len = NVME_TCP_DIGEST_LENGTH - cmd->offset
	};
	int ret;

	if (!last_in_batch && cmd->queue->send_list_len)
		msg.msg_flags |= MSG_MORE;
	else
		msg.msg_flags |= MSG_EOR;

	ret = kernel_sendmsg(queue->sock, &msg, &iov, 1, iov.iov_len);
	if (unlikely(ret <= 0))
		return ret;

	cmd->offset += ret;

	if (queue->nvme_sq.sqhd_disabled) {
		cmd->queue->snd_cmd = NULL;
		nvmet_tcp_put_cmd(cmd);
	} else {
		nvmet_setup_response_pdu(cmd);
	}
	return 1;
}

static int nvmet_tcp_try_send_one(struct nvmet_tcp_queue *queue,
		bool last_in_batch)
{
	struct nvmet_tcp_cmd *cmd = queue->snd_cmd;
	int ret = 0;

	if (!cmd || queue->state == NVMET_TCP_Q_DISCONNECTING) {
		cmd = nvmet_tcp_fetch_cmd(queue);
		if (unlikely(!cmd))
			return 0;
	}

	if (cmd->state == NVMET_TCP_SEND_DATA_PDU) {
		ret = nvmet_try_send_data_pdu(cmd);
		if (ret <= 0)
			goto done_send;
	}

	if (cmd->state == NVMET_TCP_SEND_DATA) {
		ret = nvmet_try_send_data(cmd, last_in_batch);
		if (ret <= 0)
			goto done_send;
	}

	if (cmd->state == NVMET_TCP_SEND_DDGST) {
		ret = nvmet_try_send_ddgst(cmd, last_in_batch);
		if (ret <= 0)
			goto done_send;
	}

	if (cmd->state == NVMET_TCP_SEND_R2T) {
		ret = nvmet_try_send_r2t(cmd, last_in_batch);
		if (ret <= 0)
			goto done_send;
	}

	if (cmd->state == NVMET_TCP_SEND_RESPONSE)
		ret = nvmet_try_send_response(cmd, last_in_batch);

done_send:
	if (ret < 0) {
		if (ret == -EAGAIN)
			return 0;
		return ret;
	}

	return 1;
}

static int nvmet_tcp_try_send(struct nvmet_tcp_queue *queue,
		int budget, int *sends)
{
	int i, ret = 0;

	for (i = 0; i < budget; i++) {
		ret = nvmet_tcp_try_send_one(queue, i == budget - 1);
		if (unlikely(ret < 0)) {
			nvmet_tcp_socket_error(queue, ret);
			goto done;
		} else if (ret == 0) {
			break;
		}
		(*sends)++;
	}
done:
	return ret;
}

static void nvmet_prepare_receive_pdu(struct nvmet_tcp_queue *queue)
{
	queue->offset = 0;
	queue->left = sizeof(struct nvme_tcp_hdr);
	queue->cmd = NULL;
	queue->rcv_state = NVMET_TCP_RECV_PDU;
}

static void nvmet_tcp_free_crypto(struct nvmet_tcp_queue *queue)
{
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(queue->rcv_hash);

	ahash_request_free(queue->rcv_hash);
	ahash_request_free(queue->snd_hash);
	crypto_free_ahash(tfm);
}

static int nvmet_tcp_alloc_crypto(struct nvmet_tcp_queue *queue)
{
	struct crypto_ahash *tfm;

	tfm = crypto_alloc_ahash("crc32c", 0, CRYPTO_ALG_ASYNC);
	if (IS_ERR(tfm))
		return PTR_ERR(tfm);

	queue->snd_hash = ahash_request_alloc(tfm, GFP_KERNEL);
	if (!queue->snd_hash)
		goto free_tfm;
	ahash_request_set_callback(queue->snd_hash, 0, NULL, NULL);

	queue->rcv_hash = ahash_request_alloc(tfm, GFP_KERNEL);
	if (!queue->rcv_hash)
		goto free_snd_hash;
	ahash_request_set_callback(queue->rcv_hash, 0, NULL, NULL);

	return 0;
free_snd_hash:
	ahash_request_free(queue->snd_hash);
free_tfm:
	crypto_free_ahash(tfm);
	return -ENOMEM;
}

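/*
 * Handle the NVMe/TCP Initialize Connection Request: validate the ICReq,
 * negotiate header/data digests, and answer with an ICResp before the queue
 * transitions to NVMET_TCP_Q_LIVE.
 */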
static int nvmet_tcp_handle_icreq(struct nvmet_tcp_queue *queue)
{
	struct nvme_tcp_icreq_pdu *icreq = &queue->pdu.icreq;
	struct nvme_tcp_icresp_pdu *icresp = &queue->pdu.icresp;
	struct msghdr msg = {};
	struct kvec iov;
	int ret;

	if (le32_to_cpu(icreq->hdr.plen) != sizeof(struct nvme_tcp_icreq_pdu)) {
		pr_err("bad nvme-tcp pdu length (%d)\n",
			le32_to_cpu(icreq->hdr.plen));
		nvmet_tcp_fatal_error(queue);
		return -EPROTO;
	}

	if (icreq->pfv != NVME_TCP_PFV_1_0) {
		pr_err("queue %d: bad pfv %d\n", queue->idx, icreq->pfv);
		return -EPROTO;
	}

	if (icreq->hpda != 0) {
		pr_err("queue %d: unsupported hpda %d\n", queue->idx,
			icreq->hpda);
		return -EPROTO;
	}

	queue->hdr_digest = !!(icreq->digest & NVME_TCP_HDR_DIGEST_ENABLE);
	queue->data_digest = !!(icreq->digest & NVME_TCP_DATA_DIGEST_ENABLE);
	if (queue->hdr_digest || queue->data_digest) {
		ret = nvmet_tcp_alloc_crypto(queue);
		if (ret)
			return ret;
	}

	memset(icresp, 0, sizeof(*icresp));
	icresp->hdr.type = nvme_tcp_icresp;
	icresp->hdr.hlen = sizeof(*icresp);
	icresp->hdr.pdo = 0;
	icresp->hdr.plen = cpu_to_le32(icresp->hdr.hlen);
	icresp->pfv = cpu_to_le16(NVME_TCP_PFV_1_0);
	icresp->maxdata = cpu_to_le32(0x400000); /* 4M arbitrary limit */
	icresp->cpda = 0;
	if (queue->hdr_digest)
		icresp->digest |= NVME_TCP_HDR_DIGEST_ENABLE;
	if (queue->data_digest)
		icresp->digest |= NVME_TCP_DATA_DIGEST_ENABLE;

	iov.iov_base = icresp;
	iov.iov_len = sizeof(*icresp);
	ret = kernel_sendmsg(queue->sock, &msg, &iov, 1, iov.iov_len);
	if (ret < 0)
		goto free_crypto;

	queue->state = NVMET_TCP_Q_LIVE;
	nvmet_prepare_receive_pdu(queue);
	return 0;
free_crypto:
	if (queue->hdr_digest || queue->data_digest)
		nvmet_tcp_free_crypto(queue);
	return ret;
}

static void nvmet_tcp_handle_req_failure(struct nvmet_tcp_queue *queue,
		struct nvmet_tcp_cmd *cmd, struct nvmet_req *req)
{
	size_t data_len = le32_to_cpu(req->cmd->common.dptr.sgl.length);
	int ret;

	if (!nvme_is_write(cmd->req.cmd) ||
	    data_len > cmd->req.port->inline_data_size) {
		nvmet_prepare_receive_pdu(queue);
		return;
	}

	ret = nvmet_tcp_map_data(cmd);
	if (unlikely(ret)) {
		pr_err("queue %d: failed to map data\n", queue->idx);
		nvmet_tcp_fatal_error(queue);
		return;
	}

	queue->rcv_state = NVMET_TCP_RECV_DATA;
	nvmet_tcp_map_pdu_iovec(cmd);
	cmd->flags |= NVMET_TCP_F_INIT_FAILED;
}

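/*
 * An H2C data PDU carries the transfer tag we handed out in the R2T, which
 * indexes straight into queue->cmds (or refers to the connect command before
 * the command array exists).
 */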
static int nvmet_tcp_handle_h2c_data_pdu(struct nvmet_tcp_queue *queue)
{
	struct nvme_tcp_data_pdu *data = &queue->pdu.data;
	struct nvmet_tcp_cmd *cmd;

	if (likely(queue->nr_cmds))
		cmd = &queue->cmds[data->ttag];
	else
		cmd = &queue->connect;

	if (le32_to_cpu(data->data_offset) != cmd->rbytes_done) {
		pr_err("ttag %u unexpected data offset %u (expected %u)\n",
			data->ttag, le32_to_cpu(data->data_offset),
			cmd->rbytes_done);
		/* FIXME: use path and transport errors */
		nvmet_req_complete(&cmd->req,
			NVME_SC_INVALID_FIELD | NVME_SC_DNR);
		return -EPROTO;
	}

	cmd->pdu_len = le32_to_cpu(data->data_length);
	cmd->pdu_recv = 0;
	nvmet_tcp_map_pdu_iovec(cmd);
	queue->cmd = cmd;
	queue->rcv_state = NVMET_TCP_RECV_DATA;

	return 0;
}

static int nvmet_tcp_done_recv_pdu(struct nvmet_tcp_queue *queue)
{
	struct nvme_tcp_hdr *hdr = &queue->pdu.cmd.hdr;
	struct nvme_command *nvme_cmd = &queue->pdu.cmd.cmd;
	struct nvmet_req *req;
	int ret;

	if (unlikely(queue->state == NVMET_TCP_Q_CONNECTING)) {
		if (hdr->type != nvme_tcp_icreq) {
			pr_err("unexpected pdu type (%d) before icreq\n",
				hdr->type);
			nvmet_tcp_fatal_error(queue);
			return -EPROTO;
		}
		return nvmet_tcp_handle_icreq(queue);
	}

	if (hdr->type == nvme_tcp_h2c_data) {
		ret = nvmet_tcp_handle_h2c_data_pdu(queue);
		if (unlikely(ret))
			return ret;
		return 0;
	}

	queue->cmd = nvmet_tcp_get_cmd(queue);
	if (unlikely(!queue->cmd)) {
		/* This should never happen */
		pr_err("queue %d: out of commands (%d) send_list_len: %d, opcode: %d",
			queue->idx, queue->nr_cmds, queue->send_list_len,
			nvme_cmd->common.opcode);
		nvmet_tcp_fatal_error(queue);
		return -ENOMEM;
	}

	req = &queue->cmd->req;
	memcpy(req->cmd, nvme_cmd, sizeof(*nvme_cmd));

	if (unlikely(!nvmet_req_init(req, &queue->nvme_cq,
			&queue->nvme_sq, &nvmet_tcp_ops))) {
		pr_err("failed cmd %p id %d opcode %d, data_len: %d\n",
			req->cmd, req->cmd->common.command_id,
			req->cmd->common.opcode,
			le32_to_cpu(req->cmd->common.dptr.sgl.length));

		nvmet_tcp_handle_req_failure(queue, queue->cmd, req);
		return -EAGAIN;
	}

	ret = nvmet_tcp_map_data(queue->cmd);
	if (unlikely(ret)) {
		pr_err("queue %d: failed to map data\n", queue->idx);
		if (nvmet_tcp_has_inline_data(queue->cmd))
			nvmet_tcp_fatal_error(queue);
		else
			nvmet_req_complete(req, ret);
		ret = -EAGAIN;
		goto out;
	}

	if (nvmet_tcp_need_data_in(queue->cmd)) {
		if (nvmet_tcp_has_inline_data(queue->cmd)) {
			queue->rcv_state = NVMET_TCP_RECV_DATA;
			nvmet_tcp_map_pdu_iovec(queue->cmd);
			return 0;
		}
		/* send back R2T */
		nvmet_tcp_queue_response(&queue->cmd->req);
		goto out;
	}

	queue->cmd->req.execute(&queue->cmd->req);
out:
	nvmet_prepare_receive_pdu(queue);
	return ret;
}

static const u8 nvme_tcp_pdu_sizes[] = {
	[nvme_tcp_icreq]	= sizeof(struct nvme_tcp_icreq_pdu),
	[nvme_tcp_cmd]		= sizeof(struct nvme_tcp_cmd_pdu),
	[nvme_tcp_h2c_data]	= sizeof(struct nvme_tcp_data_pdu),
};

static inline u8 nvmet_tcp_pdu_size(u8 type)
{
	size_t idx = type;

	return (idx < ARRAY_SIZE(nvme_tcp_pdu_sizes) &&
		nvme_tcp_pdu_sizes[idx]) ?
			nvme_tcp_pdu_sizes[idx] : 0;
}

static inline bool nvmet_tcp_pdu_valid(u8 type)
{
	switch (type) {
	case nvme_tcp_icreq:
	case nvme_tcp_cmd:
	case nvme_tcp_h2c_data:
		/* fallthru */
		return true;
	}

	return false;
}

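/*
 * PDU reception is done in two steps: first the common 8-byte nvme_tcp_hdr,
 * which tells us the type and hlen, then the remainder of the header (plus
 * header digest, if negotiated) before the PDU is validated and dispatched.
 */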
static int nvmet_tcp_try_recv_pdu(struct nvmet_tcp_queue *queue)
{
	struct nvme_tcp_hdr *hdr = &queue->pdu.cmd.hdr;
	int len;
	struct kvec iov;
	struct msghdr msg = { .msg_flags = MSG_DONTWAIT };

recv:
	iov.iov_base = (void *)&queue->pdu + queue->offset;
	iov.iov_len = queue->left;
	len = kernel_recvmsg(queue->sock, &msg, &iov, 1,
			iov.iov_len, msg.msg_flags);
	if (unlikely(len < 0))
		return len;

	queue->offset += len;
	queue->left -= len;
	if (queue->left)
		return -EAGAIN;

	if (queue->offset == sizeof(struct nvme_tcp_hdr)) {
		u8 hdgst = nvmet_tcp_hdgst_len(queue);

		if (unlikely(!nvmet_tcp_pdu_valid(hdr->type))) {
			pr_err("unexpected pdu type %d\n", hdr->type);
			nvmet_tcp_fatal_error(queue);
			return -EIO;
		}

		if (unlikely(hdr->hlen != nvmet_tcp_pdu_size(hdr->type))) {
			pr_err("pdu %d bad hlen %d\n", hdr->type, hdr->hlen);
			return -EIO;
		}

		queue->left = hdr->hlen - queue->offset + hdgst;
		goto recv;
	}

	if (queue->hdr_digest &&
	    nvmet_tcp_verify_hdgst(queue, &queue->pdu, hdr->hlen)) {
		nvmet_tcp_fatal_error(queue); /* fatal */
		return -EPROTO;
	}

	if (queue->data_digest &&
	    nvmet_tcp_check_ddgst(queue, &queue->pdu)) {
		nvmet_tcp_fatal_error(queue); /* fatal */
		return -EPROTO;
	}

	return nvmet_tcp_done_recv_pdu(queue);
}

static void nvmet_tcp_prep_recv_ddgst(struct nvmet_tcp_cmd *cmd)
{
	struct nvmet_tcp_queue *queue = cmd->queue;

	nvmet_tcp_ddgst(queue->rcv_hash, cmd);
	queue->offset = 0;
	queue->left = NVME_TCP_DIGEST_LENGTH;
	queue->rcv_state = NVMET_TCP_RECV_DDGST;
}

static int nvmet_tcp_try_recv_data(struct nvmet_tcp_queue *queue)
{
	struct nvmet_tcp_cmd *cmd = queue->cmd;
	int ret;

	while (msg_data_left(&cmd->recv_msg)) {
		ret = sock_recvmsg(cmd->queue->sock, &cmd->recv_msg,
			cmd->recv_msg.msg_flags);
		if (ret <= 0)
			return ret;

		cmd->pdu_recv += ret;
		cmd->rbytes_done += ret;
	}

	nvmet_tcp_unmap_pdu_iovec(cmd);

	if (!(cmd->flags & NVMET_TCP_F_INIT_FAILED) &&
	    cmd->rbytes_done == cmd->req.transfer_len) {
		if (queue->data_digest) {
			nvmet_tcp_prep_recv_ddgst(cmd);
			return 0;
		}
		cmd->req.execute(&cmd->req);
	}

	nvmet_prepare_receive_pdu(queue);
	return 0;
}

static int nvmet_tcp_try_recv_ddgst(struct nvmet_tcp_queue *queue)
{
	struct nvmet_tcp_cmd *cmd = queue->cmd;
	int ret;
	struct msghdr msg = { .msg_flags = MSG_DONTWAIT };
	struct kvec iov = {
		.iov_base = (void *)&cmd->recv_ddgst + queue->offset,
		.iov_len = queue->left
	};

	ret = kernel_recvmsg(queue->sock, &msg, &iov, 1,
			iov.iov_len, msg.msg_flags);
	if (unlikely(ret < 0))
		return ret;

	queue->offset += ret;
	queue->left -= ret;
	if (queue->left)
		return -EAGAIN;

	if (queue->data_digest && cmd->exp_ddgst != cmd->recv_ddgst) {
		pr_err("queue %d: cmd %d pdu (%d) data digest error: recv %#x expected %#x\n",
			queue->idx, cmd->req.cmd->common.command_id,
			queue->pdu.cmd.hdr.type, le32_to_cpu(cmd->recv_ddgst),
			le32_to_cpu(cmd->exp_ddgst));
		nvmet_tcp_finish_cmd(cmd);
		nvmet_tcp_fatal_error(queue);
		ret = -EPROTO;
		goto out;
	}

	if (!(cmd->flags & NVMET_TCP_F_INIT_FAILED) &&
	    cmd->rbytes_done == cmd->req.transfer_len)
		cmd->req.execute(&cmd->req);
	ret = 0;
out:
	nvmet_prepare_receive_pdu(queue);
	return ret;
}

static int nvmet_tcp_try_recv_one(struct nvmet_tcp_queue *queue)
{
	int result = 0;

	if (unlikely(queue->rcv_state == NVMET_TCP_RECV_ERR))
		return 0;

	if (queue->rcv_state == NVMET_TCP_RECV_PDU) {
		result = nvmet_tcp_try_recv_pdu(queue);
		if (result != 0)
			goto done_recv;
	}

	if (queue->rcv_state == NVMET_TCP_RECV_DATA) {
		result = nvmet_tcp_try_recv_data(queue);
		if (result != 0)
			goto done_recv;
	}

	if (queue->rcv_state == NVMET_TCP_RECV_DDGST) {
		result = nvmet_tcp_try_recv_ddgst(queue);
		if (result != 0)
			goto done_recv;
	}

done_recv:
	if (result < 0) {
		if (result == -EAGAIN)
			return 0;
		return result;
	}
	return 1;
}

static int nvmet_tcp_try_recv(struct nvmet_tcp_queue *queue,
		int budget, int *recvs)
{
	int i, ret = 0;

	for (i = 0; i < budget; i++) {
		ret = nvmet_tcp_try_recv_one(queue);
		if (unlikely(ret < 0)) {
			nvmet_tcp_socket_error(queue, ret);
			goto done;
		} else if (ret == 0) {
			break;
		}
		(*recvs)++;
	}
done:
	return ret;
}

static void nvmet_tcp_schedule_release_queue(struct nvmet_tcp_queue *queue)
{
	spin_lock(&queue->state_lock);
	if (queue->state != NVMET_TCP_Q_DISCONNECTING) {
		queue->state = NVMET_TCP_Q_DISCONNECTING;
		schedule_work(&queue->release_work);
	}
	spin_unlock(&queue->state_lock);
}

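/*
 * Per-queue worker: alternate between bounded receive and send passes and
 * keep going while either direction makes progress, up to
 * NVMET_TCP_IO_WORK_BUDGET operations before yielding the CPU.
 */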
static void nvmet_tcp_io_work(struct work_struct *w)
{
	struct nvmet_tcp_queue *queue =
		container_of(w, struct nvmet_tcp_queue, io_work);
	bool pending;
	int ret, ops = 0;

	do {
		pending = false;

		ret = nvmet_tcp_try_recv(queue, NVMET_TCP_RECV_BUDGET, &ops);
		if (ret > 0)
			pending = true;
		else if (ret < 0)
			return;

		ret = nvmet_tcp_try_send(queue, NVMET_TCP_SEND_BUDGET, &ops);
		if (ret > 0)
			pending = true;
		else if (ret < 0)
			return;

	} while (pending && ops < NVMET_TCP_IO_WORK_BUDGET);

	/*
	 * We exhausted our budget, requeue ourselves
	 */
	if (pending)
		queue_work_on(queue_cpu(queue), nvmet_tcp_wq, &queue->io_work);
}

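/*
 * All four PDUs of a command (command, response, C2H data, R2T) are carved
 * out of the queue's page_frag cache, each with room for a trailing header
 * digest, so the send paths can kernel_sendpage() them straight from the
 * backing page.
 */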
static int nvmet_tcp_alloc_cmd(struct nvmet_tcp_queue *queue,
		struct nvmet_tcp_cmd *c)
{
	u8 hdgst = nvmet_tcp_hdgst_len(queue);

	c->queue = queue;
	c->req.port = queue->port->nport;

	c->cmd_pdu = page_frag_alloc(&queue->pf_cache,
			sizeof(*c->cmd_pdu) + hdgst, GFP_KERNEL | __GFP_ZERO);
	if (!c->cmd_pdu)
		return -ENOMEM;
	c->req.cmd = &c->cmd_pdu->cmd;

	c->rsp_pdu = page_frag_alloc(&queue->pf_cache,
			sizeof(*c->rsp_pdu) + hdgst, GFP_KERNEL | __GFP_ZERO);
	if (!c->rsp_pdu)
		goto out_free_cmd;
	c->req.cqe = &c->rsp_pdu->cqe;

	c->data_pdu = page_frag_alloc(&queue->pf_cache,
			sizeof(*c->data_pdu) + hdgst, GFP_KERNEL | __GFP_ZERO);
	if (!c->data_pdu)
		goto out_free_rsp;

	c->r2t_pdu = page_frag_alloc(&queue->pf_cache,
			sizeof(*c->r2t_pdu) + hdgst, GFP_KERNEL | __GFP_ZERO);
	if (!c->r2t_pdu)
		goto out_free_data;

	c->recv_msg.msg_flags = MSG_DONTWAIT | MSG_NOSIGNAL;

	list_add_tail(&c->entry, &queue->free_list);

	return 0;
out_free_data:
	page_frag_free(c->data_pdu);
out_free_rsp:
	page_frag_free(c->rsp_pdu);
out_free_cmd:
	page_frag_free(c->cmd_pdu);
	return -ENOMEM;
}

static void nvmet_tcp_free_cmd(struct nvmet_tcp_cmd *c)
{
	page_frag_free(c->r2t_pdu);
	page_frag_free(c->data_pdu);
	page_frag_free(c->rsp_pdu);
	page_frag_free(c->cmd_pdu);
}

static int nvmet_tcp_alloc_cmds(struct nvmet_tcp_queue *queue)
{
	struct nvmet_tcp_cmd *cmds;
	int i, ret = -EINVAL, nr_cmds = queue->nr_cmds;

	cmds = kcalloc(nr_cmds, sizeof(struct nvmet_tcp_cmd), GFP_KERNEL);
	if (!cmds)
		goto out;

	for (i = 0; i < nr_cmds; i++) {
		ret = nvmet_tcp_alloc_cmd(queue, cmds + i);
		if (ret)
			goto out_free;
	}

	queue->cmds = cmds;

	return 0;
out_free:
	while (--i >= 0)
		nvmet_tcp_free_cmd(cmds + i);
	kfree(cmds);
out:
	return ret;
}

static void nvmet_tcp_free_cmds(struct nvmet_tcp_queue *queue)
{
	struct nvmet_tcp_cmd *cmds = queue->cmds;
	int i;

	for (i = 0; i < queue->nr_cmds; i++)
		nvmet_tcp_free_cmd(cmds + i);

	nvmet_tcp_free_cmd(&queue->connect);
	kfree(cmds);
}

static void nvmet_tcp_restore_socket_callbacks(struct nvmet_tcp_queue *queue)
{
	struct socket *sock = queue->sock;

	write_lock_bh(&sock->sk->sk_callback_lock);
	sock->sk->sk_data_ready = queue->data_ready;
	sock->sk->sk_state_change = queue->state_change;
	sock->sk->sk_write_space = queue->write_space;
	sock->sk->sk_user_data = NULL;
	write_unlock_bh(&sock->sk->sk_callback_lock);
}

static void nvmet_tcp_finish_cmd(struct nvmet_tcp_cmd *cmd)
{
	nvmet_req_uninit(&cmd->req);
	nvmet_tcp_unmap_pdu_iovec(cmd);
	kfree(cmd->iov);
	sgl_free(cmd->req.sg);
}

static void nvmet_tcp_uninit_data_in_cmds(struct nvmet_tcp_queue *queue)
{
	struct nvmet_tcp_cmd *cmd = queue->cmds;
	int i;

	for (i = 0; i < queue->nr_cmds; i++, cmd++) {
		if (nvmet_tcp_need_data_in(cmd))
			nvmet_tcp_finish_cmd(cmd);
	}

	if (!queue->nr_cmds && nvmet_tcp_need_data_in(&queue->connect)) {
		/* failed in connect */
		nvmet_tcp_finish_cmd(&queue->connect);
	}
}

static void nvmet_tcp_release_queue_work(struct work_struct *w)
{
	struct nvmet_tcp_queue *queue =
		container_of(w, struct nvmet_tcp_queue, release_work);

	mutex_lock(&nvmet_tcp_queue_mutex);
	list_del_init(&queue->queue_list);
	mutex_unlock(&nvmet_tcp_queue_mutex);

	nvmet_tcp_restore_socket_callbacks(queue);
	flush_work(&queue->io_work);

	nvmet_tcp_uninit_data_in_cmds(queue);
	nvmet_sq_destroy(&queue->nvme_sq);
	cancel_work_sync(&queue->io_work);
	sock_release(queue->sock);
	nvmet_tcp_free_cmds(queue);
	if (queue->hdr_digest || queue->data_digest)
		nvmet_tcp_free_crypto(queue);
	ida_simple_remove(&nvmet_tcp_queue_ida, queue->idx);

	kfree(queue);
}

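/*
 * Socket callbacks: these run in softirq context under sk_callback_lock, so
 * they do no real work themselves and just bounce processing to io_work on
 * the queue's incoming CPU.
 */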
static void nvmet_tcp_data_ready(struct sock *sk)
{
	struct nvmet_tcp_queue *queue;

	read_lock_bh(&sk->sk_callback_lock);
	queue = sk->sk_user_data;
	if (likely(queue))
		queue_work_on(queue_cpu(queue), nvmet_tcp_wq, &queue->io_work);
	read_unlock_bh(&sk->sk_callback_lock);
}

static void nvmet_tcp_write_space(struct sock *sk)
{
	struct nvmet_tcp_queue *queue;

	read_lock_bh(&sk->sk_callback_lock);
	queue = sk->sk_user_data;
	if (unlikely(!queue))
		goto out;

	if (unlikely(queue->state == NVMET_TCP_Q_CONNECTING)) {
		queue->write_space(sk);
		goto out;
	}

	if (sk_stream_is_writeable(sk)) {
		clear_bit(SOCK_NOSPACE, &sk->sk_socket->flags);
		queue_work_on(queue_cpu(queue), nvmet_tcp_wq, &queue->io_work);
	}
out:
	read_unlock_bh(&sk->sk_callback_lock);
}

static void nvmet_tcp_state_change(struct sock *sk)
{
	struct nvmet_tcp_queue *queue;

	write_lock_bh(&sk->sk_callback_lock);
	queue = sk->sk_user_data;
	if (!queue)
		goto done;

	switch (sk->sk_state) {
	case TCP_FIN_WAIT1:
	case TCP_CLOSE_WAIT:
	case TCP_CLOSE:
		/* FALLTHRU */
		sk->sk_user_data = NULL;
		nvmet_tcp_schedule_release_queue(queue);
		break;
	default:
		pr_warn("queue %d unhandled state %d\n",
			queue->idx, sk->sk_state);
	}
done:
	write_unlock_bh(&sk->sk_callback_lock);
}

static int nvmet_tcp_set_queue_sock(struct nvmet_tcp_queue *queue)
{
	struct socket *sock = queue->sock;
	struct inet_sock *inet = inet_sk(sock->sk);
	int ret;

	ret = kernel_getsockname(sock,
		(struct sockaddr *)&queue->sockaddr);
	if (ret < 0)
		return ret;

	ret = kernel_getpeername(sock,
		(struct sockaddr *)&queue->sockaddr_peer);
	if (ret < 0)
		return ret;

	/*
	 * Cleanup whatever is sitting in the TCP transmit queue on socket
	 * close. This is done to prevent stale data from being sent should
	 * the network connection be restored before TCP times out.
	 */
	sock_no_linger(sock->sk);

	if (so_priority > 0)
		sock_set_priority(sock->sk, so_priority);

	/* Set socket type of service */
	if (inet->rcv_tos > 0)
		ip_sock_set_tos(sock->sk, inet->rcv_tos);

	write_lock_bh(&sock->sk->sk_callback_lock);
	sock->sk->sk_user_data = queue;
	queue->data_ready = sock->sk->sk_data_ready;
	sock->sk->sk_data_ready = nvmet_tcp_data_ready;
	queue->state_change = sock->sk->sk_state_change;
	sock->sk->sk_state_change = nvmet_tcp_state_change;
	queue->write_space = sock->sk->sk_write_space;
	sock->sk->sk_write_space = nvmet_tcp_write_space;
	write_unlock_bh(&sock->sk->sk_callback_lock);

	return 0;
}

static int nvmet_tcp_alloc_queue(struct nvmet_tcp_port *port,
		struct socket *newsock)
{
	struct nvmet_tcp_queue *queue;
	int ret;

	queue = kzalloc(sizeof(*queue), GFP_KERNEL);
	if (!queue)
		return -ENOMEM;

	INIT_WORK(&queue->release_work, nvmet_tcp_release_queue_work);
	INIT_WORK(&queue->io_work, nvmet_tcp_io_work);
	queue->sock = newsock;
	queue->port = port;
	queue->nr_cmds = 0;
	spin_lock_init(&queue->state_lock);
	queue->state = NVMET_TCP_Q_CONNECTING;
	INIT_LIST_HEAD(&queue->free_list);
	init_llist_head(&queue->resp_list);
	INIT_LIST_HEAD(&queue->resp_send_list);

	queue->idx = ida_simple_get(&nvmet_tcp_queue_ida, 0, 0, GFP_KERNEL);
	if (queue->idx < 0) {
		ret = queue->idx;
		goto out_free_queue;
	}

	ret = nvmet_tcp_alloc_cmd(queue, &queue->connect);
	if (ret)
		goto out_ida_remove;

	ret = nvmet_sq_init(&queue->nvme_sq);
	if (ret)
		goto out_free_connect;

	nvmet_prepare_receive_pdu(queue);

	mutex_lock(&nvmet_tcp_queue_mutex);
	list_add_tail(&queue->queue_list, &nvmet_tcp_queue_list);
	mutex_unlock(&nvmet_tcp_queue_mutex);

	ret = nvmet_tcp_set_queue_sock(queue);
	if (ret)
		goto out_destroy_sq;

	queue_work_on(queue_cpu(queue), nvmet_tcp_wq, &queue->io_work);

	return 0;
out_destroy_sq:
	mutex_lock(&nvmet_tcp_queue_mutex);
	list_del_init(&queue->queue_list);
	mutex_unlock(&nvmet_tcp_queue_mutex);
	nvmet_sq_destroy(&queue->nvme_sq);
out_free_connect:
	nvmet_tcp_free_cmd(&queue->connect);
out_ida_remove:
	ida_simple_remove(&nvmet_tcp_queue_ida, queue->idx);
out_free_queue:
	kfree(queue);
	return ret;
}

static void nvmet_tcp_accept_work(struct work_struct *w)
{
	struct nvmet_tcp_port *port =
		container_of(w, struct nvmet_tcp_port, accept_work);
	struct socket *newsock;
	int ret;

	while (true) {
		ret = kernel_accept(port->sock, &newsock, O_NONBLOCK);
		if (ret < 0) {
			if (ret != -EAGAIN)
				pr_warn("failed to accept err=%d\n", ret);
			return;
		}
		ret = nvmet_tcp_alloc_queue(port, newsock);
		if (ret) {
			pr_err("failed to allocate queue\n");
			sock_release(newsock);
		}
	}
}

static void nvmet_tcp_listen_data_ready(struct sock *sk)
{
	struct nvmet_tcp_port *port;

	read_lock_bh(&sk->sk_callback_lock);
	port = sk->sk_user_data;
	if (!port)
		goto out;

	if (sk->sk_state == TCP_LISTEN)
		schedule_work(&port->accept_work);
out:
	read_unlock_bh(&sk->sk_callback_lock);
}

static int nvmet_tcp_add_port(struct nvmet_port *nport)
{
	struct nvmet_tcp_port *port;
	__kernel_sa_family_t af;
	int ret;

	port = kzalloc(sizeof(*port), GFP_KERNEL);
	if (!port)
		return -ENOMEM;

	switch (nport->disc_addr.adrfam) {
	case NVMF_ADDR_FAMILY_IP4:
		af = AF_INET;
		break;
	case NVMF_ADDR_FAMILY_IP6:
		af = AF_INET6;
		break;
	default:
		pr_err("address family %d not supported\n",
				nport->disc_addr.adrfam);
		ret = -EINVAL;
		goto err_port;
	}

	ret = inet_pton_with_scope(&init_net, af, nport->disc_addr.traddr,
			nport->disc_addr.trsvcid, &port->addr);
	if (ret) {
		pr_err("malformed ip/port passed: %s:%s\n",
			nport->disc_addr.traddr, nport->disc_addr.trsvcid);
		goto err_port;
	}

	port->nport = nport;
	INIT_WORK(&port->accept_work, nvmet_tcp_accept_work);
	if (port->nport->inline_data_size < 0)
		port->nport->inline_data_size = NVMET_TCP_DEF_INLINE_DATA_SIZE;

	ret = sock_create(port->addr.ss_family, SOCK_STREAM,
				IPPROTO_TCP, &port->sock);
	if (ret) {
		pr_err("failed to create a socket\n");
		goto err_port;
	}

	port->sock->sk->sk_user_data = port;
	port->data_ready = port->sock->sk->sk_data_ready;
	port->sock->sk->sk_data_ready = nvmet_tcp_listen_data_ready;
	sock_set_reuseaddr(port->sock->sk);
	tcp_sock_set_nodelay(port->sock->sk);
	if (so_priority > 0)
		sock_set_priority(port->sock->sk, so_priority);

	ret = kernel_bind(port->sock, (struct sockaddr *)&port->addr,
			sizeof(port->addr));
	if (ret) {
		pr_err("failed to bind port socket %d\n", ret);
		goto err_sock;
	}

	ret = kernel_listen(port->sock, 128);
	if (ret) {
		pr_err("failed to listen %d on port sock\n", ret);
		goto err_sock;
	}

	nport->priv = port;
	pr_info("enabling port %d (%pISpc)\n",
		le16_to_cpu(nport->disc_addr.portid), &port->addr);

	return 0;

err_sock:
	sock_release(port->sock);
err_port:
	kfree(port);
	return ret;
}

static void nvmet_tcp_remove_port(struct nvmet_port *nport)
{
	struct nvmet_tcp_port *port = nport->priv;

	write_lock_bh(&port->sock->sk->sk_callback_lock);
	port->sock->sk->sk_data_ready = port->data_ready;
	port->sock->sk->sk_user_data = NULL;
	write_unlock_bh(&port->sock->sk->sk_callback_lock);
	cancel_work_sync(&port->accept_work);

	sock_release(port->sock);
	kfree(port);
}

static void nvmet_tcp_delete_ctrl(struct nvmet_ctrl *ctrl)
{
	struct nvmet_tcp_queue *queue;

	mutex_lock(&nvmet_tcp_queue_mutex);
	list_for_each_entry(queue, &nvmet_tcp_queue_list, queue_list)
		if (queue->nvme_sq.ctrl == ctrl)
			kernel_sock_shutdown(queue->sock, SHUT_RDWR);
	mutex_unlock(&nvmet_tcp_queue_mutex);
}

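/*
 * Called once the host's connect command has established the queue size.
 * Sizing the command array at twice the SQ depth presumably leaves headroom
 * beyond the host's outstanding commands (e.g. while their responses are
 * still in flight on the wire).
 */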
static u16 nvmet_tcp_install_queue(struct nvmet_sq *sq)
{
	struct nvmet_tcp_queue *queue =
		container_of(sq, struct nvmet_tcp_queue, nvme_sq);

	if (sq->qid == 0) {
		/* Let inflight controller teardown complete */
		flush_scheduled_work();
	}

	queue->nr_cmds = sq->size * 2;
	if (nvmet_tcp_alloc_cmds(queue))
		return NVME_SC_INTERNAL;
	return 0;
}

static void nvmet_tcp_disc_port_addr(struct nvmet_req *req,
		struct nvmet_port *nport, char *traddr)
{
	struct nvmet_tcp_port *port = nport->priv;

	if (inet_addr_is_any((struct sockaddr *)&port->addr)) {
		struct nvmet_tcp_cmd *cmd =
			container_of(req, struct nvmet_tcp_cmd, req);
		struct nvmet_tcp_queue *queue = cmd->queue;

		sprintf(traddr, "%pISc", (struct sockaddr *)&queue->sockaddr);
	} else {
		memcpy(traddr, nport->disc_addr.traddr, NVMF_TRADDR_SIZE);
	}
}

static const struct nvmet_fabrics_ops nvmet_tcp_ops = {
	.owner			= THIS_MODULE,
	.type			= NVMF_TRTYPE_TCP,
	.msdbd			= 1,
	.add_port		= nvmet_tcp_add_port,
	.remove_port		= nvmet_tcp_remove_port,
	.queue_response		= nvmet_tcp_queue_response,
	.delete_ctrl		= nvmet_tcp_delete_ctrl,
	.install_queue		= nvmet_tcp_install_queue,
	.disc_traddr		= nvmet_tcp_disc_port_addr,
};

static int __init nvmet_tcp_init(void)
{
	int ret;

	nvmet_tcp_wq = alloc_workqueue("nvmet_tcp_wq", WQ_HIGHPRI, 0);
	if (!nvmet_tcp_wq)
		return -ENOMEM;

	ret = nvmet_register_transport(&nvmet_tcp_ops);
	if (ret)
		goto err;

	return 0;
err:
	destroy_workqueue(nvmet_tcp_wq);
	return ret;
}

static void __exit nvmet_tcp_exit(void)
{
	struct nvmet_tcp_queue *queue;

	nvmet_unregister_transport(&nvmet_tcp_ops);

	flush_scheduled_work();
	mutex_lock(&nvmet_tcp_queue_mutex);
	list_for_each_entry(queue, &nvmet_tcp_queue_list, queue_list)
		kernel_sock_shutdown(queue->sock, SHUT_RDWR);
	mutex_unlock(&nvmet_tcp_queue_mutex);
	flush_scheduled_work();

	destroy_workqueue(nvmet_tcp_wq);
}

module_init(nvmet_tcp_init);
module_exit(nvmet_tcp_exit);

MODULE_LICENSE("GPL v2");
MODULE_ALIAS("nvmet-transport-3"); /* 3 == NVMF_TRTYPE_TCP */