// SPDX-License-Identifier: GPL-2.0
/*
 * NVMe over Fabrics TCP host.
 * Copyright (c) 2018 Lightbits Labs. All rights reserved.
 */
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/module.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/err.h>
#include <linux/nvme-tcp.h>
#include <net/sock.h>
#include <net/tcp.h>
#include <linux/blk-mq.h>
#include <crypto/hash.h>
#include <net/busy_poll.h>

#include "nvme.h"
#include "fabrics.h"

struct nvme_tcp_queue;

/* Define the socket priority to use for connections where it is desirable
 * that the NIC consider performing optimized packet processing or filtering.
 * A non-zero value being sufficient to indicate general consideration of any
 * possible optimization. Making it a module param allows for alternative
 * values that may be unique for some NIC implementations.
 */
static int so_priority;
module_param(so_priority, int, 0644);
MODULE_PARM_DESC(so_priority, "nvme tcp socket optimize priority");

enum nvme_tcp_send_state {
	NVME_TCP_SEND_CMD_PDU = 0,
	NVME_TCP_SEND_H2C_PDU,
	NVME_TCP_SEND_DATA,
	NVME_TCP_SEND_DDGST,
};

struct nvme_tcp_request {
	struct nvme_request	req;
	void			*pdu;
	struct nvme_tcp_queue	*queue;
	u32			data_len;
	u32			pdu_len;
	u32			pdu_sent;
	u16			ttag;
	struct list_head	entry;
	__le32			ddgst;

	struct bio		*curr_bio;
	struct iov_iter		iter;

	/* send state */
	size_t			offset;
	size_t			data_sent;
	enum nvme_tcp_send_state state;
};

enum nvme_tcp_queue_flags {
	NVME_TCP_Q_ALLOCATED	= 0,
	NVME_TCP_Q_LIVE		= 1,
};

enum nvme_tcp_recv_state {
	NVME_TCP_RECV_PDU = 0,
	NVME_TCP_RECV_DATA,
	NVME_TCP_RECV_DDGST,
};

struct nvme_tcp_ctrl;
struct nvme_tcp_queue {
	struct socket		*sock;
	struct work_struct	io_work;
	int			io_cpu;

	spinlock_t		lock;
	struct list_head	send_list;

	/* recv state */
	void			*pdu;
	int			pdu_remaining;
	int			pdu_offset;
	size_t			data_remaining;
	size_t			ddgst_remaining;
	unsigned int		nr_cqe;

	/* send state */
	struct nvme_tcp_request *request;

	int			queue_size;
	size_t			cmnd_capsule_len;
	struct nvme_tcp_ctrl	*ctrl;
	unsigned long		flags;
	bool			rd_enabled;

	bool			hdr_digest;
	bool			data_digest;
	struct ahash_request	*rcv_hash;
	struct ahash_request	*snd_hash;
	__le32			exp_ddgst;
	__le32			recv_ddgst;

	struct page_frag_cache	pf_cache;

	void (*state_change)(struct sock *);
	void (*data_ready)(struct sock *);
	void (*write_space)(struct sock *);
};

struct nvme_tcp_ctrl {
	/* read only in the hot path */
	struct nvme_tcp_queue	*queues;
	struct blk_mq_tag_set	tag_set;

	/* other member variables */
	struct list_head	list;
	struct blk_mq_tag_set	admin_tag_set;
	struct sockaddr_storage addr;
	struct sockaddr_storage src_addr;
	struct nvme_ctrl	ctrl;

	struct work_struct	err_work;
	struct delayed_work	connect_work;
	struct nvme_tcp_request async_req;
	u32			io_queues[HCTX_MAX_TYPES];
};

static LIST_HEAD(nvme_tcp_ctrl_list);
static DEFINE_MUTEX(nvme_tcp_ctrl_mutex);
static struct workqueue_struct *nvme_tcp_wq;
static struct blk_mq_ops nvme_tcp_mq_ops;
static struct blk_mq_ops nvme_tcp_admin_mq_ops;

static inline struct nvme_tcp_ctrl *to_tcp_ctrl(struct nvme_ctrl *ctrl)
{
	return container_of(ctrl, struct nvme_tcp_ctrl, ctrl);
}

static inline int nvme_tcp_queue_id(struct nvme_tcp_queue *queue)
{
	return queue - queue->ctrl->queues;
}

static inline struct blk_mq_tags *nvme_tcp_tagset(struct nvme_tcp_queue *queue)
{
	u32 queue_idx = nvme_tcp_queue_id(queue);

	if (queue_idx == 0)
		return queue->ctrl->admin_tag_set.tags[queue_idx];
	return queue->ctrl->tag_set.tags[queue_idx - 1];
}

static inline u8 nvme_tcp_hdgst_len(struct nvme_tcp_queue *queue)
{
	return queue->hdr_digest ? NVME_TCP_DIGEST_LENGTH : 0;
}

static inline u8 nvme_tcp_ddgst_len(struct nvme_tcp_queue *queue)
{
	return queue->data_digest ? NVME_TCP_DIGEST_LENGTH : 0;
}

static inline size_t nvme_tcp_inline_data_size(struct nvme_tcp_queue *queue)
{
	return queue->cmnd_capsule_len - sizeof(struct nvme_command);
}

static inline bool nvme_tcp_async_req(struct nvme_tcp_request *req)
{
	return req == &req->queue->ctrl->async_req;
}

static inline bool nvme_tcp_has_inline_data(struct nvme_tcp_request *req)
{
	struct request *rq;
	unsigned int bytes;

	if (unlikely(nvme_tcp_async_req(req)))
		return false; /* async events don't have a request */

	rq = blk_mq_rq_from_pdu(req);
	bytes = blk_rq_payload_bytes(rq);

	return rq_data_dir(rq) == WRITE && bytes &&
		bytes <= nvme_tcp_inline_data_size(req->queue);
}

static inline struct page *nvme_tcp_req_cur_page(struct nvme_tcp_request *req)
{
	return req->iter.bvec->bv_page;
}

static inline size_t nvme_tcp_req_cur_offset(struct nvme_tcp_request *req)
{
	return req->iter.bvec->bv_offset + req->iter.iov_offset;
}

static inline size_t nvme_tcp_req_cur_length(struct nvme_tcp_request *req)
{
	return min_t(size_t, req->iter.bvec->bv_len - req->iter.iov_offset,
			req->pdu_len - req->pdu_sent);
}

static inline size_t nvme_tcp_req_offset(struct nvme_tcp_request *req)
{
	return req->iter.iov_offset;
}

static inline size_t nvme_tcp_pdu_data_left(struct nvme_tcp_request *req)
{
	return rq_data_dir(blk_mq_rq_from_pdu(req)) == WRITE ?
			req->pdu_len - req->pdu_sent : 0;
}

static inline size_t nvme_tcp_pdu_last_send(struct nvme_tcp_request *req,
		int len)
{
	return nvme_tcp_pdu_data_left(req) <= len;
}

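/*
 * Build the request's bvec iterator, either from the special payload
 * vector or from the current bio, so send/recv can walk the data pages.
 */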
static void nvme_tcp_init_iter(struct nvme_tcp_request *req,
		unsigned int dir)
{
	struct request *rq = blk_mq_rq_from_pdu(req);
	struct bio_vec *vec;
	unsigned int size;
	int nsegs;
	size_t offset;

	if (rq->rq_flags & RQF_SPECIAL_PAYLOAD) {
		vec = &rq->special_vec;
		nsegs = 1;
		size = blk_rq_payload_bytes(rq);
		offset = 0;
	} else {
		struct bio *bio = req->curr_bio;

		vec = __bvec_iter_bvec(bio->bi_io_vec, bio->bi_iter);
		nsegs = bio_segments(bio);
		size = bio->bi_iter.bi_size;
		offset = bio->bi_iter.bi_bvec_done;
	}

	iov_iter_bvec(&req->iter, dir, vec, nsegs, size);
	req->iter.iov_offset = offset;
}

static inline void nvme_tcp_advance_req(struct nvme_tcp_request *req,
		int len)
{
	req->data_sent += len;
	req->pdu_sent += len;
	iov_iter_advance(&req->iter, len);
	if (!iov_iter_count(&req->iter) &&
	    req->data_sent < req->data_len) {
		req->curr_bio = req->curr_bio->bi_next;
		nvme_tcp_init_iter(req, WRITE);
	}
}

static inline void nvme_tcp_queue_request(struct nvme_tcp_request *req)
{
	struct nvme_tcp_queue *queue = req->queue;

	spin_lock(&queue->lock);
	list_add_tail(&req->entry, &queue->send_list);
	spin_unlock(&queue->lock);

	queue_work_on(queue->io_cpu, nvme_tcp_wq, &queue->io_work);
}

static inline struct nvme_tcp_request *
nvme_tcp_fetch_request(struct nvme_tcp_queue *queue)
{
	struct nvme_tcp_request *req;

	spin_lock(&queue->lock);
	req = list_first_entry_or_null(&queue->send_list,
			struct nvme_tcp_request, entry);
	if (req)
		list_del(&req->entry);
	spin_unlock(&queue->lock);

	return req;
}

static inline void nvme_tcp_ddgst_final(struct ahash_request *hash,
		__le32 *dgst)
{
	ahash_request_set_crypt(hash, NULL, (u8 *)dgst, 0);
	crypto_ahash_final(hash);
}

static inline void nvme_tcp_ddgst_update(struct ahash_request *hash,
		struct page *page, off_t off, size_t len)
{
	struct scatterlist sg;

	sg_init_marker(&sg, 1);
	sg_set_page(&sg, page, len, off);
	ahash_request_set_crypt(hash, &sg, NULL, len);
	crypto_ahash_update(hash);
}

static inline void nvme_tcp_hdgst(struct ahash_request *hash,
		void *pdu, size_t len)
{
	struct scatterlist sg;

	sg_init_one(&sg, pdu, len);
	ahash_request_set_crypt(hash, &sg, pdu + len, len);
	crypto_ahash_digest(hash);
}

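/*
 * Verify a received PDU header digest by recomputing it over the header
 * and comparing it with the digest that followed the header on the wire.
 */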
static int nvme_tcp_verify_hdgst(struct nvme_tcp_queue *queue,
		void *pdu, size_t pdu_len)
{
	struct nvme_tcp_hdr *hdr = pdu;
	__le32 recv_digest;
	__le32 exp_digest;

	if (unlikely(!(hdr->flags & NVME_TCP_F_HDGST))) {
		dev_err(queue->ctrl->ctrl.device,
			"queue %d: header digest flag is cleared\n",
			nvme_tcp_queue_id(queue));
		return -EPROTO;
	}

	recv_digest = *(__le32 *)(pdu + hdr->hlen);
	nvme_tcp_hdgst(queue->rcv_hash, pdu, pdu_len);
	exp_digest = *(__le32 *)(pdu + hdr->hlen);
	if (recv_digest != exp_digest) {
		dev_err(queue->ctrl->ctrl.device,
			"header digest error: recv %#x expected %#x\n",
			le32_to_cpu(recv_digest), le32_to_cpu(exp_digest));
		return -EIO;
	}

	return 0;
}

static int nvme_tcp_check_ddgst(struct nvme_tcp_queue *queue, void *pdu)
{
	struct nvme_tcp_hdr *hdr = pdu;
	u8 digest_len = nvme_tcp_hdgst_len(queue);
	u32 len;

	len = le32_to_cpu(hdr->plen) - hdr->hlen -
		((hdr->flags & NVME_TCP_F_HDGST) ? digest_len : 0);

	if (unlikely(len && !(hdr->flags & NVME_TCP_F_DDGST))) {
		dev_err(queue->ctrl->ctrl.device,
			"queue %d: data digest flag is cleared\n",
			nvme_tcp_queue_id(queue));
		return -EPROTO;
	}
	crypto_ahash_init(queue->rcv_hash);

	return 0;
}

static void nvme_tcp_exit_request(struct blk_mq_tag_set *set,
		struct request *rq, unsigned int hctx_idx)
{
	struct nvme_tcp_request *req = blk_mq_rq_to_pdu(rq);

	page_frag_free(req->pdu);
}

static int nvme_tcp_init_request(struct blk_mq_tag_set *set,
		struct request *rq, unsigned int hctx_idx,
		unsigned int numa_node)
{
	struct nvme_tcp_ctrl *ctrl = set->driver_data;
	struct nvme_tcp_request *req = blk_mq_rq_to_pdu(rq);
	int queue_idx = (set == &ctrl->tag_set) ? hctx_idx + 1 : 0;
	struct nvme_tcp_queue *queue = &ctrl->queues[queue_idx];
	u8 hdgst = nvme_tcp_hdgst_len(queue);

	req->pdu = page_frag_alloc(&queue->pf_cache,
		sizeof(struct nvme_tcp_cmd_pdu) + hdgst,
		GFP_KERNEL | __GFP_ZERO);
	if (!req->pdu)
		return -ENOMEM;

	req->queue = queue;
	nvme_req(rq)->ctrl = &ctrl->ctrl;

	return 0;
}

static int nvme_tcp_init_hctx(struct blk_mq_hw_ctx *hctx, void *data,
		unsigned int hctx_idx)
{
	struct nvme_tcp_ctrl *ctrl = data;
	struct nvme_tcp_queue *queue = &ctrl->queues[hctx_idx + 1];

	hctx->driver_data = queue;
	return 0;
}

static int nvme_tcp_init_admin_hctx(struct blk_mq_hw_ctx *hctx, void *data,
		unsigned int hctx_idx)
{
	struct nvme_tcp_ctrl *ctrl = data;
	struct nvme_tcp_queue *queue = &ctrl->queues[0];

	hctx->driver_data = queue;
	return 0;
}

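/*
 * Receive state machine: a PDU header is collected first, then any data,
 * then the trailing data digest, as tracked by the *_remaining counters.
 */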
static enum nvme_tcp_recv_state
nvme_tcp_recv_state(struct nvme_tcp_queue *queue)
{
	return  (queue->pdu_remaining) ? NVME_TCP_RECV_PDU :
		(queue->ddgst_remaining) ? NVME_TCP_RECV_DDGST :
		NVME_TCP_RECV_DATA;
}

static void nvme_tcp_init_recv_ctx(struct nvme_tcp_queue *queue)
{
	queue->pdu_remaining = sizeof(struct nvme_tcp_rsp_pdu) +
				nvme_tcp_hdgst_len(queue);
	queue->pdu_offset = 0;
	queue->data_remaining = -1;
	queue->ddgst_remaining = 0;
}

static void nvme_tcp_error_recovery(struct nvme_ctrl *ctrl)
{
	if (!nvme_change_ctrl_state(ctrl, NVME_CTRL_RESETTING))
		return;

	queue_work(nvme_reset_wq, &to_tcp_ctrl(ctrl)->err_work);
}

static int nvme_tcp_process_nvme_cqe(struct nvme_tcp_queue *queue,
		struct nvme_completion *cqe)
{
	struct request *rq;

	rq = blk_mq_tag_to_rq(nvme_tcp_tagset(queue), cqe->command_id);
	if (!rq) {
		dev_err(queue->ctrl->ctrl.device,
			"queue %d tag 0x%x not found\n",
			nvme_tcp_queue_id(queue), cqe->command_id);
		nvme_tcp_error_recovery(&queue->ctrl->ctrl);
		return -EINVAL;
	}

	nvme_end_request(rq, cqe->status, cqe->result);
	queue->nr_cqe++;

	return 0;
}

static int nvme_tcp_handle_c2h_data(struct nvme_tcp_queue *queue,
		struct nvme_tcp_data_pdu *pdu)
{
	struct request *rq;

	rq = blk_mq_tag_to_rq(nvme_tcp_tagset(queue), pdu->command_id);
	if (!rq) {
		dev_err(queue->ctrl->ctrl.device,
			"queue %d tag %#x not found\n",
			nvme_tcp_queue_id(queue), pdu->command_id);
		return -ENOENT;
	}

	if (!blk_rq_payload_bytes(rq)) {
		dev_err(queue->ctrl->ctrl.device,
			"queue %d tag %#x unexpected data\n",
			nvme_tcp_queue_id(queue), rq->tag);
		return -EIO;
	}

	queue->data_remaining = le32_to_cpu(pdu->data_length);

	if (pdu->hdr.flags & NVME_TCP_F_DATA_SUCCESS &&
	    unlikely(!(pdu->hdr.flags & NVME_TCP_F_DATA_LAST))) {
		dev_err(queue->ctrl->ctrl.device,
			"queue %d tag %#x SUCCESS set but not last PDU\n",
			nvme_tcp_queue_id(queue), rq->tag);
		nvme_tcp_error_recovery(&queue->ctrl->ctrl);
		return -EPROTO;
	}

	return 0;
}

static int nvme_tcp_handle_comp(struct nvme_tcp_queue *queue,
		struct nvme_tcp_rsp_pdu *pdu)
{
	struct nvme_completion *cqe = &pdu->cqe;
	int ret = 0;

	/*
	 * AEN requests are special as they don't time out and can
	 * survive any kind of queue freeze and often don't respond to
	 * aborts. We don't even bother to allocate a struct request
	 * for them but rather special case them here.
	 */
	if (unlikely(nvme_is_aen_req(nvme_tcp_queue_id(queue),
				     cqe->command_id)))
		nvme_complete_async_event(&queue->ctrl->ctrl, cqe->status,
				&cqe->result);
	else
		ret = nvme_tcp_process_nvme_cqe(queue, cqe);

	return ret;
}

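/*
 * Prepare an H2CData PDU in response to a controller R2T, after validating
 * that the requested range lies within the data we still have to send.
 */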
static int nvme_tcp_setup_h2c_data_pdu(struct nvme_tcp_request *req,
		struct nvme_tcp_r2t_pdu *pdu)
{
	struct nvme_tcp_data_pdu *data = req->pdu;
	struct nvme_tcp_queue *queue = req->queue;
	struct request *rq = blk_mq_rq_from_pdu(req);
	u8 hdgst = nvme_tcp_hdgst_len(queue);
	u8 ddgst = nvme_tcp_ddgst_len(queue);

	req->pdu_len = le32_to_cpu(pdu->r2t_length);
	req->pdu_sent = 0;

	if (unlikely(req->data_sent + req->pdu_len > req->data_len)) {
		dev_err(queue->ctrl->ctrl.device,
			"req %d r2t len %u exceeded data len %u (%zu sent)\n",
			rq->tag, req->pdu_len, req->data_len,
			req->data_sent);
		return -EPROTO;
	}

	if (unlikely(le32_to_cpu(pdu->r2t_offset) < req->data_sent)) {
		dev_err(queue->ctrl->ctrl.device,
			"req %d unexpected r2t offset %u (expected %zu)\n",
			rq->tag, le32_to_cpu(pdu->r2t_offset),
			req->data_sent);
		return -EPROTO;
	}

	memset(data, 0, sizeof(*data));
	data->hdr.type = nvme_tcp_h2c_data;
	data->hdr.flags = NVME_TCP_F_DATA_LAST;
	if (queue->hdr_digest)
		data->hdr.flags |= NVME_TCP_F_HDGST;
	if (queue->data_digest)
		data->hdr.flags |= NVME_TCP_F_DDGST;
	data->hdr.hlen = sizeof(*data);
	data->hdr.pdo = data->hdr.hlen + hdgst;
	data->hdr.plen =
		cpu_to_le32(data->hdr.hlen + hdgst + req->pdu_len + ddgst);
	data->ttag = pdu->ttag;
	data->command_id = rq->tag;
	data->data_offset = cpu_to_le32(req->data_sent);
	data->data_length = cpu_to_le32(req->pdu_len);
	return 0;
}

static int nvme_tcp_handle_r2t(struct nvme_tcp_queue *queue,
		struct nvme_tcp_r2t_pdu *pdu)
{
	struct nvme_tcp_request *req;
	struct request *rq;
	int ret;

	rq = blk_mq_tag_to_rq(nvme_tcp_tagset(queue), pdu->command_id);
	if (!rq) {
		dev_err(queue->ctrl->ctrl.device,
			"queue %d tag %#x not found\n",
			nvme_tcp_queue_id(queue), pdu->command_id);
		return -ENOENT;
	}
	req = blk_mq_rq_to_pdu(rq);

	ret = nvme_tcp_setup_h2c_data_pdu(req, pdu);
	if (unlikely(ret))
		return ret;

	req->state = NVME_TCP_SEND_H2C_PDU;
	req->offset = 0;

	nvme_tcp_queue_request(req);

	return 0;
}

static int nvme_tcp_recv_pdu(struct nvme_tcp_queue *queue, struct sk_buff *skb,
		unsigned int *offset, size_t *len)
{
	struct nvme_tcp_hdr *hdr;
	char *pdu = queue->pdu;
	size_t rcv_len = min_t(size_t, *len, queue->pdu_remaining);
	int ret;

	ret = skb_copy_bits(skb, *offset,
		&pdu[queue->pdu_offset], rcv_len);
	if (unlikely(ret))
		return ret;

	queue->pdu_remaining -= rcv_len;
	queue->pdu_offset += rcv_len;
	*offset += rcv_len;
	*len -= rcv_len;
	if (queue->pdu_remaining)
		return 0;

	hdr = queue->pdu;
	if (queue->hdr_digest) {
		ret = nvme_tcp_verify_hdgst(queue, queue->pdu, hdr->hlen);
		if (unlikely(ret))
			return ret;
	}

	if (queue->data_digest) {
		ret = nvme_tcp_check_ddgst(queue, queue->pdu);
		if (unlikely(ret))
			return ret;
	}

	switch (hdr->type) {
	case nvme_tcp_c2h_data:
		return nvme_tcp_handle_c2h_data(queue, (void *)queue->pdu);
	case nvme_tcp_rsp:
		nvme_tcp_init_recv_ctx(queue);
		return nvme_tcp_handle_comp(queue, (void *)queue->pdu);
	case nvme_tcp_r2t:
		nvme_tcp_init_recv_ctx(queue);
		return nvme_tcp_handle_r2t(queue, (void *)queue->pdu);
	default:
		dev_err(queue->ctrl->ctrl.device,
			"unsupported pdu type (%d)\n", hdr->type);
		return -EINVAL;
	}
}

static inline void nvme_tcp_end_request(struct request *rq, u16 status)
{
	union nvme_result res = {};

	nvme_end_request(rq, cpu_to_le16(status << 1), res);
}

static int nvme_tcp_recv_data(struct nvme_tcp_queue *queue, struct sk_buff *skb,
		unsigned int *offset, size_t *len)
{
	struct nvme_tcp_data_pdu *pdu = (void *)queue->pdu;
	struct nvme_tcp_request *req;
	struct request *rq;

	rq = blk_mq_tag_to_rq(nvme_tcp_tagset(queue), pdu->command_id);
	if (!rq) {
		dev_err(queue->ctrl->ctrl.device,
			"queue %d tag %#x not found\n",
			nvme_tcp_queue_id(queue), pdu->command_id);
		return -ENOENT;
	}
	req = blk_mq_rq_to_pdu(rq);

	while (true) {
		int recv_len, ret;

		recv_len = min_t(size_t, *len, queue->data_remaining);
		if (!recv_len)
			break;

		if (!iov_iter_count(&req->iter)) {
			req->curr_bio = req->curr_bio->bi_next;

			/*
			 * If we don't have any bios it means that controller
			 * sent more data than we requested, hence error
			 */
			if (!req->curr_bio) {
				dev_err(queue->ctrl->ctrl.device,
					"queue %d no space in request %#x",
					nvme_tcp_queue_id(queue), rq->tag);
				nvme_tcp_init_recv_ctx(queue);
				return -EIO;
			}
			nvme_tcp_init_iter(req, READ);
		}

		/* we can read only from what is left in this bio */
		recv_len = min_t(size_t, recv_len,
				iov_iter_count(&req->iter));

		if (queue->data_digest)
			ret = skb_copy_and_hash_datagram_iter(skb, *offset,
				&req->iter, recv_len, queue->rcv_hash);
		else
			ret = skb_copy_datagram_iter(skb, *offset,
					&req->iter, recv_len);
		if (ret) {
			dev_err(queue->ctrl->ctrl.device,
				"queue %d failed to copy request %#x data",
				nvme_tcp_queue_id(queue), rq->tag);
			return ret;
		}

		*len -= recv_len;
		*offset += recv_len;
		queue->data_remaining -= recv_len;
	}

	if (!queue->data_remaining) {
		if (queue->data_digest) {
			nvme_tcp_ddgst_final(queue->rcv_hash, &queue->exp_ddgst);
			queue->ddgst_remaining = NVME_TCP_DIGEST_LENGTH;
		} else {
			if (pdu->hdr.flags & NVME_TCP_F_DATA_SUCCESS) {
				nvme_tcp_end_request(rq, NVME_SC_SUCCESS);
				queue->nr_cqe++;
			}
			nvme_tcp_init_recv_ctx(queue);
		}
	}

	return 0;
}

static int nvme_tcp_recv_ddgst(struct nvme_tcp_queue *queue,
		struct sk_buff *skb, unsigned int *offset, size_t *len)
{
	struct nvme_tcp_data_pdu *pdu = (void *)queue->pdu;
	char *ddgst = (char *)&queue->recv_ddgst;
	size_t recv_len = min_t(size_t, *len, queue->ddgst_remaining);
	off_t off = NVME_TCP_DIGEST_LENGTH - queue->ddgst_remaining;
	int ret;

	ret = skb_copy_bits(skb, *offset, &ddgst[off], recv_len);
	if (unlikely(ret))
		return ret;

	queue->ddgst_remaining -= recv_len;
	*offset += recv_len;
	*len -= recv_len;
	if (queue->ddgst_remaining)
		return 0;

	if (queue->recv_ddgst != queue->exp_ddgst) {
		dev_err(queue->ctrl->ctrl.device,
			"data digest error: recv %#x expected %#x\n",
			le32_to_cpu(queue->recv_ddgst),
			le32_to_cpu(queue->exp_ddgst));
		return -EIO;
	}

	if (pdu->hdr.flags & NVME_TCP_F_DATA_SUCCESS) {
		struct request *rq = blk_mq_tag_to_rq(nvme_tcp_tagset(queue),
						pdu->command_id);

		nvme_tcp_end_request(rq, NVME_SC_SUCCESS);
		queue->nr_cqe++;
	}

	nvme_tcp_init_recv_ctx(queue);
	return 0;
}

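/*
 * ->read_sock() callback: consume the skb according to the current receive
 * state (PDU header, data, or data digest) until the buffer is drained.
 */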
static int nvme_tcp_recv_skb(read_descriptor_t *desc, struct sk_buff *skb,
			     unsigned int offset, size_t len)
{
	struct nvme_tcp_queue *queue = desc->arg.data;
	size_t consumed = len;
	int result;

	while (len) {
		switch (nvme_tcp_recv_state(queue)) {
		case NVME_TCP_RECV_PDU:
			result = nvme_tcp_recv_pdu(queue, skb, &offset, &len);
			break;
		case NVME_TCP_RECV_DATA:
			result = nvme_tcp_recv_data(queue, skb, &offset, &len);
			break;
		case NVME_TCP_RECV_DDGST:
			result = nvme_tcp_recv_ddgst(queue, skb, &offset, &len);
			break;
		default:
			result = -EFAULT;
		}
		if (result) {
			dev_err(queue->ctrl->ctrl.device,
				"receive failed: %d\n", result);
			queue->rd_enabled = false;
			nvme_tcp_error_recovery(&queue->ctrl->ctrl);
			return result;
		}
	}

	return consumed;
}

static void nvme_tcp_data_ready(struct sock *sk)
{
	struct nvme_tcp_queue *queue;

	read_lock(&sk->sk_callback_lock);
	queue = sk->sk_user_data;
	if (likely(queue && queue->rd_enabled))
		queue_work_on(queue->io_cpu, nvme_tcp_wq, &queue->io_work);
	read_unlock(&sk->sk_callback_lock);
}

static void nvme_tcp_write_space(struct sock *sk)
{
	struct nvme_tcp_queue *queue;

	read_lock_bh(&sk->sk_callback_lock);
	queue = sk->sk_user_data;
	if (likely(queue && sk_stream_is_writeable(sk))) {
		clear_bit(SOCK_NOSPACE, &sk->sk_socket->flags);
		queue_work_on(queue->io_cpu, nvme_tcp_wq, &queue->io_work);
	}
	read_unlock_bh(&sk->sk_callback_lock);
}

static void nvme_tcp_state_change(struct sock *sk)
{
	struct nvme_tcp_queue *queue;

	read_lock(&sk->sk_callback_lock);
	queue = sk->sk_user_data;
	if (!queue)
		goto done;

	switch (sk->sk_state) {
	case TCP_CLOSE:
	case TCP_CLOSE_WAIT:
	case TCP_LAST_ACK:
	case TCP_FIN_WAIT1:
	case TCP_FIN_WAIT2:
		/* fallthrough */
		nvme_tcp_error_recovery(&queue->ctrl->ctrl);
		break;
	default:
		dev_info(queue->ctrl->ctrl.device,
			"queue %d socket state %d\n",
			nvme_tcp_queue_id(queue), sk->sk_state);
	}

	queue->state_change(sk);
done:
	read_unlock(&sk->sk_callback_lock);
}

static inline void nvme_tcp_done_send_req(struct nvme_tcp_queue *queue)
{
	queue->request = NULL;
}

static void nvme_tcp_fail_request(struct nvme_tcp_request *req)
{
	nvme_tcp_end_request(blk_mq_rq_from_pdu(req), NVME_SC_HOST_PATH_ERROR);
}

static int nvme_tcp_try_send_data(struct nvme_tcp_request *req)
{
	struct nvme_tcp_queue *queue = req->queue;

	while (true) {
		struct page *page = nvme_tcp_req_cur_page(req);
		size_t offset = nvme_tcp_req_cur_offset(req);
		size_t len = nvme_tcp_req_cur_length(req);
		bool last = nvme_tcp_pdu_last_send(req, len);
		int ret, flags = MSG_DONTWAIT;

		if (last && !queue->data_digest)
			flags |= MSG_EOR;
		else
			flags |= MSG_MORE;

		/* can't zcopy slab pages */
		if (unlikely(PageSlab(page))) {
			ret = sock_no_sendpage(queue->sock, page, offset, len,
					flags);
		} else {
			ret = kernel_sendpage(queue->sock, page, offset, len,
					flags);
		}
		if (ret <= 0)
			return ret;

		nvme_tcp_advance_req(req, ret);
		if (queue->data_digest)
			nvme_tcp_ddgst_update(queue->snd_hash, page,
					offset, ret);

		/* fully successful last write */
		if (last && ret == len) {
			if (queue->data_digest) {
				nvme_tcp_ddgst_final(queue->snd_hash,
					&req->ddgst);
				req->state = NVME_TCP_SEND_DDGST;
				req->offset = 0;
			} else {
				nvme_tcp_done_send_req(queue);
			}
			return 1;
		}
	}
	return -EAGAIN;
}

static int nvme_tcp_try_send_cmd_pdu(struct nvme_tcp_request *req)
{
	struct nvme_tcp_queue *queue = req->queue;
	struct nvme_tcp_cmd_pdu *pdu = req->pdu;
	bool inline_data = nvme_tcp_has_inline_data(req);
	int flags = MSG_DONTWAIT | (inline_data ? MSG_MORE : MSG_EOR);
	u8 hdgst = nvme_tcp_hdgst_len(queue);
	int len = sizeof(*pdu) + hdgst - req->offset;
	int ret;

	if (queue->hdr_digest && !req->offset)
		nvme_tcp_hdgst(queue->snd_hash, pdu, sizeof(*pdu));

	ret = kernel_sendpage(queue->sock, virt_to_page(pdu),
			offset_in_page(pdu) + req->offset, len, flags);
	if (unlikely(ret <= 0))
		return ret;

	len -= ret;
	if (!len) {
		if (inline_data) {
			req->state = NVME_TCP_SEND_DATA;
			if (queue->data_digest)
				crypto_ahash_init(queue->snd_hash);
			nvme_tcp_init_iter(req, WRITE);
		} else {
			nvme_tcp_done_send_req(queue);
		}
		return 1;
	}
	req->offset += ret;

	return -EAGAIN;
}

static int nvme_tcp_try_send_data_pdu(struct nvme_tcp_request *req)
{
	struct nvme_tcp_queue *queue = req->queue;
	struct nvme_tcp_data_pdu *pdu = req->pdu;
	u8 hdgst = nvme_tcp_hdgst_len(queue);
	int len = sizeof(*pdu) - req->offset + hdgst;
	int ret;

	if (queue->hdr_digest && !req->offset)
		nvme_tcp_hdgst(queue->snd_hash, pdu, sizeof(*pdu));

	ret = kernel_sendpage(queue->sock, virt_to_page(pdu),
			offset_in_page(pdu) + req->offset, len,
			MSG_DONTWAIT | MSG_MORE);
	if (unlikely(ret <= 0))
		return ret;

	len -= ret;
	if (!len) {
		req->state = NVME_TCP_SEND_DATA;
		if (queue->data_digest)
			crypto_ahash_init(queue->snd_hash);
		if (!req->data_sent)
			nvme_tcp_init_iter(req, WRITE);
		return 1;
	}
	req->offset += ret;

	return -EAGAIN;
}

static int nvme_tcp_try_send_ddgst(struct nvme_tcp_request *req)
{
	struct nvme_tcp_queue *queue = req->queue;
	int ret;
	struct msghdr msg = { .msg_flags = MSG_DONTWAIT | MSG_EOR };
	struct kvec iov = {
		.iov_base = &req->ddgst + req->offset,
		.iov_len = NVME_TCP_DIGEST_LENGTH - req->offset
	};

	ret = kernel_sendmsg(queue->sock, &msg, &iov, 1, iov.iov_len);
	if (unlikely(ret <= 0))
		return ret;

	if (req->offset + ret == NVME_TCP_DIGEST_LENGTH) {
		nvme_tcp_done_send_req(queue);
		return 1;
	}

	req->offset += ret;
	return -EAGAIN;
}

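/*
 * Advance the current request through the send states
 * (command PDU -> H2C PDU -> data -> data digest), fetching a new request
 * from the send list when none is in flight.
 */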
static int nvme_tcp_try_send(struct nvme_tcp_queue *queue)
{
	struct nvme_tcp_request *req;
	int ret = 1;

	if (!queue->request) {
		queue->request = nvme_tcp_fetch_request(queue);
		if (!queue->request)
			return 0;
	}
	req = queue->request;

	if (req->state == NVME_TCP_SEND_CMD_PDU) {
		ret = nvme_tcp_try_send_cmd_pdu(req);
		if (ret <= 0)
			goto done;
		if (!nvme_tcp_has_inline_data(req))
			return ret;
	}

	if (req->state == NVME_TCP_SEND_H2C_PDU) {
		ret = nvme_tcp_try_send_data_pdu(req);
		if (ret <= 0)
			goto done;
	}

	if (req->state == NVME_TCP_SEND_DATA) {
		ret = nvme_tcp_try_send_data(req);
		if (ret <= 0)
			goto done;
	}

	if (req->state == NVME_TCP_SEND_DDGST)
		ret = nvme_tcp_try_send_ddgst(req);
done:
	if (ret == -EAGAIN) {
		ret = 0;
	} else if (ret < 0) {
		dev_err(queue->ctrl->ctrl.device,
			"failed to send request %d\n", ret);
		if (ret != -EPIPE && ret != -ECONNRESET)
			nvme_tcp_fail_request(queue->request);
		nvme_tcp_done_send_req(queue);
	}
	return ret;
}

static int nvme_tcp_try_recv(struct nvme_tcp_queue *queue)
{
	struct socket *sock = queue->sock;
	struct sock *sk = sock->sk;
	read_descriptor_t rd_desc;
	int consumed;

	rd_desc.arg.data = queue;
	rd_desc.count = 1;
	lock_sock(sk);
	queue->nr_cqe = 0;
	consumed = sock->ops->read_sock(sk, &rd_desc, nvme_tcp_recv_skb);
	release_sock(sk);
	return consumed;
}

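/*
 * Per-queue I/O work: alternate send and receive processing for roughly
 * one millisecond, then re-arm the work item if there is still work pending.
 */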
static void nvme_tcp_io_work(struct work_struct *w)
{
	struct nvme_tcp_queue *queue =
		container_of(w, struct nvme_tcp_queue, io_work);
	unsigned long deadline = jiffies + msecs_to_jiffies(1);

	do {
		bool pending = false;
		int result;

		result = nvme_tcp_try_send(queue);
		if (result > 0)
			pending = true;
		else if (unlikely(result < 0))
			break;

		result = nvme_tcp_try_recv(queue);
		if (result > 0)
			pending = true;
		else if (unlikely(result < 0))
			break;

		if (!pending)
			return;

	} while (!time_after(jiffies, deadline)); /* quota is exhausted */

	queue_work_on(queue->io_cpu, nvme_tcp_wq, &queue->io_work);
}

static void nvme_tcp_free_crypto(struct nvme_tcp_queue *queue)
{
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(queue->rcv_hash);

	ahash_request_free(queue->rcv_hash);
	ahash_request_free(queue->snd_hash);
	crypto_free_ahash(tfm);
}

static int nvme_tcp_alloc_crypto(struct nvme_tcp_queue *queue)
{
	struct crypto_ahash *tfm;

	tfm = crypto_alloc_ahash("crc32c", 0, CRYPTO_ALG_ASYNC);
	if (IS_ERR(tfm))
		return PTR_ERR(tfm);

	queue->snd_hash = ahash_request_alloc(tfm, GFP_KERNEL);
	if (!queue->snd_hash)
		goto free_tfm;
	ahash_request_set_callback(queue->snd_hash, 0, NULL, NULL);

	queue->rcv_hash = ahash_request_alloc(tfm, GFP_KERNEL);
	if (!queue->rcv_hash)
		goto free_snd_hash;
	ahash_request_set_callback(queue->rcv_hash, 0, NULL, NULL);

	return 0;
free_snd_hash:
	ahash_request_free(queue->snd_hash);
free_tfm:
	crypto_free_ahash(tfm);
	return -ENOMEM;
}

static void nvme_tcp_free_async_req(struct nvme_tcp_ctrl *ctrl)
{
	struct nvme_tcp_request *async = &ctrl->async_req;

	page_frag_free(async->pdu);
}

static int nvme_tcp_alloc_async_req(struct nvme_tcp_ctrl *ctrl)
{
	struct nvme_tcp_queue *queue = &ctrl->queues[0];
	struct nvme_tcp_request *async = &ctrl->async_req;
	u8 hdgst = nvme_tcp_hdgst_len(queue);

	async->pdu = page_frag_alloc(&queue->pf_cache,
		sizeof(struct nvme_tcp_cmd_pdu) + hdgst,
		GFP_KERNEL | __GFP_ZERO);
	if (!async->pdu)
		return -ENOMEM;

	async->queue = &ctrl->queues[0];
	return 0;
}

static void nvme_tcp_free_queue(struct nvme_ctrl *nctrl, int qid)
{
	struct nvme_tcp_ctrl *ctrl = to_tcp_ctrl(nctrl);
	struct nvme_tcp_queue *queue = &ctrl->queues[qid];

	if (!test_and_clear_bit(NVME_TCP_Q_ALLOCATED, &queue->flags))
		return;

	if (queue->hdr_digest || queue->data_digest)
		nvme_tcp_free_crypto(queue);

	sock_release(queue->sock);
	kfree(queue->pdu);
}

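/*
 * Exchange ICReq/ICResp with the controller and verify that the negotiated
 * PDU format version, digests and data alignment match what was requested.
 */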
static int nvme_tcp_init_connection(struct nvme_tcp_queue *queue)
{
	struct nvme_tcp_icreq_pdu *icreq;
	struct nvme_tcp_icresp_pdu *icresp;
	struct msghdr msg = {};
	struct kvec iov;
	bool ctrl_hdgst, ctrl_ddgst;
	int ret;

	icreq = kzalloc(sizeof(*icreq), GFP_KERNEL);
	if (!icreq)
		return -ENOMEM;

	icresp = kzalloc(sizeof(*icresp), GFP_KERNEL);
	if (!icresp) {
		ret = -ENOMEM;
		goto free_icreq;
	}

	icreq->hdr.type = nvme_tcp_icreq;
	icreq->hdr.hlen = sizeof(*icreq);
	icreq->hdr.pdo = 0;
	icreq->hdr.plen = cpu_to_le32(icreq->hdr.hlen);
	icreq->pfv = cpu_to_le16(NVME_TCP_PFV_1_0);
	icreq->maxr2t = 0; /* single inflight r2t supported */
	icreq->hpda = 0; /* no alignment constraint */
	if (queue->hdr_digest)
		icreq->digest |= NVME_TCP_HDR_DIGEST_ENABLE;
	if (queue->data_digest)
		icreq->digest |= NVME_TCP_DATA_DIGEST_ENABLE;

	iov.iov_base = icreq;
	iov.iov_len = sizeof(*icreq);
	ret = kernel_sendmsg(queue->sock, &msg, &iov, 1, iov.iov_len);
	if (ret < 0)
		goto free_icresp;

	memset(&msg, 0, sizeof(msg));
	iov.iov_base = icresp;
	iov.iov_len = sizeof(*icresp);
	ret = kernel_recvmsg(queue->sock, &msg, &iov, 1,
			iov.iov_len, msg.msg_flags);
	if (ret < 0)
		goto free_icresp;

	ret = -EINVAL;
	if (icresp->hdr.type != nvme_tcp_icresp) {
		pr_err("queue %d: bad type returned %d\n",
			nvme_tcp_queue_id(queue), icresp->hdr.type);
		goto free_icresp;
	}

	if (le32_to_cpu(icresp->hdr.plen) != sizeof(*icresp)) {
		pr_err("queue %d: bad pdu length returned %d\n",
			nvme_tcp_queue_id(queue), icresp->hdr.plen);
		goto free_icresp;
	}

	if (icresp->pfv != NVME_TCP_PFV_1_0) {
		pr_err("queue %d: bad pfv returned %d\n",
			nvme_tcp_queue_id(queue), icresp->pfv);
		goto free_icresp;
	}

	ctrl_ddgst = !!(icresp->digest & NVME_TCP_DATA_DIGEST_ENABLE);
	if ((queue->data_digest && !ctrl_ddgst) ||
	    (!queue->data_digest && ctrl_ddgst)) {
		pr_err("queue %d: data digest mismatch host: %s ctrl: %s\n",
			nvme_tcp_queue_id(queue),
			queue->data_digest ? "enabled" : "disabled",
			ctrl_ddgst ? "enabled" : "disabled");
		goto free_icresp;
	}

	ctrl_hdgst = !!(icresp->digest & NVME_TCP_HDR_DIGEST_ENABLE);
	if ((queue->hdr_digest && !ctrl_hdgst) ||
	    (!queue->hdr_digest && ctrl_hdgst)) {
		pr_err("queue %d: header digest mismatch host: %s ctrl: %s\n",
			nvme_tcp_queue_id(queue),
			queue->hdr_digest ? "enabled" : "disabled",
			ctrl_hdgst ? "enabled" : "disabled");
		goto free_icresp;
	}

	if (icresp->cpda != 0) {
		pr_err("queue %d: unsupported cpda returned %d\n",
			nvme_tcp_queue_id(queue), icresp->cpda);
		goto free_icresp;
	}

	ret = 0;
free_icresp:
	kfree(icresp);
free_icreq:
	kfree(icreq);
	return ret;
}

static bool nvme_tcp_admin_queue(struct nvme_tcp_queue *queue)
{
	return nvme_tcp_queue_id(queue) == 0;
}

static bool nvme_tcp_default_queue(struct nvme_tcp_queue *queue)
{
	struct nvme_tcp_ctrl *ctrl = queue->ctrl;
	int qid = nvme_tcp_queue_id(queue);

	return !nvme_tcp_admin_queue(queue) &&
		qid < 1 + ctrl->io_queues[HCTX_TYPE_DEFAULT];
}

static bool nvme_tcp_read_queue(struct nvme_tcp_queue *queue)
{
	struct nvme_tcp_ctrl *ctrl = queue->ctrl;
	int qid = nvme_tcp_queue_id(queue);

	return !nvme_tcp_admin_queue(queue) &&
		!nvme_tcp_default_queue(queue) &&
		qid < 1 + ctrl->io_queues[HCTX_TYPE_DEFAULT] +
				ctrl->io_queues[HCTX_TYPE_READ];
}

static bool nvme_tcp_poll_queue(struct nvme_tcp_queue *queue)
{
	struct nvme_tcp_ctrl *ctrl = queue->ctrl;
	int qid = nvme_tcp_queue_id(queue);

	return !nvme_tcp_admin_queue(queue) &&
		!nvme_tcp_default_queue(queue) &&
		!nvme_tcp_read_queue(queue) &&
		qid < 1 + ctrl->io_queues[HCTX_TYPE_DEFAULT] +
				ctrl->io_queues[HCTX_TYPE_READ] +
				ctrl->io_queues[HCTX_TYPE_POLL];
}

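/*
 * Spread queues of each type (default, read, poll) across the online CPUs
 * so that io_work for different queues runs on different cores.
 */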
static void nvme_tcp_set_queue_io_cpu(struct nvme_tcp_queue *queue)
{
	struct nvme_tcp_ctrl *ctrl = queue->ctrl;
	int qid = nvme_tcp_queue_id(queue);
	int n = 0;

	if (nvme_tcp_default_queue(queue))
		n = qid - 1;
	else if (nvme_tcp_read_queue(queue))
		n = qid - ctrl->io_queues[HCTX_TYPE_DEFAULT] - 1;
	else if (nvme_tcp_poll_queue(queue))
		n = qid - ctrl->io_queues[HCTX_TYPE_DEFAULT] -
				ctrl->io_queues[HCTX_TYPE_READ] - 1;
	queue->io_cpu = cpumask_next_wrap(n - 1, cpu_online_mask, -1, false);
}

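/*
 * Allocate and connect a queue's socket: tune the socket options, install
 * the sk callbacks, then perform the NVMe/TCP initialize connection exchange.
 */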
Sagi Grimberg3f2304f2018-12-03 17:52:17 -08001313static int nvme_tcp_alloc_queue(struct nvme_ctrl *nctrl,
1314 int qid, size_t queue_size)
1315{
1316 struct nvme_tcp_ctrl *ctrl = to_tcp_ctrl(nctrl);
1317 struct nvme_tcp_queue *queue = &ctrl->queues[qid];
1318 struct linger sol = { .l_onoff = 1, .l_linger = 0 };
Sagi Grimberg40510a62020-02-25 15:53:09 -08001319 int ret, opt, rcv_pdu_size;
Sagi Grimberg3f2304f2018-12-03 17:52:17 -08001320
1321 queue->ctrl = ctrl;
1322 INIT_LIST_HEAD(&queue->send_list);
1323 spin_lock_init(&queue->lock);
1324 INIT_WORK(&queue->io_work, nvme_tcp_io_work);
1325 queue->queue_size = queue_size;
1326
1327 if (qid > 0)
Israel Rukshin9924b032019-08-18 12:08:53 +03001328 queue->cmnd_capsule_len = nctrl->ioccsz * 16;
Sagi Grimberg3f2304f2018-12-03 17:52:17 -08001329 else
1330 queue->cmnd_capsule_len = sizeof(struct nvme_command) +
1331 NVME_TCP_ADMIN_CCSZ;
1332
1333 ret = sock_create(ctrl->addr.ss_family, SOCK_STREAM,
1334 IPPROTO_TCP, &queue->sock);
1335 if (ret) {
Israel Rukshin9924b032019-08-18 12:08:53 +03001336 dev_err(nctrl->device,
Sagi Grimberg3f2304f2018-12-03 17:52:17 -08001337 "failed to create socket: %d\n", ret);
1338 return ret;
1339 }
1340
1341 /* Single syn retry */
1342 opt = 1;
1343 ret = kernel_setsockopt(queue->sock, IPPROTO_TCP, TCP_SYNCNT,
1344 (char *)&opt, sizeof(opt));
1345 if (ret) {
Israel Rukshin9924b032019-08-18 12:08:53 +03001346 dev_err(nctrl->device,
Sagi Grimberg3f2304f2018-12-03 17:52:17 -08001347 "failed to set TCP_SYNCNT sock opt %d\n", ret);
1348 goto err_sock;
1349 }
1350
1351 /* Set TCP no delay */
1352 opt = 1;
1353 ret = kernel_setsockopt(queue->sock, IPPROTO_TCP,
1354 TCP_NODELAY, (char *)&opt, sizeof(opt));
1355 if (ret) {
Israel Rukshin9924b032019-08-18 12:08:53 +03001356 dev_err(nctrl->device,
Sagi Grimberg3f2304f2018-12-03 17:52:17 -08001357 "failed to set TCP_NODELAY sock opt %d\n", ret);
1358 goto err_sock;
1359 }
1360
1361 /*
1362 * Cleanup whatever is sitting in the TCP transmit queue on socket
1363 * close. This is done to prevent stale data from being sent should
1364 * the network connection be restored before TCP times out.
1365 */
1366 ret = kernel_setsockopt(queue->sock, SOL_SOCKET, SO_LINGER,
1367 (char *)&sol, sizeof(sol));
1368 if (ret) {
Israel Rukshin9924b032019-08-18 12:08:53 +03001369 dev_err(nctrl->device,
Sagi Grimberg3f2304f2018-12-03 17:52:17 -08001370 "failed to set SO_LINGER sock opt %d\n", ret);
1371 goto err_sock;
1372 }
1373
Wunderlich, Mark9912ade2020-01-16 00:46:12 +00001374 if (so_priority > 0) {
1375 ret = kernel_setsockopt(queue->sock, SOL_SOCKET, SO_PRIORITY,
1376 (char *)&so_priority, sizeof(so_priority));
1377 if (ret) {
1378 dev_err(ctrl->ctrl.device,
1379 "failed to set SO_PRIORITY sock opt, ret %d\n",
1380 ret);
1381 goto err_sock;
1382 }
1383 }
1384
Israel Rukshinbb139852019-08-18 12:08:54 +03001385 /* Set socket type of service */
1386 if (nctrl->opts->tos >= 0) {
1387 opt = nctrl->opts->tos;
1388 ret = kernel_setsockopt(queue->sock, SOL_IP, IP_TOS,
1389 (char *)&opt, sizeof(opt));
1390 if (ret) {
1391 dev_err(nctrl->device,
1392 "failed to set IP_TOS sock opt %d\n", ret);
1393 goto err_sock;
1394 }
1395 }
1396
Sagi Grimberg3f2304f2018-12-03 17:52:17 -08001397 queue->sock->sk->sk_allocation = GFP_ATOMIC;
Sagi Grimberg40510a62020-02-25 15:53:09 -08001398 nvme_tcp_set_queue_io_cpu(queue);
Sagi Grimberg3f2304f2018-12-03 17:52:17 -08001399 queue->request = NULL;
1400 queue->data_remaining = 0;
1401 queue->ddgst_remaining = 0;
1402 queue->pdu_remaining = 0;
1403 queue->pdu_offset = 0;
1404 sk_set_memalloc(queue->sock->sk);
1405
Israel Rukshin9924b032019-08-18 12:08:53 +03001406 if (nctrl->opts->mask & NVMF_OPT_HOST_TRADDR) {
Sagi Grimberg3f2304f2018-12-03 17:52:17 -08001407 ret = kernel_bind(queue->sock, (struct sockaddr *)&ctrl->src_addr,
1408 sizeof(ctrl->src_addr));
1409 if (ret) {
Israel Rukshin9924b032019-08-18 12:08:53 +03001410 dev_err(nctrl->device,
Sagi Grimberg3f2304f2018-12-03 17:52:17 -08001411 "failed to bind queue %d socket %d\n",
1412 qid, ret);
1413 goto err_sock;
1414 }
1415 }
1416
1417 queue->hdr_digest = nctrl->opts->hdr_digest;
1418 queue->data_digest = nctrl->opts->data_digest;
1419 if (queue->hdr_digest || queue->data_digest) {
1420 ret = nvme_tcp_alloc_crypto(queue);
1421 if (ret) {
Israel Rukshin9924b032019-08-18 12:08:53 +03001422 dev_err(nctrl->device,
Sagi Grimberg3f2304f2018-12-03 17:52:17 -08001423 "failed to allocate queue %d crypto\n", qid);
1424 goto err_sock;
1425 }
1426 }
1427
1428 rcv_pdu_size = sizeof(struct nvme_tcp_rsp_pdu) +
1429 nvme_tcp_hdgst_len(queue);
1430 queue->pdu = kmalloc(rcv_pdu_size, GFP_KERNEL);
1431 if (!queue->pdu) {
1432 ret = -ENOMEM;
1433 goto err_crypto;
1434 }
1435
Israel Rukshin9924b032019-08-18 12:08:53 +03001436 dev_dbg(nctrl->device, "connecting queue %d\n",
Sagi Grimberg3f2304f2018-12-03 17:52:17 -08001437 nvme_tcp_queue_id(queue));
1438
1439 ret = kernel_connect(queue->sock, (struct sockaddr *)&ctrl->addr,
1440 sizeof(ctrl->addr), 0);
1441 if (ret) {
Israel Rukshin9924b032019-08-18 12:08:53 +03001442 dev_err(nctrl->device,
Sagi Grimberg3f2304f2018-12-03 17:52:17 -08001443 "failed to connect socket: %d\n", ret);
1444 goto err_rcv_pdu;
1445 }
1446
1447 ret = nvme_tcp_init_connection(queue);
1448 if (ret)
1449 goto err_init_connect;
1450
1451 queue->rd_enabled = true;
1452 set_bit(NVME_TCP_Q_ALLOCATED, &queue->flags);
1453 nvme_tcp_init_recv_ctx(queue);
1454
1455 write_lock_bh(&queue->sock->sk->sk_callback_lock);
1456 queue->sock->sk->sk_user_data = queue;
1457 queue->state_change = queue->sock->sk->sk_state_change;
1458 queue->data_ready = queue->sock->sk->sk_data_ready;
1459 queue->write_space = queue->sock->sk->sk_write_space;
1460 queue->sock->sk->sk_data_ready = nvme_tcp_data_ready;
1461 queue->sock->sk->sk_state_change = nvme_tcp_state_change;
1462 queue->sock->sk->sk_write_space = nvme_tcp_write_space;
Sebastian Andrzej Siewiorac1c4e12019-10-10 17:34:12 +02001463#ifdef CONFIG_NET_RX_BUSY_POLL
Sagi Grimberg1a9460c2019-07-03 14:08:04 -07001464 queue->sock->sk->sk_ll_usec = 1;
Sebastian Andrzej Siewiorac1c4e12019-10-10 17:34:12 +02001465#endif
Sagi Grimberg3f2304f2018-12-03 17:52:17 -08001466 write_unlock_bh(&queue->sock->sk->sk_callback_lock);
1467
1468 return 0;
1469
1470err_init_connect:
1471 kernel_sock_shutdown(queue->sock, SHUT_RDWR);
1472err_rcv_pdu:
1473 kfree(queue->pdu);
1474err_crypto:
1475 if (queue->hdr_digest || queue->data_digest)
1476 nvme_tcp_free_crypto(queue);
1477err_sock:
1478 sock_release(queue->sock);
1479 queue->sock = NULL;
1480 return ret;
1481}
1482
1483static void nvme_tcp_restore_sock_calls(struct nvme_tcp_queue *queue)
1484{
1485 struct socket *sock = queue->sock;
1486
1487 write_lock_bh(&sock->sk->sk_callback_lock);
1488 sock->sk->sk_user_data = NULL;
1489 sock->sk->sk_data_ready = queue->data_ready;
1490 sock->sk->sk_state_change = queue->state_change;
1491 sock->sk->sk_write_space = queue->write_space;
1492 write_unlock_bh(&sock->sk->sk_callback_lock);
1493}
1494
1495static void __nvme_tcp_stop_queue(struct nvme_tcp_queue *queue)
1496{
1497 kernel_sock_shutdown(queue->sock, SHUT_RDWR);
1498 nvme_tcp_restore_sock_calls(queue);
1499 cancel_work_sync(&queue->io_work);
1500}
1501
1502static void nvme_tcp_stop_queue(struct nvme_ctrl *nctrl, int qid)
1503{
1504 struct nvme_tcp_ctrl *ctrl = to_tcp_ctrl(nctrl);
1505 struct nvme_tcp_queue *queue = &ctrl->queues[qid];
1506
1507 if (!test_and_clear_bit(NVME_TCP_Q_LIVE, &queue->flags))
1508 return;
1509
1510 __nvme_tcp_stop_queue(queue);
1511}
1512
1513static int nvme_tcp_start_queue(struct nvme_ctrl *nctrl, int idx)
1514{
1515 struct nvme_tcp_ctrl *ctrl = to_tcp_ctrl(nctrl);
1516 int ret;
1517
1518 if (idx)
Sagi Grimberg26c68222018-12-14 11:06:08 -08001519 ret = nvmf_connect_io_queue(nctrl, idx, false);
Sagi Grimberg3f2304f2018-12-03 17:52:17 -08001520 else
1521 ret = nvmf_connect_admin_queue(nctrl);
1522
1523 if (!ret) {
1524 set_bit(NVME_TCP_Q_LIVE, &ctrl->queues[idx].flags);
1525 } else {
Sagi Grimbergf34e2582019-04-29 16:25:48 -07001526 if (test_bit(NVME_TCP_Q_ALLOCATED, &ctrl->queues[idx].flags))
1527 __nvme_tcp_stop_queue(&ctrl->queues[idx]);
Sagi Grimberg3f2304f2018-12-03 17:52:17 -08001528 dev_err(nctrl->device,
1529 "failed to connect queue: %d ret=%d\n", idx, ret);
1530 }
1531 return ret;
1532}
1533
1534static struct blk_mq_tag_set *nvme_tcp_alloc_tagset(struct nvme_ctrl *nctrl,
1535 bool admin)
1536{
1537 struct nvme_tcp_ctrl *ctrl = to_tcp_ctrl(nctrl);
1538 struct blk_mq_tag_set *set;
1539 int ret;
1540
1541 if (admin) {
1542 set = &ctrl->admin_tag_set;
1543 memset(set, 0, sizeof(*set));
1544 set->ops = &nvme_tcp_admin_mq_ops;
1545 set->queue_depth = NVME_AQ_MQ_TAG_DEPTH;
1546 set->reserved_tags = 2; /* connect + keep-alive */
1547 set->numa_node = NUMA_NO_NODE;
1548 set->cmd_size = sizeof(struct nvme_tcp_request);
1549 set->driver_data = ctrl;
1550 set->nr_hw_queues = 1;
1551 set->timeout = ADMIN_TIMEOUT;
1552 } else {
1553 set = &ctrl->tag_set;
1554 memset(set, 0, sizeof(*set));
1555 set->ops = &nvme_tcp_mq_ops;
1556 set->queue_depth = nctrl->sqsize + 1;
1557 set->reserved_tags = 1; /* fabric connect */
1558 set->numa_node = NUMA_NO_NODE;
1559 set->flags = BLK_MQ_F_SHOULD_MERGE;
1560 set->cmd_size = sizeof(struct nvme_tcp_request);
1561 set->driver_data = ctrl;
1562 set->nr_hw_queues = nctrl->queue_count - 1;
1563 set->timeout = NVME_IO_TIMEOUT;
Sagi Grimberg1a9460c2019-07-03 14:08:04 -07001564 set->nr_maps = nctrl->opts->nr_poll_queues ? HCTX_MAX_TYPES : 2;
Sagi Grimberg3f2304f2018-12-03 17:52:17 -08001565 }
1566
1567 ret = blk_mq_alloc_tag_set(set);
1568 if (ret)
1569 return ERR_PTR(ret);
1570
1571 return set;
1572}
1573
1574static void nvme_tcp_free_admin_queue(struct nvme_ctrl *ctrl)
1575{
1576 if (to_tcp_ctrl(ctrl)->async_req.pdu) {
1577 nvme_tcp_free_async_req(to_tcp_ctrl(ctrl));
1578 to_tcp_ctrl(ctrl)->async_req.pdu = NULL;
1579 }
1580
1581 nvme_tcp_free_queue(ctrl, 0);
1582}
1583
1584static void nvme_tcp_free_io_queues(struct nvme_ctrl *ctrl)
1585{
1586 int i;
1587
1588 for (i = 1; i < ctrl->queue_count; i++)
1589 nvme_tcp_free_queue(ctrl, i);
1590}
1591
1592static void nvme_tcp_stop_io_queues(struct nvme_ctrl *ctrl)
1593{
1594 int i;
1595
1596 for (i = 1; i < ctrl->queue_count; i++)
1597 nvme_tcp_stop_queue(ctrl, i);
1598}
1599
1600static int nvme_tcp_start_io_queues(struct nvme_ctrl *ctrl)
1601{
1602 int i, ret = 0;
1603
1604 for (i = 1; i < ctrl->queue_count; i++) {
1605 ret = nvme_tcp_start_queue(ctrl, i);
1606 if (ret)
1607 goto out_stop_queues;
1608 }
1609
1610 return 0;
1611
1612out_stop_queues:
1613 for (i--; i >= 1; i--)
1614 nvme_tcp_stop_queue(ctrl, i);
1615 return ret;
1616}
1617
1618static int nvme_tcp_alloc_admin_queue(struct nvme_ctrl *ctrl)
1619{
1620 int ret;
1621
1622 ret = nvme_tcp_alloc_queue(ctrl, 0, NVME_AQ_DEPTH);
1623 if (ret)
1624 return ret;
1625
1626 ret = nvme_tcp_alloc_async_req(to_tcp_ctrl(ctrl));
1627 if (ret)
1628 goto out_free_queue;
1629
1630 return 0;
1631
1632out_free_queue:
1633 nvme_tcp_free_queue(ctrl, 0);
1634 return ret;
1635}
1636
Sagi Grimbergefb973b2019-04-24 11:53:19 -07001637static int __nvme_tcp_alloc_io_queues(struct nvme_ctrl *ctrl)
Sagi Grimberg3f2304f2018-12-03 17:52:17 -08001638{
1639 int i, ret;
1640
1641 for (i = 1; i < ctrl->queue_count; i++) {
1642 ret = nvme_tcp_alloc_queue(ctrl, i,
1643 ctrl->sqsize + 1);
1644 if (ret)
1645 goto out_free_queues;
1646 }
1647
1648 return 0;
1649
1650out_free_queues:
1651 for (i--; i >= 1; i--)
1652 nvme_tcp_free_queue(ctrl, i);
1653
1654 return ret;
1655}
1656
1657static unsigned int nvme_tcp_nr_io_queues(struct nvme_ctrl *ctrl)
1658{
Sagi Grimberg873946f2018-12-11 23:38:57 -08001659 unsigned int nr_io_queues;
1660
1661 nr_io_queues = min(ctrl->opts->nr_io_queues, num_online_cpus());
1662 nr_io_queues += min(ctrl->opts->nr_write_queues, num_online_cpus());
Sagi Grimberg1a9460c2019-07-03 14:08:04 -07001663 nr_io_queues += min(ctrl->opts->nr_poll_queues, num_online_cpus());
Sagi Grimberg873946f2018-12-11 23:38:57 -08001664
1665 return nr_io_queues;
Sagi Grimberg3f2304f2018-12-03 17:52:17 -08001666}
1667
Sagi Grimberg64861992019-05-28 22:49:05 -07001668static void nvme_tcp_set_io_queues(struct nvme_ctrl *nctrl,
1669 unsigned int nr_io_queues)
1670{
1671 struct nvme_tcp_ctrl *ctrl = to_tcp_ctrl(nctrl);
1672 struct nvmf_ctrl_options *opts = nctrl->opts;
1673
1674 if (opts->nr_write_queues && opts->nr_io_queues < nr_io_queues) {
1675 /*
1676 * separate read/write queues
1677 * hand out dedicated default queues only after we have
1678 * sufficient read queues.
1679 */
1680 ctrl->io_queues[HCTX_TYPE_READ] = opts->nr_io_queues;
1681 nr_io_queues -= ctrl->io_queues[HCTX_TYPE_READ];
1682 ctrl->io_queues[HCTX_TYPE_DEFAULT] =
1683 min(opts->nr_write_queues, nr_io_queues);
1684 nr_io_queues -= ctrl->io_queues[HCTX_TYPE_DEFAULT];
1685 } else {
1686 /*
1687 * shared read/write queues
1688 * either no write queues were requested, or we don't have
1689 * sufficient queue count to have dedicated default queues.
1690 */
1691 ctrl->io_queues[HCTX_TYPE_DEFAULT] =
1692 min(opts->nr_io_queues, nr_io_queues);
1693 nr_io_queues -= ctrl->io_queues[HCTX_TYPE_DEFAULT];
1694 }
Sagi Grimberg1a9460c2019-07-03 14:08:04 -07001695
1696 if (opts->nr_poll_queues && nr_io_queues) {
1697 /* map dedicated poll queues only if we have queues left */
1698 ctrl->io_queues[HCTX_TYPE_POLL] =
1699 min(opts->nr_poll_queues, nr_io_queues);
1700 }
Sagi Grimberg64861992019-05-28 22:49:05 -07001701}
1702
Sagi Grimbergefb973b2019-04-24 11:53:19 -07001703static int nvme_tcp_alloc_io_queues(struct nvme_ctrl *ctrl)
Sagi Grimberg3f2304f2018-12-03 17:52:17 -08001704{
1705 unsigned int nr_io_queues;
1706 int ret;
1707
1708 nr_io_queues = nvme_tcp_nr_io_queues(ctrl);
1709 ret = nvme_set_queue_count(ctrl, &nr_io_queues);
1710 if (ret)
1711 return ret;
1712
1713 ctrl->queue_count = nr_io_queues + 1;
1714 if (ctrl->queue_count < 2)
1715 return 0;
1716
1717 dev_info(ctrl->device,
1718 "creating %d I/O queues.\n", nr_io_queues);
1719
Sagi Grimberg64861992019-05-28 22:49:05 -07001720 nvme_tcp_set_io_queues(ctrl, nr_io_queues);
1721
Sagi Grimbergefb973b2019-04-24 11:53:19 -07001722 return __nvme_tcp_alloc_io_queues(ctrl);
Sagi Grimberg3f2304f2018-12-03 17:52:17 -08001723}
1724
1725static void nvme_tcp_destroy_io_queues(struct nvme_ctrl *ctrl, bool remove)
1726{
1727 nvme_tcp_stop_io_queues(ctrl);
1728 if (remove) {
Sagi Grimberge85037a2018-12-31 23:58:30 -08001729 blk_cleanup_queue(ctrl->connect_q);
Sagi Grimberg3f2304f2018-12-03 17:52:17 -08001730 blk_mq_free_tag_set(ctrl->tagset);
1731 }
1732 nvme_tcp_free_io_queues(ctrl);
1733}
1734
1735static int nvme_tcp_configure_io_queues(struct nvme_ctrl *ctrl, bool new)
1736{
1737 int ret;
1738
Sagi Grimbergefb973b2019-04-24 11:53:19 -07001739 ret = nvme_tcp_alloc_io_queues(ctrl);
Sagi Grimberg3f2304f2018-12-03 17:52:17 -08001740 if (ret)
1741 return ret;
1742
1743 if (new) {
1744 ctrl->tagset = nvme_tcp_alloc_tagset(ctrl, false);
1745 if (IS_ERR(ctrl->tagset)) {
1746 ret = PTR_ERR(ctrl->tagset);
1747 goto out_free_io_queues;
1748 }
1749
Sagi Grimberge85037a2018-12-31 23:58:30 -08001750 ctrl->connect_q = blk_mq_init_queue(ctrl->tagset);
1751 if (IS_ERR(ctrl->connect_q)) {
1752 ret = PTR_ERR(ctrl->connect_q);
1753 goto out_free_tag_set;
Sagi Grimberg3f2304f2018-12-03 17:52:17 -08001754 }
1755 } else {
1756 blk_mq_update_nr_hw_queues(ctrl->tagset,
1757 ctrl->queue_count - 1);
1758 }
1759
1760 ret = nvme_tcp_start_io_queues(ctrl);
1761 if (ret)
1762 goto out_cleanup_connect_q;
1763
1764 return 0;
1765
1766out_cleanup_connect_q:
Sagi Grimberge85037a2018-12-31 23:58:30 -08001767 if (new)
Sagi Grimberg3f2304f2018-12-03 17:52:17 -08001768 blk_cleanup_queue(ctrl->connect_q);
1769out_free_tag_set:
1770 if (new)
1771 blk_mq_free_tag_set(ctrl->tagset);
1772out_free_io_queues:
1773 nvme_tcp_free_io_queues(ctrl);
1774 return ret;
1775}
1776
1777static void nvme_tcp_destroy_admin_queue(struct nvme_ctrl *ctrl, bool remove)
1778{
1779 nvme_tcp_stop_queue(ctrl, 0);
1780 if (remove) {
Sagi Grimberg3f2304f2018-12-03 17:52:17 -08001781 blk_cleanup_queue(ctrl->admin_q);
Sagi Grimberge7832cb2019-08-02 19:33:59 -07001782 blk_cleanup_queue(ctrl->fabrics_q);
Sagi Grimberg3f2304f2018-12-03 17:52:17 -08001783 blk_mq_free_tag_set(ctrl->admin_tagset);
1784 }
1785 nvme_tcp_free_admin_queue(ctrl);
1786}
1787
1788static int nvme_tcp_configure_admin_queue(struct nvme_ctrl *ctrl, bool new)
1789{
1790 int error;
1791
1792 error = nvme_tcp_alloc_admin_queue(ctrl);
1793 if (error)
1794 return error;
1795
1796 if (new) {
1797 ctrl->admin_tagset = nvme_tcp_alloc_tagset(ctrl, true);
1798 if (IS_ERR(ctrl->admin_tagset)) {
1799 error = PTR_ERR(ctrl->admin_tagset);
1800 goto out_free_queue;
1801 }
1802
Sagi Grimberge7832cb2019-08-02 19:33:59 -07001803 ctrl->fabrics_q = blk_mq_init_queue(ctrl->admin_tagset);
1804 if (IS_ERR(ctrl->fabrics_q)) {
1805 error = PTR_ERR(ctrl->fabrics_q);
1806 goto out_free_tagset;
1807 }
1808
Sagi Grimberg3f2304f2018-12-03 17:52:17 -08001809 ctrl->admin_q = blk_mq_init_queue(ctrl->admin_tagset);
1810 if (IS_ERR(ctrl->admin_q)) {
1811 error = PTR_ERR(ctrl->admin_q);
Sagi Grimberge7832cb2019-08-02 19:33:59 -07001812 goto out_cleanup_fabrics_q;
Sagi Grimberg3f2304f2018-12-03 17:52:17 -08001813 }
1814 }
1815
1816 error = nvme_tcp_start_queue(ctrl, 0);
1817 if (error)
1818 goto out_cleanup_queue;
1819
Sagi Grimbergc0f2f452019-07-22 17:06:53 -07001820 error = nvme_enable_ctrl(ctrl);
Sagi Grimberg3f2304f2018-12-03 17:52:17 -08001821 if (error)
1822 goto out_stop_queue;
1823
Sagi Grimberge7832cb2019-08-02 19:33:59 -07001824 blk_mq_unquiesce_queue(ctrl->admin_q);
1825
Sagi Grimberg3f2304f2018-12-03 17:52:17 -08001826 error = nvme_init_identify(ctrl);
1827 if (error)
1828 goto out_stop_queue;
1829
1830 return 0;
1831
1832out_stop_queue:
1833 nvme_tcp_stop_queue(ctrl, 0);
1834out_cleanup_queue:
1835 if (new)
1836 blk_cleanup_queue(ctrl->admin_q);
Sagi Grimberge7832cb2019-08-02 19:33:59 -07001837out_cleanup_fabrics_q:
1838 if (new)
1839 blk_cleanup_queue(ctrl->fabrics_q);
Sagi Grimberg3f2304f2018-12-03 17:52:17 -08001840out_free_tagset:
1841 if (new)
1842 blk_mq_free_tag_set(ctrl->admin_tagset);
1843out_free_queue:
1844 nvme_tcp_free_admin_queue(ctrl);
1845 return error;
1846}
1847
1848static void nvme_tcp_teardown_admin_queue(struct nvme_ctrl *ctrl,
1849 bool remove)
1850{
1851 blk_mq_quiesce_queue(ctrl->admin_q);
1852 nvme_tcp_stop_queue(ctrl, 0);
Ming Lei622b8b62019-07-24 11:48:42 +08001853 if (ctrl->admin_tagset) {
Sagi Grimberg7a425892019-04-24 11:53:17 -07001854 blk_mq_tagset_busy_iter(ctrl->admin_tagset,
1855 nvme_cancel_request, ctrl);
Ming Lei622b8b62019-07-24 11:48:42 +08001856 blk_mq_tagset_wait_completed_request(ctrl->admin_tagset);
1857 }
Sagi Grimberge7832cb2019-08-02 19:33:59 -07001858 if (remove)
1859 blk_mq_unquiesce_queue(ctrl->admin_q);
Sagi Grimberg3f2304f2018-12-03 17:52:17 -08001860 nvme_tcp_destroy_admin_queue(ctrl, remove);
1861}
1862
1863static void nvme_tcp_teardown_io_queues(struct nvme_ctrl *ctrl,
1864 bool remove)
1865{
1866 if (ctrl->queue_count <= 1)
1867 return;
1868 nvme_stop_queues(ctrl);
1869 nvme_tcp_stop_io_queues(ctrl);
Ming Lei622b8b62019-07-24 11:48:42 +08001870 if (ctrl->tagset) {
Sagi Grimberg7a425892019-04-24 11:53:17 -07001871 blk_mq_tagset_busy_iter(ctrl->tagset,
1872 nvme_cancel_request, ctrl);
Ming Lei622b8b62019-07-24 11:48:42 +08001873 blk_mq_tagset_wait_completed_request(ctrl->tagset);
1874 }
Sagi Grimberg3f2304f2018-12-03 17:52:17 -08001875 if (remove)
1876 nvme_start_queues(ctrl);
1877 nvme_tcp_destroy_io_queues(ctrl, remove);
1878}
1879
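/*
 * Decide what to do after a failed connection attempt: while the controller
 * is still in the CONNECTING state and within the controller loss timeout,
 * re-queue the connect work after reconnect_delay seconds; otherwise delete
 * the controller.
 */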
1880static void nvme_tcp_reconnect_or_remove(struct nvme_ctrl *ctrl)
1881{
1882 /* If we are resetting/deleting then do nothing */
1883 if (ctrl->state != NVME_CTRL_CONNECTING) {
1884 WARN_ON_ONCE(ctrl->state == NVME_CTRL_NEW ||
1885 ctrl->state == NVME_CTRL_LIVE);
1886 return;
1887 }
1888
1889 if (nvmf_should_reconnect(ctrl)) {
1890 dev_info(ctrl->device, "Reconnecting in %d seconds...\n",
1891 ctrl->opts->reconnect_delay);
1892 queue_delayed_work(nvme_wq, &to_tcp_ctrl(ctrl)->connect_work,
1893 ctrl->opts->reconnect_delay * HZ);
1894 } else {
1895 dev_info(ctrl->device, "Removing controller...\n");
1896 nvme_delete_ctrl(ctrl);
1897 }
1898}
1899
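/*
 * Common controller bring-up used by initial creation, reset and reconnect.
 * 'new' is true only on first-time creation, where the tag sets and request
 * queues still have to be allocated.
 */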
1900static int nvme_tcp_setup_ctrl(struct nvme_ctrl *ctrl, bool new)
1901{
1902 struct nvmf_ctrl_options *opts = ctrl->opts;
Colin Ian King312910f2019-09-05 15:34:35 +01001903 int ret;
Sagi Grimberg3f2304f2018-12-03 17:52:17 -08001904
1905 ret = nvme_tcp_configure_admin_queue(ctrl, new);
1906 if (ret)
1907 return ret;
1908
	if (ctrl->icdoff) {
		/* in-capsule data offsets are not supported by this transport */
		ret = -EOPNOTSUPP;
		dev_err(ctrl->device, "icdoff is not supported!\n");
		goto destroy_admin;
	}
1913
1914 if (opts->queue_size > ctrl->sqsize + 1)
1915 dev_warn(ctrl->device,
1916 "queue_size %zu > ctrl sqsize %u, clamping down\n",
1917 opts->queue_size, ctrl->sqsize + 1);
1918
1919 if (ctrl->sqsize + 1 > ctrl->maxcmd) {
1920 dev_warn(ctrl->device,
1921 "sqsize %u > ctrl maxcmd %u, clamping down\n",
1922 ctrl->sqsize + 1, ctrl->maxcmd);
1923 ctrl->sqsize = ctrl->maxcmd - 1;
1924 }
1925
1926 if (ctrl->queue_count > 1) {
1927 ret = nvme_tcp_configure_io_queues(ctrl, new);
1928 if (ret)
1929 goto destroy_admin;
1930 }
1931
1932 if (!nvme_change_ctrl_state(ctrl, NVME_CTRL_LIVE)) {
Israel Rukshinbea54ef2020-03-24 17:29:45 +02001933 /*
		 * A state change failure is ok if we're in the DELETING state,
		 * unless we are in the middle of creating a new controller,
		 * in order to avoid races with the teardown flow.
1937 */
Sagi Grimberg3f2304f2018-12-03 17:52:17 -08001938 WARN_ON_ONCE(ctrl->state != NVME_CTRL_DELETING);
Israel Rukshinbea54ef2020-03-24 17:29:45 +02001939 WARN_ON_ONCE(new);
Sagi Grimberg3f2304f2018-12-03 17:52:17 -08001940 ret = -EINVAL;
1941 goto destroy_io;
1942 }
1943
1944 nvme_start_ctrl(ctrl);
1945 return 0;
1946
1947destroy_io:
1948 if (ctrl->queue_count > 1)
1949 nvme_tcp_destroy_io_queues(ctrl, new);
1950destroy_admin:
1951 nvme_tcp_stop_queue(ctrl, 0);
1952 nvme_tcp_destroy_admin_queue(ctrl, new);
1953 return ret;
1954}
1955
1956static void nvme_tcp_reconnect_ctrl_work(struct work_struct *work)
1957{
1958 struct nvme_tcp_ctrl *tcp_ctrl = container_of(to_delayed_work(work),
1959 struct nvme_tcp_ctrl, connect_work);
1960 struct nvme_ctrl *ctrl = &tcp_ctrl->ctrl;
1961
1962 ++ctrl->nr_reconnects;
1963
1964 if (nvme_tcp_setup_ctrl(ctrl, false))
1965 goto requeue;
1966
	dev_info(ctrl->device, "Successfully reconnected (attempt %d)\n",
			ctrl->nr_reconnects);
1969
1970 ctrl->nr_reconnects = 0;
1971
1972 return;
1973
1974requeue:
1975 dev_info(ctrl->device, "Failed reconnect attempt %d\n",
1976 ctrl->nr_reconnects);
1977 nvme_tcp_reconnect_or_remove(ctrl);
1978}
1979
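/*
 * Error recovery: stop keep-alive, tear down the I/O and admin queues while
 * fast-failing anything still pending, then move to CONNECTING and let
 * nvme_tcp_reconnect_or_remove() schedule a reconnect or remove the
 * controller.
 */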
1980static void nvme_tcp_error_recovery_work(struct work_struct *work)
1981{
1982 struct nvme_tcp_ctrl *tcp_ctrl = container_of(work,
1983 struct nvme_tcp_ctrl, err_work);
1984 struct nvme_ctrl *ctrl = &tcp_ctrl->ctrl;
1985
1986 nvme_stop_keep_alive(ctrl);
1987 nvme_tcp_teardown_io_queues(ctrl, false);
	/* unquiesce to fast-fail pending requests */
1989 nvme_start_queues(ctrl);
1990 nvme_tcp_teardown_admin_queue(ctrl, false);
Sagi Grimberge7832cb2019-08-02 19:33:59 -07001991 blk_mq_unquiesce_queue(ctrl->admin_q);
Sagi Grimberg3f2304f2018-12-03 17:52:17 -08001992
1993 if (!nvme_change_ctrl_state(ctrl, NVME_CTRL_CONNECTING)) {
1994 /* state change failure is ok if we're in DELETING state */
1995 WARN_ON_ONCE(ctrl->state != NVME_CTRL_DELETING);
1996 return;
1997 }
1998
1999 nvme_tcp_reconnect_or_remove(ctrl);
2000}
2001
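/*
 * Full controller teardown: cancel any pending error recovery and reconnect
 * work, then take down the I/O and admin queues.  With shutdown == true the
 * controller is shut down cleanly (CC.SHN), otherwise it is only disabled,
 * as is done on reset.
 */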
2002static void nvme_tcp_teardown_ctrl(struct nvme_ctrl *ctrl, bool shutdown)
2003{
Sagi Grimberg794a4cb2019-01-01 00:19:30 -08002004 cancel_work_sync(&to_tcp_ctrl(ctrl)->err_work);
2005 cancel_delayed_work_sync(&to_tcp_ctrl(ctrl)->connect_work);
2006
Sagi Grimberg3f2304f2018-12-03 17:52:17 -08002007 nvme_tcp_teardown_io_queues(ctrl, shutdown);
Sagi Grimberge7832cb2019-08-02 19:33:59 -07002008 blk_mq_quiesce_queue(ctrl->admin_q);
Sagi Grimberg3f2304f2018-12-03 17:52:17 -08002009 if (shutdown)
2010 nvme_shutdown_ctrl(ctrl);
2011 else
Sagi Grimbergb5b05042019-07-22 17:06:54 -07002012 nvme_disable_ctrl(ctrl);
Sagi Grimberg3f2304f2018-12-03 17:52:17 -08002013 nvme_tcp_teardown_admin_queue(ctrl, shutdown);
2014}
2015
2016static void nvme_tcp_delete_ctrl(struct nvme_ctrl *ctrl)
2017{
2018 nvme_tcp_teardown_ctrl(ctrl, true);
2019}
2020
2021static void nvme_reset_ctrl_work(struct work_struct *work)
2022{
2023 struct nvme_ctrl *ctrl =
2024 container_of(work, struct nvme_ctrl, reset_work);
2025
2026 nvme_stop_ctrl(ctrl);
2027 nvme_tcp_teardown_ctrl(ctrl, false);
2028
2029 if (!nvme_change_ctrl_state(ctrl, NVME_CTRL_CONNECTING)) {
2030 /* state change failure is ok if we're in DELETING state */
2031 WARN_ON_ONCE(ctrl->state != NVME_CTRL_DELETING);
2032 return;
2033 }
2034
2035 if (nvme_tcp_setup_ctrl(ctrl, false))
2036 goto out_fail;
2037
2038 return;
2039
2040out_fail:
2041 ++ctrl->nr_reconnects;
2042 nvme_tcp_reconnect_or_remove(ctrl);
2043}
2044
Sagi Grimberg3f2304f2018-12-03 17:52:17 -08002045static void nvme_tcp_free_ctrl(struct nvme_ctrl *nctrl)
2046{
2047 struct nvme_tcp_ctrl *ctrl = to_tcp_ctrl(nctrl);
2048
2049 if (list_empty(&ctrl->list))
2050 goto free_ctrl;
2051
2052 mutex_lock(&nvme_tcp_ctrl_mutex);
2053 list_del(&ctrl->list);
2054 mutex_unlock(&nvme_tcp_ctrl_mutex);
2055
2056 nvmf_free_options(nctrl->opts);
2057free_ctrl:
2058 kfree(ctrl->queues);
2059 kfree(ctrl);
2060}
2061
2062static void nvme_tcp_set_sg_null(struct nvme_command *c)
2063{
2064 struct nvme_sgl_desc *sg = &c->common.dptr.sgl;
2065
2066 sg->addr = 0;
2067 sg->length = 0;
2068 sg->type = (NVME_TRANSPORT_SGL_DATA_DESC << 4) |
2069 NVME_SGL_FMT_TRANSPORT_A;
2070}
2071
2072static void nvme_tcp_set_sg_inline(struct nvme_tcp_queue *queue,
2073 struct nvme_command *c, u32 data_len)
2074{
2075 struct nvme_sgl_desc *sg = &c->common.dptr.sgl;
2076
2077 sg->addr = cpu_to_le64(queue->ctrl->ctrl.icdoff);
2078 sg->length = cpu_to_le32(data_len);
2079 sg->type = (NVME_SGL_FMT_DATA_DESC << 4) | NVME_SGL_FMT_OFFSET;
2080}
2081
2082static void nvme_tcp_set_sg_host_data(struct nvme_command *c,
2083 u32 data_len)
2084{
2085 struct nvme_sgl_desc *sg = &c->common.dptr.sgl;
2086
2087 sg->addr = 0;
2088 sg->length = cpu_to_le32(data_len);
2089 sg->type = (NVME_TRANSPORT_SGL_DATA_DESC << 4) |
2090 NVME_SGL_FMT_TRANSPORT_A;
2091}
2092
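/*
 * Send the single Async Event Request command.  It does not go through the
 * block layer; the PDU is built by hand using the reserved command id
 * (NVME_AQ_BLK_MQ_DEPTH) and queued directly on the admin queue.
 */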
2093static void nvme_tcp_submit_async_event(struct nvme_ctrl *arg)
2094{
2095 struct nvme_tcp_ctrl *ctrl = to_tcp_ctrl(arg);
2096 struct nvme_tcp_queue *queue = &ctrl->queues[0];
2097 struct nvme_tcp_cmd_pdu *pdu = ctrl->async_req.pdu;
2098 struct nvme_command *cmd = &pdu->cmd;
2099 u8 hdgst = nvme_tcp_hdgst_len(queue);
2100
2101 memset(pdu, 0, sizeof(*pdu));
2102 pdu->hdr.type = nvme_tcp_cmd;
2103 if (queue->hdr_digest)
2104 pdu->hdr.flags |= NVME_TCP_F_HDGST;
2105 pdu->hdr.hlen = sizeof(*pdu);
2106 pdu->hdr.plen = cpu_to_le32(pdu->hdr.hlen + hdgst);
2107
2108 cmd->common.opcode = nvme_admin_async_event;
2109 cmd->common.command_id = NVME_AQ_BLK_MQ_DEPTH;
2110 cmd->common.flags |= NVME_CMD_SGL_METABUF;
2111 nvme_tcp_set_sg_null(cmd);
2112
2113 ctrl->async_req.state = NVME_TCP_SEND_CMD_PDU;
2114 ctrl->async_req.offset = 0;
2115 ctrl->async_req.curr_bio = NULL;
2116 ctrl->async_req.data_len = 0;
2117
2118 nvme_tcp_queue_request(&ctrl->async_req);
2119}
2120
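/*
 * blk-mq timeout handler.  If a reset is already in progress the timer is
 * simply restarted; if the controller is not live the queues are torn down
 * and the request is completed; otherwise error recovery is kicked off and
 * the timer restarted so that recovery can cancel the request.
 */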
2121static enum blk_eh_timer_return
2122nvme_tcp_timeout(struct request *rq, bool reserved)
2123{
2124 struct nvme_tcp_request *req = blk_mq_rq_to_pdu(rq);
2125 struct nvme_tcp_ctrl *ctrl = req->queue->ctrl;
2126 struct nvme_tcp_cmd_pdu *pdu = req->pdu;
2127
Keith Busch92b98e82019-09-05 08:09:33 -06002128 /*
2129 * Restart the timer if a controller reset is already scheduled. Any
2130 * timed out commands would be handled before entering the connecting
2131 * state.
2132 */
2133 if (ctrl->ctrl.state == NVME_CTRL_RESETTING)
2134 return BLK_EH_RESET_TIMER;
2135
Sagi Grimberg39d57752019-01-08 01:01:30 -08002136 dev_warn(ctrl->ctrl.device,
Sagi Grimberg3f2304f2018-12-03 17:52:17 -08002137 "queue %d: timeout request %#x type %d\n",
Sagi Grimberg39d57752019-01-08 01:01:30 -08002138 nvme_tcp_queue_id(req->queue), rq->tag, pdu->hdr.type);
Sagi Grimberg3f2304f2018-12-03 17:52:17 -08002139
2140 if (ctrl->ctrl.state != NVME_CTRL_LIVE) {
Sagi Grimberg39d57752019-01-08 01:01:30 -08002141 /*
		 * Tear down immediately if the controller times out while starting
		 * or if error recovery has already started.  All outstanding
		 * requests are completed on shutdown, so we return BLK_EH_DONE.
2145 */
2146 flush_work(&ctrl->err_work);
2147 nvme_tcp_teardown_io_queues(&ctrl->ctrl, false);
2148 nvme_tcp_teardown_admin_queue(&ctrl->ctrl, false);
Sagi Grimberg3f2304f2018-12-03 17:52:17 -08002149 return BLK_EH_DONE;
2150 }
2151
Sagi Grimberg39d57752019-01-08 01:01:30 -08002152 dev_warn(ctrl->ctrl.device, "starting error recovery\n");
Sagi Grimberg3f2304f2018-12-03 17:52:17 -08002153 nvme_tcp_error_recovery(&ctrl->ctrl);
2154
2155 return BLK_EH_RESET_TIMER;
2156}
2157
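/*
 * Set up the command's SGL descriptor: writes that fit within the queue's
 * inline data size are sent as in-capsule data; everything else is described
 * as host-resident transport data to be moved with subsequent data PDUs.
 */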
2158static blk_status_t nvme_tcp_map_data(struct nvme_tcp_queue *queue,
2159 struct request *rq)
2160{
2161 struct nvme_tcp_request *req = blk_mq_rq_to_pdu(rq);
2162 struct nvme_tcp_cmd_pdu *pdu = req->pdu;
2163 struct nvme_command *c = &pdu->cmd;
2164
2165 c->common.flags |= NVME_CMD_SGL_METABUF;
2166
2167 if (rq_data_dir(rq) == WRITE && req->data_len &&
2168 req->data_len <= nvme_tcp_inline_data_size(queue))
2169 nvme_tcp_set_sg_inline(queue, c, req->data_len);
2170 else
2171 nvme_tcp_set_sg_host_data(c, req->data_len);
2172
2173 return 0;
2174}
2175
2176static blk_status_t nvme_tcp_setup_cmd_pdu(struct nvme_ns *ns,
2177 struct request *rq)
2178{
2179 struct nvme_tcp_request *req = blk_mq_rq_to_pdu(rq);
2180 struct nvme_tcp_cmd_pdu *pdu = req->pdu;
2181 struct nvme_tcp_queue *queue = req->queue;
2182 u8 hdgst = nvme_tcp_hdgst_len(queue), ddgst = 0;
2183 blk_status_t ret;
2184
2185 ret = nvme_setup_cmd(ns, rq, &pdu->cmd);
2186 if (ret)
2187 return ret;
2188
2189 req->state = NVME_TCP_SEND_CMD_PDU;
2190 req->offset = 0;
2191 req->data_sent = 0;
2192 req->pdu_len = 0;
2193 req->pdu_sent = 0;
2194 req->data_len = blk_rq_payload_bytes(rq);
2195 req->curr_bio = rq->bio;
2196
2197 if (rq_data_dir(rq) == WRITE &&
2198 req->data_len <= nvme_tcp_inline_data_size(queue))
2199 req->pdu_len = req->data_len;
2200 else if (req->curr_bio)
2201 nvme_tcp_init_iter(req, READ);
2202
2203 pdu->hdr.type = nvme_tcp_cmd;
2204 pdu->hdr.flags = 0;
2205 if (queue->hdr_digest)
2206 pdu->hdr.flags |= NVME_TCP_F_HDGST;
2207 if (queue->data_digest && req->pdu_len) {
2208 pdu->hdr.flags |= NVME_TCP_F_DDGST;
2209 ddgst = nvme_tcp_ddgst_len(queue);
2210 }
2211 pdu->hdr.hlen = sizeof(*pdu);
2212 pdu->hdr.pdo = req->pdu_len ? pdu->hdr.hlen + hdgst : 0;
2213 pdu->hdr.plen =
2214 cpu_to_le32(pdu->hdr.hlen + hdgst + req->pdu_len + ddgst);
2215
2216 ret = nvme_tcp_map_data(queue, rq);
2217 if (unlikely(ret)) {
Max Gurtovoy28a4cac2019-10-13 19:57:38 +03002218 nvme_cleanup_cmd(rq);
Sagi Grimberg3f2304f2018-12-03 17:52:17 -08002219 dev_err(queue->ctrl->ctrl.device,
2220 "Failed to map data (%d)\n", ret);
2221 return ret;
2222 }
2223
2224 return 0;
2225}
2226
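/*
 * .queue_rq entry point: fail or requeue commands arriving on a queue that
 * is not live yet, otherwise build the command PDU and hand the request to
 * the queue's io_work for transmission.
 */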
2227static blk_status_t nvme_tcp_queue_rq(struct blk_mq_hw_ctx *hctx,
2228 const struct blk_mq_queue_data *bd)
2229{
2230 struct nvme_ns *ns = hctx->queue->queuedata;
2231 struct nvme_tcp_queue *queue = hctx->driver_data;
2232 struct request *rq = bd->rq;
2233 struct nvme_tcp_request *req = blk_mq_rq_to_pdu(rq);
2234 bool queue_ready = test_bit(NVME_TCP_Q_LIVE, &queue->flags);
2235 blk_status_t ret;
2236
2237 if (!nvmf_check_ready(&queue->ctrl->ctrl, rq, queue_ready))
2238 return nvmf_fail_nonready_command(&queue->ctrl->ctrl, rq);
2239
2240 ret = nvme_tcp_setup_cmd_pdu(ns, rq);
2241 if (unlikely(ret))
2242 return ret;
2243
2244 blk_mq_start_request(rq);
2245
2246 nvme_tcp_queue_request(req);
2247
2248 return BLK_STS_OK;
2249}
2250
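/*
 * Map blk-mq hardware contexts onto the TCP queues.  When separate write
 * (default) and read queue counts were requested they get distinct queue
 * ranges, otherwise both maps share the same queues; dedicated poll queues,
 * if any, are mapped after the read queues.
 */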
Sagi Grimberg873946f2018-12-11 23:38:57 -08002251static int nvme_tcp_map_queues(struct blk_mq_tag_set *set)
2252{
2253 struct nvme_tcp_ctrl *ctrl = set->driver_data;
Sagi Grimberg64861992019-05-28 22:49:05 -07002254 struct nvmf_ctrl_options *opts = ctrl->ctrl.opts;
Sagi Grimberg873946f2018-12-11 23:38:57 -08002255
Sagi Grimberg64861992019-05-28 22:49:05 -07002256 if (opts->nr_write_queues && ctrl->io_queues[HCTX_TYPE_READ]) {
Sagi Grimberg873946f2018-12-11 23:38:57 -08002257 /* separate read/write queues */
2258 set->map[HCTX_TYPE_DEFAULT].nr_queues =
Sagi Grimberg64861992019-05-28 22:49:05 -07002259 ctrl->io_queues[HCTX_TYPE_DEFAULT];
2260 set->map[HCTX_TYPE_DEFAULT].queue_offset = 0;
2261 set->map[HCTX_TYPE_READ].nr_queues =
2262 ctrl->io_queues[HCTX_TYPE_READ];
Sagi Grimberg873946f2018-12-11 23:38:57 -08002263 set->map[HCTX_TYPE_READ].queue_offset =
Sagi Grimberg64861992019-05-28 22:49:05 -07002264 ctrl->io_queues[HCTX_TYPE_DEFAULT];
Sagi Grimberg873946f2018-12-11 23:38:57 -08002265 } else {
Sagi Grimberg64861992019-05-28 22:49:05 -07002266 /* shared read/write queues */
Sagi Grimberg873946f2018-12-11 23:38:57 -08002267 set->map[HCTX_TYPE_DEFAULT].nr_queues =
Sagi Grimberg64861992019-05-28 22:49:05 -07002268 ctrl->io_queues[HCTX_TYPE_DEFAULT];
2269 set->map[HCTX_TYPE_DEFAULT].queue_offset = 0;
2270 set->map[HCTX_TYPE_READ].nr_queues =
2271 ctrl->io_queues[HCTX_TYPE_DEFAULT];
Sagi Grimberg873946f2018-12-11 23:38:57 -08002272 set->map[HCTX_TYPE_READ].queue_offset = 0;
2273 }
2274 blk_mq_map_queues(&set->map[HCTX_TYPE_DEFAULT]);
2275 blk_mq_map_queues(&set->map[HCTX_TYPE_READ]);
Sagi Grimberg64861992019-05-28 22:49:05 -07002276
Sagi Grimberg1a9460c2019-07-03 14:08:04 -07002277 if (opts->nr_poll_queues && ctrl->io_queues[HCTX_TYPE_POLL]) {
2278 /* map dedicated poll queues only if we have queues left */
2279 set->map[HCTX_TYPE_POLL].nr_queues =
2280 ctrl->io_queues[HCTX_TYPE_POLL];
2281 set->map[HCTX_TYPE_POLL].queue_offset =
2282 ctrl->io_queues[HCTX_TYPE_DEFAULT] +
2283 ctrl->io_queues[HCTX_TYPE_READ];
2284 blk_mq_map_queues(&set->map[HCTX_TYPE_POLL]);
2285 }
2286
Sagi Grimberg64861992019-05-28 22:49:05 -07002287 dev_info(ctrl->ctrl.device,
Sagi Grimberg1a9460c2019-07-03 14:08:04 -07002288 "mapped %d/%d/%d default/read/poll queues.\n",
Sagi Grimberg64861992019-05-28 22:49:05 -07002289 ctrl->io_queues[HCTX_TYPE_DEFAULT],
Sagi Grimberg1a9460c2019-07-03 14:08:04 -07002290 ctrl->io_queues[HCTX_TYPE_READ],
2291 ctrl->io_queues[HCTX_TYPE_POLL]);
Sagi Grimberg64861992019-05-28 22:49:05 -07002292
Sagi Grimberg873946f2018-12-11 23:38:57 -08002293 return 0;
2294}
2295
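/*
 * Polling entry point for HCTX_TYPE_POLL queues: busy-poll the socket if
 * nothing has been received yet, reap whatever has arrived and report the
 * number of completions processed.
 */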
Sagi Grimberg1a9460c2019-07-03 14:08:04 -07002296static int nvme_tcp_poll(struct blk_mq_hw_ctx *hctx)
2297{
2298 struct nvme_tcp_queue *queue = hctx->driver_data;
2299 struct sock *sk = queue->sock->sk;
2300
Eric Dumazet3f926af2019-10-23 22:44:51 -07002301 if (sk_can_busy_loop(sk) && skb_queue_empty_lockless(&sk->sk_receive_queue))
Sagi Grimberg1a9460c2019-07-03 14:08:04 -07002302 sk_busy_loop(sk, true);
2303 nvme_tcp_try_recv(queue);
2304 return queue->nr_cqe;
2305}
2306
Sagi Grimberg3f2304f2018-12-03 17:52:17 -08002307static struct blk_mq_ops nvme_tcp_mq_ops = {
2308 .queue_rq = nvme_tcp_queue_rq,
2309 .complete = nvme_complete_rq,
2310 .init_request = nvme_tcp_init_request,
2311 .exit_request = nvme_tcp_exit_request,
2312 .init_hctx = nvme_tcp_init_hctx,
2313 .timeout = nvme_tcp_timeout,
Sagi Grimberg873946f2018-12-11 23:38:57 -08002314 .map_queues = nvme_tcp_map_queues,
Sagi Grimberg1a9460c2019-07-03 14:08:04 -07002315 .poll = nvme_tcp_poll,
Sagi Grimberg3f2304f2018-12-03 17:52:17 -08002316};
2317
2318static struct blk_mq_ops nvme_tcp_admin_mq_ops = {
2319 .queue_rq = nvme_tcp_queue_rq,
2320 .complete = nvme_complete_rq,
2321 .init_request = nvme_tcp_init_request,
2322 .exit_request = nvme_tcp_exit_request,
2323 .init_hctx = nvme_tcp_init_admin_hctx,
2324 .timeout = nvme_tcp_timeout,
2325};
2326
2327static const struct nvme_ctrl_ops nvme_tcp_ctrl_ops = {
2328 .name = "tcp",
2329 .module = THIS_MODULE,
2330 .flags = NVME_F_FABRICS,
2331 .reg_read32 = nvmf_reg_read32,
2332 .reg_read64 = nvmf_reg_read64,
2333 .reg_write32 = nvmf_reg_write32,
2334 .free_ctrl = nvme_tcp_free_ctrl,
2335 .submit_async_event = nvme_tcp_submit_async_event,
2336 .delete_ctrl = nvme_tcp_delete_ctrl,
2337 .get_address = nvmf_get_address,
Sagi Grimberg3f2304f2018-12-03 17:52:17 -08002338};
2339
2340static bool
2341nvme_tcp_existing_controller(struct nvmf_ctrl_options *opts)
2342{
2343 struct nvme_tcp_ctrl *ctrl;
2344 bool found = false;
2345
2346 mutex_lock(&nvme_tcp_ctrl_mutex);
2347 list_for_each_entry(ctrl, &nvme_tcp_ctrl_list, list) {
2348 found = nvmf_ip_options_match(&ctrl->ctrl, opts);
2349 if (found)
2350 break;
2351 }
2352 mutex_unlock(&nvme_tcp_ctrl_mutex);
2353
2354 return found;
2355}
2356
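/*
 * Create a new controller instance for a connect request: parse the target
 * and optional host addresses, default the port to the NVMe/TCP discovery
 * port (8009) when none was given, reject duplicate connections, and run the
 * initial nvme_tcp_setup_ctrl() before publishing the controller.
 */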
2357static struct nvme_ctrl *nvme_tcp_create_ctrl(struct device *dev,
2358 struct nvmf_ctrl_options *opts)
2359{
2360 struct nvme_tcp_ctrl *ctrl;
2361 int ret;
2362
2363 ctrl = kzalloc(sizeof(*ctrl), GFP_KERNEL);
2364 if (!ctrl)
2365 return ERR_PTR(-ENOMEM);
2366
2367 INIT_LIST_HEAD(&ctrl->list);
2368 ctrl->ctrl.opts = opts;
Sagi Grimberg1a9460c2019-07-03 14:08:04 -07002369 ctrl->ctrl.queue_count = opts->nr_io_queues + opts->nr_write_queues +
2370 opts->nr_poll_queues + 1;
Sagi Grimberg3f2304f2018-12-03 17:52:17 -08002371 ctrl->ctrl.sqsize = opts->queue_size - 1;
2372 ctrl->ctrl.kato = opts->kato;
2373
2374 INIT_DELAYED_WORK(&ctrl->connect_work,
2375 nvme_tcp_reconnect_ctrl_work);
2376 INIT_WORK(&ctrl->err_work, nvme_tcp_error_recovery_work);
2377 INIT_WORK(&ctrl->ctrl.reset_work, nvme_reset_ctrl_work);
2378
2379 if (!(opts->mask & NVMF_OPT_TRSVCID)) {
2380 opts->trsvcid =
2381 kstrdup(__stringify(NVME_TCP_DISC_PORT), GFP_KERNEL);
2382 if (!opts->trsvcid) {
2383 ret = -ENOMEM;
2384 goto out_free_ctrl;
2385 }
2386 opts->mask |= NVMF_OPT_TRSVCID;
2387 }
2388
2389 ret = inet_pton_with_scope(&init_net, AF_UNSPEC,
2390 opts->traddr, opts->trsvcid, &ctrl->addr);
2391 if (ret) {
2392 pr_err("malformed address passed: %s:%s\n",
2393 opts->traddr, opts->trsvcid);
2394 goto out_free_ctrl;
2395 }
2396
2397 if (opts->mask & NVMF_OPT_HOST_TRADDR) {
2398 ret = inet_pton_with_scope(&init_net, AF_UNSPEC,
2399 opts->host_traddr, NULL, &ctrl->src_addr);
2400 if (ret) {
2401 pr_err("malformed src address passed: %s\n",
2402 opts->host_traddr);
2403 goto out_free_ctrl;
2404 }
2405 }
2406
2407 if (!opts->duplicate_connect && nvme_tcp_existing_controller(opts)) {
2408 ret = -EALREADY;
2409 goto out_free_ctrl;
2410 }
2411
Sagi Grimberg873946f2018-12-11 23:38:57 -08002412 ctrl->queues = kcalloc(ctrl->ctrl.queue_count, sizeof(*ctrl->queues),
Sagi Grimberg3f2304f2018-12-03 17:52:17 -08002413 GFP_KERNEL);
2414 if (!ctrl->queues) {
2415 ret = -ENOMEM;
2416 goto out_free_ctrl;
2417 }
2418
2419 ret = nvme_init_ctrl(&ctrl->ctrl, dev, &nvme_tcp_ctrl_ops, 0);
2420 if (ret)
2421 goto out_kfree_queues;
2422
2423 if (!nvme_change_ctrl_state(&ctrl->ctrl, NVME_CTRL_CONNECTING)) {
2424 WARN_ON_ONCE(1);
2425 ret = -EINTR;
2426 goto out_uninit_ctrl;
2427 }
2428
2429 ret = nvme_tcp_setup_ctrl(&ctrl->ctrl, true);
2430 if (ret)
2431 goto out_uninit_ctrl;
2432
2433 dev_info(ctrl->ctrl.device, "new ctrl: NQN \"%s\", addr %pISp\n",
2434 ctrl->ctrl.opts->subsysnqn, &ctrl->addr);
2435
Sagi Grimberg3f2304f2018-12-03 17:52:17 -08002436 mutex_lock(&nvme_tcp_ctrl_mutex);
2437 list_add_tail(&ctrl->list, &nvme_tcp_ctrl_list);
2438 mutex_unlock(&nvme_tcp_ctrl_mutex);
2439
2440 return &ctrl->ctrl;
2441
2442out_uninit_ctrl:
2443 nvme_uninit_ctrl(&ctrl->ctrl);
2444 nvme_put_ctrl(&ctrl->ctrl);
2445 if (ret > 0)
2446 ret = -EIO;
2447 return ERR_PTR(ret);
2448out_kfree_queues:
2449 kfree(ctrl->queues);
2450out_free_ctrl:
2451 kfree(ctrl);
2452 return ERR_PTR(ret);
2453}
2454
2455static struct nvmf_transport_ops nvme_tcp_transport = {
2456 .name = "tcp",
2457 .module = THIS_MODULE,
2458 .required_opts = NVMF_OPT_TRADDR,
2459 .allowed_opts = NVMF_OPT_TRSVCID | NVMF_OPT_RECONNECT_DELAY |
2460 NVMF_OPT_HOST_TRADDR | NVMF_OPT_CTRL_LOSS_TMO |
Sagi Grimberg873946f2018-12-11 23:38:57 -08002461 NVMF_OPT_HDR_DIGEST | NVMF_OPT_DATA_DIGEST |
Israel Rukshinbb139852019-08-18 12:08:54 +03002462 NVMF_OPT_NR_WRITE_QUEUES | NVMF_OPT_NR_POLL_QUEUES |
2463 NVMF_OPT_TOS,
Sagi Grimberg3f2304f2018-12-03 17:52:17 -08002464 .create_ctrl = nvme_tcp_create_ctrl,
2465};
2466
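/*
 * Module registration.  Once this transport is registered, a host typically
 * connects with nvme-cli, e.g. (the address, port and NQN below are only
 * examples):
 *
 *   modprobe nvme-tcp
 *   nvme connect -t tcp -a 192.168.1.10 -s 4420 -n nqn.2014-08.org.example:nvme:target1
 */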
2467static int __init nvme_tcp_init_module(void)
2468{
2469 nvme_tcp_wq = alloc_workqueue("nvme_tcp_wq",
2470 WQ_MEM_RECLAIM | WQ_HIGHPRI, 0);
2471 if (!nvme_tcp_wq)
2472 return -ENOMEM;
2473
2474 nvmf_register_transport(&nvme_tcp_transport);
2475 return 0;
2476}
2477
2478static void __exit nvme_tcp_cleanup_module(void)
2479{
2480 struct nvme_tcp_ctrl *ctrl;
2481
2482 nvmf_unregister_transport(&nvme_tcp_transport);
2483
2484 mutex_lock(&nvme_tcp_ctrl_mutex);
2485 list_for_each_entry(ctrl, &nvme_tcp_ctrl_list, list)
2486 nvme_delete_ctrl(&ctrl->ctrl);
2487 mutex_unlock(&nvme_tcp_ctrl_mutex);
2488 flush_workqueue(nvme_delete_wq);
2489
2490 destroy_workqueue(nvme_tcp_wq);
2491}
2492
2493module_init(nvme_tcp_init_module);
2494module_exit(nvme_tcp_cleanup_module);
2495
2496MODULE_LICENSE("GPL v2");