Sagi Grimberg3f2304f2018-12-03 17:52:17 -08001// SPDX-License-Identifier: GPL-2.0
2/*
3 * NVMe over Fabrics TCP host.
4 * Copyright (c) 2018 Lightbits Labs. All rights reserved.
5 */
6#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
7#include <linux/module.h>
8#include <linux/init.h>
9#include <linux/slab.h>
10#include <linux/err.h>
11#include <linux/nvme-tcp.h>
12#include <net/sock.h>
13#include <net/tcp.h>
14#include <linux/blk-mq.h>
15#include <crypto/hash.h>
Sagi Grimberg1a9460c2019-07-03 14:08:04 -070016#include <net/busy_poll.h>
Sagi Grimberg3f2304f2018-12-03 17:52:17 -080017
18#include "nvme.h"
19#include "fabrics.h"
20
21struct nvme_tcp_queue;
22
Wunderlich, Mark9912ade2020-01-16 00:46:12 +000023/* Define the socket priority to use for connections where it is desirable
 24 * that the NIC consider performing optimized packet processing or filtering.
 25 * A non-zero value is sufficient to indicate general consideration of any
 26 * possible optimization. Making it a module param allows for alternative
 27 * values that may be unique to some NIC implementations.
 28 */
29static int so_priority;
30module_param(so_priority, int, 0644);
31MODULE_PARM_DESC(so_priority, "nvme tcp socket optimize priority");
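/*
 * Example usage (illustrative, assuming the driver is built as the nvme_tcp
 * module): the priority can be set at load time or adjusted at runtime, e.g.
 *
 *   modprobe nvme_tcp so_priority=6
 *   echo 6 > /sys/module/nvme_tcp/parameters/so_priority
 */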
32
Sagi Grimberg3f2304f2018-12-03 17:52:17 -080033enum nvme_tcp_send_state {
34 NVME_TCP_SEND_CMD_PDU = 0,
35 NVME_TCP_SEND_H2C_PDU,
36 NVME_TCP_SEND_DATA,
37 NVME_TCP_SEND_DDGST,
38};
39
40struct nvme_tcp_request {
41 struct nvme_request req;
42 void *pdu;
43 struct nvme_tcp_queue *queue;
44 u32 data_len;
45 u32 pdu_len;
46 u32 pdu_sent;
47 u16 ttag;
48 struct list_head entry;
Christoph Hellwiga7273d42018-12-13 09:46:59 +010049 __le32 ddgst;
Sagi Grimberg3f2304f2018-12-03 17:52:17 -080050
51 struct bio *curr_bio;
52 struct iov_iter iter;
53
54 /* send state */
55 size_t offset;
56 size_t data_sent;
57 enum nvme_tcp_send_state state;
58};
59
60enum nvme_tcp_queue_flags {
61 NVME_TCP_Q_ALLOCATED = 0,
62 NVME_TCP_Q_LIVE = 1,
63};
64
65enum nvme_tcp_recv_state {
66 NVME_TCP_RECV_PDU = 0,
67 NVME_TCP_RECV_DATA,
68 NVME_TCP_RECV_DDGST,
69};
70
71struct nvme_tcp_ctrl;
72struct nvme_tcp_queue {
73 struct socket *sock;
74 struct work_struct io_work;
75 int io_cpu;
76
77 spinlock_t lock;
78 struct list_head send_list;
79
80 /* recv state */
81 void *pdu;
82 int pdu_remaining;
83 int pdu_offset;
84 size_t data_remaining;
85 size_t ddgst_remaining;
Sagi Grimberg1a9460c2019-07-03 14:08:04 -070086 unsigned int nr_cqe;
Sagi Grimberg3f2304f2018-12-03 17:52:17 -080087
88 /* send state */
89 struct nvme_tcp_request *request;
90
91 int queue_size;
92 size_t cmnd_capsule_len;
93 struct nvme_tcp_ctrl *ctrl;
94 unsigned long flags;
95 bool rd_enabled;
96
97 bool hdr_digest;
98 bool data_digest;
99 struct ahash_request *rcv_hash;
100 struct ahash_request *snd_hash;
101 __le32 exp_ddgst;
102 __le32 recv_ddgst;
103
104 struct page_frag_cache pf_cache;
105
106 void (*state_change)(struct sock *);
107 void (*data_ready)(struct sock *);
108 void (*write_space)(struct sock *);
109};
110
111struct nvme_tcp_ctrl {
112 /* read only in the hot path */
113 struct nvme_tcp_queue *queues;
114 struct blk_mq_tag_set tag_set;
115
116 /* other member variables */
117 struct list_head list;
118 struct blk_mq_tag_set admin_tag_set;
119 struct sockaddr_storage addr;
120 struct sockaddr_storage src_addr;
121 struct nvme_ctrl ctrl;
122
123 struct work_struct err_work;
124 struct delayed_work connect_work;
125 struct nvme_tcp_request async_req;
Sagi Grimberg64861992019-05-28 22:49:05 -0700126 u32 io_queues[HCTX_MAX_TYPES];
Sagi Grimberg3f2304f2018-12-03 17:52:17 -0800127};
128
129static LIST_HEAD(nvme_tcp_ctrl_list);
130static DEFINE_MUTEX(nvme_tcp_ctrl_mutex);
131static struct workqueue_struct *nvme_tcp_wq;
132static struct blk_mq_ops nvme_tcp_mq_ops;
133static struct blk_mq_ops nvme_tcp_admin_mq_ops;
134
135static inline struct nvme_tcp_ctrl *to_tcp_ctrl(struct nvme_ctrl *ctrl)
136{
137 return container_of(ctrl, struct nvme_tcp_ctrl, ctrl);
138}
139
140static inline int nvme_tcp_queue_id(struct nvme_tcp_queue *queue)
141{
142 return queue - queue->ctrl->queues;
143}
144
145static inline struct blk_mq_tags *nvme_tcp_tagset(struct nvme_tcp_queue *queue)
146{
147 u32 queue_idx = nvme_tcp_queue_id(queue);
148
149 if (queue_idx == 0)
150 return queue->ctrl->admin_tag_set.tags[queue_idx];
151 return queue->ctrl->tag_set.tags[queue_idx - 1];
152}
153
154static inline u8 nvme_tcp_hdgst_len(struct nvme_tcp_queue *queue)
155{
156 return queue->hdr_digest ? NVME_TCP_DIGEST_LENGTH : 0;
157}
158
159static inline u8 nvme_tcp_ddgst_len(struct nvme_tcp_queue *queue)
160{
161 return queue->data_digest ? NVME_TCP_DIGEST_LENGTH : 0;
162}
163
164static inline size_t nvme_tcp_inline_data_size(struct nvme_tcp_queue *queue)
165{
166 return queue->cmnd_capsule_len - sizeof(struct nvme_command);
167}
168
169static inline bool nvme_tcp_async_req(struct nvme_tcp_request *req)
170{
171 return req == &req->queue->ctrl->async_req;
172}
173
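/*
 * A write is sent as in-capsule (inline) data only when its payload fits in
 * the command capsule advertised by the controller (derived from ioccsz on
 * I/O queues); larger writes instead wait for an R2T from the controller.
 */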
174static inline bool nvme_tcp_has_inline_data(struct nvme_tcp_request *req)
175{
176 struct request *rq;
Sagi Grimberg3f2304f2018-12-03 17:52:17 -0800177
178 if (unlikely(nvme_tcp_async_req(req)))
179 return false; /* async events don't have a request */
180
181 rq = blk_mq_rq_from_pdu(req);
Sagi Grimberg3f2304f2018-12-03 17:52:17 -0800182
Sagi Grimberg25e5cb72020-03-23 15:06:30 -0700183 return rq_data_dir(rq) == WRITE && req->data_len &&
184 req->data_len <= nvme_tcp_inline_data_size(req->queue);
Sagi Grimberg3f2304f2018-12-03 17:52:17 -0800185}
186
187static inline struct page *nvme_tcp_req_cur_page(struct nvme_tcp_request *req)
188{
189 return req->iter.bvec->bv_page;
190}
191
192static inline size_t nvme_tcp_req_cur_offset(struct nvme_tcp_request *req)
193{
194 return req->iter.bvec->bv_offset + req->iter.iov_offset;
195}
196
197static inline size_t nvme_tcp_req_cur_length(struct nvme_tcp_request *req)
198{
199 return min_t(size_t, req->iter.bvec->bv_len - req->iter.iov_offset,
200 req->pdu_len - req->pdu_sent);
201}
202
203static inline size_t nvme_tcp_req_offset(struct nvme_tcp_request *req)
204{
205 return req->iter.iov_offset;
206}
207
208static inline size_t nvme_tcp_pdu_data_left(struct nvme_tcp_request *req)
209{
210 return rq_data_dir(blk_mq_rq_from_pdu(req)) == WRITE ?
211 req->pdu_len - req->pdu_sent : 0;
212}
213
214static inline size_t nvme_tcp_pdu_last_send(struct nvme_tcp_request *req,
215 int len)
216{
217 return nvme_tcp_pdu_data_left(req) <= len;
218}
219
220static void nvme_tcp_init_iter(struct nvme_tcp_request *req,
221 unsigned int dir)
222{
223 struct request *rq = blk_mq_rq_from_pdu(req);
224 struct bio_vec *vec;
225 unsigned int size;
226 int nsegs;
227 size_t offset;
228
229 if (rq->rq_flags & RQF_SPECIAL_PAYLOAD) {
230 vec = &rq->special_vec;
231 nsegs = 1;
232 size = blk_rq_payload_bytes(rq);
233 offset = 0;
234 } else {
235 struct bio *bio = req->curr_bio;
236
237 vec = __bvec_iter_bvec(bio->bi_io_vec, bio->bi_iter);
238 nsegs = bio_segments(bio);
239 size = bio->bi_iter.bi_size;
240 offset = bio->bi_iter.bi_bvec_done;
241 }
242
243 iov_iter_bvec(&req->iter, dir, vec, nsegs, size);
244 req->iter.iov_offset = offset;
245}
246
247static inline void nvme_tcp_advance_req(struct nvme_tcp_request *req,
248 int len)
249{
250 req->data_sent += len;
251 req->pdu_sent += len;
252 iov_iter_advance(&req->iter, len);
253 if (!iov_iter_count(&req->iter) &&
254 req->data_sent < req->data_len) {
255 req->curr_bio = req->curr_bio->bi_next;
256 nvme_tcp_init_iter(req, WRITE);
257 }
258}
259
260static inline void nvme_tcp_queue_request(struct nvme_tcp_request *req)
261{
262 struct nvme_tcp_queue *queue = req->queue;
263
264 spin_lock(&queue->lock);
265 list_add_tail(&req->entry, &queue->send_list);
266 spin_unlock(&queue->lock);
267
268 queue_work_on(queue->io_cpu, nvme_tcp_wq, &queue->io_work);
269}
270
271static inline struct nvme_tcp_request *
272nvme_tcp_fetch_request(struct nvme_tcp_queue *queue)
273{
274 struct nvme_tcp_request *req;
275
276 spin_lock(&queue->lock);
277 req = list_first_entry_or_null(&queue->send_list,
278 struct nvme_tcp_request, entry);
279 if (req)
280 list_del(&req->entry);
281 spin_unlock(&queue->lock);
282
283 return req;
284}
285
Christoph Hellwiga7273d42018-12-13 09:46:59 +0100286static inline void nvme_tcp_ddgst_final(struct ahash_request *hash,
287 __le32 *dgst)
Sagi Grimberg3f2304f2018-12-03 17:52:17 -0800288{
289 ahash_request_set_crypt(hash, NULL, (u8 *)dgst, 0);
290 crypto_ahash_final(hash);
291}
292
293static inline void nvme_tcp_ddgst_update(struct ahash_request *hash,
294 struct page *page, off_t off, size_t len)
295{
296 struct scatterlist sg;
297
298 sg_init_marker(&sg, 1);
299 sg_set_page(&sg, page, len, off);
300 ahash_request_set_crypt(hash, &sg, NULL, len);
301 crypto_ahash_update(hash);
302}
303
304static inline void nvme_tcp_hdgst(struct ahash_request *hash,
305 void *pdu, size_t len)
306{
307 struct scatterlist sg;
308
309 sg_init_one(&sg, pdu, len);
310 ahash_request_set_crypt(hash, &sg, pdu + len, len);
311 crypto_ahash_digest(hash);
312}
313
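/*
 * When header digests are negotiated, a CRC32C of the PDU header is carried
 * immediately after the header (at offset hdr->hlen).  The helper below
 * recomputes the digest in place and compares it against the received value.
 */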
314static int nvme_tcp_verify_hdgst(struct nvme_tcp_queue *queue,
315 void *pdu, size_t pdu_len)
316{
317 struct nvme_tcp_hdr *hdr = pdu;
318 __le32 recv_digest;
319 __le32 exp_digest;
320
321 if (unlikely(!(hdr->flags & NVME_TCP_F_HDGST))) {
322 dev_err(queue->ctrl->ctrl.device,
323 "queue %d: header digest flag is cleared\n",
324 nvme_tcp_queue_id(queue));
325 return -EPROTO;
326 }
327
328 recv_digest = *(__le32 *)(pdu + hdr->hlen);
329 nvme_tcp_hdgst(queue->rcv_hash, pdu, pdu_len);
330 exp_digest = *(__le32 *)(pdu + hdr->hlen);
331 if (recv_digest != exp_digest) {
332 dev_err(queue->ctrl->ctrl.device,
333 "header digest error: recv %#x expected %#x\n",
334 le32_to_cpu(recv_digest), le32_to_cpu(exp_digest));
335 return -EIO;
336 }
337
338 return 0;
339}
340
341static int nvme_tcp_check_ddgst(struct nvme_tcp_queue *queue, void *pdu)
342{
343 struct nvme_tcp_hdr *hdr = pdu;
344 u8 digest_len = nvme_tcp_hdgst_len(queue);
345 u32 len;
346
347 len = le32_to_cpu(hdr->plen) - hdr->hlen -
348 ((hdr->flags & NVME_TCP_F_HDGST) ? digest_len : 0);
349
350 if (unlikely(len && !(hdr->flags & NVME_TCP_F_DDGST))) {
351 dev_err(queue->ctrl->ctrl.device,
352 "queue %d: data digest flag is cleared\n",
353 nvme_tcp_queue_id(queue));
354 return -EPROTO;
355 }
356 crypto_ahash_init(queue->rcv_hash);
357
358 return 0;
359}
360
361static void nvme_tcp_exit_request(struct blk_mq_tag_set *set,
362 struct request *rq, unsigned int hctx_idx)
363{
364 struct nvme_tcp_request *req = blk_mq_rq_to_pdu(rq);
365
366 page_frag_free(req->pdu);
367}
368
369static int nvme_tcp_init_request(struct blk_mq_tag_set *set,
370 struct request *rq, unsigned int hctx_idx,
371 unsigned int numa_node)
372{
373 struct nvme_tcp_ctrl *ctrl = set->driver_data;
374 struct nvme_tcp_request *req = blk_mq_rq_to_pdu(rq);
375 int queue_idx = (set == &ctrl->tag_set) ? hctx_idx + 1 : 0;
376 struct nvme_tcp_queue *queue = &ctrl->queues[queue_idx];
377 u8 hdgst = nvme_tcp_hdgst_len(queue);
378
379 req->pdu = page_frag_alloc(&queue->pf_cache,
380 sizeof(struct nvme_tcp_cmd_pdu) + hdgst,
381 GFP_KERNEL | __GFP_ZERO);
382 if (!req->pdu)
383 return -ENOMEM;
384
385 req->queue = queue;
386 nvme_req(rq)->ctrl = &ctrl->ctrl;
387
388 return 0;
389}
390
391static int nvme_tcp_init_hctx(struct blk_mq_hw_ctx *hctx, void *data,
392 unsigned int hctx_idx)
393{
394 struct nvme_tcp_ctrl *ctrl = data;
395 struct nvme_tcp_queue *queue = &ctrl->queues[hctx_idx + 1];
396
397 hctx->driver_data = queue;
398 return 0;
399}
400
401static int nvme_tcp_init_admin_hctx(struct blk_mq_hw_ctx *hctx, void *data,
402 unsigned int hctx_idx)
403{
404 struct nvme_tcp_ctrl *ctrl = data;
405 struct nvme_tcp_queue *queue = &ctrl->queues[0];
406
407 hctx->driver_data = queue;
408 return 0;
409}
410
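/*
 * Receive-side state is derived from the remaining-byte counters rather than
 * stored explicitly: a pending PDU header takes precedence, then a pending
 * data digest, otherwise the queue is consuming C2H data.
 */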
411static enum nvme_tcp_recv_state
412nvme_tcp_recv_state(struct nvme_tcp_queue *queue)
413{
414 return (queue->pdu_remaining) ? NVME_TCP_RECV_PDU :
415 (queue->ddgst_remaining) ? NVME_TCP_RECV_DDGST :
416 NVME_TCP_RECV_DATA;
417}
418
419static void nvme_tcp_init_recv_ctx(struct nvme_tcp_queue *queue)
420{
421 queue->pdu_remaining = sizeof(struct nvme_tcp_rsp_pdu) +
422 nvme_tcp_hdgst_len(queue);
423 queue->pdu_offset = 0;
424 queue->data_remaining = -1;
425 queue->ddgst_remaining = 0;
426}
427
428static void nvme_tcp_error_recovery(struct nvme_ctrl *ctrl)
429{
430 if (!nvme_change_ctrl_state(ctrl, NVME_CTRL_RESETTING))
431 return;
432
Nigel Kirkland97b25122020-02-10 16:01:45 -0800433 queue_work(nvme_reset_wq, &to_tcp_ctrl(ctrl)->err_work);
Sagi Grimberg3f2304f2018-12-03 17:52:17 -0800434}
435
436static int nvme_tcp_process_nvme_cqe(struct nvme_tcp_queue *queue,
437 struct nvme_completion *cqe)
438{
439 struct request *rq;
440
441 rq = blk_mq_tag_to_rq(nvme_tcp_tagset(queue), cqe->command_id);
442 if (!rq) {
443 dev_err(queue->ctrl->ctrl.device,
444 "queue %d tag 0x%x not found\n",
445 nvme_tcp_queue_id(queue), cqe->command_id);
446 nvme_tcp_error_recovery(&queue->ctrl->ctrl);
447 return -EINVAL;
448 }
449
450 nvme_end_request(rq, cqe->status, cqe->result);
Sagi Grimberg1a9460c2019-07-03 14:08:04 -0700451 queue->nr_cqe++;
Sagi Grimberg3f2304f2018-12-03 17:52:17 -0800452
453 return 0;
454}
455
456static int nvme_tcp_handle_c2h_data(struct nvme_tcp_queue *queue,
457 struct nvme_tcp_data_pdu *pdu)
458{
459 struct request *rq;
460
461 rq = blk_mq_tag_to_rq(nvme_tcp_tagset(queue), pdu->command_id);
462 if (!rq) {
463 dev_err(queue->ctrl->ctrl.device,
464 "queue %d tag %#x not found\n",
465 nvme_tcp_queue_id(queue), pdu->command_id);
466 return -ENOENT;
467 }
468
469 if (!blk_rq_payload_bytes(rq)) {
470 dev_err(queue->ctrl->ctrl.device,
471 "queue %d tag %#x unexpected data\n",
472 nvme_tcp_queue_id(queue), rq->tag);
473 return -EIO;
474 }
475
476 queue->data_remaining = le32_to_cpu(pdu->data_length);
477
Sagi Grimberg602d6742019-03-13 18:55:10 +0100478 if (pdu->hdr.flags & NVME_TCP_F_DATA_SUCCESS &&
479 unlikely(!(pdu->hdr.flags & NVME_TCP_F_DATA_LAST))) {
480 dev_err(queue->ctrl->ctrl.device,
481 "queue %d tag %#x SUCCESS set but not last PDU\n",
482 nvme_tcp_queue_id(queue), rq->tag);
483 nvme_tcp_error_recovery(&queue->ctrl->ctrl);
484 return -EPROTO;
485 }
486
Sagi Grimberg3f2304f2018-12-03 17:52:17 -0800487 return 0;
Sagi Grimberg3f2304f2018-12-03 17:52:17 -0800488}
489
490static int nvme_tcp_handle_comp(struct nvme_tcp_queue *queue,
491 struct nvme_tcp_rsp_pdu *pdu)
492{
493 struct nvme_completion *cqe = &pdu->cqe;
494 int ret = 0;
495
496 /*
497 * AEN requests are special as they don't time out and can
498 * survive any kind of queue freeze and often don't respond to
499 * aborts. We don't even bother to allocate a struct request
500 * for them but rather special case them here.
501 */
Israel Rukshin58a8df62019-10-13 19:57:31 +0300502 if (unlikely(nvme_is_aen_req(nvme_tcp_queue_id(queue),
503 cqe->command_id)))
Sagi Grimberg3f2304f2018-12-03 17:52:17 -0800504 nvme_complete_async_event(&queue->ctrl->ctrl, cqe->status,
505 &cqe->result);
506 else
507 ret = nvme_tcp_process_nvme_cqe(queue, cqe);
508
509 return ret;
510}
511
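/*
 * An R2T PDU is the controller's solicitation for (part of) a write payload.
 * The helper below turns it into an H2C data PDU header for the matching
 * request: the controller dictates the offset and length, and the host
 * validates both against what it has already sent.
 */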
512static int nvme_tcp_setup_h2c_data_pdu(struct nvme_tcp_request *req,
513 struct nvme_tcp_r2t_pdu *pdu)
514{
515 struct nvme_tcp_data_pdu *data = req->pdu;
516 struct nvme_tcp_queue *queue = req->queue;
517 struct request *rq = blk_mq_rq_from_pdu(req);
518 u8 hdgst = nvme_tcp_hdgst_len(queue);
519 u8 ddgst = nvme_tcp_ddgst_len(queue);
520
521 req->pdu_len = le32_to_cpu(pdu->r2t_length);
522 req->pdu_sent = 0;
523
524 if (unlikely(req->data_sent + req->pdu_len > req->data_len)) {
525 dev_err(queue->ctrl->ctrl.device,
526 "req %d r2t len %u exceeded data len %u (%zu sent)\n",
527 rq->tag, req->pdu_len, req->data_len,
528 req->data_sent);
529 return -EPROTO;
530 }
531
532 if (unlikely(le32_to_cpu(pdu->r2t_offset) < req->data_sent)) {
533 dev_err(queue->ctrl->ctrl.device,
534 "req %d unexpected r2t offset %u (expected %zu)\n",
535 rq->tag, le32_to_cpu(pdu->r2t_offset),
536 req->data_sent);
537 return -EPROTO;
538 }
539
540 memset(data, 0, sizeof(*data));
541 data->hdr.type = nvme_tcp_h2c_data;
542 data->hdr.flags = NVME_TCP_F_DATA_LAST;
543 if (queue->hdr_digest)
544 data->hdr.flags |= NVME_TCP_F_HDGST;
545 if (queue->data_digest)
546 data->hdr.flags |= NVME_TCP_F_DDGST;
547 data->hdr.hlen = sizeof(*data);
548 data->hdr.pdo = data->hdr.hlen + hdgst;
549 data->hdr.plen =
550 cpu_to_le32(data->hdr.hlen + hdgst + req->pdu_len + ddgst);
551 data->ttag = pdu->ttag;
552 data->command_id = rq->tag;
553 data->data_offset = cpu_to_le32(req->data_sent);
554 data->data_length = cpu_to_le32(req->pdu_len);
555 return 0;
556}
557
558static int nvme_tcp_handle_r2t(struct nvme_tcp_queue *queue,
559 struct nvme_tcp_r2t_pdu *pdu)
560{
561 struct nvme_tcp_request *req;
562 struct request *rq;
563 int ret;
564
565 rq = blk_mq_tag_to_rq(nvme_tcp_tagset(queue), pdu->command_id);
566 if (!rq) {
567 dev_err(queue->ctrl->ctrl.device,
568 "queue %d tag %#x not found\n",
569 nvme_tcp_queue_id(queue), pdu->command_id);
570 return -ENOENT;
571 }
572 req = blk_mq_rq_to_pdu(rq);
573
574 ret = nvme_tcp_setup_h2c_data_pdu(req, pdu);
575 if (unlikely(ret))
576 return ret;
577
578 req->state = NVME_TCP_SEND_H2C_PDU;
579 req->offset = 0;
580
581 nvme_tcp_queue_request(req);
582
583 return 0;
584}
585
586static int nvme_tcp_recv_pdu(struct nvme_tcp_queue *queue, struct sk_buff *skb,
587 unsigned int *offset, size_t *len)
588{
589 struct nvme_tcp_hdr *hdr;
590 char *pdu = queue->pdu;
591 size_t rcv_len = min_t(size_t, *len, queue->pdu_remaining);
592 int ret;
593
594 ret = skb_copy_bits(skb, *offset,
595 &pdu[queue->pdu_offset], rcv_len);
596 if (unlikely(ret))
597 return ret;
598
599 queue->pdu_remaining -= rcv_len;
600 queue->pdu_offset += rcv_len;
601 *offset += rcv_len;
602 *len -= rcv_len;
603 if (queue->pdu_remaining)
604 return 0;
605
606 hdr = queue->pdu;
607 if (queue->hdr_digest) {
608 ret = nvme_tcp_verify_hdgst(queue, queue->pdu, hdr->hlen);
609 if (unlikely(ret))
610 return ret;
611 }
612
613
614 if (queue->data_digest) {
615 ret = nvme_tcp_check_ddgst(queue, queue->pdu);
616 if (unlikely(ret))
617 return ret;
618 }
619
620 switch (hdr->type) {
621 case nvme_tcp_c2h_data:
Sagi Grimberg6be18262019-07-19 12:46:46 -0700622 return nvme_tcp_handle_c2h_data(queue, (void *)queue->pdu);
Sagi Grimberg3f2304f2018-12-03 17:52:17 -0800623 case nvme_tcp_rsp:
624 nvme_tcp_init_recv_ctx(queue);
Sagi Grimberg6be18262019-07-19 12:46:46 -0700625 return nvme_tcp_handle_comp(queue, (void *)queue->pdu);
Sagi Grimberg3f2304f2018-12-03 17:52:17 -0800626 case nvme_tcp_r2t:
627 nvme_tcp_init_recv_ctx(queue);
Sagi Grimberg6be18262019-07-19 12:46:46 -0700628 return nvme_tcp_handle_r2t(queue, (void *)queue->pdu);
Sagi Grimberg3f2304f2018-12-03 17:52:17 -0800629 default:
630 dev_err(queue->ctrl->ctrl.device,
631 "unsupported pdu type (%d)\n", hdr->type);
632 return -EINVAL;
633 }
Sagi Grimberg3f2304f2018-12-03 17:52:17 -0800634}
635
Christoph Hellwig988aef9e2019-03-15 08:41:04 +0100636static inline void nvme_tcp_end_request(struct request *rq, u16 status)
Sagi Grimberg602d6742019-03-13 18:55:10 +0100637{
638 union nvme_result res = {};
639
640 nvme_end_request(rq, cpu_to_le16(status << 1), res);
641}
642
Sagi Grimberg3f2304f2018-12-03 17:52:17 -0800643static int nvme_tcp_recv_data(struct nvme_tcp_queue *queue, struct sk_buff *skb,
644 unsigned int *offset, size_t *len)
645{
646 struct nvme_tcp_data_pdu *pdu = (void *)queue->pdu;
647 struct nvme_tcp_request *req;
648 struct request *rq;
649
650 rq = blk_mq_tag_to_rq(nvme_tcp_tagset(queue), pdu->command_id);
651 if (!rq) {
652 dev_err(queue->ctrl->ctrl.device,
653 "queue %d tag %#x not found\n",
654 nvme_tcp_queue_id(queue), pdu->command_id);
655 return -ENOENT;
656 }
657 req = blk_mq_rq_to_pdu(rq);
658
659 while (true) {
660 int recv_len, ret;
661
662 recv_len = min_t(size_t, *len, queue->data_remaining);
663 if (!recv_len)
664 break;
665
666 if (!iov_iter_count(&req->iter)) {
667 req->curr_bio = req->curr_bio->bi_next;
668
 669 /*
 670 * If we don't have any bios left it means that the controller
 671 * sent more data than we requested, hence error
 672 */
673 if (!req->curr_bio) {
674 dev_err(queue->ctrl->ctrl.device,
675 "queue %d no space in request %#x",
676 nvme_tcp_queue_id(queue), rq->tag);
677 nvme_tcp_init_recv_ctx(queue);
678 return -EIO;
679 }
680 nvme_tcp_init_iter(req, READ);
681 }
682
683 /* we can read only from what is left in this bio */
684 recv_len = min_t(size_t, recv_len,
685 iov_iter_count(&req->iter));
686
687 if (queue->data_digest)
688 ret = skb_copy_and_hash_datagram_iter(skb, *offset,
689 &req->iter, recv_len, queue->rcv_hash);
690 else
691 ret = skb_copy_datagram_iter(skb, *offset,
692 &req->iter, recv_len);
693 if (ret) {
694 dev_err(queue->ctrl->ctrl.device,
695 "queue %d failed to copy request %#x data",
696 nvme_tcp_queue_id(queue), rq->tag);
697 return ret;
698 }
699
700 *len -= recv_len;
701 *offset += recv_len;
702 queue->data_remaining -= recv_len;
703 }
704
705 if (!queue->data_remaining) {
706 if (queue->data_digest) {
707 nvme_tcp_ddgst_final(queue->rcv_hash, &queue->exp_ddgst);
708 queue->ddgst_remaining = NVME_TCP_DIGEST_LENGTH;
709 } else {
Sagi Grimberg1a9460c2019-07-03 14:08:04 -0700710 if (pdu->hdr.flags & NVME_TCP_F_DATA_SUCCESS) {
Sagi Grimberg602d6742019-03-13 18:55:10 +0100711 nvme_tcp_end_request(rq, NVME_SC_SUCCESS);
Sagi Grimberg1a9460c2019-07-03 14:08:04 -0700712 queue->nr_cqe++;
713 }
Sagi Grimberg3f2304f2018-12-03 17:52:17 -0800714 nvme_tcp_init_recv_ctx(queue);
715 }
716 }
717
718 return 0;
719}
720
721static int nvme_tcp_recv_ddgst(struct nvme_tcp_queue *queue,
722 struct sk_buff *skb, unsigned int *offset, size_t *len)
723{
Sagi Grimberg602d6742019-03-13 18:55:10 +0100724 struct nvme_tcp_data_pdu *pdu = (void *)queue->pdu;
Sagi Grimberg3f2304f2018-12-03 17:52:17 -0800725 char *ddgst = (char *)&queue->recv_ddgst;
726 size_t recv_len = min_t(size_t, *len, queue->ddgst_remaining);
727 off_t off = NVME_TCP_DIGEST_LENGTH - queue->ddgst_remaining;
728 int ret;
729
730 ret = skb_copy_bits(skb, *offset, &ddgst[off], recv_len);
731 if (unlikely(ret))
732 return ret;
733
734 queue->ddgst_remaining -= recv_len;
735 *offset += recv_len;
736 *len -= recv_len;
737 if (queue->ddgst_remaining)
738 return 0;
739
740 if (queue->recv_ddgst != queue->exp_ddgst) {
741 dev_err(queue->ctrl->ctrl.device,
742 "data digest error: recv %#x expected %#x\n",
743 le32_to_cpu(queue->recv_ddgst),
744 le32_to_cpu(queue->exp_ddgst));
745 return -EIO;
746 }
747
Sagi Grimberg602d6742019-03-13 18:55:10 +0100748 if (pdu->hdr.flags & NVME_TCP_F_DATA_SUCCESS) {
749 struct request *rq = blk_mq_tag_to_rq(nvme_tcp_tagset(queue),
750 pdu->command_id);
751
752 nvme_tcp_end_request(rq, NVME_SC_SUCCESS);
Sagi Grimberg1a9460c2019-07-03 14:08:04 -0700753 queue->nr_cqe++;
Sagi Grimberg602d6742019-03-13 18:55:10 +0100754 }
755
Sagi Grimberg3f2304f2018-12-03 17:52:17 -0800756 nvme_tcp_init_recv_ctx(queue);
757 return 0;
758}
759
760static int nvme_tcp_recv_skb(read_descriptor_t *desc, struct sk_buff *skb,
761 unsigned int offset, size_t len)
762{
763 struct nvme_tcp_queue *queue = desc->arg.data;
764 size_t consumed = len;
765 int result;
766
767 while (len) {
768 switch (nvme_tcp_recv_state(queue)) {
769 case NVME_TCP_RECV_PDU:
770 result = nvme_tcp_recv_pdu(queue, skb, &offset, &len);
771 break;
772 case NVME_TCP_RECV_DATA:
773 result = nvme_tcp_recv_data(queue, skb, &offset, &len);
774 break;
775 case NVME_TCP_RECV_DDGST:
776 result = nvme_tcp_recv_ddgst(queue, skb, &offset, &len);
777 break;
778 default:
779 result = -EFAULT;
780 }
781 if (result) {
782 dev_err(queue->ctrl->ctrl.device,
783 "receive failed: %d\n", result);
784 queue->rd_enabled = false;
785 nvme_tcp_error_recovery(&queue->ctrl->ctrl);
786 return result;
787 }
788 }
789
790 return consumed;
791}
792
793static void nvme_tcp_data_ready(struct sock *sk)
794{
795 struct nvme_tcp_queue *queue;
796
797 read_lock(&sk->sk_callback_lock);
798 queue = sk->sk_user_data;
799 if (likely(queue && queue->rd_enabled))
800 queue_work_on(queue->io_cpu, nvme_tcp_wq, &queue->io_work);
801 read_unlock(&sk->sk_callback_lock);
802}
803
804static void nvme_tcp_write_space(struct sock *sk)
805{
806 struct nvme_tcp_queue *queue;
807
808 read_lock_bh(&sk->sk_callback_lock);
809 queue = sk->sk_user_data;
810 if (likely(queue && sk_stream_is_writeable(sk))) {
811 clear_bit(SOCK_NOSPACE, &sk->sk_socket->flags);
812 queue_work_on(queue->io_cpu, nvme_tcp_wq, &queue->io_work);
813 }
814 read_unlock_bh(&sk->sk_callback_lock);
815}
816
817static void nvme_tcp_state_change(struct sock *sk)
818{
819 struct nvme_tcp_queue *queue;
820
821 read_lock(&sk->sk_callback_lock);
822 queue = sk->sk_user_data;
823 if (!queue)
824 goto done;
825
826 switch (sk->sk_state) {
827 case TCP_CLOSE:
828 case TCP_CLOSE_WAIT:
829 case TCP_LAST_ACK:
830 case TCP_FIN_WAIT1:
831 case TCP_FIN_WAIT2:
833 nvme_tcp_error_recovery(&queue->ctrl->ctrl);
834 break;
835 default:
836 dev_info(queue->ctrl->ctrl.device,
837 "queue %d socket state %d\n",
838 nvme_tcp_queue_id(queue), sk->sk_state);
839 }
840
841 queue->state_change(sk);
842done:
843 read_unlock(&sk->sk_callback_lock);
844}
845
846static inline void nvme_tcp_done_send_req(struct nvme_tcp_queue *queue)
847{
848 queue->request = NULL;
849}
850
851static void nvme_tcp_fail_request(struct nvme_tcp_request *req)
852{
Sagi Grimberg16686012019-08-02 18:17:52 -0700853 nvme_tcp_end_request(blk_mq_rq_from_pdu(req), NVME_SC_HOST_PATH_ERROR);
Sagi Grimberg3f2304f2018-12-03 17:52:17 -0800854}
855
856static int nvme_tcp_try_send_data(struct nvme_tcp_request *req)
857{
858 struct nvme_tcp_queue *queue = req->queue;
859
860 while (true) {
861 struct page *page = nvme_tcp_req_cur_page(req);
862 size_t offset = nvme_tcp_req_cur_offset(req);
863 size_t len = nvme_tcp_req_cur_length(req);
864 bool last = nvme_tcp_pdu_last_send(req, len);
865 int ret, flags = MSG_DONTWAIT;
866
867 if (last && !queue->data_digest)
868 flags |= MSG_EOR;
869 else
870 flags |= MSG_MORE;
871
Mikhail Skorzhinskii37c15212019-07-08 12:31:29 +0200872 /* can't zcopy slab pages */
873 if (unlikely(PageSlab(page))) {
874 ret = sock_no_sendpage(queue->sock, page, offset, len,
875 flags);
876 } else {
877 ret = kernel_sendpage(queue->sock, page, offset, len,
878 flags);
879 }
Sagi Grimberg3f2304f2018-12-03 17:52:17 -0800880 if (ret <= 0)
881 return ret;
882
883 nvme_tcp_advance_req(req, ret);
884 if (queue->data_digest)
885 nvme_tcp_ddgst_update(queue->snd_hash, page,
886 offset, ret);
887
 888 /* fully successful last write */
889 if (last && ret == len) {
890 if (queue->data_digest) {
891 nvme_tcp_ddgst_final(queue->snd_hash,
892 &req->ddgst);
893 req->state = NVME_TCP_SEND_DDGST;
894 req->offset = 0;
895 } else {
896 nvme_tcp_done_send_req(queue);
897 }
898 return 1;
899 }
900 }
901 return -EAGAIN;
902}
903
904static int nvme_tcp_try_send_cmd_pdu(struct nvme_tcp_request *req)
905{
906 struct nvme_tcp_queue *queue = req->queue;
907 struct nvme_tcp_cmd_pdu *pdu = req->pdu;
908 bool inline_data = nvme_tcp_has_inline_data(req);
909 int flags = MSG_DONTWAIT | (inline_data ? MSG_MORE : MSG_EOR);
910 u8 hdgst = nvme_tcp_hdgst_len(queue);
911 int len = sizeof(*pdu) + hdgst - req->offset;
912 int ret;
913
914 if (queue->hdr_digest && !req->offset)
915 nvme_tcp_hdgst(queue->snd_hash, pdu, sizeof(*pdu));
916
917 ret = kernel_sendpage(queue->sock, virt_to_page(pdu),
918 offset_in_page(pdu) + req->offset, len, flags);
919 if (unlikely(ret <= 0))
920 return ret;
921
922 len -= ret;
923 if (!len) {
924 if (inline_data) {
925 req->state = NVME_TCP_SEND_DATA;
926 if (queue->data_digest)
927 crypto_ahash_init(queue->snd_hash);
928 nvme_tcp_init_iter(req, WRITE);
929 } else {
930 nvme_tcp_done_send_req(queue);
931 }
932 return 1;
933 }
934 req->offset += ret;
935
936 return -EAGAIN;
937}
938
939static int nvme_tcp_try_send_data_pdu(struct nvme_tcp_request *req)
940{
941 struct nvme_tcp_queue *queue = req->queue;
942 struct nvme_tcp_data_pdu *pdu = req->pdu;
943 u8 hdgst = nvme_tcp_hdgst_len(queue);
944 int len = sizeof(*pdu) - req->offset + hdgst;
945 int ret;
946
947 if (queue->hdr_digest && !req->offset)
948 nvme_tcp_hdgst(queue->snd_hash, pdu, sizeof(*pdu));
949
950 ret = kernel_sendpage(queue->sock, virt_to_page(pdu),
951 offset_in_page(pdu) + req->offset, len,
952 MSG_DONTWAIT | MSG_MORE);
953 if (unlikely(ret <= 0))
954 return ret;
955
956 len -= ret;
957 if (!len) {
958 req->state = NVME_TCP_SEND_DATA;
959 if (queue->data_digest)
960 crypto_ahash_init(queue->snd_hash);
961 if (!req->data_sent)
962 nvme_tcp_init_iter(req, WRITE);
963 return 1;
964 }
965 req->offset += ret;
966
967 return -EAGAIN;
968}
969
970static int nvme_tcp_try_send_ddgst(struct nvme_tcp_request *req)
971{
972 struct nvme_tcp_queue *queue = req->queue;
973 int ret;
974 struct msghdr msg = { .msg_flags = MSG_DONTWAIT | MSG_EOR };
975 struct kvec iov = {
976 .iov_base = &req->ddgst + req->offset,
977 .iov_len = NVME_TCP_DIGEST_LENGTH - req->offset
978 };
979
980 ret = kernel_sendmsg(queue->sock, &msg, &iov, 1, iov.iov_len);
981 if (unlikely(ret <= 0))
982 return ret;
983
984 if (req->offset + ret == NVME_TCP_DIGEST_LENGTH) {
985 nvme_tcp_done_send_req(queue);
986 return 1;
987 }
988
989 req->offset += ret;
990 return -EAGAIN;
991}
992
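/*
 * Send-path state machine: a request progresses CMD_PDU -> (optional H2C_PDU
 * after an R2T) -> DATA -> (optional DDGST).  A partial send leaves the
 * request installed as queue->request so the next invocation resumes where
 * it left off.
 */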
993static int nvme_tcp_try_send(struct nvme_tcp_queue *queue)
994{
995 struct nvme_tcp_request *req;
996 int ret = 1;
997
998 if (!queue->request) {
999 queue->request = nvme_tcp_fetch_request(queue);
1000 if (!queue->request)
1001 return 0;
1002 }
1003 req = queue->request;
1004
1005 if (req->state == NVME_TCP_SEND_CMD_PDU) {
1006 ret = nvme_tcp_try_send_cmd_pdu(req);
1007 if (ret <= 0)
1008 goto done;
1009 if (!nvme_tcp_has_inline_data(req))
1010 return ret;
1011 }
1012
1013 if (req->state == NVME_TCP_SEND_H2C_PDU) {
1014 ret = nvme_tcp_try_send_data_pdu(req);
1015 if (ret <= 0)
1016 goto done;
1017 }
1018
1019 if (req->state == NVME_TCP_SEND_DATA) {
1020 ret = nvme_tcp_try_send_data(req);
1021 if (ret <= 0)
1022 goto done;
1023 }
1024
1025 if (req->state == NVME_TCP_SEND_DDGST)
1026 ret = nvme_tcp_try_send_ddgst(req);
1027done:
Sagi Grimberg5ff4e112020-02-25 16:43:23 -08001028 if (ret == -EAGAIN) {
Sagi Grimberg3f2304f2018-12-03 17:52:17 -08001029 ret = 0;
Sagi Grimberg5ff4e112020-02-25 16:43:23 -08001030 } else if (ret < 0) {
1031 dev_err(queue->ctrl->ctrl.device,
1032 "failed to send request %d\n", ret);
1033 if (ret != -EPIPE && ret != -ECONNRESET)
1034 nvme_tcp_fail_request(queue->request);
1035 nvme_tcp_done_send_req(queue);
1036 }
Sagi Grimberg3f2304f2018-12-03 17:52:17 -08001037 return ret;
1038}
1039
1040static int nvme_tcp_try_recv(struct nvme_tcp_queue *queue)
1041{
Potnuri Bharat Teja10407ec2019-07-08 15:22:00 +05301042 struct socket *sock = queue->sock;
1043 struct sock *sk = sock->sk;
Sagi Grimberg3f2304f2018-12-03 17:52:17 -08001044 read_descriptor_t rd_desc;
1045 int consumed;
1046
1047 rd_desc.arg.data = queue;
1048 rd_desc.count = 1;
1049 lock_sock(sk);
Sagi Grimberg1a9460c2019-07-03 14:08:04 -07001050 queue->nr_cqe = 0;
Potnuri Bharat Teja10407ec2019-07-08 15:22:00 +05301051 consumed = sock->ops->read_sock(sk, &rd_desc, nvme_tcp_recv_skb);
Sagi Grimberg3f2304f2018-12-03 17:52:17 -08001052 release_sock(sk);
1053 return consumed;
1054}
1055
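/*
 * io_work alternates between sending and receiving on the queue's socket for
 * a bounded time slice (roughly one millisecond at jiffy resolution) and then
 * requeues itself while work remains pending, so a single busy queue cannot
 * monopolize the workqueue CPU.
 */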
1056static void nvme_tcp_io_work(struct work_struct *w)
1057{
1058 struct nvme_tcp_queue *queue =
1059 container_of(w, struct nvme_tcp_queue, io_work);
Wunderlich, Markddef2952019-09-18 23:36:37 +00001060 unsigned long deadline = jiffies + msecs_to_jiffies(1);
Sagi Grimberg3f2304f2018-12-03 17:52:17 -08001061
1062 do {
1063 bool pending = false;
1064 int result;
1065
1066 result = nvme_tcp_try_send(queue);
Sagi Grimberg5ff4e112020-02-25 16:43:23 -08001067 if (result > 0)
Sagi Grimberg3f2304f2018-12-03 17:52:17 -08001068 pending = true;
Sagi Grimberg5ff4e112020-02-25 16:43:23 -08001069 else if (unlikely(result < 0))
1070 break;
Sagi Grimberg3f2304f2018-12-03 17:52:17 -08001071
1072 result = nvme_tcp_try_recv(queue);
1073 if (result > 0)
1074 pending = true;
Sagi Grimberg761ad262020-02-25 16:43:24 -08001075 else if (unlikely(result < 0))
Sagi Grimberg39d06079a2020-03-31 22:44:23 -07001076 return;
Sagi Grimberg3f2304f2018-12-03 17:52:17 -08001077
1078 if (!pending)
1079 return;
1080
Wunderlich, Markddef2952019-09-18 23:36:37 +00001081 } while (!time_after(jiffies, deadline)); /* quota is exhausted */
Sagi Grimberg3f2304f2018-12-03 17:52:17 -08001082
1083 queue_work_on(queue->io_cpu, nvme_tcp_wq, &queue->io_work);
1084}
1085
1086static void nvme_tcp_free_crypto(struct nvme_tcp_queue *queue)
1087{
1088 struct crypto_ahash *tfm = crypto_ahash_reqtfm(queue->rcv_hash);
1089
1090 ahash_request_free(queue->rcv_hash);
1091 ahash_request_free(queue->snd_hash);
1092 crypto_free_ahash(tfm);
1093}
1094
1095static int nvme_tcp_alloc_crypto(struct nvme_tcp_queue *queue)
1096{
1097 struct crypto_ahash *tfm;
1098
1099 tfm = crypto_alloc_ahash("crc32c", 0, CRYPTO_ALG_ASYNC);
1100 if (IS_ERR(tfm))
1101 return PTR_ERR(tfm);
1102
1103 queue->snd_hash = ahash_request_alloc(tfm, GFP_KERNEL);
1104 if (!queue->snd_hash)
1105 goto free_tfm;
1106 ahash_request_set_callback(queue->snd_hash, 0, NULL, NULL);
1107
1108 queue->rcv_hash = ahash_request_alloc(tfm, GFP_KERNEL);
1109 if (!queue->rcv_hash)
1110 goto free_snd_hash;
1111 ahash_request_set_callback(queue->rcv_hash, 0, NULL, NULL);
1112
1113 return 0;
1114free_snd_hash:
1115 ahash_request_free(queue->snd_hash);
1116free_tfm:
1117 crypto_free_ahash(tfm);
1118 return -ENOMEM;
1119}
1120
1121static void nvme_tcp_free_async_req(struct nvme_tcp_ctrl *ctrl)
1122{
1123 struct nvme_tcp_request *async = &ctrl->async_req;
1124
1125 page_frag_free(async->pdu);
1126}
1127
1128static int nvme_tcp_alloc_async_req(struct nvme_tcp_ctrl *ctrl)
1129{
1130 struct nvme_tcp_queue *queue = &ctrl->queues[0];
1131 struct nvme_tcp_request *async = &ctrl->async_req;
1132 u8 hdgst = nvme_tcp_hdgst_len(queue);
1133
1134 async->pdu = page_frag_alloc(&queue->pf_cache,
1135 sizeof(struct nvme_tcp_cmd_pdu) + hdgst,
1136 GFP_KERNEL | __GFP_ZERO);
1137 if (!async->pdu)
1138 return -ENOMEM;
1139
1140 async->queue = &ctrl->queues[0];
1141 return 0;
1142}
1143
1144static void nvme_tcp_free_queue(struct nvme_ctrl *nctrl, int qid)
1145{
1146 struct nvme_tcp_ctrl *ctrl = to_tcp_ctrl(nctrl);
1147 struct nvme_tcp_queue *queue = &ctrl->queues[qid];
1148
1149 if (!test_and_clear_bit(NVME_TCP_Q_ALLOCATED, &queue->flags))
1150 return;
1151
1152 if (queue->hdr_digest || queue->data_digest)
1153 nvme_tcp_free_crypto(queue);
1154
1155 sock_release(queue->sock);
1156 kfree(queue->pdu);
1157}
1158
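/*
 * Connection establishment handshake: the host sends an ICReq PDU advertising
 * PFV 1.0, a single inflight R2T, no PDU alignment constraint and its digest
 * settings, and expects a matching ICResp back.  Any mismatch (type, PDU
 * length, PFV, digest negotiation, CPDA) fails queue setup.
 */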
1159static int nvme_tcp_init_connection(struct nvme_tcp_queue *queue)
1160{
1161 struct nvme_tcp_icreq_pdu *icreq;
1162 struct nvme_tcp_icresp_pdu *icresp;
1163 struct msghdr msg = {};
1164 struct kvec iov;
1165 bool ctrl_hdgst, ctrl_ddgst;
1166 int ret;
1167
1168 icreq = kzalloc(sizeof(*icreq), GFP_KERNEL);
1169 if (!icreq)
1170 return -ENOMEM;
1171
1172 icresp = kzalloc(sizeof(*icresp), GFP_KERNEL);
1173 if (!icresp) {
1174 ret = -ENOMEM;
1175 goto free_icreq;
1176 }
1177
1178 icreq->hdr.type = nvme_tcp_icreq;
1179 icreq->hdr.hlen = sizeof(*icreq);
1180 icreq->hdr.pdo = 0;
1181 icreq->hdr.plen = cpu_to_le32(icreq->hdr.hlen);
1182 icreq->pfv = cpu_to_le16(NVME_TCP_PFV_1_0);
1183 icreq->maxr2t = 0; /* single inflight r2t supported */
1184 icreq->hpda = 0; /* no alignment constraint */
1185 if (queue->hdr_digest)
1186 icreq->digest |= NVME_TCP_HDR_DIGEST_ENABLE;
1187 if (queue->data_digest)
1188 icreq->digest |= NVME_TCP_DATA_DIGEST_ENABLE;
1189
1190 iov.iov_base = icreq;
1191 iov.iov_len = sizeof(*icreq);
1192 ret = kernel_sendmsg(queue->sock, &msg, &iov, 1, iov.iov_len);
1193 if (ret < 0)
1194 goto free_icresp;
1195
1196 memset(&msg, 0, sizeof(msg));
1197 iov.iov_base = icresp;
1198 iov.iov_len = sizeof(*icresp);
1199 ret = kernel_recvmsg(queue->sock, &msg, &iov, 1,
1200 iov.iov_len, msg.msg_flags);
1201 if (ret < 0)
1202 goto free_icresp;
1203
1204 ret = -EINVAL;
1205 if (icresp->hdr.type != nvme_tcp_icresp) {
1206 pr_err("queue %d: bad type returned %d\n",
1207 nvme_tcp_queue_id(queue), icresp->hdr.type);
1208 goto free_icresp;
1209 }
1210
1211 if (le32_to_cpu(icresp->hdr.plen) != sizeof(*icresp)) {
1212 pr_err("queue %d: bad pdu length returned %d\n",
1213 nvme_tcp_queue_id(queue), icresp->hdr.plen);
1214 goto free_icresp;
1215 }
1216
1217 if (icresp->pfv != NVME_TCP_PFV_1_0) {
1218 pr_err("queue %d: bad pfv returned %d\n",
1219 nvme_tcp_queue_id(queue), icresp->pfv);
1220 goto free_icresp;
1221 }
1222
1223 ctrl_ddgst = !!(icresp->digest & NVME_TCP_DATA_DIGEST_ENABLE);
1224 if ((queue->data_digest && !ctrl_ddgst) ||
1225 (!queue->data_digest && ctrl_ddgst)) {
1226 pr_err("queue %d: data digest mismatch host: %s ctrl: %s\n",
1227 nvme_tcp_queue_id(queue),
1228 queue->data_digest ? "enabled" : "disabled",
1229 ctrl_ddgst ? "enabled" : "disabled");
1230 goto free_icresp;
1231 }
1232
1233 ctrl_hdgst = !!(icresp->digest & NVME_TCP_HDR_DIGEST_ENABLE);
1234 if ((queue->hdr_digest && !ctrl_hdgst) ||
1235 (!queue->hdr_digest && ctrl_hdgst)) {
1236 pr_err("queue %d: header digest mismatch host: %s ctrl: %s\n",
1237 nvme_tcp_queue_id(queue),
1238 queue->hdr_digest ? "enabled" : "disabled",
1239 ctrl_hdgst ? "enabled" : "disabled");
1240 goto free_icresp;
1241 }
1242
1243 if (icresp->cpda != 0) {
1244 pr_err("queue %d: unsupported cpda returned %d\n",
1245 nvme_tcp_queue_id(queue), icresp->cpda);
1246 goto free_icresp;
1247 }
1248
1249 ret = 0;
1250free_icresp:
1251 kfree(icresp);
1252free_icreq:
1253 kfree(icreq);
1254 return ret;
1255}
1256
Sagi Grimberg40510a62020-02-25 15:53:09 -08001257static bool nvme_tcp_admin_queue(struct nvme_tcp_queue *queue)
1258{
1259 return nvme_tcp_queue_id(queue) == 0;
1260}
1261
1262static bool nvme_tcp_default_queue(struct nvme_tcp_queue *queue)
1263{
1264 struct nvme_tcp_ctrl *ctrl = queue->ctrl;
1265 int qid = nvme_tcp_queue_id(queue);
1266
1267 return !nvme_tcp_admin_queue(queue) &&
1268 qid < 1 + ctrl->io_queues[HCTX_TYPE_DEFAULT];
1269}
1270
1271static bool nvme_tcp_read_queue(struct nvme_tcp_queue *queue)
1272{
1273 struct nvme_tcp_ctrl *ctrl = queue->ctrl;
1274 int qid = nvme_tcp_queue_id(queue);
1275
1276 return !nvme_tcp_admin_queue(queue) &&
1277 !nvme_tcp_default_queue(queue) &&
1278 qid < 1 + ctrl->io_queues[HCTX_TYPE_DEFAULT] +
1279 ctrl->io_queues[HCTX_TYPE_READ];
1280}
1281
1282static bool nvme_tcp_poll_queue(struct nvme_tcp_queue *queue)
1283{
1284 struct nvme_tcp_ctrl *ctrl = queue->ctrl;
1285 int qid = nvme_tcp_queue_id(queue);
1286
1287 return !nvme_tcp_admin_queue(queue) &&
1288 !nvme_tcp_default_queue(queue) &&
1289 !nvme_tcp_read_queue(queue) &&
1290 qid < 1 + ctrl->io_queues[HCTX_TYPE_DEFAULT] +
1291 ctrl->io_queues[HCTX_TYPE_READ] +
1292 ctrl->io_queues[HCTX_TYPE_POLL];
1293}
1294
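/*
 * Queues of each type are laid out contiguously (default, then read, then
 * poll), so the helper below rebases qid into a per-type index before picking
 * an online CPU.  Illustrative example: with 4 default and 2 read queues,
 * qid 6 (the second read queue) maps to n = 6 - 4 - 1 = 1.
 */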
1295static void nvme_tcp_set_queue_io_cpu(struct nvme_tcp_queue *queue)
1296{
1297 struct nvme_tcp_ctrl *ctrl = queue->ctrl;
1298 int qid = nvme_tcp_queue_id(queue);
1299 int n = 0;
1300
1301 if (nvme_tcp_default_queue(queue))
1302 n = qid - 1;
1303 else if (nvme_tcp_read_queue(queue))
1304 n = qid - ctrl->io_queues[HCTX_TYPE_DEFAULT] - 1;
1305 else if (nvme_tcp_poll_queue(queue))
1306 n = qid - ctrl->io_queues[HCTX_TYPE_DEFAULT] -
1307 ctrl->io_queues[HCTX_TYPE_READ] - 1;
1308 queue->io_cpu = cpumask_next_wrap(n - 1, cpu_online_mask, -1, false);
1309}
1310
Sagi Grimberg3f2304f2018-12-03 17:52:17 -08001311static int nvme_tcp_alloc_queue(struct nvme_ctrl *nctrl,
1312 int qid, size_t queue_size)
1313{
1314 struct nvme_tcp_ctrl *ctrl = to_tcp_ctrl(nctrl);
1315 struct nvme_tcp_queue *queue = &ctrl->queues[qid];
Sagi Grimberg40510a62020-02-25 15:53:09 -08001316 int ret, opt, rcv_pdu_size;
Sagi Grimberg3f2304f2018-12-03 17:52:17 -08001317
1318 queue->ctrl = ctrl;
1319 INIT_LIST_HEAD(&queue->send_list);
1320 spin_lock_init(&queue->lock);
1321 INIT_WORK(&queue->io_work, nvme_tcp_io_work);
1322 queue->queue_size = queue_size;
1323
1324 if (qid > 0)
Israel Rukshin9924b032019-08-18 12:08:53 +03001325 queue->cmnd_capsule_len = nctrl->ioccsz * 16;
Sagi Grimberg3f2304f2018-12-03 17:52:17 -08001326 else
1327 queue->cmnd_capsule_len = sizeof(struct nvme_command) +
1328 NVME_TCP_ADMIN_CCSZ;
1329
1330 ret = sock_create(ctrl->addr.ss_family, SOCK_STREAM,
1331 IPPROTO_TCP, &queue->sock);
1332 if (ret) {
Israel Rukshin9924b032019-08-18 12:08:53 +03001333 dev_err(nctrl->device,
Sagi Grimberg3f2304f2018-12-03 17:52:17 -08001334 "failed to create socket: %d\n", ret);
1335 return ret;
1336 }
1337
1338 /* Single syn retry */
1339 opt = 1;
1340 ret = kernel_setsockopt(queue->sock, IPPROTO_TCP, TCP_SYNCNT,
1341 (char *)&opt, sizeof(opt));
1342 if (ret) {
Israel Rukshin9924b032019-08-18 12:08:53 +03001343 dev_err(nctrl->device,
Sagi Grimberg3f2304f2018-12-03 17:52:17 -08001344 "failed to set TCP_SYNCNT sock opt %d\n", ret);
1345 goto err_sock;
1346 }
1347
1348 /* Set TCP no delay */
Christoph Hellwig12abc5e2020-05-28 07:12:19 +02001349 tcp_sock_set_nodelay(queue->sock->sk);
Sagi Grimberg3f2304f2018-12-03 17:52:17 -08001350
 1351 /*
 1352 * Clean up whatever is sitting in the TCP transmit queue on socket
 1353 * close. This is done to prevent stale data from being sent should
 1354 * the network connection be restored before TCP times out.
 1355 */
Christoph Hellwigc4335942020-05-28 07:12:10 +02001356 sock_no_linger(queue->sock->sk);
Sagi Grimberg3f2304f2018-12-03 17:52:17 -08001357
Christoph Hellwig6e434962020-05-28 07:12:11 +02001358 if (so_priority > 0)
1359 sock_set_priority(queue->sock->sk, so_priority);
Wunderlich, Mark9912ade2020-01-16 00:46:12 +00001360
Israel Rukshinbb139852019-08-18 12:08:54 +03001361 /* Set socket type of service */
1362 if (nctrl->opts->tos >= 0) {
1363 opt = nctrl->opts->tos;
1364 ret = kernel_setsockopt(queue->sock, SOL_IP, IP_TOS,
1365 (char *)&opt, sizeof(opt));
1366 if (ret) {
1367 dev_err(nctrl->device,
1368 "failed to set IP_TOS sock opt %d\n", ret);
1369 goto err_sock;
1370 }
1371 }
1372
Sagi Grimberg3f2304f2018-12-03 17:52:17 -08001373 queue->sock->sk->sk_allocation = GFP_ATOMIC;
Sagi Grimberg40510a62020-02-25 15:53:09 -08001374 nvme_tcp_set_queue_io_cpu(queue);
Sagi Grimberg3f2304f2018-12-03 17:52:17 -08001375 queue->request = NULL;
1376 queue->data_remaining = 0;
1377 queue->ddgst_remaining = 0;
1378 queue->pdu_remaining = 0;
1379 queue->pdu_offset = 0;
1380 sk_set_memalloc(queue->sock->sk);
1381
Israel Rukshin9924b032019-08-18 12:08:53 +03001382 if (nctrl->opts->mask & NVMF_OPT_HOST_TRADDR) {
Sagi Grimberg3f2304f2018-12-03 17:52:17 -08001383 ret = kernel_bind(queue->sock, (struct sockaddr *)&ctrl->src_addr,
1384 sizeof(ctrl->src_addr));
1385 if (ret) {
Israel Rukshin9924b032019-08-18 12:08:53 +03001386 dev_err(nctrl->device,
Sagi Grimberg3f2304f2018-12-03 17:52:17 -08001387 "failed to bind queue %d socket %d\n",
1388 qid, ret);
1389 goto err_sock;
1390 }
1391 }
1392
1393 queue->hdr_digest = nctrl->opts->hdr_digest;
1394 queue->data_digest = nctrl->opts->data_digest;
1395 if (queue->hdr_digest || queue->data_digest) {
1396 ret = nvme_tcp_alloc_crypto(queue);
1397 if (ret) {
Israel Rukshin9924b032019-08-18 12:08:53 +03001398 dev_err(nctrl->device,
Sagi Grimberg3f2304f2018-12-03 17:52:17 -08001399 "failed to allocate queue %d crypto\n", qid);
1400 goto err_sock;
1401 }
1402 }
1403
1404 rcv_pdu_size = sizeof(struct nvme_tcp_rsp_pdu) +
1405 nvme_tcp_hdgst_len(queue);
1406 queue->pdu = kmalloc(rcv_pdu_size, GFP_KERNEL);
1407 if (!queue->pdu) {
1408 ret = -ENOMEM;
1409 goto err_crypto;
1410 }
1411
Israel Rukshin9924b032019-08-18 12:08:53 +03001412 dev_dbg(nctrl->device, "connecting queue %d\n",
Sagi Grimberg3f2304f2018-12-03 17:52:17 -08001413 nvme_tcp_queue_id(queue));
1414
1415 ret = kernel_connect(queue->sock, (struct sockaddr *)&ctrl->addr,
1416 sizeof(ctrl->addr), 0);
1417 if (ret) {
Israel Rukshin9924b032019-08-18 12:08:53 +03001418 dev_err(nctrl->device,
Sagi Grimberg3f2304f2018-12-03 17:52:17 -08001419 "failed to connect socket: %d\n", ret);
1420 goto err_rcv_pdu;
1421 }
1422
1423 ret = nvme_tcp_init_connection(queue);
1424 if (ret)
1425 goto err_init_connect;
1426
1427 queue->rd_enabled = true;
1428 set_bit(NVME_TCP_Q_ALLOCATED, &queue->flags);
1429 nvme_tcp_init_recv_ctx(queue);
1430
1431 write_lock_bh(&queue->sock->sk->sk_callback_lock);
1432 queue->sock->sk->sk_user_data = queue;
1433 queue->state_change = queue->sock->sk->sk_state_change;
1434 queue->data_ready = queue->sock->sk->sk_data_ready;
1435 queue->write_space = queue->sock->sk->sk_write_space;
1436 queue->sock->sk->sk_data_ready = nvme_tcp_data_ready;
1437 queue->sock->sk->sk_state_change = nvme_tcp_state_change;
1438 queue->sock->sk->sk_write_space = nvme_tcp_write_space;
Sebastian Andrzej Siewiorac1c4e12019-10-10 17:34:12 +02001439#ifdef CONFIG_NET_RX_BUSY_POLL
Sagi Grimberg1a9460c2019-07-03 14:08:04 -07001440 queue->sock->sk->sk_ll_usec = 1;
Sebastian Andrzej Siewiorac1c4e12019-10-10 17:34:12 +02001441#endif
Sagi Grimberg3f2304f2018-12-03 17:52:17 -08001442 write_unlock_bh(&queue->sock->sk->sk_callback_lock);
1443
1444 return 0;
1445
1446err_init_connect:
1447 kernel_sock_shutdown(queue->sock, SHUT_RDWR);
1448err_rcv_pdu:
1449 kfree(queue->pdu);
1450err_crypto:
1451 if (queue->hdr_digest || queue->data_digest)
1452 nvme_tcp_free_crypto(queue);
1453err_sock:
1454 sock_release(queue->sock);
1455 queue->sock = NULL;
1456 return ret;
1457}
1458
1459static void nvme_tcp_restore_sock_calls(struct nvme_tcp_queue *queue)
1460{
1461 struct socket *sock = queue->sock;
1462
1463 write_lock_bh(&sock->sk->sk_callback_lock);
1464 sock->sk->sk_user_data = NULL;
1465 sock->sk->sk_data_ready = queue->data_ready;
1466 sock->sk->sk_state_change = queue->state_change;
1467 sock->sk->sk_write_space = queue->write_space;
1468 write_unlock_bh(&sock->sk->sk_callback_lock);
1469}
1470
1471static void __nvme_tcp_stop_queue(struct nvme_tcp_queue *queue)
1472{
1473 kernel_sock_shutdown(queue->sock, SHUT_RDWR);
1474 nvme_tcp_restore_sock_calls(queue);
1475 cancel_work_sync(&queue->io_work);
1476}
1477
1478static void nvme_tcp_stop_queue(struct nvme_ctrl *nctrl, int qid)
1479{
1480 struct nvme_tcp_ctrl *ctrl = to_tcp_ctrl(nctrl);
1481 struct nvme_tcp_queue *queue = &ctrl->queues[qid];
1482
1483 if (!test_and_clear_bit(NVME_TCP_Q_LIVE, &queue->flags))
1484 return;
1485
1486 __nvme_tcp_stop_queue(queue);
1487}
1488
1489static int nvme_tcp_start_queue(struct nvme_ctrl *nctrl, int idx)
1490{
1491 struct nvme_tcp_ctrl *ctrl = to_tcp_ctrl(nctrl);
1492 int ret;
1493
1494 if (idx)
Sagi Grimberg26c68222018-12-14 11:06:08 -08001495 ret = nvmf_connect_io_queue(nctrl, idx, false);
Sagi Grimberg3f2304f2018-12-03 17:52:17 -08001496 else
1497 ret = nvmf_connect_admin_queue(nctrl);
1498
1499 if (!ret) {
1500 set_bit(NVME_TCP_Q_LIVE, &ctrl->queues[idx].flags);
1501 } else {
Sagi Grimbergf34e2582019-04-29 16:25:48 -07001502 if (test_bit(NVME_TCP_Q_ALLOCATED, &ctrl->queues[idx].flags))
1503 __nvme_tcp_stop_queue(&ctrl->queues[idx]);
Sagi Grimberg3f2304f2018-12-03 17:52:17 -08001504 dev_err(nctrl->device,
1505 "failed to connect queue: %d ret=%d\n", idx, ret);
1506 }
1507 return ret;
1508}
1509
1510static struct blk_mq_tag_set *nvme_tcp_alloc_tagset(struct nvme_ctrl *nctrl,
1511 bool admin)
1512{
1513 struct nvme_tcp_ctrl *ctrl = to_tcp_ctrl(nctrl);
1514 struct blk_mq_tag_set *set;
1515 int ret;
1516
1517 if (admin) {
1518 set = &ctrl->admin_tag_set;
1519 memset(set, 0, sizeof(*set));
1520 set->ops = &nvme_tcp_admin_mq_ops;
1521 set->queue_depth = NVME_AQ_MQ_TAG_DEPTH;
1522 set->reserved_tags = 2; /* connect + keep-alive */
1523 set->numa_node = NUMA_NO_NODE;
1524 set->cmd_size = sizeof(struct nvme_tcp_request);
1525 set->driver_data = ctrl;
1526 set->nr_hw_queues = 1;
1527 set->timeout = ADMIN_TIMEOUT;
1528 } else {
1529 set = &ctrl->tag_set;
1530 memset(set, 0, sizeof(*set));
1531 set->ops = &nvme_tcp_mq_ops;
1532 set->queue_depth = nctrl->sqsize + 1;
1533 set->reserved_tags = 1; /* fabric connect */
1534 set->numa_node = NUMA_NO_NODE;
1535 set->flags = BLK_MQ_F_SHOULD_MERGE;
1536 set->cmd_size = sizeof(struct nvme_tcp_request);
1537 set->driver_data = ctrl;
1538 set->nr_hw_queues = nctrl->queue_count - 1;
1539 set->timeout = NVME_IO_TIMEOUT;
Sagi Grimberg1a9460c2019-07-03 14:08:04 -07001540 set->nr_maps = nctrl->opts->nr_poll_queues ? HCTX_MAX_TYPES : 2;
Sagi Grimberg3f2304f2018-12-03 17:52:17 -08001541 }
1542
1543 ret = blk_mq_alloc_tag_set(set);
1544 if (ret)
1545 return ERR_PTR(ret);
1546
1547 return set;
1548}
1549
1550static void nvme_tcp_free_admin_queue(struct nvme_ctrl *ctrl)
1551{
1552 if (to_tcp_ctrl(ctrl)->async_req.pdu) {
1553 nvme_tcp_free_async_req(to_tcp_ctrl(ctrl));
1554 to_tcp_ctrl(ctrl)->async_req.pdu = NULL;
1555 }
1556
1557 nvme_tcp_free_queue(ctrl, 0);
1558}
1559
1560static void nvme_tcp_free_io_queues(struct nvme_ctrl *ctrl)
1561{
1562 int i;
1563
1564 for (i = 1; i < ctrl->queue_count; i++)
1565 nvme_tcp_free_queue(ctrl, i);
1566}
1567
1568static void nvme_tcp_stop_io_queues(struct nvme_ctrl *ctrl)
1569{
1570 int i;
1571
1572 for (i = 1; i < ctrl->queue_count; i++)
1573 nvme_tcp_stop_queue(ctrl, i);
1574}
1575
1576static int nvme_tcp_start_io_queues(struct nvme_ctrl *ctrl)
1577{
1578 int i, ret = 0;
1579
1580 for (i = 1; i < ctrl->queue_count; i++) {
1581 ret = nvme_tcp_start_queue(ctrl, i);
1582 if (ret)
1583 goto out_stop_queues;
1584 }
1585
1586 return 0;
1587
1588out_stop_queues:
1589 for (i--; i >= 1; i--)
1590 nvme_tcp_stop_queue(ctrl, i);
1591 return ret;
1592}
1593
1594static int nvme_tcp_alloc_admin_queue(struct nvme_ctrl *ctrl)
1595{
1596 int ret;
1597
1598 ret = nvme_tcp_alloc_queue(ctrl, 0, NVME_AQ_DEPTH);
1599 if (ret)
1600 return ret;
1601
1602 ret = nvme_tcp_alloc_async_req(to_tcp_ctrl(ctrl));
1603 if (ret)
1604 goto out_free_queue;
1605
1606 return 0;
1607
1608out_free_queue:
1609 nvme_tcp_free_queue(ctrl, 0);
1610 return ret;
1611}
1612
Sagi Grimbergefb973b2019-04-24 11:53:19 -07001613static int __nvme_tcp_alloc_io_queues(struct nvme_ctrl *ctrl)
Sagi Grimberg3f2304f2018-12-03 17:52:17 -08001614{
1615 int i, ret;
1616
1617 for (i = 1; i < ctrl->queue_count; i++) {
1618 ret = nvme_tcp_alloc_queue(ctrl, i,
1619 ctrl->sqsize + 1);
1620 if (ret)
1621 goto out_free_queues;
1622 }
1623
1624 return 0;
1625
1626out_free_queues:
1627 for (i--; i >= 1; i--)
1628 nvme_tcp_free_queue(ctrl, i);
1629
1630 return ret;
1631}
1632
1633static unsigned int nvme_tcp_nr_io_queues(struct nvme_ctrl *ctrl)
1634{
Sagi Grimberg873946f2018-12-11 23:38:57 -08001635 unsigned int nr_io_queues;
1636
1637 nr_io_queues = min(ctrl->opts->nr_io_queues, num_online_cpus());
1638 nr_io_queues += min(ctrl->opts->nr_write_queues, num_online_cpus());
Sagi Grimberg1a9460c2019-07-03 14:08:04 -07001639 nr_io_queues += min(ctrl->opts->nr_poll_queues, num_online_cpus());
Sagi Grimberg873946f2018-12-11 23:38:57 -08001640
1641 return nr_io_queues;
Sagi Grimberg3f2304f2018-12-03 17:52:17 -08001642}
1643
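/*
 * Illustrative split: with nr_io_queues=4, nr_write_queues=2 and
 * nr_poll_queues=2 all granted in full (8 queues available), reads get their
 * own 4 queues, writes get 2 dedicated default queues and 2 queues are
 * reserved for polling, i.e. HCTX_TYPE_READ=4, HCTX_TYPE_DEFAULT=2,
 * HCTX_TYPE_POLL=2.
 */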
Sagi Grimberg64861992019-05-28 22:49:05 -07001644static void nvme_tcp_set_io_queues(struct nvme_ctrl *nctrl,
1645 unsigned int nr_io_queues)
1646{
1647 struct nvme_tcp_ctrl *ctrl = to_tcp_ctrl(nctrl);
1648 struct nvmf_ctrl_options *opts = nctrl->opts;
1649
1650 if (opts->nr_write_queues && opts->nr_io_queues < nr_io_queues) {
1651 /*
1652 * separate read/write queues
1653 * hand out dedicated default queues only after we have
1654 * sufficient read queues.
1655 */
1656 ctrl->io_queues[HCTX_TYPE_READ] = opts->nr_io_queues;
1657 nr_io_queues -= ctrl->io_queues[HCTX_TYPE_READ];
1658 ctrl->io_queues[HCTX_TYPE_DEFAULT] =
1659 min(opts->nr_write_queues, nr_io_queues);
1660 nr_io_queues -= ctrl->io_queues[HCTX_TYPE_DEFAULT];
1661 } else {
1662 /*
1663 * shared read/write queues
1664 * either no write queues were requested, or we don't have
1665 * sufficient queue count to have dedicated default queues.
1666 */
1667 ctrl->io_queues[HCTX_TYPE_DEFAULT] =
1668 min(opts->nr_io_queues, nr_io_queues);
1669 nr_io_queues -= ctrl->io_queues[HCTX_TYPE_DEFAULT];
1670 }
Sagi Grimberg1a9460c2019-07-03 14:08:04 -07001671
1672 if (opts->nr_poll_queues && nr_io_queues) {
1673 /* map dedicated poll queues only if we have queues left */
1674 ctrl->io_queues[HCTX_TYPE_POLL] =
1675 min(opts->nr_poll_queues, nr_io_queues);
1676 }
Sagi Grimberg64861992019-05-28 22:49:05 -07001677}
1678
Sagi Grimbergefb973b2019-04-24 11:53:19 -07001679static int nvme_tcp_alloc_io_queues(struct nvme_ctrl *ctrl)
Sagi Grimberg3f2304f2018-12-03 17:52:17 -08001680{
1681 unsigned int nr_io_queues;
1682 int ret;
1683
1684 nr_io_queues = nvme_tcp_nr_io_queues(ctrl);
1685 ret = nvme_set_queue_count(ctrl, &nr_io_queues);
1686 if (ret)
1687 return ret;
1688
1689 ctrl->queue_count = nr_io_queues + 1;
1690 if (ctrl->queue_count < 2)
1691 return 0;
1692
1693 dev_info(ctrl->device,
1694 "creating %d I/O queues.\n", nr_io_queues);
1695
Sagi Grimberg64861992019-05-28 22:49:05 -07001696 nvme_tcp_set_io_queues(ctrl, nr_io_queues);
1697
Sagi Grimbergefb973b2019-04-24 11:53:19 -07001698 return __nvme_tcp_alloc_io_queues(ctrl);
Sagi Grimberg3f2304f2018-12-03 17:52:17 -08001699}
1700
1701static void nvme_tcp_destroy_io_queues(struct nvme_ctrl *ctrl, bool remove)
1702{
1703 nvme_tcp_stop_io_queues(ctrl);
1704 if (remove) {
Sagi Grimberge85037a2018-12-31 23:58:30 -08001705 blk_cleanup_queue(ctrl->connect_q);
Sagi Grimberg3f2304f2018-12-03 17:52:17 -08001706 blk_mq_free_tag_set(ctrl->tagset);
1707 }
1708 nvme_tcp_free_io_queues(ctrl);
1709}
1710
1711static int nvme_tcp_configure_io_queues(struct nvme_ctrl *ctrl, bool new)
1712{
1713 int ret;
1714
Sagi Grimbergefb973b2019-04-24 11:53:19 -07001715 ret = nvme_tcp_alloc_io_queues(ctrl);
Sagi Grimberg3f2304f2018-12-03 17:52:17 -08001716 if (ret)
1717 return ret;
1718
1719 if (new) {
1720 ctrl->tagset = nvme_tcp_alloc_tagset(ctrl, false);
1721 if (IS_ERR(ctrl->tagset)) {
1722 ret = PTR_ERR(ctrl->tagset);
1723 goto out_free_io_queues;
1724 }
1725
Sagi Grimberge85037a2018-12-31 23:58:30 -08001726 ctrl->connect_q = blk_mq_init_queue(ctrl->tagset);
1727 if (IS_ERR(ctrl->connect_q)) {
1728 ret = PTR_ERR(ctrl->connect_q);
1729 goto out_free_tag_set;
Sagi Grimberg3f2304f2018-12-03 17:52:17 -08001730 }
1731 } else {
1732 blk_mq_update_nr_hw_queues(ctrl->tagset,
1733 ctrl->queue_count - 1);
1734 }
1735
1736 ret = nvme_tcp_start_io_queues(ctrl);
1737 if (ret)
1738 goto out_cleanup_connect_q;
1739
1740 return 0;
1741
1742out_cleanup_connect_q:
Sagi Grimberge85037a2018-12-31 23:58:30 -08001743 if (new)
Sagi Grimberg3f2304f2018-12-03 17:52:17 -08001744 blk_cleanup_queue(ctrl->connect_q);
1745out_free_tag_set:
1746 if (new)
1747 blk_mq_free_tag_set(ctrl->tagset);
1748out_free_io_queues:
1749 nvme_tcp_free_io_queues(ctrl);
1750 return ret;
1751}
1752
1753static void nvme_tcp_destroy_admin_queue(struct nvme_ctrl *ctrl, bool remove)
1754{
1755 nvme_tcp_stop_queue(ctrl, 0);
1756 if (remove) {
Sagi Grimberg3f2304f2018-12-03 17:52:17 -08001757 blk_cleanup_queue(ctrl->admin_q);
Sagi Grimberge7832cb2019-08-02 19:33:59 -07001758 blk_cleanup_queue(ctrl->fabrics_q);
Sagi Grimberg3f2304f2018-12-03 17:52:17 -08001759 blk_mq_free_tag_set(ctrl->admin_tagset);
1760 }
1761 nvme_tcp_free_admin_queue(ctrl);
1762}
1763
static int nvme_tcp_configure_admin_queue(struct nvme_ctrl *ctrl, bool new)
{
	int error;

	error = nvme_tcp_alloc_admin_queue(ctrl);
	if (error)
		return error;

	if (new) {
		ctrl->admin_tagset = nvme_tcp_alloc_tagset(ctrl, true);
		if (IS_ERR(ctrl->admin_tagset)) {
			error = PTR_ERR(ctrl->admin_tagset);
			goto out_free_queue;
		}

		ctrl->fabrics_q = blk_mq_init_queue(ctrl->admin_tagset);
		if (IS_ERR(ctrl->fabrics_q)) {
			error = PTR_ERR(ctrl->fabrics_q);
			goto out_free_tagset;
		}

		ctrl->admin_q = blk_mq_init_queue(ctrl->admin_tagset);
		if (IS_ERR(ctrl->admin_q)) {
			error = PTR_ERR(ctrl->admin_q);
			goto out_cleanup_fabrics_q;
		}
	}

	error = nvme_tcp_start_queue(ctrl, 0);
	if (error)
		goto out_cleanup_queue;

	error = nvme_enable_ctrl(ctrl);
	if (error)
		goto out_stop_queue;

	blk_mq_unquiesce_queue(ctrl->admin_q);

	error = nvme_init_identify(ctrl);
	if (error)
		goto out_stop_queue;

	return 0;

out_stop_queue:
	nvme_tcp_stop_queue(ctrl, 0);
out_cleanup_queue:
	if (new)
		blk_cleanup_queue(ctrl->admin_q);
out_cleanup_fabrics_q:
	if (new)
		blk_cleanup_queue(ctrl->fabrics_q);
out_free_tagset:
	if (new)
		blk_mq_free_tag_set(ctrl->admin_tagset);
out_free_queue:
	nvme_tcp_free_admin_queue(ctrl);
	return error;
}

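/*
 * Quiesce and stop the admin queue, cancel any requests still outstanding
 * on the admin tag set, and tear the queue down (its resources are freed
 * only when the controller is being removed).
 */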
static void nvme_tcp_teardown_admin_queue(struct nvme_ctrl *ctrl,
		bool remove)
{
	blk_mq_quiesce_queue(ctrl->admin_q);
	nvme_tcp_stop_queue(ctrl, 0);
	if (ctrl->admin_tagset) {
		blk_mq_tagset_busy_iter(ctrl->admin_tagset,
			nvme_cancel_request, ctrl);
		blk_mq_tagset_wait_completed_request(ctrl->admin_tagset);
	}
	if (remove)
		blk_mq_unquiesce_queue(ctrl->admin_q);
	nvme_tcp_destroy_admin_queue(ctrl, remove);
}

static void nvme_tcp_teardown_io_queues(struct nvme_ctrl *ctrl,
		bool remove)
{
	if (ctrl->queue_count <= 1)
		return;
	nvme_stop_queues(ctrl);
	nvme_tcp_stop_io_queues(ctrl);
	if (ctrl->tagset) {
		blk_mq_tagset_busy_iter(ctrl->tagset,
			nvme_cancel_request, ctrl);
		blk_mq_tagset_wait_completed_request(ctrl->tagset);
	}
	if (remove)
		nvme_start_queues(ctrl);
	nvme_tcp_destroy_io_queues(ctrl, remove);
}

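/*
 * After a connection loss or a failed (re)connect attempt, either schedule
 * another attempt after the configured reconnect delay or give up and
 * delete the controller, as decided by nvmf_should_reconnect().
 */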
static void nvme_tcp_reconnect_or_remove(struct nvme_ctrl *ctrl)
{
	/* If we are resetting/deleting then do nothing */
	if (ctrl->state != NVME_CTRL_CONNECTING) {
		WARN_ON_ONCE(ctrl->state == NVME_CTRL_NEW ||
			ctrl->state == NVME_CTRL_LIVE);
		return;
	}

	if (nvmf_should_reconnect(ctrl)) {
		dev_info(ctrl->device, "Reconnecting in %d seconds...\n",
			ctrl->opts->reconnect_delay);
		queue_delayed_work(nvme_wq, &to_tcp_ctrl(ctrl)->connect_work,
				ctrl->opts->reconnect_delay * HZ);
	} else {
		dev_info(ctrl->device, "Removing controller...\n");
		nvme_delete_ctrl(ctrl);
	}
}

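/*
 * Bring the controller (back) up: configure the admin queue, validate the
 * negotiated parameters (icdoff, queue sizes), configure the I/O queues if
 * any were requested, and move the controller to the LIVE state.
 */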
static int nvme_tcp_setup_ctrl(struct nvme_ctrl *ctrl, bool new)
{
	struct nvmf_ctrl_options *opts = ctrl->opts;
	int ret;

	ret = nvme_tcp_configure_admin_queue(ctrl, new);
	if (ret)
		return ret;

	if (ctrl->icdoff) {
		/* a non-zero in-capsule data offset is not supported */
		ret = -EOPNOTSUPP;
		dev_err(ctrl->device, "icdoff is not supported!\n");
		goto destroy_admin;
	}

	if (opts->queue_size > ctrl->sqsize + 1)
		dev_warn(ctrl->device,
			"queue_size %zu > ctrl sqsize %u, clamping down\n",
			opts->queue_size, ctrl->sqsize + 1);

	if (ctrl->sqsize + 1 > ctrl->maxcmd) {
		dev_warn(ctrl->device,
			"sqsize %u > ctrl maxcmd %u, clamping down\n",
			ctrl->sqsize + 1, ctrl->maxcmd);
		ctrl->sqsize = ctrl->maxcmd - 1;
	}

	if (ctrl->queue_count > 1) {
		ret = nvme_tcp_configure_io_queues(ctrl, new);
		if (ret)
			goto destroy_admin;
	}

	if (!nvme_change_ctrl_state(ctrl, NVME_CTRL_LIVE)) {
		/*
		 * state change failure is ok if we're in DELETING state,
		 * unless we're during creation of a new controller to
		 * avoid races with teardown flow.
		 */
		WARN_ON_ONCE(ctrl->state != NVME_CTRL_DELETING);
		WARN_ON_ONCE(new);
		ret = -EINVAL;
		goto destroy_io;
	}

	nvme_start_ctrl(ctrl);
	return 0;

destroy_io:
	if (ctrl->queue_count > 1)
		nvme_tcp_destroy_io_queues(ctrl, new);
destroy_admin:
	nvme_tcp_stop_queue(ctrl, 0);
	nvme_tcp_destroy_admin_queue(ctrl, new);
	return ret;
}

static void nvme_tcp_reconnect_ctrl_work(struct work_struct *work)
{
	struct nvme_tcp_ctrl *tcp_ctrl = container_of(to_delayed_work(work),
			struct nvme_tcp_ctrl, connect_work);
	struct nvme_ctrl *ctrl = &tcp_ctrl->ctrl;

	++ctrl->nr_reconnects;

	if (nvme_tcp_setup_ctrl(ctrl, false))
		goto requeue;

	dev_info(ctrl->device, "Successfully reconnected (%d attempt)\n",
			ctrl->nr_reconnects);

	ctrl->nr_reconnects = 0;

	return;

requeue:
	dev_info(ctrl->device, "Failed reconnect attempt %d\n",
			ctrl->nr_reconnects);
	nvme_tcp_reconnect_or_remove(ctrl);
}

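/*
 * Error recovery: stop keep-alive, tear down all queues while failing
 * pending requests fast, then transition to CONNECTING and let
 * nvme_tcp_reconnect_or_remove() decide whether to retry or remove the
 * controller.
 */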
static void nvme_tcp_error_recovery_work(struct work_struct *work)
{
	struct nvme_tcp_ctrl *tcp_ctrl = container_of(work,
				struct nvme_tcp_ctrl, err_work);
	struct nvme_ctrl *ctrl = &tcp_ctrl->ctrl;

	nvme_stop_keep_alive(ctrl);
	nvme_tcp_teardown_io_queues(ctrl, false);
	/* unquiesce to fail fast pending requests */
	nvme_start_queues(ctrl);
	nvme_tcp_teardown_admin_queue(ctrl, false);
	blk_mq_unquiesce_queue(ctrl->admin_q);

	if (!nvme_change_ctrl_state(ctrl, NVME_CTRL_CONNECTING)) {
		/* state change failure is ok if we're in DELETING state */
		WARN_ON_ONCE(ctrl->state != NVME_CTRL_DELETING);
		return;
	}

	nvme_tcp_reconnect_or_remove(ctrl);
}

static void nvme_tcp_teardown_ctrl(struct nvme_ctrl *ctrl, bool shutdown)
{
	cancel_work_sync(&to_tcp_ctrl(ctrl)->err_work);
	cancel_delayed_work_sync(&to_tcp_ctrl(ctrl)->connect_work);

	nvme_tcp_teardown_io_queues(ctrl, shutdown);
	blk_mq_quiesce_queue(ctrl->admin_q);
	if (shutdown)
		nvme_shutdown_ctrl(ctrl);
	else
		nvme_disable_ctrl(ctrl);
	nvme_tcp_teardown_admin_queue(ctrl, shutdown);
}

static void nvme_tcp_delete_ctrl(struct nvme_ctrl *ctrl)
{
	nvme_tcp_teardown_ctrl(ctrl, true);
}

static void nvme_reset_ctrl_work(struct work_struct *work)
{
	struct nvme_ctrl *ctrl =
		container_of(work, struct nvme_ctrl, reset_work);

	nvme_stop_ctrl(ctrl);
	nvme_tcp_teardown_ctrl(ctrl, false);

	if (!nvme_change_ctrl_state(ctrl, NVME_CTRL_CONNECTING)) {
		/* state change failure is ok if we're in DELETING state */
		WARN_ON_ONCE(ctrl->state != NVME_CTRL_DELETING);
		return;
	}

	if (nvme_tcp_setup_ctrl(ctrl, false))
		goto out_fail;

	return;

out_fail:
	++ctrl->nr_reconnects;
	nvme_tcp_reconnect_or_remove(ctrl);
}

static void nvme_tcp_free_ctrl(struct nvme_ctrl *nctrl)
{
	struct nvme_tcp_ctrl *ctrl = to_tcp_ctrl(nctrl);

	if (list_empty(&ctrl->list))
		goto free_ctrl;

	mutex_lock(&nvme_tcp_ctrl_mutex);
	list_del(&ctrl->list);
	mutex_unlock(&nvme_tcp_ctrl_mutex);

	nvmf_free_options(nctrl->opts);
free_ctrl:
	kfree(ctrl->queues);
	kfree(ctrl);
}

static void nvme_tcp_set_sg_null(struct nvme_command *c)
{
	struct nvme_sgl_desc *sg = &c->common.dptr.sgl;

	sg->addr = 0;
	sg->length = 0;
	sg->type = (NVME_TRANSPORT_SGL_DATA_DESC << 4) |
			NVME_SGL_FMT_TRANSPORT_A;
}

static void nvme_tcp_set_sg_inline(struct nvme_tcp_queue *queue,
		struct nvme_command *c, u32 data_len)
{
	struct nvme_sgl_desc *sg = &c->common.dptr.sgl;

	sg->addr = cpu_to_le64(queue->ctrl->ctrl.icdoff);
	sg->length = cpu_to_le32(data_len);
	sg->type = (NVME_SGL_FMT_DATA_DESC << 4) | NVME_SGL_FMT_OFFSET;
}

static void nvme_tcp_set_sg_host_data(struct nvme_command *c,
		u32 data_len)
{
	struct nvme_sgl_desc *sg = &c->common.dptr.sgl;

	sg->addr = 0;
	sg->length = cpu_to_le32(data_len);
	sg->type = (NVME_TRANSPORT_SGL_DATA_DESC << 4) |
			NVME_SGL_FMT_TRANSPORT_A;
}

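/*
 * Build and queue the Asynchronous Event Request on the admin queue. The
 * command uses the reserved NVME_AQ_BLK_MQ_DEPTH command id and carries no
 * data.
 */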
static void nvme_tcp_submit_async_event(struct nvme_ctrl *arg)
{
	struct nvme_tcp_ctrl *ctrl = to_tcp_ctrl(arg);
	struct nvme_tcp_queue *queue = &ctrl->queues[0];
	struct nvme_tcp_cmd_pdu *pdu = ctrl->async_req.pdu;
	struct nvme_command *cmd = &pdu->cmd;
	u8 hdgst = nvme_tcp_hdgst_len(queue);

	memset(pdu, 0, sizeof(*pdu));
	pdu->hdr.type = nvme_tcp_cmd;
	if (queue->hdr_digest)
		pdu->hdr.flags |= NVME_TCP_F_HDGST;
	pdu->hdr.hlen = sizeof(*pdu);
	pdu->hdr.plen = cpu_to_le32(pdu->hdr.hlen + hdgst);

	cmd->common.opcode = nvme_admin_async_event;
	cmd->common.command_id = NVME_AQ_BLK_MQ_DEPTH;
	cmd->common.flags |= NVME_CMD_SGL_METABUF;
	nvme_tcp_set_sg_null(cmd);

	ctrl->async_req.state = NVME_TCP_SEND_CMD_PDU;
	ctrl->async_req.offset = 0;
	ctrl->async_req.curr_bio = NULL;
	ctrl->async_req.data_len = 0;

	nvme_tcp_queue_request(&ctrl->async_req);
}

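/*
 * Request timeout handler: if a controller reset is already scheduled just
 * restart the timer; if the controller is not live, tear the queues down
 * and complete the request; otherwise kick off error recovery.
 */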
static enum blk_eh_timer_return
nvme_tcp_timeout(struct request *rq, bool reserved)
{
	struct nvme_tcp_request *req = blk_mq_rq_to_pdu(rq);
	struct nvme_tcp_ctrl *ctrl = req->queue->ctrl;
	struct nvme_tcp_cmd_pdu *pdu = req->pdu;

	/*
	 * Restart the timer if a controller reset is already scheduled. Any
	 * timed out commands would be handled before entering the connecting
	 * state.
	 */
	if (ctrl->ctrl.state == NVME_CTRL_RESETTING)
		return BLK_EH_RESET_TIMER;

	dev_warn(ctrl->ctrl.device,
		"queue %d: timeout request %#x type %d\n",
		nvme_tcp_queue_id(req->queue), rq->tag, pdu->hdr.type);

	if (ctrl->ctrl.state != NVME_CTRL_LIVE) {
		/*
		 * Tear down immediately if the controller times out while
		 * starting, or if error recovery has already started. All
		 * outstanding requests are completed on shutdown, so we
		 * return BLK_EH_DONE.
		 */
		flush_work(&ctrl->err_work);
		nvme_tcp_teardown_io_queues(&ctrl->ctrl, false);
		nvme_tcp_teardown_admin_queue(&ctrl->ctrl, false);
		return BLK_EH_DONE;
	}

	dev_warn(ctrl->ctrl.device, "starting error recovery\n");
	nvme_tcp_error_recovery(&ctrl->ctrl);

	return BLK_EH_RESET_TIMER;
}

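/*
 * Fill in the command SGL descriptor: no data, in-capsule (inline) data
 * for small enough writes, or a transport SGL for everything else.
 */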
static blk_status_t nvme_tcp_map_data(struct nvme_tcp_queue *queue,
			struct request *rq)
{
	struct nvme_tcp_request *req = blk_mq_rq_to_pdu(rq);
	struct nvme_tcp_cmd_pdu *pdu = req->pdu;
	struct nvme_command *c = &pdu->cmd;

	c->common.flags |= NVME_CMD_SGL_METABUF;

	if (!blk_rq_nr_phys_segments(rq))
		nvme_tcp_set_sg_null(c);
	else if (rq_data_dir(rq) == WRITE &&
	    req->data_len <= nvme_tcp_inline_data_size(queue))
		nvme_tcp_set_sg_inline(queue, c, req->data_len);
	else
		nvme_tcp_set_sg_host_data(c, req->data_len);

	return 0;
}

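/*
 * Initialize the command PDU and the per-request send state for a block
 * request: NVMe command, header/data digest flags, PDU lengths and the
 * data iterator for reads.
 */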
static blk_status_t nvme_tcp_setup_cmd_pdu(struct nvme_ns *ns,
		struct request *rq)
{
	struct nvme_tcp_request *req = blk_mq_rq_to_pdu(rq);
	struct nvme_tcp_cmd_pdu *pdu = req->pdu;
	struct nvme_tcp_queue *queue = req->queue;
	u8 hdgst = nvme_tcp_hdgst_len(queue), ddgst = 0;
	blk_status_t ret;

	ret = nvme_setup_cmd(ns, rq, &pdu->cmd);
	if (ret)
		return ret;

	req->state = NVME_TCP_SEND_CMD_PDU;
	req->offset = 0;
	req->data_sent = 0;
	req->pdu_len = 0;
	req->pdu_sent = 0;
	req->data_len = blk_rq_nr_phys_segments(rq) ?
				blk_rq_payload_bytes(rq) : 0;
	req->curr_bio = rq->bio;

	if (rq_data_dir(rq) == WRITE &&
	    req->data_len <= nvme_tcp_inline_data_size(queue))
		req->pdu_len = req->data_len;
	else if (req->curr_bio)
		nvme_tcp_init_iter(req, READ);

	pdu->hdr.type = nvme_tcp_cmd;
	pdu->hdr.flags = 0;
	if (queue->hdr_digest)
		pdu->hdr.flags |= NVME_TCP_F_HDGST;
	if (queue->data_digest && req->pdu_len) {
		pdu->hdr.flags |= NVME_TCP_F_DDGST;
		ddgst = nvme_tcp_ddgst_len(queue);
	}
	pdu->hdr.hlen = sizeof(*pdu);
	pdu->hdr.pdo = req->pdu_len ? pdu->hdr.hlen + hdgst : 0;
	pdu->hdr.plen =
		cpu_to_le32(pdu->hdr.hlen + hdgst + req->pdu_len + ddgst);

	ret = nvme_tcp_map_data(queue, rq);
	if (unlikely(ret)) {
		nvme_cleanup_cmd(rq);
		dev_err(queue->ctrl->ctrl.device,
			"Failed to map data (%d)\n", ret);
		return ret;
	}

	return 0;
}

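/*
 * blk-mq ->queue_rq handler: fail requests that arrive while the queue is
 * not ready, set up the command PDU, and hand the request to the queue's
 * send path.
 */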
static blk_status_t nvme_tcp_queue_rq(struct blk_mq_hw_ctx *hctx,
		const struct blk_mq_queue_data *bd)
{
	struct nvme_ns *ns = hctx->queue->queuedata;
	struct nvme_tcp_queue *queue = hctx->driver_data;
	struct request *rq = bd->rq;
	struct nvme_tcp_request *req = blk_mq_rq_to_pdu(rq);
	bool queue_ready = test_bit(NVME_TCP_Q_LIVE, &queue->flags);
	blk_status_t ret;

	if (!nvmf_check_ready(&queue->ctrl->ctrl, rq, queue_ready))
		return nvmf_fail_nonready_command(&queue->ctrl->ctrl, rq);

	ret = nvme_tcp_setup_cmd_pdu(ns, rq);
	if (unlikely(ret))
		return ret;

	blk_mq_start_request(rq);

	nvme_tcp_queue_request(req);

	return BLK_STS_OK;
}

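/*
 * Map blk-mq hardware contexts to TCP queues: the default and read maps
 * are either split or shared depending on whether dedicated write queues
 * were requested, and poll queues are mapped after them when available.
 */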
static int nvme_tcp_map_queues(struct blk_mq_tag_set *set)
{
	struct nvme_tcp_ctrl *ctrl = set->driver_data;
	struct nvmf_ctrl_options *opts = ctrl->ctrl.opts;

	if (opts->nr_write_queues && ctrl->io_queues[HCTX_TYPE_READ]) {
		/* separate read/write queues */
		set->map[HCTX_TYPE_DEFAULT].nr_queues =
			ctrl->io_queues[HCTX_TYPE_DEFAULT];
		set->map[HCTX_TYPE_DEFAULT].queue_offset = 0;
		set->map[HCTX_TYPE_READ].nr_queues =
			ctrl->io_queues[HCTX_TYPE_READ];
		set->map[HCTX_TYPE_READ].queue_offset =
			ctrl->io_queues[HCTX_TYPE_DEFAULT];
	} else {
		/* shared read/write queues */
		set->map[HCTX_TYPE_DEFAULT].nr_queues =
			ctrl->io_queues[HCTX_TYPE_DEFAULT];
		set->map[HCTX_TYPE_DEFAULT].queue_offset = 0;
		set->map[HCTX_TYPE_READ].nr_queues =
			ctrl->io_queues[HCTX_TYPE_DEFAULT];
		set->map[HCTX_TYPE_READ].queue_offset = 0;
	}
	blk_mq_map_queues(&set->map[HCTX_TYPE_DEFAULT]);
	blk_mq_map_queues(&set->map[HCTX_TYPE_READ]);

	if (opts->nr_poll_queues && ctrl->io_queues[HCTX_TYPE_POLL]) {
		/* map dedicated poll queues only if we have queues left */
		set->map[HCTX_TYPE_POLL].nr_queues =
				ctrl->io_queues[HCTX_TYPE_POLL];
		set->map[HCTX_TYPE_POLL].queue_offset =
			ctrl->io_queues[HCTX_TYPE_DEFAULT] +
			ctrl->io_queues[HCTX_TYPE_READ];
		blk_mq_map_queues(&set->map[HCTX_TYPE_POLL]);
	}

	dev_info(ctrl->ctrl.device,
		"mapped %d/%d/%d default/read/poll queues.\n",
		ctrl->io_queues[HCTX_TYPE_DEFAULT],
		ctrl->io_queues[HCTX_TYPE_READ],
		ctrl->io_queues[HCTX_TYPE_POLL]);

	return 0;
}

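/*
 * blk-mq polling: busy-poll the socket when possible, then reap whatever
 * completions arrived on this queue.
 */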
static int nvme_tcp_poll(struct blk_mq_hw_ctx *hctx)
{
	struct nvme_tcp_queue *queue = hctx->driver_data;
	struct sock *sk = queue->sock->sk;

	if (!test_bit(NVME_TCP_Q_LIVE, &queue->flags))
		return 0;

	if (sk_can_busy_loop(sk) && skb_queue_empty_lockless(&sk->sk_receive_queue))
		sk_busy_loop(sk, true);
	nvme_tcp_try_recv(queue);
	return queue->nr_cqe;
}

static struct blk_mq_ops nvme_tcp_mq_ops = {
	.queue_rq	= nvme_tcp_queue_rq,
	.complete	= nvme_complete_rq,
	.init_request	= nvme_tcp_init_request,
	.exit_request	= nvme_tcp_exit_request,
	.init_hctx	= nvme_tcp_init_hctx,
	.timeout	= nvme_tcp_timeout,
	.map_queues	= nvme_tcp_map_queues,
	.poll		= nvme_tcp_poll,
};

static struct blk_mq_ops nvme_tcp_admin_mq_ops = {
	.queue_rq	= nvme_tcp_queue_rq,
	.complete	= nvme_complete_rq,
	.init_request	= nvme_tcp_init_request,
	.exit_request	= nvme_tcp_exit_request,
	.init_hctx	= nvme_tcp_init_admin_hctx,
	.timeout	= nvme_tcp_timeout,
};

static const struct nvme_ctrl_ops nvme_tcp_ctrl_ops = {
	.name			= "tcp",
	.module			= THIS_MODULE,
	.flags			= NVME_F_FABRICS,
	.reg_read32		= nvmf_reg_read32,
	.reg_read64		= nvmf_reg_read64,
	.reg_write32		= nvmf_reg_write32,
	.free_ctrl		= nvme_tcp_free_ctrl,
	.submit_async_event	= nvme_tcp_submit_async_event,
	.delete_ctrl		= nvme_tcp_delete_ctrl,
	.get_address		= nvmf_get_address,
};

static bool
nvme_tcp_existing_controller(struct nvmf_ctrl_options *opts)
{
	struct nvme_tcp_ctrl *ctrl;
	bool found = false;

	mutex_lock(&nvme_tcp_ctrl_mutex);
	list_for_each_entry(ctrl, &nvme_tcp_ctrl_list, list) {
		found = nvmf_ip_options_match(&ctrl->ctrl, opts);
		if (found)
			break;
	}
	mutex_unlock(&nvme_tcp_ctrl_mutex);

	return found;
}

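/*
 * Allocate and initialize a controller from the connect options: parse the
 * target and host addresses, reject duplicate connections, register the
 * controller with the core and run the initial setup.
 */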
static struct nvme_ctrl *nvme_tcp_create_ctrl(struct device *dev,
		struct nvmf_ctrl_options *opts)
{
	struct nvme_tcp_ctrl *ctrl;
	int ret;

	ctrl = kzalloc(sizeof(*ctrl), GFP_KERNEL);
	if (!ctrl)
		return ERR_PTR(-ENOMEM);

	INIT_LIST_HEAD(&ctrl->list);
	ctrl->ctrl.opts = opts;
	ctrl->ctrl.queue_count = opts->nr_io_queues + opts->nr_write_queues +
				opts->nr_poll_queues + 1;
	ctrl->ctrl.sqsize = opts->queue_size - 1;
	ctrl->ctrl.kato = opts->kato;

	INIT_DELAYED_WORK(&ctrl->connect_work,
			nvme_tcp_reconnect_ctrl_work);
	INIT_WORK(&ctrl->err_work, nvme_tcp_error_recovery_work);
	INIT_WORK(&ctrl->ctrl.reset_work, nvme_reset_ctrl_work);

	if (!(opts->mask & NVMF_OPT_TRSVCID)) {
		opts->trsvcid =
			kstrdup(__stringify(NVME_TCP_DISC_PORT), GFP_KERNEL);
		if (!opts->trsvcid) {
			ret = -ENOMEM;
			goto out_free_ctrl;
		}
		opts->mask |= NVMF_OPT_TRSVCID;
	}

	ret = inet_pton_with_scope(&init_net, AF_UNSPEC,
			opts->traddr, opts->trsvcid, &ctrl->addr);
	if (ret) {
		pr_err("malformed address passed: %s:%s\n",
			opts->traddr, opts->trsvcid);
		goto out_free_ctrl;
	}

	if (opts->mask & NVMF_OPT_HOST_TRADDR) {
		ret = inet_pton_with_scope(&init_net, AF_UNSPEC,
			opts->host_traddr, NULL, &ctrl->src_addr);
		if (ret) {
			pr_err("malformed src address passed: %s\n",
			       opts->host_traddr);
			goto out_free_ctrl;
		}
	}

	if (!opts->duplicate_connect && nvme_tcp_existing_controller(opts)) {
		ret = -EALREADY;
		goto out_free_ctrl;
	}

	ctrl->queues = kcalloc(ctrl->ctrl.queue_count, sizeof(*ctrl->queues),
				GFP_KERNEL);
	if (!ctrl->queues) {
		ret = -ENOMEM;
		goto out_free_ctrl;
	}

	ret = nvme_init_ctrl(&ctrl->ctrl, dev, &nvme_tcp_ctrl_ops, 0);
	if (ret)
		goto out_kfree_queues;

	if (!nvme_change_ctrl_state(&ctrl->ctrl, NVME_CTRL_CONNECTING)) {
		WARN_ON_ONCE(1);
		ret = -EINTR;
		goto out_uninit_ctrl;
	}

	ret = nvme_tcp_setup_ctrl(&ctrl->ctrl, true);
	if (ret)
		goto out_uninit_ctrl;

	dev_info(ctrl->ctrl.device, "new ctrl: NQN \"%s\", addr %pISp\n",
		ctrl->ctrl.opts->subsysnqn, &ctrl->addr);

	mutex_lock(&nvme_tcp_ctrl_mutex);
	list_add_tail(&ctrl->list, &nvme_tcp_ctrl_list);
	mutex_unlock(&nvme_tcp_ctrl_mutex);

	return &ctrl->ctrl;

out_uninit_ctrl:
	nvme_uninit_ctrl(&ctrl->ctrl);
	nvme_put_ctrl(&ctrl->ctrl);
	if (ret > 0)
		ret = -EIO;
	return ERR_PTR(ret);
out_kfree_queues:
	kfree(ctrl->queues);
out_free_ctrl:
	kfree(ctrl);
	return ERR_PTR(ret);
}

static struct nvmf_transport_ops nvme_tcp_transport = {
	.name		= "tcp",
	.module		= THIS_MODULE,
	.required_opts	= NVMF_OPT_TRADDR,
	.allowed_opts	= NVMF_OPT_TRSVCID | NVMF_OPT_RECONNECT_DELAY |
			  NVMF_OPT_HOST_TRADDR | NVMF_OPT_CTRL_LOSS_TMO |
			  NVMF_OPT_HDR_DIGEST | NVMF_OPT_DATA_DIGEST |
			  NVMF_OPT_NR_WRITE_QUEUES | NVMF_OPT_NR_POLL_QUEUES |
			  NVMF_OPT_TOS,
	.create_ctrl	= nvme_tcp_create_ctrl,
};

static int __init nvme_tcp_init_module(void)
{
	nvme_tcp_wq = alloc_workqueue("nvme_tcp_wq",
			WQ_MEM_RECLAIM | WQ_HIGHPRI, 0);
	if (!nvme_tcp_wq)
		return -ENOMEM;

	nvmf_register_transport(&nvme_tcp_transport);
	return 0;
}

static void __exit nvme_tcp_cleanup_module(void)
{
	struct nvme_tcp_ctrl *ctrl;

	nvmf_unregister_transport(&nvme_tcp_transport);

	mutex_lock(&nvme_tcp_ctrl_mutex);
	list_for_each_entry(ctrl, &nvme_tcp_ctrl_list, list)
		nvme_delete_ctrl(&ctrl->ctrl);
	mutex_unlock(&nvme_tcp_ctrl_mutex);
	flush_workqueue(nvme_delete_wq);

	destroy_workqueue(nvme_tcp_wq);
}

module_init(nvme_tcp_init_module);
module_exit(nvme_tcp_cleanup_module);

MODULE_LICENSE("GPL v2");