1// SPDX-License-Identifier: GPL-2.0
2/*
3 * NVMe over Fabrics TCP host.
4 * Copyright (c) 2018 Lightbits Labs. All rights reserved.
5 */
6#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
7#include <linux/module.h>
8#include <linux/init.h>
9#include <linux/slab.h>
10#include <linux/err.h>
11#include <linux/nvme-tcp.h>
12#include <net/sock.h>
13#include <net/tcp.h>
14#include <linux/blk-mq.h>
15#include <crypto/hash.h>
16
17#include "nvme.h"
18#include "fabrics.h"
19
20struct nvme_tcp_queue;
21
22enum nvme_tcp_send_state {
23 NVME_TCP_SEND_CMD_PDU = 0,
24 NVME_TCP_SEND_H2C_PDU,
25 NVME_TCP_SEND_DATA,
26 NVME_TCP_SEND_DDGST,
27};
28
29struct nvme_tcp_request {
30 struct nvme_request req;
31 void *pdu;
32 struct nvme_tcp_queue *queue;
33 u32 data_len;
34 u32 pdu_len;
35 u32 pdu_sent;
36 u16 ttag;
37 struct list_head entry;
38 __le32 ddgst;
39
40 struct bio *curr_bio;
41 struct iov_iter iter;
42
43 /* send state */
44 size_t offset;
45 size_t data_sent;
46 enum nvme_tcp_send_state state;
47};
48
49enum nvme_tcp_queue_flags {
50 NVME_TCP_Q_ALLOCATED = 0,
51 NVME_TCP_Q_LIVE = 1,
52};
53
54enum nvme_tcp_recv_state {
55 NVME_TCP_RECV_PDU = 0,
56 NVME_TCP_RECV_DATA,
57 NVME_TCP_RECV_DDGST,
58};
59
60struct nvme_tcp_ctrl;
61struct nvme_tcp_queue {
62 struct socket *sock;
63 struct work_struct io_work;
64 int io_cpu;
65
66 spinlock_t lock;
67 struct list_head send_list;
68
69 /* recv state */
70 void *pdu;
71 int pdu_remaining;
72 int pdu_offset;
73 size_t data_remaining;
74 size_t ddgst_remaining;
75
76 /* send state */
77 struct nvme_tcp_request *request;
78
79 int queue_size;
80 size_t cmnd_capsule_len;
81 struct nvme_tcp_ctrl *ctrl;
82 unsigned long flags;
83 bool rd_enabled;
84
85 bool hdr_digest;
86 bool data_digest;
87 struct ahash_request *rcv_hash;
88 struct ahash_request *snd_hash;
89 __le32 exp_ddgst;
90 __le32 recv_ddgst;
91
92 struct page_frag_cache pf_cache;
93
94 void (*state_change)(struct sock *);
95 void (*data_ready)(struct sock *);
96 void (*write_space)(struct sock *);
97};
98
99struct nvme_tcp_ctrl {
100 /* read only in the hot path */
101 struct nvme_tcp_queue *queues;
102 struct blk_mq_tag_set tag_set;
103
104 /* other member variables */
105 struct list_head list;
106 struct blk_mq_tag_set admin_tag_set;
107 struct sockaddr_storage addr;
108 struct sockaddr_storage src_addr;
109 struct nvme_ctrl ctrl;
110
111 struct work_struct err_work;
112 struct delayed_work connect_work;
113 struct nvme_tcp_request async_req;
114};
115
116static LIST_HEAD(nvme_tcp_ctrl_list);
117static DEFINE_MUTEX(nvme_tcp_ctrl_mutex);
118static struct workqueue_struct *nvme_tcp_wq;
119static struct blk_mq_ops nvme_tcp_mq_ops;
120static struct blk_mq_ops nvme_tcp_admin_mq_ops;
121
122static inline struct nvme_tcp_ctrl *to_tcp_ctrl(struct nvme_ctrl *ctrl)
123{
124 return container_of(ctrl, struct nvme_tcp_ctrl, ctrl);
125}
126
127static inline int nvme_tcp_queue_id(struct nvme_tcp_queue *queue)
128{
129 return queue - queue->ctrl->queues;
130}
131
132static inline struct blk_mq_tags *nvme_tcp_tagset(struct nvme_tcp_queue *queue)
133{
134 u32 queue_idx = nvme_tcp_queue_id(queue);
135
136 if (queue_idx == 0)
137 return queue->ctrl->admin_tag_set.tags[queue_idx];
138 return queue->ctrl->tag_set.tags[queue_idx - 1];
139}
140
141static inline u8 nvme_tcp_hdgst_len(struct nvme_tcp_queue *queue)
142{
143 return queue->hdr_digest ? NVME_TCP_DIGEST_LENGTH : 0;
144}
145
146static inline u8 nvme_tcp_ddgst_len(struct nvme_tcp_queue *queue)
147{
148 return queue->data_digest ? NVME_TCP_DIGEST_LENGTH : 0;
149}
150
151static inline size_t nvme_tcp_inline_data_size(struct nvme_tcp_queue *queue)
152{
153 return queue->cmnd_capsule_len - sizeof(struct nvme_command);
154}
155
156static inline bool nvme_tcp_async_req(struct nvme_tcp_request *req)
157{
158 return req == &req->queue->ctrl->async_req;
159}
160
161static inline bool nvme_tcp_has_inline_data(struct nvme_tcp_request *req)
162{
163 struct request *rq;
164 unsigned int bytes;
165
166 if (unlikely(nvme_tcp_async_req(req)))
167 return false; /* async events don't have a request */
168
169 rq = blk_mq_rq_from_pdu(req);
170 bytes = blk_rq_payload_bytes(rq);
171
172 return rq_data_dir(rq) == WRITE && bytes &&
173 bytes <= nvme_tcp_inline_data_size(req->queue);
174}
175
176static inline struct page *nvme_tcp_req_cur_page(struct nvme_tcp_request *req)
177{
178 return req->iter.bvec->bv_page;
179}
180
181static inline size_t nvme_tcp_req_cur_offset(struct nvme_tcp_request *req)
182{
183 return req->iter.bvec->bv_offset + req->iter.iov_offset;
184}
185
186static inline size_t nvme_tcp_req_cur_length(struct nvme_tcp_request *req)
187{
188 return min_t(size_t, req->iter.bvec->bv_len - req->iter.iov_offset,
189 req->pdu_len - req->pdu_sent);
190}
191
192static inline size_t nvme_tcp_req_offset(struct nvme_tcp_request *req)
193{
194 return req->iter.iov_offset;
195}
196
197static inline size_t nvme_tcp_pdu_data_left(struct nvme_tcp_request *req)
198{
199 return rq_data_dir(blk_mq_rq_from_pdu(req)) == WRITE ?
200 req->pdu_len - req->pdu_sent : 0;
201}
202
203static inline size_t nvme_tcp_pdu_last_send(struct nvme_tcp_request *req,
204 int len)
205{
206 return nvme_tcp_pdu_data_left(req) <= len;
207}
208
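/*
 * Build req->iter as a bvec iterator over the request payload in the
 * given direction: the special payload vector for requests that carry
 * one (e.g. discard), otherwise the bvecs of the current bio, taking
 * any partially consumed bvec into account.
 */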
209static void nvme_tcp_init_iter(struct nvme_tcp_request *req,
210 unsigned int dir)
211{
212 struct request *rq = blk_mq_rq_from_pdu(req);
213 struct bio_vec *vec;
214 unsigned int size;
215 int nsegs;
216 size_t offset;
217
218 if (rq->rq_flags & RQF_SPECIAL_PAYLOAD) {
219 vec = &rq->special_vec;
220 nsegs = 1;
221 size = blk_rq_payload_bytes(rq);
222 offset = 0;
223 } else {
224 struct bio *bio = req->curr_bio;
225
226 vec = __bvec_iter_bvec(bio->bi_io_vec, bio->bi_iter);
227 nsegs = bio_segments(bio);
228 size = bio->bi_iter.bi_size;
229 offset = bio->bi_iter.bi_bvec_done;
230 }
231
232 iov_iter_bvec(&req->iter, dir, vec, nsegs, size);
233 req->iter.iov_offset = offset;
234}
235
236static inline void nvme_tcp_advance_req(struct nvme_tcp_request *req,
237 int len)
238{
239 req->data_sent += len;
240 req->pdu_sent += len;
241 iov_iter_advance(&req->iter, len);
242 if (!iov_iter_count(&req->iter) &&
243 req->data_sent < req->data_len) {
244 req->curr_bio = req->curr_bio->bi_next;
245 nvme_tcp_init_iter(req, WRITE);
246 }
247}
248
249static inline void nvme_tcp_queue_request(struct nvme_tcp_request *req)
250{
251 struct nvme_tcp_queue *queue = req->queue;
252
253 spin_lock(&queue->lock);
254 list_add_tail(&req->entry, &queue->send_list);
255 spin_unlock(&queue->lock);
256
257 queue_work_on(queue->io_cpu, nvme_tcp_wq, &queue->io_work);
258}
259
260static inline struct nvme_tcp_request *
261nvme_tcp_fetch_request(struct nvme_tcp_queue *queue)
262{
263 struct nvme_tcp_request *req;
264
265 spin_lock(&queue->lock);
266 req = list_first_entry_or_null(&queue->send_list,
267 struct nvme_tcp_request, entry);
268 if (req)
269 list_del(&req->entry);
270 spin_unlock(&queue->lock);
271
272 return req;
273}
274
275static inline void nvme_tcp_ddgst_final(struct ahash_request *hash,
276 __le32 *dgst)
277{
278 ahash_request_set_crypt(hash, NULL, (u8 *)dgst, 0);
279 crypto_ahash_final(hash);
280}
281
282static inline void nvme_tcp_ddgst_update(struct ahash_request *hash,
283 struct page *page, off_t off, size_t len)
284{
285 struct scatterlist sg;
286
287 sg_init_marker(&sg, 1);
288 sg_set_page(&sg, page, len, off);
289 ahash_request_set_crypt(hash, &sg, NULL, len);
290 crypto_ahash_update(hash);
291}
292
293static inline void nvme_tcp_hdgst(struct ahash_request *hash,
294 void *pdu, size_t len)
295{
296 struct scatterlist sg;
297
298 sg_init_one(&sg, pdu, len);
299 ahash_request_set_crypt(hash, &sg, pdu + len, len);
300 crypto_ahash_digest(hash);
301}
302
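/*
 * Verify the header digest: save the digest that trails the received
 * header, recompute it over the header bytes (nvme_tcp_hdgst() writes
 * its result right after the header), and compare the two.
 */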
303static int nvme_tcp_verify_hdgst(struct nvme_tcp_queue *queue,
304 void *pdu, size_t pdu_len)
305{
306 struct nvme_tcp_hdr *hdr = pdu;
307 __le32 recv_digest;
308 __le32 exp_digest;
309
310 if (unlikely(!(hdr->flags & NVME_TCP_F_HDGST))) {
311 dev_err(queue->ctrl->ctrl.device,
312 "queue %d: header digest flag is cleared\n",
313 nvme_tcp_queue_id(queue));
314 return -EPROTO;
315 }
316
317 recv_digest = *(__le32 *)(pdu + hdr->hlen);
318 nvme_tcp_hdgst(queue->rcv_hash, pdu, pdu_len);
319 exp_digest = *(__le32 *)(pdu + hdr->hlen);
320 if (recv_digest != exp_digest) {
321 dev_err(queue->ctrl->ctrl.device,
322 "header digest error: recv %#x expected %#x\n",
323 le32_to_cpu(recv_digest), le32_to_cpu(exp_digest));
324 return -EIO;
325 }
326
327 return 0;
328}
329
330static int nvme_tcp_check_ddgst(struct nvme_tcp_queue *queue, void *pdu)
331{
332 struct nvme_tcp_hdr *hdr = pdu;
333 u8 digest_len = nvme_tcp_hdgst_len(queue);
334 u32 len;
335
336 len = le32_to_cpu(hdr->plen) - hdr->hlen -
337 ((hdr->flags & NVME_TCP_F_HDGST) ? digest_len : 0);
338
339 if (unlikely(len && !(hdr->flags & NVME_TCP_F_DDGST))) {
340 dev_err(queue->ctrl->ctrl.device,
341 "queue %d: data digest flag is cleared\n",
342 nvme_tcp_queue_id(queue));
343 return -EPROTO;
344 }
345 crypto_ahash_init(queue->rcv_hash);
346
347 return 0;
348}
349
350static void nvme_tcp_exit_request(struct blk_mq_tag_set *set,
351 struct request *rq, unsigned int hctx_idx)
352{
353 struct nvme_tcp_request *req = blk_mq_rq_to_pdu(rq);
354
355 page_frag_free(req->pdu);
356}
357
358static int nvme_tcp_init_request(struct blk_mq_tag_set *set,
359 struct request *rq, unsigned int hctx_idx,
360 unsigned int numa_node)
361{
362 struct nvme_tcp_ctrl *ctrl = set->driver_data;
363 struct nvme_tcp_request *req = blk_mq_rq_to_pdu(rq);
364 int queue_idx = (set == &ctrl->tag_set) ? hctx_idx + 1 : 0;
365 struct nvme_tcp_queue *queue = &ctrl->queues[queue_idx];
366 u8 hdgst = nvme_tcp_hdgst_len(queue);
367
368 req->pdu = page_frag_alloc(&queue->pf_cache,
369 sizeof(struct nvme_tcp_cmd_pdu) + hdgst,
370 GFP_KERNEL | __GFP_ZERO);
371 if (!req->pdu)
372 return -ENOMEM;
373
374 req->queue = queue;
375 nvme_req(rq)->ctrl = &ctrl->ctrl;
376
377 return 0;
378}
379
380static int nvme_tcp_init_hctx(struct blk_mq_hw_ctx *hctx, void *data,
381 unsigned int hctx_idx)
382{
383 struct nvme_tcp_ctrl *ctrl = data;
384 struct nvme_tcp_queue *queue = &ctrl->queues[hctx_idx + 1];
385
386 hctx->driver_data = queue;
387 return 0;
388}
389
390static int nvme_tcp_init_admin_hctx(struct blk_mq_hw_ctx *hctx, void *data,
391 unsigned int hctx_idx)
392{
393 struct nvme_tcp_ctrl *ctrl = data;
394 struct nvme_tcp_queue *queue = &ctrl->queues[0];
395
396 hctx->driver_data = queue;
397 return 0;
398}
399
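/*
 * The receive state is derived from the per-queue byte counters rather
 * than stored explicitly: bytes left in the PDU header mean we are
 * reassembling a header, bytes left in the data digest mean we are
 * receiving the trailing digest, otherwise we expect payload data.
 */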
400static enum nvme_tcp_recv_state
401nvme_tcp_recv_state(struct nvme_tcp_queue *queue)
402{
403 return (queue->pdu_remaining) ? NVME_TCP_RECV_PDU :
404 (queue->ddgst_remaining) ? NVME_TCP_RECV_DDGST :
405 NVME_TCP_RECV_DATA;
406}
407
408static void nvme_tcp_init_recv_ctx(struct nvme_tcp_queue *queue)
409{
410 queue->pdu_remaining = sizeof(struct nvme_tcp_rsp_pdu) +
411 nvme_tcp_hdgst_len(queue);
412 queue->pdu_offset = 0;
413 queue->data_remaining = -1;
414 queue->ddgst_remaining = 0;
415}
416
417static void nvme_tcp_error_recovery(struct nvme_ctrl *ctrl)
418{
419 if (!nvme_change_ctrl_state(ctrl, NVME_CTRL_RESETTING))
420 return;
421
422 queue_work(nvme_wq, &to_tcp_ctrl(ctrl)->err_work);
423}
424
425static int nvme_tcp_process_nvme_cqe(struct nvme_tcp_queue *queue,
426 struct nvme_completion *cqe)
427{
428 struct request *rq;
429
430 rq = blk_mq_tag_to_rq(nvme_tcp_tagset(queue), cqe->command_id);
431 if (!rq) {
432 dev_err(queue->ctrl->ctrl.device,
433 "queue %d tag 0x%x not found\n",
434 nvme_tcp_queue_id(queue), cqe->command_id);
435 nvme_tcp_error_recovery(&queue->ctrl->ctrl);
436 return -EINVAL;
437 }
438
439 nvme_end_request(rq, cqe->status, cqe->result);
440
441 return 0;
442}
443
444static int nvme_tcp_handle_c2h_data(struct nvme_tcp_queue *queue,
445 struct nvme_tcp_data_pdu *pdu)
446{
447 struct request *rq;
448
449 rq = blk_mq_tag_to_rq(nvme_tcp_tagset(queue), pdu->command_id);
450 if (!rq) {
451 dev_err(queue->ctrl->ctrl.device,
452 "queue %d tag %#x not found\n",
453 nvme_tcp_queue_id(queue), pdu->command_id);
454 return -ENOENT;
455 }
456
457 if (!blk_rq_payload_bytes(rq)) {
458 dev_err(queue->ctrl->ctrl.device,
459 "queue %d tag %#x unexpected data\n",
460 nvme_tcp_queue_id(queue), rq->tag);
461 return -EIO;
462 }
463
464 queue->data_remaining = le32_to_cpu(pdu->data_length);
465
466 if (pdu->hdr.flags & NVME_TCP_F_DATA_SUCCESS &&
467 unlikely(!(pdu->hdr.flags & NVME_TCP_F_DATA_LAST))) {
468 dev_err(queue->ctrl->ctrl.device,
469 "queue %d tag %#x SUCCESS set but not last PDU\n",
470 nvme_tcp_queue_id(queue), rq->tag);
471 nvme_tcp_error_recovery(&queue->ctrl->ctrl);
472 return -EPROTO;
473 }
474
475 return 0;
476}
477
478static int nvme_tcp_handle_comp(struct nvme_tcp_queue *queue,
479 struct nvme_tcp_rsp_pdu *pdu)
480{
481 struct nvme_completion *cqe = &pdu->cqe;
482 int ret = 0;
483
484 /*
485 * AEN requests are special as they don't time out and can
486 * survive any kind of queue freeze and often don't respond to
487 * aborts. We don't even bother to allocate a struct request
488 * for them but rather special case them here.
489 */
490 if (unlikely(nvme_tcp_queue_id(queue) == 0 &&
491 cqe->command_id >= NVME_AQ_BLK_MQ_DEPTH))
492 nvme_complete_async_event(&queue->ctrl->ctrl, cqe->status,
493 &cqe->result);
494 else
495 ret = nvme_tcp_process_nvme_cqe(queue, cqe);
496
497 return ret;
498}
499
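/*
 * Prepare an H2CData PDU in response to a controller R2T: check that
 * the solicited length and offset are consistent with what the request
 * has left to send, then fill in the header (with digests if
 * negotiated), the transfer tag and the data offset/length.
 */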
500static int nvme_tcp_setup_h2c_data_pdu(struct nvme_tcp_request *req,
501 struct nvme_tcp_r2t_pdu *pdu)
502{
503 struct nvme_tcp_data_pdu *data = req->pdu;
504 struct nvme_tcp_queue *queue = req->queue;
505 struct request *rq = blk_mq_rq_from_pdu(req);
506 u8 hdgst = nvme_tcp_hdgst_len(queue);
507 u8 ddgst = nvme_tcp_ddgst_len(queue);
508
509 req->pdu_len = le32_to_cpu(pdu->r2t_length);
510 req->pdu_sent = 0;
511
512 if (unlikely(req->data_sent + req->pdu_len > req->data_len)) {
513 dev_err(queue->ctrl->ctrl.device,
514 "req %d r2t len %u exceeded data len %u (%zu sent)\n",
515 rq->tag, req->pdu_len, req->data_len,
516 req->data_sent);
517 return -EPROTO;
518 }
519
520 if (unlikely(le32_to_cpu(pdu->r2t_offset) < req->data_sent)) {
521 dev_err(queue->ctrl->ctrl.device,
522 "req %d unexpected r2t offset %u (expected %zu)\n",
523 rq->tag, le32_to_cpu(pdu->r2t_offset),
524 req->data_sent);
525 return -EPROTO;
526 }
527
528 memset(data, 0, sizeof(*data));
529 data->hdr.type = nvme_tcp_h2c_data;
530 data->hdr.flags = NVME_TCP_F_DATA_LAST;
531 if (queue->hdr_digest)
532 data->hdr.flags |= NVME_TCP_F_HDGST;
533 if (queue->data_digest)
534 data->hdr.flags |= NVME_TCP_F_DDGST;
535 data->hdr.hlen = sizeof(*data);
536 data->hdr.pdo = data->hdr.hlen + hdgst;
537 data->hdr.plen =
538 cpu_to_le32(data->hdr.hlen + hdgst + req->pdu_len + ddgst);
539 data->ttag = pdu->ttag;
540 data->command_id = rq->tag;
541 data->data_offset = cpu_to_le32(req->data_sent);
542 data->data_length = cpu_to_le32(req->pdu_len);
543 return 0;
544}
545
546static int nvme_tcp_handle_r2t(struct nvme_tcp_queue *queue,
547 struct nvme_tcp_r2t_pdu *pdu)
548{
549 struct nvme_tcp_request *req;
550 struct request *rq;
551 int ret;
552
553 rq = blk_mq_tag_to_rq(nvme_tcp_tagset(queue), pdu->command_id);
554 if (!rq) {
555 dev_err(queue->ctrl->ctrl.device,
556 "queue %d tag %#x not found\n",
557 nvme_tcp_queue_id(queue), pdu->command_id);
558 return -ENOENT;
559 }
560 req = blk_mq_rq_to_pdu(rq);
561
562 ret = nvme_tcp_setup_h2c_data_pdu(req, pdu);
563 if (unlikely(ret))
564 return ret;
565
566 req->state = NVME_TCP_SEND_H2C_PDU;
567 req->offset = 0;
568
569 nvme_tcp_queue_request(req);
570
571 return 0;
572}
573
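/*
 * Reassemble a PDU header from the socket data, possibly across
 * multiple calls.  Once complete, verify the header digest and prime
 * the data digest if enabled, then dispatch on the PDU type (C2HData,
 * response capsule or R2T).
 */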
574static int nvme_tcp_recv_pdu(struct nvme_tcp_queue *queue, struct sk_buff *skb,
575 unsigned int *offset, size_t *len)
576{
577 struct nvme_tcp_hdr *hdr;
578 char *pdu = queue->pdu;
579 size_t rcv_len = min_t(size_t, *len, queue->pdu_remaining);
580 int ret;
581
582 ret = skb_copy_bits(skb, *offset,
583 &pdu[queue->pdu_offset], rcv_len);
584 if (unlikely(ret))
585 return ret;
586
587 queue->pdu_remaining -= rcv_len;
588 queue->pdu_offset += rcv_len;
589 *offset += rcv_len;
590 *len -= rcv_len;
591 if (queue->pdu_remaining)
592 return 0;
593
594 hdr = queue->pdu;
595 if (queue->hdr_digest) {
596 ret = nvme_tcp_verify_hdgst(queue, queue->pdu, hdr->hlen);
597 if (unlikely(ret))
598 return ret;
599 }
600
601
602 if (queue->data_digest) {
603 ret = nvme_tcp_check_ddgst(queue, queue->pdu);
604 if (unlikely(ret))
605 return ret;
606 }
607
608 switch (hdr->type) {
609 case nvme_tcp_c2h_data:
610 ret = nvme_tcp_handle_c2h_data(queue, (void *)queue->pdu);
611 break;
612 case nvme_tcp_rsp:
613 nvme_tcp_init_recv_ctx(queue);
614 ret = nvme_tcp_handle_comp(queue, (void *)queue->pdu);
615 break;
616 case nvme_tcp_r2t:
617 nvme_tcp_init_recv_ctx(queue);
618 ret = nvme_tcp_handle_r2t(queue, (void *)queue->pdu);
619 break;
620 default:
621 dev_err(queue->ctrl->ctrl.device,
622 "unsupported pdu type (%d)\n", hdr->type);
623 return -EINVAL;
624 }
625
626 return ret;
627}
628
629static inline void nvme_tcp_end_request(struct request *rq, u16 status)
630{
631 union nvme_result res = {};
632
633 nvme_end_request(rq, cpu_to_le16(status << 1), res);
634}
635
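/*
 * Copy C2HData payload from the socket into the pages of the request's
 * bios, moving to the next bio when the current iterator is exhausted
 * and feeding the receive hash when data digest is enabled.  Once all
 * the data has arrived, either expect a trailing digest or complete the
 * request if the controller indicated success.
 */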
636static int nvme_tcp_recv_data(struct nvme_tcp_queue *queue, struct sk_buff *skb,
637 unsigned int *offset, size_t *len)
638{
639 struct nvme_tcp_data_pdu *pdu = (void *)queue->pdu;
640 struct nvme_tcp_request *req;
641 struct request *rq;
642
643 rq = blk_mq_tag_to_rq(nvme_tcp_tagset(queue), pdu->command_id);
644 if (!rq) {
645 dev_err(queue->ctrl->ctrl.device,
646 "queue %d tag %#x not found\n",
647 nvme_tcp_queue_id(queue), pdu->command_id);
648 return -ENOENT;
649 }
650 req = blk_mq_rq_to_pdu(rq);
651
652 while (true) {
653 int recv_len, ret;
654
655 recv_len = min_t(size_t, *len, queue->data_remaining);
656 if (!recv_len)
657 break;
658
659 if (!iov_iter_count(&req->iter)) {
660 req->curr_bio = req->curr_bio->bi_next;
661
662 /*
663 * If we don't have any bios it means that the controller
664 * sent more data than we requested, hence error
665 */
666 if (!req->curr_bio) {
667 dev_err(queue->ctrl->ctrl.device,
668 "queue %d no space in request %#x",
669 nvme_tcp_queue_id(queue), rq->tag);
670 nvme_tcp_init_recv_ctx(queue);
671 return -EIO;
672 }
673 nvme_tcp_init_iter(req, READ);
674 }
675
676 /* we can read only from what is left in this bio */
677 recv_len = min_t(size_t, recv_len,
678 iov_iter_count(&req->iter));
679
680 if (queue->data_digest)
681 ret = skb_copy_and_hash_datagram_iter(skb, *offset,
682 &req->iter, recv_len, queue->rcv_hash);
683 else
684 ret = skb_copy_datagram_iter(skb, *offset,
685 &req->iter, recv_len);
686 if (ret) {
687 dev_err(queue->ctrl->ctrl.device,
688 "queue %d failed to copy request %#x data",
689 nvme_tcp_queue_id(queue), rq->tag);
690 return ret;
691 }
692
693 *len -= recv_len;
694 *offset += recv_len;
695 queue->data_remaining -= recv_len;
696 }
697
698 if (!queue->data_remaining) {
699 if (queue->data_digest) {
700 nvme_tcp_ddgst_final(queue->rcv_hash, &queue->exp_ddgst);
701 queue->ddgst_remaining = NVME_TCP_DIGEST_LENGTH;
702 } else {
703 if (pdu->hdr.flags & NVME_TCP_F_DATA_SUCCESS)
704 nvme_tcp_end_request(rq, NVME_SC_SUCCESS);
705 nvme_tcp_init_recv_ctx(queue);
706 }
707 }
708
709 return 0;
710}
711
712static int nvme_tcp_recv_ddgst(struct nvme_tcp_queue *queue,
713 struct sk_buff *skb, unsigned int *offset, size_t *len)
714{
715 struct nvme_tcp_data_pdu *pdu = (void *)queue->pdu;
716 char *ddgst = (char *)&queue->recv_ddgst;
717 size_t recv_len = min_t(size_t, *len, queue->ddgst_remaining);
718 off_t off = NVME_TCP_DIGEST_LENGTH - queue->ddgst_remaining;
719 int ret;
720
721 ret = skb_copy_bits(skb, *offset, &ddgst[off], recv_len);
722 if (unlikely(ret))
723 return ret;
724
725 queue->ddgst_remaining -= recv_len;
726 *offset += recv_len;
727 *len -= recv_len;
728 if (queue->ddgst_remaining)
729 return 0;
730
731 if (queue->recv_ddgst != queue->exp_ddgst) {
732 dev_err(queue->ctrl->ctrl.device,
733 "data digest error: recv %#x expected %#x\n",
734 le32_to_cpu(queue->recv_ddgst),
735 le32_to_cpu(queue->exp_ddgst));
736 return -EIO;
737 }
738
739 if (pdu->hdr.flags & NVME_TCP_F_DATA_SUCCESS) {
740 struct request *rq = blk_mq_tag_to_rq(nvme_tcp_tagset(queue),
741 pdu->command_id);
742
743 nvme_tcp_end_request(rq, NVME_SC_SUCCESS);
744 }
745
746 nvme_tcp_init_recv_ctx(queue);
747 return 0;
748}
749
750static int nvme_tcp_recv_skb(read_descriptor_t *desc, struct sk_buff *skb,
751 unsigned int offset, size_t len)
752{
753 struct nvme_tcp_queue *queue = desc->arg.data;
754 size_t consumed = len;
755 int result;
756
757 while (len) {
758 switch (nvme_tcp_recv_state(queue)) {
759 case NVME_TCP_RECV_PDU:
760 result = nvme_tcp_recv_pdu(queue, skb, &offset, &len);
761 break;
762 case NVME_TCP_RECV_DATA:
763 result = nvme_tcp_recv_data(queue, skb, &offset, &len);
764 break;
765 case NVME_TCP_RECV_DDGST:
766 result = nvme_tcp_recv_ddgst(queue, skb, &offset, &len);
767 break;
768 default:
769 result = -EFAULT;
770 }
771 if (result) {
772 dev_err(queue->ctrl->ctrl.device,
773 "receive failed: %d\n", result);
774 queue->rd_enabled = false;
775 nvme_tcp_error_recovery(&queue->ctrl->ctrl);
776 return result;
777 }
778 }
779
780 return consumed;
781}
782
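/*
 * Socket callbacks, installed in nvme_tcp_alloc_queue(): data_ready and
 * write_space schedule io_work on the queue's CPU, while state_change
 * kicks error recovery when the connection enters a closing state.  The
 * original callbacks are saved so they can be restored when the queue
 * is stopped.
 */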
783static void nvme_tcp_data_ready(struct sock *sk)
784{
785 struct nvme_tcp_queue *queue;
786
787 read_lock(&sk->sk_callback_lock);
788 queue = sk->sk_user_data;
789 if (likely(queue && queue->rd_enabled))
790 queue_work_on(queue->io_cpu, nvme_tcp_wq, &queue->io_work);
791 read_unlock(&sk->sk_callback_lock);
792}
793
794static void nvme_tcp_write_space(struct sock *sk)
795{
796 struct nvme_tcp_queue *queue;
797
798 read_lock_bh(&sk->sk_callback_lock);
799 queue = sk->sk_user_data;
800 if (likely(queue && sk_stream_is_writeable(sk))) {
801 clear_bit(SOCK_NOSPACE, &sk->sk_socket->flags);
802 queue_work_on(queue->io_cpu, nvme_tcp_wq, &queue->io_work);
803 }
804 read_unlock_bh(&sk->sk_callback_lock);
805}
806
807static void nvme_tcp_state_change(struct sock *sk)
808{
809 struct nvme_tcp_queue *queue;
810
811 read_lock(&sk->sk_callback_lock);
812 queue = sk->sk_user_data;
813 if (!queue)
814 goto done;
815
816 switch (sk->sk_state) {
817 case TCP_CLOSE:
818 case TCP_CLOSE_WAIT:
819 case TCP_LAST_ACK:
820 case TCP_FIN_WAIT1:
821 case TCP_FIN_WAIT2:
822 /* fallthrough */
823 nvme_tcp_error_recovery(&queue->ctrl->ctrl);
824 break;
825 default:
826 dev_info(queue->ctrl->ctrl.device,
827 "queue %d socket state %d\n",
828 nvme_tcp_queue_id(queue), sk->sk_state);
829 }
830
831 queue->state_change(sk);
832done:
833 read_unlock(&sk->sk_callback_lock);
834}
835
836static inline void nvme_tcp_done_send_req(struct nvme_tcp_queue *queue)
837{
838 queue->request = NULL;
839}
840
841static void nvme_tcp_fail_request(struct nvme_tcp_request *req)
842{
843 nvme_tcp_end_request(blk_mq_rq_from_pdu(req), NVME_SC_DATA_XFER_ERROR);
844}
845
846static int nvme_tcp_try_send_data(struct nvme_tcp_request *req)
847{
848 struct nvme_tcp_queue *queue = req->queue;
849
850 while (true) {
851 struct page *page = nvme_tcp_req_cur_page(req);
852 size_t offset = nvme_tcp_req_cur_offset(req);
853 size_t len = nvme_tcp_req_cur_length(req);
854 bool last = nvme_tcp_pdu_last_send(req, len);
855 int ret, flags = MSG_DONTWAIT;
856
857 if (last && !queue->data_digest)
858 flags |= MSG_EOR;
859 else
860 flags |= MSG_MORE;
861
862 ret = kernel_sendpage(queue->sock, page, offset, len, flags);
863 if (ret <= 0)
864 return ret;
865
866 nvme_tcp_advance_req(req, ret);
867 if (queue->data_digest)
868 nvme_tcp_ddgst_update(queue->snd_hash, page,
869 offset, ret);
870
871 /* fully successful last write */
872 if (last && ret == len) {
873 if (queue->data_digest) {
874 nvme_tcp_ddgst_final(queue->snd_hash,
875 &req->ddgst);
876 req->state = NVME_TCP_SEND_DDGST;
877 req->offset = 0;
878 } else {
879 nvme_tcp_done_send_req(queue);
880 }
881 return 1;
882 }
883 }
884 return -EAGAIN;
885}
886
887static int nvme_tcp_try_send_cmd_pdu(struct nvme_tcp_request *req)
888{
889 struct nvme_tcp_queue *queue = req->queue;
890 struct nvme_tcp_cmd_pdu *pdu = req->pdu;
891 bool inline_data = nvme_tcp_has_inline_data(req);
892 int flags = MSG_DONTWAIT | (inline_data ? MSG_MORE : MSG_EOR);
893 u8 hdgst = nvme_tcp_hdgst_len(queue);
894 int len = sizeof(*pdu) + hdgst - req->offset;
895 int ret;
896
897 if (queue->hdr_digest && !req->offset)
898 nvme_tcp_hdgst(queue->snd_hash, pdu, sizeof(*pdu));
899
900 ret = kernel_sendpage(queue->sock, virt_to_page(pdu),
901 offset_in_page(pdu) + req->offset, len, flags);
902 if (unlikely(ret <= 0))
903 return ret;
904
905 len -= ret;
906 if (!len) {
907 if (inline_data) {
908 req->state = NVME_TCP_SEND_DATA;
909 if (queue->data_digest)
910 crypto_ahash_init(queue->snd_hash);
911 nvme_tcp_init_iter(req, WRITE);
912 } else {
913 nvme_tcp_done_send_req(queue);
914 }
915 return 1;
916 }
917 req->offset += ret;
918
919 return -EAGAIN;
920}
921
922static int nvme_tcp_try_send_data_pdu(struct nvme_tcp_request *req)
923{
924 struct nvme_tcp_queue *queue = req->queue;
925 struct nvme_tcp_data_pdu *pdu = req->pdu;
926 u8 hdgst = nvme_tcp_hdgst_len(queue);
927 int len = sizeof(*pdu) - req->offset + hdgst;
928 int ret;
929
930 if (queue->hdr_digest && !req->offset)
931 nvme_tcp_hdgst(queue->snd_hash, pdu, sizeof(*pdu));
932
933 ret = kernel_sendpage(queue->sock, virt_to_page(pdu),
934 offset_in_page(pdu) + req->offset, len,
935 MSG_DONTWAIT | MSG_MORE);
936 if (unlikely(ret <= 0))
937 return ret;
938
939 len -= ret;
940 if (!len) {
941 req->state = NVME_TCP_SEND_DATA;
942 if (queue->data_digest)
943 crypto_ahash_init(queue->snd_hash);
944 if (!req->data_sent)
945 nvme_tcp_init_iter(req, WRITE);
946 return 1;
947 }
948 req->offset += ret;
949
950 return -EAGAIN;
951}
952
953static int nvme_tcp_try_send_ddgst(struct nvme_tcp_request *req)
954{
955 struct nvme_tcp_queue *queue = req->queue;
956 int ret;
957 struct msghdr msg = { .msg_flags = MSG_DONTWAIT | MSG_EOR };
958 struct kvec iov = {
959 .iov_base = &req->ddgst + req->offset,
960 .iov_len = NVME_TCP_DIGEST_LENGTH - req->offset
961 };
962
963 ret = kernel_sendmsg(queue->sock, &msg, &iov, 1, iov.iov_len);
964 if (unlikely(ret <= 0))
965 return ret;
966
967 if (req->offset + ret == NVME_TCP_DIGEST_LENGTH) {
968 nvme_tcp_done_send_req(queue);
969 return 1;
970 }
971
972 req->offset += ret;
973 return -EAGAIN;
974}
975
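/*
 * Drive the send-side state machine for the current request: command
 * PDU first, then an optional H2CData PDU, the payload data and finally
 * the data digest.  Returns 1 if progress was made, 0 if there is
 * nothing to send or the socket is not writable, negative on error.
 */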
976static int nvme_tcp_try_send(struct nvme_tcp_queue *queue)
977{
978 struct nvme_tcp_request *req;
979 int ret = 1;
980
981 if (!queue->request) {
982 queue->request = nvme_tcp_fetch_request(queue);
983 if (!queue->request)
984 return 0;
985 }
986 req = queue->request;
987
988 if (req->state == NVME_TCP_SEND_CMD_PDU) {
989 ret = nvme_tcp_try_send_cmd_pdu(req);
990 if (ret <= 0)
991 goto done;
992 if (!nvme_tcp_has_inline_data(req))
993 return ret;
994 }
995
996 if (req->state == NVME_TCP_SEND_H2C_PDU) {
997 ret = nvme_tcp_try_send_data_pdu(req);
998 if (ret <= 0)
999 goto done;
1000 }
1001
1002 if (req->state == NVME_TCP_SEND_DATA) {
1003 ret = nvme_tcp_try_send_data(req);
1004 if (ret <= 0)
1005 goto done;
1006 }
1007
1008 if (req->state == NVME_TCP_SEND_DDGST)
1009 ret = nvme_tcp_try_send_ddgst(req);
1010done:
1011 if (ret == -EAGAIN)
1012 ret = 0;
1013 return ret;
1014}
1015
1016static int nvme_tcp_try_recv(struct nvme_tcp_queue *queue)
1017{
1018 struct sock *sk = queue->sock->sk;
1019 read_descriptor_t rd_desc;
1020 int consumed;
1021
1022 rd_desc.arg.data = queue;
1023 rd_desc.count = 1;
1024 lock_sock(sk);
1025 consumed = tcp_read_sock(sk, &rd_desc, nvme_tcp_recv_skb);
1026 release_sock(sk);
1027 return consumed;
1028}
1029
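/*
 * Per-queue I/O work: try to send queued requests and to receive from
 * the socket, and re-arm the work while progress is being made.  A send
 * error stops the work; errors other than -EPIPE also fail the
 * in-flight request.
 */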
1030static void nvme_tcp_io_work(struct work_struct *w)
1031{
1032 struct nvme_tcp_queue *queue =
1033 container_of(w, struct nvme_tcp_queue, io_work);
1034 unsigned long start = jiffies + msecs_to_jiffies(1);
1035
1036 do {
1037 bool pending = false;
1038 int result;
1039
1040 result = nvme_tcp_try_send(queue);
1041 if (result > 0) {
1042 pending = true;
1043 } else if (unlikely(result < 0)) {
1044 dev_err(queue->ctrl->ctrl.device,
1045 "failed to send request %d\n", result);
1046 if (result != -EPIPE)
1047 nvme_tcp_fail_request(queue->request);
1048 nvme_tcp_done_send_req(queue);
1049 return;
1050 }
1051
1052 result = nvme_tcp_try_recv(queue);
1053 if (result > 0)
1054 pending = true;
1055
1056 if (!pending)
1057 return;
1058
1059 } while (time_after(jiffies, start)); /* quota is exhausted */
1060
1061 queue_work_on(queue->io_cpu, nvme_tcp_wq, &queue->io_work);
1062}
1063
1064static void nvme_tcp_free_crypto(struct nvme_tcp_queue *queue)
1065{
1066 struct crypto_ahash *tfm = crypto_ahash_reqtfm(queue->rcv_hash);
1067
1068 ahash_request_free(queue->rcv_hash);
1069 ahash_request_free(queue->snd_hash);
1070 crypto_free_ahash(tfm);
1071}
1072
1073static int nvme_tcp_alloc_crypto(struct nvme_tcp_queue *queue)
1074{
1075 struct crypto_ahash *tfm;
1076
1077 tfm = crypto_alloc_ahash("crc32c", 0, CRYPTO_ALG_ASYNC);
1078 if (IS_ERR(tfm))
1079 return PTR_ERR(tfm);
1080
1081 queue->snd_hash = ahash_request_alloc(tfm, GFP_KERNEL);
1082 if (!queue->snd_hash)
1083 goto free_tfm;
1084 ahash_request_set_callback(queue->snd_hash, 0, NULL, NULL);
1085
1086 queue->rcv_hash = ahash_request_alloc(tfm, GFP_KERNEL);
1087 if (!queue->rcv_hash)
1088 goto free_snd_hash;
1089 ahash_request_set_callback(queue->rcv_hash, 0, NULL, NULL);
1090
1091 return 0;
1092free_snd_hash:
1093 ahash_request_free(queue->snd_hash);
1094free_tfm:
1095 crypto_free_ahash(tfm);
1096 return -ENOMEM;
1097}
1098
1099static void nvme_tcp_free_async_req(struct nvme_tcp_ctrl *ctrl)
1100{
1101 struct nvme_tcp_request *async = &ctrl->async_req;
1102
1103 page_frag_free(async->pdu);
1104}
1105
1106static int nvme_tcp_alloc_async_req(struct nvme_tcp_ctrl *ctrl)
1107{
1108 struct nvme_tcp_queue *queue = &ctrl->queues[0];
1109 struct nvme_tcp_request *async = &ctrl->async_req;
1110 u8 hdgst = nvme_tcp_hdgst_len(queue);
1111
1112 async->pdu = page_frag_alloc(&queue->pf_cache,
1113 sizeof(struct nvme_tcp_cmd_pdu) + hdgst,
1114 GFP_KERNEL | __GFP_ZERO);
1115 if (!async->pdu)
1116 return -ENOMEM;
1117
1118 async->queue = &ctrl->queues[0];
1119 return 0;
1120}
1121
1122static void nvme_tcp_free_queue(struct nvme_ctrl *nctrl, int qid)
1123{
1124 struct nvme_tcp_ctrl *ctrl = to_tcp_ctrl(nctrl);
1125 struct nvme_tcp_queue *queue = &ctrl->queues[qid];
1126
1127 if (!test_and_clear_bit(NVME_TCP_Q_ALLOCATED, &queue->flags))
1128 return;
1129
1130 if (queue->hdr_digest || queue->data_digest)
1131 nvme_tcp_free_crypto(queue);
1132
1133 sock_release(queue->sock);
1134 kfree(queue->pdu);
1135}
1136
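/*
 * Initial connection handshake: send an ICReq, receive the ICResp and
 * fail the connection if the PDU format version, the CPDA or the
 * negotiated header/data digest settings do not match what the host
 * asked for.
 */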
1137static int nvme_tcp_init_connection(struct nvme_tcp_queue *queue)
1138{
1139 struct nvme_tcp_icreq_pdu *icreq;
1140 struct nvme_tcp_icresp_pdu *icresp;
1141 struct msghdr msg = {};
1142 struct kvec iov;
1143 bool ctrl_hdgst, ctrl_ddgst;
1144 int ret;
1145
1146 icreq = kzalloc(sizeof(*icreq), GFP_KERNEL);
1147 if (!icreq)
1148 return -ENOMEM;
1149
1150 icresp = kzalloc(sizeof(*icresp), GFP_KERNEL);
1151 if (!icresp) {
1152 ret = -ENOMEM;
1153 goto free_icreq;
1154 }
1155
1156 icreq->hdr.type = nvme_tcp_icreq;
1157 icreq->hdr.hlen = sizeof(*icreq);
1158 icreq->hdr.pdo = 0;
1159 icreq->hdr.plen = cpu_to_le32(icreq->hdr.hlen);
1160 icreq->pfv = cpu_to_le16(NVME_TCP_PFV_1_0);
1161 icreq->maxr2t = 0; /* single inflight r2t supported */
1162 icreq->hpda = 0; /* no alignment constraint */
1163 if (queue->hdr_digest)
1164 icreq->digest |= NVME_TCP_HDR_DIGEST_ENABLE;
1165 if (queue->data_digest)
1166 icreq->digest |= NVME_TCP_DATA_DIGEST_ENABLE;
1167
1168 iov.iov_base = icreq;
1169 iov.iov_len = sizeof(*icreq);
1170 ret = kernel_sendmsg(queue->sock, &msg, &iov, 1, iov.iov_len);
1171 if (ret < 0)
1172 goto free_icresp;
1173
1174 memset(&msg, 0, sizeof(msg));
1175 iov.iov_base = icresp;
1176 iov.iov_len = sizeof(*icresp);
1177 ret = kernel_recvmsg(queue->sock, &msg, &iov, 1,
1178 iov.iov_len, msg.msg_flags);
1179 if (ret < 0)
1180 goto free_icresp;
1181
1182 ret = -EINVAL;
1183 if (icresp->hdr.type != nvme_tcp_icresp) {
1184 pr_err("queue %d: bad type returned %d\n",
1185 nvme_tcp_queue_id(queue), icresp->hdr.type);
1186 goto free_icresp;
1187 }
1188
1189 if (le32_to_cpu(icresp->hdr.plen) != sizeof(*icresp)) {
1190 pr_err("queue %d: bad pdu length returned %d\n",
1191 nvme_tcp_queue_id(queue), icresp->hdr.plen);
1192 goto free_icresp;
1193 }
1194
1195 if (icresp->pfv != NVME_TCP_PFV_1_0) {
1196 pr_err("queue %d: bad pfv returned %d\n",
1197 nvme_tcp_queue_id(queue), icresp->pfv);
1198 goto free_icresp;
1199 }
1200
1201 ctrl_ddgst = !!(icresp->digest & NVME_TCP_DATA_DIGEST_ENABLE);
1202 if ((queue->data_digest && !ctrl_ddgst) ||
1203 (!queue->data_digest && ctrl_ddgst)) {
1204 pr_err("queue %d: data digest mismatch host: %s ctrl: %s\n",
1205 nvme_tcp_queue_id(queue),
1206 queue->data_digest ? "enabled" : "disabled",
1207 ctrl_ddgst ? "enabled" : "disabled");
1208 goto free_icresp;
1209 }
1210
1211 ctrl_hdgst = !!(icresp->digest & NVME_TCP_HDR_DIGEST_ENABLE);
1212 if ((queue->hdr_digest && !ctrl_hdgst) ||
1213 (!queue->hdr_digest && ctrl_hdgst)) {
1214 pr_err("queue %d: header digest mismatch host: %s ctrl: %s\n",
1215 nvme_tcp_queue_id(queue),
1216 queue->hdr_digest ? "enabled" : "disabled",
1217 ctrl_hdgst ? "enabled" : "disabled");
1218 goto free_icresp;
1219 }
1220
1221 if (icresp->cpda != 0) {
1222 pr_err("queue %d: unsupported cpda returned %d\n",
1223 nvme_tcp_queue_id(queue), icresp->cpda);
1224 goto free_icresp;
1225 }
1226
1227 ret = 0;
1228free_icresp:
1229 kfree(icresp);
1230free_icreq:
1231 kfree(icreq);
1232 return ret;
1233}
1234
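/*
 * Allocate and connect a single queue: create the TCP socket, set
 * TCP_SYNCNT, TCP_NODELAY and SO_LINGER, pin io_work to a CPU,
 * optionally bind to the host traddr, allocate digest state and the
 * receive PDU buffer, connect, run the ICReq handshake and finally
 * install the socket callbacks.
 */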
1235static int nvme_tcp_alloc_queue(struct nvme_ctrl *nctrl,
1236 int qid, size_t queue_size)
1237{
1238 struct nvme_tcp_ctrl *ctrl = to_tcp_ctrl(nctrl);
1239 struct nvme_tcp_queue *queue = &ctrl->queues[qid];
1240 struct linger sol = { .l_onoff = 1, .l_linger = 0 };
1241 int ret, opt, rcv_pdu_size, n;
1242
1243 queue->ctrl = ctrl;
1244 INIT_LIST_HEAD(&queue->send_list);
1245 spin_lock_init(&queue->lock);
1246 INIT_WORK(&queue->io_work, nvme_tcp_io_work);
1247 queue->queue_size = queue_size;
1248
1249 if (qid > 0)
1250 queue->cmnd_capsule_len = ctrl->ctrl.ioccsz * 16;
1251 else
1252 queue->cmnd_capsule_len = sizeof(struct nvme_command) +
1253 NVME_TCP_ADMIN_CCSZ;
1254
1255 ret = sock_create(ctrl->addr.ss_family, SOCK_STREAM,
1256 IPPROTO_TCP, &queue->sock);
1257 if (ret) {
1258 dev_err(ctrl->ctrl.device,
1259 "failed to create socket: %d\n", ret);
1260 return ret;
1261 }
1262
1263 /* Single syn retry */
1264 opt = 1;
1265 ret = kernel_setsockopt(queue->sock, IPPROTO_TCP, TCP_SYNCNT,
1266 (char *)&opt, sizeof(opt));
1267 if (ret) {
1268 dev_err(ctrl->ctrl.device,
1269 "failed to set TCP_SYNCNT sock opt %d\n", ret);
1270 goto err_sock;
1271 }
1272
1273 /* Set TCP no delay */
1274 opt = 1;
1275 ret = kernel_setsockopt(queue->sock, IPPROTO_TCP,
1276 TCP_NODELAY, (char *)&opt, sizeof(opt));
1277 if (ret) {
1278 dev_err(ctrl->ctrl.device,
1279 "failed to set TCP_NODELAY sock opt %d\n", ret);
1280 goto err_sock;
1281 }
1282
1283 /*
1284 * Cleanup whatever is sitting in the TCP transmit queue on socket
1285 * close. This is done to prevent stale data from being sent should
1286 * the network connection be restored before TCP times out.
1287 */
1288 ret = kernel_setsockopt(queue->sock, SOL_SOCKET, SO_LINGER,
1289 (char *)&sol, sizeof(sol));
1290 if (ret) {
1291 dev_err(ctrl->ctrl.device,
1292 "failed to set SO_LINGER sock opt %d\n", ret);
1293 goto err_sock;
1294 }
1295
1296 queue->sock->sk->sk_allocation = GFP_ATOMIC;
1297 if (!qid)
1298 n = 0;
1299 else
1300 n = (qid - 1) % num_online_cpus();
1301 queue->io_cpu = cpumask_next_wrap(n - 1, cpu_online_mask, -1, false);
1302 queue->request = NULL;
1303 queue->data_remaining = 0;
1304 queue->ddgst_remaining = 0;
1305 queue->pdu_remaining = 0;
1306 queue->pdu_offset = 0;
1307 sk_set_memalloc(queue->sock->sk);
1308
1309 if (ctrl->ctrl.opts->mask & NVMF_OPT_HOST_TRADDR) {
1310 ret = kernel_bind(queue->sock, (struct sockaddr *)&ctrl->src_addr,
1311 sizeof(ctrl->src_addr));
1312 if (ret) {
1313 dev_err(ctrl->ctrl.device,
1314 "failed to bind queue %d socket %d\n",
1315 qid, ret);
1316 goto err_sock;
1317 }
1318 }
1319
1320 queue->hdr_digest = nctrl->opts->hdr_digest;
1321 queue->data_digest = nctrl->opts->data_digest;
1322 if (queue->hdr_digest || queue->data_digest) {
1323 ret = nvme_tcp_alloc_crypto(queue);
1324 if (ret) {
1325 dev_err(ctrl->ctrl.device,
1326 "failed to allocate queue %d crypto\n", qid);
1327 goto err_sock;
1328 }
1329 }
1330
1331 rcv_pdu_size = sizeof(struct nvme_tcp_rsp_pdu) +
1332 nvme_tcp_hdgst_len(queue);
1333 queue->pdu = kmalloc(rcv_pdu_size, GFP_KERNEL);
1334 if (!queue->pdu) {
1335 ret = -ENOMEM;
1336 goto err_crypto;
1337 }
1338
1339 dev_dbg(ctrl->ctrl.device, "connecting queue %d\n",
1340 nvme_tcp_queue_id(queue));
1341
1342 ret = kernel_connect(queue->sock, (struct sockaddr *)&ctrl->addr,
1343 sizeof(ctrl->addr), 0);
1344 if (ret) {
1345 dev_err(ctrl->ctrl.device,
1346 "failed to connect socket: %d\n", ret);
1347 goto err_rcv_pdu;
1348 }
1349
1350 ret = nvme_tcp_init_connection(queue);
1351 if (ret)
1352 goto err_init_connect;
1353
1354 queue->rd_enabled = true;
1355 set_bit(NVME_TCP_Q_ALLOCATED, &queue->flags);
1356 nvme_tcp_init_recv_ctx(queue);
1357
1358 write_lock_bh(&queue->sock->sk->sk_callback_lock);
1359 queue->sock->sk->sk_user_data = queue;
1360 queue->state_change = queue->sock->sk->sk_state_change;
1361 queue->data_ready = queue->sock->sk->sk_data_ready;
1362 queue->write_space = queue->sock->sk->sk_write_space;
1363 queue->sock->sk->sk_data_ready = nvme_tcp_data_ready;
1364 queue->sock->sk->sk_state_change = nvme_tcp_state_change;
1365 queue->sock->sk->sk_write_space = nvme_tcp_write_space;
1366 write_unlock_bh(&queue->sock->sk->sk_callback_lock);
1367
1368 return 0;
1369
1370err_init_connect:
1371 kernel_sock_shutdown(queue->sock, SHUT_RDWR);
1372err_rcv_pdu:
1373 kfree(queue->pdu);
1374err_crypto:
1375 if (queue->hdr_digest || queue->data_digest)
1376 nvme_tcp_free_crypto(queue);
1377err_sock:
1378 sock_release(queue->sock);
1379 queue->sock = NULL;
1380 return ret;
1381}
1382
1383static void nvme_tcp_restore_sock_calls(struct nvme_tcp_queue *queue)
1384{
1385 struct socket *sock = queue->sock;
1386
1387 write_lock_bh(&sock->sk->sk_callback_lock);
1388 sock->sk->sk_user_data = NULL;
1389 sock->sk->sk_data_ready = queue->data_ready;
1390 sock->sk->sk_state_change = queue->state_change;
1391 sock->sk->sk_write_space = queue->write_space;
1392 write_unlock_bh(&sock->sk->sk_callback_lock);
1393}
1394
1395static void __nvme_tcp_stop_queue(struct nvme_tcp_queue *queue)
1396{
1397 kernel_sock_shutdown(queue->sock, SHUT_RDWR);
1398 nvme_tcp_restore_sock_calls(queue);
1399 cancel_work_sync(&queue->io_work);
1400}
1401
1402static void nvme_tcp_stop_queue(struct nvme_ctrl *nctrl, int qid)
1403{
1404 struct nvme_tcp_ctrl *ctrl = to_tcp_ctrl(nctrl);
1405 struct nvme_tcp_queue *queue = &ctrl->queues[qid];
1406
1407 if (!test_and_clear_bit(NVME_TCP_Q_LIVE, &queue->flags))
1408 return;
1409
1410 __nvme_tcp_stop_queue(queue);
1411}
1412
1413static int nvme_tcp_start_queue(struct nvme_ctrl *nctrl, int idx)
1414{
1415 struct nvme_tcp_ctrl *ctrl = to_tcp_ctrl(nctrl);
1416 int ret;
1417
1418 if (idx)
1419 ret = nvmf_connect_io_queue(nctrl, idx, false);
1420 else
1421 ret = nvmf_connect_admin_queue(nctrl);
1422
1423 if (!ret) {
1424 set_bit(NVME_TCP_Q_LIVE, &ctrl->queues[idx].flags);
1425 } else {
1426 __nvme_tcp_stop_queue(&ctrl->queues[idx]);
1427 dev_err(nctrl->device,
1428 "failed to connect queue: %d ret=%d\n", idx, ret);
1429 }
1430 return ret;
1431}
1432
1433static struct blk_mq_tag_set *nvme_tcp_alloc_tagset(struct nvme_ctrl *nctrl,
1434 bool admin)
1435{
1436 struct nvme_tcp_ctrl *ctrl = to_tcp_ctrl(nctrl);
1437 struct blk_mq_tag_set *set;
1438 int ret;
1439
1440 if (admin) {
1441 set = &ctrl->admin_tag_set;
1442 memset(set, 0, sizeof(*set));
1443 set->ops = &nvme_tcp_admin_mq_ops;
1444 set->queue_depth = NVME_AQ_MQ_TAG_DEPTH;
1445 set->reserved_tags = 2; /* connect + keep-alive */
1446 set->numa_node = NUMA_NO_NODE;
1447 set->cmd_size = sizeof(struct nvme_tcp_request);
1448 set->driver_data = ctrl;
1449 set->nr_hw_queues = 1;
1450 set->timeout = ADMIN_TIMEOUT;
1451 } else {
1452 set = &ctrl->tag_set;
1453 memset(set, 0, sizeof(*set));
1454 set->ops = &nvme_tcp_mq_ops;
1455 set->queue_depth = nctrl->sqsize + 1;
1456 set->reserved_tags = 1; /* fabric connect */
1457 set->numa_node = NUMA_NO_NODE;
1458 set->flags = BLK_MQ_F_SHOULD_MERGE;
1459 set->cmd_size = sizeof(struct nvme_tcp_request);
1460 set->driver_data = ctrl;
1461 set->nr_hw_queues = nctrl->queue_count - 1;
1462 set->timeout = NVME_IO_TIMEOUT;
1463 set->nr_maps = 2 /* default + read */;
1464 }
1465
1466 ret = blk_mq_alloc_tag_set(set);
1467 if (ret)
1468 return ERR_PTR(ret);
1469
1470 return set;
1471}
1472
1473static void nvme_tcp_free_admin_queue(struct nvme_ctrl *ctrl)
1474{
1475 if (to_tcp_ctrl(ctrl)->async_req.pdu) {
1476 nvme_tcp_free_async_req(to_tcp_ctrl(ctrl));
1477 to_tcp_ctrl(ctrl)->async_req.pdu = NULL;
1478 }
1479
1480 nvme_tcp_free_queue(ctrl, 0);
1481}
1482
1483static void nvme_tcp_free_io_queues(struct nvme_ctrl *ctrl)
1484{
1485 int i;
1486
1487 for (i = 1; i < ctrl->queue_count; i++)
1488 nvme_tcp_free_queue(ctrl, i);
1489}
1490
1491static void nvme_tcp_stop_io_queues(struct nvme_ctrl *ctrl)
1492{
1493 int i;
1494
1495 for (i = 1; i < ctrl->queue_count; i++)
1496 nvme_tcp_stop_queue(ctrl, i);
1497}
1498
1499static int nvme_tcp_start_io_queues(struct nvme_ctrl *ctrl)
1500{
1501 int i, ret = 0;
1502
1503 for (i = 1; i < ctrl->queue_count; i++) {
1504 ret = nvme_tcp_start_queue(ctrl, i);
1505 if (ret)
1506 goto out_stop_queues;
1507 }
1508
1509 return 0;
1510
1511out_stop_queues:
1512 for (i--; i >= 1; i--)
1513 nvme_tcp_stop_queue(ctrl, i);
1514 return ret;
1515}
1516
1517static int nvme_tcp_alloc_admin_queue(struct nvme_ctrl *ctrl)
1518{
1519 int ret;
1520
1521 ret = nvme_tcp_alloc_queue(ctrl, 0, NVME_AQ_DEPTH);
1522 if (ret)
1523 return ret;
1524
1525 ret = nvme_tcp_alloc_async_req(to_tcp_ctrl(ctrl));
1526 if (ret)
1527 goto out_free_queue;
1528
1529 return 0;
1530
1531out_free_queue:
1532 nvme_tcp_free_queue(ctrl, 0);
1533 return ret;
1534}
1535
1536static int __nvme_tcp_alloc_io_queues(struct nvme_ctrl *ctrl)
1537{
1538 int i, ret;
1539
1540 for (i = 1; i < ctrl->queue_count; i++) {
1541 ret = nvme_tcp_alloc_queue(ctrl, i,
1542 ctrl->sqsize + 1);
1543 if (ret)
1544 goto out_free_queues;
1545 }
1546
1547 return 0;
1548
1549out_free_queues:
1550 for (i--; i >= 1; i--)
1551 nvme_tcp_free_queue(ctrl, i);
1552
1553 return ret;
1554}
1555
1556static unsigned int nvme_tcp_nr_io_queues(struct nvme_ctrl *ctrl)
1557{
1558 unsigned int nr_io_queues;
1559
1560 nr_io_queues = min(ctrl->opts->nr_io_queues, num_online_cpus());
1561 nr_io_queues += min(ctrl->opts->nr_write_queues, num_online_cpus());
1562
1563 return nr_io_queues;
1564}
1565
1566static int nvme_tcp_alloc_io_queues(struct nvme_ctrl *ctrl)
1567{
1568 unsigned int nr_io_queues;
1569 int ret;
1570
1571 nr_io_queues = nvme_tcp_nr_io_queues(ctrl);
1572 ret = nvme_set_queue_count(ctrl, &nr_io_queues);
1573 if (ret)
1574 return ret;
1575
1576 ctrl->queue_count = nr_io_queues + 1;
1577 if (ctrl->queue_count < 2)
1578 return 0;
1579
1580 dev_info(ctrl->device,
1581 "creating %d I/O queues.\n", nr_io_queues);
1582
1583 return __nvme_tcp_alloc_io_queues(ctrl);
1584}
1585
1586static void nvme_tcp_destroy_io_queues(struct nvme_ctrl *ctrl, bool remove)
1587{
1588 nvme_tcp_stop_io_queues(ctrl);
1589 if (remove) {
1590 blk_cleanup_queue(ctrl->connect_q);
1591 blk_mq_free_tag_set(ctrl->tagset);
1592 }
1593 nvme_tcp_free_io_queues(ctrl);
1594}
1595
1596static int nvme_tcp_configure_io_queues(struct nvme_ctrl *ctrl, bool new)
1597{
1598 int ret;
1599
1600 ret = nvme_tcp_alloc_io_queues(ctrl);
1601 if (ret)
1602 return ret;
1603
1604 if (new) {
1605 ctrl->tagset = nvme_tcp_alloc_tagset(ctrl, false);
1606 if (IS_ERR(ctrl->tagset)) {
1607 ret = PTR_ERR(ctrl->tagset);
1608 goto out_free_io_queues;
1609 }
1610
1611 ctrl->connect_q = blk_mq_init_queue(ctrl->tagset);
1612 if (IS_ERR(ctrl->connect_q)) {
1613 ret = PTR_ERR(ctrl->connect_q);
1614 goto out_free_tag_set;
1615 }
1616 } else {
1617 blk_mq_update_nr_hw_queues(ctrl->tagset,
1618 ctrl->queue_count - 1);
1619 }
1620
1621 ret = nvme_tcp_start_io_queues(ctrl);
1622 if (ret)
1623 goto out_cleanup_connect_q;
1624
1625 return 0;
1626
1627out_cleanup_connect_q:
1628 if (new)
1629 blk_cleanup_queue(ctrl->connect_q);
1630out_free_tag_set:
1631 if (new)
1632 blk_mq_free_tag_set(ctrl->tagset);
1633out_free_io_queues:
1634 nvme_tcp_free_io_queues(ctrl);
1635 return ret;
1636}
1637
1638static void nvme_tcp_destroy_admin_queue(struct nvme_ctrl *ctrl, bool remove)
1639{
1640 nvme_tcp_stop_queue(ctrl, 0);
1641 if (remove) {
1642 blk_cleanup_queue(ctrl->admin_q);
1643 blk_mq_free_tag_set(ctrl->admin_tagset);
1644 }
1645 nvme_tcp_free_admin_queue(ctrl);
1646}
1647
1648static int nvme_tcp_configure_admin_queue(struct nvme_ctrl *ctrl, bool new)
1649{
1650 int error;
1651
1652 error = nvme_tcp_alloc_admin_queue(ctrl);
1653 if (error)
1654 return error;
1655
1656 if (new) {
1657 ctrl->admin_tagset = nvme_tcp_alloc_tagset(ctrl, true);
1658 if (IS_ERR(ctrl->admin_tagset)) {
1659 error = PTR_ERR(ctrl->admin_tagset);
1660 goto out_free_queue;
1661 }
1662
1663 ctrl->admin_q = blk_mq_init_queue(ctrl->admin_tagset);
1664 if (IS_ERR(ctrl->admin_q)) {
1665 error = PTR_ERR(ctrl->admin_q);
1666 goto out_free_tagset;
1667 }
1668 }
1669
1670 error = nvme_tcp_start_queue(ctrl, 0);
1671 if (error)
1672 goto out_cleanup_queue;
1673
1674 error = ctrl->ops->reg_read64(ctrl, NVME_REG_CAP, &ctrl->cap);
1675 if (error) {
1676 dev_err(ctrl->device,
1677 "prop_get NVME_REG_CAP failed\n");
1678 goto out_stop_queue;
1679 }
1680
1681 ctrl->sqsize = min_t(int, NVME_CAP_MQES(ctrl->cap), ctrl->sqsize);
1682
1683 error = nvme_enable_ctrl(ctrl, ctrl->cap);
1684 if (error)
1685 goto out_stop_queue;
1686
1687 error = nvme_init_identify(ctrl);
1688 if (error)
1689 goto out_stop_queue;
1690
1691 return 0;
1692
1693out_stop_queue:
1694 nvme_tcp_stop_queue(ctrl, 0);
1695out_cleanup_queue:
1696 if (new)
1697 blk_cleanup_queue(ctrl->admin_q);
1698out_free_tagset:
1699 if (new)
1700 blk_mq_free_tag_set(ctrl->admin_tagset);
1701out_free_queue:
1702 nvme_tcp_free_admin_queue(ctrl);
1703 return error;
1704}
1705
1706static void nvme_tcp_teardown_admin_queue(struct nvme_ctrl *ctrl,
1707 bool remove)
1708{
1709 blk_mq_quiesce_queue(ctrl->admin_q);
1710 nvme_tcp_stop_queue(ctrl, 0);
1711 if (ctrl->admin_tagset)
1712 blk_mq_tagset_busy_iter(ctrl->admin_tagset,
1713 nvme_cancel_request, ctrl);
1714 blk_mq_unquiesce_queue(ctrl->admin_q);
1715 nvme_tcp_destroy_admin_queue(ctrl, remove);
1716}
1717
1718static void nvme_tcp_teardown_io_queues(struct nvme_ctrl *ctrl,
1719 bool remove)
1720{
1721 if (ctrl->queue_count <= 1)
1722 return;
1723 nvme_stop_queues(ctrl);
1724 nvme_tcp_stop_io_queues(ctrl);
1725 if (ctrl->tagset)
1726 blk_mq_tagset_busy_iter(ctrl->tagset,
1727 nvme_cancel_request, ctrl);
1728 if (remove)
1729 nvme_start_queues(ctrl);
1730 nvme_tcp_destroy_io_queues(ctrl, remove);
1731}
1732
1733static void nvme_tcp_reconnect_or_remove(struct nvme_ctrl *ctrl)
1734{
1735 /* If we are resetting/deleting then do nothing */
1736 if (ctrl->state != NVME_CTRL_CONNECTING) {
1737 WARN_ON_ONCE(ctrl->state == NVME_CTRL_NEW ||
1738 ctrl->state == NVME_CTRL_LIVE);
1739 return;
1740 }
1741
1742 if (nvmf_should_reconnect(ctrl)) {
1743 dev_info(ctrl->device, "Reconnecting in %d seconds...\n",
1744 ctrl->opts->reconnect_delay);
1745 queue_delayed_work(nvme_wq, &to_tcp_ctrl(ctrl)->connect_work,
1746 ctrl->opts->reconnect_delay * HZ);
1747 } else {
1748 dev_info(ctrl->device, "Removing controller...\n");
1749 nvme_delete_ctrl(ctrl);
1750 }
1751}
1752
1753static int nvme_tcp_setup_ctrl(struct nvme_ctrl *ctrl, bool new)
1754{
1755 struct nvmf_ctrl_options *opts = ctrl->opts;
1756 int ret = -EINVAL;
1757
1758 ret = nvme_tcp_configure_admin_queue(ctrl, new);
1759 if (ret)
1760 return ret;
1761
1762 if (ctrl->icdoff) {
1763 dev_err(ctrl->device, "icdoff is not supported!\n");
1764 goto destroy_admin;
1765 }
1766
1767 if (opts->queue_size > ctrl->sqsize + 1)
1768 dev_warn(ctrl->device,
1769 "queue_size %zu > ctrl sqsize %u, clamping down\n",
1770 opts->queue_size, ctrl->sqsize + 1);
1771
1772 if (ctrl->sqsize + 1 > ctrl->maxcmd) {
1773 dev_warn(ctrl->device,
1774 "sqsize %u > ctrl maxcmd %u, clamping down\n",
1775 ctrl->sqsize + 1, ctrl->maxcmd);
1776 ctrl->sqsize = ctrl->maxcmd - 1;
1777 }
1778
1779 if (ctrl->queue_count > 1) {
1780 ret = nvme_tcp_configure_io_queues(ctrl, new);
1781 if (ret)
1782 goto destroy_admin;
1783 }
1784
1785 if (!nvme_change_ctrl_state(ctrl, NVME_CTRL_LIVE)) {
1786 /* state change failure is ok if we're in DELETING state */
1787 WARN_ON_ONCE(ctrl->state != NVME_CTRL_DELETING);
1788 ret = -EINVAL;
1789 goto destroy_io;
1790 }
1791
1792 nvme_start_ctrl(ctrl);
1793 return 0;
1794
1795destroy_io:
1796 if (ctrl->queue_count > 1)
1797 nvme_tcp_destroy_io_queues(ctrl, new);
1798destroy_admin:
1799 nvme_tcp_stop_queue(ctrl, 0);
1800 nvme_tcp_destroy_admin_queue(ctrl, new);
1801 return ret;
1802}
1803
1804static void nvme_tcp_reconnect_ctrl_work(struct work_struct *work)
1805{
1806 struct nvme_tcp_ctrl *tcp_ctrl = container_of(to_delayed_work(work),
1807 struct nvme_tcp_ctrl, connect_work);
1808 struct nvme_ctrl *ctrl = &tcp_ctrl->ctrl;
1809
1810 ++ctrl->nr_reconnects;
1811
1812 if (nvme_tcp_setup_ctrl(ctrl, false))
1813 goto requeue;
1814
1815 dev_info(ctrl->device, "Successfully reconnected (%d attempt)\n",
1816 ctrl->nr_reconnects);
1817
1818 ctrl->nr_reconnects = 0;
1819
1820 return;
1821
1822requeue:
1823 dev_info(ctrl->device, "Failed reconnect attempt %d\n",
1824 ctrl->nr_reconnects);
1825 nvme_tcp_reconnect_or_remove(ctrl);
1826}
1827
1828static void nvme_tcp_error_recovery_work(struct work_struct *work)
1829{
1830 struct nvme_tcp_ctrl *tcp_ctrl = container_of(work,
1831 struct nvme_tcp_ctrl, err_work);
1832 struct nvme_ctrl *ctrl = &tcp_ctrl->ctrl;
1833
1834 nvme_stop_keep_alive(ctrl);
1835 nvme_tcp_teardown_io_queues(ctrl, false);
1836 /* unquiesce to fail fast pending requests */
1837 nvme_start_queues(ctrl);
1838 nvme_tcp_teardown_admin_queue(ctrl, false);
1839
1840 if (!nvme_change_ctrl_state(ctrl, NVME_CTRL_CONNECTING)) {
1841 /* state change failure is ok if we're in DELETING state */
1842 WARN_ON_ONCE(ctrl->state != NVME_CTRL_DELETING);
1843 return;
1844 }
1845
1846 nvme_tcp_reconnect_or_remove(ctrl);
1847}
1848
1849static void nvme_tcp_teardown_ctrl(struct nvme_ctrl *ctrl, bool shutdown)
1850{
1851 cancel_work_sync(&to_tcp_ctrl(ctrl)->err_work);
1852 cancel_delayed_work_sync(&to_tcp_ctrl(ctrl)->connect_work);
1853
1854 nvme_tcp_teardown_io_queues(ctrl, shutdown);
1855 if (shutdown)
1856 nvme_shutdown_ctrl(ctrl);
1857 else
1858 nvme_disable_ctrl(ctrl, ctrl->cap);
1859 nvme_tcp_teardown_admin_queue(ctrl, shutdown);
1860}
1861
1862static void nvme_tcp_delete_ctrl(struct nvme_ctrl *ctrl)
1863{
1864 nvme_tcp_teardown_ctrl(ctrl, true);
1865}
1866
1867static void nvme_reset_ctrl_work(struct work_struct *work)
1868{
1869 struct nvme_ctrl *ctrl =
1870 container_of(work, struct nvme_ctrl, reset_work);
1871
1872 nvme_stop_ctrl(ctrl);
1873 nvme_tcp_teardown_ctrl(ctrl, false);
1874
1875 if (!nvme_change_ctrl_state(ctrl, NVME_CTRL_CONNECTING)) {
1876 /* state change failure is ok if we're in DELETING state */
1877 WARN_ON_ONCE(ctrl->state != NVME_CTRL_DELETING);
1878 return;
1879 }
1880
1881 if (nvme_tcp_setup_ctrl(ctrl, false))
1882 goto out_fail;
1883
1884 return;
1885
1886out_fail:
1887 ++ctrl->nr_reconnects;
1888 nvme_tcp_reconnect_or_remove(ctrl);
1889}
1890
1891static void nvme_tcp_free_ctrl(struct nvme_ctrl *nctrl)
1892{
1893 struct nvme_tcp_ctrl *ctrl = to_tcp_ctrl(nctrl);
1894
1895 if (list_empty(&ctrl->list))
1896 goto free_ctrl;
1897
1898 mutex_lock(&nvme_tcp_ctrl_mutex);
1899 list_del(&ctrl->list);
1900 mutex_unlock(&nvme_tcp_ctrl_mutex);
1901
1902 nvmf_free_options(nctrl->opts);
1903free_ctrl:
1904 kfree(ctrl->queues);
1905 kfree(ctrl);
1906}
1907
1908static void nvme_tcp_set_sg_null(struct nvme_command *c)
1909{
1910 struct nvme_sgl_desc *sg = &c->common.dptr.sgl;
1911
1912 sg->addr = 0;
1913 sg->length = 0;
1914 sg->type = (NVME_TRANSPORT_SGL_DATA_DESC << 4) |
1915 NVME_SGL_FMT_TRANSPORT_A;
1916}
1917
1918static void nvme_tcp_set_sg_inline(struct nvme_tcp_queue *queue,
1919 struct nvme_command *c, u32 data_len)
1920{
1921 struct nvme_sgl_desc *sg = &c->common.dptr.sgl;
1922
1923 sg->addr = cpu_to_le64(queue->ctrl->ctrl.icdoff);
1924 sg->length = cpu_to_le32(data_len);
1925 sg->type = (NVME_SGL_FMT_DATA_DESC << 4) | NVME_SGL_FMT_OFFSET;
1926}
1927
1928static void nvme_tcp_set_sg_host_data(struct nvme_command *c,
1929 u32 data_len)
1930{
1931 struct nvme_sgl_desc *sg = &c->common.dptr.sgl;
1932
1933 sg->addr = 0;
1934 sg->length = cpu_to_le32(data_len);
1935 sg->type = (NVME_TRANSPORT_SGL_DATA_DESC << 4) |
1936 NVME_SGL_FMT_TRANSPORT_A;
1937}
1938
1939static void nvme_tcp_submit_async_event(struct nvme_ctrl *arg)
1940{
1941 struct nvme_tcp_ctrl *ctrl = to_tcp_ctrl(arg);
1942 struct nvme_tcp_queue *queue = &ctrl->queues[0];
1943 struct nvme_tcp_cmd_pdu *pdu = ctrl->async_req.pdu;
1944 struct nvme_command *cmd = &pdu->cmd;
1945 u8 hdgst = nvme_tcp_hdgst_len(queue);
1946
1947 memset(pdu, 0, sizeof(*pdu));
1948 pdu->hdr.type = nvme_tcp_cmd;
1949 if (queue->hdr_digest)
1950 pdu->hdr.flags |= NVME_TCP_F_HDGST;
1951 pdu->hdr.hlen = sizeof(*pdu);
1952 pdu->hdr.plen = cpu_to_le32(pdu->hdr.hlen + hdgst);
1953
1954 cmd->common.opcode = nvme_admin_async_event;
1955 cmd->common.command_id = NVME_AQ_BLK_MQ_DEPTH;
1956 cmd->common.flags |= NVME_CMD_SGL_METABUF;
1957 nvme_tcp_set_sg_null(cmd);
1958
1959 ctrl->async_req.state = NVME_TCP_SEND_CMD_PDU;
1960 ctrl->async_req.offset = 0;
1961 ctrl->async_req.curr_bio = NULL;
1962 ctrl->async_req.data_len = 0;
1963
1964 nvme_tcp_queue_request(&ctrl->async_req);
1965}
1966
1967static enum blk_eh_timer_return
1968nvme_tcp_timeout(struct request *rq, bool reserved)
1969{
1970 struct nvme_tcp_request *req = blk_mq_rq_to_pdu(rq);
1971 struct nvme_tcp_ctrl *ctrl = req->queue->ctrl;
1972 struct nvme_tcp_cmd_pdu *pdu = req->pdu;
1973
1974	dev_warn(ctrl->ctrl.device,
1975		"queue %d: timeout request %#x type %d\n",
1976		nvme_tcp_queue_id(req->queue), rq->tag, pdu->hdr.type);
1977
1978 if (ctrl->ctrl.state != NVME_CTRL_LIVE) {
1979		/*
1980		 * Teardown immediately if the controller times out while starting
1981		 * or if we have already started error recovery. All outstanding
1982		 * requests are completed on shutdown, so we return BLK_EH_DONE.
1983 */
1984 flush_work(&ctrl->err_work);
1985 nvme_tcp_teardown_io_queues(&ctrl->ctrl, false);
1986 nvme_tcp_teardown_admin_queue(&ctrl->ctrl, false);
1987		return BLK_EH_DONE;
1988 }
1989
1990	dev_warn(ctrl->ctrl.device, "starting error recovery\n");
1991	nvme_tcp_error_recovery(&ctrl->ctrl);
1992
1993 return BLK_EH_RESET_TIMER;
1994}
1995
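/*
 * Pick the SGL type for a request: writes small enough to fit in the capsule
 * (up to nvme_tcp_inline_data_size()) are sent inline together with the
 * command PDU, everything else stays in host memory and is described with a
 * transport data block descriptor.
 */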
1996static blk_status_t nvme_tcp_map_data(struct nvme_tcp_queue *queue,
1997 struct request *rq)
1998{
1999 struct nvme_tcp_request *req = blk_mq_rq_to_pdu(rq);
2000 struct nvme_tcp_cmd_pdu *pdu = req->pdu;
2001 struct nvme_command *c = &pdu->cmd;
2002
2003 c->common.flags |= NVME_CMD_SGL_METABUF;
2004
2005 if (rq_data_dir(rq) == WRITE && req->data_len &&
2006 req->data_len <= nvme_tcp_inline_data_size(queue))
2007 nvme_tcp_set_sg_inline(queue, c, req->data_len);
2008 else
2009 nvme_tcp_set_sg_host_data(c, req->data_len);
2010
2011 return 0;
2012}
2013
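/*
 * Build the command PDU header.  hlen is the bare PDU size, pdo is the offset
 * of any in-capsule data (only set for inline writes) and plen is the total
 * on-the-wire length including digests.  As a rough example (assuming the
 * usual 72-byte command PDU): a 4KiB inline write with header and data
 * digests enabled would have hlen = 72, pdo = 76 and
 * plen = 72 + 4 + 4096 + 4 = 4176.
 */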
2014static blk_status_t nvme_tcp_setup_cmd_pdu(struct nvme_ns *ns,
2015 struct request *rq)
2016{
2017 struct nvme_tcp_request *req = blk_mq_rq_to_pdu(rq);
2018 struct nvme_tcp_cmd_pdu *pdu = req->pdu;
2019 struct nvme_tcp_queue *queue = req->queue;
2020 u8 hdgst = nvme_tcp_hdgst_len(queue), ddgst = 0;
2021 blk_status_t ret;
2022
2023 ret = nvme_setup_cmd(ns, rq, &pdu->cmd);
2024 if (ret)
2025 return ret;
2026
2027 req->state = NVME_TCP_SEND_CMD_PDU;
2028 req->offset = 0;
2029 req->data_sent = 0;
2030 req->pdu_len = 0;
2031 req->pdu_sent = 0;
2032 req->data_len = blk_rq_payload_bytes(rq);
2033 req->curr_bio = rq->bio;
2034
2035 if (rq_data_dir(rq) == WRITE &&
2036 req->data_len <= nvme_tcp_inline_data_size(queue))
2037 req->pdu_len = req->data_len;
2038 else if (req->curr_bio)
2039 nvme_tcp_init_iter(req, READ);
2040
2041 pdu->hdr.type = nvme_tcp_cmd;
2042 pdu->hdr.flags = 0;
2043 if (queue->hdr_digest)
2044 pdu->hdr.flags |= NVME_TCP_F_HDGST;
2045 if (queue->data_digest && req->pdu_len) {
2046 pdu->hdr.flags |= NVME_TCP_F_DDGST;
2047 ddgst = nvme_tcp_ddgst_len(queue);
2048 }
2049 pdu->hdr.hlen = sizeof(*pdu);
2050 pdu->hdr.pdo = req->pdu_len ? pdu->hdr.hlen + hdgst : 0;
2051 pdu->hdr.plen =
2052 cpu_to_le32(pdu->hdr.hlen + hdgst + req->pdu_len + ddgst);
2053
2054 ret = nvme_tcp_map_data(queue, rq);
2055 if (unlikely(ret)) {
2056 dev_err(queue->ctrl->ctrl.device,
2057 "Failed to map data (%d)\n", ret);
2058 return ret;
2059 }
2060
2061 return 0;
2062}
2063
2064static blk_status_t nvme_tcp_queue_rq(struct blk_mq_hw_ctx *hctx,
2065 const struct blk_mq_queue_data *bd)
2066{
2067 struct nvme_ns *ns = hctx->queue->queuedata;
2068 struct nvme_tcp_queue *queue = hctx->driver_data;
2069 struct request *rq = bd->rq;
2070 struct nvme_tcp_request *req = blk_mq_rq_to_pdu(rq);
2071 bool queue_ready = test_bit(NVME_TCP_Q_LIVE, &queue->flags);
2072 blk_status_t ret;
2073
2074 if (!nvmf_check_ready(&queue->ctrl->ctrl, rq, queue_ready))
2075 return nvmf_fail_nonready_command(&queue->ctrl->ctrl, rq);
2076
2077 ret = nvme_tcp_setup_cmd_pdu(ns, rq);
2078 if (unlikely(ret))
2079 return ret;
2080
2081 blk_mq_start_request(rq);
2082
2083 nvme_tcp_queue_request(req);
2084
2085 return BLK_STS_OK;
2086}
2087
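/*
 * blk-mq queue mapping.  With nr_write_queues set, writes (HCTX_TYPE_DEFAULT)
 * get the first nr_write_queues hardware queues and reads (HCTX_TYPE_READ)
 * get their own nr_io_queues starting right after them; otherwise both map
 * types share the same nr_io_queues queues.
 */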
2088static int nvme_tcp_map_queues(struct blk_mq_tag_set *set)
2089{
2090 struct nvme_tcp_ctrl *ctrl = set->driver_data;
2091
2092 set->map[HCTX_TYPE_DEFAULT].queue_offset = 0;
2093 set->map[HCTX_TYPE_READ].nr_queues = ctrl->ctrl.opts->nr_io_queues;
2094 if (ctrl->ctrl.opts->nr_write_queues) {
2095 /* separate read/write queues */
2096 set->map[HCTX_TYPE_DEFAULT].nr_queues =
2097 ctrl->ctrl.opts->nr_write_queues;
2098 set->map[HCTX_TYPE_READ].queue_offset =
2099 ctrl->ctrl.opts->nr_write_queues;
2100 } else {
2101 /* mixed read/write queues */
2102 set->map[HCTX_TYPE_DEFAULT].nr_queues =
2103 ctrl->ctrl.opts->nr_io_queues;
2104 set->map[HCTX_TYPE_READ].queue_offset = 0;
2105 }
2106 blk_mq_map_queues(&set->map[HCTX_TYPE_DEFAULT]);
2107 blk_mq_map_queues(&set->map[HCTX_TYPE_READ]);
2108 return 0;
2109}
2110
2111static struct blk_mq_ops nvme_tcp_mq_ops = {
2112 .queue_rq = nvme_tcp_queue_rq,
2113 .complete = nvme_complete_rq,
2114 .init_request = nvme_tcp_init_request,
2115 .exit_request = nvme_tcp_exit_request,
2116 .init_hctx = nvme_tcp_init_hctx,
2117 .timeout = nvme_tcp_timeout,
2118	.map_queues	= nvme_tcp_map_queues,
2119};
2120
2121static struct blk_mq_ops nvme_tcp_admin_mq_ops = {
2122 .queue_rq = nvme_tcp_queue_rq,
2123 .complete = nvme_complete_rq,
2124 .init_request = nvme_tcp_init_request,
2125 .exit_request = nvme_tcp_exit_request,
2126 .init_hctx = nvme_tcp_init_admin_hctx,
2127 .timeout = nvme_tcp_timeout,
2128};
2129
2130static const struct nvme_ctrl_ops nvme_tcp_ctrl_ops = {
2131 .name = "tcp",
2132 .module = THIS_MODULE,
2133 .flags = NVME_F_FABRICS,
2134 .reg_read32 = nvmf_reg_read32,
2135 .reg_read64 = nvmf_reg_read64,
2136 .reg_write32 = nvmf_reg_write32,
2137 .free_ctrl = nvme_tcp_free_ctrl,
2138 .submit_async_event = nvme_tcp_submit_async_event,
2139 .delete_ctrl = nvme_tcp_delete_ctrl,
2140 .get_address = nvmf_get_address,
2141};
2142
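/*
 * Reject a connect request whose connection options match an already
 * connected controller, unless duplicate connections were explicitly allowed
 * by the user (checked by the caller).
 */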
2143static bool
2144nvme_tcp_existing_controller(struct nvmf_ctrl_options *opts)
2145{
2146 struct nvme_tcp_ctrl *ctrl;
2147 bool found = false;
2148
2149 mutex_lock(&nvme_tcp_ctrl_mutex);
2150 list_for_each_entry(ctrl, &nvme_tcp_ctrl_list, list) {
2151 found = nvmf_ip_options_match(&ctrl->ctrl, opts);
2152 if (found)
2153 break;
2154 }
2155 mutex_unlock(&nvme_tcp_ctrl_mutex);
2156
2157 return found;
2158}
2159
2160static struct nvme_ctrl *nvme_tcp_create_ctrl(struct device *dev,
2161 struct nvmf_ctrl_options *opts)
2162{
2163 struct nvme_tcp_ctrl *ctrl;
2164 int ret;
2165
2166 ctrl = kzalloc(sizeof(*ctrl), GFP_KERNEL);
2167 if (!ctrl)
2168 return ERR_PTR(-ENOMEM);
2169
2170 INIT_LIST_HEAD(&ctrl->list);
2171 ctrl->ctrl.opts = opts;
2172	ctrl->ctrl.queue_count = opts->nr_io_queues + opts->nr_write_queues + 1;
2173	ctrl->ctrl.sqsize = opts->queue_size - 1;
2174 ctrl->ctrl.kato = opts->kato;
2175
2176 INIT_DELAYED_WORK(&ctrl->connect_work,
2177 nvme_tcp_reconnect_ctrl_work);
2178 INIT_WORK(&ctrl->err_work, nvme_tcp_error_recovery_work);
2179 INIT_WORK(&ctrl->ctrl.reset_work, nvme_reset_ctrl_work);
2180
2181 if (!(opts->mask & NVMF_OPT_TRSVCID)) {
2182 opts->trsvcid =
2183 kstrdup(__stringify(NVME_TCP_DISC_PORT), GFP_KERNEL);
2184 if (!opts->trsvcid) {
2185 ret = -ENOMEM;
2186 goto out_free_ctrl;
2187 }
2188 opts->mask |= NVMF_OPT_TRSVCID;
2189 }
2190
2191 ret = inet_pton_with_scope(&init_net, AF_UNSPEC,
2192 opts->traddr, opts->trsvcid, &ctrl->addr);
2193 if (ret) {
2194 pr_err("malformed address passed: %s:%s\n",
2195 opts->traddr, opts->trsvcid);
2196 goto out_free_ctrl;
2197 }
2198
2199 if (opts->mask & NVMF_OPT_HOST_TRADDR) {
2200 ret = inet_pton_with_scope(&init_net, AF_UNSPEC,
2201 opts->host_traddr, NULL, &ctrl->src_addr);
2202 if (ret) {
2203 pr_err("malformed src address passed: %s\n",
2204 opts->host_traddr);
2205 goto out_free_ctrl;
2206 }
2207 }
2208
2209 if (!opts->duplicate_connect && nvme_tcp_existing_controller(opts)) {
2210 ret = -EALREADY;
2211 goto out_free_ctrl;
2212 }
2213
2214	ctrl->queues = kcalloc(ctrl->ctrl.queue_count, sizeof(*ctrl->queues),
2215			GFP_KERNEL);
2216 if (!ctrl->queues) {
2217 ret = -ENOMEM;
2218 goto out_free_ctrl;
2219 }
2220
2221 ret = nvme_init_ctrl(&ctrl->ctrl, dev, &nvme_tcp_ctrl_ops, 0);
2222 if (ret)
2223 goto out_kfree_queues;
2224
2225 if (!nvme_change_ctrl_state(&ctrl->ctrl, NVME_CTRL_CONNECTING)) {
2226 WARN_ON_ONCE(1);
2227 ret = -EINTR;
2228 goto out_uninit_ctrl;
2229 }
2230
2231 ret = nvme_tcp_setup_ctrl(&ctrl->ctrl, true);
2232 if (ret)
2233 goto out_uninit_ctrl;
2234
2235 dev_info(ctrl->ctrl.device, "new ctrl: NQN \"%s\", addr %pISp\n",
2236 ctrl->ctrl.opts->subsysnqn, &ctrl->addr);
2237
2238 nvme_get_ctrl(&ctrl->ctrl);
2239
2240 mutex_lock(&nvme_tcp_ctrl_mutex);
2241 list_add_tail(&ctrl->list, &nvme_tcp_ctrl_list);
2242 mutex_unlock(&nvme_tcp_ctrl_mutex);
2243
2244 return &ctrl->ctrl;
2245
2246out_uninit_ctrl:
2247 nvme_uninit_ctrl(&ctrl->ctrl);
2248 nvme_put_ctrl(&ctrl->ctrl);
2249 if (ret > 0)
2250 ret = -EIO;
2251 return ERR_PTR(ret);
2252out_kfree_queues:
2253 kfree(ctrl->queues);
2254out_free_ctrl:
2255 kfree(ctrl);
2256 return ERR_PTR(ret);
2257}
2258
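/*
 * Fabrics transport registration.  Once the module is loaded, a controller
 * can be created through the fabrics device, e.g. with nvme-cli (this example
 * assumes a target listening on the conventional 4420 I/O port):
 *
 *   nvme connect -t tcp -a 192.168.1.100 -s 4420 -n <subsystem NQN>
 *
 * traddr is mandatory; trsvcid defaults to the NVMe/TCP discovery port when
 * omitted (see nvme_tcp_create_ctrl() above).
 */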
2259static struct nvmf_transport_ops nvme_tcp_transport = {
2260 .name = "tcp",
2261 .module = THIS_MODULE,
2262 .required_opts = NVMF_OPT_TRADDR,
2263 .allowed_opts = NVMF_OPT_TRSVCID | NVMF_OPT_RECONNECT_DELAY |
2264 NVMF_OPT_HOST_TRADDR | NVMF_OPT_CTRL_LOSS_TMO |
2265			  NVMF_OPT_HDR_DIGEST | NVMF_OPT_DATA_DIGEST |
2266 NVMF_OPT_NR_WRITE_QUEUES,
2267	.create_ctrl	= nvme_tcp_create_ctrl,
2268};
2269
2270static int __init nvme_tcp_init_module(void)
2271{
2272 nvme_tcp_wq = alloc_workqueue("nvme_tcp_wq",
2273 WQ_MEM_RECLAIM | WQ_HIGHPRI, 0);
2274 if (!nvme_tcp_wq)
2275 return -ENOMEM;
2276
2277 nvmf_register_transport(&nvme_tcp_transport);
2278 return 0;
2279}
2280
2281static void __exit nvme_tcp_cleanup_module(void)
2282{
2283 struct nvme_tcp_ctrl *ctrl;
2284
2285 nvmf_unregister_transport(&nvme_tcp_transport);
2286
2287 mutex_lock(&nvme_tcp_ctrl_mutex);
2288 list_for_each_entry(ctrl, &nvme_tcp_ctrl_list, list)
2289 nvme_delete_ctrl(&ctrl->ctrl);
2290 mutex_unlock(&nvme_tcp_ctrl_mutex);
2291 flush_workqueue(nvme_delete_wq);
2292
2293 destroy_workqueue(nvme_tcp_wq);
2294}
2295
2296module_init(nvme_tcp_init_module);
2297module_exit(nvme_tcp_cleanup_module);
2298
2299MODULE_LICENSE("GPL v2");