// SPDX-License-Identifier: GPL-2.0
/*
 * NVMe over Fabrics TCP host.
 * Copyright (c) 2018 Lightbits Labs. All rights reserved.
 */
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/module.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/err.h>
#include <linux/nvme-tcp.h>
#include <net/sock.h>
#include <net/tcp.h>
#include <linux/blk-mq.h>
#include <crypto/hash.h>
#include <net/busy_poll.h>

#include "nvme.h"
#include "fabrics.h"

struct nvme_tcp_queue;

/* Define the socket priority to use for connections where it is desirable
 * that the NIC consider performing optimized packet processing or filtering.
 * A non-zero value is sufficient to indicate general consideration of any
 * possible optimization.  Making it a module param allows for alternative
 * values that may be unique to some NIC implementations.
 */
static int so_priority;
module_param(so_priority, int, 0644);
MODULE_PARM_DESC(so_priority, "nvme tcp socket optimize priority");
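
/*
 * Example (hypothetical values): loading the module with
 * "modprobe nvme-tcp so_priority=6" makes every queue socket request
 * SO_PRIORITY 6 from the networking stack; the default of 0 skips the
 * setsockopt() entirely and leaves the socket priority untouched.
 */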

enum nvme_tcp_send_state {
	NVME_TCP_SEND_CMD_PDU = 0,
	NVME_TCP_SEND_H2C_PDU,
	NVME_TCP_SEND_DATA,
	NVME_TCP_SEND_DDGST,
};

struct nvme_tcp_request {
	struct nvme_request req;
	void *pdu;
	struct nvme_tcp_queue *queue;
	u32 data_len;
	u32 pdu_len;
	u32 pdu_sent;
	u16 ttag;
	struct list_head entry;
	__le32 ddgst;

	struct bio *curr_bio;
	struct iov_iter iter;

	/* send state */
	size_t offset;
	size_t data_sent;
	enum nvme_tcp_send_state state;
};

enum nvme_tcp_queue_flags {
	NVME_TCP_Q_ALLOCATED = 0,
	NVME_TCP_Q_LIVE = 1,
	NVME_TCP_Q_POLLING = 2,
};

enum nvme_tcp_recv_state {
	NVME_TCP_RECV_PDU = 0,
	NVME_TCP_RECV_DATA,
	NVME_TCP_RECV_DDGST,
};

struct nvme_tcp_ctrl;
struct nvme_tcp_queue {
	struct socket *sock;
	struct work_struct io_work;
	int io_cpu;

	spinlock_t lock;
	struct mutex send_mutex;
	struct list_head send_list;

	/* recv state */
	void *pdu;
	int pdu_remaining;
	int pdu_offset;
	size_t data_remaining;
	size_t ddgst_remaining;
	unsigned int nr_cqe;

	/* send state */
	struct nvme_tcp_request *request;

	int queue_size;
	size_t cmnd_capsule_len;
	struct nvme_tcp_ctrl *ctrl;
	unsigned long flags;
	bool rd_enabled;

	bool hdr_digest;
	bool data_digest;
	struct ahash_request *rcv_hash;
	struct ahash_request *snd_hash;
	__le32 exp_ddgst;
	__le32 recv_ddgst;

	struct page_frag_cache pf_cache;

	void (*state_change)(struct sock *);
	void (*data_ready)(struct sock *);
	void (*write_space)(struct sock *);
};

struct nvme_tcp_ctrl {
	/* read only in the hot path */
	struct nvme_tcp_queue *queues;
	struct blk_mq_tag_set tag_set;

	/* other member variables */
	struct list_head list;
	struct blk_mq_tag_set admin_tag_set;
	struct sockaddr_storage addr;
	struct sockaddr_storage src_addr;
	struct nvme_ctrl ctrl;

	struct work_struct err_work;
	struct delayed_work connect_work;
	struct nvme_tcp_request async_req;
	u32 io_queues[HCTX_MAX_TYPES];
};

static LIST_HEAD(nvme_tcp_ctrl_list);
static DEFINE_MUTEX(nvme_tcp_ctrl_mutex);
static struct workqueue_struct *nvme_tcp_wq;
static struct blk_mq_ops nvme_tcp_mq_ops;
static struct blk_mq_ops nvme_tcp_admin_mq_ops;
static int nvme_tcp_try_send(struct nvme_tcp_queue *queue);

static inline struct nvme_tcp_ctrl *to_tcp_ctrl(struct nvme_ctrl *ctrl)
{
	return container_of(ctrl, struct nvme_tcp_ctrl, ctrl);
}

static inline int nvme_tcp_queue_id(struct nvme_tcp_queue *queue)
{
	return queue - queue->ctrl->queues;
}

static inline struct blk_mq_tags *nvme_tcp_tagset(struct nvme_tcp_queue *queue)
{
	u32 queue_idx = nvme_tcp_queue_id(queue);

	if (queue_idx == 0)
		return queue->ctrl->admin_tag_set.tags[queue_idx];
	return queue->ctrl->tag_set.tags[queue_idx - 1];
}

static inline u8 nvme_tcp_hdgst_len(struct nvme_tcp_queue *queue)
{
	return queue->hdr_digest ? NVME_TCP_DIGEST_LENGTH : 0;
}

static inline u8 nvme_tcp_ddgst_len(struct nvme_tcp_queue *queue)
{
	return queue->data_digest ? NVME_TCP_DIGEST_LENGTH : 0;
}

static inline size_t nvme_tcp_inline_data_size(struct nvme_tcp_queue *queue)
{
	return queue->cmnd_capsule_len - sizeof(struct nvme_command);
}

static inline bool nvme_tcp_async_req(struct nvme_tcp_request *req)
{
	return req == &req->queue->ctrl->async_req;
}

static inline bool nvme_tcp_has_inline_data(struct nvme_tcp_request *req)
{
	struct request *rq;

	if (unlikely(nvme_tcp_async_req(req)))
		return false; /* async events don't have a request */

	rq = blk_mq_rq_from_pdu(req);

	return rq_data_dir(rq) == WRITE && req->data_len &&
		req->data_len <= nvme_tcp_inline_data_size(req->queue);
}
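
/*
 * In-capsule (inline) data sketch: a write whose payload fits inside the
 * command capsule is sent together with the command PDU and needs no R2T
 * round trip.  For example, assuming a controller that advertises
 * IOCCSZ = 516 (16-byte units, i.e. an 8256-byte capsule), the inline
 * budget is 8256 - sizeof(struct nvme_command) = 8192 bytes.
 */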

static inline struct page *nvme_tcp_req_cur_page(struct nvme_tcp_request *req)
{
	return req->iter.bvec->bv_page;
}

static inline size_t nvme_tcp_req_cur_offset(struct nvme_tcp_request *req)
{
	return req->iter.bvec->bv_offset + req->iter.iov_offset;
}

static inline size_t nvme_tcp_req_cur_length(struct nvme_tcp_request *req)
{
	return min_t(size_t, req->iter.bvec->bv_len - req->iter.iov_offset,
			req->pdu_len - req->pdu_sent);
}

static inline size_t nvme_tcp_req_offset(struct nvme_tcp_request *req)
{
	return req->iter.iov_offset;
}

static inline size_t nvme_tcp_pdu_data_left(struct nvme_tcp_request *req)
{
	return rq_data_dir(blk_mq_rq_from_pdu(req)) == WRITE ?
			req->pdu_len - req->pdu_sent : 0;
}

static inline size_t nvme_tcp_pdu_last_send(struct nvme_tcp_request *req,
		int len)
{
	return nvme_tcp_pdu_data_left(req) <= len;
}

static void nvme_tcp_init_iter(struct nvme_tcp_request *req,
		unsigned int dir)
{
	struct request *rq = blk_mq_rq_from_pdu(req);
	struct bio_vec *vec;
	unsigned int size;
	int nsegs;
	size_t offset;

	if (rq->rq_flags & RQF_SPECIAL_PAYLOAD) {
		vec = &rq->special_vec;
		nsegs = 1;
		size = blk_rq_payload_bytes(rq);
		offset = 0;
	} else {
		struct bio *bio = req->curr_bio;

		vec = __bvec_iter_bvec(bio->bi_io_vec, bio->bi_iter);
		nsegs = bio_segments(bio);
		size = bio->bi_iter.bi_size;
		offset = bio->bi_iter.bi_bvec_done;
	}

	iov_iter_bvec(&req->iter, dir, vec, nsegs, size);
	req->iter.iov_offset = offset;
}
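
/*
 * The iterator is (re)built in two flavours: requests with
 * RQF_SPECIAL_PAYLOAD (e.g. the DSM range buffer used for discard) carry
 * a single driver-provided bvec, while normal requests walk the bvecs of
 * the current bio, starting at whatever bi_bvec_done says was already
 * consumed.
 */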

static inline void nvme_tcp_advance_req(struct nvme_tcp_request *req,
		int len)
{
	req->data_sent += len;
	req->pdu_sent += len;
	iov_iter_advance(&req->iter, len);
	if (!iov_iter_count(&req->iter) &&
	    req->data_sent < req->data_len) {
		req->curr_bio = req->curr_bio->bi_next;
		nvme_tcp_init_iter(req, WRITE);
	}
}

static inline void nvme_tcp_queue_request(struct nvme_tcp_request *req,
		bool sync)
{
	struct nvme_tcp_queue *queue = req->queue;
	bool empty;

	spin_lock(&queue->lock);
	empty = list_empty(&queue->send_list) && !queue->request;
	list_add_tail(&req->entry, &queue->send_list);
	spin_unlock(&queue->lock);

	/*
	 * If we are the first on the send_list, try to send directly,
	 * otherwise queue io_work.  Also, only do that if we are on the
	 * same cpu, so we don't introduce contention.
	 */
	if (queue->io_cpu == smp_processor_id() &&
	    sync && empty && mutex_trylock(&queue->send_mutex)) {
		nvme_tcp_try_send(queue);
		mutex_unlock(&queue->send_mutex);
	} else {
		queue_work_on(queue->io_cpu, nvme_tcp_wq, &queue->io_work);
	}
}
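
/*
 * Sketch of the send path above: a request is queued under queue->lock
 * and then transmitted either inline from the submitting context (only
 * when the list was empty, we already run on queue->io_cpu and
 * send_mutex is uncontended) or deferred to io_work on queue->io_cpu.
 * send_mutex is what keeps a direct send and io_work from writing to the
 * socket concurrently.
 */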

static inline struct nvme_tcp_request *
nvme_tcp_fetch_request(struct nvme_tcp_queue *queue)
{
	struct nvme_tcp_request *req;

	spin_lock(&queue->lock);
	req = list_first_entry_or_null(&queue->send_list,
			struct nvme_tcp_request, entry);
	if (req)
		list_del(&req->entry);
	spin_unlock(&queue->lock);

	return req;
}

static inline void nvme_tcp_ddgst_final(struct ahash_request *hash,
		__le32 *dgst)
{
	ahash_request_set_crypt(hash, NULL, (u8 *)dgst, 0);
	crypto_ahash_final(hash);
}

static inline void nvme_tcp_ddgst_update(struct ahash_request *hash,
		struct page *page, off_t off, size_t len)
{
	struct scatterlist sg;

	sg_init_marker(&sg, 1);
	sg_set_page(&sg, page, len, off);
	ahash_request_set_crypt(hash, &sg, NULL, len);
	crypto_ahash_update(hash);
}

static inline void nvme_tcp_hdgst(struct ahash_request *hash,
		void *pdu, size_t len)
{
	struct scatterlist sg;

	sg_init_one(&sg, pdu, len);
	ahash_request_set_crypt(hash, &sg, pdu + len, len);
	crypto_ahash_digest(hash);
}
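
/*
 * NVMe/TCP PDU layout the digest helpers operate on (per the NVMe/TCP
 * transport spec):
 *
 *   | HDR | HDGST (opt) | PAD | DATA | DDGST (opt) |
 *
 * nvme_tcp_hdgst() computes the CRC32C of the header and stores it right
 * after it, while nvme_tcp_ddgst_update()/_final() accumulate the data
 * digest page by page as the payload is sent or received.
 */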

static int nvme_tcp_verify_hdgst(struct nvme_tcp_queue *queue,
		void *pdu, size_t pdu_len)
{
	struct nvme_tcp_hdr *hdr = pdu;
	__le32 recv_digest;
	__le32 exp_digest;

	if (unlikely(!(hdr->flags & NVME_TCP_F_HDGST))) {
		dev_err(queue->ctrl->ctrl.device,
			"queue %d: header digest flag is cleared\n",
			nvme_tcp_queue_id(queue));
		return -EPROTO;
	}

	recv_digest = *(__le32 *)(pdu + hdr->hlen);
	nvme_tcp_hdgst(queue->rcv_hash, pdu, pdu_len);
	exp_digest = *(__le32 *)(pdu + hdr->hlen);
	if (recv_digest != exp_digest) {
		dev_err(queue->ctrl->ctrl.device,
			"header digest error: recv %#x expected %#x\n",
			le32_to_cpu(recv_digest), le32_to_cpu(exp_digest));
		return -EIO;
	}

	return 0;
}

static int nvme_tcp_check_ddgst(struct nvme_tcp_queue *queue, void *pdu)
{
	struct nvme_tcp_hdr *hdr = pdu;
	u8 digest_len = nvme_tcp_hdgst_len(queue);
	u32 len;

	len = le32_to_cpu(hdr->plen) - hdr->hlen -
		((hdr->flags & NVME_TCP_F_HDGST) ? digest_len : 0);

	if (unlikely(len && !(hdr->flags & NVME_TCP_F_DDGST))) {
		dev_err(queue->ctrl->ctrl.device,
			"queue %d: data digest flag is cleared\n",
			nvme_tcp_queue_id(queue));
		return -EPROTO;
	}
	crypto_ahash_init(queue->rcv_hash);

	return 0;
}

static void nvme_tcp_exit_request(struct blk_mq_tag_set *set,
		struct request *rq, unsigned int hctx_idx)
{
	struct nvme_tcp_request *req = blk_mq_rq_to_pdu(rq);

	page_frag_free(req->pdu);
}

static int nvme_tcp_init_request(struct blk_mq_tag_set *set,
		struct request *rq, unsigned int hctx_idx,
		unsigned int numa_node)
{
	struct nvme_tcp_ctrl *ctrl = set->driver_data;
	struct nvme_tcp_request *req = blk_mq_rq_to_pdu(rq);
	int queue_idx = (set == &ctrl->tag_set) ? hctx_idx + 1 : 0;
	struct nvme_tcp_queue *queue = &ctrl->queues[queue_idx];
	u8 hdgst = nvme_tcp_hdgst_len(queue);

	req->pdu = page_frag_alloc(&queue->pf_cache,
			sizeof(struct nvme_tcp_cmd_pdu) + hdgst,
			GFP_KERNEL | __GFP_ZERO);
	if (!req->pdu)
		return -ENOMEM;

	req->queue = queue;
	nvme_req(rq)->ctrl = &ctrl->ctrl;

	return 0;
}

static int nvme_tcp_init_hctx(struct blk_mq_hw_ctx *hctx, void *data,
		unsigned int hctx_idx)
{
	struct nvme_tcp_ctrl *ctrl = data;
	struct nvme_tcp_queue *queue = &ctrl->queues[hctx_idx + 1];

	hctx->driver_data = queue;
	return 0;
}

static int nvme_tcp_init_admin_hctx(struct blk_mq_hw_ctx *hctx, void *data,
		unsigned int hctx_idx)
{
	struct nvme_tcp_ctrl *ctrl = data;
	struct nvme_tcp_queue *queue = &ctrl->queues[0];

	hctx->driver_data = queue;
	return 0;
}

static enum nvme_tcp_recv_state
nvme_tcp_recv_state(struct nvme_tcp_queue *queue)
{
	return (queue->pdu_remaining) ? NVME_TCP_RECV_PDU :
		(queue->ddgst_remaining) ? NVME_TCP_RECV_DDGST :
		NVME_TCP_RECV_DATA;
}
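
/*
 * Receive state machine: nvme_tcp_recv_skb() below keeps consuming the
 * same skb in the order PDU header -> DATA -> DDGST, derived purely from
 * the three *_remaining counters (pdu_remaining, data_remaining,
 * ddgst_remaining) that nvme_tcp_init_recv_ctx() re-arms for the next
 * PDU.
 */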

static void nvme_tcp_init_recv_ctx(struct nvme_tcp_queue *queue)
{
	queue->pdu_remaining = sizeof(struct nvme_tcp_rsp_pdu) +
				nvme_tcp_hdgst_len(queue);
	queue->pdu_offset = 0;
	queue->data_remaining = -1;
	queue->ddgst_remaining = 0;
}

static void nvme_tcp_error_recovery(struct nvme_ctrl *ctrl)
{
	if (!nvme_change_ctrl_state(ctrl, NVME_CTRL_RESETTING))
		return;

	queue_work(nvme_reset_wq, &to_tcp_ctrl(ctrl)->err_work);
}

static int nvme_tcp_process_nvme_cqe(struct nvme_tcp_queue *queue,
		struct nvme_completion *cqe)
{
	struct request *rq;

	rq = blk_mq_tag_to_rq(nvme_tcp_tagset(queue), cqe->command_id);
	if (!rq) {
		dev_err(queue->ctrl->ctrl.device,
			"queue %d tag 0x%x not found\n",
			nvme_tcp_queue_id(queue), cqe->command_id);
		nvme_tcp_error_recovery(&queue->ctrl->ctrl);
		return -EINVAL;
	}

	nvme_end_request(rq, cqe->status, cqe->result);
	queue->nr_cqe++;

	return 0;
}

static int nvme_tcp_handle_c2h_data(struct nvme_tcp_queue *queue,
		struct nvme_tcp_data_pdu *pdu)
{
	struct request *rq;

	rq = blk_mq_tag_to_rq(nvme_tcp_tagset(queue), pdu->command_id);
	if (!rq) {
		dev_err(queue->ctrl->ctrl.device,
			"queue %d tag %#x not found\n",
			nvme_tcp_queue_id(queue), pdu->command_id);
		return -ENOENT;
	}

	if (!blk_rq_payload_bytes(rq)) {
		dev_err(queue->ctrl->ctrl.device,
			"queue %d tag %#x unexpected data\n",
			nvme_tcp_queue_id(queue), rq->tag);
		return -EIO;
	}

	queue->data_remaining = le32_to_cpu(pdu->data_length);

	if (pdu->hdr.flags & NVME_TCP_F_DATA_SUCCESS &&
	    unlikely(!(pdu->hdr.flags & NVME_TCP_F_DATA_LAST))) {
		dev_err(queue->ctrl->ctrl.device,
			"queue %d tag %#x SUCCESS set but not last PDU\n",
			nvme_tcp_queue_id(queue), rq->tag);
		nvme_tcp_error_recovery(&queue->ctrl->ctrl);
		return -EPROTO;
	}

	return 0;
}

static int nvme_tcp_handle_comp(struct nvme_tcp_queue *queue,
		struct nvme_tcp_rsp_pdu *pdu)
{
	struct nvme_completion *cqe = &pdu->cqe;
	int ret = 0;

	/*
	 * AEN requests are special as they don't time out and can
	 * survive any kind of queue freeze and often don't respond to
	 * aborts.  We don't even bother to allocate a struct request
	 * for them but rather special case them here.
	 */
	if (unlikely(nvme_is_aen_req(nvme_tcp_queue_id(queue),
				     cqe->command_id)))
		nvme_complete_async_event(&queue->ctrl->ctrl, cqe->status,
				&cqe->result);
	else
		ret = nvme_tcp_process_nvme_cqe(queue, cqe);

	return ret;
}

static int nvme_tcp_setup_h2c_data_pdu(struct nvme_tcp_request *req,
		struct nvme_tcp_r2t_pdu *pdu)
{
	struct nvme_tcp_data_pdu *data = req->pdu;
	struct nvme_tcp_queue *queue = req->queue;
	struct request *rq = blk_mq_rq_from_pdu(req);
	u8 hdgst = nvme_tcp_hdgst_len(queue);
	u8 ddgst = nvme_tcp_ddgst_len(queue);

	req->pdu_len = le32_to_cpu(pdu->r2t_length);
	req->pdu_sent = 0;

	if (unlikely(req->data_sent + req->pdu_len > req->data_len)) {
		dev_err(queue->ctrl->ctrl.device,
			"req %d r2t len %u exceeded data len %u (%zu sent)\n",
			rq->tag, req->pdu_len, req->data_len,
			req->data_sent);
		return -EPROTO;
	}

	if (unlikely(le32_to_cpu(pdu->r2t_offset) < req->data_sent)) {
		dev_err(queue->ctrl->ctrl.device,
			"req %d unexpected r2t offset %u (expected %zu)\n",
			rq->tag, le32_to_cpu(pdu->r2t_offset),
			req->data_sent);
		return -EPROTO;
	}

	memset(data, 0, sizeof(*data));
	data->hdr.type = nvme_tcp_h2c_data;
	data->hdr.flags = NVME_TCP_F_DATA_LAST;
	if (queue->hdr_digest)
		data->hdr.flags |= NVME_TCP_F_HDGST;
	if (queue->data_digest)
		data->hdr.flags |= NVME_TCP_F_DDGST;
	data->hdr.hlen = sizeof(*data);
	data->hdr.pdo = data->hdr.hlen + hdgst;
	data->hdr.plen =
		cpu_to_le32(data->hdr.hlen + hdgst + req->pdu_len + ddgst);
	data->ttag = pdu->ttag;
	data->command_id = rq->tag;
	data->data_offset = cpu_to_le32(req->data_sent);
	data->data_length = cpu_to_le32(req->pdu_len);
	return 0;
}
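
/*
 * Write flow with R2T, roughly: the host sends the command capsule (with
 * or without inline data), the controller replies with an R2T PDU naming
 * a transfer tag, offset and length, and the function above turns that
 * R2T into the H2CData PDU header that precedes the requested slice of
 * payload.  This driver advertises maxr2t = 0 in the ICReq, i.e. a
 * single outstanding R2T per command.
 */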

static int nvme_tcp_handle_r2t(struct nvme_tcp_queue *queue,
		struct nvme_tcp_r2t_pdu *pdu)
{
	struct nvme_tcp_request *req;
	struct request *rq;
	int ret;

	rq = blk_mq_tag_to_rq(nvme_tcp_tagset(queue), pdu->command_id);
	if (!rq) {
		dev_err(queue->ctrl->ctrl.device,
			"queue %d tag %#x not found\n",
			nvme_tcp_queue_id(queue), pdu->command_id);
		return -ENOENT;
	}
	req = blk_mq_rq_to_pdu(rq);

	ret = nvme_tcp_setup_h2c_data_pdu(req, pdu);
	if (unlikely(ret))
		return ret;

	req->state = NVME_TCP_SEND_H2C_PDU;
	req->offset = 0;

	nvme_tcp_queue_request(req, false);

	return 0;
}

static int nvme_tcp_recv_pdu(struct nvme_tcp_queue *queue, struct sk_buff *skb,
		unsigned int *offset, size_t *len)
{
	struct nvme_tcp_hdr *hdr;
	char *pdu = queue->pdu;
	size_t rcv_len = min_t(size_t, *len, queue->pdu_remaining);
	int ret;

	ret = skb_copy_bits(skb, *offset,
		&pdu[queue->pdu_offset], rcv_len);
	if (unlikely(ret))
		return ret;

	queue->pdu_remaining -= rcv_len;
	queue->pdu_offset += rcv_len;
	*offset += rcv_len;
	*len -= rcv_len;
	if (queue->pdu_remaining)
		return 0;

	hdr = queue->pdu;
	if (queue->hdr_digest) {
		ret = nvme_tcp_verify_hdgst(queue, queue->pdu, hdr->hlen);
		if (unlikely(ret))
			return ret;
	}

	if (queue->data_digest) {
		ret = nvme_tcp_check_ddgst(queue, queue->pdu);
		if (unlikely(ret))
			return ret;
	}

	switch (hdr->type) {
	case nvme_tcp_c2h_data:
		return nvme_tcp_handle_c2h_data(queue, (void *)queue->pdu);
	case nvme_tcp_rsp:
		nvme_tcp_init_recv_ctx(queue);
		return nvme_tcp_handle_comp(queue, (void *)queue->pdu);
	case nvme_tcp_r2t:
		nvme_tcp_init_recv_ctx(queue);
		return nvme_tcp_handle_r2t(queue, (void *)queue->pdu);
	default:
		dev_err(queue->ctrl->ctrl.device,
			"unsupported pdu type (%d)\n", hdr->type);
		return -EINVAL;
	}
}
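
/*
 * Note that a PDU header may be split across TCP segments:
 * nvme_tcp_recv_pdu() copies whatever part of the header this skb holds
 * and simply returns while pdu_remaining is still non-zero; only once
 * the header (and its optional digest) is complete does it dispatch on
 * hdr->type.
 */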

static inline void nvme_tcp_end_request(struct request *rq, u16 status)
{
	union nvme_result res = {};

	nvme_end_request(rq, cpu_to_le16(status << 1), res);
}

static int nvme_tcp_recv_data(struct nvme_tcp_queue *queue, struct sk_buff *skb,
		unsigned int *offset, size_t *len)
{
	struct nvme_tcp_data_pdu *pdu = (void *)queue->pdu;
	struct nvme_tcp_request *req;
	struct request *rq;

	rq = blk_mq_tag_to_rq(nvme_tcp_tagset(queue), pdu->command_id);
	if (!rq) {
		dev_err(queue->ctrl->ctrl.device,
			"queue %d tag %#x not found\n",
			nvme_tcp_queue_id(queue), pdu->command_id);
		return -ENOENT;
	}
	req = blk_mq_rq_to_pdu(rq);

	while (true) {
		int recv_len, ret;

		recv_len = min_t(size_t, *len, queue->data_remaining);
		if (!recv_len)
			break;

		if (!iov_iter_count(&req->iter)) {
			req->curr_bio = req->curr_bio->bi_next;

			/*
			 * If we don't have any bios it means that the
			 * controller sent more data than we requested,
			 * hence error
			 */
			if (!req->curr_bio) {
				dev_err(queue->ctrl->ctrl.device,
					"queue %d no space in request %#x",
					nvme_tcp_queue_id(queue), rq->tag);
				nvme_tcp_init_recv_ctx(queue);
				return -EIO;
			}
			nvme_tcp_init_iter(req, READ);
		}

		/* we can read only from what is left in this bio */
		recv_len = min_t(size_t, recv_len,
				iov_iter_count(&req->iter));

		if (queue->data_digest)
			ret = skb_copy_and_hash_datagram_iter(skb, *offset,
				&req->iter, recv_len, queue->rcv_hash);
		else
			ret = skb_copy_datagram_iter(skb, *offset,
					&req->iter, recv_len);
		if (ret) {
			dev_err(queue->ctrl->ctrl.device,
				"queue %d failed to copy request %#x data",
				nvme_tcp_queue_id(queue), rq->tag);
			return ret;
		}

		*len -= recv_len;
		*offset += recv_len;
		queue->data_remaining -= recv_len;
	}

	if (!queue->data_remaining) {
		if (queue->data_digest) {
			nvme_tcp_ddgst_final(queue->rcv_hash, &queue->exp_ddgst);
			queue->ddgst_remaining = NVME_TCP_DIGEST_LENGTH;
		} else {
			if (pdu->hdr.flags & NVME_TCP_F_DATA_SUCCESS) {
				nvme_tcp_end_request(rq, NVME_SC_SUCCESS);
				queue->nr_cqe++;
			}
			nvme_tcp_init_recv_ctx(queue);
		}
	}

	return 0;
}
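
/*
 * When the controller sets NVME_TCP_F_DATA_SUCCESS in a C2HData PDU it
 * will not send a separate response capsule, so the request is completed
 * directly here (or in nvme_tcp_recv_ddgst() when data digest is
 * enabled) once the last payload byte has been consumed.
 */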

static int nvme_tcp_recv_ddgst(struct nvme_tcp_queue *queue,
		struct sk_buff *skb, unsigned int *offset, size_t *len)
{
	struct nvme_tcp_data_pdu *pdu = (void *)queue->pdu;
	char *ddgst = (char *)&queue->recv_ddgst;
	size_t recv_len = min_t(size_t, *len, queue->ddgst_remaining);
	off_t off = NVME_TCP_DIGEST_LENGTH - queue->ddgst_remaining;
	int ret;

	ret = skb_copy_bits(skb, *offset, &ddgst[off], recv_len);
	if (unlikely(ret))
		return ret;

	queue->ddgst_remaining -= recv_len;
	*offset += recv_len;
	*len -= recv_len;
	if (queue->ddgst_remaining)
		return 0;

	if (queue->recv_ddgst != queue->exp_ddgst) {
		dev_err(queue->ctrl->ctrl.device,
			"data digest error: recv %#x expected %#x\n",
			le32_to_cpu(queue->recv_ddgst),
			le32_to_cpu(queue->exp_ddgst));
		return -EIO;
	}

	if (pdu->hdr.flags & NVME_TCP_F_DATA_SUCCESS) {
		struct request *rq = blk_mq_tag_to_rq(nvme_tcp_tagset(queue),
						pdu->command_id);

		nvme_tcp_end_request(rq, NVME_SC_SUCCESS);
		queue->nr_cqe++;
	}

	nvme_tcp_init_recv_ctx(queue);
	return 0;
}

777static int nvme_tcp_recv_skb(read_descriptor_t *desc, struct sk_buff *skb,
778 unsigned int offset, size_t len)
779{
780 struct nvme_tcp_queue *queue = desc->arg.data;
781 size_t consumed = len;
782 int result;
783
784 while (len) {
785 switch (nvme_tcp_recv_state(queue)) {
786 case NVME_TCP_RECV_PDU:
787 result = nvme_tcp_recv_pdu(queue, skb, &offset, &len);
788 break;
789 case NVME_TCP_RECV_DATA:
790 result = nvme_tcp_recv_data(queue, skb, &offset, &len);
791 break;
792 case NVME_TCP_RECV_DDGST:
793 result = nvme_tcp_recv_ddgst(queue, skb, &offset, &len);
794 break;
795 default:
796 result = -EFAULT;
797 }
798 if (result) {
799 dev_err(queue->ctrl->ctrl.device,
800 "receive failed: %d\n", result);
801 queue->rd_enabled = false;
802 nvme_tcp_error_recovery(&queue->ctrl->ctrl);
803 return result;
804 }
805 }
806
807 return consumed;
808}
809
810static void nvme_tcp_data_ready(struct sock *sk)
811{
812 struct nvme_tcp_queue *queue;
813
Sagi Grimberg386e5e62020-04-30 13:59:32 -0700814 read_lock_bh(&sk->sk_callback_lock);
Sagi Grimberg3f2304f2018-12-03 17:52:17 -0800815 queue = sk->sk_user_data;
Sagi Grimberg72e5d752020-05-01 14:25:44 -0700816 if (likely(queue && queue->rd_enabled) &&
817 !test_bit(NVME_TCP_Q_POLLING, &queue->flags))
Sagi Grimberg3f2304f2018-12-03 17:52:17 -0800818 queue_work_on(queue->io_cpu, nvme_tcp_wq, &queue->io_work);
Sagi Grimberg386e5e62020-04-30 13:59:32 -0700819 read_unlock_bh(&sk->sk_callback_lock);
Sagi Grimberg3f2304f2018-12-03 17:52:17 -0800820}
821
822static void nvme_tcp_write_space(struct sock *sk)
823{
824 struct nvme_tcp_queue *queue;
825
826 read_lock_bh(&sk->sk_callback_lock);
827 queue = sk->sk_user_data;
828 if (likely(queue && sk_stream_is_writeable(sk))) {
829 clear_bit(SOCK_NOSPACE, &sk->sk_socket->flags);
830 queue_work_on(queue->io_cpu, nvme_tcp_wq, &queue->io_work);
831 }
832 read_unlock_bh(&sk->sk_callback_lock);
833}
834
835static void nvme_tcp_state_change(struct sock *sk)
836{
837 struct nvme_tcp_queue *queue;
838
839 read_lock(&sk->sk_callback_lock);
840 queue = sk->sk_user_data;
841 if (!queue)
842 goto done;
843
844 switch (sk->sk_state) {
845 case TCP_CLOSE:
846 case TCP_CLOSE_WAIT:
847 case TCP_LAST_ACK:
848 case TCP_FIN_WAIT1:
849 case TCP_FIN_WAIT2:
850 /* fallthrough */
851 nvme_tcp_error_recovery(&queue->ctrl->ctrl);
852 break;
853 default:
854 dev_info(queue->ctrl->ctrl.device,
855 "queue %d socket state %d\n",
856 nvme_tcp_queue_id(queue), sk->sk_state);
857 }
858
859 queue->state_change(sk);
860done:
861 read_unlock(&sk->sk_callback_lock);
862}
863
864static inline void nvme_tcp_done_send_req(struct nvme_tcp_queue *queue)
865{
866 queue->request = NULL;
867}
868
869static void nvme_tcp_fail_request(struct nvme_tcp_request *req)
870{
Sagi Grimberg16686012019-08-02 18:17:52 -0700871 nvme_tcp_end_request(blk_mq_rq_from_pdu(req), NVME_SC_HOST_PATH_ERROR);
Sagi Grimberg3f2304f2018-12-03 17:52:17 -0800872}
873
874static int nvme_tcp_try_send_data(struct nvme_tcp_request *req)
875{
876 struct nvme_tcp_queue *queue = req->queue;
877
878 while (true) {
879 struct page *page = nvme_tcp_req_cur_page(req);
880 size_t offset = nvme_tcp_req_cur_offset(req);
881 size_t len = nvme_tcp_req_cur_length(req);
882 bool last = nvme_tcp_pdu_last_send(req, len);
883 int ret, flags = MSG_DONTWAIT;
884
885 if (last && !queue->data_digest)
886 flags |= MSG_EOR;
887 else
Sagi Grimberg5bb052d2020-05-04 22:20:01 -0700888 flags |= MSG_MORE | MSG_SENDPAGE_NOTLAST;
Sagi Grimberg3f2304f2018-12-03 17:52:17 -0800889
Mikhail Skorzhinskii37c15212019-07-08 12:31:29 +0200890 /* can't zcopy slab pages */
891 if (unlikely(PageSlab(page))) {
892 ret = sock_no_sendpage(queue->sock, page, offset, len,
893 flags);
894 } else {
895 ret = kernel_sendpage(queue->sock, page, offset, len,
896 flags);
897 }
Sagi Grimberg3f2304f2018-12-03 17:52:17 -0800898 if (ret <= 0)
899 return ret;
900
901 nvme_tcp_advance_req(req, ret);
902 if (queue->data_digest)
903 nvme_tcp_ddgst_update(queue->snd_hash, page,
904 offset, ret);
905
906 /* fully successful last write*/
907 if (last && ret == len) {
908 if (queue->data_digest) {
909 nvme_tcp_ddgst_final(queue->snd_hash,
910 &req->ddgst);
911 req->state = NVME_TCP_SEND_DDGST;
912 req->offset = 0;
913 } else {
914 nvme_tcp_done_send_req(queue);
915 }
916 return 1;
917 }
918 }
919 return -EAGAIN;
920}
921
922static int nvme_tcp_try_send_cmd_pdu(struct nvme_tcp_request *req)
923{
924 struct nvme_tcp_queue *queue = req->queue;
925 struct nvme_tcp_cmd_pdu *pdu = req->pdu;
926 bool inline_data = nvme_tcp_has_inline_data(req);
Sagi Grimberg3f2304f2018-12-03 17:52:17 -0800927 u8 hdgst = nvme_tcp_hdgst_len(queue);
928 int len = sizeof(*pdu) + hdgst - req->offset;
Sagi Grimberg5bb052d2020-05-04 22:20:01 -0700929 int flags = MSG_DONTWAIT;
Sagi Grimberg3f2304f2018-12-03 17:52:17 -0800930 int ret;
931
Sagi Grimberg5bb052d2020-05-04 22:20:01 -0700932 if (inline_data)
933 flags |= MSG_MORE | MSG_SENDPAGE_NOTLAST;
934 else
935 flags |= MSG_EOR;
936
Sagi Grimberg3f2304f2018-12-03 17:52:17 -0800937 if (queue->hdr_digest && !req->offset)
938 nvme_tcp_hdgst(queue->snd_hash, pdu, sizeof(*pdu));
939
940 ret = kernel_sendpage(queue->sock, virt_to_page(pdu),
941 offset_in_page(pdu) + req->offset, len, flags);
942 if (unlikely(ret <= 0))
943 return ret;
944
945 len -= ret;
946 if (!len) {
947 if (inline_data) {
948 req->state = NVME_TCP_SEND_DATA;
949 if (queue->data_digest)
950 crypto_ahash_init(queue->snd_hash);
951 nvme_tcp_init_iter(req, WRITE);
952 } else {
953 nvme_tcp_done_send_req(queue);
954 }
955 return 1;
956 }
957 req->offset += ret;
958
959 return -EAGAIN;
960}
961
962static int nvme_tcp_try_send_data_pdu(struct nvme_tcp_request *req)
963{
964 struct nvme_tcp_queue *queue = req->queue;
965 struct nvme_tcp_data_pdu *pdu = req->pdu;
966 u8 hdgst = nvme_tcp_hdgst_len(queue);
967 int len = sizeof(*pdu) - req->offset + hdgst;
968 int ret;
969
970 if (queue->hdr_digest && !req->offset)
971 nvme_tcp_hdgst(queue->snd_hash, pdu, sizeof(*pdu));
972
973 ret = kernel_sendpage(queue->sock, virt_to_page(pdu),
974 offset_in_page(pdu) + req->offset, len,
Sagi Grimberg5bb052d2020-05-04 22:20:01 -0700975 MSG_DONTWAIT | MSG_MORE | MSG_SENDPAGE_NOTLAST);
Sagi Grimberg3f2304f2018-12-03 17:52:17 -0800976 if (unlikely(ret <= 0))
977 return ret;
978
979 len -= ret;
980 if (!len) {
981 req->state = NVME_TCP_SEND_DATA;
982 if (queue->data_digest)
983 crypto_ahash_init(queue->snd_hash);
984 if (!req->data_sent)
985 nvme_tcp_init_iter(req, WRITE);
986 return 1;
987 }
988 req->offset += ret;
989
990 return -EAGAIN;
991}
992
993static int nvme_tcp_try_send_ddgst(struct nvme_tcp_request *req)
994{
995 struct nvme_tcp_queue *queue = req->queue;
996 int ret;
997 struct msghdr msg = { .msg_flags = MSG_DONTWAIT | MSG_EOR };
998 struct kvec iov = {
999 .iov_base = &req->ddgst + req->offset,
1000 .iov_len = NVME_TCP_DIGEST_LENGTH - req->offset
1001 };
1002
1003 ret = kernel_sendmsg(queue->sock, &msg, &iov, 1, iov.iov_len);
1004 if (unlikely(ret <= 0))
1005 return ret;
1006
1007 if (req->offset + ret == NVME_TCP_DIGEST_LENGTH) {
1008 nvme_tcp_done_send_req(queue);
1009 return 1;
1010 }
1011
1012 req->offset += ret;
1013 return -EAGAIN;
1014}
1015
1016static int nvme_tcp_try_send(struct nvme_tcp_queue *queue)
1017{
1018 struct nvme_tcp_request *req;
1019 int ret = 1;
1020
1021 if (!queue->request) {
1022 queue->request = nvme_tcp_fetch_request(queue);
1023 if (!queue->request)
1024 return 0;
1025 }
1026 req = queue->request;
1027
1028 if (req->state == NVME_TCP_SEND_CMD_PDU) {
1029 ret = nvme_tcp_try_send_cmd_pdu(req);
1030 if (ret <= 0)
1031 goto done;
1032 if (!nvme_tcp_has_inline_data(req))
1033 return ret;
1034 }
1035
1036 if (req->state == NVME_TCP_SEND_H2C_PDU) {
1037 ret = nvme_tcp_try_send_data_pdu(req);
1038 if (ret <= 0)
1039 goto done;
1040 }
1041
1042 if (req->state == NVME_TCP_SEND_DATA) {
1043 ret = nvme_tcp_try_send_data(req);
1044 if (ret <= 0)
1045 goto done;
1046 }
1047
1048 if (req->state == NVME_TCP_SEND_DDGST)
1049 ret = nvme_tcp_try_send_ddgst(req);
1050done:
Sagi Grimberg5ff4e112020-02-25 16:43:23 -08001051 if (ret == -EAGAIN) {
Sagi Grimberg3f2304f2018-12-03 17:52:17 -08001052 ret = 0;
Sagi Grimberg5ff4e112020-02-25 16:43:23 -08001053 } else if (ret < 0) {
1054 dev_err(queue->ctrl->ctrl.device,
1055 "failed to send request %d\n", ret);
1056 if (ret != -EPIPE && ret != -ECONNRESET)
1057 nvme_tcp_fail_request(queue->request);
1058 nvme_tcp_done_send_req(queue);
1059 }
Sagi Grimberg3f2304f2018-12-03 17:52:17 -08001060 return ret;
1061}
1062
1063static int nvme_tcp_try_recv(struct nvme_tcp_queue *queue)
1064{
Potnuri Bharat Teja10407ec2019-07-08 15:22:00 +05301065 struct socket *sock = queue->sock;
1066 struct sock *sk = sock->sk;
Sagi Grimberg3f2304f2018-12-03 17:52:17 -08001067 read_descriptor_t rd_desc;
1068 int consumed;
1069
1070 rd_desc.arg.data = queue;
1071 rd_desc.count = 1;
1072 lock_sock(sk);
Sagi Grimberg1a9460c2019-07-03 14:08:04 -07001073 queue->nr_cqe = 0;
Potnuri Bharat Teja10407ec2019-07-08 15:22:00 +05301074 consumed = sock->ops->read_sock(sk, &rd_desc, nvme_tcp_recv_skb);
Sagi Grimberg3f2304f2018-12-03 17:52:17 -08001075 release_sock(sk);
1076 return consumed;
1077}
1078
static void nvme_tcp_io_work(struct work_struct *w)
{
	struct nvme_tcp_queue *queue =
		container_of(w, struct nvme_tcp_queue, io_work);
	unsigned long deadline = jiffies + msecs_to_jiffies(1);

	do {
		bool pending = false;
		int result;

		if (mutex_trylock(&queue->send_mutex)) {
			result = nvme_tcp_try_send(queue);
			mutex_unlock(&queue->send_mutex);
			if (result > 0)
				pending = true;
			else if (unlikely(result < 0))
				break;
		}

		result = nvme_tcp_try_recv(queue);
		if (result > 0)
			pending = true;
		else if (unlikely(result < 0))
			return;

		if (!pending)
			return;

	} while (!time_after(jiffies, deadline)); /* quota is exhausted */

	queue_work_on(queue->io_cpu, nvme_tcp_wq, &queue->io_work);
}
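
/*
 * io_work processing budget: send and receive are retried in a loop for
 * at most ~1ms (the deadline armed above); if there is still work
 * pending when the quota runs out, the work item is simply requeued on
 * the same CPU instead of monopolizing it.
 */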
1111
1112static void nvme_tcp_free_crypto(struct nvme_tcp_queue *queue)
1113{
1114 struct crypto_ahash *tfm = crypto_ahash_reqtfm(queue->rcv_hash);
1115
1116 ahash_request_free(queue->rcv_hash);
1117 ahash_request_free(queue->snd_hash);
1118 crypto_free_ahash(tfm);
1119}
1120
1121static int nvme_tcp_alloc_crypto(struct nvme_tcp_queue *queue)
1122{
1123 struct crypto_ahash *tfm;
1124
1125 tfm = crypto_alloc_ahash("crc32c", 0, CRYPTO_ALG_ASYNC);
1126 if (IS_ERR(tfm))
1127 return PTR_ERR(tfm);
1128
1129 queue->snd_hash = ahash_request_alloc(tfm, GFP_KERNEL);
1130 if (!queue->snd_hash)
1131 goto free_tfm;
1132 ahash_request_set_callback(queue->snd_hash, 0, NULL, NULL);
1133
1134 queue->rcv_hash = ahash_request_alloc(tfm, GFP_KERNEL);
1135 if (!queue->rcv_hash)
1136 goto free_snd_hash;
1137 ahash_request_set_callback(queue->rcv_hash, 0, NULL, NULL);
1138
1139 return 0;
1140free_snd_hash:
1141 ahash_request_free(queue->snd_hash);
1142free_tfm:
1143 crypto_free_ahash(tfm);
1144 return -ENOMEM;
1145}
1146
1147static void nvme_tcp_free_async_req(struct nvme_tcp_ctrl *ctrl)
1148{
1149 struct nvme_tcp_request *async = &ctrl->async_req;
1150
1151 page_frag_free(async->pdu);
1152}
1153
1154static int nvme_tcp_alloc_async_req(struct nvme_tcp_ctrl *ctrl)
1155{
1156 struct nvme_tcp_queue *queue = &ctrl->queues[0];
1157 struct nvme_tcp_request *async = &ctrl->async_req;
1158 u8 hdgst = nvme_tcp_hdgst_len(queue);
1159
1160 async->pdu = page_frag_alloc(&queue->pf_cache,
1161 sizeof(struct nvme_tcp_cmd_pdu) + hdgst,
1162 GFP_KERNEL | __GFP_ZERO);
1163 if (!async->pdu)
1164 return -ENOMEM;
1165
1166 async->queue = &ctrl->queues[0];
1167 return 0;
1168}
1169
1170static void nvme_tcp_free_queue(struct nvme_ctrl *nctrl, int qid)
1171{
1172 struct nvme_tcp_ctrl *ctrl = to_tcp_ctrl(nctrl);
1173 struct nvme_tcp_queue *queue = &ctrl->queues[qid];
1174
1175 if (!test_and_clear_bit(NVME_TCP_Q_ALLOCATED, &queue->flags))
1176 return;
1177
1178 if (queue->hdr_digest || queue->data_digest)
1179 nvme_tcp_free_crypto(queue);
1180
1181 sock_release(queue->sock);
1182 kfree(queue->pdu);
1183}
1184
1185static int nvme_tcp_init_connection(struct nvme_tcp_queue *queue)
1186{
1187 struct nvme_tcp_icreq_pdu *icreq;
1188 struct nvme_tcp_icresp_pdu *icresp;
1189 struct msghdr msg = {};
1190 struct kvec iov;
1191 bool ctrl_hdgst, ctrl_ddgst;
1192 int ret;
1193
1194 icreq = kzalloc(sizeof(*icreq), GFP_KERNEL);
1195 if (!icreq)
1196 return -ENOMEM;
1197
1198 icresp = kzalloc(sizeof(*icresp), GFP_KERNEL);
1199 if (!icresp) {
1200 ret = -ENOMEM;
1201 goto free_icreq;
1202 }
1203
1204 icreq->hdr.type = nvme_tcp_icreq;
1205 icreq->hdr.hlen = sizeof(*icreq);
1206 icreq->hdr.pdo = 0;
1207 icreq->hdr.plen = cpu_to_le32(icreq->hdr.hlen);
1208 icreq->pfv = cpu_to_le16(NVME_TCP_PFV_1_0);
1209 icreq->maxr2t = 0; /* single inflight r2t supported */
1210 icreq->hpda = 0; /* no alignment constraint */
1211 if (queue->hdr_digest)
1212 icreq->digest |= NVME_TCP_HDR_DIGEST_ENABLE;
1213 if (queue->data_digest)
1214 icreq->digest |= NVME_TCP_DATA_DIGEST_ENABLE;
1215
1216 iov.iov_base = icreq;
1217 iov.iov_len = sizeof(*icreq);
1218 ret = kernel_sendmsg(queue->sock, &msg, &iov, 1, iov.iov_len);
1219 if (ret < 0)
1220 goto free_icresp;
1221
1222 memset(&msg, 0, sizeof(msg));
1223 iov.iov_base = icresp;
1224 iov.iov_len = sizeof(*icresp);
1225 ret = kernel_recvmsg(queue->sock, &msg, &iov, 1,
1226 iov.iov_len, msg.msg_flags);
1227 if (ret < 0)
1228 goto free_icresp;
1229
1230 ret = -EINVAL;
1231 if (icresp->hdr.type != nvme_tcp_icresp) {
1232 pr_err("queue %d: bad type returned %d\n",
1233 nvme_tcp_queue_id(queue), icresp->hdr.type);
1234 goto free_icresp;
1235 }
1236
1237 if (le32_to_cpu(icresp->hdr.plen) != sizeof(*icresp)) {
1238 pr_err("queue %d: bad pdu length returned %d\n",
1239 nvme_tcp_queue_id(queue), icresp->hdr.plen);
1240 goto free_icresp;
1241 }
1242
1243 if (icresp->pfv != NVME_TCP_PFV_1_0) {
1244 pr_err("queue %d: bad pfv returned %d\n",
1245 nvme_tcp_queue_id(queue), icresp->pfv);
1246 goto free_icresp;
1247 }
1248
1249 ctrl_ddgst = !!(icresp->digest & NVME_TCP_DATA_DIGEST_ENABLE);
1250 if ((queue->data_digest && !ctrl_ddgst) ||
1251 (!queue->data_digest && ctrl_ddgst)) {
1252 pr_err("queue %d: data digest mismatch host: %s ctrl: %s\n",
1253 nvme_tcp_queue_id(queue),
1254 queue->data_digest ? "enabled" : "disabled",
1255 ctrl_ddgst ? "enabled" : "disabled");
1256 goto free_icresp;
1257 }
1258
1259 ctrl_hdgst = !!(icresp->digest & NVME_TCP_HDR_DIGEST_ENABLE);
1260 if ((queue->hdr_digest && !ctrl_hdgst) ||
1261 (!queue->hdr_digest && ctrl_hdgst)) {
1262 pr_err("queue %d: header digest mismatch host: %s ctrl: %s\n",
1263 nvme_tcp_queue_id(queue),
1264 queue->hdr_digest ? "enabled" : "disabled",
1265 ctrl_hdgst ? "enabled" : "disabled");
1266 goto free_icresp;
1267 }
1268
1269 if (icresp->cpda != 0) {
1270 pr_err("queue %d: unsupported cpda returned %d\n",
1271 nvme_tcp_queue_id(queue), icresp->cpda);
1272 goto free_icresp;
1273 }
1274
1275 ret = 0;
1276free_icresp:
1277 kfree(icresp);
1278free_icreq:
1279 kfree(icreq);
1280 return ret;
1281}
1282
static bool nvme_tcp_admin_queue(struct nvme_tcp_queue *queue)
{
	return nvme_tcp_queue_id(queue) == 0;
}

static bool nvme_tcp_default_queue(struct nvme_tcp_queue *queue)
{
	struct nvme_tcp_ctrl *ctrl = queue->ctrl;
	int qid = nvme_tcp_queue_id(queue);

	return !nvme_tcp_admin_queue(queue) &&
		qid < 1 + ctrl->io_queues[HCTX_TYPE_DEFAULT];
}

static bool nvme_tcp_read_queue(struct nvme_tcp_queue *queue)
{
	struct nvme_tcp_ctrl *ctrl = queue->ctrl;
	int qid = nvme_tcp_queue_id(queue);

	return !nvme_tcp_admin_queue(queue) &&
		!nvme_tcp_default_queue(queue) &&
		qid < 1 + ctrl->io_queues[HCTX_TYPE_DEFAULT] +
			  ctrl->io_queues[HCTX_TYPE_READ];
}

static bool nvme_tcp_poll_queue(struct nvme_tcp_queue *queue)
{
	struct nvme_tcp_ctrl *ctrl = queue->ctrl;
	int qid = nvme_tcp_queue_id(queue);

	return !nvme_tcp_admin_queue(queue) &&
		!nvme_tcp_default_queue(queue) &&
		!nvme_tcp_read_queue(queue) &&
		qid < 1 + ctrl->io_queues[HCTX_TYPE_DEFAULT] +
			  ctrl->io_queues[HCTX_TYPE_READ] +
			  ctrl->io_queues[HCTX_TYPE_POLL];
}

static void nvme_tcp_set_queue_io_cpu(struct nvme_tcp_queue *queue)
{
	struct nvme_tcp_ctrl *ctrl = queue->ctrl;
	int qid = nvme_tcp_queue_id(queue);
	int n = 0;

	if (nvme_tcp_default_queue(queue))
		n = qid - 1;
	else if (nvme_tcp_read_queue(queue))
		n = qid - ctrl->io_queues[HCTX_TYPE_DEFAULT] - 1;
	else if (nvme_tcp_poll_queue(queue))
		n = qid - ctrl->io_queues[HCTX_TYPE_DEFAULT] -
				ctrl->io_queues[HCTX_TYPE_READ] - 1;
	queue->io_cpu = cpumask_next_wrap(n - 1, cpu_online_mask, -1, false);
}
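
/*
 * Example mapping (hypothetical configuration): with 4 default, 2 read
 * and 2 poll queues, qids 1..4 are default, 5..6 read and 7..8 poll
 * queues.  qid 5 (first read queue) yields n = 5 - 4 - 1 = 0, so it is
 * pinned to the same first online CPU as qid 1 (first default queue);
 * queues of each type are thus spread over the online CPUs
 * independently.
 */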
1336
Sagi Grimberg3f2304f2018-12-03 17:52:17 -08001337static int nvme_tcp_alloc_queue(struct nvme_ctrl *nctrl,
1338 int qid, size_t queue_size)
1339{
1340 struct nvme_tcp_ctrl *ctrl = to_tcp_ctrl(nctrl);
1341 struct nvme_tcp_queue *queue = &ctrl->queues[qid];
1342 struct linger sol = { .l_onoff = 1, .l_linger = 0 };
Sagi Grimberg40510a62020-02-25 15:53:09 -08001343 int ret, opt, rcv_pdu_size;
Sagi Grimberg3f2304f2018-12-03 17:52:17 -08001344
1345 queue->ctrl = ctrl;
1346 INIT_LIST_HEAD(&queue->send_list);
1347 spin_lock_init(&queue->lock);
Sagi Grimbergdb5ad6b2020-05-01 14:25:45 -07001348 mutex_init(&queue->send_mutex);
Sagi Grimberg3f2304f2018-12-03 17:52:17 -08001349 INIT_WORK(&queue->io_work, nvme_tcp_io_work);
1350 queue->queue_size = queue_size;
1351
1352 if (qid > 0)
Israel Rukshin9924b032019-08-18 12:08:53 +03001353 queue->cmnd_capsule_len = nctrl->ioccsz * 16;
Sagi Grimberg3f2304f2018-12-03 17:52:17 -08001354 else
1355 queue->cmnd_capsule_len = sizeof(struct nvme_command) +
1356 NVME_TCP_ADMIN_CCSZ;
1357
1358 ret = sock_create(ctrl->addr.ss_family, SOCK_STREAM,
1359 IPPROTO_TCP, &queue->sock);
1360 if (ret) {
Israel Rukshin9924b032019-08-18 12:08:53 +03001361 dev_err(nctrl->device,
Sagi Grimberg3f2304f2018-12-03 17:52:17 -08001362 "failed to create socket: %d\n", ret);
1363 return ret;
1364 }
1365
1366 /* Single syn retry */
1367 opt = 1;
1368 ret = kernel_setsockopt(queue->sock, IPPROTO_TCP, TCP_SYNCNT,
1369 (char *)&opt, sizeof(opt));
1370 if (ret) {
Israel Rukshin9924b032019-08-18 12:08:53 +03001371 dev_err(nctrl->device,
Sagi Grimberg3f2304f2018-12-03 17:52:17 -08001372 "failed to set TCP_SYNCNT sock opt %d\n", ret);
1373 goto err_sock;
1374 }
1375
1376 /* Set TCP no delay */
1377 opt = 1;
1378 ret = kernel_setsockopt(queue->sock, IPPROTO_TCP,
1379 TCP_NODELAY, (char *)&opt, sizeof(opt));
1380 if (ret) {
Israel Rukshin9924b032019-08-18 12:08:53 +03001381 dev_err(nctrl->device,
Sagi Grimberg3f2304f2018-12-03 17:52:17 -08001382 "failed to set TCP_NODELAY sock opt %d\n", ret);
1383 goto err_sock;
1384 }
1385
1386 /*
1387 * Cleanup whatever is sitting in the TCP transmit queue on socket
1388 * close. This is done to prevent stale data from being sent should
1389 * the network connection be restored before TCP times out.
1390 */
1391 ret = kernel_setsockopt(queue->sock, SOL_SOCKET, SO_LINGER,
1392 (char *)&sol, sizeof(sol));
1393 if (ret) {
Israel Rukshin9924b032019-08-18 12:08:53 +03001394 dev_err(nctrl->device,
Sagi Grimberg3f2304f2018-12-03 17:52:17 -08001395 "failed to set SO_LINGER sock opt %d\n", ret);
1396 goto err_sock;
1397 }
1398
Wunderlich, Mark9912ade2020-01-16 00:46:12 +00001399 if (so_priority > 0) {
1400 ret = kernel_setsockopt(queue->sock, SOL_SOCKET, SO_PRIORITY,
1401 (char *)&so_priority, sizeof(so_priority));
1402 if (ret) {
1403 dev_err(ctrl->ctrl.device,
1404 "failed to set SO_PRIORITY sock opt, ret %d\n",
1405 ret);
1406 goto err_sock;
1407 }
1408 }
1409
Israel Rukshinbb139852019-08-18 12:08:54 +03001410 /* Set socket type of service */
1411 if (nctrl->opts->tos >= 0) {
1412 opt = nctrl->opts->tos;
1413 ret = kernel_setsockopt(queue->sock, SOL_IP, IP_TOS,
1414 (char *)&opt, sizeof(opt));
1415 if (ret) {
1416 dev_err(nctrl->device,
1417 "failed to set IP_TOS sock opt %d\n", ret);
1418 goto err_sock;
1419 }
1420 }
1421
Sagi Grimberg3f2304f2018-12-03 17:52:17 -08001422 queue->sock->sk->sk_allocation = GFP_ATOMIC;
Sagi Grimberg40510a62020-02-25 15:53:09 -08001423 nvme_tcp_set_queue_io_cpu(queue);
Sagi Grimberg3f2304f2018-12-03 17:52:17 -08001424 queue->request = NULL;
1425 queue->data_remaining = 0;
1426 queue->ddgst_remaining = 0;
1427 queue->pdu_remaining = 0;
1428 queue->pdu_offset = 0;
1429 sk_set_memalloc(queue->sock->sk);
1430
Israel Rukshin9924b032019-08-18 12:08:53 +03001431 if (nctrl->opts->mask & NVMF_OPT_HOST_TRADDR) {
Sagi Grimberg3f2304f2018-12-03 17:52:17 -08001432 ret = kernel_bind(queue->sock, (struct sockaddr *)&ctrl->src_addr,
1433 sizeof(ctrl->src_addr));
1434 if (ret) {
Israel Rukshin9924b032019-08-18 12:08:53 +03001435 dev_err(nctrl->device,
Sagi Grimberg3f2304f2018-12-03 17:52:17 -08001436 "failed to bind queue %d socket %d\n",
1437 qid, ret);
1438 goto err_sock;
1439 }
1440 }
1441
1442 queue->hdr_digest = nctrl->opts->hdr_digest;
1443 queue->data_digest = nctrl->opts->data_digest;
1444 if (queue->hdr_digest || queue->data_digest) {
1445 ret = nvme_tcp_alloc_crypto(queue);
1446 if (ret) {
Israel Rukshin9924b032019-08-18 12:08:53 +03001447 dev_err(nctrl->device,
Sagi Grimberg3f2304f2018-12-03 17:52:17 -08001448 "failed to allocate queue %d crypto\n", qid);
1449 goto err_sock;
1450 }
1451 }
1452
1453 rcv_pdu_size = sizeof(struct nvme_tcp_rsp_pdu) +
1454 nvme_tcp_hdgst_len(queue);
1455 queue->pdu = kmalloc(rcv_pdu_size, GFP_KERNEL);
1456 if (!queue->pdu) {
1457 ret = -ENOMEM;
1458 goto err_crypto;
1459 }
1460
Israel Rukshin9924b032019-08-18 12:08:53 +03001461 dev_dbg(nctrl->device, "connecting queue %d\n",
Sagi Grimberg3f2304f2018-12-03 17:52:17 -08001462 nvme_tcp_queue_id(queue));
1463
1464 ret = kernel_connect(queue->sock, (struct sockaddr *)&ctrl->addr,
1465 sizeof(ctrl->addr), 0);
1466 if (ret) {
Israel Rukshin9924b032019-08-18 12:08:53 +03001467 dev_err(nctrl->device,
Sagi Grimberg3f2304f2018-12-03 17:52:17 -08001468 "failed to connect socket: %d\n", ret);
1469 goto err_rcv_pdu;
1470 }
1471
1472 ret = nvme_tcp_init_connection(queue);
1473 if (ret)
1474 goto err_init_connect;
1475
1476 queue->rd_enabled = true;
1477 set_bit(NVME_TCP_Q_ALLOCATED, &queue->flags);
1478 nvme_tcp_init_recv_ctx(queue);
1479
1480 write_lock_bh(&queue->sock->sk->sk_callback_lock);
1481 queue->sock->sk->sk_user_data = queue;
1482 queue->state_change = queue->sock->sk->sk_state_change;
1483 queue->data_ready = queue->sock->sk->sk_data_ready;
1484 queue->write_space = queue->sock->sk->sk_write_space;
1485 queue->sock->sk->sk_data_ready = nvme_tcp_data_ready;
1486 queue->sock->sk->sk_state_change = nvme_tcp_state_change;
1487 queue->sock->sk->sk_write_space = nvme_tcp_write_space;
Sebastian Andrzej Siewiorac1c4e12019-10-10 17:34:12 +02001488#ifdef CONFIG_NET_RX_BUSY_POLL
Sagi Grimberg1a9460c2019-07-03 14:08:04 -07001489 queue->sock->sk->sk_ll_usec = 1;
Sebastian Andrzej Siewiorac1c4e12019-10-10 17:34:12 +02001490#endif
Sagi Grimberg3f2304f2018-12-03 17:52:17 -08001491 write_unlock_bh(&queue->sock->sk->sk_callback_lock);
1492
1493 return 0;
1494
1495err_init_connect:
1496 kernel_sock_shutdown(queue->sock, SHUT_RDWR);
1497err_rcv_pdu:
1498 kfree(queue->pdu);
1499err_crypto:
1500 if (queue->hdr_digest || queue->data_digest)
1501 nvme_tcp_free_crypto(queue);
1502err_sock:
1503 sock_release(queue->sock);
1504 queue->sock = NULL;
1505 return ret;
1506}
1507
1508static void nvme_tcp_restore_sock_calls(struct nvme_tcp_queue *queue)
1509{
1510 struct socket *sock = queue->sock;
1511
1512 write_lock_bh(&sock->sk->sk_callback_lock);
1513 sock->sk->sk_user_data = NULL;
1514 sock->sk->sk_data_ready = queue->data_ready;
1515 sock->sk->sk_state_change = queue->state_change;
1516 sock->sk->sk_write_space = queue->write_space;
1517 write_unlock_bh(&sock->sk->sk_callback_lock);
1518}
1519
1520static void __nvme_tcp_stop_queue(struct nvme_tcp_queue *queue)
1521{
1522 kernel_sock_shutdown(queue->sock, SHUT_RDWR);
1523 nvme_tcp_restore_sock_calls(queue);
1524 cancel_work_sync(&queue->io_work);
1525}
1526
1527static void nvme_tcp_stop_queue(struct nvme_ctrl *nctrl, int qid)
1528{
1529 struct nvme_tcp_ctrl *ctrl = to_tcp_ctrl(nctrl);
1530 struct nvme_tcp_queue *queue = &ctrl->queues[qid];
1531
1532 if (!test_and_clear_bit(NVME_TCP_Q_LIVE, &queue->flags))
1533 return;
1534
1535 __nvme_tcp_stop_queue(queue);
1536}
1537
1538static int nvme_tcp_start_queue(struct nvme_ctrl *nctrl, int idx)
1539{
1540 struct nvme_tcp_ctrl *ctrl = to_tcp_ctrl(nctrl);
1541 int ret;
1542
1543 if (idx)
Sagi Grimberg26c68222018-12-14 11:06:08 -08001544 ret = nvmf_connect_io_queue(nctrl, idx, false);
Sagi Grimberg3f2304f2018-12-03 17:52:17 -08001545 else
1546 ret = nvmf_connect_admin_queue(nctrl);
1547
1548 if (!ret) {
1549 set_bit(NVME_TCP_Q_LIVE, &ctrl->queues[idx].flags);
1550 } else {
Sagi Grimbergf34e2582019-04-29 16:25:48 -07001551 if (test_bit(NVME_TCP_Q_ALLOCATED, &ctrl->queues[idx].flags))
1552 __nvme_tcp_stop_queue(&ctrl->queues[idx]);
Sagi Grimberg3f2304f2018-12-03 17:52:17 -08001553 dev_err(nctrl->device,
1554 "failed to connect queue: %d ret=%d\n", idx, ret);
1555 }
1556 return ret;
1557}
1558
1559static struct blk_mq_tag_set *nvme_tcp_alloc_tagset(struct nvme_ctrl *nctrl,
1560 bool admin)
1561{
1562 struct nvme_tcp_ctrl *ctrl = to_tcp_ctrl(nctrl);
1563 struct blk_mq_tag_set *set;
1564 int ret;
1565
1566 if (admin) {
1567 set = &ctrl->admin_tag_set;
1568 memset(set, 0, sizeof(*set));
1569 set->ops = &nvme_tcp_admin_mq_ops;
1570 set->queue_depth = NVME_AQ_MQ_TAG_DEPTH;
1571 set->reserved_tags = 2; /* connect + keep-alive */
1572 set->numa_node = NUMA_NO_NODE;
Sagi Grimbergdb5ad6b2020-05-01 14:25:45 -07001573 set->flags = BLK_MQ_F_BLOCKING;
Sagi Grimberg3f2304f2018-12-03 17:52:17 -08001574 set->cmd_size = sizeof(struct nvme_tcp_request);
1575 set->driver_data = ctrl;
1576 set->nr_hw_queues = 1;
1577 set->timeout = ADMIN_TIMEOUT;
1578 } else {
1579 set = &ctrl->tag_set;
1580 memset(set, 0, sizeof(*set));
1581 set->ops = &nvme_tcp_mq_ops;
1582 set->queue_depth = nctrl->sqsize + 1;
1583 set->reserved_tags = 1; /* fabric connect */
1584 set->numa_node = NUMA_NO_NODE;
Sagi Grimbergdb5ad6b2020-05-01 14:25:45 -07001585 set->flags = BLK_MQ_F_SHOULD_MERGE | BLK_MQ_F_BLOCKING;
Sagi Grimberg3f2304f2018-12-03 17:52:17 -08001586 set->cmd_size = sizeof(struct nvme_tcp_request);
1587 set->driver_data = ctrl;
1588 set->nr_hw_queues = nctrl->queue_count - 1;
1589 set->timeout = NVME_IO_TIMEOUT;
Sagi Grimberg1a9460c2019-07-03 14:08:04 -07001590 set->nr_maps = nctrl->opts->nr_poll_queues ? HCTX_MAX_TYPES : 2;
Sagi Grimberg3f2304f2018-12-03 17:52:17 -08001591 }
1592
1593 ret = blk_mq_alloc_tag_set(set);
1594 if (ret)
1595 return ERR_PTR(ret);
1596
1597 return set;
1598}
1599
1600static void nvme_tcp_free_admin_queue(struct nvme_ctrl *ctrl)
1601{
1602 if (to_tcp_ctrl(ctrl)->async_req.pdu) {
1603 nvme_tcp_free_async_req(to_tcp_ctrl(ctrl));
1604 to_tcp_ctrl(ctrl)->async_req.pdu = NULL;
1605 }
1606
1607 nvme_tcp_free_queue(ctrl, 0);
1608}
1609
1610static void nvme_tcp_free_io_queues(struct nvme_ctrl *ctrl)
1611{
1612 int i;
1613
1614 for (i = 1; i < ctrl->queue_count; i++)
1615 nvme_tcp_free_queue(ctrl, i);
1616}
1617
1618static void nvme_tcp_stop_io_queues(struct nvme_ctrl *ctrl)
1619{
1620 int i;
1621
1622 for (i = 1; i < ctrl->queue_count; i++)
1623 nvme_tcp_stop_queue(ctrl, i);
1624}
1625
1626static int nvme_tcp_start_io_queues(struct nvme_ctrl *ctrl)
1627{
1628 int i, ret = 0;
1629
1630 for (i = 1; i < ctrl->queue_count; i++) {
1631 ret = nvme_tcp_start_queue(ctrl, i);
1632 if (ret)
1633 goto out_stop_queues;
1634 }
1635
1636 return 0;
1637
1638out_stop_queues:
1639 for (i--; i >= 1; i--)
1640 nvme_tcp_stop_queue(ctrl, i);
1641 return ret;
1642}
1643
1644static int nvme_tcp_alloc_admin_queue(struct nvme_ctrl *ctrl)
1645{
1646 int ret;
1647
1648 ret = nvme_tcp_alloc_queue(ctrl, 0, NVME_AQ_DEPTH);
1649 if (ret)
1650 return ret;
1651
1652 ret = nvme_tcp_alloc_async_req(to_tcp_ctrl(ctrl));
1653 if (ret)
1654 goto out_free_queue;
1655
1656 return 0;
1657
1658out_free_queue:
1659 nvme_tcp_free_queue(ctrl, 0);
1660 return ret;
1661}
1662
Sagi Grimbergefb973b2019-04-24 11:53:19 -07001663static int __nvme_tcp_alloc_io_queues(struct nvme_ctrl *ctrl)
Sagi Grimberg3f2304f2018-12-03 17:52:17 -08001664{
1665 int i, ret;
1666
1667 for (i = 1; i < ctrl->queue_count; i++) {
1668 ret = nvme_tcp_alloc_queue(ctrl, i,
1669 ctrl->sqsize + 1);
1670 if (ret)
1671 goto out_free_queues;
1672 }
1673
1674 return 0;
1675
1676out_free_queues:
1677 for (i--; i >= 1; i--)
1678 nvme_tcp_free_queue(ctrl, i);
1679
1680 return ret;
1681}
1682
1683static unsigned int nvme_tcp_nr_io_queues(struct nvme_ctrl *ctrl)
1684{
Sagi Grimberg873946f2018-12-11 23:38:57 -08001685 unsigned int nr_io_queues;
1686
1687 nr_io_queues = min(ctrl->opts->nr_io_queues, num_online_cpus());
1688 nr_io_queues += min(ctrl->opts->nr_write_queues, num_online_cpus());
Sagi Grimberg1a9460c2019-07-03 14:08:04 -07001689 nr_io_queues += min(ctrl->opts->nr_poll_queues, num_online_cpus());
Sagi Grimberg873946f2018-12-11 23:38:57 -08001690
1691 return nr_io_queues;
Sagi Grimberg3f2304f2018-12-03 17:52:17 -08001692}
1693
Sagi Grimberg64861992019-05-28 22:49:05 -07001694static void nvme_tcp_set_io_queues(struct nvme_ctrl *nctrl,
1695 unsigned int nr_io_queues)
1696{
1697 struct nvme_tcp_ctrl *ctrl = to_tcp_ctrl(nctrl);
1698 struct nvmf_ctrl_options *opts = nctrl->opts;
1699
1700 if (opts->nr_write_queues && opts->nr_io_queues < nr_io_queues) {
1701 /*
1702 * separate read/write queues
1703 * hand out dedicated default queues only after we have
1704 * sufficient read queues.
1705 */
1706 ctrl->io_queues[HCTX_TYPE_READ] = opts->nr_io_queues;
1707 nr_io_queues -= ctrl->io_queues[HCTX_TYPE_READ];
1708 ctrl->io_queues[HCTX_TYPE_DEFAULT] =
1709 min(opts->nr_write_queues, nr_io_queues);
1710 nr_io_queues -= ctrl->io_queues[HCTX_TYPE_DEFAULT];
1711 } else {
1712 /*
1713 * shared read/write queues
1714 * either no write queues were requested, or we don't have
1715 * sufficient queue count to have dedicated default queues.
1716 */
1717 ctrl->io_queues[HCTX_TYPE_DEFAULT] =
1718 min(opts->nr_io_queues, nr_io_queues);
1719 nr_io_queues -= ctrl->io_queues[HCTX_TYPE_DEFAULT];
1720 }
Sagi Grimberg1a9460c2019-07-03 14:08:04 -07001721
1722 if (opts->nr_poll_queues && nr_io_queues) {
1723 /* map dedicated poll queues only if we have queues left */
1724 ctrl->io_queues[HCTX_TYPE_POLL] =
1725 min(opts->nr_poll_queues, nr_io_queues);
1726 }
Sagi Grimberg64861992019-05-28 22:49:05 -07001727}
1728
Sagi Grimbergefb973b2019-04-24 11:53:19 -07001729static int nvme_tcp_alloc_io_queues(struct nvme_ctrl *ctrl)
Sagi Grimberg3f2304f2018-12-03 17:52:17 -08001730{
1731 unsigned int nr_io_queues;
1732 int ret;
1733
1734 nr_io_queues = nvme_tcp_nr_io_queues(ctrl);
1735 ret = nvme_set_queue_count(ctrl, &nr_io_queues);
1736 if (ret)
1737 return ret;
1738
1739 ctrl->queue_count = nr_io_queues + 1;
1740 if (ctrl->queue_count < 2)
1741 return 0;
1742
1743 dev_info(ctrl->device,
1744 "creating %d I/O queues.\n", nr_io_queues);
1745
Sagi Grimberg64861992019-05-28 22:49:05 -07001746 nvme_tcp_set_io_queues(ctrl, nr_io_queues);
1747
Sagi Grimbergefb973b2019-04-24 11:53:19 -07001748 return __nvme_tcp_alloc_io_queues(ctrl);
Sagi Grimberg3f2304f2018-12-03 17:52:17 -08001749}
1750
1751static void nvme_tcp_destroy_io_queues(struct nvme_ctrl *ctrl, bool remove)
1752{
1753 nvme_tcp_stop_io_queues(ctrl);
1754 if (remove) {
Sagi Grimberge85037a2018-12-31 23:58:30 -08001755 blk_cleanup_queue(ctrl->connect_q);
Sagi Grimberg3f2304f2018-12-03 17:52:17 -08001756 blk_mq_free_tag_set(ctrl->tagset);
1757 }
1758 nvme_tcp_free_io_queues(ctrl);
1759}
1760
1761static int nvme_tcp_configure_io_queues(struct nvme_ctrl *ctrl, bool new)
1762{
1763 int ret;
1764
Sagi Grimbergefb973b2019-04-24 11:53:19 -07001765 ret = nvme_tcp_alloc_io_queues(ctrl);
Sagi Grimberg3f2304f2018-12-03 17:52:17 -08001766 if (ret)
1767 return ret;
1768
1769 if (new) {
1770 ctrl->tagset = nvme_tcp_alloc_tagset(ctrl, false);
1771 if (IS_ERR(ctrl->tagset)) {
1772 ret = PTR_ERR(ctrl->tagset);
1773 goto out_free_io_queues;
1774 }
1775
Sagi Grimberge85037a2018-12-31 23:58:30 -08001776 ctrl->connect_q = blk_mq_init_queue(ctrl->tagset);
1777 if (IS_ERR(ctrl->connect_q)) {
1778 ret = PTR_ERR(ctrl->connect_q);
1779 goto out_free_tag_set;
Sagi Grimberg3f2304f2018-12-03 17:52:17 -08001780 }
1781 } else {
1782 blk_mq_update_nr_hw_queues(ctrl->tagset,
1783 ctrl->queue_count - 1);
1784 }
1785
1786 ret = nvme_tcp_start_io_queues(ctrl);
1787 if (ret)
1788 goto out_cleanup_connect_q;
1789
1790 return 0;
1791
1792out_cleanup_connect_q:
Sagi Grimberge85037a2018-12-31 23:58:30 -08001793 if (new)
Sagi Grimberg3f2304f2018-12-03 17:52:17 -08001794 blk_cleanup_queue(ctrl->connect_q);
1795out_free_tag_set:
1796 if (new)
1797 blk_mq_free_tag_set(ctrl->tagset);
1798out_free_io_queues:
1799 nvme_tcp_free_io_queues(ctrl);
1800 return ret;
1801}
1802
1803static void nvme_tcp_destroy_admin_queue(struct nvme_ctrl *ctrl, bool remove)
1804{
1805 nvme_tcp_stop_queue(ctrl, 0);
1806 if (remove) {
Sagi Grimberg3f2304f2018-12-03 17:52:17 -08001807 blk_cleanup_queue(ctrl->admin_q);
Sagi Grimberge7832cb2019-08-02 19:33:59 -07001808 blk_cleanup_queue(ctrl->fabrics_q);
Sagi Grimberg3f2304f2018-12-03 17:52:17 -08001809 blk_mq_free_tag_set(ctrl->admin_tagset);
1810 }
1811 nvme_tcp_free_admin_queue(ctrl);
1812}
1813
1814static int nvme_tcp_configure_admin_queue(struct nvme_ctrl *ctrl, bool new)
1815{
1816 int error;
1817
1818 error = nvme_tcp_alloc_admin_queue(ctrl);
1819 if (error)
1820 return error;
1821
1822 if (new) {
1823 ctrl->admin_tagset = nvme_tcp_alloc_tagset(ctrl, true);
1824 if (IS_ERR(ctrl->admin_tagset)) {
1825 error = PTR_ERR(ctrl->admin_tagset);
1826 goto out_free_queue;
1827 }
1828
Sagi Grimberge7832cb2019-08-02 19:33:59 -07001829 ctrl->fabrics_q = blk_mq_init_queue(ctrl->admin_tagset);
1830 if (IS_ERR(ctrl->fabrics_q)) {
1831 error = PTR_ERR(ctrl->fabrics_q);
1832 goto out_free_tagset;
1833 }
1834
Sagi Grimberg3f2304f2018-12-03 17:52:17 -08001835 ctrl->admin_q = blk_mq_init_queue(ctrl->admin_tagset);
1836 if (IS_ERR(ctrl->admin_q)) {
1837 error = PTR_ERR(ctrl->admin_q);
Sagi Grimberge7832cb2019-08-02 19:33:59 -07001838 goto out_cleanup_fabrics_q;
Sagi Grimberg3f2304f2018-12-03 17:52:17 -08001839 }
1840 }
1841
1842 error = nvme_tcp_start_queue(ctrl, 0);
1843 if (error)
1844 goto out_cleanup_queue;
1845
Sagi Grimbergc0f2f452019-07-22 17:06:53 -07001846 error = nvme_enable_ctrl(ctrl);
Sagi Grimberg3f2304f2018-12-03 17:52:17 -08001847 if (error)
1848 goto out_stop_queue;
1849
Sagi Grimberge7832cb2019-08-02 19:33:59 -07001850 blk_mq_unquiesce_queue(ctrl->admin_q);
1851
Sagi Grimberg3f2304f2018-12-03 17:52:17 -08001852 error = nvme_init_identify(ctrl);
1853 if (error)
1854 goto out_stop_queue;
1855
1856 return 0;
1857
1858out_stop_queue:
1859 nvme_tcp_stop_queue(ctrl, 0);
1860out_cleanup_queue:
1861 if (new)
1862 blk_cleanup_queue(ctrl->admin_q);
Sagi Grimberge7832cb2019-08-02 19:33:59 -07001863out_cleanup_fabrics_q:
1864 if (new)
1865 blk_cleanup_queue(ctrl->fabrics_q);
Sagi Grimberg3f2304f2018-12-03 17:52:17 -08001866out_free_tagset:
1867 if (new)
1868 blk_mq_free_tag_set(ctrl->admin_tagset);
1869out_free_queue:
1870 nvme_tcp_free_admin_queue(ctrl);
1871 return error;
1872}
1873
1874static void nvme_tcp_teardown_admin_queue(struct nvme_ctrl *ctrl,
1875 bool remove)
1876{
1877 blk_mq_quiesce_queue(ctrl->admin_q);
1878 nvme_tcp_stop_queue(ctrl, 0);
Ming Lei622b8b62019-07-24 11:48:42 +08001879 if (ctrl->admin_tagset) {
Sagi Grimberg7a425892019-04-24 11:53:17 -07001880 blk_mq_tagset_busy_iter(ctrl->admin_tagset,
1881 nvme_cancel_request, ctrl);
Ming Lei622b8b62019-07-24 11:48:42 +08001882 blk_mq_tagset_wait_completed_request(ctrl->admin_tagset);
1883 }
Sagi Grimberge7832cb2019-08-02 19:33:59 -07001884 if (remove)
1885 blk_mq_unquiesce_queue(ctrl->admin_q);
Sagi Grimberg3f2304f2018-12-03 17:52:17 -08001886 nvme_tcp_destroy_admin_queue(ctrl, remove);
1887}
1888
1889static void nvme_tcp_teardown_io_queues(struct nvme_ctrl *ctrl,
1890 bool remove)
1891{
1892 if (ctrl->queue_count <= 1)
1893 return;
1894 nvme_stop_queues(ctrl);
1895 nvme_tcp_stop_io_queues(ctrl);
Ming Lei622b8b62019-07-24 11:48:42 +08001896 if (ctrl->tagset) {
Sagi Grimberg7a425892019-04-24 11:53:17 -07001897 blk_mq_tagset_busy_iter(ctrl->tagset,
1898 nvme_cancel_request, ctrl);
Ming Lei622b8b62019-07-24 11:48:42 +08001899 blk_mq_tagset_wait_completed_request(ctrl->tagset);
1900 }
Sagi Grimberg3f2304f2018-12-03 17:52:17 -08001901 if (remove)
1902 nvme_start_queues(ctrl);
1903 nvme_tcp_destroy_io_queues(ctrl, remove);
1904}
1905
1906static void nvme_tcp_reconnect_or_remove(struct nvme_ctrl *ctrl)
1907{
1908 /* If we are resetting/deleting then do nothing */
1909 if (ctrl->state != NVME_CTRL_CONNECTING) {
1910 WARN_ON_ONCE(ctrl->state == NVME_CTRL_NEW ||
1911 ctrl->state == NVME_CTRL_LIVE);
1912 return;
1913 }
1914
1915 if (nvmf_should_reconnect(ctrl)) {
1916 dev_info(ctrl->device, "Reconnecting in %d seconds...\n",
1917 ctrl->opts->reconnect_delay);
1918 queue_delayed_work(nvme_wq, &to_tcp_ctrl(ctrl)->connect_work,
1919 ctrl->opts->reconnect_delay * HZ);
1920 } else {
1921 dev_info(ctrl->device, "Removing controller...\n");
1922 nvme_delete_ctrl(ctrl);
1923 }
1924}
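/*
 * The reconnect policy above is driven by the fabrics options parsed from
 * the connect string. As a hypothetical example (values chosen purely for
 * illustration), passing
 *
 *	reconnect_delay=10,ctrl_loss_tmo=600
 *
 * in the /dev/nvme-fabrics connect string makes nvmf_should_reconnect()
 * allow a retry every 10 seconds until roughly 600 seconds have elapsed,
 * after which the controller is removed. A negative ctrl_loss_tmo is
 * treated by the fabrics layer as "retry forever".
 */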
1925
1926static int nvme_tcp_setup_ctrl(struct nvme_ctrl *ctrl, bool new)
1927{
1928 struct nvmf_ctrl_options *opts = ctrl->opts;
Colin Ian King312910f2019-09-05 15:34:35 +01001929 int ret;
Sagi Grimberg3f2304f2018-12-03 17:52:17 -08001930
1931 ret = nvme_tcp_configure_admin_queue(ctrl, new);
1932 if (ret)
1933 return ret;
1934
1935 	if (ctrl->icdoff) {
		ret = -EOPNOTSUPP;
1936 		dev_err(ctrl->device, "icdoff is not supported!\n");
1937 		goto destroy_admin;
1938 	}
1939
1940 if (opts->queue_size > ctrl->sqsize + 1)
1941 dev_warn(ctrl->device,
1942 "queue_size %zu > ctrl sqsize %u, clamping down\n",
1943 opts->queue_size, ctrl->sqsize + 1);
1944
1945 if (ctrl->sqsize + 1 > ctrl->maxcmd) {
1946 dev_warn(ctrl->device,
1947 "sqsize %u > ctrl maxcmd %u, clamping down\n",
1948 ctrl->sqsize + 1, ctrl->maxcmd);
1949 ctrl->sqsize = ctrl->maxcmd - 1;
1950 }
1951
1952 if (ctrl->queue_count > 1) {
1953 ret = nvme_tcp_configure_io_queues(ctrl, new);
1954 if (ret)
1955 goto destroy_admin;
1956 }
1957
1958 if (!nvme_change_ctrl_state(ctrl, NVME_CTRL_LIVE)) {
Israel Rukshinbea54ef2020-03-24 17:29:45 +02001959 /*
 1960 		 * A state change failure is ok if we're in the DELETING state,
 1961 		 * unless we are in the middle of creating a new controller;
 1962 		 * warn in that case to avoid races with the teardown flow.
1963 */
Sagi Grimberg3f2304f2018-12-03 17:52:17 -08001964 WARN_ON_ONCE(ctrl->state != NVME_CTRL_DELETING);
Israel Rukshinbea54ef2020-03-24 17:29:45 +02001965 WARN_ON_ONCE(new);
Sagi Grimberg3f2304f2018-12-03 17:52:17 -08001966 ret = -EINVAL;
1967 goto destroy_io;
1968 }
1969
1970 nvme_start_ctrl(ctrl);
1971 return 0;
1972
1973destroy_io:
1974 if (ctrl->queue_count > 1)
1975 nvme_tcp_destroy_io_queues(ctrl, new);
1976destroy_admin:
1977 nvme_tcp_stop_queue(ctrl, 0);
1978 nvme_tcp_destroy_admin_queue(ctrl, new);
1979 return ret;
1980}
1981
1982static void nvme_tcp_reconnect_ctrl_work(struct work_struct *work)
1983{
1984 struct nvme_tcp_ctrl *tcp_ctrl = container_of(to_delayed_work(work),
1985 struct nvme_tcp_ctrl, connect_work);
1986 struct nvme_ctrl *ctrl = &tcp_ctrl->ctrl;
1987
1988 ++ctrl->nr_reconnects;
1989
1990 if (nvme_tcp_setup_ctrl(ctrl, false))
1991 goto requeue;
1992
Colin Ian King56a77d22018-12-14 11:42:43 +00001993 	dev_info(ctrl->device, "Successfully reconnected (attempt %d)\n",
Sagi Grimberg3f2304f2018-12-03 17:52:17 -08001994 ctrl->nr_reconnects);
1995
1996 ctrl->nr_reconnects = 0;
1997
1998 return;
1999
2000requeue:
2001 dev_info(ctrl->device, "Failed reconnect attempt %d\n",
2002 ctrl->nr_reconnects);
2003 nvme_tcp_reconnect_or_remove(ctrl);
2004}
2005
2006static void nvme_tcp_error_recovery_work(struct work_struct *work)
2007{
2008 struct nvme_tcp_ctrl *tcp_ctrl = container_of(work,
2009 struct nvme_tcp_ctrl, err_work);
2010 struct nvme_ctrl *ctrl = &tcp_ctrl->ctrl;
2011
2012 nvme_stop_keep_alive(ctrl);
2013 nvme_tcp_teardown_io_queues(ctrl, false);
 2014 	/* unquiesce to fast-fail pending requests */
2015 nvme_start_queues(ctrl);
2016 nvme_tcp_teardown_admin_queue(ctrl, false);
Sagi Grimberge7832cb2019-08-02 19:33:59 -07002017 blk_mq_unquiesce_queue(ctrl->admin_q);
Sagi Grimberg3f2304f2018-12-03 17:52:17 -08002018
2019 if (!nvme_change_ctrl_state(ctrl, NVME_CTRL_CONNECTING)) {
2020 /* state change failure is ok if we're in DELETING state */
2021 WARN_ON_ONCE(ctrl->state != NVME_CTRL_DELETING);
2022 return;
2023 }
2024
2025 nvme_tcp_reconnect_or_remove(ctrl);
2026}
2027
2028static void nvme_tcp_teardown_ctrl(struct nvme_ctrl *ctrl, bool shutdown)
2029{
Sagi Grimberg794a4cb2019-01-01 00:19:30 -08002030 cancel_work_sync(&to_tcp_ctrl(ctrl)->err_work);
2031 cancel_delayed_work_sync(&to_tcp_ctrl(ctrl)->connect_work);
2032
Sagi Grimberg3f2304f2018-12-03 17:52:17 -08002033 nvme_tcp_teardown_io_queues(ctrl, shutdown);
Sagi Grimberge7832cb2019-08-02 19:33:59 -07002034 blk_mq_quiesce_queue(ctrl->admin_q);
Sagi Grimberg3f2304f2018-12-03 17:52:17 -08002035 if (shutdown)
2036 nvme_shutdown_ctrl(ctrl);
2037 else
Sagi Grimbergb5b05042019-07-22 17:06:54 -07002038 nvme_disable_ctrl(ctrl);
Sagi Grimberg3f2304f2018-12-03 17:52:17 -08002039 nvme_tcp_teardown_admin_queue(ctrl, shutdown);
2040}
2041
2042static void nvme_tcp_delete_ctrl(struct nvme_ctrl *ctrl)
2043{
2044 nvme_tcp_teardown_ctrl(ctrl, true);
2045}
2046
2047static void nvme_reset_ctrl_work(struct work_struct *work)
2048{
2049 struct nvme_ctrl *ctrl =
2050 container_of(work, struct nvme_ctrl, reset_work);
2051
2052 nvme_stop_ctrl(ctrl);
2053 nvme_tcp_teardown_ctrl(ctrl, false);
2054
2055 if (!nvme_change_ctrl_state(ctrl, NVME_CTRL_CONNECTING)) {
2056 /* state change failure is ok if we're in DELETING state */
2057 WARN_ON_ONCE(ctrl->state != NVME_CTRL_DELETING);
2058 return;
2059 }
2060
2061 if (nvme_tcp_setup_ctrl(ctrl, false))
2062 goto out_fail;
2063
2064 return;
2065
2066out_fail:
2067 ++ctrl->nr_reconnects;
2068 nvme_tcp_reconnect_or_remove(ctrl);
2069}
2070
Sagi Grimberg3f2304f2018-12-03 17:52:17 -08002071static void nvme_tcp_free_ctrl(struct nvme_ctrl *nctrl)
2072{
2073 struct nvme_tcp_ctrl *ctrl = to_tcp_ctrl(nctrl);
2074
2075 if (list_empty(&ctrl->list))
2076 goto free_ctrl;
2077
2078 mutex_lock(&nvme_tcp_ctrl_mutex);
2079 list_del(&ctrl->list);
2080 mutex_unlock(&nvme_tcp_ctrl_mutex);
2081
2082 nvmf_free_options(nctrl->opts);
2083free_ctrl:
2084 kfree(ctrl->queues);
2085 kfree(ctrl);
2086}
2087
2088static void nvme_tcp_set_sg_null(struct nvme_command *c)
2089{
2090 struct nvme_sgl_desc *sg = &c->common.dptr.sgl;
2091
2092 sg->addr = 0;
2093 sg->length = 0;
2094 sg->type = (NVME_TRANSPORT_SGL_DATA_DESC << 4) |
2095 NVME_SGL_FMT_TRANSPORT_A;
2096}
2097
2098static void nvme_tcp_set_sg_inline(struct nvme_tcp_queue *queue,
2099 struct nvme_command *c, u32 data_len)
2100{
2101 struct nvme_sgl_desc *sg = &c->common.dptr.sgl;
2102
2103 sg->addr = cpu_to_le64(queue->ctrl->ctrl.icdoff);
2104 sg->length = cpu_to_le32(data_len);
2105 sg->type = (NVME_SGL_FMT_DATA_DESC << 4) | NVME_SGL_FMT_OFFSET;
2106}
2107
2108static void nvme_tcp_set_sg_host_data(struct nvme_command *c,
2109 u32 data_len)
2110{
2111 struct nvme_sgl_desc *sg = &c->common.dptr.sgl;
2112
2113 sg->addr = 0;
2114 sg->length = cpu_to_le32(data_len);
2115 sg->type = (NVME_TRANSPORT_SGL_DATA_DESC << 4) |
2116 NVME_SGL_FMT_TRANSPORT_A;
2117}
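/*
 * The three helpers above cover the SGL variants NVMe/TCP uses in the
 * command capsule: a null descriptor when the command carries no data, an
 * in-capsule (inline) data descriptor whose address is the controller's
 * advertised ICDOFF, and a transport-specific host data descriptor for
 * payloads that will instead be carried by C2H/H2C data PDUs.
 */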
2118
2119static void nvme_tcp_submit_async_event(struct nvme_ctrl *arg)
2120{
2121 struct nvme_tcp_ctrl *ctrl = to_tcp_ctrl(arg);
2122 struct nvme_tcp_queue *queue = &ctrl->queues[0];
2123 struct nvme_tcp_cmd_pdu *pdu = ctrl->async_req.pdu;
2124 struct nvme_command *cmd = &pdu->cmd;
2125 u8 hdgst = nvme_tcp_hdgst_len(queue);
2126
2127 memset(pdu, 0, sizeof(*pdu));
2128 pdu->hdr.type = nvme_tcp_cmd;
2129 if (queue->hdr_digest)
2130 pdu->hdr.flags |= NVME_TCP_F_HDGST;
2131 pdu->hdr.hlen = sizeof(*pdu);
2132 pdu->hdr.plen = cpu_to_le32(pdu->hdr.hlen + hdgst);
2133
2134 cmd->common.opcode = nvme_admin_async_event;
2135 cmd->common.command_id = NVME_AQ_BLK_MQ_DEPTH;
2136 cmd->common.flags |= NVME_CMD_SGL_METABUF;
2137 nvme_tcp_set_sg_null(cmd);
2138
2139 ctrl->async_req.state = NVME_TCP_SEND_CMD_PDU;
2140 ctrl->async_req.offset = 0;
2141 ctrl->async_req.curr_bio = NULL;
2142 ctrl->async_req.data_len = 0;
2143
Sagi Grimbergdb5ad6b2020-05-01 14:25:45 -07002144 nvme_tcp_queue_request(&ctrl->async_req, true);
Sagi Grimberg3f2304f2018-12-03 17:52:17 -08002145}
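/*
 * Note on the async event command built above: it is not backed by a blk-mq
 * request, so it uses the reserved command id NVME_AQ_BLK_MQ_DEPTH, one past
 * the admin tag space. The receive path treats command ids at or above that
 * value on the admin queue as AEN completions and routes them to
 * nvme_complete_async_event() rather than to a regular request completion.
 */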
2146
2147static enum blk_eh_timer_return
2148nvme_tcp_timeout(struct request *rq, bool reserved)
2149{
2150 struct nvme_tcp_request *req = blk_mq_rq_to_pdu(rq);
2151 struct nvme_tcp_ctrl *ctrl = req->queue->ctrl;
2152 struct nvme_tcp_cmd_pdu *pdu = req->pdu;
2153
Keith Busch92b98e82019-09-05 08:09:33 -06002154 /*
2155 * Restart the timer if a controller reset is already scheduled. Any
2156 * timed out commands would be handled before entering the connecting
2157 * state.
2158 */
2159 if (ctrl->ctrl.state == NVME_CTRL_RESETTING)
2160 return BLK_EH_RESET_TIMER;
2161
Sagi Grimberg39d57752019-01-08 01:01:30 -08002162 dev_warn(ctrl->ctrl.device,
Sagi Grimberg3f2304f2018-12-03 17:52:17 -08002163 "queue %d: timeout request %#x type %d\n",
Sagi Grimberg39d57752019-01-08 01:01:30 -08002164 nvme_tcp_queue_id(req->queue), rq->tag, pdu->hdr.type);
Sagi Grimberg3f2304f2018-12-03 17:52:17 -08002165
2166 if (ctrl->ctrl.state != NVME_CTRL_LIVE) {
Sagi Grimberg39d57752019-01-08 01:01:30 -08002167 /*
 2168 		 * Tear down immediately if the controller times out while starting
 2169 		 * or if we have already started error recovery. All outstanding
 2170 		 * requests are completed on shutdown, so we return BLK_EH_DONE.
2171 */
2172 flush_work(&ctrl->err_work);
2173 nvme_tcp_teardown_io_queues(&ctrl->ctrl, false);
2174 nvme_tcp_teardown_admin_queue(&ctrl->ctrl, false);
Sagi Grimberg3f2304f2018-12-03 17:52:17 -08002175 return BLK_EH_DONE;
2176 }
2177
Sagi Grimberg39d57752019-01-08 01:01:30 -08002178 dev_warn(ctrl->ctrl.device, "starting error recovery\n");
Sagi Grimberg3f2304f2018-12-03 17:52:17 -08002179 nvme_tcp_error_recovery(&ctrl->ctrl);
2180
2181 return BLK_EH_RESET_TIMER;
2182}
2183
2184static blk_status_t nvme_tcp_map_data(struct nvme_tcp_queue *queue,
2185 struct request *rq)
2186{
2187 struct nvme_tcp_request *req = blk_mq_rq_to_pdu(rq);
2188 struct nvme_tcp_cmd_pdu *pdu = req->pdu;
2189 struct nvme_command *c = &pdu->cmd;
2190
2191 c->common.flags |= NVME_CMD_SGL_METABUF;
2192
Sagi Grimberg25e5cb72020-03-23 15:06:30 -07002193 if (!blk_rq_nr_phys_segments(rq))
2194 nvme_tcp_set_sg_null(c);
2195 else if (rq_data_dir(rq) == WRITE &&
Sagi Grimberg3f2304f2018-12-03 17:52:17 -08002196 req->data_len <= nvme_tcp_inline_data_size(queue))
2197 nvme_tcp_set_sg_inline(queue, c, req->data_len);
2198 else
2199 nvme_tcp_set_sg_host_data(c, req->data_len);
2200
2201 return 0;
2202}
2203
2204static blk_status_t nvme_tcp_setup_cmd_pdu(struct nvme_ns *ns,
2205 struct request *rq)
2206{
2207 struct nvme_tcp_request *req = blk_mq_rq_to_pdu(rq);
2208 struct nvme_tcp_cmd_pdu *pdu = req->pdu;
2209 struct nvme_tcp_queue *queue = req->queue;
2210 u8 hdgst = nvme_tcp_hdgst_len(queue), ddgst = 0;
2211 blk_status_t ret;
2212
2213 ret = nvme_setup_cmd(ns, rq, &pdu->cmd);
2214 if (ret)
2215 return ret;
2216
2217 req->state = NVME_TCP_SEND_CMD_PDU;
2218 req->offset = 0;
2219 req->data_sent = 0;
2220 req->pdu_len = 0;
2221 req->pdu_sent = 0;
Sagi Grimberg25e5cb72020-03-23 15:06:30 -07002222 req->data_len = blk_rq_nr_phys_segments(rq) ?
2223 blk_rq_payload_bytes(rq) : 0;
Sagi Grimberg3f2304f2018-12-03 17:52:17 -08002224 req->curr_bio = rq->bio;
2225
2226 if (rq_data_dir(rq) == WRITE &&
2227 req->data_len <= nvme_tcp_inline_data_size(queue))
2228 req->pdu_len = req->data_len;
2229 else if (req->curr_bio)
2230 nvme_tcp_init_iter(req, READ);
2231
2232 pdu->hdr.type = nvme_tcp_cmd;
2233 pdu->hdr.flags = 0;
2234 if (queue->hdr_digest)
2235 pdu->hdr.flags |= NVME_TCP_F_HDGST;
2236 if (queue->data_digest && req->pdu_len) {
2237 pdu->hdr.flags |= NVME_TCP_F_DDGST;
2238 ddgst = nvme_tcp_ddgst_len(queue);
2239 }
2240 pdu->hdr.hlen = sizeof(*pdu);
2241 pdu->hdr.pdo = req->pdu_len ? pdu->hdr.hlen + hdgst : 0;
2242 pdu->hdr.plen =
2243 cpu_to_le32(pdu->hdr.hlen + hdgst + req->pdu_len + ddgst);
2244
2245 ret = nvme_tcp_map_data(queue, rq);
2246 if (unlikely(ret)) {
Max Gurtovoy28a4cac2019-10-13 19:57:38 +03002247 nvme_cleanup_cmd(rq);
Sagi Grimberg3f2304f2018-12-03 17:52:17 -08002248 dev_err(queue->ctrl->ctrl.device,
2249 "Failed to map data (%d)\n", ret);
2250 return ret;
2251 }
2252
2253 return 0;
2254}
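/*
 * A worked example of the header fields computed above, assuming the usual
 * layout of an 8-byte nvme_tcp_hdr followed by a 64-byte SQE, i.e.
 * sizeof(*pdu) == 72: for a 4096-byte write that fits inline, with header
 * and data digests enabled,
 *
 *	hlen = 72, hdgst = 4, pdu_len = 4096, ddgst = 4
 *	pdo  = 72 + 4 = 76
 *	plen = 72 + 4 + 4096 + 4 = 4176
 *
 * For a read, or for a write too large for in-capsule data, pdu_len stays
 * 0, so pdo is 0 and plen covers only the header plus its digest.
 */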
2255
2256static blk_status_t nvme_tcp_queue_rq(struct blk_mq_hw_ctx *hctx,
2257 const struct blk_mq_queue_data *bd)
2258{
2259 struct nvme_ns *ns = hctx->queue->queuedata;
2260 struct nvme_tcp_queue *queue = hctx->driver_data;
2261 struct request *rq = bd->rq;
2262 struct nvme_tcp_request *req = blk_mq_rq_to_pdu(rq);
2263 bool queue_ready = test_bit(NVME_TCP_Q_LIVE, &queue->flags);
2264 blk_status_t ret;
2265
2266 if (!nvmf_check_ready(&queue->ctrl->ctrl, rq, queue_ready))
2267 return nvmf_fail_nonready_command(&queue->ctrl->ctrl, rq);
2268
2269 ret = nvme_tcp_setup_cmd_pdu(ns, rq);
2270 if (unlikely(ret))
2271 return ret;
2272
2273 blk_mq_start_request(rq);
2274
Sagi Grimbergdb5ad6b2020-05-01 14:25:45 -07002275 nvme_tcp_queue_request(req, true);
Sagi Grimberg3f2304f2018-12-03 17:52:17 -08002276
2277 return BLK_STS_OK;
2278}
2279
Sagi Grimberg873946f2018-12-11 23:38:57 -08002280static int nvme_tcp_map_queues(struct blk_mq_tag_set *set)
2281{
2282 struct nvme_tcp_ctrl *ctrl = set->driver_data;
Sagi Grimberg64861992019-05-28 22:49:05 -07002283 struct nvmf_ctrl_options *opts = ctrl->ctrl.opts;
Sagi Grimberg873946f2018-12-11 23:38:57 -08002284
Sagi Grimberg64861992019-05-28 22:49:05 -07002285 if (opts->nr_write_queues && ctrl->io_queues[HCTX_TYPE_READ]) {
Sagi Grimberg873946f2018-12-11 23:38:57 -08002286 /* separate read/write queues */
2287 set->map[HCTX_TYPE_DEFAULT].nr_queues =
Sagi Grimberg64861992019-05-28 22:49:05 -07002288 ctrl->io_queues[HCTX_TYPE_DEFAULT];
2289 set->map[HCTX_TYPE_DEFAULT].queue_offset = 0;
2290 set->map[HCTX_TYPE_READ].nr_queues =
2291 ctrl->io_queues[HCTX_TYPE_READ];
Sagi Grimberg873946f2018-12-11 23:38:57 -08002292 set->map[HCTX_TYPE_READ].queue_offset =
Sagi Grimberg64861992019-05-28 22:49:05 -07002293 ctrl->io_queues[HCTX_TYPE_DEFAULT];
Sagi Grimberg873946f2018-12-11 23:38:57 -08002294 } else {
Sagi Grimberg64861992019-05-28 22:49:05 -07002295 /* shared read/write queues */
Sagi Grimberg873946f2018-12-11 23:38:57 -08002296 set->map[HCTX_TYPE_DEFAULT].nr_queues =
Sagi Grimberg64861992019-05-28 22:49:05 -07002297 ctrl->io_queues[HCTX_TYPE_DEFAULT];
2298 set->map[HCTX_TYPE_DEFAULT].queue_offset = 0;
2299 set->map[HCTX_TYPE_READ].nr_queues =
2300 ctrl->io_queues[HCTX_TYPE_DEFAULT];
Sagi Grimberg873946f2018-12-11 23:38:57 -08002301 set->map[HCTX_TYPE_READ].queue_offset = 0;
2302 }
2303 blk_mq_map_queues(&set->map[HCTX_TYPE_DEFAULT]);
2304 blk_mq_map_queues(&set->map[HCTX_TYPE_READ]);
Sagi Grimberg64861992019-05-28 22:49:05 -07002305
Sagi Grimberg1a9460c2019-07-03 14:08:04 -07002306 if (opts->nr_poll_queues && ctrl->io_queues[HCTX_TYPE_POLL]) {
2307 /* map dedicated poll queues only if we have queues left */
2308 set->map[HCTX_TYPE_POLL].nr_queues =
2309 ctrl->io_queues[HCTX_TYPE_POLL];
2310 set->map[HCTX_TYPE_POLL].queue_offset =
2311 ctrl->io_queues[HCTX_TYPE_DEFAULT] +
2312 ctrl->io_queues[HCTX_TYPE_READ];
2313 blk_mq_map_queues(&set->map[HCTX_TYPE_POLL]);
2314 }
2315
Sagi Grimberg64861992019-05-28 22:49:05 -07002316 dev_info(ctrl->ctrl.device,
Sagi Grimberg1a9460c2019-07-03 14:08:04 -07002317 "mapped %d/%d/%d default/read/poll queues.\n",
Sagi Grimberg64861992019-05-28 22:49:05 -07002318 ctrl->io_queues[HCTX_TYPE_DEFAULT],
Sagi Grimberg1a9460c2019-07-03 14:08:04 -07002319 ctrl->io_queues[HCTX_TYPE_READ],
2320 ctrl->io_queues[HCTX_TYPE_POLL]);
Sagi Grimberg64861992019-05-28 22:49:05 -07002321
Sagi Grimberg873946f2018-12-11 23:38:57 -08002322 return 0;
2323}
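/*
 * Continuing the hypothetical 8/4/2 read/write/poll split from the queue
 * accounting helpers earlier, the resulting blk-mq maps are laid out back to
 * back in the tag set:
 *
 *	HCTX_TYPE_DEFAULT: nr_queues = 4, queue_offset = 0
 *	HCTX_TYPE_READ:    nr_queues = 8, queue_offset = 4
 *	HCTX_TYPE_POLL:    nr_queues = 2, queue_offset = 12
 *
 * With no dedicated write queues, the read map simply aliases the default
 * queues at offset 0, so reads and writes share the same hardware contexts.
 */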
2324
Sagi Grimberg1a9460c2019-07-03 14:08:04 -07002325static int nvme_tcp_poll(struct blk_mq_hw_ctx *hctx)
2326{
2327 struct nvme_tcp_queue *queue = hctx->driver_data;
2328 struct sock *sk = queue->sock->sk;
2329
Sagi Grimbergf86e5bf2020-03-23 16:43:52 -07002330 if (!test_bit(NVME_TCP_Q_LIVE, &queue->flags))
2331 return 0;
2332
Sagi Grimberg72e5d752020-05-01 14:25:44 -07002333 set_bit(NVME_TCP_Q_POLLING, &queue->flags);
Eric Dumazet3f926af2019-10-23 22:44:51 -07002334 if (sk_can_busy_loop(sk) && skb_queue_empty_lockless(&sk->sk_receive_queue))
Sagi Grimberg1a9460c2019-07-03 14:08:04 -07002335 sk_busy_loop(sk, true);
2336 nvme_tcp_try_recv(queue);
Sagi Grimberg72e5d752020-05-01 14:25:44 -07002337 clear_bit(NVME_TCP_Q_POLLING, &queue->flags);
Sagi Grimberg1a9460c2019-07-03 14:08:04 -07002338 return queue->nr_cqe;
2339}
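/*
 * A brief usage note: the poll callback above only runs for hardware
 * contexts in the HCTX_TYPE_POLL map, i.e. when the controller was connected
 * with nr_poll_queues > 0. Completions are then reaped by callers of the
 * block layer polling path (for example io_uring with IOPOLL, or preadv2()
 * with RWF_HIPRI on an O_DIRECT file) instead of via the socket data_ready
 * callback and io_work. The NVME_TCP_Q_POLLING flag tells data_ready not to
 * schedule io_work while a poller is actively reaping completions.
 */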
2340
Sagi Grimberg3f2304f2018-12-03 17:52:17 -08002341static struct blk_mq_ops nvme_tcp_mq_ops = {
2342 .queue_rq = nvme_tcp_queue_rq,
2343 .complete = nvme_complete_rq,
2344 .init_request = nvme_tcp_init_request,
2345 .exit_request = nvme_tcp_exit_request,
2346 .init_hctx = nvme_tcp_init_hctx,
2347 .timeout = nvme_tcp_timeout,
Sagi Grimberg873946f2018-12-11 23:38:57 -08002348 .map_queues = nvme_tcp_map_queues,
Sagi Grimberg1a9460c2019-07-03 14:08:04 -07002349 .poll = nvme_tcp_poll,
Sagi Grimberg3f2304f2018-12-03 17:52:17 -08002350};
2351
2352static struct blk_mq_ops nvme_tcp_admin_mq_ops = {
2353 .queue_rq = nvme_tcp_queue_rq,
2354 .complete = nvme_complete_rq,
2355 .init_request = nvme_tcp_init_request,
2356 .exit_request = nvme_tcp_exit_request,
2357 .init_hctx = nvme_tcp_init_admin_hctx,
2358 .timeout = nvme_tcp_timeout,
2359};
2360
2361static const struct nvme_ctrl_ops nvme_tcp_ctrl_ops = {
2362 .name = "tcp",
2363 .module = THIS_MODULE,
2364 .flags = NVME_F_FABRICS,
2365 .reg_read32 = nvmf_reg_read32,
2366 .reg_read64 = nvmf_reg_read64,
2367 .reg_write32 = nvmf_reg_write32,
2368 .free_ctrl = nvme_tcp_free_ctrl,
2369 .submit_async_event = nvme_tcp_submit_async_event,
2370 .delete_ctrl = nvme_tcp_delete_ctrl,
2371 .get_address = nvmf_get_address,
Sagi Grimberg3f2304f2018-12-03 17:52:17 -08002372};
2373
2374static bool
2375nvme_tcp_existing_controller(struct nvmf_ctrl_options *opts)
2376{
2377 struct nvme_tcp_ctrl *ctrl;
2378 bool found = false;
2379
2380 mutex_lock(&nvme_tcp_ctrl_mutex);
2381 list_for_each_entry(ctrl, &nvme_tcp_ctrl_list, list) {
2382 found = nvmf_ip_options_match(&ctrl->ctrl, opts);
2383 if (found)
2384 break;
2385 }
2386 mutex_unlock(&nvme_tcp_ctrl_mutex);
2387
2388 return found;
2389}
2390
2391static struct nvme_ctrl *nvme_tcp_create_ctrl(struct device *dev,
2392 struct nvmf_ctrl_options *opts)
2393{
2394 struct nvme_tcp_ctrl *ctrl;
2395 int ret;
2396
2397 ctrl = kzalloc(sizeof(*ctrl), GFP_KERNEL);
2398 if (!ctrl)
2399 return ERR_PTR(-ENOMEM);
2400
2401 INIT_LIST_HEAD(&ctrl->list);
2402 ctrl->ctrl.opts = opts;
Sagi Grimberg1a9460c2019-07-03 14:08:04 -07002403 ctrl->ctrl.queue_count = opts->nr_io_queues + opts->nr_write_queues +
2404 opts->nr_poll_queues + 1;
Sagi Grimberg3f2304f2018-12-03 17:52:17 -08002405 ctrl->ctrl.sqsize = opts->queue_size - 1;
2406 ctrl->ctrl.kato = opts->kato;
2407
2408 INIT_DELAYED_WORK(&ctrl->connect_work,
2409 nvme_tcp_reconnect_ctrl_work);
2410 INIT_WORK(&ctrl->err_work, nvme_tcp_error_recovery_work);
2411 INIT_WORK(&ctrl->ctrl.reset_work, nvme_reset_ctrl_work);
2412
2413 if (!(opts->mask & NVMF_OPT_TRSVCID)) {
2414 opts->trsvcid =
2415 kstrdup(__stringify(NVME_TCP_DISC_PORT), GFP_KERNEL);
2416 if (!opts->trsvcid) {
2417 ret = -ENOMEM;
2418 goto out_free_ctrl;
2419 }
2420 opts->mask |= NVMF_OPT_TRSVCID;
2421 }
2422
2423 ret = inet_pton_with_scope(&init_net, AF_UNSPEC,
2424 opts->traddr, opts->trsvcid, &ctrl->addr);
2425 if (ret) {
2426 pr_err("malformed address passed: %s:%s\n",
2427 opts->traddr, opts->trsvcid);
2428 goto out_free_ctrl;
2429 }
2430
2431 if (opts->mask & NVMF_OPT_HOST_TRADDR) {
2432 ret = inet_pton_with_scope(&init_net, AF_UNSPEC,
2433 opts->host_traddr, NULL, &ctrl->src_addr);
2434 if (ret) {
2435 pr_err("malformed src address passed: %s\n",
2436 opts->host_traddr);
2437 goto out_free_ctrl;
2438 }
2439 }
2440
2441 if (!opts->duplicate_connect && nvme_tcp_existing_controller(opts)) {
2442 ret = -EALREADY;
2443 goto out_free_ctrl;
2444 }
2445
Sagi Grimberg873946f2018-12-11 23:38:57 -08002446 ctrl->queues = kcalloc(ctrl->ctrl.queue_count, sizeof(*ctrl->queues),
Sagi Grimberg3f2304f2018-12-03 17:52:17 -08002447 GFP_KERNEL);
2448 if (!ctrl->queues) {
2449 ret = -ENOMEM;
2450 goto out_free_ctrl;
2451 }
2452
2453 ret = nvme_init_ctrl(&ctrl->ctrl, dev, &nvme_tcp_ctrl_ops, 0);
2454 if (ret)
2455 goto out_kfree_queues;
2456
2457 if (!nvme_change_ctrl_state(&ctrl->ctrl, NVME_CTRL_CONNECTING)) {
2458 WARN_ON_ONCE(1);
2459 ret = -EINTR;
2460 goto out_uninit_ctrl;
2461 }
2462
2463 ret = nvme_tcp_setup_ctrl(&ctrl->ctrl, true);
2464 if (ret)
2465 goto out_uninit_ctrl;
2466
2467 dev_info(ctrl->ctrl.device, "new ctrl: NQN \"%s\", addr %pISp\n",
2468 ctrl->ctrl.opts->subsysnqn, &ctrl->addr);
2469
Sagi Grimberg3f2304f2018-12-03 17:52:17 -08002470 mutex_lock(&nvme_tcp_ctrl_mutex);
2471 list_add_tail(&ctrl->list, &nvme_tcp_ctrl_list);
2472 mutex_unlock(&nvme_tcp_ctrl_mutex);
2473
2474 return &ctrl->ctrl;
2475
2476out_uninit_ctrl:
2477 nvme_uninit_ctrl(&ctrl->ctrl);
2478 nvme_put_ctrl(&ctrl->ctrl);
2479 if (ret > 0)
2480 ret = -EIO;
2481 return ERR_PTR(ret);
2482out_kfree_queues:
2483 kfree(ctrl->queues);
2484out_free_ctrl:
2485 kfree(ctrl);
2486 return ERR_PTR(ret);
2487}
2488
2489static struct nvmf_transport_ops nvme_tcp_transport = {
2490 .name = "tcp",
2491 .module = THIS_MODULE,
2492 .required_opts = NVMF_OPT_TRADDR,
2493 .allowed_opts = NVMF_OPT_TRSVCID | NVMF_OPT_RECONNECT_DELAY |
2494 NVMF_OPT_HOST_TRADDR | NVMF_OPT_CTRL_LOSS_TMO |
Sagi Grimberg873946f2018-12-11 23:38:57 -08002495 NVMF_OPT_HDR_DIGEST | NVMF_OPT_DATA_DIGEST |
Israel Rukshinbb139852019-08-18 12:08:54 +03002496 NVMF_OPT_NR_WRITE_QUEUES | NVMF_OPT_NR_POLL_QUEUES |
2497 NVMF_OPT_TOS,
Sagi Grimberg3f2304f2018-12-03 17:52:17 -08002498 .create_ctrl = nvme_tcp_create_ctrl,
2499};
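/*
 * A hypothetical connect string exercising the options this transport
 * accepts (addresses and NQN are placeholders; shown wrapped here, but it is
 * written to /dev/nvme-fabrics as a single line):
 *
 *	transport=tcp,traddr=192.0.2.10,trsvcid=4420,
 *	nqn=nqn.2014-08.org.example:subsys1,
 *	hdr_digest,data_digest,nr_write_queues=4,nr_poll_queues=2,tos=8
 *
 * When trsvcid is omitted it defaults to the NVMe/TCP discovery port
 * (NVME_TCP_DISC_PORT), and a duplicate connection to the same target is
 * rejected with -EALREADY unless duplicate_connect is set.
 */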
2500
2501static int __init nvme_tcp_init_module(void)
2502{
2503 nvme_tcp_wq = alloc_workqueue("nvme_tcp_wq",
2504 WQ_MEM_RECLAIM | WQ_HIGHPRI, 0);
2505 if (!nvme_tcp_wq)
2506 return -ENOMEM;
2507
2508 nvmf_register_transport(&nvme_tcp_transport);
2509 return 0;
2510}
2511
2512static void __exit nvme_tcp_cleanup_module(void)
2513{
2514 struct nvme_tcp_ctrl *ctrl;
2515
2516 nvmf_unregister_transport(&nvme_tcp_transport);
2517
2518 mutex_lock(&nvme_tcp_ctrl_mutex);
2519 list_for_each_entry(ctrl, &nvme_tcp_ctrl_list, list)
2520 nvme_delete_ctrl(&ctrl->ctrl);
2521 mutex_unlock(&nvme_tcp_ctrl_mutex);
2522 flush_workqueue(nvme_delete_wq);
2523
2524 destroy_workqueue(nvme_tcp_wq);
2525}
2526
2527module_init(nvme_tcp_init_module);
2528module_exit(nvme_tcp_cleanup_module);
2529
2530MODULE_LICENSE("GPL v2");