// SPDX-License-Identifier: GPL-2.0
/*
 * NVMe over Fabrics TCP host.
 * Copyright (c) 2018 Lightbits Labs. All rights reserved.
 */
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/module.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/err.h>
#include <linux/nvme-tcp.h>
#include <net/sock.h>
#include <net/tcp.h>
#include <linux/blk-mq.h>
#include <crypto/hash.h>
#include <net/busy_poll.h>

#include "nvme.h"
#include "fabrics.h"

struct nvme_tcp_queue;

/* Define the socket priority to use for connections where it is desirable
 * that the NIC consider performing optimized packet processing or filtering.
 * A non-zero value is sufficient to indicate general consideration of any
 * possible optimization. Making it a module param allows for alternative
 * values that may be unique for some NIC implementations.
 */
static int so_priority;
module_param(so_priority, int, 0644);
MODULE_PARM_DESC(so_priority, "nvme tcp socket optimize priority");

enum nvme_tcp_send_state {
	NVME_TCP_SEND_CMD_PDU = 0,
	NVME_TCP_SEND_H2C_PDU,
	NVME_TCP_SEND_DATA,
	NVME_TCP_SEND_DDGST,
};

struct nvme_tcp_request {
	struct nvme_request req;
	void *pdu;
	struct nvme_tcp_queue *queue;
	u32 data_len;
	u32 pdu_len;
	u32 pdu_sent;
	u16 ttag;
	struct list_head entry;
	struct llist_node lentry;
	__le32 ddgst;

	struct bio *curr_bio;
	struct iov_iter iter;

	/* send state */
	size_t offset;
	size_t data_sent;
	enum nvme_tcp_send_state state;
};

enum nvme_tcp_queue_flags {
	NVME_TCP_Q_ALLOCATED = 0,
	NVME_TCP_Q_LIVE = 1,
	NVME_TCP_Q_POLLING = 2,
};

enum nvme_tcp_recv_state {
	NVME_TCP_RECV_PDU = 0,
	NVME_TCP_RECV_DATA,
	NVME_TCP_RECV_DDGST,
};

struct nvme_tcp_ctrl;
struct nvme_tcp_queue {
	struct socket *sock;
	struct work_struct io_work;
	int io_cpu;

	struct mutex queue_lock;
	struct mutex send_mutex;
	struct llist_head req_list;
	struct list_head send_list;
	bool more_requests;

	/* recv state */
	void *pdu;
	int pdu_remaining;
	int pdu_offset;
	size_t data_remaining;
	size_t ddgst_remaining;
	unsigned int nr_cqe;

	/* send state */
	struct nvme_tcp_request *request;

	int queue_size;
	size_t cmnd_capsule_len;
	struct nvme_tcp_ctrl *ctrl;
	unsigned long flags;
	bool rd_enabled;

	bool hdr_digest;
	bool data_digest;
	struct ahash_request *rcv_hash;
	struct ahash_request *snd_hash;
	__le32 exp_ddgst;
	__le32 recv_ddgst;

	struct page_frag_cache pf_cache;

	void (*state_change)(struct sock *);
	void (*data_ready)(struct sock *);
	void (*write_space)(struct sock *);
};

struct nvme_tcp_ctrl {
	/* read only in the hot path */
	struct nvme_tcp_queue *queues;
	struct blk_mq_tag_set tag_set;

	/* other member variables */
	struct list_head list;
	struct blk_mq_tag_set admin_tag_set;
	struct sockaddr_storage addr;
	struct sockaddr_storage src_addr;
	struct nvme_ctrl ctrl;

	struct work_struct err_work;
	struct delayed_work connect_work;
	struct nvme_tcp_request async_req;
	u32 io_queues[HCTX_MAX_TYPES];
};

static LIST_HEAD(nvme_tcp_ctrl_list);
static DEFINE_MUTEX(nvme_tcp_ctrl_mutex);
static struct workqueue_struct *nvme_tcp_wq;
static const struct blk_mq_ops nvme_tcp_mq_ops;
static const struct blk_mq_ops nvme_tcp_admin_mq_ops;
static int nvme_tcp_try_send(struct nvme_tcp_queue *queue);

static inline struct nvme_tcp_ctrl *to_tcp_ctrl(struct nvme_ctrl *ctrl)
{
	return container_of(ctrl, struct nvme_tcp_ctrl, ctrl);
}

static inline int nvme_tcp_queue_id(struct nvme_tcp_queue *queue)
{
	return queue - queue->ctrl->queues;
}

static inline struct blk_mq_tags *nvme_tcp_tagset(struct nvme_tcp_queue *queue)
{
	u32 queue_idx = nvme_tcp_queue_id(queue);

	if (queue_idx == 0)
		return queue->ctrl->admin_tag_set.tags[queue_idx];
	return queue->ctrl->tag_set.tags[queue_idx - 1];
}

static inline u8 nvme_tcp_hdgst_len(struct nvme_tcp_queue *queue)
{
	return queue->hdr_digest ? NVME_TCP_DIGEST_LENGTH : 0;
}

static inline u8 nvme_tcp_ddgst_len(struct nvme_tcp_queue *queue)
{
	return queue->data_digest ? NVME_TCP_DIGEST_LENGTH : 0;
}

static inline size_t nvme_tcp_inline_data_size(struct nvme_tcp_queue *queue)
{
	return queue->cmnd_capsule_len - sizeof(struct nvme_command);
}

static inline bool nvme_tcp_async_req(struct nvme_tcp_request *req)
{
	return req == &req->queue->ctrl->async_req;
}

static inline bool nvme_tcp_has_inline_data(struct nvme_tcp_request *req)
{
	struct request *rq;

	if (unlikely(nvme_tcp_async_req(req)))
		return false; /* async events don't have a request */

	rq = blk_mq_rq_from_pdu(req);

	return rq_data_dir(rq) == WRITE && req->data_len &&
		req->data_len <= nvme_tcp_inline_data_size(req->queue);
}

static inline struct page *nvme_tcp_req_cur_page(struct nvme_tcp_request *req)
{
	return req->iter.bvec->bv_page;
}

static inline size_t nvme_tcp_req_cur_offset(struct nvme_tcp_request *req)
{
	return req->iter.bvec->bv_offset + req->iter.iov_offset;
}

static inline size_t nvme_tcp_req_cur_length(struct nvme_tcp_request *req)
{
	return min_t(size_t, iov_iter_single_seg_count(&req->iter),
			req->pdu_len - req->pdu_sent);
}

static inline size_t nvme_tcp_pdu_data_left(struct nvme_tcp_request *req)
{
	return rq_data_dir(blk_mq_rq_from_pdu(req)) == WRITE ?
			req->pdu_len - req->pdu_sent : 0;
}

static inline size_t nvme_tcp_pdu_last_send(struct nvme_tcp_request *req,
		int len)
{
	return nvme_tcp_pdu_data_left(req) <= len;
}

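/*
 * Set up req->iter as a bvec iov_iter over the request payload: either the
 * single special payload vector (RQF_SPECIAL_PAYLOAD, e.g. discard) or the
 * bvecs of the current bio, starting at the bio's current completion offset.
 */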
static void nvme_tcp_init_iter(struct nvme_tcp_request *req,
		unsigned int dir)
{
	struct request *rq = blk_mq_rq_from_pdu(req);
	struct bio_vec *vec;
	unsigned int size;
	int nr_bvec;
	size_t offset;

	if (rq->rq_flags & RQF_SPECIAL_PAYLOAD) {
		vec = &rq->special_vec;
		nr_bvec = 1;
		size = blk_rq_payload_bytes(rq);
		offset = 0;
	} else {
		struct bio *bio = req->curr_bio;
		struct bvec_iter bi;
		struct bio_vec bv;

		vec = __bvec_iter_bvec(bio->bi_io_vec, bio->bi_iter);
		nr_bvec = 0;
		bio_for_each_bvec(bv, bio, bi) {
			nr_bvec++;
		}
		size = bio->bi_iter.bi_size;
		offset = bio->bi_iter.bi_bvec_done;
	}

	iov_iter_bvec(&req->iter, dir, vec, nr_bvec, size);
	req->iter.iov_offset = offset;
}

static inline void nvme_tcp_advance_req(struct nvme_tcp_request *req,
		int len)
{
	req->data_sent += len;
	req->pdu_sent += len;
	iov_iter_advance(&req->iter, len);
	if (!iov_iter_count(&req->iter) &&
	    req->data_sent < req->data_len) {
		req->curr_bio = req->curr_bio->bi_next;
		nvme_tcp_init_iter(req, WRITE);
	}
}

static inline void nvme_tcp_send_all(struct nvme_tcp_queue *queue)
{
	int ret;

	/* drain the send queue as much as we can... */
	do {
		ret = nvme_tcp_try_send(queue);
	} while (ret > 0);
}

static inline void nvme_tcp_queue_request(struct nvme_tcp_request *req,
		bool sync, bool last)
{
	struct nvme_tcp_queue *queue = req->queue;
	bool empty;

	empty = llist_add(&req->lentry, &queue->req_list) &&
		list_empty(&queue->send_list) && !queue->request;

	/*
	 * If we are the first on the send_list, try to send directly;
	 * otherwise queue io_work. Also, only do that if we are on the
	 * same cpu, so we don't introduce contention.
	 */
	if (queue->io_cpu == raw_smp_processor_id() &&
	    sync && empty && mutex_trylock(&queue->send_mutex)) {
		queue->more_requests = !last;
		nvme_tcp_send_all(queue);
		queue->more_requests = false;
		mutex_unlock(&queue->send_mutex);
	} else if (last) {
		queue_work_on(queue->io_cpu, nvme_tcp_wq, &queue->io_work);
	}
}

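/*
 * Submitters add requests to the lock-less req_list; the single consumer
 * below migrates them in bulk onto send_list, so producers never contend
 * on a lock with the sending context.
 */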
static void nvme_tcp_process_req_list(struct nvme_tcp_queue *queue)
{
	struct nvme_tcp_request *req;
	struct llist_node *node;

	for (node = llist_del_all(&queue->req_list); node; node = node->next) {
		req = llist_entry(node, struct nvme_tcp_request, lentry);
		list_add(&req->entry, &queue->send_list);
	}
}

static inline struct nvme_tcp_request *
nvme_tcp_fetch_request(struct nvme_tcp_queue *queue)
{
	struct nvme_tcp_request *req;

	req = list_first_entry_or_null(&queue->send_list,
			struct nvme_tcp_request, entry);
	if (!req) {
		nvme_tcp_process_req_list(queue);
		req = list_first_entry_or_null(&queue->send_list,
				struct nvme_tcp_request, entry);
		if (unlikely(!req))
			return NULL;
	}

	list_del(&req->entry);
	return req;
}

static inline void nvme_tcp_ddgst_final(struct ahash_request *hash,
		__le32 *dgst)
{
	ahash_request_set_crypt(hash, NULL, (u8 *)dgst, 0);
	crypto_ahash_final(hash);
}

static inline void nvme_tcp_ddgst_update(struct ahash_request *hash,
		struct page *page, off_t off, size_t len)
{
	struct scatterlist sg;

	sg_init_marker(&sg, 1);
	sg_set_page(&sg, page, len, off);
	ahash_request_set_crypt(hash, &sg, NULL, len);
	crypto_ahash_update(hash);
}

static inline void nvme_tcp_hdgst(struct ahash_request *hash,
		void *pdu, size_t len)
{
	struct scatterlist sg;

	sg_init_one(&sg, pdu, len);
	ahash_request_set_crypt(hash, &sg, pdu + len, len);
	crypto_ahash_digest(hash);
}

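/*
 * Verify the received header digest: save the wire digest, recompute it in
 * place (nvme_tcp_hdgst() writes its result right after the header, where
 * the wire digest used to live), then compare the two values.
 */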
static int nvme_tcp_verify_hdgst(struct nvme_tcp_queue *queue,
		void *pdu, size_t pdu_len)
{
	struct nvme_tcp_hdr *hdr = pdu;
	__le32 recv_digest;
	__le32 exp_digest;

	if (unlikely(!(hdr->flags & NVME_TCP_F_HDGST))) {
		dev_err(queue->ctrl->ctrl.device,
			"queue %d: header digest flag is cleared\n",
			nvme_tcp_queue_id(queue));
		return -EPROTO;
	}

	recv_digest = *(__le32 *)(pdu + hdr->hlen);
	nvme_tcp_hdgst(queue->rcv_hash, pdu, pdu_len);
	exp_digest = *(__le32 *)(pdu + hdr->hlen);
	if (recv_digest != exp_digest) {
		dev_err(queue->ctrl->ctrl.device,
			"header digest error: recv %#x expected %#x\n",
			le32_to_cpu(recv_digest), le32_to_cpu(exp_digest));
		return -EIO;
	}

	return 0;
}

static int nvme_tcp_check_ddgst(struct nvme_tcp_queue *queue, void *pdu)
{
	struct nvme_tcp_hdr *hdr = pdu;
	u8 digest_len = nvme_tcp_hdgst_len(queue);
	u32 len;

	len = le32_to_cpu(hdr->plen) - hdr->hlen -
		((hdr->flags & NVME_TCP_F_HDGST) ? digest_len : 0);

	if (unlikely(len && !(hdr->flags & NVME_TCP_F_DDGST))) {
		dev_err(queue->ctrl->ctrl.device,
			"queue %d: data digest flag is cleared\n",
			nvme_tcp_queue_id(queue));
		return -EPROTO;
	}
	crypto_ahash_init(queue->rcv_hash);

	return 0;
}

static void nvme_tcp_exit_request(struct blk_mq_tag_set *set,
		struct request *rq, unsigned int hctx_idx)
{
	struct nvme_tcp_request *req = blk_mq_rq_to_pdu(rq);

	page_frag_free(req->pdu);
}

static int nvme_tcp_init_request(struct blk_mq_tag_set *set,
		struct request *rq, unsigned int hctx_idx,
		unsigned int numa_node)
{
	struct nvme_tcp_ctrl *ctrl = set->driver_data;
	struct nvme_tcp_request *req = blk_mq_rq_to_pdu(rq);
	int queue_idx = (set == &ctrl->tag_set) ? hctx_idx + 1 : 0;
	struct nvme_tcp_queue *queue = &ctrl->queues[queue_idx];
	u8 hdgst = nvme_tcp_hdgst_len(queue);

	req->pdu = page_frag_alloc(&queue->pf_cache,
			sizeof(struct nvme_tcp_cmd_pdu) + hdgst,
			GFP_KERNEL | __GFP_ZERO);
	if (!req->pdu)
		return -ENOMEM;

	req->queue = queue;
	nvme_req(rq)->ctrl = &ctrl->ctrl;

	return 0;
}

static int nvme_tcp_init_hctx(struct blk_mq_hw_ctx *hctx, void *data,
		unsigned int hctx_idx)
{
	struct nvme_tcp_ctrl *ctrl = data;
	struct nvme_tcp_queue *queue = &ctrl->queues[hctx_idx + 1];

	hctx->driver_data = queue;
	return 0;
}

static int nvme_tcp_init_admin_hctx(struct blk_mq_hw_ctx *hctx, void *data,
		unsigned int hctx_idx)
{
	struct nvme_tcp_ctrl *ctrl = data;
	struct nvme_tcp_queue *queue = &ctrl->queues[0];

	hctx->driver_data = queue;
	return 0;
}

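/*
 * The receive side is a small state machine: first consume the PDU header,
 * then any data, then the trailing data digest. The per-queue remaining
 * counters encode which state we are currently in.
 */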
static enum nvme_tcp_recv_state
nvme_tcp_recv_state(struct nvme_tcp_queue *queue)
{
	return (queue->pdu_remaining) ? NVME_TCP_RECV_PDU :
		(queue->ddgst_remaining) ? NVME_TCP_RECV_DDGST :
		NVME_TCP_RECV_DATA;
}

static void nvme_tcp_init_recv_ctx(struct nvme_tcp_queue *queue)
{
	queue->pdu_remaining = sizeof(struct nvme_tcp_rsp_pdu) +
				nvme_tcp_hdgst_len(queue);
	queue->pdu_offset = 0;
	queue->data_remaining = -1;
	queue->ddgst_remaining = 0;
}

static void nvme_tcp_error_recovery(struct nvme_ctrl *ctrl)
{
	if (!nvme_change_ctrl_state(ctrl, NVME_CTRL_RESETTING))
		return;

	dev_warn(ctrl->device, "starting error recovery\n");
	queue_work(nvme_reset_wq, &to_tcp_ctrl(ctrl)->err_work);
}

static int nvme_tcp_process_nvme_cqe(struct nvme_tcp_queue *queue,
		struct nvme_completion *cqe)
{
	struct request *rq;

	rq = blk_mq_tag_to_rq(nvme_tcp_tagset(queue), cqe->command_id);
	if (!rq) {
		dev_err(queue->ctrl->ctrl.device,
			"queue %d tag 0x%x not found\n",
			nvme_tcp_queue_id(queue), cqe->command_id);
		nvme_tcp_error_recovery(&queue->ctrl->ctrl);
		return -EINVAL;
	}

	if (!nvme_try_complete_req(rq, cqe->status, cqe->result))
		nvme_complete_rq(rq);
	queue->nr_cqe++;

	return 0;
}

static int nvme_tcp_handle_c2h_data(struct nvme_tcp_queue *queue,
		struct nvme_tcp_data_pdu *pdu)
{
	struct request *rq;

	rq = blk_mq_tag_to_rq(nvme_tcp_tagset(queue), pdu->command_id);
	if (!rq) {
		dev_err(queue->ctrl->ctrl.device,
			"queue %d tag %#x not found\n",
			nvme_tcp_queue_id(queue), pdu->command_id);
		return -ENOENT;
	}

	if (!blk_rq_payload_bytes(rq)) {
		dev_err(queue->ctrl->ctrl.device,
			"queue %d tag %#x unexpected data\n",
			nvme_tcp_queue_id(queue), rq->tag);
		return -EIO;
	}

	queue->data_remaining = le32_to_cpu(pdu->data_length);

	if (pdu->hdr.flags & NVME_TCP_F_DATA_SUCCESS &&
	    unlikely(!(pdu->hdr.flags & NVME_TCP_F_DATA_LAST))) {
		dev_err(queue->ctrl->ctrl.device,
			"queue %d tag %#x SUCCESS set but not last PDU\n",
			nvme_tcp_queue_id(queue), rq->tag);
		nvme_tcp_error_recovery(&queue->ctrl->ctrl);
		return -EPROTO;
	}

	return 0;
}

static int nvme_tcp_handle_comp(struct nvme_tcp_queue *queue,
		struct nvme_tcp_rsp_pdu *pdu)
{
	struct nvme_completion *cqe = &pdu->cqe;
	int ret = 0;

	/*
	 * AEN requests are special as they don't time out and can
	 * survive any kind of queue freeze and often don't respond to
	 * aborts. We don't even bother to allocate a struct request
	 * for them but rather special case them here.
	 */
	if (unlikely(nvme_is_aen_req(nvme_tcp_queue_id(queue),
				     cqe->command_id)))
		nvme_complete_async_event(&queue->ctrl->ctrl, cqe->status,
				&cqe->result);
	else
		ret = nvme_tcp_process_nvme_cqe(queue, cqe);

	return ret;
}

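/*
 * The controller solicits write data with an R2T PDU. Validate the R2T
 * against what has already been sent for this request and prepare the
 * matching H2CData PDU header in the request's pre-allocated pdu buffer.
 */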
static int nvme_tcp_setup_h2c_data_pdu(struct nvme_tcp_request *req,
		struct nvme_tcp_r2t_pdu *pdu)
{
	struct nvme_tcp_data_pdu *data = req->pdu;
	struct nvme_tcp_queue *queue = req->queue;
	struct request *rq = blk_mq_rq_from_pdu(req);
	u8 hdgst = nvme_tcp_hdgst_len(queue);
	u8 ddgst = nvme_tcp_ddgst_len(queue);

	req->pdu_len = le32_to_cpu(pdu->r2t_length);
	req->pdu_sent = 0;

	if (unlikely(!req->pdu_len)) {
		dev_err(queue->ctrl->ctrl.device,
			"req %d r2t len is %u, probably a bug...\n",
			rq->tag, req->pdu_len);
		return -EPROTO;
	}

	if (unlikely(req->data_sent + req->pdu_len > req->data_len)) {
		dev_err(queue->ctrl->ctrl.device,
			"req %d r2t len %u exceeded data len %u (%zu sent)\n",
			rq->tag, req->pdu_len, req->data_len,
			req->data_sent);
		return -EPROTO;
	}

	if (unlikely(le32_to_cpu(pdu->r2t_offset) < req->data_sent)) {
		dev_err(queue->ctrl->ctrl.device,
			"req %d unexpected r2t offset %u (expected %zu)\n",
			rq->tag, le32_to_cpu(pdu->r2t_offset),
			req->data_sent);
		return -EPROTO;
	}

	memset(data, 0, sizeof(*data));
	data->hdr.type = nvme_tcp_h2c_data;
	data->hdr.flags = NVME_TCP_F_DATA_LAST;
	if (queue->hdr_digest)
		data->hdr.flags |= NVME_TCP_F_HDGST;
	if (queue->data_digest)
		data->hdr.flags |= NVME_TCP_F_DDGST;
	data->hdr.hlen = sizeof(*data);
	data->hdr.pdo = data->hdr.hlen + hdgst;
	data->hdr.plen =
		cpu_to_le32(data->hdr.hlen + hdgst + req->pdu_len + ddgst);
	data->ttag = pdu->ttag;
	data->command_id = rq->tag;
	data->data_offset = cpu_to_le32(req->data_sent);
	data->data_length = cpu_to_le32(req->pdu_len);
	return 0;
}

static int nvme_tcp_handle_r2t(struct nvme_tcp_queue *queue,
		struct nvme_tcp_r2t_pdu *pdu)
{
	struct nvme_tcp_request *req;
	struct request *rq;
	int ret;

	rq = blk_mq_tag_to_rq(nvme_tcp_tagset(queue), pdu->command_id);
	if (!rq) {
		dev_err(queue->ctrl->ctrl.device,
			"queue %d tag %#x not found\n",
			nvme_tcp_queue_id(queue), pdu->command_id);
		return -ENOENT;
	}
	req = blk_mq_rq_to_pdu(rq);

	ret = nvme_tcp_setup_h2c_data_pdu(req, pdu);
	if (unlikely(ret))
		return ret;

	req->state = NVME_TCP_SEND_H2C_PDU;
	req->offset = 0;

	nvme_tcp_queue_request(req, false, true);

	return 0;
}

static int nvme_tcp_recv_pdu(struct nvme_tcp_queue *queue, struct sk_buff *skb,
		unsigned int *offset, size_t *len)
{
	struct nvme_tcp_hdr *hdr;
	char *pdu = queue->pdu;
	size_t rcv_len = min_t(size_t, *len, queue->pdu_remaining);
	int ret;

	ret = skb_copy_bits(skb, *offset,
		&pdu[queue->pdu_offset], rcv_len);
	if (unlikely(ret))
		return ret;

	queue->pdu_remaining -= rcv_len;
	queue->pdu_offset += rcv_len;
	*offset += rcv_len;
	*len -= rcv_len;
	if (queue->pdu_remaining)
		return 0;

	hdr = queue->pdu;
	if (queue->hdr_digest) {
		ret = nvme_tcp_verify_hdgst(queue, queue->pdu, hdr->hlen);
		if (unlikely(ret))
			return ret;
	}

	if (queue->data_digest) {
		ret = nvme_tcp_check_ddgst(queue, queue->pdu);
		if (unlikely(ret))
			return ret;
	}

	switch (hdr->type) {
	case nvme_tcp_c2h_data:
		return nvme_tcp_handle_c2h_data(queue, (void *)queue->pdu);
	case nvme_tcp_rsp:
		nvme_tcp_init_recv_ctx(queue);
		return nvme_tcp_handle_comp(queue, (void *)queue->pdu);
	case nvme_tcp_r2t:
		nvme_tcp_init_recv_ctx(queue);
		return nvme_tcp_handle_r2t(queue, (void *)queue->pdu);
	default:
		dev_err(queue->ctrl->ctrl.device,
			"unsupported pdu type (%d)\n", hdr->type);
		return -EINVAL;
	}
}

static inline void nvme_tcp_end_request(struct request *rq, u16 status)
{
	union nvme_result res = {};

	if (!nvme_try_complete_req(rq, cpu_to_le16(status << 1), res))
		nvme_complete_rq(rq);
}

static int nvme_tcp_recv_data(struct nvme_tcp_queue *queue, struct sk_buff *skb,
		unsigned int *offset, size_t *len)
{
	struct nvme_tcp_data_pdu *pdu = (void *)queue->pdu;
	struct nvme_tcp_request *req;
	struct request *rq;

	rq = blk_mq_tag_to_rq(nvme_tcp_tagset(queue), pdu->command_id);
	if (!rq) {
		dev_err(queue->ctrl->ctrl.device,
			"queue %d tag %#x not found\n",
			nvme_tcp_queue_id(queue), pdu->command_id);
		return -ENOENT;
	}
	req = blk_mq_rq_to_pdu(rq);

	while (true) {
		int recv_len, ret;

		recv_len = min_t(size_t, *len, queue->data_remaining);
		if (!recv_len)
			break;

		if (!iov_iter_count(&req->iter)) {
			req->curr_bio = req->curr_bio->bi_next;

			/*
			 * If we don't have any bios it means that the
			 * controller sent more data than we requested,
			 * hence error
			 */
			if (!req->curr_bio) {
				dev_err(queue->ctrl->ctrl.device,
					"queue %d no space in request %#x",
					nvme_tcp_queue_id(queue), rq->tag);
				nvme_tcp_init_recv_ctx(queue);
				return -EIO;
			}
			nvme_tcp_init_iter(req, READ);
		}

		/* we can read only from what is left in this bio */
		recv_len = min_t(size_t, recv_len,
				iov_iter_count(&req->iter));

		if (queue->data_digest)
			ret = skb_copy_and_hash_datagram_iter(skb, *offset,
				&req->iter, recv_len, queue->rcv_hash);
		else
			ret = skb_copy_datagram_iter(skb, *offset,
					&req->iter, recv_len);
		if (ret) {
			dev_err(queue->ctrl->ctrl.device,
				"queue %d failed to copy request %#x data",
				nvme_tcp_queue_id(queue), rq->tag);
			return ret;
		}

		*len -= recv_len;
		*offset += recv_len;
		queue->data_remaining -= recv_len;
	}

	if (!queue->data_remaining) {
		if (queue->data_digest) {
			nvme_tcp_ddgst_final(queue->rcv_hash, &queue->exp_ddgst);
			queue->ddgst_remaining = NVME_TCP_DIGEST_LENGTH;
		} else {
			if (pdu->hdr.flags & NVME_TCP_F_DATA_SUCCESS) {
				nvme_tcp_end_request(rq, NVME_SC_SUCCESS);
				queue->nr_cqe++;
			}
			nvme_tcp_init_recv_ctx(queue);
		}
	}

	return 0;
}

static int nvme_tcp_recv_ddgst(struct nvme_tcp_queue *queue,
		struct sk_buff *skb, unsigned int *offset, size_t *len)
{
	struct nvme_tcp_data_pdu *pdu = (void *)queue->pdu;
	char *ddgst = (char *)&queue->recv_ddgst;
	size_t recv_len = min_t(size_t, *len, queue->ddgst_remaining);
	off_t off = NVME_TCP_DIGEST_LENGTH - queue->ddgst_remaining;
	int ret;

	ret = skb_copy_bits(skb, *offset, &ddgst[off], recv_len);
	if (unlikely(ret))
		return ret;

	queue->ddgst_remaining -= recv_len;
	*offset += recv_len;
	*len -= recv_len;
	if (queue->ddgst_remaining)
		return 0;

	if (queue->recv_ddgst != queue->exp_ddgst) {
		dev_err(queue->ctrl->ctrl.device,
			"data digest error: recv %#x expected %#x\n",
			le32_to_cpu(queue->recv_ddgst),
			le32_to_cpu(queue->exp_ddgst));
		return -EIO;
	}

	if (pdu->hdr.flags & NVME_TCP_F_DATA_SUCCESS) {
		struct request *rq = blk_mq_tag_to_rq(nvme_tcp_tagset(queue),
						pdu->command_id);

		nvme_tcp_end_request(rq, NVME_SC_SUCCESS);
		queue->nr_cqe++;
	}

	nvme_tcp_init_recv_ctx(queue);
	return 0;
}

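/*
 * ->read_sock() callback: dispatch each chunk of the skb to the
 * pdu/data/digest handler for the current receive state until the skb is
 * consumed, or trigger controller error recovery on a receive error.
 */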
static int nvme_tcp_recv_skb(read_descriptor_t *desc, struct sk_buff *skb,
			     unsigned int offset, size_t len)
{
	struct nvme_tcp_queue *queue = desc->arg.data;
	size_t consumed = len;
	int result;

	while (len) {
		switch (nvme_tcp_recv_state(queue)) {
		case NVME_TCP_RECV_PDU:
			result = nvme_tcp_recv_pdu(queue, skb, &offset, &len);
			break;
		case NVME_TCP_RECV_DATA:
			result = nvme_tcp_recv_data(queue, skb, &offset, &len);
			break;
		case NVME_TCP_RECV_DDGST:
			result = nvme_tcp_recv_ddgst(queue, skb, &offset, &len);
			break;
		default:
			result = -EFAULT;
		}
		if (result) {
			dev_err(queue->ctrl->ctrl.device,
				"receive failed: %d\n", result);
			queue->rd_enabled = false;
			nvme_tcp_error_recovery(&queue->ctrl->ctrl);
			return result;
		}
	}

	return consumed;
}

static void nvme_tcp_data_ready(struct sock *sk)
{
	struct nvme_tcp_queue *queue;

	read_lock_bh(&sk->sk_callback_lock);
	queue = sk->sk_user_data;
	if (likely(queue && queue->rd_enabled) &&
	    !test_bit(NVME_TCP_Q_POLLING, &queue->flags))
		queue_work_on(queue->io_cpu, nvme_tcp_wq, &queue->io_work);
	read_unlock_bh(&sk->sk_callback_lock);
}

static void nvme_tcp_write_space(struct sock *sk)
{
	struct nvme_tcp_queue *queue;

	read_lock_bh(&sk->sk_callback_lock);
	queue = sk->sk_user_data;
	if (likely(queue && sk_stream_is_writeable(sk))) {
		clear_bit(SOCK_NOSPACE, &sk->sk_socket->flags);
		queue_work_on(queue->io_cpu, nvme_tcp_wq, &queue->io_work);
	}
	read_unlock_bh(&sk->sk_callback_lock);
}

static void nvme_tcp_state_change(struct sock *sk)
{
	struct nvme_tcp_queue *queue;

	read_lock(&sk->sk_callback_lock);
	queue = sk->sk_user_data;
	if (!queue)
		goto done;

	switch (sk->sk_state) {
	case TCP_CLOSE:
	case TCP_CLOSE_WAIT:
	case TCP_LAST_ACK:
	case TCP_FIN_WAIT1:
	case TCP_FIN_WAIT2:
		nvme_tcp_error_recovery(&queue->ctrl->ctrl);
		break;
	default:
		dev_info(queue->ctrl->ctrl.device,
			"queue %d socket state %d\n",
			nvme_tcp_queue_id(queue), sk->sk_state);
	}

	queue->state_change(sk);
done:
	read_unlock(&sk->sk_callback_lock);
}

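/*
 * More requests are queued (or about to be queued) for sending; used to
 * decide whether to set MSG_MORE / MSG_SENDPAGE_NOTLAST so the TCP stack
 * can coalesce instead of pushing out a small segment prematurely.
 */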
static inline bool nvme_tcp_queue_more(struct nvme_tcp_queue *queue)
{
	return !list_empty(&queue->send_list) ||
		!llist_empty(&queue->req_list) || queue->more_requests;
}

static inline void nvme_tcp_done_send_req(struct nvme_tcp_queue *queue)
{
	queue->request = NULL;
}

static void nvme_tcp_fail_request(struct nvme_tcp_request *req)
{
	nvme_tcp_end_request(blk_mq_rq_from_pdu(req), NVME_SC_HOST_PATH_ERROR);
}

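/*
 * Push the payload with kernel_sendpage() where the page allows it (see
 * sendpage_ok()), falling back to sock_no_sendpage() otherwise, and update
 * the running data digest as bytes are accepted by the socket.
 */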
static int nvme_tcp_try_send_data(struct nvme_tcp_request *req)
{
	struct nvme_tcp_queue *queue = req->queue;

	while (true) {
		struct page *page = nvme_tcp_req_cur_page(req);
		size_t offset = nvme_tcp_req_cur_offset(req);
		size_t len = nvme_tcp_req_cur_length(req);
		bool last = nvme_tcp_pdu_last_send(req, len);
		int ret, flags = MSG_DONTWAIT;

		if (last && !queue->data_digest && !nvme_tcp_queue_more(queue))
			flags |= MSG_EOR;
		else
			flags |= MSG_MORE | MSG_SENDPAGE_NOTLAST;

		if (sendpage_ok(page)) {
			ret = kernel_sendpage(queue->sock, page, offset, len,
					flags);
		} else {
			ret = sock_no_sendpage(queue->sock, page, offset, len,
					flags);
		}
		if (ret <= 0)
			return ret;

		nvme_tcp_advance_req(req, ret);
		if (queue->data_digest)
			nvme_tcp_ddgst_update(queue->snd_hash, page,
					offset, ret);

		/* fully successful last write */
		if (last && ret == len) {
			if (queue->data_digest) {
				nvme_tcp_ddgst_final(queue->snd_hash,
					&req->ddgst);
				req->state = NVME_TCP_SEND_DDGST;
				req->offset = 0;
			} else {
				nvme_tcp_done_send_req(queue);
			}
			return 1;
		}
	}
	return -EAGAIN;
}

static int nvme_tcp_try_send_cmd_pdu(struct nvme_tcp_request *req)
{
	struct nvme_tcp_queue *queue = req->queue;
	struct nvme_tcp_cmd_pdu *pdu = req->pdu;
	bool inline_data = nvme_tcp_has_inline_data(req);
	u8 hdgst = nvme_tcp_hdgst_len(queue);
	int len = sizeof(*pdu) + hdgst - req->offset;
	int flags = MSG_DONTWAIT;
	int ret;

	if (inline_data || nvme_tcp_queue_more(queue))
		flags |= MSG_MORE | MSG_SENDPAGE_NOTLAST;
	else
		flags |= MSG_EOR;

	if (queue->hdr_digest && !req->offset)
		nvme_tcp_hdgst(queue->snd_hash, pdu, sizeof(*pdu));

	ret = kernel_sendpage(queue->sock, virt_to_page(pdu),
			offset_in_page(pdu) + req->offset, len, flags);
	if (unlikely(ret <= 0))
		return ret;

	len -= ret;
	if (!len) {
		if (inline_data) {
			req->state = NVME_TCP_SEND_DATA;
			if (queue->data_digest)
				crypto_ahash_init(queue->snd_hash);
		} else {
			nvme_tcp_done_send_req(queue);
		}
		return 1;
	}
	req->offset += ret;

	return -EAGAIN;
}

static int nvme_tcp_try_send_data_pdu(struct nvme_tcp_request *req)
{
	struct nvme_tcp_queue *queue = req->queue;
	struct nvme_tcp_data_pdu *pdu = req->pdu;
	u8 hdgst = nvme_tcp_hdgst_len(queue);
	int len = sizeof(*pdu) - req->offset + hdgst;
	int ret;

	if (queue->hdr_digest && !req->offset)
		nvme_tcp_hdgst(queue->snd_hash, pdu, sizeof(*pdu));

	ret = kernel_sendpage(queue->sock, virt_to_page(pdu),
			offset_in_page(pdu) + req->offset, len,
			MSG_DONTWAIT | MSG_MORE | MSG_SENDPAGE_NOTLAST);
	if (unlikely(ret <= 0))
		return ret;

	len -= ret;
	if (!len) {
		req->state = NVME_TCP_SEND_DATA;
		if (queue->data_digest)
			crypto_ahash_init(queue->snd_hash);
		return 1;
	}
	req->offset += ret;

	return -EAGAIN;
}

static int nvme_tcp_try_send_ddgst(struct nvme_tcp_request *req)
{
	struct nvme_tcp_queue *queue = req->queue;
	int ret;
	struct msghdr msg = { .msg_flags = MSG_DONTWAIT };
	struct kvec iov = {
		.iov_base = &req->ddgst + req->offset,
		.iov_len = NVME_TCP_DIGEST_LENGTH - req->offset
	};

	if (nvme_tcp_queue_more(queue))
		msg.msg_flags |= MSG_MORE;
	else
		msg.msg_flags |= MSG_EOR;

	ret = kernel_sendmsg(queue->sock, &msg, &iov, 1, iov.iov_len);
	if (unlikely(ret <= 0))
		return ret;

	if (req->offset + ret == NVME_TCP_DIGEST_LENGTH) {
		nvme_tcp_done_send_req(queue);
		return 1;
	}

	req->offset += ret;
	return -EAGAIN;
}

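/*
 * Advance the current request through the send state machine (command PDU
 * -> H2C data PDU -> data -> data digest). Returns 1 when a stage
 * completed, 0 when there is nothing to send or the socket would block,
 * and a negative error on a fatal failure (which also fails the request
 * unless the connection itself is already gone).
 */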
static int nvme_tcp_try_send(struct nvme_tcp_queue *queue)
{
	struct nvme_tcp_request *req;
	int ret = 1;

	if (!queue->request) {
		queue->request = nvme_tcp_fetch_request(queue);
		if (!queue->request)
			return 0;
	}
	req = queue->request;

	if (req->state == NVME_TCP_SEND_CMD_PDU) {
		ret = nvme_tcp_try_send_cmd_pdu(req);
		if (ret <= 0)
			goto done;
		if (!nvme_tcp_has_inline_data(req))
			return ret;
	}

	if (req->state == NVME_TCP_SEND_H2C_PDU) {
		ret = nvme_tcp_try_send_data_pdu(req);
		if (ret <= 0)
			goto done;
	}

	if (req->state == NVME_TCP_SEND_DATA) {
		ret = nvme_tcp_try_send_data(req);
		if (ret <= 0)
			goto done;
	}

	if (req->state == NVME_TCP_SEND_DDGST)
		ret = nvme_tcp_try_send_ddgst(req);
done:
	if (ret == -EAGAIN) {
		ret = 0;
	} else if (ret < 0) {
		dev_err(queue->ctrl->ctrl.device,
			"failed to send request %d\n", ret);
		if (ret != -EPIPE && ret != -ECONNRESET)
			nvme_tcp_fail_request(queue->request);
		nvme_tcp_done_send_req(queue);
	}
	return ret;
}

static int nvme_tcp_try_recv(struct nvme_tcp_queue *queue)
{
	struct socket *sock = queue->sock;
	struct sock *sk = sock->sk;
	read_descriptor_t rd_desc;
	int consumed;

	rd_desc.arg.data = queue;
	rd_desc.count = 1;
	lock_sock(sk);
	queue->nr_cqe = 0;
	consumed = sock->ops->read_sock(sk, &rd_desc, nvme_tcp_recv_skb);
	release_sock(sk);
	return consumed;
}

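/*
 * Per-queue io context: alternate between sending and receiving for up to
 * a 1ms quota, then reschedule itself while either direction still has
 * work pending so one queue cannot monopolize the workqueue.
 */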
static void nvme_tcp_io_work(struct work_struct *w)
{
	struct nvme_tcp_queue *queue =
		container_of(w, struct nvme_tcp_queue, io_work);
	unsigned long deadline = jiffies + msecs_to_jiffies(1);

	do {
		bool pending = false;
		int result;

		if (mutex_trylock(&queue->send_mutex)) {
			result = nvme_tcp_try_send(queue);
			mutex_unlock(&queue->send_mutex);
			if (result > 0)
				pending = true;
			else if (unlikely(result < 0))
				break;
		}

		result = nvme_tcp_try_recv(queue);
		if (result > 0)
			pending = true;
		else if (unlikely(result < 0))
			return;

		if (!pending)
			return;

	} while (!time_after(jiffies, deadline)); /* quota is exhausted */

	queue_work_on(queue->io_cpu, nvme_tcp_wq, &queue->io_work);
}

static void nvme_tcp_free_crypto(struct nvme_tcp_queue *queue)
{
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(queue->rcv_hash);

	ahash_request_free(queue->rcv_hash);
	ahash_request_free(queue->snd_hash);
	crypto_free_ahash(tfm);
}

static int nvme_tcp_alloc_crypto(struct nvme_tcp_queue *queue)
{
	struct crypto_ahash *tfm;

	tfm = crypto_alloc_ahash("crc32c", 0, CRYPTO_ALG_ASYNC);
	if (IS_ERR(tfm))
		return PTR_ERR(tfm);

	queue->snd_hash = ahash_request_alloc(tfm, GFP_KERNEL);
	if (!queue->snd_hash)
		goto free_tfm;
	ahash_request_set_callback(queue->snd_hash, 0, NULL, NULL);

	queue->rcv_hash = ahash_request_alloc(tfm, GFP_KERNEL);
	if (!queue->rcv_hash)
		goto free_snd_hash;
	ahash_request_set_callback(queue->rcv_hash, 0, NULL, NULL);

	return 0;
free_snd_hash:
	ahash_request_free(queue->snd_hash);
free_tfm:
	crypto_free_ahash(tfm);
	return -ENOMEM;
}

static void nvme_tcp_free_async_req(struct nvme_tcp_ctrl *ctrl)
{
	struct nvme_tcp_request *async = &ctrl->async_req;

	page_frag_free(async->pdu);
}

static int nvme_tcp_alloc_async_req(struct nvme_tcp_ctrl *ctrl)
{
	struct nvme_tcp_queue *queue = &ctrl->queues[0];
	struct nvme_tcp_request *async = &ctrl->async_req;
	u8 hdgst = nvme_tcp_hdgst_len(queue);

	async->pdu = page_frag_alloc(&queue->pf_cache,
		sizeof(struct nvme_tcp_cmd_pdu) + hdgst,
		GFP_KERNEL | __GFP_ZERO);
	if (!async->pdu)
		return -ENOMEM;

	async->queue = &ctrl->queues[0];
	return 0;
}

static void nvme_tcp_free_queue(struct nvme_ctrl *nctrl, int qid)
{
	struct nvme_tcp_ctrl *ctrl = to_tcp_ctrl(nctrl);
	struct nvme_tcp_queue *queue = &ctrl->queues[qid];

	if (!test_and_clear_bit(NVME_TCP_Q_ALLOCATED, &queue->flags))
		return;

	if (queue->hdr_digest || queue->data_digest)
		nvme_tcp_free_crypto(queue);

	sock_release(queue->sock);
	kfree(queue->pdu);
	mutex_destroy(&queue->queue_lock);
}

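/*
 * NVMe/TCP connection establishment: exchange ICReq/ICResp PDUs and verify
 * that the controller's PFV, CPDA and digest settings match what the host
 * requested.
 */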
static int nvme_tcp_init_connection(struct nvme_tcp_queue *queue)
{
	struct nvme_tcp_icreq_pdu *icreq;
	struct nvme_tcp_icresp_pdu *icresp;
	struct msghdr msg = {};
	struct kvec iov;
	bool ctrl_hdgst, ctrl_ddgst;
	int ret;

	icreq = kzalloc(sizeof(*icreq), GFP_KERNEL);
	if (!icreq)
		return -ENOMEM;

	icresp = kzalloc(sizeof(*icresp), GFP_KERNEL);
	if (!icresp) {
		ret = -ENOMEM;
		goto free_icreq;
	}

	icreq->hdr.type = nvme_tcp_icreq;
	icreq->hdr.hlen = sizeof(*icreq);
	icreq->hdr.pdo = 0;
	icreq->hdr.plen = cpu_to_le32(icreq->hdr.hlen);
	icreq->pfv = cpu_to_le16(NVME_TCP_PFV_1_0);
	icreq->maxr2t = 0; /* single inflight r2t supported */
	icreq->hpda = 0; /* no alignment constraint */
	if (queue->hdr_digest)
		icreq->digest |= NVME_TCP_HDR_DIGEST_ENABLE;
	if (queue->data_digest)
		icreq->digest |= NVME_TCP_DATA_DIGEST_ENABLE;

	iov.iov_base = icreq;
	iov.iov_len = sizeof(*icreq);
	ret = kernel_sendmsg(queue->sock, &msg, &iov, 1, iov.iov_len);
	if (ret < 0)
		goto free_icresp;

	memset(&msg, 0, sizeof(msg));
	iov.iov_base = icresp;
	iov.iov_len = sizeof(*icresp);
	ret = kernel_recvmsg(queue->sock, &msg, &iov, 1,
			iov.iov_len, msg.msg_flags);
	if (ret < 0)
		goto free_icresp;

	ret = -EINVAL;
	if (icresp->hdr.type != nvme_tcp_icresp) {
		pr_err("queue %d: bad type returned %d\n",
			nvme_tcp_queue_id(queue), icresp->hdr.type);
		goto free_icresp;
	}

	if (le32_to_cpu(icresp->hdr.plen) != sizeof(*icresp)) {
		pr_err("queue %d: bad pdu length returned %d\n",
			nvme_tcp_queue_id(queue), icresp->hdr.plen);
		goto free_icresp;
	}

	if (icresp->pfv != NVME_TCP_PFV_1_0) {
		pr_err("queue %d: bad pfv returned %d\n",
			nvme_tcp_queue_id(queue), icresp->pfv);
		goto free_icresp;
	}

	ctrl_ddgst = !!(icresp->digest & NVME_TCP_DATA_DIGEST_ENABLE);
	if ((queue->data_digest && !ctrl_ddgst) ||
	    (!queue->data_digest && ctrl_ddgst)) {
		pr_err("queue %d: data digest mismatch host: %s ctrl: %s\n",
			nvme_tcp_queue_id(queue),
			queue->data_digest ? "enabled" : "disabled",
			ctrl_ddgst ? "enabled" : "disabled");
		goto free_icresp;
	}

	ctrl_hdgst = !!(icresp->digest & NVME_TCP_HDR_DIGEST_ENABLE);
	if ((queue->hdr_digest && !ctrl_hdgst) ||
	    (!queue->hdr_digest && ctrl_hdgst)) {
		pr_err("queue %d: header digest mismatch host: %s ctrl: %s\n",
			nvme_tcp_queue_id(queue),
			queue->hdr_digest ? "enabled" : "disabled",
			ctrl_hdgst ? "enabled" : "disabled");
		goto free_icresp;
	}

	if (icresp->cpda != 0) {
		pr_err("queue %d: unsupported cpda returned %d\n",
			nvme_tcp_queue_id(queue), icresp->cpda);
		goto free_icresp;
	}

	ret = 0;
free_icresp:
	kfree(icresp);
free_icreq:
	kfree(icreq);
	return ret;
}

static bool nvme_tcp_admin_queue(struct nvme_tcp_queue *queue)
{
	return nvme_tcp_queue_id(queue) == 0;
}

static bool nvme_tcp_default_queue(struct nvme_tcp_queue *queue)
{
	struct nvme_tcp_ctrl *ctrl = queue->ctrl;
	int qid = nvme_tcp_queue_id(queue);

	return !nvme_tcp_admin_queue(queue) &&
		qid < 1 + ctrl->io_queues[HCTX_TYPE_DEFAULT];
}

static bool nvme_tcp_read_queue(struct nvme_tcp_queue *queue)
{
	struct nvme_tcp_ctrl *ctrl = queue->ctrl;
	int qid = nvme_tcp_queue_id(queue);

	return !nvme_tcp_admin_queue(queue) &&
		!nvme_tcp_default_queue(queue) &&
		qid < 1 + ctrl->io_queues[HCTX_TYPE_DEFAULT] +
				ctrl->io_queues[HCTX_TYPE_READ];
}

static bool nvme_tcp_poll_queue(struct nvme_tcp_queue *queue)
{
	struct nvme_tcp_ctrl *ctrl = queue->ctrl;
	int qid = nvme_tcp_queue_id(queue);

	return !nvme_tcp_admin_queue(queue) &&
		!nvme_tcp_default_queue(queue) &&
		!nvme_tcp_read_queue(queue) &&
		qid < 1 + ctrl->io_queues[HCTX_TYPE_DEFAULT] +
				ctrl->io_queues[HCTX_TYPE_READ] +
				ctrl->io_queues[HCTX_TYPE_POLL];
}

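/*
 * Pin the queue's io_work to an online cpu, mapping each queue's index
 * within its class (default/read/poll) onto the online cpu mask so queues
 * of the same type are spread across cpus independently.
 */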
1366static void nvme_tcp_set_queue_io_cpu(struct nvme_tcp_queue *queue)
1367{
1368 struct nvme_tcp_ctrl *ctrl = queue->ctrl;
1369 int qid = nvme_tcp_queue_id(queue);
1370 int n = 0;
1371
1372 if (nvme_tcp_default_queue(queue))
1373 n = qid - 1;
1374 else if (nvme_tcp_read_queue(queue))
1375 n = qid - ctrl->io_queues[HCTX_TYPE_DEFAULT] - 1;
1376 else if (nvme_tcp_poll_queue(queue))
1377 n = qid - ctrl->io_queues[HCTX_TYPE_DEFAULT] -
1378 ctrl->io_queues[HCTX_TYPE_READ] - 1;
1379 queue->io_cpu = cpumask_next_wrap(n - 1, cpu_online_mask, -1, false);
1380}
1381
Sagi Grimberg3f2304f2018-12-03 17:52:17 -08001382static int nvme_tcp_alloc_queue(struct nvme_ctrl *nctrl,
1383 int qid, size_t queue_size)
1384{
1385 struct nvme_tcp_ctrl *ctrl = to_tcp_ctrl(nctrl);
1386 struct nvme_tcp_queue *queue = &ctrl->queues[qid];
Christoph Hellwig6ebf71b2020-05-28 07:12:26 +02001387 int ret, rcv_pdu_size;
Sagi Grimberg3f2304f2018-12-03 17:52:17 -08001388
Chao Leng9ebbfe42021-01-14 17:09:26 +08001389 mutex_init(&queue->queue_lock);
Sagi Grimberg3f2304f2018-12-03 17:52:17 -08001390 queue->ctrl = ctrl;
Sagi Grimberg15ec9282020-06-18 17:30:22 -07001391 init_llist_head(&queue->req_list);
Sagi Grimberg3f2304f2018-12-03 17:52:17 -08001392 INIT_LIST_HEAD(&queue->send_list);
Sagi Grimbergdb5ad6b2020-05-01 14:25:45 -07001393 mutex_init(&queue->send_mutex);
Sagi Grimberg3f2304f2018-12-03 17:52:17 -08001394 INIT_WORK(&queue->io_work, nvme_tcp_io_work);
1395 queue->queue_size = queue_size;
1396
1397 if (qid > 0)
Israel Rukshin9924b032019-08-18 12:08:53 +03001398 queue->cmnd_capsule_len = nctrl->ioccsz * 16;
Sagi Grimberg3f2304f2018-12-03 17:52:17 -08001399 else
1400 queue->cmnd_capsule_len = sizeof(struct nvme_command) +
1401 NVME_TCP_ADMIN_CCSZ;
1402
1403 ret = sock_create(ctrl->addr.ss_family, SOCK_STREAM,
1404 IPPROTO_TCP, &queue->sock);
1405 if (ret) {
Israel Rukshin9924b032019-08-18 12:08:53 +03001406 dev_err(nctrl->device,
Sagi Grimberg3f2304f2018-12-03 17:52:17 -08001407 "failed to create socket: %d\n", ret);
Chao Leng9ebbfe42021-01-14 17:09:26 +08001408 goto err_destroy_mutex;
Sagi Grimberg3f2304f2018-12-03 17:52:17 -08001409 }
1410
1411 /* Single syn retry */
Christoph Hellwig557eadf2020-05-28 07:12:21 +02001412 tcp_sock_set_syncnt(queue->sock->sk, 1);
Sagi Grimberg3f2304f2018-12-03 17:52:17 -08001413
1414 /* Set TCP no delay */
Christoph Hellwig12abc5e2020-05-28 07:12:19 +02001415 tcp_sock_set_nodelay(queue->sock->sk);
Sagi Grimberg3f2304f2018-12-03 17:52:17 -08001416
1417 /*
1418 * Cleanup whatever is sitting in the TCP transmit queue on socket
1419 * close. This is done to prevent stale data from being sent should
1420 * the network connection be restored before TCP times out.
1421 */
Christoph Hellwigc4335942020-05-28 07:12:10 +02001422 sock_no_linger(queue->sock->sk);
Sagi Grimberg3f2304f2018-12-03 17:52:17 -08001423
Christoph Hellwig6e434962020-05-28 07:12:11 +02001424 if (so_priority > 0)
1425 sock_set_priority(queue->sock->sk, so_priority);
Wunderlich, Mark9912ade2020-01-16 00:46:12 +00001426
Israel Rukshinbb139852019-08-18 12:08:54 +03001427 /* Set socket type of service */
Christoph Hellwig6ebf71b2020-05-28 07:12:26 +02001428 if (nctrl->opts->tos >= 0)
1429 ip_sock_set_tos(queue->sock->sk, nctrl->opts->tos);
Israel Rukshinbb139852019-08-18 12:08:54 +03001430
Sagi Grimbergadc99fd2020-07-23 16:42:26 -07001431 /* Set 10 seconds timeout for icresp recvmsg */
1432 queue->sock->sk->sk_rcvtimeo = 10 * HZ;
1433
Sagi Grimberg3f2304f2018-12-03 17:52:17 -08001434 queue->sock->sk->sk_allocation = GFP_ATOMIC;
Sagi Grimberg40510a62020-02-25 15:53:09 -08001435 nvme_tcp_set_queue_io_cpu(queue);
Sagi Grimberg3f2304f2018-12-03 17:52:17 -08001436 queue->request = NULL;
1437 queue->data_remaining = 0;
1438 queue->ddgst_remaining = 0;
1439 queue->pdu_remaining = 0;
1440 queue->pdu_offset = 0;
1441 sk_set_memalloc(queue->sock->sk);
1442
Israel Rukshin9924b032019-08-18 12:08:53 +03001443 if (nctrl->opts->mask & NVMF_OPT_HOST_TRADDR) {
Sagi Grimberg3f2304f2018-12-03 17:52:17 -08001444 ret = kernel_bind(queue->sock, (struct sockaddr *)&ctrl->src_addr,
1445 sizeof(ctrl->src_addr));
1446 if (ret) {
Israel Rukshin9924b032019-08-18 12:08:53 +03001447 dev_err(nctrl->device,
Sagi Grimberg3f2304f2018-12-03 17:52:17 -08001448 "failed to bind queue %d socket %d\n",
1449 qid, ret);
1450 goto err_sock;
1451 }
1452 }
1453
1454 queue->hdr_digest = nctrl->opts->hdr_digest;
1455 queue->data_digest = nctrl->opts->data_digest;
1456 if (queue->hdr_digest || queue->data_digest) {
1457 ret = nvme_tcp_alloc_crypto(queue);
1458 if (ret) {
Israel Rukshin9924b032019-08-18 12:08:53 +03001459 dev_err(nctrl->device,
Sagi Grimberg3f2304f2018-12-03 17:52:17 -08001460 "failed to allocate queue %d crypto\n", qid);
1461 goto err_sock;
1462 }
1463 }
1464
1465 rcv_pdu_size = sizeof(struct nvme_tcp_rsp_pdu) +
1466 nvme_tcp_hdgst_len(queue);
1467 queue->pdu = kmalloc(rcv_pdu_size, GFP_KERNEL);
1468 if (!queue->pdu) {
1469 ret = -ENOMEM;
1470 goto err_crypto;
1471 }
1472
Israel Rukshin9924b032019-08-18 12:08:53 +03001473 dev_dbg(nctrl->device, "connecting queue %d\n",
Sagi Grimberg3f2304f2018-12-03 17:52:17 -08001474 nvme_tcp_queue_id(queue));
1475
1476 ret = kernel_connect(queue->sock, (struct sockaddr *)&ctrl->addr,
1477 sizeof(ctrl->addr), 0);
1478 if (ret) {
Israel Rukshin9924b032019-08-18 12:08:53 +03001479 dev_err(nctrl->device,
Sagi Grimberg3f2304f2018-12-03 17:52:17 -08001480 "failed to connect socket: %d\n", ret);
1481 goto err_rcv_pdu;
1482 }
1483
1484 ret = nvme_tcp_init_connection(queue);
1485 if (ret)
1486 goto err_init_connect;
1487
1488 queue->rd_enabled = true;
1489 set_bit(NVME_TCP_Q_ALLOCATED, &queue->flags);
1490 nvme_tcp_init_recv_ctx(queue);
1491
1492 write_lock_bh(&queue->sock->sk->sk_callback_lock);
1493 queue->sock->sk->sk_user_data = queue;
1494 queue->state_change = queue->sock->sk->sk_state_change;
1495 queue->data_ready = queue->sock->sk->sk_data_ready;
1496 queue->write_space = queue->sock->sk->sk_write_space;
1497 queue->sock->sk->sk_data_ready = nvme_tcp_data_ready;
1498 queue->sock->sk->sk_state_change = nvme_tcp_state_change;
1499 queue->sock->sk->sk_write_space = nvme_tcp_write_space;
Sebastian Andrzej Siewiorac1c4e12019-10-10 17:34:12 +02001500#ifdef CONFIG_NET_RX_BUSY_POLL
Sagi Grimberg1a9460c2019-07-03 14:08:04 -07001501 queue->sock->sk->sk_ll_usec = 1;
Sebastian Andrzej Siewiorac1c4e12019-10-10 17:34:12 +02001502#endif
Sagi Grimberg3f2304f2018-12-03 17:52:17 -08001503 write_unlock_bh(&queue->sock->sk->sk_callback_lock);
1504
1505 return 0;
1506
1507err_init_connect:
1508 kernel_sock_shutdown(queue->sock, SHUT_RDWR);
1509err_rcv_pdu:
1510 kfree(queue->pdu);
1511err_crypto:
1512 if (queue->hdr_digest || queue->data_digest)
1513 nvme_tcp_free_crypto(queue);
1514err_sock:
1515 sock_release(queue->sock);
1516 queue->sock = NULL;
Chao Leng9ebbfe42021-01-14 17:09:26 +08001517err_destroy_mutex:
1518 mutex_destroy(&queue->queue_lock);
Sagi Grimberg3f2304f2018-12-03 17:52:17 -08001519 return ret;
1520}
1521
1522static void nvme_tcp_restore_sock_calls(struct nvme_tcp_queue *queue)
1523{
1524 struct socket *sock = queue->sock;
1525
1526 write_lock_bh(&sock->sk->sk_callback_lock);
1527 sock->sk->sk_user_data = NULL;
1528 sock->sk->sk_data_ready = queue->data_ready;
1529 sock->sk->sk_state_change = queue->state_change;
1530 sock->sk->sk_write_space = queue->write_space;
1531 write_unlock_bh(&sock->sk->sk_callback_lock);
1532}
1533
1534static void __nvme_tcp_stop_queue(struct nvme_tcp_queue *queue)
1535{
1536 kernel_sock_shutdown(queue->sock, SHUT_RDWR);
1537 nvme_tcp_restore_sock_calls(queue);
1538 cancel_work_sync(&queue->io_work);
1539}
1540
1541static void nvme_tcp_stop_queue(struct nvme_ctrl *nctrl, int qid)
1542{
1543 struct nvme_tcp_ctrl *ctrl = to_tcp_ctrl(nctrl);
1544 struct nvme_tcp_queue *queue = &ctrl->queues[qid];
1545
Chao Leng9ebbfe42021-01-14 17:09:26 +08001546 mutex_lock(&queue->queue_lock);
1547 if (test_and_clear_bit(NVME_TCP_Q_LIVE, &queue->flags))
1548 __nvme_tcp_stop_queue(queue);
1549 mutex_unlock(&queue->queue_lock);
Sagi Grimberg3f2304f2018-12-03 17:52:17 -08001550}
1551
1552static int nvme_tcp_start_queue(struct nvme_ctrl *nctrl, int idx)
1553{
1554 struct nvme_tcp_ctrl *ctrl = to_tcp_ctrl(nctrl);
1555 int ret;
1556
1557 if (idx)
Sagi Grimberg26c68222018-12-14 11:06:08 -08001558 ret = nvmf_connect_io_queue(nctrl, idx, false);
Sagi Grimberg3f2304f2018-12-03 17:52:17 -08001559 else
1560 ret = nvmf_connect_admin_queue(nctrl);
1561
1562 if (!ret) {
1563 set_bit(NVME_TCP_Q_LIVE, &ctrl->queues[idx].flags);
1564 } else {
Sagi Grimbergf34e2582019-04-29 16:25:48 -07001565 if (test_bit(NVME_TCP_Q_ALLOCATED, &ctrl->queues[idx].flags))
1566 __nvme_tcp_stop_queue(&ctrl->queues[idx]);
Sagi Grimberg3f2304f2018-12-03 17:52:17 -08001567 dev_err(nctrl->device,
1568 "failed to connect queue: %d ret=%d\n", idx, ret);
1569 }
1570 return ret;
1571}
1572
1573static struct blk_mq_tag_set *nvme_tcp_alloc_tagset(struct nvme_ctrl *nctrl,
1574 bool admin)
1575{
1576 struct nvme_tcp_ctrl *ctrl = to_tcp_ctrl(nctrl);
1577 struct blk_mq_tag_set *set;
1578 int ret;
1579
1580 if (admin) {
1581 set = &ctrl->admin_tag_set;
1582 memset(set, 0, sizeof(*set));
1583 set->ops = &nvme_tcp_admin_mq_ops;
1584 set->queue_depth = NVME_AQ_MQ_TAG_DEPTH;
Christoph Hellwiged01fee2021-03-03 13:28:22 +01001585 set->reserved_tags = NVMF_RESERVED_TAGS;
Max Gurtovoy610c8232020-06-16 12:34:24 +03001586 set->numa_node = nctrl->numa_node;
Sagi Grimbergdb5ad6b2020-05-01 14:25:45 -07001587 set->flags = BLK_MQ_F_BLOCKING;
Sagi Grimberg3f2304f2018-12-03 17:52:17 -08001588 set->cmd_size = sizeof(struct nvme_tcp_request);
1589 set->driver_data = ctrl;
1590 set->nr_hw_queues = 1;
Chaitanya Kulkarnidc96f932020-11-09 16:33:45 -08001591 set->timeout = NVME_ADMIN_TIMEOUT;
Sagi Grimberg3f2304f2018-12-03 17:52:17 -08001592 } else {
1593 set = &ctrl->tag_set;
1594 memset(set, 0, sizeof(*set));
1595 set->ops = &nvme_tcp_mq_ops;
1596 set->queue_depth = nctrl->sqsize + 1;
Christoph Hellwiged01fee2021-03-03 13:28:22 +01001597 set->reserved_tags = NVMF_RESERVED_TAGS;
Max Gurtovoy610c8232020-06-16 12:34:24 +03001598 set->numa_node = nctrl->numa_node;
Sagi Grimbergdb5ad6b2020-05-01 14:25:45 -07001599 set->flags = BLK_MQ_F_SHOULD_MERGE | BLK_MQ_F_BLOCKING;
Sagi Grimberg3f2304f2018-12-03 17:52:17 -08001600 set->cmd_size = sizeof(struct nvme_tcp_request);
1601 set->driver_data = ctrl;
1602 set->nr_hw_queues = nctrl->queue_count - 1;
1603 set->timeout = NVME_IO_TIMEOUT;
Sagi Grimberg1a9460c2019-07-03 14:08:04 -07001604 set->nr_maps = nctrl->opts->nr_poll_queues ? HCTX_MAX_TYPES : 2;
Sagi Grimberg3f2304f2018-12-03 17:52:17 -08001605 }
1606
1607 ret = blk_mq_alloc_tag_set(set);
1608 if (ret)
1609 return ERR_PTR(ret);
1610
1611 return set;
1612}
1613
1614static void nvme_tcp_free_admin_queue(struct nvme_ctrl *ctrl)
1615{
1616 if (to_tcp_ctrl(ctrl)->async_req.pdu) {
David Milburnceb1e082020-09-02 17:42:53 -05001617 cancel_work_sync(&ctrl->async_event_work);
Sagi Grimberg3f2304f2018-12-03 17:52:17 -08001618 nvme_tcp_free_async_req(to_tcp_ctrl(ctrl));
1619 to_tcp_ctrl(ctrl)->async_req.pdu = NULL;
1620 }
1621
1622 nvme_tcp_free_queue(ctrl, 0);
1623}
1624
1625static void nvme_tcp_free_io_queues(struct nvme_ctrl *ctrl)
1626{
1627 int i;
1628
1629 for (i = 1; i < ctrl->queue_count; i++)
1630 nvme_tcp_free_queue(ctrl, i);
1631}
1632
1633static void nvme_tcp_stop_io_queues(struct nvme_ctrl *ctrl)
1634{
1635 int i;
1636
1637 for (i = 1; i < ctrl->queue_count; i++)
1638 nvme_tcp_stop_queue(ctrl, i);
1639}
1640
static int nvme_tcp_start_io_queues(struct nvme_ctrl *ctrl)
{
	int i, ret = 0;

	for (i = 1; i < ctrl->queue_count; i++) {
		ret = nvme_tcp_start_queue(ctrl, i);
		if (ret)
			goto out_stop_queues;
	}

	return 0;

out_stop_queues:
	for (i--; i >= 1; i--)
		nvme_tcp_stop_queue(ctrl, i);
	return ret;
}

static int nvme_tcp_alloc_admin_queue(struct nvme_ctrl *ctrl)
{
	int ret;

	ret = nvme_tcp_alloc_queue(ctrl, 0, NVME_AQ_DEPTH);
	if (ret)
		return ret;

	ret = nvme_tcp_alloc_async_req(to_tcp_ctrl(ctrl));
	if (ret)
		goto out_free_queue;

	return 0;

out_free_queue:
	nvme_tcp_free_queue(ctrl, 0);
	return ret;
}

static int __nvme_tcp_alloc_io_queues(struct nvme_ctrl *ctrl)
{
	int i, ret;

	for (i = 1; i < ctrl->queue_count; i++) {
		ret = nvme_tcp_alloc_queue(ctrl, i,
				ctrl->sqsize + 1);
		if (ret)
			goto out_free_queues;
	}

	return 0;

out_free_queues:
	for (i--; i >= 1; i--)
		nvme_tcp_free_queue(ctrl, i);

	return ret;
}

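/*
 * Worked example (illustrative numbers): on an 8-CPU host with
 * nr_io_queues=4, nr_write_queues=2 and nr_poll_queues=2, the sums
 * below request min(4,8) + min(2,8) + min(2,8) = 8 I/O queues. The
 * target may still grant fewer via nvme_set_queue_count().
 */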
static unsigned int nvme_tcp_nr_io_queues(struct nvme_ctrl *ctrl)
{
	unsigned int nr_io_queues;

	nr_io_queues = min(ctrl->opts->nr_io_queues, num_online_cpus());
	nr_io_queues += min(ctrl->opts->nr_write_queues, num_online_cpus());
	nr_io_queues += min(ctrl->opts->nr_poll_queues, num_online_cpus());

	return nr_io_queues;
}

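/*
 * Distribute the granted queue count over the hctx types. Continuing
 * the example above: if all 8 queues are granted, reads get their own
 * set (HCTX_TYPE_READ = 4), writes land on the default set
 * (HCTX_TYPE_DEFAULT = 2), and HCTX_TYPE_POLL gets the remaining 2. If
 * the grant leaves no room for dedicated default queues, reads and
 * writes share the default set.
 */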
static void nvme_tcp_set_io_queues(struct nvme_ctrl *nctrl,
		unsigned int nr_io_queues)
{
	struct nvme_tcp_ctrl *ctrl = to_tcp_ctrl(nctrl);
	struct nvmf_ctrl_options *opts = nctrl->opts;

	if (opts->nr_write_queues && opts->nr_io_queues < nr_io_queues) {
		/*
		 * separate read/write queues
		 * hand out dedicated default queues only after we have
		 * sufficient read queues.
		 */
		ctrl->io_queues[HCTX_TYPE_READ] = opts->nr_io_queues;
		nr_io_queues -= ctrl->io_queues[HCTX_TYPE_READ];
		ctrl->io_queues[HCTX_TYPE_DEFAULT] =
			min(opts->nr_write_queues, nr_io_queues);
		nr_io_queues -= ctrl->io_queues[HCTX_TYPE_DEFAULT];
	} else {
		/*
		 * shared read/write queues
		 * either no write queues were requested, or we don't have
		 * sufficient queue count to have dedicated default queues.
		 */
		ctrl->io_queues[HCTX_TYPE_DEFAULT] =
			min(opts->nr_io_queues, nr_io_queues);
		nr_io_queues -= ctrl->io_queues[HCTX_TYPE_DEFAULT];
	}

	if (opts->nr_poll_queues && nr_io_queues) {
		/* map dedicated poll queues only if we have queues left */
		ctrl->io_queues[HCTX_TYPE_POLL] =
			min(opts->nr_poll_queues, nr_io_queues);
	}
}

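/*
 * Negotiate the I/O queue count with the target (nvme_set_queue_count
 * issues a Set Features/Number of Queues command), then size
 * queue_count to the grant plus one for the admin queue, distribute the
 * grant over the hctx types, and allocate the per-queue sockets.
 */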
static int nvme_tcp_alloc_io_queues(struct nvme_ctrl *ctrl)
{
	unsigned int nr_io_queues;
	int ret;

	nr_io_queues = nvme_tcp_nr_io_queues(ctrl);
	ret = nvme_set_queue_count(ctrl, &nr_io_queues);
	if (ret)
		return ret;

	ctrl->queue_count = nr_io_queues + 1;
	if (ctrl->queue_count < 2)
		return 0;

	dev_info(ctrl->device,
		"creating %d I/O queues.\n", nr_io_queues);

	nvme_tcp_set_io_queues(ctrl, nr_io_queues);

	return __nvme_tcp_alloc_io_queues(ctrl);
}

static void nvme_tcp_destroy_io_queues(struct nvme_ctrl *ctrl, bool remove)
{
	nvme_tcp_stop_io_queues(ctrl);
	if (remove) {
		blk_cleanup_queue(ctrl->connect_q);
		blk_mq_free_tag_set(ctrl->tagset);
	}
	nvme_tcp_free_io_queues(ctrl);
}

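/*
 * I/O queue bring-up. For a new controller this also allocates the
 * shared tag set and the connect_q used to issue fabrics connect
 * commands. On a reset/reconnect the queue count may have changed, so
 * the frozen namespace queues are restarted, blk-mq is resized to the
 * new count, and only then unfrozen; if the freeze never completes we
 * fail the bring-up rather than risk getting stuck.
 */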
static int nvme_tcp_configure_io_queues(struct nvme_ctrl *ctrl, bool new)
{
	int ret;

	ret = nvme_tcp_alloc_io_queues(ctrl);
	if (ret)
		return ret;

	if (new) {
		ctrl->tagset = nvme_tcp_alloc_tagset(ctrl, false);
		if (IS_ERR(ctrl->tagset)) {
			ret = PTR_ERR(ctrl->tagset);
			goto out_free_io_queues;
		}

		ctrl->connect_q = blk_mq_init_queue(ctrl->tagset);
		if (IS_ERR(ctrl->connect_q)) {
			ret = PTR_ERR(ctrl->connect_q);
			goto out_free_tag_set;
		}
	}

	ret = nvme_tcp_start_io_queues(ctrl);
	if (ret)
		goto out_cleanup_connect_q;

	if (!new) {
		nvme_start_queues(ctrl);
		if (!nvme_wait_freeze_timeout(ctrl, NVME_IO_TIMEOUT)) {
			/*
			 * If we timed out waiting for freeze we are likely to
			 * be stuck.  Fail the controller initialization just
			 * to be safe.
			 */
			ret = -ENODEV;
			goto out_wait_freeze_timed_out;
		}
		blk_mq_update_nr_hw_queues(ctrl->tagset,
			ctrl->queue_count - 1);
		nvme_unfreeze(ctrl);
	}

	return 0;

out_wait_freeze_timed_out:
	nvme_stop_queues(ctrl);
	nvme_sync_io_queues(ctrl);
	nvme_tcp_stop_io_queues(ctrl);
out_cleanup_connect_q:
	nvme_cancel_tagset(ctrl);
	if (new)
		blk_cleanup_queue(ctrl->connect_q);
out_free_tag_set:
	if (new)
		blk_mq_free_tag_set(ctrl->tagset);
out_free_io_queues:
	nvme_tcp_free_io_queues(ctrl);
	return ret;
}

static void nvme_tcp_destroy_admin_queue(struct nvme_ctrl *ctrl, bool remove)
{
	nvme_tcp_stop_queue(ctrl, 0);
	if (remove) {
		blk_cleanup_queue(ctrl->admin_q);
		blk_cleanup_queue(ctrl->fabrics_q);
		blk_mq_free_tag_set(ctrl->admin_tagset);
	}
	nvme_tcp_free_admin_queue(ctrl);
}

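/*
 * Admin queue bring-up order matters: allocate and connect queue 0,
 * enable the controller (CC.EN), unquiesce the admin queue, then
 * identify the controller. Each error label below unwinds exactly the
 * steps taken before the failure.
 */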
static int nvme_tcp_configure_admin_queue(struct nvme_ctrl *ctrl, bool new)
{
	int error;

	error = nvme_tcp_alloc_admin_queue(ctrl);
	if (error)
		return error;

	if (new) {
		ctrl->admin_tagset = nvme_tcp_alloc_tagset(ctrl, true);
		if (IS_ERR(ctrl->admin_tagset)) {
			error = PTR_ERR(ctrl->admin_tagset);
			goto out_free_queue;
		}

		ctrl->fabrics_q = blk_mq_init_queue(ctrl->admin_tagset);
		if (IS_ERR(ctrl->fabrics_q)) {
			error = PTR_ERR(ctrl->fabrics_q);
			goto out_free_tagset;
		}

		ctrl->admin_q = blk_mq_init_queue(ctrl->admin_tagset);
		if (IS_ERR(ctrl->admin_q)) {
			error = PTR_ERR(ctrl->admin_q);
			goto out_cleanup_fabrics_q;
		}
	}

	error = nvme_tcp_start_queue(ctrl, 0);
	if (error)
		goto out_cleanup_queue;

	error = nvme_enable_ctrl(ctrl);
	if (error)
		goto out_stop_queue;

	blk_mq_unquiesce_queue(ctrl->admin_q);

	error = nvme_init_identify(ctrl);
	if (error)
		goto out_quiesce_queue;

	return 0;

out_quiesce_queue:
	blk_mq_quiesce_queue(ctrl->admin_q);
	blk_sync_queue(ctrl->admin_q);
out_stop_queue:
	nvme_tcp_stop_queue(ctrl, 0);
	nvme_cancel_admin_tagset(ctrl);
out_cleanup_queue:
	if (new)
		blk_cleanup_queue(ctrl->admin_q);
out_cleanup_fabrics_q:
	if (new)
		blk_cleanup_queue(ctrl->fabrics_q);
out_free_tagset:
	if (new)
		blk_mq_free_tag_set(ctrl->admin_tagset);
out_free_queue:
	nvme_tcp_free_admin_queue(ctrl);
	return error;
}

static void nvme_tcp_teardown_admin_queue(struct nvme_ctrl *ctrl,
		bool remove)
{
	blk_mq_quiesce_queue(ctrl->admin_q);
	blk_sync_queue(ctrl->admin_q);
	nvme_tcp_stop_queue(ctrl, 0);
	nvme_cancel_admin_tagset(ctrl);
	if (remove)
		blk_mq_unquiesce_queue(ctrl->admin_q);
	nvme_tcp_destroy_admin_queue(ctrl, remove);
}

static void nvme_tcp_teardown_io_queues(struct nvme_ctrl *ctrl,
		bool remove)
{
	if (ctrl->queue_count <= 1)
		return;
	blk_mq_quiesce_queue(ctrl->admin_q);
	nvme_start_freeze(ctrl);
	nvme_stop_queues(ctrl);
	nvme_sync_io_queues(ctrl);
	nvme_tcp_stop_io_queues(ctrl);
	nvme_cancel_tagset(ctrl);
	if (remove)
		nvme_start_queues(ctrl);
	nvme_tcp_destroy_io_queues(ctrl, remove);
}

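/*
 * Reconnect policy sketch: nvmf_should_reconnect() keeps allowing
 * retries while nr_reconnects * reconnect_delay stays within
 * ctrl_loss_tmo (a negative ctrl_loss_tmo means retry forever). With
 * the usual defaults of a 10s delay and a 600s loss timeout, that is
 * roughly 60 attempts before the controller is removed.
 */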
static void nvme_tcp_reconnect_or_remove(struct nvme_ctrl *ctrl)
{
	/* If we are resetting/deleting then do nothing */
	if (ctrl->state != NVME_CTRL_CONNECTING) {
		WARN_ON_ONCE(ctrl->state == NVME_CTRL_NEW ||
			ctrl->state == NVME_CTRL_LIVE);
		return;
	}

	if (nvmf_should_reconnect(ctrl)) {
		dev_info(ctrl->device, "Reconnecting in %d seconds...\n",
			ctrl->opts->reconnect_delay);
		queue_delayed_work(nvme_wq, &to_tcp_ctrl(ctrl)->connect_work,
				ctrl->opts->reconnect_delay * HZ);
	} else {
		dev_info(ctrl->device, "Removing controller...\n");
		nvme_delete_ctrl(ctrl);
	}
}

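/*
 * Common setup path for create, reset and reconnect. Note the two
 * clamps below: an oversized queue_size only produces a warning (sqsize
 * already reflects what was negotiated), while sqsize is hard-clamped
 * to MAXCMD; e.g. sqsize+1 = 128 against maxcmd = 64 ends up with
 * sqsize = 63.
 */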
static int nvme_tcp_setup_ctrl(struct nvme_ctrl *ctrl, bool new)
{
	struct nvmf_ctrl_options *opts = ctrl->opts;
	int ret;

	ret = nvme_tcp_configure_admin_queue(ctrl, new);
	if (ret)
		return ret;

	if (ctrl->icdoff) {
		/* propagate an error; otherwise this path returns success */
		ret = -EOPNOTSUPP;
		dev_err(ctrl->device, "icdoff is not supported!\n");
		goto destroy_admin;
	}

	if (opts->queue_size > ctrl->sqsize + 1)
		dev_warn(ctrl->device,
			"queue_size %zu > ctrl sqsize %u, clamping down\n",
			opts->queue_size, ctrl->sqsize + 1);

	if (ctrl->sqsize + 1 > ctrl->maxcmd) {
		dev_warn(ctrl->device,
			"sqsize %u > ctrl maxcmd %u, clamping down\n",
			ctrl->sqsize + 1, ctrl->maxcmd);
		ctrl->sqsize = ctrl->maxcmd - 1;
	}

	if (ctrl->queue_count > 1) {
		ret = nvme_tcp_configure_io_queues(ctrl, new);
		if (ret)
			goto destroy_admin;
	}

	if (!nvme_change_ctrl_state(ctrl, NVME_CTRL_LIVE)) {
		/*
		 * state change failure is ok if we started ctrl delete,
		 * unless we are in the middle of creating a new controller,
		 * to avoid races with the teardown flow.
		 */
		WARN_ON_ONCE(ctrl->state != NVME_CTRL_DELETING &&
			     ctrl->state != NVME_CTRL_DELETING_NOIO);
		WARN_ON_ONCE(new);
		ret = -EINVAL;
		goto destroy_io;
	}

	nvme_start_ctrl(ctrl);
	return 0;

destroy_io:
	if (ctrl->queue_count > 1) {
		nvme_stop_queues(ctrl);
		nvme_sync_io_queues(ctrl);
		nvme_tcp_stop_io_queues(ctrl);
		nvme_cancel_tagset(ctrl);
		nvme_tcp_destroy_io_queues(ctrl, new);
	}
destroy_admin:
	blk_mq_quiesce_queue(ctrl->admin_q);
	blk_sync_queue(ctrl->admin_q);
	nvme_tcp_stop_queue(ctrl, 0);
	nvme_cancel_admin_tagset(ctrl);
	nvme_tcp_destroy_admin_queue(ctrl, new);
	return ret;
}

static void nvme_tcp_reconnect_ctrl_work(struct work_struct *work)
{
	struct nvme_tcp_ctrl *tcp_ctrl = container_of(to_delayed_work(work),
			struct nvme_tcp_ctrl, connect_work);
	struct nvme_ctrl *ctrl = &tcp_ctrl->ctrl;

	++ctrl->nr_reconnects;

	if (nvme_tcp_setup_ctrl(ctrl, false))
		goto requeue;

	dev_info(ctrl->device, "Successfully reconnected (%d attempt)\n",
			ctrl->nr_reconnects);

	ctrl->nr_reconnects = 0;

	return;

requeue:
	dev_info(ctrl->device, "Failed reconnect attempt %d\n",
			ctrl->nr_reconnects);
	nvme_tcp_reconnect_or_remove(ctrl);
}

static void nvme_tcp_error_recovery_work(struct work_struct *work)
{
	struct nvme_tcp_ctrl *tcp_ctrl = container_of(work,
				struct nvme_tcp_ctrl, err_work);
	struct nvme_ctrl *ctrl = &tcp_ctrl->ctrl;

	nvme_stop_keep_alive(ctrl);
	nvme_tcp_teardown_io_queues(ctrl, false);
	/* unquiesce so that pending requests fail fast */
	nvme_start_queues(ctrl);
	nvme_tcp_teardown_admin_queue(ctrl, false);
	blk_mq_unquiesce_queue(ctrl->admin_q);

	if (!nvme_change_ctrl_state(ctrl, NVME_CTRL_CONNECTING)) {
		/* state change failure is ok if we started ctrl delete */
		WARN_ON_ONCE(ctrl->state != NVME_CTRL_DELETING &&
			     ctrl->state != NVME_CTRL_DELETING_NOIO);
		return;
	}

	nvme_tcp_reconnect_or_remove(ctrl);
}

static void nvme_tcp_teardown_ctrl(struct nvme_ctrl *ctrl, bool shutdown)
{
	cancel_work_sync(&to_tcp_ctrl(ctrl)->err_work);
	cancel_delayed_work_sync(&to_tcp_ctrl(ctrl)->connect_work);

	nvme_tcp_teardown_io_queues(ctrl, shutdown);
	blk_mq_quiesce_queue(ctrl->admin_q);
	if (shutdown)
		nvme_shutdown_ctrl(ctrl);
	else
		nvme_disable_ctrl(ctrl);
	nvme_tcp_teardown_admin_queue(ctrl, shutdown);
}

static void nvme_tcp_delete_ctrl(struct nvme_ctrl *ctrl)
{
	nvme_tcp_teardown_ctrl(ctrl, true);
}

static void nvme_reset_ctrl_work(struct work_struct *work)
{
	struct nvme_ctrl *ctrl =
		container_of(work, struct nvme_ctrl, reset_work);

	nvme_stop_ctrl(ctrl);
	nvme_tcp_teardown_ctrl(ctrl, false);

	if (!nvme_change_ctrl_state(ctrl, NVME_CTRL_CONNECTING)) {
		/* state change failure is ok if we started ctrl delete */
		WARN_ON_ONCE(ctrl->state != NVME_CTRL_DELETING &&
			     ctrl->state != NVME_CTRL_DELETING_NOIO);
		return;
	}

	if (nvme_tcp_setup_ctrl(ctrl, false))
		goto out_fail;

	return;

out_fail:
	++ctrl->nr_reconnects;
	nvme_tcp_reconnect_or_remove(ctrl);
}

static void nvme_tcp_free_ctrl(struct nvme_ctrl *nctrl)
{
	struct nvme_tcp_ctrl *ctrl = to_tcp_ctrl(nctrl);

	if (list_empty(&ctrl->list))
		goto free_ctrl;

	mutex_lock(&nvme_tcp_ctrl_mutex);
	list_del(&ctrl->list);
	mutex_unlock(&nvme_tcp_ctrl_mutex);

	nvmf_free_options(nctrl->opts);
free_ctrl:
	kfree(ctrl->queues);
	kfree(ctrl);
}

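/*
 * NVMe/TCP uses three SGL descriptor shapes in the command capsule: a
 * null transport descriptor for commands without data, an in-capsule
 * "offset" data descriptor for inline writes (the payload follows the
 * command PDU; this host requires the controller's ICDOFF to be 0),
 * and a transport data descriptor for everything else, where the data
 * moves in separate H2CData/C2HData PDUs.
 */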
static void nvme_tcp_set_sg_null(struct nvme_command *c)
{
	struct nvme_sgl_desc *sg = &c->common.dptr.sgl;

	sg->addr = 0;
	sg->length = 0;
	sg->type = (NVME_TRANSPORT_SGL_DATA_DESC << 4) |
			NVME_SGL_FMT_TRANSPORT_A;
}

static void nvme_tcp_set_sg_inline(struct nvme_tcp_queue *queue,
		struct nvme_command *c, u32 data_len)
{
	struct nvme_sgl_desc *sg = &c->common.dptr.sgl;

	sg->addr = cpu_to_le64(queue->ctrl->ctrl.icdoff);
	sg->length = cpu_to_le32(data_len);
	sg->type = (NVME_SGL_FMT_DATA_DESC << 4) | NVME_SGL_FMT_OFFSET;
}

static void nvme_tcp_set_sg_host_data(struct nvme_command *c,
		u32 data_len)
{
	struct nvme_sgl_desc *sg = &c->common.dptr.sgl;

	sg->addr = 0;
	sg->length = cpu_to_le32(data_len);
	sg->type = (NVME_TRANSPORT_SGL_DATA_DESC << 4) |
			NVME_SGL_FMT_TRANSPORT_A;
}

static void nvme_tcp_submit_async_event(struct nvme_ctrl *arg)
{
	struct nvme_tcp_ctrl *ctrl = to_tcp_ctrl(arg);
	struct nvme_tcp_queue *queue = &ctrl->queues[0];
	struct nvme_tcp_cmd_pdu *pdu = ctrl->async_req.pdu;
	struct nvme_command *cmd = &pdu->cmd;
	u8 hdgst = nvme_tcp_hdgst_len(queue);

	memset(pdu, 0, sizeof(*pdu));
	pdu->hdr.type = nvme_tcp_cmd;
	if (queue->hdr_digest)
		pdu->hdr.flags |= NVME_TCP_F_HDGST;
	pdu->hdr.hlen = sizeof(*pdu);
	pdu->hdr.plen = cpu_to_le32(pdu->hdr.hlen + hdgst);

	cmd->common.opcode = nvme_admin_async_event;
	cmd->common.command_id = NVME_AQ_BLK_MQ_DEPTH;
	cmd->common.flags |= NVME_CMD_SGL_METABUF;
	nvme_tcp_set_sg_null(cmd);

	ctrl->async_req.state = NVME_TCP_SEND_CMD_PDU;
	ctrl->async_req.offset = 0;
	ctrl->async_req.curr_bio = NULL;
	ctrl->async_req.data_len = 0;

	nvme_tcp_queue_request(&ctrl->async_req, true, true);
}

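/*
 * Complete a timed-out request while the controller is not LIVE: stop
 * the socket queue first so no concurrent completion can race with us,
 * then fail the request with a host-aborted status if it is still in
 * flight.
 */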
static void nvme_tcp_complete_timed_out(struct request *rq)
{
	struct nvme_tcp_request *req = blk_mq_rq_to_pdu(rq);
	struct nvme_ctrl *ctrl = &req->queue->ctrl->ctrl;

	nvme_tcp_stop_queue(ctrl, nvme_tcp_queue_id(req->queue));
	if (blk_mq_request_started(rq) && !blk_mq_request_completed(rq)) {
		nvme_req(rq)->status = NVME_SC_HOST_ABORTED_CMD;
		blk_mq_complete_request(rq);
	}
}

static enum blk_eh_timer_return
nvme_tcp_timeout(struct request *rq, bool reserved)
{
	struct nvme_tcp_request *req = blk_mq_rq_to_pdu(rq);
	struct nvme_ctrl *ctrl = &req->queue->ctrl->ctrl;
	struct nvme_tcp_cmd_pdu *pdu = req->pdu;

	dev_warn(ctrl->device,
		"queue %d: timeout request %#x type %d\n",
		nvme_tcp_queue_id(req->queue), rq->tag, pdu->hdr.type);

	if (ctrl->state != NVME_CTRL_LIVE) {
		/*
		 * If we are resetting, connecting or deleting we should
		 * complete immediately because we may block the controller
		 * teardown or setup sequence:
		 * - ctrl disable/shutdown fabrics requests
		 * - connect requests
		 * - initialization admin requests
		 * - I/O requests that entered after unquiescing and
		 *   the controller stopped responding
		 *
		 * All other requests should be cancelled by the error
		 * recovery work, so it's fine that we fail them here.
		 */
		nvme_tcp_complete_timed_out(rq);
		return BLK_EH_DONE;
	}

	/*
	 * LIVE state should trigger the normal error recovery which will
	 * handle completing this request.
	 */
	nvme_tcp_error_recovery(ctrl);
	return BLK_EH_RESET_TIMER;
}

static blk_status_t nvme_tcp_map_data(struct nvme_tcp_queue *queue,
			struct request *rq)
{
	struct nvme_tcp_request *req = blk_mq_rq_to_pdu(rq);
	struct nvme_tcp_cmd_pdu *pdu = req->pdu;
	struct nvme_command *c = &pdu->cmd;

	c->common.flags |= NVME_CMD_SGL_METABUF;

	if (!blk_rq_nr_phys_segments(rq))
		nvme_tcp_set_sg_null(c);
	else if (rq_data_dir(rq) == WRITE &&
	    req->data_len <= nvme_tcp_inline_data_size(queue))
		nvme_tcp_set_sg_inline(queue, c, req->data_len);
	else
		nvme_tcp_set_sg_host_data(c, req->data_len);

	return 0;
}

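/*
 * PDU sizing sketch (illustrative numbers, assuming an 8-byte PDU
 * header and a 64-byte NVMe command, i.e. hlen = 72): for a 4KiB
 * inline write with header and data digests enabled, pdo = 72 + 4 = 76
 * and plen = 72 + 4 + 4096 + 4 = 4176. For a command without inline
 * data, plen is just hlen plus the header digest.
 */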
static blk_status_t nvme_tcp_setup_cmd_pdu(struct nvme_ns *ns,
		struct request *rq)
{
	struct nvme_tcp_request *req = blk_mq_rq_to_pdu(rq);
	struct nvme_tcp_cmd_pdu *pdu = req->pdu;
	struct nvme_tcp_queue *queue = req->queue;
	u8 hdgst = nvme_tcp_hdgst_len(queue), ddgst = 0;
	blk_status_t ret;

	ret = nvme_setup_cmd(ns, rq, &pdu->cmd);
	if (ret)
		return ret;

	req->state = NVME_TCP_SEND_CMD_PDU;
	req->offset = 0;
	req->data_sent = 0;
	req->pdu_len = 0;
	req->pdu_sent = 0;
	req->data_len = blk_rq_nr_phys_segments(rq) ?
				blk_rq_payload_bytes(rq) : 0;
	req->curr_bio = rq->bio;
	if (req->curr_bio && req->data_len)
		nvme_tcp_init_iter(req, rq_data_dir(rq));

	if (rq_data_dir(rq) == WRITE &&
	    req->data_len <= nvme_tcp_inline_data_size(queue))
		req->pdu_len = req->data_len;

	pdu->hdr.type = nvme_tcp_cmd;
	pdu->hdr.flags = 0;
	if (queue->hdr_digest)
		pdu->hdr.flags |= NVME_TCP_F_HDGST;
	if (queue->data_digest && req->pdu_len) {
		pdu->hdr.flags |= NVME_TCP_F_DDGST;
		ddgst = nvme_tcp_ddgst_len(queue);
	}
	pdu->hdr.hlen = sizeof(*pdu);
	pdu->hdr.pdo = req->pdu_len ? pdu->hdr.hlen + hdgst : 0;
	pdu->hdr.plen =
		cpu_to_le32(pdu->hdr.hlen + hdgst + req->pdu_len + ddgst);

	ret = nvme_tcp_map_data(queue, rq);
	if (unlikely(ret)) {
		nvme_cleanup_cmd(rq);
		dev_err(queue->ctrl->ctrl.device,
			"Failed to map data (%d)\n", ret);
		return ret;
	}

	return 0;
}

static void nvme_tcp_commit_rqs(struct blk_mq_hw_ctx *hctx)
{
	struct nvme_tcp_queue *queue = hctx->driver_data;

	if (!llist_empty(&queue->req_list))
		queue_work_on(queue->io_cpu, nvme_tcp_wq, &queue->io_work);
}

static blk_status_t nvme_tcp_queue_rq(struct blk_mq_hw_ctx *hctx,
		const struct blk_mq_queue_data *bd)
{
	struct nvme_ns *ns = hctx->queue->queuedata;
	struct nvme_tcp_queue *queue = hctx->driver_data;
	struct request *rq = bd->rq;
	struct nvme_tcp_request *req = blk_mq_rq_to_pdu(rq);
	bool queue_ready = test_bit(NVME_TCP_Q_LIVE, &queue->flags);
	blk_status_t ret;

	if (!nvmf_check_ready(&queue->ctrl->ctrl, rq, queue_ready))
		return nvmf_fail_nonready_command(&queue->ctrl->ctrl, rq);

	ret = nvme_tcp_setup_cmd_pdu(ns, rq);
	if (unlikely(ret))
		return ret;

	blk_mq_start_request(rq);

	nvme_tcp_queue_request(req, true, bd->last);

	return BLK_STS_OK;
}

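/*
 * blk-mq mapping, continuing the earlier 4/2/2 example: the default
 * (write) map covers queues 0-1, the read map queues 2-5, and the poll
 * map queues 6-7. With shared read/write queues both maps point at the
 * same range starting at 0.
 */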
static int nvme_tcp_map_queues(struct blk_mq_tag_set *set)
{
	struct nvme_tcp_ctrl *ctrl = set->driver_data;
	struct nvmf_ctrl_options *opts = ctrl->ctrl.opts;

	if (opts->nr_write_queues && ctrl->io_queues[HCTX_TYPE_READ]) {
		/* separate read/write queues */
		set->map[HCTX_TYPE_DEFAULT].nr_queues =
			ctrl->io_queues[HCTX_TYPE_DEFAULT];
		set->map[HCTX_TYPE_DEFAULT].queue_offset = 0;
		set->map[HCTX_TYPE_READ].nr_queues =
			ctrl->io_queues[HCTX_TYPE_READ];
		set->map[HCTX_TYPE_READ].queue_offset =
			ctrl->io_queues[HCTX_TYPE_DEFAULT];
	} else {
		/* shared read/write queues */
		set->map[HCTX_TYPE_DEFAULT].nr_queues =
			ctrl->io_queues[HCTX_TYPE_DEFAULT];
		set->map[HCTX_TYPE_DEFAULT].queue_offset = 0;
		set->map[HCTX_TYPE_READ].nr_queues =
			ctrl->io_queues[HCTX_TYPE_DEFAULT];
		set->map[HCTX_TYPE_READ].queue_offset = 0;
	}
	blk_mq_map_queues(&set->map[HCTX_TYPE_DEFAULT]);
	blk_mq_map_queues(&set->map[HCTX_TYPE_READ]);

	if (opts->nr_poll_queues && ctrl->io_queues[HCTX_TYPE_POLL]) {
		/* map dedicated poll queues only if we have queues left */
		set->map[HCTX_TYPE_POLL].nr_queues =
			ctrl->io_queues[HCTX_TYPE_POLL];
		set->map[HCTX_TYPE_POLL].queue_offset =
			ctrl->io_queues[HCTX_TYPE_DEFAULT] +
			ctrl->io_queues[HCTX_TYPE_READ];
		blk_mq_map_queues(&set->map[HCTX_TYPE_POLL]);
	}

	dev_info(ctrl->ctrl.device,
		"mapped %d/%d/%d default/read/poll queues.\n",
		ctrl->io_queues[HCTX_TYPE_DEFAULT],
		ctrl->io_queues[HCTX_TYPE_READ],
		ctrl->io_queues[HCTX_TYPE_POLL]);

	return 0;
}

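/*
 * Polling entry for HCTX_TYPE_POLL queues: busy-poll the socket if
 * nothing has arrived yet, then reap received PDUs in the caller's
 * context. NVME_TCP_Q_POLLING tells data_ready not to schedule
 * io_work, so the poller and the workqueue don't contend. Returns the
 * number of completions processed.
 */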
static int nvme_tcp_poll(struct blk_mq_hw_ctx *hctx)
{
	struct nvme_tcp_queue *queue = hctx->driver_data;
	struct sock *sk = queue->sock->sk;

	if (!test_bit(NVME_TCP_Q_LIVE, &queue->flags))
		return 0;

	set_bit(NVME_TCP_Q_POLLING, &queue->flags);
	if (sk_can_busy_loop(sk) && skb_queue_empty_lockless(&sk->sk_receive_queue))
		sk_busy_loop(sk, true);
	nvme_tcp_try_recv(queue);
	clear_bit(NVME_TCP_Q_POLLING, &queue->flags);
	return queue->nr_cqe;
}

static const struct blk_mq_ops nvme_tcp_mq_ops = {
	.queue_rq	= nvme_tcp_queue_rq,
	.commit_rqs	= nvme_tcp_commit_rqs,
	.complete	= nvme_complete_rq,
	.init_request	= nvme_tcp_init_request,
	.exit_request	= nvme_tcp_exit_request,
	.init_hctx	= nvme_tcp_init_hctx,
	.timeout	= nvme_tcp_timeout,
	.map_queues	= nvme_tcp_map_queues,
	.poll		= nvme_tcp_poll,
};

static const struct blk_mq_ops nvme_tcp_admin_mq_ops = {
	.queue_rq	= nvme_tcp_queue_rq,
	.complete	= nvme_complete_rq,
	.init_request	= nvme_tcp_init_request,
	.exit_request	= nvme_tcp_exit_request,
	.init_hctx	= nvme_tcp_init_admin_hctx,
	.timeout	= nvme_tcp_timeout,
};

static const struct nvme_ctrl_ops nvme_tcp_ctrl_ops = {
	.name			= "tcp",
	.module			= THIS_MODULE,
	.flags			= NVME_F_FABRICS,
	.reg_read32		= nvmf_reg_read32,
	.reg_read64		= nvmf_reg_read64,
	.reg_write32		= nvmf_reg_write32,
	.free_ctrl		= nvme_tcp_free_ctrl,
	.submit_async_event	= nvme_tcp_submit_async_event,
	.delete_ctrl		= nvme_tcp_delete_ctrl,
	.get_address		= nvmf_get_address,
};

static bool
nvme_tcp_existing_controller(struct nvmf_ctrl_options *opts)
{
	struct nvme_tcp_ctrl *ctrl;
	bool found = false;

	mutex_lock(&nvme_tcp_ctrl_mutex);
	list_for_each_entry(ctrl, &nvme_tcp_ctrl_list, list) {
		found = nvmf_ip_options_match(&ctrl->ctrl, opts);
		if (found)
			break;
	}
	mutex_unlock(&nvme_tcp_ctrl_mutex);

	return found;
}

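/*
 * Controller creation entry point, reached from the fabrics device. An
 * illustrative user-space trigger (nvme-cli invocation; address and NQN
 * are made up):
 *
 *   nvme connect -t tcp -a 192.168.0.10 -s 4420 \
 *           -n nqn.2014-08.org.nvmexpress:example \
 *           --nr-write-queues=2 --nr-poll-queues=2
 *
 * When no trsvcid is given, the discovery port (8009) is assumed.
 */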
static struct nvme_ctrl *nvme_tcp_create_ctrl(struct device *dev,
		struct nvmf_ctrl_options *opts)
{
	struct nvme_tcp_ctrl *ctrl;
	int ret;

	ctrl = kzalloc(sizeof(*ctrl), GFP_KERNEL);
	if (!ctrl)
		return ERR_PTR(-ENOMEM);

	INIT_LIST_HEAD(&ctrl->list);
	ctrl->ctrl.opts = opts;
	ctrl->ctrl.queue_count = opts->nr_io_queues + opts->nr_write_queues +
				opts->nr_poll_queues + 1;
	ctrl->ctrl.sqsize = opts->queue_size - 1;
	ctrl->ctrl.kato = opts->kato;

	INIT_DELAYED_WORK(&ctrl->connect_work,
			nvme_tcp_reconnect_ctrl_work);
	INIT_WORK(&ctrl->err_work, nvme_tcp_error_recovery_work);
	INIT_WORK(&ctrl->ctrl.reset_work, nvme_reset_ctrl_work);

	if (!(opts->mask & NVMF_OPT_TRSVCID)) {
		opts->trsvcid =
			kstrdup(__stringify(NVME_TCP_DISC_PORT), GFP_KERNEL);
		if (!opts->trsvcid) {
			ret = -ENOMEM;
			goto out_free_ctrl;
		}
		opts->mask |= NVMF_OPT_TRSVCID;
	}

	ret = inet_pton_with_scope(&init_net, AF_UNSPEC,
			opts->traddr, opts->trsvcid, &ctrl->addr);
	if (ret) {
		pr_err("malformed address passed: %s:%s\n",
			opts->traddr, opts->trsvcid);
		goto out_free_ctrl;
	}

	if (opts->mask & NVMF_OPT_HOST_TRADDR) {
		ret = inet_pton_with_scope(&init_net, AF_UNSPEC,
			opts->host_traddr, NULL, &ctrl->src_addr);
		if (ret) {
			pr_err("malformed src address passed: %s\n",
			       opts->host_traddr);
			goto out_free_ctrl;
		}
	}

	if (!opts->duplicate_connect && nvme_tcp_existing_controller(opts)) {
		ret = -EALREADY;
		goto out_free_ctrl;
	}

	ctrl->queues = kcalloc(ctrl->ctrl.queue_count, sizeof(*ctrl->queues),
				GFP_KERNEL);
	if (!ctrl->queues) {
		ret = -ENOMEM;
		goto out_free_ctrl;
	}

	ret = nvme_init_ctrl(&ctrl->ctrl, dev, &nvme_tcp_ctrl_ops, 0);
	if (ret)
		goto out_kfree_queues;

	if (!nvme_change_ctrl_state(&ctrl->ctrl, NVME_CTRL_CONNECTING)) {
		WARN_ON_ONCE(1);
		ret = -EINTR;
		goto out_uninit_ctrl;
	}

	ret = nvme_tcp_setup_ctrl(&ctrl->ctrl, true);
	if (ret)
		goto out_uninit_ctrl;

	dev_info(ctrl->ctrl.device, "new ctrl: NQN \"%s\", addr %pISp\n",
		ctrl->ctrl.opts->subsysnqn, &ctrl->addr);

	mutex_lock(&nvme_tcp_ctrl_mutex);
	list_add_tail(&ctrl->list, &nvme_tcp_ctrl_list);
	mutex_unlock(&nvme_tcp_ctrl_mutex);

	return &ctrl->ctrl;

out_uninit_ctrl:
	nvme_uninit_ctrl(&ctrl->ctrl);
	nvme_put_ctrl(&ctrl->ctrl);
	if (ret > 0)
		ret = -EIO;
	return ERR_PTR(ret);
out_kfree_queues:
	kfree(ctrl->queues);
out_free_ctrl:
	kfree(ctrl);
	return ERR_PTR(ret);
}

static struct nvmf_transport_ops nvme_tcp_transport = {
	.name		= "tcp",
	.module		= THIS_MODULE,
	.required_opts	= NVMF_OPT_TRADDR,
	.allowed_opts	= NVMF_OPT_TRSVCID | NVMF_OPT_RECONNECT_DELAY |
			  NVMF_OPT_HOST_TRADDR | NVMF_OPT_CTRL_LOSS_TMO |
			  NVMF_OPT_HDR_DIGEST | NVMF_OPT_DATA_DIGEST |
			  NVMF_OPT_NR_WRITE_QUEUES | NVMF_OPT_NR_POLL_QUEUES |
			  NVMF_OPT_TOS,
	.create_ctrl	= nvme_tcp_create_ctrl,
};

static int __init nvme_tcp_init_module(void)
{
	nvme_tcp_wq = alloc_workqueue("nvme_tcp_wq",
			WQ_MEM_RECLAIM | WQ_HIGHPRI, 0);
	if (!nvme_tcp_wq)
		return -ENOMEM;

	nvmf_register_transport(&nvme_tcp_transport);
	return 0;
}

static void __exit nvme_tcp_cleanup_module(void)
{
	struct nvme_tcp_ctrl *ctrl;

	nvmf_unregister_transport(&nvme_tcp_transport);

	mutex_lock(&nvme_tcp_ctrl_mutex);
	list_for_each_entry(ctrl, &nvme_tcp_ctrl_list, list)
		nvme_delete_ctrl(&ctrl->ctrl);
	mutex_unlock(&nvme_tcp_ctrl_mutex);
	flush_workqueue(nvme_delete_wq);

	destroy_workqueue(nvme_tcp_wq);
}

module_init(nvme_tcp_init_module);
module_exit(nvme_tcp_cleanup_module);

MODULE_LICENSE("GPL v2");