// SPDX-License-Identifier: GPL-2.0
/*
 * NVMe over Fabrics TCP host.
 * Copyright (c) 2018 Lightbits Labs. All rights reserved.
 */
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/module.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/err.h>
#include <linux/nvme-tcp.h>
#include <net/sock.h>
#include <net/tcp.h>
#include <linux/blk-mq.h>
#include <crypto/hash.h>
#include <net/busy_poll.h>

#include "nvme.h"
#include "fabrics.h"

struct nvme_tcp_queue;

/* Define the socket priority to use for connections where it is desirable
 * that the NIC consider performing optimized packet processing or filtering.
 * A non-zero value is sufficient to indicate general consideration of any
 * possible optimization.  Making it a module param allows for alternative
 * values that may be unique to some NIC implementations.
 */
static int so_priority;
module_param(so_priority, int, 0644);
MODULE_PARM_DESC(so_priority, "nvme tcp socket optimize priority");
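/*
 * Illustrative usage (not part of the original source): the priority can
 * be set at load time, e.g. "modprobe nvme_tcp so_priority=6", or changed
 * later through /sys/module/nvme_tcp/parameters/so_priority since the
 * parameter is 0644.  The value is applied to each queue's socket via
 * sock_set_priority() when the socket is created.
 */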

enum nvme_tcp_send_state {
	NVME_TCP_SEND_CMD_PDU = 0,
	NVME_TCP_SEND_H2C_PDU,
	NVME_TCP_SEND_DATA,
	NVME_TCP_SEND_DDGST,
};
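/*
 * Send-side state of a request: the command PDU always goes out first;
 * write data then follows either inline (SEND_DATA directly after the
 * command capsule) or via SEND_H2C_PDU when the controller sends an R2T,
 * and SEND_DDGST is only entered when data digest is enabled.
 */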

struct nvme_tcp_request {
	struct nvme_request	req;
	void			*pdu;
	struct nvme_tcp_queue	*queue;
	u32			data_len;
	u32			pdu_len;
	u32			pdu_sent;
	u16			ttag;
	struct list_head	entry;
	struct llist_node	lentry;
	__le32			ddgst;

	struct bio		*curr_bio;
	struct iov_iter		iter;

	/* send state */
	size_t			offset;
	size_t			data_sent;
	enum nvme_tcp_send_state state;
};
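/*
 * Note: "entry" links a request on the queue's send_list (consumed only by
 * the sending context), while "lentry" is used for the lockless req_list
 * that submitters push to from any context.  pdu_len/pdu_sent track the
 * current data PDU; data_sent tracks overall payload progress.
 */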

enum nvme_tcp_queue_flags {
	NVME_TCP_Q_ALLOCATED	= 0,
	NVME_TCP_Q_LIVE		= 1,
	NVME_TCP_Q_POLLING	= 2,
};

enum nvme_tcp_recv_state {
	NVME_TCP_RECV_PDU = 0,
	NVME_TCP_RECV_DATA,
	NVME_TCP_RECV_DDGST,
};

struct nvme_tcp_ctrl;
struct nvme_tcp_queue {
	struct socket		*sock;
	struct work_struct	io_work;
	int			io_cpu;

	struct mutex		send_mutex;
	struct llist_head	req_list;
	struct list_head	send_list;
	bool			more_requests;

	/* recv state */
	void			*pdu;
	int			pdu_remaining;
	int			pdu_offset;
	size_t			data_remaining;
	size_t			ddgst_remaining;
	unsigned int		nr_cqe;

	/* send state */
	struct nvme_tcp_request *request;

	int			queue_size;
	size_t			cmnd_capsule_len;
	struct nvme_tcp_ctrl	*ctrl;
	unsigned long		flags;
	bool			rd_enabled;

	bool			hdr_digest;
	bool			data_digest;
	struct ahash_request	*rcv_hash;
	struct ahash_request	*snd_hash;
	__le32			exp_ddgst;
	__le32			recv_ddgst;

	struct page_frag_cache	pf_cache;

	void (*state_change)(struct sock *);
	void (*data_ready)(struct sock *);
	void (*write_space)(struct sock *);
};
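/*
 * Submission path: requests are first pushed onto the lockless req_list
 * and are only spliced onto send_list by the context holding send_mutex
 * (io_work, or a submitter that managed a direct send).  more_requests
 * tells the send path that a batch is still being queued so it can keep
 * MSG_MORE set on the socket.
 */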

struct nvme_tcp_ctrl {
	/* read only in the hot path */
	struct nvme_tcp_queue	*queues;
	struct blk_mq_tag_set	tag_set;

	/* other member variables */
	struct list_head	list;
	struct blk_mq_tag_set	admin_tag_set;
	struct sockaddr_storage addr;
	struct sockaddr_storage src_addr;
	struct nvme_ctrl	ctrl;

	struct mutex		teardown_lock;
	struct work_struct	err_work;
	struct delayed_work	connect_work;
	struct nvme_tcp_request async_req;
	u32			io_queues[HCTX_MAX_TYPES];
};

static LIST_HEAD(nvme_tcp_ctrl_list);
static DEFINE_MUTEX(nvme_tcp_ctrl_mutex);
static struct workqueue_struct *nvme_tcp_wq;
static const struct blk_mq_ops nvme_tcp_mq_ops;
static const struct blk_mq_ops nvme_tcp_admin_mq_ops;
static int nvme_tcp_try_send(struct nvme_tcp_queue *queue);

141static inline struct nvme_tcp_ctrl *to_tcp_ctrl(struct nvme_ctrl *ctrl)
142{
143 return container_of(ctrl, struct nvme_tcp_ctrl, ctrl);
144}
145
146static inline int nvme_tcp_queue_id(struct nvme_tcp_queue *queue)
147{
148 return queue - queue->ctrl->queues;
149}
150
151static inline struct blk_mq_tags *nvme_tcp_tagset(struct nvme_tcp_queue *queue)
152{
153 u32 queue_idx = nvme_tcp_queue_id(queue);
154
155 if (queue_idx == 0)
156 return queue->ctrl->admin_tag_set.tags[queue_idx];
157 return queue->ctrl->tag_set.tags[queue_idx - 1];
158}
159
160static inline u8 nvme_tcp_hdgst_len(struct nvme_tcp_queue *queue)
161{
162 return queue->hdr_digest ? NVME_TCP_DIGEST_LENGTH : 0;
163}
164
165static inline u8 nvme_tcp_ddgst_len(struct nvme_tcp_queue *queue)
166{
167 return queue->data_digest ? NVME_TCP_DIGEST_LENGTH : 0;
168}
169
170static inline size_t nvme_tcp_inline_data_size(struct nvme_tcp_queue *queue)
171{
172 return queue->cmnd_capsule_len - sizeof(struct nvme_command);
173}
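/*
 * Example (illustrative values): an I/O queue created with ioccsz = 8 has
 * a 128-byte command capsule, leaving 128 - 64 = 64 bytes of inline write
 * data; the admin queue always allows NVME_TCP_ADMIN_CCSZ bytes on top of
 * the 64-byte command.
 */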
174
175static inline bool nvme_tcp_async_req(struct nvme_tcp_request *req)
176{
177 return req == &req->queue->ctrl->async_req;
178}
179
180static inline bool nvme_tcp_has_inline_data(struct nvme_tcp_request *req)
181{
182 struct request *rq;
Sagi Grimberg3f2304f2018-12-03 17:52:17 -0800183
184 if (unlikely(nvme_tcp_async_req(req)))
185 return false; /* async events don't have a request */
186
187 rq = blk_mq_rq_from_pdu(req);
Sagi Grimberg3f2304f2018-12-03 17:52:17 -0800188
Sagi Grimberg25e5cb72020-03-23 15:06:30 -0700189 return rq_data_dir(rq) == WRITE && req->data_len &&
190 req->data_len <= nvme_tcp_inline_data_size(req->queue);
Sagi Grimberg3f2304f2018-12-03 17:52:17 -0800191}
192
193static inline struct page *nvme_tcp_req_cur_page(struct nvme_tcp_request *req)
194{
195 return req->iter.bvec->bv_page;
196}
197
198static inline size_t nvme_tcp_req_cur_offset(struct nvme_tcp_request *req)
199{
200 return req->iter.bvec->bv_offset + req->iter.iov_offset;
201}
202
203static inline size_t nvme_tcp_req_cur_length(struct nvme_tcp_request *req)
204{
205 return min_t(size_t, req->iter.bvec->bv_len - req->iter.iov_offset,
206 req->pdu_len - req->pdu_sent);
207}
208
209static inline size_t nvme_tcp_req_offset(struct nvme_tcp_request *req)
210{
211 return req->iter.iov_offset;
212}
213
214static inline size_t nvme_tcp_pdu_data_left(struct nvme_tcp_request *req)
215{
216 return rq_data_dir(blk_mq_rq_from_pdu(req)) == WRITE ?
217 req->pdu_len - req->pdu_sent : 0;
218}
219
220static inline size_t nvme_tcp_pdu_last_send(struct nvme_tcp_request *req,
221 int len)
222{
223 return nvme_tcp_pdu_data_left(req) <= len;
224}
225
226static void nvme_tcp_init_iter(struct nvme_tcp_request *req,
227 unsigned int dir)
228{
229 struct request *rq = blk_mq_rq_from_pdu(req);
230 struct bio_vec *vec;
231 unsigned int size;
232 int nsegs;
233 size_t offset;
234
235 if (rq->rq_flags & RQF_SPECIAL_PAYLOAD) {
236 vec = &rq->special_vec;
237 nsegs = 1;
238 size = blk_rq_payload_bytes(rq);
239 offset = 0;
240 } else {
241 struct bio *bio = req->curr_bio;
242
243 vec = __bvec_iter_bvec(bio->bi_io_vec, bio->bi_iter);
244 nsegs = bio_segments(bio);
245 size = bio->bi_iter.bi_size;
246 offset = bio->bi_iter.bi_bvec_done;
247 }
248
249 iov_iter_bvec(&req->iter, dir, vec, nsegs, size);
250 req->iter.iov_offset = offset;
251}
252
253static inline void nvme_tcp_advance_req(struct nvme_tcp_request *req,
254 int len)
255{
256 req->data_sent += len;
257 req->pdu_sent += len;
258 iov_iter_advance(&req->iter, len);
259 if (!iov_iter_count(&req->iter) &&
260 req->data_sent < req->data_len) {
261 req->curr_bio = req->curr_bio->bi_next;
262 nvme_tcp_init_iter(req, WRITE);
263 }
264}
265
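/*
 * Queue a request for transmission.  "sync" permits the caller to attempt
 * a direct send, which only happens when the caller runs on the queue's
 * io_cpu, nothing else is pending and send_mutex is uncontended; "last"
 * marks the end of a submission batch and controls whether io_work must
 * be scheduled.
 */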
Sagi Grimbergdb5ad6b2020-05-01 14:25:45 -0700266static inline void nvme_tcp_queue_request(struct nvme_tcp_request *req,
Sagi Grimberg86f03482020-06-18 17:30:23 -0700267 bool sync, bool last)
Sagi Grimberg3f2304f2018-12-03 17:52:17 -0800268{
269 struct nvme_tcp_queue *queue = req->queue;
Sagi Grimbergdb5ad6b2020-05-01 14:25:45 -0700270 bool empty;
Sagi Grimberg3f2304f2018-12-03 17:52:17 -0800271
Sagi Grimberg15ec9282020-06-18 17:30:22 -0700272 empty = llist_add(&req->lentry, &queue->req_list) &&
273 list_empty(&queue->send_list) && !queue->request;
Sagi Grimberg3f2304f2018-12-03 17:52:17 -0800274
	/*
	 * If we are the first on the send_list, try to send the request
	 * directly; otherwise queue io_work.  Also, only do that if we
	 * are on the same cpu, so we don't introduce contention.
	 */
280 if (queue->io_cpu == smp_processor_id() &&
281 sync && empty && mutex_trylock(&queue->send_mutex)) {
Sagi Grimberg122e5b92020-06-18 17:30:24 -0700282 queue->more_requests = !last;
Sagi Grimbergdb5ad6b2020-05-01 14:25:45 -0700283 nvme_tcp_try_send(queue);
Sagi Grimberg122e5b92020-06-18 17:30:24 -0700284 queue->more_requests = false;
Sagi Grimbergdb5ad6b2020-05-01 14:25:45 -0700285 mutex_unlock(&queue->send_mutex);
Sagi Grimberg86f03482020-06-18 17:30:23 -0700286 } else if (last) {
Sagi Grimbergdb5ad6b2020-05-01 14:25:45 -0700287 queue_work_on(queue->io_cpu, nvme_tcp_wq, &queue->io_work);
288 }
Sagi Grimberg3f2304f2018-12-03 17:52:17 -0800289}
290
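/*
 * llist_del_all() returns the lockless list newest-first; adding each
 * entry at the head of send_list reverses it again, so requests end up
 * being sent in submission order.
 */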
Sagi Grimberg15ec9282020-06-18 17:30:22 -0700291static void nvme_tcp_process_req_list(struct nvme_tcp_queue *queue)
292{
293 struct nvme_tcp_request *req;
294 struct llist_node *node;
295
296 for (node = llist_del_all(&queue->req_list); node; node = node->next) {
297 req = llist_entry(node, struct nvme_tcp_request, lentry);
298 list_add(&req->entry, &queue->send_list);
299 }
300}
301
Sagi Grimberg3f2304f2018-12-03 17:52:17 -0800302static inline struct nvme_tcp_request *
303nvme_tcp_fetch_request(struct nvme_tcp_queue *queue)
304{
305 struct nvme_tcp_request *req;
306
Sagi Grimberg3f2304f2018-12-03 17:52:17 -0800307 req = list_first_entry_or_null(&queue->send_list,
308 struct nvme_tcp_request, entry);
Sagi Grimberg15ec9282020-06-18 17:30:22 -0700309 if (!req) {
310 nvme_tcp_process_req_list(queue);
311 req = list_first_entry_or_null(&queue->send_list,
312 struct nvme_tcp_request, entry);
313 if (unlikely(!req))
314 return NULL;
315 }
Sagi Grimberg3f2304f2018-12-03 17:52:17 -0800316
Sagi Grimberg15ec9282020-06-18 17:30:22 -0700317 list_del(&req->entry);
Sagi Grimberg3f2304f2018-12-03 17:52:17 -0800318 return req;
319}
320
Christoph Hellwiga7273d42018-12-13 09:46:59 +0100321static inline void nvme_tcp_ddgst_final(struct ahash_request *hash,
322 __le32 *dgst)
Sagi Grimberg3f2304f2018-12-03 17:52:17 -0800323{
324 ahash_request_set_crypt(hash, NULL, (u8 *)dgst, 0);
325 crypto_ahash_final(hash);
326}
327
328static inline void nvme_tcp_ddgst_update(struct ahash_request *hash,
329 struct page *page, off_t off, size_t len)
330{
331 struct scatterlist sg;
332
333 sg_init_marker(&sg, 1);
334 sg_set_page(&sg, page, len, off);
335 ahash_request_set_crypt(hash, &sg, NULL, len);
336 crypto_ahash_update(hash);
337}
338
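/*
 * Compute the header digest (CRC32C, via the crc32c ahash allocated in
 * nvme_tcp_alloc_crypto()) over the PDU header and store it immediately
 * after the header, at pdu + len.
 */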
339static inline void nvme_tcp_hdgst(struct ahash_request *hash,
340 void *pdu, size_t len)
341{
342 struct scatterlist sg;
343
344 sg_init_one(&sg, pdu, len);
345 ahash_request_set_crypt(hash, &sg, pdu + len, len);
346 crypto_ahash_digest(hash);
347}
348
349static int nvme_tcp_verify_hdgst(struct nvme_tcp_queue *queue,
350 void *pdu, size_t pdu_len)
351{
352 struct nvme_tcp_hdr *hdr = pdu;
353 __le32 recv_digest;
354 __le32 exp_digest;
355
356 if (unlikely(!(hdr->flags & NVME_TCP_F_HDGST))) {
357 dev_err(queue->ctrl->ctrl.device,
358 "queue %d: header digest flag is cleared\n",
359 nvme_tcp_queue_id(queue));
360 return -EPROTO;
361 }
362
363 recv_digest = *(__le32 *)(pdu + hdr->hlen);
364 nvme_tcp_hdgst(queue->rcv_hash, pdu, pdu_len);
365 exp_digest = *(__le32 *)(pdu + hdr->hlen);
366 if (recv_digest != exp_digest) {
367 dev_err(queue->ctrl->ctrl.device,
368 "header digest error: recv %#x expected %#x\n",
369 le32_to_cpu(recv_digest), le32_to_cpu(exp_digest));
370 return -EIO;
371 }
372
373 return 0;
374}
375
376static int nvme_tcp_check_ddgst(struct nvme_tcp_queue *queue, void *pdu)
377{
378 struct nvme_tcp_hdr *hdr = pdu;
379 u8 digest_len = nvme_tcp_hdgst_len(queue);
380 u32 len;
381
382 len = le32_to_cpu(hdr->plen) - hdr->hlen -
383 ((hdr->flags & NVME_TCP_F_HDGST) ? digest_len : 0);
384
385 if (unlikely(len && !(hdr->flags & NVME_TCP_F_DDGST))) {
386 dev_err(queue->ctrl->ctrl.device,
387 "queue %d: data digest flag is cleared\n",
388 nvme_tcp_queue_id(queue));
389 return -EPROTO;
390 }
391 crypto_ahash_init(queue->rcv_hash);
392
393 return 0;
394}
395
396static void nvme_tcp_exit_request(struct blk_mq_tag_set *set,
397 struct request *rq, unsigned int hctx_idx)
398{
399 struct nvme_tcp_request *req = blk_mq_rq_to_pdu(rq);
400
401 page_frag_free(req->pdu);
402}
403
404static int nvme_tcp_init_request(struct blk_mq_tag_set *set,
405 struct request *rq, unsigned int hctx_idx,
406 unsigned int numa_node)
407{
408 struct nvme_tcp_ctrl *ctrl = set->driver_data;
409 struct nvme_tcp_request *req = blk_mq_rq_to_pdu(rq);
410 int queue_idx = (set == &ctrl->tag_set) ? hctx_idx + 1 : 0;
411 struct nvme_tcp_queue *queue = &ctrl->queues[queue_idx];
412 u8 hdgst = nvme_tcp_hdgst_len(queue);
413
414 req->pdu = page_frag_alloc(&queue->pf_cache,
415 sizeof(struct nvme_tcp_cmd_pdu) + hdgst,
416 GFP_KERNEL | __GFP_ZERO);
417 if (!req->pdu)
418 return -ENOMEM;
419
420 req->queue = queue;
421 nvme_req(rq)->ctrl = &ctrl->ctrl;
422
423 return 0;
424}
425
426static int nvme_tcp_init_hctx(struct blk_mq_hw_ctx *hctx, void *data,
427 unsigned int hctx_idx)
428{
429 struct nvme_tcp_ctrl *ctrl = data;
430 struct nvme_tcp_queue *queue = &ctrl->queues[hctx_idx + 1];
431
432 hctx->driver_data = queue;
433 return 0;
434}
435
436static int nvme_tcp_init_admin_hctx(struct blk_mq_hw_ctx *hctx, void *data,
437 unsigned int hctx_idx)
438{
439 struct nvme_tcp_ctrl *ctrl = data;
440 struct nvme_tcp_queue *queue = &ctrl->queues[0];
441
442 hctx->driver_data = queue;
443 return 0;
444}
445
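/*
 * The receive state is derived from the remaining-byte counters rather
 * than stored explicitly: a PDU header is read first, then any data,
 * then the data digest when one is expected.
 */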
446static enum nvme_tcp_recv_state
447nvme_tcp_recv_state(struct nvme_tcp_queue *queue)
448{
449 return (queue->pdu_remaining) ? NVME_TCP_RECV_PDU :
450 (queue->ddgst_remaining) ? NVME_TCP_RECV_DDGST :
451 NVME_TCP_RECV_DATA;
452}
453
454static void nvme_tcp_init_recv_ctx(struct nvme_tcp_queue *queue)
455{
456 queue->pdu_remaining = sizeof(struct nvme_tcp_rsp_pdu) +
457 nvme_tcp_hdgst_len(queue);
458 queue->pdu_offset = 0;
459 queue->data_remaining = -1;
460 queue->ddgst_remaining = 0;
461}
462
463static void nvme_tcp_error_recovery(struct nvme_ctrl *ctrl)
464{
465 if (!nvme_change_ctrl_state(ctrl, NVME_CTRL_RESETTING))
466 return;
467
Nigel Kirkland97b25122020-02-10 16:01:45 -0800468 queue_work(nvme_reset_wq, &to_tcp_ctrl(ctrl)->err_work);
Sagi Grimberg3f2304f2018-12-03 17:52:17 -0800469}
470
471static int nvme_tcp_process_nvme_cqe(struct nvme_tcp_queue *queue,
472 struct nvme_completion *cqe)
473{
474 struct request *rq;
475
476 rq = blk_mq_tag_to_rq(nvme_tcp_tagset(queue), cqe->command_id);
477 if (!rq) {
478 dev_err(queue->ctrl->ctrl.device,
479 "queue %d tag 0x%x not found\n",
480 nvme_tcp_queue_id(queue), cqe->command_id);
481 nvme_tcp_error_recovery(&queue->ctrl->ctrl);
482 return -EINVAL;
483 }
484
Christoph Hellwig2eb81a32020-08-18 09:11:29 +0200485 if (!nvme_try_complete_req(rq, cqe->status, cqe->result))
Christoph Hellwigff029452020-06-11 08:44:52 +0200486 nvme_complete_rq(rq);
Sagi Grimberg1a9460c2019-07-03 14:08:04 -0700487 queue->nr_cqe++;
Sagi Grimberg3f2304f2018-12-03 17:52:17 -0800488
489 return 0;
490}
491
492static int nvme_tcp_handle_c2h_data(struct nvme_tcp_queue *queue,
493 struct nvme_tcp_data_pdu *pdu)
494{
495 struct request *rq;
496
497 rq = blk_mq_tag_to_rq(nvme_tcp_tagset(queue), pdu->command_id);
498 if (!rq) {
499 dev_err(queue->ctrl->ctrl.device,
500 "queue %d tag %#x not found\n",
501 nvme_tcp_queue_id(queue), pdu->command_id);
502 return -ENOENT;
503 }
504
505 if (!blk_rq_payload_bytes(rq)) {
506 dev_err(queue->ctrl->ctrl.device,
507 "queue %d tag %#x unexpected data\n",
508 nvme_tcp_queue_id(queue), rq->tag);
509 return -EIO;
510 }
511
512 queue->data_remaining = le32_to_cpu(pdu->data_length);
513
Sagi Grimberg602d6742019-03-13 18:55:10 +0100514 if (pdu->hdr.flags & NVME_TCP_F_DATA_SUCCESS &&
515 unlikely(!(pdu->hdr.flags & NVME_TCP_F_DATA_LAST))) {
516 dev_err(queue->ctrl->ctrl.device,
517 "queue %d tag %#x SUCCESS set but not last PDU\n",
518 nvme_tcp_queue_id(queue), rq->tag);
519 nvme_tcp_error_recovery(&queue->ctrl->ctrl);
520 return -EPROTO;
521 }
522
Sagi Grimberg3f2304f2018-12-03 17:52:17 -0800523 return 0;
Sagi Grimberg3f2304f2018-12-03 17:52:17 -0800524}
525
526static int nvme_tcp_handle_comp(struct nvme_tcp_queue *queue,
527 struct nvme_tcp_rsp_pdu *pdu)
528{
529 struct nvme_completion *cqe = &pdu->cqe;
530 int ret = 0;
531
532 /*
533 * AEN requests are special as they don't time out and can
534 * survive any kind of queue freeze and often don't respond to
535 * aborts. We don't even bother to allocate a struct request
536 * for them but rather special case them here.
537 */
Israel Rukshin58a8df62019-10-13 19:57:31 +0300538 if (unlikely(nvme_is_aen_req(nvme_tcp_queue_id(queue),
539 cqe->command_id)))
Sagi Grimberg3f2304f2018-12-03 17:52:17 -0800540 nvme_complete_async_event(&queue->ctrl->ctrl, cqe->status,
541 &cqe->result);
542 else
543 ret = nvme_tcp_process_nvme_cqe(queue, cqe);
544
545 return ret;
546}
547
548static int nvme_tcp_setup_h2c_data_pdu(struct nvme_tcp_request *req,
549 struct nvme_tcp_r2t_pdu *pdu)
550{
551 struct nvme_tcp_data_pdu *data = req->pdu;
552 struct nvme_tcp_queue *queue = req->queue;
553 struct request *rq = blk_mq_rq_from_pdu(req);
554 u8 hdgst = nvme_tcp_hdgst_len(queue);
555 u8 ddgst = nvme_tcp_ddgst_len(queue);
556
557 req->pdu_len = le32_to_cpu(pdu->r2t_length);
558 req->pdu_sent = 0;
559
560 if (unlikely(req->data_sent + req->pdu_len > req->data_len)) {
561 dev_err(queue->ctrl->ctrl.device,
562 "req %d r2t len %u exceeded data len %u (%zu sent)\n",
563 rq->tag, req->pdu_len, req->data_len,
564 req->data_sent);
565 return -EPROTO;
566 }
567
568 if (unlikely(le32_to_cpu(pdu->r2t_offset) < req->data_sent)) {
569 dev_err(queue->ctrl->ctrl.device,
570 "req %d unexpected r2t offset %u (expected %zu)\n",
571 rq->tag, le32_to_cpu(pdu->r2t_offset),
572 req->data_sent);
573 return -EPROTO;
574 }
575
576 memset(data, 0, sizeof(*data));
577 data->hdr.type = nvme_tcp_h2c_data;
578 data->hdr.flags = NVME_TCP_F_DATA_LAST;
579 if (queue->hdr_digest)
580 data->hdr.flags |= NVME_TCP_F_HDGST;
581 if (queue->data_digest)
582 data->hdr.flags |= NVME_TCP_F_DDGST;
583 data->hdr.hlen = sizeof(*data);
584 data->hdr.pdo = data->hdr.hlen + hdgst;
585 data->hdr.plen =
586 cpu_to_le32(data->hdr.hlen + hdgst + req->pdu_len + ddgst);
587 data->ttag = pdu->ttag;
588 data->command_id = rq->tag;
589 data->data_offset = cpu_to_le32(req->data_sent);
590 data->data_length = cpu_to_le32(req->pdu_len);
591 return 0;
592}
593
594static int nvme_tcp_handle_r2t(struct nvme_tcp_queue *queue,
595 struct nvme_tcp_r2t_pdu *pdu)
596{
597 struct nvme_tcp_request *req;
598 struct request *rq;
599 int ret;
600
601 rq = blk_mq_tag_to_rq(nvme_tcp_tagset(queue), pdu->command_id);
602 if (!rq) {
603 dev_err(queue->ctrl->ctrl.device,
604 "queue %d tag %#x not found\n",
605 nvme_tcp_queue_id(queue), pdu->command_id);
606 return -ENOENT;
607 }
608 req = blk_mq_rq_to_pdu(rq);
609
610 ret = nvme_tcp_setup_h2c_data_pdu(req, pdu);
611 if (unlikely(ret))
612 return ret;
613
614 req->state = NVME_TCP_SEND_H2C_PDU;
615 req->offset = 0;
616
Sagi Grimberg86f03482020-06-18 17:30:23 -0700617 nvme_tcp_queue_request(req, false, true);
Sagi Grimberg3f2304f2018-12-03 17:52:17 -0800618
619 return 0;
620}
621
622static int nvme_tcp_recv_pdu(struct nvme_tcp_queue *queue, struct sk_buff *skb,
623 unsigned int *offset, size_t *len)
624{
625 struct nvme_tcp_hdr *hdr;
626 char *pdu = queue->pdu;
627 size_t rcv_len = min_t(size_t, *len, queue->pdu_remaining);
628 int ret;
629
630 ret = skb_copy_bits(skb, *offset,
631 &pdu[queue->pdu_offset], rcv_len);
632 if (unlikely(ret))
633 return ret;
634
635 queue->pdu_remaining -= rcv_len;
636 queue->pdu_offset += rcv_len;
637 *offset += rcv_len;
638 *len -= rcv_len;
639 if (queue->pdu_remaining)
640 return 0;
641
642 hdr = queue->pdu;
643 if (queue->hdr_digest) {
644 ret = nvme_tcp_verify_hdgst(queue, queue->pdu, hdr->hlen);
645 if (unlikely(ret))
646 return ret;
647 }
648
649
650 if (queue->data_digest) {
651 ret = nvme_tcp_check_ddgst(queue, queue->pdu);
652 if (unlikely(ret))
653 return ret;
654 }
655
656 switch (hdr->type) {
657 case nvme_tcp_c2h_data:
Sagi Grimberg6be18262019-07-19 12:46:46 -0700658 return nvme_tcp_handle_c2h_data(queue, (void *)queue->pdu);
Sagi Grimberg3f2304f2018-12-03 17:52:17 -0800659 case nvme_tcp_rsp:
660 nvme_tcp_init_recv_ctx(queue);
Sagi Grimberg6be18262019-07-19 12:46:46 -0700661 return nvme_tcp_handle_comp(queue, (void *)queue->pdu);
Sagi Grimberg3f2304f2018-12-03 17:52:17 -0800662 case nvme_tcp_r2t:
663 nvme_tcp_init_recv_ctx(queue);
Sagi Grimberg6be18262019-07-19 12:46:46 -0700664 return nvme_tcp_handle_r2t(queue, (void *)queue->pdu);
Sagi Grimberg3f2304f2018-12-03 17:52:17 -0800665 default:
666 dev_err(queue->ctrl->ctrl.device,
667 "unsupported pdu type (%d)\n", hdr->type);
668 return -EINVAL;
669 }
Sagi Grimberg3f2304f2018-12-03 17:52:17 -0800670}
671
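/*
 * Complete a request with a synthesized status, used when no response
 * capsule will be processed for it.  The NVMe status code is shifted
 * left by one because bit 0 of the CQE status field is the phase tag.
 */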
Christoph Hellwig988aef9e2019-03-15 08:41:04 +0100672static inline void nvme_tcp_end_request(struct request *rq, u16 status)
Sagi Grimberg602d6742019-03-13 18:55:10 +0100673{
674 union nvme_result res = {};
675
Christoph Hellwig2eb81a32020-08-18 09:11:29 +0200676 if (!nvme_try_complete_req(rq, cpu_to_le16(status << 1), res))
Christoph Hellwigff029452020-06-11 08:44:52 +0200677 nvme_complete_rq(rq);
Sagi Grimberg602d6742019-03-13 18:55:10 +0100678}
679
Sagi Grimberg3f2304f2018-12-03 17:52:17 -0800680static int nvme_tcp_recv_data(struct nvme_tcp_queue *queue, struct sk_buff *skb,
681 unsigned int *offset, size_t *len)
682{
683 struct nvme_tcp_data_pdu *pdu = (void *)queue->pdu;
684 struct nvme_tcp_request *req;
685 struct request *rq;
686
687 rq = blk_mq_tag_to_rq(nvme_tcp_tagset(queue), pdu->command_id);
688 if (!rq) {
689 dev_err(queue->ctrl->ctrl.device,
690 "queue %d tag %#x not found\n",
691 nvme_tcp_queue_id(queue), pdu->command_id);
692 return -ENOENT;
693 }
694 req = blk_mq_rq_to_pdu(rq);
695
696 while (true) {
697 int recv_len, ret;
698
699 recv_len = min_t(size_t, *len, queue->data_remaining);
700 if (!recv_len)
701 break;
702
703 if (!iov_iter_count(&req->iter)) {
704 req->curr_bio = req->curr_bio->bi_next;
705
			/*
			 * If we don't have any bios, it means that the
			 * controller sent more data than we requested,
			 * hence error.
			 */
710 if (!req->curr_bio) {
711 dev_err(queue->ctrl->ctrl.device,
712 "queue %d no space in request %#x",
713 nvme_tcp_queue_id(queue), rq->tag);
714 nvme_tcp_init_recv_ctx(queue);
715 return -EIO;
716 }
717 nvme_tcp_init_iter(req, READ);
718 }
719
720 /* we can read only from what is left in this bio */
721 recv_len = min_t(size_t, recv_len,
722 iov_iter_count(&req->iter));
723
724 if (queue->data_digest)
725 ret = skb_copy_and_hash_datagram_iter(skb, *offset,
726 &req->iter, recv_len, queue->rcv_hash);
727 else
728 ret = skb_copy_datagram_iter(skb, *offset,
729 &req->iter, recv_len);
730 if (ret) {
731 dev_err(queue->ctrl->ctrl.device,
732 "queue %d failed to copy request %#x data",
733 nvme_tcp_queue_id(queue), rq->tag);
734 return ret;
735 }
736
737 *len -= recv_len;
738 *offset += recv_len;
739 queue->data_remaining -= recv_len;
740 }
741
742 if (!queue->data_remaining) {
743 if (queue->data_digest) {
744 nvme_tcp_ddgst_final(queue->rcv_hash, &queue->exp_ddgst);
745 queue->ddgst_remaining = NVME_TCP_DIGEST_LENGTH;
746 } else {
Sagi Grimberg1a9460c2019-07-03 14:08:04 -0700747 if (pdu->hdr.flags & NVME_TCP_F_DATA_SUCCESS) {
Sagi Grimberg602d6742019-03-13 18:55:10 +0100748 nvme_tcp_end_request(rq, NVME_SC_SUCCESS);
Sagi Grimberg1a9460c2019-07-03 14:08:04 -0700749 queue->nr_cqe++;
750 }
Sagi Grimberg3f2304f2018-12-03 17:52:17 -0800751 nvme_tcp_init_recv_ctx(queue);
752 }
753 }
754
755 return 0;
756}
757
758static int nvme_tcp_recv_ddgst(struct nvme_tcp_queue *queue,
759 struct sk_buff *skb, unsigned int *offset, size_t *len)
760{
Sagi Grimberg602d6742019-03-13 18:55:10 +0100761 struct nvme_tcp_data_pdu *pdu = (void *)queue->pdu;
Sagi Grimberg3f2304f2018-12-03 17:52:17 -0800762 char *ddgst = (char *)&queue->recv_ddgst;
763 size_t recv_len = min_t(size_t, *len, queue->ddgst_remaining);
764 off_t off = NVME_TCP_DIGEST_LENGTH - queue->ddgst_remaining;
765 int ret;
766
767 ret = skb_copy_bits(skb, *offset, &ddgst[off], recv_len);
768 if (unlikely(ret))
769 return ret;
770
771 queue->ddgst_remaining -= recv_len;
772 *offset += recv_len;
773 *len -= recv_len;
774 if (queue->ddgst_remaining)
775 return 0;
776
777 if (queue->recv_ddgst != queue->exp_ddgst) {
778 dev_err(queue->ctrl->ctrl.device,
779 "data digest error: recv %#x expected %#x\n",
780 le32_to_cpu(queue->recv_ddgst),
781 le32_to_cpu(queue->exp_ddgst));
782 return -EIO;
783 }
784
Sagi Grimberg602d6742019-03-13 18:55:10 +0100785 if (pdu->hdr.flags & NVME_TCP_F_DATA_SUCCESS) {
786 struct request *rq = blk_mq_tag_to_rq(nvme_tcp_tagset(queue),
787 pdu->command_id);
788
789 nvme_tcp_end_request(rq, NVME_SC_SUCCESS);
Sagi Grimberg1a9460c2019-07-03 14:08:04 -0700790 queue->nr_cqe++;
Sagi Grimberg602d6742019-03-13 18:55:10 +0100791 }
792
Sagi Grimberg3f2304f2018-12-03 17:52:17 -0800793 nvme_tcp_init_recv_ctx(queue);
794 return 0;
795}
796
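/*
 * ->read_sock() callback: invoked from nvme_tcp_try_recv() with the socket
 * locked, it consumes skb data according to the current receive state
 * (PDU header, data, or data digest) until the skb is drained or an error
 * stops the queue and triggers error recovery.
 */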
797static int nvme_tcp_recv_skb(read_descriptor_t *desc, struct sk_buff *skb,
798 unsigned int offset, size_t len)
799{
800 struct nvme_tcp_queue *queue = desc->arg.data;
801 size_t consumed = len;
802 int result;
803
804 while (len) {
805 switch (nvme_tcp_recv_state(queue)) {
806 case NVME_TCP_RECV_PDU:
807 result = nvme_tcp_recv_pdu(queue, skb, &offset, &len);
808 break;
809 case NVME_TCP_RECV_DATA:
810 result = nvme_tcp_recv_data(queue, skb, &offset, &len);
811 break;
812 case NVME_TCP_RECV_DDGST:
813 result = nvme_tcp_recv_ddgst(queue, skb, &offset, &len);
814 break;
815 default:
816 result = -EFAULT;
817 }
818 if (result) {
819 dev_err(queue->ctrl->ctrl.device,
820 "receive failed: %d\n", result);
821 queue->rd_enabled = false;
822 nvme_tcp_error_recovery(&queue->ctrl->ctrl);
823 return result;
824 }
825 }
826
827 return consumed;
828}
829
830static void nvme_tcp_data_ready(struct sock *sk)
831{
832 struct nvme_tcp_queue *queue;
833
Sagi Grimberg386e5e62020-04-30 13:59:32 -0700834 read_lock_bh(&sk->sk_callback_lock);
Sagi Grimberg3f2304f2018-12-03 17:52:17 -0800835 queue = sk->sk_user_data;
Sagi Grimberg72e5d752020-05-01 14:25:44 -0700836 if (likely(queue && queue->rd_enabled) &&
837 !test_bit(NVME_TCP_Q_POLLING, &queue->flags))
Sagi Grimberg3f2304f2018-12-03 17:52:17 -0800838 queue_work_on(queue->io_cpu, nvme_tcp_wq, &queue->io_work);
Sagi Grimberg386e5e62020-04-30 13:59:32 -0700839 read_unlock_bh(&sk->sk_callback_lock);
Sagi Grimberg3f2304f2018-12-03 17:52:17 -0800840}
841
842static void nvme_tcp_write_space(struct sock *sk)
843{
844 struct nvme_tcp_queue *queue;
845
846 read_lock_bh(&sk->sk_callback_lock);
847 queue = sk->sk_user_data;
848 if (likely(queue && sk_stream_is_writeable(sk))) {
849 clear_bit(SOCK_NOSPACE, &sk->sk_socket->flags);
850 queue_work_on(queue->io_cpu, nvme_tcp_wq, &queue->io_work);
851 }
852 read_unlock_bh(&sk->sk_callback_lock);
853}
854
855static void nvme_tcp_state_change(struct sock *sk)
856{
857 struct nvme_tcp_queue *queue;
858
859 read_lock(&sk->sk_callback_lock);
860 queue = sk->sk_user_data;
861 if (!queue)
862 goto done;
863
864 switch (sk->sk_state) {
865 case TCP_CLOSE:
866 case TCP_CLOSE_WAIT:
867 case TCP_LAST_ACK:
868 case TCP_FIN_WAIT1:
869 case TCP_FIN_WAIT2:
870 /* fallthrough */
871 nvme_tcp_error_recovery(&queue->ctrl->ctrl);
872 break;
873 default:
874 dev_info(queue->ctrl->ctrl.device,
875 "queue %d socket state %d\n",
876 nvme_tcp_queue_id(queue), sk->sk_state);
877 }
878
879 queue->state_change(sk);
880done:
881 read_unlock(&sk->sk_callback_lock);
882}
883
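/*
 * True when more data is expected to follow on this queue, either because
 * requests are already queued or because the submitter indicated that the
 * current batch is not finished (more_requests).  Used to choose between
 * MSG_MORE/MSG_SENDPAGE_NOTLAST and MSG_EOR on the socket.
 */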
Sagi Grimberg122e5b92020-06-18 17:30:24 -0700884static inline bool nvme_tcp_queue_more(struct nvme_tcp_queue *queue)
885{
886 return !list_empty(&queue->send_list) ||
887 !llist_empty(&queue->req_list) || queue->more_requests;
888}
889
Sagi Grimberg3f2304f2018-12-03 17:52:17 -0800890static inline void nvme_tcp_done_send_req(struct nvme_tcp_queue *queue)
891{
892 queue->request = NULL;
893}
894
895static void nvme_tcp_fail_request(struct nvme_tcp_request *req)
896{
Sagi Grimberg16686012019-08-02 18:17:52 -0700897 nvme_tcp_end_request(blk_mq_rq_from_pdu(req), NVME_SC_HOST_PATH_ERROR);
Sagi Grimberg3f2304f2018-12-03 17:52:17 -0800898}
899
900static int nvme_tcp_try_send_data(struct nvme_tcp_request *req)
901{
902 struct nvme_tcp_queue *queue = req->queue;
903
904 while (true) {
905 struct page *page = nvme_tcp_req_cur_page(req);
906 size_t offset = nvme_tcp_req_cur_offset(req);
907 size_t len = nvme_tcp_req_cur_length(req);
908 bool last = nvme_tcp_pdu_last_send(req, len);
909 int ret, flags = MSG_DONTWAIT;
910
Sagi Grimberg122e5b92020-06-18 17:30:24 -0700911 if (last && !queue->data_digest && !nvme_tcp_queue_more(queue))
Sagi Grimberg3f2304f2018-12-03 17:52:17 -0800912 flags |= MSG_EOR;
913 else
Sagi Grimberg5bb052d2020-05-04 22:20:01 -0700914 flags |= MSG_MORE | MSG_SENDPAGE_NOTLAST;
Sagi Grimberg3f2304f2018-12-03 17:52:17 -0800915
Mikhail Skorzhinskii37c15212019-07-08 12:31:29 +0200916 /* can't zcopy slab pages */
917 if (unlikely(PageSlab(page))) {
918 ret = sock_no_sendpage(queue->sock, page, offset, len,
919 flags);
920 } else {
921 ret = kernel_sendpage(queue->sock, page, offset, len,
922 flags);
923 }
Sagi Grimberg3f2304f2018-12-03 17:52:17 -0800924 if (ret <= 0)
925 return ret;
926
927 nvme_tcp_advance_req(req, ret);
928 if (queue->data_digest)
929 nvme_tcp_ddgst_update(queue->snd_hash, page,
930 offset, ret);
931
		/* fully successful last write */
933 if (last && ret == len) {
934 if (queue->data_digest) {
935 nvme_tcp_ddgst_final(queue->snd_hash,
936 &req->ddgst);
937 req->state = NVME_TCP_SEND_DDGST;
938 req->offset = 0;
939 } else {
940 nvme_tcp_done_send_req(queue);
941 }
942 return 1;
943 }
944 }
945 return -EAGAIN;
946}
947
948static int nvme_tcp_try_send_cmd_pdu(struct nvme_tcp_request *req)
949{
950 struct nvme_tcp_queue *queue = req->queue;
951 struct nvme_tcp_cmd_pdu *pdu = req->pdu;
952 bool inline_data = nvme_tcp_has_inline_data(req);
Sagi Grimberg3f2304f2018-12-03 17:52:17 -0800953 u8 hdgst = nvme_tcp_hdgst_len(queue);
954 int len = sizeof(*pdu) + hdgst - req->offset;
Sagi Grimberg5bb052d2020-05-04 22:20:01 -0700955 int flags = MSG_DONTWAIT;
Sagi Grimberg3f2304f2018-12-03 17:52:17 -0800956 int ret;
957
Sagi Grimberg122e5b92020-06-18 17:30:24 -0700958 if (inline_data || nvme_tcp_queue_more(queue))
Sagi Grimberg5bb052d2020-05-04 22:20:01 -0700959 flags |= MSG_MORE | MSG_SENDPAGE_NOTLAST;
960 else
961 flags |= MSG_EOR;
962
Sagi Grimberg3f2304f2018-12-03 17:52:17 -0800963 if (queue->hdr_digest && !req->offset)
964 nvme_tcp_hdgst(queue->snd_hash, pdu, sizeof(*pdu));
965
966 ret = kernel_sendpage(queue->sock, virt_to_page(pdu),
967 offset_in_page(pdu) + req->offset, len, flags);
968 if (unlikely(ret <= 0))
969 return ret;
970
971 len -= ret;
972 if (!len) {
973 if (inline_data) {
974 req->state = NVME_TCP_SEND_DATA;
975 if (queue->data_digest)
976 crypto_ahash_init(queue->snd_hash);
977 nvme_tcp_init_iter(req, WRITE);
978 } else {
979 nvme_tcp_done_send_req(queue);
980 }
981 return 1;
982 }
983 req->offset += ret;
984
985 return -EAGAIN;
986}
987
988static int nvme_tcp_try_send_data_pdu(struct nvme_tcp_request *req)
989{
990 struct nvme_tcp_queue *queue = req->queue;
991 struct nvme_tcp_data_pdu *pdu = req->pdu;
992 u8 hdgst = nvme_tcp_hdgst_len(queue);
993 int len = sizeof(*pdu) - req->offset + hdgst;
994 int ret;
995
996 if (queue->hdr_digest && !req->offset)
997 nvme_tcp_hdgst(queue->snd_hash, pdu, sizeof(*pdu));
998
999 ret = kernel_sendpage(queue->sock, virt_to_page(pdu),
1000 offset_in_page(pdu) + req->offset, len,
Sagi Grimberg5bb052d2020-05-04 22:20:01 -07001001 MSG_DONTWAIT | MSG_MORE | MSG_SENDPAGE_NOTLAST);
Sagi Grimberg3f2304f2018-12-03 17:52:17 -08001002 if (unlikely(ret <= 0))
1003 return ret;
1004
1005 len -= ret;
1006 if (!len) {
1007 req->state = NVME_TCP_SEND_DATA;
1008 if (queue->data_digest)
1009 crypto_ahash_init(queue->snd_hash);
1010 if (!req->data_sent)
1011 nvme_tcp_init_iter(req, WRITE);
1012 return 1;
1013 }
1014 req->offset += ret;
1015
1016 return -EAGAIN;
1017}
1018
1019static int nvme_tcp_try_send_ddgst(struct nvme_tcp_request *req)
1020{
1021 struct nvme_tcp_queue *queue = req->queue;
1022 int ret;
Sagi Grimberg122e5b92020-06-18 17:30:24 -07001023 struct msghdr msg = { .msg_flags = MSG_DONTWAIT };
Sagi Grimberg3f2304f2018-12-03 17:52:17 -08001024 struct kvec iov = {
1025 .iov_base = &req->ddgst + req->offset,
1026 .iov_len = NVME_TCP_DIGEST_LENGTH - req->offset
1027 };
1028
Sagi Grimberg122e5b92020-06-18 17:30:24 -07001029 if (nvme_tcp_queue_more(queue))
1030 msg.msg_flags |= MSG_MORE;
1031 else
1032 msg.msg_flags |= MSG_EOR;
1033
Sagi Grimberg3f2304f2018-12-03 17:52:17 -08001034 ret = kernel_sendmsg(queue->sock, &msg, &iov, 1, iov.iov_len);
1035 if (unlikely(ret <= 0))
1036 return ret;
1037
1038 if (req->offset + ret == NVME_TCP_DIGEST_LENGTH) {
1039 nvme_tcp_done_send_req(queue);
1040 return 1;
1041 }
1042
1043 req->offset += ret;
1044 return -EAGAIN;
1045}
1046
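/*
 * Returns 1 if progress was made, 0 if there is nothing (more) to send
 * right now (including the -EAGAIN case), and a negative error when the
 * current request had to be failed.
 */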
1047static int nvme_tcp_try_send(struct nvme_tcp_queue *queue)
1048{
1049 struct nvme_tcp_request *req;
1050 int ret = 1;
1051
1052 if (!queue->request) {
1053 queue->request = nvme_tcp_fetch_request(queue);
1054 if (!queue->request)
1055 return 0;
1056 }
1057 req = queue->request;
1058
1059 if (req->state == NVME_TCP_SEND_CMD_PDU) {
1060 ret = nvme_tcp_try_send_cmd_pdu(req);
1061 if (ret <= 0)
1062 goto done;
1063 if (!nvme_tcp_has_inline_data(req))
1064 return ret;
1065 }
1066
1067 if (req->state == NVME_TCP_SEND_H2C_PDU) {
1068 ret = nvme_tcp_try_send_data_pdu(req);
1069 if (ret <= 0)
1070 goto done;
1071 }
1072
1073 if (req->state == NVME_TCP_SEND_DATA) {
1074 ret = nvme_tcp_try_send_data(req);
1075 if (ret <= 0)
1076 goto done;
1077 }
1078
1079 if (req->state == NVME_TCP_SEND_DDGST)
1080 ret = nvme_tcp_try_send_ddgst(req);
1081done:
Sagi Grimberg5ff4e112020-02-25 16:43:23 -08001082 if (ret == -EAGAIN) {
Sagi Grimberg3f2304f2018-12-03 17:52:17 -08001083 ret = 0;
Sagi Grimberg5ff4e112020-02-25 16:43:23 -08001084 } else if (ret < 0) {
1085 dev_err(queue->ctrl->ctrl.device,
1086 "failed to send request %d\n", ret);
1087 if (ret != -EPIPE && ret != -ECONNRESET)
1088 nvme_tcp_fail_request(queue->request);
1089 nvme_tcp_done_send_req(queue);
1090 }
Sagi Grimberg3f2304f2018-12-03 17:52:17 -08001091 return ret;
1092}
1093
1094static int nvme_tcp_try_recv(struct nvme_tcp_queue *queue)
1095{
Potnuri Bharat Teja10407ec2019-07-08 15:22:00 +05301096 struct socket *sock = queue->sock;
1097 struct sock *sk = sock->sk;
Sagi Grimberg3f2304f2018-12-03 17:52:17 -08001098 read_descriptor_t rd_desc;
1099 int consumed;
1100
1101 rd_desc.arg.data = queue;
1102 rd_desc.count = 1;
1103 lock_sock(sk);
Sagi Grimberg1a9460c2019-07-03 14:08:04 -07001104 queue->nr_cqe = 0;
Potnuri Bharat Teja10407ec2019-07-08 15:22:00 +05301105 consumed = sock->ops->read_sock(sk, &rd_desc, nvme_tcp_recv_skb);
Sagi Grimberg3f2304f2018-12-03 17:52:17 -08001106 release_sock(sk);
1107 return consumed;
1108}
1109
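/*
 * Per-queue worker: alternates between sending and receiving for roughly
 * one millisecond, then either returns (nothing pending) or requeues
 * itself so that other work on the CPU is not starved.
 */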
1110static void nvme_tcp_io_work(struct work_struct *w)
1111{
1112 struct nvme_tcp_queue *queue =
1113 container_of(w, struct nvme_tcp_queue, io_work);
Wunderlich, Markddef2952019-09-18 23:36:37 +00001114 unsigned long deadline = jiffies + msecs_to_jiffies(1);
Sagi Grimberg3f2304f2018-12-03 17:52:17 -08001115
1116 do {
1117 bool pending = false;
1118 int result;
1119
Sagi Grimbergdb5ad6b2020-05-01 14:25:45 -07001120 if (mutex_trylock(&queue->send_mutex)) {
1121 result = nvme_tcp_try_send(queue);
1122 mutex_unlock(&queue->send_mutex);
1123 if (result > 0)
1124 pending = true;
1125 else if (unlikely(result < 0))
1126 break;
1127 }
Sagi Grimberg3f2304f2018-12-03 17:52:17 -08001128
1129 result = nvme_tcp_try_recv(queue);
1130 if (result > 0)
1131 pending = true;
Sagi Grimberg761ad262020-02-25 16:43:24 -08001132 else if (unlikely(result < 0))
Sagi Grimberg39d06079a2020-03-31 22:44:23 -07001133 return;
Sagi Grimberg3f2304f2018-12-03 17:52:17 -08001134
1135 if (!pending)
1136 return;
1137
Wunderlich, Markddef2952019-09-18 23:36:37 +00001138 } while (!time_after(jiffies, deadline)); /* quota is exhausted */
Sagi Grimberg3f2304f2018-12-03 17:52:17 -08001139
1140 queue_work_on(queue->io_cpu, nvme_tcp_wq, &queue->io_work);
1141}
1142
1143static void nvme_tcp_free_crypto(struct nvme_tcp_queue *queue)
1144{
1145 struct crypto_ahash *tfm = crypto_ahash_reqtfm(queue->rcv_hash);
1146
1147 ahash_request_free(queue->rcv_hash);
1148 ahash_request_free(queue->snd_hash);
1149 crypto_free_ahash(tfm);
1150}
1151
1152static int nvme_tcp_alloc_crypto(struct nvme_tcp_queue *queue)
1153{
1154 struct crypto_ahash *tfm;
1155
1156 tfm = crypto_alloc_ahash("crc32c", 0, CRYPTO_ALG_ASYNC);
1157 if (IS_ERR(tfm))
1158 return PTR_ERR(tfm);
1159
1160 queue->snd_hash = ahash_request_alloc(tfm, GFP_KERNEL);
1161 if (!queue->snd_hash)
1162 goto free_tfm;
1163 ahash_request_set_callback(queue->snd_hash, 0, NULL, NULL);
1164
1165 queue->rcv_hash = ahash_request_alloc(tfm, GFP_KERNEL);
1166 if (!queue->rcv_hash)
1167 goto free_snd_hash;
1168 ahash_request_set_callback(queue->rcv_hash, 0, NULL, NULL);
1169
1170 return 0;
1171free_snd_hash:
1172 ahash_request_free(queue->snd_hash);
1173free_tfm:
1174 crypto_free_ahash(tfm);
1175 return -ENOMEM;
1176}
1177
1178static void nvme_tcp_free_async_req(struct nvme_tcp_ctrl *ctrl)
1179{
1180 struct nvme_tcp_request *async = &ctrl->async_req;
1181
1182 page_frag_free(async->pdu);
1183}
1184
1185static int nvme_tcp_alloc_async_req(struct nvme_tcp_ctrl *ctrl)
1186{
1187 struct nvme_tcp_queue *queue = &ctrl->queues[0];
1188 struct nvme_tcp_request *async = &ctrl->async_req;
1189 u8 hdgst = nvme_tcp_hdgst_len(queue);
1190
1191 async->pdu = page_frag_alloc(&queue->pf_cache,
1192 sizeof(struct nvme_tcp_cmd_pdu) + hdgst,
1193 GFP_KERNEL | __GFP_ZERO);
1194 if (!async->pdu)
1195 return -ENOMEM;
1196
1197 async->queue = &ctrl->queues[0];
1198 return 0;
1199}
1200
1201static void nvme_tcp_free_queue(struct nvme_ctrl *nctrl, int qid)
1202{
1203 struct nvme_tcp_ctrl *ctrl = to_tcp_ctrl(nctrl);
1204 struct nvme_tcp_queue *queue = &ctrl->queues[qid];
1205
1206 if (!test_and_clear_bit(NVME_TCP_Q_ALLOCATED, &queue->flags))
1207 return;
1208
1209 if (queue->hdr_digest || queue->data_digest)
1210 nvme_tcp_free_crypto(queue);
1211
1212 sock_release(queue->sock);
1213 kfree(queue->pdu);
1214}
1215
1216static int nvme_tcp_init_connection(struct nvme_tcp_queue *queue)
1217{
1218 struct nvme_tcp_icreq_pdu *icreq;
1219 struct nvme_tcp_icresp_pdu *icresp;
1220 struct msghdr msg = {};
1221 struct kvec iov;
1222 bool ctrl_hdgst, ctrl_ddgst;
1223 int ret;
1224
1225 icreq = kzalloc(sizeof(*icreq), GFP_KERNEL);
1226 if (!icreq)
1227 return -ENOMEM;
1228
1229 icresp = kzalloc(sizeof(*icresp), GFP_KERNEL);
1230 if (!icresp) {
1231 ret = -ENOMEM;
1232 goto free_icreq;
1233 }
1234
1235 icreq->hdr.type = nvme_tcp_icreq;
1236 icreq->hdr.hlen = sizeof(*icreq);
1237 icreq->hdr.pdo = 0;
1238 icreq->hdr.plen = cpu_to_le32(icreq->hdr.hlen);
1239 icreq->pfv = cpu_to_le16(NVME_TCP_PFV_1_0);
1240 icreq->maxr2t = 0; /* single inflight r2t supported */
1241 icreq->hpda = 0; /* no alignment constraint */
1242 if (queue->hdr_digest)
1243 icreq->digest |= NVME_TCP_HDR_DIGEST_ENABLE;
1244 if (queue->data_digest)
1245 icreq->digest |= NVME_TCP_DATA_DIGEST_ENABLE;
1246
1247 iov.iov_base = icreq;
1248 iov.iov_len = sizeof(*icreq);
1249 ret = kernel_sendmsg(queue->sock, &msg, &iov, 1, iov.iov_len);
1250 if (ret < 0)
1251 goto free_icresp;
1252
1253 memset(&msg, 0, sizeof(msg));
1254 iov.iov_base = icresp;
1255 iov.iov_len = sizeof(*icresp);
1256 ret = kernel_recvmsg(queue->sock, &msg, &iov, 1,
1257 iov.iov_len, msg.msg_flags);
1258 if (ret < 0)
1259 goto free_icresp;
1260
1261 ret = -EINVAL;
1262 if (icresp->hdr.type != nvme_tcp_icresp) {
1263 pr_err("queue %d: bad type returned %d\n",
1264 nvme_tcp_queue_id(queue), icresp->hdr.type);
1265 goto free_icresp;
1266 }
1267
1268 if (le32_to_cpu(icresp->hdr.plen) != sizeof(*icresp)) {
1269 pr_err("queue %d: bad pdu length returned %d\n",
1270 nvme_tcp_queue_id(queue), icresp->hdr.plen);
1271 goto free_icresp;
1272 }
1273
1274 if (icresp->pfv != NVME_TCP_PFV_1_0) {
1275 pr_err("queue %d: bad pfv returned %d\n",
1276 nvme_tcp_queue_id(queue), icresp->pfv);
1277 goto free_icresp;
1278 }
1279
1280 ctrl_ddgst = !!(icresp->digest & NVME_TCP_DATA_DIGEST_ENABLE);
1281 if ((queue->data_digest && !ctrl_ddgst) ||
1282 (!queue->data_digest && ctrl_ddgst)) {
1283 pr_err("queue %d: data digest mismatch host: %s ctrl: %s\n",
1284 nvme_tcp_queue_id(queue),
1285 queue->data_digest ? "enabled" : "disabled",
1286 ctrl_ddgst ? "enabled" : "disabled");
1287 goto free_icresp;
1288 }
1289
1290 ctrl_hdgst = !!(icresp->digest & NVME_TCP_HDR_DIGEST_ENABLE);
1291 if ((queue->hdr_digest && !ctrl_hdgst) ||
1292 (!queue->hdr_digest && ctrl_hdgst)) {
1293 pr_err("queue %d: header digest mismatch host: %s ctrl: %s\n",
1294 nvme_tcp_queue_id(queue),
1295 queue->hdr_digest ? "enabled" : "disabled",
1296 ctrl_hdgst ? "enabled" : "disabled");
1297 goto free_icresp;
1298 }
1299
1300 if (icresp->cpda != 0) {
1301 pr_err("queue %d: unsupported cpda returned %d\n",
1302 nvme_tcp_queue_id(queue), icresp->cpda);
1303 goto free_icresp;
1304 }
1305
1306 ret = 0;
1307free_icresp:
1308 kfree(icresp);
1309free_icreq:
1310 kfree(icreq);
1311 return ret;
1312}
1313
Sagi Grimberg40510a62020-02-25 15:53:09 -08001314static bool nvme_tcp_admin_queue(struct nvme_tcp_queue *queue)
1315{
1316 return nvme_tcp_queue_id(queue) == 0;
1317}
1318
1319static bool nvme_tcp_default_queue(struct nvme_tcp_queue *queue)
1320{
1321 struct nvme_tcp_ctrl *ctrl = queue->ctrl;
1322 int qid = nvme_tcp_queue_id(queue);
1323
1324 return !nvme_tcp_admin_queue(queue) &&
1325 qid < 1 + ctrl->io_queues[HCTX_TYPE_DEFAULT];
1326}
1327
1328static bool nvme_tcp_read_queue(struct nvme_tcp_queue *queue)
1329{
1330 struct nvme_tcp_ctrl *ctrl = queue->ctrl;
1331 int qid = nvme_tcp_queue_id(queue);
1332
1333 return !nvme_tcp_admin_queue(queue) &&
1334 !nvme_tcp_default_queue(queue) &&
1335 qid < 1 + ctrl->io_queues[HCTX_TYPE_DEFAULT] +
1336 ctrl->io_queues[HCTX_TYPE_READ];
1337}
1338
1339static bool nvme_tcp_poll_queue(struct nvme_tcp_queue *queue)
1340{
1341 struct nvme_tcp_ctrl *ctrl = queue->ctrl;
1342 int qid = nvme_tcp_queue_id(queue);
1343
1344 return !nvme_tcp_admin_queue(queue) &&
1345 !nvme_tcp_default_queue(queue) &&
1346 !nvme_tcp_read_queue(queue) &&
1347 qid < 1 + ctrl->io_queues[HCTX_TYPE_DEFAULT] +
1348 ctrl->io_queues[HCTX_TYPE_READ] +
1349 ctrl->io_queues[HCTX_TYPE_POLL];
1350}
1351
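/*
 * Spread queues of each type (default, read, poll) round-robin across the
 * online CPUs; queues of the same type with consecutive qids land on
 * consecutive CPUs.
 */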
1352static void nvme_tcp_set_queue_io_cpu(struct nvme_tcp_queue *queue)
1353{
1354 struct nvme_tcp_ctrl *ctrl = queue->ctrl;
1355 int qid = nvme_tcp_queue_id(queue);
1356 int n = 0;
1357
1358 if (nvme_tcp_default_queue(queue))
1359 n = qid - 1;
1360 else if (nvme_tcp_read_queue(queue))
1361 n = qid - ctrl->io_queues[HCTX_TYPE_DEFAULT] - 1;
1362 else if (nvme_tcp_poll_queue(queue))
1363 n = qid - ctrl->io_queues[HCTX_TYPE_DEFAULT] -
1364 ctrl->io_queues[HCTX_TYPE_READ] - 1;
1365 queue->io_cpu = cpumask_next_wrap(n - 1, cpu_online_mask, -1, false);
1366}
1367
Sagi Grimberg3f2304f2018-12-03 17:52:17 -08001368static int nvme_tcp_alloc_queue(struct nvme_ctrl *nctrl,
1369 int qid, size_t queue_size)
1370{
1371 struct nvme_tcp_ctrl *ctrl = to_tcp_ctrl(nctrl);
1372 struct nvme_tcp_queue *queue = &ctrl->queues[qid];
Christoph Hellwig6ebf71b2020-05-28 07:12:26 +02001373 int ret, rcv_pdu_size;
Sagi Grimberg3f2304f2018-12-03 17:52:17 -08001374
1375 queue->ctrl = ctrl;
Sagi Grimberg15ec9282020-06-18 17:30:22 -07001376 init_llist_head(&queue->req_list);
Sagi Grimberg3f2304f2018-12-03 17:52:17 -08001377 INIT_LIST_HEAD(&queue->send_list);
Sagi Grimbergdb5ad6b2020-05-01 14:25:45 -07001378 mutex_init(&queue->send_mutex);
Sagi Grimberg3f2304f2018-12-03 17:52:17 -08001379 INIT_WORK(&queue->io_work, nvme_tcp_io_work);
1380 queue->queue_size = queue_size;
1381
1382 if (qid > 0)
Israel Rukshin9924b032019-08-18 12:08:53 +03001383 queue->cmnd_capsule_len = nctrl->ioccsz * 16;
Sagi Grimberg3f2304f2018-12-03 17:52:17 -08001384 else
1385 queue->cmnd_capsule_len = sizeof(struct nvme_command) +
1386 NVME_TCP_ADMIN_CCSZ;
1387
1388 ret = sock_create(ctrl->addr.ss_family, SOCK_STREAM,
1389 IPPROTO_TCP, &queue->sock);
1390 if (ret) {
Israel Rukshin9924b032019-08-18 12:08:53 +03001391 dev_err(nctrl->device,
Sagi Grimberg3f2304f2018-12-03 17:52:17 -08001392 "failed to create socket: %d\n", ret);
1393 return ret;
1394 }
1395
1396 /* Single syn retry */
Christoph Hellwig557eadf2020-05-28 07:12:21 +02001397 tcp_sock_set_syncnt(queue->sock->sk, 1);
Sagi Grimberg3f2304f2018-12-03 17:52:17 -08001398
1399 /* Set TCP no delay */
Christoph Hellwig12abc5e2020-05-28 07:12:19 +02001400 tcp_sock_set_nodelay(queue->sock->sk);
Sagi Grimberg3f2304f2018-12-03 17:52:17 -08001401
1402 /*
1403 * Cleanup whatever is sitting in the TCP transmit queue on socket
1404 * close. This is done to prevent stale data from being sent should
1405 * the network connection be restored before TCP times out.
1406 */
Christoph Hellwigc4335942020-05-28 07:12:10 +02001407 sock_no_linger(queue->sock->sk);
Sagi Grimberg3f2304f2018-12-03 17:52:17 -08001408
Christoph Hellwig6e434962020-05-28 07:12:11 +02001409 if (so_priority > 0)
1410 sock_set_priority(queue->sock->sk, so_priority);
Wunderlich, Mark9912ade2020-01-16 00:46:12 +00001411
Israel Rukshinbb139852019-08-18 12:08:54 +03001412 /* Set socket type of service */
Christoph Hellwig6ebf71b2020-05-28 07:12:26 +02001413 if (nctrl->opts->tos >= 0)
1414 ip_sock_set_tos(queue->sock->sk, nctrl->opts->tos);
Israel Rukshinbb139852019-08-18 12:08:54 +03001415
	/* Set a 10 second timeout for icresp recvmsg */
1417 queue->sock->sk->sk_rcvtimeo = 10 * HZ;
1418
Sagi Grimberg3f2304f2018-12-03 17:52:17 -08001419 queue->sock->sk->sk_allocation = GFP_ATOMIC;
Sagi Grimberg40510a62020-02-25 15:53:09 -08001420 nvme_tcp_set_queue_io_cpu(queue);
Sagi Grimberg3f2304f2018-12-03 17:52:17 -08001421 queue->request = NULL;
1422 queue->data_remaining = 0;
1423 queue->ddgst_remaining = 0;
1424 queue->pdu_remaining = 0;
1425 queue->pdu_offset = 0;
1426 sk_set_memalloc(queue->sock->sk);
1427
Israel Rukshin9924b032019-08-18 12:08:53 +03001428 if (nctrl->opts->mask & NVMF_OPT_HOST_TRADDR) {
Sagi Grimberg3f2304f2018-12-03 17:52:17 -08001429 ret = kernel_bind(queue->sock, (struct sockaddr *)&ctrl->src_addr,
1430 sizeof(ctrl->src_addr));
1431 if (ret) {
Israel Rukshin9924b032019-08-18 12:08:53 +03001432 dev_err(nctrl->device,
Sagi Grimberg3f2304f2018-12-03 17:52:17 -08001433 "failed to bind queue %d socket %d\n",
1434 qid, ret);
1435 goto err_sock;
1436 }
1437 }
1438
1439 queue->hdr_digest = nctrl->opts->hdr_digest;
1440 queue->data_digest = nctrl->opts->data_digest;
1441 if (queue->hdr_digest || queue->data_digest) {
1442 ret = nvme_tcp_alloc_crypto(queue);
1443 if (ret) {
Israel Rukshin9924b032019-08-18 12:08:53 +03001444 dev_err(nctrl->device,
Sagi Grimberg3f2304f2018-12-03 17:52:17 -08001445 "failed to allocate queue %d crypto\n", qid);
1446 goto err_sock;
1447 }
1448 }
1449
1450 rcv_pdu_size = sizeof(struct nvme_tcp_rsp_pdu) +
1451 nvme_tcp_hdgst_len(queue);
1452 queue->pdu = kmalloc(rcv_pdu_size, GFP_KERNEL);
1453 if (!queue->pdu) {
1454 ret = -ENOMEM;
1455 goto err_crypto;
1456 }
1457
Israel Rukshin9924b032019-08-18 12:08:53 +03001458 dev_dbg(nctrl->device, "connecting queue %d\n",
Sagi Grimberg3f2304f2018-12-03 17:52:17 -08001459 nvme_tcp_queue_id(queue));
1460
1461 ret = kernel_connect(queue->sock, (struct sockaddr *)&ctrl->addr,
1462 sizeof(ctrl->addr), 0);
1463 if (ret) {
Israel Rukshin9924b032019-08-18 12:08:53 +03001464 dev_err(nctrl->device,
Sagi Grimberg3f2304f2018-12-03 17:52:17 -08001465 "failed to connect socket: %d\n", ret);
1466 goto err_rcv_pdu;
1467 }
1468
1469 ret = nvme_tcp_init_connection(queue);
1470 if (ret)
1471 goto err_init_connect;
1472
1473 queue->rd_enabled = true;
1474 set_bit(NVME_TCP_Q_ALLOCATED, &queue->flags);
1475 nvme_tcp_init_recv_ctx(queue);
1476
1477 write_lock_bh(&queue->sock->sk->sk_callback_lock);
1478 queue->sock->sk->sk_user_data = queue;
1479 queue->state_change = queue->sock->sk->sk_state_change;
1480 queue->data_ready = queue->sock->sk->sk_data_ready;
1481 queue->write_space = queue->sock->sk->sk_write_space;
1482 queue->sock->sk->sk_data_ready = nvme_tcp_data_ready;
1483 queue->sock->sk->sk_state_change = nvme_tcp_state_change;
1484 queue->sock->sk->sk_write_space = nvme_tcp_write_space;
Sebastian Andrzej Siewiorac1c4e12019-10-10 17:34:12 +02001485#ifdef CONFIG_NET_RX_BUSY_POLL
Sagi Grimberg1a9460c2019-07-03 14:08:04 -07001486 queue->sock->sk->sk_ll_usec = 1;
Sebastian Andrzej Siewiorac1c4e12019-10-10 17:34:12 +02001487#endif
Sagi Grimberg3f2304f2018-12-03 17:52:17 -08001488 write_unlock_bh(&queue->sock->sk->sk_callback_lock);
1489
1490 return 0;
1491
1492err_init_connect:
1493 kernel_sock_shutdown(queue->sock, SHUT_RDWR);
1494err_rcv_pdu:
1495 kfree(queue->pdu);
1496err_crypto:
1497 if (queue->hdr_digest || queue->data_digest)
1498 nvme_tcp_free_crypto(queue);
1499err_sock:
1500 sock_release(queue->sock);
1501 queue->sock = NULL;
1502 return ret;
1503}
1504
1505static void nvme_tcp_restore_sock_calls(struct nvme_tcp_queue *queue)
1506{
1507 struct socket *sock = queue->sock;
1508
1509 write_lock_bh(&sock->sk->sk_callback_lock);
1510 sock->sk->sk_user_data = NULL;
1511 sock->sk->sk_data_ready = queue->data_ready;
1512 sock->sk->sk_state_change = queue->state_change;
1513 sock->sk->sk_write_space = queue->write_space;
1514 write_unlock_bh(&sock->sk->sk_callback_lock);
1515}
1516
1517static void __nvme_tcp_stop_queue(struct nvme_tcp_queue *queue)
1518{
1519 kernel_sock_shutdown(queue->sock, SHUT_RDWR);
1520 nvme_tcp_restore_sock_calls(queue);
1521 cancel_work_sync(&queue->io_work);
1522}
1523
1524static void nvme_tcp_stop_queue(struct nvme_ctrl *nctrl, int qid)
1525{
1526 struct nvme_tcp_ctrl *ctrl = to_tcp_ctrl(nctrl);
1527 struct nvme_tcp_queue *queue = &ctrl->queues[qid];
1528
1529 if (!test_and_clear_bit(NVME_TCP_Q_LIVE, &queue->flags))
1530 return;
Sagi Grimberg3f2304f2018-12-03 17:52:17 -08001531 __nvme_tcp_stop_queue(queue);
1532}
1533
1534static int nvme_tcp_start_queue(struct nvme_ctrl *nctrl, int idx)
1535{
1536 struct nvme_tcp_ctrl *ctrl = to_tcp_ctrl(nctrl);
1537 int ret;
1538
1539 if (idx)
Sagi Grimberg26c68222018-12-14 11:06:08 -08001540 ret = nvmf_connect_io_queue(nctrl, idx, false);
Sagi Grimberg3f2304f2018-12-03 17:52:17 -08001541 else
1542 ret = nvmf_connect_admin_queue(nctrl);
1543
1544 if (!ret) {
1545 set_bit(NVME_TCP_Q_LIVE, &ctrl->queues[idx].flags);
1546 } else {
Sagi Grimbergf34e2582019-04-29 16:25:48 -07001547 if (test_bit(NVME_TCP_Q_ALLOCATED, &ctrl->queues[idx].flags))
1548 __nvme_tcp_stop_queue(&ctrl->queues[idx]);
Sagi Grimberg3f2304f2018-12-03 17:52:17 -08001549 dev_err(nctrl->device,
1550 "failed to connect queue: %d ret=%d\n", idx, ret);
1551 }
1552 return ret;
1553}
1554
1555static struct blk_mq_tag_set *nvme_tcp_alloc_tagset(struct nvme_ctrl *nctrl,
1556 bool admin)
1557{
1558 struct nvme_tcp_ctrl *ctrl = to_tcp_ctrl(nctrl);
1559 struct blk_mq_tag_set *set;
1560 int ret;
1561
1562 if (admin) {
1563 set = &ctrl->admin_tag_set;
1564 memset(set, 0, sizeof(*set));
1565 set->ops = &nvme_tcp_admin_mq_ops;
1566 set->queue_depth = NVME_AQ_MQ_TAG_DEPTH;
1567 set->reserved_tags = 2; /* connect + keep-alive */
Max Gurtovoy610c8232020-06-16 12:34:24 +03001568 set->numa_node = nctrl->numa_node;
Sagi Grimbergdb5ad6b2020-05-01 14:25:45 -07001569 set->flags = BLK_MQ_F_BLOCKING;
Sagi Grimberg3f2304f2018-12-03 17:52:17 -08001570 set->cmd_size = sizeof(struct nvme_tcp_request);
1571 set->driver_data = ctrl;
1572 set->nr_hw_queues = 1;
1573 set->timeout = ADMIN_TIMEOUT;
1574 } else {
1575 set = &ctrl->tag_set;
1576 memset(set, 0, sizeof(*set));
1577 set->ops = &nvme_tcp_mq_ops;
1578 set->queue_depth = nctrl->sqsize + 1;
1579 set->reserved_tags = 1; /* fabric connect */
Max Gurtovoy610c8232020-06-16 12:34:24 +03001580 set->numa_node = nctrl->numa_node;
Sagi Grimbergdb5ad6b2020-05-01 14:25:45 -07001581 set->flags = BLK_MQ_F_SHOULD_MERGE | BLK_MQ_F_BLOCKING;
Sagi Grimberg3f2304f2018-12-03 17:52:17 -08001582 set->cmd_size = sizeof(struct nvme_tcp_request);
1583 set->driver_data = ctrl;
1584 set->nr_hw_queues = nctrl->queue_count - 1;
1585 set->timeout = NVME_IO_TIMEOUT;
Sagi Grimberg1a9460c2019-07-03 14:08:04 -07001586 set->nr_maps = nctrl->opts->nr_poll_queues ? HCTX_MAX_TYPES : 2;
Sagi Grimberg3f2304f2018-12-03 17:52:17 -08001587 }
1588
1589 ret = blk_mq_alloc_tag_set(set);
1590 if (ret)
1591 return ERR_PTR(ret);
1592
1593 return set;
1594}
1595
1596static void nvme_tcp_free_admin_queue(struct nvme_ctrl *ctrl)
1597{
1598 if (to_tcp_ctrl(ctrl)->async_req.pdu) {
1599 nvme_tcp_free_async_req(to_tcp_ctrl(ctrl));
1600 to_tcp_ctrl(ctrl)->async_req.pdu = NULL;
1601 }
1602
1603 nvme_tcp_free_queue(ctrl, 0);
1604}
1605
1606static void nvme_tcp_free_io_queues(struct nvme_ctrl *ctrl)
1607{
1608 int i;
1609
1610 for (i = 1; i < ctrl->queue_count; i++)
1611 nvme_tcp_free_queue(ctrl, i);
1612}
1613
1614static void nvme_tcp_stop_io_queues(struct nvme_ctrl *ctrl)
1615{
1616 int i;
1617
1618 for (i = 1; i < ctrl->queue_count; i++)
1619 nvme_tcp_stop_queue(ctrl, i);
1620}
1621
1622static int nvme_tcp_start_io_queues(struct nvme_ctrl *ctrl)
1623{
1624 int i, ret = 0;
1625
1626 for (i = 1; i < ctrl->queue_count; i++) {
1627 ret = nvme_tcp_start_queue(ctrl, i);
1628 if (ret)
1629 goto out_stop_queues;
1630 }
1631
1632 return 0;
1633
1634out_stop_queues:
1635 for (i--; i >= 1; i--)
1636 nvme_tcp_stop_queue(ctrl, i);
1637 return ret;
1638}
1639
1640static int nvme_tcp_alloc_admin_queue(struct nvme_ctrl *ctrl)
1641{
1642 int ret;
1643
1644 ret = nvme_tcp_alloc_queue(ctrl, 0, NVME_AQ_DEPTH);
1645 if (ret)
1646 return ret;
1647
1648 ret = nvme_tcp_alloc_async_req(to_tcp_ctrl(ctrl));
1649 if (ret)
1650 goto out_free_queue;
1651
1652 return 0;
1653
1654out_free_queue:
1655 nvme_tcp_free_queue(ctrl, 0);
1656 return ret;
1657}
1658
Sagi Grimbergefb973b2019-04-24 11:53:19 -07001659static int __nvme_tcp_alloc_io_queues(struct nvme_ctrl *ctrl)
Sagi Grimberg3f2304f2018-12-03 17:52:17 -08001660{
1661 int i, ret;
1662
1663 for (i = 1; i < ctrl->queue_count; i++) {
1664 ret = nvme_tcp_alloc_queue(ctrl, i,
1665 ctrl->sqsize + 1);
1666 if (ret)
1667 goto out_free_queues;
1668 }
1669
1670 return 0;
1671
1672out_free_queues:
1673 for (i--; i >= 1; i--)
1674 nvme_tcp_free_queue(ctrl, i);
1675
1676 return ret;
1677}
1678
1679static unsigned int nvme_tcp_nr_io_queues(struct nvme_ctrl *ctrl)
1680{
Sagi Grimberg873946f2018-12-11 23:38:57 -08001681 unsigned int nr_io_queues;
1682
1683 nr_io_queues = min(ctrl->opts->nr_io_queues, num_online_cpus());
1684 nr_io_queues += min(ctrl->opts->nr_write_queues, num_online_cpus());
Sagi Grimberg1a9460c2019-07-03 14:08:04 -07001685 nr_io_queues += min(ctrl->opts->nr_poll_queues, num_online_cpus());
Sagi Grimberg873946f2018-12-11 23:38:57 -08001686
1687 return nr_io_queues;
Sagi Grimberg3f2304f2018-12-03 17:52:17 -08001688}
1689
Sagi Grimberg64861992019-05-28 22:49:05 -07001690static void nvme_tcp_set_io_queues(struct nvme_ctrl *nctrl,
1691 unsigned int nr_io_queues)
1692{
1693 struct nvme_tcp_ctrl *ctrl = to_tcp_ctrl(nctrl);
1694 struct nvmf_ctrl_options *opts = nctrl->opts;
1695
1696 if (opts->nr_write_queues && opts->nr_io_queues < nr_io_queues) {
1697 /*
1698 * separate read/write queues
1699 * hand out dedicated default queues only after we have
1700 * sufficient read queues.
1701 */
1702 ctrl->io_queues[HCTX_TYPE_READ] = opts->nr_io_queues;
1703 nr_io_queues -= ctrl->io_queues[HCTX_TYPE_READ];
1704 ctrl->io_queues[HCTX_TYPE_DEFAULT] =
1705 min(opts->nr_write_queues, nr_io_queues);
1706 nr_io_queues -= ctrl->io_queues[HCTX_TYPE_DEFAULT];
1707 } else {
1708 /*
1709 * shared read/write queues
1710 * either no write queues were requested, or we don't have
1711 * sufficient queue count to have dedicated default queues.
1712 */
1713 ctrl->io_queues[HCTX_TYPE_DEFAULT] =
1714 min(opts->nr_io_queues, nr_io_queues);
1715 nr_io_queues -= ctrl->io_queues[HCTX_TYPE_DEFAULT];
1716 }
Sagi Grimberg1a9460c2019-07-03 14:08:04 -07001717
1718 if (opts->nr_poll_queues && nr_io_queues) {
1719 /* map dedicated poll queues only if we have queues left */
1720 ctrl->io_queues[HCTX_TYPE_POLL] =
1721 min(opts->nr_poll_queues, nr_io_queues);
1722 }
Sagi Grimberg64861992019-05-28 22:49:05 -07001723}
1724
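/*
 * Negotiate the I/O queue count with the controller (which may grant fewer
 * queues than requested), record it in ctrl->queue_count (+1 for the admin
 * queue), distribute the grant across the default/read/poll sets and then
 * allocate the TCP queues themselves.
 */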
Sagi Grimbergefb973b2019-04-24 11:53:19 -07001725static int nvme_tcp_alloc_io_queues(struct nvme_ctrl *ctrl)
Sagi Grimberg3f2304f2018-12-03 17:52:17 -08001726{
1727 unsigned int nr_io_queues;
1728 int ret;
1729
1730 nr_io_queues = nvme_tcp_nr_io_queues(ctrl);
1731 ret = nvme_set_queue_count(ctrl, &nr_io_queues);
1732 if (ret)
1733 return ret;
1734
1735 ctrl->queue_count = nr_io_queues + 1;
1736 if (ctrl->queue_count < 2)
1737 return 0;
1738
1739 dev_info(ctrl->device,
1740 "creating %d I/O queues.\n", nr_io_queues);
1741
Sagi Grimberg64861992019-05-28 22:49:05 -07001742 nvme_tcp_set_io_queues(ctrl, nr_io_queues);
1743
Sagi Grimbergefb973b2019-04-24 11:53:19 -07001744 return __nvme_tcp_alloc_io_queues(ctrl);
Sagi Grimberg3f2304f2018-12-03 17:52:17 -08001745}
1746
1747static void nvme_tcp_destroy_io_queues(struct nvme_ctrl *ctrl, bool remove)
1748{
1749 nvme_tcp_stop_io_queues(ctrl);
1750 if (remove) {
Sagi Grimberge85037a2018-12-31 23:58:30 -08001751 blk_cleanup_queue(ctrl->connect_q);
Sagi Grimberg3f2304f2018-12-03 17:52:17 -08001752 blk_mq_free_tag_set(ctrl->tagset);
1753 }
1754 nvme_tcp_free_io_queues(ctrl);
1755}
1756
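/*
 * Bring up the I/O queues. For a newly created controller this also
 * allocates the I/O tag set and the connect queue; on reset/reconnect the
 * existing tag set is reused and blk_mq_update_nr_hw_queues() is called
 * under the freeze started during teardown, in case the controller granted
 * a different queue count this time.
 */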
1757static int nvme_tcp_configure_io_queues(struct nvme_ctrl *ctrl, bool new)
1758{
1759 int ret;
1760
Sagi Grimbergefb973b2019-04-24 11:53:19 -07001761 ret = nvme_tcp_alloc_io_queues(ctrl);
Sagi Grimberg3f2304f2018-12-03 17:52:17 -08001762 if (ret)
1763 return ret;
1764
1765 if (new) {
1766 ctrl->tagset = nvme_tcp_alloc_tagset(ctrl, false);
1767 if (IS_ERR(ctrl->tagset)) {
1768 ret = PTR_ERR(ctrl->tagset);
1769 goto out_free_io_queues;
1770 }
1771
Sagi Grimberge85037a2018-12-31 23:58:30 -08001772 ctrl->connect_q = blk_mq_init_queue(ctrl->tagset);
1773 if (IS_ERR(ctrl->connect_q)) {
1774 ret = PTR_ERR(ctrl->connect_q);
1775 goto out_free_tag_set;
Sagi Grimberg3f2304f2018-12-03 17:52:17 -08001776 }
Sagi Grimberg3f2304f2018-12-03 17:52:17 -08001777 }
1778
1779 ret = nvme_tcp_start_io_queues(ctrl);
1780 if (ret)
1781 goto out_cleanup_connect_q;
1782
Sagi Grimberg2875b0a2020-07-24 15:10:12 -07001783 if (!new) {
1784 nvme_start_queues(ctrl);
1785 nvme_wait_freeze(ctrl);
1786 blk_mq_update_nr_hw_queues(ctrl->tagset,
1787 ctrl->queue_count - 1);
1788 nvme_unfreeze(ctrl);
1789 }
1790
Sagi Grimberg3f2304f2018-12-03 17:52:17 -08001791 return 0;
1792
1793out_cleanup_connect_q:
Sagi Grimberge85037a2018-12-31 23:58:30 -08001794 if (new)
Sagi Grimberg3f2304f2018-12-03 17:52:17 -08001795 blk_cleanup_queue(ctrl->connect_q);
1796out_free_tag_set:
1797 if (new)
1798 blk_mq_free_tag_set(ctrl->tagset);
1799out_free_io_queues:
1800 nvme_tcp_free_io_queues(ctrl);
1801 return ret;
1802}
1803
1804static void nvme_tcp_destroy_admin_queue(struct nvme_ctrl *ctrl, bool remove)
1805{
1806 nvme_tcp_stop_queue(ctrl, 0);
1807 if (remove) {
Sagi Grimberg3f2304f2018-12-03 17:52:17 -08001808 blk_cleanup_queue(ctrl->admin_q);
Sagi Grimberge7832cb2019-08-02 19:33:59 -07001809 blk_cleanup_queue(ctrl->fabrics_q);
Sagi Grimberg3f2304f2018-12-03 17:52:17 -08001810 blk_mq_free_tag_set(ctrl->admin_tagset);
1811 }
1812 nvme_tcp_free_admin_queue(ctrl);
1813}
1814
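/*
 * Bring up the admin queue: allocate and connect queue 0, enable the
 * controller and identify it. For a new controller the admin tag set and
 * the fabrics/admin request queues are allocated here as well.
 */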
1815static int nvme_tcp_configure_admin_queue(struct nvme_ctrl *ctrl, bool new)
1816{
1817 int error;
1818
1819 error = nvme_tcp_alloc_admin_queue(ctrl);
1820 if (error)
1821 return error;
1822
1823 if (new) {
1824 ctrl->admin_tagset = nvme_tcp_alloc_tagset(ctrl, true);
1825 if (IS_ERR(ctrl->admin_tagset)) {
1826 error = PTR_ERR(ctrl->admin_tagset);
1827 goto out_free_queue;
1828 }
1829
Sagi Grimberge7832cb2019-08-02 19:33:59 -07001830 ctrl->fabrics_q = blk_mq_init_queue(ctrl->admin_tagset);
1831 if (IS_ERR(ctrl->fabrics_q)) {
1832 error = PTR_ERR(ctrl->fabrics_q);
1833 goto out_free_tagset;
1834 }
1835
Sagi Grimberg3f2304f2018-12-03 17:52:17 -08001836 ctrl->admin_q = blk_mq_init_queue(ctrl->admin_tagset);
1837 if (IS_ERR(ctrl->admin_q)) {
1838 error = PTR_ERR(ctrl->admin_q);
Sagi Grimberge7832cb2019-08-02 19:33:59 -07001839 goto out_cleanup_fabrics_q;
Sagi Grimberg3f2304f2018-12-03 17:52:17 -08001840 }
1841 }
1842
1843 error = nvme_tcp_start_queue(ctrl, 0);
1844 if (error)
1845 goto out_cleanup_queue;
1846
Sagi Grimbergc0f2f452019-07-22 17:06:53 -07001847 error = nvme_enable_ctrl(ctrl);
Sagi Grimberg3f2304f2018-12-03 17:52:17 -08001848 if (error)
1849 goto out_stop_queue;
1850
Sagi Grimberge7832cb2019-08-02 19:33:59 -07001851 blk_mq_unquiesce_queue(ctrl->admin_q);
1852
Sagi Grimberg3f2304f2018-12-03 17:52:17 -08001853 error = nvme_init_identify(ctrl);
1854 if (error)
1855 goto out_stop_queue;
1856
1857 return 0;
1858
1859out_stop_queue:
1860 nvme_tcp_stop_queue(ctrl, 0);
1861out_cleanup_queue:
1862 if (new)
1863 blk_cleanup_queue(ctrl->admin_q);
Sagi Grimberge7832cb2019-08-02 19:33:59 -07001864out_cleanup_fabrics_q:
1865 if (new)
1866 blk_cleanup_queue(ctrl->fabrics_q);
Sagi Grimberg3f2304f2018-12-03 17:52:17 -08001867out_free_tagset:
1868 if (new)
1869 blk_mq_free_tag_set(ctrl->admin_tagset);
1870out_free_queue:
1871 nvme_tcp_free_admin_queue(ctrl);
1872 return error;
1873}
1874
1875static void nvme_tcp_teardown_admin_queue(struct nvme_ctrl *ctrl,
1876 bool remove)
1877{
Sagi Grimbergd4d61472020-08-05 18:13:48 -07001878 mutex_lock(&to_tcp_ctrl(ctrl)->teardown_lock);
Sagi Grimberg3f2304f2018-12-03 17:52:17 -08001879 blk_mq_quiesce_queue(ctrl->admin_q);
1880 nvme_tcp_stop_queue(ctrl, 0);
Ming Lei622b8b62019-07-24 11:48:42 +08001881 if (ctrl->admin_tagset) {
Sagi Grimberg7a425892019-04-24 11:53:17 -07001882 blk_mq_tagset_busy_iter(ctrl->admin_tagset,
1883 nvme_cancel_request, ctrl);
Ming Lei622b8b62019-07-24 11:48:42 +08001884 blk_mq_tagset_wait_completed_request(ctrl->admin_tagset);
1885 }
Sagi Grimberge7832cb2019-08-02 19:33:59 -07001886 if (remove)
1887 blk_mq_unquiesce_queue(ctrl->admin_q);
Sagi Grimberg3f2304f2018-12-03 17:52:17 -08001888 nvme_tcp_destroy_admin_queue(ctrl, remove);
Sagi Grimbergd4d61472020-08-05 18:13:48 -07001889 mutex_unlock(&to_tcp_ctrl(ctrl)->teardown_lock);
Sagi Grimberg3f2304f2018-12-03 17:52:17 -08001890}
1891
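/*
 * Quiesce and stop all I/O queues, freeze the namespace request queues and
 * cancel any requests still owned by the block layer. Serialized against
 * the admin teardown via teardown_lock; the queues are unquiesced again
 * only when the controller is being removed.
 */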
1892static void nvme_tcp_teardown_io_queues(struct nvme_ctrl *ctrl,
1893 bool remove)
1894{
Sagi Grimbergd4d61472020-08-05 18:13:48 -07001895 mutex_lock(&to_tcp_ctrl(ctrl)->teardown_lock);
Sagi Grimberg3f2304f2018-12-03 17:52:17 -08001896 if (ctrl->queue_count <= 1)
Sagi Grimbergd4d61472020-08-05 18:13:48 -07001897 goto out;
1898 blk_mq_quiesce_queue(ctrl->admin_q);
Sagi Grimberg2875b0a2020-07-24 15:10:12 -07001899 nvme_start_freeze(ctrl);
Sagi Grimberg3f2304f2018-12-03 17:52:17 -08001900 nvme_stop_queues(ctrl);
1901 nvme_tcp_stop_io_queues(ctrl);
Ming Lei622b8b62019-07-24 11:48:42 +08001902 if (ctrl->tagset) {
Sagi Grimberg7a425892019-04-24 11:53:17 -07001903 blk_mq_tagset_busy_iter(ctrl->tagset,
1904 nvme_cancel_request, ctrl);
Ming Lei622b8b62019-07-24 11:48:42 +08001905 blk_mq_tagset_wait_completed_request(ctrl->tagset);
1906 }
Sagi Grimberg3f2304f2018-12-03 17:52:17 -08001907 if (remove)
1908 nvme_start_queues(ctrl);
1909 nvme_tcp_destroy_io_queues(ctrl, remove);
Sagi Grimbergd4d61472020-08-05 18:13:48 -07001910out:
1911 mutex_unlock(&to_tcp_ctrl(ctrl)->teardown_lock);
Sagi Grimberg3f2304f2018-12-03 17:52:17 -08001912}
1913
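/*
 * Decide what to do after a connection loss: if the controller is still in
 * the CONNECTING state and the reconnect policy allows it, schedule a
 * delayed reconnect attempt; otherwise delete the controller.
 */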
1914static void nvme_tcp_reconnect_or_remove(struct nvme_ctrl *ctrl)
1915{
1916 /* If we are resetting/deleting then do nothing */
1917 if (ctrl->state != NVME_CTRL_CONNECTING) {
1918 WARN_ON_ONCE(ctrl->state == NVME_CTRL_NEW ||
1919 ctrl->state == NVME_CTRL_LIVE);
1920 return;
1921 }
1922
1923 if (nvmf_should_reconnect(ctrl)) {
1924 dev_info(ctrl->device, "Reconnecting in %d seconds...\n",
1925 ctrl->opts->reconnect_delay);
1926 queue_delayed_work(nvme_wq, &to_tcp_ctrl(ctrl)->connect_work,
1927 ctrl->opts->reconnect_delay * HZ);
1928 } else {
1929 dev_info(ctrl->device, "Removing controller...\n");
1930 nvme_delete_ctrl(ctrl);
1931 }
1932}
1933
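/*
 * Common setup path used for controller creation, reset and reconnect:
 * configure the admin queue, validate controller parameters (icdoff, queue
 * sizes), configure the I/O queues and move the controller to LIVE.
 */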
1934static int nvme_tcp_setup_ctrl(struct nvme_ctrl *ctrl, bool new)
1935{
1936 struct nvmf_ctrl_options *opts = ctrl->opts;
Colin Ian King312910f2019-09-05 15:34:35 +01001937 int ret;
Sagi Grimberg3f2304f2018-12-03 17:52:17 -08001938
1939 ret = nvme_tcp_configure_admin_queue(ctrl, new);
1940 if (ret)
1941 return ret;
1942
1943	if (ctrl->icdoff) {
1944		dev_err(ctrl->device, "icdoff is not supported!\n");
		/* ret is still 0 at this point; set an error so the failure propagates */
		ret = -EOPNOTSUPP;
1945		goto destroy_admin;
1946	}
1947
1948 if (opts->queue_size > ctrl->sqsize + 1)
1949 dev_warn(ctrl->device,
1950 "queue_size %zu > ctrl sqsize %u, clamping down\n",
1951 opts->queue_size, ctrl->sqsize + 1);
1952
1953 if (ctrl->sqsize + 1 > ctrl->maxcmd) {
1954 dev_warn(ctrl->device,
1955 "sqsize %u > ctrl maxcmd %u, clamping down\n",
1956 ctrl->sqsize + 1, ctrl->maxcmd);
1957 ctrl->sqsize = ctrl->maxcmd - 1;
1958 }
1959
1960 if (ctrl->queue_count > 1) {
1961 ret = nvme_tcp_configure_io_queues(ctrl, new);
1962 if (ret)
1963 goto destroy_admin;
1964 }
1965
1966 if (!nvme_change_ctrl_state(ctrl, NVME_CTRL_LIVE)) {
Israel Rukshinbea54ef2020-03-24 17:29:45 +02001967 /*
Sagi Grimbergecca390e2020-07-22 16:32:19 -07001968	 * a state change failure is ok if we started ctrl delete,
Israel Rukshinbea54ef2020-03-24 17:29:45 +02001969	 * but not while we are creating a new controller, where it
1970	 * would race with the teardown flow.
1971 */
Sagi Grimbergecca390e2020-07-22 16:32:19 -07001972 WARN_ON_ONCE(ctrl->state != NVME_CTRL_DELETING &&
1973 ctrl->state != NVME_CTRL_DELETING_NOIO);
Israel Rukshinbea54ef2020-03-24 17:29:45 +02001974 WARN_ON_ONCE(new);
Sagi Grimberg3f2304f2018-12-03 17:52:17 -08001975 ret = -EINVAL;
1976 goto destroy_io;
1977 }
1978
1979 nvme_start_ctrl(ctrl);
1980 return 0;
1981
1982destroy_io:
1983 if (ctrl->queue_count > 1)
1984 nvme_tcp_destroy_io_queues(ctrl, new);
1985destroy_admin:
1986 nvme_tcp_stop_queue(ctrl, 0);
1987 nvme_tcp_destroy_admin_queue(ctrl, new);
1988 return ret;
1989}
1990
1991static void nvme_tcp_reconnect_ctrl_work(struct work_struct *work)
1992{
1993 struct nvme_tcp_ctrl *tcp_ctrl = container_of(to_delayed_work(work),
1994 struct nvme_tcp_ctrl, connect_work);
1995 struct nvme_ctrl *ctrl = &tcp_ctrl->ctrl;
1996
1997 ++ctrl->nr_reconnects;
1998
1999 if (nvme_tcp_setup_ctrl(ctrl, false))
2000 goto requeue;
2001
Colin Ian King56a77d22018-12-14 11:42:43 +00002002	dev_info(ctrl->device, "Successfully reconnected (attempt %d)\n",
Sagi Grimberg3f2304f2018-12-03 17:52:17 -08002003 ctrl->nr_reconnects);
2004
2005 ctrl->nr_reconnects = 0;
2006
2007 return;
2008
2009requeue:
2010 dev_info(ctrl->device, "Failed reconnect attempt %d\n",
2011 ctrl->nr_reconnects);
2012 nvme_tcp_reconnect_or_remove(ctrl);
2013}
2014
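/*
 * Error recovery: tear down the I/O and admin queues (unquiescing so that
 * pending requests fail fast), move the controller to CONNECTING and then
 * either reconnect or remove it.
 */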
2015static void nvme_tcp_error_recovery_work(struct work_struct *work)
2016{
2017 struct nvme_tcp_ctrl *tcp_ctrl = container_of(work,
2018 struct nvme_tcp_ctrl, err_work);
2019 struct nvme_ctrl *ctrl = &tcp_ctrl->ctrl;
2020
2021 nvme_stop_keep_alive(ctrl);
2022 nvme_tcp_teardown_io_queues(ctrl, false);
2023	/* unquiesce so that pending requests fail fast */
2024 nvme_start_queues(ctrl);
2025 nvme_tcp_teardown_admin_queue(ctrl, false);
Sagi Grimberge7832cb2019-08-02 19:33:59 -07002026 blk_mq_unquiesce_queue(ctrl->admin_q);
Sagi Grimberg3f2304f2018-12-03 17:52:17 -08002027
2028 if (!nvme_change_ctrl_state(ctrl, NVME_CTRL_CONNECTING)) {
Sagi Grimbergecca390e2020-07-22 16:32:19 -07002029 /* state change failure is ok if we started ctrl delete */
2030 WARN_ON_ONCE(ctrl->state != NVME_CTRL_DELETING &&
2031 ctrl->state != NVME_CTRL_DELETING_NOIO);
Sagi Grimberg3f2304f2018-12-03 17:52:17 -08002032 return;
2033 }
2034
2035 nvme_tcp_reconnect_or_remove(ctrl);
2036}
2037
2038static void nvme_tcp_teardown_ctrl(struct nvme_ctrl *ctrl, bool shutdown)
2039{
Sagi Grimberg794a4cb2019-01-01 00:19:30 -08002040 cancel_work_sync(&to_tcp_ctrl(ctrl)->err_work);
2041 cancel_delayed_work_sync(&to_tcp_ctrl(ctrl)->connect_work);
2042
Sagi Grimberg3f2304f2018-12-03 17:52:17 -08002043 nvme_tcp_teardown_io_queues(ctrl, shutdown);
Sagi Grimberge7832cb2019-08-02 19:33:59 -07002044 blk_mq_quiesce_queue(ctrl->admin_q);
Sagi Grimberg3f2304f2018-12-03 17:52:17 -08002045 if (shutdown)
2046 nvme_shutdown_ctrl(ctrl);
2047 else
Sagi Grimbergb5b05042019-07-22 17:06:54 -07002048 nvme_disable_ctrl(ctrl);
Sagi Grimberg3f2304f2018-12-03 17:52:17 -08002049 nvme_tcp_teardown_admin_queue(ctrl, shutdown);
2050}
2051
2052static void nvme_tcp_delete_ctrl(struct nvme_ctrl *ctrl)
2053{
2054 nvme_tcp_teardown_ctrl(ctrl, true);
2055}
2056
2057static void nvme_reset_ctrl_work(struct work_struct *work)
2058{
2059 struct nvme_ctrl *ctrl =
2060 container_of(work, struct nvme_ctrl, reset_work);
2061
2062 nvme_stop_ctrl(ctrl);
2063 nvme_tcp_teardown_ctrl(ctrl, false);
2064
2065 if (!nvme_change_ctrl_state(ctrl, NVME_CTRL_CONNECTING)) {
Sagi Grimbergecca390e2020-07-22 16:32:19 -07002066 /* state change failure is ok if we started ctrl delete */
2067 WARN_ON_ONCE(ctrl->state != NVME_CTRL_DELETING &&
2068 ctrl->state != NVME_CTRL_DELETING_NOIO);
Sagi Grimberg3f2304f2018-12-03 17:52:17 -08002069 return;
2070 }
2071
2072 if (nvme_tcp_setup_ctrl(ctrl, false))
2073 goto out_fail;
2074
2075 return;
2076
2077out_fail:
2078 ++ctrl->nr_reconnects;
2079 nvme_tcp_reconnect_or_remove(ctrl);
2080}
2081
Sagi Grimberg3f2304f2018-12-03 17:52:17 -08002082static void nvme_tcp_free_ctrl(struct nvme_ctrl *nctrl)
2083{
2084 struct nvme_tcp_ctrl *ctrl = to_tcp_ctrl(nctrl);
2085
2086 if (list_empty(&ctrl->list))
2087 goto free_ctrl;
2088
2089 mutex_lock(&nvme_tcp_ctrl_mutex);
2090 list_del(&ctrl->list);
2091 mutex_unlock(&nvme_tcp_ctrl_mutex);
2092
2093 nvmf_free_options(nctrl->opts);
2094free_ctrl:
2095 kfree(ctrl->queues);
2096 kfree(ctrl);
2097}
2098
2099static void nvme_tcp_set_sg_null(struct nvme_command *c)
2100{
2101 struct nvme_sgl_desc *sg = &c->common.dptr.sgl;
2102
2103 sg->addr = 0;
2104 sg->length = 0;
2105 sg->type = (NVME_TRANSPORT_SGL_DATA_DESC << 4) |
2106 NVME_SGL_FMT_TRANSPORT_A;
2107}
2108
2109static void nvme_tcp_set_sg_inline(struct nvme_tcp_queue *queue,
2110 struct nvme_command *c, u32 data_len)
2111{
2112 struct nvme_sgl_desc *sg = &c->common.dptr.sgl;
2113
2114 sg->addr = cpu_to_le64(queue->ctrl->ctrl.icdoff);
2115 sg->length = cpu_to_le32(data_len);
2116 sg->type = (NVME_SGL_FMT_DATA_DESC << 4) | NVME_SGL_FMT_OFFSET;
2117}
2118
2119static void nvme_tcp_set_sg_host_data(struct nvme_command *c,
2120 u32 data_len)
2121{
2122 struct nvme_sgl_desc *sg = &c->common.dptr.sgl;
2123
2124 sg->addr = 0;
2125 sg->length = cpu_to_le32(data_len);
2126 sg->type = (NVME_TRANSPORT_SGL_DATA_DESC << 4) |
2127 NVME_SGL_FMT_TRANSPORT_A;
2128}
2129
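/*
 * Submit the Asynchronous Event Request command on the admin queue using
 * the pre-allocated async_req PDU and the reserved NVME_AQ_BLK_MQ_DEPTH
 * command id; the command carries no data (NULL SGL).
 */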
2130static void nvme_tcp_submit_async_event(struct nvme_ctrl *arg)
2131{
2132 struct nvme_tcp_ctrl *ctrl = to_tcp_ctrl(arg);
2133 struct nvme_tcp_queue *queue = &ctrl->queues[0];
2134 struct nvme_tcp_cmd_pdu *pdu = ctrl->async_req.pdu;
2135 struct nvme_command *cmd = &pdu->cmd;
2136 u8 hdgst = nvme_tcp_hdgst_len(queue);
2137
2138 memset(pdu, 0, sizeof(*pdu));
2139 pdu->hdr.type = nvme_tcp_cmd;
2140 if (queue->hdr_digest)
2141 pdu->hdr.flags |= NVME_TCP_F_HDGST;
2142 pdu->hdr.hlen = sizeof(*pdu);
2143 pdu->hdr.plen = cpu_to_le32(pdu->hdr.hlen + hdgst);
2144
2145 cmd->common.opcode = nvme_admin_async_event;
2146 cmd->common.command_id = NVME_AQ_BLK_MQ_DEPTH;
2147 cmd->common.flags |= NVME_CMD_SGL_METABUF;
2148 nvme_tcp_set_sg_null(cmd);
2149
2150 ctrl->async_req.state = NVME_TCP_SEND_CMD_PDU;
2151 ctrl->async_req.offset = 0;
2152 ctrl->async_req.curr_bio = NULL;
2153 ctrl->async_req.data_len = 0;
2154
Sagi Grimberg86f03482020-06-18 17:30:23 -07002155 nvme_tcp_queue_request(&ctrl->async_req, true, true);
Sagi Grimberg3f2304f2018-12-03 17:52:17 -08002156}
2157
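/*
 * Request timeout handler. If a controller reset is already scheduled the
 * block layer timer is simply restarted; if the controller is not LIVE the
 * queues are torn down synchronously and the request is completed
 * (BLK_EH_DONE); otherwise error recovery is started and the timer is
 * restarted.
 */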
2158static enum blk_eh_timer_return
2159nvme_tcp_timeout(struct request *rq, bool reserved)
2160{
2161 struct nvme_tcp_request *req = blk_mq_rq_to_pdu(rq);
2162 struct nvme_tcp_ctrl *ctrl = req->queue->ctrl;
2163 struct nvme_tcp_cmd_pdu *pdu = req->pdu;
2164
Keith Busch92b98e82019-09-05 08:09:33 -06002165 /*
2166 * Restart the timer if a controller reset is already scheduled. Any
2167 * timed out commands would be handled before entering the connecting
2168 * state.
2169 */
2170 if (ctrl->ctrl.state == NVME_CTRL_RESETTING)
2171 return BLK_EH_RESET_TIMER;
2172
Sagi Grimberg39d57752019-01-08 01:01:30 -08002173 dev_warn(ctrl->ctrl.device,
Sagi Grimberg3f2304f2018-12-03 17:52:17 -08002174 "queue %d: timeout request %#x type %d\n",
Sagi Grimberg39d57752019-01-08 01:01:30 -08002175 nvme_tcp_queue_id(req->queue), rq->tag, pdu->hdr.type);
Sagi Grimberg3f2304f2018-12-03 17:52:17 -08002176
2177 if (ctrl->ctrl.state != NVME_CTRL_LIVE) {
Sagi Grimberg39d57752019-01-08 01:01:30 -08002178		/*
2179		 * Tear down immediately if the controller times out while starting
2180		 * or if we have already started error recovery. All outstanding
2181		 * requests are completed on shutdown, so we return BLK_EH_DONE.
2182		 */
2183 flush_work(&ctrl->err_work);
2184 nvme_tcp_teardown_io_queues(&ctrl->ctrl, false);
2185 nvme_tcp_teardown_admin_queue(&ctrl->ctrl, false);
Sagi Grimberg3f2304f2018-12-03 17:52:17 -08002186 return BLK_EH_DONE;
2187 }
2188
Sagi Grimberg39d57752019-01-08 01:01:30 -08002189 dev_warn(ctrl->ctrl.device, "starting error recovery\n");
Sagi Grimberg3f2304f2018-12-03 17:52:17 -08002190 nvme_tcp_error_recovery(&ctrl->ctrl);
2191
2192 return BLK_EH_RESET_TIMER;
2193}
2194
2195static blk_status_t nvme_tcp_map_data(struct nvme_tcp_queue *queue,
2196 struct request *rq)
2197{
2198 struct nvme_tcp_request *req = blk_mq_rq_to_pdu(rq);
2199 struct nvme_tcp_cmd_pdu *pdu = req->pdu;
2200 struct nvme_command *c = &pdu->cmd;
2201
2202 c->common.flags |= NVME_CMD_SGL_METABUF;
2203
Sagi Grimberg25e5cb72020-03-23 15:06:30 -07002204 if (!blk_rq_nr_phys_segments(rq))
2205 nvme_tcp_set_sg_null(c);
2206 else if (rq_data_dir(rq) == WRITE &&
Sagi Grimberg3f2304f2018-12-03 17:52:17 -08002207 req->data_len <= nvme_tcp_inline_data_size(queue))
2208 nvme_tcp_set_sg_inline(queue, c, req->data_len);
2209 else
2210 nvme_tcp_set_sg_host_data(c, req->data_len);
2211
2212 return 0;
2213}
2214
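/*
 * Build the command capsule PDU for a request: initialize the send state,
 * decide whether the payload is sent inline with the command (writes up to
 * the in-capsule data size), and fill in the PDU header lengths and the
 * header/data digest flags.
 */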
2215static blk_status_t nvme_tcp_setup_cmd_pdu(struct nvme_ns *ns,
2216 struct request *rq)
2217{
2218 struct nvme_tcp_request *req = blk_mq_rq_to_pdu(rq);
2219 struct nvme_tcp_cmd_pdu *pdu = req->pdu;
2220 struct nvme_tcp_queue *queue = req->queue;
2221 u8 hdgst = nvme_tcp_hdgst_len(queue), ddgst = 0;
2222 blk_status_t ret;
2223
2224 ret = nvme_setup_cmd(ns, rq, &pdu->cmd);
2225 if (ret)
2226 return ret;
2227
2228 req->state = NVME_TCP_SEND_CMD_PDU;
2229 req->offset = 0;
2230 req->data_sent = 0;
2231 req->pdu_len = 0;
2232 req->pdu_sent = 0;
Sagi Grimberg25e5cb72020-03-23 15:06:30 -07002233 req->data_len = blk_rq_nr_phys_segments(rq) ?
2234 blk_rq_payload_bytes(rq) : 0;
Sagi Grimberg3f2304f2018-12-03 17:52:17 -08002235 req->curr_bio = rq->bio;
2236
2237 if (rq_data_dir(rq) == WRITE &&
2238 req->data_len <= nvme_tcp_inline_data_size(queue))
2239 req->pdu_len = req->data_len;
2240 else if (req->curr_bio)
2241 nvme_tcp_init_iter(req, READ);
2242
2243 pdu->hdr.type = nvme_tcp_cmd;
2244 pdu->hdr.flags = 0;
2245 if (queue->hdr_digest)
2246 pdu->hdr.flags |= NVME_TCP_F_HDGST;
2247 if (queue->data_digest && req->pdu_len) {
2248 pdu->hdr.flags |= NVME_TCP_F_DDGST;
2249 ddgst = nvme_tcp_ddgst_len(queue);
2250 }
2251 pdu->hdr.hlen = sizeof(*pdu);
2252 pdu->hdr.pdo = req->pdu_len ? pdu->hdr.hlen + hdgst : 0;
2253 pdu->hdr.plen =
2254 cpu_to_le32(pdu->hdr.hlen + hdgst + req->pdu_len + ddgst);
2255
2256 ret = nvme_tcp_map_data(queue, rq);
2257 if (unlikely(ret)) {
Max Gurtovoy28a4cac2019-10-13 19:57:38 +03002258 nvme_cleanup_cmd(rq);
Sagi Grimberg3f2304f2018-12-03 17:52:17 -08002259 dev_err(queue->ctrl->ctrl.device,
2260 "Failed to map data (%d)\n", ret);
2261 return ret;
2262 }
2263
2264 return 0;
2265}
2266
Sagi Grimberg86f03482020-06-18 17:30:23 -07002267static void nvme_tcp_commit_rqs(struct blk_mq_hw_ctx *hctx)
2268{
2269 struct nvme_tcp_queue *queue = hctx->driver_data;
2270
2271 if (!llist_empty(&queue->req_list))
2272 queue_work_on(queue->io_cpu, nvme_tcp_wq, &queue->io_work);
2273}
2274
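/*
 * blk-mq ->queue_rq handler: reject commands the fabric is not ready for,
 * build the command PDU, start the request and hand it to the send path.
 * bd->last is passed through so transmission can be batched, with
 * ->commit_rqs kicking the io_work for any leftover queued requests.
 */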
Sagi Grimberg3f2304f2018-12-03 17:52:17 -08002275static blk_status_t nvme_tcp_queue_rq(struct blk_mq_hw_ctx *hctx,
2276 const struct blk_mq_queue_data *bd)
2277{
2278 struct nvme_ns *ns = hctx->queue->queuedata;
2279 struct nvme_tcp_queue *queue = hctx->driver_data;
2280 struct request *rq = bd->rq;
2281 struct nvme_tcp_request *req = blk_mq_rq_to_pdu(rq);
2282 bool queue_ready = test_bit(NVME_TCP_Q_LIVE, &queue->flags);
2283 blk_status_t ret;
2284
2285 if (!nvmf_check_ready(&queue->ctrl->ctrl, rq, queue_ready))
2286 return nvmf_fail_nonready_command(&queue->ctrl->ctrl, rq);
2287
2288 ret = nvme_tcp_setup_cmd_pdu(ns, rq);
2289 if (unlikely(ret))
2290 return ret;
2291
2292 blk_mq_start_request(rq);
2293
Sagi Grimberg86f03482020-06-18 17:30:23 -07002294 nvme_tcp_queue_request(req, true, bd->last);
Sagi Grimberg3f2304f2018-12-03 17:52:17 -08002295
2296 return BLK_STS_OK;
2297}
2298
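/*
 * Map blk-mq hardware contexts onto the TCP queues: the default and read
 * sets either get dedicated ranges (when separate write queues were
 * requested) or share the same queues, and poll queues, if any, are mapped
 * after them.
 */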
Sagi Grimberg873946f2018-12-11 23:38:57 -08002299static int nvme_tcp_map_queues(struct blk_mq_tag_set *set)
2300{
2301 struct nvme_tcp_ctrl *ctrl = set->driver_data;
Sagi Grimberg64861992019-05-28 22:49:05 -07002302 struct nvmf_ctrl_options *opts = ctrl->ctrl.opts;
Sagi Grimberg873946f2018-12-11 23:38:57 -08002303
Sagi Grimberg64861992019-05-28 22:49:05 -07002304 if (opts->nr_write_queues && ctrl->io_queues[HCTX_TYPE_READ]) {
Sagi Grimberg873946f2018-12-11 23:38:57 -08002305 /* separate read/write queues */
2306 set->map[HCTX_TYPE_DEFAULT].nr_queues =
Sagi Grimberg64861992019-05-28 22:49:05 -07002307 ctrl->io_queues[HCTX_TYPE_DEFAULT];
2308 set->map[HCTX_TYPE_DEFAULT].queue_offset = 0;
2309 set->map[HCTX_TYPE_READ].nr_queues =
2310 ctrl->io_queues[HCTX_TYPE_READ];
Sagi Grimberg873946f2018-12-11 23:38:57 -08002311 set->map[HCTX_TYPE_READ].queue_offset =
Sagi Grimberg64861992019-05-28 22:49:05 -07002312 ctrl->io_queues[HCTX_TYPE_DEFAULT];
Sagi Grimberg873946f2018-12-11 23:38:57 -08002313 } else {
Sagi Grimberg64861992019-05-28 22:49:05 -07002314 /* shared read/write queues */
Sagi Grimberg873946f2018-12-11 23:38:57 -08002315 set->map[HCTX_TYPE_DEFAULT].nr_queues =
Sagi Grimberg64861992019-05-28 22:49:05 -07002316 ctrl->io_queues[HCTX_TYPE_DEFAULT];
2317 set->map[HCTX_TYPE_DEFAULT].queue_offset = 0;
2318 set->map[HCTX_TYPE_READ].nr_queues =
2319 ctrl->io_queues[HCTX_TYPE_DEFAULT];
Sagi Grimberg873946f2018-12-11 23:38:57 -08002320 set->map[HCTX_TYPE_READ].queue_offset = 0;
2321 }
2322 blk_mq_map_queues(&set->map[HCTX_TYPE_DEFAULT]);
2323 blk_mq_map_queues(&set->map[HCTX_TYPE_READ]);
Sagi Grimberg64861992019-05-28 22:49:05 -07002324
Sagi Grimberg1a9460c2019-07-03 14:08:04 -07002325 if (opts->nr_poll_queues && ctrl->io_queues[HCTX_TYPE_POLL]) {
2326 /* map dedicated poll queues only if we have queues left */
2327 set->map[HCTX_TYPE_POLL].nr_queues =
2328 ctrl->io_queues[HCTX_TYPE_POLL];
2329 set->map[HCTX_TYPE_POLL].queue_offset =
2330 ctrl->io_queues[HCTX_TYPE_DEFAULT] +
2331 ctrl->io_queues[HCTX_TYPE_READ];
2332 blk_mq_map_queues(&set->map[HCTX_TYPE_POLL]);
2333 }
2334
Sagi Grimberg64861992019-05-28 22:49:05 -07002335 dev_info(ctrl->ctrl.device,
Sagi Grimberg1a9460c2019-07-03 14:08:04 -07002336 "mapped %d/%d/%d default/read/poll queues.\n",
Sagi Grimberg64861992019-05-28 22:49:05 -07002337 ctrl->io_queues[HCTX_TYPE_DEFAULT],
Sagi Grimberg1a9460c2019-07-03 14:08:04 -07002338 ctrl->io_queues[HCTX_TYPE_READ],
2339 ctrl->io_queues[HCTX_TYPE_POLL]);
Sagi Grimberg64861992019-05-28 22:49:05 -07002340
Sagi Grimberg873946f2018-12-11 23:38:57 -08002341 return 0;
2342}
2343
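/*
 * blk-mq polling entry point: busy-poll the socket (when permitted) and
 * reap received completions directly, returning the number of completions
 * processed on this queue.
 */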
Sagi Grimberg1a9460c2019-07-03 14:08:04 -07002344static int nvme_tcp_poll(struct blk_mq_hw_ctx *hctx)
2345{
2346 struct nvme_tcp_queue *queue = hctx->driver_data;
2347 struct sock *sk = queue->sock->sk;
2348
Sagi Grimbergf86e5bf2020-03-23 16:43:52 -07002349 if (!test_bit(NVME_TCP_Q_LIVE, &queue->flags))
2350 return 0;
2351
Sagi Grimberg72e5d752020-05-01 14:25:44 -07002352 set_bit(NVME_TCP_Q_POLLING, &queue->flags);
Eric Dumazet3f926af2019-10-23 22:44:51 -07002353 if (sk_can_busy_loop(sk) && skb_queue_empty_lockless(&sk->sk_receive_queue))
Sagi Grimberg1a9460c2019-07-03 14:08:04 -07002354 sk_busy_loop(sk, true);
2355 nvme_tcp_try_recv(queue);
Sagi Grimberg72e5d752020-05-01 14:25:44 -07002356 clear_bit(NVME_TCP_Q_POLLING, &queue->flags);
Sagi Grimberg1a9460c2019-07-03 14:08:04 -07002357 return queue->nr_cqe;
2358}
2359
Rikard Falkeborn6acbd962020-05-29 00:25:07 +02002360static const struct blk_mq_ops nvme_tcp_mq_ops = {
Sagi Grimberg3f2304f2018-12-03 17:52:17 -08002361 .queue_rq = nvme_tcp_queue_rq,
Sagi Grimberg86f03482020-06-18 17:30:23 -07002362 .commit_rqs = nvme_tcp_commit_rqs,
Sagi Grimberg3f2304f2018-12-03 17:52:17 -08002363 .complete = nvme_complete_rq,
2364 .init_request = nvme_tcp_init_request,
2365 .exit_request = nvme_tcp_exit_request,
2366 .init_hctx = nvme_tcp_init_hctx,
2367 .timeout = nvme_tcp_timeout,
Sagi Grimberg873946f2018-12-11 23:38:57 -08002368 .map_queues = nvme_tcp_map_queues,
Sagi Grimberg1a9460c2019-07-03 14:08:04 -07002369 .poll = nvme_tcp_poll,
Sagi Grimberg3f2304f2018-12-03 17:52:17 -08002370};
2371
Rikard Falkeborn6acbd962020-05-29 00:25:07 +02002372static const struct blk_mq_ops nvme_tcp_admin_mq_ops = {
Sagi Grimberg3f2304f2018-12-03 17:52:17 -08002373 .queue_rq = nvme_tcp_queue_rq,
2374 .complete = nvme_complete_rq,
2375 .init_request = nvme_tcp_init_request,
2376 .exit_request = nvme_tcp_exit_request,
2377 .init_hctx = nvme_tcp_init_admin_hctx,
2378 .timeout = nvme_tcp_timeout,
2379};
2380
2381static const struct nvme_ctrl_ops nvme_tcp_ctrl_ops = {
2382 .name = "tcp",
2383 .module = THIS_MODULE,
2384 .flags = NVME_F_FABRICS,
2385 .reg_read32 = nvmf_reg_read32,
2386 .reg_read64 = nvmf_reg_read64,
2387 .reg_write32 = nvmf_reg_write32,
2388 .free_ctrl = nvme_tcp_free_ctrl,
2389 .submit_async_event = nvme_tcp_submit_async_event,
2390 .delete_ctrl = nvme_tcp_delete_ctrl,
2391 .get_address = nvmf_get_address,
Sagi Grimberg3f2304f2018-12-03 17:52:17 -08002392};
2393
2394static bool
2395nvme_tcp_existing_controller(struct nvmf_ctrl_options *opts)
2396{
2397 struct nvme_tcp_ctrl *ctrl;
2398 bool found = false;
2399
2400 mutex_lock(&nvme_tcp_ctrl_mutex);
2401 list_for_each_entry(ctrl, &nvme_tcp_ctrl_list, list) {
2402 found = nvmf_ip_options_match(&ctrl->ctrl, opts);
2403 if (found)
2404 break;
2405 }
2406 mutex_unlock(&nvme_tcp_ctrl_mutex);
2407
2408 return found;
2409}
2410
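/*
 * Transport ->create_ctrl: allocate and initialize the TCP controller,
 * resolve the target (and optional host) address, reject duplicate
 * connections unless explicitly allowed, then run the full setup and
 * register the controller on the global list.
 */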
2411static struct nvme_ctrl *nvme_tcp_create_ctrl(struct device *dev,
2412 struct nvmf_ctrl_options *opts)
2413{
2414 struct nvme_tcp_ctrl *ctrl;
2415 int ret;
2416
2417 ctrl = kzalloc(sizeof(*ctrl), GFP_KERNEL);
2418 if (!ctrl)
2419 return ERR_PTR(-ENOMEM);
2420
2421 INIT_LIST_HEAD(&ctrl->list);
2422 ctrl->ctrl.opts = opts;
Sagi Grimberg1a9460c2019-07-03 14:08:04 -07002423 ctrl->ctrl.queue_count = opts->nr_io_queues + opts->nr_write_queues +
2424 opts->nr_poll_queues + 1;
Sagi Grimberg3f2304f2018-12-03 17:52:17 -08002425 ctrl->ctrl.sqsize = opts->queue_size - 1;
2426 ctrl->ctrl.kato = opts->kato;
2427
2428 INIT_DELAYED_WORK(&ctrl->connect_work,
2429 nvme_tcp_reconnect_ctrl_work);
2430 INIT_WORK(&ctrl->err_work, nvme_tcp_error_recovery_work);
2431 INIT_WORK(&ctrl->ctrl.reset_work, nvme_reset_ctrl_work);
Sagi Grimbergd4d61472020-08-05 18:13:48 -07002432 mutex_init(&ctrl->teardown_lock);
Sagi Grimberg3f2304f2018-12-03 17:52:17 -08002433
2434 if (!(opts->mask & NVMF_OPT_TRSVCID)) {
2435 opts->trsvcid =
2436 kstrdup(__stringify(NVME_TCP_DISC_PORT), GFP_KERNEL);
2437 if (!opts->trsvcid) {
2438 ret = -ENOMEM;
2439 goto out_free_ctrl;
2440 }
2441 opts->mask |= NVMF_OPT_TRSVCID;
2442 }
2443
2444 ret = inet_pton_with_scope(&init_net, AF_UNSPEC,
2445 opts->traddr, opts->trsvcid, &ctrl->addr);
2446 if (ret) {
2447 pr_err("malformed address passed: %s:%s\n",
2448 opts->traddr, opts->trsvcid);
2449 goto out_free_ctrl;
2450 }
2451
2452 if (opts->mask & NVMF_OPT_HOST_TRADDR) {
2453 ret = inet_pton_with_scope(&init_net, AF_UNSPEC,
2454 opts->host_traddr, NULL, &ctrl->src_addr);
2455 if (ret) {
2456 pr_err("malformed src address passed: %s\n",
2457 opts->host_traddr);
2458 goto out_free_ctrl;
2459 }
2460 }
2461
2462 if (!opts->duplicate_connect && nvme_tcp_existing_controller(opts)) {
2463 ret = -EALREADY;
2464 goto out_free_ctrl;
2465 }
2466
Sagi Grimberg873946f2018-12-11 23:38:57 -08002467 ctrl->queues = kcalloc(ctrl->ctrl.queue_count, sizeof(*ctrl->queues),
Sagi Grimberg3f2304f2018-12-03 17:52:17 -08002468 GFP_KERNEL);
2469 if (!ctrl->queues) {
2470 ret = -ENOMEM;
2471 goto out_free_ctrl;
2472 }
2473
2474 ret = nvme_init_ctrl(&ctrl->ctrl, dev, &nvme_tcp_ctrl_ops, 0);
2475 if (ret)
2476 goto out_kfree_queues;
2477
2478 if (!nvme_change_ctrl_state(&ctrl->ctrl, NVME_CTRL_CONNECTING)) {
2479 WARN_ON_ONCE(1);
2480 ret = -EINTR;
2481 goto out_uninit_ctrl;
2482 }
2483
2484 ret = nvme_tcp_setup_ctrl(&ctrl->ctrl, true);
2485 if (ret)
2486 goto out_uninit_ctrl;
2487
2488 dev_info(ctrl->ctrl.device, "new ctrl: NQN \"%s\", addr %pISp\n",
2489 ctrl->ctrl.opts->subsysnqn, &ctrl->addr);
2490
Sagi Grimberg3f2304f2018-12-03 17:52:17 -08002491 mutex_lock(&nvme_tcp_ctrl_mutex);
2492 list_add_tail(&ctrl->list, &nvme_tcp_ctrl_list);
2493 mutex_unlock(&nvme_tcp_ctrl_mutex);
2494
2495 return &ctrl->ctrl;
2496
2497out_uninit_ctrl:
2498 nvme_uninit_ctrl(&ctrl->ctrl);
2499 nvme_put_ctrl(&ctrl->ctrl);
2500 if (ret > 0)
2501 ret = -EIO;
2502 return ERR_PTR(ret);
2503out_kfree_queues:
2504 kfree(ctrl->queues);
2505out_free_ctrl:
2506 kfree(ctrl);
2507 return ERR_PTR(ret);
2508}
2509
2510static struct nvmf_transport_ops nvme_tcp_transport = {
2511 .name = "tcp",
2512 .module = THIS_MODULE,
2513 .required_opts = NVMF_OPT_TRADDR,
2514 .allowed_opts = NVMF_OPT_TRSVCID | NVMF_OPT_RECONNECT_DELAY |
2515 NVMF_OPT_HOST_TRADDR | NVMF_OPT_CTRL_LOSS_TMO |
Sagi Grimberg873946f2018-12-11 23:38:57 -08002516 NVMF_OPT_HDR_DIGEST | NVMF_OPT_DATA_DIGEST |
Israel Rukshinbb139852019-08-18 12:08:54 +03002517 NVMF_OPT_NR_WRITE_QUEUES | NVMF_OPT_NR_POLL_QUEUES |
2518 NVMF_OPT_TOS,
Sagi Grimberg3f2304f2018-12-03 17:52:17 -08002519 .create_ctrl = nvme_tcp_create_ctrl,
2520};
2521
2522static int __init nvme_tcp_init_module(void)
2523{
2524 nvme_tcp_wq = alloc_workqueue("nvme_tcp_wq",
2525 WQ_MEM_RECLAIM | WQ_HIGHPRI, 0);
2526 if (!nvme_tcp_wq)
2527 return -ENOMEM;
2528
2529 nvmf_register_transport(&nvme_tcp_transport);
2530 return 0;
2531}
2532
2533static void __exit nvme_tcp_cleanup_module(void)
2534{
2535 struct nvme_tcp_ctrl *ctrl;
2536
2537 nvmf_unregister_transport(&nvme_tcp_transport);
2538
2539 mutex_lock(&nvme_tcp_ctrl_mutex);
2540 list_for_each_entry(ctrl, &nvme_tcp_ctrl_list, list)
2541 nvme_delete_ctrl(&ctrl->ctrl);
2542 mutex_unlock(&nvme_tcp_ctrl_mutex);
2543 flush_workqueue(nvme_delete_wq);
2544
2545 destroy_workqueue(nvme_tcp_wq);
2546}
2547
2548module_init(nvme_tcp_init_module);
2549module_exit(nvme_tcp_cleanup_module);
2550
2551MODULE_LICENSE("GPL v2");