// SPDX-License-Identifier: GPL-2.0
/*
 * NVMe over Fabrics TCP host.
 * Copyright (c) 2018 Lightbits Labs. All rights reserved.
 */
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/module.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/err.h>
#include <linux/nvme-tcp.h>
#include <net/sock.h>
#include <net/tcp.h>
#include <linux/blk-mq.h>
#include <crypto/hash.h>
#include <net/busy_poll.h>

#include "nvme.h"
#include "fabrics.h"

struct nvme_tcp_queue;

/* Define the socket priority to use for connections where it is desirable
 * that the NIC consider performing optimized packet processing or filtering.
 * A non-zero value being sufficient to indicate general consideration of any
 * possible optimization. Making it a module param allows for alternative
 * values that may be unique for some NIC implementations.
 */
static int so_priority;
module_param(so_priority, int, 0644);
MODULE_PARM_DESC(so_priority, "nvme tcp socket optimize priority");

enum nvme_tcp_send_state {
	NVME_TCP_SEND_CMD_PDU = 0,
	NVME_TCP_SEND_H2C_PDU,
	NVME_TCP_SEND_DATA,
	NVME_TCP_SEND_DDGST,
};

struct nvme_tcp_request {
	struct nvme_request	req;
	void			*pdu;
	struct nvme_tcp_queue	*queue;
	u32			data_len;
	u32			pdu_len;
	u32			pdu_sent;
	u16			ttag;
	struct list_head	entry;
	struct llist_node	lentry;
	__le32			ddgst;

	struct bio		*curr_bio;
	struct iov_iter		iter;

	/* send state */
	size_t			offset;
	size_t			data_sent;
	enum nvme_tcp_send_state state;
};

enum nvme_tcp_queue_flags {
	NVME_TCP_Q_ALLOCATED	= 0,
	NVME_TCP_Q_LIVE		= 1,
	NVME_TCP_Q_POLLING	= 2,
};

enum nvme_tcp_recv_state {
	NVME_TCP_RECV_PDU = 0,
	NVME_TCP_RECV_DATA,
	NVME_TCP_RECV_DDGST,
};

struct nvme_tcp_ctrl;
struct nvme_tcp_queue {
	struct socket		*sock;
	struct work_struct	io_work;
	int			io_cpu;

	struct mutex		send_mutex;
	struct llist_head	req_list;
	struct list_head	send_list;

	/* recv state */
	void			*pdu;
	int			pdu_remaining;
	int			pdu_offset;
	size_t			data_remaining;
	size_t			ddgst_remaining;
	unsigned int		nr_cqe;

	/* send state */
	struct nvme_tcp_request *request;

	int			queue_size;
	size_t			cmnd_capsule_len;
	struct nvme_tcp_ctrl	*ctrl;
	unsigned long		flags;
	bool			rd_enabled;

	bool			hdr_digest;
	bool			data_digest;
	struct ahash_request	*rcv_hash;
	struct ahash_request	*snd_hash;
	__le32			exp_ddgst;
	__le32			recv_ddgst;

	struct page_frag_cache	pf_cache;

	void (*state_change)(struct sock *);
	void (*data_ready)(struct sock *);
	void (*write_space)(struct sock *);
};

struct nvme_tcp_ctrl {
	/* read only in the hot path */
	struct nvme_tcp_queue	*queues;
	struct blk_mq_tag_set	tag_set;

	/* other member variables */
	struct list_head	list;
	struct blk_mq_tag_set	admin_tag_set;
	struct sockaddr_storage addr;
	struct sockaddr_storage src_addr;
	struct nvme_ctrl	ctrl;

	struct work_struct	err_work;
	struct delayed_work	connect_work;
	struct nvme_tcp_request async_req;
	u32			io_queues[HCTX_MAX_TYPES];
};

static LIST_HEAD(nvme_tcp_ctrl_list);
static DEFINE_MUTEX(nvme_tcp_ctrl_mutex);
static struct workqueue_struct *nvme_tcp_wq;
static const struct blk_mq_ops nvme_tcp_mq_ops;
static const struct blk_mq_ops nvme_tcp_admin_mq_ops;
static int nvme_tcp_try_send(struct nvme_tcp_queue *queue);

static inline struct nvme_tcp_ctrl *to_tcp_ctrl(struct nvme_ctrl *ctrl)
{
	return container_of(ctrl, struct nvme_tcp_ctrl, ctrl);
}

static inline int nvme_tcp_queue_id(struct nvme_tcp_queue *queue)
{
	return queue - queue->ctrl->queues;
}

static inline struct blk_mq_tags *nvme_tcp_tagset(struct nvme_tcp_queue *queue)
{
	u32 queue_idx = nvme_tcp_queue_id(queue);

	if (queue_idx == 0)
		return queue->ctrl->admin_tag_set.tags[queue_idx];
	return queue->ctrl->tag_set.tags[queue_idx - 1];
}

static inline u8 nvme_tcp_hdgst_len(struct nvme_tcp_queue *queue)
{
	return queue->hdr_digest ? NVME_TCP_DIGEST_LENGTH : 0;
}

static inline u8 nvme_tcp_ddgst_len(struct nvme_tcp_queue *queue)
{
	return queue->data_digest ? NVME_TCP_DIGEST_LENGTH : 0;
}

static inline size_t nvme_tcp_inline_data_size(struct nvme_tcp_queue *queue)
{
	return queue->cmnd_capsule_len - sizeof(struct nvme_command);
}

static inline bool nvme_tcp_async_req(struct nvme_tcp_request *req)
{
	return req == &req->queue->ctrl->async_req;
}

static inline bool nvme_tcp_has_inline_data(struct nvme_tcp_request *req)
{
	struct request *rq;

	if (unlikely(nvme_tcp_async_req(req)))
		return false; /* async events don't have a request */

	rq = blk_mq_rq_from_pdu(req);

	return rq_data_dir(rq) == WRITE && req->data_len &&
		req->data_len <= nvme_tcp_inline_data_size(req->queue);
}

static inline struct page *nvme_tcp_req_cur_page(struct nvme_tcp_request *req)
{
	return req->iter.bvec->bv_page;
}

static inline size_t nvme_tcp_req_cur_offset(struct nvme_tcp_request *req)
{
	return req->iter.bvec->bv_offset + req->iter.iov_offset;
}

static inline size_t nvme_tcp_req_cur_length(struct nvme_tcp_request *req)
{
	return min_t(size_t, req->iter.bvec->bv_len - req->iter.iov_offset,
			req->pdu_len - req->pdu_sent);
}

static inline size_t nvme_tcp_req_offset(struct nvme_tcp_request *req)
{
	return req->iter.iov_offset;
}

static inline size_t nvme_tcp_pdu_data_left(struct nvme_tcp_request *req)
{
	return rq_data_dir(blk_mq_rq_from_pdu(req)) == WRITE ?
			req->pdu_len - req->pdu_sent : 0;
}

static inline size_t nvme_tcp_pdu_last_send(struct nvme_tcp_request *req,
		int len)
{
	return nvme_tcp_pdu_data_left(req) <= len;
}

static void nvme_tcp_init_iter(struct nvme_tcp_request *req,
		unsigned int dir)
{
	struct request *rq = blk_mq_rq_from_pdu(req);
	struct bio_vec *vec;
	unsigned int size;
	int nsegs;
	size_t offset;

	if (rq->rq_flags & RQF_SPECIAL_PAYLOAD) {
		vec = &rq->special_vec;
		nsegs = 1;
		size = blk_rq_payload_bytes(rq);
		offset = 0;
	} else {
		struct bio *bio = req->curr_bio;

		vec = __bvec_iter_bvec(bio->bi_io_vec, bio->bi_iter);
		nsegs = bio_segments(bio);
		size = bio->bi_iter.bi_size;
		offset = bio->bi_iter.bi_bvec_done;
	}

	iov_iter_bvec(&req->iter, dir, vec, nsegs, size);
	req->iter.iov_offset = offset;
}

static inline void nvme_tcp_advance_req(struct nvme_tcp_request *req,
		int len)
{
	req->data_sent += len;
	req->pdu_sent += len;
	iov_iter_advance(&req->iter, len);
	if (!iov_iter_count(&req->iter) &&
	    req->data_sent < req->data_len) {
		req->curr_bio = req->curr_bio->bi_next;
		nvme_tcp_init_iter(req, WRITE);
	}
}

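/*
 * Stage a request on the queue's lockless req_list and, when possible,
 * send it directly from the submitting context instead of deferring
 * everything to io_work.
 */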
static inline void nvme_tcp_queue_request(struct nvme_tcp_request *req,
		bool sync, bool last)
{
	struct nvme_tcp_queue *queue = req->queue;
	bool empty;

	empty = llist_add(&req->lentry, &queue->req_list) &&
		list_empty(&queue->send_list) && !queue->request;

	/*
	 * If we're the first on the send_list, try to send directly;
	 * otherwise queue io_work. Also, only do that if we are on the
	 * same cpu, so we don't introduce contention.
	 */
	if (queue->io_cpu == smp_processor_id() &&
	    sync && empty && mutex_trylock(&queue->send_mutex)) {
		nvme_tcp_try_send(queue);
		mutex_unlock(&queue->send_mutex);
	} else if (last) {
		queue_work_on(queue->io_cpu, nvme_tcp_wq, &queue->io_work);
	}
}

static void nvme_tcp_process_req_list(struct nvme_tcp_queue *queue)
{
	struct nvme_tcp_request *req;
	struct llist_node *node;

	for (node = llist_del_all(&queue->req_list); node; node = node->next) {
		req = llist_entry(node, struct nvme_tcp_request, lentry);
		list_add(&req->entry, &queue->send_list);
	}
}

static inline struct nvme_tcp_request *
nvme_tcp_fetch_request(struct nvme_tcp_queue *queue)
{
	struct nvme_tcp_request *req;

	req = list_first_entry_or_null(&queue->send_list,
			struct nvme_tcp_request, entry);
	if (!req) {
		nvme_tcp_process_req_list(queue);
		req = list_first_entry_or_null(&queue->send_list,
				struct nvme_tcp_request, entry);
		if (unlikely(!req))
			return NULL;
	}

	list_del(&req->entry);
	return req;
}

static inline void nvme_tcp_ddgst_final(struct ahash_request *hash,
		__le32 *dgst)
{
	ahash_request_set_crypt(hash, NULL, (u8 *)dgst, 0);
	crypto_ahash_final(hash);
}

static inline void nvme_tcp_ddgst_update(struct ahash_request *hash,
		struct page *page, off_t off, size_t len)
{
	struct scatterlist sg;

	sg_init_marker(&sg, 1);
	sg_set_page(&sg, page, len, off);
	ahash_request_set_crypt(hash, &sg, NULL, len);
	crypto_ahash_update(hash);
}

static inline void nvme_tcp_hdgst(struct ahash_request *hash,
		void *pdu, size_t len)
{
	struct scatterlist sg;

	sg_init_one(&sg, pdu, len);
	ahash_request_set_crypt(hash, &sg, pdu + len, len);
	crypto_ahash_digest(hash);
}

static int nvme_tcp_verify_hdgst(struct nvme_tcp_queue *queue,
		void *pdu, size_t pdu_len)
{
	struct nvme_tcp_hdr *hdr = pdu;
	__le32 recv_digest;
	__le32 exp_digest;

	if (unlikely(!(hdr->flags & NVME_TCP_F_HDGST))) {
		dev_err(queue->ctrl->ctrl.device,
			"queue %d: header digest flag is cleared\n",
			nvme_tcp_queue_id(queue));
		return -EPROTO;
	}

	recv_digest = *(__le32 *)(pdu + hdr->hlen);
	nvme_tcp_hdgst(queue->rcv_hash, pdu, pdu_len);
	exp_digest = *(__le32 *)(pdu + hdr->hlen);
	if (recv_digest != exp_digest) {
		dev_err(queue->ctrl->ctrl.device,
			"header digest error: recv %#x expected %#x\n",
			le32_to_cpu(recv_digest), le32_to_cpu(exp_digest));
		return -EIO;
	}

	return 0;
}

static int nvme_tcp_check_ddgst(struct nvme_tcp_queue *queue, void *pdu)
{
	struct nvme_tcp_hdr *hdr = pdu;
	u8 digest_len = nvme_tcp_hdgst_len(queue);
	u32 len;

	len = le32_to_cpu(hdr->plen) - hdr->hlen -
		((hdr->flags & NVME_TCP_F_HDGST) ? digest_len : 0);

	if (unlikely(len && !(hdr->flags & NVME_TCP_F_DDGST))) {
		dev_err(queue->ctrl->ctrl.device,
			"queue %d: data digest flag is cleared\n",
			nvme_tcp_queue_id(queue));
		return -EPROTO;
	}
	crypto_ahash_init(queue->rcv_hash);

	return 0;
}

static void nvme_tcp_exit_request(struct blk_mq_tag_set *set,
		struct request *rq, unsigned int hctx_idx)
{
	struct nvme_tcp_request *req = blk_mq_rq_to_pdu(rq);

	page_frag_free(req->pdu);
}

static int nvme_tcp_init_request(struct blk_mq_tag_set *set,
		struct request *rq, unsigned int hctx_idx,
		unsigned int numa_node)
{
	struct nvme_tcp_ctrl *ctrl = set->driver_data;
	struct nvme_tcp_request *req = blk_mq_rq_to_pdu(rq);
	int queue_idx = (set == &ctrl->tag_set) ? hctx_idx + 1 : 0;
	struct nvme_tcp_queue *queue = &ctrl->queues[queue_idx];
	u8 hdgst = nvme_tcp_hdgst_len(queue);

	req->pdu = page_frag_alloc(&queue->pf_cache,
		sizeof(struct nvme_tcp_cmd_pdu) + hdgst,
		GFP_KERNEL | __GFP_ZERO);
	if (!req->pdu)
		return -ENOMEM;

	req->queue = queue;
	nvme_req(rq)->ctrl = &ctrl->ctrl;

	return 0;
}

static int nvme_tcp_init_hctx(struct blk_mq_hw_ctx *hctx, void *data,
		unsigned int hctx_idx)
{
	struct nvme_tcp_ctrl *ctrl = data;
	struct nvme_tcp_queue *queue = &ctrl->queues[hctx_idx + 1];

	hctx->driver_data = queue;
	return 0;
}

static int nvme_tcp_init_admin_hctx(struct blk_mq_hw_ctx *hctx, void *data,
		unsigned int hctx_idx)
{
	struct nvme_tcp_ctrl *ctrl = data;
	struct nvme_tcp_queue *queue = &ctrl->queues[0];

	hctx->driver_data = queue;
	return 0;
}

static enum nvme_tcp_recv_state
nvme_tcp_recv_state(struct nvme_tcp_queue *queue)
{
	return (queue->pdu_remaining) ? NVME_TCP_RECV_PDU :
		(queue->ddgst_remaining) ? NVME_TCP_RECV_DDGST :
		NVME_TCP_RECV_DATA;
}

static void nvme_tcp_init_recv_ctx(struct nvme_tcp_queue *queue)
{
	queue->pdu_remaining = sizeof(struct nvme_tcp_rsp_pdu) +
				nvme_tcp_hdgst_len(queue);
	queue->pdu_offset = 0;
	queue->data_remaining = -1;
	queue->ddgst_remaining = 0;
}

static void nvme_tcp_error_recovery(struct nvme_ctrl *ctrl)
{
	if (!nvme_change_ctrl_state(ctrl, NVME_CTRL_RESETTING))
		return;

	queue_work(nvme_reset_wq, &to_tcp_ctrl(ctrl)->err_work);
}

static int nvme_tcp_process_nvme_cqe(struct nvme_tcp_queue *queue,
		struct nvme_completion *cqe)
{
	struct request *rq;

	rq = blk_mq_tag_to_rq(nvme_tcp_tagset(queue), cqe->command_id);
	if (!rq) {
		dev_err(queue->ctrl->ctrl.device,
			"queue %d tag 0x%x not found\n",
			nvme_tcp_queue_id(queue), cqe->command_id);
		nvme_tcp_error_recovery(&queue->ctrl->ctrl);
		return -EINVAL;
	}

	if (!nvme_end_request(rq, cqe->status, cqe->result))
		nvme_complete_rq(rq);
	queue->nr_cqe++;

	return 0;
}

static int nvme_tcp_handle_c2h_data(struct nvme_tcp_queue *queue,
		struct nvme_tcp_data_pdu *pdu)
{
	struct request *rq;

	rq = blk_mq_tag_to_rq(nvme_tcp_tagset(queue), pdu->command_id);
	if (!rq) {
		dev_err(queue->ctrl->ctrl.device,
			"queue %d tag %#x not found\n",
			nvme_tcp_queue_id(queue), pdu->command_id);
		return -ENOENT;
	}

	if (!blk_rq_payload_bytes(rq)) {
		dev_err(queue->ctrl->ctrl.device,
			"queue %d tag %#x unexpected data\n",
			nvme_tcp_queue_id(queue), rq->tag);
		return -EIO;
	}

	queue->data_remaining = le32_to_cpu(pdu->data_length);

	if (pdu->hdr.flags & NVME_TCP_F_DATA_SUCCESS &&
	    unlikely(!(pdu->hdr.flags & NVME_TCP_F_DATA_LAST))) {
		dev_err(queue->ctrl->ctrl.device,
			"queue %d tag %#x SUCCESS set but not last PDU\n",
			nvme_tcp_queue_id(queue), rq->tag);
		nvme_tcp_error_recovery(&queue->ctrl->ctrl);
		return -EPROTO;
	}

	return 0;
}

static int nvme_tcp_handle_comp(struct nvme_tcp_queue *queue,
		struct nvme_tcp_rsp_pdu *pdu)
{
	struct nvme_completion *cqe = &pdu->cqe;
	int ret = 0;

	/*
	 * AEN requests are special as they don't time out and can
	 * survive any kind of queue freeze and often don't respond to
	 * aborts. We don't even bother to allocate a struct request
	 * for them but rather special case them here.
	 */
	if (unlikely(nvme_is_aen_req(nvme_tcp_queue_id(queue),
				     cqe->command_id)))
		nvme_complete_async_event(&queue->ctrl->ctrl, cqe->status,
				&cqe->result);
	else
		ret = nvme_tcp_process_nvme_cqe(queue, cqe);

	return ret;
}

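/*
 * Build the host-to-controller data PDU header in response to an R2T,
 * after validating that the requested range lies within the request.
 */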
static int nvme_tcp_setup_h2c_data_pdu(struct nvme_tcp_request *req,
		struct nvme_tcp_r2t_pdu *pdu)
{
	struct nvme_tcp_data_pdu *data = req->pdu;
	struct nvme_tcp_queue *queue = req->queue;
	struct request *rq = blk_mq_rq_from_pdu(req);
	u8 hdgst = nvme_tcp_hdgst_len(queue);
	u8 ddgst = nvme_tcp_ddgst_len(queue);

	req->pdu_len = le32_to_cpu(pdu->r2t_length);
	req->pdu_sent = 0;

	if (unlikely(req->data_sent + req->pdu_len > req->data_len)) {
		dev_err(queue->ctrl->ctrl.device,
			"req %d r2t len %u exceeded data len %u (%zu sent)\n",
			rq->tag, req->pdu_len, req->data_len,
			req->data_sent);
		return -EPROTO;
	}

	if (unlikely(le32_to_cpu(pdu->r2t_offset) < req->data_sent)) {
		dev_err(queue->ctrl->ctrl.device,
			"req %d unexpected r2t offset %u (expected %zu)\n",
			rq->tag, le32_to_cpu(pdu->r2t_offset),
			req->data_sent);
		return -EPROTO;
	}

	memset(data, 0, sizeof(*data));
	data->hdr.type = nvme_tcp_h2c_data;
	data->hdr.flags = NVME_TCP_F_DATA_LAST;
	if (queue->hdr_digest)
		data->hdr.flags |= NVME_TCP_F_HDGST;
	if (queue->data_digest)
		data->hdr.flags |= NVME_TCP_F_DDGST;
	data->hdr.hlen = sizeof(*data);
	data->hdr.pdo = data->hdr.hlen + hdgst;
	data->hdr.plen =
		cpu_to_le32(data->hdr.hlen + hdgst + req->pdu_len + ddgst);
	data->ttag = pdu->ttag;
	data->command_id = rq->tag;
	data->data_offset = cpu_to_le32(req->data_sent);
	data->data_length = cpu_to_le32(req->pdu_len);
	return 0;
}

static int nvme_tcp_handle_r2t(struct nvme_tcp_queue *queue,
		struct nvme_tcp_r2t_pdu *pdu)
{
	struct nvme_tcp_request *req;
	struct request *rq;
	int ret;

	rq = blk_mq_tag_to_rq(nvme_tcp_tagset(queue), pdu->command_id);
	if (!rq) {
		dev_err(queue->ctrl->ctrl.device,
			"queue %d tag %#x not found\n",
			nvme_tcp_queue_id(queue), pdu->command_id);
		return -ENOENT;
	}
	req = blk_mq_rq_to_pdu(rq);

	ret = nvme_tcp_setup_h2c_data_pdu(req, pdu);
	if (unlikely(ret))
		return ret;

	req->state = NVME_TCP_SEND_H2C_PDU;
	req->offset = 0;

	nvme_tcp_queue_request(req, false, true);

	return 0;
}

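/*
 * Copy PDU header bytes from the skb until a full header (plus optional
 * header digest) has arrived, then verify digests and dispatch on the
 * PDU type.
 */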
static int nvme_tcp_recv_pdu(struct nvme_tcp_queue *queue, struct sk_buff *skb,
		unsigned int *offset, size_t *len)
{
	struct nvme_tcp_hdr *hdr;
	char *pdu = queue->pdu;
	size_t rcv_len = min_t(size_t, *len, queue->pdu_remaining);
	int ret;

	ret = skb_copy_bits(skb, *offset,
		&pdu[queue->pdu_offset], rcv_len);
	if (unlikely(ret))
		return ret;

	queue->pdu_remaining -= rcv_len;
	queue->pdu_offset += rcv_len;
	*offset += rcv_len;
	*len -= rcv_len;
	if (queue->pdu_remaining)
		return 0;

	hdr = queue->pdu;
	if (queue->hdr_digest) {
		ret = nvme_tcp_verify_hdgst(queue, queue->pdu, hdr->hlen);
		if (unlikely(ret))
			return ret;
	}

	if (queue->data_digest) {
		ret = nvme_tcp_check_ddgst(queue, queue->pdu);
		if (unlikely(ret))
			return ret;
	}

	switch (hdr->type) {
	case nvme_tcp_c2h_data:
		return nvme_tcp_handle_c2h_data(queue, (void *)queue->pdu);
	case nvme_tcp_rsp:
		nvme_tcp_init_recv_ctx(queue);
		return nvme_tcp_handle_comp(queue, (void *)queue->pdu);
	case nvme_tcp_r2t:
		nvme_tcp_init_recv_ctx(queue);
		return nvme_tcp_handle_r2t(queue, (void *)queue->pdu);
	default:
		dev_err(queue->ctrl->ctrl.device,
			"unsupported pdu type (%d)\n", hdr->type);
		return -EINVAL;
	}
}

static inline void nvme_tcp_end_request(struct request *rq, u16 status)
{
	union nvme_result res = {};

	if (!nvme_end_request(rq, cpu_to_le16(status << 1), res))
		nvme_complete_rq(rq);
}

static int nvme_tcp_recv_data(struct nvme_tcp_queue *queue, struct sk_buff *skb,
		unsigned int *offset, size_t *len)
{
	struct nvme_tcp_data_pdu *pdu = (void *)queue->pdu;
	struct nvme_tcp_request *req;
	struct request *rq;

	rq = blk_mq_tag_to_rq(nvme_tcp_tagset(queue), pdu->command_id);
	if (!rq) {
		dev_err(queue->ctrl->ctrl.device,
			"queue %d tag %#x not found\n",
			nvme_tcp_queue_id(queue), pdu->command_id);
		return -ENOENT;
	}
	req = blk_mq_rq_to_pdu(rq);

	while (true) {
		int recv_len, ret;

		recv_len = min_t(size_t, *len, queue->data_remaining);
		if (!recv_len)
			break;

		if (!iov_iter_count(&req->iter)) {
			req->curr_bio = req->curr_bio->bi_next;

			/*
			 * If we don't have any bios it means that controller
			 * sent more data than we requested, hence error
			 */
			if (!req->curr_bio) {
				dev_err(queue->ctrl->ctrl.device,
					"queue %d no space in request %#x",
					nvme_tcp_queue_id(queue), rq->tag);
				nvme_tcp_init_recv_ctx(queue);
				return -EIO;
			}
			nvme_tcp_init_iter(req, READ);
		}

		/* we can read only from what is left in this bio */
		recv_len = min_t(size_t, recv_len,
				iov_iter_count(&req->iter));

		if (queue->data_digest)
			ret = skb_copy_and_hash_datagram_iter(skb, *offset,
				&req->iter, recv_len, queue->rcv_hash);
		else
			ret = skb_copy_datagram_iter(skb, *offset,
					&req->iter, recv_len);
		if (ret) {
			dev_err(queue->ctrl->ctrl.device,
				"queue %d failed to copy request %#x data",
				nvme_tcp_queue_id(queue), rq->tag);
			return ret;
		}

		*len -= recv_len;
		*offset += recv_len;
		queue->data_remaining -= recv_len;
	}

	if (!queue->data_remaining) {
		if (queue->data_digest) {
			nvme_tcp_ddgst_final(queue->rcv_hash, &queue->exp_ddgst);
			queue->ddgst_remaining = NVME_TCP_DIGEST_LENGTH;
		} else {
			if (pdu->hdr.flags & NVME_TCP_F_DATA_SUCCESS) {
				nvme_tcp_end_request(rq, NVME_SC_SUCCESS);
				queue->nr_cqe++;
			}
			nvme_tcp_init_recv_ctx(queue);
		}
	}

	return 0;
}

static int nvme_tcp_recv_ddgst(struct nvme_tcp_queue *queue,
		struct sk_buff *skb, unsigned int *offset, size_t *len)
{
	struct nvme_tcp_data_pdu *pdu = (void *)queue->pdu;
	char *ddgst = (char *)&queue->recv_ddgst;
	size_t recv_len = min_t(size_t, *len, queue->ddgst_remaining);
	off_t off = NVME_TCP_DIGEST_LENGTH - queue->ddgst_remaining;
	int ret;

	ret = skb_copy_bits(skb, *offset, &ddgst[off], recv_len);
	if (unlikely(ret))
		return ret;

	queue->ddgst_remaining -= recv_len;
	*offset += recv_len;
	*len -= recv_len;
	if (queue->ddgst_remaining)
		return 0;

	if (queue->recv_ddgst != queue->exp_ddgst) {
		dev_err(queue->ctrl->ctrl.device,
			"data digest error: recv %#x expected %#x\n",
			le32_to_cpu(queue->recv_ddgst),
			le32_to_cpu(queue->exp_ddgst));
		return -EIO;
	}

	if (pdu->hdr.flags & NVME_TCP_F_DATA_SUCCESS) {
		struct request *rq = blk_mq_tag_to_rq(nvme_tcp_tagset(queue),
						pdu->command_id);

		nvme_tcp_end_request(rq, NVME_SC_SUCCESS);
		queue->nr_cqe++;
	}

	nvme_tcp_init_recv_ctx(queue);
	return 0;
}

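/*
 * ->read_sock() callback: dispatch incoming bytes to the PDU, data or
 * data-digest handler according to the current receive state.
 */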
static int nvme_tcp_recv_skb(read_descriptor_t *desc, struct sk_buff *skb,
		unsigned int offset, size_t len)
{
	struct nvme_tcp_queue *queue = desc->arg.data;
	size_t consumed = len;
	int result;

	while (len) {
		switch (nvme_tcp_recv_state(queue)) {
		case NVME_TCP_RECV_PDU:
			result = nvme_tcp_recv_pdu(queue, skb, &offset, &len);
			break;
		case NVME_TCP_RECV_DATA:
			result = nvme_tcp_recv_data(queue, skb, &offset, &len);
			break;
		case NVME_TCP_RECV_DDGST:
			result = nvme_tcp_recv_ddgst(queue, skb, &offset, &len);
			break;
		default:
			result = -EFAULT;
		}
		if (result) {
			dev_err(queue->ctrl->ctrl.device,
				"receive failed: %d\n", result);
			queue->rd_enabled = false;
			nvme_tcp_error_recovery(&queue->ctrl->ctrl);
			return result;
		}
	}

	return consumed;
}

static void nvme_tcp_data_ready(struct sock *sk)
{
	struct nvme_tcp_queue *queue;

	read_lock_bh(&sk->sk_callback_lock);
	queue = sk->sk_user_data;
	if (likely(queue && queue->rd_enabled) &&
	    !test_bit(NVME_TCP_Q_POLLING, &queue->flags))
		queue_work_on(queue->io_cpu, nvme_tcp_wq, &queue->io_work);
	read_unlock_bh(&sk->sk_callback_lock);
}

static void nvme_tcp_write_space(struct sock *sk)
{
	struct nvme_tcp_queue *queue;

	read_lock_bh(&sk->sk_callback_lock);
	queue = sk->sk_user_data;
	if (likely(queue && sk_stream_is_writeable(sk))) {
		clear_bit(SOCK_NOSPACE, &sk->sk_socket->flags);
		queue_work_on(queue->io_cpu, nvme_tcp_wq, &queue->io_work);
	}
	read_unlock_bh(&sk->sk_callback_lock);
}

static void nvme_tcp_state_change(struct sock *sk)
{
	struct nvme_tcp_queue *queue;

	read_lock(&sk->sk_callback_lock);
	queue = sk->sk_user_data;
	if (!queue)
		goto done;

	switch (sk->sk_state) {
	case TCP_CLOSE:
	case TCP_CLOSE_WAIT:
	case TCP_LAST_ACK:
	case TCP_FIN_WAIT1:
	case TCP_FIN_WAIT2:
		nvme_tcp_error_recovery(&queue->ctrl->ctrl);
		break;
	default:
		dev_info(queue->ctrl->ctrl.device,
			"queue %d socket state %d\n",
			nvme_tcp_queue_id(queue), sk->sk_state);
	}

	queue->state_change(sk);
done:
	read_unlock(&sk->sk_callback_lock);
}

static inline void nvme_tcp_done_send_req(struct nvme_tcp_queue *queue)
{
	queue->request = NULL;
}

static void nvme_tcp_fail_request(struct nvme_tcp_request *req)
{
	nvme_tcp_end_request(blk_mq_rq_from_pdu(req), NVME_SC_HOST_PATH_ERROR);
}

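/*
 * Push the request's data pages to the socket with kernel_sendpage(),
 * updating the rolling data digest as pages go out.
 */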
static int nvme_tcp_try_send_data(struct nvme_tcp_request *req)
{
	struct nvme_tcp_queue *queue = req->queue;

	while (true) {
		struct page *page = nvme_tcp_req_cur_page(req);
		size_t offset = nvme_tcp_req_cur_offset(req);
		size_t len = nvme_tcp_req_cur_length(req);
		bool last = nvme_tcp_pdu_last_send(req, len);
		int ret, flags = MSG_DONTWAIT;

		if (last && !queue->data_digest)
			flags |= MSG_EOR;
		else
			flags |= MSG_MORE | MSG_SENDPAGE_NOTLAST;

		/* can't zcopy slab pages */
		if (unlikely(PageSlab(page))) {
			ret = sock_no_sendpage(queue->sock, page, offset, len,
					flags);
		} else {
			ret = kernel_sendpage(queue->sock, page, offset, len,
					flags);
		}
		if (ret <= 0)
			return ret;

		nvme_tcp_advance_req(req, ret);
		if (queue->data_digest)
			nvme_tcp_ddgst_update(queue->snd_hash, page,
					offset, ret);

		/* fully successful last write */
		if (last && ret == len) {
			if (queue->data_digest) {
				nvme_tcp_ddgst_final(queue->snd_hash,
					&req->ddgst);
				req->state = NVME_TCP_SEND_DDGST;
				req->offset = 0;
			} else {
				nvme_tcp_done_send_req(queue);
			}
			return 1;
		}
	}
	return -EAGAIN;
}

static int nvme_tcp_try_send_cmd_pdu(struct nvme_tcp_request *req)
{
	struct nvme_tcp_queue *queue = req->queue;
	struct nvme_tcp_cmd_pdu *pdu = req->pdu;
	bool inline_data = nvme_tcp_has_inline_data(req);
	u8 hdgst = nvme_tcp_hdgst_len(queue);
	int len = sizeof(*pdu) + hdgst - req->offset;
	int flags = MSG_DONTWAIT;
	int ret;

	if (inline_data)
		flags |= MSG_MORE | MSG_SENDPAGE_NOTLAST;
	else
		flags |= MSG_EOR;

	if (queue->hdr_digest && !req->offset)
		nvme_tcp_hdgst(queue->snd_hash, pdu, sizeof(*pdu));

	ret = kernel_sendpage(queue->sock, virt_to_page(pdu),
			offset_in_page(pdu) + req->offset, len, flags);
	if (unlikely(ret <= 0))
		return ret;

	len -= ret;
	if (!len) {
		if (inline_data) {
			req->state = NVME_TCP_SEND_DATA;
			if (queue->data_digest)
				crypto_ahash_init(queue->snd_hash);
			nvme_tcp_init_iter(req, WRITE);
		} else {
			nvme_tcp_done_send_req(queue);
		}
		return 1;
	}
	req->offset += ret;

	return -EAGAIN;
}

static int nvme_tcp_try_send_data_pdu(struct nvme_tcp_request *req)
{
	struct nvme_tcp_queue *queue = req->queue;
	struct nvme_tcp_data_pdu *pdu = req->pdu;
	u8 hdgst = nvme_tcp_hdgst_len(queue);
	int len = sizeof(*pdu) - req->offset + hdgst;
	int ret;

	if (queue->hdr_digest && !req->offset)
		nvme_tcp_hdgst(queue->snd_hash, pdu, sizeof(*pdu));

	ret = kernel_sendpage(queue->sock, virt_to_page(pdu),
			offset_in_page(pdu) + req->offset, len,
			MSG_DONTWAIT | MSG_MORE | MSG_SENDPAGE_NOTLAST);
	if (unlikely(ret <= 0))
		return ret;

	len -= ret;
	if (!len) {
		req->state = NVME_TCP_SEND_DATA;
		if (queue->data_digest)
			crypto_ahash_init(queue->snd_hash);
		if (!req->data_sent)
			nvme_tcp_init_iter(req, WRITE);
		return 1;
	}
	req->offset += ret;

	return -EAGAIN;
}

static int nvme_tcp_try_send_ddgst(struct nvme_tcp_request *req)
{
	struct nvme_tcp_queue *queue = req->queue;
	int ret;
	struct msghdr msg = { .msg_flags = MSG_DONTWAIT | MSG_EOR };
	struct kvec iov = {
		.iov_base = &req->ddgst + req->offset,
		.iov_len = NVME_TCP_DIGEST_LENGTH - req->offset
	};

	ret = kernel_sendmsg(queue->sock, &msg, &iov, 1, iov.iov_len);
	if (unlikely(ret <= 0))
		return ret;

	if (req->offset + ret == NVME_TCP_DIGEST_LENGTH) {
		nvme_tcp_done_send_req(queue);
		return 1;
	}

	req->offset += ret;
	return -EAGAIN;
}

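/*
 * Advance the current request through its send state machine (command
 * PDU, H2C data PDU, data, data digest). Returns 1 when a send step
 * completed, 0 when nothing (more) can be sent right now, and a
 * negative errno on a hard failure.
 */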
static int nvme_tcp_try_send(struct nvme_tcp_queue *queue)
{
	struct nvme_tcp_request *req;
	int ret = 1;

	if (!queue->request) {
		queue->request = nvme_tcp_fetch_request(queue);
		if (!queue->request)
			return 0;
	}
	req = queue->request;

	if (req->state == NVME_TCP_SEND_CMD_PDU) {
		ret = nvme_tcp_try_send_cmd_pdu(req);
		if (ret <= 0)
			goto done;
		if (!nvme_tcp_has_inline_data(req))
			return ret;
	}

	if (req->state == NVME_TCP_SEND_H2C_PDU) {
		ret = nvme_tcp_try_send_data_pdu(req);
		if (ret <= 0)
			goto done;
	}

	if (req->state == NVME_TCP_SEND_DATA) {
		ret = nvme_tcp_try_send_data(req);
		if (ret <= 0)
			goto done;
	}

	if (req->state == NVME_TCP_SEND_DDGST)
		ret = nvme_tcp_try_send_ddgst(req);
done:
	if (ret == -EAGAIN) {
		ret = 0;
	} else if (ret < 0) {
		dev_err(queue->ctrl->ctrl.device,
			"failed to send request %d\n", ret);
		if (ret != -EPIPE && ret != -ECONNRESET)
			nvme_tcp_fail_request(queue->request);
		nvme_tcp_done_send_req(queue);
	}
	return ret;
}

static int nvme_tcp_try_recv(struct nvme_tcp_queue *queue)
{
	struct socket *sock = queue->sock;
	struct sock *sk = sock->sk;
	read_descriptor_t rd_desc;
	int consumed;

	rd_desc.arg.data = queue;
	rd_desc.count = 1;
	lock_sock(sk);
	queue->nr_cqe = 0;
	consumed = sock->ops->read_sock(sk, &rd_desc, nvme_tcp_recv_skb);
	release_sock(sk);
	return consumed;
}

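/*
 * Per-queue worker: alternate between sending and receiving for up to
 * one millisecond, then reschedule itself if there is still work left.
 */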
static void nvme_tcp_io_work(struct work_struct *w)
{
	struct nvme_tcp_queue *queue =
		container_of(w, struct nvme_tcp_queue, io_work);
	unsigned long deadline = jiffies + msecs_to_jiffies(1);

	do {
		bool pending = false;
		int result;

		if (mutex_trylock(&queue->send_mutex)) {
			result = nvme_tcp_try_send(queue);
			mutex_unlock(&queue->send_mutex);
			if (result > 0)
				pending = true;
			else if (unlikely(result < 0))
				break;
		}

		result = nvme_tcp_try_recv(queue);
		if (result > 0)
			pending = true;
		else if (unlikely(result < 0))
			return;

		if (!pending)
			return;

	} while (!time_after(jiffies, deadline)); /* quota is exhausted */

	queue_work_on(queue->io_cpu, nvme_tcp_wq, &queue->io_work);
}

static void nvme_tcp_free_crypto(struct nvme_tcp_queue *queue)
{
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(queue->rcv_hash);

	ahash_request_free(queue->rcv_hash);
	ahash_request_free(queue->snd_hash);
	crypto_free_ahash(tfm);
}

static int nvme_tcp_alloc_crypto(struct nvme_tcp_queue *queue)
{
	struct crypto_ahash *tfm;

	tfm = crypto_alloc_ahash("crc32c", 0, CRYPTO_ALG_ASYNC);
	if (IS_ERR(tfm))
		return PTR_ERR(tfm);

	queue->snd_hash = ahash_request_alloc(tfm, GFP_KERNEL);
	if (!queue->snd_hash)
		goto free_tfm;
	ahash_request_set_callback(queue->snd_hash, 0, NULL, NULL);

	queue->rcv_hash = ahash_request_alloc(tfm, GFP_KERNEL);
	if (!queue->rcv_hash)
		goto free_snd_hash;
	ahash_request_set_callback(queue->rcv_hash, 0, NULL, NULL);

	return 0;
free_snd_hash:
	ahash_request_free(queue->snd_hash);
free_tfm:
	crypto_free_ahash(tfm);
	return -ENOMEM;
}

static void nvme_tcp_free_async_req(struct nvme_tcp_ctrl *ctrl)
{
	struct nvme_tcp_request *async = &ctrl->async_req;

	page_frag_free(async->pdu);
}

static int nvme_tcp_alloc_async_req(struct nvme_tcp_ctrl *ctrl)
{
	struct nvme_tcp_queue *queue = &ctrl->queues[0];
	struct nvme_tcp_request *async = &ctrl->async_req;
	u8 hdgst = nvme_tcp_hdgst_len(queue);

	async->pdu = page_frag_alloc(&queue->pf_cache,
		sizeof(struct nvme_tcp_cmd_pdu) + hdgst,
		GFP_KERNEL | __GFP_ZERO);
	if (!async->pdu)
		return -ENOMEM;

	async->queue = &ctrl->queues[0];
	return 0;
}

static void nvme_tcp_free_queue(struct nvme_ctrl *nctrl, int qid)
{
	struct nvme_tcp_ctrl *ctrl = to_tcp_ctrl(nctrl);
	struct nvme_tcp_queue *queue = &ctrl->queues[qid];

	if (!test_and_clear_bit(NVME_TCP_Q_ALLOCATED, &queue->flags))
		return;

	if (queue->hdr_digest || queue->data_digest)
		nvme_tcp_free_crypto(queue);

	sock_release(queue->sock);
	kfree(queue->pdu);
}

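/*
 * Exchange the NVMe/TCP ICReq/ICResp initialization PDUs and verify that
 * the controller agrees on PDU format version, digests and data alignment.
 */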
static int nvme_tcp_init_connection(struct nvme_tcp_queue *queue)
{
	struct nvme_tcp_icreq_pdu *icreq;
	struct nvme_tcp_icresp_pdu *icresp;
	struct msghdr msg = {};
	struct kvec iov;
	bool ctrl_hdgst, ctrl_ddgst;
	int ret;

	icreq = kzalloc(sizeof(*icreq), GFP_KERNEL);
	if (!icreq)
		return -ENOMEM;

	icresp = kzalloc(sizeof(*icresp), GFP_KERNEL);
	if (!icresp) {
		ret = -ENOMEM;
		goto free_icreq;
	}

	icreq->hdr.type = nvme_tcp_icreq;
	icreq->hdr.hlen = sizeof(*icreq);
	icreq->hdr.pdo = 0;
	icreq->hdr.plen = cpu_to_le32(icreq->hdr.hlen);
	icreq->pfv = cpu_to_le16(NVME_TCP_PFV_1_0);
	icreq->maxr2t = 0; /* single inflight r2t supported */
	icreq->hpda = 0; /* no alignment constraint */
	if (queue->hdr_digest)
		icreq->digest |= NVME_TCP_HDR_DIGEST_ENABLE;
	if (queue->data_digest)
		icreq->digest |= NVME_TCP_DATA_DIGEST_ENABLE;

	iov.iov_base = icreq;
	iov.iov_len = sizeof(*icreq);
	ret = kernel_sendmsg(queue->sock, &msg, &iov, 1, iov.iov_len);
	if (ret < 0)
		goto free_icresp;

	memset(&msg, 0, sizeof(msg));
	iov.iov_base = icresp;
	iov.iov_len = sizeof(*icresp);
	ret = kernel_recvmsg(queue->sock, &msg, &iov, 1,
			iov.iov_len, msg.msg_flags);
	if (ret < 0)
		goto free_icresp;

	ret = -EINVAL;
	if (icresp->hdr.type != nvme_tcp_icresp) {
		pr_err("queue %d: bad type returned %d\n",
			nvme_tcp_queue_id(queue), icresp->hdr.type);
		goto free_icresp;
	}

	if (le32_to_cpu(icresp->hdr.plen) != sizeof(*icresp)) {
		pr_err("queue %d: bad pdu length returned %d\n",
			nvme_tcp_queue_id(queue), icresp->hdr.plen);
		goto free_icresp;
	}

	if (icresp->pfv != NVME_TCP_PFV_1_0) {
		pr_err("queue %d: bad pfv returned %d\n",
			nvme_tcp_queue_id(queue), icresp->pfv);
		goto free_icresp;
	}

	ctrl_ddgst = !!(icresp->digest & NVME_TCP_DATA_DIGEST_ENABLE);
	if ((queue->data_digest && !ctrl_ddgst) ||
	    (!queue->data_digest && ctrl_ddgst)) {
		pr_err("queue %d: data digest mismatch host: %s ctrl: %s\n",
			nvme_tcp_queue_id(queue),
			queue->data_digest ? "enabled" : "disabled",
			ctrl_ddgst ? "enabled" : "disabled");
		goto free_icresp;
	}

	ctrl_hdgst = !!(icresp->digest & NVME_TCP_HDR_DIGEST_ENABLE);
	if ((queue->hdr_digest && !ctrl_hdgst) ||
	    (!queue->hdr_digest && ctrl_hdgst)) {
		pr_err("queue %d: header digest mismatch host: %s ctrl: %s\n",
			nvme_tcp_queue_id(queue),
			queue->hdr_digest ? "enabled" : "disabled",
			ctrl_hdgst ? "enabled" : "disabled");
		goto free_icresp;
	}

	if (icresp->cpda != 0) {
		pr_err("queue %d: unsupported cpda returned %d\n",
			nvme_tcp_queue_id(queue), icresp->cpda);
		goto free_icresp;
	}

	ret = 0;
free_icresp:
	kfree(icresp);
free_icreq:
	kfree(icreq);
	return ret;
}

static bool nvme_tcp_admin_queue(struct nvme_tcp_queue *queue)
{
	return nvme_tcp_queue_id(queue) == 0;
}

static bool nvme_tcp_default_queue(struct nvme_tcp_queue *queue)
{
	struct nvme_tcp_ctrl *ctrl = queue->ctrl;
	int qid = nvme_tcp_queue_id(queue);

	return !nvme_tcp_admin_queue(queue) &&
		qid < 1 + ctrl->io_queues[HCTX_TYPE_DEFAULT];
}

static bool nvme_tcp_read_queue(struct nvme_tcp_queue *queue)
{
	struct nvme_tcp_ctrl *ctrl = queue->ctrl;
	int qid = nvme_tcp_queue_id(queue);

	return !nvme_tcp_admin_queue(queue) &&
		!nvme_tcp_default_queue(queue) &&
		qid < 1 + ctrl->io_queues[HCTX_TYPE_DEFAULT] +
				ctrl->io_queues[HCTX_TYPE_READ];
}

static bool nvme_tcp_poll_queue(struct nvme_tcp_queue *queue)
{
	struct nvme_tcp_ctrl *ctrl = queue->ctrl;
	int qid = nvme_tcp_queue_id(queue);

	return !nvme_tcp_admin_queue(queue) &&
		!nvme_tcp_default_queue(queue) &&
		!nvme_tcp_read_queue(queue) &&
		qid < 1 + ctrl->io_queues[HCTX_TYPE_DEFAULT] +
				ctrl->io_queues[HCTX_TYPE_READ] +
				ctrl->io_queues[HCTX_TYPE_POLL];
}

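/*
 * Pin the queue's io_work to an online CPU, spreading default, read and
 * poll queues across CPUs according to the queue's index within its class.
 */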
static void nvme_tcp_set_queue_io_cpu(struct nvme_tcp_queue *queue)
{
	struct nvme_tcp_ctrl *ctrl = queue->ctrl;
	int qid = nvme_tcp_queue_id(queue);
	int n = 0;

	if (nvme_tcp_default_queue(queue))
		n = qid - 1;
	else if (nvme_tcp_read_queue(queue))
		n = qid - ctrl->io_queues[HCTX_TYPE_DEFAULT] - 1;
	else if (nvme_tcp_poll_queue(queue))
		n = qid - ctrl->io_queues[HCTX_TYPE_DEFAULT] -
				ctrl->io_queues[HCTX_TYPE_READ] - 1;
	queue->io_cpu = cpumask_next_wrap(n - 1, cpu_online_mask, -1, false);
}

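/*
 * Create and connect one queue's TCP socket, tune its socket options,
 * run the ICReq/ICResp handshake and install the socket callbacks.
 */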
static int nvme_tcp_alloc_queue(struct nvme_ctrl *nctrl,
		int qid, size_t queue_size)
{
	struct nvme_tcp_ctrl *ctrl = to_tcp_ctrl(nctrl);
	struct nvme_tcp_queue *queue = &ctrl->queues[qid];
	int ret, rcv_pdu_size;

	queue->ctrl = ctrl;
	init_llist_head(&queue->req_list);
	INIT_LIST_HEAD(&queue->send_list);
	mutex_init(&queue->send_mutex);
	INIT_WORK(&queue->io_work, nvme_tcp_io_work);
	queue->queue_size = queue_size;

	if (qid > 0)
		queue->cmnd_capsule_len = nctrl->ioccsz * 16;
	else
		queue->cmnd_capsule_len = sizeof(struct nvme_command) +
						NVME_TCP_ADMIN_CCSZ;

	ret = sock_create(ctrl->addr.ss_family, SOCK_STREAM,
			IPPROTO_TCP, &queue->sock);
	if (ret) {
		dev_err(nctrl->device,
			"failed to create socket: %d\n", ret);
		return ret;
	}

	/* Single syn retry */
	tcp_sock_set_syncnt(queue->sock->sk, 1);

	/* Set TCP no delay */
	tcp_sock_set_nodelay(queue->sock->sk);

	/*
	 * Cleanup whatever is sitting in the TCP transmit queue on socket
	 * close. This is done to prevent stale data from being sent should
	 * the network connection be restored before TCP times out.
	 */
	sock_no_linger(queue->sock->sk);

	if (so_priority > 0)
		sock_set_priority(queue->sock->sk, so_priority);

	/* Set socket type of service */
	if (nctrl->opts->tos >= 0)
		ip_sock_set_tos(queue->sock->sk, nctrl->opts->tos);

	queue->sock->sk->sk_allocation = GFP_ATOMIC;
	nvme_tcp_set_queue_io_cpu(queue);
	queue->request = NULL;
	queue->data_remaining = 0;
	queue->ddgst_remaining = 0;
	queue->pdu_remaining = 0;
	queue->pdu_offset = 0;
	sk_set_memalloc(queue->sock->sk);

	if (nctrl->opts->mask & NVMF_OPT_HOST_TRADDR) {
		ret = kernel_bind(queue->sock, (struct sockaddr *)&ctrl->src_addr,
			sizeof(ctrl->src_addr));
		if (ret) {
			dev_err(nctrl->device,
				"failed to bind queue %d socket %d\n",
				qid, ret);
			goto err_sock;
		}
	}

	queue->hdr_digest = nctrl->opts->hdr_digest;
	queue->data_digest = nctrl->opts->data_digest;
	if (queue->hdr_digest || queue->data_digest) {
		ret = nvme_tcp_alloc_crypto(queue);
		if (ret) {
			dev_err(nctrl->device,
				"failed to allocate queue %d crypto\n", qid);
			goto err_sock;
		}
	}

	rcv_pdu_size = sizeof(struct nvme_tcp_rsp_pdu) +
			nvme_tcp_hdgst_len(queue);
	queue->pdu = kmalloc(rcv_pdu_size, GFP_KERNEL);
	if (!queue->pdu) {
		ret = -ENOMEM;
		goto err_crypto;
	}

	dev_dbg(nctrl->device, "connecting queue %d\n",
			nvme_tcp_queue_id(queue));

	ret = kernel_connect(queue->sock, (struct sockaddr *)&ctrl->addr,
		sizeof(ctrl->addr), 0);
	if (ret) {
		dev_err(nctrl->device,
			"failed to connect socket: %d\n", ret);
		goto err_rcv_pdu;
	}

	ret = nvme_tcp_init_connection(queue);
	if (ret)
		goto err_init_connect;

	queue->rd_enabled = true;
	set_bit(NVME_TCP_Q_ALLOCATED, &queue->flags);
	nvme_tcp_init_recv_ctx(queue);

	write_lock_bh(&queue->sock->sk->sk_callback_lock);
	queue->sock->sk->sk_user_data = queue;
	queue->state_change = queue->sock->sk->sk_state_change;
	queue->data_ready = queue->sock->sk->sk_data_ready;
	queue->write_space = queue->sock->sk->sk_write_space;
	queue->sock->sk->sk_data_ready = nvme_tcp_data_ready;
	queue->sock->sk->sk_state_change = nvme_tcp_state_change;
	queue->sock->sk->sk_write_space = nvme_tcp_write_space;
#ifdef CONFIG_NET_RX_BUSY_POLL
	queue->sock->sk->sk_ll_usec = 1;
#endif
	write_unlock_bh(&queue->sock->sk->sk_callback_lock);

	return 0;

err_init_connect:
	kernel_sock_shutdown(queue->sock, SHUT_RDWR);
err_rcv_pdu:
	kfree(queue->pdu);
err_crypto:
	if (queue->hdr_digest || queue->data_digest)
		nvme_tcp_free_crypto(queue);
err_sock:
	sock_release(queue->sock);
	queue->sock = NULL;
	return ret;
}

static void nvme_tcp_restore_sock_calls(struct nvme_tcp_queue *queue)
{
	struct socket *sock = queue->sock;

	write_lock_bh(&sock->sk->sk_callback_lock);
	sock->sk->sk_user_data = NULL;
	sock->sk->sk_data_ready = queue->data_ready;
	sock->sk->sk_state_change = queue->state_change;
	sock->sk->sk_write_space = queue->write_space;
	write_unlock_bh(&sock->sk->sk_callback_lock);
}

static void __nvme_tcp_stop_queue(struct nvme_tcp_queue *queue)
{
	kernel_sock_shutdown(queue->sock, SHUT_RDWR);
	nvme_tcp_restore_sock_calls(queue);
	cancel_work_sync(&queue->io_work);
}

static void nvme_tcp_stop_queue(struct nvme_ctrl *nctrl, int qid)
{
	struct nvme_tcp_ctrl *ctrl = to_tcp_ctrl(nctrl);
	struct nvme_tcp_queue *queue = &ctrl->queues[qid];

	if (!test_and_clear_bit(NVME_TCP_Q_LIVE, &queue->flags))
		return;

	__nvme_tcp_stop_queue(queue);
}

static int nvme_tcp_start_queue(struct nvme_ctrl *nctrl, int idx)
{
	struct nvme_tcp_ctrl *ctrl = to_tcp_ctrl(nctrl);
	int ret;

	if (idx)
		ret = nvmf_connect_io_queue(nctrl, idx, false);
	else
		ret = nvmf_connect_admin_queue(nctrl);

	if (!ret) {
		set_bit(NVME_TCP_Q_LIVE, &ctrl->queues[idx].flags);
	} else {
		if (test_bit(NVME_TCP_Q_ALLOCATED, &ctrl->queues[idx].flags))
			__nvme_tcp_stop_queue(&ctrl->queues[idx]);
		dev_err(nctrl->device,
			"failed to connect queue: %d ret=%d\n", idx, ret);
	}
	return ret;
}

static struct blk_mq_tag_set *nvme_tcp_alloc_tagset(struct nvme_ctrl *nctrl,
		bool admin)
{
	struct nvme_tcp_ctrl *ctrl = to_tcp_ctrl(nctrl);
	struct blk_mq_tag_set *set;
	int ret;

	if (admin) {
		set = &ctrl->admin_tag_set;
		memset(set, 0, sizeof(*set));
		set->ops = &nvme_tcp_admin_mq_ops;
		set->queue_depth = NVME_AQ_MQ_TAG_DEPTH;
		set->reserved_tags = 2; /* connect + keep-alive */
		set->numa_node = nctrl->numa_node;
		set->flags = BLK_MQ_F_BLOCKING;
		set->cmd_size = sizeof(struct nvme_tcp_request);
		set->driver_data = ctrl;
		set->nr_hw_queues = 1;
		set->timeout = ADMIN_TIMEOUT;
	} else {
		set = &ctrl->tag_set;
		memset(set, 0, sizeof(*set));
		set->ops = &nvme_tcp_mq_ops;
		set->queue_depth = nctrl->sqsize + 1;
		set->reserved_tags = 1; /* fabric connect */
		set->numa_node = nctrl->numa_node;
		set->flags = BLK_MQ_F_SHOULD_MERGE | BLK_MQ_F_BLOCKING;
		set->cmd_size = sizeof(struct nvme_tcp_request);
		set->driver_data = ctrl;
		set->nr_hw_queues = nctrl->queue_count - 1;
		set->timeout = NVME_IO_TIMEOUT;
		set->nr_maps = nctrl->opts->nr_poll_queues ? HCTX_MAX_TYPES : 2;
	}

	ret = blk_mq_alloc_tag_set(set);
	if (ret)
		return ERR_PTR(ret);

	return set;
}

static void nvme_tcp_free_admin_queue(struct nvme_ctrl *ctrl)
{
	if (to_tcp_ctrl(ctrl)->async_req.pdu) {
		nvme_tcp_free_async_req(to_tcp_ctrl(ctrl));
		to_tcp_ctrl(ctrl)->async_req.pdu = NULL;
	}

	nvme_tcp_free_queue(ctrl, 0);
}

static void nvme_tcp_free_io_queues(struct nvme_ctrl *ctrl)
{
	int i;

	for (i = 1; i < ctrl->queue_count; i++)
		nvme_tcp_free_queue(ctrl, i);
}

static void nvme_tcp_stop_io_queues(struct nvme_ctrl *ctrl)
{
	int i;

	for (i = 1; i < ctrl->queue_count; i++)
		nvme_tcp_stop_queue(ctrl, i);
}

static int nvme_tcp_start_io_queues(struct nvme_ctrl *ctrl)
{
	int i, ret = 0;

	for (i = 1; i < ctrl->queue_count; i++) {
		ret = nvme_tcp_start_queue(ctrl, i);
		if (ret)
			goto out_stop_queues;
	}

	return 0;

out_stop_queues:
	for (i--; i >= 1; i--)
		nvme_tcp_stop_queue(ctrl, i);
	return ret;
}

static int nvme_tcp_alloc_admin_queue(struct nvme_ctrl *ctrl)
{
	int ret;

	ret = nvme_tcp_alloc_queue(ctrl, 0, NVME_AQ_DEPTH);
	if (ret)
		return ret;

	ret = nvme_tcp_alloc_async_req(to_tcp_ctrl(ctrl));
	if (ret)
		goto out_free_queue;

	return 0;

out_free_queue:
	nvme_tcp_free_queue(ctrl, 0);
	return ret;
}

static int __nvme_tcp_alloc_io_queues(struct nvme_ctrl *ctrl)
{
	int i, ret;

	for (i = 1; i < ctrl->queue_count; i++) {
		ret = nvme_tcp_alloc_queue(ctrl, i,
				ctrl->sqsize + 1);
		if (ret)
			goto out_free_queues;
	}

	return 0;

out_free_queues:
	for (i--; i >= 1; i--)
		nvme_tcp_free_queue(ctrl, i);

	return ret;
}

static unsigned int nvme_tcp_nr_io_queues(struct nvme_ctrl *ctrl)
{
	unsigned int nr_io_queues;

	nr_io_queues = min(ctrl->opts->nr_io_queues, num_online_cpus());
	nr_io_queues += min(ctrl->opts->nr_write_queues, num_online_cpus());
	nr_io_queues += min(ctrl->opts->nr_poll_queues, num_online_cpus());

	return nr_io_queues;
}

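/*
 * Distribute the granted I/O queues between default (write), read and
 * poll queue maps, based on what the connect options requested.
 */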
Sagi Grimberg64861992019-05-28 22:49:05 -07001673static void nvme_tcp_set_io_queues(struct nvme_ctrl *nctrl,
1674 unsigned int nr_io_queues)
1675{
1676 struct nvme_tcp_ctrl *ctrl = to_tcp_ctrl(nctrl);
1677 struct nvmf_ctrl_options *opts = nctrl->opts;
1678
1679 if (opts->nr_write_queues && opts->nr_io_queues < nr_io_queues) {
1680 /*
1681 * separate read/write queues
1682 * hand out dedicated default queues only after we have
1683 * sufficient read queues.
1684 */
1685 ctrl->io_queues[HCTX_TYPE_READ] = opts->nr_io_queues;
1686 nr_io_queues -= ctrl->io_queues[HCTX_TYPE_READ];
1687 ctrl->io_queues[HCTX_TYPE_DEFAULT] =
1688 min(opts->nr_write_queues, nr_io_queues);
1689 nr_io_queues -= ctrl->io_queues[HCTX_TYPE_DEFAULT];
1690 } else {
1691 /*
1692 * shared read/write queues
1693 * either no write queues were requested, or we don't have
1694 * sufficient queue count to have dedicated default queues.
1695 */
1696 ctrl->io_queues[HCTX_TYPE_DEFAULT] =
1697 min(opts->nr_io_queues, nr_io_queues);
1698 nr_io_queues -= ctrl->io_queues[HCTX_TYPE_DEFAULT];
1699 }
Sagi Grimberg1a9460c2019-07-03 14:08:04 -07001700
1701 if (opts->nr_poll_queues && nr_io_queues) {
1702 /* map dedicated poll queues only if we have queues left */
1703 ctrl->io_queues[HCTX_TYPE_POLL] =
1704 min(opts->nr_poll_queues, nr_io_queues);
1705 }
Sagi Grimberg64861992019-05-28 22:49:05 -07001706}
1707
Sagi Grimbergefb973b2019-04-24 11:53:19 -07001708static int nvme_tcp_alloc_io_queues(struct nvme_ctrl *ctrl)
Sagi Grimberg3f2304f2018-12-03 17:52:17 -08001709{
1710 unsigned int nr_io_queues;
1711 int ret;
1712
1713 nr_io_queues = nvme_tcp_nr_io_queues(ctrl);
1714 ret = nvme_set_queue_count(ctrl, &nr_io_queues);
1715 if (ret)
1716 return ret;
1717
1718 ctrl->queue_count = nr_io_queues + 1;
1719 if (ctrl->queue_count < 2)
1720 return 0;
1721
1722 dev_info(ctrl->device,
1723 "creating %d I/O queues.\n", nr_io_queues);
1724
Sagi Grimberg64861992019-05-28 22:49:05 -07001725 nvme_tcp_set_io_queues(ctrl, nr_io_queues);
1726
Sagi Grimbergefb973b2019-04-24 11:53:19 -07001727 return __nvme_tcp_alloc_io_queues(ctrl);
Sagi Grimberg3f2304f2018-12-03 17:52:17 -08001728}
1729
1730static void nvme_tcp_destroy_io_queues(struct nvme_ctrl *ctrl, bool remove)
1731{
1732 nvme_tcp_stop_io_queues(ctrl);
1733 if (remove) {
Sagi Grimberge85037a2018-12-31 23:58:30 -08001734 blk_cleanup_queue(ctrl->connect_q);
Sagi Grimberg3f2304f2018-12-03 17:52:17 -08001735 blk_mq_free_tag_set(ctrl->tagset);
1736 }
1737 nvme_tcp_free_io_queues(ctrl);
1738}
1739
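/*
 * Allocate the I/O queues and either create the I/O tag set and connect
 * queue (new controller) or update the number of hardware queues
 * (reset/reconnect), then start the queues.
 */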
1740static int nvme_tcp_configure_io_queues(struct nvme_ctrl *ctrl, bool new)
1741{
1742 int ret;
1743
Sagi Grimbergefb973b2019-04-24 11:53:19 -07001744 ret = nvme_tcp_alloc_io_queues(ctrl);
Sagi Grimberg3f2304f2018-12-03 17:52:17 -08001745 if (ret)
1746 return ret;
1747
1748 if (new) {
1749 ctrl->tagset = nvme_tcp_alloc_tagset(ctrl, false);
1750 if (IS_ERR(ctrl->tagset)) {
1751 ret = PTR_ERR(ctrl->tagset);
1752 goto out_free_io_queues;
1753 }
1754
Sagi Grimberge85037a2018-12-31 23:58:30 -08001755 ctrl->connect_q = blk_mq_init_queue(ctrl->tagset);
1756 if (IS_ERR(ctrl->connect_q)) {
1757 ret = PTR_ERR(ctrl->connect_q);
1758 goto out_free_tag_set;
Sagi Grimberg3f2304f2018-12-03 17:52:17 -08001759 }
1760 } else {
1761 blk_mq_update_nr_hw_queues(ctrl->tagset,
1762 ctrl->queue_count - 1);
1763 }
1764
1765 ret = nvme_tcp_start_io_queues(ctrl);
1766 if (ret)
1767 goto out_cleanup_connect_q;
1768
1769 return 0;
1770
1771out_cleanup_connect_q:
Sagi Grimberge85037a2018-12-31 23:58:30 -08001772 if (new)
Sagi Grimberg3f2304f2018-12-03 17:52:17 -08001773 blk_cleanup_queue(ctrl->connect_q);
1774out_free_tag_set:
1775 if (new)
1776 blk_mq_free_tag_set(ctrl->tagset);
1777out_free_io_queues:
1778 nvme_tcp_free_io_queues(ctrl);
1779 return ret;
1780}
1781
1782static void nvme_tcp_destroy_admin_queue(struct nvme_ctrl *ctrl, bool remove)
1783{
1784 nvme_tcp_stop_queue(ctrl, 0);
1785 if (remove) {
Sagi Grimberg3f2304f2018-12-03 17:52:17 -08001786 blk_cleanup_queue(ctrl->admin_q);
Sagi Grimberge7832cb2019-08-02 19:33:59 -07001787 blk_cleanup_queue(ctrl->fabrics_q);
Sagi Grimberg3f2304f2018-12-03 17:52:17 -08001788 blk_mq_free_tag_set(ctrl->admin_tagset);
1789 }
1790 nvme_tcp_free_admin_queue(ctrl);
1791}
1792
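/*
 * Bring up the admin queue: allocate the TCP queue and, for a new
 * controller, the admin tag set plus the fabrics and admin request
 * queues; then start the queue, enable the controller and identify it.
 */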
1793static int nvme_tcp_configure_admin_queue(struct nvme_ctrl *ctrl, bool new)
1794{
1795 int error;
1796
1797 error = nvme_tcp_alloc_admin_queue(ctrl);
1798 if (error)
1799 return error;
1800
1801 if (new) {
1802 ctrl->admin_tagset = nvme_tcp_alloc_tagset(ctrl, true);
1803 if (IS_ERR(ctrl->admin_tagset)) {
1804 error = PTR_ERR(ctrl->admin_tagset);
1805 goto out_free_queue;
1806 }
1807
Sagi Grimberge7832cb2019-08-02 19:33:59 -07001808 ctrl->fabrics_q = blk_mq_init_queue(ctrl->admin_tagset);
1809 if (IS_ERR(ctrl->fabrics_q)) {
1810 error = PTR_ERR(ctrl->fabrics_q);
1811 goto out_free_tagset;
1812 }
1813
Sagi Grimberg3f2304f2018-12-03 17:52:17 -08001814 ctrl->admin_q = blk_mq_init_queue(ctrl->admin_tagset);
1815 if (IS_ERR(ctrl->admin_q)) {
1816 error = PTR_ERR(ctrl->admin_q);
Sagi Grimberge7832cb2019-08-02 19:33:59 -07001817 goto out_cleanup_fabrics_q;
Sagi Grimberg3f2304f2018-12-03 17:52:17 -08001818 }
1819 }
1820
1821 error = nvme_tcp_start_queue(ctrl, 0);
1822 if (error)
1823 goto out_cleanup_queue;
1824
Sagi Grimbergc0f2f452019-07-22 17:06:53 -07001825 error = nvme_enable_ctrl(ctrl);
Sagi Grimberg3f2304f2018-12-03 17:52:17 -08001826 if (error)
1827 goto out_stop_queue;
1828
Sagi Grimberge7832cb2019-08-02 19:33:59 -07001829 blk_mq_unquiesce_queue(ctrl->admin_q);
1830
Sagi Grimberg3f2304f2018-12-03 17:52:17 -08001831 error = nvme_init_identify(ctrl);
1832 if (error)
1833 goto out_stop_queue;
1834
1835 return 0;
1836
1837out_stop_queue:
1838 nvme_tcp_stop_queue(ctrl, 0);
1839out_cleanup_queue:
1840 if (new)
1841 blk_cleanup_queue(ctrl->admin_q);
Sagi Grimberge7832cb2019-08-02 19:33:59 -07001842out_cleanup_fabrics_q:
1843 if (new)
1844 blk_cleanup_queue(ctrl->fabrics_q);
Sagi Grimberg3f2304f2018-12-03 17:52:17 -08001845out_free_tagset:
1846 if (new)
1847 blk_mq_free_tag_set(ctrl->admin_tagset);
1848out_free_queue:
1849 nvme_tcp_free_admin_queue(ctrl);
1850 return error;
1851}
1852
1853static void nvme_tcp_teardown_admin_queue(struct nvme_ctrl *ctrl,
1854 bool remove)
1855{
1856 blk_mq_quiesce_queue(ctrl->admin_q);
1857 nvme_tcp_stop_queue(ctrl, 0);
Ming Lei622b8b62019-07-24 11:48:42 +08001858 if (ctrl->admin_tagset) {
Sagi Grimberg7a425892019-04-24 11:53:17 -07001859 blk_mq_tagset_busy_iter(ctrl->admin_tagset,
1860 nvme_cancel_request, ctrl);
Ming Lei622b8b62019-07-24 11:48:42 +08001861 blk_mq_tagset_wait_completed_request(ctrl->admin_tagset);
1862 }
Sagi Grimberge7832cb2019-08-02 19:33:59 -07001863 if (remove)
1864 blk_mq_unquiesce_queue(ctrl->admin_q);
Sagi Grimberg3f2304f2018-12-03 17:52:17 -08001865 nvme_tcp_destroy_admin_queue(ctrl, remove);
1866}
1867
1868static void nvme_tcp_teardown_io_queues(struct nvme_ctrl *ctrl,
1869 bool remove)
1870{
1871 if (ctrl->queue_count <= 1)
1872 return;
1873 nvme_stop_queues(ctrl);
1874 nvme_tcp_stop_io_queues(ctrl);
Ming Lei622b8b62019-07-24 11:48:42 +08001875 if (ctrl->tagset) {
Sagi Grimberg7a425892019-04-24 11:53:17 -07001876 blk_mq_tagset_busy_iter(ctrl->tagset,
1877 nvme_cancel_request, ctrl);
Ming Lei622b8b62019-07-24 11:48:42 +08001878 blk_mq_tagset_wait_completed_request(ctrl->tagset);
1879 }
Sagi Grimberg3f2304f2018-12-03 17:52:17 -08001880 if (remove)
1881 nvme_start_queues(ctrl);
1882 nvme_tcp_destroy_io_queues(ctrl, remove);
1883}
1884
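/*
 * After a failure while CONNECTING, either schedule another reconnect
 * attempt after reconnect_delay or give up and delete the controller.
 */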
1885static void nvme_tcp_reconnect_or_remove(struct nvme_ctrl *ctrl)
1886{
1887 /* If we are resetting/deleting then do nothing */
1888 if (ctrl->state != NVME_CTRL_CONNECTING) {
1889 WARN_ON_ONCE(ctrl->state == NVME_CTRL_NEW ||
1890 ctrl->state == NVME_CTRL_LIVE);
1891 return;
1892 }
1893
1894 if (nvmf_should_reconnect(ctrl)) {
1895 dev_info(ctrl->device, "Reconnecting in %d seconds...\n",
1896 ctrl->opts->reconnect_delay);
1897 queue_delayed_work(nvme_wq, &to_tcp_ctrl(ctrl)->connect_work,
1898 ctrl->opts->reconnect_delay * HZ);
1899 } else {
1900 dev_info(ctrl->device, "Removing controller...\n");
1901 nvme_delete_ctrl(ctrl);
1902 }
1903}
1904
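/*
 * Common controller bring-up path, used for creation ('new' set) as well
 * as for reset and reconnect: set up the admin queue, validate controller
 * parameters, set up the I/O queues and move the controller to LIVE.
 */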
1905static int nvme_tcp_setup_ctrl(struct nvme_ctrl *ctrl, bool new)
1906{
1907 struct nvmf_ctrl_options *opts = ctrl->opts;
Colin Ian King312910f2019-09-05 15:34:35 +01001908 int ret;
Sagi Grimberg3f2304f2018-12-03 17:52:17 -08001909
1910 ret = nvme_tcp_configure_admin_queue(ctrl, new);
1911 if (ret)
1912 return ret;
1913
1914 if (ctrl->icdoff) {
 ret = -EOPNOTSUPP;
1915 dev_err(ctrl->device, "icdoff is not supported!\n");
1916 goto destroy_admin;
1917 }
1918
1919 if (opts->queue_size > ctrl->sqsize + 1)
1920 dev_warn(ctrl->device,
1921 "queue_size %zu > ctrl sqsize %u, clamping down\n",
1922 opts->queue_size, ctrl->sqsize + 1);
1923
1924 if (ctrl->sqsize + 1 > ctrl->maxcmd) {
1925 dev_warn(ctrl->device,
1926 "sqsize %u > ctrl maxcmd %u, clamping down\n",
1927 ctrl->sqsize + 1, ctrl->maxcmd);
1928 ctrl->sqsize = ctrl->maxcmd - 1;
1929 }
1930
1931 if (ctrl->queue_count > 1) {
1932 ret = nvme_tcp_configure_io_queues(ctrl, new);
1933 if (ret)
1934 goto destroy_admin;
1935 }
1936
1937 if (!nvme_change_ctrl_state(ctrl, NVME_CTRL_LIVE)) {
Israel Rukshinbea54ef2020-03-24 17:29:45 +02001938 /*
1939 * state change failure is ok if we're in DELETING state,
1940 * unless we are in the middle of creating a new controller,
1941 * in order to avoid races with the teardown flow.
1942 */
Sagi Grimberg3f2304f2018-12-03 17:52:17 -08001943 WARN_ON_ONCE(ctrl->state != NVME_CTRL_DELETING);
Israel Rukshinbea54ef2020-03-24 17:29:45 +02001944 WARN_ON_ONCE(new);
Sagi Grimberg3f2304f2018-12-03 17:52:17 -08001945 ret = -EINVAL;
1946 goto destroy_io;
1947 }
1948
1949 nvme_start_ctrl(ctrl);
1950 return 0;
1951
1952destroy_io:
1953 if (ctrl->queue_count > 1)
1954 nvme_tcp_destroy_io_queues(ctrl, new);
1955destroy_admin:
1956 nvme_tcp_stop_queue(ctrl, 0);
1957 nvme_tcp_destroy_admin_queue(ctrl, new);
1958 return ret;
1959}
1960
1961static void nvme_tcp_reconnect_ctrl_work(struct work_struct *work)
1962{
1963 struct nvme_tcp_ctrl *tcp_ctrl = container_of(to_delayed_work(work),
1964 struct nvme_tcp_ctrl, connect_work);
1965 struct nvme_ctrl *ctrl = &tcp_ctrl->ctrl;
1966
1967 ++ctrl->nr_reconnects;
1968
1969 if (nvme_tcp_setup_ctrl(ctrl, false))
1970 goto requeue;
1971
Colin Ian King56a77d22018-12-14 11:42:43 +00001972 dev_info(ctrl->device, "Successfully reconnected (attempt %d)\n",
Sagi Grimberg3f2304f2018-12-03 17:52:17 -08001973 ctrl->nr_reconnects);
1974
1975 ctrl->nr_reconnects = 0;
1976
1977 return;
1978
1979requeue:
1980 dev_info(ctrl->device, "Failed reconnect attempt %d\n",
1981 ctrl->nr_reconnects);
1982 nvme_tcp_reconnect_or_remove(ctrl);
1983}
1984
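/*
 * Error recovery: tear down the I/O and admin queues while failing fast
 * any pending requests, move to CONNECTING and then either reconnect or
 * remove the controller.
 */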
1985static void nvme_tcp_error_recovery_work(struct work_struct *work)
1986{
1987 struct nvme_tcp_ctrl *tcp_ctrl = container_of(work,
1988 struct nvme_tcp_ctrl, err_work);
1989 struct nvme_ctrl *ctrl = &tcp_ctrl->ctrl;
1990
1991 nvme_stop_keep_alive(ctrl);
1992 nvme_tcp_teardown_io_queues(ctrl, false);
1993 /* unquiesce so that pending requests fail fast */
1994 nvme_start_queues(ctrl);
1995 nvme_tcp_teardown_admin_queue(ctrl, false);
Sagi Grimberge7832cb2019-08-02 19:33:59 -07001996 blk_mq_unquiesce_queue(ctrl->admin_q);
Sagi Grimberg3f2304f2018-12-03 17:52:17 -08001997
1998 if (!nvme_change_ctrl_state(ctrl, NVME_CTRL_CONNECTING)) {
1999 /* state change failure is ok if we're in DELETING state */
2000 WARN_ON_ONCE(ctrl->state != NVME_CTRL_DELETING);
2001 return;
2002 }
2003
2004 nvme_tcp_reconnect_or_remove(ctrl);
2005}
2006
2007static void nvme_tcp_teardown_ctrl(struct nvme_ctrl *ctrl, bool shutdown)
2008{
Sagi Grimberg794a4cb2019-01-01 00:19:30 -08002009 cancel_work_sync(&to_tcp_ctrl(ctrl)->err_work);
2010 cancel_delayed_work_sync(&to_tcp_ctrl(ctrl)->connect_work);
2011
Sagi Grimberg3f2304f2018-12-03 17:52:17 -08002012 nvme_tcp_teardown_io_queues(ctrl, shutdown);
Sagi Grimberge7832cb2019-08-02 19:33:59 -07002013 blk_mq_quiesce_queue(ctrl->admin_q);
Sagi Grimberg3f2304f2018-12-03 17:52:17 -08002014 if (shutdown)
2015 nvme_shutdown_ctrl(ctrl);
2016 else
Sagi Grimbergb5b05042019-07-22 17:06:54 -07002017 nvme_disable_ctrl(ctrl);
Sagi Grimberg3f2304f2018-12-03 17:52:17 -08002018 nvme_tcp_teardown_admin_queue(ctrl, shutdown);
2019}
2020
2021static void nvme_tcp_delete_ctrl(struct nvme_ctrl *ctrl)
2022{
2023 nvme_tcp_teardown_ctrl(ctrl, true);
2024}
2025
2026static void nvme_reset_ctrl_work(struct work_struct *work)
2027{
2028 struct nvme_ctrl *ctrl =
2029 container_of(work, struct nvme_ctrl, reset_work);
2030
2031 nvme_stop_ctrl(ctrl);
2032 nvme_tcp_teardown_ctrl(ctrl, false);
2033
2034 if (!nvme_change_ctrl_state(ctrl, NVME_CTRL_CONNECTING)) {
2035 /* state change failure is ok if we're in DELETING state */
2036 WARN_ON_ONCE(ctrl->state != NVME_CTRL_DELETING);
2037 return;
2038 }
2039
2040 if (nvme_tcp_setup_ctrl(ctrl, false))
2041 goto out_fail;
2042
2043 return;
2044
2045out_fail:
2046 ++ctrl->nr_reconnects;
2047 nvme_tcp_reconnect_or_remove(ctrl);
2048}
2049
Sagi Grimberg3f2304f2018-12-03 17:52:17 -08002050static void nvme_tcp_free_ctrl(struct nvme_ctrl *nctrl)
2051{
2052 struct nvme_tcp_ctrl *ctrl = to_tcp_ctrl(nctrl);
2053
2054 if (list_empty(&ctrl->list))
2055 goto free_ctrl;
2056
2057 mutex_lock(&nvme_tcp_ctrl_mutex);
2058 list_del(&ctrl->list);
2059 mutex_unlock(&nvme_tcp_ctrl_mutex);
2060
2061 nvmf_free_options(nctrl->opts);
2062free_ctrl:
2063 kfree(ctrl->queues);
2064 kfree(ctrl);
2065}
2066
2067static void nvme_tcp_set_sg_null(struct nvme_command *c)
2068{
2069 struct nvme_sgl_desc *sg = &c->common.dptr.sgl;
2070
2071 sg->addr = 0;
2072 sg->length = 0;
2073 sg->type = (NVME_TRANSPORT_SGL_DATA_DESC << 4) |
2074 NVME_SGL_FMT_TRANSPORT_A;
2075}
2076
2077static void nvme_tcp_set_sg_inline(struct nvme_tcp_queue *queue,
2078 struct nvme_command *c, u32 data_len)
2079{
2080 struct nvme_sgl_desc *sg = &c->common.dptr.sgl;
2081
2082 sg->addr = cpu_to_le64(queue->ctrl->ctrl.icdoff);
2083 sg->length = cpu_to_le32(data_len);
2084 sg->type = (NVME_SGL_FMT_DATA_DESC << 4) | NVME_SGL_FMT_OFFSET;
2085}
2086
2087static void nvme_tcp_set_sg_host_data(struct nvme_command *c,
2088 u32 data_len)
2089{
2090 struct nvme_sgl_desc *sg = &c->common.dptr.sgl;
2091
2092 sg->addr = 0;
2093 sg->length = cpu_to_le32(data_len);
2094 sg->type = (NVME_TRANSPORT_SGL_DATA_DESC << 4) |
2095 NVME_SGL_FMT_TRANSPORT_A;
2096}
2097
2098static void nvme_tcp_submit_async_event(struct nvme_ctrl *arg)
2099{
2100 struct nvme_tcp_ctrl *ctrl = to_tcp_ctrl(arg);
2101 struct nvme_tcp_queue *queue = &ctrl->queues[0];
2102 struct nvme_tcp_cmd_pdu *pdu = ctrl->async_req.pdu;
2103 struct nvme_command *cmd = &pdu->cmd;
2104 u8 hdgst = nvme_tcp_hdgst_len(queue);
2105
2106 memset(pdu, 0, sizeof(*pdu));
2107 pdu->hdr.type = nvme_tcp_cmd;
2108 if (queue->hdr_digest)
2109 pdu->hdr.flags |= NVME_TCP_F_HDGST;
2110 pdu->hdr.hlen = sizeof(*pdu);
2111 pdu->hdr.plen = cpu_to_le32(pdu->hdr.hlen + hdgst);
2112
2113 cmd->common.opcode = nvme_admin_async_event;
2114 cmd->common.command_id = NVME_AQ_BLK_MQ_DEPTH;
2115 cmd->common.flags |= NVME_CMD_SGL_METABUF;
2116 nvme_tcp_set_sg_null(cmd);
2117
2118 ctrl->async_req.state = NVME_TCP_SEND_CMD_PDU;
2119 ctrl->async_req.offset = 0;
2120 ctrl->async_req.curr_bio = NULL;
2121 ctrl->async_req.data_len = 0;
2122
Sagi Grimberg86f03482020-06-18 17:30:23 -07002123 nvme_tcp_queue_request(&ctrl->async_req, true, true);
Sagi Grimberg3f2304f2018-12-03 17:52:17 -08002124}
2125
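/*
 * Request timeout handler: restart the timer if a reset is already
 * scheduled, tear down the queues (completing the request) if the
 * controller is not LIVE, otherwise trigger error recovery and reset
 * the timer.
 */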
2126static enum blk_eh_timer_return
2127nvme_tcp_timeout(struct request *rq, bool reserved)
2128{
2129 struct nvme_tcp_request *req = blk_mq_rq_to_pdu(rq);
2130 struct nvme_tcp_ctrl *ctrl = req->queue->ctrl;
2131 struct nvme_tcp_cmd_pdu *pdu = req->pdu;
2132
Keith Busch92b98e82019-09-05 08:09:33 -06002133 /*
2134 * Restart the timer if a controller reset is already scheduled. Any
2135 * timed out commands would be handled before entering the connecting
2136 * state.
2137 */
2138 if (ctrl->ctrl.state == NVME_CTRL_RESETTING)
2139 return BLK_EH_RESET_TIMER;
2140
Sagi Grimberg39d57752019-01-08 01:01:30 -08002141 dev_warn(ctrl->ctrl.device,
Sagi Grimberg3f2304f2018-12-03 17:52:17 -08002142 "queue %d: timeout request %#x type %d\n",
Sagi Grimberg39d57752019-01-08 01:01:30 -08002143 nvme_tcp_queue_id(req->queue), rq->tag, pdu->hdr.type);
Sagi Grimberg3f2304f2018-12-03 17:52:17 -08002144
2145 if (ctrl->ctrl.state != NVME_CTRL_LIVE) {
Sagi Grimberg39d57752019-01-08 01:01:30 -08002146 /*
2147 * Teardown immediately if the controller times out while starting
2148 * or if error recovery has already started. All outstanding
2149 * requests are completed on shutdown, so we return BLK_EH_DONE.
2150 */
2151 flush_work(&ctrl->err_work);
2152 nvme_tcp_teardown_io_queues(&ctrl->ctrl, false);
2153 nvme_tcp_teardown_admin_queue(&ctrl->ctrl, false);
Sagi Grimberg3f2304f2018-12-03 17:52:17 -08002154 return BLK_EH_DONE;
2155 }
2156
Sagi Grimberg39d57752019-01-08 01:01:30 -08002157 dev_warn(ctrl->ctrl.device, "starting error recovery\n");
Sagi Grimberg3f2304f2018-12-03 17:52:17 -08002158 nvme_tcp_error_recovery(&ctrl->ctrl);
2159
2160 return BLK_EH_RESET_TIMER;
2161}
2162
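/*
 * Fill in the command SGL: a null descriptor for requests without data,
 * an in-capsule (inline) descriptor for small enough writes, or a
 * transport SGL for data transferred outside the command capsule.
 */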
2163static blk_status_t nvme_tcp_map_data(struct nvme_tcp_queue *queue,
2164 struct request *rq)
2165{
2166 struct nvme_tcp_request *req = blk_mq_rq_to_pdu(rq);
2167 struct nvme_tcp_cmd_pdu *pdu = req->pdu;
2168 struct nvme_command *c = &pdu->cmd;
2169
2170 c->common.flags |= NVME_CMD_SGL_METABUF;
2171
Sagi Grimberg25e5cb72020-03-23 15:06:30 -07002172 if (!blk_rq_nr_phys_segments(rq))
2173 nvme_tcp_set_sg_null(c);
2174 else if (rq_data_dir(rq) == WRITE &&
Sagi Grimberg3f2304f2018-12-03 17:52:17 -08002175 req->data_len <= nvme_tcp_inline_data_size(queue))
2176 nvme_tcp_set_sg_inline(queue, c, req->data_len);
2177 else
2178 nvme_tcp_set_sg_host_data(c, req->data_len);
2179
2180 return 0;
2181}
2182
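/*
 * Build the command PDU for a request: initialize the send state, decide
 * whether the data is sent inline with the command, and set up the PDU
 * header (digest flags, header/data lengths) and the data mapping.
 */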
2183static blk_status_t nvme_tcp_setup_cmd_pdu(struct nvme_ns *ns,
2184 struct request *rq)
2185{
2186 struct nvme_tcp_request *req = blk_mq_rq_to_pdu(rq);
2187 struct nvme_tcp_cmd_pdu *pdu = req->pdu;
2188 struct nvme_tcp_queue *queue = req->queue;
2189 u8 hdgst = nvme_tcp_hdgst_len(queue), ddgst = 0;
2190 blk_status_t ret;
2191
2192 ret = nvme_setup_cmd(ns, rq, &pdu->cmd);
2193 if (ret)
2194 return ret;
2195
2196 req->state = NVME_TCP_SEND_CMD_PDU;
2197 req->offset = 0;
2198 req->data_sent = 0;
2199 req->pdu_len = 0;
2200 req->pdu_sent = 0;
Sagi Grimberg25e5cb72020-03-23 15:06:30 -07002201 req->data_len = blk_rq_nr_phys_segments(rq) ?
2202 blk_rq_payload_bytes(rq) : 0;
Sagi Grimberg3f2304f2018-12-03 17:52:17 -08002203 req->curr_bio = rq->bio;
2204
2205 if (rq_data_dir(rq) == WRITE &&
2206 req->data_len <= nvme_tcp_inline_data_size(queue))
2207 req->pdu_len = req->data_len;
2208 else if (req->curr_bio)
2209 nvme_tcp_init_iter(req, READ);
2210
2211 pdu->hdr.type = nvme_tcp_cmd;
2212 pdu->hdr.flags = 0;
2213 if (queue->hdr_digest)
2214 pdu->hdr.flags |= NVME_TCP_F_HDGST;
2215 if (queue->data_digest && req->pdu_len) {
2216 pdu->hdr.flags |= NVME_TCP_F_DDGST;
2217 ddgst = nvme_tcp_ddgst_len(queue);
2218 }
2219 pdu->hdr.hlen = sizeof(*pdu);
2220 pdu->hdr.pdo = req->pdu_len ? pdu->hdr.hlen + hdgst : 0;
2221 pdu->hdr.plen =
2222 cpu_to_le32(pdu->hdr.hlen + hdgst + req->pdu_len + ddgst);
2223
2224 ret = nvme_tcp_map_data(queue, rq);
2225 if (unlikely(ret)) {
Max Gurtovoy28a4cac2019-10-13 19:57:38 +03002226 nvme_cleanup_cmd(rq);
Sagi Grimberg3f2304f2018-12-03 17:52:17 -08002227 dev_err(queue->ctrl->ctrl.device,
2228 "Failed to map data (%d)\n", ret);
2229 return ret;
2230 }
2231
2232 return 0;
2233}
2234
Sagi Grimberg86f03482020-06-18 17:30:23 -07002235static void nvme_tcp_commit_rqs(struct blk_mq_hw_ctx *hctx)
2236{
2237 struct nvme_tcp_queue *queue = hctx->driver_data;
2238
2239 if (!llist_empty(&queue->req_list))
2240 queue_work_on(queue->io_cpu, nvme_tcp_wq, &queue->io_work);
2241}
2242
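/*
 * blk-mq queue_rq handler: verify the queue is live (or fail/requeue the
 * request), build the command PDU and hand the request to the queue's
 * send path.
 */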
Sagi Grimberg3f2304f2018-12-03 17:52:17 -08002243static blk_status_t nvme_tcp_queue_rq(struct blk_mq_hw_ctx *hctx,
2244 const struct blk_mq_queue_data *bd)
2245{
2246 struct nvme_ns *ns = hctx->queue->queuedata;
2247 struct nvme_tcp_queue *queue = hctx->driver_data;
2248 struct request *rq = bd->rq;
2249 struct nvme_tcp_request *req = blk_mq_rq_to_pdu(rq);
2250 bool queue_ready = test_bit(NVME_TCP_Q_LIVE, &queue->flags);
2251 blk_status_t ret;
2252
2253 if (!nvmf_check_ready(&queue->ctrl->ctrl, rq, queue_ready))
2254 return nvmf_fail_nonready_command(&queue->ctrl->ctrl, rq);
2255
2256 ret = nvme_tcp_setup_cmd_pdu(ns, rq);
2257 if (unlikely(ret))
2258 return ret;
2259
2260 blk_mq_start_request(rq);
2261
Sagi Grimberg86f03482020-06-18 17:30:23 -07002262 nvme_tcp_queue_request(req, true, bd->last);
Sagi Grimberg3f2304f2018-12-03 17:52:17 -08002263
2264 return BLK_STS_OK;
2265}
2266
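/*
 * Map blk-mq hctx types to TCP queues: default and read queues either get
 * dedicated ranges or share the same queues, and poll queues get their
 * own range when configured.
 */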
Sagi Grimberg873946f2018-12-11 23:38:57 -08002267static int nvme_tcp_map_queues(struct blk_mq_tag_set *set)
2268{
2269 struct nvme_tcp_ctrl *ctrl = set->driver_data;
Sagi Grimberg64861992019-05-28 22:49:05 -07002270 struct nvmf_ctrl_options *opts = ctrl->ctrl.opts;
Sagi Grimberg873946f2018-12-11 23:38:57 -08002271
Sagi Grimberg64861992019-05-28 22:49:05 -07002272 if (opts->nr_write_queues && ctrl->io_queues[HCTX_TYPE_READ]) {
Sagi Grimberg873946f2018-12-11 23:38:57 -08002273 /* separate read/write queues */
2274 set->map[HCTX_TYPE_DEFAULT].nr_queues =
Sagi Grimberg64861992019-05-28 22:49:05 -07002275 ctrl->io_queues[HCTX_TYPE_DEFAULT];
2276 set->map[HCTX_TYPE_DEFAULT].queue_offset = 0;
2277 set->map[HCTX_TYPE_READ].nr_queues =
2278 ctrl->io_queues[HCTX_TYPE_READ];
Sagi Grimberg873946f2018-12-11 23:38:57 -08002279 set->map[HCTX_TYPE_READ].queue_offset =
Sagi Grimberg64861992019-05-28 22:49:05 -07002280 ctrl->io_queues[HCTX_TYPE_DEFAULT];
Sagi Grimberg873946f2018-12-11 23:38:57 -08002281 } else {
Sagi Grimberg64861992019-05-28 22:49:05 -07002282 /* shared read/write queues */
Sagi Grimberg873946f2018-12-11 23:38:57 -08002283 set->map[HCTX_TYPE_DEFAULT].nr_queues =
Sagi Grimberg64861992019-05-28 22:49:05 -07002284 ctrl->io_queues[HCTX_TYPE_DEFAULT];
2285 set->map[HCTX_TYPE_DEFAULT].queue_offset = 0;
2286 set->map[HCTX_TYPE_READ].nr_queues =
2287 ctrl->io_queues[HCTX_TYPE_DEFAULT];
Sagi Grimberg873946f2018-12-11 23:38:57 -08002288 set->map[HCTX_TYPE_READ].queue_offset = 0;
2289 }
2290 blk_mq_map_queues(&set->map[HCTX_TYPE_DEFAULT]);
2291 blk_mq_map_queues(&set->map[HCTX_TYPE_READ]);
Sagi Grimberg64861992019-05-28 22:49:05 -07002292
Sagi Grimberg1a9460c2019-07-03 14:08:04 -07002293 if (opts->nr_poll_queues && ctrl->io_queues[HCTX_TYPE_POLL]) {
2294 /* map dedicated poll queues only if we have queues left */
2295 set->map[HCTX_TYPE_POLL].nr_queues =
2296 ctrl->io_queues[HCTX_TYPE_POLL];
2297 set->map[HCTX_TYPE_POLL].queue_offset =
2298 ctrl->io_queues[HCTX_TYPE_DEFAULT] +
2299 ctrl->io_queues[HCTX_TYPE_READ];
2300 blk_mq_map_queues(&set->map[HCTX_TYPE_POLL]);
2301 }
2302
Sagi Grimberg64861992019-05-28 22:49:05 -07002303 dev_info(ctrl->ctrl.device,
Sagi Grimberg1a9460c2019-07-03 14:08:04 -07002304 "mapped %d/%d/%d default/read/poll queues.\n",
Sagi Grimberg64861992019-05-28 22:49:05 -07002305 ctrl->io_queues[HCTX_TYPE_DEFAULT],
Sagi Grimberg1a9460c2019-07-03 14:08:04 -07002306 ctrl->io_queues[HCTX_TYPE_READ],
2307 ctrl->io_queues[HCTX_TYPE_POLL]);
Sagi Grimberg64861992019-05-28 22:49:05 -07002308
Sagi Grimberg873946f2018-12-11 23:38:57 -08002309 return 0;
2310}
2311
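/*
 * blk-mq poll handler: busy-poll the socket when possible, then try to
 * receive directly from the polling context and return the number of
 * completions processed.
 */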
Sagi Grimberg1a9460c2019-07-03 14:08:04 -07002312static int nvme_tcp_poll(struct blk_mq_hw_ctx *hctx)
2313{
2314 struct nvme_tcp_queue *queue = hctx->driver_data;
2315 struct sock *sk = queue->sock->sk;
2316
Sagi Grimbergf86e5bf2020-03-23 16:43:52 -07002317 if (!test_bit(NVME_TCP_Q_LIVE, &queue->flags))
2318 return 0;
2319
Sagi Grimberg72e5d752020-05-01 14:25:44 -07002320 set_bit(NVME_TCP_Q_POLLING, &queue->flags);
Eric Dumazet3f926af2019-10-23 22:44:51 -07002321 if (sk_can_busy_loop(sk) && skb_queue_empty_lockless(&sk->sk_receive_queue))
Sagi Grimberg1a9460c2019-07-03 14:08:04 -07002322 sk_busy_loop(sk, true);
2323 nvme_tcp_try_recv(queue);
Sagi Grimberg72e5d752020-05-01 14:25:44 -07002324 clear_bit(NVME_TCP_Q_POLLING, &queue->flags);
Sagi Grimberg1a9460c2019-07-03 14:08:04 -07002325 return queue->nr_cqe;
2326}
2327
Rikard Falkeborn6acbd962020-05-29 00:25:07 +02002328static const struct blk_mq_ops nvme_tcp_mq_ops = {
Sagi Grimberg3f2304f2018-12-03 17:52:17 -08002329 .queue_rq = nvme_tcp_queue_rq,
Sagi Grimberg86f03482020-06-18 17:30:23 -07002330 .commit_rqs = nvme_tcp_commit_rqs,
Sagi Grimberg3f2304f2018-12-03 17:52:17 -08002331 .complete = nvme_complete_rq,
2332 .init_request = nvme_tcp_init_request,
2333 .exit_request = nvme_tcp_exit_request,
2334 .init_hctx = nvme_tcp_init_hctx,
2335 .timeout = nvme_tcp_timeout,
Sagi Grimberg873946f2018-12-11 23:38:57 -08002336 .map_queues = nvme_tcp_map_queues,
Sagi Grimberg1a9460c2019-07-03 14:08:04 -07002337 .poll = nvme_tcp_poll,
Sagi Grimberg3f2304f2018-12-03 17:52:17 -08002338};
2339
Rikard Falkeborn6acbd962020-05-29 00:25:07 +02002340static const struct blk_mq_ops nvme_tcp_admin_mq_ops = {
Sagi Grimberg3f2304f2018-12-03 17:52:17 -08002341 .queue_rq = nvme_tcp_queue_rq,
2342 .complete = nvme_complete_rq,
2343 .init_request = nvme_tcp_init_request,
2344 .exit_request = nvme_tcp_exit_request,
2345 .init_hctx = nvme_tcp_init_admin_hctx,
2346 .timeout = nvme_tcp_timeout,
2347};
2348
2349static const struct nvme_ctrl_ops nvme_tcp_ctrl_ops = {
2350 .name = "tcp",
2351 .module = THIS_MODULE,
2352 .flags = NVME_F_FABRICS,
2353 .reg_read32 = nvmf_reg_read32,
2354 .reg_read64 = nvmf_reg_read64,
2355 .reg_write32 = nvmf_reg_write32,
2356 .free_ctrl = nvme_tcp_free_ctrl,
2357 .submit_async_event = nvme_tcp_submit_async_event,
2358 .delete_ctrl = nvme_tcp_delete_ctrl,
2359 .get_address = nvmf_get_address,
Sagi Grimberg3f2304f2018-12-03 17:52:17 -08002360};
2361
2362static bool
2363nvme_tcp_existing_controller(struct nvmf_ctrl_options *opts)
2364{
2365 struct nvme_tcp_ctrl *ctrl;
2366 bool found = false;
2367
2368 mutex_lock(&nvme_tcp_ctrl_mutex);
2369 list_for_each_entry(ctrl, &nvme_tcp_ctrl_list, list) {
2370 found = nvmf_ip_options_match(&ctrl->ctrl, opts);
2371 if (found)
2372 break;
2373 }
2374 mutex_unlock(&nvme_tcp_ctrl_mutex);
2375
2376 return found;
2377}
2378
2379static struct nvme_ctrl *nvme_tcp_create_ctrl(struct device *dev,
2380 struct nvmf_ctrl_options *opts)
2381{
2382 struct nvme_tcp_ctrl *ctrl;
2383 int ret;
2384
2385 ctrl = kzalloc(sizeof(*ctrl), GFP_KERNEL);
2386 if (!ctrl)
2387 return ERR_PTR(-ENOMEM);
2388
2389 INIT_LIST_HEAD(&ctrl->list);
2390 ctrl->ctrl.opts = opts;
Sagi Grimberg1a9460c2019-07-03 14:08:04 -07002391 ctrl->ctrl.queue_count = opts->nr_io_queues + opts->nr_write_queues +
2392 opts->nr_poll_queues + 1;
Sagi Grimberg3f2304f2018-12-03 17:52:17 -08002393 ctrl->ctrl.sqsize = opts->queue_size - 1;
2394 ctrl->ctrl.kato = opts->kato;
2395
2396 INIT_DELAYED_WORK(&ctrl->connect_work,
2397 nvme_tcp_reconnect_ctrl_work);
2398 INIT_WORK(&ctrl->err_work, nvme_tcp_error_recovery_work);
2399 INIT_WORK(&ctrl->ctrl.reset_work, nvme_reset_ctrl_work);
2400
2401 if (!(opts->mask & NVMF_OPT_TRSVCID)) {
2402 opts->trsvcid =
2403 kstrdup(__stringify(NVME_TCP_DISC_PORT), GFP_KERNEL);
2404 if (!opts->trsvcid) {
2405 ret = -ENOMEM;
2406 goto out_free_ctrl;
2407 }
2408 opts->mask |= NVMF_OPT_TRSVCID;
2409 }
2410
2411 ret = inet_pton_with_scope(&init_net, AF_UNSPEC,
2412 opts->traddr, opts->trsvcid, &ctrl->addr);
2413 if (ret) {
2414 pr_err("malformed address passed: %s:%s\n",
2415 opts->traddr, opts->trsvcid);
2416 goto out_free_ctrl;
2417 }
2418
2419 if (opts->mask & NVMF_OPT_HOST_TRADDR) {
2420 ret = inet_pton_with_scope(&init_net, AF_UNSPEC,
2421 opts->host_traddr, NULL, &ctrl->src_addr);
2422 if (ret) {
2423 pr_err("malformed src address passed: %s\n",
2424 opts->host_traddr);
2425 goto out_free_ctrl;
2426 }
2427 }
2428
2429 if (!opts->duplicate_connect && nvme_tcp_existing_controller(opts)) {
2430 ret = -EALREADY;
2431 goto out_free_ctrl;
2432 }
2433
Sagi Grimberg873946f2018-12-11 23:38:57 -08002434 ctrl->queues = kcalloc(ctrl->ctrl.queue_count, sizeof(*ctrl->queues),
Sagi Grimberg3f2304f2018-12-03 17:52:17 -08002435 GFP_KERNEL);
2436 if (!ctrl->queues) {
2437 ret = -ENOMEM;
2438 goto out_free_ctrl;
2439 }
2440
2441 ret = nvme_init_ctrl(&ctrl->ctrl, dev, &nvme_tcp_ctrl_ops, 0);
2442 if (ret)
2443 goto out_kfree_queues;
2444
2445 if (!nvme_change_ctrl_state(&ctrl->ctrl, NVME_CTRL_CONNECTING)) {
2446 WARN_ON_ONCE(1);
2447 ret = -EINTR;
2448 goto out_uninit_ctrl;
2449 }
2450
2451 ret = nvme_tcp_setup_ctrl(&ctrl->ctrl, true);
2452 if (ret)
2453 goto out_uninit_ctrl;
2454
2455 dev_info(ctrl->ctrl.device, "new ctrl: NQN \"%s\", addr %pISp\n",
2456 ctrl->ctrl.opts->subsysnqn, &ctrl->addr);
2457
Sagi Grimberg3f2304f2018-12-03 17:52:17 -08002458 mutex_lock(&nvme_tcp_ctrl_mutex);
2459 list_add_tail(&ctrl->list, &nvme_tcp_ctrl_list);
2460 mutex_unlock(&nvme_tcp_ctrl_mutex);
2461
2462 return &ctrl->ctrl;
2463
2464out_uninit_ctrl:
2465 nvme_uninit_ctrl(&ctrl->ctrl);
2466 nvme_put_ctrl(&ctrl->ctrl);
2467 if (ret > 0)
2468 ret = -EIO;
2469 return ERR_PTR(ret);
2470out_kfree_queues:
2471 kfree(ctrl->queues);
2472out_free_ctrl:
2473 kfree(ctrl);
2474 return ERR_PTR(ret);
2475}
2476
2477static struct nvmf_transport_ops nvme_tcp_transport = {
2478 .name = "tcp",
2479 .module = THIS_MODULE,
2480 .required_opts = NVMF_OPT_TRADDR,
2481 .allowed_opts = NVMF_OPT_TRSVCID | NVMF_OPT_RECONNECT_DELAY |
2482 NVMF_OPT_HOST_TRADDR | NVMF_OPT_CTRL_LOSS_TMO |
Sagi Grimberg873946f2018-12-11 23:38:57 -08002483 NVMF_OPT_HDR_DIGEST | NVMF_OPT_DATA_DIGEST |
Israel Rukshinbb139852019-08-18 12:08:54 +03002484 NVMF_OPT_NR_WRITE_QUEUES | NVMF_OPT_NR_POLL_QUEUES |
2485 NVMF_OPT_TOS,
Sagi Grimberg3f2304f2018-12-03 17:52:17 -08002486 .create_ctrl = nvme_tcp_create_ctrl,
2487};
2488
2489static int __init nvme_tcp_init_module(void)
2490{
2491 nvme_tcp_wq = alloc_workqueue("nvme_tcp_wq",
2492 WQ_MEM_RECLAIM | WQ_HIGHPRI, 0);
2493 if (!nvme_tcp_wq)
2494 return -ENOMEM;
2495
2496 nvmf_register_transport(&nvme_tcp_transport);
2497 return 0;
2498}
2499
2500static void __exit nvme_tcp_cleanup_module(void)
2501{
2502 struct nvme_tcp_ctrl *ctrl;
2503
2504 nvmf_unregister_transport(&nvme_tcp_transport);
2505
2506 mutex_lock(&nvme_tcp_ctrl_mutex);
2507 list_for_each_entry(ctrl, &nvme_tcp_ctrl_list, list)
2508 nvme_delete_ctrl(&ctrl->ctrl);
2509 mutex_unlock(&nvme_tcp_ctrl_mutex);
2510 flush_workqueue(nvme_delete_wq);
2511
2512 destroy_workqueue(nvme_tcp_wq);
2513}
2514
2515module_init(nvme_tcp_init_module);
2516module_exit(nvme_tcp_cleanup_module);
2517
2518MODULE_LICENSE("GPL v2");