// SPDX-License-Identifier: GPL-2.0
/*
 * NVMe over Fabrics TCP host.
 * Copyright (c) 2018 Lightbits Labs. All rights reserved.
 */
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/module.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/err.h>
#include <linux/nvme-tcp.h>
#include <net/sock.h>
#include <net/tcp.h>
#include <linux/blk-mq.h>
#include <crypto/hash.h>
#include <net/busy_poll.h>

#include "nvme.h"
#include "fabrics.h"

struct nvme_tcp_queue;

/* Define the socket priority to use for connections where it is desirable
 * that the NIC consider performing optimized packet processing or filtering.
 * A non-zero value is sufficient to indicate general consideration of any
 * possible optimization. Making it a module param allows for alternative
 * values that may be unique to some NIC implementations.
 */
static int so_priority;
module_param(so_priority, int, 0644);
MODULE_PARM_DESC(so_priority, "nvme tcp socket optimize priority");

enum nvme_tcp_send_state {
	NVME_TCP_SEND_CMD_PDU = 0,
	NVME_TCP_SEND_H2C_PDU,
	NVME_TCP_SEND_DATA,
	NVME_TCP_SEND_DDGST,
};

struct nvme_tcp_request {
	struct nvme_request	req;
	void			*pdu;
	struct nvme_tcp_queue	*queue;
	u32			data_len;
	u32			pdu_len;
	u32			pdu_sent;
	u16			ttag;
	__le16			status;
	struct list_head	entry;
	struct llist_node	lentry;
	__le32			ddgst;

	struct bio		*curr_bio;
	struct iov_iter		iter;

	/* send state */
	size_t			offset;
	size_t			data_sent;
	enum nvme_tcp_send_state state;
};

enum nvme_tcp_queue_flags {
	NVME_TCP_Q_ALLOCATED	= 0,
	NVME_TCP_Q_LIVE		= 1,
	NVME_TCP_Q_POLLING	= 2,
};

enum nvme_tcp_recv_state {
	NVME_TCP_RECV_PDU = 0,
	NVME_TCP_RECV_DATA,
	NVME_TCP_RECV_DDGST,
};

struct nvme_tcp_ctrl;
struct nvme_tcp_queue {
	struct socket		*sock;
	struct work_struct	io_work;
	int			io_cpu;

	struct mutex		queue_lock;
	struct mutex		send_mutex;
	struct llist_head	req_list;
	struct list_head	send_list;
	bool			more_requests;

	/* recv state */
	void			*pdu;
	int			pdu_remaining;
	int			pdu_offset;
	size_t			data_remaining;
	size_t			ddgst_remaining;
	unsigned int		nr_cqe;

	/* send state */
	struct nvme_tcp_request *request;

	int			queue_size;
	size_t			cmnd_capsule_len;
	struct nvme_tcp_ctrl	*ctrl;
	unsigned long		flags;
	bool			rd_enabled;

	bool			hdr_digest;
	bool			data_digest;
	struct ahash_request	*rcv_hash;
	struct ahash_request	*snd_hash;
	__le32			exp_ddgst;
	__le32			recv_ddgst;

	struct page_frag_cache	pf_cache;

	void (*state_change)(struct sock *);
	void (*data_ready)(struct sock *);
	void (*write_space)(struct sock *);
};

struct nvme_tcp_ctrl {
	/* read only in the hot path */
	struct nvme_tcp_queue	*queues;
	struct blk_mq_tag_set	tag_set;

	/* other member variables */
	struct list_head	list;
	struct blk_mq_tag_set	admin_tag_set;
	struct sockaddr_storage addr;
	struct sockaddr_storage src_addr;
	struct nvme_ctrl	ctrl;

	struct work_struct	err_work;
	struct delayed_work	connect_work;
	struct nvme_tcp_request async_req;
	u32			io_queues[HCTX_MAX_TYPES];
};

static LIST_HEAD(nvme_tcp_ctrl_list);
static DEFINE_MUTEX(nvme_tcp_ctrl_mutex);
static struct workqueue_struct *nvme_tcp_wq;
static const struct blk_mq_ops nvme_tcp_mq_ops;
static const struct blk_mq_ops nvme_tcp_admin_mq_ops;
static int nvme_tcp_try_send(struct nvme_tcp_queue *queue);

static inline struct nvme_tcp_ctrl *to_tcp_ctrl(struct nvme_ctrl *ctrl)
{
	return container_of(ctrl, struct nvme_tcp_ctrl, ctrl);
}

static inline int nvme_tcp_queue_id(struct nvme_tcp_queue *queue)
{
	return queue - queue->ctrl->queues;
}

static inline struct blk_mq_tags *nvme_tcp_tagset(struct nvme_tcp_queue *queue)
{
	u32 queue_idx = nvme_tcp_queue_id(queue);

	if (queue_idx == 0)
		return queue->ctrl->admin_tag_set.tags[queue_idx];
	return queue->ctrl->tag_set.tags[queue_idx - 1];
}

static inline u8 nvme_tcp_hdgst_len(struct nvme_tcp_queue *queue)
{
	return queue->hdr_digest ? NVME_TCP_DIGEST_LENGTH : 0;
}

static inline u8 nvme_tcp_ddgst_len(struct nvme_tcp_queue *queue)
{
	return queue->data_digest ? NVME_TCP_DIGEST_LENGTH : 0;
}

static inline size_t nvme_tcp_inline_data_size(struct nvme_tcp_queue *queue)
{
	return queue->cmnd_capsule_len - sizeof(struct nvme_command);
}

static inline bool nvme_tcp_async_req(struct nvme_tcp_request *req)
{
	return req == &req->queue->ctrl->async_req;
}

static inline bool nvme_tcp_has_inline_data(struct nvme_tcp_request *req)
{
	struct request *rq;

	if (unlikely(nvme_tcp_async_req(req)))
		return false; /* async events don't have a request */

	rq = blk_mq_rq_from_pdu(req);

	return rq_data_dir(rq) == WRITE && req->data_len &&
		req->data_len <= nvme_tcp_inline_data_size(req->queue);
}

static inline struct page *nvme_tcp_req_cur_page(struct nvme_tcp_request *req)
{
	return req->iter.bvec->bv_page;
}

static inline size_t nvme_tcp_req_cur_offset(struct nvme_tcp_request *req)
{
	return req->iter.bvec->bv_offset + req->iter.iov_offset;
}

static inline size_t nvme_tcp_req_cur_length(struct nvme_tcp_request *req)
{
	return min_t(size_t, iov_iter_single_seg_count(&req->iter),
			req->pdu_len - req->pdu_sent);
}

static inline size_t nvme_tcp_pdu_data_left(struct nvme_tcp_request *req)
{
	return rq_data_dir(blk_mq_rq_from_pdu(req)) == WRITE ?
			req->pdu_len - req->pdu_sent : 0;
}

static inline bool nvme_tcp_pdu_last_send(struct nvme_tcp_request *req,
		int len)
{
	return nvme_tcp_pdu_data_left(req) <= len;
}

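/*
 * Point req->iter at the data pages for this request: either the single
 * special payload bvec (e.g. discard) or the bvec array of the current
 * bio, counted with bio_for_each_bvec() so multi-page bvecs are covered.
 */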
static void nvme_tcp_init_iter(struct nvme_tcp_request *req,
		unsigned int dir)
{
	struct request *rq = blk_mq_rq_from_pdu(req);
	struct bio_vec *vec;
	unsigned int size;
	int nr_bvec;
	size_t offset;

	if (rq->rq_flags & RQF_SPECIAL_PAYLOAD) {
		vec = &rq->special_vec;
		nr_bvec = 1;
		size = blk_rq_payload_bytes(rq);
		offset = 0;
	} else {
		struct bio *bio = req->curr_bio;
		struct bvec_iter bi;
		struct bio_vec bv;

		vec = __bvec_iter_bvec(bio->bi_io_vec, bio->bi_iter);
		nr_bvec = 0;
		bio_for_each_bvec(bv, bio, bi) {
			nr_bvec++;
		}
		size = bio->bi_iter.bi_size;
		offset = bio->bi_iter.bi_bvec_done;
	}

	iov_iter_bvec(&req->iter, dir, vec, nr_bvec, size);
	req->iter.iov_offset = offset;
}

static inline void nvme_tcp_advance_req(struct nvme_tcp_request *req,
		int len)
{
	req->data_sent += len;
	req->pdu_sent += len;
	iov_iter_advance(&req->iter, len);
	if (!iov_iter_count(&req->iter) &&
	    req->data_sent < req->data_len) {
		req->curr_bio = req->curr_bio->bi_next;
		nvme_tcp_init_iter(req, WRITE);
	}
}

static inline void nvme_tcp_send_all(struct nvme_tcp_queue *queue)
{
	int ret;

	/* drain the send queue as much as we can... */
	do {
		ret = nvme_tcp_try_send(queue);
	} while (ret > 0);
}

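/*
 * Check whether any send work is pending: requests sitting on either the
 * lockless req_list or the send_list, or a batch still being submitted.
 */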
static inline bool nvme_tcp_queue_more(struct nvme_tcp_queue *queue)
{
	return !list_empty(&queue->send_list) ||
		!llist_empty(&queue->req_list) || queue->more_requests;
}

static inline void nvme_tcp_queue_request(struct nvme_tcp_request *req,
		bool sync, bool last)
{
	struct nvme_tcp_queue *queue = req->queue;
	bool empty;

	empty = llist_add(&req->lentry, &queue->req_list) &&
		list_empty(&queue->send_list) && !queue->request;

	/*
	 * If we are the first on the send_list, try to send directly;
	 * otherwise queue io_work. Also, only do that if we are on the
	 * same cpu, so we don't introduce contention.
	 */
	if (queue->io_cpu == raw_smp_processor_id() &&
	    sync && empty && mutex_trylock(&queue->send_mutex)) {
		queue->more_requests = !last;
		nvme_tcp_send_all(queue);
		queue->more_requests = false;
		mutex_unlock(&queue->send_mutex);
	}

	if (last && nvme_tcp_queue_more(queue))
		queue_work_on(queue->io_cpu, nvme_tcp_wq, &queue->io_work);
}

static void nvme_tcp_process_req_list(struct nvme_tcp_queue *queue)
{
	struct nvme_tcp_request *req;
	struct llist_node *node;

	for (node = llist_del_all(&queue->req_list); node; node = node->next) {
		req = llist_entry(node, struct nvme_tcp_request, lentry);
		list_add(&req->entry, &queue->send_list);
	}
}

static inline struct nvme_tcp_request *
nvme_tcp_fetch_request(struct nvme_tcp_queue *queue)
{
	struct nvme_tcp_request *req;

	req = list_first_entry_or_null(&queue->send_list,
			struct nvme_tcp_request, entry);
	if (!req) {
		nvme_tcp_process_req_list(queue);
		req = list_first_entry_or_null(&queue->send_list,
				struct nvme_tcp_request, entry);
		if (unlikely(!req))
			return NULL;
	}

	list_del(&req->entry);
	return req;
}

static inline void nvme_tcp_ddgst_final(struct ahash_request *hash,
		__le32 *dgst)
{
	ahash_request_set_crypt(hash, NULL, (u8 *)dgst, 0);
	crypto_ahash_final(hash);
}

static inline void nvme_tcp_ddgst_update(struct ahash_request *hash,
		struct page *page, off_t off, size_t len)
{
	struct scatterlist sg;

	sg_init_marker(&sg, 1);
	sg_set_page(&sg, page, len, off);
	ahash_request_set_crypt(hash, &sg, NULL, len);
	crypto_ahash_update(hash);
}

static inline void nvme_tcp_hdgst(struct ahash_request *hash,
		void *pdu, size_t len)
{
	struct scatterlist sg;

	sg_init_one(&sg, pdu, len);
	ahash_request_set_crypt(hash, &sg, pdu + len, len);
	crypto_ahash_digest(hash);
}

static int nvme_tcp_verify_hdgst(struct nvme_tcp_queue *queue,
		void *pdu, size_t pdu_len)
{
	struct nvme_tcp_hdr *hdr = pdu;
	__le32 recv_digest;
	__le32 exp_digest;

	if (unlikely(!(hdr->flags & NVME_TCP_F_HDGST))) {
		dev_err(queue->ctrl->ctrl.device,
			"queue %d: header digest flag is cleared\n",
			nvme_tcp_queue_id(queue));
		return -EPROTO;
	}

	recv_digest = *(__le32 *)(pdu + hdr->hlen);
	nvme_tcp_hdgst(queue->rcv_hash, pdu, pdu_len);
	exp_digest = *(__le32 *)(pdu + hdr->hlen);
	if (recv_digest != exp_digest) {
		dev_err(queue->ctrl->ctrl.device,
			"header digest error: recv %#x expected %#x\n",
			le32_to_cpu(recv_digest), le32_to_cpu(exp_digest));
		return -EIO;
	}

	return 0;
}

static int nvme_tcp_check_ddgst(struct nvme_tcp_queue *queue, void *pdu)
{
	struct nvme_tcp_hdr *hdr = pdu;
	u8 digest_len = nvme_tcp_hdgst_len(queue);
	u32 len;

	len = le32_to_cpu(hdr->plen) - hdr->hlen -
		((hdr->flags & NVME_TCP_F_HDGST) ? digest_len : 0);

	if (unlikely(len && !(hdr->flags & NVME_TCP_F_DDGST))) {
		dev_err(queue->ctrl->ctrl.device,
			"queue %d: data digest flag is cleared\n",
			nvme_tcp_queue_id(queue));
		return -EPROTO;
	}
	crypto_ahash_init(queue->rcv_hash);

	return 0;
}

static void nvme_tcp_exit_request(struct blk_mq_tag_set *set,
		struct request *rq, unsigned int hctx_idx)
{
	struct nvme_tcp_request *req = blk_mq_rq_to_pdu(rq);

	page_frag_free(req->pdu);
}

static int nvme_tcp_init_request(struct blk_mq_tag_set *set,
		struct request *rq, unsigned int hctx_idx,
		unsigned int numa_node)
{
	struct nvme_tcp_ctrl *ctrl = set->driver_data;
	struct nvme_tcp_request *req = blk_mq_rq_to_pdu(rq);
	struct nvme_tcp_cmd_pdu *pdu;
	int queue_idx = (set == &ctrl->tag_set) ? hctx_idx + 1 : 0;
	struct nvme_tcp_queue *queue = &ctrl->queues[queue_idx];
	u8 hdgst = nvme_tcp_hdgst_len(queue);

	req->pdu = page_frag_alloc(&queue->pf_cache,
		sizeof(struct nvme_tcp_cmd_pdu) + hdgst,
		GFP_KERNEL | __GFP_ZERO);
	if (!req->pdu)
		return -ENOMEM;

	pdu = req->pdu;
	req->queue = queue;
	nvme_req(rq)->ctrl = &ctrl->ctrl;
	nvme_req(rq)->cmd = &pdu->cmd;

	return 0;
}

static int nvme_tcp_init_hctx(struct blk_mq_hw_ctx *hctx, void *data,
		unsigned int hctx_idx)
{
	struct nvme_tcp_ctrl *ctrl = data;
	struct nvme_tcp_queue *queue = &ctrl->queues[hctx_idx + 1];

	hctx->driver_data = queue;
	return 0;
}

static int nvme_tcp_init_admin_hctx(struct blk_mq_hw_ctx *hctx, void *data,
		unsigned int hctx_idx)
{
	struct nvme_tcp_ctrl *ctrl = data;
	struct nvme_tcp_queue *queue = &ctrl->queues[0];

	hctx->driver_data = queue;
	return 0;
}

468nvme_tcp_recv_state(struct nvme_tcp_queue *queue)
469{
470 return (queue->pdu_remaining) ? NVME_TCP_RECV_PDU :
471 (queue->ddgst_remaining) ? NVME_TCP_RECV_DDGST :
472 NVME_TCP_RECV_DATA;
473}
474
475static void nvme_tcp_init_recv_ctx(struct nvme_tcp_queue *queue)
476{
477 queue->pdu_remaining = sizeof(struct nvme_tcp_rsp_pdu) +
478 nvme_tcp_hdgst_len(queue);
479 queue->pdu_offset = 0;
480 queue->data_remaining = -1;
481 queue->ddgst_remaining = 0;
482}
483
484static void nvme_tcp_error_recovery(struct nvme_ctrl *ctrl)
485{
486 if (!nvme_change_ctrl_state(ctrl, NVME_CTRL_RESETTING))
487 return;
488
Sagi Grimberg236187c2020-07-28 13:16:36 -0700489 dev_warn(ctrl->device, "starting error recovery\n");
Nigel Kirkland97b25122020-02-10 16:01:45 -0800490 queue_work(nvme_reset_wq, &to_tcp_ctrl(ctrl)->err_work);
Sagi Grimberg3f2304f2018-12-03 17:52:17 -0800491}
492
493static int nvme_tcp_process_nvme_cqe(struct nvme_tcp_queue *queue,
494 struct nvme_completion *cqe)
495{
Daniel Wagner1ba2e502021-08-30 15:36:26 +0200496 struct nvme_tcp_request *req;
Sagi Grimberg3f2304f2018-12-03 17:52:17 -0800497 struct request *rq;
498
Sagi Grimberge7006de2021-06-16 14:19:36 -0700499 rq = nvme_find_rq(nvme_tcp_tagset(queue), cqe->command_id);
Sagi Grimberg3f2304f2018-12-03 17:52:17 -0800500 if (!rq) {
501 dev_err(queue->ctrl->ctrl.device,
Sagi Grimberge7006de2021-06-16 14:19:36 -0700502 "got bad cqe.command_id %#x on queue %d\n",
503 cqe->command_id, nvme_tcp_queue_id(queue));
Sagi Grimberg3f2304f2018-12-03 17:52:17 -0800504 nvme_tcp_error_recovery(&queue->ctrl->ctrl);
505 return -EINVAL;
506 }
507
Daniel Wagner1ba2e502021-08-30 15:36:26 +0200508 req = blk_mq_rq_to_pdu(rq);
509 if (req->status == cpu_to_le16(NVME_SC_SUCCESS))
510 req->status = cqe->status;
511
512 if (!nvme_try_complete_req(rq, req->status, cqe->result))
Christoph Hellwigff029452020-06-11 08:44:52 +0200513 nvme_complete_rq(rq);
Sagi Grimberg1a9460c2019-07-03 14:08:04 -0700514 queue->nr_cqe++;
Sagi Grimberg3f2304f2018-12-03 17:52:17 -0800515
516 return 0;
517}
518
519static int nvme_tcp_handle_c2h_data(struct nvme_tcp_queue *queue,
520 struct nvme_tcp_data_pdu *pdu)
521{
522 struct request *rq;
523
Sagi Grimberge7006de2021-06-16 14:19:36 -0700524 rq = nvme_find_rq(nvme_tcp_tagset(queue), pdu->command_id);
Sagi Grimberg3f2304f2018-12-03 17:52:17 -0800525 if (!rq) {
526 dev_err(queue->ctrl->ctrl.device,
Sagi Grimberge7006de2021-06-16 14:19:36 -0700527 "got bad c2hdata.command_id %#x on queue %d\n",
528 pdu->command_id, nvme_tcp_queue_id(queue));
Sagi Grimberg3f2304f2018-12-03 17:52:17 -0800529 return -ENOENT;
530 }
531
532 if (!blk_rq_payload_bytes(rq)) {
533 dev_err(queue->ctrl->ctrl.device,
534 "queue %d tag %#x unexpected data\n",
535 nvme_tcp_queue_id(queue), rq->tag);
536 return -EIO;
537 }
538
539 queue->data_remaining = le32_to_cpu(pdu->data_length);
540
Sagi Grimberg602d6742019-03-13 18:55:10 +0100541 if (pdu->hdr.flags & NVME_TCP_F_DATA_SUCCESS &&
542 unlikely(!(pdu->hdr.flags & NVME_TCP_F_DATA_LAST))) {
543 dev_err(queue->ctrl->ctrl.device,
544 "queue %d tag %#x SUCCESS set but not last PDU\n",
545 nvme_tcp_queue_id(queue), rq->tag);
546 nvme_tcp_error_recovery(&queue->ctrl->ctrl);
547 return -EPROTO;
548 }
549
Sagi Grimberg3f2304f2018-12-03 17:52:17 -0800550 return 0;
Sagi Grimberg3f2304f2018-12-03 17:52:17 -0800551}
552
553static int nvme_tcp_handle_comp(struct nvme_tcp_queue *queue,
554 struct nvme_tcp_rsp_pdu *pdu)
555{
556 struct nvme_completion *cqe = &pdu->cqe;
557 int ret = 0;
558
559 /*
560 * AEN requests are special as they don't time out and can
561 * survive any kind of queue freeze and often don't respond to
562 * aborts. We don't even bother to allocate a struct request
563 * for them but rather special case them here.
564 */
Israel Rukshin58a8df62019-10-13 19:57:31 +0300565 if (unlikely(nvme_is_aen_req(nvme_tcp_queue_id(queue),
566 cqe->command_id)))
Sagi Grimberg3f2304f2018-12-03 17:52:17 -0800567 nvme_complete_async_event(&queue->ctrl->ctrl, cqe->status,
568 &cqe->result);
569 else
570 ret = nvme_tcp_process_nvme_cqe(queue, cqe);
571
572 return ret;
573}
574
Varun Prakash1d3ef9c32021-11-23 16:28:56 +0530575static void nvme_tcp_setup_h2c_data_pdu(struct nvme_tcp_request *req,
Sagi Grimberg3f2304f2018-12-03 17:52:17 -0800576 struct nvme_tcp_r2t_pdu *pdu)
577{
578 struct nvme_tcp_data_pdu *data = req->pdu;
579 struct nvme_tcp_queue *queue = req->queue;
580 struct request *rq = blk_mq_rq_from_pdu(req);
581 u8 hdgst = nvme_tcp_hdgst_len(queue);
582 u8 ddgst = nvme_tcp_ddgst_len(queue);
583
Varun Prakash1d3ef9c32021-11-23 16:28:56 +0530584 req->state = NVME_TCP_SEND_H2C_PDU;
585 req->offset = 0;
Sagi Grimberg3f2304f2018-12-03 17:52:17 -0800586 req->pdu_len = le32_to_cpu(pdu->r2t_length);
587 req->pdu_sent = 0;
588
Sagi Grimberg3f2304f2018-12-03 17:52:17 -0800589 memset(data, 0, sizeof(*data));
590 data->hdr.type = nvme_tcp_h2c_data;
591 data->hdr.flags = NVME_TCP_F_DATA_LAST;
592 if (queue->hdr_digest)
593 data->hdr.flags |= NVME_TCP_F_HDGST;
594 if (queue->data_digest)
595 data->hdr.flags |= NVME_TCP_F_DDGST;
596 data->hdr.hlen = sizeof(*data);
597 data->hdr.pdo = data->hdr.hlen + hdgst;
598 data->hdr.plen =
599 cpu_to_le32(data->hdr.hlen + hdgst + req->pdu_len + ddgst);
600 data->ttag = pdu->ttag;
Sagi Grimberge7006de2021-06-16 14:19:36 -0700601 data->command_id = nvme_cid(rq);
Sagi Grimberge371af02021-09-14 18:38:55 +0300602 data->data_offset = pdu->r2t_offset;
Sagi Grimberg3f2304f2018-12-03 17:52:17 -0800603 data->data_length = cpu_to_le32(req->pdu_len);
Sagi Grimberg3f2304f2018-12-03 17:52:17 -0800604}
605
606static int nvme_tcp_handle_r2t(struct nvme_tcp_queue *queue,
607 struct nvme_tcp_r2t_pdu *pdu)
608{
609 struct nvme_tcp_request *req;
610 struct request *rq;
Varun Prakash1d3ef9c32021-11-23 16:28:56 +0530611 u32 r2t_length = le32_to_cpu(pdu->r2t_length);
Sagi Grimberg3f2304f2018-12-03 17:52:17 -0800612
Sagi Grimberge7006de2021-06-16 14:19:36 -0700613 rq = nvme_find_rq(nvme_tcp_tagset(queue), pdu->command_id);
Sagi Grimberg3f2304f2018-12-03 17:52:17 -0800614 if (!rq) {
615 dev_err(queue->ctrl->ctrl.device,
Sagi Grimberge7006de2021-06-16 14:19:36 -0700616 "got bad r2t.command_id %#x on queue %d\n",
617 pdu->command_id, nvme_tcp_queue_id(queue));
Sagi Grimberg3f2304f2018-12-03 17:52:17 -0800618 return -ENOENT;
619 }
620 req = blk_mq_rq_to_pdu(rq);
621
Varun Prakash1d3ef9c32021-11-23 16:28:56 +0530622 if (unlikely(!r2t_length)) {
623 dev_err(queue->ctrl->ctrl.device,
624 "req %d r2t len is %u, probably a bug...\n",
625 rq->tag, r2t_length);
626 return -EPROTO;
627 }
Sagi Grimberg3f2304f2018-12-03 17:52:17 -0800628
Varun Prakash1d3ef9c32021-11-23 16:28:56 +0530629 if (unlikely(req->data_sent + r2t_length > req->data_len)) {
630 dev_err(queue->ctrl->ctrl.device,
631 "req %d r2t len %u exceeded data len %u (%zu sent)\n",
632 rq->tag, r2t_length, req->data_len, req->data_sent);
633 return -EPROTO;
634 }
Sagi Grimberg3f2304f2018-12-03 17:52:17 -0800635
Varun Prakash1d3ef9c32021-11-23 16:28:56 +0530636 if (unlikely(le32_to_cpu(pdu->r2t_offset) < req->data_sent)) {
637 dev_err(queue->ctrl->ctrl.device,
638 "req %d unexpected r2t offset %u (expected %zu)\n",
639 rq->tag, le32_to_cpu(pdu->r2t_offset), req->data_sent);
640 return -EPROTO;
641 }
642
643 nvme_tcp_setup_h2c_data_pdu(req, pdu);
Sagi Grimberg86f03482020-06-18 17:30:23 -0700644 nvme_tcp_queue_request(req, false, true);
Sagi Grimberg3f2304f2018-12-03 17:52:17 -0800645
646 return 0;
647}
648
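/*
 * Reassemble a PDU header that may arrive split across multiple skbs,
 * verify its digests, and dispatch on the PDU type.
 */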
static int nvme_tcp_recv_pdu(struct nvme_tcp_queue *queue, struct sk_buff *skb,
		unsigned int *offset, size_t *len)
{
	struct nvme_tcp_hdr *hdr;
	char *pdu = queue->pdu;
	size_t rcv_len = min_t(size_t, *len, queue->pdu_remaining);
	int ret;

	ret = skb_copy_bits(skb, *offset,
		&pdu[queue->pdu_offset], rcv_len);
	if (unlikely(ret))
		return ret;

	queue->pdu_remaining -= rcv_len;
	queue->pdu_offset += rcv_len;
	*offset += rcv_len;
	*len -= rcv_len;
	if (queue->pdu_remaining)
		return 0;

	hdr = queue->pdu;
	if (queue->hdr_digest) {
		ret = nvme_tcp_verify_hdgst(queue, queue->pdu, hdr->hlen);
		if (unlikely(ret))
			return ret;
	}

	if (queue->data_digest) {
		ret = nvme_tcp_check_ddgst(queue, queue->pdu);
		if (unlikely(ret))
			return ret;
	}

	switch (hdr->type) {
	case nvme_tcp_c2h_data:
		return nvme_tcp_handle_c2h_data(queue, (void *)queue->pdu);
	case nvme_tcp_rsp:
		nvme_tcp_init_recv_ctx(queue);
		return nvme_tcp_handle_comp(queue, (void *)queue->pdu);
	case nvme_tcp_r2t:
		nvme_tcp_init_recv_ctx(queue);
		return nvme_tcp_handle_r2t(queue, (void *)queue->pdu);
	default:
		dev_err(queue->ctrl->ctrl.device,
			"unsupported pdu type (%d)\n", hdr->type);
		return -EINVAL;
	}
}

static inline void nvme_tcp_end_request(struct request *rq, u16 status)
{
	union nvme_result res = {};

	if (!nvme_try_complete_req(rq, cpu_to_le16(status << 1), res))
		nvme_complete_rq(rq);
}

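/*
 * Copy C2H data from the skb into the request's bio pages, advancing the
 * bvec iterator and feeding the receive hash when data digest is enabled.
 */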
static int nvme_tcp_recv_data(struct nvme_tcp_queue *queue, struct sk_buff *skb,
		unsigned int *offset, size_t *len)
{
	struct nvme_tcp_data_pdu *pdu = (void *)queue->pdu;
	struct request *rq =
		nvme_cid_to_rq(nvme_tcp_tagset(queue), pdu->command_id);
	struct nvme_tcp_request *req = blk_mq_rq_to_pdu(rq);

	while (true) {
		int recv_len, ret;

		recv_len = min_t(size_t, *len, queue->data_remaining);
		if (!recv_len)
			break;

		if (!iov_iter_count(&req->iter)) {
			req->curr_bio = req->curr_bio->bi_next;

			/*
			 * If we don't have any bios it means that the
			 * controller sent more data than we requested,
			 * hence error.
			 */
			if (!req->curr_bio) {
				dev_err(queue->ctrl->ctrl.device,
					"queue %d no space in request %#x",
					nvme_tcp_queue_id(queue), rq->tag);
				nvme_tcp_init_recv_ctx(queue);
				return -EIO;
			}
			nvme_tcp_init_iter(req, READ);
		}

		/* we can read only from what is left in this bio */
		recv_len = min_t(size_t, recv_len,
				iov_iter_count(&req->iter));

		if (queue->data_digest)
			ret = skb_copy_and_hash_datagram_iter(skb, *offset,
				&req->iter, recv_len, queue->rcv_hash);
		else
			ret = skb_copy_datagram_iter(skb, *offset,
					&req->iter, recv_len);
		if (ret) {
			dev_err(queue->ctrl->ctrl.device,
				"queue %d failed to copy request %#x data",
				nvme_tcp_queue_id(queue), rq->tag);
			return ret;
		}

		*len -= recv_len;
		*offset += recv_len;
		queue->data_remaining -= recv_len;
	}

	if (!queue->data_remaining) {
		if (queue->data_digest) {
			nvme_tcp_ddgst_final(queue->rcv_hash, &queue->exp_ddgst);
			queue->ddgst_remaining = NVME_TCP_DIGEST_LENGTH;
		} else {
			if (pdu->hdr.flags & NVME_TCP_F_DATA_SUCCESS) {
				nvme_tcp_end_request(rq,
						le16_to_cpu(req->status));
				queue->nr_cqe++;
			}
			nvme_tcp_init_recv_ctx(queue);
		}
	}

	return 0;
}

static int nvme_tcp_recv_ddgst(struct nvme_tcp_queue *queue,
		struct sk_buff *skb, unsigned int *offset, size_t *len)
{
	struct nvme_tcp_data_pdu *pdu = (void *)queue->pdu;
	char *ddgst = (char *)&queue->recv_ddgst;
	size_t recv_len = min_t(size_t, *len, queue->ddgst_remaining);
	off_t off = NVME_TCP_DIGEST_LENGTH - queue->ddgst_remaining;
	int ret;

	ret = skb_copy_bits(skb, *offset, &ddgst[off], recv_len);
	if (unlikely(ret))
		return ret;

	queue->ddgst_remaining -= recv_len;
	*offset += recv_len;
	*len -= recv_len;
	if (queue->ddgst_remaining)
		return 0;

	if (queue->recv_ddgst != queue->exp_ddgst) {
		struct request *rq = nvme_cid_to_rq(nvme_tcp_tagset(queue),
					pdu->command_id);
		struct nvme_tcp_request *req = blk_mq_rq_to_pdu(rq);

		req->status = cpu_to_le16(NVME_SC_DATA_XFER_ERROR);

		dev_err(queue->ctrl->ctrl.device,
			"data digest error: recv %#x expected %#x\n",
			le32_to_cpu(queue->recv_ddgst),
			le32_to_cpu(queue->exp_ddgst));
	}

	if (pdu->hdr.flags & NVME_TCP_F_DATA_SUCCESS) {
		struct request *rq = nvme_cid_to_rq(nvme_tcp_tagset(queue),
					pdu->command_id);
		struct nvme_tcp_request *req = blk_mq_rq_to_pdu(rq);

		nvme_tcp_end_request(rq, le16_to_cpu(req->status));
		queue->nr_cqe++;
	}

	nvme_tcp_init_recv_ctx(queue);
	return 0;
}

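/*
 * ->read_sock() callback: consume as much of the skb as the receive state
 * machine allows, switching between PDU, DATA and DDGST states.
 */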
static int nvme_tcp_recv_skb(read_descriptor_t *desc, struct sk_buff *skb,
			     unsigned int offset, size_t len)
{
	struct nvme_tcp_queue *queue = desc->arg.data;
	size_t consumed = len;
	int result;

	while (len) {
		switch (nvme_tcp_recv_state(queue)) {
		case NVME_TCP_RECV_PDU:
			result = nvme_tcp_recv_pdu(queue, skb, &offset, &len);
			break;
		case NVME_TCP_RECV_DATA:
			result = nvme_tcp_recv_data(queue, skb, &offset, &len);
			break;
		case NVME_TCP_RECV_DDGST:
			result = nvme_tcp_recv_ddgst(queue, skb, &offset, &len);
			break;
		default:
			result = -EFAULT;
		}
		if (result) {
			dev_err(queue->ctrl->ctrl.device,
				"receive failed: %d\n", result);
			queue->rd_enabled = false;
			nvme_tcp_error_recovery(&queue->ctrl->ctrl);
			return result;
		}
	}

	return consumed;
}

static void nvme_tcp_data_ready(struct sock *sk)
{
	struct nvme_tcp_queue *queue;

	read_lock_bh(&sk->sk_callback_lock);
	queue = sk->sk_user_data;
	if (likely(queue && queue->rd_enabled) &&
	    !test_bit(NVME_TCP_Q_POLLING, &queue->flags))
		queue_work_on(queue->io_cpu, nvme_tcp_wq, &queue->io_work);
	read_unlock_bh(&sk->sk_callback_lock);
}

static void nvme_tcp_write_space(struct sock *sk)
{
	struct nvme_tcp_queue *queue;

	read_lock_bh(&sk->sk_callback_lock);
	queue = sk->sk_user_data;
	if (likely(queue && sk_stream_is_writeable(sk))) {
		clear_bit(SOCK_NOSPACE, &sk->sk_socket->flags);
		queue_work_on(queue->io_cpu, nvme_tcp_wq, &queue->io_work);
	}
	read_unlock_bh(&sk->sk_callback_lock);
}

static void nvme_tcp_state_change(struct sock *sk)
{
	struct nvme_tcp_queue *queue;

	read_lock_bh(&sk->sk_callback_lock);
	queue = sk->sk_user_data;
	if (!queue)
		goto done;

	switch (sk->sk_state) {
	case TCP_CLOSE:
	case TCP_CLOSE_WAIT:
	case TCP_LAST_ACK:
	case TCP_FIN_WAIT1:
	case TCP_FIN_WAIT2:
		nvme_tcp_error_recovery(&queue->ctrl->ctrl);
		break;
	default:
		dev_info(queue->ctrl->ctrl.device,
			"queue %d socket state %d\n",
			nvme_tcp_queue_id(queue), sk->sk_state);
	}

	queue->state_change(sk);
done:
	read_unlock_bh(&sk->sk_callback_lock);
}

static inline void nvme_tcp_done_send_req(struct nvme_tcp_queue *queue)
{
	queue->request = NULL;
}

static void nvme_tcp_fail_request(struct nvme_tcp_request *req)
{
	if (nvme_tcp_async_req(req)) {
		union nvme_result res = {};

		nvme_complete_async_event(&req->queue->ctrl->ctrl,
				cpu_to_le16(NVME_SC_HOST_PATH_ERROR), &res);
	} else {
		nvme_tcp_end_request(blk_mq_rq_from_pdu(req),
				NVME_SC_HOST_PATH_ERROR);
	}
}

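/*
 * Push the request payload to the socket page by page, preferring
 * zero-copy kernel_sendpage() when the page allows it and falling back
 * to the copying sock_no_sendpage() otherwise.
 */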
static int nvme_tcp_try_send_data(struct nvme_tcp_request *req)
{
	struct nvme_tcp_queue *queue = req->queue;
	int req_data_len = req->data_len;

	while (true) {
		struct page *page = nvme_tcp_req_cur_page(req);
		size_t offset = nvme_tcp_req_cur_offset(req);
		size_t len = nvme_tcp_req_cur_length(req);
		bool last = nvme_tcp_pdu_last_send(req, len);
		int req_data_sent = req->data_sent;
		int ret, flags = MSG_DONTWAIT;

		if (last && !queue->data_digest && !nvme_tcp_queue_more(queue))
			flags |= MSG_EOR;
		else
			flags |= MSG_MORE | MSG_SENDPAGE_NOTLAST;

		if (sendpage_ok(page)) {
			ret = kernel_sendpage(queue->sock, page, offset, len,
					flags);
		} else {
			ret = sock_no_sendpage(queue->sock, page, offset, len,
					flags);
		}
		if (ret <= 0)
			return ret;

		if (queue->data_digest)
			nvme_tcp_ddgst_update(queue->snd_hash, page,
					offset, ret);

		/*
		 * Update the request iterator except for the last payload
		 * send in the request, where we don't want to modify it as
		 * we may compete with the RX path completing the request.
		 */
		if (req_data_sent + ret < req_data_len)
			nvme_tcp_advance_req(req, ret);

		/* fully successful last send in current PDU */
		if (last && ret == len) {
			if (queue->data_digest) {
				nvme_tcp_ddgst_final(queue->snd_hash,
					&req->ddgst);
				req->state = NVME_TCP_SEND_DDGST;
				req->offset = 0;
			} else {
				nvme_tcp_done_send_req(queue);
			}
			return 1;
		}
	}
	return -EAGAIN;
}

static int nvme_tcp_try_send_cmd_pdu(struct nvme_tcp_request *req)
{
	struct nvme_tcp_queue *queue = req->queue;
	struct nvme_tcp_cmd_pdu *pdu = req->pdu;
	bool inline_data = nvme_tcp_has_inline_data(req);
	u8 hdgst = nvme_tcp_hdgst_len(queue);
	int len = sizeof(*pdu) + hdgst - req->offset;
	int flags = MSG_DONTWAIT;
	int ret;

	if (inline_data || nvme_tcp_queue_more(queue))
		flags |= MSG_MORE | MSG_SENDPAGE_NOTLAST;
	else
		flags |= MSG_EOR;

	if (queue->hdr_digest && !req->offset)
		nvme_tcp_hdgst(queue->snd_hash, pdu, sizeof(*pdu));

	ret = kernel_sendpage(queue->sock, virt_to_page(pdu),
			offset_in_page(pdu) + req->offset, len, flags);
	if (unlikely(ret <= 0))
		return ret;

	len -= ret;
	if (!len) {
		if (inline_data) {
			req->state = NVME_TCP_SEND_DATA;
			if (queue->data_digest)
				crypto_ahash_init(queue->snd_hash);
		} else {
			nvme_tcp_done_send_req(queue);
		}
		return 1;
	}
	req->offset += ret;

	return -EAGAIN;
}

static int nvme_tcp_try_send_data_pdu(struct nvme_tcp_request *req)
{
	struct nvme_tcp_queue *queue = req->queue;
	struct nvme_tcp_data_pdu *pdu = req->pdu;
	u8 hdgst = nvme_tcp_hdgst_len(queue);
	int len = sizeof(*pdu) - req->offset + hdgst;
	int ret;

	if (queue->hdr_digest && !req->offset)
		nvme_tcp_hdgst(queue->snd_hash, pdu, sizeof(*pdu));

	ret = kernel_sendpage(queue->sock, virt_to_page(pdu),
			offset_in_page(pdu) + req->offset, len,
			MSG_DONTWAIT | MSG_MORE | MSG_SENDPAGE_NOTLAST);
	if (unlikely(ret <= 0))
		return ret;

	len -= ret;
	if (!len) {
		req->state = NVME_TCP_SEND_DATA;
		if (queue->data_digest)
			crypto_ahash_init(queue->snd_hash);
		return 1;
	}
	req->offset += ret;

	return -EAGAIN;
}

static int nvme_tcp_try_send_ddgst(struct nvme_tcp_request *req)
{
	struct nvme_tcp_queue *queue = req->queue;
	size_t offset = req->offset;
	int ret;
	struct msghdr msg = { .msg_flags = MSG_DONTWAIT };
	struct kvec iov = {
		.iov_base = (u8 *)&req->ddgst + req->offset,
		.iov_len = NVME_TCP_DIGEST_LENGTH - req->offset
	};

	if (nvme_tcp_queue_more(queue))
		msg.msg_flags |= MSG_MORE;
	else
		msg.msg_flags |= MSG_EOR;

	ret = kernel_sendmsg(queue->sock, &msg, &iov, 1, iov.iov_len);
	if (unlikely(ret <= 0))
		return ret;

	if (offset + ret == NVME_TCP_DIGEST_LENGTH) {
		nvme_tcp_done_send_req(queue);
		return 1;
	}

	req->offset += ret;
	return -EAGAIN;
}

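/*
 * Send state machine: each request walks CMD_PDU -> (H2C_PDU) -> DATA ->
 * (DDGST). Returns 1 on progress, 0 when there is nothing to send or the
 * socket would block, and a negative errno on a failure that fails the
 * request.
 */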
static int nvme_tcp_try_send(struct nvme_tcp_queue *queue)
{
	struct nvme_tcp_request *req;
	int ret = 1;

	if (!queue->request) {
		queue->request = nvme_tcp_fetch_request(queue);
		if (!queue->request)
			return 0;
	}
	req = queue->request;

	if (req->state == NVME_TCP_SEND_CMD_PDU) {
		ret = nvme_tcp_try_send_cmd_pdu(req);
		if (ret <= 0)
			goto done;
		if (!nvme_tcp_has_inline_data(req))
			return ret;
	}

	if (req->state == NVME_TCP_SEND_H2C_PDU) {
		ret = nvme_tcp_try_send_data_pdu(req);
		if (ret <= 0)
			goto done;
	}

	if (req->state == NVME_TCP_SEND_DATA) {
		ret = nvme_tcp_try_send_data(req);
		if (ret <= 0)
			goto done;
	}

	if (req->state == NVME_TCP_SEND_DDGST)
		ret = nvme_tcp_try_send_ddgst(req);
done:
	if (ret == -EAGAIN) {
		ret = 0;
	} else if (ret < 0) {
		dev_err(queue->ctrl->ctrl.device,
			"failed to send request %d\n", ret);
		if (ret != -EPIPE && ret != -ECONNRESET)
			nvme_tcp_fail_request(queue->request);
		nvme_tcp_done_send_req(queue);
	}
	return ret;
}

static int nvme_tcp_try_recv(struct nvme_tcp_queue *queue)
{
	struct socket *sock = queue->sock;
	struct sock *sk = sock->sk;
	read_descriptor_t rd_desc;
	int consumed;

	rd_desc.arg.data = queue;
	rd_desc.count = 1;
	lock_sock(sk);
	queue->nr_cqe = 0;
	consumed = sock->ops->read_sock(sk, &rd_desc, nvme_tcp_recv_skb);
	release_sock(sk);
	return consumed;
}

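/*
 * Queue worker: alternate between sending and receiving for up to a 1ms
 * quota, then reschedule itself if there is still pending work.
 */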
static void nvme_tcp_io_work(struct work_struct *w)
{
	struct nvme_tcp_queue *queue =
		container_of(w, struct nvme_tcp_queue, io_work);
	unsigned long deadline = jiffies + msecs_to_jiffies(1);

	do {
		bool pending = false;
		int result;

		if (mutex_trylock(&queue->send_mutex)) {
			result = nvme_tcp_try_send(queue);
			mutex_unlock(&queue->send_mutex);
			if (result > 0)
				pending = true;
			else if (unlikely(result < 0))
				break;
		}

		result = nvme_tcp_try_recv(queue);
		if (result > 0)
			pending = true;
		else if (unlikely(result < 0))
			return;

		if (!pending)
			return;

	} while (!time_after(jiffies, deadline)); /* quota is exhausted */

	queue_work_on(queue->io_cpu, nvme_tcp_wq, &queue->io_work);
}

static void nvme_tcp_free_crypto(struct nvme_tcp_queue *queue)
{
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(queue->rcv_hash);

	ahash_request_free(queue->rcv_hash);
	ahash_request_free(queue->snd_hash);
	crypto_free_ahash(tfm);
}

static int nvme_tcp_alloc_crypto(struct nvme_tcp_queue *queue)
{
	struct crypto_ahash *tfm;

	tfm = crypto_alloc_ahash("crc32c", 0, CRYPTO_ALG_ASYNC);
	if (IS_ERR(tfm))
		return PTR_ERR(tfm);

	queue->snd_hash = ahash_request_alloc(tfm, GFP_KERNEL);
	if (!queue->snd_hash)
		goto free_tfm;
	ahash_request_set_callback(queue->snd_hash, 0, NULL, NULL);

	queue->rcv_hash = ahash_request_alloc(tfm, GFP_KERNEL);
	if (!queue->rcv_hash)
		goto free_snd_hash;
	ahash_request_set_callback(queue->rcv_hash, 0, NULL, NULL);

	return 0;
free_snd_hash:
	ahash_request_free(queue->snd_hash);
free_tfm:
	crypto_free_ahash(tfm);
	return -ENOMEM;
}

static void nvme_tcp_free_async_req(struct nvme_tcp_ctrl *ctrl)
{
	struct nvme_tcp_request *async = &ctrl->async_req;

	page_frag_free(async->pdu);
}

static int nvme_tcp_alloc_async_req(struct nvme_tcp_ctrl *ctrl)
{
	struct nvme_tcp_queue *queue = &ctrl->queues[0];
	struct nvme_tcp_request *async = &ctrl->async_req;
	u8 hdgst = nvme_tcp_hdgst_len(queue);

	async->pdu = page_frag_alloc(&queue->pf_cache,
		sizeof(struct nvme_tcp_cmd_pdu) + hdgst,
		GFP_KERNEL | __GFP_ZERO);
	if (!async->pdu)
		return -ENOMEM;

	async->queue = &ctrl->queues[0];
	return 0;
}

static void nvme_tcp_free_queue(struct nvme_ctrl *nctrl, int qid)
{
	struct page *page;
	struct nvme_tcp_ctrl *ctrl = to_tcp_ctrl(nctrl);
	struct nvme_tcp_queue *queue = &ctrl->queues[qid];

	if (!test_and_clear_bit(NVME_TCP_Q_ALLOCATED, &queue->flags))
		return;

	if (queue->hdr_digest || queue->data_digest)
		nvme_tcp_free_crypto(queue);

	if (queue->pf_cache.va) {
		page = virt_to_head_page(queue->pf_cache.va);
		__page_frag_cache_drain(page, queue->pf_cache.pagecnt_bias);
		queue->pf_cache.va = NULL;
	}
	sock_release(queue->sock);
	kfree(queue->pdu);
	mutex_destroy(&queue->send_mutex);
	mutex_destroy(&queue->queue_lock);
}

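/*
 * NVMe/TCP connection establishment: send an ICReq PDU and validate the
 * ICResp (PDU type and length, PFV, digest settings, CPDA) before the
 * queue can carry command traffic.
 */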
static int nvme_tcp_init_connection(struct nvme_tcp_queue *queue)
{
	struct nvme_tcp_icreq_pdu *icreq;
	struct nvme_tcp_icresp_pdu *icresp;
	struct msghdr msg = {};
	struct kvec iov;
	bool ctrl_hdgst, ctrl_ddgst;
	int ret;

	icreq = kzalloc(sizeof(*icreq), GFP_KERNEL);
	if (!icreq)
		return -ENOMEM;

	icresp = kzalloc(sizeof(*icresp), GFP_KERNEL);
	if (!icresp) {
		ret = -ENOMEM;
		goto free_icreq;
	}

	icreq->hdr.type = nvme_tcp_icreq;
	icreq->hdr.hlen = sizeof(*icreq);
	icreq->hdr.pdo = 0;
	icreq->hdr.plen = cpu_to_le32(icreq->hdr.hlen);
	icreq->pfv = cpu_to_le16(NVME_TCP_PFV_1_0);
	icreq->maxr2t = 0; /* single inflight r2t supported */
	icreq->hpda = 0; /* no alignment constraint */
	if (queue->hdr_digest)
		icreq->digest |= NVME_TCP_HDR_DIGEST_ENABLE;
	if (queue->data_digest)
		icreq->digest |= NVME_TCP_DATA_DIGEST_ENABLE;

	iov.iov_base = icreq;
	iov.iov_len = sizeof(*icreq);
	ret = kernel_sendmsg(queue->sock, &msg, &iov, 1, iov.iov_len);
	if (ret < 0)
		goto free_icresp;

	memset(&msg, 0, sizeof(msg));
	iov.iov_base = icresp;
	iov.iov_len = sizeof(*icresp);
	ret = kernel_recvmsg(queue->sock, &msg, &iov, 1,
			iov.iov_len, msg.msg_flags);
	if (ret < 0)
		goto free_icresp;

	ret = -EINVAL;
	if (icresp->hdr.type != nvme_tcp_icresp) {
		pr_err("queue %d: bad type returned %d\n",
			nvme_tcp_queue_id(queue), icresp->hdr.type);
		goto free_icresp;
	}

	if (le32_to_cpu(icresp->hdr.plen) != sizeof(*icresp)) {
		pr_err("queue %d: bad pdu length returned %d\n",
			nvme_tcp_queue_id(queue), icresp->hdr.plen);
		goto free_icresp;
	}

	if (icresp->pfv != NVME_TCP_PFV_1_0) {
		pr_err("queue %d: bad pfv returned %d\n",
			nvme_tcp_queue_id(queue), icresp->pfv);
		goto free_icresp;
	}

	ctrl_ddgst = !!(icresp->digest & NVME_TCP_DATA_DIGEST_ENABLE);
	if ((queue->data_digest && !ctrl_ddgst) ||
	    (!queue->data_digest && ctrl_ddgst)) {
		pr_err("queue %d: data digest mismatch host: %s ctrl: %s\n",
			nvme_tcp_queue_id(queue),
			queue->data_digest ? "enabled" : "disabled",
			ctrl_ddgst ? "enabled" : "disabled");
		goto free_icresp;
	}

	ctrl_hdgst = !!(icresp->digest & NVME_TCP_HDR_DIGEST_ENABLE);
	if ((queue->hdr_digest && !ctrl_hdgst) ||
	    (!queue->hdr_digest && ctrl_hdgst)) {
		pr_err("queue %d: header digest mismatch host: %s ctrl: %s\n",
			nvme_tcp_queue_id(queue),
			queue->hdr_digest ? "enabled" : "disabled",
			ctrl_hdgst ? "enabled" : "disabled");
		goto free_icresp;
	}

	if (icresp->cpda != 0) {
		pr_err("queue %d: unsupported cpda returned %d\n",
			nvme_tcp_queue_id(queue), icresp->cpda);
		goto free_icresp;
	}

	ret = 0;
free_icresp:
	kfree(icresp);
free_icreq:
	kfree(icreq);
	return ret;
}

static bool nvme_tcp_admin_queue(struct nvme_tcp_queue *queue)
{
	return nvme_tcp_queue_id(queue) == 0;
}

static bool nvme_tcp_default_queue(struct nvme_tcp_queue *queue)
{
	struct nvme_tcp_ctrl *ctrl = queue->ctrl;
	int qid = nvme_tcp_queue_id(queue);

	return !nvme_tcp_admin_queue(queue) &&
		qid < 1 + ctrl->io_queues[HCTX_TYPE_DEFAULT];
}

static bool nvme_tcp_read_queue(struct nvme_tcp_queue *queue)
{
	struct nvme_tcp_ctrl *ctrl = queue->ctrl;
	int qid = nvme_tcp_queue_id(queue);

	return !nvme_tcp_admin_queue(queue) &&
		!nvme_tcp_default_queue(queue) &&
		qid < 1 + ctrl->io_queues[HCTX_TYPE_DEFAULT] +
			  ctrl->io_queues[HCTX_TYPE_READ];
}

static bool nvme_tcp_poll_queue(struct nvme_tcp_queue *queue)
{
	struct nvme_tcp_ctrl *ctrl = queue->ctrl;
	int qid = nvme_tcp_queue_id(queue);

	return !nvme_tcp_admin_queue(queue) &&
		!nvme_tcp_default_queue(queue) &&
		!nvme_tcp_read_queue(queue) &&
		qid < 1 + ctrl->io_queues[HCTX_TYPE_DEFAULT] +
			  ctrl->io_queues[HCTX_TYPE_READ] +
			  ctrl->io_queues[HCTX_TYPE_POLL];
}

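/*
 * Pin each queue's io_work to a CPU, spreading default, read and poll
 * queues across the online CPUs within their respective qid ranges.
 */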
1393static void nvme_tcp_set_queue_io_cpu(struct nvme_tcp_queue *queue)
1394{
1395 struct nvme_tcp_ctrl *ctrl = queue->ctrl;
1396 int qid = nvme_tcp_queue_id(queue);
1397 int n = 0;
1398
1399 if (nvme_tcp_default_queue(queue))
1400 n = qid - 1;
1401 else if (nvme_tcp_read_queue(queue))
1402 n = qid - ctrl->io_queues[HCTX_TYPE_DEFAULT] - 1;
1403 else if (nvme_tcp_poll_queue(queue))
1404 n = qid - ctrl->io_queues[HCTX_TYPE_DEFAULT] -
1405 ctrl->io_queues[HCTX_TYPE_READ] - 1;
1406 queue->io_cpu = cpumask_next_wrap(n - 1, cpu_online_mask, -1, false);
1407}
1408
Sagi Grimberg3f2304f2018-12-03 17:52:17 -08001409static int nvme_tcp_alloc_queue(struct nvme_ctrl *nctrl,
1410 int qid, size_t queue_size)
1411{
1412 struct nvme_tcp_ctrl *ctrl = to_tcp_ctrl(nctrl);
1413 struct nvme_tcp_queue *queue = &ctrl->queues[qid];
Christoph Hellwig6ebf71b2020-05-28 07:12:26 +02001414 int ret, rcv_pdu_size;
Sagi Grimberg3f2304f2018-12-03 17:52:17 -08001415
Chao Leng9ebbfe42021-01-14 17:09:26 +08001416 mutex_init(&queue->queue_lock);
Sagi Grimberg3f2304f2018-12-03 17:52:17 -08001417 queue->ctrl = ctrl;
Sagi Grimberg15ec9282020-06-18 17:30:22 -07001418 init_llist_head(&queue->req_list);
Sagi Grimberg3f2304f2018-12-03 17:52:17 -08001419 INIT_LIST_HEAD(&queue->send_list);
Sagi Grimbergdb5ad6b2020-05-01 14:25:45 -07001420 mutex_init(&queue->send_mutex);
Sagi Grimberg3f2304f2018-12-03 17:52:17 -08001421 INIT_WORK(&queue->io_work, nvme_tcp_io_work);
1422 queue->queue_size = queue_size;
1423
1424 if (qid > 0)
Israel Rukshin9924b032019-08-18 12:08:53 +03001425 queue->cmnd_capsule_len = nctrl->ioccsz * 16;
Sagi Grimberg3f2304f2018-12-03 17:52:17 -08001426 else
1427 queue->cmnd_capsule_len = sizeof(struct nvme_command) +
1428 NVME_TCP_ADMIN_CCSZ;
1429
1430 ret = sock_create(ctrl->addr.ss_family, SOCK_STREAM,
1431 IPPROTO_TCP, &queue->sock);
1432 if (ret) {
Israel Rukshin9924b032019-08-18 12:08:53 +03001433 dev_err(nctrl->device,
Sagi Grimberg3f2304f2018-12-03 17:52:17 -08001434 "failed to create socket: %d\n", ret);
Chao Leng9ebbfe42021-01-14 17:09:26 +08001435 goto err_destroy_mutex;
Sagi Grimberg3f2304f2018-12-03 17:52:17 -08001436 }
1437
1438 /* Single syn retry */
Christoph Hellwig557eadf2020-05-28 07:12:21 +02001439 tcp_sock_set_syncnt(queue->sock->sk, 1);
Sagi Grimberg3f2304f2018-12-03 17:52:17 -08001440
1441 /* Set TCP no delay */
Christoph Hellwig12abc5e2020-05-28 07:12:19 +02001442 tcp_sock_set_nodelay(queue->sock->sk);
Sagi Grimberg3f2304f2018-12-03 17:52:17 -08001443
1444 /*
1445 * Cleanup whatever is sitting in the TCP transmit queue on socket
1446 * close. This is done to prevent stale data from being sent should
1447 * the network connection be restored before TCP times out.
1448 */
Christoph Hellwigc4335942020-05-28 07:12:10 +02001449 sock_no_linger(queue->sock->sk);
Sagi Grimberg3f2304f2018-12-03 17:52:17 -08001450
Christoph Hellwig6e434962020-05-28 07:12:11 +02001451 if (so_priority > 0)
1452 sock_set_priority(queue->sock->sk, so_priority);
Wunderlich, Mark9912ade2020-01-16 00:46:12 +00001453
Israel Rukshinbb139852019-08-18 12:08:54 +03001454 /* Set socket type of service */
Christoph Hellwig6ebf71b2020-05-28 07:12:26 +02001455 if (nctrl->opts->tos >= 0)
1456 ip_sock_set_tos(queue->sock->sk, nctrl->opts->tos);
Israel Rukshinbb139852019-08-18 12:08:54 +03001457
Sagi Grimbergadc99fd2020-07-23 16:42:26 -07001458 /* Set 10 seconds timeout for icresp recvmsg */
1459 queue->sock->sk->sk_rcvtimeo = 10 * HZ;
1460
Sagi Grimberg3f2304f2018-12-03 17:52:17 -08001461 queue->sock->sk->sk_allocation = GFP_ATOMIC;
Sagi Grimberg40510a62020-02-25 15:53:09 -08001462 nvme_tcp_set_queue_io_cpu(queue);
Sagi Grimberg3f2304f2018-12-03 17:52:17 -08001463 queue->request = NULL;
1464 queue->data_remaining = 0;
1465 queue->ddgst_remaining = 0;
1466 queue->pdu_remaining = 0;
1467 queue->pdu_offset = 0;
1468 sk_set_memalloc(queue->sock->sk);
1469
Israel Rukshin9924b032019-08-18 12:08:53 +03001470 if (nctrl->opts->mask & NVMF_OPT_HOST_TRADDR) {
Sagi Grimberg3f2304f2018-12-03 17:52:17 -08001471 ret = kernel_bind(queue->sock, (struct sockaddr *)&ctrl->src_addr,
1472 sizeof(ctrl->src_addr));
1473 if (ret) {
Israel Rukshin9924b032019-08-18 12:08:53 +03001474 dev_err(nctrl->device,
Sagi Grimberg3f2304f2018-12-03 17:52:17 -08001475 "failed to bind queue %d socket %d\n",
1476 qid, ret);
1477 goto err_sock;
1478 }
1479 }
1480
Martin Belanger3ede8f72021-05-20 15:09:34 -04001481 if (nctrl->opts->mask & NVMF_OPT_HOST_IFACE) {
1482 char *iface = nctrl->opts->host_iface;
1483 sockptr_t optval = KERNEL_SOCKPTR(iface);
1484
1485 ret = sock_setsockopt(queue->sock, SOL_SOCKET, SO_BINDTODEVICE,
1486 optval, strlen(iface));
1487 if (ret) {
1488 dev_err(nctrl->device,
1489 "failed to bind to interface %s queue %d err %d\n",
1490 iface, qid, ret);
1491 goto err_sock;
1492 }
1493 }
1494
Sagi Grimberg3f2304f2018-12-03 17:52:17 -08001495 queue->hdr_digest = nctrl->opts->hdr_digest;
1496 queue->data_digest = nctrl->opts->data_digest;
1497 if (queue->hdr_digest || queue->data_digest) {
1498 ret = nvme_tcp_alloc_crypto(queue);
1499 if (ret) {
Israel Rukshin9924b032019-08-18 12:08:53 +03001500 dev_err(nctrl->device,
Sagi Grimberg3f2304f2018-12-03 17:52:17 -08001501 "failed to allocate queue %d crypto\n", qid);
1502 goto err_sock;
1503 }
1504 }
1505
1506 rcv_pdu_size = sizeof(struct nvme_tcp_rsp_pdu) +
1507 nvme_tcp_hdgst_len(queue);
1508 queue->pdu = kmalloc(rcv_pdu_size, GFP_KERNEL);
1509 if (!queue->pdu) {
1510 ret = -ENOMEM;
1511 goto err_crypto;
1512 }
1513
Israel Rukshin9924b032019-08-18 12:08:53 +03001514 dev_dbg(nctrl->device, "connecting queue %d\n",
Sagi Grimberg3f2304f2018-12-03 17:52:17 -08001515 nvme_tcp_queue_id(queue));
1516
1517 ret = kernel_connect(queue->sock, (struct sockaddr *)&ctrl->addr,
1518 sizeof(ctrl->addr), 0);
1519 if (ret) {
Israel Rukshin9924b032019-08-18 12:08:53 +03001520 dev_err(nctrl->device,
Sagi Grimberg3f2304f2018-12-03 17:52:17 -08001521 "failed to connect socket: %d\n", ret);
1522 goto err_rcv_pdu;
1523 }
1524
1525 ret = nvme_tcp_init_connection(queue);
1526 if (ret)
1527 goto err_init_connect;
1528
1529 queue->rd_enabled = true;
1530 set_bit(NVME_TCP_Q_ALLOCATED, &queue->flags);
1531 nvme_tcp_init_recv_ctx(queue);
1532
        write_lock_bh(&queue->sock->sk->sk_callback_lock);
        queue->sock->sk->sk_user_data = queue;
        queue->state_change = queue->sock->sk->sk_state_change;
        queue->data_ready = queue->sock->sk->sk_data_ready;
        queue->write_space = queue->sock->sk->sk_write_space;
        queue->sock->sk->sk_data_ready = nvme_tcp_data_ready;
        queue->sock->sk->sk_state_change = nvme_tcp_state_change;
        queue->sock->sk->sk_write_space = nvme_tcp_write_space;
#ifdef CONFIG_NET_RX_BUSY_POLL
        queue->sock->sk->sk_ll_usec = 1;
#endif
        write_unlock_bh(&queue->sock->sk->sk_callback_lock);

        return 0;

err_init_connect:
        kernel_sock_shutdown(queue->sock, SHUT_RDWR);
err_rcv_pdu:
        kfree(queue->pdu);
err_crypto:
        if (queue->hdr_digest || queue->data_digest)
                nvme_tcp_free_crypto(queue);
err_sock:
        sock_release(queue->sock);
        queue->sock = NULL;
err_destroy_mutex:
        mutex_destroy(&queue->send_mutex);
        mutex_destroy(&queue->queue_lock);
        return ret;
}

static void nvme_tcp_restore_sock_calls(struct nvme_tcp_queue *queue)
{
        struct socket *sock = queue->sock;

        write_lock_bh(&sock->sk->sk_callback_lock);
        sock->sk->sk_user_data = NULL;
        sock->sk->sk_data_ready = queue->data_ready;
        sock->sk->sk_state_change = queue->state_change;
        sock->sk->sk_write_space = queue->write_space;
        write_unlock_bh(&sock->sk->sk_callback_lock);
}

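/*
 * Quiesce a queue in strict order: shut the socket down so no more bytes
 * move in either direction, restore the original socket callbacks so our
 * handlers can no longer be invoked, then flush any io_work that is
 * still running.
 */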
static void __nvme_tcp_stop_queue(struct nvme_tcp_queue *queue)
{
        kernel_sock_shutdown(queue->sock, SHUT_RDWR);
        nvme_tcp_restore_sock_calls(queue);
        cancel_work_sync(&queue->io_work);
}

static void nvme_tcp_stop_queue(struct nvme_ctrl *nctrl, int qid)
{
        struct nvme_tcp_ctrl *ctrl = to_tcp_ctrl(nctrl);
        struct nvme_tcp_queue *queue = &ctrl->queues[qid];

        mutex_lock(&queue->queue_lock);
        if (test_and_clear_bit(NVME_TCP_Q_LIVE, &queue->flags))
                __nvme_tcp_stop_queue(queue);
        mutex_unlock(&queue->queue_lock);
}

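/*
 * Issue the fabrics Connect command on an allocated queue (admin for
 * index 0, I/O otherwise) and mark it LIVE on success; on failure the
 * queue is stopped again if it had been allocated.
 */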
static int nvme_tcp_start_queue(struct nvme_ctrl *nctrl, int idx)
{
        struct nvme_tcp_ctrl *ctrl = to_tcp_ctrl(nctrl);
        int ret;

        if (idx)
                ret = nvmf_connect_io_queue(nctrl, idx);
        else
                ret = nvmf_connect_admin_queue(nctrl);

        if (!ret) {
                set_bit(NVME_TCP_Q_LIVE, &ctrl->queues[idx].flags);
        } else {
                if (test_bit(NVME_TCP_Q_ALLOCATED, &ctrl->queues[idx].flags))
                        __nvme_tcp_stop_queue(&ctrl->queues[idx]);
                dev_err(nctrl->device,
                        "failed to connect queue: %d ret=%d\n", idx, ret);
        }
        return ret;
}

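/*
 * Set up one of the two blk-mq tag sets: a single-queue admin set or the
 * I/O set spanning all hardware contexts. BLK_MQ_F_BLOCKING is needed
 * because the inline send path may block on the socket, and a poll map
 * is only instantiated when dedicated poll queues were requested.
 */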
static struct blk_mq_tag_set *nvme_tcp_alloc_tagset(struct nvme_ctrl *nctrl,
                bool admin)
{
        struct nvme_tcp_ctrl *ctrl = to_tcp_ctrl(nctrl);
        struct blk_mq_tag_set *set;
        int ret;

        if (admin) {
                set = &ctrl->admin_tag_set;
                memset(set, 0, sizeof(*set));
                set->ops = &nvme_tcp_admin_mq_ops;
                set->queue_depth = NVME_AQ_MQ_TAG_DEPTH;
                set->reserved_tags = NVMF_RESERVED_TAGS;
                set->numa_node = nctrl->numa_node;
                set->flags = BLK_MQ_F_BLOCKING;
                set->cmd_size = sizeof(struct nvme_tcp_request);
                set->driver_data = ctrl;
                set->nr_hw_queues = 1;
                set->timeout = NVME_ADMIN_TIMEOUT;
        } else {
                set = &ctrl->tag_set;
                memset(set, 0, sizeof(*set));
                set->ops = &nvme_tcp_mq_ops;
                set->queue_depth = nctrl->sqsize + 1;
                set->reserved_tags = NVMF_RESERVED_TAGS;
                set->numa_node = nctrl->numa_node;
                set->flags = BLK_MQ_F_SHOULD_MERGE | BLK_MQ_F_BLOCKING;
                set->cmd_size = sizeof(struct nvme_tcp_request);
                set->driver_data = ctrl;
                set->nr_hw_queues = nctrl->queue_count - 1;
                set->timeout = NVME_IO_TIMEOUT;
                set->nr_maps = nctrl->opts->nr_poll_queues ? HCTX_MAX_TYPES : 2;
        }

        ret = blk_mq_alloc_tag_set(set);
        if (ret)
                return ERR_PTR(ret);

        return set;
}

static void nvme_tcp_free_admin_queue(struct nvme_ctrl *ctrl)
{
        if (to_tcp_ctrl(ctrl)->async_req.pdu) {
                cancel_work_sync(&ctrl->async_event_work);
                nvme_tcp_free_async_req(to_tcp_ctrl(ctrl));
                to_tcp_ctrl(ctrl)->async_req.pdu = NULL;
        }

        nvme_tcp_free_queue(ctrl, 0);
}

static void nvme_tcp_free_io_queues(struct nvme_ctrl *ctrl)
{
        int i;

        for (i = 1; i < ctrl->queue_count; i++)
                nvme_tcp_free_queue(ctrl, i);
}

static void nvme_tcp_stop_io_queues(struct nvme_ctrl *ctrl)
{
        int i;

        for (i = 1; i < ctrl->queue_count; i++)
                nvme_tcp_stop_queue(ctrl, i);
}

static int nvme_tcp_start_io_queues(struct nvme_ctrl *ctrl)
{
        int i, ret = 0;

        for (i = 1; i < ctrl->queue_count; i++) {
                ret = nvme_tcp_start_queue(ctrl, i);
                if (ret)
                        goto out_stop_queues;
        }

        return 0;

out_stop_queues:
        for (i--; i >= 1; i--)
                nvme_tcp_stop_queue(ctrl, i);
        return ret;
}

static int nvme_tcp_alloc_admin_queue(struct nvme_ctrl *ctrl)
{
        int ret;

        ret = nvme_tcp_alloc_queue(ctrl, 0, NVME_AQ_DEPTH);
        if (ret)
                return ret;

        ret = nvme_tcp_alloc_async_req(to_tcp_ctrl(ctrl));
        if (ret)
                goto out_free_queue;

        return 0;

out_free_queue:
        nvme_tcp_free_queue(ctrl, 0);
        return ret;
}

static int __nvme_tcp_alloc_io_queues(struct nvme_ctrl *ctrl)
{
        int i, ret;

        for (i = 1; i < ctrl->queue_count; i++) {
                ret = nvme_tcp_alloc_queue(ctrl, i,
                                ctrl->sqsize + 1);
                if (ret)
                        goto out_free_queues;
        }

        return 0;

out_free_queues:
        for (i--; i >= 1; i--)
                nvme_tcp_free_queue(ctrl, i);

        return ret;
}

static unsigned int nvme_tcp_nr_io_queues(struct nvme_ctrl *ctrl)
{
        unsigned int nr_io_queues;

        nr_io_queues = min(ctrl->opts->nr_io_queues, num_online_cpus());
        nr_io_queues += min(ctrl->opts->nr_write_queues, num_online_cpus());
        nr_io_queues += min(ctrl->opts->nr_poll_queues, num_online_cpus());

        return nr_io_queues;
}

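/*
 * Distribute the granted queues among the HCTX types. Read queues are
 * carved out first when dedicated write queues were requested, then
 * default (write) queues, and poll queues take whatever is left. For
 * example, with nr_io_queues=4, nr_write_queues=2 and nr_poll_queues=2,
 * and assuming the controller grants all 8, this yields 2 default,
 * 4 read and 2 poll queues.
 */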
static void nvme_tcp_set_io_queues(struct nvme_ctrl *nctrl,
                unsigned int nr_io_queues)
{
        struct nvme_tcp_ctrl *ctrl = to_tcp_ctrl(nctrl);
        struct nvmf_ctrl_options *opts = nctrl->opts;

        if (opts->nr_write_queues && opts->nr_io_queues < nr_io_queues) {
                /*
                 * separate read/write queues
                 * hand out dedicated default queues only after we have
                 * sufficient read queues.
                 */
                ctrl->io_queues[HCTX_TYPE_READ] = opts->nr_io_queues;
                nr_io_queues -= ctrl->io_queues[HCTX_TYPE_READ];
                ctrl->io_queues[HCTX_TYPE_DEFAULT] =
                        min(opts->nr_write_queues, nr_io_queues);
                nr_io_queues -= ctrl->io_queues[HCTX_TYPE_DEFAULT];
        } else {
                /*
                 * shared read/write queues
                 * either no write queues were requested, or we don't have
                 * sufficient queue count to have dedicated default queues.
                 */
                ctrl->io_queues[HCTX_TYPE_DEFAULT] =
                        min(opts->nr_io_queues, nr_io_queues);
                nr_io_queues -= ctrl->io_queues[HCTX_TYPE_DEFAULT];
        }

        if (opts->nr_poll_queues && nr_io_queues) {
                /* map dedicated poll queues only if we have queues left */
                ctrl->io_queues[HCTX_TYPE_POLL] =
                        min(opts->nr_poll_queues, nr_io_queues);
        }
}

static int nvme_tcp_alloc_io_queues(struct nvme_ctrl *ctrl)
{
        unsigned int nr_io_queues;
        int ret;

        nr_io_queues = nvme_tcp_nr_io_queues(ctrl);
        ret = nvme_set_queue_count(ctrl, &nr_io_queues);
        if (ret)
                return ret;

        if (nr_io_queues == 0) {
                dev_err(ctrl->device,
                        "unable to set any I/O queues\n");
                return -ENOMEM;
        }

        ctrl->queue_count = nr_io_queues + 1;
        dev_info(ctrl->device,
                "creating %d I/O queues.\n", nr_io_queues);

        nvme_tcp_set_io_queues(ctrl, nr_io_queues);

        return __nvme_tcp_alloc_io_queues(ctrl);
}

static void nvme_tcp_destroy_io_queues(struct nvme_ctrl *ctrl, bool remove)
{
        nvme_tcp_stop_io_queues(ctrl);
        if (remove) {
                blk_cleanup_queue(ctrl->connect_q);
                blk_mq_free_tag_set(ctrl->tagset);
        }
        nvme_tcp_free_io_queues(ctrl);
}

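/*
 * Bring up the I/O side. On first creation (new) this also allocates the
 * tag set and the connect_q used to issue Connect commands; on a
 * reconnect it instead waits for the freeze started at teardown to
 * complete and resizes nr_hw_queues in case the controller granted a
 * different queue count this time around.
 */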
static int nvme_tcp_configure_io_queues(struct nvme_ctrl *ctrl, bool new)
{
        int ret;

        ret = nvme_tcp_alloc_io_queues(ctrl);
        if (ret)
                return ret;

        if (new) {
                ctrl->tagset = nvme_tcp_alloc_tagset(ctrl, false);
                if (IS_ERR(ctrl->tagset)) {
                        ret = PTR_ERR(ctrl->tagset);
                        goto out_free_io_queues;
                }

                ctrl->connect_q = blk_mq_init_queue(ctrl->tagset);
                if (IS_ERR(ctrl->connect_q)) {
                        ret = PTR_ERR(ctrl->connect_q);
                        goto out_free_tag_set;
                }
        }

        ret = nvme_tcp_start_io_queues(ctrl);
        if (ret)
                goto out_cleanup_connect_q;

        if (!new) {
                nvme_start_queues(ctrl);
                if (!nvme_wait_freeze_timeout(ctrl, NVME_IO_TIMEOUT)) {
                        /*
                         * If we timed out waiting for freeze we are likely to
                         * be stuck.  Fail the controller initialization just
                         * to be safe.
                         */
                        ret = -ENODEV;
                        goto out_wait_freeze_timed_out;
                }
                blk_mq_update_nr_hw_queues(ctrl->tagset,
                        ctrl->queue_count - 1);
                nvme_unfreeze(ctrl);
        }

        return 0;

out_wait_freeze_timed_out:
        nvme_stop_queues(ctrl);
        nvme_sync_io_queues(ctrl);
        nvme_tcp_stop_io_queues(ctrl);
out_cleanup_connect_q:
        nvme_cancel_tagset(ctrl);
        if (new)
                blk_cleanup_queue(ctrl->connect_q);
out_free_tag_set:
        if (new)
                blk_mq_free_tag_set(ctrl->tagset);
out_free_io_queues:
        nvme_tcp_free_io_queues(ctrl);
        return ret;
}

static void nvme_tcp_destroy_admin_queue(struct nvme_ctrl *ctrl, bool remove)
{
        nvme_tcp_stop_queue(ctrl, 0);
        if (remove) {
                blk_cleanup_queue(ctrl->admin_q);
                blk_cleanup_queue(ctrl->fabrics_q);
                blk_mq_free_tag_set(ctrl->admin_tagset);
        }
        nvme_tcp_free_admin_queue(ctrl);
}

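/*
 * Admin bring-up order matters: allocate and connect the admin queue,
 * enable the controller, unquiesce the admin request queue, and only
 * then run the generic controller initialization. The error path
 * unwinds in the exact opposite order.
 */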
static int nvme_tcp_configure_admin_queue(struct nvme_ctrl *ctrl, bool new)
{
        int error;

        error = nvme_tcp_alloc_admin_queue(ctrl);
        if (error)
                return error;

        if (new) {
                ctrl->admin_tagset = nvme_tcp_alloc_tagset(ctrl, true);
                if (IS_ERR(ctrl->admin_tagset)) {
                        error = PTR_ERR(ctrl->admin_tagset);
                        goto out_free_queue;
                }

                ctrl->fabrics_q = blk_mq_init_queue(ctrl->admin_tagset);
                if (IS_ERR(ctrl->fabrics_q)) {
                        error = PTR_ERR(ctrl->fabrics_q);
                        goto out_free_tagset;
                }

                ctrl->admin_q = blk_mq_init_queue(ctrl->admin_tagset);
                if (IS_ERR(ctrl->admin_q)) {
                        error = PTR_ERR(ctrl->admin_q);
                        goto out_cleanup_fabrics_q;
                }
        }

        error = nvme_tcp_start_queue(ctrl, 0);
        if (error)
                goto out_cleanup_queue;

        error = nvme_enable_ctrl(ctrl);
        if (error)
                goto out_stop_queue;

        nvme_start_admin_queue(ctrl);

        error = nvme_init_ctrl_finish(ctrl);
        if (error)
                goto out_quiesce_queue;

        return 0;

out_quiesce_queue:
        nvme_stop_admin_queue(ctrl);
        blk_sync_queue(ctrl->admin_q);
out_stop_queue:
        nvme_tcp_stop_queue(ctrl, 0);
        nvme_cancel_admin_tagset(ctrl);
out_cleanup_queue:
        if (new)
                blk_cleanup_queue(ctrl->admin_q);
out_cleanup_fabrics_q:
        if (new)
                blk_cleanup_queue(ctrl->fabrics_q);
out_free_tagset:
        if (new)
                blk_mq_free_tag_set(ctrl->admin_tagset);
out_free_queue:
        nvme_tcp_free_admin_queue(ctrl);
        return error;
}

static void nvme_tcp_teardown_admin_queue(struct nvme_ctrl *ctrl,
                bool remove)
{
        nvme_stop_admin_queue(ctrl);
        blk_sync_queue(ctrl->admin_q);
        nvme_tcp_stop_queue(ctrl, 0);
        nvme_cancel_admin_tagset(ctrl);
        if (remove)
                nvme_start_admin_queue(ctrl);
        nvme_tcp_destroy_admin_queue(ctrl, remove);
}

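/*
 * Tear down the I/O side: quiesce the admin and I/O queues, start a
 * freeze so a later reconnect can wait for in-flight I/O to drain, stop
 * the TCP queues, and cancel any requests still outstanding. The queues
 * are only unquiesced here when the controller is going away; on a
 * reset/reconnect they stay quiesced until new queues are started.
 */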
static void nvme_tcp_teardown_io_queues(struct nvme_ctrl *ctrl,
                bool remove)
{
        if (ctrl->queue_count <= 1)
                return;
        nvme_stop_admin_queue(ctrl);
        nvme_start_freeze(ctrl);
        nvme_stop_queues(ctrl);
        nvme_sync_io_queues(ctrl);
        nvme_tcp_stop_io_queues(ctrl);
        nvme_cancel_tagset(ctrl);
        if (remove)
                nvme_start_queues(ctrl);
        nvme_tcp_destroy_io_queues(ctrl, remove);
}

static void nvme_tcp_reconnect_or_remove(struct nvme_ctrl *ctrl)
{
        /* If we are resetting/deleting then do nothing */
        if (ctrl->state != NVME_CTRL_CONNECTING) {
                WARN_ON_ONCE(ctrl->state == NVME_CTRL_NEW ||
                        ctrl->state == NVME_CTRL_LIVE);
                return;
        }

        if (nvmf_should_reconnect(ctrl)) {
                dev_info(ctrl->device, "Reconnecting in %d seconds...\n",
                        ctrl->opts->reconnect_delay);
                queue_delayed_work(nvme_wq, &to_tcp_ctrl(ctrl)->connect_work,
                                ctrl->opts->reconnect_delay * HZ);
        } else {
                dev_info(ctrl->device, "Removing controller...\n");
                nvme_delete_ctrl(ctrl);
        }
}

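/*
 * Common bring-up path shared by create, reset and reconnect. Once the
 * admin queue is up, the controller's capabilities are validated (a
 * nonzero ICDOFF and missing SGL support are both fatal for TCP), the
 * queue sizes are clamped to what the controller reports, and the I/O
 * queues are configured before the controller transitions to LIVE.
 */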
static int nvme_tcp_setup_ctrl(struct nvme_ctrl *ctrl, bool new)
{
        struct nvmf_ctrl_options *opts = ctrl->opts;
        int ret;

        ret = nvme_tcp_configure_admin_queue(ctrl, new);
        if (ret)
                return ret;

        if (ctrl->icdoff) {
                ret = -EOPNOTSUPP;
                dev_err(ctrl->device, "icdoff is not supported!\n");
                goto destroy_admin;
        }

        if (!nvme_ctrl_sgl_supported(ctrl)) {
                ret = -EOPNOTSUPP;
                dev_err(ctrl->device, "Mandatory sgls are not supported!\n");
                goto destroy_admin;
        }

        if (opts->queue_size > ctrl->sqsize + 1)
                dev_warn(ctrl->device,
                        "queue_size %zu > ctrl sqsize %u, clamping down\n",
                        opts->queue_size, ctrl->sqsize + 1);

        if (ctrl->sqsize + 1 > ctrl->maxcmd) {
                dev_warn(ctrl->device,
                        "sqsize %u > ctrl maxcmd %u, clamping down\n",
                        ctrl->sqsize + 1, ctrl->maxcmd);
                ctrl->sqsize = ctrl->maxcmd - 1;
        }

        if (ctrl->queue_count > 1) {
                ret = nvme_tcp_configure_io_queues(ctrl, new);
                if (ret)
                        goto destroy_admin;
        }

        if (!nvme_change_ctrl_state(ctrl, NVME_CTRL_LIVE)) {
                /*
                 * state change failure is ok if we started ctrl delete,
                 * unless we're in the middle of creating a new controller,
                 * where it would race with the teardown flow.
                 */
                WARN_ON_ONCE(ctrl->state != NVME_CTRL_DELETING &&
                             ctrl->state != NVME_CTRL_DELETING_NOIO);
                WARN_ON_ONCE(new);
                ret = -EINVAL;
                goto destroy_io;
        }

        nvme_start_ctrl(ctrl);
        return 0;

destroy_io:
        if (ctrl->queue_count > 1) {
                nvme_stop_queues(ctrl);
                nvme_sync_io_queues(ctrl);
                nvme_tcp_stop_io_queues(ctrl);
                nvme_cancel_tagset(ctrl);
                nvme_tcp_destroy_io_queues(ctrl, new);
        }
destroy_admin:
        nvme_stop_admin_queue(ctrl);
        blk_sync_queue(ctrl->admin_q);
        nvme_tcp_stop_queue(ctrl, 0);
        nvme_cancel_admin_tagset(ctrl);
        nvme_tcp_destroy_admin_queue(ctrl, new);
        return ret;
}

static void nvme_tcp_reconnect_ctrl_work(struct work_struct *work)
{
        struct nvme_tcp_ctrl *tcp_ctrl = container_of(to_delayed_work(work),
                        struct nvme_tcp_ctrl, connect_work);
        struct nvme_ctrl *ctrl = &tcp_ctrl->ctrl;

        ++ctrl->nr_reconnects;

        if (nvme_tcp_setup_ctrl(ctrl, false))
                goto requeue;

        dev_info(ctrl->device, "Successfully reconnected (%d attempt)\n",
                        ctrl->nr_reconnects);

        ctrl->nr_reconnects = 0;

        return;

requeue:
        dev_info(ctrl->device, "Failed reconnect attempt %d\n",
                        ctrl->nr_reconnects);
        nvme_tcp_reconnect_or_remove(ctrl);
}

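/*
 * Error recovery: stop keep-alive and flush async event handling, tear
 * down both the I/O and admin sides without removing them, and unquiesce
 * so pending requests fail fast and can be retried or failed over. The
 * controller then moves to CONNECTING and the reconnect logic decides
 * whether to retry or remove it.
 */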
static void nvme_tcp_error_recovery_work(struct work_struct *work)
{
        struct nvme_tcp_ctrl *tcp_ctrl = container_of(work,
                                struct nvme_tcp_ctrl, err_work);
        struct nvme_ctrl *ctrl = &tcp_ctrl->ctrl;

        nvme_stop_keep_alive(ctrl);
        flush_work(&ctrl->async_event_work);
        nvme_tcp_teardown_io_queues(ctrl, false);
        /* unquiesce to fast-fail any pending requests */
        nvme_start_queues(ctrl);
        nvme_tcp_teardown_admin_queue(ctrl, false);
        nvme_start_admin_queue(ctrl);

        if (!nvme_change_ctrl_state(ctrl, NVME_CTRL_CONNECTING)) {
                /* state change failure is ok if we started ctrl delete */
                WARN_ON_ONCE(ctrl->state != NVME_CTRL_DELETING &&
                             ctrl->state != NVME_CTRL_DELETING_NOIO);
                return;
        }

        nvme_tcp_reconnect_or_remove(ctrl);
}

static void nvme_tcp_teardown_ctrl(struct nvme_ctrl *ctrl, bool shutdown)
{
        cancel_work_sync(&to_tcp_ctrl(ctrl)->err_work);
        cancel_delayed_work_sync(&to_tcp_ctrl(ctrl)->connect_work);

        nvme_tcp_teardown_io_queues(ctrl, shutdown);
        nvme_stop_admin_queue(ctrl);
        if (shutdown)
                nvme_shutdown_ctrl(ctrl);
        else
                nvme_disable_ctrl(ctrl);
        nvme_tcp_teardown_admin_queue(ctrl, shutdown);
}

static void nvme_tcp_delete_ctrl(struct nvme_ctrl *ctrl)
{
        nvme_tcp_teardown_ctrl(ctrl, true);
}

static void nvme_reset_ctrl_work(struct work_struct *work)
{
        struct nvme_ctrl *ctrl =
                container_of(work, struct nvme_ctrl, reset_work);

        nvme_stop_ctrl(ctrl);
        nvme_tcp_teardown_ctrl(ctrl, false);

        if (!nvme_change_ctrl_state(ctrl, NVME_CTRL_CONNECTING)) {
                /* state change failure is ok if we started ctrl delete */
                WARN_ON_ONCE(ctrl->state != NVME_CTRL_DELETING &&
                             ctrl->state != NVME_CTRL_DELETING_NOIO);
                return;
        }

        if (nvme_tcp_setup_ctrl(ctrl, false))
                goto out_fail;

        return;

out_fail:
        ++ctrl->nr_reconnects;
        nvme_tcp_reconnect_or_remove(ctrl);
}

static void nvme_tcp_free_ctrl(struct nvme_ctrl *nctrl)
{
        struct nvme_tcp_ctrl *ctrl = to_tcp_ctrl(nctrl);

        if (list_empty(&ctrl->list))
                goto free_ctrl;

        mutex_lock(&nvme_tcp_ctrl_mutex);
        list_del(&ctrl->list);
        mutex_unlock(&nvme_tcp_ctrl_mutex);

        nvmf_free_options(nctrl->opts);
free_ctrl:
        kfree(ctrl->queues);
        kfree(ctrl);
}

static void nvme_tcp_set_sg_null(struct nvme_command *c)
{
        struct nvme_sgl_desc *sg = &c->common.dptr.sgl;

        sg->addr = 0;
        sg->length = 0;
        sg->type = (NVME_TRANSPORT_SGL_DATA_DESC << 4) |
                        NVME_SGL_FMT_TRANSPORT_A;
}

static void nvme_tcp_set_sg_inline(struct nvme_tcp_queue *queue,
                struct nvme_command *c, u32 data_len)
{
        struct nvme_sgl_desc *sg = &c->common.dptr.sgl;

        sg->addr = cpu_to_le64(queue->ctrl->ctrl.icdoff);
        sg->length = cpu_to_le32(data_len);
        sg->type = (NVME_SGL_FMT_DATA_DESC << 4) | NVME_SGL_FMT_OFFSET;
}

static void nvme_tcp_set_sg_host_data(struct nvme_command *c,
                u32 data_len)
{
        struct nvme_sgl_desc *sg = &c->common.dptr.sgl;

        sg->addr = 0;
        sg->length = cpu_to_le32(data_len);
        sg->type = (NVME_TRANSPORT_SGL_DATA_DESC << 4) |
                        NVME_SGL_FMT_TRANSPORT_A;
}

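/*
 * Build and queue the AER command by hand: it has no struct request
 * behind it, so it uses the command id NVME_AQ_BLK_MQ_DEPTH, which lies
 * outside the blk-mq tag space, carries a NULL transport SGL, and is
 * pushed through the regular send path on the admin queue.
 */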
static void nvme_tcp_submit_async_event(struct nvme_ctrl *arg)
{
        struct nvme_tcp_ctrl *ctrl = to_tcp_ctrl(arg);
        struct nvme_tcp_queue *queue = &ctrl->queues[0];
        struct nvme_tcp_cmd_pdu *pdu = ctrl->async_req.pdu;
        struct nvme_command *cmd = &pdu->cmd;
        u8 hdgst = nvme_tcp_hdgst_len(queue);

        memset(pdu, 0, sizeof(*pdu));
        pdu->hdr.type = nvme_tcp_cmd;
        if (queue->hdr_digest)
                pdu->hdr.flags |= NVME_TCP_F_HDGST;
        pdu->hdr.hlen = sizeof(*pdu);
        pdu->hdr.plen = cpu_to_le32(pdu->hdr.hlen + hdgst);

        cmd->common.opcode = nvme_admin_async_event;
        cmd->common.command_id = NVME_AQ_BLK_MQ_DEPTH;
        cmd->common.flags |= NVME_CMD_SGL_METABUF;
        nvme_tcp_set_sg_null(cmd);

        ctrl->async_req.state = NVME_TCP_SEND_CMD_PDU;
        ctrl->async_req.offset = 0;
        ctrl->async_req.curr_bio = NULL;
        ctrl->async_req.data_len = 0;

        nvme_tcp_queue_request(&ctrl->async_req, true, true);
}

static void nvme_tcp_complete_timed_out(struct request *rq)
{
        struct nvme_tcp_request *req = blk_mq_rq_to_pdu(rq);
        struct nvme_ctrl *ctrl = &req->queue->ctrl->ctrl;

        nvme_tcp_stop_queue(ctrl, nvme_tcp_queue_id(req->queue));
        if (blk_mq_request_started(rq) && !blk_mq_request_completed(rq)) {
                nvme_req(rq)->status = NVME_SC_HOST_ABORTED_CMD;
                blk_mq_complete_request(rq);
        }
}

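/*
 * blk-mq timeout handler. When the controller is LIVE a timeout kicks
 * the normal error recovery and the timer is re-armed; in any other
 * state the request is completed here directly, since error recovery is
 * not running and the request could otherwise block teardown or setup
 * forever.
 */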
static enum blk_eh_timer_return
nvme_tcp_timeout(struct request *rq, bool reserved)
{
        struct nvme_tcp_request *req = blk_mq_rq_to_pdu(rq);
        struct nvme_ctrl *ctrl = &req->queue->ctrl->ctrl;
        struct nvme_tcp_cmd_pdu *pdu = req->pdu;

        dev_warn(ctrl->device,
                "queue %d: timeout request %#x type %d\n",
                nvme_tcp_queue_id(req->queue), rq->tag, pdu->hdr.type);

        if (ctrl->state != NVME_CTRL_LIVE) {
                /*
                 * If we are resetting, connecting or deleting we should
                 * complete immediately because we may block the controller
                 * teardown or setup sequence
                 *  - ctrl disable/shutdown fabrics requests
                 *  - connect requests
                 *  - initialization admin requests
                 *  - I/O requests that entered after unquiescing and
                 *    the controller stopped responding
                 *
                 * All other requests should be cancelled by the error
                 * recovery work, so it's fine to fail this one here.
                 */
                nvme_tcp_complete_timed_out(rq);
                return BLK_EH_DONE;
        }

        /*
         * LIVE state should trigger the normal error recovery which will
         * handle completing this request.
         */
        nvme_tcp_error_recovery(ctrl);
        return BLK_EH_RESET_TIMER;
}

static blk_status_t nvme_tcp_map_data(struct nvme_tcp_queue *queue,
                struct request *rq)
{
        struct nvme_tcp_request *req = blk_mq_rq_to_pdu(rq);
        struct nvme_tcp_cmd_pdu *pdu = req->pdu;
        struct nvme_command *c = &pdu->cmd;

        c->common.flags |= NVME_CMD_SGL_METABUF;

        if (!blk_rq_nr_phys_segments(rq))
                nvme_tcp_set_sg_null(c);
        else if (rq_data_dir(rq) == WRITE &&
            req->data_len <= nvme_tcp_inline_data_size(queue))
                nvme_tcp_set_sg_inline(queue, c, req->data_len);
        else
                nvme_tcp_set_sg_host_data(c, req->data_len);

        return 0;
}

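/*
 * Translate a block request into a command PDU. Writes small enough to
 * fit in the in-capsule data area are sent inline (pdu_len > 0), which
 * sets the PDU data offset and extends plen accordingly. For example,
 * assuming an 8-byte common header and a 64-byte SQE, a 4KB inline
 * write with both digests enabled has hlen = 72 and
 * plen = 72 + 4 (hdgst) + 4096 (data) + 4 (ddgst).
 */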
static blk_status_t nvme_tcp_setup_cmd_pdu(struct nvme_ns *ns,
                struct request *rq)
{
        struct nvme_tcp_request *req = blk_mq_rq_to_pdu(rq);
        struct nvme_tcp_cmd_pdu *pdu = req->pdu;
        struct nvme_tcp_queue *queue = req->queue;
        u8 hdgst = nvme_tcp_hdgst_len(queue), ddgst = 0;
        blk_status_t ret;

        ret = nvme_setup_cmd(ns, rq);
        if (ret)
                return ret;

        req->state = NVME_TCP_SEND_CMD_PDU;
        req->status = cpu_to_le16(NVME_SC_SUCCESS);
        req->offset = 0;
        req->data_sent = 0;
        req->pdu_len = 0;
        req->pdu_sent = 0;
        req->data_len = blk_rq_nr_phys_segments(rq) ?
                                blk_rq_payload_bytes(rq) : 0;
        req->curr_bio = rq->bio;
        if (req->curr_bio && req->data_len)
                nvme_tcp_init_iter(req, rq_data_dir(rq));

        if (rq_data_dir(rq) == WRITE &&
            req->data_len <= nvme_tcp_inline_data_size(queue))
                req->pdu_len = req->data_len;

        pdu->hdr.type = nvme_tcp_cmd;
        pdu->hdr.flags = 0;
        if (queue->hdr_digest)
                pdu->hdr.flags |= NVME_TCP_F_HDGST;
        if (queue->data_digest && req->pdu_len) {
                pdu->hdr.flags |= NVME_TCP_F_DDGST;
                ddgst = nvme_tcp_ddgst_len(queue);
        }
        pdu->hdr.hlen = sizeof(*pdu);
        pdu->hdr.pdo = req->pdu_len ? pdu->hdr.hlen + hdgst : 0;
        pdu->hdr.plen =
                cpu_to_le32(pdu->hdr.hlen + hdgst + req->pdu_len + ddgst);

        ret = nvme_tcp_map_data(queue, rq);
        if (unlikely(ret)) {
                nvme_cleanup_cmd(rq);
                dev_err(queue->ctrl->ctrl.device,
                        "Failed to map data (%d)\n", ret);
                return ret;
        }

        return 0;
}

static void nvme_tcp_commit_rqs(struct blk_mq_hw_ctx *hctx)
{
        struct nvme_tcp_queue *queue = hctx->driver_data;

        if (!llist_empty(&queue->req_list))
                queue_work_on(queue->io_cpu, nvme_tcp_wq, &queue->io_work);
}

static blk_status_t nvme_tcp_queue_rq(struct blk_mq_hw_ctx *hctx,
                const struct blk_mq_queue_data *bd)
{
        struct nvme_ns *ns = hctx->queue->queuedata;
        struct nvme_tcp_queue *queue = hctx->driver_data;
        struct request *rq = bd->rq;
        struct nvme_tcp_request *req = blk_mq_rq_to_pdu(rq);
        bool queue_ready = test_bit(NVME_TCP_Q_LIVE, &queue->flags);
        blk_status_t ret;

        if (!nvme_check_ready(&queue->ctrl->ctrl, rq, queue_ready))
                return nvme_fail_nonready_command(&queue->ctrl->ctrl, rq);

        ret = nvme_tcp_setup_cmd_pdu(ns, rq);
        if (unlikely(ret))
                return ret;

        blk_mq_start_request(rq);

        nvme_tcp_queue_request(req, true, bd->last);

        return BLK_STS_OK;
}

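/*
 * Lay out the blk-mq queue maps from the counts computed in
 * nvme_tcp_set_io_queues(). With dedicated write queues the read map
 * starts after the default queues; otherwise both maps share the same
 * queues starting at offset 0. Poll queues, when present, sit after the
 * default and read queues.
 */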
static int nvme_tcp_map_queues(struct blk_mq_tag_set *set)
{
        struct nvme_tcp_ctrl *ctrl = set->driver_data;
        struct nvmf_ctrl_options *opts = ctrl->ctrl.opts;

        if (opts->nr_write_queues && ctrl->io_queues[HCTX_TYPE_READ]) {
                /* separate read/write queues */
                set->map[HCTX_TYPE_DEFAULT].nr_queues =
                        ctrl->io_queues[HCTX_TYPE_DEFAULT];
                set->map[HCTX_TYPE_DEFAULT].queue_offset = 0;
                set->map[HCTX_TYPE_READ].nr_queues =
                        ctrl->io_queues[HCTX_TYPE_READ];
                set->map[HCTX_TYPE_READ].queue_offset =
                        ctrl->io_queues[HCTX_TYPE_DEFAULT];
        } else {
                /* shared read/write queues */
                set->map[HCTX_TYPE_DEFAULT].nr_queues =
                        ctrl->io_queues[HCTX_TYPE_DEFAULT];
                set->map[HCTX_TYPE_DEFAULT].queue_offset = 0;
                set->map[HCTX_TYPE_READ].nr_queues =
                        ctrl->io_queues[HCTX_TYPE_DEFAULT];
                set->map[HCTX_TYPE_READ].queue_offset = 0;
        }
        blk_mq_map_queues(&set->map[HCTX_TYPE_DEFAULT]);
        blk_mq_map_queues(&set->map[HCTX_TYPE_READ]);

        if (opts->nr_poll_queues && ctrl->io_queues[HCTX_TYPE_POLL]) {
                /* map dedicated poll queues only if we have queues left */
                set->map[HCTX_TYPE_POLL].nr_queues =
                        ctrl->io_queues[HCTX_TYPE_POLL];
                set->map[HCTX_TYPE_POLL].queue_offset =
                        ctrl->io_queues[HCTX_TYPE_DEFAULT] +
                        ctrl->io_queues[HCTX_TYPE_READ];
                blk_mq_map_queues(&set->map[HCTX_TYPE_POLL]);
        }

        dev_info(ctrl->ctrl.device,
                "mapped %d/%d/%d default/read/poll queues.\n",
                ctrl->io_queues[HCTX_TYPE_DEFAULT],
                ctrl->io_queues[HCTX_TYPE_READ],
                ctrl->io_queues[HCTX_TYPE_POLL]);

        return 0;
}

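/*
 * Poll entry point for HCTX_TYPE_POLL queues: busy-loop on the socket
 * when nothing has been received yet, then reap completions directly,
 * with NVME_TCP_Q_POLLING set so the data_ready callback stays out of
 * the way. Returns the number of completions processed.
 */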
static int nvme_tcp_poll(struct blk_mq_hw_ctx *hctx, struct io_comp_batch *iob)
{
        struct nvme_tcp_queue *queue = hctx->driver_data;
        struct sock *sk = queue->sock->sk;

        if (!test_bit(NVME_TCP_Q_LIVE, &queue->flags))
                return 0;

        set_bit(NVME_TCP_Q_POLLING, &queue->flags);
        if (sk_can_busy_loop(sk) && skb_queue_empty_lockless(&sk->sk_receive_queue))
                sk_busy_loop(sk, true);
        nvme_tcp_try_recv(queue);
        clear_bit(NVME_TCP_Q_POLLING, &queue->flags);
        return queue->nr_cqe;
}

static const struct blk_mq_ops nvme_tcp_mq_ops = {
        .queue_rq       = nvme_tcp_queue_rq,
        .commit_rqs     = nvme_tcp_commit_rqs,
        .complete       = nvme_complete_rq,
        .init_request   = nvme_tcp_init_request,
        .exit_request   = nvme_tcp_exit_request,
        .init_hctx      = nvme_tcp_init_hctx,
        .timeout        = nvme_tcp_timeout,
        .map_queues     = nvme_tcp_map_queues,
        .poll           = nvme_tcp_poll,
};

static const struct blk_mq_ops nvme_tcp_admin_mq_ops = {
        .queue_rq       = nvme_tcp_queue_rq,
        .complete       = nvme_complete_rq,
        .init_request   = nvme_tcp_init_request,
        .exit_request   = nvme_tcp_exit_request,
        .init_hctx      = nvme_tcp_init_admin_hctx,
        .timeout        = nvme_tcp_timeout,
};

static const struct nvme_ctrl_ops nvme_tcp_ctrl_ops = {
        .name                   = "tcp",
        .module                 = THIS_MODULE,
        .flags                  = NVME_F_FABRICS,
        .reg_read32             = nvmf_reg_read32,
        .reg_read64             = nvmf_reg_read64,
        .reg_write32            = nvmf_reg_write32,
        .free_ctrl              = nvme_tcp_free_ctrl,
        .submit_async_event     = nvme_tcp_submit_async_event,
        .delete_ctrl            = nvme_tcp_delete_ctrl,
        .get_address            = nvmf_get_address,
};

static bool
nvme_tcp_existing_controller(struct nvmf_ctrl_options *opts)
{
        struct nvme_tcp_ctrl *ctrl;
        bool found = false;

        mutex_lock(&nvme_tcp_ctrl_mutex);
        list_for_each_entry(ctrl, &nvme_tcp_ctrl_list, list) {
                found = nvmf_ip_options_match(&ctrl->ctrl, opts);
                if (found)
                        break;
        }
        mutex_unlock(&nvme_tcp_ctrl_mutex);

        return found;
}

static struct nvme_ctrl *nvme_tcp_create_ctrl(struct device *dev,
                struct nvmf_ctrl_options *opts)
{
        struct nvme_tcp_ctrl *ctrl;
        int ret;

        ctrl = kzalloc(sizeof(*ctrl), GFP_KERNEL);
        if (!ctrl)
                return ERR_PTR(-ENOMEM);

        INIT_LIST_HEAD(&ctrl->list);
        ctrl->ctrl.opts = opts;
        ctrl->ctrl.queue_count = opts->nr_io_queues + opts->nr_write_queues +
                                opts->nr_poll_queues + 1;
        ctrl->ctrl.sqsize = opts->queue_size - 1;
        ctrl->ctrl.kato = opts->kato;

        INIT_DELAYED_WORK(&ctrl->connect_work,
                        nvme_tcp_reconnect_ctrl_work);
        INIT_WORK(&ctrl->err_work, nvme_tcp_error_recovery_work);
        INIT_WORK(&ctrl->ctrl.reset_work, nvme_reset_ctrl_work);

        if (!(opts->mask & NVMF_OPT_TRSVCID)) {
                opts->trsvcid =
                        kstrdup(__stringify(NVME_TCP_DISC_PORT), GFP_KERNEL);
                if (!opts->trsvcid) {
                        ret = -ENOMEM;
                        goto out_free_ctrl;
                }
                opts->mask |= NVMF_OPT_TRSVCID;
        }

        ret = inet_pton_with_scope(&init_net, AF_UNSPEC,
                        opts->traddr, opts->trsvcid, &ctrl->addr);
        if (ret) {
                pr_err("malformed address passed: %s:%s\n",
                        opts->traddr, opts->trsvcid);
                goto out_free_ctrl;
        }

        if (opts->mask & NVMF_OPT_HOST_TRADDR) {
                ret = inet_pton_with_scope(&init_net, AF_UNSPEC,
                        opts->host_traddr, NULL, &ctrl->src_addr);
                if (ret) {
                        pr_err("malformed src address passed: %s\n",
                                opts->host_traddr);
                        goto out_free_ctrl;
                }
        }

        if (opts->mask & NVMF_OPT_HOST_IFACE) {
                if (!__dev_get_by_name(&init_net, opts->host_iface)) {
                        pr_err("invalid interface passed: %s\n",
                                opts->host_iface);
                        ret = -ENODEV;
                        goto out_free_ctrl;
                }
        }

        if (!opts->duplicate_connect && nvme_tcp_existing_controller(opts)) {
                ret = -EALREADY;
                goto out_free_ctrl;
        }

        ctrl->queues = kcalloc(ctrl->ctrl.queue_count, sizeof(*ctrl->queues),
                                GFP_KERNEL);
        if (!ctrl->queues) {
                ret = -ENOMEM;
                goto out_free_ctrl;
        }

        ret = nvme_init_ctrl(&ctrl->ctrl, dev, &nvme_tcp_ctrl_ops, 0);
        if (ret)
                goto out_kfree_queues;

        if (!nvme_change_ctrl_state(&ctrl->ctrl, NVME_CTRL_CONNECTING)) {
                WARN_ON_ONCE(1);
                ret = -EINTR;
                goto out_uninit_ctrl;
        }

        ret = nvme_tcp_setup_ctrl(&ctrl->ctrl, true);
        if (ret)
                goto out_uninit_ctrl;

        dev_info(ctrl->ctrl.device, "new ctrl: NQN \"%s\", addr %pISp\n",
                nvmf_ctrl_subsysnqn(&ctrl->ctrl), &ctrl->addr);

        mutex_lock(&nvme_tcp_ctrl_mutex);
        list_add_tail(&ctrl->list, &nvme_tcp_ctrl_list);
        mutex_unlock(&nvme_tcp_ctrl_mutex);

        return &ctrl->ctrl;

out_uninit_ctrl:
        nvme_uninit_ctrl(&ctrl->ctrl);
        nvme_put_ctrl(&ctrl->ctrl);
        if (ret > 0)
                ret = -EIO;
        return ERR_PTR(ret);
out_kfree_queues:
        kfree(ctrl->queues);
out_free_ctrl:
        kfree(ctrl);
        return ERR_PTR(ret);
}

static struct nvmf_transport_ops nvme_tcp_transport = {
        .name           = "tcp",
        .module         = THIS_MODULE,
        .required_opts  = NVMF_OPT_TRADDR,
        .allowed_opts   = NVMF_OPT_TRSVCID | NVMF_OPT_RECONNECT_DELAY |
                          NVMF_OPT_HOST_TRADDR | NVMF_OPT_CTRL_LOSS_TMO |
                          NVMF_OPT_HDR_DIGEST | NVMF_OPT_DATA_DIGEST |
                          NVMF_OPT_NR_WRITE_QUEUES | NVMF_OPT_NR_POLL_QUEUES |
                          NVMF_OPT_TOS | NVMF_OPT_HOST_IFACE,
        .create_ctrl    = nvme_tcp_create_ctrl,
};

static int __init nvme_tcp_init_module(void)
{
        nvme_tcp_wq = alloc_workqueue("nvme_tcp_wq",
                        WQ_MEM_RECLAIM | WQ_HIGHPRI, 0);
        if (!nvme_tcp_wq)
                return -ENOMEM;

        nvmf_register_transport(&nvme_tcp_transport);
        return 0;
}

static void __exit nvme_tcp_cleanup_module(void)
{
        struct nvme_tcp_ctrl *ctrl;

        nvmf_unregister_transport(&nvme_tcp_transport);

        mutex_lock(&nvme_tcp_ctrl_mutex);
        list_for_each_entry(ctrl, &nvme_tcp_ctrl_list, list)
                nvme_delete_ctrl(&ctrl->ctrl);
        mutex_unlock(&nvme_tcp_ctrl_mutex);
        flush_workqueue(nvme_delete_wq);

        destroy_workqueue(nvme_tcp_wq);
}

module_init(nvme_tcp_init_module);
module_exit(nvme_tcp_cleanup_module);

MODULE_LICENSE("GPL v2");