// SPDX-License-Identifier: GPL-2.0
/*
 * NVMe over Fabrics TCP host.
 * Copyright (c) 2018 Lightbits Labs. All rights reserved.
 */
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/module.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/err.h>
#include <linux/nvme-tcp.h>
#include <net/sock.h>
#include <net/tcp.h>
#include <linux/blk-mq.h>
#include <crypto/hash.h>
#include <net/busy_poll.h>

#include "nvme.h"
#include "fabrics.h"

struct nvme_tcp_queue;

/* Define the socket priority to use for connections where it is desirable
 * that the NIC consider performing optimized packet processing or filtering.
 * A non-zero value is sufficient to indicate general consideration of any
 * possible optimization. Making it a module param allows for alternative
 * values that may be unique for some NIC implementations.
 */
static int so_priority;
module_param(so_priority, int, 0644);
MODULE_PARM_DESC(so_priority, "nvme tcp socket optimize priority");

enum nvme_tcp_send_state {
	NVME_TCP_SEND_CMD_PDU = 0,
	NVME_TCP_SEND_H2C_PDU,
	NVME_TCP_SEND_DATA,
	NVME_TCP_SEND_DDGST,
};

struct nvme_tcp_request {
	struct nvme_request	req;
	void			*pdu;
	struct nvme_tcp_queue	*queue;
	u32			data_len;
	u32			pdu_len;
	u32			pdu_sent;
	u16			ttag;
	__le16			status;
	struct list_head	entry;
	struct llist_node	lentry;
	__le32			ddgst;

	struct bio		*curr_bio;
	struct iov_iter		iter;

	/* send state */
	size_t			offset;
	size_t			data_sent;
	enum nvme_tcp_send_state state;
};

enum nvme_tcp_queue_flags {
	NVME_TCP_Q_ALLOCATED	= 0,
	NVME_TCP_Q_LIVE		= 1,
	NVME_TCP_Q_POLLING	= 2,
};

enum nvme_tcp_recv_state {
	NVME_TCP_RECV_PDU = 0,
	NVME_TCP_RECV_DATA,
	NVME_TCP_RECV_DDGST,
};

struct nvme_tcp_ctrl;
struct nvme_tcp_queue {
	struct socket		*sock;
	struct work_struct	io_work;
	int			io_cpu;

	struct mutex		queue_lock;
	struct mutex		send_mutex;
	struct llist_head	req_list;
	struct list_head	send_list;
	bool			more_requests;

	/* recv state */
	void			*pdu;
	int			pdu_remaining;
	int			pdu_offset;
	size_t			data_remaining;
	size_t			ddgst_remaining;
	unsigned int		nr_cqe;

	/* send state */
	struct nvme_tcp_request *request;

	int			queue_size;
	size_t			cmnd_capsule_len;
	struct nvme_tcp_ctrl	*ctrl;
	unsigned long		flags;
	bool			rd_enabled;

	bool			hdr_digest;
	bool			data_digest;
	struct ahash_request	*rcv_hash;
	struct ahash_request	*snd_hash;
	__le32			exp_ddgst;
	__le32			recv_ddgst;

	struct page_frag_cache	pf_cache;

	void (*state_change)(struct sock *);
	void (*data_ready)(struct sock *);
	void (*write_space)(struct sock *);
};

struct nvme_tcp_ctrl {
	/* read only in the hot path */
	struct nvme_tcp_queue	*queues;
	struct blk_mq_tag_set	tag_set;

	/* other member variables */
	struct list_head	list;
	struct blk_mq_tag_set	admin_tag_set;
	struct sockaddr_storage addr;
	struct sockaddr_storage src_addr;
	struct nvme_ctrl	ctrl;

	struct work_struct	err_work;
	struct delayed_work	connect_work;
	struct nvme_tcp_request async_req;
	u32			io_queues[HCTX_MAX_TYPES];
};

static LIST_HEAD(nvme_tcp_ctrl_list);
static DEFINE_MUTEX(nvme_tcp_ctrl_mutex);
static struct workqueue_struct *nvme_tcp_wq;
static const struct blk_mq_ops nvme_tcp_mq_ops;
static const struct blk_mq_ops nvme_tcp_admin_mq_ops;
static int nvme_tcp_try_send(struct nvme_tcp_queue *queue);

static inline struct nvme_tcp_ctrl *to_tcp_ctrl(struct nvme_ctrl *ctrl)
{
	return container_of(ctrl, struct nvme_tcp_ctrl, ctrl);
}

static inline int nvme_tcp_queue_id(struct nvme_tcp_queue *queue)
{
	return queue - queue->ctrl->queues;
}

static inline struct blk_mq_tags *nvme_tcp_tagset(struct nvme_tcp_queue *queue)
{
	u32 queue_idx = nvme_tcp_queue_id(queue);

	if (queue_idx == 0)
		return queue->ctrl->admin_tag_set.tags[queue_idx];
	return queue->ctrl->tag_set.tags[queue_idx - 1];
}

static inline u8 nvme_tcp_hdgst_len(struct nvme_tcp_queue *queue)
{
	return queue->hdr_digest ? NVME_TCP_DIGEST_LENGTH : 0;
}

static inline u8 nvme_tcp_ddgst_len(struct nvme_tcp_queue *queue)
{
	return queue->data_digest ? NVME_TCP_DIGEST_LENGTH : 0;
}

static inline size_t nvme_tcp_inline_data_size(struct nvme_tcp_queue *queue)
{
	return queue->cmnd_capsule_len - sizeof(struct nvme_command);
}

static inline bool nvme_tcp_async_req(struct nvme_tcp_request *req)
{
	return req == &req->queue->ctrl->async_req;
}

static inline bool nvme_tcp_has_inline_data(struct nvme_tcp_request *req)
{
	struct request *rq;

	if (unlikely(nvme_tcp_async_req(req)))
		return false; /* async events don't have a request */

	rq = blk_mq_rq_from_pdu(req);

	return rq_data_dir(rq) == WRITE && req->data_len &&
		req->data_len <= nvme_tcp_inline_data_size(req->queue);
}

static inline struct page *nvme_tcp_req_cur_page(struct nvme_tcp_request *req)
{
	return req->iter.bvec->bv_page;
}

static inline size_t nvme_tcp_req_cur_offset(struct nvme_tcp_request *req)
{
	return req->iter.bvec->bv_offset + req->iter.iov_offset;
}

static inline size_t nvme_tcp_req_cur_length(struct nvme_tcp_request *req)
{
	return min_t(size_t, iov_iter_single_seg_count(&req->iter),
			req->pdu_len - req->pdu_sent);
}

static inline size_t nvme_tcp_pdu_data_left(struct nvme_tcp_request *req)
{
	return rq_data_dir(blk_mq_rq_from_pdu(req)) == WRITE ?
			req->pdu_len - req->pdu_sent : 0;
}

static inline size_t nvme_tcp_pdu_last_send(struct nvme_tcp_request *req,
		int len)
{
	return nvme_tcp_pdu_data_left(req) <= len;
}

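/*
 * Set up req->iter as a bvec iterator over the request payload: either the
 * single special_vec of a request with RQF_SPECIAL_PAYLOAD, or the bvecs of
 * the current bio starting at the bio's current completion offset.
 */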
static void nvme_tcp_init_iter(struct nvme_tcp_request *req,
		unsigned int dir)
{
	struct request *rq = blk_mq_rq_from_pdu(req);
	struct bio_vec *vec;
	unsigned int size;
	int nr_bvec;
	size_t offset;

	if (rq->rq_flags & RQF_SPECIAL_PAYLOAD) {
		vec = &rq->special_vec;
		nr_bvec = 1;
		size = blk_rq_payload_bytes(rq);
		offset = 0;
	} else {
		struct bio *bio = req->curr_bio;
		struct bvec_iter bi;
		struct bio_vec bv;

		vec = __bvec_iter_bvec(bio->bi_io_vec, bio->bi_iter);
		nr_bvec = 0;
		bio_for_each_bvec(bv, bio, bi) {
			nr_bvec++;
		}
		size = bio->bi_iter.bi_size;
		offset = bio->bi_iter.bi_bvec_done;
	}

	iov_iter_bvec(&req->iter, dir, vec, nr_bvec, size);
	req->iter.iov_offset = offset;
}

static inline void nvme_tcp_advance_req(struct nvme_tcp_request *req,
		int len)
{
	req->data_sent += len;
	req->pdu_sent += len;
	iov_iter_advance(&req->iter, len);
	if (!iov_iter_count(&req->iter) &&
	    req->data_sent < req->data_len) {
		req->curr_bio = req->curr_bio->bi_next;
		nvme_tcp_init_iter(req, WRITE);
	}
}

static inline void nvme_tcp_send_all(struct nvme_tcp_queue *queue)
{
	int ret;

	/* drain the send queue as much as we can... */
	do {
		ret = nvme_tcp_try_send(queue);
	} while (ret > 0);
}

static inline bool nvme_tcp_queue_more(struct nvme_tcp_queue *queue)
{
	return !list_empty(&queue->send_list) ||
		!llist_empty(&queue->req_list) || queue->more_requests;
}

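/*
 * Queue a request for transmission. The request is added to the lockless
 * req_list; if the caller runs on the queue's io_cpu and the queue is
 * otherwise idle, the request is sent inline under send_mutex, otherwise
 * io_work is scheduled to pick it up.
 */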
static inline void nvme_tcp_queue_request(struct nvme_tcp_request *req,
		bool sync, bool last)
{
	struct nvme_tcp_queue *queue = req->queue;
	bool empty;

	empty = llist_add(&req->lentry, &queue->req_list) &&
		list_empty(&queue->send_list) && !queue->request;

	/*
	 * If we are the first on the send_list and can grab the send_mutex,
	 * try to send directly; otherwise queue io_work. Also, only do that
	 * if we are on the same cpu, so we don't introduce contention.
	 */
	if (queue->io_cpu == raw_smp_processor_id() &&
	    sync && empty && mutex_trylock(&queue->send_mutex)) {
		queue->more_requests = !last;
		nvme_tcp_send_all(queue);
		queue->more_requests = false;
		mutex_unlock(&queue->send_mutex);
	}

	if (last && nvme_tcp_queue_more(queue))
		queue_work_on(queue->io_cpu, nvme_tcp_wq, &queue->io_work);
}

static void nvme_tcp_process_req_list(struct nvme_tcp_queue *queue)
{
	struct nvme_tcp_request *req;
	struct llist_node *node;

	for (node = llist_del_all(&queue->req_list); node; node = node->next) {
		req = llist_entry(node, struct nvme_tcp_request, lentry);
		list_add(&req->entry, &queue->send_list);
	}
}

static inline struct nvme_tcp_request *
nvme_tcp_fetch_request(struct nvme_tcp_queue *queue)
{
	struct nvme_tcp_request *req;

	req = list_first_entry_or_null(&queue->send_list,
			struct nvme_tcp_request, entry);
	if (!req) {
		nvme_tcp_process_req_list(queue);
		req = list_first_entry_or_null(&queue->send_list,
				struct nvme_tcp_request, entry);
		if (unlikely(!req))
			return NULL;
	}

	list_del(&req->entry);
	return req;
}

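/*
 * NVMe/TCP header and data digests are CRC32C values computed through the
 * kernel crypto API ("crc32c" ahash, see nvme_tcp_alloc_crypto()). The
 * helpers below update and finalize the per-queue receive/send hash state.
 */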
static inline void nvme_tcp_ddgst_final(struct ahash_request *hash,
		__le32 *dgst)
{
	ahash_request_set_crypt(hash, NULL, (u8 *)dgst, 0);
	crypto_ahash_final(hash);
}

static inline void nvme_tcp_ddgst_update(struct ahash_request *hash,
		struct page *page, off_t off, size_t len)
{
	struct scatterlist sg;

	sg_init_marker(&sg, 1);
	sg_set_page(&sg, page, len, off);
	ahash_request_set_crypt(hash, &sg, NULL, len);
	crypto_ahash_update(hash);
}

static inline void nvme_tcp_hdgst(struct ahash_request *hash,
		void *pdu, size_t len)
{
	struct scatterlist sg;

	sg_init_one(&sg, pdu, len);
	ahash_request_set_crypt(hash, &sg, pdu + len, len);
	crypto_ahash_digest(hash);
}

static int nvme_tcp_verify_hdgst(struct nvme_tcp_queue *queue,
		void *pdu, size_t pdu_len)
{
	struct nvme_tcp_hdr *hdr = pdu;
	__le32 recv_digest;
	__le32 exp_digest;

	if (unlikely(!(hdr->flags & NVME_TCP_F_HDGST))) {
		dev_err(queue->ctrl->ctrl.device,
			"queue %d: header digest flag is cleared\n",
			nvme_tcp_queue_id(queue));
		return -EPROTO;
	}

	recv_digest = *(__le32 *)(pdu + hdr->hlen);
	nvme_tcp_hdgst(queue->rcv_hash, pdu, pdu_len);
	exp_digest = *(__le32 *)(pdu + hdr->hlen);
	if (recv_digest != exp_digest) {
		dev_err(queue->ctrl->ctrl.device,
			"header digest error: recv %#x expected %#x\n",
			le32_to_cpu(recv_digest), le32_to_cpu(exp_digest));
		return -EIO;
	}

	return 0;
}

static int nvme_tcp_check_ddgst(struct nvme_tcp_queue *queue, void *pdu)
{
	struct nvme_tcp_hdr *hdr = pdu;
	u8 digest_len = nvme_tcp_hdgst_len(queue);
	u32 len;

	len = le32_to_cpu(hdr->plen) - hdr->hlen -
		((hdr->flags & NVME_TCP_F_HDGST) ? digest_len : 0);

	if (unlikely(len && !(hdr->flags & NVME_TCP_F_DDGST))) {
		dev_err(queue->ctrl->ctrl.device,
			"queue %d: data digest flag is cleared\n",
			nvme_tcp_queue_id(queue));
		return -EPROTO;
	}
	crypto_ahash_init(queue->rcv_hash);

	return 0;
}

static void nvme_tcp_exit_request(struct blk_mq_tag_set *set,
		struct request *rq, unsigned int hctx_idx)
{
	struct nvme_tcp_request *req = blk_mq_rq_to_pdu(rq);

	page_frag_free(req->pdu);
}

static int nvme_tcp_init_request(struct blk_mq_tag_set *set,
		struct request *rq, unsigned int hctx_idx,
		unsigned int numa_node)
{
	struct nvme_tcp_ctrl *ctrl = set->driver_data;
	struct nvme_tcp_request *req = blk_mq_rq_to_pdu(rq);
	struct nvme_tcp_cmd_pdu *pdu;
	int queue_idx = (set == &ctrl->tag_set) ? hctx_idx + 1 : 0;
	struct nvme_tcp_queue *queue = &ctrl->queues[queue_idx];
	u8 hdgst = nvme_tcp_hdgst_len(queue);

	req->pdu = page_frag_alloc(&queue->pf_cache,
			sizeof(struct nvme_tcp_cmd_pdu) + hdgst,
			GFP_KERNEL | __GFP_ZERO);
	if (!req->pdu)
		return -ENOMEM;

	pdu = req->pdu;
	req->queue = queue;
	nvme_req(rq)->ctrl = &ctrl->ctrl;
	nvme_req(rq)->cmd = &pdu->cmd;

	return 0;
}

static int nvme_tcp_init_hctx(struct blk_mq_hw_ctx *hctx, void *data,
		unsigned int hctx_idx)
{
	struct nvme_tcp_ctrl *ctrl = data;
	struct nvme_tcp_queue *queue = &ctrl->queues[hctx_idx + 1];

	hctx->driver_data = queue;
	return 0;
}

static int nvme_tcp_init_admin_hctx(struct blk_mq_hw_ctx *hctx, void *data,
		unsigned int hctx_idx)
{
	struct nvme_tcp_ctrl *ctrl = data;
	struct nvme_tcp_queue *queue = &ctrl->queues[0];

	hctx->driver_data = queue;
	return 0;
}

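/*
 * Receive side state machine: the inbound byte stream is consumed as a PDU
 * header (NVME_TCP_RECV_PDU), then in-capsule data (NVME_TCP_RECV_DATA),
 * then an optional data digest (NVME_TCP_RECV_DDGST). The current state is
 * derived from the remaining byte counters rather than stored explicitly.
 */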
static enum nvme_tcp_recv_state
nvme_tcp_recv_state(struct nvme_tcp_queue *queue)
{
	return (queue->pdu_remaining) ? NVME_TCP_RECV_PDU :
		(queue->ddgst_remaining) ? NVME_TCP_RECV_DDGST :
		NVME_TCP_RECV_DATA;
}

static void nvme_tcp_init_recv_ctx(struct nvme_tcp_queue *queue)
{
	queue->pdu_remaining = sizeof(struct nvme_tcp_rsp_pdu) +
				nvme_tcp_hdgst_len(queue);
	queue->pdu_offset = 0;
	queue->data_remaining = -1;
	queue->ddgst_remaining = 0;
}

static void nvme_tcp_error_recovery(struct nvme_ctrl *ctrl)
{
	if (!nvme_change_ctrl_state(ctrl, NVME_CTRL_RESETTING))
		return;

	dev_warn(ctrl->device, "starting error recovery\n");
	queue_work(nvme_reset_wq, &to_tcp_ctrl(ctrl)->err_work);
}

static int nvme_tcp_process_nvme_cqe(struct nvme_tcp_queue *queue,
		struct nvme_completion *cqe)
{
	struct nvme_tcp_request *req;
	struct request *rq;

	rq = nvme_find_rq(nvme_tcp_tagset(queue), cqe->command_id);
	if (!rq) {
		dev_err(queue->ctrl->ctrl.device,
			"got bad cqe.command_id %#x on queue %d\n",
			cqe->command_id, nvme_tcp_queue_id(queue));
		nvme_tcp_error_recovery(&queue->ctrl->ctrl);
		return -EINVAL;
	}

	req = blk_mq_rq_to_pdu(rq);
	if (req->status == cpu_to_le16(NVME_SC_SUCCESS))
		req->status = cqe->status;

	if (!nvme_try_complete_req(rq, req->status, cqe->result))
		nvme_complete_rq(rq);
	queue->nr_cqe++;

	return 0;
}

static int nvme_tcp_handle_c2h_data(struct nvme_tcp_queue *queue,
		struct nvme_tcp_data_pdu *pdu)
{
	struct request *rq;

	rq = nvme_find_rq(nvme_tcp_tagset(queue), pdu->command_id);
	if (!rq) {
		dev_err(queue->ctrl->ctrl.device,
			"got bad c2hdata.command_id %#x on queue %d\n",
			pdu->command_id, nvme_tcp_queue_id(queue));
		return -ENOENT;
	}

	if (!blk_rq_payload_bytes(rq)) {
		dev_err(queue->ctrl->ctrl.device,
			"queue %d tag %#x unexpected data\n",
			nvme_tcp_queue_id(queue), rq->tag);
		return -EIO;
	}

	queue->data_remaining = le32_to_cpu(pdu->data_length);

	if (pdu->hdr.flags & NVME_TCP_F_DATA_SUCCESS &&
	    unlikely(!(pdu->hdr.flags & NVME_TCP_F_DATA_LAST))) {
		dev_err(queue->ctrl->ctrl.device,
			"queue %d tag %#x SUCCESS set but not last PDU\n",
			nvme_tcp_queue_id(queue), rq->tag);
		nvme_tcp_error_recovery(&queue->ctrl->ctrl);
		return -EPROTO;
	}

	return 0;
}

static int nvme_tcp_handle_comp(struct nvme_tcp_queue *queue,
		struct nvme_tcp_rsp_pdu *pdu)
{
	struct nvme_completion *cqe = &pdu->cqe;
	int ret = 0;

	/*
	 * AEN requests are special as they don't time out and can
	 * survive any kind of queue freeze and often don't respond to
	 * aborts. We don't even bother to allocate a struct request
	 * for them but rather special case them here.
	 */
	if (unlikely(nvme_is_aen_req(nvme_tcp_queue_id(queue),
				     cqe->command_id)))
		nvme_complete_async_event(&queue->ctrl->ctrl, cqe->status,
				&cqe->result);
	else
		ret = nvme_tcp_process_nvme_cqe(queue, cqe);

	return ret;
}

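/*
 * Build a host-to-controller data PDU in response to an R2T from the
 * controller. The R2T length and offset are validated against what has
 * already been sent for this request before the H2CData header is filled in.
 */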
static int nvme_tcp_setup_h2c_data_pdu(struct nvme_tcp_request *req,
		struct nvme_tcp_r2t_pdu *pdu)
{
	struct nvme_tcp_data_pdu *data = req->pdu;
	struct nvme_tcp_queue *queue = req->queue;
	struct request *rq = blk_mq_rq_from_pdu(req);
	u8 hdgst = nvme_tcp_hdgst_len(queue);
	u8 ddgst = nvme_tcp_ddgst_len(queue);

	req->pdu_len = le32_to_cpu(pdu->r2t_length);
	req->pdu_sent = 0;

	if (unlikely(!req->pdu_len)) {
		dev_err(queue->ctrl->ctrl.device,
			"req %d r2t len is %u, probably a bug...\n",
			rq->tag, req->pdu_len);
		return -EPROTO;
	}

	if (unlikely(req->data_sent + req->pdu_len > req->data_len)) {
		dev_err(queue->ctrl->ctrl.device,
			"req %d r2t len %u exceeded data len %u (%zu sent)\n",
			rq->tag, req->pdu_len, req->data_len,
			req->data_sent);
		return -EPROTO;
	}

	if (unlikely(le32_to_cpu(pdu->r2t_offset) < req->data_sent)) {
		dev_err(queue->ctrl->ctrl.device,
			"req %d unexpected r2t offset %u (expected %zu)\n",
			rq->tag, le32_to_cpu(pdu->r2t_offset),
			req->data_sent);
		return -EPROTO;
	}

	memset(data, 0, sizeof(*data));
	data->hdr.type = nvme_tcp_h2c_data;
	data->hdr.flags = NVME_TCP_F_DATA_LAST;
	if (queue->hdr_digest)
		data->hdr.flags |= NVME_TCP_F_HDGST;
	if (queue->data_digest)
		data->hdr.flags |= NVME_TCP_F_DDGST;
	data->hdr.hlen = sizeof(*data);
	data->hdr.pdo = data->hdr.hlen + hdgst;
	data->hdr.plen =
		cpu_to_le32(data->hdr.hlen + hdgst + req->pdu_len + ddgst);
	data->ttag = pdu->ttag;
	data->command_id = nvme_cid(rq);
	data->data_offset = pdu->r2t_offset;
	data->data_length = cpu_to_le32(req->pdu_len);
	return 0;
}

static int nvme_tcp_handle_r2t(struct nvme_tcp_queue *queue,
		struct nvme_tcp_r2t_pdu *pdu)
{
	struct nvme_tcp_request *req;
	struct request *rq;
	int ret;

	rq = nvme_find_rq(nvme_tcp_tagset(queue), pdu->command_id);
	if (!rq) {
		dev_err(queue->ctrl->ctrl.device,
			"got bad r2t.command_id %#x on queue %d\n",
			pdu->command_id, nvme_tcp_queue_id(queue));
		return -ENOENT;
	}
	req = blk_mq_rq_to_pdu(rq);

	ret = nvme_tcp_setup_h2c_data_pdu(req, pdu);
	if (unlikely(ret))
		return ret;

	req->state = NVME_TCP_SEND_H2C_PDU;
	req->offset = 0;

	nvme_tcp_queue_request(req, false, true);

	return 0;
}

static int nvme_tcp_recv_pdu(struct nvme_tcp_queue *queue, struct sk_buff *skb,
		unsigned int *offset, size_t *len)
{
	struct nvme_tcp_hdr *hdr;
	char *pdu = queue->pdu;
	size_t rcv_len = min_t(size_t, *len, queue->pdu_remaining);
	int ret;

	ret = skb_copy_bits(skb, *offset,
		&pdu[queue->pdu_offset], rcv_len);
	if (unlikely(ret))
		return ret;

	queue->pdu_remaining -= rcv_len;
	queue->pdu_offset += rcv_len;
	*offset += rcv_len;
	*len -= rcv_len;
	if (queue->pdu_remaining)
		return 0;

	hdr = queue->pdu;
	if (queue->hdr_digest) {
		ret = nvme_tcp_verify_hdgst(queue, queue->pdu, hdr->hlen);
		if (unlikely(ret))
			return ret;
	}

	if (queue->data_digest) {
		ret = nvme_tcp_check_ddgst(queue, queue->pdu);
		if (unlikely(ret))
			return ret;
	}

	switch (hdr->type) {
	case nvme_tcp_c2h_data:
		return nvme_tcp_handle_c2h_data(queue, (void *)queue->pdu);
	case nvme_tcp_rsp:
		nvme_tcp_init_recv_ctx(queue);
		return nvme_tcp_handle_comp(queue, (void *)queue->pdu);
	case nvme_tcp_r2t:
		nvme_tcp_init_recv_ctx(queue);
		return nvme_tcp_handle_r2t(queue, (void *)queue->pdu);
	default:
		dev_err(queue->ctrl->ctrl.device,
			"unsupported pdu type (%d)\n", hdr->type);
		return -EINVAL;
	}
}

static inline void nvme_tcp_end_request(struct request *rq, u16 status)
{
	union nvme_result res = {};

	if (!nvme_try_complete_req(rq, cpu_to_le16(status << 1), res))
		nvme_complete_rq(rq);
}

static int nvme_tcp_recv_data(struct nvme_tcp_queue *queue, struct sk_buff *skb,
		unsigned int *offset, size_t *len)
{
	struct nvme_tcp_data_pdu *pdu = (void *)queue->pdu;
	struct request *rq =
		nvme_cid_to_rq(nvme_tcp_tagset(queue), pdu->command_id);
	struct nvme_tcp_request *req = blk_mq_rq_to_pdu(rq);

	while (true) {
		int recv_len, ret;

		recv_len = min_t(size_t, *len, queue->data_remaining);
		if (!recv_len)
			break;

		if (!iov_iter_count(&req->iter)) {
			req->curr_bio = req->curr_bio->bi_next;

			/*
			 * If we don't have any bios it means that controller
			 * sent more data than we requested, hence error
			 */
			if (!req->curr_bio) {
				dev_err(queue->ctrl->ctrl.device,
					"queue %d no space in request %#x",
					nvme_tcp_queue_id(queue), rq->tag);
				nvme_tcp_init_recv_ctx(queue);
				return -EIO;
			}
			nvme_tcp_init_iter(req, READ);
		}

		/* we can read only from what is left in this bio */
		recv_len = min_t(size_t, recv_len,
				iov_iter_count(&req->iter));

		if (queue->data_digest)
			ret = skb_copy_and_hash_datagram_iter(skb, *offset,
				&req->iter, recv_len, queue->rcv_hash);
		else
			ret = skb_copy_datagram_iter(skb, *offset,
					&req->iter, recv_len);
		if (ret) {
			dev_err(queue->ctrl->ctrl.device,
				"queue %d failed to copy request %#x data",
				nvme_tcp_queue_id(queue), rq->tag);
			return ret;
		}

		*len -= recv_len;
		*offset += recv_len;
		queue->data_remaining -= recv_len;
	}

	if (!queue->data_remaining) {
		if (queue->data_digest) {
			nvme_tcp_ddgst_final(queue->rcv_hash, &queue->exp_ddgst);
			queue->ddgst_remaining = NVME_TCP_DIGEST_LENGTH;
		} else {
			if (pdu->hdr.flags & NVME_TCP_F_DATA_SUCCESS) {
				nvme_tcp_end_request(rq,
						le16_to_cpu(req->status));
				queue->nr_cqe++;
			}
			nvme_tcp_init_recv_ctx(queue);
		}
	}

	return 0;
}

static int nvme_tcp_recv_ddgst(struct nvme_tcp_queue *queue,
		struct sk_buff *skb, unsigned int *offset, size_t *len)
{
	struct nvme_tcp_data_pdu *pdu = (void *)queue->pdu;
	char *ddgst = (char *)&queue->recv_ddgst;
	size_t recv_len = min_t(size_t, *len, queue->ddgst_remaining);
	off_t off = NVME_TCP_DIGEST_LENGTH - queue->ddgst_remaining;
	int ret;

	ret = skb_copy_bits(skb, *offset, &ddgst[off], recv_len);
	if (unlikely(ret))
		return ret;

	queue->ddgst_remaining -= recv_len;
	*offset += recv_len;
	*len -= recv_len;
	if (queue->ddgst_remaining)
		return 0;

	if (queue->recv_ddgst != queue->exp_ddgst) {
		struct request *rq = nvme_cid_to_rq(nvme_tcp_tagset(queue),
					pdu->command_id);
		struct nvme_tcp_request *req = blk_mq_rq_to_pdu(rq);

		req->status = cpu_to_le16(NVME_SC_DATA_XFER_ERROR);

		dev_err(queue->ctrl->ctrl.device,
			"data digest error: recv %#x expected %#x\n",
			le32_to_cpu(queue->recv_ddgst),
			le32_to_cpu(queue->exp_ddgst));
	}

	if (pdu->hdr.flags & NVME_TCP_F_DATA_SUCCESS) {
		struct request *rq = nvme_cid_to_rq(nvme_tcp_tagset(queue),
					pdu->command_id);
		struct nvme_tcp_request *req = blk_mq_rq_to_pdu(rq);

		nvme_tcp_end_request(rq, le16_to_cpu(req->status));
		queue->nr_cqe++;
	}

	nvme_tcp_init_recv_ctx(queue);
	return 0;
}

static int nvme_tcp_recv_skb(read_descriptor_t *desc, struct sk_buff *skb,
			     unsigned int offset, size_t len)
{
	struct nvme_tcp_queue *queue = desc->arg.data;
	size_t consumed = len;
	int result;

	while (len) {
		switch (nvme_tcp_recv_state(queue)) {
		case NVME_TCP_RECV_PDU:
			result = nvme_tcp_recv_pdu(queue, skb, &offset, &len);
			break;
		case NVME_TCP_RECV_DATA:
			result = nvme_tcp_recv_data(queue, skb, &offset, &len);
			break;
		case NVME_TCP_RECV_DDGST:
			result = nvme_tcp_recv_ddgst(queue, skb, &offset, &len);
			break;
		default:
			result = -EFAULT;
		}
		if (result) {
			dev_err(queue->ctrl->ctrl.device,
				"receive failed: %d\n", result);
			queue->rd_enabled = false;
			nvme_tcp_error_recovery(&queue->ctrl->ctrl);
			return result;
		}
	}

	return consumed;
}

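/*
 * Socket callbacks installed on the queue's socket. The original callbacks
 * are saved in the queue so they can be restored when the socket is torn
 * down; each one schedules io_work on the queue's io_cpu as needed.
 */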
static void nvme_tcp_data_ready(struct sock *sk)
{
	struct nvme_tcp_queue *queue;

	read_lock_bh(&sk->sk_callback_lock);
	queue = sk->sk_user_data;
	if (likely(queue && queue->rd_enabled) &&
	    !test_bit(NVME_TCP_Q_POLLING, &queue->flags))
		queue_work_on(queue->io_cpu, nvme_tcp_wq, &queue->io_work);
	read_unlock_bh(&sk->sk_callback_lock);
}

static void nvme_tcp_write_space(struct sock *sk)
{
	struct nvme_tcp_queue *queue;

	read_lock_bh(&sk->sk_callback_lock);
	queue = sk->sk_user_data;
	if (likely(queue && sk_stream_is_writeable(sk))) {
		clear_bit(SOCK_NOSPACE, &sk->sk_socket->flags);
		queue_work_on(queue->io_cpu, nvme_tcp_wq, &queue->io_work);
	}
	read_unlock_bh(&sk->sk_callback_lock);
}

static void nvme_tcp_state_change(struct sock *sk)
{
	struct nvme_tcp_queue *queue;

	read_lock_bh(&sk->sk_callback_lock);
	queue = sk->sk_user_data;
	if (!queue)
		goto done;

	switch (sk->sk_state) {
	case TCP_CLOSE:
	case TCP_CLOSE_WAIT:
	case TCP_LAST_ACK:
	case TCP_FIN_WAIT1:
	case TCP_FIN_WAIT2:
		nvme_tcp_error_recovery(&queue->ctrl->ctrl);
		break;
	default:
		dev_info(queue->ctrl->ctrl.device,
			"queue %d socket state %d\n",
			nvme_tcp_queue_id(queue), sk->sk_state);
	}

	queue->state_change(sk);
done:
	read_unlock_bh(&sk->sk_callback_lock);
}

static inline void nvme_tcp_done_send_req(struct nvme_tcp_queue *queue)
{
	queue->request = NULL;
}

static void nvme_tcp_fail_request(struct nvme_tcp_request *req)
{
	nvme_tcp_end_request(blk_mq_rq_from_pdu(req), NVME_SC_HOST_PATH_ERROR);
}

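/*
 * Transmit path: a request moves through NVME_TCP_SEND_CMD_PDU,
 * NVME_TCP_SEND_H2C_PDU (only after an R2T), NVME_TCP_SEND_DATA and
 * NVME_TCP_SEND_DDGST. Each helper below sends one stage, returning 1 when
 * the stage completed and -EAGAIN when the socket cannot take more data.
 */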
static int nvme_tcp_try_send_data(struct nvme_tcp_request *req)
{
	struct nvme_tcp_queue *queue = req->queue;

	while (true) {
		struct page *page = nvme_tcp_req_cur_page(req);
		size_t offset = nvme_tcp_req_cur_offset(req);
		size_t len = nvme_tcp_req_cur_length(req);
		bool last = nvme_tcp_pdu_last_send(req, len);
		int ret, flags = MSG_DONTWAIT;

		if (last && !queue->data_digest && !nvme_tcp_queue_more(queue))
			flags |= MSG_EOR;
		else
			flags |= MSG_MORE | MSG_SENDPAGE_NOTLAST;

		if (sendpage_ok(page)) {
			ret = kernel_sendpage(queue->sock, page, offset, len,
					flags);
		} else {
			ret = sock_no_sendpage(queue->sock, page, offset, len,
					flags);
		}
		if (ret <= 0)
			return ret;

		if (queue->data_digest)
			nvme_tcp_ddgst_update(queue->snd_hash, page,
					offset, ret);

		/*
		 * update the request iterator except for the last payload send
		 * in the request where we don't want to modify it as we may
		 * compete with the RX path completing the request.
		 */
		if (req->data_sent + ret < req->data_len)
			nvme_tcp_advance_req(req, ret);

		/* fully successful last send in current PDU */
		if (last && ret == len) {
			if (queue->data_digest) {
				nvme_tcp_ddgst_final(queue->snd_hash,
					&req->ddgst);
				req->state = NVME_TCP_SEND_DDGST;
				req->offset = 0;
			} else {
				nvme_tcp_done_send_req(queue);
			}
			return 1;
		}
	}
	return -EAGAIN;
}

static int nvme_tcp_try_send_cmd_pdu(struct nvme_tcp_request *req)
{
	struct nvme_tcp_queue *queue = req->queue;
	struct nvme_tcp_cmd_pdu *pdu = req->pdu;
	bool inline_data = nvme_tcp_has_inline_data(req);
	u8 hdgst = nvme_tcp_hdgst_len(queue);
	int len = sizeof(*pdu) + hdgst - req->offset;
	int flags = MSG_DONTWAIT;
	int ret;

	if (inline_data || nvme_tcp_queue_more(queue))
		flags |= MSG_MORE | MSG_SENDPAGE_NOTLAST;
	else
		flags |= MSG_EOR;

	if (queue->hdr_digest && !req->offset)
		nvme_tcp_hdgst(queue->snd_hash, pdu, sizeof(*pdu));

	ret = kernel_sendpage(queue->sock, virt_to_page(pdu),
			offset_in_page(pdu) + req->offset, len, flags);
	if (unlikely(ret <= 0))
		return ret;

	len -= ret;
	if (!len) {
		if (inline_data) {
			req->state = NVME_TCP_SEND_DATA;
			if (queue->data_digest)
				crypto_ahash_init(queue->snd_hash);
		} else {
			nvme_tcp_done_send_req(queue);
		}
		return 1;
	}
	req->offset += ret;

	return -EAGAIN;
}

static int nvme_tcp_try_send_data_pdu(struct nvme_tcp_request *req)
{
	struct nvme_tcp_queue *queue = req->queue;
	struct nvme_tcp_data_pdu *pdu = req->pdu;
	u8 hdgst = nvme_tcp_hdgst_len(queue);
	int len = sizeof(*pdu) - req->offset + hdgst;
	int ret;

	if (queue->hdr_digest && !req->offset)
		nvme_tcp_hdgst(queue->snd_hash, pdu, sizeof(*pdu));

	ret = kernel_sendpage(queue->sock, virt_to_page(pdu),
			offset_in_page(pdu) + req->offset, len,
			MSG_DONTWAIT | MSG_MORE | MSG_SENDPAGE_NOTLAST);
	if (unlikely(ret <= 0))
		return ret;

	len -= ret;
	if (!len) {
		req->state = NVME_TCP_SEND_DATA;
		if (queue->data_digest)
			crypto_ahash_init(queue->snd_hash);
		return 1;
	}
	req->offset += ret;

	return -EAGAIN;
}

static int nvme_tcp_try_send_ddgst(struct nvme_tcp_request *req)
{
	struct nvme_tcp_queue *queue = req->queue;
	int ret;
	struct msghdr msg = { .msg_flags = MSG_DONTWAIT };
	struct kvec iov = {
		.iov_base = &req->ddgst + req->offset,
		.iov_len = NVME_TCP_DIGEST_LENGTH - req->offset
	};

	if (nvme_tcp_queue_more(queue))
		msg.msg_flags |= MSG_MORE;
	else
		msg.msg_flags |= MSG_EOR;

	ret = kernel_sendmsg(queue->sock, &msg, &iov, 1, iov.iov_len);
	if (unlikely(ret <= 0))
		return ret;

	if (req->offset + ret == NVME_TCP_DIGEST_LENGTH) {
		nvme_tcp_done_send_req(queue);
		return 1;
	}

	req->offset += ret;
	return -EAGAIN;
}

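/*
 * Send from the current request, fetching a new one from the queue when
 * idle. Returns 1 when progress was made, 0 when there is nothing to send
 * or the socket is full, and a negative error after a fatal send failure.
 */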
static int nvme_tcp_try_send(struct nvme_tcp_queue *queue)
{
	struct nvme_tcp_request *req;
	int ret = 1;

	if (!queue->request) {
		queue->request = nvme_tcp_fetch_request(queue);
		if (!queue->request)
			return 0;
	}
	req = queue->request;

	if (req->state == NVME_TCP_SEND_CMD_PDU) {
		ret = nvme_tcp_try_send_cmd_pdu(req);
		if (ret <= 0)
			goto done;
		if (!nvme_tcp_has_inline_data(req))
			return ret;
	}

	if (req->state == NVME_TCP_SEND_H2C_PDU) {
		ret = nvme_tcp_try_send_data_pdu(req);
		if (ret <= 0)
			goto done;
	}

	if (req->state == NVME_TCP_SEND_DATA) {
		ret = nvme_tcp_try_send_data(req);
		if (ret <= 0)
			goto done;
	}

	if (req->state == NVME_TCP_SEND_DDGST)
		ret = nvme_tcp_try_send_ddgst(req);
done:
	if (ret == -EAGAIN) {
		ret = 0;
	} else if (ret < 0) {
		dev_err(queue->ctrl->ctrl.device,
			"failed to send request %d\n", ret);
		if (ret != -EPIPE && ret != -ECONNRESET)
			nvme_tcp_fail_request(queue->request);
		nvme_tcp_done_send_req(queue);
	}
	return ret;
}

static int nvme_tcp_try_recv(struct nvme_tcp_queue *queue)
{
	struct socket *sock = queue->sock;
	struct sock *sk = sock->sk;
	read_descriptor_t rd_desc;
	int consumed;

	rd_desc.arg.data = queue;
	rd_desc.count = 1;
	lock_sock(sk);
	queue->nr_cqe = 0;
	consumed = sock->ops->read_sock(sk, &rd_desc, nvme_tcp_recv_skb);
	release_sock(sk);
	return consumed;
}

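/*
 * Per-queue worker: alternates between sending (under send_mutex) and
 * receiving until neither side makes progress or the ~1ms quota expires,
 * then reschedules itself if work is still pending.
 */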
static void nvme_tcp_io_work(struct work_struct *w)
{
	struct nvme_tcp_queue *queue =
		container_of(w, struct nvme_tcp_queue, io_work);
	unsigned long deadline = jiffies + msecs_to_jiffies(1);

	do {
		bool pending = false;
		int result;

		if (mutex_trylock(&queue->send_mutex)) {
			result = nvme_tcp_try_send(queue);
			mutex_unlock(&queue->send_mutex);
			if (result > 0)
				pending = true;
			else if (unlikely(result < 0))
				break;
		}

		result = nvme_tcp_try_recv(queue);
		if (result > 0)
			pending = true;
		else if (unlikely(result < 0))
			return;

		if (!pending)
			return;

	} while (!time_after(jiffies, deadline)); /* quota is exhausted */

	queue_work_on(queue->io_cpu, nvme_tcp_wq, &queue->io_work);
}

static void nvme_tcp_free_crypto(struct nvme_tcp_queue *queue)
{
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(queue->rcv_hash);

	ahash_request_free(queue->rcv_hash);
	ahash_request_free(queue->snd_hash);
	crypto_free_ahash(tfm);
}

static int nvme_tcp_alloc_crypto(struct nvme_tcp_queue *queue)
{
	struct crypto_ahash *tfm;

	tfm = crypto_alloc_ahash("crc32c", 0, CRYPTO_ALG_ASYNC);
	if (IS_ERR(tfm))
		return PTR_ERR(tfm);

	queue->snd_hash = ahash_request_alloc(tfm, GFP_KERNEL);
	if (!queue->snd_hash)
		goto free_tfm;
	ahash_request_set_callback(queue->snd_hash, 0, NULL, NULL);

	queue->rcv_hash = ahash_request_alloc(tfm, GFP_KERNEL);
	if (!queue->rcv_hash)
		goto free_snd_hash;
	ahash_request_set_callback(queue->rcv_hash, 0, NULL, NULL);

	return 0;
free_snd_hash:
	ahash_request_free(queue->snd_hash);
free_tfm:
	crypto_free_ahash(tfm);
	return -ENOMEM;
}

static void nvme_tcp_free_async_req(struct nvme_tcp_ctrl *ctrl)
{
	struct nvme_tcp_request *async = &ctrl->async_req;

	page_frag_free(async->pdu);
}

static int nvme_tcp_alloc_async_req(struct nvme_tcp_ctrl *ctrl)
{
	struct nvme_tcp_queue *queue = &ctrl->queues[0];
	struct nvme_tcp_request *async = &ctrl->async_req;
	u8 hdgst = nvme_tcp_hdgst_len(queue);

	async->pdu = page_frag_alloc(&queue->pf_cache,
		sizeof(struct nvme_tcp_cmd_pdu) + hdgst,
		GFP_KERNEL | __GFP_ZERO);
	if (!async->pdu)
		return -ENOMEM;

	async->queue = &ctrl->queues[0];
	return 0;
}

static void nvme_tcp_free_queue(struct nvme_ctrl *nctrl, int qid)
{
	struct nvme_tcp_ctrl *ctrl = to_tcp_ctrl(nctrl);
	struct nvme_tcp_queue *queue = &ctrl->queues[qid];

	if (!test_and_clear_bit(NVME_TCP_Q_ALLOCATED, &queue->flags))
		return;

	if (queue->hdr_digest || queue->data_digest)
		nvme_tcp_free_crypto(queue);

	sock_release(queue->sock);
	kfree(queue->pdu);
	mutex_destroy(&queue->send_mutex);
	mutex_destroy(&queue->queue_lock);
}

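/*
 * Perform the NVMe/TCP initialization handshake: send an ICReq PDU and
 * validate the ICResp (PDU type, length, PFV, CPDA, and that the header and
 * data digest settings match what the host requested).
 */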
static int nvme_tcp_init_connection(struct nvme_tcp_queue *queue)
{
	struct nvme_tcp_icreq_pdu *icreq;
	struct nvme_tcp_icresp_pdu *icresp;
	struct msghdr msg = {};
	struct kvec iov;
	bool ctrl_hdgst, ctrl_ddgst;
	int ret;

	icreq = kzalloc(sizeof(*icreq), GFP_KERNEL);
	if (!icreq)
		return -ENOMEM;

	icresp = kzalloc(sizeof(*icresp), GFP_KERNEL);
	if (!icresp) {
		ret = -ENOMEM;
		goto free_icreq;
	}

	icreq->hdr.type = nvme_tcp_icreq;
	icreq->hdr.hlen = sizeof(*icreq);
	icreq->hdr.pdo = 0;
	icreq->hdr.plen = cpu_to_le32(icreq->hdr.hlen);
	icreq->pfv = cpu_to_le16(NVME_TCP_PFV_1_0);
	icreq->maxr2t = 0; /* single inflight r2t supported */
	icreq->hpda = 0; /* no alignment constraint */
	if (queue->hdr_digest)
		icreq->digest |= NVME_TCP_HDR_DIGEST_ENABLE;
	if (queue->data_digest)
		icreq->digest |= NVME_TCP_DATA_DIGEST_ENABLE;

	iov.iov_base = icreq;
	iov.iov_len = sizeof(*icreq);
	ret = kernel_sendmsg(queue->sock, &msg, &iov, 1, iov.iov_len);
	if (ret < 0)
		goto free_icresp;

	memset(&msg, 0, sizeof(msg));
	iov.iov_base = icresp;
	iov.iov_len = sizeof(*icresp);
	ret = kernel_recvmsg(queue->sock, &msg, &iov, 1,
			iov.iov_len, msg.msg_flags);
	if (ret < 0)
		goto free_icresp;

	ret = -EINVAL;
	if (icresp->hdr.type != nvme_tcp_icresp) {
		pr_err("queue %d: bad type returned %d\n",
			nvme_tcp_queue_id(queue), icresp->hdr.type);
		goto free_icresp;
	}

	if (le32_to_cpu(icresp->hdr.plen) != sizeof(*icresp)) {
		pr_err("queue %d: bad pdu length returned %d\n",
			nvme_tcp_queue_id(queue), icresp->hdr.plen);
		goto free_icresp;
	}

	if (icresp->pfv != NVME_TCP_PFV_1_0) {
		pr_err("queue %d: bad pfv returned %d\n",
			nvme_tcp_queue_id(queue), icresp->pfv);
		goto free_icresp;
	}

	ctrl_ddgst = !!(icresp->digest & NVME_TCP_DATA_DIGEST_ENABLE);
	if ((queue->data_digest && !ctrl_ddgst) ||
	    (!queue->data_digest && ctrl_ddgst)) {
		pr_err("queue %d: data digest mismatch host: %s ctrl: %s\n",
			nvme_tcp_queue_id(queue),
			queue->data_digest ? "enabled" : "disabled",
			ctrl_ddgst ? "enabled" : "disabled");
		goto free_icresp;
	}

	ctrl_hdgst = !!(icresp->digest & NVME_TCP_HDR_DIGEST_ENABLE);
	if ((queue->hdr_digest && !ctrl_hdgst) ||
	    (!queue->hdr_digest && ctrl_hdgst)) {
		pr_err("queue %d: header digest mismatch host: %s ctrl: %s\n",
			nvme_tcp_queue_id(queue),
			queue->hdr_digest ? "enabled" : "disabled",
			ctrl_hdgst ? "enabled" : "disabled");
		goto free_icresp;
	}

	if (icresp->cpda != 0) {
		pr_err("queue %d: unsupported cpda returned %d\n",
			nvme_tcp_queue_id(queue), icresp->cpda);
		goto free_icresp;
	}

	ret = 0;
free_icresp:
	kfree(icresp);
free_icreq:
	kfree(icreq);
	return ret;
}

static bool nvme_tcp_admin_queue(struct nvme_tcp_queue *queue)
{
	return nvme_tcp_queue_id(queue) == 0;
}

static bool nvme_tcp_default_queue(struct nvme_tcp_queue *queue)
{
	struct nvme_tcp_ctrl *ctrl = queue->ctrl;
	int qid = nvme_tcp_queue_id(queue);

	return !nvme_tcp_admin_queue(queue) &&
		qid < 1 + ctrl->io_queues[HCTX_TYPE_DEFAULT];
}

static bool nvme_tcp_read_queue(struct nvme_tcp_queue *queue)
{
	struct nvme_tcp_ctrl *ctrl = queue->ctrl;
	int qid = nvme_tcp_queue_id(queue);

	return !nvme_tcp_admin_queue(queue) &&
		!nvme_tcp_default_queue(queue) &&
		qid < 1 + ctrl->io_queues[HCTX_TYPE_DEFAULT] +
				ctrl->io_queues[HCTX_TYPE_READ];
}

static bool nvme_tcp_poll_queue(struct nvme_tcp_queue *queue)
{
	struct nvme_tcp_ctrl *ctrl = queue->ctrl;
	int qid = nvme_tcp_queue_id(queue);

	return !nvme_tcp_admin_queue(queue) &&
		!nvme_tcp_default_queue(queue) &&
		!nvme_tcp_read_queue(queue) &&
		qid < 1 + ctrl->io_queues[HCTX_TYPE_DEFAULT] +
				ctrl->io_queues[HCTX_TYPE_READ] +
				ctrl->io_queues[HCTX_TYPE_POLL];
}

static void nvme_tcp_set_queue_io_cpu(struct nvme_tcp_queue *queue)
{
	struct nvme_tcp_ctrl *ctrl = queue->ctrl;
	int qid = nvme_tcp_queue_id(queue);
	int n = 0;

	if (nvme_tcp_default_queue(queue))
		n = qid - 1;
	else if (nvme_tcp_read_queue(queue))
		n = qid - ctrl->io_queues[HCTX_TYPE_DEFAULT] - 1;
	else if (nvme_tcp_poll_queue(queue))
		n = qid - ctrl->io_queues[HCTX_TYPE_DEFAULT] -
				ctrl->io_queues[HCTX_TYPE_READ] - 1;
	queue->io_cpu = cpumask_next_wrap(n - 1, cpu_online_mask, -1, false);
}

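/*
 * Allocate and connect a single queue: create the TCP socket, apply the
 * socket options (syn retries, nodelay, linger, priority, tos), optionally
 * bind to a source address or interface, connect to the target, run the
 * ICReq/ICResp handshake, and finally install the socket callbacks.
 */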
static int nvme_tcp_alloc_queue(struct nvme_ctrl *nctrl,
		int qid, size_t queue_size)
{
	struct nvme_tcp_ctrl *ctrl = to_tcp_ctrl(nctrl);
	struct nvme_tcp_queue *queue = &ctrl->queues[qid];
	int ret, rcv_pdu_size;

	mutex_init(&queue->queue_lock);
	queue->ctrl = ctrl;
	init_llist_head(&queue->req_list);
	INIT_LIST_HEAD(&queue->send_list);
	mutex_init(&queue->send_mutex);
	INIT_WORK(&queue->io_work, nvme_tcp_io_work);
	queue->queue_size = queue_size;

	if (qid > 0)
		queue->cmnd_capsule_len = nctrl->ioccsz * 16;
	else
		queue->cmnd_capsule_len = sizeof(struct nvme_command) +
						NVME_TCP_ADMIN_CCSZ;

	ret = sock_create(ctrl->addr.ss_family, SOCK_STREAM,
			IPPROTO_TCP, &queue->sock);
	if (ret) {
		dev_err(nctrl->device,
			"failed to create socket: %d\n", ret);
		goto err_destroy_mutex;
	}

	/* Single syn retry */
	tcp_sock_set_syncnt(queue->sock->sk, 1);

	/* Set TCP no delay */
	tcp_sock_set_nodelay(queue->sock->sk);

	/*
	 * Cleanup whatever is sitting in the TCP transmit queue on socket
	 * close. This is done to prevent stale data from being sent should
	 * the network connection be restored before TCP times out.
	 */
	sock_no_linger(queue->sock->sk);

	if (so_priority > 0)
		sock_set_priority(queue->sock->sk, so_priority);

	/* Set socket type of service */
	if (nctrl->opts->tos >= 0)
		ip_sock_set_tos(queue->sock->sk, nctrl->opts->tos);

	/* Set 10 seconds timeout for icresp recvmsg */
	queue->sock->sk->sk_rcvtimeo = 10 * HZ;

	queue->sock->sk->sk_allocation = GFP_ATOMIC;
	nvme_tcp_set_queue_io_cpu(queue);
	queue->request = NULL;
	queue->data_remaining = 0;
	queue->ddgst_remaining = 0;
	queue->pdu_remaining = 0;
	queue->pdu_offset = 0;
	sk_set_memalloc(queue->sock->sk);

	if (nctrl->opts->mask & NVMF_OPT_HOST_TRADDR) {
		ret = kernel_bind(queue->sock, (struct sockaddr *)&ctrl->src_addr,
			sizeof(ctrl->src_addr));
		if (ret) {
			dev_err(nctrl->device,
				"failed to bind queue %d socket %d\n",
				qid, ret);
			goto err_sock;
		}
	}

	if (nctrl->opts->mask & NVMF_OPT_HOST_IFACE) {
		char *iface = nctrl->opts->host_iface;
		sockptr_t optval = KERNEL_SOCKPTR(iface);

		ret = sock_setsockopt(queue->sock, SOL_SOCKET, SO_BINDTODEVICE,
				      optval, strlen(iface));
		if (ret) {
			dev_err(nctrl->device,
				"failed to bind to interface %s queue %d err %d\n",
				iface, qid, ret);
			goto err_sock;
		}
	}

	queue->hdr_digest = nctrl->opts->hdr_digest;
	queue->data_digest = nctrl->opts->data_digest;
	if (queue->hdr_digest || queue->data_digest) {
		ret = nvme_tcp_alloc_crypto(queue);
		if (ret) {
			dev_err(nctrl->device,
				"failed to allocate queue %d crypto\n", qid);
			goto err_sock;
		}
	}

	rcv_pdu_size = sizeof(struct nvme_tcp_rsp_pdu) +
			nvme_tcp_hdgst_len(queue);
	queue->pdu = kmalloc(rcv_pdu_size, GFP_KERNEL);
	if (!queue->pdu) {
		ret = -ENOMEM;
		goto err_crypto;
	}

	dev_dbg(nctrl->device, "connecting queue %d\n",
			nvme_tcp_queue_id(queue));

	ret = kernel_connect(queue->sock, (struct sockaddr *)&ctrl->addr,
		sizeof(ctrl->addr), 0);
	if (ret) {
		dev_err(nctrl->device,
			"failed to connect socket: %d\n", ret);
		goto err_rcv_pdu;
	}

	ret = nvme_tcp_init_connection(queue);
	if (ret)
		goto err_init_connect;

	queue->rd_enabled = true;
	set_bit(NVME_TCP_Q_ALLOCATED, &queue->flags);
	nvme_tcp_init_recv_ctx(queue);

	write_lock_bh(&queue->sock->sk->sk_callback_lock);
	queue->sock->sk->sk_user_data = queue;
	queue->state_change = queue->sock->sk->sk_state_change;
	queue->data_ready = queue->sock->sk->sk_data_ready;
	queue->write_space = queue->sock->sk->sk_write_space;
	queue->sock->sk->sk_data_ready = nvme_tcp_data_ready;
	queue->sock->sk->sk_state_change = nvme_tcp_state_change;
	queue->sock->sk->sk_write_space = nvme_tcp_write_space;
#ifdef CONFIG_NET_RX_BUSY_POLL
	queue->sock->sk->sk_ll_usec = 1;
#endif
	write_unlock_bh(&queue->sock->sk->sk_callback_lock);

	return 0;

err_init_connect:
	kernel_sock_shutdown(queue->sock, SHUT_RDWR);
err_rcv_pdu:
	kfree(queue->pdu);
err_crypto:
	if (queue->hdr_digest || queue->data_digest)
		nvme_tcp_free_crypto(queue);
err_sock:
	sock_release(queue->sock);
	queue->sock = NULL;
err_destroy_mutex:
	mutex_destroy(&queue->send_mutex);
	mutex_destroy(&queue->queue_lock);
	return ret;
}

static void nvme_tcp_restore_sock_calls(struct nvme_tcp_queue *queue)
{
	struct socket *sock = queue->sock;

	write_lock_bh(&sock->sk->sk_callback_lock);
	sock->sk->sk_user_data = NULL;
	sock->sk->sk_data_ready = queue->data_ready;
	sock->sk->sk_state_change = queue->state_change;
	sock->sk->sk_write_space = queue->write_space;
	write_unlock_bh(&sock->sk->sk_callback_lock);
}

static void __nvme_tcp_stop_queue(struct nvme_tcp_queue *queue)
{
	kernel_sock_shutdown(queue->sock, SHUT_RDWR);
	nvme_tcp_restore_sock_calls(queue);
	cancel_work_sync(&queue->io_work);
}

static void nvme_tcp_stop_queue(struct nvme_ctrl *nctrl, int qid)
{
	struct nvme_tcp_ctrl *ctrl = to_tcp_ctrl(nctrl);
	struct nvme_tcp_queue *queue = &ctrl->queues[qid];

	mutex_lock(&queue->queue_lock);
	if (test_and_clear_bit(NVME_TCP_Q_LIVE, &queue->flags))
		__nvme_tcp_stop_queue(queue);
	mutex_unlock(&queue->queue_lock);
}

1584static int nvme_tcp_start_queue(struct nvme_ctrl *nctrl, int idx)
1585{
1586 struct nvme_tcp_ctrl *ctrl = to_tcp_ctrl(nctrl);
1587 int ret;
1588
1589 if (idx)
Keith Buschbe42a332021-06-10 14:44:35 -07001590 ret = nvmf_connect_io_queue(nctrl, idx);
Sagi Grimberg3f2304f2018-12-03 17:52:17 -08001591 else
1592 ret = nvmf_connect_admin_queue(nctrl);
1593
1594 if (!ret) {
1595 set_bit(NVME_TCP_Q_LIVE, &ctrl->queues[idx].flags);
1596 } else {
Sagi Grimbergf34e2582019-04-29 16:25:48 -07001597 if (test_bit(NVME_TCP_Q_ALLOCATED, &ctrl->queues[idx].flags))
1598 __nvme_tcp_stop_queue(&ctrl->queues[idx]);
Sagi Grimberg3f2304f2018-12-03 17:52:17 -08001599 dev_err(nctrl->device,
1600 "failed to connect queue: %d ret=%d\n", idx, ret);
1601 }
1602 return ret;
1603}
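/*
 * "Starting" a queue is a fabrics-level operation: an NVMe Connect
 * command is sent over the already-established TCP connection (the
 * admin variant for queue 0, the I/O variant otherwise), and only on
 * success is NVME_TCP_Q_LIVE set, the bit that gates submission in
 * nvme_tcp_queue_rq(). On failure the queue is quiesced again before
 * the error is propagated.
 */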
1604
1605static struct blk_mq_tag_set *nvme_tcp_alloc_tagset(struct nvme_ctrl *nctrl,
1606 bool admin)
1607{
1608 struct nvme_tcp_ctrl *ctrl = to_tcp_ctrl(nctrl);
1609 struct blk_mq_tag_set *set;
1610 int ret;
1611
1612 if (admin) {
1613 set = &ctrl->admin_tag_set;
1614 memset(set, 0, sizeof(*set));
1615 set->ops = &nvme_tcp_admin_mq_ops;
1616 set->queue_depth = NVME_AQ_MQ_TAG_DEPTH;
Christoph Hellwiged01fee2021-03-03 13:28:22 +01001617 set->reserved_tags = NVMF_RESERVED_TAGS;
Max Gurtovoy610c8232020-06-16 12:34:24 +03001618 set->numa_node = nctrl->numa_node;
Sagi Grimbergdb5ad6b2020-05-01 14:25:45 -07001619 set->flags = BLK_MQ_F_BLOCKING;
Sagi Grimberg3f2304f2018-12-03 17:52:17 -08001620 set->cmd_size = sizeof(struct nvme_tcp_request);
1621 set->driver_data = ctrl;
1622 set->nr_hw_queues = 1;
Chaitanya Kulkarnidc96f932020-11-09 16:33:45 -08001623 set->timeout = NVME_ADMIN_TIMEOUT;
Sagi Grimberg3f2304f2018-12-03 17:52:17 -08001624 } else {
1625 set = &ctrl->tag_set;
1626 memset(set, 0, sizeof(*set));
1627 set->ops = &nvme_tcp_mq_ops;
1628 set->queue_depth = nctrl->sqsize + 1;
Christoph Hellwiged01fee2021-03-03 13:28:22 +01001629 set->reserved_tags = NVMF_RESERVED_TAGS;
Max Gurtovoy610c8232020-06-16 12:34:24 +03001630 set->numa_node = nctrl->numa_node;
Sagi Grimbergdb5ad6b2020-05-01 14:25:45 -07001631 set->flags = BLK_MQ_F_SHOULD_MERGE | BLK_MQ_F_BLOCKING;
Sagi Grimberg3f2304f2018-12-03 17:52:17 -08001632 set->cmd_size = sizeof(struct nvme_tcp_request);
1633 set->driver_data = ctrl;
1634 set->nr_hw_queues = nctrl->queue_count - 1;
1635 set->timeout = NVME_IO_TIMEOUT;
Sagi Grimberg1a9460c2019-07-03 14:08:04 -07001636 set->nr_maps = nctrl->opts->nr_poll_queues ? HCTX_MAX_TYPES : 2;
Sagi Grimberg3f2304f2018-12-03 17:52:17 -08001637 }
1638
1639 ret = blk_mq_alloc_tag_set(set);
1640 if (ret)
1641 return ERR_PTR(ret);
1642
1643 return set;
1644}
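/*
 * Two tag sets back the controller: a single-hw-queue admin set and an
 * I/O set with one hw queue per TCP queue. BLK_MQ_F_BLOCKING is needed
 * because ->queue_rq() ends up in sendmsg() and may sleep; the reserved
 * tags are set aside so fabrics-internal commands (connect, keep-alive)
 * can still obtain a tag on a saturated queue; and nr_maps covers the
 * default/read maps, plus the poll map when poll queues were requested.
 */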
1645
1646static void nvme_tcp_free_admin_queue(struct nvme_ctrl *ctrl)
1647{
1648 if (to_tcp_ctrl(ctrl)->async_req.pdu) {
David Milburnceb1e082020-09-02 17:42:53 -05001649 cancel_work_sync(&ctrl->async_event_work);
Sagi Grimberg3f2304f2018-12-03 17:52:17 -08001650 nvme_tcp_free_async_req(to_tcp_ctrl(ctrl));
1651 to_tcp_ctrl(ctrl)->async_req.pdu = NULL;
1652 }
1653
1654 nvme_tcp_free_queue(ctrl, 0);
1655}
1656
1657static void nvme_tcp_free_io_queues(struct nvme_ctrl *ctrl)
1658{
1659 int i;
1660
1661 for (i = 1; i < ctrl->queue_count; i++)
1662 nvme_tcp_free_queue(ctrl, i);
1663}
1664
1665static void nvme_tcp_stop_io_queues(struct nvme_ctrl *ctrl)
1666{
1667 int i;
1668
1669 for (i = 1; i < ctrl->queue_count; i++)
1670 nvme_tcp_stop_queue(ctrl, i);
1671}
1672
1673static int nvme_tcp_start_io_queues(struct nvme_ctrl *ctrl)
1674{
1675 int i, ret = 0;
1676
1677 for (i = 1; i < ctrl->queue_count; i++) {
1678 ret = nvme_tcp_start_queue(ctrl, i);
1679 if (ret)
1680 goto out_stop_queues;
1681 }
1682
1683 return 0;
1684
1685out_stop_queues:
1686 for (i--; i >= 1; i--)
1687 nvme_tcp_stop_queue(ctrl, i);
1688 return ret;
1689}
1690
1691static int nvme_tcp_alloc_admin_queue(struct nvme_ctrl *ctrl)
1692{
1693 int ret;
1694
1695 ret = nvme_tcp_alloc_queue(ctrl, 0, NVME_AQ_DEPTH);
1696 if (ret)
1697 return ret;
1698
1699 ret = nvme_tcp_alloc_async_req(to_tcp_ctrl(ctrl));
1700 if (ret)
1701 goto out_free_queue;
1702
1703 return 0;
1704
1705out_free_queue:
1706 nvme_tcp_free_queue(ctrl, 0);
1707 return ret;
1708}
1709
Sagi Grimbergefb973b2019-04-24 11:53:19 -07001710static int __nvme_tcp_alloc_io_queues(struct nvme_ctrl *ctrl)
Sagi Grimberg3f2304f2018-12-03 17:52:17 -08001711{
1712 int i, ret;
1713
1714 for (i = 1; i < ctrl->queue_count; i++) {
1715 ret = nvme_tcp_alloc_queue(ctrl, i,
1716 ctrl->sqsize + 1);
1717 if (ret)
1718 goto out_free_queues;
1719 }
1720
1721 return 0;
1722
1723out_free_queues:
1724 for (i--; i >= 1; i--)
1725 nvme_tcp_free_queue(ctrl, i);
1726
1727 return ret;
1728}
1729
1730static unsigned int nvme_tcp_nr_io_queues(struct nvme_ctrl *ctrl)
1731{
Sagi Grimberg873946f2018-12-11 23:38:57 -08001732 unsigned int nr_io_queues;
1733
1734 nr_io_queues = min(ctrl->opts->nr_io_queues, num_online_cpus());
1735 nr_io_queues += min(ctrl->opts->nr_write_queues, num_online_cpus());
Sagi Grimberg1a9460c2019-07-03 14:08:04 -07001736 nr_io_queues += min(ctrl->opts->nr_poll_queues, num_online_cpus());
Sagi Grimberg873946f2018-12-11 23:38:57 -08001737
1738 return nr_io_queues;
Sagi Grimberg3f2304f2018-12-03 17:52:17 -08001739}
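/*
 * The requested I/O queue count is the sum of the three queue classes,
 * each capped individually at the number of online CPUs; the controller
 * may still grant fewer via nvme_set_queue_count(). As an illustrative
 * configuration used in the examples below: nr_io_queues=4,
 * nr_write_queues=2, nr_poll_queues=1 on an 8-CPU host requests
 * 4 + 2 + 1 = 7 queues.
 */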
1740
Sagi Grimberg64861992019-05-28 22:49:05 -07001741static void nvme_tcp_set_io_queues(struct nvme_ctrl *nctrl,
1742 unsigned int nr_io_queues)
1743{
1744 struct nvme_tcp_ctrl *ctrl = to_tcp_ctrl(nctrl);
1745 struct nvmf_ctrl_options *opts = nctrl->opts;
1746
1747 if (opts->nr_write_queues && opts->nr_io_queues < nr_io_queues) {
1748 /*
1749 * separate read/write queues
1750 * hand out dedicated default queues only after we have
1751 * sufficient read queues.
1752 */
1753 ctrl->io_queues[HCTX_TYPE_READ] = opts->nr_io_queues;
1754 nr_io_queues -= ctrl->io_queues[HCTX_TYPE_READ];
1755 ctrl->io_queues[HCTX_TYPE_DEFAULT] =
1756 min(opts->nr_write_queues, nr_io_queues);
1757 nr_io_queues -= ctrl->io_queues[HCTX_TYPE_DEFAULT];
1758 } else {
1759 /*
1760 * shared read/write queues
1761 * either no write queues were requested, or we don't have
1762 * sufficient queue count to have dedicated default queues.
1763 */
1764 ctrl->io_queues[HCTX_TYPE_DEFAULT] =
1765 min(opts->nr_io_queues, nr_io_queues);
1766 nr_io_queues -= ctrl->io_queues[HCTX_TYPE_DEFAULT];
1767 }
Sagi Grimberg1a9460c2019-07-03 14:08:04 -07001768
1769 if (opts->nr_poll_queues && nr_io_queues) {
1770 /* map dedicated poll queues only if we have queues left */
1771 ctrl->io_queues[HCTX_TYPE_POLL] =
1772 min(opts->nr_poll_queues, nr_io_queues);
1773 }
Sagi Grimberg64861992019-05-28 22:49:05 -07001774}
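/*
 * Worked example of the split above, assuming the illustrative 4/2/1
 * request from nvme_tcp_nr_io_queues() was granted in full
 * (nr_io_queues == 7):
 *
 *	HCTX_TYPE_READ    = 4			(all requested read queues)
 *	HCTX_TYPE_DEFAULT = min(2, 7 - 4) = 2	(dedicated write queues)
 *	HCTX_TYPE_POLL    = min(1, 7 - 4 - 2) = 1
 *
 * Dedicated default (write) queues exist only if every requested read
 * queue was satisfied first; otherwise reads and writes share the
 * default queues, and poll queues are carved from whatever remains.
 */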
1775
Sagi Grimbergefb973b2019-04-24 11:53:19 -07001776static int nvme_tcp_alloc_io_queues(struct nvme_ctrl *ctrl)
Sagi Grimberg3f2304f2018-12-03 17:52:17 -08001777{
1778 unsigned int nr_io_queues;
1779 int ret;
1780
1781 nr_io_queues = nvme_tcp_nr_io_queues(ctrl);
1782 ret = nvme_set_queue_count(ctrl, &nr_io_queues);
1783 if (ret)
1784 return ret;
1785
Ruozhu Li664227f2021-08-07 11:50:23 +08001786 if (nr_io_queues == 0) {
Sagi Grimberg72f57242021-03-15 14:04:26 -07001787 dev_err(ctrl->device,
1788 "unable to set any I/O queues\n");
1789 return -ENOMEM;
1790 }
Sagi Grimberg3f2304f2018-12-03 17:52:17 -08001791
Ruozhu Li664227f2021-08-07 11:50:23 +08001792 ctrl->queue_count = nr_io_queues + 1;
Sagi Grimberg3f2304f2018-12-03 17:52:17 -08001793 dev_info(ctrl->device,
1794 "creating %d I/O queues.\n", nr_io_queues);
1795
Sagi Grimberg64861992019-05-28 22:49:05 -07001796 nvme_tcp_set_io_queues(ctrl, nr_io_queues);
1797
Sagi Grimbergefb973b2019-04-24 11:53:19 -07001798 return __nvme_tcp_alloc_io_queues(ctrl);
Sagi Grimberg3f2304f2018-12-03 17:52:17 -08001799}
1800
1801static void nvme_tcp_destroy_io_queues(struct nvme_ctrl *ctrl, bool remove)
1802{
1803 nvme_tcp_stop_io_queues(ctrl);
1804 if (remove) {
Sagi Grimberge85037a2018-12-31 23:58:30 -08001805 blk_cleanup_queue(ctrl->connect_q);
Sagi Grimberg3f2304f2018-12-03 17:52:17 -08001806 blk_mq_free_tag_set(ctrl->tagset);
1807 }
1808 nvme_tcp_free_io_queues(ctrl);
1809}
1810
1811static int nvme_tcp_configure_io_queues(struct nvme_ctrl *ctrl, bool new)
1812{
1813 int ret;
1814
Sagi Grimbergefb973b2019-04-24 11:53:19 -07001815 ret = nvme_tcp_alloc_io_queues(ctrl);
Sagi Grimberg3f2304f2018-12-03 17:52:17 -08001816 if (ret)
1817 return ret;
1818
1819 if (new) {
1820 ctrl->tagset = nvme_tcp_alloc_tagset(ctrl, false);
1821 if (IS_ERR(ctrl->tagset)) {
1822 ret = PTR_ERR(ctrl->tagset);
1823 goto out_free_io_queues;
1824 }
1825
Sagi Grimberge85037a2018-12-31 23:58:30 -08001826 ctrl->connect_q = blk_mq_init_queue(ctrl->tagset);
1827 if (IS_ERR(ctrl->connect_q)) {
1828 ret = PTR_ERR(ctrl->connect_q);
1829 goto out_free_tag_set;
Sagi Grimberg3f2304f2018-12-03 17:52:17 -08001830 }
Sagi Grimberg3f2304f2018-12-03 17:52:17 -08001831 }
1832
1833 ret = nvme_tcp_start_io_queues(ctrl);
1834 if (ret)
1835 goto out_cleanup_connect_q;
1836
Sagi Grimberg2875b0a2020-07-24 15:10:12 -07001837 if (!new) {
1838 nvme_start_queues(ctrl);
Sagi Grimberge5c01f42020-07-30 13:25:34 -07001839 if (!nvme_wait_freeze_timeout(ctrl, NVME_IO_TIMEOUT)) {
1840 /*
1841 * If we timed out waiting for freeze we are likely to
1842 * be stuck. Fail the controller initialization just
1843 * to be safe.
1844 */
1845 ret = -ENODEV;
1846 goto out_wait_freeze_timed_out;
1847 }
Sagi Grimberg2875b0a2020-07-24 15:10:12 -07001848 blk_mq_update_nr_hw_queues(ctrl->tagset,
1849 ctrl->queue_count - 1);
1850 nvme_unfreeze(ctrl);
1851 }
1852
Sagi Grimberg3f2304f2018-12-03 17:52:17 -08001853 return 0;
1854
Sagi Grimberge5c01f42020-07-30 13:25:34 -07001855out_wait_freeze_timed_out:
1856 nvme_stop_queues(ctrl);
Chao Leng70a99572021-01-21 11:32:38 +08001857 nvme_sync_io_queues(ctrl);
Sagi Grimberge5c01f42020-07-30 13:25:34 -07001858 nvme_tcp_stop_io_queues(ctrl);
Sagi Grimberg3f2304f2018-12-03 17:52:17 -08001859out_cleanup_connect_q:
Chao Leng70a99572021-01-21 11:32:38 +08001860 nvme_cancel_tagset(ctrl);
Sagi Grimberge85037a2018-12-31 23:58:30 -08001861 if (new)
Sagi Grimberg3f2304f2018-12-03 17:52:17 -08001862 blk_cleanup_queue(ctrl->connect_q);
1863out_free_tag_set:
1864 if (new)
1865 blk_mq_free_tag_set(ctrl->tagset);
1866out_free_io_queues:
1867 nvme_tcp_free_io_queues(ctrl);
1868 return ret;
1869}
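/*
 * On the reconnect path (!new) the queues were frozen by
 * nvme_tcp_teardown_io_queues() before the transport went away.
 * Restarting them and waiting for the freeze to complete ensures every
 * request that was in flight across the reconnect has drained before
 * blk_mq_update_nr_hw_queues() reshuffles the hardware contexts; a
 * controller that cannot drain within NVME_IO_TIMEOUT is assumed stuck
 * and initialization is failed rather than left to hang.
 */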
1870
1871static void nvme_tcp_destroy_admin_queue(struct nvme_ctrl *ctrl, bool remove)
1872{
1873 nvme_tcp_stop_queue(ctrl, 0);
1874 if (remove) {
Sagi Grimberg3f2304f2018-12-03 17:52:17 -08001875 blk_cleanup_queue(ctrl->admin_q);
Sagi Grimberge7832cb2019-08-02 19:33:59 -07001876 blk_cleanup_queue(ctrl->fabrics_q);
Sagi Grimberg3f2304f2018-12-03 17:52:17 -08001877 blk_mq_free_tag_set(ctrl->admin_tagset);
1878 }
1879 nvme_tcp_free_admin_queue(ctrl);
1880}
1881
1882static int nvme_tcp_configure_admin_queue(struct nvme_ctrl *ctrl, bool new)
1883{
1884 int error;
1885
1886 error = nvme_tcp_alloc_admin_queue(ctrl);
1887 if (error)
1888 return error;
1889
1890 if (new) {
1891 ctrl->admin_tagset = nvme_tcp_alloc_tagset(ctrl, true);
1892 if (IS_ERR(ctrl->admin_tagset)) {
1893 error = PTR_ERR(ctrl->admin_tagset);
1894 goto out_free_queue;
1895 }
1896
Sagi Grimberge7832cb2019-08-02 19:33:59 -07001897 ctrl->fabrics_q = blk_mq_init_queue(ctrl->admin_tagset);
1898 if (IS_ERR(ctrl->fabrics_q)) {
1899 error = PTR_ERR(ctrl->fabrics_q);
1900 goto out_free_tagset;
1901 }
1902
Sagi Grimberg3f2304f2018-12-03 17:52:17 -08001903 ctrl->admin_q = blk_mq_init_queue(ctrl->admin_tagset);
1904 if (IS_ERR(ctrl->admin_q)) {
1905 error = PTR_ERR(ctrl->admin_q);
Sagi Grimberge7832cb2019-08-02 19:33:59 -07001906 goto out_cleanup_fabrics_q;
Sagi Grimberg3f2304f2018-12-03 17:52:17 -08001907 }
1908 }
1909
1910 error = nvme_tcp_start_queue(ctrl, 0);
1911 if (error)
1912 goto out_cleanup_queue;
1913
Sagi Grimbergc0f2f452019-07-22 17:06:53 -07001914 error = nvme_enable_ctrl(ctrl);
Sagi Grimberg3f2304f2018-12-03 17:52:17 -08001915 if (error)
1916 goto out_stop_queue;
1917
Sagi Grimberge7832cb2019-08-02 19:33:59 -07001918 blk_mq_unquiesce_queue(ctrl->admin_q);
1919
Chaitanya Kulkarnif21c47692021-02-28 18:06:04 -08001920 error = nvme_init_ctrl_finish(ctrl);
Sagi Grimberg3f2304f2018-12-03 17:52:17 -08001921 if (error)
Chao Leng70a99572021-01-21 11:32:38 +08001922 goto out_quiesce_queue;
Sagi Grimberg3f2304f2018-12-03 17:52:17 -08001923
1924 return 0;
1925
Chao Leng70a99572021-01-21 11:32:38 +08001926out_quiesce_queue:
1927 blk_mq_quiesce_queue(ctrl->admin_q);
1928 blk_sync_queue(ctrl->admin_q);
Sagi Grimberg3f2304f2018-12-03 17:52:17 -08001929out_stop_queue:
1930 nvme_tcp_stop_queue(ctrl, 0);
Chao Leng70a99572021-01-21 11:32:38 +08001931 nvme_cancel_admin_tagset(ctrl);
Sagi Grimberg3f2304f2018-12-03 17:52:17 -08001932out_cleanup_queue:
1933 if (new)
1934 blk_cleanup_queue(ctrl->admin_q);
Sagi Grimberge7832cb2019-08-02 19:33:59 -07001935out_cleanup_fabrics_q:
1936 if (new)
1937 blk_cleanup_queue(ctrl->fabrics_q);
Sagi Grimberg3f2304f2018-12-03 17:52:17 -08001938out_free_tagset:
1939 if (new)
1940 blk_mq_free_tag_set(ctrl->admin_tagset);
1941out_free_queue:
1942 nvme_tcp_free_admin_queue(ctrl);
1943 return error;
1944}
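/*
 * Admin bring-up order matters: allocate the queue and tag set, create
 * the fabrics and admin request queues, send Connect via
 * nvme_tcp_start_queue(), enable the controller (CC.EN through a
 * fabrics Property Set), and only then unquiesce the admin queue and
 * run nvme_init_ctrl_finish(), which issues Identify and friends over
 * the queue that just went live. The error labels unwind in exact
 * reverse order.
 */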
1945
1946static void nvme_tcp_teardown_admin_queue(struct nvme_ctrl *ctrl,
1947 bool remove)
1948{
1949 blk_mq_quiesce_queue(ctrl->admin_q);
Chao Lengd6f66212020-10-22 10:15:15 +08001950 blk_sync_queue(ctrl->admin_q);
Sagi Grimberg3f2304f2018-12-03 17:52:17 -08001951 nvme_tcp_stop_queue(ctrl, 0);
Chao Leng563c8152021-01-21 11:32:40 +08001952 nvme_cancel_admin_tagset(ctrl);
Sagi Grimberge7832cb2019-08-02 19:33:59 -07001953 if (remove)
1954 blk_mq_unquiesce_queue(ctrl->admin_q);
Sagi Grimberg3f2304f2018-12-03 17:52:17 -08001955 nvme_tcp_destroy_admin_queue(ctrl, remove);
1956}
1957
1958static void nvme_tcp_teardown_io_queues(struct nvme_ctrl *ctrl,
1959 bool remove)
1960{
1961 if (ctrl->queue_count <= 1)
Chao Lengd6f66212020-10-22 10:15:15 +08001962 return;
Sagi Grimbergd4d61472020-08-05 18:13:48 -07001963 blk_mq_quiesce_queue(ctrl->admin_q);
Sagi Grimberg2875b0a2020-07-24 15:10:12 -07001964 nvme_start_freeze(ctrl);
Sagi Grimberg3f2304f2018-12-03 17:52:17 -08001965 nvme_stop_queues(ctrl);
Chao Lengd6f66212020-10-22 10:15:15 +08001966 nvme_sync_io_queues(ctrl);
Sagi Grimberg3f2304f2018-12-03 17:52:17 -08001967 nvme_tcp_stop_io_queues(ctrl);
Chao Leng563c8152021-01-21 11:32:40 +08001968 nvme_cancel_tagset(ctrl);
Sagi Grimberg3f2304f2018-12-03 17:52:17 -08001969 if (remove)
1970 nvme_start_queues(ctrl);
1971 nvme_tcp_destroy_io_queues(ctrl, remove);
1972}
1973
1974static void nvme_tcp_reconnect_or_remove(struct nvme_ctrl *ctrl)
1975{
1976 /* If we are resetting/deleting then do nothing */
1977 if (ctrl->state != NVME_CTRL_CONNECTING) {
1978 WARN_ON_ONCE(ctrl->state == NVME_CTRL_NEW ||
1979 ctrl->state == NVME_CTRL_LIVE);
1980 return;
1981 }
1982
1983 if (nvmf_should_reconnect(ctrl)) {
1984 dev_info(ctrl->device, "Reconnecting in %d seconds...\n",
1985 ctrl->opts->reconnect_delay);
1986 queue_delayed_work(nvme_wq, &to_tcp_ctrl(ctrl)->connect_work,
1987 ctrl->opts->reconnect_delay * HZ);
1988 } else {
1989 dev_info(ctrl->device, "Removing controller...\n");
1990 nvme_delete_ctrl(ctrl);
1991 }
1992}
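/*
 * Reconnect policy: nvmf_should_reconnect() compares nr_reconnects
 * against a limit derived from the ctrl_loss_tmo and reconnect_delay
 * options (roughly ctrl_loss_tmo / reconnect_delay attempts, with a
 * negative ctrl_loss_tmo meaning retry forever). For example, the
 * default 600s ctrl_loss_tmo with a 10s reconnect_delay allows about
 * 60 attempts before the controller is removed.
 */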
1993
1994static int nvme_tcp_setup_ctrl(struct nvme_ctrl *ctrl, bool new)
1995{
1996 struct nvmf_ctrl_options *opts = ctrl->opts;
Colin Ian King312910f2019-09-05 15:34:35 +01001997 int ret;
Sagi Grimberg3f2304f2018-12-03 17:52:17 -08001998
1999 ret = nvme_tcp_configure_admin_queue(ctrl, new);
2000 if (ret)
2001 return ret;
2002
2003 if (ctrl->icdoff) {
Dan Carpenter522af602021-06-05 15:48:16 +03002004 ret = -EOPNOTSUPP;
Sagi Grimberg3f2304f2018-12-03 17:52:17 -08002005 dev_err(ctrl->device, "icdoff is not supported!\n");
2006 goto destroy_admin;
2007 }
2008
Chaitanya Kulkarni3b540642021-06-09 18:28:26 -07002009 if (!nvme_ctrl_sgl_supported(ctrl)) {
Dan Carpenter522af602021-06-05 15:48:16 +03002010 ret = -EOPNOTSUPP;
Max Gurtovoy73ffcef2021-03-30 23:01:19 +00002011 dev_err(ctrl->device, "Mandatory sgls are not supported!\n");
2012 goto destroy_admin;
2013 }
2014
Sagi Grimberg3f2304f2018-12-03 17:52:17 -08002015 if (opts->queue_size > ctrl->sqsize + 1)
2016 dev_warn(ctrl->device,
2017 "queue_size %zu > ctrl sqsize %u, clamping down\n",
2018 opts->queue_size, ctrl->sqsize + 1);
2019
2020 if (ctrl->sqsize + 1 > ctrl->maxcmd) {
2021 dev_warn(ctrl->device,
2022 "sqsize %u > ctrl maxcmd %u, clamping down\n",
2023 ctrl->sqsize + 1, ctrl->maxcmd);
2024 ctrl->sqsize = ctrl->maxcmd - 1;
2025 }
2026
2027 if (ctrl->queue_count > 1) {
2028 ret = nvme_tcp_configure_io_queues(ctrl, new);
2029 if (ret)
2030 goto destroy_admin;
2031 }
2032
2033 if (!nvme_change_ctrl_state(ctrl, NVME_CTRL_LIVE)) {
Israel Rukshinbea54ef2020-03-24 17:29:45 +02002034 /*
Sagi Grimbergecca390e2020-07-22 16:32:19 -07002035 * state change failure is ok if we started ctrl delete,
Israel Rukshinbea54ef2020-03-24 17:29:45 +02002036 * unless we are in the middle of creating a new controller,
2037 * in which case we must fail to avoid racing with the teardown flow.
2038 */
Sagi Grimbergecca390e2020-07-22 16:32:19 -07002039 WARN_ON_ONCE(ctrl->state != NVME_CTRL_DELETING &&
2040 ctrl->state != NVME_CTRL_DELETING_NOIO);
Israel Rukshinbea54ef2020-03-24 17:29:45 +02002041 WARN_ON_ONCE(new);
Sagi Grimberg3f2304f2018-12-03 17:52:17 -08002042 ret = -EINVAL;
2043 goto destroy_io;
2044 }
2045
2046 nvme_start_ctrl(ctrl);
2047 return 0;
2048
2049destroy_io:
Chao Leng70a99572021-01-21 11:32:38 +08002050 if (ctrl->queue_count > 1) {
2051 nvme_stop_queues(ctrl);
2052 nvme_sync_io_queues(ctrl);
2053 nvme_tcp_stop_io_queues(ctrl);
2054 nvme_cancel_tagset(ctrl);
Sagi Grimberg3f2304f2018-12-03 17:52:17 -08002055 nvme_tcp_destroy_io_queues(ctrl, new);
Chao Leng70a99572021-01-21 11:32:38 +08002056 }
Sagi Grimberg3f2304f2018-12-03 17:52:17 -08002057destroy_admin:
Chao Leng70a99572021-01-21 11:32:38 +08002058 blk_mq_quiesce_queue(ctrl->admin_q);
2059 blk_sync_queue(ctrl->admin_q);
Sagi Grimberg3f2304f2018-12-03 17:52:17 -08002060 nvme_tcp_stop_queue(ctrl, 0);
Chao Leng70a99572021-01-21 11:32:38 +08002061 nvme_cancel_admin_tagset(ctrl);
Sagi Grimberg3f2304f2018-12-03 17:52:17 -08002062 nvme_tcp_destroy_admin_queue(ctrl, new);
2063 return ret;
2064}
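/*
 * Two transport prerequisites are checked as soon as the admin queue is
 * up: the host implementation does not support a non-zero ICDOFF
 * (in-capsule data offset), and the controller must support SGLs, since
 * every NVMe/TCP command is described by a transport SGL. queue_size
 * and sqsize are then clamped against the controller's MAXCMD before
 * any I/O queues are created.
 */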
2065
2066static void nvme_tcp_reconnect_ctrl_work(struct work_struct *work)
2067{
2068 struct nvme_tcp_ctrl *tcp_ctrl = container_of(to_delayed_work(work),
2069 struct nvme_tcp_ctrl, connect_work);
2070 struct nvme_ctrl *ctrl = &tcp_ctrl->ctrl;
2071
2072 ++ctrl->nr_reconnects;
2073
2074 if (nvme_tcp_setup_ctrl(ctrl, false))
2075 goto requeue;
2076
Colin Ian King56a77d22018-12-14 11:42:43 +00002077 dev_info(ctrl->device, "Successfully reconnected (attempt %d)\n",
Sagi Grimberg3f2304f2018-12-03 17:52:17 -08002078 ctrl->nr_reconnects);
2079
2080 ctrl->nr_reconnects = 0;
2081
2082 return;
2083
2084requeue:
2085 dev_info(ctrl->device, "Failed reconnect attempt %d\n",
2086 ctrl->nr_reconnects);
2087 nvme_tcp_reconnect_or_remove(ctrl);
2088}
2089
2090static void nvme_tcp_error_recovery_work(struct work_struct *work)
2091{
2092 struct nvme_tcp_ctrl *tcp_ctrl = container_of(work,
2093 struct nvme_tcp_ctrl, err_work);
2094 struct nvme_ctrl *ctrl = &tcp_ctrl->ctrl;
2095
2096 nvme_stop_keep_alive(ctrl);
2097 nvme_tcp_teardown_io_queues(ctrl, false);
2098 /* unquiesce so that pending requests fail fast */
2099 nvme_start_queues(ctrl);
2100 nvme_tcp_teardown_admin_queue(ctrl, false);
Sagi Grimberge7832cb2019-08-02 19:33:59 -07002101 blk_mq_unquiesce_queue(ctrl->admin_q);
Sagi Grimberg3f2304f2018-12-03 17:52:17 -08002102
2103 if (!nvme_change_ctrl_state(ctrl, NVME_CTRL_CONNECTING)) {
Sagi Grimbergecca390e2020-07-22 16:32:19 -07002104 /* state change failure is ok if we started ctrl delete */
2105 WARN_ON_ONCE(ctrl->state != NVME_CTRL_DELETING &&
2106 ctrl->state != NVME_CTRL_DELETING_NOIO);
Sagi Grimberg3f2304f2018-12-03 17:52:17 -08002107 return;
2108 }
2109
2110 nvme_tcp_reconnect_or_remove(ctrl);
2111}
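/*
 * Error recovery ordering: keep-alive is stopped first so it cannot
 * fire into a dead connection, the I/O queues are torn down, and the
 * request queues are unquiesced afterwards so pending requests fail
 * fast (and can be retried or failed over by multipath) instead of
 * hanging until a reconnect succeeds. Only then does the state machine
 * move to CONNECTING and the reconnect work get scheduled.
 */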
2112
2113static void nvme_tcp_teardown_ctrl(struct nvme_ctrl *ctrl, bool shutdown)
2114{
Sagi Grimberg794a4cb2019-01-01 00:19:30 -08002115 cancel_work_sync(&to_tcp_ctrl(ctrl)->err_work);
2116 cancel_delayed_work_sync(&to_tcp_ctrl(ctrl)->connect_work);
2117
Sagi Grimberg3f2304f2018-12-03 17:52:17 -08002118 nvme_tcp_teardown_io_queues(ctrl, shutdown);
Sagi Grimberge7832cb2019-08-02 19:33:59 -07002119 blk_mq_quiesce_queue(ctrl->admin_q);
Sagi Grimberg3f2304f2018-12-03 17:52:17 -08002120 if (shutdown)
2121 nvme_shutdown_ctrl(ctrl);
2122 else
Sagi Grimbergb5b05042019-07-22 17:06:54 -07002123 nvme_disable_ctrl(ctrl);
Sagi Grimberg3f2304f2018-12-03 17:52:17 -08002124 nvme_tcp_teardown_admin_queue(ctrl, shutdown);
2125}
2126
2127static void nvme_tcp_delete_ctrl(struct nvme_ctrl *ctrl)
2128{
2129 nvme_tcp_teardown_ctrl(ctrl, true);
2130}
2131
2132static void nvme_reset_ctrl_work(struct work_struct *work)
2133{
2134 struct nvme_ctrl *ctrl =
2135 container_of(work, struct nvme_ctrl, reset_work);
2136
2137 nvme_stop_ctrl(ctrl);
2138 nvme_tcp_teardown_ctrl(ctrl, false);
2139
2140 if (!nvme_change_ctrl_state(ctrl, NVME_CTRL_CONNECTING)) {
Sagi Grimbergecca390e2020-07-22 16:32:19 -07002141 /* state change failure is ok if we started ctrl delete */
2142 WARN_ON_ONCE(ctrl->state != NVME_CTRL_DELETING &&
2143 ctrl->state != NVME_CTRL_DELETING_NOIO);
Sagi Grimberg3f2304f2018-12-03 17:52:17 -08002144 return;
2145 }
2146
2147 if (nvme_tcp_setup_ctrl(ctrl, false))
2148 goto out_fail;
2149
2150 return;
2151
2152out_fail:
2153 ++ctrl->nr_reconnects;
2154 nvme_tcp_reconnect_or_remove(ctrl);
2155}
2156
Sagi Grimberg3f2304f2018-12-03 17:52:17 -08002157static void nvme_tcp_free_ctrl(struct nvme_ctrl *nctrl)
2158{
2159 struct nvme_tcp_ctrl *ctrl = to_tcp_ctrl(nctrl);
2160
2161 if (list_empty(&ctrl->list))
2162 goto free_ctrl;
2163
2164 mutex_lock(&nvme_tcp_ctrl_mutex);
2165 list_del(&ctrl->list);
2166 mutex_unlock(&nvme_tcp_ctrl_mutex);
2167
2168 nvmf_free_options(nctrl->opts);
2169free_ctrl:
2170 kfree(ctrl->queues);
2171 kfree(ctrl);
2172}
2173
2174static void nvme_tcp_set_sg_null(struct nvme_command *c)
2175{
2176 struct nvme_sgl_desc *sg = &c->common.dptr.sgl;
2177
2178 sg->addr = 0;
2179 sg->length = 0;
2180 sg->type = (NVME_TRANSPORT_SGL_DATA_DESC << 4) |
2181 NVME_SGL_FMT_TRANSPORT_A;
2182}
2183
2184static void nvme_tcp_set_sg_inline(struct nvme_tcp_queue *queue,
2185 struct nvme_command *c, u32 data_len)
2186{
2187 struct nvme_sgl_desc *sg = &c->common.dptr.sgl;
2188
2189 sg->addr = cpu_to_le64(queue->ctrl->ctrl.icdoff);
2190 sg->length = cpu_to_le32(data_len);
2191 sg->type = (NVME_SGL_FMT_DATA_DESC << 4) | NVME_SGL_FMT_OFFSET;
2192}
2193
2194static void nvme_tcp_set_sg_host_data(struct nvme_command *c,
2195 u32 data_len)
2196{
2197 struct nvme_sgl_desc *sg = &c->common.dptr.sgl;
2198
2199 sg->addr = 0;
2200 sg->length = cpu_to_le32(data_len);
2201 sg->type = (NVME_TRANSPORT_SGL_DATA_DESC << 4) |
2202 NVME_SGL_FMT_TRANSPORT_A;
2203}
2204
2205static void nvme_tcp_submit_async_event(struct nvme_ctrl *arg)
2206{
2207 struct nvme_tcp_ctrl *ctrl = to_tcp_ctrl(arg);
2208 struct nvme_tcp_queue *queue = &ctrl->queues[0];
2209 struct nvme_tcp_cmd_pdu *pdu = ctrl->async_req.pdu;
2210 struct nvme_command *cmd = &pdu->cmd;
2211 u8 hdgst = nvme_tcp_hdgst_len(queue);
2212
2213 memset(pdu, 0, sizeof(*pdu));
2214 pdu->hdr.type = nvme_tcp_cmd;
2215 if (queue->hdr_digest)
2216 pdu->hdr.flags |= NVME_TCP_F_HDGST;
2217 pdu->hdr.hlen = sizeof(*pdu);
2218 pdu->hdr.plen = cpu_to_le32(pdu->hdr.hlen + hdgst);
2219
2220 cmd->common.opcode = nvme_admin_async_event;
2221 cmd->common.command_id = NVME_AQ_BLK_MQ_DEPTH;
2222 cmd->common.flags |= NVME_CMD_SGL_METABUF;
2223 nvme_tcp_set_sg_null(cmd);
2224
2225 ctrl->async_req.state = NVME_TCP_SEND_CMD_PDU;
2226 ctrl->async_req.offset = 0;
2227 ctrl->async_req.curr_bio = NULL;
2228 ctrl->async_req.data_len = 0;
2229
Sagi Grimberg86f03482020-06-18 17:30:23 -07002230 nvme_tcp_queue_request(&ctrl->async_req, true, true);
Sagi Grimberg3f2304f2018-12-03 17:52:17 -08002231}
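/*
 * The async event (AER) command is special: it is not a block layer
 * request, so it carries no bio and no tag from the tag set. Its
 * command_id is the fixed NVME_AQ_BLK_MQ_DEPTH value, which is how the
 * completion path recognizes it, and it bypasses queue_rq entirely,
 * being pushed straight onto the admin queue's send path.
 */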
2232
Sagi Grimberg236187c2020-07-28 13:16:36 -07002233static void nvme_tcp_complete_timed_out(struct request *rq)
2234{
2235 struct nvme_tcp_request *req = blk_mq_rq_to_pdu(rq);
2236 struct nvme_ctrl *ctrl = &req->queue->ctrl->ctrl;
2237
Sagi Grimberg236187c2020-07-28 13:16:36 -07002238 nvme_tcp_stop_queue(ctrl, nvme_tcp_queue_id(req->queue));
Sagi Grimberg0a8a2c852020-10-22 10:15:31 +08002239 if (blk_mq_request_started(rq) && !blk_mq_request_completed(rq)) {
Sagi Grimberg236187c2020-07-28 13:16:36 -07002240 nvme_req(rq)->status = NVME_SC_HOST_ABORTED_CMD;
2241 blk_mq_complete_request(rq);
2242 }
Sagi Grimberg236187c2020-07-28 13:16:36 -07002243}
2244
Sagi Grimberg3f2304f2018-12-03 17:52:17 -08002245static enum blk_eh_timer_return
2246nvme_tcp_timeout(struct request *rq, bool reserved)
2247{
2248 struct nvme_tcp_request *req = blk_mq_rq_to_pdu(rq);
Sagi Grimberg236187c2020-07-28 13:16:36 -07002249 struct nvme_ctrl *ctrl = &req->queue->ctrl->ctrl;
Sagi Grimberg3f2304f2018-12-03 17:52:17 -08002250 struct nvme_tcp_cmd_pdu *pdu = req->pdu;
2251
Sagi Grimberg236187c2020-07-28 13:16:36 -07002252 dev_warn(ctrl->device,
Sagi Grimberg3f2304f2018-12-03 17:52:17 -08002253 "queue %d: timeout request %#x type %d\n",
Sagi Grimberg39d57752019-01-08 01:01:30 -08002254 nvme_tcp_queue_id(req->queue), rq->tag, pdu->hdr.type);
Sagi Grimberg3f2304f2018-12-03 17:52:17 -08002255
Sagi Grimberg236187c2020-07-28 13:16:36 -07002256 if (ctrl->state != NVME_CTRL_LIVE) {
Sagi Grimberg39d57752019-01-08 01:01:30 -08002257 /*
Sagi Grimberg236187c2020-07-28 13:16:36 -07002258 * If we are resetting, connecting or deleting we should
2259 * complete immediately because we may block the controller
2260 * teardown or setup sequence:
2261 * - ctrl disable/shutdown fabrics requests
2262 * - connect requests
2263 * - initialization admin requests
2264 * - I/O requests that entered after unquiescing and
2265 * the controller stopped responding
2266 *
2267 * All other requests should be cancelled by the error
2268 * recovery work, so it's fine that we fail it here.
Sagi Grimberg39d57752019-01-08 01:01:30 -08002269 */
Sagi Grimberg236187c2020-07-28 13:16:36 -07002270 nvme_tcp_complete_timed_out(rq);
Sagi Grimberg3f2304f2018-12-03 17:52:17 -08002271 return BLK_EH_DONE;
2272 }
2273
Sagi Grimberg236187c2020-07-28 13:16:36 -07002274 /*
2275 * LIVE state should trigger the normal error recovery which will
2276 * handle completing this request.
2277 */
2278 nvme_tcp_error_recovery(ctrl);
Sagi Grimberg3f2304f2018-12-03 17:52:17 -08002279 return BLK_EH_RESET_TIMER;
2280}
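/*
 * Two timeout regimes: if the controller is not LIVE the request is
 * completed right here with NVME_SC_HOST_ABORTED_CMD, since it may be
 * part of the very teardown or setup sequence that would otherwise be
 * blocked. If the controller is LIVE, the timeout is treated as a
 * connection failure: error recovery is kicked off and the timer is
 * reset, leaving the actual completion to the recovery work.
 */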
2281
2282static blk_status_t nvme_tcp_map_data(struct nvme_tcp_queue *queue,
2283 struct request *rq)
2284{
2285 struct nvme_tcp_request *req = blk_mq_rq_to_pdu(rq);
2286 struct nvme_tcp_cmd_pdu *pdu = req->pdu;
2287 struct nvme_command *c = &pdu->cmd;
2288
2289 c->common.flags |= NVME_CMD_SGL_METABUF;
2290
Sagi Grimberg25e5cb72020-03-23 15:06:30 -07002291 if (!blk_rq_nr_phys_segments(rq))
2292 nvme_tcp_set_sg_null(c);
2293 else if (rq_data_dir(rq) == WRITE &&
Sagi Grimberg3f2304f2018-12-03 17:52:17 -08002294 req->data_len <= nvme_tcp_inline_data_size(queue))
2295 nvme_tcp_set_sg_inline(queue, c, req->data_len);
2296 else
2297 nvme_tcp_set_sg_host_data(c, req->data_len);
2298
2299 return 0;
2300}
2301
2302static blk_status_t nvme_tcp_setup_cmd_pdu(struct nvme_ns *ns,
2303 struct request *rq)
2304{
2305 struct nvme_tcp_request *req = blk_mq_rq_to_pdu(rq);
2306 struct nvme_tcp_cmd_pdu *pdu = req->pdu;
2307 struct nvme_tcp_queue *queue = req->queue;
2308 u8 hdgst = nvme_tcp_hdgst_len(queue), ddgst = 0;
2309 blk_status_t ret;
2310
Keith Buschf4b9e6c2021-03-17 13:37:03 -07002311 ret = nvme_setup_cmd(ns, rq);
Sagi Grimberg3f2304f2018-12-03 17:52:17 -08002312 if (ret)
2313 return ret;
2314
2315 req->state = NVME_TCP_SEND_CMD_PDU;
Daniel Wagner1ba2e502021-08-30 15:36:26 +02002316 req->status = cpu_to_le16(NVME_SC_SUCCESS);
Sagi Grimberg3f2304f2018-12-03 17:52:17 -08002317 req->offset = 0;
2318 req->data_sent = 0;
2319 req->pdu_len = 0;
2320 req->pdu_sent = 0;
Sagi Grimberg25e5cb72020-03-23 15:06:30 -07002321 req->data_len = blk_rq_nr_phys_segments(rq) ?
2322 blk_rq_payload_bytes(rq) : 0;
Sagi Grimberg3f2304f2018-12-03 17:52:17 -08002323 req->curr_bio = rq->bio;
Sagi Grimberge11e5112021-02-10 14:04:00 -08002324 if (req->curr_bio && req->data_len)
Sagi Grimbergcb9b8702021-01-14 13:15:24 -08002325 nvme_tcp_init_iter(req, rq_data_dir(rq));
Sagi Grimberg3f2304f2018-12-03 17:52:17 -08002326
2327 if (rq_data_dir(rq) == WRITE &&
2328 req->data_len <= nvme_tcp_inline_data_size(queue))
2329 req->pdu_len = req->data_len;
Sagi Grimberg3f2304f2018-12-03 17:52:17 -08002330
2331 pdu->hdr.type = nvme_tcp_cmd;
2332 pdu->hdr.flags = 0;
2333 if (queue->hdr_digest)
2334 pdu->hdr.flags |= NVME_TCP_F_HDGST;
2335 if (queue->data_digest && req->pdu_len) {
2336 pdu->hdr.flags |= NVME_TCP_F_DDGST;
2337 ddgst = nvme_tcp_ddgst_len(queue);
2338 }
2339 pdu->hdr.hlen = sizeof(*pdu);
2340 pdu->hdr.pdo = req->pdu_len ? pdu->hdr.hlen + hdgst : 0;
2341 pdu->hdr.plen =
2342 cpu_to_le32(pdu->hdr.hlen + hdgst + req->pdu_len + ddgst);
2343
2344 ret = nvme_tcp_map_data(queue, rq);
2345 if (unlikely(ret)) {
Max Gurtovoy28a4cac2019-10-13 19:57:38 +03002346 nvme_cleanup_cmd(rq);
Sagi Grimberg3f2304f2018-12-03 17:52:17 -08002347 dev_err(queue->ctrl->ctrl.device,
2348 "Failed to map data (%d)\n", ret);
2349 return ret;
2350 }
2351
2352 return 0;
2353}
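/*
 * PDU framing example (illustrative sizes, assuming the usual 8-byte
 * common header and 64-byte SQE, i.e. sizeof(*pdu) == 72): for a 4KiB
 * inline write with header and data digests enabled,
 *
 *	hlen = 72
 *	pdo  = 72 + 4 = 76		(data begins after the header digest)
 *	plen = 72 + 4 + 4096 + 4 = 4176
 *
 * For commands without in-capsule data pdu_len stays 0, no DDGST is
 * carried, and plen collapses to hlen + hdgst.
 */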
2354
Sagi Grimberg86f03482020-06-18 17:30:23 -07002355static void nvme_tcp_commit_rqs(struct blk_mq_hw_ctx *hctx)
2356{
2357 struct nvme_tcp_queue *queue = hctx->driver_data;
2358
2359 if (!llist_empty(&queue->req_list))
2360 queue_work_on(queue->io_cpu, nvme_tcp_wq, &queue->io_work);
2361}
2362
Sagi Grimberg3f2304f2018-12-03 17:52:17 -08002363static blk_status_t nvme_tcp_queue_rq(struct blk_mq_hw_ctx *hctx,
2364 const struct blk_mq_queue_data *bd)
2365{
2366 struct nvme_ns *ns = hctx->queue->queuedata;
2367 struct nvme_tcp_queue *queue = hctx->driver_data;
2368 struct request *rq = bd->rq;
2369 struct nvme_tcp_request *req = blk_mq_rq_to_pdu(rq);
2370 bool queue_ready = test_bit(NVME_TCP_Q_LIVE, &queue->flags);
2371 blk_status_t ret;
2372
Tao Chiua9715742021-04-26 10:53:10 +08002373 if (!nvme_check_ready(&queue->ctrl->ctrl, rq, queue_ready))
2374 return nvme_fail_nonready_command(&queue->ctrl->ctrl, rq);
Sagi Grimberg3f2304f2018-12-03 17:52:17 -08002375
2376 ret = nvme_tcp_setup_cmd_pdu(ns, rq);
2377 if (unlikely(ret))
2378 return ret;
2379
2380 blk_mq_start_request(rq);
2381
Sagi Grimberg86f03482020-06-18 17:30:23 -07002382 nvme_tcp_queue_request(req, true, bd->last);
Sagi Grimberg3f2304f2018-12-03 17:52:17 -08002383
2384 return BLK_STS_OK;
2385}
2386
Sagi Grimberg873946f2018-12-11 23:38:57 -08002387static int nvme_tcp_map_queues(struct blk_mq_tag_set *set)
2388{
2389 struct nvme_tcp_ctrl *ctrl = set->driver_data;
Sagi Grimberg64861992019-05-28 22:49:05 -07002390 struct nvmf_ctrl_options *opts = ctrl->ctrl.opts;
Sagi Grimberg873946f2018-12-11 23:38:57 -08002391
Sagi Grimberg64861992019-05-28 22:49:05 -07002392 if (opts->nr_write_queues && ctrl->io_queues[HCTX_TYPE_READ]) {
Sagi Grimberg873946f2018-12-11 23:38:57 -08002393 /* separate read/write queues */
2394 set->map[HCTX_TYPE_DEFAULT].nr_queues =
Sagi Grimberg64861992019-05-28 22:49:05 -07002395 ctrl->io_queues[HCTX_TYPE_DEFAULT];
2396 set->map[HCTX_TYPE_DEFAULT].queue_offset = 0;
2397 set->map[HCTX_TYPE_READ].nr_queues =
2398 ctrl->io_queues[HCTX_TYPE_READ];
Sagi Grimberg873946f2018-12-11 23:38:57 -08002399 set->map[HCTX_TYPE_READ].queue_offset =
Sagi Grimberg64861992019-05-28 22:49:05 -07002400 ctrl->io_queues[HCTX_TYPE_DEFAULT];
Sagi Grimberg873946f2018-12-11 23:38:57 -08002401 } else {
Sagi Grimberg64861992019-05-28 22:49:05 -07002402 /* shared read/write queues */
Sagi Grimberg873946f2018-12-11 23:38:57 -08002403 set->map[HCTX_TYPE_DEFAULT].nr_queues =
Sagi Grimberg64861992019-05-28 22:49:05 -07002404 ctrl->io_queues[HCTX_TYPE_DEFAULT];
2405 set->map[HCTX_TYPE_DEFAULT].queue_offset = 0;
2406 set->map[HCTX_TYPE_READ].nr_queues =
2407 ctrl->io_queues[HCTX_TYPE_DEFAULT];
Sagi Grimberg873946f2018-12-11 23:38:57 -08002408 set->map[HCTX_TYPE_READ].queue_offset = 0;
2409 }
2410 blk_mq_map_queues(&set->map[HCTX_TYPE_DEFAULT]);
2411 blk_mq_map_queues(&set->map[HCTX_TYPE_READ]);
Sagi Grimberg64861992019-05-28 22:49:05 -07002412
Sagi Grimberg1a9460c2019-07-03 14:08:04 -07002413 if (opts->nr_poll_queues && ctrl->io_queues[HCTX_TYPE_POLL]) {
2414 /* map dedicated poll queues only if we have queues left */
2415 set->map[HCTX_TYPE_POLL].nr_queues =
2416 ctrl->io_queues[HCTX_TYPE_POLL];
2417 set->map[HCTX_TYPE_POLL].queue_offset =
2418 ctrl->io_queues[HCTX_TYPE_DEFAULT] +
2419 ctrl->io_queues[HCTX_TYPE_READ];
2420 blk_mq_map_queues(&set->map[HCTX_TYPE_POLL]);
2421 }
2422
Sagi Grimberg64861992019-05-28 22:49:05 -07002423 dev_info(ctrl->ctrl.device,
Sagi Grimberg1a9460c2019-07-03 14:08:04 -07002424 "mapped %d/%d/%d default/read/poll queues.\n",
Sagi Grimberg64861992019-05-28 22:49:05 -07002425 ctrl->io_queues[HCTX_TYPE_DEFAULT],
Sagi Grimberg1a9460c2019-07-03 14:08:04 -07002426 ctrl->io_queues[HCTX_TYPE_READ],
2427 ctrl->io_queues[HCTX_TYPE_POLL]);
Sagi Grimberg64861992019-05-28 22:49:05 -07002428
Sagi Grimberg873946f2018-12-11 23:38:57 -08002429 return 0;
2430}
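/*
 * Continuing the illustrative 4/2/1 read/write/poll example: the
 * resulting blk-mq maps are DEFAULT (nr=2, offset=0), READ (nr=4,
 * offset=2) and POLL (nr=1, offset=6), i.e. hctxs 0-1 carry writes,
 * 2-5 carry reads and 6 is polled. Without dedicated write queues the
 * DEFAULT and READ maps instead overlap, both starting at offset 0.
 */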
2431
Jens Axboe5a72e892021-10-12 09:24:29 -06002432static int nvme_tcp_poll(struct blk_mq_hw_ctx *hctx, struct io_comp_batch *iob)
Sagi Grimberg1a9460c2019-07-03 14:08:04 -07002433{
2434 struct nvme_tcp_queue *queue = hctx->driver_data;
2435 struct sock *sk = queue->sock->sk;
2436
Sagi Grimbergf86e5bf2020-03-23 16:43:52 -07002437 if (!test_bit(NVME_TCP_Q_LIVE, &queue->flags))
2438 return 0;
2439
Sagi Grimberg72e5d752020-05-01 14:25:44 -07002440 set_bit(NVME_TCP_Q_POLLING, &queue->flags);
Eric Dumazet3f926af2019-10-23 22:44:51 -07002441 if (sk_can_busy_loop(sk) && skb_queue_empty_lockless(&sk->sk_receive_queue))
Sagi Grimberg1a9460c2019-07-03 14:08:04 -07002442 sk_busy_loop(sk, true);
2443 nvme_tcp_try_recv(queue);
Sagi Grimberg72e5d752020-05-01 14:25:44 -07002444 clear_bit(NVME_TCP_Q_POLLING, &queue->flags);
Sagi Grimberg1a9460c2019-07-03 14:08:04 -07002445 return queue->nr_cqe;
2446}
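/*
 * Polled completions rely on two pieces set up at queue allocation:
 * sk_ll_usec is made non-zero (when busy-poll support is compiled in)
 * so sk_busy_loop() will actually spin on the socket, and the
 * NVME_TCP_Q_POLLING flag tells nvme_tcp_data_ready() not to schedule
 * io_work while a poller is active, keeping reception on the polling
 * CPU instead of bouncing it to the workqueue.
 */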
2447
Rikard Falkeborn6acbd962020-05-29 00:25:07 +02002448static const struct blk_mq_ops nvme_tcp_mq_ops = {
Sagi Grimberg3f2304f2018-12-03 17:52:17 -08002449 .queue_rq = nvme_tcp_queue_rq,
Sagi Grimberg86f03482020-06-18 17:30:23 -07002450 .commit_rqs = nvme_tcp_commit_rqs,
Sagi Grimberg3f2304f2018-12-03 17:52:17 -08002451 .complete = nvme_complete_rq,
2452 .init_request = nvme_tcp_init_request,
2453 .exit_request = nvme_tcp_exit_request,
2454 .init_hctx = nvme_tcp_init_hctx,
2455 .timeout = nvme_tcp_timeout,
Sagi Grimberg873946f2018-12-11 23:38:57 -08002456 .map_queues = nvme_tcp_map_queues,
Sagi Grimberg1a9460c2019-07-03 14:08:04 -07002457 .poll = nvme_tcp_poll,
Sagi Grimberg3f2304f2018-12-03 17:52:17 -08002458};
2459
Rikard Falkeborn6acbd962020-05-29 00:25:07 +02002460static const struct blk_mq_ops nvme_tcp_admin_mq_ops = {
Sagi Grimberg3f2304f2018-12-03 17:52:17 -08002461 .queue_rq = nvme_tcp_queue_rq,
2462 .complete = nvme_complete_rq,
2463 .init_request = nvme_tcp_init_request,
2464 .exit_request = nvme_tcp_exit_request,
2465 .init_hctx = nvme_tcp_init_admin_hctx,
2466 .timeout = nvme_tcp_timeout,
2467};
2468
2469static const struct nvme_ctrl_ops nvme_tcp_ctrl_ops = {
2470 .name = "tcp",
2471 .module = THIS_MODULE,
2472 .flags = NVME_F_FABRICS,
2473 .reg_read32 = nvmf_reg_read32,
2474 .reg_read64 = nvmf_reg_read64,
2475 .reg_write32 = nvmf_reg_write32,
2476 .free_ctrl = nvme_tcp_free_ctrl,
2477 .submit_async_event = nvme_tcp_submit_async_event,
2478 .delete_ctrl = nvme_tcp_delete_ctrl,
2479 .get_address = nvmf_get_address,
Sagi Grimberg3f2304f2018-12-03 17:52:17 -08002480};
2481
2482static bool
2483nvme_tcp_existing_controller(struct nvmf_ctrl_options *opts)
2484{
2485 struct nvme_tcp_ctrl *ctrl;
2486 bool found = false;
2487
2488 mutex_lock(&nvme_tcp_ctrl_mutex);
2489 list_for_each_entry(ctrl, &nvme_tcp_ctrl_list, list) {
2490 found = nvmf_ip_options_match(&ctrl->ctrl, opts);
2491 if (found)
2492 break;
2493 }
2494 mutex_unlock(&nvme_tcp_ctrl_mutex);
2495
2496 return found;
2497}
2498
2499static struct nvme_ctrl *nvme_tcp_create_ctrl(struct device *dev,
2500 struct nvmf_ctrl_options *opts)
2501{
2502 struct nvme_tcp_ctrl *ctrl;
2503 int ret;
2504
2505 ctrl = kzalloc(sizeof(*ctrl), GFP_KERNEL);
2506 if (!ctrl)
2507 return ERR_PTR(-ENOMEM);
2508
2509 INIT_LIST_HEAD(&ctrl->list);
2510 ctrl->ctrl.opts = opts;
Sagi Grimberg1a9460c2019-07-03 14:08:04 -07002511 ctrl->ctrl.queue_count = opts->nr_io_queues + opts->nr_write_queues +
2512 opts->nr_poll_queues + 1;
Sagi Grimberg3f2304f2018-12-03 17:52:17 -08002513 ctrl->ctrl.sqsize = opts->queue_size - 1;
2514 ctrl->ctrl.kato = opts->kato;
2515
2516 INIT_DELAYED_WORK(&ctrl->connect_work,
2517 nvme_tcp_reconnect_ctrl_work);
2518 INIT_WORK(&ctrl->err_work, nvme_tcp_error_recovery_work);
2519 INIT_WORK(&ctrl->ctrl.reset_work, nvme_reset_ctrl_work);
2520
2521 if (!(opts->mask & NVMF_OPT_TRSVCID)) {
2522 opts->trsvcid =
2523 kstrdup(__stringify(NVME_TCP_DISC_PORT), GFP_KERNEL);
2524 if (!opts->trsvcid) {
2525 ret = -ENOMEM;
2526 goto out_free_ctrl;
2527 }
2528 opts->mask |= NVMF_OPT_TRSVCID;
2529 }
2530
2531 ret = inet_pton_with_scope(&init_net, AF_UNSPEC,
2532 opts->traddr, opts->trsvcid, &ctrl->addr);
2533 if (ret) {
2534 pr_err("malformed address passed: %s:%s\n",
2535 opts->traddr, opts->trsvcid);
2536 goto out_free_ctrl;
2537 }
2538
2539 if (opts->mask & NVMF_OPT_HOST_TRADDR) {
2540 ret = inet_pton_with_scope(&init_net, AF_UNSPEC,
2541 opts->host_traddr, NULL, &ctrl->src_addr);
2542 if (ret) {
2543 pr_err("malformed src address passed: %s\n",
2544 opts->host_traddr);
2545 goto out_free_ctrl;
2546 }
2547 }
2548
Martin Belanger3ede8f72021-05-20 15:09:34 -04002549 if (opts->mask & NVMF_OPT_HOST_IFACE) {
Prabhakar Kushwaha8b43ced2021-07-13 11:31:56 +02002550 if (!__dev_get_by_name(&init_net, opts->host_iface)) {
Martin Belanger3ede8f72021-05-20 15:09:34 -04002551 pr_err("invalid interface passed: %s\n",
2552 opts->host_iface);
2553 ret = -ENODEV;
2554 goto out_free_ctrl;
2555 }
2556 }
2557
Sagi Grimberg3f2304f2018-12-03 17:52:17 -08002558 if (!opts->duplicate_connect && nvme_tcp_existing_controller(opts)) {
2559 ret = -EALREADY;
2560 goto out_free_ctrl;
2561 }
2562
Sagi Grimberg873946f2018-12-11 23:38:57 -08002563 ctrl->queues = kcalloc(ctrl->ctrl.queue_count, sizeof(*ctrl->queues),
Sagi Grimberg3f2304f2018-12-03 17:52:17 -08002564 GFP_KERNEL);
2565 if (!ctrl->queues) {
2566 ret = -ENOMEM;
2567 goto out_free_ctrl;
2568 }
2569
2570 ret = nvme_init_ctrl(&ctrl->ctrl, dev, &nvme_tcp_ctrl_ops, 0);
2571 if (ret)
2572 goto out_kfree_queues;
2573
2574 if (!nvme_change_ctrl_state(&ctrl->ctrl, NVME_CTRL_CONNECTING)) {
2575 WARN_ON_ONCE(1);
2576 ret = -EINTR;
2577 goto out_uninit_ctrl;
2578 }
2579
2580 ret = nvme_tcp_setup_ctrl(&ctrl->ctrl, true);
2581 if (ret)
2582 goto out_uninit_ctrl;
2583
2584 dev_info(ctrl->ctrl.device, "new ctrl: NQN \"%s\", addr %pISp\n",
Hannes Reineckee5ea42f2021-09-22 08:35:25 +02002585 nvmf_ctrl_subsysnqn(&ctrl->ctrl), &ctrl->addr);
Sagi Grimberg3f2304f2018-12-03 17:52:17 -08002586
Sagi Grimberg3f2304f2018-12-03 17:52:17 -08002587 mutex_lock(&nvme_tcp_ctrl_mutex);
2588 list_add_tail(&ctrl->list, &nvme_tcp_ctrl_list);
2589 mutex_unlock(&nvme_tcp_ctrl_mutex);
2590
2591 return &ctrl->ctrl;
2592
2593out_uninit_ctrl:
2594 nvme_uninit_ctrl(&ctrl->ctrl);
2595 nvme_put_ctrl(&ctrl->ctrl);
2596 if (ret > 0)
2597 ret = -EIO;
2598 return ERR_PTR(ret);
2599out_kfree_queues:
2600 kfree(ctrl->queues);
2601out_free_ctrl:
2602 kfree(ctrl);
2603 return ERR_PTR(ret);
2604}
2605
2606static struct nvmf_transport_ops nvme_tcp_transport = {
2607 .name = "tcp",
2608 .module = THIS_MODULE,
2609 .required_opts = NVMF_OPT_TRADDR,
2610 .allowed_opts = NVMF_OPT_TRSVCID | NVMF_OPT_RECONNECT_DELAY |
2611 NVMF_OPT_HOST_TRADDR | NVMF_OPT_CTRL_LOSS_TMO |
Sagi Grimberg873946f2018-12-11 23:38:57 -08002612 NVMF_OPT_HDR_DIGEST | NVMF_OPT_DATA_DIGEST |
Israel Rukshinbb139852019-08-18 12:08:54 +03002613 NVMF_OPT_NR_WRITE_QUEUES | NVMF_OPT_NR_POLL_QUEUES |
Martin Belanger3ede8f72021-05-20 15:09:34 -04002614 NVMF_OPT_TOS | NVMF_OPT_HOST_IFACE,
Sagi Grimberg3f2304f2018-12-03 17:52:17 -08002615 .create_ctrl = nvme_tcp_create_ctrl,
2616};
2617
2618static int __init nvme_tcp_init_module(void)
2619{
2620 nvme_tcp_wq = alloc_workqueue("nvme_tcp_wq",
2621 WQ_MEM_RECLAIM | WQ_HIGHPRI, 0);
2622 if (!nvme_tcp_wq)
2623 return -ENOMEM;
2624
2625 nvmf_register_transport(&nvme_tcp_transport);
2626 return 0;
2627}
2628
2629static void __exit nvme_tcp_cleanup_module(void)
2630{
2631 struct nvme_tcp_ctrl *ctrl;
2632
2633 nvmf_unregister_transport(&nvme_tcp_transport);
2634
2635 mutex_lock(&nvme_tcp_ctrl_mutex);
2636 list_for_each_entry(ctrl, &nvme_tcp_ctrl_list, list)
2637 nvme_delete_ctrl(&ctrl->ctrl);
2638 mutex_unlock(&nvme_tcp_ctrl_mutex);
2639 flush_workqueue(nvme_delete_wq);
2640
2641 destroy_workqueue(nvme_tcp_wq);
2642}
2643
2644module_init(nvme_tcp_init_module);
2645module_exit(nvme_tcp_cleanup_module);
2646
2647MODULE_LICENSE("GPL v2");