// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (c) 2017-2018 Christoph Hellwig.
 */

#include <linux/moduleparam.h>
#include <trace/events/block.h>
#include "nvme.h"

static bool multipath = true;
module_param(multipath, bool, 0444);
MODULE_PARM_DESC(multipath,
	"turn on native support for multiple controllers per subsystem");

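/*
 * Queue freeze helpers: nvme_mpath_start_freeze(), nvme_mpath_wait_freeze()
 * and nvme_mpath_unfreeze() quiesce and resume the request queues of all
 * multipath nodes in a subsystem, e.g. across a controller reset.  Callers
 * must hold subsys->lock.
 */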
void nvme_mpath_unfreeze(struct nvme_subsystem *subsys)
{
	struct nvme_ns_head *h;

	lockdep_assert_held(&subsys->lock);
	list_for_each_entry(h, &subsys->nsheads, entry)
		if (h->disk)
			blk_mq_unfreeze_queue(h->disk->queue);
}

void nvme_mpath_wait_freeze(struct nvme_subsystem *subsys)
{
	struct nvme_ns_head *h;

	lockdep_assert_held(&subsys->lock);
	list_for_each_entry(h, &subsys->nsheads, entry)
		if (h->disk)
			blk_mq_freeze_queue_wait(h->disk->queue);
}

void nvme_mpath_start_freeze(struct nvme_subsystem *subsys)
{
	struct nvme_ns_head *h;

	lockdep_assert_held(&subsys->lock);
	list_for_each_entry(h, &subsys->nsheads, entry)
		if (h->disk)
			blk_freeze_queue_start(h->disk->queue);
}

/*
 * If multipathing is enabled we need to always use the subsystem instance
 * number for numbering our devices to avoid conflicts between subsystems that
 * have multiple controllers and thus use the multipath-aware subsystem node
 * and those that have a single controller and use the controller node
 * directly.
 */
void nvme_set_disk_name(char *disk_name, struct nvme_ns *ns,
			struct nvme_ctrl *ctrl, int *flags)
{
	if (!multipath) {
		sprintf(disk_name, "nvme%dn%d", ctrl->instance, ns->head->instance);
	} else if (ns->head->disk) {
		sprintf(disk_name, "nvme%dc%dn%d", ctrl->subsys->instance,
			ctrl->instance, ns->head->instance);
		*flags = GENHD_FL_HIDDEN;
	} else {
		sprintf(disk_name, "nvme%dn%d", ctrl->subsys->instance,
			ns->head->instance);
	}
}

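/*
 * Handle a failed request on a multipath namespace: steal the bios back onto
 * the ns_head requeue list, complete the original request, and decide from
 * the NVMe status code whether to just switch paths, re-read the ANA log
 * page, or reset the controller.
 */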
void nvme_failover_req(struct request *req)
{
	struct nvme_ns *ns = req->q->queuedata;
	u16 status = nvme_req(req)->status;
	unsigned long flags;

	spin_lock_irqsave(&ns->head->requeue_lock, flags);
	blk_steal_bios(&ns->head->requeue_list, req);
	spin_unlock_irqrestore(&ns->head->requeue_lock, flags);
	blk_mq_end_request(req, 0);

	switch (status & 0x7ff) {
	case NVME_SC_ANA_TRANSITION:
	case NVME_SC_ANA_INACCESSIBLE:
	case NVME_SC_ANA_PERSISTENT_LOSS:
		/*
		 * If we got back an ANA error we know the controller is alive,
		 * but not ready to serve this namespace.  The spec suggests
		 * we should update our general state here, but due to the fact
		 * that the admin and I/O queues are not serialized that is
		 * fundamentally racy.  So instead just clear the current path,
		 * mark the path as pending and kick off a re-read of the ANA
		 * log page ASAP.
		 */
		nvme_mpath_clear_current_path(ns);
		if (ns->ctrl->ana_log_buf) {
			set_bit(NVME_NS_ANA_PENDING, &ns->flags);
			queue_work(nvme_wq, &ns->ctrl->ana_work);
		}
		break;
	case NVME_SC_HOST_PATH_ERROR:
	case NVME_SC_HOST_ABORTED_CMD:
		/*
		 * Temporary transport disruption in talking to the controller.
		 * Try to send on a new path.
		 */
		nvme_mpath_clear_current_path(ns);
		break;
	default:
		/*
		 * Reset the controller for any non-ANA error as we don't know
		 * what caused the error.
		 */
		nvme_reset_ctrl(ns->ctrl);
		break;
	}

	kblockd_schedule_work(&ns->head->requeue_work);
}

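/*
 * Kick the requeue work of every multipath node on this controller, e.g.
 * after a controller state change made new paths usable.
 */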
void nvme_kick_requeue_lists(struct nvme_ctrl *ctrl)
{
	struct nvme_ns *ns;

	down_read(&ctrl->namespaces_rwsem);
	list_for_each_entry(ns, &ctrl->namespaces, list) {
		if (ns->head->disk)
			kblockd_schedule_work(&ns->head->requeue_work);
	}
	up_read(&ctrl->namespaces_rwsem);
}

static const char *nvme_ana_state_names[] = {
	[0]				= "invalid state",
	[NVME_ANA_OPTIMIZED]		= "optimized",
	[NVME_ANA_NONOPTIMIZED]		= "non-optimized",
	[NVME_ANA_INACCESSIBLE]		= "inaccessible",
	[NVME_ANA_PERSISTENT_LOSS]	= "persistent-loss",
	[NVME_ANA_CHANGE]		= "change",
};

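/*
 * Clear this namespace from the cached current path of every NUMA node.
 * Returns true if the namespace actually was a current path somewhere, so
 * the caller knows pending I/O has to be rerouted.
 */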
bool nvme_mpath_clear_current_path(struct nvme_ns *ns)
{
	struct nvme_ns_head *head = ns->head;
	bool changed = false;
	int node;

	if (!head)
		goto out;

	for_each_node(node) {
		if (ns == rcu_access_pointer(head->current_path[node])) {
			rcu_assign_pointer(head->current_path[node], NULL);
			changed = true;
		}
	}
out:
	return changed;
}

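/*
 * Clear all cached paths that point to namespaces of this controller and
 * kick the requeue work so pending I/O moves to another path.  Serialized
 * against the scan work via scan_lock.
 */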
void nvme_mpath_clear_ctrl_paths(struct nvme_ctrl *ctrl)
{
	struct nvme_ns *ns;

	mutex_lock(&ctrl->scan_lock);
	list_for_each_entry(ns, &ctrl->namespaces, list)
		if (nvme_mpath_clear_current_path(ns))
			kblockd_schedule_work(&ns->head->requeue_work);
	mutex_unlock(&ctrl->scan_lock);
}

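/*
 * A path is unusable if its controller is not live, an ANA log re-read is
 * pending for it, or its namespace is being removed.
 */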
static bool nvme_path_is_disabled(struct nvme_ns *ns)
{
	return ns->ctrl->state != NVME_CTRL_LIVE ||
		test_bit(NVME_NS_ANA_PENDING, &ns->flags) ||
		test_bit(NVME_NS_REMOVING, &ns->flags);
}

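/*
 * Select the best path for a NUMA node: prefer the closest ANA-optimized
 * path (by NUMA distance when the iopolicy is "numa"), fall back to the
 * closest non-optimized one, and cache the result in current_path[].
 */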
static struct nvme_ns *__nvme_find_path(struct nvme_ns_head *head, int node)
{
	int found_distance = INT_MAX, fallback_distance = INT_MAX, distance;
	struct nvme_ns *found = NULL, *fallback = NULL, *ns;

	list_for_each_entry_rcu(ns, &head->list, siblings) {
		if (nvme_path_is_disabled(ns))
			continue;

		if (READ_ONCE(head->subsys->iopolicy) == NVME_IOPOLICY_NUMA)
			distance = node_distance(node, ns->ctrl->numa_node);
		else
			distance = LOCAL_DISTANCE;

		switch (ns->ana_state) {
		case NVME_ANA_OPTIMIZED:
			if (distance < found_distance) {
				found_distance = distance;
				found = ns;
			}
			break;
		case NVME_ANA_NONOPTIMIZED:
			if (distance < fallback_distance) {
				fallback_distance = distance;
				fallback = ns;
			}
			break;
		default:
			break;
		}
	}

	if (!found)
		found = fallback;
	if (found)
		rcu_assign_pointer(head->current_path[node], found);
	return found;
}

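/* Return the sibling namespace after @ns, wrapping around to the list head. */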
static struct nvme_ns *nvme_next_ns(struct nvme_ns_head *head,
		struct nvme_ns *ns)
{
	ns = list_next_or_null_rcu(&head->list, &ns->siblings, struct nvme_ns,
			siblings);
	if (ns)
		return ns;
	return list_first_or_null_rcu(&head->list, struct nvme_ns, siblings);
}

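/*
 * Round-robin path selection: start from the previously used path and pick
 * the next enabled ANA-optimized path, falling back to a non-optimized one
 * if that is all we have.
 */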
static struct nvme_ns *nvme_round_robin_path(struct nvme_ns_head *head,
		int node, struct nvme_ns *old)
{
	struct nvme_ns *ns, *found, *fallback = NULL;

	if (list_is_singular(&head->list)) {
		if (nvme_path_is_disabled(old))
			return NULL;
		return old;
	}

	for (ns = nvme_next_ns(head, old);
	     ns != old;
	     ns = nvme_next_ns(head, ns)) {
		if (nvme_path_is_disabled(ns))
			continue;

		if (ns->ana_state == NVME_ANA_OPTIMIZED) {
			found = ns;
			goto out;
		}
		if (ns->ana_state == NVME_ANA_NONOPTIMIZED)
			fallback = ns;
	}

	if (!fallback)
		return NULL;
	found = fallback;
out:
	rcu_assign_pointer(head->current_path[node], found);
	return found;
}

static inline bool nvme_path_is_optimized(struct nvme_ns *ns)
{
	return ns->ctrl->state == NVME_CTRL_LIVE &&
		ns->ana_state == NVME_ANA_OPTIMIZED;
}

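/*
 * Look up the cached path for this NUMA node under SRCU protection and
 * recompute it if it is missing or no longer an optimized live path.
 */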
inline struct nvme_ns *nvme_find_path(struct nvme_ns_head *head)
{
	int node = numa_node_id();
	struct nvme_ns *ns;

	ns = srcu_dereference(head->current_path[node], &head->srcu);
	if (READ_ONCE(head->subsys->iopolicy) == NVME_IOPOLICY_RR && ns)
		ns = nvme_round_robin_path(head, node, ns);
	if (unlikely(!ns || !nvme_path_is_optimized(ns)))
		ns = __nvme_find_path(head, node);
	return ns;
}

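/*
 * Check whether any path could become usable again: a controller that is
 * live, resetting or (re)connecting may still come back, so requeueing the
 * I/O is preferable to failing it outright.
 */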
static bool nvme_available_path(struct nvme_ns_head *head)
{
	struct nvme_ns *ns;

	list_for_each_entry_rcu(ns, &head->list, siblings) {
		switch (ns->ctrl->state) {
		case NVME_CTRL_LIVE:
		case NVME_CTRL_RESETTING:
		case NVME_CTRL_CONNECTING:
			/* fallthru */
			return true;
		default:
			break;
		}
	}
	return false;
}

static blk_qc_t nvme_ns_head_make_request(struct request_queue *q,
		struct bio *bio)
{
	struct nvme_ns_head *head = q->queuedata;
	struct device *dev = disk_to_dev(head->disk);
	struct nvme_ns *ns;
	blk_qc_t ret = BLK_QC_T_NONE;
	int srcu_idx;

	/*
	 * The namespace might be going away and the bio might
	 * be moved to a different queue via blk_steal_bios(),
	 * so we need to use the bio_split pool from the original
	 * queue to allocate the bvecs from.
	 */
	blk_queue_split(q, &bio);

	srcu_idx = srcu_read_lock(&head->srcu);
	ns = nvme_find_path(head);
	if (likely(ns)) {
		bio->bi_disk = ns->disk;
		bio->bi_opf |= REQ_NVME_MPATH;
		trace_block_bio_remap(bio->bi_disk->queue, bio,
				      disk_devt(ns->head->disk),
				      bio->bi_iter.bi_sector);
		ret = direct_make_request(bio);
	} else if (nvme_available_path(head)) {
		dev_warn_ratelimited(dev, "no usable path - requeuing I/O\n");

		spin_lock_irq(&head->requeue_lock);
		bio_list_add(&head->requeue_list, bio);
		spin_unlock_irq(&head->requeue_lock);
	} else {
		dev_warn_ratelimited(dev, "no available path - failing I/O\n");

		bio->bi_status = BLK_STS_IOERR;
		bio_endio(bio);
	}

	srcu_read_unlock(&head->srcu, srcu_idx);
	return ret;
}

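/*
 * Resubmit all bios collected on the requeue list: redirect each one back
 * to the multipath node so a fresh path gets selected on submission.
 */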
static void nvme_requeue_work(struct work_struct *work)
{
	struct nvme_ns_head *head =
		container_of(work, struct nvme_ns_head, requeue_work);
	struct bio *bio, *next;

	spin_lock_irq(&head->requeue_lock);
	next = bio_list_get(&head->requeue_list);
	spin_unlock_irq(&head->requeue_lock);

	while ((bio = next) != NULL) {
		next = bio->bi_next;
		bio->bi_next = NULL;

		/*
		 * Reset disk to the mpath node and resubmit to select a new
		 * path.
		 */
		bio->bi_disk = head->disk;
		generic_make_request(bio);
	}
}

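/*
 * Set up the per-ns_head multipath infrastructure: initialize the requeue
 * machinery and, if the subsystem can have multiple controllers, allocate
 * the multipath gendisk and its bio-based request queue.
 */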
int nvme_mpath_alloc_disk(struct nvme_ctrl *ctrl, struct nvme_ns_head *head)
{
	struct request_queue *q;
	bool vwc = false;

	mutex_init(&head->lock);
	bio_list_init(&head->requeue_list);
	spin_lock_init(&head->requeue_lock);
	INIT_WORK(&head->requeue_work, nvme_requeue_work);

	/*
	 * Add a multipath node if the subsystem supports multiple controllers.
	 * We also do this for private namespaces as the namespace sharing data could
	 * change after a rescan.
	 */
	if (!(ctrl->subsys->cmic & (1 << 1)) || !multipath)
		return 0;

	q = blk_alloc_queue_node(GFP_KERNEL, ctrl->numa_node);
	if (!q)
		goto out;
	q->queuedata = head;
	blk_queue_make_request(q, nvme_ns_head_make_request);
	blk_queue_flag_set(QUEUE_FLAG_NONROT, q);
	/* set to a default value of 512 until the disk is validated */
	blk_queue_logical_block_size(q, 512);
	blk_set_stacking_limits(&q->limits);

	/* we need to propagate up the VWC settings */
	if (ctrl->vwc & NVME_CTRL_VWC_PRESENT)
		vwc = true;
	blk_queue_write_cache(q, vwc, vwc);

	head->disk = alloc_disk(0);
	if (!head->disk)
		goto out_cleanup_queue;
	head->disk->fops = &nvme_ns_head_ops;
	head->disk->private_data = head;
	head->disk->queue = q;
	head->disk->flags = GENHD_FL_EXT_DEVT;
	sprintf(head->disk->disk_name, "nvme%dn%d",
			ctrl->subsys->instance, head->instance);
	return 0;

out_cleanup_queue:
	blk_cleanup_queue(q);
out:
	return -ENOMEM;
}

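/*
 * Mark a namespace as a live path: register the multipath gendisk on first
 * use, pre-populate the per-node path cache if the new path is optimized,
 * and kick the requeue work so waiting I/O can make progress.
 */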
static void nvme_mpath_set_live(struct nvme_ns *ns)
{
	struct nvme_ns_head *head = ns->head;

	lockdep_assert_held(&ns->head->lock);

	if (!head->disk)
		return;

	if (!(head->disk->flags & GENHD_FL_UP))
		device_add_disk(&head->subsys->dev, head->disk,
				nvme_ns_id_attr_groups);

	if (nvme_path_is_optimized(ns)) {
		int node, srcu_idx;

		srcu_idx = srcu_read_lock(&head->srcu);
		for_each_node(node)
			__nvme_find_path(head, node);
		srcu_read_unlock(&head->srcu, srcu_idx);
	}

	synchronize_srcu(&ns->head->srcu);
	kblockd_schedule_work(&ns->head->requeue_work);
}

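/*
 * Walk the ANA log page and invoke @cb for each group descriptor, validating
 * every descriptor against the bounds advertised by the controller before
 * handing it to the callback.
 */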
static int nvme_parse_ana_log(struct nvme_ctrl *ctrl, void *data,
		int (*cb)(struct nvme_ctrl *ctrl, struct nvme_ana_group_desc *,
			void *))
{
	void *base = ctrl->ana_log_buf;
	size_t offset = sizeof(struct nvme_ana_rsp_hdr);
	int error, i;

	lockdep_assert_held(&ctrl->ana_lock);

	for (i = 0; i < le16_to_cpu(ctrl->ana_log_buf->ngrps); i++) {
		struct nvme_ana_group_desc *desc = base + offset;
		u32 nr_nsids = le32_to_cpu(desc->nnsids);
		size_t nsid_buf_size = nr_nsids * sizeof(__le32);

		if (WARN_ON_ONCE(desc->grpid == 0))
			return -EINVAL;
		if (WARN_ON_ONCE(le32_to_cpu(desc->grpid) > ctrl->anagrpmax))
			return -EINVAL;
		if (WARN_ON_ONCE(desc->state == 0))
			return -EINVAL;
		if (WARN_ON_ONCE(desc->state > NVME_ANA_CHANGE))
			return -EINVAL;

		offset += sizeof(*desc);
		if (WARN_ON_ONCE(offset > ctrl->ana_log_size - nsid_buf_size))
			return -EINVAL;

		error = cb(ctrl, desc, data);
		if (error)
			return error;

		offset += nsid_buf_size;
		if (WARN_ON_ONCE(offset > ctrl->ana_log_size - sizeof(*desc)))
			return -EINVAL;
	}

	return 0;
}

static inline bool nvme_state_is_live(enum nvme_ana_state state)
{
	return state == NVME_ANA_OPTIMIZED || state == NVME_ANA_NONOPTIMIZED;
}

static void nvme_update_ns_ana_state(struct nvme_ana_group_desc *desc,
		struct nvme_ns *ns)
{
	mutex_lock(&ns->head->lock);
	ns->ana_grpid = le32_to_cpu(desc->grpid);
	ns->ana_state = desc->state;
	clear_bit(NVME_NS_ANA_PENDING, &ns->flags);

	if (nvme_state_is_live(ns->ana_state))
		nvme_mpath_set_live(ns);
	mutex_unlock(&ns->head->lock);
}

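/*
 * nvme_parse_ana_log() callback: apply one group descriptor to all matching
 * namespaces, walking the controller's namespace list and the descriptor's
 * NSID list in tandem (both are sorted by NSID).
 */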
static int nvme_update_ana_state(struct nvme_ctrl *ctrl,
		struct nvme_ana_group_desc *desc, void *data)
{
	u32 nr_nsids = le32_to_cpu(desc->nnsids), n = 0;
	unsigned *nr_change_groups = data;
	struct nvme_ns *ns;

	dev_dbg(ctrl->device, "ANA group %d: %s.\n",
			le32_to_cpu(desc->grpid),
			nvme_ana_state_names[desc->state]);

	if (desc->state == NVME_ANA_CHANGE)
		(*nr_change_groups)++;

	if (!nr_nsids)
		return 0;

	down_write(&ctrl->namespaces_rwsem);
	list_for_each_entry(ns, &ctrl->namespaces, list) {
		unsigned nsid = le32_to_cpu(desc->nsids[n]);

		if (ns->head->ns_id < nsid)
			continue;
		if (ns->head->ns_id == nsid)
			nvme_update_ns_ana_state(desc, ns);
		if (++n == nr_nsids)
			break;
	}
	up_write(&ctrl->namespaces_rwsem);
	return 0;
}

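/*
 * Fetch the ANA log page from the controller and update the namespace
 * states.  While any group reports the "change" state, (re)arm the ANATT
 * timer so a stuck transition eventually escalates to a controller reset.
 */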
static int nvme_read_ana_log(struct nvme_ctrl *ctrl, bool groups_only)
{
	u32 nr_change_groups = 0;
	int error;

	mutex_lock(&ctrl->ana_lock);
	error = nvme_get_log(ctrl, NVME_NSID_ALL, NVME_LOG_ANA,
			groups_only ? NVME_ANA_LOG_RGO : 0,
			ctrl->ana_log_buf, ctrl->ana_log_size, 0);
	if (error) {
		dev_warn(ctrl->device, "Failed to get ANA log: %d\n", error);
		goto out_unlock;
	}

	error = nvme_parse_ana_log(ctrl, &nr_change_groups,
			nvme_update_ana_state);
	if (error)
		goto out_unlock;

	/*
	 * In theory we should have an ANATT timer per group as they might enter
	 * the change state at different times.  But that is a lot of overhead
	 * just to protect against a target that keeps entering new change
	 * states while never finishing previous ones.  But we'll still
	 * eventually time out once all groups are in change state, so this
	 * isn't a big deal.
	 *
	 * We also double the ANATT value to provide some slack for transports
	 * or AEN processing overhead.
	 */
	if (nr_change_groups)
		mod_timer(&ctrl->anatt_timer, ctrl->anatt * HZ * 2 + jiffies);
	else
		del_timer_sync(&ctrl->anatt_timer);
out_unlock:
	mutex_unlock(&ctrl->ana_lock);
	return error;
}

static void nvme_ana_work(struct work_struct *work)
{
	struct nvme_ctrl *ctrl = container_of(work, struct nvme_ctrl, ana_work);

	nvme_read_ana_log(ctrl, false);
}

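/* The ANA transition deadline expired; reset the controller to recover. */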
static void nvme_anatt_timeout(struct timer_list *t)
{
	struct nvme_ctrl *ctrl = from_timer(ctrl, t, anatt_timer);

	dev_info(ctrl->device, "ANATT timeout, resetting controller.\n");
	nvme_reset_ctrl(ctrl);
}

void nvme_mpath_stop(struct nvme_ctrl *ctrl)
{
	if (!nvme_ctrl_use_ana(ctrl))
		return;
	del_timer_sync(&ctrl->anatt_timer);
	cancel_work_sync(&ctrl->ana_work);
}

#define SUBSYS_ATTR_RW(_name, _mode, _show, _store)  \
	struct device_attribute subsys_attr_##_name =	\
		__ATTR(_name, _mode, _show, _store)

static const char *nvme_iopolicy_names[] = {
	[NVME_IOPOLICY_NUMA]	= "numa",
	[NVME_IOPOLICY_RR]	= "round-robin",
};

static ssize_t nvme_subsys_iopolicy_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct nvme_subsystem *subsys =
		container_of(dev, struct nvme_subsystem, dev);

	return sprintf(buf, "%s\n",
			nvme_iopolicy_names[READ_ONCE(subsys->iopolicy)]);
}

static ssize_t nvme_subsys_iopolicy_store(struct device *dev,
		struct device_attribute *attr, const char *buf, size_t count)
{
	struct nvme_subsystem *subsys =
		container_of(dev, struct nvme_subsystem, dev);
	int i;

	for (i = 0; i < ARRAY_SIZE(nvme_iopolicy_names); i++) {
		if (sysfs_streq(buf, nvme_iopolicy_names[i])) {
			WRITE_ONCE(subsys->iopolicy, i);
			return count;
		}
	}

	return -EINVAL;
}
SUBSYS_ATTR_RW(iopolicy, S_IRUGO | S_IWUSR,
	       nvme_subsys_iopolicy_show, nvme_subsys_iopolicy_store);

static ssize_t ana_grpid_show(struct device *dev, struct device_attribute *attr,
		char *buf)
{
	return sprintf(buf, "%d\n", nvme_get_ns_from_dev(dev)->ana_grpid);
}
DEVICE_ATTR_RO(ana_grpid);

static ssize_t ana_state_show(struct device *dev, struct device_attribute *attr,
		char *buf)
{
	struct nvme_ns *ns = nvme_get_ns_from_dev(dev);

	return sprintf(buf, "%s\n", nvme_ana_state_names[ns->ana_state]);
}
DEVICE_ATTR_RO(ana_state);

static int nvme_set_ns_ana_state(struct nvme_ctrl *ctrl,
		struct nvme_ana_group_desc *desc, void *data)
{
	struct nvme_ns *ns = data;

	if (ns->ana_grpid == le32_to_cpu(desc->grpid)) {
		nvme_update_ns_ana_state(desc, ns);
		return -ENXIO; /* just break out of the loop */
	}

	return 0;
}

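/*
 * Hook a newly scanned namespace up to its multipath node: with ANA, take
 * the state from the current ANA log; without it the path is always
 * considered optimized.
 */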
void nvme_mpath_add_disk(struct nvme_ns *ns, struct nvme_id_ns *id)
{
	if (nvme_ctrl_use_ana(ns->ctrl)) {
		mutex_lock(&ns->ctrl->ana_lock);
		ns->ana_grpid = le32_to_cpu(id->anagrpid);
		nvme_parse_ana_log(ns->ctrl, ns, nvme_set_ns_ana_state);
		mutex_unlock(&ns->ctrl->ana_lock);
	} else {
		mutex_lock(&ns->head->lock);
		ns->ana_state = NVME_ANA_OPTIMIZED;
		nvme_mpath_set_live(ns);
		mutex_unlock(&ns->head->lock);
	}
}

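/* Tear down the multipath gendisk and flush any bios still queued on it. */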
void nvme_mpath_remove_disk(struct nvme_ns_head *head)
{
	if (!head->disk)
		return;
	if (head->disk->flags & GENHD_FL_UP)
		del_gendisk(head->disk);
	blk_set_queue_dying(head->disk->queue);
	/* make sure all pending bios are cleaned up */
	kblockd_schedule_work(&head->requeue_work);
	flush_work(&head->requeue_work);
	blk_cleanup_queue(head->disk->queue);
	put_disk(head->disk);
}

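/*
 * Read the ANA capabilities from the Identify Controller data, size and
 * allocate the ANA log buffer, and do an initial (groups-only) read of the
 * log page to populate the namespace states.
 */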
int nvme_mpath_init(struct nvme_ctrl *ctrl, struct nvme_id_ctrl *id)
{
	int error;

	/* check if multipath is enabled and we have the capability */
	if (!multipath || !ctrl->subsys || !(ctrl->subsys->cmic & (1 << 3)))
		return 0;

	ctrl->anacap = id->anacap;
	ctrl->anatt = id->anatt;
	ctrl->nanagrpid = le32_to_cpu(id->nanagrpid);
	ctrl->anagrpmax = le32_to_cpu(id->anagrpmax);

	mutex_init(&ctrl->ana_lock);
	timer_setup(&ctrl->anatt_timer, nvme_anatt_timeout, 0);
	ctrl->ana_log_size = sizeof(struct nvme_ana_rsp_hdr) +
		ctrl->nanagrpid * sizeof(struct nvme_ana_group_desc);
	ctrl->ana_log_size += ctrl->max_namespaces * sizeof(__le32);

	if (ctrl->ana_log_size > ctrl->max_hw_sectors << SECTOR_SHIFT) {
		dev_err(ctrl->device,
			"ANA log page size (%zd) larger than MDTS (%d).\n",
			ctrl->ana_log_size,
			ctrl->max_hw_sectors << SECTOR_SHIFT);
		dev_err(ctrl->device, "disabling ANA support.\n");
		return 0;
	}

	INIT_WORK(&ctrl->ana_work, nvme_ana_work);
	ctrl->ana_log_buf = kmalloc(ctrl->ana_log_size, GFP_KERNEL);
	if (!ctrl->ana_log_buf) {
		error = -ENOMEM;
		goto out;
	}

	error = nvme_read_ana_log(ctrl, true);
	if (error)
		goto out_free_ana_log_buf;
	return 0;
out_free_ana_log_buf:
	kfree(ctrl->ana_log_buf);
	ctrl->ana_log_buf = NULL;
out:
	return error;
}

void nvme_mpath_uninit(struct nvme_ctrl *ctrl)
{
	kfree(ctrl->ana_log_buf);
	ctrl->ana_log_buf = NULL;
}