/*
 * Copyright (c) 2017-2018 Christoph Hellwig.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 */

#include <linux/moduleparam.h>
#include <trace/events/block.h>
#include "nvme.h"

static bool multipath = true;
module_param(multipath, bool, 0444);
MODULE_PARM_DESC(multipath,
	"turn on native support for multiple controllers per subsystem");

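/*
 * Bit 3 of the CMIC field in the Identify Controller data indicates that
 * the controller supports Asymmetric Namespace Access (ANA) reporting.
 */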
inline bool nvme_ctrl_use_ana(struct nvme_ctrl *ctrl)
{
	return multipath && ctrl->subsys && (ctrl->subsys->cmic & (1 << 3));
}

/*
 * If multipathing is enabled we always have to use the subsystem instance
 * number for naming our devices: otherwise the names of subsystems that
 * have multiple controllers (and thus use the multipath-aware subsystem
 * node) could conflict with those of subsystems that have a single
 * controller (and use the controller node directly).
 */
void nvme_set_disk_name(char *disk_name, struct nvme_ns *ns,
			struct nvme_ctrl *ctrl, int *flags)
{
	if (!multipath) {
		sprintf(disk_name, "nvme%dn%d", ctrl->instance, ns->head->instance);
	} else if (ns->head->disk) {
		sprintf(disk_name, "nvme%dc%dn%d", ctrl->subsys->instance,
				ctrl->cntlid, ns->head->instance);
		*flags = GENHD_FL_HIDDEN;
	} else {
		sprintf(disk_name, "nvme%dn%d", ctrl->subsys->instance,
				ns->head->instance);
	}
}

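/*
 * Called from the completion path when a request failed on one path: steal
 * all bios back from the request, end the request itself, and resubmit the
 * bios through the multipath node so they can be retried on another path.
 */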
void nvme_failover_req(struct request *req)
{
	struct nvme_ns *ns = req->q->queuedata;
	u16 status = nvme_req(req)->status;
	unsigned long flags;

	spin_lock_irqsave(&ns->head->requeue_lock, flags);
	blk_steal_bios(&ns->head->requeue_list, req);
	spin_unlock_irqrestore(&ns->head->requeue_lock, flags);
	blk_mq_end_request(req, 0);

	switch (status & 0x7ff) {
	case NVME_SC_ANA_TRANSITION:
	case NVME_SC_ANA_INACCESSIBLE:
	case NVME_SC_ANA_PERSISTENT_LOSS:
		/*
		 * If we got back an ANA error we know the controller is alive,
		 * but not ready to serve this namespace.  The spec suggests
		 * we should update our general state here, but since the admin
		 * and I/O queues are not serialized that is fundamentally
		 * racy.  So instead just clear the current path, mark the
		 * path as pending and kick off a re-read of the ANA log page
		 * ASAP.
		 */
		nvme_mpath_clear_current_path(ns);
		if (ns->ctrl->ana_log_buf) {
			set_bit(NVME_NS_ANA_PENDING, &ns->flags);
			queue_work(nvme_wq, &ns->ctrl->ana_work);
		}
		break;
	case NVME_SC_HOST_PATH_ERROR:
		/*
		 * Temporary transport disruption in talking to the controller.
		 * Try to send on a new path.
		 */
		nvme_mpath_clear_current_path(ns);
		break;
	default:
		/*
		 * Reset the controller for any non-ANA error as we don't know
		 * what caused the error.
		 */
		nvme_reset_ctrl(ns->ctrl);
		break;
	}

	kblockd_schedule_work(&ns->head->requeue_work);
}

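/*
 * Kick the requeue work for every multipath-aware namespace of the
 * controller so held-back bios are re-evaluated, typically after a
 * controller state change made paths usable again.
 */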
void nvme_kick_requeue_lists(struct nvme_ctrl *ctrl)
{
	struct nvme_ns *ns;

	down_read(&ctrl->namespaces_rwsem);
	list_for_each_entry(ns, &ctrl->namespaces, list) {
		if (ns->head->disk)
			kblockd_schedule_work(&ns->head->requeue_work);
	}
	up_read(&ctrl->namespaces_rwsem);
}

static const char *nvme_ana_state_names[] = {
	[0]				= "invalid state",
	[NVME_ANA_OPTIMIZED]		= "optimized",
	[NVME_ANA_NONOPTIMIZED]		= "non-optimized",
	[NVME_ANA_INACCESSIBLE]		= "inaccessible",
	[NVME_ANA_PERSISTENT_LOSS]	= "persistent-loss",
	[NVME_ANA_CHANGE]		= "change",
};

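/*
 * Drop the namespace from the per-NUMA-node current_path cache so that the
 * next submission has to re-run path selection.
 */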
void nvme_mpath_clear_current_path(struct nvme_ns *ns)
{
	struct nvme_ns_head *head = ns->head;
	int node;

	if (!head)
		return;

	for_each_node(node) {
		if (ns == rcu_access_pointer(head->current_path[node]))
			rcu_assign_pointer(head->current_path[node], NULL);
	}
}

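/*
 * Pick the usable path with the lowest NUMA distance to the submitting
 * node, preferring namespaces in the ANA optimized state and falling back
 * to non-optimized ones.  The winner is cached in current_path[node].
 */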
static struct nvme_ns *__nvme_find_path(struct nvme_ns_head *head, int node)
{
	int found_distance = INT_MAX, fallback_distance = INT_MAX, distance;
	struct nvme_ns *found = NULL, *fallback = NULL, *ns;

	list_for_each_entry_rcu(ns, &head->list, siblings) {
		if (ns->ctrl->state != NVME_CTRL_LIVE ||
		    test_bit(NVME_NS_ANA_PENDING, &ns->flags))
			continue;

		distance = node_distance(node, dev_to_node(ns->ctrl->dev));

		switch (ns->ana_state) {
		case NVME_ANA_OPTIMIZED:
			if (distance < found_distance) {
				found_distance = distance;
				found = ns;
			}
			break;
		case NVME_ANA_NONOPTIMIZED:
			if (distance < fallback_distance) {
				fallback_distance = distance;
				fallback = ns;
			}
			break;
		default:
			break;
		}
	}

	if (!found)
		found = fallback;
	if (found)
		rcu_assign_pointer(head->current_path[node], found);
	return found;
}

static inline bool nvme_path_is_optimized(struct nvme_ns *ns)
{
	return ns->ctrl->state == NVME_CTRL_LIVE &&
		ns->ana_state == NVME_ANA_OPTIMIZED;
}

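/*
 * Return the cached path for the local NUMA node if it is still live and
 * optimized, otherwise run a full path selection.  Callers must hold
 * head->srcu via srcu_read_lock().
 */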
inline struct nvme_ns *nvme_find_path(struct nvme_ns_head *head)
{
	int node = numa_node_id();
	struct nvme_ns *ns;

	ns = srcu_dereference(head->current_path[node], &head->srcu);
	if (unlikely(!ns || !nvme_path_is_optimized(ns)))
		ns = __nvme_find_path(head, node);
	return ns;
}

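/*
 * Submission entry point for the multipath node: select a path and redirect
 * the bio to the underlying namespace.  If no path is available the bio is
 * either requeued (paths exist but none is usable right now) or failed.
 */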
static blk_qc_t nvme_ns_head_make_request(struct request_queue *q,
		struct bio *bio)
{
	struct nvme_ns_head *head = q->queuedata;
	struct device *dev = disk_to_dev(head->disk);
	struct nvme_ns *ns;
	blk_qc_t ret = BLK_QC_T_NONE;
	int srcu_idx;

	srcu_idx = srcu_read_lock(&head->srcu);
	ns = nvme_find_path(head);
	if (likely(ns)) {
		bio->bi_disk = ns->disk;
		bio->bi_opf |= REQ_NVME_MPATH;
		trace_block_bio_remap(bio->bi_disk->queue, bio,
				      disk_devt(ns->head->disk),
				      bio->bi_iter.bi_sector);
		ret = direct_make_request(bio);
	} else if (!list_empty_careful(&head->list)) {
		dev_warn_ratelimited(dev, "no path available - requeuing I/O\n");

		spin_lock_irq(&head->requeue_lock);
		bio_list_add(&head->requeue_list, bio);
		spin_unlock_irq(&head->requeue_lock);
	} else {
		dev_warn_ratelimited(dev, "no path - failing I/O\n");

		bio->bi_status = BLK_STS_IOERR;
		bio_endio(bio);
	}

	srcu_read_unlock(&head->srcu, srcu_idx);
	return ret;
}

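/*
 * Poll the queue of the currently cached path, but only while that path is
 * live and optimized; otherwise there is nothing sensible to poll for.
 */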
static bool nvme_ns_head_poll(struct request_queue *q, blk_qc_t qc)
{
	struct nvme_ns_head *head = q->queuedata;
	struct nvme_ns *ns;
	bool found = false;
	int srcu_idx;

	srcu_idx = srcu_read_lock(&head->srcu);
	ns = srcu_dereference(head->current_path[numa_node_id()], &head->srcu);
	if (likely(ns && nvme_path_is_optimized(ns)))
		found = ns->queue->poll_fn(q, qc);
	srcu_read_unlock(&head->srcu, srcu_idx);
	return found;
}

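/*
 * Resubmit all bios that were queued up after a path failure or while no
 * path was available.
 */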
static void nvme_requeue_work(struct work_struct *work)
{
	struct nvme_ns_head *head =
		container_of(work, struct nvme_ns_head, requeue_work);
	struct bio *bio, *next;

	spin_lock_irq(&head->requeue_lock);
	next = bio_list_get(&head->requeue_list);
	spin_unlock_irq(&head->requeue_lock);

	while ((bio = next) != NULL) {
		next = bio->bi_next;
		bio->bi_next = NULL;

		/*
		 * Reset disk to the mpath node and resubmit to select a new
		 * path.
		 */
		bio->bi_disk = head->disk;
		generic_make_request(bio);
	}
}

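/*
 * Allocate the request queue and gendisk for the multipath node.  The disk
 * is not registered here; that happens in nvme_mpath_set_live() once the
 * first path transitions to a live ANA state.
 */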
int nvme_mpath_alloc_disk(struct nvme_ctrl *ctrl, struct nvme_ns_head *head)
{
	struct request_queue *q;
	bool vwc = false;

	mutex_init(&head->lock);
	bio_list_init(&head->requeue_list);
	spin_lock_init(&head->requeue_lock);
	INIT_WORK(&head->requeue_work, nvme_requeue_work);

	/*
	 * Add a multipath node if the subsystem supports multiple controllers.
	 * We also do this for private namespaces as the namespace sharing data
	 * could change after a rescan.
	 */
	if (!(ctrl->subsys->cmic & (1 << 1)) || !multipath)
		return 0;

	q = blk_alloc_queue_node(GFP_KERNEL, NUMA_NO_NODE, NULL);
	if (!q)
		goto out;
	q->queuedata = head;
	blk_queue_make_request(q, nvme_ns_head_make_request);
	q->poll_fn = nvme_ns_head_poll;
	blk_queue_flag_set(QUEUE_FLAG_NONROT, q);
	/* set to a default value of 512 until the disk is validated */
	blk_queue_logical_block_size(q, 512);

	/* we need to propagate up the VWC settings */
	if (ctrl->vwc & NVME_CTRL_VWC_PRESENT)
		vwc = true;
	blk_queue_write_cache(q, vwc, vwc);

	head->disk = alloc_disk(0);
	if (!head->disk)
		goto out_cleanup_queue;
	head->disk->fops = &nvme_ns_head_ops;
	head->disk->private_data = head;
	head->disk->queue = q;
	head->disk->flags = GENHD_FL_EXT_DEVT;
	sprintf(head->disk->disk_name, "nvme%dn%d",
			ctrl->subsys->instance, head->instance);
	return 0;

out_cleanup_queue:
	blk_cleanup_queue(q);
out:
	return -ENOMEM;
}

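/*
 * Register the multipath node with the block layer on the first transition
 * of a path to a live ANA state, and kick the requeue work so held-back
 * I/O gets another chance.
 */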
static void nvme_mpath_set_live(struct nvme_ns *ns)
{
	struct nvme_ns_head *head = ns->head;

	lockdep_assert_held(&ns->head->lock);

	if (!head->disk)
		return;

	if (!(head->disk->flags & GENHD_FL_UP))
		device_add_disk(&head->subsys->dev, head->disk,
				nvme_ns_id_attr_groups);

	kblockd_schedule_work(&ns->head->requeue_work);
}

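/*
 * Walk all group descriptors in the ANA log page, validating each offset
 * against the log size, and invoke the callback for every descriptor.
 */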
static int nvme_parse_ana_log(struct nvme_ctrl *ctrl, void *data,
		int (*cb)(struct nvme_ctrl *ctrl, struct nvme_ana_group_desc *,
			  void *))
{
	void *base = ctrl->ana_log_buf;
	size_t offset = sizeof(struct nvme_ana_rsp_hdr);
	int error, i;

	lockdep_assert_held(&ctrl->ana_lock);

	for (i = 0; i < le16_to_cpu(ctrl->ana_log_buf->ngrps); i++) {
		struct nvme_ana_group_desc *desc = base + offset;
		u32 nr_nsids = le32_to_cpu(desc->nnsids);
		size_t nsid_buf_size = nr_nsids * sizeof(__le32);

		if (WARN_ON_ONCE(desc->grpid == 0))
			return -EINVAL;
		if (WARN_ON_ONCE(le32_to_cpu(desc->grpid) > ctrl->anagrpmax))
			return -EINVAL;
		if (WARN_ON_ONCE(desc->state == 0))
			return -EINVAL;
		if (WARN_ON_ONCE(desc->state > NVME_ANA_CHANGE))
			return -EINVAL;

		offset += sizeof(*desc);
		if (WARN_ON_ONCE(offset > ctrl->ana_log_size - nsid_buf_size))
			return -EINVAL;

		error = cb(ctrl, desc, data);
		if (error)
			return error;

		offset += nsid_buf_size;
		if (WARN_ON_ONCE(offset > ctrl->ana_log_size - sizeof(*desc)))
			return -EINVAL;
	}

	return 0;
}

static inline bool nvme_state_is_live(enum nvme_ana_state state)
{
	return state == NVME_ANA_OPTIMIZED || state == NVME_ANA_NONOPTIMIZED;
}

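/*
 * Apply a group descriptor to a single namespace, and register the
 * multipath node if the namespace just transitioned to a live ANA state.
 */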
static void nvme_update_ns_ana_state(struct nvme_ana_group_desc *desc,
		struct nvme_ns *ns)
{
	enum nvme_ana_state old;

	mutex_lock(&ns->head->lock);
	old = ns->ana_state;
	ns->ana_grpid = le32_to_cpu(desc->grpid);
	ns->ana_state = desc->state;
	clear_bit(NVME_NS_ANA_PENDING, &ns->flags);

	if (nvme_state_is_live(ns->ana_state) && !nvme_state_is_live(old))
		nvme_mpath_set_live(ns);
	mutex_unlock(&ns->head->lock);
}

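/*
 * nvme_parse_ana_log() callback: apply one group descriptor to all matching
 * namespaces of the controller and count groups in the change state.
 */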
static int nvme_update_ana_state(struct nvme_ctrl *ctrl,
		struct nvme_ana_group_desc *desc, void *data)
{
	u32 nr_nsids = le32_to_cpu(desc->nnsids), n = 0;
	unsigned *nr_change_groups = data;
	struct nvme_ns *ns;

	dev_info(ctrl->device, "ANA group %d: %s.\n",
			le32_to_cpu(desc->grpid),
			nvme_ana_state_names[desc->state]);

	if (desc->state == NVME_ANA_CHANGE)
		(*nr_change_groups)++;

	if (!nr_nsids)
		return 0;

	down_write(&ctrl->namespaces_rwsem);
	list_for_each_entry(ns, &ctrl->namespaces, list) {
		if (ns->head->ns_id != le32_to_cpu(desc->nsids[n]))
			continue;
		nvme_update_ns_ana_state(desc, ns);
		if (++n == nr_nsids)
			break;
	}
	up_write(&ctrl->namespaces_rwsem);
	WARN_ON_ONCE(n < nr_nsids);
	return 0;
}

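/*
 * (Re-)read the ANA log page and update the namespace states.  While any
 * group is in the change state, keep the ANATT timer armed so a stuck
 * transition eventually triggers a controller reset.
 */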
static int nvme_read_ana_log(struct nvme_ctrl *ctrl, bool groups_only)
{
	u32 nr_change_groups = 0;
	int error;

	mutex_lock(&ctrl->ana_lock);
	error = nvme_get_log(ctrl, NVME_NSID_ALL, NVME_LOG_ANA,
			groups_only ? NVME_ANA_LOG_RGO : 0,
			ctrl->ana_log_buf, ctrl->ana_log_size, 0);
	if (error) {
		dev_warn(ctrl->device, "Failed to get ANA log: %d\n", error);
		goto out_unlock;
	}

	error = nvme_parse_ana_log(ctrl, &nr_change_groups,
			nvme_update_ana_state);
	if (error)
		goto out_unlock;

	/*
	 * In theory we should have an ANATT timer per group as they might
	 * enter the change state at different times.  But that is a lot of
	 * overhead just to protect against a target that keeps entering new
	 * change states while never finishing previous ones.  We'll still
	 * eventually time out once all groups are in change state, so this
	 * isn't a big deal.
	 *
	 * We also double the ANATT value to provide some slack for transports
	 * or AEN processing overhead.
	 */
	if (nr_change_groups)
		mod_timer(&ctrl->anatt_timer, ctrl->anatt * HZ * 2 + jiffies);
	else
		del_timer_sync(&ctrl->anatt_timer);
out_unlock:
	mutex_unlock(&ctrl->ana_lock);
	return error;
}

static void nvme_ana_work(struct work_struct *work)
{
	struct nvme_ctrl *ctrl = container_of(work, struct nvme_ctrl, ana_work);

	nvme_read_ana_log(ctrl, false);
}

static void nvme_anatt_timeout(struct timer_list *t)
{
	struct nvme_ctrl *ctrl = from_timer(ctrl, t, anatt_timer);

	dev_info(ctrl->device, "ANATT timeout, resetting controller.\n");
	nvme_reset_ctrl(ctrl);
}

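/*
 * Quiesce the ANA machinery: stop the ANATT timer and make sure no ANA
 * work is still running.
 */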
void nvme_mpath_stop(struct nvme_ctrl *ctrl)
{
	if (!nvme_ctrl_use_ana(ctrl))
		return;
	del_timer_sync(&ctrl->anatt_timer);
	cancel_work_sync(&ctrl->ana_work);
}

static ssize_t ana_grpid_show(struct device *dev, struct device_attribute *attr,
		char *buf)
{
	return sprintf(buf, "%d\n", nvme_get_ns_from_dev(dev)->ana_grpid);
}
DEVICE_ATTR_RO(ana_grpid);

static ssize_t ana_state_show(struct device *dev, struct device_attribute *attr,
		char *buf)
{
	struct nvme_ns *ns = nvme_get_ns_from_dev(dev);

	return sprintf(buf, "%s\n", nvme_ana_state_names[ns->ana_state]);
}
DEVICE_ATTR_RO(ana_state);

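/*
 * nvme_parse_ana_log() callback used at namespace scan time: find the
 * descriptor for the namespace's group and apply its state.
 */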
static int nvme_set_ns_ana_state(struct nvme_ctrl *ctrl,
		struct nvme_ana_group_desc *desc, void *data)
{
	struct nvme_ns *ns = data;

	if (ns->ana_grpid == le32_to_cpu(desc->grpid)) {
		nvme_update_ns_ana_state(desc, ns);
		return -ENXIO; /* just break out of the loop */
	}

	return 0;
}

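/*
 * Set the initial ANA state for a newly scanned namespace.  Without ANA
 * the only path is by definition optimized, so the multipath node can go
 * live immediately.
 */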
void nvme_mpath_add_disk(struct nvme_ns *ns, struct nvme_id_ns *id)
{
	if (nvme_ctrl_use_ana(ns->ctrl)) {
		mutex_lock(&ns->ctrl->ana_lock);
		ns->ana_grpid = le32_to_cpu(id->anagrpid);
		nvme_parse_ana_log(ns->ctrl, ns, nvme_set_ns_ana_state);
		mutex_unlock(&ns->ctrl->ana_lock);
	} else {
		mutex_lock(&ns->head->lock);
		ns->ana_state = NVME_ANA_OPTIMIZED;
		nvme_mpath_set_live(ns);
		mutex_unlock(&ns->head->lock);
	}
}

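/*
 * Unregister and free the multipath node once the last path is gone,
 * flushing out any bios still sitting on the requeue list.
 */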
void nvme_mpath_remove_disk(struct nvme_ns_head *head)
{
	if (!head->disk)
		return;
	if (head->disk->flags & GENHD_FL_UP)
		del_gendisk(head->disk);
	blk_set_queue_dying(head->disk->queue);
	/* make sure all pending bios are cleaned up */
	kblockd_schedule_work(&head->requeue_work);
	flush_work(&head->requeue_work);
	blk_cleanup_queue(head->disk->queue);
	put_disk(head->disk);
}

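/*
 * Read the ANA fields from the Identify Controller data, size and allocate
 * the ANA log buffer, and do the initial log page read.  ANA support is
 * disabled if the log page would not fit into a single MDTS-sized transfer.
 */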
int nvme_mpath_init(struct nvme_ctrl *ctrl, struct nvme_id_ctrl *id)
{
	int error;

	if (!nvme_ctrl_use_ana(ctrl))
		return 0;

	ctrl->anacap = id->anacap;
	ctrl->anatt = id->anatt;
	ctrl->nanagrpid = le32_to_cpu(id->nanagrpid);
	ctrl->anagrpmax = le32_to_cpu(id->anagrpmax);

	mutex_init(&ctrl->ana_lock);
	timer_setup(&ctrl->anatt_timer, nvme_anatt_timeout, 0);
	ctrl->ana_log_size = sizeof(struct nvme_ana_rsp_hdr) +
		ctrl->nanagrpid * sizeof(struct nvme_ana_group_desc);
	if (!(ctrl->anacap & (1 << 6)))
		ctrl->ana_log_size += ctrl->max_namespaces * sizeof(__le32);

	if (ctrl->ana_log_size > ctrl->max_hw_sectors << SECTOR_SHIFT) {
		dev_err(ctrl->device,
			"ANA log page size (%zd) larger than MDTS (%d).\n",
			ctrl->ana_log_size,
			ctrl->max_hw_sectors << SECTOR_SHIFT);
		dev_err(ctrl->device, "disabling ANA support.\n");
		return 0;
	}

	INIT_WORK(&ctrl->ana_work, nvme_ana_work);
	ctrl->ana_log_buf = kmalloc(ctrl->ana_log_size, GFP_KERNEL);
	if (!ctrl->ana_log_buf) {
		error = -ENOMEM;
		goto out;
	}

	error = nvme_read_ana_log(ctrl, true);
	if (error)
		goto out_free_ana_log_buf;
	return 0;
out_free_ana_log_buf:
	kfree(ctrl->ana_log_buf);
out:
	return error;
}

void nvme_mpath_uninit(struct nvme_ctrl *ctrl)
{
	kfree(ctrl->ana_log_buf);
}