/*
 * Copyright (c) 2017-2018 Christoph Hellwig.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 */

#include <linux/moduleparam.h>
#include <trace/events/block.h>
#include "nvme.h"

static bool multipath = true;
module_param(multipath, bool, 0444);
MODULE_PARM_DESC(multipath,
	"turn on native support for multiple controllers per subsystem");

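/*
 * ANA is only used when the module parameter is enabled and the controller
 * advertises ANA reporting support in the CMIC field (bit 3).
 */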
inline bool nvme_ctrl_use_ana(struct nvme_ctrl *ctrl)
{
	return multipath && ctrl->subsys && (ctrl->subsys->cmic & (1 << 3));
}

/*
 * If multipathing is enabled, we always need to use the subsystem instance
 * number for device naming to avoid conflicts between subsystems that have
 * multiple controllers (and thus use the multipath-aware subsystem node)
 * and those that have a single controller (and use the controller node
 * directly).
 */
void nvme_set_disk_name(char *disk_name, struct nvme_ns *ns,
		struct nvme_ctrl *ctrl, int *flags)
{
	if (!multipath) {
		sprintf(disk_name, "nvme%dn%d", ctrl->instance, ns->head->instance);
	} else if (ns->head->disk) {
		sprintf(disk_name, "nvme%dc%dn%d", ctrl->subsys->instance,
				ctrl->cntlid, ns->head->instance);
		*flags = GENHD_FL_HIDDEN;
	} else {
		sprintf(disk_name, "nvme%dn%d", ctrl->subsys->instance,
				ns->head->instance);
	}
}

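/*
 * A request failed on one path: steal its bios onto the head's requeue list
 * and complete the original request.  ANA status codes mark the path as
 * pending and kick a re-read of the ANA log; any other error resets the
 * controller, since we do not know what went wrong.
 */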
void nvme_failover_req(struct request *req)
{
	struct nvme_ns *ns = req->q->queuedata;
	u16 status = nvme_req(req)->status;
	unsigned long flags;

	spin_lock_irqsave(&ns->head->requeue_lock, flags);
	blk_steal_bios(&ns->head->requeue_list, req);
	spin_unlock_irqrestore(&ns->head->requeue_lock, flags);
	blk_mq_end_request(req, 0);

	switch (status & 0x7ff) {
	case NVME_SC_ANA_TRANSITION:
	case NVME_SC_ANA_INACCESSIBLE:
	case NVME_SC_ANA_PERSISTENT_LOSS:
		/*
		 * If we got back an ANA error we know the controller is alive,
		 * but not ready to serve this namespace.  The spec suggests
		 * we should update our general state here, but due to the fact
		 * that the admin and I/O queues are not serialized that is
		 * fundamentally racy.  So instead just clear the current path,
		 * mark the path as pending and kick off a re-read of the ANA
		 * log page ASAP.
		 */
		nvme_mpath_clear_current_path(ns);
		if (ns->ctrl->ana_log_buf) {
			set_bit(NVME_NS_ANA_PENDING, &ns->flags);
			queue_work(nvme_wq, &ns->ctrl->ana_work);
		}
		break;
	default:
		/*
		 * Reset the controller for any non-ANA error as we don't know
		 * what caused the error.
		 */
		nvme_reset_ctrl(ns->ctrl);
		break;
	}

	kblockd_schedule_work(&ns->head->requeue_work);
}

void nvme_kick_requeue_lists(struct nvme_ctrl *ctrl)
{
	struct nvme_ns *ns;

	down_read(&ctrl->namespaces_rwsem);
	list_for_each_entry(ns, &ctrl->namespaces, list) {
		if (ns->head->disk)
			kblockd_schedule_work(&ns->head->requeue_work);
	}
	up_read(&ctrl->namespaces_rwsem);
}

static const char *nvme_ana_state_names[] = {
	[0]				= "invalid state",
	[NVME_ANA_OPTIMIZED]		= "optimized",
	[NVME_ANA_NONOPTIMIZED]		= "non-optimized",
	[NVME_ANA_INACCESSIBLE]		= "inaccessible",
	[NVME_ANA_PERSISTENT_LOSS]	= "persistent-loss",
	[NVME_ANA_CHANGE]		= "change",
};

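/*
 * Pick a usable path: prefer a namespace in the ANA optimized state on a
 * live controller, and fall back to the first non-optimized one otherwise.
 * The chosen path is cached in head->current_path.
 */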
static struct nvme_ns *__nvme_find_path(struct nvme_ns_head *head)
{
	struct nvme_ns *ns, *fallback = NULL;

	list_for_each_entry_rcu(ns, &head->list, siblings) {
		if (ns->ctrl->state != NVME_CTRL_LIVE ||
		    test_bit(NVME_NS_ANA_PENDING, &ns->flags))
			continue;
		switch (ns->ana_state) {
		case NVME_ANA_OPTIMIZED:
			rcu_assign_pointer(head->current_path, ns);
			return ns;
		case NVME_ANA_NONOPTIMIZED:
			fallback = ns;
			break;
		default:
			break;
		}
	}

	if (fallback)
		rcu_assign_pointer(head->current_path, fallback);
	return fallback;
}

static inline bool nvme_path_is_optimized(struct nvme_ns *ns)
{
	return ns->ctrl->state == NVME_CTRL_LIVE &&
		ns->ana_state == NVME_ANA_OPTIMIZED;
}

inline struct nvme_ns *nvme_find_path(struct nvme_ns_head *head)
{
	struct nvme_ns *ns = srcu_dereference(head->current_path, &head->srcu);

	if (unlikely(!ns || !nvme_path_is_optimized(ns)))
		ns = __nvme_find_path(head);
	return ns;
}

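/*
 * bio submission for the multipath node: select a path under SRCU and
 * resubmit the bio to the underlying namespace.  If no path is available
 * but controllers are still attached, park the bio on the requeue list;
 * otherwise fail it.
 */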
static blk_qc_t nvme_ns_head_make_request(struct request_queue *q,
		struct bio *bio)
{
	struct nvme_ns_head *head = q->queuedata;
	struct device *dev = disk_to_dev(head->disk);
	struct nvme_ns *ns;
	blk_qc_t ret = BLK_QC_T_NONE;
	int srcu_idx;

	srcu_idx = srcu_read_lock(&head->srcu);
	ns = nvme_find_path(head);
	if (likely(ns)) {
		bio->bi_disk = ns->disk;
		bio->bi_opf |= REQ_NVME_MPATH;
		trace_block_bio_remap(bio->bi_disk->queue, bio,
				      disk_devt(ns->head->disk),
				      bio->bi_iter.bi_sector);
		ret = direct_make_request(bio);
	} else if (!list_empty_careful(&head->list)) {
		dev_warn_ratelimited(dev, "no path available - requeuing I/O\n");

		spin_lock_irq(&head->requeue_lock);
		bio_list_add(&head->requeue_list, bio);
		spin_unlock_irq(&head->requeue_lock);
	} else {
		dev_warn_ratelimited(dev, "no path - failing I/O\n");

		bio->bi_status = BLK_STS_IOERR;
		bio_endio(bio);
	}

	srcu_read_unlock(&head->srcu, srcu_idx);
	return ret;
}

static bool nvme_ns_head_poll(struct request_queue *q, blk_qc_t qc)
{
	struct nvme_ns_head *head = q->queuedata;
	struct nvme_ns *ns;
	bool found = false;
	int srcu_idx;

	srcu_idx = srcu_read_lock(&head->srcu);
	ns = srcu_dereference(head->current_path, &head->srcu);
	if (likely(ns && nvme_path_is_optimized(ns)))
		found = ns->queue->poll_fn(q, qc);
	srcu_read_unlock(&head->srcu, srcu_idx);
	return found;
}

static void nvme_requeue_work(struct work_struct *work)
{
	struct nvme_ns_head *head =
		container_of(work, struct nvme_ns_head, requeue_work);
	struct bio *bio, *next;

	spin_lock_irq(&head->requeue_lock);
	next = bio_list_get(&head->requeue_list);
	spin_unlock_irq(&head->requeue_lock);

	while ((bio = next) != NULL) {
		next = bio->bi_next;
		bio->bi_next = NULL;

		/*
		 * Reset disk to the mpath node and resubmit to select a new
		 * path.
		 */
		bio->bi_disk = head->disk;
		generic_make_request(bio);
	}
}

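/*
 * Set up the per-head state and, when the subsystem supports multiple
 * controllers, allocate the request queue and gendisk that back the
 * multipath node.
 */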
int nvme_mpath_alloc_disk(struct nvme_ctrl *ctrl, struct nvme_ns_head *head)
{
	struct request_queue *q;
	bool vwc = false;

	mutex_init(&head->lock);
	bio_list_init(&head->requeue_list);
	spin_lock_init(&head->requeue_lock);
	INIT_WORK(&head->requeue_work, nvme_requeue_work);

	/*
	 * Add a multipath node if the subsystem supports multiple controllers.
	 * We also do this for private namespaces as the namespace sharing data
	 * could change after a rescan.
	 */
	if (!(ctrl->subsys->cmic & (1 << 1)) || !multipath)
		return 0;

	q = blk_alloc_queue_node(GFP_KERNEL, NUMA_NO_NODE, NULL);
	if (!q)
		goto out;
	q->queuedata = head;
	blk_queue_make_request(q, nvme_ns_head_make_request);
	q->poll_fn = nvme_ns_head_poll;
	blk_queue_flag_set(QUEUE_FLAG_NONROT, q);
	/* set to a default value of 512 until the disk is validated */
	blk_queue_logical_block_size(q, 512);

	/* we need to propagate up the VWC settings */
	if (ctrl->vwc & NVME_CTRL_VWC_PRESENT)
		vwc = true;
	blk_queue_write_cache(q, vwc, vwc);

	head->disk = alloc_disk(0);
	if (!head->disk)
		goto out_cleanup_queue;
	head->disk->fops = &nvme_ns_head_ops;
	head->disk->private_data = head;
	head->disk->queue = q;
	head->disk->flags = GENHD_FL_EXT_DEVT;
	sprintf(head->disk->disk_name, "nvme%dn%d",
			ctrl->subsys->instance, head->instance);
	return 0;

out_cleanup_queue:
	blk_cleanup_queue(q);
out:
	return -ENOMEM;
}

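/*
 * Register the multipath node the first time a path becomes live, and kick
 * the requeue work so any bios parked while no path was available get
 * resubmitted.
 */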
static void nvme_mpath_set_live(struct nvme_ns *ns)
{
	struct nvme_ns_head *head = ns->head;

	lockdep_assert_held(&ns->head->lock);

	if (!head->disk)
		return;

	if (!(head->disk->flags & GENHD_FL_UP))
		device_add_disk(&head->subsys->dev, head->disk,
				nvme_ns_id_attr_groups);

	kblockd_schedule_work(&ns->head->requeue_work);
}

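/*
 * Walk the group descriptors in the ANA log buffer, sanity checking each
 * one against the advertised limits and buffer size, and invoke the
 * callback for every descriptor.  Callers must hold ana_lock.
 */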
static int nvme_parse_ana_log(struct nvme_ctrl *ctrl, void *data,
		int (*cb)(struct nvme_ctrl *ctrl, struct nvme_ana_group_desc *,
			  void *))
{
	void *base = ctrl->ana_log_buf;
	size_t offset = sizeof(struct nvme_ana_rsp_hdr);
	int error, i;

	lockdep_assert_held(&ctrl->ana_lock);

	for (i = 0; i < le16_to_cpu(ctrl->ana_log_buf->ngrps); i++) {
		struct nvme_ana_group_desc *desc = base + offset;
		u32 nr_nsids = le32_to_cpu(desc->nnsids);
		size_t nsid_buf_size = nr_nsids * sizeof(__le32);

		if (WARN_ON_ONCE(desc->grpid == 0))
			return -EINVAL;
		if (WARN_ON_ONCE(le32_to_cpu(desc->grpid) > ctrl->anagrpmax))
			return -EINVAL;
		if (WARN_ON_ONCE(desc->state == 0))
			return -EINVAL;
		if (WARN_ON_ONCE(desc->state > NVME_ANA_CHANGE))
			return -EINVAL;

		offset += sizeof(*desc);
		if (WARN_ON_ONCE(offset > ctrl->ana_log_size - nsid_buf_size))
			return -EINVAL;

		error = cb(ctrl, desc, data);
		if (error)
			return error;

		offset += nsid_buf_size;
		if (WARN_ON_ONCE(offset > ctrl->ana_log_size - sizeof(*desc)))
			return -EINVAL;
	}

	return 0;
}

static inline bool nvme_state_is_live(enum nvme_ana_state state)
{
	return state == NVME_ANA_OPTIMIZED || state == NVME_ANA_NONOPTIMIZED;
}

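/*
 * Apply a group descriptor to a single namespace, and make the multipath
 * node live once the namespace transitions into a usable (optimized or
 * non-optimized) state.
 */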
static void nvme_update_ns_ana_state(struct nvme_ana_group_desc *desc,
		struct nvme_ns *ns)
{
	enum nvme_ana_state old;

	mutex_lock(&ns->head->lock);
	old = ns->ana_state;
	ns->ana_grpid = le32_to_cpu(desc->grpid);
	ns->ana_state = desc->state;
	clear_bit(NVME_NS_ANA_PENDING, &ns->flags);

	if (nvme_state_is_live(ns->ana_state) && !nvme_state_is_live(old))
		nvme_mpath_set_live(ns);
	mutex_unlock(&ns->head->lock);
}

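/*
 * Per-descriptor callback for the ANA log parser: log the group state,
 * count groups still in the change state, and apply the new state to all
 * namespaces on this controller that belong to the group.
 */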
static int nvme_update_ana_state(struct nvme_ctrl *ctrl,
		struct nvme_ana_group_desc *desc, void *data)
{
	u32 nr_nsids = le32_to_cpu(desc->nnsids), n = 0;
	unsigned *nr_change_groups = data;
	struct nvme_ns *ns;

	dev_info(ctrl->device, "ANA group %d: %s.\n",
			le32_to_cpu(desc->grpid),
			nvme_ana_state_names[desc->state]);

	if (desc->state == NVME_ANA_CHANGE)
		(*nr_change_groups)++;

	if (!nr_nsids)
		return 0;

	down_write(&ctrl->namespaces_rwsem);
	list_for_each_entry(ns, &ctrl->namespaces, list) {
		if (ns->head->ns_id != le32_to_cpu(desc->nsids[n]))
			continue;
		nvme_update_ns_ana_state(desc, ns);
		if (++n == nr_nsids)
			break;
	}
	up_write(&ctrl->namespaces_rwsem);
	WARN_ON_ONCE(n < nr_nsids);
	return 0;
}

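/*
 * Fetch the ANA log page from the controller and apply it.  The ANATT
 * timer is (re)armed while any group remains in the change state so that a
 * stuck transition eventually triggers a controller reset.
 */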
static int nvme_read_ana_log(struct nvme_ctrl *ctrl, bool groups_only)
{
	u32 nr_change_groups = 0;
	int error;

	mutex_lock(&ctrl->ana_lock);
	error = nvme_get_log(ctrl, NVME_NSID_ALL, NVME_LOG_ANA,
			groups_only ? NVME_ANA_LOG_RGO : 0,
			ctrl->ana_log_buf, ctrl->ana_log_size, 0);
	if (error) {
		dev_warn(ctrl->device, "Failed to get ANA log: %d\n", error);
		goto out_unlock;
	}

	error = nvme_parse_ana_log(ctrl, &nr_change_groups,
			nvme_update_ana_state);
	if (error)
		goto out_unlock;

	/*
	 * In theory we should have an ANATT timer per group as the groups
	 * might enter the change state at different times.  But that is a lot
	 * of overhead just to protect against a target that keeps entering
	 * new change states while never finishing previous ones.  We'll still
	 * eventually time out once all groups are in the change state, so
	 * this isn't a big deal.
	 *
	 * We also double the ANATT value to provide some slack for transports
	 * or AEN processing overhead.
	 */
	if (nr_change_groups)
		mod_timer(&ctrl->anatt_timer, ctrl->anatt * HZ * 2 + jiffies);
	else
		del_timer_sync(&ctrl->anatt_timer);
out_unlock:
	mutex_unlock(&ctrl->ana_lock);
	return error;
}

static void nvme_ana_work(struct work_struct *work)
{
	struct nvme_ctrl *ctrl = container_of(work, struct nvme_ctrl, ana_work);

	nvme_read_ana_log(ctrl, false);
}

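/*
 * The ANA transition time expired while at least one group was still in
 * the change state; reset the controller to recover.
 */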
static void nvme_anatt_timeout(struct timer_list *t)
{
	struct nvme_ctrl *ctrl = from_timer(ctrl, t, anatt_timer);

	dev_info(ctrl->device, "ANATT timeout, resetting controller.\n");
	nvme_reset_ctrl(ctrl);
}

void nvme_mpath_stop(struct nvme_ctrl *ctrl)
{
	if (!nvme_ctrl_use_ana(ctrl))
		return;
	del_timer_sync(&ctrl->anatt_timer);
	cancel_work_sync(&ctrl->ana_work);
}

static ssize_t ana_grpid_show(struct device *dev, struct device_attribute *attr,
		char *buf)
{
	return sprintf(buf, "%d\n", nvme_get_ns_from_dev(dev)->ana_grpid);
}
DEVICE_ATTR_RO(ana_grpid);

static ssize_t ana_state_show(struct device *dev, struct device_attribute *attr,
		char *buf)
{
	struct nvme_ns *ns = nvme_get_ns_from_dev(dev);

	return sprintf(buf, "%s\n", nvme_ana_state_names[ns->ana_state]);
}
DEVICE_ATTR_RO(ana_state);

static int nvme_set_ns_ana_state(struct nvme_ctrl *ctrl,
		struct nvme_ana_group_desc *desc, void *data)
{
	struct nvme_ns *ns = data;

	if (ns->ana_grpid == le32_to_cpu(desc->grpid)) {
		nvme_update_ns_ana_state(desc, ns);
		return -ENXIO; /* just break out of the loop */
	}

	return 0;
}

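/*
 * Called when a namespace is (re)scanned: with ANA, look up its group state
 * from the current log; without ANA, treat the single path as optimized and
 * make the multipath node live right away.
 */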
void nvme_mpath_add_disk(struct nvme_ns *ns, struct nvme_id_ns *id)
{
	if (nvme_ctrl_use_ana(ns->ctrl)) {
		mutex_lock(&ns->ctrl->ana_lock);
		ns->ana_grpid = le32_to_cpu(id->anagrpid);
		nvme_parse_ana_log(ns->ctrl, ns, nvme_set_ns_ana_state);
		mutex_unlock(&ns->ctrl->ana_lock);
	} else {
		mutex_lock(&ns->head->lock);
		ns->ana_state = NVME_ANA_OPTIMIZED;
		nvme_mpath_set_live(ns);
		mutex_unlock(&ns->head->lock);
	}
}

void nvme_mpath_remove_disk(struct nvme_ns_head *head)
{
	if (!head->disk)
		return;
	if (head->disk->flags & GENHD_FL_UP)
		del_gendisk(head->disk);
	blk_set_queue_dying(head->disk->queue);
	/* make sure all pending bios are cleaned up */
	kblockd_schedule_work(&head->requeue_work);
	flush_work(&head->requeue_work);
	blk_cleanup_queue(head->disk->queue);
	put_disk(head->disk);
}

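/*
 * Read the ANA fields from Identify Controller, size and allocate the log
 * buffer (header plus one descriptor per group, plus NSID lists unless
 * ANACAP bit 6 is set), and do the initial log read.
 */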
int nvme_mpath_init(struct nvme_ctrl *ctrl, struct nvme_id_ctrl *id)
{
	int error;

	if (!nvme_ctrl_use_ana(ctrl))
		return 0;

	ctrl->anacap = id->anacap;
	ctrl->anatt = id->anatt;
	ctrl->nanagrpid = le32_to_cpu(id->nanagrpid);
	ctrl->anagrpmax = le32_to_cpu(id->anagrpmax);

	mutex_init(&ctrl->ana_lock);
	timer_setup(&ctrl->anatt_timer, nvme_anatt_timeout, 0);
	ctrl->ana_log_size = sizeof(struct nvme_ana_rsp_hdr) +
		ctrl->nanagrpid * sizeof(struct nvme_ana_group_desc);
	if (!(ctrl->anacap & (1 << 6)))
		ctrl->ana_log_size += ctrl->max_namespaces * sizeof(__le32);

	if (ctrl->ana_log_size > ctrl->max_hw_sectors << SECTOR_SHIFT) {
		dev_err(ctrl->device,
			"ANA log page size (%zd) larger than MDTS (%d).\n",
			ctrl->ana_log_size,
			ctrl->max_hw_sectors << SECTOR_SHIFT);
		dev_err(ctrl->device, "disabling ANA support.\n");
		return 0;
	}

	INIT_WORK(&ctrl->ana_work, nvme_ana_work);
	ctrl->ana_log_buf = kmalloc(ctrl->ana_log_size, GFP_KERNEL);
	if (!ctrl->ana_log_buf)
		goto out;

	error = nvme_read_ana_log(ctrl, true);
	if (error)
		goto out_free_ana_log_buf;
	return 0;
out_free_ana_log_buf:
	kfree(ctrl->ana_log_buf);
out:
	return -ENOMEM;
}

void nvme_mpath_uninit(struct nvme_ctrl *ctrl)
{
	kfree(ctrl->ana_log_buf);
}