/*
 * Generic cpu hotunplug interrupt migration code copied from the
 * arch/arm implementation
 *
 * Copyright (C) Russell King
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#include <linux/interrupt.h>
#include <linux/ratelimit.h>
#include <linux/irq.h>

#include "internals.h"

static bool migrate_one_irq(struct irq_desc *desc)
{
	struct irq_data *d = irq_desc_get_irq_data(desc);
	struct irq_chip *chip = irq_data_get_irq_chip(d);
	const struct cpumask *affinity;
	bool brokeaff = false;
	int err;

	/*
	 * IRQ chip might be already torn down, but the irq descriptor is
	 * still in the radix tree. Also if the chip has no affinity setter,
	 * nothing can be done here.
	 */
	if (!chip || !chip->irq_set_affinity) {
		pr_debug("IRQ %u: Unable to migrate away\n", d->irq);
		return false;
	}

	/*
	 * No move required, if:
	 * - Interrupt is per cpu
	 * - Interrupt is not started
	 * - Affinity mask does not include this CPU.
	 *
	 * Note: Do not check desc->action as this might be a chained
	 * interrupt.
	 */
	affinity = irq_data_get_affinity_mask(d);
	if (irqd_is_per_cpu(d) || !irqd_is_started(d) ||
	    !cpumask_test_cpu(smp_processor_id(), affinity)) {
		/*
		 * If an irq move is pending, abort it if the dying CPU is
		 * the sole target.
		 */
		irq_fixup_move_pending(desc, false);
		return false;
	}

	/*
	 * Complete a possibly pending irq move cleanup. If this
	 * interrupt was moved in hard irq context, then the vectors need
	 * to be cleaned up. The cleanup cannot wait until this interrupt
	 * actually happens and this CPU is involved.
	 */
	irq_force_complete_move(desc);

	/*
	 * If there is a setaffinity pending, then try to reuse the pending
	 * mask, so the last change of the affinity does not get lost. If
	 * there is no move pending or the pending mask does not contain
	 * any online CPU, use the current affinity mask.
	 */
	if (irq_fixup_move_pending(desc, true))
		affinity = irq_desc_get_pending_mask(desc);

	if (cpumask_any_and(affinity, cpu_online_mask) >= nr_cpu_ids) {
		affinity = cpu_online_mask;
		brokeaff = true;
	}

	err = irq_do_set_affinity(d, affinity, false);
	if (err) {
		pr_warn_ratelimited("IRQ%u: set affinity failed(%d).\n",
				    d->irq, err);
		return false;
	}
	return brokeaff;
}

/**
 * irq_migrate_all_off_this_cpu - Migrate irqs away from offline cpu
 *
 * The current CPU has been marked offline. Migrate IRQs off this CPU.
 * If the affinity settings do not allow other CPUs, force them onto any
 * available CPU.
 *
 * Note: we must iterate over all IRQs, whether they have an attached
 * action structure or not, as we need to get chained interrupts too.
 */
void irq_migrate_all_off_this_cpu(void)
{
	struct irq_desc *desc;
	unsigned int irq;

	for_each_active_irq(irq) {
		bool affinity_broken;

		desc = irq_to_desc(irq);
		raw_spin_lock(&desc->lock);
		affinity_broken = migrate_one_irq(desc);
		raw_spin_unlock(&desc->lock);

		if (affinity_broken) {
			pr_warn_ratelimited("IRQ %u: no longer affine to CPU%u\n",
					    irq, smp_processor_id());
		}
	}
}
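
/*
 * Usage note (editorial addition, not part of the upstream file):
 * architectures that select GENERIC_IRQ_MIGRATION are expected to call
 * irq_migrate_all_off_this_cpu() from their CPU takedown path, after the
 * dying CPU has been cleared from cpu_online_mask and with interrupts
 * disabled. A minimal sketch of such a caller, loosely modelled on the
 * arm/arm64 __cpu_disable() implementations; the surrounding steps are
 * architecture specific and assumed here:
 *
 *	int __cpu_disable(void)
 *	{
 *		unsigned int cpu = smp_processor_id();
 *
 *		// Drop this CPU from the online mask so migrate_one_irq()
 *		// no longer treats it as a valid affinity target.
 *		set_cpu_online(cpu, false);
 *
 *		// Move every started interrupt currently targeting this
 *		// CPU to another online CPU, breaking user-configured
 *		// affinity only if no other online target remains.
 *		irq_migrate_all_off_this_cpu();
 *
 *		return 0;
 *	}
 */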