// SPDX-License-Identifier: GPL-2.0
/*
 * Generic cpu hotunplug interrupt migration code copied from the
 * arch/arm implementation
 *
 * Copyright (C) Russell King
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#include <linux/interrupt.h>
#include <linux/ratelimit.h>
#include <linux/irq.h>
#include <linux/sched/isolation.h>

#include "internals.h"

/* For !GENERIC_IRQ_EFFECTIVE_AFF_MASK this looks at general affinity mask */
static inline bool irq_needs_fixup(struct irq_data *d)
{
	const struct cpumask *m = irq_data_get_effective_affinity_mask(d);
	unsigned int cpu = smp_processor_id();

#ifdef CONFIG_GENERIC_IRQ_EFFECTIVE_AFF_MASK
	/*
	 * The cpumask_empty() check is a workaround for interrupt chips
	 * which do not implement effective affinity even though the
	 * architecture has enabled the config switch. Use the general
	 * affinity mask instead.
	 */
	if (cpumask_empty(m))
		m = irq_data_get_affinity_mask(d);

	/*
	 * Sanity check. If the mask is not empty when excluding the outgoing
	 * CPU then it must contain at least one online CPU. The outgoing CPU
	 * has been removed from the online mask already.
	 */
	if (cpumask_any_but(m, cpu) < nr_cpu_ids &&
	    cpumask_any_and(m, cpu_online_mask) >= nr_cpu_ids) {
		/*
		 * If this happens then there was a missed IRQ fixup at some
		 * point. Warn about it and enforce fixup.
		 */
		pr_warn("Eff. affinity %*pbl of IRQ %u contains only offline CPUs after offlining CPU %u\n",
			cpumask_pr_args(m), d->irq, cpu);
		return true;
	}
#endif
	return cpumask_test_cpu(cpu, m);
}

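/*
 * migrate_one_irq - Move one interrupt away from the outgoing CPU
 *
 * Called with desc->lock held. Returns true when the interrupt could not
 * keep its configured affinity and had to be forced onto the online mask,
 * i.e. the affinity setting was broken.
 */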
static bool migrate_one_irq(struct irq_desc *desc)
{
	struct irq_data *d = irq_desc_get_irq_data(desc);
	struct irq_chip *chip = irq_data_get_irq_chip(d);
	bool maskchip = !irq_can_move_pcntxt(d) && !irqd_irq_masked(d);
	const struct cpumask *affinity;
	bool brokeaff = false;
	int err;

	/*
	 * The IRQ chip might already be torn down, but the irq descriptor
	 * is still in the radix tree. Also, if the chip has no affinity
	 * setter, nothing can be done here.
	 */
	if (!chip || !chip->irq_set_affinity) {
		pr_debug("IRQ %u: Unable to migrate away\n", d->irq);
		return false;
	}

	/*
	 * No move required if:
	 * - Interrupt is per cpu
	 * - Interrupt is not started
	 * - Affinity mask does not include this CPU.
	 *
	 * Note: Do not check desc->action as this might be a chained
	 * interrupt.
	 */
	if (irqd_is_per_cpu(d) || !irqd_is_started(d) || !irq_needs_fixup(d)) {
		/*
		 * If an irq move is pending, abort it if the dying CPU is
		 * the sole target.
		 */
		irq_fixup_move_pending(desc, false);
		return false;
	}

	/*
	 * Complete any pending irq move cleanup. If this interrupt was
	 * moved in hard irq context, the vectors need to be cleaned up
	 * here; the cleanup cannot wait until this interrupt fires again
	 * with this CPU involved, because the CPU is going offline.
	 */
	irq_force_complete_move(desc);

	/*
	 * If there is a setaffinity pending, then try to reuse the pending
	 * mask, so the last affinity change does not get lost. If there is
	 * no move pending or the pending mask does not contain any online
	 * CPU, use the current affinity mask.
	 */
	if (irq_fixup_move_pending(desc, true))
		affinity = irq_desc_get_pending_mask(desc);
	else
		affinity = irq_data_get_affinity_mask(d);

	/* Mask the chip for interrupts which cannot move in process context */
	if (maskchip && chip->irq_mask)
		chip->irq_mask(d);

	if (cpumask_any_and(affinity, cpu_online_mask) >= nr_cpu_ids) {
		/*
		 * If the interrupt is managed, then shut it down and leave
		 * the affinity untouched.
		 */
		if (irqd_affinity_is_managed(d)) {
			irqd_set_managed_shutdown(d);
			irq_shutdown_and_deactivate(desc);
			return false;
		}
		affinity = cpu_online_mask;
		brokeaff = true;
	}
	/*
	 * Do not set the force argument of irq_do_set_affinity() as this
	 * disables the masking of offline CPUs from the supplied affinity
	 * mask and therefore might keep/reassign the irq to the outgoing
	 * CPU.
	 */
	err = irq_do_set_affinity(d, affinity, false);
	if (err) {
		pr_warn_ratelimited("IRQ%u: set affinity failed(%d).\n",
				    d->irq, err);
		brokeaff = false;
	}

	if (maskchip && chip->irq_unmask)
		chip->irq_unmask(d);

	return brokeaff;
}

/**
 * irq_migrate_all_off_this_cpu - Migrate irqs away from offline cpu
 *
 * The current CPU has been marked offline. Migrate IRQs off this CPU.
 * If the affinity settings do not allow other CPUs, force them onto any
 * available CPU.
 *
 * Note: we must iterate over all IRQs, whether they have an attached
 * action structure or not, as we need to get chained interrupts too.
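 *
 * This runs on the outgoing CPU; the descriptor locks below are taken
 * with a plain raw_spin_lock(), which relies on interrupts already being
 * disabled at this point in the hotplug path.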
 */
void irq_migrate_all_off_this_cpu(void)
{
	struct irq_desc *desc;
	unsigned int irq;

	for_each_active_irq(irq) {
		bool affinity_broken;

		desc = irq_to_desc(irq);
		raw_spin_lock(&desc->lock);
		affinity_broken = migrate_one_irq(desc);
		raw_spin_unlock(&desc->lock);

		if (affinity_broken) {
			pr_debug_ratelimited("IRQ %u: no longer affine to CPU%u\n",
					     irq, smp_processor_id());
		}
	}
}

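/*
 * hk_should_isolate - Check whether a managed interrupt should be pulled
 * onto the upcoming housekeeping CPU.
 *
 * Returns true when housekeeping of managed interrupts is enabled, the
 * upcoming CPU is a housekeeping CPU and the effective affinity mask still
 * contains isolated (non-housekeeping) CPUs.
 */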
static bool hk_should_isolate(struct irq_data *data, unsigned int cpu)
{
	const struct cpumask *hk_mask;

	if (!housekeeping_enabled(HK_FLAG_MANAGED_IRQ))
		return false;

	hk_mask = housekeeping_cpumask(HK_FLAG_MANAGED_IRQ);
	if (cpumask_subset(irq_data_get_effective_affinity_mask(data), hk_mask))
		return false;

	return cpumask_test_cpu(cpu, hk_mask);
}

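/*
 * irq_restore_affinity_of_irq - Re-establish the affinity of a managed
 * interrupt when the upcoming CPU is part of its affinity mask.
 *
 * Interrupts which were shut down because the last CPU in their affinity
 * mask went offline are started up again. Otherwise the affinity is
 * re-evaluated when the interrupt is not restricted to a single target
 * or when it should be pulled back onto a housekeeping CPU.
 */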
static void irq_restore_affinity_of_irq(struct irq_desc *desc, unsigned int cpu)
{
	struct irq_data *data = irq_desc_get_irq_data(desc);
	const struct cpumask *affinity = irq_data_get_affinity_mask(data);

	if (!irqd_affinity_is_managed(data) || !desc->action ||
	    !irq_data_get_irq_chip(data) || !cpumask_test_cpu(cpu, affinity))
		return;

	if (irqd_is_managed_and_shutdown(data)) {
		irq_startup(desc, IRQ_RESEND, IRQ_START_COND);
		return;
	}

	/*
	 * If the interrupt can only be directed to a single target
	 * CPU then it is already assigned to a CPU in the affinity
	 * mask. No point in trying to move it around unless the
	 * isolation mechanism requests moving it to the upcoming
	 * housekeeping CPU.
	 */
	if (!irqd_is_single_target(data) || hk_should_isolate(data, cpu))
		irq_set_affinity_locked(data, affinity, false);
}

/**
 * irq_affinity_online_cpu - Restore affinity for managed interrupts
 * @cpu: Upcoming CPU for which interrupts should be restored
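 *
 * Return: 0. Registered as the startup callback of the
 * CPUHP_AP_IRQ_AFFINITY_ONLINE hotplug state.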
 */
int irq_affinity_online_cpu(unsigned int cpu)
{
	struct irq_desc *desc;
	unsigned int irq;

	irq_lock_sparse();
	for_each_active_irq(irq) {
		desc = irq_to_desc(irq);
		raw_spin_lock_irq(&desc->lock);
		irq_restore_affinity_of_irq(desc, cpu);
		raw_spin_unlock_irq(&desc->lock);
	}
	irq_unlock_sparse();

	return 0;
}