/*
 * Generic cpu hotunplug interrupt migration code copied from the
 * arch/arm implementation
 *
 * Copyright (C) Russell King
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#include <linux/interrupt.h>
#include <linux/ratelimit.h>
#include <linux/irq.h>

#include "internals.h"

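/*
 * migrate_one_irq() is called with desc->lock held on the CPU that is
 * going offline. It returns true if keeping the interrupt serviced
 * required breaking the user supplied affinity mask, and false
 * otherwise (including when nothing had to be done or when setting the
 * new affinity failed).
 */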
static bool migrate_one_irq(struct irq_desc *desc)
{
        struct irq_data *d = irq_desc_get_irq_data(desc);
        struct irq_chip *chip = irq_data_get_irq_chip(d);
        const struct cpumask *affinity;
        bool brokeaff = false;
        int err;

        /*
         * The IRQ chip might already be torn down, but the irq descriptor
         * is still in the radix tree. Also, if the chip has no affinity
         * setter, nothing can be done here.
         */
        if (!chip || !chip->irq_set_affinity) {
                pr_debug("IRQ %u: Unable to migrate away\n", d->irq);
                return false;
        }

        /*
         * No move required if:
         * - Interrupt is per cpu
         * - Interrupt is not started
         * - Affinity mask does not include this CPU.
         *
         * Note: Do not check desc->action as this might be a chained
         * interrupt.
         */
        affinity = irq_data_get_affinity_mask(d);
        if (irqd_is_per_cpu(d) || !irqd_is_started(d) ||
            !cpumask_test_cpu(smp_processor_id(), affinity)) {
                /*
                 * If an irq move is pending, abort it if the dying CPU is
                 * the sole target.
                 */
                irq_fixup_move_pending(desc, false);
                return false;
        }

        /*
         * Complete a possibly pending irq move cleanup. If this interrupt
         * was moved in hard irq context, the vectors need to be cleaned
         * up here; the cleanup cannot wait until this interrupt actually
         * fires, because this CPU is on its way out.
         */
        irq_force_complete_move(desc);

        /*
         * If there is a setaffinity pending, then try to reuse the pending
         * mask, so the last change of the affinity does not get lost. If
         * there is no move pending or the pending mask does not contain
         * any online CPU, use the current affinity mask.
         */
        if (irq_fixup_move_pending(desc, true))
                affinity = irq_desc_get_pending_mask(desc);

        if (cpumask_any_and(affinity, cpu_online_mask) >= nr_cpu_ids) {
                affinity = cpu_online_mask;
                brokeaff = true;
        }

        err = irq_do_set_affinity(d, affinity, false);
        if (err) {
                pr_warn_ratelimited("IRQ%u: set affinity failed(%d).\n",
                                    d->irq, err);
                return false;
        }
        return brokeaff;
}

/**
 * irq_migrate_all_off_this_cpu - Migrate irqs away from an offline cpu
 *
 * The current CPU has been marked offline. Migrate IRQs off this CPU.
 * If the affinity settings do not allow other CPUs, force them onto any
 * available CPU. Must be invoked on the outgoing CPU itself, with
 * interrupts disabled, after the CPU has been marked offline.
 *
 * Note: we must iterate over all IRQs, whether they have an attached
 * action structure or not, as we need to get chained interrupts too.
 */
void irq_migrate_all_off_this_cpu(void)
{
        struct irq_desc *desc;
        unsigned int irq;

        for_each_active_irq(irq) {
                bool affinity_broken;

                desc = irq_to_desc(irq);
                raw_spin_lock(&desc->lock);
                affinity_broken = migrate_one_irq(desc);
                raw_spin_unlock(&desc->lock);

                if (affinity_broken) {
                        pr_warn_ratelimited("IRQ %u: no longer affine to CPU%u\n",
                                            irq, smp_processor_id());
                }
        }
}
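
/*
 * Illustrative call site (a sketch, not part of this file): architectures
 * invoke irq_migrate_all_off_this_cpu() from their CPU teardown path once
 * the outgoing CPU has been marked offline, e.g. on arm64 from
 * __cpu_disable():
 *
 *	int __cpu_disable(void)
 *	{
 *		...
 *		set_cpu_online(cpu, false);
 *		irq_migrate_all_off_this_cpu();
 *		...
 *	}
 */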