// SPDX-License-Identifier: GPL-2.0-only
/*
 * ARC ARConnect (MultiCore IP) support (formerly known as MCIP)
 *
 * Copyright (C) 2013 Synopsys, Inc. (www.synopsys.com)
 */

#include <linux/smp.h>
#include <linux/irq.h>
#include <linux/irqchip/chained_irq.h>
#include <linux/spinlock.h>
#include <soc/arc/mcip.h>
#include <asm/irqflags-arcv2.h>
#include <asm/setup.h>

static DEFINE_RAW_SPINLOCK(mcip_lock);
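
/*
 * Every ARConnect command is a two-step aux register sequence: write the
 * command to ARC_REG_MCIP_CMD, then (for commands that return data) read
 * ARC_REG_MCIP_READBACK. mcip_lock keeps that pair atomic across cores.
 * A rough sketch of the idiom used throughout this file (the actual
 * __mcip_cmd*() helpers live in soc/arc/mcip.h):
 *
 *	raw_spin_lock_irqsave(&mcip_lock, flags);
 *	__mcip_cmd(cmd, param);				// issue command
 *	ret = read_aux_reg(ARC_REG_MCIP_READBACK);	// fetch result
 *	raw_spin_unlock_irqrestore(&mcip_lock, flags);
 */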

#ifdef CONFIG_SMP

static char smp_cpuinfo_buf[128];

/*
 * Set mask to halt GFRC if any online core in the SMP cluster is halted.
 * GFRC is the cluster-wide free running counter backing the SMP
 * clocksource, so halting it along with the cores keeps time from racing
 * ahead while a core is stopped in a debugger.
 * Only works for ARC HS v3.0+; it has no effect on earlier versions.
 */
static void mcip_update_gfrc_halt_mask(int cpu)
{
	struct bcr_generic gfrc;
	unsigned long flags;
	u32 gfrc_halt_mask;

	READ_BCR(ARC_REG_GFRC_BUILD, gfrc);

	/*
	 * CMD_GFRC_SET_CORE and CMD_GFRC_READ_CORE commands were added in
	 * GFRC version 0x3.
	 */
	if (gfrc.ver < 0x3)
		return;

	raw_spin_lock_irqsave(&mcip_lock, flags);

	__mcip_cmd(CMD_GFRC_READ_CORE, 0);
	gfrc_halt_mask = read_aux_reg(ARC_REG_MCIP_READBACK);
	gfrc_halt_mask |= BIT(cpu);
	__mcip_cmd_data(CMD_GFRC_SET_CORE, 0, gfrc_halt_mask);

	raw_spin_unlock_irqrestore(&mcip_lock, flags);
}

static void mcip_update_debug_halt_mask(int cpu)
{
	u32 mcip_mask = 0;
	unsigned long flags;

	raw_spin_lock_irqsave(&mcip_lock, flags);

	/*
	 * mcip_mask is the same for the CMD_DEBUG_SET_SELECT and
	 * CMD_DEBUG_SET_MASK commands, so read it once instead of reading
	 * both CMD_DEBUG_READ_MASK and CMD_DEBUG_READ_SELECT.
	 */
	__mcip_cmd(CMD_DEBUG_READ_SELECT, 0);
	mcip_mask = read_aux_reg(ARC_REG_MCIP_READBACK);

	mcip_mask |= BIT(cpu);

	__mcip_cmd_data(CMD_DEBUG_SET_SELECT, 0, mcip_mask);
	/*
	 * The parameter specifies which halt causes are observed:
	 * STATUS32[H]/actionpoint/breakpoint/self-halt.
	 * We choose all of them (0xF).
	 */
	__mcip_cmd_data(CMD_DEBUG_SET_MASK, 0xF, mcip_mask);

	raw_spin_unlock_irqrestore(&mcip_lock, flags);
}

static void mcip_setup_per_cpu(int cpu)
{
	struct mcip_bcr mp;

	READ_BCR(ARC_REG_MCIP_BCR, mp);

	smp_ipi_irq_setup(cpu, IPI_IRQ);
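	/* SOFTIRQ_IRQ is the self-IPI vector: ARConnect can only IPI other cores */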
	smp_ipi_irq_setup(cpu, SOFTIRQ_IRQ);

	/* Update GFRC halt mask as a new CPU comes online */
	if (mp.gfrc)
		mcip_update_gfrc_halt_mask(cpu);

	/* Update MCIP debug mask as a new CPU comes online */
	if (mp.dbg)
		mcip_update_debug_halt_mask(cpu);
}

static void mcip_ipi_send(int cpu)
{
	unsigned long flags;
	int ipi_was_pending;

	/* ARConnect can only send IPI to others */
	if (unlikely(cpu == raw_smp_processor_id())) {
		arc_softirq_trigger(SOFTIRQ_IRQ);
		return;
	}

	raw_spin_lock_irqsave(&mcip_lock, flags);

	/*
	 * If the receiver already has a pending interrupt, elide sending
	 * this one. Linux cross-core calling works fine with concurrent
	 * IPIs coalesced into one; see ipi_send_msg_one() in
	 * arch/arc/kernel/smp.c.
	 */
	__mcip_cmd(CMD_INTRPT_READ_STATUS, cpu);
	ipi_was_pending = read_aux_reg(ARC_REG_MCIP_READBACK);
	if (!ipi_was_pending)
		__mcip_cmd(CMD_INTRPT_GENERATE_IRQ, cpu);

	raw_spin_unlock_irqrestore(&mcip_lock, flags);
}

static void mcip_ipi_clear(int irq)
{
	unsigned int cpu, c;
	unsigned long flags;

	if (unlikely(irq == SOFTIRQ_IRQ)) {
		arc_softirq_clear(irq);
		return;
	}

	raw_spin_lock_irqsave(&mcip_lock, flags);

	/* Who sent the IPI */
	__mcip_cmd(CMD_INTRPT_CHECK_SOURCE, 0);

	cpu = read_aux_reg(ARC_REG_MCIP_READBACK);	/* 1,2,4,8... */

	/*
	 * In rare cases, multiple concurrent IPIs sent to the same target
	 * can be coalesced by MCIP into one asserted IRQ, so @cpu can be
	 * "vectored" (multiple bits set) rather than the typical single
	 * bit: e.g. a readback of 6 means cores 1 and 2 both sent an IPI,
	 * and each set bit must be ACKed individually.
	 */
	do {
		c = __ffs(cpu);			/* 0,1,2,3 */
		__mcip_cmd(CMD_INTRPT_GENERATE_ACK, c);
		cpu &= ~(1U << c);
	} while (cpu);

	raw_spin_unlock_irqrestore(&mcip_lock, flags);
}

static void mcip_probe_n_setup(void)
{
	struct mcip_bcr mp;

	READ_BCR(ARC_REG_MCIP_BCR, mp);

	sprintf(smp_cpuinfo_buf,
		"Extn [SMP]\t: ARConnect (v%d): %d cores with %s%s%s%s\n",
		mp.ver, mp.num_cores,
		IS_AVAIL1(mp.ipi, "IPI "),
		IS_AVAIL1(mp.idu, "IDU "),
		IS_AVAIL1(mp.dbg, "DEBUG "),
		IS_AVAIL1(mp.gfrc, "GFRC"));

	cpuinfo_arc700[0].extn.gfrc = mp.gfrc;
}

struct plat_smp_ops plat_smp_ops = {
	.info		= smp_cpuinfo_buf,
	.init_early_smp	= mcip_probe_n_setup,
	.init_per_cpu	= mcip_setup_per_cpu,
	.ipi_send	= mcip_ipi_send,
	.ipi_clear	= mcip_ipi_clear,
};

#endif

/***************************************************************************
 * ARCv2 Interrupt Distribution Unit (IDU)
 *
 * Connects external "COMMON" IRQs to core intc, providing:
 *  -dynamic routing (IRQ affinity)
 *  -load balancing (Round Robin interrupt distribution)
 *  -1:N distribution
 *
 * It physically resides in the MCIP hw block
 */
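
/*
 * The flow of a common IRQ, as wired up below:
 *
 *	device --> IDU common irq N --> core intc irq (FIRST_EXT_IRQ + N)
 *	       --> idu_cascade_isr() --> generic_handle_irq(IDU domain virq)
 *
 * i.e. the IDU is chained behind the per-core intc: idu_of_init() maps
 * each parent core irq and installs idu_cascade_isr() on it, which then
 * demuxes to the Linux virq registered in the IDU's own irq domain.
 */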

#include <linux/irqchip.h>
#include <linux/of.h>
#include <linux/of_irq.h>

/*
 * Set the DEST for @cmn_irq to @cpu_mask (1 bit per core)
 */
static void idu_set_dest(unsigned int cmn_irq, unsigned int cpu_mask)
{
	__mcip_cmd_data(CMD_IDU_SET_DEST, cmn_irq, cpu_mask);
}

static void idu_set_mode(unsigned int cmn_irq, bool set_lvl, unsigned int lvl,
			 bool set_distr, unsigned int distr)
{
	union {
		unsigned int word;
		struct {
			unsigned int distr:2, pad:2, lvl:1, pad2:27;
		};
	} data;

	data.word = __mcip_cmd_read(CMD_IDU_READ_MODE, cmn_irq);
	if (set_distr)
		data.distr = distr;
	if (set_lvl)
		data.lvl = lvl;
	__mcip_cmd_data(CMD_IDU_SET_MODE, cmn_irq, data.word);
}

static void idu_irq_mask_raw(irq_hw_number_t hwirq)
{
	unsigned long flags;

	raw_spin_lock_irqsave(&mcip_lock, flags);
	__mcip_cmd_data(CMD_IDU_SET_MASK, hwirq, 1);
	raw_spin_unlock_irqrestore(&mcip_lock, flags);
}

static void idu_irq_mask(struct irq_data *data)
{
	idu_irq_mask_raw(data->hwirq);
}

static void idu_irq_unmask(struct irq_data *data)
{
	unsigned long flags;

	raw_spin_lock_irqsave(&mcip_lock, flags);
	__mcip_cmd_data(CMD_IDU_SET_MASK, data->hwirq, 0);
	raw_spin_unlock_irqrestore(&mcip_lock, flags);
}

static void idu_irq_ack(struct irq_data *data)
{
	unsigned long flags;

	raw_spin_lock_irqsave(&mcip_lock, flags);
	__mcip_cmd(CMD_IDU_ACK_CIRQ, data->hwirq);
	raw_spin_unlock_irqrestore(&mcip_lock, flags);
}

static void idu_irq_mask_ack(struct irq_data *data)
{
	unsigned long flags;

	raw_spin_lock_irqsave(&mcip_lock, flags);
	__mcip_cmd_data(CMD_IDU_SET_MASK, data->hwirq, 1);
	__mcip_cmd(CMD_IDU_ACK_CIRQ, data->hwirq);
	raw_spin_unlock_irqrestore(&mcip_lock, flags);
}

static int
idu_irq_set_affinity(struct irq_data *data, const struct cpumask *cpumask,
		     bool force)
{
	unsigned long flags;
	cpumask_t online;
	unsigned int destination_bits;
	unsigned int distribution_mode;

	/* error out if there's no online CPU in @cpumask */
	if (!cpumask_and(&online, cpumask, cpu_online_mask))
		return -EINVAL;

	raw_spin_lock_irqsave(&mcip_lock, flags);

	destination_bits = cpumask_bits(&online)[0];
	idu_set_dest(data->hwirq, destination_bits);

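	/*
	 * A single destination bit (ffs == fls) pins the IRQ to that core,
	 * e.g. mask 0x4 -> core 2 only; multiple bits, e.g. 0x5, switch to
	 * Round Robin distribution between the selected cores (0 and 2).
	 */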
	if (ffs(destination_bits) == fls(destination_bits))
		distribution_mode = IDU_M_DISTRI_DEST;
	else
		distribution_mode = IDU_M_DISTRI_RR;

	idu_set_mode(data->hwirq, false, 0, true, distribution_mode);

	raw_spin_unlock_irqrestore(&mcip_lock, flags);

	return IRQ_SET_MASK_OK;
}

static int idu_irq_set_type(struct irq_data *data, u32 type)
{
	unsigned long flags;

	/*
	 * ARCv2 IDU HW does not support inverse polarity, so these are the
	 * only interrupt types supported.
	 */
	if (type & ~(IRQ_TYPE_EDGE_RISING | IRQ_TYPE_LEVEL_HIGH))
		return -EINVAL;

	raw_spin_lock_irqsave(&mcip_lock, flags);

	idu_set_mode(data->hwirq, true,
		     type & IRQ_TYPE_EDGE_RISING ? IDU_M_TRIG_EDGE :
						   IDU_M_TRIG_LEVEL,
		     false, 0);

	raw_spin_unlock_irqrestore(&mcip_lock, flags);

	return 0;
}

static void idu_irq_enable(struct irq_data *data)
{
	/*
	 * By default send all common interrupts to all available online CPUs.
	 * The affinity of common interrupts in IDU must be set manually since
	 * in some cases the kernel will not call irq_set_affinity() by itself:
	 * 1. When the kernel is not configured with SMP support.
	 * 2. When the kernel is configured with SMP support, but the upper
	 *    interrupt controllers do not support setting the affinity and
	 *    cannot propagate it to IDU.
	 */
	idu_irq_set_affinity(data, cpu_online_mask, false);
	idu_irq_unmask(data);
}

static struct irq_chip idu_irq_chip = {
	.name			= "MCIP IDU Intc",
	.irq_mask		= idu_irq_mask,
	.irq_unmask		= idu_irq_unmask,
	.irq_ack		= idu_irq_ack,
	.irq_mask_ack		= idu_irq_mask_ack,
	.irq_enable		= idu_irq_enable,
	.irq_set_type		= idu_irq_set_type,
#ifdef CONFIG_SMP
	.irq_set_affinity	= idu_irq_set_affinity,
#endif
};

static void idu_cascade_isr(struct irq_desc *desc)
{
	struct irq_domain *idu_domain = irq_desc_get_handler_data(desc);
	struct irq_chip *core_chip = irq_desc_get_chip(desc);
	irq_hw_number_t core_hwirq = irqd_to_hwirq(irq_desc_get_irq_data(desc));
	irq_hw_number_t idu_hwirq = core_hwirq - FIRST_EXT_IRQ;

	chained_irq_enter(core_chip, desc);
	generic_handle_irq(irq_find_mapping(idu_domain, idu_hwirq));
	chained_irq_exit(core_chip, desc);
}

static int idu_irq_map(struct irq_domain *d, unsigned int virq, irq_hw_number_t hwirq)
{
	irq_set_chip_and_handler(virq, &idu_irq_chip, handle_level_irq);
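	/*
	 * IRQ_MOVE_PCNTXT lets the genirq core apply an affinity change
	 * immediately from process context instead of deferring it to the
	 * next hard interrupt.
	 */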
	irq_set_status_flags(virq, IRQ_MOVE_PCNTXT);

	return 0;
}

static const struct irq_domain_ops idu_irq_ops = {
	.xlate	= irq_domain_xlate_onetwocell,
	.map	= idu_irq_map,
};

/*
 * [16, 23]: Statically assigned always private-per-core (Timers, WDT, IPI)
 * [24, 23+C]: If C > 0 then "C" common IRQs
 * [24+C, N]: Not statically assigned, private-per-core
 */
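
/*
 * A devicetree fragment using the IDU might look roughly like this (the
 * labels and the common irq number are illustrative, not from a real
 * board). Per the onetwocell xlate above, a consumer may pass just the
 * common irq number, or add a second cell with the trigger type:
 *
 *	idu_intc: idu-interrupt-controller {
 *		compatible = "snps,archs-idu-intc";
 *		interrupt-controller;
 *		#interrupt-cells = <1>;
 *	};
 *
 *	some-device {
 *		interrupt-parent = <&idu_intc>;
 *		interrupts = <2>;	// IDU common irq #2
 *	};
 */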

static int __init
idu_of_init(struct device_node *intc, struct device_node *parent)
{
	struct irq_domain *domain;
	int nr_irqs;
	int i, virq;
	struct mcip_bcr mp;
	struct mcip_idu_bcr idu_bcr;

	READ_BCR(ARC_REG_MCIP_BCR, mp);

	if (!mp.idu)
		panic("IDU not detected, but DeviceTree using it");

	READ_BCR(ARC_REG_MCIP_IDU_BCR, idu_bcr);
	nr_irqs = mcip_idu_bcr_to_nr_irqs(idu_bcr);

	pr_info("MCIP: IDU supports %u common irqs\n", nr_irqs);

	domain = irq_domain_add_linear(intc, nr_irqs, &idu_irq_ops, NULL);

	/* Parent interrupts (core-intc) are already mapped */

	for (i = 0; i < nr_irqs; i++) {
		/* Mask all common interrupts by default */
		idu_irq_mask_raw(i);

		/*
		 * Map the parent uplink IRQ (towards core intc) 24, 25, ...
		 * These mappings were already created during core intc init,
		 * but we redo the lookup here to get the parent virq, so the
		 * IDU cascade handler can be installed as its first-level ISR.
		 */
		virq = irq_create_mapping(NULL, i + FIRST_EXT_IRQ);
		BUG_ON(!virq);
		irq_set_chained_handler_and_data(virq, idu_cascade_isr, domain);
	}

	__mcip_cmd(CMD_IDU_ENABLE, 0);

	return 0;
}
IRQCHIP_DECLARE(arcv2_idu_intc, "snps,archs-idu-intc", idu_of_init);