/*
 * ARC ARConnect (MultiCore IP) support (formerly known as MCIP)
 *
 * Copyright (C) 2013 Synopsys, Inc. (www.synopsys.com)
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#include <linux/smp.h>
#include <linux/irq.h>
#include <linux/irqchip/chained_irq.h>
#include <linux/spinlock.h>
#include <soc/arc/mcip.h>
#include <asm/irqflags-arcv2.h>
#include <asm/setup.h>

static DEFINE_RAW_SPINLOCK(mcip_lock);
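
/*
 * All ARConnect commands go through a shared auxiliary register interface
 * (command issued via __mcip_cmd()/__mcip_cmd_data(), result read back from
 * ARC_REG_MCIP_READBACK), so mcip_lock is held around every command/readback
 * sequence to keep the cores from interleaving them.
 */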

#ifdef CONFIG_SMP

static char smp_cpuinfo_buf[128];

/*
 * Set the mask so that GFRC is halted whenever any online core in the SMP
 * cluster is halted. This only works on ARC HS v3.0+; on earlier versions
 * it has no effect.
 */
static void mcip_update_gfrc_halt_mask(int cpu)
{
        struct bcr_generic gfrc;
        unsigned long flags;
        u32 gfrc_halt_mask;

        READ_BCR(ARC_REG_GFRC_BUILD, gfrc);

        /*
         * The CMD_GFRC_SET_CORE and CMD_GFRC_READ_CORE commands were only
         * added in GFRC version 0x3.
         */
        if (gfrc.ver < 0x3)
                return;

        raw_spin_lock_irqsave(&mcip_lock, flags);

        __mcip_cmd(CMD_GFRC_READ_CORE, 0);
        gfrc_halt_mask = read_aux_reg(ARC_REG_MCIP_READBACK);
        gfrc_halt_mask |= BIT(cpu);
        __mcip_cmd_data(CMD_GFRC_SET_CORE, 0, gfrc_halt_mask);

        raw_spin_unlock_irqrestore(&mcip_lock, flags);
}
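
/*
 * Illustrative example: with CPUs 0 and 2 already in the GFRC halt mask
 * (READBACK == 0x5), onlining CPU 1 ORs in BIT(1) and writes back 0x7,
 * so a halt of any of the three cores now freezes the GFRC count.
 */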

static void mcip_update_debug_halt_mask(int cpu)
{
        u32 mcip_mask = 0;
        unsigned long flags;

        raw_spin_lock_irqsave(&mcip_lock, flags);

        /*
         * mcip_mask is the same for the CMD_DEBUG_SET_SELECT and
         * CMD_DEBUG_SET_MASK commands, so read it once rather than issuing
         * both CMD_DEBUG_READ_MASK and CMD_DEBUG_READ_SELECT.
         */
        __mcip_cmd(CMD_DEBUG_READ_SELECT, 0);
        mcip_mask = read_aux_reg(ARC_REG_MCIP_READBACK);

        mcip_mask |= BIT(cpu);

        __mcip_cmd_data(CMD_DEBUG_SET_SELECT, 0, mcip_mask);
        /*
         * The parameter selects the halt causes:
         * STATUS32[H]/actionpoint/breakpoint/self-halt.
         * We choose all of them (0xF).
         */
        __mcip_cmd_data(CMD_DEBUG_SET_MASK, 0xF, mcip_mask);

        raw_spin_unlock_irqrestore(&mcip_lock, flags);
}
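
/*
 * The net effect (a sketch; exact cross-trigger behaviour is per the
 * ARConnect databook): with all online cores in the select mask and the
 * cause mask 0xF, a breakpoint or halt on any one core halts the whole
 * cluster together, keeping a debugger's view of the SMP system consistent.
 */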

static void mcip_setup_per_cpu(int cpu)
{
        struct mcip_bcr mp;

        READ_BCR(ARC_REG_MCIP_BCR, mp);

        smp_ipi_irq_setup(cpu, IPI_IRQ);
        smp_ipi_irq_setup(cpu, SOFTIRQ_IRQ);

        /* Update the GFRC halt mask as a new CPU comes online */
        if (mp.gfrc)
                mcip_update_gfrc_halt_mask(cpu);

        /* Update the MCIP debug mask as a new CPU comes online */
        if (mp.dbg)
                mcip_update_debug_halt_mask(cpu);
}
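
/*
 * mcip_setup_per_cpu() runs on each CPU as it is brought up, via
 * plat_smp_ops.init_per_cpu (see the ops struct below), so the IPI lines
 * and the halt masks are in place before the core starts taking IPIs.
 */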

static void mcip_ipi_send(int cpu)
{
        unsigned long flags;
        int ipi_was_pending;

        /*
         * ARConnect can only send IPIs to other cores; a self-IPI is
         * emulated with the core-local software-triggered interrupt.
         */
        if (unlikely(cpu == raw_smp_processor_id())) {
                arc_softirq_trigger(SOFTIRQ_IRQ);
                return;
        }

        raw_spin_lock_irqsave(&mcip_lock, flags);

        /*
         * If the receiver already has a pending interrupt, elide sending
         * this one: Linux cross-core calling works fine with concurrent
         * IPIs coalesced into one.
         * See arch/arc/kernel/smp.c: ipi_send_msg_one()
         */
        __mcip_cmd(CMD_INTRPT_READ_STATUS, cpu);
        ipi_was_pending = read_aux_reg(ARC_REG_MCIP_READBACK);
        if (!ipi_was_pending)
                __mcip_cmd(CMD_INTRPT_GENERATE_IRQ, cpu);

        raw_spin_unlock_irqrestore(&mcip_lock, flags);
}
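
/*
 * Caller-side usage sketch (simplified from arch/arc/kernel/smp.c):
 *
 *        plat_smp_ops.ipi_send(cpu);
 *
 * One pending IPI per target suffices: senders racing to the same target
 * collapse into a single asserted IRQ, which the message passing in
 * ipi_send_msg_one() is written to tolerate.
 */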

static void mcip_ipi_clear(int irq)
{
        unsigned int cpu, c;
        unsigned long flags;

        if (unlikely(irq == SOFTIRQ_IRQ)) {
                arc_softirq_clear(irq);
                return;
        }

        raw_spin_lock_irqsave(&mcip_lock, flags);

        /* Who sent the IPI */
        __mcip_cmd(CMD_INTRPT_CHECK_SOURCE, 0);

        cpu = read_aux_reg(ARC_REG_MCIP_READBACK);        /* 1,2,4,8... */

        /*
         * In rare cases, multiple concurrent IPIs sent to the same target
         * can be coalesced by MCIP into one asserted IRQ, so @cpu can have
         * multiple bits set ("vectored") rather than the typical single bit.
         */
        do {
                c = __ffs(cpu);                        /* 0,1,2,3 */
                __mcip_cmd(CMD_INTRPT_GENERATE_ACK, c);
                cpu &= ~(1U << c);
        } while (cpu);

        raw_spin_unlock_irqrestore(&mcip_lock, flags);
}
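
/*
 * Example of the coalesced case: a READBACK value of 0x5 (0b101) means
 * cpu 0 and cpu 2 both have IPIs pending to us; the loop ACKs source 0,
 * then source 2, before the interrupt is considered cleared.
 */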

static void mcip_probe_n_setup(void)
{
        struct mcip_bcr mp;

        READ_BCR(ARC_REG_MCIP_BCR, mp);

        sprintf(smp_cpuinfo_buf,
                "Extn [SMP]\t: ARConnect (v%d): %d cores with %s%s%s%s\n",
                mp.ver, mp.num_cores,
                IS_AVAIL1(mp.ipi, "IPI "),
                IS_AVAIL1(mp.idu, "IDU "),
                IS_AVAIL1(mp.dbg, "DEBUG "),
                IS_AVAIL1(mp.gfrc, "GFRC"));

        cpuinfo_arc700[0].extn.gfrc = mp.gfrc;
}

struct plat_smp_ops plat_smp_ops = {
        .info           = smp_cpuinfo_buf,
        .init_early_smp = mcip_probe_n_setup,
        .init_per_cpu   = mcip_setup_per_cpu,
        .ipi_send       = mcip_ipi_send,
        .ipi_clear      = mcip_ipi_clear,
};

#endif

/***************************************************************************
 * ARCv2 Interrupt Distribution Unit (IDU)
 *
 * Connects external "COMMON" IRQs to the core intc, providing:
 *  - dynamic routing (IRQ affinity)
 *  - load balancing (round-robin interrupt distribution)
 *  - 1:N distribution
 *
 * It physically resides in the MCIP hw block
 */

#include <linux/irqchip.h>
#include <linux/of.h>
#include <linux/of_irq.h>

/*
 * Set the DEST for @cmn_irq to @cpu_mask (1 bit per core)
 */
static void idu_set_dest(unsigned int cmn_irq, unsigned int cpu_mask)
{
        __mcip_cmd_data(CMD_IDU_SET_DEST, cmn_irq, cpu_mask);
}

static void idu_set_mode(unsigned int cmn_irq, unsigned int lvl,
                         unsigned int distr)
{
        union {
                unsigned int word;
                struct {
                        unsigned int distr:2, pad:2, lvl:1, pad2:27;
                };
        } data;

        data.distr = distr;
        data.lvl = lvl;
        __mcip_cmd_data(CMD_IDU_SET_MODE, cmn_irq, data.word);
}
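
/*
 * Layout of the SET_MODE word composed by the union above (assuming the
 * usual low-to-high bitfield allocation on ARC):
 *
 *        bits [1:0]  distribution mode (IDU_M_DISTRI_DEST / IDU_M_DISTRI_RR)
 *        bits [3:2]  reserved
 *        bit  [4]    trigger (e.g. IDU_M_TRIG_LEVEL)
 *
 * e.g. level-triggered round-robin encodes as IDU_M_DISTRI_RR in bits [1:0]
 * plus IDU_M_TRIG_LEVEL in bit 4.
 */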

static void idu_irq_mask_raw(irq_hw_number_t hwirq)
{
        unsigned long flags;

        raw_spin_lock_irqsave(&mcip_lock, flags);
        __mcip_cmd_data(CMD_IDU_SET_MASK, hwirq, 1);
        raw_spin_unlock_irqrestore(&mcip_lock, flags);
}

static void idu_irq_mask(struct irq_data *data)
{
        idu_irq_mask_raw(data->hwirq);
}

static void idu_irq_unmask(struct irq_data *data)
{
        unsigned long flags;

        raw_spin_lock_irqsave(&mcip_lock, flags);
        __mcip_cmd_data(CMD_IDU_SET_MASK, data->hwirq, 0);
        raw_spin_unlock_irqrestore(&mcip_lock, flags);
}

static int
idu_irq_set_affinity(struct irq_data *data, const struct cpumask *cpumask,
                     bool force)
{
        unsigned long flags;
        cpumask_t online;
        unsigned int destination_bits;
        unsigned int distribution_mode;

        /* error out if @cpumask contains no online CPU */
        if (!cpumask_and(&online, cpumask, cpu_online_mask))
                return -EINVAL;

        raw_spin_lock_irqsave(&mcip_lock, flags);

        destination_bits = cpumask_bits(&online)[0];
        idu_set_dest(data->hwirq, destination_bits);

        if (ffs(destination_bits) == fls(destination_bits))
                distribution_mode = IDU_M_DISTRI_DEST;
        else
                distribution_mode = IDU_M_DISTRI_RR;

        idu_set_mode(data->hwirq, IDU_M_TRIG_LEVEL, distribution_mode);

        raw_spin_unlock_irqrestore(&mcip_lock, flags);

        return IRQ_SET_MASK_OK;
}
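
/*
 * The ffs()/fls() comparison is a cheap "exactly one bit set" test: for a
 * destination of 0b0100 both return 3, so that single CPU gets direct
 * delivery (IDU_M_DISTRI_DEST); for 0b0110 ffs() is 2 and fls() is 3, so
 * the IDU round-robins the interrupt among CPUs 1 and 2. Note that only
 * word 0 of the cpumask is used, so routing is limited to the first 32 CPUs.
 */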

static void idu_irq_enable(struct irq_data *data)
{
        /*
         * By default send all common interrupts to all available online CPUs.
         * The affinity of common interrupts in the IDU must be set manually
         * since in some cases the kernel will not call irq_set_affinity() by
         * itself:
         *   1. When the kernel is not configured with SMP support.
         *   2. When the kernel is configured with SMP support but the
         *      upstream interrupt controllers do not support setting the
         *      affinity and so cannot propagate it to the IDU.
         */
        idu_irq_set_affinity(data, cpu_online_mask, false);
        idu_irq_unmask(data);
}

static struct irq_chip idu_irq_chip = {
        .name                   = "MCIP IDU Intc",
        .irq_mask               = idu_irq_mask,
        .irq_unmask             = idu_irq_unmask,
        .irq_enable             = idu_irq_enable,
#ifdef CONFIG_SMP
        .irq_set_affinity       = idu_irq_set_affinity,
#endif

};

static void idu_cascade_isr(struct irq_desc *desc)
{
        struct irq_domain *idu_domain = irq_desc_get_handler_data(desc);
        struct irq_chip *core_chip = irq_desc_get_chip(desc);
        irq_hw_number_t core_hwirq = irqd_to_hwirq(irq_desc_get_irq_data(desc));
        irq_hw_number_t idu_hwirq = core_hwirq - FIRST_EXT_IRQ;

        chained_irq_enter(core_chip, desc);
        generic_handle_irq(irq_find_mapping(idu_domain, idu_hwirq));
        chained_irq_exit(core_chip, desc);
}
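
/*
 * Example (assuming FIRST_EXT_IRQ is 24, the first core IRQ fed by the
 * IDU): a device raising IDU common irq 2 arrives at the core as hwirq 26;
 * the cascade maps 26 - 24 = 2 back into the IDU domain and invokes the
 * handler registered there.
 */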

static int idu_irq_map(struct irq_domain *d, unsigned int virq, irq_hw_number_t hwirq)
{
        irq_set_chip_and_handler(virq, &idu_irq_chip, handle_level_irq);
        irq_set_status_flags(virq, IRQ_MOVE_PCNTXT);

        return 0;
}

static const struct irq_domain_ops idu_irq_ops = {
        .xlate  = irq_domain_xlate_onecell,
        .map    = idu_irq_map,
};

/*
 * Core IRQ numbering:
 * [16, 23]:   statically assigned, always private-per-core (timers, WDT, IPI)
 * [24, 23+C]: if C > 0, the "C" common IRQs fed by the IDU
 * [24+C, N]:  not statically assigned, private-per-core
 */
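
/* e.g. with C = 8 common IRQs, core IRQs 24..31 are the IDU uplinks */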

static int __init
idu_of_init(struct device_node *intc, struct device_node *parent)
{
        struct irq_domain *domain;
        int nr_irqs;
        int i, virq;
        struct mcip_bcr mp;
        struct mcip_idu_bcr idu_bcr;

        READ_BCR(ARC_REG_MCIP_BCR, mp);

        if (!mp.idu)
                panic("IDU not detected, but DeviceTree using it");

        READ_BCR(ARC_REG_MCIP_IDU_BCR, idu_bcr);
        nr_irqs = mcip_idu_bcr_to_nr_irqs(idu_bcr);

        pr_info("MCIP: IDU supports %u common irqs\n", nr_irqs);

        domain = irq_domain_add_linear(intc, nr_irqs, &idu_irq_ops, NULL);

        /* Parent interrupts (core-intc) are already mapped */

        for (i = 0; i < nr_irqs; i++) {
                /* Mask all common interrupts by default */
                idu_irq_mask_raw(i);

                /*
                 * Create mappings for the parent uplink IRQs (towards the
                 * core intc), i.e. 24, 25, ... This has already been done,
                 * but we repeat it here to obtain the parent virq so the
                 * IDU cascade handler can be installed as its first-level
                 * ISR.
                 */
                virq = irq_create_mapping(NULL, i + FIRST_EXT_IRQ);
                BUG_ON(!virq);
                irq_set_chained_handler_and_data(virq, idu_cascade_isr, domain);
        }

        __mcip_cmd(CMD_IDU_ENABLE, 0);

        return 0;
}
IRQCHIP_DECLARE(arcv2_idu_intc, "snps,archs-idu-intc", idu_of_init);
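
/*
 * A minimal, hypothetical DeviceTree node for binding this driver (label
 * and parent names are illustrative; the single interrupt cell matches the
 * irq_domain_xlate_onecell translator above):
 *
 *        idu_intc: idu-interrupt-controller {
 *                compatible = "snps,archs-idu-intc";
 *                interrupt-controller;
 *                #interrupt-cells = <1>;
 *                interrupt-parent = <&core_intc>;
 *        };
 */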