/*
 * Copyright (c) 2010-2011 Samsung Electronics Co., Ltd.
 * http://www.samsung.com
 *
 * Common code for EXYNOS
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#include <linux/kernel.h>
#include <linux/interrupt.h>
#include <linux/irq.h>
#include <linux/io.h>
#include <linux/sysdev.h>
#include <linux/gpio.h>
#include <linux/sched.h>
#include <linux/serial_core.h>

#include <asm/proc-fns.h>
#include <asm/hardware/cache-l2x0.h>
#include <asm/hardware/gic.h>
#include <asm/mach/map.h>
#include <asm/mach/irq.h>

#include <mach/regs-irq.h>
#include <mach/regs-pmu.h>
#include <mach/regs-gpio.h>

#include <plat/cpu.h>
#include <plat/clock.h>
#include <plat/devs.h>
#include <plat/pm.h>
#include <plat/sdhci.h>
#include <plat/gpio-cfg.h>
#include <plat/adc-core.h>
#include <plat/fb-core.h>
#include <plat/fimc-core.h>
#include <plat/iic-core.h>
#include <plat/tv-core.h>
#include <plat/regs-serial.h>

#include "common.h"

unsigned int gic_bank_offset __read_mostly;

static const char name_exynos4210[] = "EXYNOS4210";
static const char name_exynos4212[] = "EXYNOS4212";
static const char name_exynos4412[] = "EXYNOS4412";

static struct cpu_table cpu_ids[] __initdata = {
	{
		.idcode		= EXYNOS4210_CPU_ID,
		.idmask		= EXYNOS4_CPU_MASK,
		.map_io		= exynos4_map_io,
		.init_clocks	= exynos4_init_clocks,
		.init_uarts	= exynos4_init_uarts,
		.init		= exynos_init,
		.name		= name_exynos4210,
	}, {
		.idcode		= EXYNOS4212_CPU_ID,
		.idmask		= EXYNOS4_CPU_MASK,
		.map_io		= exynos4_map_io,
		.init_clocks	= exynos4_init_clocks,
		.init_uarts	= exynos4_init_uarts,
		.init		= exynos_init,
		.name		= name_exynos4212,
	}, {
		.idcode		= EXYNOS4412_CPU_ID,
		.idmask		= EXYNOS4_CPU_MASK,
		.map_io		= exynos4_map_io,
		.init_clocks	= exynos4_init_clocks,
		.init_uarts	= exynos4_init_uarts,
		.init		= exynos_init,
		.name		= name_exynos4412,
	},
};

/* Initial IO mappings */

static struct map_desc exynos_iodesc[] __initdata = {
	{
		.virtual	= (unsigned long)S5P_VA_CHIPID,
		.pfn		= __phys_to_pfn(EXYNOS4_PA_CHIPID),
		.length		= SZ_4K,
		.type		= MT_DEVICE,
	}, {
		.virtual	= (unsigned long)S3C_VA_SYS,
		.pfn		= __phys_to_pfn(EXYNOS4_PA_SYSCON),
		.length		= SZ_64K,
		.type		= MT_DEVICE,
	}, {
		.virtual	= (unsigned long)S3C_VA_TIMER,
		.pfn		= __phys_to_pfn(EXYNOS4_PA_TIMER),
		.length		= SZ_16K,
		.type		= MT_DEVICE,
	}, {
		.virtual	= (unsigned long)S3C_VA_WATCHDOG,
		.pfn		= __phys_to_pfn(EXYNOS4_PA_WATCHDOG),
		.length		= SZ_4K,
		.type		= MT_DEVICE,
	}, {
		.virtual	= (unsigned long)S5P_VA_SROMC,
		.pfn		= __phys_to_pfn(EXYNOS4_PA_SROMC),
		.length		= SZ_4K,
		.type		= MT_DEVICE,
	}, {
		.virtual	= (unsigned long)S5P_VA_SYSTIMER,
		.pfn		= __phys_to_pfn(EXYNOS4_PA_SYSTIMER),
		.length		= SZ_4K,
		.type		= MT_DEVICE,
	}, {
		.virtual	= (unsigned long)S5P_VA_PMU,
		.pfn		= __phys_to_pfn(EXYNOS4_PA_PMU),
		.length		= SZ_64K,
		.type		= MT_DEVICE,
	}, {
		.virtual	= (unsigned long)S5P_VA_COMBINER_BASE,
		.pfn		= __phys_to_pfn(EXYNOS4_PA_COMBINER),
		.length		= SZ_4K,
		.type		= MT_DEVICE,
	}, {
		.virtual	= (unsigned long)S5P_VA_GIC_CPU,
		.pfn		= __phys_to_pfn(EXYNOS4_PA_GIC_CPU),
		.length		= SZ_64K,
		.type		= MT_DEVICE,
	}, {
		.virtual	= (unsigned long)S5P_VA_GIC_DIST,
		.pfn		= __phys_to_pfn(EXYNOS4_PA_GIC_DIST),
		.length		= SZ_64K,
		.type		= MT_DEVICE,
	}, {
		.virtual	= (unsigned long)S3C_VA_UART,
		.pfn		= __phys_to_pfn(EXYNOS4_PA_UART),
		.length		= SZ_512K,
		.type		= MT_DEVICE,
	},
};

static struct map_desc exynos4_iodesc[] __initdata = {
	{
		.virtual	= (unsigned long)S5P_VA_CMU,
		.pfn		= __phys_to_pfn(EXYNOS4_PA_CMU),
		.length		= SZ_128K,
		.type		= MT_DEVICE,
	}, {
		.virtual	= (unsigned long)S5P_VA_COREPERI_BASE,
		.pfn		= __phys_to_pfn(EXYNOS4_PA_COREPERI),
		.length		= SZ_8K,
		.type		= MT_DEVICE,
	}, {
		.virtual	= (unsigned long)S5P_VA_L2CC,
		.pfn		= __phys_to_pfn(EXYNOS4_PA_L2CC),
		.length		= SZ_4K,
		.type		= MT_DEVICE,
	}, {
		.virtual	= (unsigned long)S5P_VA_GPIO1,
		.pfn		= __phys_to_pfn(EXYNOS4_PA_GPIO1),
		.length		= SZ_4K,
		.type		= MT_DEVICE,
	}, {
		.virtual	= (unsigned long)S5P_VA_GPIO2,
		.pfn		= __phys_to_pfn(EXYNOS4_PA_GPIO2),
		.length		= SZ_4K,
		.type		= MT_DEVICE,
	}, {
		.virtual	= (unsigned long)S5P_VA_GPIO3,
		.pfn		= __phys_to_pfn(EXYNOS4_PA_GPIO3),
		.length		= SZ_256,
		.type		= MT_DEVICE,
	}, {
		.virtual	= (unsigned long)S5P_VA_DMC0,
		.pfn		= __phys_to_pfn(EXYNOS4_PA_DMC0),
		.length		= SZ_4K,
		.type		= MT_DEVICE,
	}, {
		.virtual	= (unsigned long)S5P_VA_SROMC,
		.pfn		= __phys_to_pfn(EXYNOS4_PA_SROMC),
		.length		= SZ_4K,
		.type		= MT_DEVICE,
	}, {
		.virtual	= (unsigned long)S3C_VA_USB_HSPHY,
		.pfn		= __phys_to_pfn(EXYNOS4_PA_HSPHY),
		.length		= SZ_4K,
		.type		= MT_DEVICE,
	},
};

static struct map_desc exynos4_iodesc0[] __initdata = {
	{
		.virtual	= (unsigned long)S5P_VA_SYSRAM,
		.pfn		= __phys_to_pfn(EXYNOS4_PA_SYSRAM0),
		.length		= SZ_4K,
		.type		= MT_DEVICE,
	},
};

static struct map_desc exynos4_iodesc1[] __initdata = {
	{
		.virtual	= (unsigned long)S5P_VA_SYSRAM,
		.pfn		= __phys_to_pfn(EXYNOS4_PA_SYSRAM1),
		.length		= SZ_4K,
		.type		= MT_DEVICE,
	},
};

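/*
 * Default idle callback installed as pm_idle by exynos_init(): enter WFI
 * via cpu_do_idle() when nothing needs rescheduling, then re-enable IRQs
 * (the generic idle loop hands control over with interrupts disabled).
 */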
static void exynos_idle(void)
{
	if (!need_resched())
		cpu_do_idle();

	local_irq_enable();
}

void exynos4_restart(char mode, const char *cmd)
{
	__raw_writel(0x1, S5P_SWRESET);
}

/*
 * exynos_init_io
 *
 * register the standard cpu IO areas
 */

void __init exynos_init_io(struct map_desc *mach_desc, int size)
{
	/* initialize the io descriptors we need for initialization */
	iotable_init(exynos_iodesc, ARRAY_SIZE(exynos_iodesc));
	if (mach_desc)
		iotable_init(mach_desc, size);

	/* detect cpu id and rev. */
	s5p_init_cpu(S5P_VA_CHIPID);

	s3c_init_cpu(samsung_cpu_id, cpu_ids, ARRAY_SIZE(cpu_ids));
}

void __init exynos4_map_io(void)
{
	iotable_init(exynos4_iodesc, ARRAY_SIZE(exynos4_iodesc));

	if (soc_is_exynos4210() && samsung_rev() == EXYNOS4210_REV_0)
		iotable_init(exynos4_iodesc0, ARRAY_SIZE(exynos4_iodesc0));
	else
		iotable_init(exynos4_iodesc1, ARRAY_SIZE(exynos4_iodesc1));

	/* initialize device information early */
	exynos4_default_sdhci0();
	exynos4_default_sdhci1();
	exynos4_default_sdhci2();
	exynos4_default_sdhci3();

	s3c_adc_setname("samsung-adc-v3");

	s3c_fimc_setname(0, "exynos4-fimc");
	s3c_fimc_setname(1, "exynos4-fimc");
	s3c_fimc_setname(2, "exynos4-fimc");
	s3c_fimc_setname(3, "exynos4-fimc");

	/* The I2C bus controllers are directly compatible with s3c2440 */
	s3c_i2c0_setname("s3c2440-i2c");
	s3c_i2c1_setname("s3c2440-i2c");
	s3c_i2c2_setname("s3c2440-i2c");

	s5p_fb_setname(0, "exynos4-fb");
	s5p_hdmi_setname("exynos4-hdmi");
}

void __init exynos4_init_clocks(int xtal)
{
	printk(KERN_DEBUG "%s: initializing clocks\n", __func__);

	s3c24xx_register_baseclocks(xtal);
	s5p_register_clocks(xtal);

	if (soc_is_exynos4210())
		exynos4210_register_clocks();
	else if (soc_is_exynos4212() || soc_is_exynos4412())
		exynos4212_register_clocks();

	exynos4_register_clocks();
	exynos4_setup_clocks();
}

#define COMBINER_ENABLE_SET	0x0
#define COMBINER_ENABLE_CLEAR	0x4
#define COMBINER_INT_STATUS	0xC

static DEFINE_SPINLOCK(irq_controller_lock);

struct combiner_chip_data {
	unsigned int irq_offset;
	unsigned int irq_mask;
	void __iomem *base;
};

static struct combiner_chip_data combiner_data[MAX_COMBINER_NR];

static inline void __iomem *combiner_base(struct irq_data *data)
{
	struct combiner_chip_data *combiner_data =
		irq_data_get_irq_chip_data(data);

	return combiner_data->base;
}

static void combiner_mask_irq(struct irq_data *data)
{
	u32 mask = 1 << (data->irq % 32);

	__raw_writel(mask, combiner_base(data) + COMBINER_ENABLE_CLEAR);
}

static void combiner_unmask_irq(struct irq_data *data)
{
	u32 mask = 1 << (data->irq % 32);

	__raw_writel(mask, combiner_base(data) + COMBINER_ENABLE_SET);
}

static void combiner_handle_cascade_irq(unsigned int irq, struct irq_desc *desc)
{
	struct combiner_chip_data *chip_data = irq_get_handler_data(irq);
	struct irq_chip *chip = irq_get_chip(irq);
	unsigned int cascade_irq, combiner_irq;
	unsigned long status;

	chained_irq_enter(chip, desc);

	spin_lock(&irq_controller_lock);
	status = __raw_readl(chip_data->base + COMBINER_INT_STATUS);
	spin_unlock(&irq_controller_lock);
	status &= chip_data->irq_mask;

	if (status == 0)
		goto out;

	combiner_irq = __ffs(status);

	cascade_irq = combiner_irq + (chip_data->irq_offset & ~31);
	if (unlikely(cascade_irq >= NR_IRQS))
		do_bad_IRQ(cascade_irq, desc);
	else
		generic_handle_irq(cascade_irq);

 out:
	chained_irq_exit(chip, desc);
}

static struct irq_chip combiner_chip = {
	.name		= "COMBINER",
	.irq_mask	= combiner_mask_irq,
	.irq_unmask	= combiner_unmask_irq,
};

static void __init combiner_cascade_irq(unsigned int combiner_nr, unsigned int irq)
{
	if (combiner_nr >= MAX_COMBINER_NR)
		BUG();
	if (irq_set_handler_data(irq, &combiner_data[combiner_nr]) != 0)
		BUG();
	irq_set_chained_handler(irq, combiner_handle_cascade_irq);
}

static void __init combiner_init(unsigned int combiner_nr, void __iomem *base,
				 unsigned int irq_start)
{
	unsigned int i;

	if (combiner_nr >= MAX_COMBINER_NR)
		BUG();

	combiner_data[combiner_nr].base = base;
	combiner_data[combiner_nr].irq_offset = irq_start;
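	/*
	 * Four combiner channels share one register bank; each channel owns
	 * an 8-bit slice of the enable/status registers, selected here by
	 * (combiner_nr % 4).
	 */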
	combiner_data[combiner_nr].irq_mask = 0xff << ((combiner_nr % 4) << 3);

	/* Disable all interrupts */

	__raw_writel(combiner_data[combiner_nr].irq_mask,
		     base + COMBINER_ENABLE_CLEAR);

	/* Setup the Linux IRQ subsystem */

	for (i = irq_start; i < combiner_data[combiner_nr].irq_offset
				+ MAX_IRQ_IN_COMBINER; i++) {
		irq_set_chip_and_handler(i, &combiner_chip, handle_level_irq);
		irq_set_chip_data(i, &combiner_data[combiner_nr]);
		set_irq_flags(i, IRQF_VALID | IRQF_PROBE);
	}
}

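/*
 * On EXYNOS4 each CPU sees its own banked copy of the GIC CPU and
 * distributor interfaces, spaced gic_bank_offset apart.  Re-point the
 * generic GIC driver's base addresses at the current CPU's bank before
 * the mask/unmask/eoi callbacks touch the hardware.
 */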
static void exynos4_gic_irq_fix_base(struct irq_data *d)
{
	struct gic_chip_data *gic_data = irq_data_get_irq_chip_data(d);

	gic_data->cpu_base = S5P_VA_GIC_CPU +
			     (gic_bank_offset * smp_processor_id());

	gic_data->dist_base = S5P_VA_GIC_DIST +
			     (gic_bank_offset * smp_processor_id());
}

void __init exynos4_init_irq(void)
{
	int irq;

	gic_bank_offset = soc_is_exynos4412() ? 0x4000 : 0x8000;

	gic_init(0, IRQ_PPI(0), S5P_VA_GIC_DIST, S5P_VA_GIC_CPU);
	gic_arch_extn.irq_eoi = exynos4_gic_irq_fix_base;
	gic_arch_extn.irq_unmask = exynos4_gic_irq_fix_base;
	gic_arch_extn.irq_mask = exynos4_gic_irq_fix_base;

	for (irq = 0; irq < MAX_COMBINER_NR; irq++) {

		combiner_init(irq, (void __iomem *)S5P_VA_COMBINER(irq),
				COMBINER_IRQ(irq, 0));
		combiner_cascade_irq(irq, IRQ_SPI(irq));
	}

	/*
	 * The parameters of s5p_init_irq() are for VIC init.
	 * These parameters should be NULL and 0 because EXYNOS4
	 * uses GIC instead of VIC.
	 */
	s5p_init_irq(NULL, 0);
}

struct sysdev_class exynos4_sysclass = {
	.name	= "exynos4-core",
};

static struct sys_device exynos4_sysdev = {
	.cls	= &exynos4_sysclass,
};

static int __init exynos4_core_init(void)
{
	return sysdev_class_register(&exynos4_sysclass);
}
core_initcall(exynos4_core_init);

#ifdef CONFIG_CACHE_L2X0
static int __init exynos4_l2x0_cache_init(void)
{
	/* TAG, Data Latency Control: 2cycle */
	__raw_writel(0x110, S5P_VA_L2CC + L2X0_TAG_LATENCY_CTRL);

	if (soc_is_exynos4210())
		__raw_writel(0x110, S5P_VA_L2CC + L2X0_DATA_LATENCY_CTRL);
	else if (soc_is_exynos4212() || soc_is_exynos4412())
		__raw_writel(0x120, S5P_VA_L2CC + L2X0_DATA_LATENCY_CTRL);

	/* L2X0 Prefetch Control */
	__raw_writel(0x30000007, S5P_VA_L2CC + L2X0_PREFETCH_CTRL);

	/* L2X0 Power Control */
	__raw_writel(L2X0_DYNAMIC_CLK_GATING_EN | L2X0_STNDBY_MODE_EN,
		     S5P_VA_L2CC + L2X0_POWER_CTRL);

	l2x0_init(S5P_VA_L2CC, 0x7C470001, 0xC200ffff);

	return 0;
}

early_initcall(exynos4_l2x0_cache_init);
#endif

int __init exynos_init(void)
{
	printk(KERN_INFO "EXYNOS: Initializing architecture\n");

	/* set idle function */
	pm_idle = exynos_idle;

	return sysdev_register(&exynos4_sysdev);
}

static struct s3c24xx_uart_clksrc exynos4_serial_clocks[] = {
	[0] = {
		.name		= "uclk1",
		.divisor	= 1,
		.min_baud	= 0,
		.max_baud	= 0,
	},
};

/* uart registration process */

void __init exynos4_init_uarts(struct s3c2410_uartcfg *cfg, int no)
{
	struct s3c2410_uartcfg *tcfg = cfg;
	u32 ucnt;

	for (ucnt = 0; ucnt < no; ucnt++, tcfg++) {
		if (!tcfg->clocks) {
			tcfg->has_fracval = 1;
			tcfg->clocks = exynos4_serial_clocks;
			tcfg->clocks_size = ARRAY_SIZE(exynos4_serial_clocks);
		}
		tcfg->flags |= NO_NEED_CHECK_CLKSRC;
	}

	s3c24xx_init_uartdevs("s5pv210-uart", s5p_uart_resources, cfg, no);
}

static DEFINE_SPINLOCK(eint_lock);

static unsigned int eint0_15_data[16];

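/*
 * Map an external interrupt number in the 0..15 range to the IRQ number
 * of its dedicated interrupt source (EINT0-3 are individual lines,
 * EINT4-7 and EINT8-15 follow the IRQ_EINT4/IRQ_EINT8 bases).
 */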
static unsigned int exynos4_get_irq_nr(unsigned int number)
{
	u32 ret = 0;

	switch (number) {
	case 0 ... 3:
		ret = (number + IRQ_EINT0);
		break;
	case 4 ... 7:
		ret = (number + (IRQ_EINT4 - 4));
		break;
	case 8 ... 15:
		ret = (number + (IRQ_EINT8 - 8));
		break;
	default:
		printk(KERN_ERR "unexpected EINT number: %d\n", number);
	}

	return ret;
}

static inline void exynos4_irq_eint_mask(struct irq_data *data)
{
	u32 mask;

	spin_lock(&eint_lock);
	mask = __raw_readl(S5P_EINT_MASK(EINT_REG_NR(data->irq)));
	mask |= eint_irq_to_bit(data->irq);
	__raw_writel(mask, S5P_EINT_MASK(EINT_REG_NR(data->irq)));
	spin_unlock(&eint_lock);
}

static void exynos4_irq_eint_unmask(struct irq_data *data)
{
	u32 mask;

	spin_lock(&eint_lock);
	mask = __raw_readl(S5P_EINT_MASK(EINT_REG_NR(data->irq)));
	mask &= ~(eint_irq_to_bit(data->irq));
	__raw_writel(mask, S5P_EINT_MASK(EINT_REG_NR(data->irq)));
	spin_unlock(&eint_lock);
}

static inline void exynos4_irq_eint_ack(struct irq_data *data)
{
	__raw_writel(eint_irq_to_bit(data->irq),
		     S5P_EINT_PEND(EINT_REG_NR(data->irq)));
}

static void exynos4_irq_eint_maskack(struct irq_data *data)
{
	exynos4_irq_eint_mask(data);
	exynos4_irq_eint_ack(data);
}

static int exynos4_irq_eint_set_type(struct irq_data *data, unsigned int type)
{
	int offs = EINT_OFFSET(data->irq);
	int shift;
	u32 ctrl, mask;
	u32 newvalue = 0;

	switch (type) {
	case IRQ_TYPE_EDGE_RISING:
		newvalue = S5P_IRQ_TYPE_EDGE_RISING;
		break;

	case IRQ_TYPE_EDGE_FALLING:
		newvalue = S5P_IRQ_TYPE_EDGE_FALLING;
		break;

	case IRQ_TYPE_EDGE_BOTH:
		newvalue = S5P_IRQ_TYPE_EDGE_BOTH;
		break;

	case IRQ_TYPE_LEVEL_LOW:
		newvalue = S5P_IRQ_TYPE_LEVEL_LOW;
		break;

	case IRQ_TYPE_LEVEL_HIGH:
		newvalue = S5P_IRQ_TYPE_LEVEL_HIGH;
		break;

	default:
		printk(KERN_ERR "No such irq type %d\n", type);
		return -EINVAL;
	}

	shift = (offs & 0x7) * 4;
	mask = 0x7 << shift;

	spin_lock(&eint_lock);
	ctrl = __raw_readl(S5P_EINT_CON(EINT_REG_NR(data->irq)));
	ctrl &= ~mask;
	ctrl |= newvalue << shift;
	__raw_writel(ctrl, S5P_EINT_CON(EINT_REG_NR(data->irq)));
	spin_unlock(&eint_lock);

	switch (offs) {
	case 0 ... 7:
		s3c_gpio_cfgpin(EINT_GPIO_0(offs & 0x7), EINT_MODE);
		break;
	case 8 ... 15:
		s3c_gpio_cfgpin(EINT_GPIO_1(offs & 0x7), EINT_MODE);
		break;
	case 16 ... 23:
		s3c_gpio_cfgpin(EINT_GPIO_2(offs & 0x7), EINT_MODE);
		break;
	case 24 ... 31:
		s3c_gpio_cfgpin(EINT_GPIO_3(offs & 0x7), EINT_MODE);
		break;
	default:
		printk(KERN_ERR "No such irq number %d\n", offs);
	}

	return 0;
}

static struct irq_chip exynos4_irq_eint = {
	.name		= "exynos4-eint",
	.irq_mask	= exynos4_irq_eint_mask,
	.irq_unmask	= exynos4_irq_eint_unmask,
	.irq_mask_ack	= exynos4_irq_eint_maskack,
	.irq_ack	= exynos4_irq_eint_ack,
	.irq_set_type	= exynos4_irq_eint_set_type,
#ifdef CONFIG_PM
	.irq_set_wake	= s3c_irqext_wake,
#endif
};

/*
 * exynos4_irq_demux_eint
 *
 * This function demuxes the IRQs from EINTs 16 to 31.
 * It is designed to be inlined into the specific handler
 * s5p_irq_demux_eintX_Y.
 *
 * Each EINT pend/mask register handles eight of them.
 */
static inline void exynos4_irq_demux_eint(unsigned int start)
{
	unsigned int irq;

	u32 status = __raw_readl(S5P_EINT_PEND(EINT_REG_NR(start)));
	u32 mask = __raw_readl(S5P_EINT_MASK(EINT_REG_NR(start)));

	status &= ~mask;
	status &= 0xff;

	while (status) {
		irq = fls(status) - 1;
		generic_handle_irq(irq + start);
		status &= ~(1 << irq);
	}
}

static void exynos4_irq_demux_eint16_31(unsigned int irq, struct irq_desc *desc)
{
	struct irq_chip *chip = irq_get_chip(irq);

	chained_irq_enter(chip, desc);
	exynos4_irq_demux_eint(IRQ_EINT(16));
	exynos4_irq_demux_eint(IRQ_EINT(24));
	chained_irq_exit(chip, desc);
}

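/*
 * EINT0-15 each arrive on their own dedicated interrupt line.  This
 * chained handler forwards the hardware interrupt to the corresponding
 * virtual IRQ_EINT(x) number stashed in eint0_15_data[].
 */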
static void exynos4_irq_eint0_15(unsigned int irq, struct irq_desc *desc)
{
	u32 *irq_data = irq_get_handler_data(irq);
	struct irq_chip *chip = irq_get_chip(irq);

	chained_irq_enter(chip, desc);
	chip->irq_mask(&desc->irq_data);

	if (chip->irq_ack)
		chip->irq_ack(&desc->irq_data);

	generic_handle_irq(*irq_data);

	chip->irq_unmask(&desc->irq_data);
	chained_irq_exit(chip, desc);
}

int __init exynos4_init_irq_eint(void)
{
	int irq;

	for (irq = 0 ; irq <= 31 ; irq++) {
		irq_set_chip_and_handler(IRQ_EINT(irq), &exynos4_irq_eint,
					 handle_level_irq);
		set_irq_flags(IRQ_EINT(irq), IRQF_VALID);
	}

	irq_set_chained_handler(IRQ_EINT16_31, exynos4_irq_demux_eint16_31);

	for (irq = 0 ; irq <= 15 ; irq++) {
		eint0_15_data[irq] = IRQ_EINT(irq);

		irq_set_handler_data(exynos4_get_irq_nr(irq),
				     &eint0_15_data[irq]);
		irq_set_chained_handler(exynos4_get_irq_nr(irq),
					exynos4_irq_eint0_15);
	}

	return 0;
}
arch_initcall(exynos4_init_irq_eint);