/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License. See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 2007 MIPS Technologies, Inc.
 * Copyright (C) 2007 Ralf Baechle <ralf@linux-mips.org>
 */
#include <linux/clockchips.h>
#include <linux/interrupt.h>
#include <linux/percpu.h>
#include <linux/smp.h>
#include <linux/irq.h>

#include <asm/smtc_ipi.h>
#include <asm/time.h>
#include <asm/cevt-r4k.h>

/*
 * The SMTC Kernel for the 34K, 1004K, et al. replaces several
 * of these routines with SMTC-specific variants.
 */

#ifndef CONFIG_MIPS_MT_SMTC

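/*
 * Program the next clock event: Compare is set to Count + delta and the
 * interrupt fires when Count reaches Compare.  If Count has already caught
 * up with the new Compare value by the time we re-read it, the requested
 * delta was too short; return -ETIME so the clockevents core can retry
 * with a larger delta.
 */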
static int mips_next_event(unsigned long delta,
			   struct clock_event_device *evt)
{
	unsigned int cnt;
	int res;

	cnt = read_c0_count();
	cnt += delta;
	write_c0_compare(cnt);
	res = ((int)(read_c0_count() - cnt) >= 0) ? -ETIME : 0;
	return res;
}

#endif /* CONFIG_MIPS_MT_SMTC */

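/*
 * The count/compare timer has no mode-specific state to program; oneshot
 * events are armed in mips_next_event(), so mode changes are a no-op here.
 */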
void mips_set_clock_mode(enum clock_event_mode mode,
			 struct clock_event_device *evt)
{
	/* Nothing to do ...  */
}

DEFINE_PER_CPU(struct clock_event_device, mips_clockevent_device);
int cp0_timer_irq_installed;

#ifndef CONFIG_MIPS_MT_SMTC

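/*
 * On many MIPS cores the performance counter overflow interrupt is routed
 * to the same CPU interrupt line as the count/compare timer, so the timer
 * handler gives the perf counter code first refusal before treating the
 * interrupt as a clock event.
 */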
irqreturn_t c0_compare_interrupt(int irq, void *dev_id)
{
	const int r2 = cpu_has_mips_r2;
	struct clock_event_device *cd;
	int cpu = smp_processor_id();

	/*
	 * Suckage alert:
	 * Before R2 of the architecture there was no way to see if a
	 * performance counter interrupt was pending, so we have to run
	 * the performance counter interrupt handler anyway.
	 */
	if (handle_perf_irq(r2))
		goto out;

	/*
	 * The same applies to the timer interrupt: before R2 there is no
	 * way to tell whether it is pending.  But having handled any perf
	 * counter interrupt above, the only remaining reason for getting
	 * here should be a timer interrupt.  Being the paranoiacs we are,
	 * on R2 we check Cause.TI anyway.
	 */
	if (!r2 || (read_c0_cause() & (1 << 30))) {	/* Cause.TI */
		/* Clear Count/Compare Interrupt */
		write_c0_compare(read_c0_compare());
		cd = &per_cpu(mips_clockevent_device, cpu);
		cd->event_handler(cd);
	}

out:
	return IRQ_HANDLED;
}

#endif /* Not CONFIG_MIPS_MT_SMTC */

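/*
 * Shared irqaction for the compare interrupt: IRQF_PERCPU because every CPU
 * has its own Count/Compare pair, IRQF_TIMER to mark it as the timer tick.
 */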
| 85 | struct irqaction c0_compare_irqaction = { |
Ralf Baechle | 42f7754 | 2007-10-18 17:48:11 +0100 | [diff] [blame] | 86 | .handler = c0_compare_interrupt, |
Yong Zhang | 8b5690f | 2011-11-22 14:38:03 +0000 | [diff] [blame^] | 87 | .flags = IRQF_PERCPU | IRQF_TIMER, |
Ralf Baechle | 42f7754 | 2007-10-18 17:48:11 +0100 | [diff] [blame] | 88 | .name = "timer", |
| 89 | }; |
| 90 | |
Ralf Baechle | 42f7754 | 2007-10-18 17:48:11 +0100 | [diff] [blame] | 91 | |
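/*
 * Placeholder event handler; the clockevents core installs the real tick
 * handler once the device is taken into use.
 */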
void mips_event_handler(struct clock_event_device *dev)
{
}

/*
 * FIXME: This doesn't hold for the relocated E9000 compare interrupt.
 */
static int c0_compare_int_pending(void)
{
	return (read_c0_cause() >> cp0_compare_irq_shift) & (1ul << CAUSEB_IP);
}

/*
 * Compare interrupt can be routed and latched outside the core,
 * so wait up to a worst-case number of cycle counter ticks for timer
 * interrupt changes to propagate to the cause register.
 */
#define COMPARE_INT_SEEN_TICKS 50

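/*
 * Probe whether the count/compare interrupt actually works before the
 * clockevent is registered: acknowledge any stale interrupt, arm Compare a
 * short distance ahead of Count, wait for the interrupt to show up in the
 * Cause register, then check that acknowledging it clears it again.
 */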
int c0_compare_int_usable(void)
{
	unsigned int delta;
	unsigned int cnt;

	/*
	 * IP7 already pending? Try to clear it by acking the timer.
	 */
	if (c0_compare_int_pending()) {
		cnt = read_c0_count();
		write_c0_compare(cnt);
		back_to_back_c0_hazard();
		while (read_c0_count() < (cnt + COMPARE_INT_SEEN_TICKS))
			if (!c0_compare_int_pending())
				break;
		if (c0_compare_int_pending())
			return 0;
	}

	for (delta = 0x10; delta <= 0x400000; delta <<= 1) {
		cnt = read_c0_count();
		cnt += delta;
		write_c0_compare(cnt);
		back_to_back_c0_hazard();
		if ((int)(read_c0_count() - cnt) < 0)
			break;
		/* increase delta if the timer was already expired */
	}

	while ((int)(read_c0_count() - cnt) <= 0)
		;	/* Wait for expiry */

	while (read_c0_count() < (cnt + COMPARE_INT_SEEN_TICKS))
		if (c0_compare_int_pending())
			break;
	if (!c0_compare_int_pending())
		return 0;
	cnt = read_c0_count();
	write_c0_compare(cnt);
	back_to_back_c0_hazard();
	while (read_c0_count() < (cnt + COMPARE_INT_SEEN_TICKS))
		if (!c0_compare_int_pending())
			break;
	if (c0_compare_int_pending())
		return 0;

	/*
	 * Feels like a real count / compare timer.
	 */
	return 1;
}

#ifndef CONFIG_MIPS_MT_SMTC

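/*
 * Register the count/compare timer as a per-CPU oneshot clock event device.
 * The compare interrupt number comes from the platform's get_c0_compare_int()
 * hook when one is provided, otherwise from cp0_compare_irq; the irqaction is
 * installed only once, by the first CPU to get here.
 */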
int __cpuinit r4k_clockevent_init(void)
{
	unsigned int cpu = smp_processor_id();
	struct clock_event_device *cd;
	unsigned int irq;

	if (!cpu_has_counter || !mips_hpt_frequency)
		return -ENXIO;

	if (!c0_compare_int_usable())
		return -ENXIO;

	/*
	 * With vectored interrupts things are getting platform specific.
	 * get_c0_compare_int is a hook to allow a platform to return the
	 * interrupt number of its liking.
	 */
	irq = MIPS_CPU_IRQ_BASE + cp0_compare_irq;
	if (get_c0_compare_int)
		irq = get_c0_compare_int();

	cd = &per_cpu(mips_clockevent_device, cpu);

	cd->name = "MIPS";
	cd->features = CLOCK_EVT_FEAT_ONESHOT;

	clockevent_set_clock(cd, mips_hpt_frequency);

	/* Calculate the min / max delta */
	cd->max_delta_ns = clockevent_delta2ns(0x7fffffff, cd);
	cd->min_delta_ns = clockevent_delta2ns(0x300, cd);

	cd->rating = 300;
	cd->irq = irq;
	cd->cpumask = cpumask_of(cpu);
	cd->set_next_event = mips_next_event;
	cd->set_mode = mips_set_clock_mode;
	cd->event_handler = mips_event_handler;

	clockevents_register_device(cd);

	if (cp0_timer_irq_installed)
		return 0;

	cp0_timer_irq_installed = 1;

	setup_irq(irq, &c0_compare_irqaction);

	return 0;
}

#endif /* Not CONFIG_MIPS_MT_SMTC */