/*
 * Count register synchronisation.
 *
 * All CPUs will have their count registers synchronised to the next time
 * value of CPU0. This can cause a small timewarp for CPU0. All other CPUs
 * should not have done anything significant (but they may have had
 * interrupts enabled briefly - prom_smp_finish() should not be responsible
 * for enabling interrupts...)
 */

#include <linux/kernel.h>
#include <linux/irqflags.h>
#include <linux/cpumask.h>

#include <asm/r4k-timer.h>
#include <linux/atomic.h>
#include <asm/barrier.h>
#include <asm/mipsregs.h>

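/*
 * Shared state for the master/slave rendezvous.  One secondary CPU is
 * synchronised at a time:
 *
 *  - count_start_flag holds the CPU number of the slave currently being
 *    synchronised; the master sets it to kick the slave off and clears it
 *    back to 0 once the slave is done.
 *  - count_count_start and count_count_stop are bumped first by the slave
 *    and then by the master, so the two meet at the start and end of every
 *    pass of the loop below.
 *  - count_reference carries the master's count value over to the slave.
 */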
static atomic_t count_start_flag = ATOMIC_INIT(0);
static atomic_t count_count_start = ATOMIC_INIT(0);
static atomic_t count_count_stop = ATOMIC_INIT(0);
static atomic_t count_reference = ATOMIC_INIT(0);

/* Cycles added to the current count when programming the next timer interrupt */
#define COUNTON 100
/* Number of synchronisation passes; only the final pass writes the count */
#define NR_LOOPS 5

void synchronise_count_master(int cpu)
{
	int i;
	unsigned long flags;
	unsigned int initcount;

	printk(KERN_INFO "Synchronize counters for CPU %u: ", cpu);

	local_irq_save(flags);

	/*
	 * Notify the slaves that it's time to start
	 */
	atomic_set(&count_reference, read_c0_count());
	atomic_set(&count_start_flag, cpu);
	smp_wmb();

	/* Count will be initialised to current timer for all CPUs */
	initcount = read_c0_count();
Ralf Baechle | 39b8d52 | 2008-04-28 17:14:26 +0100 | [diff] [blame] | 47 | |
| 48 | /* |
| 49 | * We loop a few times to get a primed instruction cache, |
| 50 | * then the last pass is more or less synchronised and |
| 51 | * the master and slaves each set their cycle counters to a known |
| 52 | * value all at once. This reduces the chance of having random offsets |
| 53 | * between the processors, and guarantees that the maximum |
| 54 | * delay between the cycle counters is never bigger than |
| 55 | * the latency of information-passing (cachelines) between |
| 56 | * two CPUs. |
| 57 | */ |
| 58 | |
	for (i = 0; i < NR_LOOPS; i++) {
		/* slaves loop on '!= 2' */
		while (atomic_read(&count_count_start) != 1)
			mb();
		atomic_set(&count_count_stop, 0);
		smp_wmb();

		/* this lets the slaves write their count register */
		atomic_inc(&count_count_start);

		/*
		 * Everyone initialises count in the last loop:
		 */
		if (i == NR_LOOPS-1)
			write_c0_count(initcount);

		/*
		 * Wait for all slaves to leave the synchronization point:
		 */
		while (atomic_read(&count_count_stop) != 1)
			mb();
		atomic_set(&count_count_start, 0);
		smp_wmb();
		atomic_inc(&count_count_stop);
	}
	/* Arrange for an interrupt in a short while */
	write_c0_compare(read_c0_count() + COUNTON);

	/* Signal that no slave is being synchronised any more */
	atomic_set(&count_start_flag, 0);

	local_irq_restore(flags);

	/*
	 * i386 code reported the skew here, but the
	 * count registers were almost certainly out of sync
	 * so no point in alarming people
	 */
	printk("done.\n");
}

void synchronise_count_slave(int cpu)
{
	int i;
	unsigned int initcount;

	/*
	 * Not every CPU is online at the time this gets called,
	 * so we first wait for the master to say everyone is ready
	 */

	while (atomic_read(&count_start_flag) != cpu)
		mb();

	/* Count will be initialised to the master's reference value for all CPUs */
	initcount = atomic_read(&count_reference);

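	/*
	 * Mirror the master's loop: meet at count_count_start, write the
	 * count on the final pass, then meet again at count_count_stop.
	 */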
	for (i = 0; i < NR_LOOPS; i++) {
		atomic_inc(&count_count_start);
		while (atomic_read(&count_count_start) != 2)
			mb();

		/*
		 * Everyone initialises count in the last loop:
		 */
		if (i == NR_LOOPS-1)
			write_c0_count(initcount);

		atomic_inc(&count_count_stop);
		while (atomic_read(&count_count_stop) != 2)
			mb();
	}
	/* Arrange for an interrupt in a short while */
	write_c0_compare(read_c0_count() + COUNTON);
}
#undef NR_LOOPS