/*
 * Virtual CPU timer based timer functions.
 *
 * Copyright IBM Corp. 2004, 2012
 * Author(s): Jan Glauber <jan.glauber@de.ibm.com>
 */

#include <linux/kernel_stat.h>
#include <linux/export.h>
#include <linux/kernel.h>
#include <linux/timex.h>
#include <linux/types.h>
#include <linux/time.h>

#include <asm/cputime.h>
#include <asm/vtimer.h>
#include <asm/vtime.h>
#include <asm/cpu_mf.h>
#include <asm/smp.h>

#include "entry.h"

static void virt_timer_expire(void);

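/*
 * All virtual CPU timers are kept on a single global list, sorted by
 * expiry time. virt_timer_elapsed accumulates the CPU time consumed
 * since the timer base was last reset; virt_timer_current caches the
 * expiry value of the earliest pending timer.
 */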
static LIST_HEAD(virt_timer_list);
static DEFINE_SPINLOCK(virt_timer_lock);
static atomic64_t virt_timer_current;
static atomic64_t virt_timer_elapsed;

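/*
 * Per-cpu state for scaling CPU time by the multithreading (SMT)
 * utilization of the core, see update_mt_scaling().
 */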
DEFINE_PER_CPU(u64, mt_cycles[8]);
static DEFINE_PER_CPU(u64, mt_scaling_mult) = { 1 };
static DEFINE_PER_CPU(u64, mt_scaling_div) = { 1 };
static DEFINE_PER_CPU(u64, mt_scaling_jiffies);

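/* Read the current value of the CPU timer. */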
static inline u64 get_vtimer(void)
{
	u64 timer;

	asm volatile("stpt %0" : "=m" (timer));
	return timer;
}

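/*
 * Program a new CPU timer expiry value and account the time that
 * elapsed since the last update as system time.
 */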
static inline void set_vtimer(u64 expires)
{
	u64 timer;

	asm volatile(
		"	stpt	%0\n"	/* Store current cpu timer value */
		"	spt	%1"	/* Set new value immediately afterwards */
		: "=m" (timer) : "m" (expires));
	S390_lowcore.system_timer += S390_lowcore.last_update_timer - timer;
	S390_lowcore.last_update_timer = expires;
}

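/*
 * Add the just accounted CPU time to the elapsed-time counter of
 * the virtual timer base. Returns nonzero if the earliest pending
 * virtual timer has expired.
 */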
static inline int virt_timer_forward(u64 elapsed)
{
	BUG_ON(!irqs_disabled());

	if (list_empty(&virt_timer_list))
		return 0;
	elapsed = atomic64_add_return(elapsed, &virt_timer_elapsed);
	return elapsed >= atomic64_read(&virt_timer_current);
}

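/*
 * Recompute the MT utilization scaling factor mult / div from the
 * per-thread cycle counters. Cycles spent while i + 1 hardware
 * threads are active on the core are weighted by 1 / (i + 1), i.e.
 * the loop evaluates
 *
 *	mult / div = (sum_i delta_i / (i + 1)) / (sum_i delta_i)
 *
 * over a common denominator so that only integer arithmetic is used.
 */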
static void update_mt_scaling(void)
{
	u64 cycles_new[8], *cycles_old;
	u64 delta, fac, mult, div;
	int i;

	stcctm5(smp_cpu_mtid + 1, cycles_new);
	cycles_old = this_cpu_ptr(mt_cycles);
	fac = 1;
	mult = div = 0;
	for (i = 0; i <= smp_cpu_mtid; i++) {
		delta = cycles_new[i] - cycles_old[i];
		div += delta;
		mult *= i + 1;
		mult += delta * fac;
		fac *= i + 1;
	}
	div *= fac;
	if (div > 0) {
		/* Update scaling factor */
		__this_cpu_write(mt_scaling_mult, mult);
		__this_cpu_write(mt_scaling_div, div);
		memcpy(cycles_old, cycles_new,
		       sizeof(u64) * (smp_cpu_mtid + 1));
	}
	__this_cpu_write(mt_scaling_jiffies, jiffies_64);
}

/*
 * Update process times based on virtual cpu times stored by entry.S
 * to the lowcore fields user_timer, system_timer & steal_timer.
 * Returns nonzero if a virtual timer has expired.
 */
static int do_account_vtime(struct task_struct *tsk, int hardirq_offset)
{
	struct thread_info *ti = task_thread_info(tsk);
	u64 timer, clock, user, system, steal;
	u64 user_scaled, system_scaled;

	timer = S390_lowcore.last_update_timer;
	clock = S390_lowcore.last_update_clock;
	asm volatile(
		"	stpt	%0\n"	/* Store current cpu timer value */
#ifdef CONFIG_HAVE_MARCH_Z9_109_FEATURES
		"	stckf	%1"	/* Store current tod clock value */
#else
		"	stck	%1"	/* Store current tod clock value */
#endif
		: "=m" (S390_lowcore.last_update_timer),
		  "=m" (S390_lowcore.last_update_clock));
	S390_lowcore.system_timer += timer - S390_lowcore.last_update_timer;
	S390_lowcore.steal_timer += S390_lowcore.last_update_clock - clock;

	/* Update MT utilization calculation */
	if (smp_cpu_mtid &&
	    time_after64(jiffies_64, this_cpu_read(mt_scaling_jiffies)))
		update_mt_scaling();

	user = S390_lowcore.user_timer - ti->user_timer;
	S390_lowcore.steal_timer -= user;
	ti->user_timer = S390_lowcore.user_timer;

	system = S390_lowcore.system_timer - ti->system_timer;
	S390_lowcore.steal_timer -= system;
	ti->system_timer = S390_lowcore.system_timer;

	user_scaled = user;
	system_scaled = system;
	/* Do MT utilization scaling */
	if (smp_cpu_mtid) {
		u64 mult = __this_cpu_read(mt_scaling_mult);
		u64 div = __this_cpu_read(mt_scaling_div);

		user_scaled = (user_scaled * mult) / div;
		system_scaled = (system_scaled * mult) / div;
	}
	account_user_time(tsk, user, user_scaled);
	account_system_time(tsk, hardirq_offset, system, system_scaled);

	steal = S390_lowcore.steal_timer;
	if ((s64) steal > 0) {
		S390_lowcore.steal_timer = 0;
		account_steal_time(steal);
	}

	return virt_timer_forward(user + system);
}

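/*
 * Called on context switch: account the time of the outgoing task
 * and move the per-task accounting base values in the lowcore from
 * the previous to the current task.
 */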
void vtime_task_switch(struct task_struct *prev)
{
	struct thread_info *ti;

	do_account_vtime(prev, 0);
	ti = task_thread_info(prev);
	ti->user_timer = S390_lowcore.user_timer;
	ti->system_timer = S390_lowcore.system_timer;
	ti = task_thread_info(current);
	S390_lowcore.user_timer = ti->user_timer;
	S390_lowcore.system_timer = ti->system_timer;
}

/*
 * On s390, accounting pending user time also implies accounting
 * system time, in order to correctly compute steal time.
 */
void vtime_account_user(struct task_struct *tsk)
{
	if (do_account_vtime(tsk, HARDIRQ_OFFSET))
		virt_timer_expire();
}

/*
 * Account the system time consumed by the current task since the
 * last update of the CPU timer. Also serves as vtime_account_system()
 * via the alias below.
 */
void vtime_account_irq_enter(struct task_struct *tsk)
{
	struct thread_info *ti = task_thread_info(tsk);
	u64 timer, system, system_scaled;

	timer = S390_lowcore.last_update_timer;
	S390_lowcore.last_update_timer = get_vtimer();
	S390_lowcore.system_timer += timer - S390_lowcore.last_update_timer;

	/* Update MT utilization calculation */
	if (smp_cpu_mtid &&
	    time_after64(jiffies_64, this_cpu_read(mt_scaling_jiffies)))
		update_mt_scaling();

	system = S390_lowcore.system_timer - ti->system_timer;
	S390_lowcore.steal_timer -= system;
	ti->system_timer = S390_lowcore.system_timer;
	system_scaled = system;
	/* Do MT utilization scaling */
	if (smp_cpu_mtid) {
		u64 mult = __this_cpu_read(mt_scaling_mult);
		u64 div = __this_cpu_read(mt_scaling_div);

		system_scaled = (system_scaled * mult) / div;
	}
	account_system_time(tsk, 0, system, system_scaled);

	virt_timer_forward(system);
}
EXPORT_SYMBOL_GPL(vtime_account_irq_enter);

void vtime_account_system(struct task_struct *tsk)
__attribute__((alias("vtime_account_irq_enter")));
EXPORT_SYMBOL_GPL(vtime_account_system);

/*
 * Sorted insert into a list. The list is searched linearly until the
 * first element with a later expiry time is found.
 */
static void list_add_sorted(struct vtimer_list *timer, struct list_head *head)
{
	struct vtimer_list *tmp;

	list_for_each_entry(tmp, head, entry) {
		if (tmp->expires > timer->expires) {
			list_add_tail(&timer->entry, &tmp->entry);
			return;
		}
	}
	list_add_tail(&timer->entry, head);
}

/*
 * Handler for expired virtual CPU timers.
 */
static void virt_timer_expire(void)
{
	struct vtimer_list *timer, *tmp;
	unsigned long elapsed;
	LIST_HEAD(cb_list);

	/* walk timer list, fire all expired timers */
	spin_lock(&virt_timer_lock);
	elapsed = atomic64_read(&virt_timer_elapsed);
	list_for_each_entry_safe(timer, tmp, &virt_timer_list, entry) {
		if (timer->expires < elapsed)
			/* move expired timer to the callback queue */
			list_move_tail(&timer->entry, &cb_list);
		else
			timer->expires -= elapsed;
	}
	if (!list_empty(&virt_timer_list)) {
		timer = list_first_entry(&virt_timer_list,
					 struct vtimer_list, entry);
		atomic64_set(&virt_timer_current, timer->expires);
	}
	atomic64_sub(elapsed, &virt_timer_elapsed);
	spin_unlock(&virt_timer_lock);

	/* Do callbacks and recharge periodic timers */
	list_for_each_entry_safe(timer, tmp, &cb_list, entry) {
		list_del_init(&timer->entry);
		timer->function(timer->data);
		if (timer->interval) {
			/* Recharge interval timer */
			timer->expires = timer->interval +
				atomic64_read(&virt_timer_elapsed);
			spin_lock(&virt_timer_lock);
			list_add_sorted(timer, &virt_timer_list);
			spin_unlock(&virt_timer_lock);
		}
	}
}

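/* Initialize a virtual timer before its first use. */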
void init_virt_timer(struct vtimer_list *timer)
{
	timer->function = NULL;
	INIT_LIST_HEAD(&timer->entry);
}
EXPORT_SYMBOL(init_virt_timer);

static inline int vtimer_pending(struct vtimer_list *timer)
{
	return !list_empty(&timer->entry);
}

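/* Add a timer to the global timer list; the caller must hold virt_timer_lock. */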
static void internal_add_vtimer(struct vtimer_list *timer)
{
	if (list_empty(&virt_timer_list)) {
		/* First timer, just program it. */
		atomic64_set(&virt_timer_current, timer->expires);
		atomic64_set(&virt_timer_elapsed, 0);
		list_add(&timer->entry, &virt_timer_list);
	} else {
		/* Update timer against current base. */
		timer->expires += atomic64_read(&virt_timer_elapsed);
		if (likely((s64) timer->expires <
			   (s64) atomic64_read(&virt_timer_current)))
			/* The new timer expires before the current timer. */
			atomic64_set(&virt_timer_current, timer->expires);
		/* Insert new timer into the list. */
		list_add_sorted(timer, &virt_timer_list);
	}
}

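/* Common helper for add_virt_timer() and add_virt_timer_periodic(). */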
static void __add_vtimer(struct vtimer_list *timer, int periodic)
{
	unsigned long flags;

	timer->interval = periodic ? timer->expires : 0;
	spin_lock_irqsave(&virt_timer_lock, flags);
	internal_add_vtimer(timer);
	spin_unlock_irqrestore(&virt_timer_lock, flags);
}

/*
 * add_virt_timer - add a one-shot virtual CPU timer
 */
void add_virt_timer(struct vtimer_list *timer)
{
	__add_vtimer(timer, 0);
}
EXPORT_SYMBOL(add_virt_timer);

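/*
 * Typical usage of the one-shot API, as a sketch only: the handler
 * and the expiry value are illustrative, and the expiry is given in
 * CPU timer units.
 *
 *	static void my_handler(unsigned long data);
 *	static struct vtimer_list my_timer;
 *
 *	init_virt_timer(&my_timer);
 *	my_timer.function = my_handler;
 *	my_timer.data = 0;
 *	my_timer.expires = expiry_in_cpu_timer_units;
 *	add_virt_timer(&my_timer);
 */
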
/*
 * add_virt_timer_periodic - add a periodic virtual CPU timer
 */
void add_virt_timer_periodic(struct vtimer_list *timer)
{
	__add_vtimer(timer, 1);
}
EXPORT_SYMBOL(add_virt_timer_periodic);

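/*
 * Change the expiry value of a timer, removing it from the timer
 * list first if it is still pending. Returns 1 if the timer was
 * pending, 0 otherwise.
 */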
static int __mod_vtimer(struct vtimer_list *timer, u64 expires, int periodic)
{
	unsigned long flags;
	int rc;

	BUG_ON(!timer->function);

	if (timer->expires == expires && vtimer_pending(timer))
		return 1;
	spin_lock_irqsave(&virt_timer_lock, flags);
	rc = vtimer_pending(timer);
	if (rc)
		list_del_init(&timer->entry);
	timer->interval = periodic ? expires : 0;
	timer->expires = expires;
	internal_add_vtimer(timer);
	spin_unlock_irqrestore(&virt_timer_lock, flags);
	return rc;
}

/*
 * Returns 1 if a pending timer was modified, 0 otherwise.
 */
int mod_virt_timer(struct vtimer_list *timer, u64 expires)
{
	return __mod_vtimer(timer, expires, 0);
}
EXPORT_SYMBOL(mod_virt_timer);

/*
 * Returns 1 if a pending timer was modified, 0 otherwise.
 */
int mod_virt_timer_periodic(struct vtimer_list *timer, u64 expires)
{
	return __mod_vtimer(timer, expires, 1);
}
EXPORT_SYMBOL(mod_virt_timer_periodic);

/*
 * Delete a virtual timer.
 *
 * Returns 1 if the deleted timer was pending, 0 otherwise.
 */
int del_virt_timer(struct vtimer_list *timer)
{
	unsigned long flags;

	if (!vtimer_pending(timer))
		return 0;
	spin_lock_irqsave(&virt_timer_lock, flags);
	list_del_init(&timer->entry);
	spin_unlock_irqrestore(&virt_timer_lock, flags);
	return 1;
}
EXPORT_SYMBOL(del_virt_timer);

/*
 * Start the virtual CPU timer on the current CPU.
 */
void vtime_init(void)
{
	/* set initial cpu timer */
	set_vtimer(VTIMER_MAX_SLICE);
	/* Setup initial MT scaling values */
	if (smp_cpu_mtid) {
		__this_cpu_write(mt_scaling_jiffies, jiffies);
		__this_cpu_write(mt_scaling_mult, 1);
		__this_cpu_write(mt_scaling_div, 1);
		stcctm5(smp_cpu_mtid + 1, this_cpu_ptr(mt_cycles));
	}
}