/*
 *	linux/kernel/softirq.c
 *
 *	Copyright (C) 1992 Linus Torvalds
 *
 *	Distribute under GPLv2.
 *
 *	Rewritten. Old one was good in 2.2, but in 2.3 it was immoral. --ANK (990903)
 *
 *	Remote softirq infrastructure is by Jens Axboe.
 */

#include <linux/export.h>
#include <linux/kernel_stat.h>
#include <linux/interrupt.h>
#include <linux/init.h>
#include <linux/mm.h>
#include <linux/notifier.h>
#include <linux/percpu.h>
#include <linux/cpu.h>
#include <linux/freezer.h>
#include <linux/kthread.h>
#include <linux/rcupdate.h>
#include <linux/ftrace.h>
#include <linux/smp.h>
#include <linux/smpboot.h>
#include <linux/tick.h>

#define CREATE_TRACE_POINTS
#include <trace/events/irq.h>

/*
   - No shared variables, all data is CPU-local.
   - If a softirq needs serialization, let it serialize itself
     with its own spinlocks.
   - Even if a softirq is serialized, only the local CPU is marked for
     execution. Hence, we get a sort of weak CPU binding, though it is
     still not clear whether this results in better locality or not.

   Examples:
   - NET RX softirq. It is multithreaded and does not require
     any global serialization.
   - NET TX softirq. It kicks software netdevice queues, hence
     it is logically serialized per device, but this serialization
     is invisible to common code.
   - Tasklets: serialized with respect to themselves.
 */

#ifndef __ARCH_IRQ_STAT
irq_cpustat_t irq_stat[NR_CPUS] ____cacheline_aligned;
EXPORT_SYMBOL(irq_stat);
#endif

static struct softirq_action softirq_vec[NR_SOFTIRQS] __cacheline_aligned_in_smp;

DEFINE_PER_CPU(struct task_struct *, ksoftirqd);

char *softirq_to_name[NR_SOFTIRQS] = {
	"HI", "TIMER", "NET_TX", "NET_RX", "BLOCK", "BLOCK_IOPOLL",
	"TASKLET", "SCHED", "HRTIMER", "RCU"
};

/*
 * We cannot loop indefinitely here to avoid userspace starvation,
 * but we also don't want to introduce a worst-case 1/HZ latency
 * to the pending events, so let the scheduler balance
 * the softirq load for us.
 */
static void wakeup_softirqd(void)
{
	/* Interrupts are disabled: no need to stop preemption */
	struct task_struct *tsk = __this_cpu_read(ksoftirqd);

	if (tsk && tsk->state != TASK_RUNNING)
		wake_up_process(tsk);
}

/*
 * preempt_count and SOFTIRQ_OFFSET usage:
 * - preempt_count is changed by SOFTIRQ_OFFSET on entering or leaving
 *   softirq processing.
 * - preempt_count is changed by SOFTIRQ_DISABLE_OFFSET (= 2 * SOFTIRQ_OFFSET)
 *   on local_bh_disable or local_bh_enable.
 * This lets us distinguish between whether we are currently processing
 * softirq and whether we just have bh disabled.
 */

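/*
 * A worked example of the scheme above (a sketch, assuming the usual
 * definition in which in_serving_softirq() tests the SOFTIRQ_OFFSET
 * bit): after one local_bh_disable(), softirq_count() reads
 * SOFTIRQ_DISABLE_OFFSET; while handlers are actually running it
 * contains an odd multiple of SOFTIRQ_OFFSET, so the "serving" and
 * "merely disabled" states remain distinguishable.
 */
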
/*
 * This one is for softirq.c-internal use,
 * where hardirqs are disabled legitimately:
 */
#ifdef CONFIG_TRACE_IRQFLAGS
static void __local_bh_disable(unsigned long ip, unsigned int cnt)
{
	unsigned long flags;

	WARN_ON_ONCE(in_irq());

	raw_local_irq_save(flags);
	/*
	 * The preempt tracer hooks into preempt_count_add and will break
	 * lockdep because it calls back into lockdep after SOFTIRQ_OFFSET
	 * is set and before current->softirq_enabled is cleared.
	 * We must manually increment preempt_count here and manually
	 * call the trace_preempt_off later.
	 */
	__preempt_count_add(cnt);
	/*
	 * Were softirqs turned off above:
	 */
	if (softirq_count() == cnt)
		trace_softirqs_off(ip);
	raw_local_irq_restore(flags);

	if (preempt_count() == cnt)
		trace_preempt_off(CALLER_ADDR0, get_parent_ip(CALLER_ADDR1));
}
#else /* !CONFIG_TRACE_IRQFLAGS */
static inline void __local_bh_disable(unsigned long ip, unsigned int cnt)
{
	preempt_count_add(cnt);
	barrier();
}
#endif /* CONFIG_TRACE_IRQFLAGS */

void local_bh_disable(void)
{
	__local_bh_disable(_RET_IP_, SOFTIRQ_DISABLE_OFFSET);
}

EXPORT_SYMBOL(local_bh_disable);

static void __local_bh_enable(unsigned int cnt)
{
	WARN_ON_ONCE(!irqs_disabled());

	if (softirq_count() == cnt)
		trace_softirqs_on(_RET_IP_);
	preempt_count_sub(cnt);
}

/*
 * Special-case - softirqs can safely be enabled in
 * cond_resched_softirq(), or by __do_softirq(),
 * without processing still-pending softirqs:
 */
void _local_bh_enable(void)
{
	WARN_ON_ONCE(in_irq());
	__local_bh_enable(SOFTIRQ_DISABLE_OFFSET);
}

EXPORT_SYMBOL(_local_bh_enable);

static inline void _local_bh_enable_ip(unsigned long ip)
{
	WARN_ON_ONCE(in_irq() || irqs_disabled());
#ifdef CONFIG_TRACE_IRQFLAGS
	local_irq_disable();
#endif
	/*
	 * Are softirqs going to be turned on now:
	 */
	if (softirq_count() == SOFTIRQ_DISABLE_OFFSET)
		trace_softirqs_on(ip);
	/*
	 * Keep preemption disabled until we are done with
	 * softirq processing:
	 */
	preempt_count_sub(SOFTIRQ_DISABLE_OFFSET - 1);

	if (unlikely(!in_interrupt() && local_softirq_pending())) {
		/*
		 * Run softirq if any pending. And do it in its own stack
		 * as we may be calling this deep in a task call stack already.
		 */
		do_softirq();
	}

	preempt_count_dec();
#ifdef CONFIG_TRACE_IRQFLAGS
	local_irq_enable();
#endif
	preempt_check_resched();
}

void local_bh_enable(void)
{
	_local_bh_enable_ip(_RET_IP_);
}
EXPORT_SYMBOL(local_bh_enable);

void local_bh_enable_ip(unsigned long ip)
{
	_local_bh_enable_ip(ip);
}
EXPORT_SYMBOL(local_bh_enable_ip);

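/*
 * Typical usage (an illustrative sketch, not code from this file):
 * a path that shares data with a softirq handler brackets the access
 * in a bh-disabled section:
 *
 *	local_bh_disable();
 *	... touch data also used by, say, the NET_RX handler ...
 *	local_bh_enable();	 <- may process pending softirqs here
 */
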
/*
 * We restart softirq processing for at most MAX_SOFTIRQ_RESTART times,
 * but break the loop if need_resched() is set or after 2 ms.
 * MAX_SOFTIRQ_TIME provides a nice upper bound in most cases, but in
 * certain cases, such as stop_machine(), jiffies may cease to
 * increment and so we need the MAX_SOFTIRQ_RESTART limit as
 * well to make sure we eventually return from this function.
 *
 * These limits have been established via experimentation.
 * The two things to balance are latency and fairness -
 * we want to handle softirqs as soon as possible, but they
 * should not be able to lock up the box.
 */
#define MAX_SOFTIRQ_TIME  msecs_to_jiffies(2)
#define MAX_SOFTIRQ_RESTART 10

#ifdef CONFIG_TRACE_IRQFLAGS
/*
 * Convoluted means of passing __do_softirq() a message through the various
 * architecture execute_on_stack() bits.
 *
 * When we run softirqs from irq_exit() and thus on the hardirq stack we need
 * to keep the lockdep irq context tracking as tight as possible in order to
 * not mis-qualify lock contexts and miss possible deadlocks.
 */
static DEFINE_PER_CPU(int, softirq_from_hardirq);

static inline void lockdep_softirq_from_hardirq(void)
{
	this_cpu_write(softirq_from_hardirq, 1);
}

static inline void lockdep_softirq_start(void)
{
	if (this_cpu_read(softirq_from_hardirq))
		trace_hardirq_exit();
	lockdep_softirq_enter();
}

static inline void lockdep_softirq_end(void)
{
	lockdep_softirq_exit();
	if (this_cpu_read(softirq_from_hardirq)) {
		this_cpu_write(softirq_from_hardirq, 0);
		trace_hardirq_enter();
	}
}

#else
static inline void lockdep_softirq_from_hardirq(void) { }
static inline void lockdep_softirq_start(void) { }
static inline void lockdep_softirq_end(void) { }
#endif

asmlinkage void __do_softirq(void)
{
	unsigned long end = jiffies + MAX_SOFTIRQ_TIME;
	unsigned long old_flags = current->flags;
	int max_restart = MAX_SOFTIRQ_RESTART;
	struct softirq_action *h;
	__u32 pending;
	int cpu;

	/*
	 * Mask out PF_MEMALLOC as the current task context is borrowed for
	 * the softirq. A softirq handler such as network RX might set
	 * PF_MEMALLOC again if the socket is related to swap.
	 */
	current->flags &= ~PF_MEMALLOC;

	pending = local_softirq_pending();
	account_irq_enter_time(current);

	__local_bh_disable(_RET_IP_, SOFTIRQ_OFFSET);
	lockdep_softirq_start();

	cpu = smp_processor_id();
restart:
	/* Reset the pending bitmask before enabling irqs */
	set_softirq_pending(0);

	local_irq_enable();

	h = softirq_vec;

	do {
		if (pending & 1) {
			unsigned int vec_nr = h - softirq_vec;
			int prev_count = preempt_count();

			kstat_incr_softirqs_this_cpu(vec_nr);

			trace_softirq_entry(vec_nr);
			h->action(h);
			trace_softirq_exit(vec_nr);
			if (unlikely(prev_count != preempt_count())) {
				printk(KERN_ERR "huh, entered softirq %u %s %p "
				       "with preempt_count %08x, "
				       "exited with %08x?\n", vec_nr,
				       softirq_to_name[vec_nr], h->action,
				       prev_count, preempt_count());
				preempt_count_set(prev_count);
			}

			rcu_bh_qs(cpu);
		}
		h++;
		pending >>= 1;
	} while (pending);

	local_irq_disable();

	pending = local_softirq_pending();
	if (pending) {
		if (time_before(jiffies, end) && !need_resched() &&
		    --max_restart)
			goto restart;

		wakeup_softirqd();
	}

	lockdep_softirq_end();
	account_irq_exit_time(current);
	__local_bh_enable(SOFTIRQ_OFFSET);
	WARN_ON_ONCE(in_interrupt());
	tsk_restore_flags(current, old_flags, PF_MEMALLOC);
}

asmlinkage void do_softirq(void)
{
	__u32 pending;
	unsigned long flags;

	if (in_interrupt())
		return;

	local_irq_save(flags);

	pending = local_softirq_pending();

	if (pending)
		do_softirq_own_stack();

	local_irq_restore(flags);
}

/*
 * Enter an interrupt context.
 */
void irq_enter(void)
{
	int cpu = smp_processor_id();

	rcu_irq_enter();
	if (is_idle_task(current) && !in_interrupt()) {
		/*
		 * Prevent raise_softirq from needlessly waking up ksoftirqd
		 * here, as softirq will be serviced on return from interrupt.
		 */
		local_bh_disable();
		tick_check_idle(cpu);
		_local_bh_enable();
	}

	__irq_enter();
}

static inline void invoke_softirq(void)
{
	if (!force_irqthreads) {
		lockdep_softirq_from_hardirq();
#ifdef CONFIG_HAVE_IRQ_EXIT_ON_IRQ_STACK
		/*
		 * We can safely execute softirq on the current stack if
		 * it is the irq stack, because it should be near empty
		 * at this stage.
		 */
		__do_softirq();
#else
		/*
		 * Otherwise, irq_exit() is called on the task stack that can
		 * be potentially deep already. So call softirq in its own
		 * stack to prevent any overrun.
		 */
		do_softirq_own_stack();
#endif
	} else {
		wakeup_softirqd();
	}
}

static inline void tick_irq_exit(void)
{
#ifdef CONFIG_NO_HZ_COMMON
	int cpu = smp_processor_id();

	/* Make sure that timer wheel updates are propagated */
	if ((idle_cpu(cpu) && !need_resched()) || tick_nohz_full_cpu(cpu)) {
		if (!in_interrupt())
			tick_nohz_irq_exit();
	}
#endif
}

/*
 * Exit an interrupt context. Process softirqs if needed and possible:
 */
void irq_exit(void)
{
#ifndef __ARCH_IRQ_EXIT_IRQS_DISABLED
	local_irq_disable();
#else
	WARN_ON_ONCE(!irqs_disabled());
#endif

	account_irq_exit_time(current);
	preempt_count_sub(HARDIRQ_OFFSET);
	if (!in_interrupt() && local_softirq_pending())
		invoke_softirq();

	tick_irq_exit();
	rcu_irq_exit();
	trace_hardirq_exit(); /* must be last! */
}

/*
 * This function must run with irqs disabled!
 */
inline void raise_softirq_irqoff(unsigned int nr)
{
	__raise_softirq_irqoff(nr);

	/*
	 * If we're in an interrupt or softirq, we're done
	 * (this also catches softirq-disabled code). We will
	 * actually run the softirq once we return from
	 * the irq or softirq.
	 *
	 * Otherwise we wake up ksoftirqd to make sure we
	 * schedule the softirq soon.
	 */
	if (!in_interrupt())
		wakeup_softirqd();
}

void raise_softirq(unsigned int nr)
{
	unsigned long flags;

	local_irq_save(flags);
	raise_softirq_irqoff(nr);
	local_irq_restore(flags);
}

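/*
 * Illustrative use (a sketch, not code from this file): a driver's
 * hardirq handler defers its bottom half by raising the vector it
 * opened earlier, e.g.
 *
 *	raise_softirq(NET_TX_SOFTIRQ);
 *
 * Callers that already run with interrupts disabled can call
 * raise_softirq_irqoff() directly and skip the redundant save/restore.
 */
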
void __raise_softirq_irqoff(unsigned int nr)
{
	trace_softirq_raise(nr);
	or_softirq_pending(1UL << nr);
}

void open_softirq(int nr, void (*action)(struct softirq_action *))
{
	softirq_vec[nr].action = action;
}

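/*
 * Softirq vectors are statically enumerated in <linux/interrupt.h> and
 * there is no unregister operation. As an example, the networking core
 * registers its handlers at init time with
 * open_softirq(NET_TX_SOFTIRQ, net_tx_action).
 */
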
/*
 * Tasklets
 */
struct tasklet_head
{
	struct tasklet_struct *head;
	struct tasklet_struct **tail;
};

static DEFINE_PER_CPU(struct tasklet_head, tasklet_vec);
static DEFINE_PER_CPU(struct tasklet_head, tasklet_hi_vec);

void __tasklet_schedule(struct tasklet_struct *t)
{
	unsigned long flags;

	local_irq_save(flags);
	t->next = NULL;
	*__this_cpu_read(tasklet_vec.tail) = t;
	__this_cpu_write(tasklet_vec.tail, &(t->next));
	raise_softirq_irqoff(TASKLET_SOFTIRQ);
	local_irq_restore(flags);
}

EXPORT_SYMBOL(__tasklet_schedule);

void __tasklet_hi_schedule(struct tasklet_struct *t)
{
	unsigned long flags;

	local_irq_save(flags);
	t->next = NULL;
	*__this_cpu_read(tasklet_hi_vec.tail) = t;
	__this_cpu_write(tasklet_hi_vec.tail, &(t->next));
	raise_softirq_irqoff(HI_SOFTIRQ);
	local_irq_restore(flags);
}

EXPORT_SYMBOL(__tasklet_hi_schedule);

void __tasklet_hi_schedule_first(struct tasklet_struct *t)
{
	BUG_ON(!irqs_disabled());

	t->next = __this_cpu_read(tasklet_hi_vec.head);
	__this_cpu_write(tasklet_hi_vec.head, t);
	__raise_softirq_irqoff(HI_SOFTIRQ);
}

EXPORT_SYMBOL(__tasklet_hi_schedule_first);

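/*
 * Note on the two action loops below: tasklet_trylock() fails if the
 * tasklet is currently running on another CPU. In that case the tasklet
 * is put back on this CPU's list and the softirq is raised again, which
 * is how a tasklet is guaranteed to execute on at most one CPU at a time.
 */
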
static void tasklet_action(struct softirq_action *a)
{
	struct tasklet_struct *list;

	local_irq_disable();
	list = __this_cpu_read(tasklet_vec.head);
	__this_cpu_write(tasklet_vec.head, NULL);
	__this_cpu_write(tasklet_vec.tail, &__get_cpu_var(tasklet_vec).head);
	local_irq_enable();

	while (list) {
		struct tasklet_struct *t = list;

		list = list->next;

		if (tasklet_trylock(t)) {
			if (!atomic_read(&t->count)) {
				if (!test_and_clear_bit(TASKLET_STATE_SCHED, &t->state))
					BUG();
				t->func(t->data);
				tasklet_unlock(t);
				continue;
			}
			tasklet_unlock(t);
		}

		local_irq_disable();
		t->next = NULL;
		*__this_cpu_read(tasklet_vec.tail) = t;
		__this_cpu_write(tasklet_vec.tail, &(t->next));
		__raise_softirq_irqoff(TASKLET_SOFTIRQ);
		local_irq_enable();
	}
}

static void tasklet_hi_action(struct softirq_action *a)
{
	struct tasklet_struct *list;

	local_irq_disable();
	list = __this_cpu_read(tasklet_hi_vec.head);
	__this_cpu_write(tasklet_hi_vec.head, NULL);
	__this_cpu_write(tasklet_hi_vec.tail, &__get_cpu_var(tasklet_hi_vec).head);
	local_irq_enable();

	while (list) {
		struct tasklet_struct *t = list;

		list = list->next;

		if (tasklet_trylock(t)) {
			if (!atomic_read(&t->count)) {
				if (!test_and_clear_bit(TASKLET_STATE_SCHED, &t->state))
					BUG();
				t->func(t->data);
				tasklet_unlock(t);
				continue;
			}
			tasklet_unlock(t);
		}

		local_irq_disable();
		t->next = NULL;
		*__this_cpu_read(tasklet_hi_vec.tail) = t;
		__this_cpu_write(tasklet_hi_vec.tail, &(t->next));
		__raise_softirq_irqoff(HI_SOFTIRQ);
		local_irq_enable();
	}
}


void tasklet_init(struct tasklet_struct *t,
		  void (*func)(unsigned long), unsigned long data)
{
	t->next = NULL;
	t->state = 0;
	atomic_set(&t->count, 0);
	t->func = func;
	t->data = data;
}

EXPORT_SYMBOL(tasklet_init);

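/*
 * Example setup (a sketch; my_tasklet and my_func are illustrative
 * names, not kernel symbols):
 *
 *	static void my_func(unsigned long data);
 *	static struct tasklet_struct my_tasklet;
 *
 *	tasklet_init(&my_tasklet, my_func, 0);
 *	tasklet_schedule(&my_tasklet);	 <- typically from an irq handler
 */
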
void tasklet_kill(struct tasklet_struct *t)
{
	if (in_interrupt())
		printk(KERN_NOTICE "Attempt to kill tasklet from interrupt\n");

	while (test_and_set_bit(TASKLET_STATE_SCHED, &t->state)) {
		do {
			yield();
		} while (test_bit(TASKLET_STATE_SCHED, &t->state));
	}
	tasklet_unlock_wait(t);
	clear_bit(TASKLET_STATE_SCHED, &t->state);
}

EXPORT_SYMBOL(tasklet_kill);

/*
 * tasklet_hrtimer
 */

/*
 * The trampoline is called when the hrtimer expires. It schedules a tasklet
 * to run __tasklet_hrtimer_trampoline() which in turn will call the intended
 * hrtimer callback, but from softirq context.
 */
static enum hrtimer_restart __hrtimer_tasklet_trampoline(struct hrtimer *timer)
{
	struct tasklet_hrtimer *ttimer =
		container_of(timer, struct tasklet_hrtimer, timer);

	tasklet_hi_schedule(&ttimer->tasklet);
	return HRTIMER_NORESTART;
}

/*
 * Helper function which calls the hrtimer callback from
 * tasklet/softirq context
 */
static void __tasklet_hrtimer_trampoline(unsigned long data)
{
	struct tasklet_hrtimer *ttimer = (void *)data;
	enum hrtimer_restart restart;

	restart = ttimer->function(&ttimer->timer);
	if (restart != HRTIMER_NORESTART)
		hrtimer_restart(&ttimer->timer);
}

/**
 * tasklet_hrtimer_init - Init a tasklet/hrtimer combo for softirq callbacks
 * @ttimer:	 tasklet_hrtimer which is initialized
 * @function:	 hrtimer callback function which gets called from softirq context
 * @which_clock: clock id (CLOCK_MONOTONIC/CLOCK_REALTIME)
 * @mode:	 hrtimer mode (HRTIMER_MODE_ABS/HRTIMER_MODE_REL)
 */
void tasklet_hrtimer_init(struct tasklet_hrtimer *ttimer,
			  enum hrtimer_restart (*function)(struct hrtimer *),
			  clockid_t which_clock, enum hrtimer_mode mode)
{
	hrtimer_init(&ttimer->timer, which_clock, mode);
	ttimer->timer.function = __hrtimer_tasklet_trampoline;
	tasklet_init(&ttimer->tasklet, __tasklet_hrtimer_trampoline,
		     (unsigned long)ttimer);
	ttimer->function = function;
}
EXPORT_SYMBOL_GPL(tasklet_hrtimer_init);

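/*
 * The combo above serves hrtimer users whose callback is too heavy for
 * hard irq context: the timer fires in hard irq, the two trampolines
 * bounce the work into HI_SOFTIRQ, and ttimer->function finally runs
 * from there.
 */
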
/*
 * Remote softirq bits
 */

DEFINE_PER_CPU(struct list_head [NR_SOFTIRQS], softirq_work_list);
EXPORT_PER_CPU_SYMBOL(softirq_work_list);

static void __local_trigger(struct call_single_data *cp, int softirq)
{
	struct list_head *head = &__get_cpu_var(softirq_work_list[softirq]);

	list_add_tail(&cp->list, head);

	/* Trigger the softirq only if the list was previously empty. */
	if (head->next == &cp->list)
		raise_softirq_irqoff(softirq);
}

#ifdef CONFIG_USE_GENERIC_SMP_HELPERS
static void remote_softirq_receive(void *data)
{
	struct call_single_data *cp = data;
	unsigned long flags;
	int softirq;

	softirq = *(int *)cp->info;
	local_irq_save(flags);
	__local_trigger(cp, softirq);
	local_irq_restore(flags);
}

static int __try_remote_softirq(struct call_single_data *cp, int cpu, int softirq)
{
	if (cpu_online(cpu)) {
		cp->func = remote_softirq_receive;
		cp->info = &softirq;
		cp->flags = 0;

		__smp_call_function_single(cpu, cp, 0);
		return 0;
	}
	return 1;
}
#else /* CONFIG_USE_GENERIC_SMP_HELPERS */
static int __try_remote_softirq(struct call_single_data *cp, int cpu, int softirq)
{
	return 1;
}
#endif

/**
 * __send_remote_softirq - try to schedule softirq work on a remote cpu
 * @cp: private SMP call function data area
 * @cpu: the remote cpu
 * @this_cpu: the currently executing cpu
 * @softirq: the softirq for the work
 *
 * Attempt to schedule softirq work on a remote cpu. If this cannot be
 * done, the work is instead queued up on the local cpu.
 *
 * Interrupts must be disabled.
 */
void __send_remote_softirq(struct call_single_data *cp, int cpu, int this_cpu, int softirq)
{
	if (cpu == this_cpu || __try_remote_softirq(cp, cpu, softirq))
		__local_trigger(cp, softirq);
}
EXPORT_SYMBOL(__send_remote_softirq);

/**
 * send_remote_softirq - try to schedule softirq work on a remote cpu
 * @cp: private SMP call function data area
 * @cpu: the remote cpu
 * @softirq: the softirq for the work
 *
 * Like __send_remote_softirq except that disabling interrupts and
 * computing the current cpu is done for the caller.
 */
void send_remote_softirq(struct call_single_data *cp, int cpu, int softirq)
{
	unsigned long flags;
	int this_cpu;

	local_irq_save(flags);
	this_cpu = smp_processor_id();
	__send_remote_softirq(cp, cpu, this_cpu, softirq);
	local_irq_restore(flags);
}
EXPORT_SYMBOL(send_remote_softirq);
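
/*
 * Illustrative call (a sketch; "req" and its csd field are hypothetical
 * caller-owned storage):
 *
 *	send_remote_softirq(&req->csd, target_cpu, BLOCK_SOFTIRQ);
 *
 * If target_cpu is offline, or is the local cpu, the work is queued on
 * the local softirq_work_list instead.
 */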

static int remote_softirq_cpu_notify(struct notifier_block *self,
				     unsigned long action, void *hcpu)
{
	/*
	 * If a CPU goes away, splice its entries to the current CPU
	 * and trigger a run of the softirq
	 */
	if (action == CPU_DEAD || action == CPU_DEAD_FROZEN) {
		int cpu = (unsigned long) hcpu;
		int i;

		local_irq_disable();
		for (i = 0; i < NR_SOFTIRQS; i++) {
			struct list_head *head = &per_cpu(softirq_work_list[i], cpu);
			struct list_head *local_head;

			if (list_empty(head))
				continue;

			local_head = &__get_cpu_var(softirq_work_list[i]);
			list_splice_init(head, local_head);
			raise_softirq_irqoff(i);
		}
		local_irq_enable();
	}

	return NOTIFY_OK;
}

static struct notifier_block remote_softirq_cpu_notifier = {
	.notifier_call = remote_softirq_cpu_notify,
};

void __init softirq_init(void)
{
	int cpu;

	for_each_possible_cpu(cpu) {
		int i;

		per_cpu(tasklet_vec, cpu).tail =
			&per_cpu(tasklet_vec, cpu).head;
		per_cpu(tasklet_hi_vec, cpu).tail =
			&per_cpu(tasklet_hi_vec, cpu).head;
		for (i = 0; i < NR_SOFTIRQS; i++)
			INIT_LIST_HEAD(&per_cpu(softirq_work_list[i], cpu));
	}

	register_hotcpu_notifier(&remote_softirq_cpu_notifier);

	open_softirq(TASKLET_SOFTIRQ, tasklet_action);
	open_softirq(HI_SOFTIRQ, tasklet_hi_action);
}

static int ksoftirqd_should_run(unsigned int cpu)
{
	return local_softirq_pending();
}

static void run_ksoftirqd(unsigned int cpu)
{
	local_irq_disable();
	if (local_softirq_pending()) {
		/*
		 * We can safely run softirq on the current stack, as we are
		 * not deep in the task stack here.
		 */
		__do_softirq();
		rcu_note_context_switch(cpu);
		local_irq_enable();
		cond_resched();
		return;
	}
	local_irq_enable();
}

#ifdef CONFIG_HOTPLUG_CPU
/*
 * tasklet_kill_immediate is called to remove a tasklet which can already be
 * scheduled for execution on @cpu.
 *
 * Unlike tasklet_kill, this function removes the tasklet
 * _immediately_, even if the tasklet is in TASKLET_STATE_SCHED state.
 *
 * When this function is called, @cpu must be in the CPU_DEAD state.
 */
void tasklet_kill_immediate(struct tasklet_struct *t, unsigned int cpu)
{
	struct tasklet_struct **i;

	BUG_ON(cpu_online(cpu));
	BUG_ON(test_bit(TASKLET_STATE_RUN, &t->state));

	if (!test_bit(TASKLET_STATE_SCHED, &t->state))
		return;

	/* CPU is dead, so no lock needed. */
	for (i = &per_cpu(tasklet_vec, cpu).head; *i; i = &(*i)->next) {
		if (*i == t) {
			*i = t->next;
			/* If this was the tail element, move the tail ptr */
			if (*i == NULL)
				per_cpu(tasklet_vec, cpu).tail = i;
			return;
		}
	}
	BUG();
}

static void takeover_tasklets(unsigned int cpu)
{
	/* CPU is dead, so no lock needed. */
	local_irq_disable();

	/* Find end, append list for that CPU. */
	if (&per_cpu(tasklet_vec, cpu).head != per_cpu(tasklet_vec, cpu).tail) {
		*__this_cpu_read(tasklet_vec.tail) = per_cpu(tasklet_vec, cpu).head;
		this_cpu_write(tasklet_vec.tail, per_cpu(tasklet_vec, cpu).tail);
		per_cpu(tasklet_vec, cpu).head = NULL;
		per_cpu(tasklet_vec, cpu).tail = &per_cpu(tasklet_vec, cpu).head;
	}
	raise_softirq_irqoff(TASKLET_SOFTIRQ);

	if (&per_cpu(tasklet_hi_vec, cpu).head != per_cpu(tasklet_hi_vec, cpu).tail) {
		*__this_cpu_read(tasklet_hi_vec.tail) = per_cpu(tasklet_hi_vec, cpu).head;
		__this_cpu_write(tasklet_hi_vec.tail, per_cpu(tasklet_hi_vec, cpu).tail);
		per_cpu(tasklet_hi_vec, cpu).head = NULL;
		per_cpu(tasklet_hi_vec, cpu).tail = &per_cpu(tasklet_hi_vec, cpu).head;
	}
	raise_softirq_irqoff(HI_SOFTIRQ);

	local_irq_enable();
}
#endif /* CONFIG_HOTPLUG_CPU */

static int cpu_callback(struct notifier_block *nfb,
			unsigned long action,
			void *hcpu)
{
	switch (action) {
#ifdef CONFIG_HOTPLUG_CPU
	case CPU_DEAD:
	case CPU_DEAD_FROZEN:
		takeover_tasklets((unsigned long)hcpu);
		break;
#endif /* CONFIG_HOTPLUG_CPU */
	}
	return NOTIFY_OK;
}

static struct notifier_block cpu_nfb = {
	.notifier_call = cpu_callback
};

static struct smp_hotplug_thread softirq_threads = {
	.store			= &ksoftirqd,
	.thread_should_run	= ksoftirqd_should_run,
	.thread_fn		= run_ksoftirqd,
	.thread_comm		= "ksoftirqd/%u",
};

static __init int spawn_ksoftirqd(void)
{
	register_cpu_notifier(&cpu_nfb);

	BUG_ON(smpboot_register_percpu_thread(&softirq_threads));

	return 0;
}
early_initcall(spawn_ksoftirqd);
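
/*
 * smpboot_register_percpu_thread() creates one ksoftirqd/N thread per
 * possible CPU and parks/unparks them across hotplug transitions, which
 * is why the notifier above only has to handle tasklet takeover.
 */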

/*
 * [ These __weak aliases are kept in a separate compilation unit, so that
 *   GCC does not inline them incorrectly. ]
 */

int __init __weak early_irq_init(void)
{
	return 0;
}

int __init __weak arch_probe_nr_irqs(void)
{
	return NR_IRQS_LEGACY;
}

int __init __weak arch_early_irq_init(void)
{
	return 0;
}