/*
 *	linux/kernel/softirq.c
 *
 *	Copyright (C) 1992 Linus Torvalds
 *
 *	Distribute under GPLv2.
 *
 *	Rewritten. Old one was good in 2.2, but in 2.3 it was immoral. --ANK (990903)
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/export.h>
#include <linux/kernel_stat.h>
#include <linux/interrupt.h>
#include <linux/init.h>
#include <linux/mm.h>
#include <linux/notifier.h>
#include <linux/percpu.h>
#include <linux/cpu.h>
#include <linux/freezer.h>
#include <linux/kthread.h>
#include <linux/rcupdate.h>
#include <linux/ftrace.h>
#include <linux/smp.h>
#include <linux/smpboot.h>
#include <linux/tick.h>
#include <linux/irq.h>

#define CREATE_TRACE_POINTS
#include <trace/events/irq.h>

/*
   - No shared variables, all the data are CPU local.
   - If a softirq needs serialization, let it serialize itself
     by its own spinlocks.
   - Even if softirq is serialized, only local cpu is marked for
     execution. Hence, we get something sort of weak cpu binding.
     Though it is still not clear whether it will result in better
     locality or not.

   Examples:
   - NET RX softirq. It is multithreaded and does not require
     any global serialization.
   - NET TX softirq. It kicks software netdevice queues, hence
     it is logically serialized per device, but this serialization
     is invisible to common code.
   - Tasklets: serialized with respect to themselves.
 */

#ifndef __ARCH_IRQ_STAT
irq_cpustat_t irq_stat[NR_CPUS] ____cacheline_aligned;
EXPORT_SYMBOL(irq_stat);
#endif

static struct softirq_action softirq_vec[NR_SOFTIRQS] __cacheline_aligned_in_smp;

DEFINE_PER_CPU(struct task_struct *, ksoftirqd);

const char * const softirq_to_name[NR_SOFTIRQS] = {
	"HI", "TIMER", "NET_TX", "NET_RX", "BLOCK", "IRQ_POLL",
	"TASKLET", "SCHED", "HRTIMER", "RCU"
};

/*
 * We cannot loop indefinitely here to avoid userspace starvation,
 * but we also don't want to introduce a worst case 1/HZ latency
 * to the pending events, so let the scheduler balance
 * the softirq load for us.
 */
static void wakeup_softirqd(void)
{
	/* Interrupts are disabled: no need to stop preemption */
	struct task_struct *tsk = __this_cpu_read(ksoftirqd);

	if (tsk && tsk->state != TASK_RUNNING)
		wake_up_process(tsk);
}

/*
 * If ksoftirqd is scheduled, we do not want to process pending softirqs
 * right now. Let ksoftirqd handle this at its own rate, to get fairness.
 */
static bool ksoftirqd_running(void)
{
	struct task_struct *tsk = __this_cpu_read(ksoftirqd);

	return tsk && (tsk->state == TASK_RUNNING);
}

/*
 * preempt_count and SOFTIRQ_OFFSET usage:
 * - preempt_count is changed by SOFTIRQ_OFFSET on entering or leaving
 *   softirq processing.
 * - preempt_count is changed by SOFTIRQ_DISABLE_OFFSET (= 2 * SOFTIRQ_OFFSET)
 *   on local_bh_disable or local_bh_enable.
 * This lets us distinguish between whether we are currently processing
 * softirq and whether we just have bh disabled.
 */
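
/*
 * Illustrative sketch (editorial addition, not kernel documentation):
 * assuming in_serving_softirq() from <linux/preempt.h>, the two states
 * above can be told apart like this:
 *
 *	local_bh_disable();
 *		- softirq_count() == SOFTIRQ_DISABLE_OFFSET
 *		- in_serving_softirq() is false
 *	local_bh_enable();
 *
 * whereas a handler running under __do_softirq() sees SOFTIRQ_OFFSET set
 * and in_serving_softirq() true.
 */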

/*
 * This one is for softirq.c-internal use,
 * where hardirqs are disabled legitimately:
 */
#ifdef CONFIG_TRACE_IRQFLAGS
void __local_bh_disable_ip(unsigned long ip, unsigned int cnt)
{
	unsigned long flags;

	WARN_ON_ONCE(in_irq());

	raw_local_irq_save(flags);
	/*
	 * The preempt tracer hooks into preempt_count_add and will break
	 * lockdep because it calls back into lockdep after SOFTIRQ_OFFSET
	 * is set and before current->softirq_enabled is cleared.
	 * We must manually increment preempt_count here and manually
	 * call the trace_preempt_off later.
	 */
	__preempt_count_add(cnt);
	/*
	 * Were softirqs turned off above:
	 */
	if (softirq_count() == (cnt & SOFTIRQ_MASK))
		trace_softirqs_off(ip);
	raw_local_irq_restore(flags);

	if (preempt_count() == cnt) {
#ifdef CONFIG_DEBUG_PREEMPT
		current->preempt_disable_ip = get_lock_parent_ip();
#endif
		trace_preempt_off(CALLER_ADDR0, get_lock_parent_ip());
	}
}
EXPORT_SYMBOL(__local_bh_disable_ip);
#endif /* CONFIG_TRACE_IRQFLAGS */

static void __local_bh_enable(unsigned int cnt)
{
	lockdep_assert_irqs_disabled();

	if (softirq_count() == (cnt & SOFTIRQ_MASK))
		trace_softirqs_on(_RET_IP_);
	preempt_count_sub(cnt);
}

/*
 * Special-case - softirqs can safely be enabled in
 * cond_resched_softirq(), or by __do_softirq(),
 * without processing still-pending softirqs:
 */
void _local_bh_enable(void)
{
	WARN_ON_ONCE(in_irq());
	__local_bh_enable(SOFTIRQ_DISABLE_OFFSET);
}
EXPORT_SYMBOL(_local_bh_enable);

void __local_bh_enable_ip(unsigned long ip, unsigned int cnt)
{
	WARN_ON_ONCE(in_irq());
	lockdep_assert_irqs_enabled();
#ifdef CONFIG_TRACE_IRQFLAGS
	local_irq_disable();
#endif
	/*
	 * Are softirqs going to be turned on now:
	 */
	if (softirq_count() == SOFTIRQ_DISABLE_OFFSET)
		trace_softirqs_on(ip);
	/*
	 * Keep preemption disabled until we are done with
	 * softirq processing:
	 */
	preempt_count_sub(cnt - 1);

	if (unlikely(!in_interrupt() && local_softirq_pending())) {
		/*
		 * Run softirq if any pending. And do it in its own stack
		 * as we may be calling this deep in a task call stack already.
		 */
		do_softirq();
	}

	preempt_count_dec();
#ifdef CONFIG_TRACE_IRQFLAGS
	local_irq_enable();
#endif
	preempt_check_resched();
}
EXPORT_SYMBOL(__local_bh_enable_ip);
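
/*
 * Usage sketch (editorial addition): code sharing data with a softirq
 * handler on this CPU brackets its critical section with the bh helpers,
 * which land in __local_bh_disable_ip()/__local_bh_enable_ip():
 *
 *	local_bh_disable();
 *	...touch data also used from softirq context...
 *	local_bh_enable();	(may run pending softirqs right here)
 *
 * The elided body is a hypothetical placeholder; only the bracketing
 * pattern is prescribed.
 */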

/*
 * We restart softirq processing for at most MAX_SOFTIRQ_RESTART times,
 * but break the loop if need_resched() is set or after 2 ms.
 * The MAX_SOFTIRQ_TIME provides a nice upper bound in most cases, but in
 * certain cases, such as stop_machine(), jiffies may cease to
 * increment and so we need the MAX_SOFTIRQ_RESTART limit as
 * well to make sure we eventually return from this method.
 *
 * These limits have been established via experimentation.
 * The two things to balance are latency and fairness -
 * we want to handle softirqs as soon as possible, but they
 * should not be able to lock up the box.
 */
#define MAX_SOFTIRQ_TIME  msecs_to_jiffies(2)
#define MAX_SOFTIRQ_RESTART 10

#ifdef CONFIG_TRACE_IRQFLAGS
/*
 * When we run softirqs from irq_exit() and thus on the hardirq stack we need
 * to keep the lockdep irq context tracking as tight as possible in order to
 * not mis-qualify lock contexts and miss possible deadlocks.
 */

static inline bool lockdep_softirq_start(void)
{
	bool in_hardirq = false;

	if (trace_hardirq_context(current)) {
		in_hardirq = true;
		trace_hardirq_exit();
	}

	lockdep_softirq_enter();

	return in_hardirq;
}

static inline void lockdep_softirq_end(bool in_hardirq)
{
	lockdep_softirq_exit();

	if (in_hardirq)
		trace_hardirq_enter();
}
#else
static inline bool lockdep_softirq_start(void) { return false; }
static inline void lockdep_softirq_end(bool in_hardirq) { }
#endif

asmlinkage __visible void __softirq_entry __do_softirq(void)
{
	unsigned long end = jiffies + MAX_SOFTIRQ_TIME;
	unsigned long old_flags = current->flags;
	int max_restart = MAX_SOFTIRQ_RESTART;
	struct softirq_action *h;
	bool in_hardirq;
	__u32 pending;
	int softirq_bit;

	/*
	 * Mask out PF_MEMALLOC as the current task context is borrowed for the
	 * softirq. A softirq handler, such as network RX, might set PF_MEMALLOC
	 * again if the socket is related to swap.
	 */
	current->flags &= ~PF_MEMALLOC;

	pending = local_softirq_pending();
	account_irq_enter_time(current);

	__local_bh_disable_ip(_RET_IP_, SOFTIRQ_OFFSET);
	in_hardirq = lockdep_softirq_start();

restart:
	/* Reset the pending bitmask before enabling irqs */
	set_softirq_pending(0);

	local_irq_enable();

	h = softirq_vec;

	while ((softirq_bit = ffs(pending))) {
		unsigned int vec_nr;
		int prev_count;

		h += softirq_bit - 1;

		vec_nr = h - softirq_vec;
		prev_count = preempt_count();

		kstat_incr_softirqs_this_cpu(vec_nr);

		trace_softirq_entry(vec_nr);
		h->action(h);
		trace_softirq_exit(vec_nr);
		if (unlikely(prev_count != preempt_count())) {
			pr_err("huh, entered softirq %u %s %p with preempt_count %08x, exited with %08x?\n",
			       vec_nr, softirq_to_name[vec_nr], h->action,
			       prev_count, preempt_count());
			preempt_count_set(prev_count);
		}
		h++;
		pending >>= softirq_bit;
	}

	rcu_bh_qs();
	local_irq_disable();

	pending = local_softirq_pending();
	if (pending) {
		if (time_before(jiffies, end) && !need_resched() &&
		    --max_restart)
			goto restart;

		wakeup_softirqd();
	}

	lockdep_softirq_end(in_hardirq);
	account_irq_exit_time(current);
	__local_bh_enable(SOFTIRQ_OFFSET);
	WARN_ON_ONCE(in_interrupt());
	current_restore_flags(old_flags, PF_MEMALLOC);
}

asmlinkage __visible void do_softirq(void)
{
	__u32 pending;
	unsigned long flags;

	if (in_interrupt())
		return;

	local_irq_save(flags);

	pending = local_softirq_pending();

	if (pending && !ksoftirqd_running())
		do_softirq_own_stack();

	local_irq_restore(flags);
}

/*
 * Enter an interrupt context.
 */
void irq_enter(void)
{
	rcu_irq_enter();
	if (is_idle_task(current) && !in_interrupt()) {
		/*
		 * Prevent raise_softirq from needlessly waking up ksoftirqd
		 * here, as softirq will be serviced on return from interrupt.
		 */
		local_bh_disable();
		tick_irq_enter();
		_local_bh_enable();
	}

	__irq_enter();
}

static inline void invoke_softirq(void)
{
	if (ksoftirqd_running())
		return;

	if (!force_irqthreads) {
#ifdef CONFIG_HAVE_IRQ_EXIT_ON_IRQ_STACK
		/*
		 * We can safely execute softirq on the current stack if
		 * it is the irq stack, because it should be near empty
		 * at this stage.
		 */
		__do_softirq();
#else
		/*
		 * Otherwise, irq_exit() is called on the task stack, which
		 * can already be quite deep. So run the softirq on its own
		 * stack to prevent any overrun.
		 */
		do_softirq_own_stack();
#endif
	} else {
		wakeup_softirqd();
	}
}

static inline void tick_irq_exit(void)
{
#ifdef CONFIG_NO_HZ_COMMON
	int cpu = smp_processor_id();

	/* Make sure that timer wheel updates are propagated */
	if ((idle_cpu(cpu) && !need_resched()) || tick_nohz_full_cpu(cpu)) {
		if (!in_interrupt())
			tick_nohz_irq_exit();
	}
#endif
}

/*
 * Exit an interrupt context. Process softirqs if needed and possible:
 */
void irq_exit(void)
{
#ifndef __ARCH_IRQ_EXIT_IRQS_DISABLED
	local_irq_disable();
#else
	lockdep_assert_irqs_disabled();
#endif
	account_irq_exit_time(current);
	preempt_count_sub(HARDIRQ_OFFSET);
	if (!in_interrupt() && local_softirq_pending())
		invoke_softirq();

	tick_irq_exit();
	rcu_irq_exit();
	trace_hardirq_exit(); /* must be last! */
}

/*
 * This function must run with irqs disabled!
 */
inline void raise_softirq_irqoff(unsigned int nr)
{
	__raise_softirq_irqoff(nr);

	/*
	 * If we're in an interrupt or softirq, we're done
	 * (this also catches softirq-disabled code). We will
	 * actually run the softirq once we return from
	 * the irq or softirq.
	 *
	 * Otherwise we wake up ksoftirqd to make sure we
	 * schedule the softirq soon.
	 */
	if (!in_interrupt())
		wakeup_softirqd();
}

void raise_softirq(unsigned int nr)
{
	unsigned long flags;

	local_irq_save(flags);
	raise_softirq_irqoff(nr);
	local_irq_restore(flags);
}

void __raise_softirq_irqoff(unsigned int nr)
{
	trace_softirq_raise(nr);
	or_softirq_pending(1UL << nr);
}

void open_softirq(int nr, void (*action)(struct softirq_action *))
{
	softirq_vec[nr].action = action;
}
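
/*
 * Registration happens once at init time. As an illustration (sketch of
 * how net/core/dev.c wires up its TX vector; see that file for the real
 * thing):
 *
 *	open_softirq(NET_TX_SOFTIRQ, net_tx_action);
 *
 * The vector is then marked pending from hard-irq context with
 * raise_softirq(NET_TX_SOFTIRQ).
 */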

/*
 * Tasklets
 */
struct tasklet_head {
	struct tasklet_struct *head;
	struct tasklet_struct **tail;
};

static DEFINE_PER_CPU(struct tasklet_head, tasklet_vec);
static DEFINE_PER_CPU(struct tasklet_head, tasklet_hi_vec);

static void __tasklet_schedule_common(struct tasklet_struct *t,
				      struct tasklet_head __percpu *headp,
				      unsigned int softirq_nr)
{
	struct tasklet_head *head;
	unsigned long flags;

	local_irq_save(flags);
	head = this_cpu_ptr(headp);
	t->next = NULL;
	*head->tail = t;
	head->tail = &(t->next);
	raise_softirq_irqoff(softirq_nr);
	local_irq_restore(flags);
}

void __tasklet_schedule(struct tasklet_struct *t)
{
	__tasklet_schedule_common(t, &tasklet_vec,
				  TASKLET_SOFTIRQ);
}
EXPORT_SYMBOL(__tasklet_schedule);

void __tasklet_hi_schedule(struct tasklet_struct *t)
{
	__tasklet_schedule_common(t, &tasklet_hi_vec,
				  HI_SOFTIRQ);
}
EXPORT_SYMBOL(__tasklet_hi_schedule);

static void tasklet_action_common(struct softirq_action *a,
				  struct tasklet_head *tl_head,
				  unsigned int softirq_nr)
{
	struct tasklet_struct *list;

	local_irq_disable();
	list = tl_head->head;
	tl_head->head = NULL;
	tl_head->tail = &tl_head->head;
	local_irq_enable();

	while (list) {
		struct tasklet_struct *t = list;

		list = list->next;

		if (tasklet_trylock(t)) {
			if (!atomic_read(&t->count)) {
				if (!test_and_clear_bit(TASKLET_STATE_SCHED,
							&t->state))
					BUG();
				t->func(t->data);
				tasklet_unlock(t);
				continue;
			}
			tasklet_unlock(t);
		}

		local_irq_disable();
		t->next = NULL;
		*tl_head->tail = t;
		tl_head->tail = &t->next;
		__raise_softirq_irqoff(softirq_nr);
		local_irq_enable();
	}
}

static __latent_entropy void tasklet_action(struct softirq_action *a)
{
	tasklet_action_common(a, this_cpu_ptr(&tasklet_vec), TASKLET_SOFTIRQ);
}

static __latent_entropy void tasklet_hi_action(struct softirq_action *a)
{
	tasklet_action_common(a, this_cpu_ptr(&tasklet_hi_vec), HI_SOFTIRQ);
}

void tasklet_init(struct tasklet_struct *t,
		  void (*func)(unsigned long), unsigned long data)
{
	t->next = NULL;
	t->state = 0;
	atomic_set(&t->count, 0);
	t->func = func;
	t->data = data;
}
EXPORT_SYMBOL(tasklet_init);
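
/*
 * Typical driver usage (illustrative sketch; my_tasklet_fn and my_dev are
 * hypothetical names):
 *
 *	static void my_tasklet_fn(unsigned long data);
 *	static DECLARE_TASKLET(my_tasklet, my_tasklet_fn, 0);
 *
 * or, for per-device setup:
 *
 *	tasklet_init(&my_dev->tasklet, my_tasklet_fn, (unsigned long)my_dev);
 *
 * followed by tasklet_schedule(&my_tasklet) from the interrupt handler.
 */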

void tasklet_kill(struct tasklet_struct *t)
{
	if (in_interrupt())
		pr_notice("Attempt to kill tasklet from interrupt\n");

	while (test_and_set_bit(TASKLET_STATE_SCHED, &t->state)) {
		do {
			yield();
		} while (test_bit(TASKLET_STATE_SCHED, &t->state));
	}
	tasklet_unlock_wait(t);
	clear_bit(TASKLET_STATE_SCHED, &t->state);
}
EXPORT_SYMBOL(tasklet_kill);
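
/*
 * Teardown sketch (illustrative; my_dev is a hypothetical device): first
 * make sure the tasklet can no longer be rescheduled, e.g. by silencing
 * its interrupt source, then wait for any running instance to finish:
 *
 *	tasklet_kill(&my_dev->tasklet);
 *
 * Only afterwards is it safe to free the memory the tasklet uses.
 */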

/*
 * tasklet_hrtimer
 */

/*
 * The trampoline is called when the hrtimer expires. It schedules a tasklet
 * to run __tasklet_hrtimer_trampoline() which in turn will call the intended
 * hrtimer callback, but from softirq context.
 */
static enum hrtimer_restart __hrtimer_tasklet_trampoline(struct hrtimer *timer)
{
	struct tasklet_hrtimer *ttimer =
		container_of(timer, struct tasklet_hrtimer, timer);

	tasklet_hi_schedule(&ttimer->tasklet);
	return HRTIMER_NORESTART;
}

/*
 * Helper function which calls the hrtimer callback from
 * tasklet/softirq context
 */
static void __tasklet_hrtimer_trampoline(unsigned long data)
{
	struct tasklet_hrtimer *ttimer = (void *)data;
	enum hrtimer_restart restart;

	restart = ttimer->function(&ttimer->timer);
	if (restart != HRTIMER_NORESTART)
		hrtimer_restart(&ttimer->timer);
}

/**
 * tasklet_hrtimer_init - Init a tasklet/hrtimer combo for softirq callbacks
 * @ttimer:	 tasklet_hrtimer which is initialized
 * @function:	 hrtimer callback function which gets called from softirq context
 * @which_clock: clock id (CLOCK_MONOTONIC/CLOCK_REALTIME)
 * @mode:	 hrtimer mode (HRTIMER_MODE_ABS/HRTIMER_MODE_REL)
 */
void tasklet_hrtimer_init(struct tasklet_hrtimer *ttimer,
			  enum hrtimer_restart (*function)(struct hrtimer *),
			  clockid_t which_clock, enum hrtimer_mode mode)
{
	hrtimer_init(&ttimer->timer, which_clock, mode);
	ttimer->timer.function = __hrtimer_tasklet_trampoline;
	tasklet_init(&ttimer->tasklet, __tasklet_hrtimer_trampoline,
		     (unsigned long)ttimer);
	ttimer->function = function;
}
EXPORT_SYMBOL_GPL(tasklet_hrtimer_init);
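
/*
 * Usage sketch (illustrative; my_cb and my_timer are hypothetical):
 *
 *	static enum hrtimer_restart my_cb(struct hrtimer *t)
 *	{
 *		...runs in softirq context...
 *		return HRTIMER_NORESTART;
 *	}
 *
 *	tasklet_hrtimer_init(&my_timer, my_cb, CLOCK_MONOTONIC,
 *			     HRTIMER_MODE_REL);
 *	hrtimer_start(&my_timer.timer, ms_to_ktime(10), HRTIMER_MODE_REL);
 */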

void __init softirq_init(void)
{
	int cpu;

	for_each_possible_cpu(cpu) {
		per_cpu(tasklet_vec, cpu).tail =
			&per_cpu(tasklet_vec, cpu).head;
		per_cpu(tasklet_hi_vec, cpu).tail =
			&per_cpu(tasklet_hi_vec, cpu).head;
	}

	open_softirq(TASKLET_SOFTIRQ, tasklet_action);
	open_softirq(HI_SOFTIRQ, tasklet_hi_action);
}

static int ksoftirqd_should_run(unsigned int cpu)
{
	return local_softirq_pending();
}

static void run_ksoftirqd(unsigned int cpu)
{
	local_irq_disable();
	if (local_softirq_pending()) {
		/*
		 * We can safely run softirq on the current stack, as we are
		 * not deep in the task stack here.
		 */
		__do_softirq();
		local_irq_enable();
		cond_resched();
		return;
	}
	local_irq_enable();
}

#ifdef CONFIG_HOTPLUG_CPU
/*
 * tasklet_kill_immediate is called to remove a tasklet which can already be
 * scheduled for execution on @cpu.
 *
 * Unlike tasklet_kill, this function removes the tasklet
 * _immediately_, even if the tasklet is in TASKLET_STATE_SCHED state.
 *
 * When this function is called, @cpu must be in the CPU_DEAD state.
 */
void tasklet_kill_immediate(struct tasklet_struct *t, unsigned int cpu)
{
	struct tasklet_struct **i;

	BUG_ON(cpu_online(cpu));
	BUG_ON(test_bit(TASKLET_STATE_RUN, &t->state));

	if (!test_bit(TASKLET_STATE_SCHED, &t->state))
		return;

	/* CPU is dead, so no lock needed. */
	for (i = &per_cpu(tasklet_vec, cpu).head; *i; i = &(*i)->next) {
		if (*i == t) {
			*i = t->next;
			/* If this was the tail element, move the tail ptr */
			if (*i == NULL)
				per_cpu(tasklet_vec, cpu).tail = i;
			return;
		}
	}
	BUG();
}

static int takeover_tasklets(unsigned int cpu)
{
	/* CPU is dead, so no lock needed. */
	local_irq_disable();

	/* Find end, append list for that CPU. */
	if (&per_cpu(tasklet_vec, cpu).head != per_cpu(tasklet_vec, cpu).tail) {
		*__this_cpu_read(tasklet_vec.tail) = per_cpu(tasklet_vec, cpu).head;
		this_cpu_write(tasklet_vec.tail, per_cpu(tasklet_vec, cpu).tail);
		per_cpu(tasklet_vec, cpu).head = NULL;
		per_cpu(tasklet_vec, cpu).tail = &per_cpu(tasklet_vec, cpu).head;
	}
	raise_softirq_irqoff(TASKLET_SOFTIRQ);

	if (&per_cpu(tasklet_hi_vec, cpu).head != per_cpu(tasklet_hi_vec, cpu).tail) {
		*__this_cpu_read(tasklet_hi_vec.tail) = per_cpu(tasklet_hi_vec, cpu).head;
		__this_cpu_write(tasklet_hi_vec.tail, per_cpu(tasklet_hi_vec, cpu).tail);
		per_cpu(tasklet_hi_vec, cpu).head = NULL;
		per_cpu(tasklet_hi_vec, cpu).tail = &per_cpu(tasklet_hi_vec, cpu).head;
	}
	raise_softirq_irqoff(HI_SOFTIRQ);

	local_irq_enable();
	return 0;
}
#else
#define takeover_tasklets	NULL
#endif /* CONFIG_HOTPLUG_CPU */

static struct smp_hotplug_thread softirq_threads = {
	.store			= &ksoftirqd,
	.thread_should_run	= ksoftirqd_should_run,
	.thread_fn		= run_ksoftirqd,
	.thread_comm		= "ksoftirqd/%u",
};

static __init int spawn_ksoftirqd(void)
{
	cpuhp_setup_state_nocalls(CPUHP_SOFTIRQ_DEAD, "softirq:dead", NULL,
				  takeover_tasklets);
	BUG_ON(smpboot_register_percpu_thread(&softirq_threads));

	return 0;
}
early_initcall(spawn_ksoftirqd);

/*
 * [ These __weak aliases are kept in a separate compilation unit, so that
 *   GCC does not inline them incorrectly. ]
 */

int __init __weak early_irq_init(void)
{
	return 0;
}

int __init __weak arch_probe_nr_irqs(void)
{
	return NR_IRQS_LEGACY;
}

int __init __weak arch_early_irq_init(void)
{
	return 0;
}

unsigned int __weak arch_dynirq_lower_bound(unsigned int from)
{
	return from;
}