// SPDX-License-Identifier: GPL-2.0-only
/*
 *	linux/kernel/softirq.c
 *
 *	Copyright (C) 1992 Linus Torvalds
 *
 *	Rewritten. Old one was good in 2.2, but in 2.3 it was immoral. --ANK (990903)
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/export.h>
#include <linux/kernel_stat.h>
#include <linux/interrupt.h>
#include <linux/init.h>
#include <linux/mm.h>
#include <linux/notifier.h>
#include <linux/percpu.h>
#include <linux/cpu.h>
#include <linux/freezer.h>
#include <linux/kthread.h>
#include <linux/rcupdate.h>
#include <linux/ftrace.h>
#include <linux/smp.h>
#include <linux/smpboot.h>
#include <linux/tick.h>
#include <linux/irq.h>
#include <linux/wait_bit.h>

#include <asm/softirq_stack.h>

#define CREATE_TRACE_POINTS
#include <trace/events/irq.h>

/*
   - No shared variables, all the data are CPU local.
   - If a softirq needs serialization, let it serialize itself
     by its own spinlocks.
   - Even if softirq is serialized, only local cpu is marked for
     execution. Hence, we get something sort of weak cpu binding.
     Though it is still not clear whether this will result in better
     locality or not.

   Examples:
   - NET RX softirq. It is multithreaded and does not require
     any global serialization.
   - NET TX softirq. It kicks software netdevice queues, hence
     it is logically serialized per device, but this serialization
     is invisible to common code.
   - Tasklets: serialized wrt itself.
 */

#ifndef __ARCH_IRQ_STAT
DEFINE_PER_CPU_ALIGNED(irq_cpustat_t, irq_stat);
EXPORT_PER_CPU_SYMBOL(irq_stat);
#endif

static struct softirq_action softirq_vec[NR_SOFTIRQS] __cacheline_aligned_in_smp;

DEFINE_PER_CPU(struct task_struct *, ksoftirqd);

const char * const softirq_to_name[NR_SOFTIRQS] = {
	"HI", "TIMER", "NET_TX", "NET_RX", "BLOCK", "IRQ_POLL",
	"TASKLET", "SCHED", "HRTIMER", "RCU"
};

/*
 * We cannot loop indefinitely here to avoid userspace starvation,
 * but we also don't want to introduce a worst case 1/HZ latency
 * to the pending events, so let the scheduler balance
 * the softirq load for us.
 */
static void wakeup_softirqd(void)
{
	/* Interrupts are disabled: no need to stop preemption */
	struct task_struct *tsk = __this_cpu_read(ksoftirqd);

	if (tsk && tsk->state != TASK_RUNNING)
		wake_up_process(tsk);
}

/*
 * If ksoftirqd is scheduled, we do not want to process pending softirqs
 * right now. Let ksoftirqd handle this at its own rate, to get fairness,
 * unless we're doing some of the synchronous softirqs.
 */
#define SOFTIRQ_NOW_MASK ((1 << HI_SOFTIRQ) | (1 << TASKLET_SOFTIRQ))
static bool ksoftirqd_running(unsigned long pending)
{
	struct task_struct *tsk = __this_cpu_read(ksoftirqd);

	if (pending & SOFTIRQ_NOW_MASK)
		return false;
	return tsk && (tsk->state == TASK_RUNNING) &&
		!__kthread_should_park(tsk);
}
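
/*
 * Illustrative sketch (not part of the original file): how the deferral
 * decision above plays out for two pending masks. These are worked
 * examples, not output of real code.
 *
 *	pending = 1UL << NET_RX_SOFTIRQ;
 *	ksoftirqd_running(pending);	// true while ksoftirqd is RUNNING:
 *					// NET_RX is deferred to ksoftirqd
 *
 *	pending = 1UL << TASKLET_SOFTIRQ;
 *	ksoftirqd_running(pending);	// always false: TASKLET_SOFTIRQ is
 *					// in SOFTIRQ_NOW_MASK, so it is
 *					// handled synchronously
 */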

#ifdef CONFIG_TRACE_IRQFLAGS
DEFINE_PER_CPU(int, hardirqs_enabled);
DEFINE_PER_CPU(int, hardirq_context);
EXPORT_PER_CPU_SYMBOL_GPL(hardirqs_enabled);
EXPORT_PER_CPU_SYMBOL_GPL(hardirq_context);
#endif

/*
 * preempt_count and SOFTIRQ_OFFSET usage:
 * - preempt_count is changed by SOFTIRQ_OFFSET on entering or leaving
 *   softirq processing.
 * - preempt_count is changed by SOFTIRQ_DISABLE_OFFSET (= 2 * SOFTIRQ_OFFSET)
 *   on local_bh_disable or local_bh_enable.
 * This lets us distinguish between whether we are currently processing
 * softirq and whether we just have bh disabled.
 */
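
/*
 * A minimal sketch of what the two offsets let us distinguish, assuming
 * the usual helpers from <linux/preempt.h> (in_softirq(),
 * in_serving_softirq(), softirq_count()):
 *
 *	local_bh_disable();		// softirq_count() += SOFTIRQ_DISABLE_OFFSET
 *	WARN_ON(!in_softirq());		// bh is disabled here ...
 *	WARN_ON(in_serving_softirq());	// ... but we are not *running* one
 *	local_bh_enable();
 *
 * While a handler runs inside __do_softirq(), softirq_count() instead
 * carries a single SOFTIRQ_OFFSET, so in_serving_softirq() is true.
 */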

#ifdef CONFIG_TRACE_IRQFLAGS
/*
 * This is for softirq.c-internal use, where hardirqs are disabled
 * legitimately:
 */
void __local_bh_disable_ip(unsigned long ip, unsigned int cnt)
{
	unsigned long flags;

	WARN_ON_ONCE(in_irq());

	raw_local_irq_save(flags);
	/*
	 * The preempt tracer hooks into preempt_count_add and will break
	 * lockdep because it calls back into lockdep after SOFTIRQ_OFFSET
	 * is set and before current->softirq_enabled is cleared.
	 * We must manually increment preempt_count here and manually
	 * call the trace_preempt_off later.
	 */
	__preempt_count_add(cnt);
	/*
	 * Were softirqs turned off above:
	 */
	if (softirq_count() == (cnt & SOFTIRQ_MASK))
		lockdep_softirqs_off(ip);
	raw_local_irq_restore(flags);

	if (preempt_count() == cnt) {
#ifdef CONFIG_DEBUG_PREEMPT
		current->preempt_disable_ip = get_lock_parent_ip();
#endif
		trace_preempt_off(CALLER_ADDR0, get_lock_parent_ip());
	}
}
EXPORT_SYMBOL(__local_bh_disable_ip);
#endif /* CONFIG_TRACE_IRQFLAGS */

static void __local_bh_enable(unsigned int cnt)
{
	lockdep_assert_irqs_disabled();

	if (preempt_count() == cnt)
		trace_preempt_on(CALLER_ADDR0, get_lock_parent_ip());

	if (softirq_count() == (cnt & SOFTIRQ_MASK))
		lockdep_softirqs_on(_RET_IP_);

	__preempt_count_sub(cnt);
}

/*
 * Special-case - softirqs can safely be enabled by __do_softirq(),
 * without processing still-pending softirqs:
 */
void _local_bh_enable(void)
{
	WARN_ON_ONCE(in_irq());
	__local_bh_enable(SOFTIRQ_DISABLE_OFFSET);
}
EXPORT_SYMBOL(_local_bh_enable);

void __local_bh_enable_ip(unsigned long ip, unsigned int cnt)
{
	WARN_ON_ONCE(in_irq());
	lockdep_assert_irqs_enabled();
#ifdef CONFIG_TRACE_IRQFLAGS
	local_irq_disable();
#endif
	/*
	 * Are softirqs going to be turned on now:
	 */
	if (softirq_count() == SOFTIRQ_DISABLE_OFFSET)
		lockdep_softirqs_on(ip);
	/*
	 * Keep preemption disabled until we are done with
	 * softirq processing:
	 */
	__preempt_count_sub(cnt - 1);

	if (unlikely(!in_interrupt() && local_softirq_pending())) {
		/*
		 * Run any pending softirqs, and do it on the softirq's own
		 * stack, as we may already be deep in a task's call stack.
		 */
		do_softirq();
	}

	preempt_count_dec();
#ifdef CONFIG_TRACE_IRQFLAGS
	local_irq_enable();
#endif
	preempt_check_resched();
}
EXPORT_SYMBOL(__local_bh_enable_ip);
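
/*
 * Usage sketch (hypothetical driver code, not part of this file): the
 * local_bh_disable()/local_bh_enable() pair built on the helpers above
 * is the canonical way to protect per-CPU state shared with a softirq
 * handler on the same CPU; my_stats is a made-up per-CPU variable.
 *
 *	local_bh_disable();
 *	__this_cpu_inc(my_stats.packets);
 *	local_bh_enable();		// may run pending softirqs here
 */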

static inline void invoke_softirq(void)
{
	if (ksoftirqd_running(local_softirq_pending()))
		return;

	if (!force_irqthreads) {
#ifdef CONFIG_HAVE_IRQ_EXIT_ON_IRQ_STACK
		/*
		 * We can safely execute the softirq on the current stack if
		 * it is the irq stack, because it should be near empty
		 * at this stage.
		 */
		__do_softirq();
#else
		/*
		 * Otherwise, irq_exit() is called on the task stack, which
		 * may already be deep. So run softirqs on their own stack
		 * to prevent overrunning it.
		 */
		do_softirq_own_stack();
#endif
	} else {
		wakeup_softirqd();
	}
}

asmlinkage __visible void do_softirq(void)
{
	__u32 pending;
	unsigned long flags;

	if (in_interrupt())
		return;

	local_irq_save(flags);

	pending = local_softirq_pending();

	if (pending && !ksoftirqd_running(pending))
		do_softirq_own_stack();

	local_irq_restore(flags);
}

/*
 * We restart softirq processing for at most MAX_SOFTIRQ_RESTART times,
 * but break the loop if need_resched() is set or after 2 ms.
 * The MAX_SOFTIRQ_TIME provides a nice upper bound in most cases, but in
 * certain cases, such as stop_machine(), jiffies may cease to
 * increment and so we need the MAX_SOFTIRQ_RESTART limit as
 * well to make sure we eventually return from this method.
 *
 * These limits have been established via experimentation.
 * The two things to balance are latency and fairness -
 * we want to handle softirqs as soon as possible, but they
 * should not be able to lock up the box.
 */
#define MAX_SOFTIRQ_TIME  msecs_to_jiffies(2)
#define MAX_SOFTIRQ_RESTART 10
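
/*
 * Worked example of the two limits (values assume the common HZ choices):
 * msecs_to_jiffies(2) is 2 jiffies at HZ=1000, but rounds up to a single
 * 4 ms jiffy at HZ=250, so the time bound is coarse. Together with
 * MAX_SOFTIRQ_RESTART, a __do_softirq() invocation is bounded by
 * whichever of "~2 ms elapsed", "need_resched() set" or "10 restarts"
 * happens first.
 */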

#ifdef CONFIG_TRACE_IRQFLAGS
/*
 * When we run softirqs from irq_exit() and thus on the hardirq stack we need
 * to keep the lockdep irq context tracking as tight as possible in order to
 * not mis-qualify lock contexts and miss possible deadlocks.
 */

static inline bool lockdep_softirq_start(void)
{
	bool in_hardirq = false;

	if (lockdep_hardirq_context()) {
		in_hardirq = true;
		lockdep_hardirq_exit();
	}

	lockdep_softirq_enter();

	return in_hardirq;
}

static inline void lockdep_softirq_end(bool in_hardirq)
{
	lockdep_softirq_exit();

	if (in_hardirq)
		lockdep_hardirq_enter();
}
#else
static inline bool lockdep_softirq_start(void) { return false; }
static inline void lockdep_softirq_end(bool in_hardirq) { }
#endif

asmlinkage __visible void __softirq_entry __do_softirq(void)
{
	unsigned long end = jiffies + MAX_SOFTIRQ_TIME;
	unsigned long old_flags = current->flags;
	int max_restart = MAX_SOFTIRQ_RESTART;
	struct softirq_action *h;
	bool in_hardirq;
	__u32 pending;
	int softirq_bit;

	/*
	 * Mask out PF_MEMALLOC as the current task context is borrowed for the
	 * softirq. A softirq handler, such as network RX, might set PF_MEMALLOC
	 * again if the socket is related to swapping.
	 */
	current->flags &= ~PF_MEMALLOC;

	pending = local_softirq_pending();

	__local_bh_disable_ip(_RET_IP_, SOFTIRQ_OFFSET);
	in_hardirq = lockdep_softirq_start();
	account_softirq_enter(current);

restart:
	/* Reset the pending bitmask before enabling irqs */
	set_softirq_pending(0);

	local_irq_enable();

	h = softirq_vec;

	while ((softirq_bit = ffs(pending))) {
		unsigned int vec_nr;
		int prev_count;

		h += softirq_bit - 1;

		vec_nr = h - softirq_vec;
		prev_count = preempt_count();

		kstat_incr_softirqs_this_cpu(vec_nr);

		trace_softirq_entry(vec_nr);
		h->action(h);
		trace_softirq_exit(vec_nr);
		if (unlikely(prev_count != preempt_count())) {
			pr_err("huh, entered softirq %u %s %p with preempt_count %08x, exited with %08x?\n",
			       vec_nr, softirq_to_name[vec_nr], h->action,
			       prev_count, preempt_count());
			preempt_count_set(prev_count);
		}
		h++;
		pending >>= softirq_bit;
	}

	if (__this_cpu_read(ksoftirqd) == current)
		rcu_softirq_qs();
	local_irq_disable();

	pending = local_softirq_pending();
	if (pending) {
		if (time_before(jiffies, end) && !need_resched() &&
		    --max_restart)
			goto restart;

		wakeup_softirqd();
	}

	account_softirq_exit(current);
	lockdep_softirq_end(in_hardirq);
	__local_bh_enable(SOFTIRQ_OFFSET);
	WARN_ON_ONCE(in_interrupt());
	current_restore_flags(old_flags, PF_MEMALLOC);
}
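
/*
 * Worked example of the ffs() scan above (illustrative only): with
 * pending = 0x211, i.e. HI (bit 0), BLOCK (bit 4) and RCU (bit 9)
 * raised, the loop runs:
 *
 *	ffs(0x211) = 1 -> vec_nr 0 (HI),    pending >>= 1 -> 0x108
 *	ffs(0x108) = 4 -> vec_nr 4 (BLOCK), pending >>= 4 -> 0x10
 *	ffs(0x10)  = 5 -> vec_nr 9 (RCU),   pending >>= 5 -> 0
 *
 * i.e. handlers always run in ascending softirq number, HI first.
 */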

/**
 * irq_enter_rcu - Enter an interrupt context with RCU watching
 */
void irq_enter_rcu(void)
{
	__irq_enter_raw();

	if (is_idle_task(current) && (irq_count() == HARDIRQ_OFFSET))
		tick_irq_enter();

	account_hardirq_enter(current);
}

/**
 * irq_enter - Enter an interrupt context including RCU update
 */
void irq_enter(void)
{
	rcu_irq_enter();
	irq_enter_rcu();
}

static inline void tick_irq_exit(void)
{
#ifdef CONFIG_NO_HZ_COMMON
	int cpu = smp_processor_id();

	/* Make sure that timer wheel updates are propagated */
	if ((idle_cpu(cpu) && !need_resched()) || tick_nohz_full_cpu(cpu)) {
		if (!in_irq())
			tick_nohz_irq_exit();
	}
#endif
}

static inline void __irq_exit_rcu(void)
{
#ifndef __ARCH_IRQ_EXIT_IRQS_DISABLED
	local_irq_disable();
#else
	lockdep_assert_irqs_disabled();
#endif
	account_hardirq_exit(current);
	preempt_count_sub(HARDIRQ_OFFSET);
	if (!in_interrupt() && local_softirq_pending())
		invoke_softirq();

	tick_irq_exit();
}

/**
 * irq_exit_rcu() - Exit an interrupt context without updating RCU
 *
 * Also processes softirqs if needed and possible.
 */
void irq_exit_rcu(void)
{
	__irq_exit_rcu();
	/* must be last! */
	lockdep_hardirq_exit();
}

/**
 * irq_exit - Exit an interrupt context, update RCU and lockdep
 *
 * Also processes softirqs if needed and possible.
 */
void irq_exit(void)
{
	__irq_exit_rcu();
	rcu_irq_exit();
	/* must be last! */
	lockdep_hardirq_exit();
}

/*
 * This function must run with irqs disabled!
 */
inline void raise_softirq_irqoff(unsigned int nr)
{
	__raise_softirq_irqoff(nr);

	/*
	 * If we're in an interrupt or softirq, we're done
	 * (this also catches softirq-disabled code). We will
	 * actually run the softirq once we return from
	 * the irq or softirq.
	 *
	 * Otherwise we wake up ksoftirqd to make sure we
	 * schedule the softirq soon.
	 */
	if (!in_interrupt())
		wakeup_softirqd();
}

void raise_softirq(unsigned int nr)
{
	unsigned long flags;

	local_irq_save(flags);
	raise_softirq_irqoff(nr);
	local_irq_restore(flags);
}

void __raise_softirq_irqoff(unsigned int nr)
{
	lockdep_assert_irqs_disabled();
	trace_softirq_raise(nr);
	or_softirq_pending(1UL << nr);
}

void open_softirq(int nr, void (*action)(struct softirq_action *))
{
	softirq_vec[nr].action = action;
}
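
/*
 * Usage sketch (illustrative, not from this file): softirq vectors are
 * fixed at compile time in the NR_SOFTIRQS enum, so open_softirq() is
 * only called by core subsystems during boot, the way softirq_init()
 * below wires up the tasklet vectors:
 *
 *	open_softirq(TASKLET_SOFTIRQ, tasklet_action);
 *
 * A registered vector is then marked pending, from hardirq context or
 * with irqs disabled, via:
 *
 *	raise_softirq_irqoff(TASKLET_SOFTIRQ);	// or raise_softirq()
 *						// if irqs may be enabled
 */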

/*
 * Tasklets
 */
struct tasklet_head {
	struct tasklet_struct *head;
	struct tasklet_struct **tail;
};

static DEFINE_PER_CPU(struct tasklet_head, tasklet_vec);
static DEFINE_PER_CPU(struct tasklet_head, tasklet_hi_vec);

static void __tasklet_schedule_common(struct tasklet_struct *t,
				      struct tasklet_head __percpu *headp,
				      unsigned int softirq_nr)
{
	struct tasklet_head *head;
	unsigned long flags;

	local_irq_save(flags);
	head = this_cpu_ptr(headp);
	t->next = NULL;
	*head->tail = t;
	head->tail = &(t->next);
	raise_softirq_irqoff(softirq_nr);
	local_irq_restore(flags);
}
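
/*
 * A worked picture of the tail-pointer append above (illustrative):
 * on an empty list head == NULL and tail == &head, so "*head->tail = t"
 * stores t into head itself; afterwards tail points at t->next, so the
 * next append links onto t->next without walking the list. Scheduling
 * t1 and then t2 therefore yields:
 *
 *	head -> t1 -> t2 -> NULL,  tail == &t2->next
 */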

void __tasklet_schedule(struct tasklet_struct *t)
{
	__tasklet_schedule_common(t, &tasklet_vec,
				  TASKLET_SOFTIRQ);
}
EXPORT_SYMBOL(__tasklet_schedule);

void __tasklet_hi_schedule(struct tasklet_struct *t)
{
	__tasklet_schedule_common(t, &tasklet_hi_vec,
				  HI_SOFTIRQ);
}
EXPORT_SYMBOL(__tasklet_hi_schedule);

static bool tasklet_clear_sched(struct tasklet_struct *t)
{
	if (test_and_clear_bit(TASKLET_STATE_SCHED, &t->state)) {
		wake_up_var(&t->state);
		return true;
	}

	WARN_ONCE(1, "tasklet SCHED state not set: %s %pS\n",
		  t->use_callback ? "callback" : "func",
		  t->use_callback ? (void *)t->callback : (void *)t->func);

	return false;
}

static void tasklet_action_common(struct softirq_action *a,
				  struct tasklet_head *tl_head,
				  unsigned int softirq_nr)
{
	struct tasklet_struct *list;

	local_irq_disable();
	list = tl_head->head;
	tl_head->head = NULL;
	tl_head->tail = &tl_head->head;
	local_irq_enable();

	while (list) {
		struct tasklet_struct *t = list;

		list = list->next;

		if (tasklet_trylock(t)) {
			if (!atomic_read(&t->count)) {
				if (tasklet_clear_sched(t)) {
					if (t->use_callback)
						t->callback(t);
					else
						t->func(t->data);
				}
				tasklet_unlock(t);
				continue;
			}
			tasklet_unlock(t);
		}

		local_irq_disable();
		t->next = NULL;
		*tl_head->tail = t;
		tl_head->tail = &t->next;
		__raise_softirq_irqoff(softirq_nr);
		local_irq_enable();
	}
}

static __latent_entropy void tasklet_action(struct softirq_action *a)
{
	tasklet_action_common(a, this_cpu_ptr(&tasklet_vec), TASKLET_SOFTIRQ);
}

static __latent_entropy void tasklet_hi_action(struct softirq_action *a)
{
	tasklet_action_common(a, this_cpu_ptr(&tasklet_hi_vec), HI_SOFTIRQ);
}

void tasklet_setup(struct tasklet_struct *t,
		   void (*callback)(struct tasklet_struct *))
{
	t->next = NULL;
	t->state = 0;
	atomic_set(&t->count, 0);
	t->callback = callback;
	t->use_callback = true;
	t->data = 0;
}
EXPORT_SYMBOL(tasklet_setup);

void tasklet_init(struct tasklet_struct *t,
		  void (*func)(unsigned long), unsigned long data)
{
	t->next = NULL;
	t->state = 0;
	atomic_set(&t->count, 0);
	t->func = func;
	t->use_callback = false;
	t->data = data;
}
EXPORT_SYMBOL(tasklet_init);
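
/*
 * Usage sketch (hypothetical driver code, not part of this file): the
 * modern tasklet_setup() flavour embeds the tasklet in a device struct
 * and recovers it in the callback with from_tasklet(); my_dev and
 * my_tasklet_fn are made-up names.
 *
 *	struct my_dev {
 *		struct tasklet_struct tl;
 *		// ...
 *	};
 *
 *	static void my_tasklet_fn(struct tasklet_struct *t)
 *	{
 *		struct my_dev *dev = from_tasklet(dev, t, tl);
 *		// deferred work, runs in softirq context
 *	}
 *
 *	// probe:       tasklet_setup(&dev->tl, my_tasklet_fn);
 *	// irq handler: tasklet_schedule(&dev->tl);
 *	// remove:      tasklet_kill(&dev->tl);
 */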

void tasklet_kill(struct tasklet_struct *t)
{
	if (in_interrupt())
		pr_notice("Attempt to kill tasklet from interrupt\n");

	while (test_and_set_bit(TASKLET_STATE_SCHED, &t->state))
		wait_var_event(&t->state, !test_bit(TASKLET_STATE_SCHED, &t->state));

	tasklet_unlock_wait(t);
	tasklet_clear_sched(t);
}
EXPORT_SYMBOL(tasklet_kill);

#ifdef CONFIG_SMP
void tasklet_unlock(struct tasklet_struct *t)
{
	smp_mb__before_atomic();
	clear_bit(TASKLET_STATE_RUN, &t->state);
	smp_mb__after_atomic();
	wake_up_var(&t->state);
}
EXPORT_SYMBOL_GPL(tasklet_unlock);

void tasklet_unlock_wait(struct tasklet_struct *t)
{
	wait_var_event(&t->state, !test_bit(TASKLET_STATE_RUN, &t->state));
}
EXPORT_SYMBOL_GPL(tasklet_unlock_wait);
#endif

void __init softirq_init(void)
{
	int cpu;

	for_each_possible_cpu(cpu) {
		per_cpu(tasklet_vec, cpu).tail =
			&per_cpu(tasklet_vec, cpu).head;
		per_cpu(tasklet_hi_vec, cpu).tail =
			&per_cpu(tasklet_hi_vec, cpu).head;
	}

	open_softirq(TASKLET_SOFTIRQ, tasklet_action);
	open_softirq(HI_SOFTIRQ, tasklet_hi_action);
}

static int ksoftirqd_should_run(unsigned int cpu)
{
	return local_softirq_pending();
}

static void run_ksoftirqd(unsigned int cpu)
{
	local_irq_disable();
	if (local_softirq_pending()) {
		/*
		 * We can safely run softirq on inline stack, as we are not
		 * deep in the task stack here.
		 */
		__do_softirq();
		local_irq_enable();
		cond_resched();
		return;
	}
	local_irq_enable();
}

#ifdef CONFIG_HOTPLUG_CPU
static int takeover_tasklets(unsigned int cpu)
{
	/* CPU is dead, so no lock needed. */
	local_irq_disable();

	/* Find end, append list for that CPU. */
	if (&per_cpu(tasklet_vec, cpu).head != per_cpu(tasklet_vec, cpu).tail) {
		*__this_cpu_read(tasklet_vec.tail) = per_cpu(tasklet_vec, cpu).head;
		__this_cpu_write(tasklet_vec.tail, per_cpu(tasklet_vec, cpu).tail);
		per_cpu(tasklet_vec, cpu).head = NULL;
		per_cpu(tasklet_vec, cpu).tail = &per_cpu(tasklet_vec, cpu).head;
	}
	raise_softirq_irqoff(TASKLET_SOFTIRQ);

	if (&per_cpu(tasklet_hi_vec, cpu).head != per_cpu(tasklet_hi_vec, cpu).tail) {
		*__this_cpu_read(tasklet_hi_vec.tail) = per_cpu(tasklet_hi_vec, cpu).head;
		__this_cpu_write(tasklet_hi_vec.tail, per_cpu(tasklet_hi_vec, cpu).tail);
		per_cpu(tasklet_hi_vec, cpu).head = NULL;
		per_cpu(tasklet_hi_vec, cpu).tail = &per_cpu(tasklet_hi_vec, cpu).head;
	}
	raise_softirq_irqoff(HI_SOFTIRQ);

	local_irq_enable();
	return 0;
}
#else
#define takeover_tasklets	NULL
#endif /* CONFIG_HOTPLUG_CPU */

static struct smp_hotplug_thread softirq_threads = {
	.store			= &ksoftirqd,
	.thread_should_run	= ksoftirqd_should_run,
	.thread_fn		= run_ksoftirqd,
	.thread_comm		= "ksoftirqd/%u",
};

static __init int spawn_ksoftirqd(void)
{
	cpuhp_setup_state_nocalls(CPUHP_SOFTIRQ_DEAD, "softirq:dead", NULL,
				  takeover_tasklets);
	BUG_ON(smpboot_register_percpu_thread(&softirq_threads));

	return 0;
}
early_initcall(spawn_ksoftirqd);

/*
 * [ These __weak aliases are kept in a separate compilation unit, so that
 *   GCC does not inline them incorrectly. ]
 */

int __init __weak early_irq_init(void)
{
	return 0;
}

int __init __weak arch_probe_nr_irqs(void)
{
	return NR_IRQS_LEGACY;
}

int __init __weak arch_early_irq_init(void)
{
	return 0;
}

unsigned int __weak arch_dynirq_lower_bound(unsigned int from)
{
	return from;
}
759}