// SPDX-License-Identifier: GPL-2.0-only
/*
 *	linux/kernel/softirq.c
 *
 *	Copyright (C) 1992 Linus Torvalds
 *
 *	Rewritten. Old one was good in 2.2, but in 2.3 it was immoral. --ANK (990903)
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/export.h>
#include <linux/kernel_stat.h>
#include <linux/interrupt.h>
#include <linux/init.h>
#include <linux/mm.h>
#include <linux/notifier.h>
#include <linux/percpu.h>
#include <linux/cpu.h>
#include <linux/freezer.h>
#include <linux/kthread.h>
#include <linux/rcupdate.h>
#include <linux/ftrace.h>
#include <linux/smp.h>
#include <linux/smpboot.h>
#include <linux/tick.h>
#include <linux/irq.h>

#include <asm/softirq_stack.h>

#define CREATE_TRACE_POINTS
#include <trace/events/irq.h>

/*
   - No shared variables, all the data are CPU local.
   - If a softirq needs serialization, let it serialize itself
     by its own spinlocks.
   - Even if softirq is serialized, only the local CPU is marked for
     execution. Hence, we get something sort of weak CPU binding.
     It is still not clear whether this results in better locality
     or not.

   Examples:
   - NET RX softirq. It is multithreaded and does not require
     any global serialization.
   - NET TX softirq. It kicks software netdevice queues, hence
     it is logically serialized per device, but this serialization
     is invisible to common code.
   - Tasklets: serialized wrt itself.
 */
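
/*
 * Illustrative sketch (not part of this file, names below are hypothetical):
 * a subsystem that owns one of the NR_SOFTIRQS vectors registers its handler
 * once at init time and then raises the softirq from interrupt context:
 *
 *	static void my_net_rx_action(struct softirq_action *h)
 *	{
 *		// drain this CPU's queue; no cross-CPU locking needed
 *	}
 *
 *	open_softirq(NET_RX_SOFTIRQ, my_net_rx_action);	// during init
 *	raise_softirq(NET_RX_SOFTIRQ);			// from the IRQ handler
 */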

#ifndef __ARCH_IRQ_STAT
DEFINE_PER_CPU_ALIGNED(irq_cpustat_t, irq_stat);
EXPORT_PER_CPU_SYMBOL(irq_stat);
#endif

static struct softirq_action softirq_vec[NR_SOFTIRQS] __cacheline_aligned_in_smp;

DEFINE_PER_CPU(struct task_struct *, ksoftirqd);

const char * const softirq_to_name[NR_SOFTIRQS] = {
	"HI", "TIMER", "NET_TX", "NET_RX", "BLOCK", "IRQ_POLL",
	"TASKLET", "SCHED", "HRTIMER", "RCU"
};

/*
 * We cannot loop indefinitely here to avoid userspace starvation,
 * but we also don't want to introduce a worst case 1/HZ latency
 * to the pending events, so let the scheduler balance
 * the softirq load for us.
 */
static void wakeup_softirqd(void)
{
	/* Interrupts are disabled: no need to stop preemption */
	struct task_struct *tsk = __this_cpu_read(ksoftirqd);

	if (tsk && tsk->state != TASK_RUNNING)
		wake_up_process(tsk);
}

/*
 * If ksoftirqd is scheduled, we do not want to process pending softirqs
 * right now. Let ksoftirqd handle this at its own rate, to get fairness,
 * unless we're doing some of the synchronous softirqs.
 */
#define SOFTIRQ_NOW_MASK ((1 << HI_SOFTIRQ) | (1 << TASKLET_SOFTIRQ))
static bool ksoftirqd_running(unsigned long pending)
{
	struct task_struct *tsk = __this_cpu_read(ksoftirqd);

	if (pending & SOFTIRQ_NOW_MASK)
		return false;
	return tsk && (tsk->state == TASK_RUNNING) &&
		!__kthread_should_park(tsk);
}

#ifdef CONFIG_TRACE_IRQFLAGS
DEFINE_PER_CPU(int, hardirqs_enabled);
DEFINE_PER_CPU(int, hardirq_context);
EXPORT_PER_CPU_SYMBOL_GPL(hardirqs_enabled);
EXPORT_PER_CPU_SYMBOL_GPL(hardirq_context);
#endif

/*
 * preempt_count and SOFTIRQ_OFFSET usage:
 * - preempt_count is changed by SOFTIRQ_OFFSET on entering or leaving
 *   softirq processing.
 * - preempt_count is changed by SOFTIRQ_DISABLE_OFFSET (= 2 * SOFTIRQ_OFFSET)
 *   on local_bh_disable or local_bh_enable.
 * This lets us distinguish between whether we are currently processing
 * softirq and whether we just have bh disabled.
 */

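/*
 * Worked example (assuming the default preempt_count layout, where
 * SOFTIRQ_OFFSET is 0x100 and thus SOFTIRQ_DISABLE_OFFSET is 0x200):
 * a task that called local_bh_disable() once has softirq_count() == 0x200,
 * while a task that is actually running softirq handlers in __do_softirq()
 * has the 0x100 bit set, which is what in_serving_softirq() tests for.
 */
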
#ifdef CONFIG_TRACE_IRQFLAGS
/*
 * This is for softirq.c-internal use, where hardirqs are disabled
 * legitimately:
 */
void __local_bh_disable_ip(unsigned long ip, unsigned int cnt)
{
	unsigned long flags;

	WARN_ON_ONCE(in_irq());

	raw_local_irq_save(flags);
	/*
	 * The preempt tracer hooks into preempt_count_add and will break
	 * lockdep because it calls back into lockdep after SOFTIRQ_OFFSET
	 * is set and before current->softirq_enabled is cleared.
	 * We must manually increment preempt_count here and manually
	 * call the trace_preempt_off later.
	 */
	__preempt_count_add(cnt);
	/*
	 * Were softirqs turned off above:
	 */
	if (softirq_count() == (cnt & SOFTIRQ_MASK))
		lockdep_softirqs_off(ip);
	raw_local_irq_restore(flags);

	if (preempt_count() == cnt) {
#ifdef CONFIG_DEBUG_PREEMPT
		current->preempt_disable_ip = get_lock_parent_ip();
#endif
		trace_preempt_off(CALLER_ADDR0, get_lock_parent_ip());
	}
}
EXPORT_SYMBOL(__local_bh_disable_ip);
#endif /* CONFIG_TRACE_IRQFLAGS */

static void __local_bh_enable(unsigned int cnt)
{
	lockdep_assert_irqs_disabled();

	if (preempt_count() == cnt)
		trace_preempt_on(CALLER_ADDR0, get_lock_parent_ip());

	if (softirq_count() == (cnt & SOFTIRQ_MASK))
		lockdep_softirqs_on(_RET_IP_);

	__preempt_count_sub(cnt);
}

/*
 * Special-case - softirqs can safely be enabled by __do_softirq(),
 * without processing still-pending softirqs:
 */
void _local_bh_enable(void)
{
	WARN_ON_ONCE(in_irq());
	__local_bh_enable(SOFTIRQ_DISABLE_OFFSET);
}
EXPORT_SYMBOL(_local_bh_enable);

void __local_bh_enable_ip(unsigned long ip, unsigned int cnt)
{
	WARN_ON_ONCE(in_irq());
	lockdep_assert_irqs_enabled();
#ifdef CONFIG_TRACE_IRQFLAGS
	local_irq_disable();
#endif
	/*
	 * Are softirqs going to be turned on now:
	 */
	if (softirq_count() == SOFTIRQ_DISABLE_OFFSET)
		lockdep_softirqs_on(ip);
	/*
	 * Keep preemption disabled until we are done with
	 * softirq processing:
	 */
	__preempt_count_sub(cnt - 1);

	if (unlikely(!in_interrupt() && local_softirq_pending())) {
		/*
		 * Run softirq if any pending. And do it in its own stack
		 * as we may be calling this deep in a task call stack already.
		 */
		do_softirq();
	}

	preempt_count_dec();
#ifdef CONFIG_TRACE_IRQFLAGS
	local_irq_enable();
#endif
	preempt_check_resched();
}
EXPORT_SYMBOL(__local_bh_enable_ip);

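/*
 * Called from irq_exit() when softirqs are pending: either run them
 * inline (on the irq stack if the architecture stays on it, otherwise on
 * the dedicated softirq stack), defer to ksoftirqd when it is already
 * active, or always defer when interrupts are force-threaded.
 */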
static inline void invoke_softirq(void)
{
	if (ksoftirqd_running(local_softirq_pending()))
		return;

	if (!force_irqthreads) {
#ifdef CONFIG_HAVE_IRQ_EXIT_ON_IRQ_STACK
		/*
		 * We can safely execute softirq on the current stack if
		 * it is the irq stack, because it should be near empty
		 * at this stage.
		 */
		__do_softirq();
#else
		/*
		 * Otherwise, irq_exit() is called on the task stack that can
		 * be potentially deep already. So call softirq in its own stack
		 * to prevent any overrun.
		 */
		do_softirq_own_stack();
#endif
	} else {
		wakeup_softirqd();
	}
}

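/*
 * Run any pending softirqs on the dedicated softirq stack. This is a
 * no-op from interrupt context, and the work is left to ksoftirqd when
 * that thread is already running.
 */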
asmlinkage __visible void do_softirq(void)
{
	__u32 pending;
	unsigned long flags;

	if (in_interrupt())
		return;

	local_irq_save(flags);

	pending = local_softirq_pending();

	if (pending && !ksoftirqd_running(pending))
		do_softirq_own_stack();

	local_irq_restore(flags);
}

/*
 * We restart softirq processing for at most MAX_SOFTIRQ_RESTART times,
 * but break the loop if need_resched() is set or after 2 ms.
 * The MAX_SOFTIRQ_TIME provides a nice upper bound in most cases, but in
 * certain cases, such as stop_machine(), jiffies may cease to
 * increment and so we need the MAX_SOFTIRQ_RESTART limit as
 * well to make sure we eventually return from this method.
 *
 * These limits have been established via experimentation.
 * The two things to balance are latency against fairness -
 * we want to handle softirqs as soon as possible, but they
 * should not be able to lock up the box.
 */
#define MAX_SOFTIRQ_TIME  msecs_to_jiffies(2)
#define MAX_SOFTIRQ_RESTART 10

#ifdef CONFIG_TRACE_IRQFLAGS
/*
 * When we run softirqs from irq_exit() and thus on the hardirq stack we need
 * to keep the lockdep irq context tracking as tight as possible in order to
 * not mis-qualify lock contexts and miss possible deadlocks.
 */

static inline bool lockdep_softirq_start(void)
{
	bool in_hardirq = false;

	if (lockdep_hardirq_context()) {
		in_hardirq = true;
		lockdep_hardirq_exit();
	}

	lockdep_softirq_enter();

	return in_hardirq;
}

static inline void lockdep_softirq_end(bool in_hardirq)
{
	lockdep_softirq_exit();

	if (in_hardirq)
		lockdep_hardirq_enter();
}
#else
static inline bool lockdep_softirq_start(void) { return false; }
static inline void lockdep_softirq_end(bool in_hardirq) { }
#endif

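/*
 * The softirq core loop: account for softirq context, clear the pending
 * bitmask, re-enable interrupts and call the handler of every set bit.
 * If new softirqs were raised meanwhile, restart the loop, but only while
 * we are under both the 2 ms budget and the restart limit and nobody
 * needs the CPU; otherwise punt the remainder to ksoftirqd.
 */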
asmlinkage __visible void __softirq_entry __do_softirq(void)
{
	unsigned long end = jiffies + MAX_SOFTIRQ_TIME;
	unsigned long old_flags = current->flags;
	int max_restart = MAX_SOFTIRQ_RESTART;
	struct softirq_action *h;
	bool in_hardirq;
	__u32 pending;
	int softirq_bit;

	/*
	 * Mask out PF_MEMALLOC as the current task context is borrowed for the
	 * softirq. A softirq handler, such as network RX, might set PF_MEMALLOC
	 * again if the socket is related to swapping.
	 */
	current->flags &= ~PF_MEMALLOC;

	pending = local_softirq_pending();

	__local_bh_disable_ip(_RET_IP_, SOFTIRQ_OFFSET);
	in_hardirq = lockdep_softirq_start();
	account_softirq_enter(current);

restart:
	/* Reset the pending bitmask before enabling irqs */
	set_softirq_pending(0);

	local_irq_enable();

	h = softirq_vec;

	while ((softirq_bit = ffs(pending))) {
		unsigned int vec_nr;
		int prev_count;

		h += softirq_bit - 1;

		vec_nr = h - softirq_vec;
		prev_count = preempt_count();

		kstat_incr_softirqs_this_cpu(vec_nr);

		trace_softirq_entry(vec_nr);
		h->action(h);
		trace_softirq_exit(vec_nr);
		if (unlikely(prev_count != preempt_count())) {
			pr_err("huh, entered softirq %u %s %p with preempt_count %08x, exited with %08x?\n",
			       vec_nr, softirq_to_name[vec_nr], h->action,
			       prev_count, preempt_count());
			preempt_count_set(prev_count);
		}
		h++;
		pending >>= softirq_bit;
	}

	if (__this_cpu_read(ksoftirqd) == current)
		rcu_softirq_qs();
	local_irq_disable();

	pending = local_softirq_pending();
	if (pending) {
		if (time_before(jiffies, end) && !need_resched() &&
		    --max_restart)
			goto restart;

		wakeup_softirqd();
	}

	account_softirq_exit(current);
	lockdep_softirq_end(in_hardirq);
	__local_bh_enable(SOFTIRQ_OFFSET);
	WARN_ON_ONCE(in_interrupt());
	current_restore_flags(old_flags, PF_MEMALLOC);
}

/**
 * irq_enter_rcu - Enter an interrupt context with RCU watching
 */
void irq_enter_rcu(void)
{
	__irq_enter_raw();

	if (is_idle_task(current) && (irq_count() == HARDIRQ_OFFSET))
		tick_irq_enter();

	account_hardirq_enter(current);
}

/**
 * irq_enter - Enter an interrupt context including RCU update
 */
void irq_enter(void)
{
	rcu_irq_enter();
	irq_enter_rcu();
}

static inline void tick_irq_exit(void)
{
#ifdef CONFIG_NO_HZ_COMMON
	int cpu = smp_processor_id();

	/* Make sure that timer wheel updates are propagated */
	if ((idle_cpu(cpu) && !need_resched()) || tick_nohz_full_cpu(cpu)) {
		if (!in_irq())
			tick_nohz_irq_exit();
	}
#endif
}

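/*
 * Common hardirq exit work: drop HARDIRQ_OFFSET from preempt_count,
 * kick pending softirqs via invoke_softirq() and let the tick code know
 * that we are leaving interrupt context.
 */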
static inline void __irq_exit_rcu(void)
{
#ifndef __ARCH_IRQ_EXIT_IRQS_DISABLED
	local_irq_disable();
#else
	lockdep_assert_irqs_disabled();
#endif
	account_hardirq_exit(current);
	preempt_count_sub(HARDIRQ_OFFSET);
	if (!in_interrupt() && local_softirq_pending())
		invoke_softirq();

	tick_irq_exit();
}

/**
 * irq_exit_rcu() - Exit an interrupt context without updating RCU
 *
 * Also processes softirqs if needed and possible.
 */
void irq_exit_rcu(void)
{
	__irq_exit_rcu();
	/* must be last! */
	lockdep_hardirq_exit();
}

/**
 * irq_exit - Exit an interrupt context, update RCU and lockdep
 *
 * Also processes softirqs if needed and possible.
 */
void irq_exit(void)
{
	__irq_exit_rcu();
	rcu_irq_exit();
	/* must be last! */
	lockdep_hardirq_exit();
}

/*
 * This function must run with irqs disabled!
 */
inline void raise_softirq_irqoff(unsigned int nr)
{
	__raise_softirq_irqoff(nr);

	/*
	 * If we're in an interrupt or softirq, we're done
	 * (this also catches softirq-disabled code). We will
	 * actually run the softirq once we return from
	 * the irq or softirq.
	 *
	 * Otherwise we wake up ksoftirqd to make sure we
	 * schedule the softirq soon.
	 */
	if (!in_interrupt())
		wakeup_softirqd();
}

void raise_softirq(unsigned int nr)
{
	unsigned long flags;

	local_irq_save(flags);
	raise_softirq_irqoff(nr);
	local_irq_restore(flags);
}

void __raise_softirq_irqoff(unsigned int nr)
{
	lockdep_assert_irqs_disabled();
	trace_softirq_raise(nr);
	or_softirq_pending(1UL << nr);
}

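/*
 * Register the handler for softirq vector @nr. There is no unregister
 * operation; handlers are expected to be installed once at boot by the
 * core subsystems that own a softirq vector.
 */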
void open_softirq(int nr, void (*action)(struct softirq_action *))
{
	softirq_vec[nr].action = action;
}

/*
 * Tasklets
 */
struct tasklet_head {
	struct tasklet_struct *head;
	struct tasklet_struct **tail;
};

static DEFINE_PER_CPU(struct tasklet_head, tasklet_vec);
static DEFINE_PER_CPU(struct tasklet_head, tasklet_hi_vec);

static void __tasklet_schedule_common(struct tasklet_struct *t,
				      struct tasklet_head __percpu *headp,
				      unsigned int softirq_nr)
{
	struct tasklet_head *head;
	unsigned long flags;

	local_irq_save(flags);
	head = this_cpu_ptr(headp);
	t->next = NULL;
	*head->tail = t;
	head->tail = &(t->next);
	raise_softirq_irqoff(softirq_nr);
	local_irq_restore(flags);
}

void __tasklet_schedule(struct tasklet_struct *t)
{
	__tasklet_schedule_common(t, &tasklet_vec,
				  TASKLET_SOFTIRQ);
}
EXPORT_SYMBOL(__tasklet_schedule);

void __tasklet_hi_schedule(struct tasklet_struct *t)
{
	__tasklet_schedule_common(t, &tasklet_hi_vec,
				  HI_SOFTIRQ);
}
EXPORT_SYMBOL(__tasklet_hi_schedule);

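/*
 * Run the tasklets queued on this CPU: atomically detach the per-CPU list,
 * then execute every tasklet that is enabled (count == 0) and not already
 * running on another CPU. Tasklets that cannot run right now are put back
 * on the list and the softirq is raised again.
 */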
static void tasklet_action_common(struct softirq_action *a,
				  struct tasklet_head *tl_head,
				  unsigned int softirq_nr)
{
	struct tasklet_struct *list;

	local_irq_disable();
	list = tl_head->head;
	tl_head->head = NULL;
	tl_head->tail = &tl_head->head;
	local_irq_enable();

	while (list) {
		struct tasklet_struct *t = list;

		list = list->next;

		if (tasklet_trylock(t)) {
			if (!atomic_read(&t->count)) {
				if (!test_and_clear_bit(TASKLET_STATE_SCHED,
							&t->state))
					BUG();
				if (t->use_callback)
					t->callback(t);
				else
					t->func(t->data);
				tasklet_unlock(t);
				continue;
			}
			tasklet_unlock(t);
		}

		local_irq_disable();
		t->next = NULL;
		*tl_head->tail = t;
		tl_head->tail = &t->next;
		__raise_softirq_irqoff(softirq_nr);
		local_irq_enable();
	}
}

static __latent_entropy void tasklet_action(struct softirq_action *a)
{
	tasklet_action_common(a, this_cpu_ptr(&tasklet_vec), TASKLET_SOFTIRQ);
}

static __latent_entropy void tasklet_hi_action(struct softirq_action *a)
{
	tasklet_action_common(a, this_cpu_ptr(&tasklet_hi_vec), HI_SOFTIRQ);
}

void tasklet_setup(struct tasklet_struct *t,
		   void (*callback)(struct tasklet_struct *))
{
	t->next = NULL;
	t->state = 0;
	atomic_set(&t->count, 0);
	t->callback = callback;
	t->use_callback = true;
	t->data = 0;
}
EXPORT_SYMBOL(tasklet_setup);
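
/*
 * Illustrative usage sketch (not part of this file, names are hypothetical):
 * a driver declares a tasklet, initializes it with tasklet_setup() and
 * schedules it from its interrupt handler:
 *
 *	static struct tasklet_struct my_tasklet;
 *
 *	static void my_tasklet_fn(struct tasklet_struct *t)
 *	{
 *		// bottom-half work, runs in softirq context
 *	}
 *
 *	tasklet_setup(&my_tasklet, my_tasklet_fn);	// during probe
 *	tasklet_schedule(&my_tasklet);			// from the IRQ handler
 *	tasklet_kill(&my_tasklet);			// during teardown
 */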

void tasklet_init(struct tasklet_struct *t,
		  void (*func)(unsigned long), unsigned long data)
{
	t->next = NULL;
	t->state = 0;
	atomic_set(&t->count, 0);
	t->func = func;
	t->use_callback = false;
	t->data = data;
}
EXPORT_SYMBOL(tasklet_init);

void tasklet_kill(struct tasklet_struct *t)
{
	if (in_interrupt())
		pr_notice("Attempt to kill tasklet from interrupt\n");

	while (test_and_set_bit(TASKLET_STATE_SCHED, &t->state)) {
		do {
			yield();
		} while (test_bit(TASKLET_STATE_SCHED, &t->state));
	}
	tasklet_unlock_wait(t);
	clear_bit(TASKLET_STATE_SCHED, &t->state);
}
EXPORT_SYMBOL(tasklet_kill);

void __init softirq_init(void)
{
	int cpu;

	for_each_possible_cpu(cpu) {
		per_cpu(tasklet_vec, cpu).tail =
			&per_cpu(tasklet_vec, cpu).head;
		per_cpu(tasklet_hi_vec, cpu).tail =
			&per_cpu(tasklet_hi_vec, cpu).head;
	}

	open_softirq(TASKLET_SOFTIRQ, tasklet_action);
	open_softirq(HI_SOFTIRQ, tasklet_hi_action);
}

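/*
 * Per-CPU ksoftirqd thread callbacks, hooked up via the smpboot thread
 * infrastructure below: the thread is woken whenever softirqs are pending
 * and processes them with interrupts disabled around __do_softirq().
 */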
static int ksoftirqd_should_run(unsigned int cpu)
{
	return local_softirq_pending();
}

static void run_ksoftirqd(unsigned int cpu)
{
	local_irq_disable();
	if (local_softirq_pending()) {
		/*
		 * We can safely run softirqs on the current stack, as we are
		 * not deep in the task stack here.
		 */
		__do_softirq();
		local_irq_enable();
		cond_resched();
		return;
	}
	local_irq_enable();
}

#ifdef CONFIG_HOTPLUG_CPU
/*
 * tasklet_kill_immediate is called to remove a tasklet which can already be
 * scheduled for execution on @cpu.
 *
 * Unlike tasklet_kill, this function removes the tasklet
 * _immediately_, even if the tasklet is in TASKLET_STATE_SCHED state.
 *
 * When this function is called, @cpu must be in the CPU_DEAD state.
 */
void tasklet_kill_immediate(struct tasklet_struct *t, unsigned int cpu)
{
	struct tasklet_struct **i;

	BUG_ON(cpu_online(cpu));
	BUG_ON(test_bit(TASKLET_STATE_RUN, &t->state));

	if (!test_bit(TASKLET_STATE_SCHED, &t->state))
		return;

	/* CPU is dead, so no lock needed. */
	for (i = &per_cpu(tasklet_vec, cpu).head; *i; i = &(*i)->next) {
		if (*i == t) {
			*i = t->next;
			/* If this was the tail element, move the tail ptr */
			if (*i == NULL)
				per_cpu(tasklet_vec, cpu).tail = i;
			return;
		}
	}
	BUG();
}

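/*
 * CPU hotplug callback: splice the dead CPU's pending tasklet lists onto
 * the current CPU's lists and raise the corresponding softirqs so that
 * nothing queued on the departed CPU is lost.
 */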
static int takeover_tasklets(unsigned int cpu)
{
	/* CPU is dead, so no lock needed. */
	local_irq_disable();

	/* Find end, append list for that CPU. */
	if (&per_cpu(tasklet_vec, cpu).head != per_cpu(tasklet_vec, cpu).tail) {
		*__this_cpu_read(tasklet_vec.tail) = per_cpu(tasklet_vec, cpu).head;
		__this_cpu_write(tasklet_vec.tail, per_cpu(tasklet_vec, cpu).tail);
		per_cpu(tasklet_vec, cpu).head = NULL;
		per_cpu(tasklet_vec, cpu).tail = &per_cpu(tasklet_vec, cpu).head;
	}
	raise_softirq_irqoff(TASKLET_SOFTIRQ);

	if (&per_cpu(tasklet_hi_vec, cpu).head != per_cpu(tasklet_hi_vec, cpu).tail) {
		*__this_cpu_read(tasklet_hi_vec.tail) = per_cpu(tasklet_hi_vec, cpu).head;
		__this_cpu_write(tasklet_hi_vec.tail, per_cpu(tasklet_hi_vec, cpu).tail);
		per_cpu(tasklet_hi_vec, cpu).head = NULL;
		per_cpu(tasklet_hi_vec, cpu).tail = &per_cpu(tasklet_hi_vec, cpu).head;
	}
	raise_softirq_irqoff(HI_SOFTIRQ);

	local_irq_enable();
	return 0;
}
#else
#define takeover_tasklets	NULL
#endif /* CONFIG_HOTPLUG_CPU */

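/*
 * smpboot descriptor for the per-CPU ksoftirqd/%u threads; registered
 * from spawn_ksoftirqd() at early init.
 */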
static struct smp_hotplug_thread softirq_threads = {
	.store			= &ksoftirqd,
	.thread_should_run	= ksoftirqd_should_run,
	.thread_fn		= run_ksoftirqd,
	.thread_comm		= "ksoftirqd/%u",
};

static __init int spawn_ksoftirqd(void)
{
	cpuhp_setup_state_nocalls(CPUHP_SOFTIRQ_DEAD, "softirq:dead", NULL,
				  takeover_tasklets);
	BUG_ON(smpboot_register_percpu_thread(&softirq_threads));

	return 0;
}
early_initcall(spawn_ksoftirqd);

/*
 * [ These __weak aliases are kept in a separate compilation unit, so that
 *   GCC does not inline them incorrectly. ]
 */

int __init __weak early_irq_init(void)
{
	return 0;
}

int __init __weak arch_probe_nr_irqs(void)
{
	return NR_IRQS_LEGACY;
}

int __init __weak arch_early_irq_init(void)
{
	return 0;
}

unsigned int __weak arch_dynirq_lower_bound(unsigned int from)
{
	return from;
}