// SPDX-License-Identifier: GPL-2.0-only
/*
 *	linux/kernel/softirq.c
 *
 *	Copyright (C) 1992 Linus Torvalds
 *
 *	Rewritten. Old one was good in 2.2, but in 2.3 it was immoral. --ANK (990903)
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/export.h>
#include <linux/kernel_stat.h>
#include <linux/interrupt.h>
#include <linux/init.h>
#include <linux/local_lock.h>
#include <linux/mm.h>
#include <linux/notifier.h>
#include <linux/percpu.h>
#include <linux/cpu.h>
#include <linux/freezer.h>
#include <linux/kthread.h>
#include <linux/rcupdate.h>
#include <linux/ftrace.h>
#include <linux/smp.h>
#include <linux/smpboot.h>
#include <linux/tick.h>
#include <linux/irq.h>
#include <linux/wait_bit.h>

#include <asm/softirq_stack.h>

#define CREATE_TRACE_POINTS
#include <trace/events/irq.h>

/*
   - No shared variables, all the data are CPU local.
   - If a softirq needs serialization, let it serialize itself
     by its own spinlocks.
   - Even if softirq is serialized, only local cpu is marked for
     execution. Hence, we get something sort of weak cpu binding.
     Though it is still not clear whether this results in better
     locality or not.

   Examples:
   - NET RX softirq. It is multithreaded and does not require
     any global serialization.
   - NET TX softirq. It kicks software netdevice queues, hence
     it is logically serialized per device, but this serialization
     is invisible to common code.
   - Tasklets: serialized wrt itself.
 */

#ifndef __ARCH_IRQ_STAT
DEFINE_PER_CPU_ALIGNED(irq_cpustat_t, irq_stat);
EXPORT_PER_CPU_SYMBOL(irq_stat);
#endif

static struct softirq_action softirq_vec[NR_SOFTIRQS] __cacheline_aligned_in_smp;

DEFINE_PER_CPU(struct task_struct *, ksoftirqd);

const char * const softirq_to_name[NR_SOFTIRQS] = {
	"HI", "TIMER", "NET_TX", "NET_RX", "BLOCK", "IRQ_POLL",
	"TASKLET", "SCHED", "HRTIMER", "RCU"
};

/*
 * We cannot loop indefinitely here to avoid userspace starvation,
 * but we also don't want to introduce a worst case 1/HZ latency
 * to the pending events, so let the scheduler balance
 * the softirq load for us.
 */
static void wakeup_softirqd(void)
{
	/* Interrupts are disabled: no need to stop preemption */
	struct task_struct *tsk = __this_cpu_read(ksoftirqd);

	if (tsk)
		wake_up_process(tsk);
}

/*
 * If ksoftirqd is scheduled, we do not want to process pending softirqs
 * right now. Let ksoftirqd handle this at its own rate, to get fairness,
 * unless we're doing some of the synchronous softirqs.
 */
#define SOFTIRQ_NOW_MASK ((1 << HI_SOFTIRQ) | (1 << TASKLET_SOFTIRQ))
static bool ksoftirqd_running(unsigned long pending)
{
	struct task_struct *tsk = __this_cpu_read(ksoftirqd);

	if (pending & SOFTIRQ_NOW_MASK)
		return false;
	return tsk && task_is_running(tsk) && !__kthread_should_park(tsk);
}

#ifdef CONFIG_TRACE_IRQFLAGS
DEFINE_PER_CPU(int, hardirqs_enabled);
DEFINE_PER_CPU(int, hardirq_context);
EXPORT_PER_CPU_SYMBOL_GPL(hardirqs_enabled);
EXPORT_PER_CPU_SYMBOL_GPL(hardirq_context);
#endif

/*
 * SOFTIRQ_OFFSET usage:
 *
 * On !RT kernels 'count' is the preempt counter, on RT kernels this applies
 * to a per CPU counter and to task::softirqs_disabled_cnt.
 *
 * - count is changed by SOFTIRQ_OFFSET on entering or leaving softirq
 *   processing.
 *
 * - count is changed by SOFTIRQ_DISABLE_OFFSET (= 2 * SOFTIRQ_OFFSET)
 *   on local_bh_disable or local_bh_enable.
 *
 * This lets us distinguish between whether we are currently processing
 * softirq and whether we just have bh disabled.
 */
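/*
 * Usage sketch (illustrative; assumes the common preempt_count() layout
 * where SOFTIRQ_OFFSET == 0x100 and thus SOFTIRQ_DISABLE_OFFSET == 0x200):
 *
 *	local_bh_disable();	// count += 0x200: in_softirq() true,
 *				// in_serving_softirq() false
 *	local_bh_enable();	// count -= 0x200
 *
 * whereas __do_softirq() accounts with SOFTIRQ_OFFSET (0x100), so
 * in_serving_softirq() is true only while handlers are actually running.
 */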
#ifdef CONFIG_PREEMPT_RT

/*
 * RT accounts for BH disabled sections in task::softirqs_disabled_cnt and
 * also in per CPU softirq_ctrl::cnt. This is necessary to allow tasks in a
 * softirq disabled section to be preempted.
 *
 * The per task counter is used for softirq_count(), in_softirq() and
 * in_serving_softirq() because these counts are only valid when the task
 * holding softirq_ctrl::lock is running.
 *
 * The per CPU counter prevents pointless wakeups of ksoftirqd in case that
 * the task which is in a softirq disabled section is preempted or blocks.
 */
struct softirq_ctrl {
	local_lock_t	lock;
	int		cnt;
};

static DEFINE_PER_CPU(struct softirq_ctrl, softirq_ctrl) = {
	.lock	= INIT_LOCAL_LOCK(softirq_ctrl.lock),
};

/**
 * local_bh_blocked() - Check for idle whether BH processing is blocked
 *
 * Returns false if the per CPU softirq_ctrl::cnt is 0 otherwise true.
 *
 * This is invoked from the idle task to guard against false positive
 * softirq pending warnings, which would happen when the task which holds
 * softirq_ctrl::lock was the only running task on the CPU and blocks on
 * some other lock.
 */
bool local_bh_blocked(void)
{
	return __this_cpu_read(softirq_ctrl.cnt) != 0;
}

void __local_bh_disable_ip(unsigned long ip, unsigned int cnt)
{
	unsigned long flags;
	int newcnt;

	WARN_ON_ONCE(in_hardirq());

	/* First entry of a task into a BH disabled section? */
	if (!current->softirq_disable_cnt) {
		if (preemptible()) {
			local_lock(&softirq_ctrl.lock);
			/* Required to meet the RCU bottomhalf requirements. */
			rcu_read_lock();
		} else {
			DEBUG_LOCKS_WARN_ON(this_cpu_read(softirq_ctrl.cnt));
		}
	}

	/*
	 * Track the per CPU softirq disabled state. On RT this is per CPU
	 * state to allow preemption of bottom half disabled sections.
	 */
	newcnt = __this_cpu_add_return(softirq_ctrl.cnt, cnt);
	/*
	 * Reflect the result in the task state to prevent recursion on the
	 * local lock and to make softirq_count() & al work.
	 */
	current->softirq_disable_cnt = newcnt;

	if (IS_ENABLED(CONFIG_TRACE_IRQFLAGS) && newcnt == cnt) {
		raw_local_irq_save(flags);
		lockdep_softirqs_off(ip);
		raw_local_irq_restore(flags);
	}
}
EXPORT_SYMBOL(__local_bh_disable_ip);

static void __local_bh_enable(unsigned int cnt, bool unlock)
{
	unsigned long flags;
	int newcnt;

	DEBUG_LOCKS_WARN_ON(current->softirq_disable_cnt !=
			    this_cpu_read(softirq_ctrl.cnt));

	if (IS_ENABLED(CONFIG_TRACE_IRQFLAGS) && softirq_count() == cnt) {
		raw_local_irq_save(flags);
		lockdep_softirqs_on(_RET_IP_);
		raw_local_irq_restore(flags);
	}

	newcnt = __this_cpu_sub_return(softirq_ctrl.cnt, cnt);
	current->softirq_disable_cnt = newcnt;

	if (!newcnt && unlock) {
		rcu_read_unlock();
		local_unlock(&softirq_ctrl.lock);
	}
}

void __local_bh_enable_ip(unsigned long ip, unsigned int cnt)
{
	bool preempt_on = preemptible();
	unsigned long flags;
	u32 pending;
	int curcnt;

	WARN_ON_ONCE(in_irq());
	lockdep_assert_irqs_enabled();

	local_irq_save(flags);
	curcnt = __this_cpu_read(softirq_ctrl.cnt);

	/*
	 * If this is not reenabling soft interrupts, no point in trying to
	 * run pending ones.
	 */
	if (curcnt != cnt)
		goto out;

	pending = local_softirq_pending();
	if (!pending || ksoftirqd_running(pending))
		goto out;

	/*
	 * If this was called from non preemptible context, wake up the
	 * softirq daemon.
	 */
	if (!preempt_on) {
		wakeup_softirqd();
		goto out;
	}

	/*
	 * Adjust softirq count to SOFTIRQ_OFFSET which makes
	 * in_serving_softirq() become true.
	 */
	cnt = SOFTIRQ_OFFSET;
	__local_bh_enable(cnt, false);
	__do_softirq();

out:
	__local_bh_enable(cnt, preempt_on);
	local_irq_restore(flags);
}
EXPORT_SYMBOL(__local_bh_enable_ip);
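/*
 * Behavior sketch (illustrative, counts in units of
 * SOFTIRQ_DISABLE_OFFSET) for nested BH sections on RT:
 *
 *	local_bh_disable();	// cnt 0 -> 1: takes softirq_ctrl.lock
 *	local_bh_disable();	// cnt 1 -> 2: no lock recursion
 *	local_bh_enable();	// cnt 2 -> 1: curcnt != cnt, only decrement
 *	local_bh_enable();	// cnt 1 -> 0: may run pending softirqs
 *				// inline, then drops the local lock
 */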

/*
 * Invoked from ksoftirqd_run() outside of the interrupt disabled section
 * to acquire the per CPU local lock for reentrancy protection.
 */
static inline void ksoftirqd_run_begin(void)
{
	__local_bh_disable_ip(_RET_IP_, SOFTIRQ_OFFSET);
	local_irq_disable();
}

/* Counterpart to ksoftirqd_run_begin() */
static inline void ksoftirqd_run_end(void)
{
	__local_bh_enable(SOFTIRQ_OFFSET, true);
	WARN_ON_ONCE(in_interrupt());
	local_irq_enable();
}

static inline void softirq_handle_begin(void) { }
static inline void softirq_handle_end(void) { }

static inline bool should_wake_ksoftirqd(void)
{
	return !this_cpu_read(softirq_ctrl.cnt);
}

static inline void invoke_softirq(void)
{
	if (should_wake_ksoftirqd())
		wakeup_softirqd();
}

#else /* CONFIG_PREEMPT_RT */

/*
 * This one is for softirq.c-internal use, where hardirqs are disabled
 * legitimately:
 */
#ifdef CONFIG_TRACE_IRQFLAGS
void __local_bh_disable_ip(unsigned long ip, unsigned int cnt)
{
	unsigned long flags;

	WARN_ON_ONCE(in_irq());

	raw_local_irq_save(flags);
	/*
	 * The preempt tracer hooks into preempt_count_add and will break
	 * lockdep because it calls back into lockdep after SOFTIRQ_OFFSET
	 * is set and before current->softirq_enabled is cleared.
	 * We must manually increment preempt_count here and manually
	 * call the trace_preempt_off later.
	 */
	__preempt_count_add(cnt);
	/*
	 * Were softirqs turned off above:
	 */
	if (softirq_count() == (cnt & SOFTIRQ_MASK))
		lockdep_softirqs_off(ip);
	raw_local_irq_restore(flags);

	if (preempt_count() == cnt) {
#ifdef CONFIG_DEBUG_PREEMPT
		current->preempt_disable_ip = get_lock_parent_ip();
#endif
		trace_preempt_off(CALLER_ADDR0, get_lock_parent_ip());
	}
}
EXPORT_SYMBOL(__local_bh_disable_ip);
#endif /* CONFIG_TRACE_IRQFLAGS */

static void __local_bh_enable(unsigned int cnt)
{
	lockdep_assert_irqs_disabled();

	if (preempt_count() == cnt)
		trace_preempt_on(CALLER_ADDR0, get_lock_parent_ip());

	if (softirq_count() == (cnt & SOFTIRQ_MASK))
		lockdep_softirqs_on(_RET_IP_);

	__preempt_count_sub(cnt);
}

/*
 * Special-case - softirqs can safely be enabled by __do_softirq(),
 * without processing still-pending softirqs:
 */
void _local_bh_enable(void)
{
	WARN_ON_ONCE(in_irq());
	__local_bh_enable(SOFTIRQ_DISABLE_OFFSET);
}
EXPORT_SYMBOL(_local_bh_enable);

void __local_bh_enable_ip(unsigned long ip, unsigned int cnt)
{
	WARN_ON_ONCE(in_irq());
	lockdep_assert_irqs_enabled();
#ifdef CONFIG_TRACE_IRQFLAGS
	local_irq_disable();
#endif
	/*
	 * Are softirqs going to be turned on now:
	 */
	if (softirq_count() == SOFTIRQ_DISABLE_OFFSET)
		lockdep_softirqs_on(ip);
	/*
	 * Keep preemption disabled until we are done with
	 * softirq processing:
	 */
	__preempt_count_sub(cnt - 1);

	if (unlikely(!in_interrupt() && local_softirq_pending())) {
		/*
		 * Run softirq if any pending. And do it in its own stack
		 * as we may be calling this deep in a task call stack already.
		 */
		do_softirq();
	}

	preempt_count_dec();
#ifdef CONFIG_TRACE_IRQFLAGS
	local_irq_enable();
#endif
	preempt_check_resched();
}
EXPORT_SYMBOL(__local_bh_enable_ip);
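/*
 * Usage sketch (illustrative): the wrappers in <linux/bottom_half.h>
 * funnel into the functions above, e.g.
 *
 *	local_bh_disable();	// __local_bh_disable_ip(_THIS_IP_,
 *				//		SOFTIRQ_DISABLE_OFFSET)
 *	...			// BH-protected section
 *	local_bh_enable();	// __local_bh_enable_ip(_THIS_IP_,
 *				//		SOFTIRQ_DISABLE_OFFSET)
 *
 * so the outermost enable may process pending softirqs synchronously in
 * the calling context.
 */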

static inline void softirq_handle_begin(void)
{
	__local_bh_disable_ip(_RET_IP_, SOFTIRQ_OFFSET);
}

static inline void softirq_handle_end(void)
{
	__local_bh_enable(SOFTIRQ_OFFSET);
	WARN_ON_ONCE(in_interrupt());
}

static inline void ksoftirqd_run_begin(void)
{
	local_irq_disable();
}

static inline void ksoftirqd_run_end(void)
{
	local_irq_enable();
}

static inline bool should_wake_ksoftirqd(void)
{
	return true;
}

static inline void invoke_softirq(void)
{
	if (ksoftirqd_running(local_softirq_pending()))
		return;

	if (!force_irqthreads() || !__this_cpu_read(ksoftirqd)) {
#ifdef CONFIG_HAVE_IRQ_EXIT_ON_IRQ_STACK
		/*
		 * We can safely execute softirq on the current stack if
		 * it is the irq stack, because it should be near empty
		 * at this stage.
		 */
		__do_softirq();
#else
		/*
		 * Otherwise, irq_exit() is called on the task stack that can
		 * be potentially deep already. So call softirq in its own stack
		 * to prevent any overrun.
		 */
		do_softirq_own_stack();
#endif
	} else {
		wakeup_softirqd();
	}
}

asmlinkage __visible void do_softirq(void)
{
	__u32 pending;
	unsigned long flags;

	if (in_interrupt())
		return;

	local_irq_save(flags);

	pending = local_softirq_pending();

	if (pending && !ksoftirqd_running(pending))
		do_softirq_own_stack();

	local_irq_restore(flags);
}

#endif /* !CONFIG_PREEMPT_RT */

/*
 * We restart softirq processing for at most MAX_SOFTIRQ_RESTART times,
 * but break the loop if need_resched() is set or after 2 ms.
 * The MAX_SOFTIRQ_TIME provides a nice upper bound in most cases, but in
 * certain cases, such as stop_machine(), jiffies may cease to
 * increment and so we need the MAX_SOFTIRQ_RESTART limit as
 * well to make sure we eventually return from this function.
 *
 * These limits have been established via experimentation.
 * The two things to balance are latency against fairness -
 * we want to handle softirqs as soon as possible, but they
 * should not be able to lock up the box.
 */
#define MAX_SOFTIRQ_TIME  msecs_to_jiffies(2)
#define MAX_SOFTIRQ_RESTART 10

#ifdef CONFIG_TRACE_IRQFLAGS
/*
 * When we run softirqs from irq_exit() and thus on the hardirq stack we need
 * to keep the lockdep irq context tracking as tight as possible in order to
 * not mis-qualify lock contexts and miss possible deadlocks.
 */

static inline bool lockdep_softirq_start(void)
{
	bool in_hardirq = false;

	if (lockdep_hardirq_context()) {
		in_hardirq = true;
		lockdep_hardirq_exit();
	}

	lockdep_softirq_enter();

	return in_hardirq;
}

static inline void lockdep_softirq_end(bool in_hardirq)
{
	lockdep_softirq_exit();

	if (in_hardirq)
		lockdep_hardirq_enter();
}
#else
static inline bool lockdep_softirq_start(void) { return false; }
static inline void lockdep_softirq_end(bool in_hardirq) { }
#endif

asmlinkage __visible void __softirq_entry __do_softirq(void)
{
	unsigned long end = jiffies + MAX_SOFTIRQ_TIME;
	unsigned long old_flags = current->flags;
	int max_restart = MAX_SOFTIRQ_RESTART;
	struct softirq_action *h;
	bool in_hardirq;
	__u32 pending;
	int softirq_bit;

	/*
	 * Mask out PF_MEMALLOC as the current task context is borrowed for the
	 * softirq. A softirq handler, such as network RX, might set PF_MEMALLOC
	 * again if the socket is related to swapping.
	 */
	current->flags &= ~PF_MEMALLOC;

	pending = local_softirq_pending();

	softirq_handle_begin();
	in_hardirq = lockdep_softirq_start();
	account_softirq_enter(current);

restart:
	/* Reset the pending bitmask before enabling irqs */
	set_softirq_pending(0);

	local_irq_enable();

	h = softirq_vec;

	while ((softirq_bit = ffs(pending))) {
		unsigned int vec_nr;
		int prev_count;

		h += softirq_bit - 1;

		vec_nr = h - softirq_vec;
		prev_count = preempt_count();

		kstat_incr_softirqs_this_cpu(vec_nr);

		trace_softirq_entry(vec_nr);
		h->action(h);
		trace_softirq_exit(vec_nr);
		if (unlikely(prev_count != preempt_count())) {
			pr_err("huh, entered softirq %u %s %p with preempt_count %08x, exited with %08x?\n",
			       vec_nr, softirq_to_name[vec_nr], h->action,
			       prev_count, preempt_count());
			preempt_count_set(prev_count);
		}
		h++;
		pending >>= softirq_bit;
	}

	if (!IS_ENABLED(CONFIG_PREEMPT_RT) &&
	    __this_cpu_read(ksoftirqd) == current)
		rcu_softirq_qs();

	local_irq_disable();

	pending = local_softirq_pending();
	if (pending) {
		if (time_before(jiffies, end) && !need_resched() &&
		    --max_restart)
			goto restart;

		wakeup_softirqd();
	}

	account_softirq_exit(current);
	lockdep_softirq_end(in_hardirq);
	softirq_handle_end();
	current_restore_flags(old_flags, PF_MEMALLOC);
}

592 * irq_enter_rcu - Enter an interrupt context with RCU watching
Ingo Molnardde4b2b2007-02-16 01:27:45 -0800593 */
Thomas Gleixner8a6bc472020-05-21 22:05:21 +0200594void irq_enter_rcu(void)
Ingo Molnardde4b2b2007-02-16 01:27:45 -0800595{
Frederic Weisbeckerd14ce742020-12-02 12:57:32 +0100596 __irq_enter_raw();
597
598 if (is_idle_task(current) && (irq_count() == HARDIRQ_OFFSET))
Frederic Weisbecker5acac1b2013-12-04 18:28:20 +0100599 tick_irq_enter();
Frederic Weisbeckerd14ce742020-12-02 12:57:32 +0100600
601 account_hardirq_enter(current);
Ingo Molnardde4b2b2007-02-16 01:27:45 -0800602}
603
Thomas Gleixner8a6bc472020-05-21 22:05:21 +0200604/**
605 * irq_enter - Enter an interrupt context including RCU update
606 */
607void irq_enter(void)
608{
609 rcu_irq_enter();
610 irq_enter_rcu();
611}
612
Frederic Weisbecker67826ea2013-04-20 17:43:13 +0200613static inline void tick_irq_exit(void)
614{
615#ifdef CONFIG_NO_HZ_COMMON
616 int cpu = smp_processor_id();
617
618 /* Make sure that timer wheel updates are propagated */
619 if ((idle_cpu(cpu) && !need_resched()) || tick_nohz_full_cpu(cpu)) {
Frederic Weisbecker0a0e0822018-08-03 15:31:34 +0200620 if (!in_irq())
Frederic Weisbecker67826ea2013-04-20 17:43:13 +0200621 tick_nohz_irq_exit();
622 }
623#endif
624}
625
Peter Zijlstra59bc3002020-05-29 23:27:39 +0200626static inline void __irq_exit_rcu(void)
Linus Torvalds1da177e2005-04-16 15:20:36 -0700627{
Thomas Gleixner74eed012013-02-20 22:00:48 +0100628#ifndef __ARCH_IRQ_EXIT_IRQS_DISABLED
Frederic Weisbecker4cd5d112013-02-28 20:00:43 +0100629 local_irq_disable();
Thomas Gleixner74eed012013-02-20 22:00:48 +0100630#else
Frederic Weisbeckerf71b74b2017-11-06 16:01:18 +0100631 lockdep_assert_irqs_disabled();
Thomas Gleixner74eed012013-02-20 22:00:48 +0100632#endif
Frederic Weisbeckerd3759e72020-12-02 12:57:31 +0100633 account_hardirq_exit(current);
Peter Zijlstrabdb43802013-09-10 12:15:23 +0200634 preempt_count_sub(HARDIRQ_OFFSET);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700635 if (!in_interrupt() && local_softirq_pending())
636 invoke_softirq();
Thomas Gleixner79bf2bb2007-02-16 01:28:03 -0800637
Frederic Weisbecker67826ea2013-04-20 17:43:13 +0200638 tick_irq_exit();
Thomas Gleixner8a6bc472020-05-21 22:05:21 +0200639}
640
641/**
Peter Zijlstra59bc3002020-05-29 23:27:39 +0200642 * irq_exit_rcu() - Exit an interrupt context without updating RCU
643 *
644 * Also processes softirqs if needed and possible.
645 */
646void irq_exit_rcu(void)
647{
648 __irq_exit_rcu();
649 /* must be last! */
650 lockdep_hardirq_exit();
651}
652
653/**
Thomas Gleixner8a6bc472020-05-21 22:05:21 +0200654 * irq_exit - Exit an interrupt context, update RCU and lockdep
655 *
656 * Also processes softirqs if needed and possible.
657 */
658void irq_exit(void)
659{
Peter Zijlstra59bc3002020-05-29 23:27:39 +0200660 __irq_exit_rcu();
Frederic Weisbecker416eb332011-10-07 16:31:02 -0700661 rcu_irq_exit();
Thomas Gleixner2502ec32020-03-20 12:56:40 +0100662 /* must be last! */
663 lockdep_hardirq_exit();
Linus Torvalds1da177e2005-04-16 15:20:36 -0700664}
665
/*
 * This function must run with irqs disabled!
 */
inline void raise_softirq_irqoff(unsigned int nr)
{
	__raise_softirq_irqoff(nr);

	/*
	 * If we're in an interrupt or softirq, we're done
	 * (this also catches softirq-disabled code). We will
	 * actually run the softirq once we return from
	 * the irq or softirq.
	 *
	 * Otherwise we wake up ksoftirqd to make sure we
	 * schedule the softirq soon.
	 */
	if (!in_interrupt() && should_wake_ksoftirqd())
		wakeup_softirqd();
}

void raise_softirq(unsigned int nr)
{
	unsigned long flags;

	local_irq_save(flags);
	raise_softirq_irqoff(nr);
	local_irq_restore(flags);
}
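/*
 * Usage sketch (illustrative; NET_TX_SOFTIRQ stands in for whichever
 * vector the caller owns):
 *
 *	raise_softirq_irqoff(NET_TX_SOFTIRQ);	// irqs already disabled,
 *						// e.g. in a hardirq handler
 *	raise_softirq(NET_TX_SOFTIRQ);		// any context; saves and
 *						// restores the irq state
 */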

void __raise_softirq_irqoff(unsigned int nr)
{
	lockdep_assert_irqs_disabled();
	trace_softirq_raise(nr);
	or_softirq_pending(1UL << nr);
}

void open_softirq(int nr, void (*action)(struct softirq_action *))
{
	softirq_vec[nr].action = action;
}
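/*
 * Usage sketch (illustrative; MY_SOFTIRQ and my_softirq_action are
 * hypothetical - real vectors are the fixed set in softirq_to_name[]):
 *
 *	static void my_softirq_action(struct softirq_action *h)
 *	{
 *		// runs in BH context with irqs enabled, per CPU
 *	}
 *
 *	open_softirq(MY_SOFTIRQ, my_softirq_action);
 *
 * as done below in softirq_init() for TASKLET_SOFTIRQ and HI_SOFTIRQ.
 */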

/*
 * Tasklets
 */
struct tasklet_head {
	struct tasklet_struct *head;
	struct tasklet_struct **tail;
};

static DEFINE_PER_CPU(struct tasklet_head, tasklet_vec);
static DEFINE_PER_CPU(struct tasklet_head, tasklet_hi_vec);

static void __tasklet_schedule_common(struct tasklet_struct *t,
				      struct tasklet_head __percpu *headp,
				      unsigned int softirq_nr)
{
	struct tasklet_head *head;
	unsigned long flags;

	local_irq_save(flags);
	head = this_cpu_ptr(headp);
	t->next = NULL;
	*head->tail = t;
	head->tail = &(t->next);
	raise_softirq_irqoff(softirq_nr);
	local_irq_restore(flags);
}

void __tasklet_schedule(struct tasklet_struct *t)
{
	__tasklet_schedule_common(t, &tasklet_vec,
				  TASKLET_SOFTIRQ);
}
EXPORT_SYMBOL(__tasklet_schedule);

void __tasklet_hi_schedule(struct tasklet_struct *t)
{
	__tasklet_schedule_common(t, &tasklet_hi_vec,
				  HI_SOFTIRQ);
}
EXPORT_SYMBOL(__tasklet_hi_schedule);

static bool tasklet_clear_sched(struct tasklet_struct *t)
{
	if (test_and_clear_bit(TASKLET_STATE_SCHED, &t->state)) {
		wake_up_var(&t->state);
		return true;
	}

	WARN_ONCE(1, "tasklet SCHED state not set: %s %pS\n",
		  t->use_callback ? "callback" : "func",
		  t->use_callback ? (void *)t->callback : (void *)t->func);

	return false;
}

static void tasklet_action_common(struct softirq_action *a,
				  struct tasklet_head *tl_head,
				  unsigned int softirq_nr)
{
	struct tasklet_struct *list;

	local_irq_disable();
	list = tl_head->head;
	tl_head->head = NULL;
	tl_head->tail = &tl_head->head;
	local_irq_enable();

	while (list) {
		struct tasklet_struct *t = list;

		list = list->next;

		if (tasklet_trylock(t)) {
			if (!atomic_read(&t->count)) {
				if (tasklet_clear_sched(t)) {
					if (t->use_callback)
						t->callback(t);
					else
						t->func(t->data);
				}
				tasklet_unlock(t);
				continue;
			}
			tasklet_unlock(t);
		}

		local_irq_disable();
		t->next = NULL;
		*tl_head->tail = t;
		tl_head->tail = &t->next;
		__raise_softirq_irqoff(softirq_nr);
		local_irq_enable();
	}
}

static __latent_entropy void tasklet_action(struct softirq_action *a)
{
	tasklet_action_common(a, this_cpu_ptr(&tasklet_vec), TASKLET_SOFTIRQ);
}

static __latent_entropy void tasklet_hi_action(struct softirq_action *a)
{
	tasklet_action_common(a, this_cpu_ptr(&tasklet_hi_vec), HI_SOFTIRQ);
}

void tasklet_setup(struct tasklet_struct *t,
		   void (*callback)(struct tasklet_struct *))
{
	t->next = NULL;
	t->state = 0;
	atomic_set(&t->count, 0);
	t->callback = callback;
	t->use_callback = true;
	t->data = 0;
}
EXPORT_SYMBOL(tasklet_setup);
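/*
 * Usage sketch (illustrative; my_dev, my_tasklet_fn are hypothetical):
 *
 *	static void my_tasklet_fn(struct tasklet_struct *t)
 *	{
 *		struct my_dev *dev = container_of(t, struct my_dev, tl);
 *
 *		// deferred work, runs in BH context
 *	}
 *
 *	tasklet_setup(&dev->tl, my_tasklet_fn);
 *	tasklet_schedule(&dev->tl);	// typically from the irq handler
 */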

void tasklet_init(struct tasklet_struct *t,
		  void (*func)(unsigned long), unsigned long data)
{
	t->next = NULL;
	t->state = 0;
	atomic_set(&t->count, 0);
	t->func = func;
	t->use_callback = false;
	t->data = data;
}
EXPORT_SYMBOL(tasklet_init);
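/*
 * Usage sketch (illustrative; names are hypothetical). The legacy
 * interface passes an unsigned long cookie instead of the
 * tasklet_struct pointer; new code should prefer tasklet_setup():
 *
 *	static void my_legacy_fn(unsigned long data)
 *	{
 *		struct my_dev *dev = (struct my_dev *)data;
 *	}
 *
 *	tasklet_init(&dev->tl, my_legacy_fn, (unsigned long)dev);
 */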

#if defined(CONFIG_SMP) || defined(CONFIG_PREEMPT_RT)
/*
 * Do not use in new code. Waiting for tasklets from atomic contexts is
 * error prone and should be avoided.
 */
void tasklet_unlock_spin_wait(struct tasklet_struct *t)
{
	while (test_bit(TASKLET_STATE_RUN, &(t)->state)) {
		if (IS_ENABLED(CONFIG_PREEMPT_RT)) {
			/*
			 * Prevent a live lock when the current task has
			 * preempted soft interrupt processing or prevents
			 * ksoftirqd from running. If the tasklet runs on a
			 * different CPU then this has no effect other than
			 * doing the BH disable/enable dance for nothing.
			 */
			local_bh_disable();
			local_bh_enable();
		} else {
			cpu_relax();
		}
	}
}
EXPORT_SYMBOL(tasklet_unlock_spin_wait);
#endif

void tasklet_kill(struct tasklet_struct *t)
{
	if (in_interrupt())
		pr_notice("Attempt to kill tasklet from interrupt\n");

	while (test_and_set_bit(TASKLET_STATE_SCHED, &t->state))
		wait_var_event(&t->state, !test_bit(TASKLET_STATE_SCHED, &t->state));

	tasklet_unlock_wait(t);
	tasklet_clear_sched(t);
}
EXPORT_SYMBOL(tasklet_kill);
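/*
 * Teardown sketch (illustrative; dev->irq and dev->tl are hypothetical):
 * stop the source of new schedules first, then kill the tasklet:
 *
 *	free_irq(dev->irq, dev);	// no more tasklet_schedule() calls
 *	tasklet_kill(&dev->tl);		// waits until a scheduled or
 *					// running instance has finished
 */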

#if defined(CONFIG_SMP) || defined(CONFIG_PREEMPT_RT)
void tasklet_unlock(struct tasklet_struct *t)
{
	smp_mb__before_atomic();
	clear_bit(TASKLET_STATE_RUN, &t->state);
	smp_mb__after_atomic();
	wake_up_var(&t->state);
}
EXPORT_SYMBOL_GPL(tasklet_unlock);

void tasklet_unlock_wait(struct tasklet_struct *t)
{
	wait_var_event(&t->state, !test_bit(TASKLET_STATE_RUN, &t->state));
}
EXPORT_SYMBOL_GPL(tasklet_unlock_wait);
#endif

void __init softirq_init(void)
{
	int cpu;

	for_each_possible_cpu(cpu) {
		per_cpu(tasklet_vec, cpu).tail =
			&per_cpu(tasklet_vec, cpu).head;
		per_cpu(tasklet_hi_vec, cpu).tail =
			&per_cpu(tasklet_hi_vec, cpu).head;
	}

	open_softirq(TASKLET_SOFTIRQ, tasklet_action);
	open_softirq(HI_SOFTIRQ, tasklet_hi_action);
}

static int ksoftirqd_should_run(unsigned int cpu)
{
	return local_softirq_pending();
}

static void run_ksoftirqd(unsigned int cpu)
{
	ksoftirqd_run_begin();
	if (local_softirq_pending()) {
		/*
		 * We can safely run softirq on inline stack, as we are not deep
		 * in the task stack here.
		 */
		__do_softirq();
		ksoftirqd_run_end();
		cond_resched();
		return;
	}
	ksoftirqd_run_end();
}

#ifdef CONFIG_HOTPLUG_CPU
static int takeover_tasklets(unsigned int cpu)
{
	/* CPU is dead, so no lock needed. */
	local_irq_disable();

	/* Find end, append list for that CPU. */
	if (&per_cpu(tasklet_vec, cpu).head != per_cpu(tasklet_vec, cpu).tail) {
		*__this_cpu_read(tasklet_vec.tail) = per_cpu(tasklet_vec, cpu).head;
		__this_cpu_write(tasklet_vec.tail, per_cpu(tasklet_vec, cpu).tail);
		per_cpu(tasklet_vec, cpu).head = NULL;
		per_cpu(tasklet_vec, cpu).tail = &per_cpu(tasklet_vec, cpu).head;
	}
	raise_softirq_irqoff(TASKLET_SOFTIRQ);

	if (&per_cpu(tasklet_hi_vec, cpu).head != per_cpu(tasklet_hi_vec, cpu).tail) {
		*__this_cpu_read(tasklet_hi_vec.tail) = per_cpu(tasklet_hi_vec, cpu).head;
		__this_cpu_write(tasklet_hi_vec.tail, per_cpu(tasklet_hi_vec, cpu).tail);
		per_cpu(tasklet_hi_vec, cpu).head = NULL;
		per_cpu(tasklet_hi_vec, cpu).tail = &per_cpu(tasklet_hi_vec, cpu).head;
	}
	raise_softirq_irqoff(HI_SOFTIRQ);

	local_irq_enable();
	return 0;
}
#else
#define takeover_tasklets	NULL
#endif /* CONFIG_HOTPLUG_CPU */

static struct smp_hotplug_thread softirq_threads = {
	.store			= &ksoftirqd,
	.thread_should_run	= ksoftirqd_should_run,
	.thread_fn		= run_ksoftirqd,
	.thread_comm		= "ksoftirqd/%u",
};

static __init int spawn_ksoftirqd(void)
{
	cpuhp_setup_state_nocalls(CPUHP_SOFTIRQ_DEAD, "softirq:dead", NULL,
				  takeover_tasklets);
	BUG_ON(smpboot_register_percpu_thread(&softirq_threads));

	return 0;
}
early_initcall(spawn_ksoftirqd);

/*
 * [ These __weak aliases are kept in a separate compilation unit, so that
 *   GCC does not inline them incorrectly. ]
 */

int __init __weak early_irq_init(void)
{
	return 0;
}

int __init __weak arch_probe_nr_irqs(void)
{
	return NR_IRQS_LEGACY;
}

int __init __weak arch_early_irq_init(void)
{
	return 0;
}

unsigned int __weak arch_dynirq_lower_bound(unsigned int from)
{
	return from;
}
998}