/* SPDX-License-Identifier: GPL-2.0+ */
/*
 * Read-Copy Update mechanism for mutual exclusion, the Bloatwatch edition.
 *
 * Copyright IBM Corporation, 2008
 *
 * Author: Paul E. McKenney <paulmck@linux.ibm.com>
 *
 * For detailed explanation of Read-Copy Update mechanism see -
 *	Documentation/RCU
 */
#ifndef __LINUX_TINY_H
#define __LINUX_TINY_H

#include <asm/param.h> /* for HZ */

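/*
 * Tiny RCU is used only on uniprocessor, non-preemptible kernels, where the
 * single CPU passing through a quiescent state ends the current grace
 * period.  Most of the Tree RCU API therefore reduces to the no-op stubs
 * and direct calls below.
 */
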
/* Never flag non-existent other CPUs! */
static inline bool rcu_eqs_special_set(int cpu) { return false; }

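/*
 * Grace-period polling: with only one CPU there is no grace-period state
 * worth sampling, so the cookie is always zero and a later
 * cond_synchronize_rcu() never needs to block (beyond noting that it may
 * sleep).
 */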
static inline unsigned long get_state_synchronize_rcu(void)
{
	return 0;
}

static inline void cond_synchronize_rcu(unsigned long oldstate)
{
	might_sleep();
}

extern void rcu_barrier(void);

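/*
 * On a single CPU an expedited grace period cannot complete any sooner
 * than a normal one, so just map it onto synchronize_rcu().
 */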
static inline void synchronize_rcu_expedited(void)
{
	synchronize_rcu();
}

/*
 * Add one more declaration of kvfree() here.  It is not straightforward
 * to simply include <linux/mm.h>, where it is defined, because doing so
 * causes many compile errors.
 */
extern void kvfree(const void *addr);

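/*
 * Queue a kvfree_rcu() request.  A NULL rcu_head means this came from the
 * single-argument form of kvfree_rcu(), in which case @func actually
 * carries the pointer to be freed; wait for a grace period and free it
 * directly instead of queueing a callback.
 */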
static inline void kvfree_call_rcu(struct rcu_head *head, rcu_callback_t func)
{
	if (head) {
		call_rcu(head, func);
		return;
	}

	// kvfree_rcu(one_arg) call.
	might_sleep();
	synchronize_rcu();
	kvfree((void *) func);
}

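/*
 * Report a quiescent state: the Tiny RCU core (kernel/rcu/tiny.c) then
 * advances callbacks, raising RCU_SOFTIRQ to invoke any that are ready.
 */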
void rcu_qs(void);

static inline void rcu_softirq_qs(void)
{
	rcu_qs();
}

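/*
 * A context switch is a quiescent state for vanilla RCU, and a voluntary
 * (non-preemptive) context switch also qualifies for RCU Tasks, so report
 * the switch to both.
 */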
#define rcu_note_context_switch(preempt) \
	do { \
		rcu_qs(); \
		rcu_tasks_qs(current, (preempt)); \
	} while (0)

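/*
 * Tell the idle/nohz code that RCU never needs to keep this CPU out of
 * dyntick-idle mode: report no pending work and no future event.
 */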
static inline int rcu_needs_cpu(u64 basemono, u64 *nextevt)
{
	*nextevt = KTIME_MAX;
	return 0;
}

/*
 * Take advantage of the fact that there is only one CPU, which
 * allows us to ignore virtualization-based context switches.
 */
static inline void rcu_virt_note_context_switch(int cpu) { }
static inline void rcu_cpu_stall_reset(void) { }
static inline int rcu_jiffies_till_stall_check(void) { return 21 * HZ; }
static inline void rcu_idle_enter(void) { }
static inline void rcu_idle_exit(void) { }
static inline void rcu_irq_enter(void) { }
static inline void rcu_irq_exit_irqson(void) { }
static inline void rcu_irq_enter_irqson(void) { }
static inline void rcu_irq_exit(void) { }
static inline void rcu_irq_exit_preempt(void) { }
static inline void rcu_irq_exit_check_preempt(void) { }
static inline void exit_rcu(void) { }
static inline bool rcu_preempt_need_deferred_qs(struct task_struct *t)
{
	return false;
}
static inline void rcu_preempt_deferred_qs(struct task_struct *t) { }
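/*
 * The real rcu_scheduler_starting() is provided when SRCU is configured
 * (Tiny SRCU needs to know when the scheduler is up and running); otherwise
 * a no-op stub suffices.
 */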
#ifdef CONFIG_SRCU
void rcu_scheduler_starting(void);
#else /* #ifndef CONFIG_SRCU */
static inline void rcu_scheduler_starting(void) { }
#endif /* #else #ifndef CONFIG_SRCU */
static inline void rcu_end_inkernel_boot(void) { }
static inline bool rcu_inkernel_boot_has_ended(void) { return true; }
static inline bool rcu_is_watching(void) { return true; }
static inline void rcu_momentary_dyntick_idle(void) { }
static inline void kfree_rcu_scheduler_running(void) { }
static inline bool rcu_gp_might_be_stalled(void) { return false; }

/* Avoid RCU read-side critical sections leaking across. */
static inline void rcu_all_qs(void) { barrier(); }

/* RCUtree hotplug events */
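/* Nothing to do here: NULL callbacks are skipped by the CPU-hotplug core. */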
#define rcutree_prepare_cpu NULL
#define rcutree_online_cpu NULL
#define rcutree_offline_cpu NULL
#define rcutree_dead_cpu NULL
#define rcutree_dying_cpu NULL
static inline void rcu_cpu_starting(unsigned int cpu) { }

#endif /* __LINUX_RCUTINY_H */