/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LINUX_PAGE_REF_H
#define _LINUX_PAGE_REF_H

#include <linux/atomic.h>
#include <linux/mm_types.h>
#include <linux/page-flags.h>
#include <linux/tracepoint-defs.h>

extern struct tracepoint __tracepoint_page_ref_set;
extern struct tracepoint __tracepoint_page_ref_mod;
extern struct tracepoint __tracepoint_page_ref_mod_and_test;
extern struct tracepoint __tracepoint_page_ref_mod_and_return;
extern struct tracepoint __tracepoint_page_ref_mod_unless;
extern struct tracepoint __tracepoint_page_ref_freeze;
extern struct tracepoint __tracepoint_page_ref_unfreeze;

#ifdef CONFIG_DEBUG_PAGE_REF

/*
 * Ideally we would like to use the trace_<tracepoint>_enabled() helper
 * functions, but due to include header dependencies that is not
 * feasible. Instead we have to open code the static key check.
 *
 * See trace_##name##_enabled(void) in include/linux/tracepoint.h
 */
#define page_ref_tracepoint_active(t) static_key_false(&(t).key)

extern void __page_ref_set(struct page *page, int v);
extern void __page_ref_mod(struct page *page, int v);
extern void __page_ref_mod_and_test(struct page *page, int v, int ret);
extern void __page_ref_mod_and_return(struct page *page, int v, int ret);
extern void __page_ref_mod_unless(struct page *page, int v, int u);
extern void __page_ref_freeze(struct page *page, int v, int ret);
extern void __page_ref_unfreeze(struct page *page, int v);

#else

#define page_ref_tracepoint_active(t) false

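/*
 * Stub versions of the tracepoint hooks; they compile away entirely
 * when CONFIG_DEBUG_PAGE_REF is disabled.
 */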
static inline void __page_ref_set(struct page *page, int v)
{
}
static inline void __page_ref_mod(struct page *page, int v)
{
}
static inline void __page_ref_mod_and_test(struct page *page, int v, int ret)
{
}
static inline void __page_ref_mod_and_return(struct page *page, int v, int ret)
{
}
static inline void __page_ref_mod_unless(struct page *page, int v, int u)
{
}
static inline void __page_ref_freeze(struct page *page, int v, int ret)
{
}
static inline void __page_ref_unfreeze(struct page *page, int v)
{
}

#endif

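/*
 * Return the raw reference count of @page itself, without resolving a
 * tail page to its compound head.
 */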
static inline int page_ref_count(struct page *page)
{
	return atomic_read(&page->_refcount);
}

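/*
 * Return the reference count of the compound head, which is where the
 * refcount of a compound page is maintained.
 */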
static inline int page_count(struct page *page)
{
	return atomic_read(&compound_head(page)->_refcount);
}

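/*
 * Set the refcount of @page to @v and fire the page_ref_set tracepoint
 * when CONFIG_DEBUG_PAGE_REF is enabled.
 */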
static inline void set_page_count(struct page *page, int v)
{
	atomic_set(&page->_refcount, v);
	if (page_ref_tracepoint_active(__tracepoint_page_ref_set))
		__page_ref_set(page, v);
}

/*
 * Set up the page count before the page is freed into the page allocator
 * for the first time (at boot or during memory hotplug).
 */
static inline void init_page_count(struct page *page)
{
	set_page_count(page, 1);
}

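/*
 * The helpers below adjust page->_refcount directly and fire the
 * corresponding page_ref_* tracepoints when CONFIG_DEBUG_PAGE_REF is
 * enabled.
 */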
static inline void page_ref_add(struct page *page, int nr)
{
	atomic_add(nr, &page->_refcount);
	if (page_ref_tracepoint_active(__tracepoint_page_ref_mod))
		__page_ref_mod(page, nr);
}

static inline void page_ref_sub(struct page *page, int nr)
{
	atomic_sub(nr, &page->_refcount);
	if (page_ref_tracepoint_active(__tracepoint_page_ref_mod))
		__page_ref_mod(page, -nr);
}

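/* Subtract @nr from the refcount and return the new value. */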
static inline int page_ref_sub_return(struct page *page, int nr)
{
	int ret = atomic_sub_return(nr, &page->_refcount);

	if (page_ref_tracepoint_active(__tracepoint_page_ref_mod_and_return))
		__page_ref_mod_and_return(page, -nr, ret);
	return ret;
}

static inline void page_ref_inc(struct page *page)
{
	atomic_inc(&page->_refcount);
	if (page_ref_tracepoint_active(__tracepoint_page_ref_mod))
		__page_ref_mod(page, 1);
}

static inline void page_ref_dec(struct page *page)
{
	atomic_dec(&page->_refcount);
	if (page_ref_tracepoint_active(__tracepoint_page_ref_mod))
		__page_ref_mod(page, -1);
}

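/* Subtract @nr from the refcount; returns non-zero if it dropped to zero. */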
static inline int page_ref_sub_and_test(struct page *page, int nr)
{
	int ret = atomic_sub_and_test(nr, &page->_refcount);

	if (page_ref_tracepoint_active(__tracepoint_page_ref_mod_and_test))
		__page_ref_mod_and_test(page, -nr, ret);
	return ret;
}

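/* Increment the refcount and return the new value. */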
static inline int page_ref_inc_return(struct page *page)
{
	int ret = atomic_inc_return(&page->_refcount);

	if (page_ref_tracepoint_active(__tracepoint_page_ref_mod_and_return))
		__page_ref_mod_and_return(page, 1, ret);
	return ret;
}

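/* Decrement the refcount; returns non-zero if it dropped to zero. */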
static inline int page_ref_dec_and_test(struct page *page)
{
	int ret = atomic_dec_and_test(&page->_refcount);

	if (page_ref_tracepoint_active(__tracepoint_page_ref_mod_and_test))
		__page_ref_mod_and_test(page, -1, ret);
	return ret;
}

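/* Decrement the refcount and return the new value. */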
static inline int page_ref_dec_return(struct page *page)
{
	int ret = atomic_dec_return(&page->_refcount);

	if (page_ref_tracepoint_active(__tracepoint_page_ref_mod_and_return))
		__page_ref_mod_and_return(page, -1, ret);
	return ret;
}

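/*
 * Add @nr to the refcount unless it is currently @u. Returns non-zero if
 * the addition was performed.
 */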
static inline int page_ref_add_unless(struct page *page, int nr, int u)
{
	int ret = atomic_add_unless(&page->_refcount, nr, u);

	if (page_ref_tracepoint_active(__tracepoint_page_ref_mod_unless))
		__page_ref_mod_unless(page, nr, ret);
	return ret;
}

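/*
 * Freeze the refcount: atomically replace it with zero, but only if it is
 * exactly @count. Returns non-zero on success, after which no new
 * references can be taken until page_ref_unfreeze() restores the count.
 */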
static inline int page_ref_freeze(struct page *page, int count)
{
	int ret = likely(atomic_cmpxchg(&page->_refcount, count, 0) == count);

	if (page_ref_tracepoint_active(__tracepoint_page_ref_freeze))
		__page_ref_freeze(page, count, ret);
	return ret;
}

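/*
 * Unfreeze a previously frozen page by restoring its refcount to @count.
 * atomic_set_release() orders the caller's page updates before the
 * refcount becomes visible as non-zero again.
 */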
static inline void page_ref_unfreeze(struct page *page, int count)
{
	VM_BUG_ON_PAGE(page_count(page) != 0, page);
	VM_BUG_ON(count == 0);

	atomic_set_release(&page->_refcount, count);
	if (page_ref_tracepoint_active(__tracepoint_page_ref_unfreeze))
		__page_ref_unfreeze(page, count);
}

#endif /* _LINUX_PAGE_REF_H */