// SPDX-License-Identifier: GPL-2.0
/*
 * Lockless hierarchical page accounting & limiting
 *
 * Copyright (C) 2014 Red Hat, Inc., Johannes Weiner
 */

#include <linux/page_counter.h>
#include <linux/atomic.h>
#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/sched.h>
#include <linux/bug.h>
#include <asm/page.h>

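/*
 * Propagate the usage that is currently protected by min/low up the
 * hierarchy: each parent accumulates its children's protected usage in
 * children_min_usage/children_low_usage, which reclaim later uses to
 * calculate effective protection. For example, if usage grows from 10
 * to 30 pages while min is 20, the protected amount goes from 10 to 20
 * and the parent's children_min_usage is increased by the delta of 10.
 */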
static void propagate_protected_usage(struct page_counter *c,
				      unsigned long usage)
{
	unsigned long protected, old_protected;
	unsigned long low, min;
	long delta;

	if (!c->parent)
		return;

	min = READ_ONCE(c->min);
	if (min || atomic_long_read(&c->min_usage)) {
		protected = min(usage, min);
		old_protected = atomic_long_xchg(&c->min_usage, protected);
		delta = protected - old_protected;
		if (delta)
			atomic_long_add(delta, &c->parent->children_min_usage);
	}

	low = READ_ONCE(c->low);
	if (low || atomic_long_read(&c->low_usage)) {
		protected = min(usage, low);
		old_protected = atomic_long_xchg(&c->low_usage, protected);
		delta = protected - old_protected;
		if (delta)
			atomic_long_add(delta, &c->parent->children_low_usage);
	}
}

/**
 * page_counter_cancel - take pages out of the local counter
 * @counter: counter
 * @nr_pages: number of pages to cancel
 */
void page_counter_cancel(struct page_counter *counter, unsigned long nr_pages)
{
	long new;

	new = atomic_long_sub_return(nr_pages, &counter->usage);
	/* More uncharges than charges? */
	if (WARN_ONCE(new < 0, "page_counter underflow: %ld nr_pages=%lu\n",
		      new, nr_pages)) {
		new = 0;
		atomic_long_set(&counter->usage, new);
	}
	propagate_protected_usage(counter, new);
}

/**
 * page_counter_charge - hierarchically charge pages
 * @counter: counter
 * @nr_pages: number of pages to charge
 *
 * NOTE: This does not consider any configured counter limits.
 */
void page_counter_charge(struct page_counter *counter, unsigned long nr_pages)
{
	struct page_counter *c;

	for (c = counter; c; c = c->parent) {
		long new;

		new = atomic_long_add_return(nr_pages, &c->usage);
		propagate_protected_usage(c, new);
		/*
		 * This is indeed racy, but we can live with some
		 * inaccuracy in the watermark.
		 */
		if (new > READ_ONCE(c->watermark))
			WRITE_ONCE(c->watermark, new);
	}
}
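
/*
 * Note: in contrast to page_counter_try_charge() below, the forced
 * variant above cannot fail and may push usage past the configured
 * maximum; limit enforcement, if any, is up to the caller.
 */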

/**
 * page_counter_try_charge - try to hierarchically charge pages
 * @counter: counter
 * @nr_pages: number of pages to charge
 * @fail: points to the first counter that hit its limit, if any
 *
 * Returns %true on success, or %false and @fail if the counter or one
 * of its ancestors has hit its configured limit.
 */
bool page_counter_try_charge(struct page_counter *counter,
			     unsigned long nr_pages,
			     struct page_counter **fail)
{
	struct page_counter *c;

	for (c = counter; c; c = c->parent) {
		long new;
		/*
		 * Charge speculatively to avoid an expensive CAS. If
		 * a bigger charge fails, it might falsely lock out a
		 * racing smaller charge and send it into reclaim
		 * early, but the error is limited to the difference
		 * between the two sizes, which is less than 2M/4M in
		 * case of a THP locking out a regular page charge.
		 *
		 * The atomic_long_add_return() implies a full memory
		 * barrier between incrementing the count and reading
		 * the limit. When racing with page_counter_set_max(),
		 * we either see the new limit or the setter sees the
		 * counter has changed and retries.
		 */
		new = atomic_long_add_return(nr_pages, &c->usage);
		if (new > c->max) {
			atomic_long_sub(nr_pages, &c->usage);
			/*
			 * This is racy, but we can live with some
			 * inaccuracy in the failcnt, which is only
			 * used to report stats.
			 */
			data_race(c->failcnt++);
			*fail = c;
			goto failed;
		}
		propagate_protected_usage(c, new);
		/*
		 * Just like with failcnt, we can live with some
		 * inaccuracy in the watermark.
		 */
		if (new > READ_ONCE(c->watermark))
			WRITE_ONCE(c->watermark, new);
	}
	return true;

failed:
	for (c = counter; c != *fail; c = c->parent)
		page_counter_cancel(c, nr_pages);

	return false;
}
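
/*
 * Usage sketch (illustrative only; 'counter' and the reclaim step are
 * assumptions about the caller, not part of this file):
 *
 *	struct page_counter *fail;
 *
 *	if (!page_counter_try_charge(counter, nr_pages, &fail)) {
 *		// 'fail' names the counter whose limit was hit;
 *		// callers typically reclaim against it and retry.
 *		return -ENOMEM;
 *	}
 *	// ... use the charged pages, then release them:
 *	page_counter_uncharge(counter, nr_pages);
 */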

/**
 * page_counter_uncharge - hierarchically uncharge pages
 * @counter: counter
 * @nr_pages: number of pages to uncharge
 */
void page_counter_uncharge(struct page_counter *counter, unsigned long nr_pages)
{
	struct page_counter *c;

	for (c = counter; c; c = c->parent)
		page_counter_cancel(c, nr_pages);
}

/**
 * page_counter_set_max - set the maximum number of pages allowed
 * @counter: counter
 * @nr_pages: limit to set
 *
 * Returns 0 on success, -EBUSY if the current number of pages on the
 * counter already exceeds the specified limit.
 *
 * The caller must serialize invocations on the same counter.
 */
int page_counter_set_max(struct page_counter *counter, unsigned long nr_pages)
{
	for (;;) {
		unsigned long old;
		long usage;

		/*
		 * Update the limit while making sure that it's not
		 * below the concurrently-changing counter value.
		 *
		 * The xchg implies two full memory barriers before
		 * and after, so the read-swap-read is ordered and
		 * ensures coherency with page_counter_try_charge():
		 * that function modifies the count before checking
		 * the limit, so if it sees the old limit, we see the
		 * modified counter and retry.
		 */
		usage = page_counter_read(counter);

		if (usage > nr_pages)
			return -EBUSY;

		old = xchg(&counter->max, nr_pages);

		if (page_counter_read(counter) <= usage)
			return 0;

		counter->max = old;
		cond_resched();
	}
}
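
/*
 * Illustrative walk-through: with usage at 100 pages, setting the limit
 * to 50 returns -EBUSY immediately. Setting it to 200 swaps in the new
 * limit and succeeds if the re-read shows usage has not grown past the
 * 100-page snapshot; if a concurrent charge did grow it, the old limit
 * is restored and the loop conservatively retries.
 */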

/**
 * page_counter_set_min - set the amount of guaranteed (min) protected memory
 * @counter: counter
 * @nr_pages: value to set
 *
 * The caller must serialize invocations on the same counter.
 */
void page_counter_set_min(struct page_counter *counter, unsigned long nr_pages)
{
	struct page_counter *c;

	WRITE_ONCE(counter->min, nr_pages);

	for (c = counter; c; c = c->parent)
		propagate_protected_usage(c, atomic_long_read(&c->usage));
}

/**
 * page_counter_set_low - set the amount of best-effort (low) protected memory
 * @counter: counter
 * @nr_pages: value to set
 *
 * The caller must serialize invocations on the same counter.
 */
void page_counter_set_low(struct page_counter *counter, unsigned long nr_pages)
{
	struct page_counter *c;

	WRITE_ONCE(counter->low, nr_pages);

	for (c = counter; c; c = c->parent)
		propagate_protected_usage(c, atomic_long_read(&c->usage));
}
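
/*
 * Illustrative note (outside this file): in cgroup2 these setters back
 * the memory.min and memory.low control files, e.g.:
 *
 *	page_counter_set_min(&memcg->memory, min);
 *	page_counter_set_low(&memcg->memory, low);
 *
 * where 'memcg' is assumed to be the owning mem_cgroup.
 */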

/**
 * page_counter_memparse - memparse() for page counter limits
 * @buf: string to parse
 * @max: string meaning maximum possible value
 * @nr_pages: returns the result in number of pages
 *
 * Returns -EINVAL, or 0 and @nr_pages on success. @nr_pages will be
 * limited to %PAGE_COUNTER_MAX.
 */
int page_counter_memparse(const char *buf, const char *max,
			  unsigned long *nr_pages)
{
	char *end;
	u64 bytes;

	if (!strcmp(buf, max)) {
		*nr_pages = PAGE_COUNTER_MAX;
		return 0;
	}

	bytes = memparse(buf, &end);
	if (*end != '\0')
		return -EINVAL;

	*nr_pages = min(bytes / PAGE_SIZE, (u64)PAGE_COUNTER_MAX);

	return 0;
}
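
/*
 * Usage sketch (illustrative only; 'counter' is an assumption): parsing
 * a cgroup-style limit string such as "512M" or "max":
 *
 *	unsigned long nr_pages;
 *
 *	if (page_counter_memparse("512M", "max", &nr_pages))
 *		return -EINVAL;
 *	// 512M on a 4K-page system yields nr_pages == 131072.
 *	page_counter_set_max(counter, nr_pages);
 */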