// SPDX-License-Identifier: GPL-2.0
/*
 * Lockless hierarchical page accounting & limiting
 *
 * Copyright (C) 2014 Red Hat, Inc., Johannes Weiner
 */

#include <linux/page_counter.h>
#include <linux/atomic.h>
#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/sched.h>
#include <linux/bug.h>
#include <asm/page.h>

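/*
 * Propagate changes of a counter's protected usage (min/low) up the
 * hierarchy: the portion of @usage covered by the counter's own min/low
 * is published in min_usage/low_usage, and the delta against the
 * previously published value is charged to the parent's
 * children_min_usage/children_low_usage.  E.g. if usage drops from 100
 * to 60 pages under a min of 80, the protected amount goes from 80 to
 * 60 and the parent's children_min_usage shrinks by 20.
 */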
static void propagate_protected_usage(struct page_counter *c,
				      unsigned long usage)
{
	unsigned long protected, old_protected;
	unsigned long low, min;
	long delta;

	if (!c->parent)
		return;

	min = READ_ONCE(c->min);
	if (min || atomic_long_read(&c->min_usage)) {
		protected = min(usage, min);
		old_protected = atomic_long_xchg(&c->min_usage, protected);
		delta = protected - old_protected;
		if (delta)
			atomic_long_add(delta, &c->parent->children_min_usage);
	}

	low = READ_ONCE(c->low);
	if (low || atomic_long_read(&c->low_usage)) {
		protected = min(usage, low);
		old_protected = atomic_long_xchg(&c->low_usage, protected);
		delta = protected - old_protected;
		if (delta)
			atomic_long_add(delta, &c->parent->children_low_usage);
	}
}

/**
 * page_counter_cancel - take pages out of the local counter
 * @counter: counter
 * @nr_pages: number of pages to cancel
 */
void page_counter_cancel(struct page_counter *counter, unsigned long nr_pages)
{
	long new;

	new = atomic_long_sub_return(nr_pages, &counter->usage);
	propagate_protected_usage(counter, new);
	/* More uncharges than charges? */
	WARN_ON_ONCE(new < 0);
}

/**
 * page_counter_charge - hierarchically charge pages
 * @counter: counter
 * @nr_pages: number of pages to charge
 *
 * NOTE: This does not consider any configured counter limits.
 */
void page_counter_charge(struct page_counter *counter, unsigned long nr_pages)
{
	struct page_counter *c;

	for (c = counter; c; c = c->parent) {
		long new;

		new = atomic_long_add_return(nr_pages, &c->usage);
		propagate_protected_usage(c, new);
		/*
		 * This is indeed racy, but we can live with some
		 * inaccuracy in the watermark.
		 */
		if (new > c->watermark)
			c->watermark = new;
	}
}

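/*
 * Illustrative sketch, not from this file: page_counter_charge() is the
 * force-charging path, used when the charge must be accounted even if
 * it takes the counter past its limit.  A hypothetical caller pairs it
 * with an uncharge of the same size:
 *
 *	page_counter_charge(counter, nr_pages);
 *	...
 *	page_counter_uncharge(counter, nr_pages);
 */
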
/**
 * page_counter_try_charge - try to hierarchically charge pages
 * @counter: counter
 * @nr_pages: number of pages to charge
 * @fail: points to the first counter to hit its limit, if any
 *
 * Returns %true on success, or %false and @fail if the counter or one
 * of its ancestors has hit its configured limit.
 */
bool page_counter_try_charge(struct page_counter *counter,
			     unsigned long nr_pages,
			     struct page_counter **fail)
{
	struct page_counter *c;

	for (c = counter; c; c = c->parent) {
		long new;
		/*
		 * Charge speculatively to avoid an expensive CAS.  If
		 * a bigger charge fails, it might falsely lock out a
		 * racing smaller charge and send it into reclaim
		 * early, but the error is limited to the difference
		 * between the two sizes, which is less than 2M/4M in
		 * case of a THP locking out a regular page charge.
		 *
		 * The atomic_long_add_return() implies a full memory
		 * barrier between incrementing the count and reading
		 * the limit.  When racing with page_counter_set_max(),
		 * we either see the new limit or the setter sees the
		 * counter has changed and retries.
		 */
		new = atomic_long_add_return(nr_pages, &c->usage);
		if (new > c->max) {
			atomic_long_sub(nr_pages, &c->usage);
			propagate_protected_usage(c, new);
			/*
			 * This is racy, but we can live with some
			 * inaccuracy in the failcnt.
			 */
			c->failcnt++;
			*fail = c;
			goto failed;
		}
		propagate_protected_usage(c, new);
		/*
		 * Just like with failcnt, we can live with some
		 * inaccuracy in the watermark.
		 */
		if (new > c->watermark)
			c->watermark = new;
	}
	return true;

failed:
	for (c = counter; c != *fail; c = c->parent)
		page_counter_cancel(c, nr_pages);

	return false;
}

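/*
 * Illustrative sketch, not from this file: a typical caller tries the
 * charge and, on failure, targets reclaim at the counter that hit its
 * limit before retrying (reclaim_from() is a hypothetical helper):
 *
 *	struct page_counter *fail;
 *
 *	while (!page_counter_try_charge(counter, nr_pages, &fail)) {
 *		if (!reclaim_from(fail, nr_pages))
 *			return -ENOMEM;
 *	}
 */
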
/**
 * page_counter_uncharge - hierarchically uncharge pages
 * @counter: counter
 * @nr_pages: number of pages to uncharge
 */
void page_counter_uncharge(struct page_counter *counter, unsigned long nr_pages)
{
	struct page_counter *c;

	for (c = counter; c; c = c->parent)
		page_counter_cancel(c, nr_pages);
}

/**
 * page_counter_set_max - set the maximum number of pages allowed
 * @counter: counter
 * @nr_pages: limit to set
 *
 * Returns 0 on success, -EBUSY if the current number of pages on the
 * counter already exceeds the specified limit.
 *
 * The caller must serialize invocations on the same counter.
 */
int page_counter_set_max(struct page_counter *counter, unsigned long nr_pages)
{
	for (;;) {
		unsigned long old;
		long usage;

		/*
		 * Update the limit while making sure that it's not
		 * below the concurrently-changing counter value.
		 *
		 * The xchg implies two full memory barriers before
		 * and after, so the read-swap-read is ordered and
		 * ensures coherency with page_counter_try_charge():
		 * that function modifies the count before checking
		 * the limit, so if it sees the old limit, we see the
		 * modified counter and retry.
		 */
		usage = atomic_long_read(&counter->usage);

		if (usage > nr_pages)
			return -EBUSY;

		old = xchg(&counter->max, nr_pages);

		if (atomic_long_read(&counter->usage) <= usage)
			return 0;

		counter->max = old;
		cond_resched();
	}
}

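/*
 * Illustrative sketch, not from this file: shrinking a limit below the
 * current usage fails with -EBUSY, so callers typically reclaim and
 * retry (reclaim_pages() is a hypothetical helper):
 *
 *	while (page_counter_set_max(counter, new_max) == -EBUSY) {
 *		if (!reclaim_pages(counter))
 *			return -EBUSY;
 *	}
 */
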
/**
 * page_counter_set_min - set the amount of hard memory protection
 * @counter: counter
 * @nr_pages: value to set
 *
 * The caller must serialize invocations on the same counter.
 */
void page_counter_set_min(struct page_counter *counter, unsigned long nr_pages)
{
	struct page_counter *c;

	WRITE_ONCE(counter->min, nr_pages);

	for (c = counter; c; c = c->parent)
		propagate_protected_usage(c, atomic_long_read(&c->usage));
}

/**
 * page_counter_set_low - set the amount of best-effort memory protection
 * @counter: counter
 * @nr_pages: value to set
 *
 * The caller must serialize invocations on the same counter.
 */
void page_counter_set_low(struct page_counter *counter, unsigned long nr_pages)
{
	struct page_counter *c;

	WRITE_ONCE(counter->low, nr_pages);

	for (c = counter; c; c = c->parent)
		propagate_protected_usage(c, atomic_long_read(&c->usage));
}

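/*
 * Illustrative sketch, not from this file: protection values usually
 * arrive from a configuration write handler, e.g.:
 *
 *	unsigned long nr_pages;
 *	int err;
 *
 *	err = page_counter_memparse(buf, "max", &nr_pages);
 *	if (err)
 *		return err;
 *	page_counter_set_low(counter, nr_pages);
 */
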
/**
 * page_counter_memparse - memparse() for page counter limits
 * @buf: string to parse
 * @max: string meaning maximum possible value
 * @nr_pages: returns the result in number of pages
 *
 * Returns -EINVAL, or 0 and @nr_pages on success.  @nr_pages will be
 * limited to %PAGE_COUNTER_MAX.
 */
int page_counter_memparse(const char *buf, const char *max,
			  unsigned long *nr_pages)
{
	char *end;
	u64 bytes;

	if (!strcmp(buf, max)) {
		*nr_pages = PAGE_COUNTER_MAX;
		return 0;
	}

	bytes = memparse(buf, &end);
	if (*end != '\0')
		return -EINVAL;

	*nr_pages = min(bytes / PAGE_SIZE, (u64)PAGE_COUNTER_MAX);

	return 0;
}
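
/*
 * Illustrative sketch, not from this file: with a 4K page size, "512M"
 * parses to 131072 pages, while the literal @max string yields
 * PAGE_COUNTER_MAX:
 *
 *	unsigned long nr_pages;
 *
 *	if (page_counter_memparse(buf, "max", &nr_pages))
 *		return -EINVAL;
 */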