/*
 *
 * Copyright IBM Corporation, 2012
 * Author Aneesh Kumar K.V <aneesh.kumar@linux.vnet.ibm.com>
 *
 * Cgroup v2
 * Copyright (C) 2019 Red Hat, Inc.
 * Author: Giuseppe Scrivano <gscrivan@redhat.com>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of version 2.1 of the GNU Lesser General Public License
 * as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it would be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
 *
 */

#include <linux/cgroup.h>
#include <linux/page_counter.h>
#include <linux/slab.h>
#include <linux/hugetlb.h>
#include <linux/hugetlb_cgroup.h>

enum hugetlb_memory_event {
	HUGETLB_MAX,
	HUGETLB_NR_MEMORY_EVENTS,
};

struct hugetlb_cgroup {
	struct cgroup_subsys_state css;

	/*
	 * Per-hstate counters accounting huge pages charged to this
	 * cgroup.
	 */
	struct page_counter hugepage[HUGE_MAX_HSTATE];

	atomic_long_t events[HUGE_MAX_HSTATE][HUGETLB_NR_MEMORY_EVENTS];
	atomic_long_t events_local[HUGE_MAX_HSTATE][HUGETLB_NR_MEMORY_EVENTS];

	/* Handle for "hugetlb.events" */
	struct cgroup_file events_file[HUGE_MAX_HSTATE];

	/* Handle for "hugetlb.events.local" */
	struct cgroup_file events_local_file[HUGE_MAX_HSTATE];
};

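/*
 * The hstate index and the resource attribute are packed into
 * cftype->private: the index in the upper 16 bits, the attribute
 * (RES_USAGE, RES_LIMIT, ... defined below) in the lower 16 bits.
 */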
#define MEMFILE_PRIVATE(x, val)	(((x) << 16) | (val))
#define MEMFILE_IDX(val)	(((val) >> 16) & 0xffff)
#define MEMFILE_ATTR(val)	((val) & 0xffff)

#define hugetlb_cgroup_from_counter(counter, idx)			\
	container_of(counter, struct hugetlb_cgroup, hugepage[idx])

static struct hugetlb_cgroup *root_h_cgroup __read_mostly;

static inline
struct hugetlb_cgroup *hugetlb_cgroup_from_css(struct cgroup_subsys_state *s)
{
	return s ? container_of(s, struct hugetlb_cgroup, css) : NULL;
}

static inline
struct hugetlb_cgroup *hugetlb_cgroup_from_task(struct task_struct *task)
{
	return hugetlb_cgroup_from_css(task_css(task, hugetlb_cgrp_id));
}

static inline bool hugetlb_cgroup_is_root(struct hugetlb_cgroup *h_cg)
{
	return (h_cg == root_h_cgroup);
}

static inline struct hugetlb_cgroup *
parent_hugetlb_cgroup(struct hugetlb_cgroup *h_cg)
{
	return hugetlb_cgroup_from_css(h_cg->css.parent);
}

static inline bool hugetlb_cgroup_have_usage(struct hugetlb_cgroup *h_cg)
{
	int idx;

	for (idx = 0; idx < hugetlb_max_hstate; idx++) {
		if (page_counter_read(&h_cg->hugepage[idx]))
			return true;
	}
	return false;
}

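/*
 * Initialize the page counter for every hstate, parenting each one to
 * the corresponding counter in parent_h_cgroup (if any) so that charges
 * propagate up the hierarchy. The default limit is PAGE_COUNTER_MAX
 * rounded down to a whole number of huge pages of that size.
 */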
static void hugetlb_cgroup_init(struct hugetlb_cgroup *h_cgroup,
				struct hugetlb_cgroup *parent_h_cgroup)
{
	int idx;

	for (idx = 0; idx < HUGE_MAX_HSTATE; idx++) {
		struct page_counter *counter = &h_cgroup->hugepage[idx];
		struct page_counter *parent = NULL;
		unsigned long limit;
		int ret;

		if (parent_h_cgroup)
			parent = &parent_h_cgroup->hugepage[idx];
		page_counter_init(counter, parent);

		limit = round_down(PAGE_COUNTER_MAX,
				   1 << huge_page_order(&hstates[idx]));
		ret = page_counter_set_max(counter, limit);
		VM_BUG_ON(ret);
	}
}

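/*
 * Allocate and initialize a hugetlb_cgroup for a new css. The first
 * cgroup allocated (the one without a parent) is remembered as the
 * root.
 */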
static struct cgroup_subsys_state *
hugetlb_cgroup_css_alloc(struct cgroup_subsys_state *parent_css)
{
	struct hugetlb_cgroup *parent_h_cgroup = hugetlb_cgroup_from_css(parent_css);
	struct hugetlb_cgroup *h_cgroup;

	h_cgroup = kzalloc(sizeof(*h_cgroup), GFP_KERNEL);
	if (!h_cgroup)
		return ERR_PTR(-ENOMEM);

	if (!parent_h_cgroup)
		root_h_cgroup = h_cgroup;

	hugetlb_cgroup_init(h_cgroup, parent_h_cgroup);
	return &h_cgroup->css;
}

static void hugetlb_cgroup_css_free(struct cgroup_subsys_state *css)
{
	struct hugetlb_cgroup *h_cgroup;

	h_cgroup = hugetlb_cgroup_from_css(css);
	kfree(h_cgroup);
}

/*
 * Should be called with hugetlb_lock held. Since we hold hugetlb_lock,
 * pages cannot be moved off the active list or uncharged from the
 * cgroup, so there is no need to take a page reference or test whether
 * the page is active here. This function cannot fail.
 */
static void hugetlb_cgroup_move_parent(int idx, struct hugetlb_cgroup *h_cg,
				       struct page *page)
{
	unsigned int nr_pages;
	struct page_counter *counter;
	struct hugetlb_cgroup *page_hcg;
	struct hugetlb_cgroup *parent = parent_hugetlb_cgroup(h_cg);

	page_hcg = hugetlb_cgroup_from_page(page);
	/*
	 * Pages on the active list may have no cgroup attached, i.e.
	 * huge pages consisting of fewer than 3 base pages. Those can
	 * safely be ignored.
	 */
	if (!page_hcg || page_hcg != h_cg)
		goto out;

	nr_pages = compound_nr(page);
	if (!parent) {
		parent = root_h_cgroup;
		/* root has no limit */
		page_counter_charge(&parent->hugepage[idx], nr_pages);
	}
	counter = &h_cg->hugepage[idx];
	/* Take the pages off the local counter */
	page_counter_cancel(counter, nr_pages);

	set_hugetlb_cgroup(page, parent);
out:
	return;
}

/*
 * Force the hugetlb cgroup to empty the hugetlb resources by moving them to
 * the parent cgroup.
 */
static void hugetlb_cgroup_css_offline(struct cgroup_subsys_state *css)
{
	struct hugetlb_cgroup *h_cg = hugetlb_cgroup_from_css(css);
	struct hstate *h;
	struct page *page;
	int idx = 0;

	do {
		for_each_hstate(h) {
			spin_lock(&hugetlb_lock);
			list_for_each_entry(page, &h->hugepage_activelist, lru)
				hugetlb_cgroup_move_parent(idx, h_cg, page);

			spin_unlock(&hugetlb_lock);
			idx++;
		}
		cond_resched();
	} while (hugetlb_cgroup_have_usage(h_cg));
}

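/*
 * Record a memory event: bump the local counter for this cgroup only,
 * then walk up towards the root bumping the hierarchical counter,
 * notifying pollers of the corresponding events files along the way.
 */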
static inline void hugetlb_event(struct hugetlb_cgroup *hugetlb, int idx,
				 enum hugetlb_memory_event event)
{
	atomic_long_inc(&hugetlb->events_local[idx][event]);
	cgroup_file_notify(&hugetlb->events_local_file[idx]);

	do {
		atomic_long_inc(&hugetlb->events[idx][event]);
		cgroup_file_notify(&hugetlb->events_file[idx]);
	} while ((hugetlb = parent_hugetlb_cgroup(hugetlb)) &&
		 !hugetlb_cgroup_is_root(hugetlb));
}

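/*
 * Charge nr_pages of hstate idx to the current task's hugetlb cgroup.
 * On return *ptr holds the cgroup the charge was made (or attempted)
 * against, to be committed to a page later. On failure, -ENOMEM is
 * returned and a HUGETLB_MAX event is recorded against the cgroup
 * whose limit was hit.
 */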
int hugetlb_cgroup_charge_cgroup(int idx, unsigned long nr_pages,
				 struct hugetlb_cgroup **ptr)
{
	int ret = 0;
	struct page_counter *counter;
	struct hugetlb_cgroup *h_cg = NULL;

	if (hugetlb_cgroup_disabled())
		goto done;
	/*
	 * We don't charge any cgroup if the compound page has fewer
	 * than 3 base pages.
	 */
	if (huge_page_order(&hstates[idx]) < HUGETLB_CGROUP_MIN_ORDER)
		goto done;
again:
	rcu_read_lock();
	h_cg = hugetlb_cgroup_from_task(current);
	if (!css_tryget(&h_cg->css)) {
		rcu_read_unlock();
		goto again;
	}
	rcu_read_unlock();

	if (!page_counter_try_charge(&h_cg->hugepage[idx], nr_pages,
				     &counter)) {
		ret = -ENOMEM;
		hugetlb_event(hugetlb_cgroup_from_counter(counter, idx), idx,
			      HUGETLB_MAX);
	}
	css_put(&h_cg->css);
done:
	*ptr = h_cg;
	return ret;
}

/* Should be called with hugetlb_lock held */
void hugetlb_cgroup_commit_charge(int idx, unsigned long nr_pages,
				  struct hugetlb_cgroup *h_cg,
				  struct page *page)
{
	if (hugetlb_cgroup_disabled() || !h_cg)
		return;

	set_hugetlb_cgroup(page, h_cg);
}

/*
 * Should be called with hugetlb_lock held
 */
void hugetlb_cgroup_uncharge_page(int idx, unsigned long nr_pages,
				  struct page *page)
{
	struct hugetlb_cgroup *h_cg;

	if (hugetlb_cgroup_disabled())
		return;
	lockdep_assert_held(&hugetlb_lock);
	h_cg = hugetlb_cgroup_from_page(page);
	if (unlikely(!h_cg))
		return;
	set_hugetlb_cgroup(page, NULL);
	page_counter_uncharge(&h_cg->hugepage[idx], nr_pages);
}

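/*
 * Uncharge a cgroup directly, without a page: used for charges taken
 * with hugetlb_cgroup_charge_cgroup() that were never committed to a
 * page (e.g. when the allocation fails after the charge).
 */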
void hugetlb_cgroup_uncharge_cgroup(int idx, unsigned long nr_pages,
				    struct hugetlb_cgroup *h_cg)
{
	if (hugetlb_cgroup_disabled() || !h_cg)
		return;

	if (huge_page_order(&hstates[idx]) < HUGETLB_CGROUP_MIN_ORDER)
		return;

	page_counter_uncharge(&h_cg->hugepage[idx], nr_pages);
}

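/* Resource file attributes, stored in the low 16 bits of cftype->private */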
enum {
	RES_USAGE,
	RES_LIMIT,
	RES_MAX_USAGE,
	RES_FAILCNT,
};

static u64 hugetlb_cgroup_read_u64(struct cgroup_subsys_state *css,
				   struct cftype *cft)
{
	struct page_counter *counter;
	struct hugetlb_cgroup *h_cg = hugetlb_cgroup_from_css(css);

	counter = &h_cg->hugepage[MEMFILE_IDX(cft->private)];

	switch (MEMFILE_ATTR(cft->private)) {
	case RES_USAGE:
		return (u64)page_counter_read(counter) * PAGE_SIZE;
	case RES_LIMIT:
		return (u64)counter->max * PAGE_SIZE;
	case RES_MAX_USAGE:
		return (u64)counter->watermark * PAGE_SIZE;
	case RES_FAILCNT:
		return counter->failcnt;
	default:
		BUG();
	}
}

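/*
 * cgroup v2 read side for the usage and limit files. A limit equal to
 * the highest value the counter can hold is shown as "max".
 */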
static int hugetlb_cgroup_read_u64_max(struct seq_file *seq, void *v)
{
	int idx;
	u64 val;
	struct cftype *cft = seq_cft(seq);
	unsigned long limit;
	struct page_counter *counter;
	struct hugetlb_cgroup *h_cg = hugetlb_cgroup_from_css(seq_css(seq));

	idx = MEMFILE_IDX(cft->private);
	counter = &h_cg->hugepage[idx];

	limit = round_down(PAGE_COUNTER_MAX,
			   1 << huge_page_order(&hstates[idx]));

	switch (MEMFILE_ATTR(cft->private)) {
	case RES_USAGE:
		val = (u64)page_counter_read(counter);
		seq_printf(seq, "%llu\n", val * PAGE_SIZE);
		break;
	case RES_LIMIT:
		val = (u64)counter->max;
		if (val == limit)
			seq_puts(seq, "max\n");
		else
			seq_printf(seq, "%llu\n", val * PAGE_SIZE);
		break;
	default:
		BUG();
	}

	return 0;
}

static DEFINE_MUTEX(hugetlb_limit_mutex);

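/*
 * Common write handler for the limit files. "max" names the string
 * that means "no limit": "-1" on the legacy hierarchy, "max" on the
 * default one. The requested value is rounded down to a whole number
 * of huge pages before being applied.
 */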
static ssize_t hugetlb_cgroup_write(struct kernfs_open_file *of,
				    char *buf, size_t nbytes, loff_t off,
				    const char *max)
{
	int ret, idx;
	unsigned long nr_pages;
	struct hugetlb_cgroup *h_cg = hugetlb_cgroup_from_css(of_css(of));

	if (hugetlb_cgroup_is_root(h_cg)) /* Can't set limit on root */
		return -EINVAL;

	buf = strstrip(buf);
	ret = page_counter_memparse(buf, max, &nr_pages);
	if (ret)
		return ret;

	idx = MEMFILE_IDX(of_cft(of)->private);
	nr_pages = round_down(nr_pages, 1 << huge_page_order(&hstates[idx]));

	switch (MEMFILE_ATTR(of_cft(of)->private)) {
	case RES_LIMIT:
		mutex_lock(&hugetlb_limit_mutex);
		ret = page_counter_set_max(&h_cg->hugepage[idx], nr_pages);
		mutex_unlock(&hugetlb_limit_mutex);
		break;
	default:
		ret = -EINVAL;
		break;
	}
	return ret ?: nbytes;
}

static ssize_t hugetlb_cgroup_write_legacy(struct kernfs_open_file *of,
					   char *buf, size_t nbytes, loff_t off)
{
	return hugetlb_cgroup_write(of, buf, nbytes, off, "-1");
}

static ssize_t hugetlb_cgroup_write_dfl(struct kernfs_open_file *of,
					char *buf, size_t nbytes, loff_t off)
{
	return hugetlb_cgroup_write(of, buf, nbytes, off, "max");
}

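/*
 * Legacy-only write handler: writing to max_usage_in_bytes resets the
 * watermark, writing to failcnt resets the failure counter.
 */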
static ssize_t hugetlb_cgroup_reset(struct kernfs_open_file *of,
				    char *buf, size_t nbytes, loff_t off)
{
	int ret = 0;
	struct page_counter *counter;
	struct hugetlb_cgroup *h_cg = hugetlb_cgroup_from_css(of_css(of));

	counter = &h_cg->hugepage[MEMFILE_IDX(of_cft(of)->private)];

	switch (MEMFILE_ATTR(of_cft(of)->private)) {
	case RES_MAX_USAGE:
		page_counter_reset_watermark(counter);
		break;
	case RES_FAILCNT:
		counter->failcnt = 0;
		break;
	default:
		ret = -EINVAL;
		break;
	}
	return ret ?: nbytes;
}

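/*
 * Format a huge page size as a human-readable string ("2MB", "1GB",
 * ...) for use in the per-hstate control file names.
 */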
static char *mem_fmt(char *buf, int size, unsigned long hsize)
{
	if (hsize >= (1UL << 30))
		snprintf(buf, size, "%luGB", hsize >> 30);
	else if (hsize >= (1UL << 20))
		snprintf(buf, size, "%luMB", hsize >> 20);
	else
		snprintf(buf, size, "%luKB", hsize >> 10);
	return buf;
}

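/*
 * Show an events file. Only the "max" event (limit hit) exists so far.
 * The local variant counts only events in this cgroup; the plain one
 * also includes events raised in descendants.
 */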
static int __hugetlb_events_show(struct seq_file *seq, bool local)
{
	int idx;
	long max;
	struct cftype *cft = seq_cft(seq);
	struct hugetlb_cgroup *h_cg = hugetlb_cgroup_from_css(seq_css(seq));

	idx = MEMFILE_IDX(cft->private);

	if (local)
		max = atomic_long_read(&h_cg->events_local[idx][HUGETLB_MAX]);
	else
		max = atomic_long_read(&h_cg->events[idx][HUGETLB_MAX]);

	seq_printf(seq, "max %lu\n", max);

	return 0;
}

static int hugetlb_events_show(struct seq_file *seq, void *v)
{
	return __hugetlb_events_show(seq, false);
}

static int hugetlb_events_local_show(struct seq_file *seq, void *v)
{
	return __hugetlb_events_show(seq, true);
}

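/*
 * Register the cgroup v2 (default hierarchy) files for one hstate:
 * hugetlb.<size>.max, .current, .events and .events.local.
 */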
static void __init __hugetlb_cgroup_file_dfl_init(int idx)
{
	char buf[32];
	struct cftype *cft;
	struct hstate *h = &hstates[idx];

	/* format the size */
	mem_fmt(buf, 32, huge_page_size(h));

	/* Add the limit file */
	cft = &h->cgroup_files_dfl[0];
	snprintf(cft->name, MAX_CFTYPE_NAME, "%s.max", buf);
	cft->private = MEMFILE_PRIVATE(idx, RES_LIMIT);
	cft->seq_show = hugetlb_cgroup_read_u64_max;
	cft->write = hugetlb_cgroup_write_dfl;
	cft->flags = CFTYPE_NOT_ON_ROOT;

	/* Add the current usage file */
	cft = &h->cgroup_files_dfl[1];
	snprintf(cft->name, MAX_CFTYPE_NAME, "%s.current", buf);
	cft->private = MEMFILE_PRIVATE(idx, RES_USAGE);
	cft->seq_show = hugetlb_cgroup_read_u64_max;
	cft->flags = CFTYPE_NOT_ON_ROOT;

	/* Add the events file */
	cft = &h->cgroup_files_dfl[2];
	snprintf(cft->name, MAX_CFTYPE_NAME, "%s.events", buf);
	cft->private = MEMFILE_PRIVATE(idx, 0);
	cft->seq_show = hugetlb_events_show;
	cft->file_offset = offsetof(struct hugetlb_cgroup, events_file[idx]);
	cft->flags = CFTYPE_NOT_ON_ROOT;

	/* Add the events.local file */
	cft = &h->cgroup_files_dfl[3];
	snprintf(cft->name, MAX_CFTYPE_NAME, "%s.events.local", buf);
	cft->private = MEMFILE_PRIVATE(idx, 0);
	cft->seq_show = hugetlb_events_local_show;
	cft->file_offset = offsetof(struct hugetlb_cgroup,
				    events_local_file[idx]);
	cft->flags = CFTYPE_NOT_ON_ROOT;

	/* NULL terminate the last cft */
	cft = &h->cgroup_files_dfl[4];
	memset(cft, 0, sizeof(*cft));

	WARN_ON(cgroup_add_dfl_cftypes(&hugetlb_cgrp_subsys,
				       h->cgroup_files_dfl));
}

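/*
 * Register the cgroup v1 (legacy) files for one hstate:
 * hugetlb.<size>.limit_in_bytes, .usage_in_bytes, .max_usage_in_bytes
 * and .failcnt.
 */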
static void __init __hugetlb_cgroup_file_legacy_init(int idx)
{
	char buf[32];
	struct cftype *cft;
	struct hstate *h = &hstates[idx];

	/* format the size */
	mem_fmt(buf, 32, huge_page_size(h));

	/* Add the limit file */
	cft = &h->cgroup_files_legacy[0];
	snprintf(cft->name, MAX_CFTYPE_NAME, "%s.limit_in_bytes", buf);
	cft->private = MEMFILE_PRIVATE(idx, RES_LIMIT);
	cft->read_u64 = hugetlb_cgroup_read_u64;
	cft->write = hugetlb_cgroup_write_legacy;

	/* Add the usage file */
	cft = &h->cgroup_files_legacy[1];
	snprintf(cft->name, MAX_CFTYPE_NAME, "%s.usage_in_bytes", buf);
	cft->private = MEMFILE_PRIVATE(idx, RES_USAGE);
	cft->read_u64 = hugetlb_cgroup_read_u64;

	/* Add the MAX usage file */
	cft = &h->cgroup_files_legacy[2];
	snprintf(cft->name, MAX_CFTYPE_NAME, "%s.max_usage_in_bytes", buf);
	cft->private = MEMFILE_PRIVATE(idx, RES_MAX_USAGE);
	cft->write = hugetlb_cgroup_reset;
	cft->read_u64 = hugetlb_cgroup_read_u64;

	/* Add the failcnt file */
	cft = &h->cgroup_files_legacy[3];
	snprintf(cft->name, MAX_CFTYPE_NAME, "%s.failcnt", buf);
	cft->private = MEMFILE_PRIVATE(idx, RES_FAILCNT);
	cft->write = hugetlb_cgroup_reset;
	cft->read_u64 = hugetlb_cgroup_read_u64;

	/* NULL terminate the last cft */
	cft = &h->cgroup_files_legacy[4];
	memset(cft, 0, sizeof(*cft));

	WARN_ON(cgroup_add_legacy_cftypes(&hugetlb_cgrp_subsys,
					  h->cgroup_files_legacy));
}

static void __init __hugetlb_cgroup_file_init(int idx)
{
	__hugetlb_cgroup_file_dfl_init(idx);
	__hugetlb_cgroup_file_legacy_init(idx);
}

void __init hugetlb_cgroup_file_init(void)
{
	struct hstate *h;

	for_each_hstate(h) {
		/*
		 * Add cgroup control files only if the huge page consists
		 * of more than two normal pages. This is because we use
		 * page[2].private for storing cgroup details.
		 */
		if (huge_page_order(h) >= HUGETLB_CGROUP_MIN_ORDER)
			__hugetlb_cgroup_file_init(hstate_index(h));
	}
}

/*
 * hugetlb_lock will make sure a parallel cgroup rmdir won't happen
 * when we migrate hugepages
 */
void hugetlb_cgroup_migrate(struct page *oldhpage, struct page *newhpage)
{
	struct hugetlb_cgroup *h_cg;
	struct hstate *h = page_hstate(oldhpage);

	if (hugetlb_cgroup_disabled())
		return;

	VM_BUG_ON_PAGE(!PageHuge(oldhpage), oldhpage);
	spin_lock(&hugetlb_lock);
	h_cg = hugetlb_cgroup_from_page(oldhpage);
	set_hugetlb_cgroup(oldhpage, NULL);

	/* move the h_cg details to new cgroup */
	set_hugetlb_cgroup(newhpage, h_cg);
	list_move(&newhpage->lru, &h->hugepage_activelist);
	spin_unlock(&hugetlb_lock);
}

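/*
 * The per-hstate files are registered at boot by
 * hugetlb_cgroup_file_init(), so the static cftype array contains only
 * the terminator.
 */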
static struct cftype hugetlb_files[] = {
	{} /* terminate */
};

struct cgroup_subsys hugetlb_cgrp_subsys = {
	.css_alloc = hugetlb_cgroup_css_alloc,
	.css_offline = hugetlb_cgroup_css_offline,
	.css_free = hugetlb_cgroup_css_free,
	.dfl_cftypes = hugetlb_files,
	.legacy_cftypes = hugetlb_files,
};