/*
 *
 * Copyright IBM Corporation, 2012
 * Author Aneesh Kumar K.V <aneesh.kumar@linux.vnet.ibm.com>
 *
 * Cgroup v2
 * Copyright (C) 2019 Red Hat, Inc.
 * Author: Giuseppe Scrivano <gscrivan@redhat.com>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of version 2.1 of the GNU Lesser General Public License
 * as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it would be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
 *
 */

#include <linux/cgroup.h>
#include <linux/page_counter.h>
#include <linux/slab.h>
#include <linux/hugetlb.h>
#include <linux/hugetlb_cgroup.h>

enum hugetlb_memory_event {
	HUGETLB_MAX,
	HUGETLB_NR_MEMORY_EVENTS,
};

struct hugetlb_cgroup {
	struct cgroup_subsys_state css;

	/*
	 * Per-hstate counters that account for hugepages from hugetlb.
	 */
	struct page_counter hugepage[HUGE_MAX_HSTATE];

	atomic_long_t events[HUGE_MAX_HSTATE][HUGETLB_NR_MEMORY_EVENTS];
	atomic_long_t events_local[HUGE_MAX_HSTATE][HUGETLB_NR_MEMORY_EVENTS];

	/* Handle for "hugetlb.events" */
	struct cgroup_file events_file[HUGE_MAX_HSTATE];

	/* Handle for "hugetlb.events.local" */
	struct cgroup_file events_local_file[HUGE_MAX_HSTATE];
};

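/*
 * A cftype's ->private packs the hstate index into the upper 16 bits and
 * a RES_* attribute into the lower 16 bits, so one handler can serve the
 * control files of every hstate.
 */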
#define MEMFILE_PRIVATE(x, val)	(((x) << 16) | (val))
#define MEMFILE_IDX(val)	(((val) >> 16) & 0xffff)
#define MEMFILE_ATTR(val)	((val) & 0xffff)

#define hugetlb_cgroup_from_counter(counter, idx)			\
	container_of(counter, struct hugetlb_cgroup, hugepage[idx])

static struct hugetlb_cgroup *root_h_cgroup __read_mostly;

static inline
struct hugetlb_cgroup *hugetlb_cgroup_from_css(struct cgroup_subsys_state *s)
{
	return s ? container_of(s, struct hugetlb_cgroup, css) : NULL;
}

static inline
struct hugetlb_cgroup *hugetlb_cgroup_from_task(struct task_struct *task)
{
	return hugetlb_cgroup_from_css(task_css(task, hugetlb_cgrp_id));
}

static inline bool hugetlb_cgroup_is_root(struct hugetlb_cgroup *h_cg)
{
	return (h_cg == root_h_cgroup);
}

static inline struct hugetlb_cgroup *
parent_hugetlb_cgroup(struct hugetlb_cgroup *h_cg)
{
	return hugetlb_cgroup_from_css(h_cg->css.parent);
}

static inline bool hugetlb_cgroup_have_usage(struct hugetlb_cgroup *h_cg)
{
	int idx;

	for (idx = 0; idx < hugetlb_max_hstate; idx++) {
		if (page_counter_read(&h_cg->hugepage[idx]))
			return true;
	}
	return false;
}

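/*
 * Initialize the per-hstate counters of a new cgroup.  Each counter is
 * chained to the parent's counter for the same hstate so that charges
 * propagate up the hierarchy; the default limit is PAGE_COUNTER_MAX
 * rounded down to a whole number of huge pages of that size.
 */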
static void hugetlb_cgroup_init(struct hugetlb_cgroup *h_cgroup,
				struct hugetlb_cgroup *parent_h_cgroup)
{
	int idx;

	for (idx = 0; idx < HUGE_MAX_HSTATE; idx++) {
		struct page_counter *counter = &h_cgroup->hugepage[idx];
		struct page_counter *parent = NULL;
		unsigned long limit;
		int ret;

		if (parent_h_cgroup)
			parent = &parent_h_cgroup->hugepage[idx];
		page_counter_init(counter, parent);

		limit = round_down(PAGE_COUNTER_MAX,
				   1 << huge_page_order(&hstates[idx]));
		ret = page_counter_set_max(counter, limit);
		VM_BUG_ON(ret);
	}
}

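/*
 * Allocate the hugetlb_cgroup for a new css.  The first allocation, which
 * has no parent css, is the root cgroup; remember it so that later root
 * checks and reparenting can refer to it.
 */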
static struct cgroup_subsys_state *
hugetlb_cgroup_css_alloc(struct cgroup_subsys_state *parent_css)
{
	struct hugetlb_cgroup *parent_h_cgroup = hugetlb_cgroup_from_css(parent_css);
	struct hugetlb_cgroup *h_cgroup;

	h_cgroup = kzalloc(sizeof(*h_cgroup), GFP_KERNEL);
	if (!h_cgroup)
		return ERR_PTR(-ENOMEM);

	if (!parent_h_cgroup)
		root_h_cgroup = h_cgroup;

	hugetlb_cgroup_init(h_cgroup, parent_h_cgroup);
	return &h_cgroup->css;
}

static void hugetlb_cgroup_css_free(struct cgroup_subsys_state *css)
{
	struct hugetlb_cgroup *h_cgroup;

	h_cgroup = hugetlb_cgroup_from_css(css);
	kfree(h_cgroup);
}

/*
 * Should be called with hugetlb_lock held.  Since we hold hugetlb_lock,
 * pages cannot be moved off the active list or uncharged from the cgroup,
 * so there is no need to take a page reference or test whether the page
 * is active here.  This function cannot fail.
 */
static void hugetlb_cgroup_move_parent(int idx, struct hugetlb_cgroup *h_cg,
				       struct page *page)
{
	unsigned int nr_pages;
	struct page_counter *counter;
	struct hugetlb_cgroup *page_hcg;
	struct hugetlb_cgroup *parent = parent_hugetlb_cgroup(h_cg);

	page_hcg = hugetlb_cgroup_from_page(page);
	/*
	 * We can have pages on the active list that carry no cgroup,
	 * i.e. hugepages with fewer than 3 pages.  We can safely ignore
	 * those pages.
	 */
	if (!page_hcg || page_hcg != h_cg)
		goto out;

	nr_pages = compound_nr(page);
	if (!parent) {
		parent = root_h_cgroup;
		/* root has no limit */
		page_counter_charge(&parent->hugepage[idx], nr_pages);
	}
	counter = &h_cg->hugepage[idx];
	/* Take the pages off the local counter */
	page_counter_cancel(counter, nr_pages);

	set_hugetlb_cgroup(page, parent);
out:
	return;
}

/*
 * Force the hugetlb cgroup to empty the hugetlb resources by moving them to
 * the parent cgroup.
 */
static void hugetlb_cgroup_css_offline(struct cgroup_subsys_state *css)
{
	struct hugetlb_cgroup *h_cg = hugetlb_cgroup_from_css(css);
	struct hstate *h;
	struct page *page;
	int idx;

	do {
		/* Restart the hstate index on every pass over the lists. */
		idx = 0;
		for_each_hstate(h) {
			spin_lock(&hugetlb_lock);
			list_for_each_entry(page, &h->hugepage_activelist, lru)
				hugetlb_cgroup_move_parent(idx, h_cg, page);

			spin_unlock(&hugetlb_lock);
			idx++;
		}
		cond_resched();
	} while (hugetlb_cgroup_have_usage(h_cg));
}

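/*
 * Count a HUGETLB_MAX event: bump the local counter on @hugetlb itself,
 * then the hierarchical counter on @hugetlb and every ancestor below the
 * root, waking pollers of each level's events file.
 */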
static inline void hugetlb_event(struct hugetlb_cgroup *hugetlb, int idx,
				 enum hugetlb_memory_event event)
{
	atomic_long_inc(&hugetlb->events_local[idx][event]);
	cgroup_file_notify(&hugetlb->events_local_file[idx]);

	do {
		atomic_long_inc(&hugetlb->events[idx][event]);
		cgroup_file_notify(&hugetlb->events_file[idx]);
	} while ((hugetlb = parent_hugetlb_cgroup(hugetlb)) &&
		 !hugetlb_cgroup_is_root(hugetlb));
}

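/*
 * Charge nr_pages of hstate @idx to the current task's cgroup before the
 * huge page itself is allocated.  On success *ptr holds the charged
 * cgroup; the caller commits it to the page with
 * hugetlb_cgroup_commit_charge() under hugetlb_lock, or backs the charge
 * out with hugetlb_cgroup_uncharge_cgroup() if the allocation fails.
 */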
int hugetlb_cgroup_charge_cgroup(int idx, unsigned long nr_pages,
				 struct hugetlb_cgroup **ptr)
{
	int ret = 0;
	struct page_counter *counter;
	struct hugetlb_cgroup *h_cg = NULL;

	if (hugetlb_cgroup_disabled())
		goto done;
	/*
	 * We don't charge any cgroup if the compound page has fewer
	 * than 3 pages.
	 */
	if (huge_page_order(&hstates[idx]) < HUGETLB_CGROUP_MIN_ORDER)
		goto done;
again:
	rcu_read_lock();
	h_cg = hugetlb_cgroup_from_task(current);
	if (!css_tryget(&h_cg->css)) {
		rcu_read_unlock();
		goto again;
	}
	rcu_read_unlock();

	if (!page_counter_try_charge(&h_cg->hugepage[idx], nr_pages,
				     &counter)) {
		ret = -ENOMEM;
		hugetlb_event(h_cg, idx, HUGETLB_MAX);
	}
	css_put(&h_cg->css);
done:
	*ptr = h_cg;
	return ret;
}

/* Should be called with hugetlb_lock held */
void hugetlb_cgroup_commit_charge(int idx, unsigned long nr_pages,
				  struct hugetlb_cgroup *h_cg,
				  struct page *page)
{
	if (hugetlb_cgroup_disabled() || !h_cg)
		return;

	set_hugetlb_cgroup(page, h_cg);
	return;
}

/*
 * Should be called with hugetlb_lock held
 */
void hugetlb_cgroup_uncharge_page(int idx, unsigned long nr_pages,
				  struct page *page)
{
	struct hugetlb_cgroup *h_cg;

	if (hugetlb_cgroup_disabled())
		return;
	lockdep_assert_held(&hugetlb_lock);
	h_cg = hugetlb_cgroup_from_page(page);
	if (unlikely(!h_cg))
		return;
	set_hugetlb_cgroup(page, NULL);
	page_counter_uncharge(&h_cg->hugepage[idx], nr_pages);
	return;
}

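/*
 * Undo a charge taken by hugetlb_cgroup_charge_cgroup() when the charge
 * was never committed to a page, e.g. because the page allocation failed.
 */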
void hugetlb_cgroup_uncharge_cgroup(int idx, unsigned long nr_pages,
				    struct hugetlb_cgroup *h_cg)
{
	if (hugetlb_cgroup_disabled() || !h_cg)
		return;

	if (huge_page_order(&hstates[idx]) < HUGETLB_CGROUP_MIN_ORDER)
		return;

	page_counter_uncharge(&h_cg->hugepage[idx], nr_pages);
	return;
}

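/* Attributes encoded in the lower 16 bits of a cftype's ->private. */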
enum {
	RES_USAGE,
	RES_LIMIT,
	RES_MAX_USAGE,
	RES_FAILCNT,
};

static u64 hugetlb_cgroup_read_u64(struct cgroup_subsys_state *css,
				   struct cftype *cft)
{
	struct page_counter *counter;
	struct hugetlb_cgroup *h_cg = hugetlb_cgroup_from_css(css);

	counter = &h_cg->hugepage[MEMFILE_IDX(cft->private)];

	switch (MEMFILE_ATTR(cft->private)) {
	case RES_USAGE:
		return (u64)page_counter_read(counter) * PAGE_SIZE;
	case RES_LIMIT:
		return (u64)counter->max * PAGE_SIZE;
	case RES_MAX_USAGE:
		return (u64)counter->watermark * PAGE_SIZE;
	case RES_FAILCNT:
		return counter->failcnt;
	default:
		BUG();
	}
}

static int hugetlb_cgroup_read_u64_max(struct seq_file *seq, void *v)
{
	int idx;
	u64 val;
	struct cftype *cft = seq_cft(seq);
	unsigned long limit;
	struct page_counter *counter;
	struct hugetlb_cgroup *h_cg = hugetlb_cgroup_from_css(seq_css(seq));

	idx = MEMFILE_IDX(cft->private);
	counter = &h_cg->hugepage[idx];

	limit = round_down(PAGE_COUNTER_MAX,
			   1 << huge_page_order(&hstates[idx]));

	switch (MEMFILE_ATTR(cft->private)) {
	case RES_USAGE:
		val = (u64)page_counter_read(counter);
		seq_printf(seq, "%llu\n", val * PAGE_SIZE);
		break;
	case RES_LIMIT:
		val = (u64)counter->max;
		if (val == limit)
			seq_puts(seq, "max\n");
		else
			seq_printf(seq, "%llu\n", val * PAGE_SIZE);
		break;
	default:
		BUG();
	}

	return 0;
}

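/* Serializes limit updates (page_counter_set_max()) across all hstates. */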
static DEFINE_MUTEX(hugetlb_limit_mutex);

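/*
 * Common write handler for the v1 and v2 limit files.  @max is the string
 * that means "unlimited" in the caller's interface: "-1" for the legacy
 * files, "max" on the default hierarchy.  The parsed value is rounded
 * down to a whole number of huge pages before it is applied.
 */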
static ssize_t hugetlb_cgroup_write(struct kernfs_open_file *of,
				    char *buf, size_t nbytes, loff_t off,
				    const char *max)
{
	int ret, idx;
	unsigned long nr_pages;
	struct hugetlb_cgroup *h_cg = hugetlb_cgroup_from_css(of_css(of));

	if (hugetlb_cgroup_is_root(h_cg)) /* Can't set limit on root */
		return -EINVAL;

	buf = strstrip(buf);
	ret = page_counter_memparse(buf, max, &nr_pages);
	if (ret)
		return ret;

	idx = MEMFILE_IDX(of_cft(of)->private);
	nr_pages = round_down(nr_pages, 1 << huge_page_order(&hstates[idx]));

	switch (MEMFILE_ATTR(of_cft(of)->private)) {
	case RES_LIMIT:
		mutex_lock(&hugetlb_limit_mutex);
		ret = page_counter_set_max(&h_cg->hugepage[idx], nr_pages);
		mutex_unlock(&hugetlb_limit_mutex);
		break;
	default:
		ret = -EINVAL;
		break;
	}
	return ret ?: nbytes;
}

static ssize_t hugetlb_cgroup_write_legacy(struct kernfs_open_file *of,
					   char *buf, size_t nbytes, loff_t off)
{
	return hugetlb_cgroup_write(of, buf, nbytes, off, "-1");
}

static ssize_t hugetlb_cgroup_write_dfl(struct kernfs_open_file *of,
					char *buf, size_t nbytes, loff_t off)
{
	return hugetlb_cgroup_write(of, buf, nbytes, off, "max");
}

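/*
 * Handler for the legacy max_usage_in_bytes and failcnt files: writing
 * any value resets the watermark or the failure counter, respectively.
 */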
static ssize_t hugetlb_cgroup_reset(struct kernfs_open_file *of,
				    char *buf, size_t nbytes, loff_t off)
{
	int ret = 0;
	struct page_counter *counter;
	struct hugetlb_cgroup *h_cg = hugetlb_cgroup_from_css(of_css(of));

	counter = &h_cg->hugepage[MEMFILE_IDX(of_cft(of)->private)];

	switch (MEMFILE_ATTR(of_cft(of)->private)) {
	case RES_MAX_USAGE:
		page_counter_reset_watermark(counter);
		break;
	case RES_FAILCNT:
		counter->failcnt = 0;
		break;
	default:
		ret = -EINVAL;
		break;
	}
	return ret ?: nbytes;
}

static char *mem_fmt(char *buf, int size, unsigned long hsize)
{
	if (hsize >= (1UL << 30))
		snprintf(buf, size, "%luGB", hsize >> 30);
	else if (hsize >= (1UL << 20))
		snprintf(buf, size, "%luMB", hsize >> 20);
	else
		snprintf(buf, size, "%luKB", hsize >> 10);
	return buf;
}

static int __hugetlb_events_show(struct seq_file *seq, bool local)
{
	int idx;
	long max;
	struct cftype *cft = seq_cft(seq);
	struct hugetlb_cgroup *h_cg = hugetlb_cgroup_from_css(seq_css(seq));

	idx = MEMFILE_IDX(cft->private);

	if (local)
		max = atomic_long_read(&h_cg->events_local[idx][HUGETLB_MAX]);
	else
		max = atomic_long_read(&h_cg->events[idx][HUGETLB_MAX]);

	seq_printf(seq, "max %lu\n", max);

	return 0;
}

static int hugetlb_events_show(struct seq_file *seq, void *v)
{
	return __hugetlb_events_show(seq, false);
}

static int hugetlb_events_local_show(struct seq_file *seq, void *v)
{
	return __hugetlb_events_show(seq, true);
}

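/*
 * Register the cgroup v2 control files for one hstate.  The names carry
 * the huge page size, so with 2MB huge pages userspace sees, e.g.,
 * "hugetlb.2MB.max", "hugetlb.2MB.current", "hugetlb.2MB.events" and
 * "hugetlb.2MB.events.local" (the "hugetlb." prefix is added by the
 * cgroup core).
 */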
static void __init __hugetlb_cgroup_file_dfl_init(int idx)
{
	char buf[32];
	struct cftype *cft;
	struct hstate *h = &hstates[idx];

	/* format the size */
	mem_fmt(buf, 32, huge_page_size(h));

	/* Add the limit file */
	cft = &h->cgroup_files_dfl[0];
	snprintf(cft->name, MAX_CFTYPE_NAME, "%s.max", buf);
	cft->private = MEMFILE_PRIVATE(idx, RES_LIMIT);
	cft->seq_show = hugetlb_cgroup_read_u64_max;
	cft->write = hugetlb_cgroup_write_dfl;
	cft->flags = CFTYPE_NOT_ON_ROOT;

	/* Add the current usage file */
	cft = &h->cgroup_files_dfl[1];
	snprintf(cft->name, MAX_CFTYPE_NAME, "%s.current", buf);
	cft->private = MEMFILE_PRIVATE(idx, RES_USAGE);
	cft->seq_show = hugetlb_cgroup_read_u64_max;
	cft->flags = CFTYPE_NOT_ON_ROOT;

	/* Add the events file */
	cft = &h->cgroup_files_dfl[2];
	snprintf(cft->name, MAX_CFTYPE_NAME, "%s.events", buf);
	cft->private = MEMFILE_PRIVATE(idx, 0);
	cft->seq_show = hugetlb_events_show;
	cft->file_offset = offsetof(struct hugetlb_cgroup, events_file[idx]);
	cft->flags = CFTYPE_NOT_ON_ROOT;

	/* Add the events.local file */
	cft = &h->cgroup_files_dfl[3];
	snprintf(cft->name, MAX_CFTYPE_NAME, "%s.events.local", buf);
	cft->private = MEMFILE_PRIVATE(idx, 0);
	cft->seq_show = hugetlb_events_local_show;
	cft->file_offset = offsetof(struct hugetlb_cgroup,
				    events_local_file[idx]);
	cft->flags = CFTYPE_NOT_ON_ROOT;

	/* NULL terminate the last cft */
	cft = &h->cgroup_files_dfl[4];
	memset(cft, 0, sizeof(*cft));

	WARN_ON(cgroup_add_dfl_cftypes(&hugetlb_cgrp_subsys,
				       h->cgroup_files_dfl));
}

static void __init __hugetlb_cgroup_file_legacy_init(int idx)
{
	char buf[32];
	struct cftype *cft;
	struct hstate *h = &hstates[idx];

	/* format the size */
	mem_fmt(buf, 32, huge_page_size(h));

	/* Add the limit file */
	cft = &h->cgroup_files_legacy[0];
	snprintf(cft->name, MAX_CFTYPE_NAME, "%s.limit_in_bytes", buf);
	cft->private = MEMFILE_PRIVATE(idx, RES_LIMIT);
	cft->read_u64 = hugetlb_cgroup_read_u64;
	cft->write = hugetlb_cgroup_write_legacy;

	/* Add the usage file */
	cft = &h->cgroup_files_legacy[1];
	snprintf(cft->name, MAX_CFTYPE_NAME, "%s.usage_in_bytes", buf);
	cft->private = MEMFILE_PRIVATE(idx, RES_USAGE);
	cft->read_u64 = hugetlb_cgroup_read_u64;

	/* Add the MAX usage file */
	cft = &h->cgroup_files_legacy[2];
	snprintf(cft->name, MAX_CFTYPE_NAME, "%s.max_usage_in_bytes", buf);
	cft->private = MEMFILE_PRIVATE(idx, RES_MAX_USAGE);
	cft->write = hugetlb_cgroup_reset;
	cft->read_u64 = hugetlb_cgroup_read_u64;

	/* Add the failcnt file */
	cft = &h->cgroup_files_legacy[3];
	snprintf(cft->name, MAX_CFTYPE_NAME, "%s.failcnt", buf);
	cft->private = MEMFILE_PRIVATE(idx, RES_FAILCNT);
	cft->write = hugetlb_cgroup_reset;
	cft->read_u64 = hugetlb_cgroup_read_u64;

	/* NULL terminate the last cft */
	cft = &h->cgroup_files_legacy[4];
	memset(cft, 0, sizeof(*cft));

	WARN_ON(cgroup_add_legacy_cftypes(&hugetlb_cgrp_subsys,
					  h->cgroup_files_legacy));
}

static void __init __hugetlb_cgroup_file_init(int idx)
{
	__hugetlb_cgroup_file_dfl_init(idx);
	__hugetlb_cgroup_file_legacy_init(idx);
}

void __init hugetlb_cgroup_file_init(void)
{
	struct hstate *h;

	for_each_hstate(h) {
		/*
		 * Add cgroup control files only if the huge page consists
		 * of more than two normal pages, because we use
		 * page[2].private to store the cgroup details.
		 */
		if (huge_page_order(h) >= HUGETLB_CGROUP_MIN_ORDER)
			__hugetlb_cgroup_file_init(hstate_index(h));
	}
}

/*
 * hugetlb_lock makes sure that a parallel cgroup rmdir cannot happen
 * while we migrate hugepages.
 */
void hugetlb_cgroup_migrate(struct page *oldhpage, struct page *newhpage)
{
	struct hugetlb_cgroup *h_cg;
	struct hstate *h = page_hstate(oldhpage);

	if (hugetlb_cgroup_disabled())
		return;

	VM_BUG_ON_PAGE(!PageHuge(oldhpage), oldhpage);
	spin_lock(&hugetlb_lock);
	h_cg = hugetlb_cgroup_from_page(oldhpage);
	set_hugetlb_cgroup(oldhpage, NULL);

	/* Move the h_cg details to the new page */
	set_hugetlb_cgroup(newhpage, h_cg);
	list_move(&newhpage->lru, &h->hugepage_activelist);
	spin_unlock(&hugetlb_lock);
	return;
}

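/*
 * No hstate-independent control files: the per-hstate files are created
 * at boot by hugetlb_cgroup_file_init(), once the hstates are known.
 * This empty, NULL-terminated array just gives the cgroup core a valid
 * cftype list to start from.
 */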
static struct cftype hugetlb_files[] = {
	{} /* terminate */
};

struct cgroup_subsys hugetlb_cgrp_subsys = {
	.css_alloc = hugetlb_cgroup_css_alloc,
	.css_offline = hugetlb_cgroup_css_offline,
	.css_free = hugetlb_cgroup_css_free,
	.dfl_cftypes = hugetlb_files,
	.legacy_cftypes = hugetlb_files,
};