/*
 * resource cgroups
 *
 * Copyright 2007 OpenVZ SWsoft Inc
 *
 * Author: Pavel Emelianov <xemul@openvz.org>
 *
 */

#include <linux/types.h>
#include <linux/parser.h>
#include <linux/fs.h>
#include <linux/slab.h>
#include <linux/res_counter.h>
#include <linux/uaccess.h>
#include <linux/mm.h>

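/*
 * Initialize a counter with zero usage and an unlimited (RESOURCE_MAX)
 * hard and soft limit, optionally linking it to a parent counter so
 * that charges propagate up the hierarchy.
 */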
void res_counter_init(struct res_counter *counter, struct res_counter *parent)
{
	spin_lock_init(&counter->lock);
	counter->limit = RESOURCE_MAX;
	counter->soft_limit = RESOURCE_MAX;
	counter->parent = parent;
}

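/*
 * Try to charge @val against a single counter. The caller must hold
 * counter->lock. Fails with -ENOMEM (and bumps failcnt) if the charge
 * would push usage past the hard limit; otherwise usage and, when a
 * new high-water mark is reached, max_usage are updated.
 */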
int res_counter_charge_locked(struct res_counter *counter, unsigned long val)
{
	if (counter->usage + val > counter->limit) {
		counter->failcnt++;
		return -ENOMEM;
	}

	counter->usage += val;
	if (counter->usage > counter->max_usage)
		counter->max_usage = counter->usage;
	return 0;
}

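/*
 * Charge @val against @counter and every counter up its parent chain.
 * If any level would exceed its limit, the partial charge is rolled
 * back from the counters that already accepted it, the failing counter
 * is reported through @limit_fail_at, and -ENOMEM is returned.
 * Interrupts are disabled while the per-counter spinlocks are taken.
 *
 * A caller might pair it with res_counter_uncharge() roughly like this
 * (illustrative sketch only; the names are hypothetical):
 *
 *	struct res_counter *fail_at;
 *
 *	if (res_counter_charge(&grp->res, PAGE_SIZE, &fail_at))
 *		return -ENOMEM;
 *	...
 *	if (something_went_wrong)
 *		res_counter_uncharge(&grp->res, PAGE_SIZE);
 */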
int res_counter_charge(struct res_counter *counter, unsigned long val,
			struct res_counter **limit_fail_at)
{
	int ret;
	unsigned long flags;
	struct res_counter *c, *u;

	*limit_fail_at = NULL;
	local_irq_save(flags);
	for (c = counter; c != NULL; c = c->parent) {
		spin_lock(&c->lock);
		ret = res_counter_charge_locked(c, val);
		spin_unlock(&c->lock);
		if (ret < 0) {
			*limit_fail_at = c;
			goto undo;
		}
	}
	ret = 0;
	goto done;
undo:
	for (u = counter; u != c; u = u->parent) {
		spin_lock(&u->lock);
		res_counter_uncharge_locked(u, val);
		spin_unlock(&u->lock);
	}
done:
	local_irq_restore(flags);
	return ret;
}

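/*
 * Remove @val from a single counter with counter->lock held. An
 * underflow is reported with WARN_ON() and clamped so that usage
 * drops to zero instead of wrapping the unsigned counter.
 */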
void res_counter_uncharge_locked(struct res_counter *counter, unsigned long val)
{
	if (WARN_ON(counter->usage < val))
		val = counter->usage;

	counter->usage -= val;
}

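/*
 * Undo a successful res_counter_charge(): walk the same parent chain
 * and subtract @val from every counter, with interrupts disabled while
 * each per-counter lock is held.
 */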
void res_counter_uncharge(struct res_counter *counter, unsigned long val)
{
	unsigned long flags;
	struct res_counter *c;

	local_irq_save(flags);
	for (c = counter; c != NULL; c = c->parent) {
		spin_lock(&c->lock);
		res_counter_uncharge_locked(c, val);
		spin_unlock(&c->lock);
	}
	local_irq_restore(flags);
}

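/*
 * Map a RES_* member id onto the address of the corresponding counter
 * field, so the read/write helpers below can treat all fields
 * uniformly. An unknown member id is a programming error and hits
 * BUG().
 */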
static inline unsigned long long *
res_counter_member(struct res_counter *counter, int member)
{
	switch (member) {
	case RES_USAGE:
		return &counter->usage;
	case RES_MAX_USAGE:
		return &counter->max_usage;
	case RES_LIMIT:
		return &counter->limit;
	case RES_FAILCNT:
		return &counter->failcnt;
	case RES_SOFT_LIMIT:
		return &counter->soft_limit;
	}

	BUG();
	return NULL;
}

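/*
 * Format one counter field into a small on-stack buffer and copy it to
 * user space. A controller may supply its own read_strategy()
 * formatter; otherwise the value is printed as a plain decimal number.
 */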
ssize_t res_counter_read(struct res_counter *counter, int member,
		const char __user *userbuf, size_t nbytes, loff_t *pos,
		int (*read_strategy)(unsigned long long val, char *st_buf))
{
	unsigned long long *val;
	char buf[64], *s;

	s = buf;
	val = res_counter_member(counter, member);
	if (read_strategy)
		s += read_strategy(*val, s);
	else
		s += sprintf(s, "%llu\n", *val);
	return simple_read_from_buffer((void __user *)userbuf, nbytes,
			pos, buf, s - buf);
}

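/*
 * Return the raw 64-bit value of one counter field, for callers that
 * do their own formatting instead of using res_counter_read(). Note
 * that the field is read without taking counter->lock.
 */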
u64 res_counter_read_u64(struct res_counter *counter, int member)
{
	return *res_counter_member(counter, member);
}

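/*
 * Parse a user-supplied limit string. "-1" means unlimited and maps to
 * RESOURCE_MAX; anything else is handed to memparse(), so the usual
 * K/M/G suffixes are accepted, and the result is rounded up to a page
 * boundary. For example (illustrative values, assuming 4 KiB pages):
 *
 *	"-1"  -> RESOURCE_MAX
 *	"4k"  -> 4096
 *	"100" -> 4096 (PAGE_ALIGN rounds up)
 */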
int res_counter_memparse_write_strategy(const char *buf,
					unsigned long long *res)
{
	char *end;

	/* return RESOURCE_MAX (unlimited) if "-1" is specified */
	if (*buf == '-') {
		*res = simple_strtoull(buf + 1, &end, 10);
		if (*res != 1 || *end != '\0')
			return -EINVAL;
		*res = RESOURCE_MAX;
		return 0;
	}

	/* FIXME - make memparse() take const char* args */
	*res = memparse((char *)buf, &end);
	if (*end != '\0')
		return -EINVAL;

	*res = PAGE_ALIGN(*res);
	return 0;
}

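/*
 * Parse @buf with the controller-provided write_strategy() callback if
 * there is one (res_counter_memparse_write_strategy above fits that
 * role), otherwise as a plain decimal number, and store the result
 * into the selected counter field under counter->lock.
 */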
int res_counter_write(struct res_counter *counter, int member,
		const char *buf, write_strategy_fn write_strategy)
{
	char *end;
	unsigned long flags;
	unsigned long long tmp, *val;

	if (write_strategy) {
		if (write_strategy(buf, &tmp))
			return -EINVAL;
	} else {
		tmp = simple_strtoull(buf, &end, 10);
		if (*end != '\0')
			return -EINVAL;
	}
	spin_lock_irqsave(&counter->lock, flags);
	val = res_counter_member(counter, member);
	*val = tmp;
	spin_unlock_irqrestore(&counter->lock, flags);
	return 0;
}