blob: bf8e7534c803d4e7708ed6929ddd54f00d12064b [file] [log] [blame]
/*
 * resource cgroups
 *
 * Copyright 2007 OpenVZ SWsoft Inc
 *
 * Author: Pavel Emelianov <xemul@openvz.org>
 *
 */

#include <linux/types.h>
#include <linux/parser.h>
#include <linux/fs.h>
#include <linux/slab.h>
#include <linux/res_counter.h>
#include <linux/uaccess.h>
#include <linux/mm.h>

Balbir Singh28dbc4b2009-01-07 18:08:05 -080018void res_counter_init(struct res_counter *counter, struct res_counter *parent)
Pavel Emelianove552b662008-02-07 00:13:49 -080019{
20 spin_lock_init(&counter->lock);
Balbir Singh0eea1032008-02-07 00:13:57 -080021 counter->limit = (unsigned long long)LLONG_MAX;
Balbir Singh28dbc4b2009-01-07 18:08:05 -080022 counter->parent = parent;
Pavel Emelianove552b662008-02-07 00:13:49 -080023}
24
25int res_counter_charge_locked(struct res_counter *counter, unsigned long val)
26{
27 if (counter->usage + val > counter->limit) {
28 counter->failcnt++;
29 return -ENOMEM;
30 }
31
32 counter->usage += val;
Pavel Emelyanovc84872e2008-04-29 01:00:17 -070033 if (counter->usage > counter->max_usage)
34 counter->max_usage = counter->usage;
Pavel Emelianove552b662008-02-07 00:13:49 -080035 return 0;
36}
37
Balbir Singh28dbc4b2009-01-07 18:08:05 -080038int res_counter_charge(struct res_counter *counter, unsigned long val,
39 struct res_counter **limit_fail_at)
Pavel Emelianove552b662008-02-07 00:13:49 -080040{
41 int ret;
42 unsigned long flags;
Balbir Singh28dbc4b2009-01-07 18:08:05 -080043 struct res_counter *c, *u;
Pavel Emelianove552b662008-02-07 00:13:49 -080044
Balbir Singh28dbc4b2009-01-07 18:08:05 -080045 *limit_fail_at = NULL;
46 local_irq_save(flags);
47 for (c = counter; c != NULL; c = c->parent) {
48 spin_lock(&c->lock);
49 ret = res_counter_charge_locked(c, val);
50 spin_unlock(&c->lock);
51 if (ret < 0) {
52 *limit_fail_at = c;
53 goto undo;
54 }
55 }
56 ret = 0;
57 goto done;
58undo:
59 for (u = counter; u != c; u = u->parent) {
60 spin_lock(&u->lock);
61 res_counter_uncharge_locked(u, val);
62 spin_unlock(&u->lock);
63 }
64done:
65 local_irq_restore(flags);
Pavel Emelianove552b662008-02-07 00:13:49 -080066 return ret;
67}
68
/*
 * res_counter_uncharge_locked - undo a previous charge of @val
 * @counter: the counter to uncharge
 * @val:     amount to subtract from the usage
 *
 * Caller must hold counter->lock.
 */
void res_counter_uncharge_locked(struct res_counter *counter, unsigned long val)
{
	/*
	 * Uncharging more than was charged indicates an accounting bug
	 * elsewhere; warn and clamp rather than letting usage wrap.
	 */
	if (WARN_ON(counter->usage < val))
		val = counter->usage;

	counter->usage -= val;
}
76
77void res_counter_uncharge(struct res_counter *counter, unsigned long val)
78{
79 unsigned long flags;
Balbir Singh28dbc4b2009-01-07 18:08:05 -080080 struct res_counter *c;
Pavel Emelianove552b662008-02-07 00:13:49 -080081
Balbir Singh28dbc4b2009-01-07 18:08:05 -080082 local_irq_save(flags);
83 for (c = counter; c != NULL; c = c->parent) {
84 spin_lock(&c->lock);
85 res_counter_uncharge_locked(c, val);
86 spin_unlock(&c->lock);
87 }
88 local_irq_restore(flags);
Pavel Emelianove552b662008-02-07 00:13:49 -080089}
90
91
Balbir Singh0eea1032008-02-07 00:13:57 -080092static inline unsigned long long *
93res_counter_member(struct res_counter *counter, int member)
Pavel Emelianove552b662008-02-07 00:13:49 -080094{
95 switch (member) {
96 case RES_USAGE:
97 return &counter->usage;
Pavel Emelyanovc84872e2008-04-29 01:00:17 -070098 case RES_MAX_USAGE:
99 return &counter->max_usage;
Pavel Emelianove552b662008-02-07 00:13:49 -0800100 case RES_LIMIT:
101 return &counter->limit;
102 case RES_FAILCNT:
103 return &counter->failcnt;
104 };
105
106 BUG();
107 return NULL;
108}
109
110ssize_t res_counter_read(struct res_counter *counter, int member,
Balbir Singh0eea1032008-02-07 00:13:57 -0800111 const char __user *userbuf, size_t nbytes, loff_t *pos,
112 int (*read_strategy)(unsigned long long val, char *st_buf))
Pavel Emelianove552b662008-02-07 00:13:49 -0800113{
Balbir Singh0eea1032008-02-07 00:13:57 -0800114 unsigned long long *val;
Pavel Emelianove552b662008-02-07 00:13:49 -0800115 char buf[64], *s;
116
117 s = buf;
118 val = res_counter_member(counter, member);
Balbir Singh0eea1032008-02-07 00:13:57 -0800119 if (read_strategy)
120 s += read_strategy(*val, s);
121 else
122 s += sprintf(s, "%llu\n", *val);
Pavel Emelianove552b662008-02-07 00:13:49 -0800123 return simple_read_from_buffer((void __user *)userbuf, nbytes,
124 pos, buf, s - buf);
125}
126
Paul Menage2c7eabf2008-04-29 00:59:58 -0700127u64 res_counter_read_u64(struct res_counter *counter, int member)
128{
129 return *res_counter_member(counter, member);
130}
131
Paul Menage856c13a2008-07-25 01:47:04 -0700132int res_counter_memparse_write_strategy(const char *buf,
133 unsigned long long *res)
Pavel Emelianove552b662008-02-07 00:13:49 -0800134{
Paul Menage856c13a2008-07-25 01:47:04 -0700135 char *end;
136 /* FIXME - make memparse() take const char* args */
137 *res = memparse((char *)buf, &end);
138 if (*end != '\0')
139 return -EINVAL;
140
141 *res = PAGE_ALIGN(*res);
142 return 0;
143}
144
145int res_counter_write(struct res_counter *counter, int member,
146 const char *buf, write_strategy_fn write_strategy)
147{
148 char *end;
Balbir Singh0eea1032008-02-07 00:13:57 -0800149 unsigned long flags;
150 unsigned long long tmp, *val;
Pavel Emelianove552b662008-02-07 00:13:49 -0800151
Balbir Singh0eea1032008-02-07 00:13:57 -0800152 if (write_strategy) {
Paul Menage856c13a2008-07-25 01:47:04 -0700153 if (write_strategy(buf, &tmp))
154 return -EINVAL;
Balbir Singh0eea1032008-02-07 00:13:57 -0800155 } else {
156 tmp = simple_strtoull(buf, &end, 10);
157 if (*end != '\0')
Paul Menage856c13a2008-07-25 01:47:04 -0700158 return -EINVAL;
Balbir Singh0eea1032008-02-07 00:13:57 -0800159 }
160 spin_lock_irqsave(&counter->lock, flags);
Pavel Emelianove552b662008-02-07 00:13:49 -0800161 val = res_counter_member(counter, member);
162 *val = tmp;
Balbir Singh0eea1032008-02-07 00:13:57 -0800163 spin_unlock_irqrestore(&counter->lock, flags);
Paul Menage856c13a2008-07-25 01:47:04 -0700164 return 0;
Pavel Emelianove552b662008-02-07 00:13:49 -0800165}