/*
 * net/sched/cls_cgroup.c	Control Group Classifier
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 *
 * Authors:	Thomas Graf <tgraf@suug.ch>
 */

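/*
 * Typical userspace setup, shown here for orientation: a hedged sketch,
 * assuming the net_cls cgroup controller is mounted at
 * /sys/fs/cgroup/net_cls; the device, handles, rate and cgroup name are
 * illustrative only.  net_cls tags the sockets of tasks in a cgroup with
 * a classid, and this classifier maps their packets onto the matching
 * class of the egress qdisc (0x100001 is tc's hexadecimal 10:1):
 *
 *   mkdir /sys/fs/cgroup/net_cls/crawler
 *   echo 0x100001 > /sys/fs/cgroup/net_cls/crawler/net_cls.classid
 *   tc qdisc add dev eth0 root handle 10: htb
 *   tc class add dev eth0 parent 10: classid 10:1 htb rate 1mbit
 *   tc filter add dev eth0 parent 10: protocol ip prio 10 handle 1: cgroup
 */
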
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/skbuff.h>
#include <linux/rcupdate.h>
#include <net/rtnetlink.h>
#include <net/pkt_cls.h>
#include <net/sock.h>
#include <net/cls_cgroup.h>

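/*
 * Per-tcf_proto state.  This classifier keeps exactly one element: the
 * head carries the filter handle plus the optional actions (exts) and
 * ematch tree attached to it.
 */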
struct cls_cgroup_head {
	u32			handle;
	struct tcf_exts		exts;
	struct tcf_ematch_tree	ematches;
};

static int cls_cgroup_classify(struct sk_buff *skb, const struct tcf_proto *tp,
			       struct tcf_result *res)
{
	struct cls_cgroup_head *head = tp->root;
	u32 classid;

	rcu_read_lock();
	classid = task_cls_state(current)->classid;
	rcu_read_unlock();

	/*
	 * Due to the nature of the classifier it is required to ignore all
	 * packets originating from softirq context, as accessing `current'
	 * there would lead to false results.
	 *
	 * This test assumes that all callers of dev_queue_xmit() explicitly
	 * disable bh. Knowing this, it is possible to detect softirq-based
	 * calls by looking at the number of nested bh disable calls because
	 * softirqs always disable bh.
	 */
	if (in_serving_softirq()) {
		/* If there is an sk_classid we'll use that. */
		if (!skb->sk)
			return -1;
		classid = skb->sk->sk_classid;
	}

	if (!classid)
		return -1;

	if (!tcf_em_tree_match(skb, &head->ematches, NULL))
		return -1;

	res->classid = classid;
	res->class = 0;
	return tcf_exts_exec(skb, &head->exts, res);
}

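/*
 * With a single internal element there is nothing to look up or
 * reference-count: get() and put() are no-ops, and init() has no state
 * to prepare since change() allocates the head on first use.
 */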
static unsigned long cls_cgroup_get(struct tcf_proto *tp, u32 handle)
{
	return 0UL;
}

static void cls_cgroup_put(struct tcf_proto *tp, unsigned long f)
{
}

static int cls_cgroup_init(struct tcf_proto *tp)
{
	return 0;
}

static const struct tcf_ext_map cgroup_ext_map = {
	.action = TCA_CGROUP_ACT,
	.police = TCA_CGROUP_POLICE,
};

static const struct nla_policy cgroup_policy[TCA_CGROUP_MAX + 1] = {
	[TCA_CGROUP_EMATCHES]	= { .type = NLA_NESTED },
};

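/*
 * change() serves both creation and modification: on first use it
 * allocates the singleton head for the given handle, then validates the
 * netlink attributes and swaps the new extensions and ematch tree into
 * place under the tree lock.
 */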
static int cls_cgroup_change(struct net *net, struct sk_buff *in_skb,
			     struct tcf_proto *tp, unsigned long base,
			     u32 handle, struct nlattr **tca,
			     unsigned long *arg)
{
	struct nlattr *tb[TCA_CGROUP_MAX + 1];
	struct cls_cgroup_head *head = tp->root;
	struct tcf_ematch_tree t;
	struct tcf_exts e;
	int err;

	if (!tca[TCA_OPTIONS])
		return -EINVAL;

	if (head == NULL) {
		if (!handle)
			return -EINVAL;

		head = kzalloc(sizeof(*head), GFP_KERNEL);
		if (head == NULL)
			return -ENOBUFS;

		head->handle = handle;

		tcf_tree_lock(tp);
		tp->root = head;
		tcf_tree_unlock(tp);
	}

	if (handle != head->handle)
		return -ENOENT;

	err = nla_parse_nested(tb, TCA_CGROUP_MAX, tca[TCA_OPTIONS],
			       cgroup_policy);
	if (err < 0)
		return err;

	err = tcf_exts_validate(net, tp, tb, tca[TCA_RATE], &e,
				&cgroup_ext_map);
	if (err < 0)
		return err;

	err = tcf_em_tree_validate(tp, tb[TCA_CGROUP_EMATCHES], &t);
	if (err < 0)
		return err;

	tcf_exts_change(tp, &head->exts, &e);
	tcf_em_tree_change(tp, &head->ematches, &t);

	return 0;
}

static void cls_cgroup_destroy(struct tcf_proto *tp)
{
	struct cls_cgroup_head *head = tp->root;

	if (head) {
		tcf_exts_destroy(tp, &head->exts);
		tcf_em_tree_destroy(tp, &head->ematches);
		kfree(head);
	}
}

static int cls_cgroup_delete(struct tcf_proto *tp, unsigned long arg)
{
	return -EOPNOTSUPP;
}

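/* Report the single head element to the walker, honoring skip/count. */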
static void cls_cgroup_walk(struct tcf_proto *tp, struct tcf_walker *arg)
{
	struct cls_cgroup_head *head = tp->root;

	if (arg->count < arg->skip)
		goto skip;

	if (arg->fn(tp, (unsigned long) head, arg) < 0) {
		arg->stop = 1;
		return;
	}
skip:
	arg->count++;
}

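/* Dump the handle, extensions, ematch tree and stats back to userspace. */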
static int cls_cgroup_dump(struct tcf_proto *tp, unsigned long fh,
			   struct sk_buff *skb, struct tcmsg *t)
{
	struct cls_cgroup_head *head = tp->root;
	unsigned char *b = skb_tail_pointer(skb);
	struct nlattr *nest;

	t->tcm_handle = head->handle;

	nest = nla_nest_start(skb, TCA_OPTIONS);
	if (nest == NULL)
		goto nla_put_failure;

	if (tcf_exts_dump(skb, &head->exts, &cgroup_ext_map) < 0 ||
	    tcf_em_tree_dump(skb, &head->ematches, TCA_CGROUP_EMATCHES) < 0)
		goto nla_put_failure;

	nla_nest_end(skb, nest);

	if (tcf_exts_dump_stats(skb, &head->exts, &cgroup_ext_map) < 0)
		goto nla_put_failure;

	return skb->len;

nla_put_failure:
	nlmsg_trim(skb, b);
	return -1;
}

static struct tcf_proto_ops cls_cgroup_ops __read_mostly = {
	.kind		= "cgroup",
	.init		= cls_cgroup_init,
	.change		= cls_cgroup_change,
	.classify	= cls_cgroup_classify,
	.destroy	= cls_cgroup_destroy,
	.get		= cls_cgroup_get,
	.put		= cls_cgroup_put,
	.delete		= cls_cgroup_delete,
	.walk		= cls_cgroup_walk,
	.dump		= cls_cgroup_dump,
	.owner		= THIS_MODULE,
};

static int __init init_cgroup_cls(void)
{
	return register_tcf_proto_ops(&cls_cgroup_ops);
}

static void __exit exit_cgroup_cls(void)
{
	unregister_tcf_proto_ops(&cls_cgroup_ops);
}

module_init(init_cgroup_cls);
module_exit(exit_cgroup_cls);
MODULE_LICENSE("GPL");