/* SPDX-License-Identifier: GPL-2.0-or-later */
/*
 * cls_cgroup.h			Control Group Classifier
 *
 * Authors:	Thomas Graf <tgraf@suug.ch>
 */

#ifndef _NET_CLS_CGROUP_H
#define _NET_CLS_CGROUP_H

#include <linux/cgroup.h>
#include <linux/hardirq.h>
#include <linux/rcupdate.h>
#include <net/sock.h>
#include <net/inet_sock.h>

#ifdef CONFIG_CGROUP_NET_CLASSID
struct cgroup_cls_state {
	struct cgroup_subsys_state css;
	u32 classid;
};

struct cgroup_cls_state *task_cls_state(struct task_struct *p);

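/* Look up the net_cls classid of @p under RCU; returns 0 when called
 * from interrupt context.
 */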
static inline u32 task_cls_classid(struct task_struct *p)
{
	u32 classid;

	if (in_interrupt())
		return 0;

	rcu_read_lock();
	classid = container_of(task_css(p, net_cls_cgrp_id),
			       struct cgroup_cls_state, css)->classid;
	rcu_read_unlock();

	return classid;
}

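/* Record the calling task's net_cls classid in the socket's cgroup data
 * so it can be read back later, e.g. from softirq context.
 */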
static inline void sock_update_classid(struct sock_cgroup_data *skcd)
{
	u32 classid;

	classid = task_cls_classid(current);
	sock_cgroup_set_classid(skcd, classid);
}

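/* Classid used by cls_cgroup for @skb: the current task's classid in
 * process context, or the classid stored in the skb's full socket when
 * running from softirq context (0 if no full socket is available).
 */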
static inline u32 task_get_classid(const struct sk_buff *skb)
{
	u32 classid = task_cls_state(current)->classid;

	/* The classifier must ignore all packets originating from softirq
	 * context, as accessing `current' there would yield false results.
	 *
	 * This test assumes that all callers of dev_queue_xmit() explicitly
	 * disable bh. Knowing this, softirq-based calls can be detected by
	 * looking at the number of nested bh disable calls, because softirqs
	 * always disable bh.
	 */
	if (in_serving_softirq()) {
		struct sock *sk = skb_to_full_sk(skb);

		/* If there is a sock_cgroup_classid we'll use that. */
		if (!sk || !sk_fullsock(sk))
			return 0;

		classid = sock_cgroup_classid(&sk->sk_cgrp_data);
	}

	return classid;
}
#else /* !CONFIG_CGROUP_NET_CLASSID */
static inline void sock_update_classid(struct sock_cgroup_data *skcd)
{
}

static inline u32 task_get_classid(const struct sk_buff *skb)
{
	return 0;
}
#endif /* CONFIG_CGROUP_NET_CLASSID */
#endif /* _NET_CLS_CGROUP_H */