// SPDX-License-Identifier: GPL-2.0-only
/* net/sched/sch_dsmark.c - Differentiated Services field marker */

/* Written 1998-2000 by Werner Almesberger, EPFL ICA */


#include <linux/module.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/types.h>
#include <linux/string.h>
#include <linux/errno.h>
#include <linux/skbuff.h>
#include <linux/rtnetlink.h>
#include <linux/bitops.h>
#include <net/pkt_sched.h>
#include <net/pkt_cls.h>
#include <net/dsfield.h>
#include <net/inet_ecn.h>
#include <asm/byteorder.h>

/*
 * classid	class		marking
 * -------	-----		-------
 *   n/a	  0		n/a
 *   x:0	  1		use entry [0]
 *   ...	 ...		...
 *   x:y y>0	 y+1		use entry [y]
 *   ...	 ...		...
 * x:indices-1	indices		use entry [indices-1]
 *   ...	 ...		...
 *   x:y	 y+1		use entry [y & (indices-1)]
 *   ...	 ...		...
 * 0xffff	0x10000		use entry [indices-1]
 */
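
/*
 * Example setup via iproute2's tc (device and values are illustrative
 * only, not taken from this file):
 *
 *	tc qdisc add dev eth0 handle 1:0 root dsmark indices 64 \
 *		default_index 0 set_tc_index
 *	tc class change dev eth0 classid 1:1 dsmark mask 0x3 value 0xb8
 *
 * At dequeue time a packet with tc_index y is remarked using entry
 * [y & (indices - 1)]: new_DS = (old_DS & mask) | value.
 */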


#define NO_DEFAULT_INDEX	(1 << 16)

struct mask_value {
	u8			mask;
	u8			value;
};

struct dsmark_qdisc_data {
	struct Qdisc		*q;
	struct tcf_proto __rcu	*filter_list;
	struct tcf_block	*block;
	struct mask_value	*mv;
	u16			indices;
	u8			set_tc_index;
	u32			default_index;	/* index range is 0...0xffff */
#define DSMARK_EMBEDDED_SZ	16
	struct mask_value	embedded[DSMARK_EMBEDDED_SZ];
};

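/*
 * Class indices handed around as "unsigned long arg" are 1-based
 * (classid minor + 1); entry [arg - 1] in p->mv holds the mask/value
 * pair for that class.  Index 0 means "no class".
 */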
static inline int dsmark_valid_index(struct dsmark_qdisc_data *p, u16 index)
{
	return index <= p->indices && index > 0;
}

/* ------------------------- Class/flow operations ------------------------- */

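/* Replace the inner qdisc; a NULL 'new' reverts to a default pfifo. */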
static int dsmark_graft(struct Qdisc *sch, unsigned long arg,
			struct Qdisc *new, struct Qdisc **old,
			struct netlink_ext_ack *extack)
{
	struct dsmark_qdisc_data *p = qdisc_priv(sch);

	pr_debug("%s(sch %p,[qdisc %p],new %p,old %p)\n",
		 __func__, sch, p, new, old);

	if (new == NULL) {
		new = qdisc_create_dflt(sch->dev_queue, &pfifo_qdisc_ops,
					sch->handle, NULL);
		if (new == NULL)
			new = &noop_qdisc;
	}

	*old = qdisc_replace(sch, new, &p->q);
	return 0;
}

static struct Qdisc *dsmark_leaf(struct Qdisc *sch, unsigned long arg)
{
	struct dsmark_qdisc_data *p = qdisc_priv(sch);
	return p->q;
}

static unsigned long dsmark_find(struct Qdisc *sch, u32 classid)
{
	return TC_H_MIN(classid) + 1;
}

static unsigned long dsmark_bind_filter(struct Qdisc *sch,
					unsigned long parent, u32 classid)
{
	pr_debug("%s(sch %p,[qdisc %p],classid %x)\n",
		 __func__, sch, qdisc_priv(sch), classid);

	return dsmark_find(sch, classid);
}

static void dsmark_unbind_filter(struct Qdisc *sch, unsigned long cl)
{
}

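/* Netlink attribute policy shared by qdisc (init) and class (change) options. */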
static const struct nla_policy dsmark_policy[TCA_DSMARK_MAX + 1] = {
	[TCA_DSMARK_INDICES]		= { .type = NLA_U16 },
	[TCA_DSMARK_DEFAULT_INDEX]	= { .type = NLA_U16 },
	[TCA_DSMARK_SET_TC_INDEX]	= { .type = NLA_FLAG },
	[TCA_DSMARK_MASK]		= { .type = NLA_U8 },
	[TCA_DSMARK_VALUE]		= { .type = NLA_U8 },
};

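/*
 * Update the mask/value pair of an existing class.  Classes are never
 * created here; every index in [1, indices] always exists.
 */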
static int dsmark_change(struct Qdisc *sch, u32 classid, u32 parent,
			 struct nlattr **tca, unsigned long *arg,
			 struct netlink_ext_ack *extack)
{
	struct dsmark_qdisc_data *p = qdisc_priv(sch);
	struct nlattr *opt = tca[TCA_OPTIONS];
	struct nlattr *tb[TCA_DSMARK_MAX + 1];
	int err = -EINVAL;

	pr_debug("%s(sch %p,[qdisc %p],classid %x,parent %x), arg 0x%lx\n",
		 __func__, sch, p, classid, parent, *arg);

	if (!dsmark_valid_index(p, *arg)) {
		err = -ENOENT;
		goto errout;
	}

	if (!opt)
		goto errout;

	err = nla_parse_nested_deprecated(tb, TCA_DSMARK_MAX, opt,
					  dsmark_policy, NULL);
	if (err < 0)
		goto errout;

	if (tb[TCA_DSMARK_VALUE])
		p->mv[*arg - 1].value = nla_get_u8(tb[TCA_DSMARK_VALUE]);

	if (tb[TCA_DSMARK_MASK])
		p->mv[*arg - 1].mask = nla_get_u8(tb[TCA_DSMARK_MASK]);

	err = 0;

errout:
	return err;
}

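/* "Deleting" a class just resets its entry to the pass-through default. */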
static int dsmark_delete(struct Qdisc *sch, unsigned long arg)
{
	struct dsmark_qdisc_data *p = qdisc_priv(sch);

	if (!dsmark_valid_index(p, arg))
		return -EINVAL;

	p->mv[arg - 1].mask = 0xff;
	p->mv[arg - 1].value = 0;

	return 0;
}

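/* Walk only classes whose mask/value differs from the pass-through default. */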
static void dsmark_walk(struct Qdisc *sch, struct qdisc_walker *walker)
{
	struct dsmark_qdisc_data *p = qdisc_priv(sch);
	int i;

	pr_debug("%s(sch %p,[qdisc %p],walker %p)\n",
		 __func__, sch, p, walker);

	if (walker->stop)
		return;

	for (i = 0; i < p->indices; i++) {
		if (p->mv[i].mask == 0xff && !p->mv[i].value)
			goto ignore;
		if (walker->count >= walker->skip) {
			if (walker->fn(sch, i + 1, walker) < 0) {
				walker->stop = 1;
				break;
			}
		}
ignore:
		walker->count++;
	}
}

static struct tcf_block *dsmark_tcf_block(struct Qdisc *sch, unsigned long cl,
					  struct netlink_ext_ack *extack)
{
	struct dsmark_qdisc_data *p = qdisc_priv(sch);

	return p->block;
}

/* --------------------------- Qdisc operations ---------------------------- */

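/*
 * Enqueue: optionally copy the DS field into skb->tc_index (set_tc_index),
 * then pick the tc_index used for remarking at dequeue time from
 * skb->priority or the attached classifiers, and hand the packet to the
 * inner qdisc.
 */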
static int dsmark_enqueue(struct sk_buff *skb, struct Qdisc *sch,
			  struct sk_buff **to_free)
{
	unsigned int len = qdisc_pkt_len(skb);
	struct dsmark_qdisc_data *p = qdisc_priv(sch);
	int err;

	pr_debug("%s(skb %p,sch %p,[qdisc %p])\n", __func__, skb, sch, p);

	if (p->set_tc_index) {
		int wlen = skb_network_offset(skb);

		switch (tc_skb_protocol(skb)) {
		case htons(ETH_P_IP):
			wlen += sizeof(struct iphdr);
			if (!pskb_may_pull(skb, wlen) ||
			    skb_try_make_writable(skb, wlen))
				goto drop;

			skb->tc_index = ipv4_get_dsfield(ip_hdr(skb))
				& ~INET_ECN_MASK;
			break;

		case htons(ETH_P_IPV6):
			wlen += sizeof(struct ipv6hdr);
			if (!pskb_may_pull(skb, wlen) ||
			    skb_try_make_writable(skb, wlen))
				goto drop;

			skb->tc_index = ipv6_get_dsfield(ipv6_hdr(skb))
				& ~INET_ECN_MASK;
			break;
		default:
			skb->tc_index = 0;
			break;
		}
	}

	if (TC_H_MAJ(skb->priority) == sch->handle)
		skb->tc_index = TC_H_MIN(skb->priority);
	else {
		struct tcf_result res;
		struct tcf_proto *fl = rcu_dereference_bh(p->filter_list);
		int result = tcf_classify(skb, fl, &res, false);

		pr_debug("result %d class 0x%04x\n", result, res.classid);

		switch (result) {
#ifdef CONFIG_NET_CLS_ACT
		case TC_ACT_QUEUED:
		case TC_ACT_STOLEN:
		case TC_ACT_TRAP:
			__qdisc_drop(skb, to_free);
			return NET_XMIT_SUCCESS | __NET_XMIT_STOLEN;

		case TC_ACT_SHOT:
			goto drop;
#endif
		case TC_ACT_OK:
			skb->tc_index = TC_H_MIN(res.classid);
			break;

		default:
			if (p->default_index != NO_DEFAULT_INDEX)
				skb->tc_index = p->default_index;
			break;
		}
	}

	err = qdisc_enqueue(skb, p->q, to_free);
	if (err != NET_XMIT_SUCCESS) {
		if (net_xmit_drop_count(err))
			qdisc_qstats_drop(sch);
		return err;
	}

	sch->qstats.backlog += len;
	sch->q.qlen++;

	return NET_XMIT_SUCCESS;

drop:
	qdisc_drop(skb, sch, to_free);
	return NET_XMIT_SUCCESS | __NET_XMIT_BYPASS;
}

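/*
 * Dequeue from the inner qdisc and rewrite the DS field of IPv4/IPv6
 * packets according to the mask/value entry selected by skb->tc_index.
 */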
static struct sk_buff *dsmark_dequeue(struct Qdisc *sch)
{
	struct dsmark_qdisc_data *p = qdisc_priv(sch);
	struct sk_buff *skb;
	u32 index;

	pr_debug("%s(sch %p,[qdisc %p])\n", __func__, sch, p);

	skb = qdisc_dequeue_peeked(p->q);
	if (skb == NULL)
		return NULL;

	qdisc_bstats_update(sch, skb);
	qdisc_qstats_backlog_dec(sch, skb);
	sch->q.qlen--;

	index = skb->tc_index & (p->indices - 1);
	pr_debug("index %d->%d\n", skb->tc_index, index);

	switch (tc_skb_protocol(skb)) {
	case htons(ETH_P_IP):
		ipv4_change_dsfield(ip_hdr(skb), p->mv[index].mask,
				    p->mv[index].value);
		break;
	case htons(ETH_P_IPV6):
		ipv6_change_dsfield(ipv6_hdr(skb), p->mv[index].mask,
				    p->mv[index].value);
		break;
	default:
		/*
		 * Only complain if a change was actually attempted.
		 * This way, we can send non-IP traffic through dsmark
		 * and don't need yet another qdisc as a bypass.
		 */
		if (p->mv[index].mask != 0xff || p->mv[index].value)
			pr_warn("%s: unsupported protocol %d\n",
				__func__, ntohs(tc_skb_protocol(skb)));
		break;
	}

	return skb;
}

static struct sk_buff *dsmark_peek(struct Qdisc *sch)
{
	struct dsmark_qdisc_data *p = qdisc_priv(sch);

	pr_debug("%s(sch %p,[qdisc %p])\n", __func__, sch, p);

	return p->q->ops->peek(p->q);
}

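/*
 * TCA_DSMARK_INDICES must be a power of two; the mask/value table is
 * kept inline for up to DSMARK_EMBEDDED_SZ entries and allocated
 * otherwise.  All entries start as pass-through (mask 0xff, value 0).
 */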
static int dsmark_init(struct Qdisc *sch, struct nlattr *opt,
		       struct netlink_ext_ack *extack)
{
	struct dsmark_qdisc_data *p = qdisc_priv(sch);
	struct nlattr *tb[TCA_DSMARK_MAX + 1];
	int err = -EINVAL;
	u32 default_index = NO_DEFAULT_INDEX;
	u16 indices;
	int i;

	pr_debug("%s(sch %p,[qdisc %p],opt %p)\n", __func__, sch, p, opt);

	if (!opt)
		goto errout;

	err = tcf_block_get(&p->block, &p->filter_list, sch, extack);
	if (err)
		return err;

	err = nla_parse_nested_deprecated(tb, TCA_DSMARK_MAX, opt,
					  dsmark_policy, NULL);
	if (err < 0)
		goto errout;

	err = -EINVAL;
	indices = nla_get_u16(tb[TCA_DSMARK_INDICES]);

	if (hweight32(indices) != 1)
		goto errout;

	if (tb[TCA_DSMARK_DEFAULT_INDEX])
		default_index = nla_get_u16(tb[TCA_DSMARK_DEFAULT_INDEX]);

	if (indices <= DSMARK_EMBEDDED_SZ)
		p->mv = p->embedded;
	else
		p->mv = kmalloc_array(indices, sizeof(*p->mv), GFP_KERNEL);
	if (!p->mv) {
		err = -ENOMEM;
		goto errout;
	}
	for (i = 0; i < indices; i++) {
		p->mv[i].mask = 0xff;
		p->mv[i].value = 0;
	}
	p->indices = indices;
	p->default_index = default_index;
	p->set_tc_index = nla_get_flag(tb[TCA_DSMARK_SET_TC_INDEX]);

	p->q = qdisc_create_dflt(sch->dev_queue, &pfifo_qdisc_ops, sch->handle,
				 NULL);
	if (p->q == NULL)
		p->q = &noop_qdisc;
	else
		qdisc_hash_add(p->q, true);

	pr_debug("%s: qdisc %p\n", __func__, p->q);

	err = 0;
errout:
	return err;
}

static void dsmark_reset(struct Qdisc *sch)
{
	struct dsmark_qdisc_data *p = qdisc_priv(sch);

	pr_debug("%s(sch %p,[qdisc %p])\n", __func__, sch, p);
	qdisc_reset(p->q);
	sch->qstats.backlog = 0;
	sch->q.qlen = 0;
}

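/* Release the filter block, the inner qdisc and any external mask/value table. */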
static void dsmark_destroy(struct Qdisc *sch)
{
	struct dsmark_qdisc_data *p = qdisc_priv(sch);

	pr_debug("%s(sch %p,[qdisc %p])\n", __func__, sch, p);

	tcf_block_put(p->block);
	qdisc_put(p->q);
	if (p->mv != p->embedded)
		kfree(p->mv);
}

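/* Dump one class: its mask/value pair, keyed by the 1-based class index. */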
static int dsmark_dump_class(struct Qdisc *sch, unsigned long cl,
			     struct sk_buff *skb, struct tcmsg *tcm)
{
	struct dsmark_qdisc_data *p = qdisc_priv(sch);
	struct nlattr *opts = NULL;

	pr_debug("%s(sch %p,[qdisc %p],class %ld\n", __func__, sch, p, cl);

	if (!dsmark_valid_index(p, cl))
		return -EINVAL;

	tcm->tcm_handle = TC_H_MAKE(TC_H_MAJ(sch->handle), cl - 1);
	tcm->tcm_info = p->q->handle;

	opts = nla_nest_start_noflag(skb, TCA_OPTIONS);
	if (opts == NULL)
		goto nla_put_failure;
	if (nla_put_u8(skb, TCA_DSMARK_MASK, p->mv[cl - 1].mask) ||
	    nla_put_u8(skb, TCA_DSMARK_VALUE, p->mv[cl - 1].value))
		goto nla_put_failure;

	return nla_nest_end(skb, opts);

nla_put_failure:
	nla_nest_cancel(skb, opts);
	return -EMSGSIZE;
}

static int dsmark_dump(struct Qdisc *sch, struct sk_buff *skb)
{
	struct dsmark_qdisc_data *p = qdisc_priv(sch);
	struct nlattr *opts = NULL;

	opts = nla_nest_start_noflag(skb, TCA_OPTIONS);
	if (opts == NULL)
		goto nla_put_failure;
	if (nla_put_u16(skb, TCA_DSMARK_INDICES, p->indices))
		goto nla_put_failure;

	if (p->default_index != NO_DEFAULT_INDEX &&
	    nla_put_u16(skb, TCA_DSMARK_DEFAULT_INDEX, p->default_index))
		goto nla_put_failure;

	if (p->set_tc_index &&
	    nla_put_flag(skb, TCA_DSMARK_SET_TC_INDEX))
		goto nla_put_failure;

	return nla_nest_end(skb, opts);

nla_put_failure:
	nla_nest_cancel(skb, opts);
	return -EMSGSIZE;
}

static const struct Qdisc_class_ops dsmark_class_ops = {
	.graft		=	dsmark_graft,
	.leaf		=	dsmark_leaf,
	.find		=	dsmark_find,
	.change		=	dsmark_change,
	.delete		=	dsmark_delete,
	.walk		=	dsmark_walk,
	.tcf_block	=	dsmark_tcf_block,
	.bind_tcf	=	dsmark_bind_filter,
	.unbind_tcf	=	dsmark_unbind_filter,
	.dump		=	dsmark_dump_class,
};

static struct Qdisc_ops dsmark_qdisc_ops __read_mostly = {
	.next		=	NULL,
	.cl_ops		=	&dsmark_class_ops,
	.id		=	"dsmark",
	.priv_size	=	sizeof(struct dsmark_qdisc_data),
	.enqueue	=	dsmark_enqueue,
	.dequeue	=	dsmark_dequeue,
	.peek		=	dsmark_peek,
	.init		=	dsmark_init,
	.reset		=	dsmark_reset,
	.destroy	=	dsmark_destroy,
	.change		=	NULL,
	.dump		=	dsmark_dump,
	.owner		=	THIS_MODULE,
};

static int __init dsmark_module_init(void)
{
	return register_qdisc(&dsmark_qdisc_ops);
}

static void __exit dsmark_module_exit(void)
{
	unregister_qdisc(&dsmark_qdisc_ops);
}

module_init(dsmark_module_init)
module_exit(dsmark_module_exit)

MODULE_LICENSE("GPL");