/*
 * net/sched/sch_mqprio.c
 *
 * Copyright (c) 2010 John Fastabend <john.r.fastabend@intel.com>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * version 2 as published by the Free Software Foundation.
 */

#include <linux/types.h>
#include <linux/slab.h>
#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/errno.h>
#include <linux/skbuff.h>
#include <linux/module.h>
#include <net/netlink.h>
#include <net/pkt_sched.h>
#include <net/sch_generic.h>

struct mqprio_sched {
	struct Qdisc **qdiscs;
	int hw_offload;
};

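/* Tear down the qdisc: destroy any per-queue child qdiscs that were never
 * handed over by mqprio_attach(), then either let the driver drop its
 * hardware traffic-class state or clear the software tc mapping.
 */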
static void mqprio_destroy(struct Qdisc *sch)
{
	struct net_device *dev = qdisc_dev(sch);
	struct mqprio_sched *priv = qdisc_priv(sch);
	struct tc_to_netdev tc = { .type = TC_SETUP_MQPRIO };
	unsigned int ntx;

	if (priv->qdiscs) {
		for (ntx = 0;
		     ntx < dev->num_tx_queues && priv->qdiscs[ntx];
		     ntx++)
			qdisc_destroy(priv->qdiscs[ntx]);
		kfree(priv->qdiscs);
	}

	if (priv->hw_offload && dev->netdev_ops->ndo_setup_tc)
		dev->netdev_ops->ndo_setup_tc(dev, sch->handle, 0, &tc);
	else
		netdev_set_num_tc(dev, 0);
}

static int mqprio_parse_opt(struct net_device *dev, struct tc_mqprio_qopt *qopt)
{
	int i, j;

	/* Verify num_tc is not out of max range */
	if (qopt->num_tc > TC_MAX_QUEUE)
		return -EINVAL;

	/* Verify priority mapping uses valid tcs */
	for (i = 0; i < TC_BITMASK + 1; i++) {
		if (qopt->prio_tc_map[i] >= qopt->num_tc)
			return -EINVAL;
	}

	/* Limit qopt->hw to the maximum supported offload value. Drivers have
	 * the option of overriding this later if they don't support a given
	 * offload type.
	 */
	if (qopt->hw > TC_MQPRIO_HW_OFFLOAD_MAX)
		qopt->hw = TC_MQPRIO_HW_OFFLOAD_MAX;

	/* If hardware offload is requested, we will leave it to the device
	 * to either populate the queue counts itself or to validate the
	 * provided queue counts. If ndo_setup_tc is not present then the
	 * hardware doesn't support offload and we should return an error.
	 */
	if (qopt->hw)
		return dev->netdev_ops->ndo_setup_tc ? 0 : -EINVAL;

	for (i = 0; i < qopt->num_tc; i++) {
		unsigned int last = qopt->offset[i] + qopt->count[i];

		/* Verify the queue count is in tx range; being equal to
		 * real_num_tx_queues indicates the last queue is in use.
		 */
		if (qopt->offset[i] >= dev->real_num_tx_queues ||
		    !qopt->count[i] ||
		    last > dev->real_num_tx_queues)
			return -EINVAL;

		/* Verify that the offsets and counts do not overlap */
		for (j = i + 1; j < qopt->num_tc; j++) {
			if (last > qopt->offset[j])
				return -EINVAL;
		}
	}

	return 0;
}

static int mqprio_init(struct Qdisc *sch, struct nlattr *opt)
{
	struct net_device *dev = qdisc_dev(sch);
	struct mqprio_sched *priv = qdisc_priv(sch);
	struct netdev_queue *dev_queue;
	struct Qdisc *qdisc;
	int i, err = -EOPNOTSUPP;
	struct tc_mqprio_qopt *qopt = NULL;

	BUILD_BUG_ON(TC_MAX_QUEUE != TC_QOPT_MAX_QUEUE);
	BUILD_BUG_ON(TC_BITMASK != TC_QOPT_BITMASK);

	if (sch->parent != TC_H_ROOT)
		return -EOPNOTSUPP;

	if (!netif_is_multiqueue(dev))
		return -EOPNOTSUPP;

	if (!opt || nla_len(opt) < sizeof(*qopt))
		return -EINVAL;

	qopt = nla_data(opt);
	if (mqprio_parse_opt(dev, qopt))
		return -EINVAL;

	/* pre-allocate qdisc, attachment can't fail */
	priv->qdiscs = kcalloc(dev->num_tx_queues, sizeof(priv->qdiscs[0]),
			       GFP_KERNEL);
	if (!priv->qdiscs)
		return -ENOMEM;

	for (i = 0; i < dev->num_tx_queues; i++) {
		dev_queue = netdev_get_tx_queue(dev, i);
		qdisc = qdisc_create_dflt(dev_queue,
					  get_default_qdisc_ops(dev, i),
					  TC_H_MAKE(TC_H_MAJ(sch->handle),
						    TC_H_MIN(i + 1)));
		if (!qdisc)
			return -ENOMEM;

		priv->qdiscs[i] = qdisc;
		qdisc->flags |= TCQ_F_ONETXQUEUE | TCQ_F_NOPARENT;
	}

	/* If the mqprio options indicate that hardware should own
	 * the queue mapping then run ndo_setup_tc, otherwise use the
	 * supplied and verified mapping.
	 */
	if (qopt->hw) {
		struct tc_to_netdev tc = { .type = TC_SETUP_MQPRIO,
					   { .tc = qopt->num_tc } };

		err = dev->netdev_ops->ndo_setup_tc(dev, sch->handle, 0, &tc);
		if (err)
			return err;

		priv->hw_offload = qopt->hw;
	} else {
		netdev_set_num_tc(dev, qopt->num_tc);
		for (i = 0; i < qopt->num_tc; i++)
			netdev_set_tc_queue(dev, i,
					    qopt->count[i], qopt->offset[i]);
	}

	/* Always use supplied priority mappings */
	for (i = 0; i < TC_BITMASK + 1; i++)
		netdev_set_prio_tc_map(dev, i, qopt->prio_tc_map[i]);

	sch->flags |= TCQ_F_MQROOT;
	return 0;
}

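/* Graft the per-queue qdiscs pre-allocated in mqprio_init() onto their tx
 * queues. Ownership moves to the queues, so the temporary priv->qdiscs
 * array is freed once every queue has been grafted.
 */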
static void mqprio_attach(struct Qdisc *sch)
{
	struct net_device *dev = qdisc_dev(sch);
	struct mqprio_sched *priv = qdisc_priv(sch);
	struct Qdisc *qdisc, *old;
	unsigned int ntx;

	/* Attach underlying qdisc */
	for (ntx = 0; ntx < dev->num_tx_queues; ntx++) {
		qdisc = priv->qdiscs[ntx];
		old = dev_graft_qdisc(qdisc->dev_queue, qdisc);
		if (old)
			qdisc_destroy(old);
		if (ntx < dev->real_num_tx_queues)
			qdisc_hash_add(qdisc, false);
	}
	kfree(priv->qdiscs);
	priv->qdiscs = NULL;
}

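/* Class ids 1..num_tc are the traffic classes; ids above that map
 * one-to-one onto tx queues. Return the queue for a queue class id,
 * or NULL when the id is out of range.
 */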
static struct netdev_queue *mqprio_queue_get(struct Qdisc *sch,
					     unsigned long cl)
{
	struct net_device *dev = qdisc_dev(sch);
	unsigned long ntx = cl - 1 - netdev_get_num_tc(dev);

	if (ntx >= dev->num_tx_queues)
		return NULL;
	return netdev_get_tx_queue(dev, ntx);
}

static int mqprio_graft(struct Qdisc *sch, unsigned long cl, struct Qdisc *new,
			struct Qdisc **old)
{
	struct net_device *dev = qdisc_dev(sch);
	struct netdev_queue *dev_queue = mqprio_queue_get(sch, cl);

	if (!dev_queue)
		return -EINVAL;

	if (dev->flags & IFF_UP)
		dev_deactivate(dev);

	*old = dev_graft_qdisc(dev_queue, new);

	if (new)
		new->flags |= TCQ_F_ONETXQUEUE | TCQ_F_NOPARENT;

	if (dev->flags & IFF_UP)
		dev_activate(dev);

	return 0;
}

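/* Rebuild the root qdisc statistics from the per-queue children (under
 * each child's qdisc lock), then dump the tc_mqprio_qopt configuration.
 */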
static int mqprio_dump(struct Qdisc *sch, struct sk_buff *skb)
{
	struct net_device *dev = qdisc_dev(sch);
	struct mqprio_sched *priv = qdisc_priv(sch);
	unsigned char *b = skb_tail_pointer(skb);
	struct tc_mqprio_qopt opt = { 0 };
	struct Qdisc *qdisc;
	unsigned int i;

	sch->q.qlen = 0;
	memset(&sch->bstats, 0, sizeof(sch->bstats));
	memset(&sch->qstats, 0, sizeof(sch->qstats));

	for (i = 0; i < dev->num_tx_queues; i++) {
		qdisc = rtnl_dereference(netdev_get_tx_queue(dev, i)->qdisc);
		spin_lock_bh(qdisc_lock(qdisc));
		sch->q.qlen += qdisc->q.qlen;
		sch->bstats.bytes += qdisc->bstats.bytes;
		sch->bstats.packets += qdisc->bstats.packets;
		sch->qstats.backlog += qdisc->qstats.backlog;
		sch->qstats.drops += qdisc->qstats.drops;
		sch->qstats.requeues += qdisc->qstats.requeues;
		sch->qstats.overlimits += qdisc->qstats.overlimits;
		spin_unlock_bh(qdisc_lock(qdisc));
	}

	opt.num_tc = netdev_get_num_tc(dev);
	memcpy(opt.prio_tc_map, dev->prio_tc_map, sizeof(opt.prio_tc_map));
	opt.hw = priv->hw_offload;

	for (i = 0; i < netdev_get_num_tc(dev); i++) {
		opt.count[i] = dev->tc_to_txq[i].count;
		opt.offset[i] = dev->tc_to_txq[i].offset;
	}

	if (nla_put(skb, TCA_OPTIONS, sizeof(opt), &opt))
		goto nla_put_failure;

	return skb->len;
nla_put_failure:
	nlmsg_trim(skb, b);
	return -1;
}

static struct Qdisc *mqprio_leaf(struct Qdisc *sch, unsigned long cl)
{
	struct netdev_queue *dev_queue = mqprio_queue_get(sch, cl);

	if (!dev_queue)
		return NULL;

	return dev_queue->qdisc_sleeping;
}

static unsigned long mqprio_get(struct Qdisc *sch, u32 classid)
{
	struct net_device *dev = qdisc_dev(sch);
	unsigned int ntx = TC_H_MIN(classid);

	if (ntx > dev->num_tx_queues + netdev_get_num_tc(dev))
		return 0;
	return ntx;
}

static void mqprio_put(struct Qdisc *sch, unsigned long cl)
{
}

static int mqprio_dump_class(struct Qdisc *sch, unsigned long cl,
			     struct sk_buff *skb, struct tcmsg *tcm)
{
	struct net_device *dev = qdisc_dev(sch);

	if (cl <= netdev_get_num_tc(dev)) {
		tcm->tcm_parent = TC_H_ROOT;
		tcm->tcm_info = 0;
	} else {
		int i;
		struct netdev_queue *dev_queue;

		dev_queue = mqprio_queue_get(sch, cl);
		tcm->tcm_parent = 0;
		for (i = 0; i < netdev_get_num_tc(dev); i++) {
			struct netdev_tc_txq tc = dev->tc_to_txq[i];
			int q_idx = cl - netdev_get_num_tc(dev);

			if (q_idx > tc.offset &&
			    q_idx <= tc.offset + tc.count) {
				tcm->tcm_parent =
					TC_H_MAKE(TC_H_MAJ(sch->handle),
						  TC_H_MIN(i + 1));
				break;
			}
		}
		tcm->tcm_info = dev_queue->qdisc_sleeping->handle;
	}
	tcm->tcm_handle |= TC_H_MIN(cl);
	return 0;
}

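/* Traffic-class classes report statistics summed over the tx queues in
 * their range; queue classes report the stats of the attached qdisc.
 */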
static int mqprio_dump_class_stats(struct Qdisc *sch, unsigned long cl,
				   struct gnet_dump *d)
	__releases(d->lock)
	__acquires(d->lock)
{
	struct net_device *dev = qdisc_dev(sch);

	if (cl <= netdev_get_num_tc(dev)) {
		int i;
		__u32 qlen = 0;
		struct Qdisc *qdisc;
		struct gnet_stats_queue qstats = {0};
		struct gnet_stats_basic_packed bstats = {0};
		struct netdev_tc_txq tc = dev->tc_to_txq[cl - 1];

		/* Drop the lock here; it will be reclaimed before touching
		 * statistics. This is required because the d->lock we hold
		 * here is the lock on dev_queue->qdisc_sleeping, which is
		 * also acquired below.
		 */
		if (d->lock)
			spin_unlock_bh(d->lock);

		for (i = tc.offset; i < tc.offset + tc.count; i++) {
			struct netdev_queue *q = netdev_get_tx_queue(dev, i);

			qdisc = rtnl_dereference(q->qdisc);
			spin_lock_bh(qdisc_lock(qdisc));
			qlen += qdisc->q.qlen;
			bstats.bytes += qdisc->bstats.bytes;
			bstats.packets += qdisc->bstats.packets;
			qstats.backlog += qdisc->qstats.backlog;
			qstats.drops += qdisc->qstats.drops;
			qstats.requeues += qdisc->qstats.requeues;
			qstats.overlimits += qdisc->qstats.overlimits;
			spin_unlock_bh(qdisc_lock(qdisc));
		}
		/* Reclaim root sleeping lock before completing stats */
		if (d->lock)
			spin_lock_bh(d->lock);
		if (gnet_stats_copy_basic(NULL, d, NULL, &bstats) < 0 ||
		    gnet_stats_copy_queue(d, NULL, &qstats, qlen) < 0)
			return -1;
	} else {
		struct netdev_queue *dev_queue = mqprio_queue_get(sch, cl);

		sch = dev_queue->qdisc_sleeping;
		if (gnet_stats_copy_basic(qdisc_root_sleeping_running(sch),
					  d, NULL, &sch->bstats) < 0 ||
		    gnet_stats_copy_queue(d, NULL,
					  &sch->qstats, sch->q.qlen) < 0)
			return -1;
	}
	return 0;
}

static void mqprio_walk(struct Qdisc *sch, struct qdisc_walker *arg)
{
	struct net_device *dev = qdisc_dev(sch);
	unsigned long ntx;

	if (arg->stop)
		return;

	/* Walk hierarchy with a virtual class per tc */
	arg->count = arg->skip;
	for (ntx = arg->skip;
	     ntx < dev->num_tx_queues + netdev_get_num_tc(dev);
	     ntx++) {
		if (arg->fn(sch, ntx + 1, arg) < 0) {
			arg->stop = 1;
			break;
		}
		arg->count++;
	}
}

static const struct Qdisc_class_ops mqprio_class_ops = {
	.graft		= mqprio_graft,
	.leaf		= mqprio_leaf,
	.get		= mqprio_get,
	.put		= mqprio_put,
	.walk		= mqprio_walk,
	.dump		= mqprio_dump_class,
	.dump_stats	= mqprio_dump_class_stats,
};

static struct Qdisc_ops mqprio_qdisc_ops __read_mostly = {
	.cl_ops		= &mqprio_class_ops,
	.id		= "mqprio",
	.priv_size	= sizeof(struct mqprio_sched),
	.init		= mqprio_init,
	.destroy	= mqprio_destroy,
	.attach		= mqprio_attach,
	.dump		= mqprio_dump,
	.owner		= THIS_MODULE,
};

static int __init mqprio_module_init(void)
{
	return register_qdisc(&mqprio_qdisc_ops);
}

static void __exit mqprio_module_exit(void)
{
	unregister_qdisc(&mqprio_qdisc_ops);
}

module_init(mqprio_module_init);
module_exit(mqprio_module_exit);

MODULE_LICENSE("GPL");