/*
 * net/sched/sch_mq.c		Classful multiqueue dummy scheduler
 *
 * Copyright (c) 2009 Patrick McHardy <kaber@trash.net>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * version 2 as published by the Free Software Foundation.
 */

#include <linux/types.h>
#include <linux/slab.h>
#include <linux/kernel.h>
#include <linux/export.h>
#include <linux/string.h>
#include <linux/errno.h>
#include <linux/skbuff.h>
#include <net/netlink.h>
#include <net/pkt_cls.h>
#include <net/pkt_sched.h>
#include <net/sch_generic.h>

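/* mq is the root qdisc attached by default to multiqueue devices. It
 * creates one child qdisc per TX queue and exposes each child as a class
 * whose minor number is the queue index plus one. mq itself has no
 * enqueue/dequeue operations; the transmit path hands each skb directly
 * to the per-queue child.
 */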
struct mq_sched {
	struct Qdisc		**qdiscs;
};

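/* Notify the driver that an mq qdisc is being created or destroyed so it
 * can mirror the software state in hardware, if it implements
 * ndo_setup_tc and offloading is enabled for the device.
 */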
static int mq_offload(struct Qdisc *sch, enum tc_mq_command cmd)
{
	struct net_device *dev = qdisc_dev(sch);
	struct tc_mq_qopt_offload opt = {
		.command = cmd,
		.handle = sch->handle,
	};

	if (!tc_can_offload(dev) || !dev->netdev_ops->ndo_setup_tc)
		return -EOPNOTSUPP;

	return dev->netdev_ops->ndo_setup_tc(dev, TC_SETUP_QDISC_MQ, &opt);
}

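/* Ask the driver to fold its hardware-maintained counters into the basic
 * and queue statistics already accumulated in software for this qdisc.
 */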
static int mq_offload_stats(struct Qdisc *sch)
{
	struct tc_mq_qopt_offload opt = {
		.command = TC_MQ_STATS,
		.handle = sch->handle,
		.stats = {
			.bstats = &sch->bstats,
			.qstats = &sch->qstats,
		},
	};

	return qdisc_offload_dump_helper(sch, TC_SETUP_QDISC_MQ, &opt);
}

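/* Tear down the offloaded state first, then drop the reference on any
 * children that were allocated in mq_init() but never handed over to the
 * device queues (priv->qdiscs is NULL once mq_attach() has run).
 */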
static void mq_destroy(struct Qdisc *sch)
{
	struct net_device *dev = qdisc_dev(sch);
	struct mq_sched *priv = qdisc_priv(sch);
	unsigned int ntx;

	mq_offload(sch, TC_MQ_DESTROY);

	if (!priv->qdiscs)
		return;
	for (ntx = 0; ntx < dev->num_tx_queues && priv->qdiscs[ntx]; ntx++)
		qdisc_put(priv->qdiscs[ntx]);
	kfree(priv->qdiscs);
}

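/* Create one default child qdisc per TX queue. The children are only
 * stored in priv->qdiscs here; mq_attach() grafts them onto the device
 * queues later, at a point where attachment can no longer fail.
 */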
static int mq_init(struct Qdisc *sch, struct nlattr *opt,
		   struct netlink_ext_ack *extack)
{
	struct net_device *dev = qdisc_dev(sch);
	struct mq_sched *priv = qdisc_priv(sch);
	struct netdev_queue *dev_queue;
	struct Qdisc *qdisc;
	unsigned int ntx;

	if (sch->parent != TC_H_ROOT)
		return -EOPNOTSUPP;

	if (!netif_is_multiqueue(dev))
		return -EOPNOTSUPP;

	/* pre-allocate qdiscs, attachment can't fail */
	priv->qdiscs = kcalloc(dev->num_tx_queues, sizeof(priv->qdiscs[0]),
			       GFP_KERNEL);
	if (!priv->qdiscs)
		return -ENOMEM;

	for (ntx = 0; ntx < dev->num_tx_queues; ntx++) {
		dev_queue = netdev_get_tx_queue(dev, ntx);
		qdisc = qdisc_create_dflt(dev_queue, get_default_qdisc_ops(dev, ntx),
					  TC_H_MAKE(TC_H_MAJ(sch->handle),
						    TC_H_MIN(ntx + 1)),
					  extack);
		if (!qdisc)
			return -ENOMEM;
		priv->qdiscs[ntx] = qdisc;
		qdisc->flags |= TCQ_F_ONETXQUEUE | TCQ_F_NOPARENT;
	}

	sch->flags |= TCQ_F_MQROOT;

	mq_offload(sch, TC_MQ_CREATE);
	return 0;
}

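/* Graft the pre-allocated children onto their device queues and release
 * the temporary array. Only children on currently active queues are added
 * to the hash used when dumping qdiscs.
 */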
static void mq_attach(struct Qdisc *sch)
{
	struct net_device *dev = qdisc_dev(sch);
	struct mq_sched *priv = qdisc_priv(sch);
	struct Qdisc *qdisc, *old;
	unsigned int ntx;

	for (ntx = 0; ntx < dev->num_tx_queues; ntx++) {
		qdisc = priv->qdiscs[ntx];
		old = dev_graft_qdisc(qdisc->dev_queue, qdisc);
		if (old)
			qdisc_put(old);
#ifdef CONFIG_NET_SCHED
		if (ntx < dev->real_num_tx_queues)
			qdisc_hash_add(qdisc, false);
#endif
	}
	kfree(priv->qdiscs);
	priv->qdiscs = NULL;
}

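/* Report the sum of all children's statistics as the root's statistics,
 * then let the driver add any hardware counters on top.
 */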
static int mq_dump(struct Qdisc *sch, struct sk_buff *skb)
{
	struct net_device *dev = qdisc_dev(sch);
	struct Qdisc *qdisc;
	unsigned int ntx;
	__u32 qlen = 0;

	sch->q.qlen = 0;
	memset(&sch->bstats, 0, sizeof(sch->bstats));
	memset(&sch->qstats, 0, sizeof(sch->qstats));

	/* MQ supports lockless qdiscs. However, statistics accounting needs
	 * to account for all, none, or a mix of locked and unlocked child
	 * qdiscs. Percpu stats are added to counters in-band and locking
	 * qdisc totals are added at end.
	 */
	for (ntx = 0; ntx < dev->num_tx_queues; ntx++) {
		qdisc = netdev_get_tx_queue(dev, ntx)->qdisc_sleeping;
		spin_lock_bh(qdisc_lock(qdisc));

		if (qdisc_is_percpu_stats(qdisc)) {
			qlen = qdisc_qlen_sum(qdisc);
			__gnet_stats_copy_basic(NULL, &sch->bstats,
						qdisc->cpu_bstats,
						&qdisc->bstats);
			__gnet_stats_copy_queue(&sch->qstats,
						qdisc->cpu_qstats,
						&qdisc->qstats, qlen);
		} else {
			sch->q.qlen		+= qdisc->q.qlen;
			sch->bstats.bytes	+= qdisc->bstats.bytes;
			sch->bstats.packets	+= qdisc->bstats.packets;
			sch->qstats.qlen	+= qdisc->qstats.qlen;
			sch->qstats.backlog	+= qdisc->qstats.backlog;
			sch->qstats.drops	+= qdisc->qstats.drops;
			sch->qstats.requeues	+= qdisc->qstats.requeues;
			sch->qstats.overlimits	+= qdisc->qstats.overlimits;
		}

		spin_unlock_bh(qdisc_lock(qdisc));
	}

	return mq_offload_stats(sch);
}

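/* Class numbering: class minor N corresponds to TX queue N - 1. */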
static struct netdev_queue *mq_queue_get(struct Qdisc *sch, unsigned long cl)
{
	struct net_device *dev = qdisc_dev(sch);
	unsigned long ntx = cl - 1;

	if (ntx >= dev->num_tx_queues)
		return NULL;
	return netdev_get_tx_queue(dev, ntx);
}

static struct netdev_queue *mq_select_queue(struct Qdisc *sch,
					    struct tcmsg *tcm)
{
	return mq_queue_get(sch, TC_H_MIN(tcm->tcm_parent));
}

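/* Replace the child qdisc of a single queue. The device is quiesced
 * around the graft so the queue's qdisc pointer is never swapped while
 * the transmit path might be using it; the offload helper then tells the
 * driver which child (if any) now sits on that queue.
 */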
static int mq_graft(struct Qdisc *sch, unsigned long cl, struct Qdisc *new,
		    struct Qdisc **old, struct netlink_ext_ack *extack)
{
	struct netdev_queue *dev_queue = mq_queue_get(sch, cl);
	struct tc_mq_qopt_offload graft_offload;
	struct net_device *dev = qdisc_dev(sch);

	if (dev->flags & IFF_UP)
		dev_deactivate(dev);

	*old = dev_graft_qdisc(dev_queue, new);
	if (new)
		new->flags |= TCQ_F_ONETXQUEUE | TCQ_F_NOPARENT;
	if (dev->flags & IFF_UP)
		dev_activate(dev);

	graft_offload.handle = sch->handle;
	graft_offload.graft_params.queue = cl - 1;
	graft_offload.graft_params.child_handle = new ? new->handle : 0;
	graft_offload.command = TC_MQ_GRAFT;

	qdisc_offload_graft_helper(qdisc_dev(sch), sch, new, *old,
				   TC_SETUP_QDISC_MQ, &graft_offload, extack);
	return 0;
}

static struct Qdisc *mq_leaf(struct Qdisc *sch, unsigned long cl)
{
	struct netdev_queue *dev_queue = mq_queue_get(sch, cl);

	return dev_queue->qdisc_sleeping;
}

static unsigned long mq_find(struct Qdisc *sch, u32 classid)
{
	unsigned int ntx = TC_H_MIN(classid);

	if (!mq_queue_get(sch, ntx))
		return 0;
	return ntx;
}

static int mq_dump_class(struct Qdisc *sch, unsigned long cl,
			 struct sk_buff *skb, struct tcmsg *tcm)
{
	struct netdev_queue *dev_queue = mq_queue_get(sch, cl);

	tcm->tcm_parent = TC_H_ROOT;
	tcm->tcm_handle |= TC_H_MIN(cl);
	tcm->tcm_info = dev_queue->qdisc_sleeping->handle;
	return 0;
}

static int mq_dump_class_stats(struct Qdisc *sch, unsigned long cl,
			       struct gnet_dump *d)
{
	struct netdev_queue *dev_queue = mq_queue_get(sch, cl);

	sch = dev_queue->qdisc_sleeping;
	if (gnet_stats_copy_basic(&sch->running, d, NULL, &sch->bstats) < 0 ||
	    gnet_stats_copy_queue(d, NULL, &sch->qstats, sch->q.qlen) < 0)
		return -1;
	return 0;
}

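/* Walk all classes (one per TX queue), honouring the skip/stop protocol
 * of struct qdisc_walker.
 */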
static void mq_walk(struct Qdisc *sch, struct qdisc_walker *arg)
{
	struct net_device *dev = qdisc_dev(sch);
	unsigned int ntx;

	if (arg->stop)
		return;

	arg->count = arg->skip;
	for (ntx = arg->skip; ntx < dev->num_tx_queues; ntx++) {
		if (arg->fn(sch, ntx + 1, arg) < 0) {
			arg->stop = 1;
			break;
		}
		arg->count++;
	}
}

static const struct Qdisc_class_ops mq_class_ops = {
	.select_queue	= mq_select_queue,
	.graft		= mq_graft,
	.leaf		= mq_leaf,
	.find		= mq_find,
	.walk		= mq_walk,
	.dump		= mq_dump_class,
	.dump_stats	= mq_dump_class_stats,
};

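/* Example (from userspace): attach mq explicitly and inspect the per-queue
 * children it creates. The interface name is illustrative.
 *
 *	# tc qdisc add dev eth0 root handle 100: mq
 *	# tc qdisc show dev eth0
 *	# tc -s class show dev eth0
 */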
struct Qdisc_ops mq_qdisc_ops __read_mostly = {
	.cl_ops		= &mq_class_ops,
	.id		= "mq",
	.priv_size	= sizeof(struct mq_sched),
	.init		= mq_init,
	.destroy	= mq_destroy,
	.attach		= mq_attach,
	.dump		= mq_dump,
	.owner		= THIS_MODULE,
};