// SPDX-License-Identifier: GPL-2.0-only
/*
 * net/sched/sch_mq.c		Classful multiqueue dummy scheduler
 *
 * Copyright (c) 2009 Patrick McHardy <kaber@trash.net>
 */

#include <linux/types.h>
#include <linux/slab.h>
#include <linux/kernel.h>
#include <linux/export.h>
#include <linux/string.h>
#include <linux/errno.h>
#include <linux/skbuff.h>
#include <net/netlink.h>
#include <net/pkt_cls.h>
#include <net/pkt_sched.h>
#include <net/sch_generic.h>

/* One child qdisc per Tx queue. The array only carries the qdiscs from
 * mq_init() to mq_attach(), which hands them over to the device queues
 * and frees it.
 */
struct mq_sched {
	struct Qdisc		**qdiscs;
};
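
/* Forward an mq command (create/destroy) to the driver via ndo_setup_tc()
 * so that devices supporting qdisc offload can mirror the state of the
 * root mq qdisc in hardware.
 */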
static int mq_offload(struct Qdisc *sch, enum tc_mq_command cmd)
{
	struct net_device *dev = qdisc_dev(sch);
	struct tc_mq_qopt_offload opt = {
		.command = cmd,
		.handle = sch->handle,
	};

	if (!tc_can_offload(dev) || !dev->netdev_ops->ndo_setup_tc)
		return -EOPNOTSUPP;

	return dev->netdev_ops->ndo_setup_tc(dev, TC_SETUP_QDISC_MQ, &opt);
}

static int mq_offload_stats(struct Qdisc *sch)
{
	struct tc_mq_qopt_offload opt = {
		.command = TC_MQ_STATS,
		.handle = sch->handle,
		.stats = {
			.bstats = &sch->bstats,
			.qstats = &sch->qstats,
		},
	};

	return qdisc_offload_dump_helper(sch, TC_SETUP_QDISC_MQ, &opt);
}

static void mq_destroy(struct Qdisc *sch)
{
	struct net_device *dev = qdisc_dev(sch);
	struct mq_sched *priv = qdisc_priv(sch);
	unsigned int ntx;

	mq_offload(sch, TC_MQ_DESTROY);

	if (!priv->qdiscs)
		return;
	for (ntx = 0; ntx < dev->num_tx_queues && priv->qdiscs[ntx]; ntx++)
		qdisc_put(priv->qdiscs[ntx]);
	kfree(priv->qdiscs);
}

static int mq_init(struct Qdisc *sch, struct nlattr *opt,
		   struct netlink_ext_ack *extack)
{
	struct net_device *dev = qdisc_dev(sch);
	struct mq_sched *priv = qdisc_priv(sch);
	struct netdev_queue *dev_queue;
	struct Qdisc *qdisc;
	unsigned int ntx;

	if (sch->parent != TC_H_ROOT)
		return -EOPNOTSUPP;

	if (!netif_is_multiqueue(dev))
		return -EOPNOTSUPP;

	/* pre-allocate qdiscs, attachment can't fail */
	priv->qdiscs = kcalloc(dev->num_tx_queues, sizeof(priv->qdiscs[0]),
			       GFP_KERNEL);
	if (!priv->qdiscs)
		return -ENOMEM;

	for (ntx = 0; ntx < dev->num_tx_queues; ntx++) {
		dev_queue = netdev_get_tx_queue(dev, ntx);
		qdisc = qdisc_create_dflt(dev_queue, get_default_qdisc_ops(dev, ntx),
					  TC_H_MAKE(TC_H_MAJ(sch->handle),
						    TC_H_MIN(ntx + 1)),
					  extack);
		if (!qdisc)
			return -ENOMEM;
		priv->qdiscs[ntx] = qdisc;
		qdisc->flags |= TCQ_F_ONETXQUEUE | TCQ_F_NOPARENT;
	}

	sch->flags |= TCQ_F_MQROOT;

	mq_offload(sch, TC_MQ_CREATE);
	return 0;
}

static void mq_attach(struct Qdisc *sch)
{
	struct net_device *dev = qdisc_dev(sch);
	struct mq_sched *priv = qdisc_priv(sch);
	struct Qdisc *qdisc, *old;
	unsigned int ntx;

	for (ntx = 0; ntx < dev->num_tx_queues; ntx++) {
		qdisc = priv->qdiscs[ntx];
		old = dev_graft_qdisc(qdisc->dev_queue, qdisc);
		if (old)
			qdisc_put(old);
#ifdef CONFIG_NET_SCHED
		if (ntx < dev->real_num_tx_queues)
			qdisc_hash_add(qdisc, false);
#endif
	}
	kfree(priv->qdiscs);
	priv->qdiscs = NULL;
}
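
/* This hook is wired up as .change_real_num_tx in mq_qdisc_ops below;
 * its definition was missing from this excerpt, and the body here
 * follows the upstream kernel implementation. It keeps the qdisc hash
 * in sync when the driver changes the number of real Tx queues:
 * default child qdiscs on queues that disappear are unhashed, and
 * those on queues that appear are hashed so they show up in dumps.
 */
static void mq_change_real_num_tx(struct Qdisc *sch, unsigned int new_real_tx)
{
#ifdef CONFIG_NET_SCHED
	struct net_device *dev = qdisc_dev(sch);
	struct Qdisc *qdisc;
	unsigned int i;

	for (i = new_real_tx; i < dev->real_num_tx_queues; i++) {
		qdisc = netdev_get_tx_queue(dev, i)->qdisc_sleeping;
		/* Only update the default qdiscs we created,
		 * NOT default qdisc owned by netdev_queue.
		 */
		if (qdisc != &noop_qdisc && !qdisc->handle)
			qdisc_hash_del(qdisc);
	}
	for (i = dev->real_num_tx_queues; i < new_real_tx; i++) {
		qdisc = netdev_get_tx_queue(dev, i)->qdisc_sleeping;
		if (qdisc != &noop_qdisc && !qdisc->handle)
			qdisc_hash_add(qdisc, false);
	}
#endif
}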

static int mq_dump(struct Qdisc *sch, struct sk_buff *skb)
{
	struct net_device *dev = qdisc_dev(sch);
	struct Qdisc *qdisc;
	unsigned int ntx;

	sch->q.qlen = 0;
	gnet_stats_basic_sync_init(&sch->bstats);
	memset(&sch->qstats, 0, sizeof(sch->qstats));

	/* MQ supports lockless qdiscs. However, statistics accounting needs
	 * to account for all, none, or a mix of locked and unlocked child
	 * qdiscs. Percpu stats are added to counters in-band and locking
	 * qdisc totals are added at end.
	 */
	for (ntx = 0; ntx < dev->num_tx_queues; ntx++) {
		qdisc = netdev_get_tx_queue(dev, ntx)->qdisc_sleeping;
		spin_lock_bh(qdisc_lock(qdisc));

		gnet_stats_add_basic(&sch->bstats, qdisc->cpu_bstats,
				     &qdisc->bstats, false);
		gnet_stats_add_queue(&sch->qstats, qdisc->cpu_qstats,
				     &qdisc->qstats);
		sch->q.qlen += qdisc_qlen(qdisc);

		spin_unlock_bh(qdisc_lock(qdisc));
	}

	return mq_offload_stats(sch);
}
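
/* Class IDs are the 1-based Tx queue numbers (minor 0 is reserved),
 * so queue index = class ID - 1.
 */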
static struct netdev_queue *mq_queue_get(struct Qdisc *sch, unsigned long cl)
{
	struct net_device *dev = qdisc_dev(sch);
	unsigned long ntx = cl - 1;

	if (ntx >= dev->num_tx_queues)
		return NULL;
	return netdev_get_tx_queue(dev, ntx);
}

static struct netdev_queue *mq_select_queue(struct Qdisc *sch,
					    struct tcmsg *tcm)
{
	return mq_queue_get(sch, TC_H_MIN(tcm->tcm_parent));
}
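
/* Replace the child qdisc of one Tx queue. The device is brought down
 * around the graft so the queue is not transmitting while its qdisc is
 * swapped, and the driver is then notified for offload.
 */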
static int mq_graft(struct Qdisc *sch, unsigned long cl, struct Qdisc *new,
		    struct Qdisc **old, struct netlink_ext_ack *extack)
{
	struct netdev_queue *dev_queue = mq_queue_get(sch, cl);
	struct tc_mq_qopt_offload graft_offload;
	struct net_device *dev = qdisc_dev(sch);

	if (dev->flags & IFF_UP)
		dev_deactivate(dev);

	*old = dev_graft_qdisc(dev_queue, new);
	if (new)
		new->flags |= TCQ_F_ONETXQUEUE | TCQ_F_NOPARENT;
	if (dev->flags & IFF_UP)
		dev_activate(dev);

	graft_offload.handle = sch->handle;
	graft_offload.graft_params.queue = cl - 1;
	graft_offload.graft_params.child_handle = new ? new->handle : 0;
	graft_offload.command = TC_MQ_GRAFT;

	qdisc_offload_graft_helper(qdisc_dev(sch), sch, new, *old,
				   TC_SETUP_QDISC_MQ, &graft_offload, extack);
	return 0;
}

static struct Qdisc *mq_leaf(struct Qdisc *sch, unsigned long cl)
{
	struct netdev_queue *dev_queue = mq_queue_get(sch, cl);

	return dev_queue->qdisc_sleeping;
}

static unsigned long mq_find(struct Qdisc *sch, u32 classid)
{
	unsigned int ntx = TC_H_MIN(classid);

	if (!mq_queue_get(sch, ntx))
		return 0;
	return ntx;
}

static int mq_dump_class(struct Qdisc *sch, unsigned long cl,
			 struct sk_buff *skb, struct tcmsg *tcm)
{
	struct netdev_queue *dev_queue = mq_queue_get(sch, cl);

	tcm->tcm_parent = TC_H_ROOT;
	tcm->tcm_handle |= TC_H_MIN(cl);
	tcm->tcm_info = dev_queue->qdisc_sleeping->handle;
	return 0;
}

static int mq_dump_class_stats(struct Qdisc *sch, unsigned long cl,
			       struct gnet_dump *d)
{
	struct netdev_queue *dev_queue = mq_queue_get(sch, cl);

	sch = dev_queue->qdisc_sleeping;
	if (gnet_stats_copy_basic(d, sch->cpu_bstats, &sch->bstats, true) < 0 ||
	    qdisc_qstats_copy(d, sch) < 0)
		return -1;
	return 0;
}
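
/* Walk all classes (one per Tx queue, numbered from 1), honouring the
 * caller's skip count and stopping if the callback returns an error.
 */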
static void mq_walk(struct Qdisc *sch, struct qdisc_walker *arg)
{
	struct net_device *dev = qdisc_dev(sch);
	unsigned int ntx;

	if (arg->stop)
		return;

	arg->count = arg->skip;
	for (ntx = arg->skip; ntx < dev->num_tx_queues; ntx++) {
		if (arg->fn(sch, ntx + 1, arg) < 0) {
			arg->stop = 1;
			break;
		}
		arg->count++;
	}
}

static const struct Qdisc_class_ops mq_class_ops = {
	.select_queue	= mq_select_queue,
	.graft		= mq_graft,
	.leaf		= mq_leaf,
	.find		= mq_find,
	.walk		= mq_walk,
	.dump		= mq_dump_class,
	.dump_stats	= mq_dump_class_stats,
};

struct Qdisc_ops mq_qdisc_ops __read_mostly = {
	.cl_ops		= &mq_class_ops,
	.id		= "mq",
	.priv_size	= sizeof(struct mq_sched),
	.init		= mq_init,
	.destroy	= mq_destroy,
	.attach		= mq_attach,
	.change_real_num_tx = mq_change_real_num_tx,
	.dump		= mq_dump,
	.owner		= THIS_MODULE,
};