// SPDX-License-Identifier: GPL-2.0-only
/*
 * net/sched/sch_mq.c		Classful multiqueue dummy scheduler
 *
 * Copyright (c) 2009 Patrick McHardy <kaber@trash.net>
 */

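/* mq is typically installed automatically as the root qdisc on multiqueue
 * devices, but it can also be attached explicitly, e.g. (illustrative
 * device name):
 *
 *	tc qdisc add dev eth0 root handle 1: mq
 *
 * Each hardware TX queue then shows up as class :1 .. :n and can be given
 * its own child qdisc, e.g.:
 *
 *	tc qdisc add dev eth0 parent 1:1 pfifo
 */
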
#include <linux/types.h>
#include <linux/slab.h>
#include <linux/kernel.h>
#include <linux/export.h>
#include <linux/string.h>
#include <linux/errno.h>
#include <linux/skbuff.h>
#include <net/netlink.h>
#include <net/pkt_cls.h>
#include <net/pkt_sched.h>
#include <net/sch_generic.h>

struct mq_sched {
	struct Qdisc		**qdiscs;
};

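/* Notify the device driver of mq lifecycle events (create/destroy) via
 * ndo_setup_tc, so it can mirror the qdisc state in hardware.
 */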
static int mq_offload(struct Qdisc *sch, enum tc_mq_command cmd)
{
	struct net_device *dev = qdisc_dev(sch);
	struct tc_mq_qopt_offload opt = {
		.command = cmd,
		.handle = sch->handle,
	};

	if (!tc_can_offload(dev) || !dev->netdev_ops->ndo_setup_tc)
		return -EOPNOTSUPP;

	return dev->netdev_ops->ndo_setup_tc(dev, TC_SETUP_QDISC_MQ, &opt);
}

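/* Ask the driver to fold any hardware-held counters into sch->bstats and
 * sch->qstats via the generic offload dump helper.
 */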
static int mq_offload_stats(struct Qdisc *sch)
{
	struct tc_mq_qopt_offload opt = {
		.command = TC_MQ_STATS,
		.handle = sch->handle,
		.stats = {
			.bstats = &sch->bstats,
			.qstats = &sch->qstats,
		},
	};

	return qdisc_offload_dump_helper(sch, TC_SETUP_QDISC_MQ, &opt);
}

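/* Tear down: notify the driver, then release any child qdiscs that were
 * pre-allocated in mq_init() but not yet handed over to the TX queues by
 * mq_attach().
 */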
static void mq_destroy(struct Qdisc *sch)
{
	struct net_device *dev = qdisc_dev(sch);
	struct mq_sched *priv = qdisc_priv(sch);
	unsigned int ntx;

	mq_offload(sch, TC_MQ_DESTROY);

	if (!priv->qdiscs)
		return;
	for (ntx = 0; ntx < dev->num_tx_queues && priv->qdiscs[ntx]; ntx++)
		qdisc_put(priv->qdiscs[ntx]);
	kfree(priv->qdiscs);
}

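/* Create one default child qdisc per TX queue. The children are only
 * pre-allocated here and parked in priv->qdiscs; mq_attach() grafts them
 * onto the queues later, at a point where attachment can no longer fail.
 */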
static int mq_init(struct Qdisc *sch, struct nlattr *opt,
		   struct netlink_ext_ack *extack)
{
	struct net_device *dev = qdisc_dev(sch);
	struct mq_sched *priv = qdisc_priv(sch);
	struct netdev_queue *dev_queue;
	struct Qdisc *qdisc;
	unsigned int ntx;

	if (sch->parent != TC_H_ROOT)
		return -EOPNOTSUPP;

	if (!netif_is_multiqueue(dev))
		return -EOPNOTSUPP;

	/* pre-allocate qdiscs, attachment can't fail */
	priv->qdiscs = kcalloc(dev->num_tx_queues, sizeof(priv->qdiscs[0]),
			       GFP_KERNEL);
	if (!priv->qdiscs)
		return -ENOMEM;

	for (ntx = 0; ntx < dev->num_tx_queues; ntx++) {
		dev_queue = netdev_get_tx_queue(dev, ntx);
		qdisc = qdisc_create_dflt(dev_queue, get_default_qdisc_ops(dev, ntx),
					  TC_H_MAKE(TC_H_MAJ(sch->handle),
						    TC_H_MIN(ntx + 1)),
					  extack);
		if (!qdisc)
			return -ENOMEM;
		priv->qdiscs[ntx] = qdisc;
		qdisc->flags |= TCQ_F_ONETXQUEUE | TCQ_F_NOPARENT;
	}

	sch->flags |= TCQ_F_MQROOT;

	mq_offload(sch, TC_MQ_CREATE);
	return 0;
}

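/* Graft the pre-allocated children onto their TX queues and drop the
 * temporary array; from here on the queues own the child qdiscs.
 */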
static void mq_attach(struct Qdisc *sch)
{
	struct net_device *dev = qdisc_dev(sch);
	struct mq_sched *priv = qdisc_priv(sch);
	struct Qdisc *qdisc, *old;
	unsigned int ntx;

	for (ntx = 0; ntx < dev->num_tx_queues; ntx++) {
		qdisc = priv->qdiscs[ntx];
		old = dev_graft_qdisc(qdisc->dev_queue, qdisc);
		if (old)
			qdisc_put(old);
#ifdef CONFIG_NET_SCHED
		if (ntx < dev->real_num_tx_queues)
			qdisc_hash_add(qdisc, false);
#endif

	}
	kfree(priv->qdiscs);
	priv->qdiscs = NULL;
}

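/* Aggregate the children's queue lengths and byte/packet counters into
 * the root qdisc before it is dumped to userspace.
 */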
static int mq_dump(struct Qdisc *sch, struct sk_buff *skb)
{
	struct net_device *dev = qdisc_dev(sch);
	struct Qdisc *qdisc;
	unsigned int ntx;
	__u32 qlen = 0;

	sch->q.qlen = 0;
	memset(&sch->bstats, 0, sizeof(sch->bstats));
	memset(&sch->qstats, 0, sizeof(sch->qstats));

	/* MQ supports lockless qdiscs. However, statistics accounting needs
	 * to account for all, none, or a mix of locked and unlocked child
	 * qdiscs. Percpu stats are added to counters in-band and locking
	 * qdisc totals are added at end.
	 */
	for (ntx = 0; ntx < dev->num_tx_queues; ntx++) {
		qdisc = netdev_get_tx_queue(dev, ntx)->qdisc_sleeping;
		spin_lock_bh(qdisc_lock(qdisc));

		if (qdisc_is_percpu_stats(qdisc)) {
			qlen = qdisc_qlen_sum(qdisc);
			__gnet_stats_copy_basic(NULL, &sch->bstats,
						qdisc->cpu_bstats,
						&qdisc->bstats);
			__gnet_stats_copy_queue(&sch->qstats,
						qdisc->cpu_qstats,
						&qdisc->qstats, qlen);
			sch->q.qlen += qlen;
		} else {
			sch->q.qlen		+= qdisc->q.qlen;
			sch->bstats.bytes	+= qdisc->bstats.bytes;
			sch->bstats.packets	+= qdisc->bstats.packets;
			sch->qstats.qlen	+= qdisc->qstats.qlen;
			sch->qstats.backlog	+= qdisc->qstats.backlog;
			sch->qstats.drops	+= qdisc->qstats.drops;
			sch->qstats.requeues	+= qdisc->qstats.requeues;
			sch->qstats.overlimits	+= qdisc->qstats.overlimits;
		}

		spin_unlock_bh(qdisc_lock(qdisc));
	}

	return mq_offload_stats(sch);
}

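/* Class identifiers carry the 1-based TX queue index in their minor
 * number, i.e. class :1 maps to TX queue 0. Returns NULL for
 * out-of-range class ids.
 */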
static struct netdev_queue *mq_queue_get(struct Qdisc *sch, unsigned long cl)
{
	struct net_device *dev = qdisc_dev(sch);
	unsigned long ntx = cl - 1;

	if (ntx >= dev->num_tx_queues)
		return NULL;
	return netdev_get_tx_queue(dev, ntx);
}

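/* Pick the TX queue a new child should be grafted on, from the parent
 * class id in the netlink request.
 */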
static struct netdev_queue *mq_select_queue(struct Qdisc *sch,
					    struct tcmsg *tcm)
{
	return mq_queue_get(sch, TC_H_MIN(tcm->tcm_parent));
}

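/* Replace the child qdisc of one TX queue. The device is quiesced around
 * the swap, and the driver is told about the new child via the graft
 * offload helper.
 */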
static int mq_graft(struct Qdisc *sch, unsigned long cl, struct Qdisc *new,
		    struct Qdisc **old, struct netlink_ext_ack *extack)
{
	struct netdev_queue *dev_queue = mq_queue_get(sch, cl);
	struct tc_mq_qopt_offload graft_offload;
	struct net_device *dev = qdisc_dev(sch);

	if (dev->flags & IFF_UP)
		dev_deactivate(dev);

	*old = dev_graft_qdisc(dev_queue, new);
	if (new)
		new->flags |= TCQ_F_ONETXQUEUE | TCQ_F_NOPARENT;
	if (dev->flags & IFF_UP)
		dev_activate(dev);

	graft_offload.handle = sch->handle;
	graft_offload.graft_params.queue = cl - 1;
	graft_offload.graft_params.child_handle = new ? new->handle : 0;
	graft_offload.command = TC_MQ_GRAFT;

	qdisc_offload_graft_helper(qdisc_dev(sch), sch, new, *old,
				   TC_SETUP_QDISC_MQ, &graft_offload, extack);
	return 0;
}

static struct Qdisc *mq_leaf(struct Qdisc *sch, unsigned long cl)
{
	struct netdev_queue *dev_queue = mq_queue_get(sch, cl);

	return dev_queue->qdisc_sleeping;
}

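/* Validate a class id; mq has a fixed class for every TX queue, so this
 * only needs to range-check the minor number.
 */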
static unsigned long mq_find(struct Qdisc *sch, u32 classid)
{
	unsigned int ntx = TC_H_MIN(classid);

	if (!mq_queue_get(sch, ntx))
		return 0;
	return ntx;
}

static int mq_dump_class(struct Qdisc *sch, unsigned long cl,
			 struct sk_buff *skb, struct tcmsg *tcm)
{
	struct netdev_queue *dev_queue = mq_queue_get(sch, cl);

	tcm->tcm_parent = TC_H_ROOT;
	tcm->tcm_handle |= TC_H_MIN(cl);
	tcm->tcm_info = dev_queue->qdisc_sleeping->handle;
	return 0;
}

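/* Per-class stats are simply the stats of the child qdisc attached to
 * the corresponding TX queue.
 */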
static int mq_dump_class_stats(struct Qdisc *sch, unsigned long cl,
			       struct gnet_dump *d)
{
	struct netdev_queue *dev_queue = mq_queue_get(sch, cl);

	sch = dev_queue->qdisc_sleeping;
	if (gnet_stats_copy_basic(&sch->running, d, sch->cpu_bstats,
				  &sch->bstats) < 0 ||
	    qdisc_qstats_copy(d, sch) < 0)
		return -1;
	return 0;
}

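/* Walk all classes (one per TX queue) for class dump requests. */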
static void mq_walk(struct Qdisc *sch, struct qdisc_walker *arg)
{
	struct net_device *dev = qdisc_dev(sch);
	unsigned int ntx;

	if (arg->stop)
		return;

	arg->count = arg->skip;
	for (ntx = arg->skip; ntx < dev->num_tx_queues; ntx++) {
		if (arg->fn(sch, ntx + 1, arg) < 0) {
			arg->stop = 1;
			break;
		}
		arg->count++;
	}
}

static const struct Qdisc_class_ops mq_class_ops = {
	.select_queue	= mq_select_queue,
	.graft		= mq_graft,
	.leaf		= mq_leaf,
	.find		= mq_find,
	.walk		= mq_walk,
	.dump		= mq_dump_class,
	.dump_stats	= mq_dump_class_stats,
};

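/* Note there is no .enqueue/.dequeue: mq is a dummy in the fast path.
 * The children are attached directly to the per-queue qdisc pointers,
 * so packets never pass through the mq root itself.
 */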
struct Qdisc_ops mq_qdisc_ops __read_mostly = {
	.cl_ops		= &mq_class_ops,
	.id		= "mq",
	.priv_size	= sizeof(struct mq_sched),
	.init		= mq_init,
	.destroy	= mq_destroy,
	.attach		= mq_attach,
	.dump		= mq_dump,
	.owner		= THIS_MODULE,
};