// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * net/sched/sch_generic.c      Generic packet scheduler routines.
 *
 * Authors:     Alexey Kuznetsov, <kuznet@ms2.inr.ac.ru>
 *              Jamal Hadi Salim, <hadi@cyberus.ca> 990601
 *              - Ingress support
 */

#include <linux/bitops.h>
#include <linux/module.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/string.h>
#include <linux/errno.h>
#include <linux/netdevice.h>
#include <linux/skbuff.h>
#include <linux/rtnetlink.h>
#include <linux/init.h>
#include <linux/rcupdate.h>
#include <linux/list.h>
#include <linux/slab.h>
#include <linux/if_vlan.h>
#include <linux/skb_array.h>
#include <linux/if_macvlan.h>
#include <net/sch_generic.h>
#include <net/pkt_sched.h>
#include <net/dst.h>
#include <trace/events/qdisc.h>
#include <trace/events/net.h>
#include <net/xfrm.h>

/* Qdisc to use by default */
const struct Qdisc_ops *default_qdisc_ops = &pfifo_fast_ops;
EXPORT_SYMBOL(default_qdisc_ops);

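/* Called when the dequeue path finds the underlying TX queue frozen or
 * stopped.  Clear STATE_MISSED, then re-check the queue: if it has been
 * woken up in the meantime, set STATE_MISSED again so the missed run is
 * not lost; otherwise mark the qdisc as draining.
 */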
static void qdisc_maybe_clear_missed(struct Qdisc *q,
                                     const struct netdev_queue *txq)
{
        clear_bit(__QDISC_STATE_MISSED, &q->state);

        /* Make sure the below netif_xmit_frozen_or_stopped()
         * checking happens after clearing STATE_MISSED.
         */
        smp_mb__after_atomic();

        /* Check netif_xmit_frozen_or_stopped() again, to make sure
         * STATE_MISSED is set again in case the STATE_MISSED set by
         * netif_tx_wake_queue()'s rescheduling of net_tx_action()
         * was cleared by the above clear_bit().
         */
        if (!netif_xmit_frozen_or_stopped(txq))
                set_bit(__QDISC_STATE_MISSED, &q->state);
        else
                set_bit(__QDISC_STATE_DRAINING, &q->state);
}

/* Main transmission queue. */

/* Modifications to data participating in scheduling must be protected with
 * qdisc_lock(qdisc) spinlock.
 *
 * The idea is the following:
 * - enqueue, dequeue are serialized via qdisc root lock
 * - ingress filtering is also serialized via qdisc root lock
 * - updates to tree and tree walking are only done under the rtnl mutex.
 */

#define SKB_XOFF_MAGIC ((struct sk_buff *)1UL)

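/* skb_bad_txq holds packets that were dequeued from the qdisc but could not
 * be bundled with the current chain because they map to a different TX
 * queue (see try_bulk_dequeue_skb_slow()).  They are retried before new
 * packets are pulled from the qdisc.  __skb_dequeue_bad_txq() returns
 * SKB_XOFF_MAGIC instead of a packet when the head of skb_bad_txq maps to
 * a frozen/stopped queue, so the caller knows to stop dequeueing for now.
 */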
static inline struct sk_buff *__skb_dequeue_bad_txq(struct Qdisc *q)
{
        const struct netdev_queue *txq = q->dev_queue;
        spinlock_t *lock = NULL;
        struct sk_buff *skb;

        if (q->flags & TCQ_F_NOLOCK) {
                lock = qdisc_lock(q);
                spin_lock(lock);
        }

        skb = skb_peek(&q->skb_bad_txq);
        if (skb) {
                /* check the reason of requeuing without tx lock first */
                txq = skb_get_tx_queue(txq->dev, skb);
                if (!netif_xmit_frozen_or_stopped(txq)) {
                        skb = __skb_dequeue(&q->skb_bad_txq);
                        if (qdisc_is_percpu_stats(q)) {
                                qdisc_qstats_cpu_backlog_dec(q, skb);
                                qdisc_qstats_cpu_qlen_dec(q);
                        } else {
                                qdisc_qstats_backlog_dec(q, skb);
                                q->q.qlen--;
                        }
                } else {
                        skb = SKB_XOFF_MAGIC;
                        qdisc_maybe_clear_missed(q, txq);
                }
        }

        if (lock)
                spin_unlock(lock);

        return skb;
}

static inline struct sk_buff *qdisc_dequeue_skb_bad_txq(struct Qdisc *q)
{
        struct sk_buff *skb = skb_peek(&q->skb_bad_txq);

        if (unlikely(skb))
                skb = __skb_dequeue_bad_txq(q);

        return skb;
}

static inline void qdisc_enqueue_skb_bad_txq(struct Qdisc *q,
                                             struct sk_buff *skb)
{
        spinlock_t *lock = NULL;

        if (q->flags & TCQ_F_NOLOCK) {
                lock = qdisc_lock(q);
                spin_lock(lock);
        }

        __skb_queue_tail(&q->skb_bad_txq, skb);

        if (qdisc_is_percpu_stats(q)) {
                qdisc_qstats_cpu_backlog_inc(q, skb);
                qdisc_qstats_cpu_qlen_inc(q);
        } else {
                qdisc_qstats_backlog_inc(q, skb);
                q->q.qlen++;
        }

        if (lock)
                spin_unlock(lock);
}

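/* Put a (possibly chained) skb back onto q->gso_skb after a failed
 * transmission attempt, and make sure the qdisc is run again: via
 * STATE_MISSED for lockless qdiscs, via __netif_schedule() otherwise.
 */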
static inline void dev_requeue_skb(struct sk_buff *skb, struct Qdisc *q)
{
        spinlock_t *lock = NULL;

        if (q->flags & TCQ_F_NOLOCK) {
                lock = qdisc_lock(q);
                spin_lock(lock);
        }

        while (skb) {
                struct sk_buff *next = skb->next;

                __skb_queue_tail(&q->gso_skb, skb);

                /* it's still part of the queue */
                if (qdisc_is_percpu_stats(q)) {
                        qdisc_qstats_cpu_requeues_inc(q);
                        qdisc_qstats_cpu_backlog_inc(q, skb);
                        qdisc_qstats_cpu_qlen_inc(q);
                } else {
                        q->qstats.requeues++;
                        qdisc_qstats_backlog_inc(q, skb);
                        q->q.qlen++;
                }

                skb = next;
        }

        if (lock) {
                spin_unlock(lock);
                set_bit(__QDISC_STATE_MISSED, &q->state);
        } else {
                __netif_schedule(q);
        }
}

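/* Try to add more packets from the qdisc to the chain started by @skb, as
 * long as the BQL byte budget of the TX queue allows it.  The whole chain
 * is later handed to the driver in one dev_hard_start_xmit() call.
 */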
static void try_bulk_dequeue_skb(struct Qdisc *q,
                                 struct sk_buff *skb,
                                 const struct netdev_queue *txq,
                                 int *packets)
{
        int bytelimit = qdisc_avail_bulklimit(txq) - skb->len;

        while (bytelimit > 0) {
                struct sk_buff *nskb = q->dequeue(q);

                if (!nskb)
                        break;

                bytelimit -= nskb->len; /* covers GSO len */
                skb->next = nskb;
                skb = nskb;
                (*packets)++; /* GSO counts as one pkt */
        }
        skb_mark_not_on_list(skb);
}

/* This variant of try_bulk_dequeue_skb() makes sure
 * all skbs in the chain are for the same txq
 */
static void try_bulk_dequeue_skb_slow(struct Qdisc *q,
                                      struct sk_buff *skb,
                                      int *packets)
{
        int mapping = skb_get_queue_mapping(skb);
        struct sk_buff *nskb;
        int cnt = 0;

        do {
                nskb = q->dequeue(q);
                if (!nskb)
                        break;
                if (unlikely(skb_get_queue_mapping(nskb) != mapping)) {
                        qdisc_enqueue_skb_bad_txq(q, nskb);
                        break;
                }
                skb->next = nskb;
                skb = nskb;
        } while (++cnt < 8);
        (*packets) += cnt;
        skb_mark_not_on_list(skb);
}

/* Note that dequeue_skb can possibly return a SKB list (via skb->next).
 * A requeued skb (via q->gso_skb) can also be a SKB list.
 */
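/* Dequeue order: packets requeued after a failed transmit (q->gso_skb)
 * come first, then packets stashed in q->skb_bad_txq, then fresh packets
 * from the qdisc itself, opportunistically bulked into a chain.
 */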
static struct sk_buff *dequeue_skb(struct Qdisc *q, bool *validate,
                                   int *packets)
{
        const struct netdev_queue *txq = q->dev_queue;
        struct sk_buff *skb = NULL;

        *packets = 1;
        if (unlikely(!skb_queue_empty(&q->gso_skb))) {
                spinlock_t *lock = NULL;

                if (q->flags & TCQ_F_NOLOCK) {
                        lock = qdisc_lock(q);
                        spin_lock(lock);
                }

                skb = skb_peek(&q->gso_skb);

                /* skb may be null if another cpu pulls gso_skb off in between
                 * empty check and lock.
                 */
                if (!skb) {
                        if (lock)
                                spin_unlock(lock);
                        goto validate;
                }

                /* skbs in gso_skb were already validated */
                *validate = false;
                if (xfrm_offload(skb))
                        *validate = true;
                /* check the reason of requeuing without tx lock first */
                txq = skb_get_tx_queue(txq->dev, skb);
                if (!netif_xmit_frozen_or_stopped(txq)) {
                        skb = __skb_dequeue(&q->gso_skb);
                        if (qdisc_is_percpu_stats(q)) {
                                qdisc_qstats_cpu_backlog_dec(q, skb);
                                qdisc_qstats_cpu_qlen_dec(q);
                        } else {
                                qdisc_qstats_backlog_dec(q, skb);
                                q->q.qlen--;
                        }
                } else {
                        skb = NULL;
                        qdisc_maybe_clear_missed(q, txq);
                }
                if (lock)
                        spin_unlock(lock);
                goto trace;
        }
validate:
        *validate = true;

        if ((q->flags & TCQ_F_ONETXQUEUE) &&
            netif_xmit_frozen_or_stopped(txq)) {
                qdisc_maybe_clear_missed(q, txq);
                return skb;
        }

        skb = qdisc_dequeue_skb_bad_txq(q);
        if (unlikely(skb)) {
                if (skb == SKB_XOFF_MAGIC)
                        return NULL;
                goto bulk;
        }
        skb = q->dequeue(q);
        if (skb) {
bulk:
                if (qdisc_may_bulk(q))
                        try_bulk_dequeue_skb(q, skb, txq, packets);
                else
                        try_bulk_dequeue_skb_slow(q, skb, packets);
        }
trace:
        trace_qdisc_dequeue(q, txq, *packets, skb);
        return skb;
}

/*
 * Transmit possibly several skbs, and handle the return status as
 * required. Owning the qdisc running bit guarantees that only one CPU
 * can execute this function.
 *
 * Returns to the caller:
 *				false - hardware queue frozen, back off
 *				true  - feel free to send more pkts
 */
bool sch_direct_xmit(struct sk_buff *skb, struct Qdisc *q,
                     struct net_device *dev, struct netdev_queue *txq,
                     spinlock_t *root_lock, bool validate)
{
        int ret = NETDEV_TX_BUSY;
        bool again = false;

        /* And release qdisc */
        if (root_lock)
                spin_unlock(root_lock);

        /* Note that we validate skb (GSO, checksum, ...) outside of locks */
        if (validate)
                skb = validate_xmit_skb_list(skb, dev, &again);

#ifdef CONFIG_XFRM_OFFLOAD
        if (unlikely(again)) {
                if (root_lock)
                        spin_lock(root_lock);

                dev_requeue_skb(skb, q);
                return false;
        }
#endif

        if (likely(skb)) {
                HARD_TX_LOCK(dev, txq, smp_processor_id());
                if (!netif_xmit_frozen_or_stopped(txq))
                        skb = dev_hard_start_xmit(skb, dev, txq, &ret);
                else
                        qdisc_maybe_clear_missed(q, txq);

                HARD_TX_UNLOCK(dev, txq);
        } else {
                if (root_lock)
                        spin_lock(root_lock);
                return true;
        }

        if (root_lock)
                spin_lock(root_lock);

        if (!dev_xmit_complete(ret)) {
                /* Driver returned NETDEV_TX_BUSY - requeue skb */
                if (unlikely(ret != NETDEV_TX_BUSY))
                        net_warn_ratelimited("BUG %s code %d qlen %d\n",
                                             dev->name, ret, q->q.qlen);

                dev_requeue_skb(skb, q);
                return false;
        }

        return true;
}

/*
 * NOTE: Called under qdisc_lock(q) with locally disabled BH.
 *
 * The qdisc running state (taken in qdisc_run_begin()) guarantees that
 * only one CPU can process this qdisc at a time. qdisc_lock(q) serializes
 * queue accesses for this queue.
 *
 * netif_tx_lock serializes accesses to the device driver.
 *
 * qdisc_lock(q) and netif_tx_lock are mutually exclusive:
 * if one is grabbed, the other must be free.
 *
 * Note that this procedure can be called by a watchdog timer.
 *
 * Returns to the caller:
 *				false - queue is empty or throttled.
 *				true  - queue is not empty.
 *
 */
static inline bool qdisc_restart(struct Qdisc *q, int *packets)
{
        spinlock_t *root_lock = NULL;
        struct netdev_queue *txq;
        struct net_device *dev;
        struct sk_buff *skb;
        bool validate;

        /* Dequeue packet */
        skb = dequeue_skb(q, &validate, packets);
        if (unlikely(!skb))
                return false;

        if (!(q->flags & TCQ_F_NOLOCK))
                root_lock = qdisc_lock(q);

        dev = qdisc_dev(q);
        txq = skb_get_tx_queue(dev, skb);

        return sch_direct_xmit(skb, q, dev, txq, root_lock, validate);
}

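/* Run the qdisc until it is empty, the driver backs off, or the
 * dev_tx_weight quota is exhausted.  When the quota runs out, the remaining
 * work is deferred (STATE_MISSED for lockless qdiscs, __netif_schedule()
 * otherwise) so one queue cannot monopolize the CPU in softirq context.
 */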
void __qdisc_run(struct Qdisc *q)
{
        int quota = dev_tx_weight;
        int packets;

        while (qdisc_restart(q, &packets)) {
                quota -= packets;
                if (quota <= 0) {
                        if (q->flags & TCQ_F_NOLOCK)
                                set_bit(__QDISC_STATE_MISSED, &q->state);
                        else
                                __netif_schedule(q);

                        break;
                }
        }
}

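/* Return the most recent transmit start time across all TX queues of @dev,
 * resolving VLAN and macvlan devices to their underlying real device first.
 */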
unsigned long dev_trans_start(struct net_device *dev)
{
        unsigned long val, res;
        unsigned int i;

        if (is_vlan_dev(dev))
                dev = vlan_dev_real_dev(dev);
        else if (netif_is_macvlan(dev))
                dev = macvlan_dev_real_dev(dev);
        res = READ_ONCE(netdev_get_tx_queue(dev, 0)->trans_start);
        for (i = 1; i < dev->num_tx_queues; i++) {
                val = READ_ONCE(netdev_get_tx_queue(dev, i)->trans_start);
                if (val && time_after(val, res))
                        res = val;
        }

        return res;
}
EXPORT_SYMBOL(dev_trans_start);

static void netif_freeze_queues(struct net_device *dev)
{
        unsigned int i;
        int cpu;

        cpu = smp_processor_id();
        for (i = 0; i < dev->num_tx_queues; i++) {
                struct netdev_queue *txq = netdev_get_tx_queue(dev, i);

                /* We are the only thread of execution doing a
                 * freeze, but we have to grab the _xmit_lock in
                 * order to synchronize with threads which are in
                 * the ->hard_start_xmit() handler and already
                 * checked the frozen bit.
                 */
                __netif_tx_lock(txq, cpu);
                set_bit(__QUEUE_STATE_FROZEN, &txq->state);
                __netif_tx_unlock(txq);
        }
}

void netif_tx_lock(struct net_device *dev)
{
        spin_lock(&dev->tx_global_lock);
        netif_freeze_queues(dev);
}
EXPORT_SYMBOL(netif_tx_lock);

static void netif_unfreeze_queues(struct net_device *dev)
{
        unsigned int i;

        for (i = 0; i < dev->num_tx_queues; i++) {
                struct netdev_queue *txq = netdev_get_tx_queue(dev, i);

                /* No need to grab the _xmit_lock here. If the
                 * queue is not stopped for another reason, we
                 * force a schedule.
                 */
                clear_bit(__QUEUE_STATE_FROZEN, &txq->state);
                netif_schedule_queue(txq);
        }
}

void netif_tx_unlock(struct net_device *dev)
{
        netif_unfreeze_queues(dev);
        spin_unlock(&dev->tx_global_lock);
}
EXPORT_SYMBOL(netif_tx_unlock);

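/* Per-device watchdog timer: if a TX queue is still stopped with no new
 * transmission more than dev->watchdog_timeo after its last one started,
 * freeze the queues, invoke the driver's ndo_tx_timeout() handler, then
 * unfreeze and re-arm the timer.
 */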
static void dev_watchdog(struct timer_list *t)
{
        struct net_device *dev = from_timer(dev, t, watchdog_timer);
        bool release = true;

        spin_lock(&dev->tx_global_lock);
        if (!qdisc_tx_is_noop(dev)) {
                if (netif_device_present(dev) &&
                    netif_running(dev) &&
                    netif_carrier_ok(dev)) {
                        int some_queue_timedout = 0;
                        unsigned int i;
                        unsigned long trans_start;

                        for (i = 0; i < dev->num_tx_queues; i++) {
                                struct netdev_queue *txq;

                                txq = netdev_get_tx_queue(dev, i);
                                trans_start = READ_ONCE(txq->trans_start);
                                if (netif_xmit_stopped(txq) &&
                                    time_after(jiffies, (trans_start +
                                                         dev->watchdog_timeo))) {
                                        some_queue_timedout = 1;
                                        atomic_long_inc(&txq->trans_timeout);
                                        break;
                                }
                        }

                        if (unlikely(some_queue_timedout)) {
                                trace_net_dev_xmit_timeout(dev, i);
                                WARN_ONCE(1, KERN_INFO "NETDEV WATCHDOG: %s (%s): transmit queue %u timed out\n",
                                          dev->name, netdev_drivername(dev), i);
                                netif_freeze_queues(dev);
                                dev->netdev_ops->ndo_tx_timeout(dev, i);
                                netif_unfreeze_queues(dev);
                        }
                        if (!mod_timer(&dev->watchdog_timer,
                                       round_jiffies(jiffies +
                                                     dev->watchdog_timeo)))
                                release = false;
                }
        }
        spin_unlock(&dev->tx_global_lock);

        if (release)
                dev_put_track(dev, &dev->watchdog_dev_tracker);
}

void __netdev_watchdog_up(struct net_device *dev)
{
        if (dev->netdev_ops->ndo_tx_timeout) {
                if (dev->watchdog_timeo <= 0)
                        dev->watchdog_timeo = 5*HZ;
                if (!mod_timer(&dev->watchdog_timer,
                               round_jiffies(jiffies + dev->watchdog_timeo)))
                        dev_hold_track(dev, &dev->watchdog_dev_tracker, GFP_ATOMIC);
        }
}
EXPORT_SYMBOL_GPL(__netdev_watchdog_up);

static void dev_watchdog_up(struct net_device *dev)
{
        __netdev_watchdog_up(dev);
}

static void dev_watchdog_down(struct net_device *dev)
{
        netif_tx_lock_bh(dev);
        if (del_timer(&dev->watchdog_timer))
                dev_put_track(dev, &dev->watchdog_dev_tracker);
        netif_tx_unlock_bh(dev);
}

/**
 *	netif_carrier_on - set carrier
 *	@dev: network device
 *
 * Device has detected acquisition of carrier.
 */
void netif_carrier_on(struct net_device *dev)
{
        if (test_and_clear_bit(__LINK_STATE_NOCARRIER, &dev->state)) {
                if (dev->reg_state == NETREG_UNINITIALIZED)
                        return;
                atomic_inc(&dev->carrier_up_count);
                linkwatch_fire_event(dev);
                if (netif_running(dev))
                        __netdev_watchdog_up(dev);
        }
}
EXPORT_SYMBOL(netif_carrier_on);

/**
 *	netif_carrier_off - clear carrier
 *	@dev: network device
 *
 * Device has detected loss of carrier.
 */
void netif_carrier_off(struct net_device *dev)
{
        if (!test_and_set_bit(__LINK_STATE_NOCARRIER, &dev->state)) {
                if (dev->reg_state == NETREG_UNINITIALIZED)
                        return;
                atomic_inc(&dev->carrier_down_count);
                linkwatch_fire_event(dev);
        }
}
EXPORT_SYMBOL(netif_carrier_off);

/**
 *	netif_carrier_event - report carrier state event
 *	@dev: network device
 *
 * Device has detected a carrier event but the carrier state wasn't changed.
 * Use in drivers when querying carrier state asynchronously, to avoid missing
 * events (link flaps) if link recovers before it's queried.
 */
void netif_carrier_event(struct net_device *dev)
{
        if (dev->reg_state == NETREG_UNINITIALIZED)
                return;
        atomic_inc(&dev->carrier_up_count);
        atomic_inc(&dev->carrier_down_count);
        linkwatch_fire_event(dev);
}
EXPORT_SYMBOL_GPL(netif_carrier_event);

/* "NOOP" scheduler: the best scheduler, recommended for all interfaces
   under all circumstances. It is difficult to invent anything faster or
   cheaper.
 */

static int noop_enqueue(struct sk_buff *skb, struct Qdisc *qdisc,
                        struct sk_buff **to_free)
{
        __qdisc_drop(skb, to_free);
        return NET_XMIT_CN;
}

static struct sk_buff *noop_dequeue(struct Qdisc *qdisc)
{
        return NULL;
}

struct Qdisc_ops noop_qdisc_ops __read_mostly = {
        .id             =       "noop",
        .priv_size      =       0,
        .enqueue        =       noop_enqueue,
        .dequeue        =       noop_dequeue,
        .peek           =       noop_dequeue,
        .owner          =       THIS_MODULE,
};

static struct netdev_queue noop_netdev_queue = {
        RCU_POINTER_INITIALIZER(qdisc, &noop_qdisc),
        .qdisc_sleeping =       &noop_qdisc,
};

struct Qdisc noop_qdisc = {
        .enqueue        =       noop_enqueue,
        .dequeue        =       noop_dequeue,
        .flags          =       TCQ_F_BUILTIN,
        .ops            =       &noop_qdisc_ops,
        .q.lock         =       __SPIN_LOCK_UNLOCKED(noop_qdisc.q.lock),
        .dev_queue      =       &noop_netdev_queue,
        .busylock       =       __SPIN_LOCK_UNLOCKED(noop_qdisc.busylock),
        .gso_skb = {
                .next = (struct sk_buff *)&noop_qdisc.gso_skb,
                .prev = (struct sk_buff *)&noop_qdisc.gso_skb,
                .qlen = 0,
                .lock = __SPIN_LOCK_UNLOCKED(noop_qdisc.gso_skb.lock),
        },
        .skb_bad_txq = {
                .next = (struct sk_buff *)&noop_qdisc.skb_bad_txq,
                .prev = (struct sk_buff *)&noop_qdisc.skb_bad_txq,
                .qlen = 0,
                .lock = __SPIN_LOCK_UNLOCKED(noop_qdisc.skb_bad_txq.lock),
        },
};
EXPORT_SYMBOL(noop_qdisc);

static int noqueue_init(struct Qdisc *qdisc, struct nlattr *opt,
                        struct netlink_ext_ack *extack)
{
        /* register_qdisc() assigns a default of noop_enqueue if unset,
         * but __dev_queue_xmit() treats noqueue only as such
         * if this is NULL - so clear it here. */
        qdisc->enqueue = NULL;
        return 0;
}

struct Qdisc_ops noqueue_qdisc_ops __read_mostly = {
        .id             =       "noqueue",
        .priv_size      =       0,
        .init           =       noqueue_init,
        .enqueue        =       noop_enqueue,
        .dequeue        =       noop_dequeue,
        .peek           =       noop_dequeue,
        .owner          =       THIS_MODULE,
};

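/* Map TC_PRIO_* values (skb->priority & TC_PRIO_MAX) to one of the three
 * pfifo_fast bands; band 0 is dequeued first, band 2 last.  For example,
 * TC_PRIO_CONTROL maps to band 0, TC_PRIO_BESTEFFORT to band 1 and
 * TC_PRIO_BULK to band 2.
 */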
static const u8 prio2band[TC_PRIO_MAX + 1] = {
        1, 2, 2, 2, 1, 2, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1
};

/* 3-band FIFO queue: old style, but should be a bit faster than
   generic prio+fifo combination.
 */

#define PFIFO_FAST_BANDS 3

/*
 * Private data for a pfifo_fast scheduler containing:
 *	- rings for priority bands
 */
struct pfifo_fast_priv {
        struct skb_array q[PFIFO_FAST_BANDS];
};

static inline struct skb_array *band2list(struct pfifo_fast_priv *priv,
                                          int band)
{
        return &priv->q[band];
}

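/* Enqueue onto the per-band lockless ring selected by the packet priority.
 * A full ring means the packet is dropped and accounted in the qdisc drop
 * statistics.
 */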
static int pfifo_fast_enqueue(struct sk_buff *skb, struct Qdisc *qdisc,
                              struct sk_buff **to_free)
{
        int band = prio2band[skb->priority & TC_PRIO_MAX];
        struct pfifo_fast_priv *priv = qdisc_priv(qdisc);
        struct skb_array *q = band2list(priv, band);
        unsigned int pkt_len = qdisc_pkt_len(skb);
        int err;

        err = skb_array_produce(q, skb);

        if (unlikely(err)) {
                if (qdisc_is_percpu_stats(qdisc))
                        return qdisc_drop_cpu(skb, qdisc, to_free);
                else
                        return qdisc_drop(skb, qdisc, to_free);
        }

        qdisc_update_stats_at_enqueue(qdisc, pkt_len);
        return NET_XMIT_SUCCESS;
}

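/* Dequeue from the lowest-numbered non-empty band.  If all bands look empty
 * but the qdisc is flagged non-empty, clear STATE_MISSED/STATE_DRAINING and
 * retry once, in case a concurrent enqueue raced with the first pass.
 */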
static struct sk_buff *pfifo_fast_dequeue(struct Qdisc *qdisc)
{
        struct pfifo_fast_priv *priv = qdisc_priv(qdisc);
        struct sk_buff *skb = NULL;
        bool need_retry = true;
        int band;

retry:
        for (band = 0; band < PFIFO_FAST_BANDS && !skb; band++) {
                struct skb_array *q = band2list(priv, band);

                if (__skb_array_empty(q))
                        continue;

                skb = __skb_array_consume(q);
        }
        if (likely(skb)) {
                qdisc_update_stats_at_dequeue(qdisc, skb);
        } else if (need_retry &&
                   READ_ONCE(qdisc->state) & QDISC_STATE_NON_EMPTY) {
                /* Delay clearing the STATE_MISSED here to reduce
                 * the overhead of the second spin_trylock() in
                 * qdisc_run_begin() and __netif_schedule() calling
                 * in qdisc_run_end().
                 */
                clear_bit(__QDISC_STATE_MISSED, &qdisc->state);
                clear_bit(__QDISC_STATE_DRAINING, &qdisc->state);

                /* Make sure dequeuing happens after clearing
                 * STATE_MISSED.
                 */
                smp_mb__after_atomic();

                need_retry = false;

                goto retry;
        }

        return skb;
}

static struct sk_buff *pfifo_fast_peek(struct Qdisc *qdisc)
{
        struct pfifo_fast_priv *priv = qdisc_priv(qdisc);
        struct sk_buff *skb = NULL;
        int band;

        for (band = 0; band < PFIFO_FAST_BANDS && !skb; band++) {
                struct skb_array *q = band2list(priv, band);

                skb = __skb_array_peek(q);
        }

        return skb;
}

static void pfifo_fast_reset(struct Qdisc *qdisc)
{
        int i, band;
        struct pfifo_fast_priv *priv = qdisc_priv(qdisc);

        for (band = 0; band < PFIFO_FAST_BANDS; band++) {
                struct skb_array *q = band2list(priv, band);
                struct sk_buff *skb;

                /* NULL ring is possible if destroy path is due to a failed
                 * skb_array_init() in pfifo_fast_init() case.
                 */
                if (!q->ring.queue)
                        continue;

                while ((skb = __skb_array_consume(q)) != NULL)
                        kfree_skb(skb);
        }

        if (qdisc_is_percpu_stats(qdisc)) {
                for_each_possible_cpu(i) {
                        struct gnet_stats_queue *q;

                        q = per_cpu_ptr(qdisc->cpu_qstats, i);
                        q->backlog = 0;
                        q->qlen = 0;
                }
        }
}

static int pfifo_fast_dump(struct Qdisc *qdisc, struct sk_buff *skb)
{
        struct tc_prio_qopt opt = { .bands = PFIFO_FAST_BANDS };

        memcpy(&opt.priomap, prio2band, TC_PRIO_MAX + 1);
        if (nla_put(skb, TCA_OPTIONS, sizeof(opt), &opt))
                goto nla_put_failure;
        return skb->len;

nla_put_failure:
        return -1;
}

static int pfifo_fast_init(struct Qdisc *qdisc, struct nlattr *opt,
                           struct netlink_ext_ack *extack)
{
        unsigned int qlen = qdisc_dev(qdisc)->tx_queue_len;
        struct pfifo_fast_priv *priv = qdisc_priv(qdisc);
        int prio;

        /* guard against zero length rings */
        if (!qlen)
                return -EINVAL;

        for (prio = 0; prio < PFIFO_FAST_BANDS; prio++) {
                struct skb_array *q = band2list(priv, prio);
                int err;

                err = skb_array_init(q, qlen, GFP_KERNEL);
                if (err)
                        return -ENOMEM;
        }

        /* Can by-pass the queue discipline */
        qdisc->flags |= TCQ_F_CAN_BYPASS;
        return 0;
}

static void pfifo_fast_destroy(struct Qdisc *sch)
{
        struct pfifo_fast_priv *priv = qdisc_priv(sch);
        int prio;

        for (prio = 0; prio < PFIFO_FAST_BANDS; prio++) {
                struct skb_array *q = band2list(priv, prio);

                /* NULL ring is possible if destroy path is due to a failed
                 * skb_array_init() in pfifo_fast_init() case.
                 */
                if (!q->ring.queue)
                        continue;
                /* Destroy ring but no need to kfree_skb because a call to
                 * pfifo_fast_reset() has already done that work.
                 */
                ptr_ring_cleanup(&q->ring, NULL);
        }
}

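/* Resize all three band rings when the device tx_queue_len changes. */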
Cong Wang | 7007ba6 | 2018-01-25 18:26:24 -0800 | [diff] [blame] | 890 | static int pfifo_fast_change_tx_queue_len(struct Qdisc *sch, |
| 891 | unsigned int new_len) |
| 892 | { |
| 893 | struct pfifo_fast_priv *priv = qdisc_priv(sch); |
| 894 | struct skb_array *bands[PFIFO_FAST_BANDS]; |
| 895 | int prio; |
| 896 | |
| 897 | for (prio = 0; prio < PFIFO_FAST_BANDS; prio++) { |
| 898 | struct skb_array *q = band2list(priv, prio); |
| 899 | |
| 900 | bands[prio] = q; |
| 901 | } |
| 902 | |
| 903 | return skb_array_resize_multiple(bands, PFIFO_FAST_BANDS, new_len, |
| 904 | GFP_KERNEL); |
| 905 | } |
| 906 | |
David S. Miller | 6ec1c69 | 2009-09-06 01:58:51 -0700 | [diff] [blame] | 907 | struct Qdisc_ops pfifo_fast_ops __read_mostly = { |
David S. Miller | d3678b4 | 2008-07-21 09:56:13 -0700 | [diff] [blame] | 908 | .id = "pfifo_fast", |
Krishna Kumar | fd3ae5e | 2009-08-18 21:55:59 +0000 | [diff] [blame] | 909 | .priv_size = sizeof(struct pfifo_fast_priv), |
David S. Miller | d3678b4 | 2008-07-21 09:56:13 -0700 | [diff] [blame] | 910 | .enqueue = pfifo_fast_enqueue, |
| 911 | .dequeue = pfifo_fast_dequeue, |
Jarek Poplawski | 99c0db2 | 2008-10-31 00:45:27 -0700 | [diff] [blame] | 912 | .peek = pfifo_fast_peek, |
David S. Miller | d3678b4 | 2008-07-21 09:56:13 -0700 | [diff] [blame] | 913 | .init = pfifo_fast_init, |
John Fastabend | c5ad119 | 2017-12-07 09:58:19 -0800 | [diff] [blame] | 914 | .destroy = pfifo_fast_destroy, |
David S. Miller | d3678b4 | 2008-07-21 09:56:13 -0700 | [diff] [blame] | 915 | .reset = pfifo_fast_reset, |
| 916 | .dump = pfifo_fast_dump, |
Cong Wang | 7007ba6 | 2018-01-25 18:26:24 -0800 | [diff] [blame] | 917 | .change_tx_queue_len = pfifo_fast_change_tx_queue_len, |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 918 | .owner = THIS_MODULE, |
John Fastabend | c5ad119 | 2017-12-07 09:58:19 -0800 | [diff] [blame] | 919 | .static_flags = TCQ_F_NOLOCK | TCQ_F_CPUSTATS, |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 920 | }; |
Eric Dumazet | 1f27cde | 2016-03-02 08:21:43 -0800 | [diff] [blame] | 921 | EXPORT_SYMBOL(pfifo_fast_ops); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 922 | |
Cong Wang | 1a33e10 | 2020-05-02 22:22:19 -0700 | [diff] [blame] | 923 | static struct lock_class_key qdisc_tx_busylock; |
Cong Wang | 1a33e10 | 2020-05-02 22:22:19 -0700 | [diff] [blame] | 924 | |
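| | /* Allocate a Qdisc plus ops->priv_size bytes of private data on the NUMA |
| | * node of the tx queue. Per-CPU stats are only allocated when the ops |
| | * request TCQ_F_CPUSTATS via static_flags. |
| | */ |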
David S. Miller | 5ce2d48 | 2008-07-08 17:06:30 -0700 | [diff] [blame] | 925 | struct Qdisc *qdisc_alloc(struct netdev_queue *dev_queue, |
Alexander Aring | d0bd684 | 2017-12-20 12:35:20 -0500 | [diff] [blame] | 926 | const struct Qdisc_ops *ops, |
| 927 | struct netlink_ext_ack *extack) |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 928 | { |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 929 | struct Qdisc *sch; |
Eric Dumazet | 846e463 | 2020-10-07 09:51:11 -0700 | [diff] [blame] | 930 | unsigned int size = sizeof(*sch) + ops->priv_size; |
Thomas Graf | 3d54b82 | 2005-07-05 14:15:09 -0700 | [diff] [blame] | 931 | int err = -ENOBUFS; |
Jesus Sanchez-Palencia | 26aa045 | 2017-10-16 18:01:23 -0700 | [diff] [blame] | 932 | struct net_device *dev; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 933 | |
Jesus Sanchez-Palencia | 26aa045 | 2017-10-16 18:01:23 -0700 | [diff] [blame] | 934 | if (!dev_queue) { |
Alexander Aring | d0bd684 | 2017-12-20 12:35:20 -0500 | [diff] [blame] | 935 | NL_SET_ERR_MSG(extack, "No device queue given"); |
Jesus Sanchez-Palencia | 26aa045 | 2017-10-16 18:01:23 -0700 | [diff] [blame] | 936 | err = -EINVAL; |
| 937 | goto errout; |
| 938 | } |
| 939 | |
| 940 | dev = dev_queue->dev; |
Eric Dumazet | 846e463 | 2020-10-07 09:51:11 -0700 | [diff] [blame] | 941 | sch = kzalloc_node(size, GFP_KERNEL, netdev_queue_numa_node_read(dev_queue)); |
Eric Dumazet | f2cd2d3 | 2010-11-29 08:14:37 +0000 | [diff] [blame] | 942 | |
Eric Dumazet | 846e463 | 2020-10-07 09:51:11 -0700 | [diff] [blame] | 943 | if (!sch) |
Thomas Graf | 3d54b82 | 2005-07-05 14:15:09 -0700 | [diff] [blame] | 944 | goto errout; |
John Fastabend | a53851e | 2017-12-07 09:55:45 -0800 | [diff] [blame] | 945 | __skb_queue_head_init(&sch->gso_skb); |
John Fastabend | 70e57d5 | 2017-12-07 09:56:23 -0800 | [diff] [blame] | 946 | __skb_queue_head_init(&sch->skb_bad_txq); |
Florian Westphal | 48da34b | 2016-09-18 00:57:34 +0200 | [diff] [blame] | 947 | qdisc_skb_head_init(&sch->q); |
Ahmed S. Darwish | 50dc9a8 | 2021-10-16 10:49:09 +0200 | [diff] [blame] | 948 | gnet_stats_basic_sync_init(&sch->bstats); |
Florian Westphal | 48da34b | 2016-09-18 00:57:34 +0200 | [diff] [blame] | 949 | spin_lock_init(&sch->q.lock); |
Eric Dumazet | 23d3b8b | 2012-09-05 01:02:56 +0000 | [diff] [blame] | 950 | |
John Fastabend | d59f5ff | 2017-12-07 09:55:26 -0800 | [diff] [blame] | 951 | if (ops->static_flags & TCQ_F_CPUSTATS) { |
| 952 | sch->cpu_bstats = |
Ahmed S. Darwish | 50dc9a8 | 2021-10-16 10:49:09 +0200 | [diff] [blame] | 953 | netdev_alloc_pcpu_stats(struct gnet_stats_basic_sync); |
John Fastabend | d59f5ff | 2017-12-07 09:55:26 -0800 | [diff] [blame] | 954 | if (!sch->cpu_bstats) |
| 955 | goto errout1; |
| 956 | |
| 957 | sch->cpu_qstats = alloc_percpu(struct gnet_stats_queue); |
| 958 | if (!sch->cpu_qstats) { |
| 959 | free_percpu(sch->cpu_bstats); |
| 960 | goto errout1; |
| 961 | } |
| 962 | } |
| 963 | |
Eric Dumazet | 79640a4 | 2010-06-02 05:09:29 -0700 | [diff] [blame] | 964 | spin_lock_init(&sch->busylock); |
Cong Wang | 1a33e10 | 2020-05-02 22:22:19 -0700 | [diff] [blame] | 965 | lockdep_set_class(&sch->busylock, |
| 966 | dev->qdisc_tx_busylock ?: &qdisc_tx_busylock); |
| 967 | |
Paolo Abeni | 96009c7 | 2018-05-15 16:24:36 +0200 | [diff] [blame] | 968 | /* seqlock has the same scope as busylock, for NOLOCK qdisc */ |
| 969 | spin_lock_init(&sch->seqlock); |
Yunsheng Lin | 06f5553 | 2021-08-03 18:58:21 +0800 | [diff] [blame] | 970 | lockdep_set_class(&sch->seqlock, |
Cong Wang | 1a33e10 | 2020-05-02 22:22:19 -0700 | [diff] [blame] | 971 | dev->qdisc_tx_busylock ?: &qdisc_tx_busylock); |
| 972 | |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 973 | sch->ops = ops; |
John Fastabend | d59f5ff | 2017-12-07 09:55:26 -0800 | [diff] [blame] | 974 | sch->flags = ops->static_flags; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 975 | sch->enqueue = ops->enqueue; |
| 976 | sch->dequeue = ops->dequeue; |
David S. Miller | bb949fb | 2008-07-08 16:55:56 -0700 | [diff] [blame] | 977 | sch->dev_queue = dev_queue; |
Eric Dumazet | 606509f | 2021-12-04 20:22:13 -0800 | [diff] [blame] | 978 | dev_hold_track(dev, &sch->dev_tracker, GFP_KERNEL); |
Reshetova, Elena | 7b93640 | 2017-07-04 15:53:07 +0300 | [diff] [blame] | 979 | refcount_set(&sch->refcnt, 1); |
Thomas Graf | 3d54b82 | 2005-07-05 14:15:09 -0700 | [diff] [blame] | 980 | |
| 981 | return sch; |
John Fastabend | d59f5ff | 2017-12-07 09:55:26 -0800 | [diff] [blame] | 982 | errout1: |
Eric Dumazet | 846e463 | 2020-10-07 09:51:11 -0700 | [diff] [blame] | 983 | kfree(sch); |
Thomas Graf | 3d54b82 | 2005-07-05 14:15:09 -0700 | [diff] [blame] | 984 | errout: |
WANG Cong | 01e123d | 2008-06-27 19:51:35 -0700 | [diff] [blame] | 985 | return ERR_PTR(err); |
Thomas Graf | 3d54b82 | 2005-07-05 14:15:09 -0700 | [diff] [blame] | 986 | } |
| 987 | |
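| | /* Allocate a default qdisc for the given ops and run its init(); on failure |
| | * the module reference is dropped (or the half-built qdisc is released with |
| | * qdisc_put()) and NULL is returned. |
| | */ |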
Changli Gao | 3511c91 | 2010-10-16 13:04:08 +0000 | [diff] [blame] | 988 | struct Qdisc *qdisc_create_dflt(struct netdev_queue *dev_queue, |
stephen hemminger | d2a7f26 | 2013-08-31 10:15:50 -0700 | [diff] [blame] | 989 | const struct Qdisc_ops *ops, |
Alexander Aring | a38a9882 | 2017-12-20 12:35:21 -0500 | [diff] [blame] | 990 | unsigned int parentid, |
| 991 | struct netlink_ext_ack *extack) |
Thomas Graf | 3d54b82 | 2005-07-05 14:15:09 -0700 | [diff] [blame] | 992 | { |
| 993 | struct Qdisc *sch; |
YOSHIFUJI Hideaki | 10297b9 | 2007-02-09 23:25:16 +0900 | [diff] [blame] | 994 | |
Alexander Aring | a38a9882 | 2017-12-20 12:35:21 -0500 | [diff] [blame] | 995 | if (!try_module_get(ops->owner)) { |
| 996 | NL_SET_ERR_MSG(extack, "Failed to increase module reference counter"); |
Eric Dumazet | 166ee5b | 2016-08-24 09:39:02 -0700 | [diff] [blame] | 997 | return NULL; |
Alexander Aring | a38a9882 | 2017-12-20 12:35:21 -0500 | [diff] [blame] | 998 | } |
stephen hemminger | 6da7c8f | 2013-08-27 16:19:08 -0700 | [diff] [blame] | 999 | |
Alexander Aring | a38a9882 | 2017-12-20 12:35:21 -0500 | [diff] [blame] | 1000 | sch = qdisc_alloc(dev_queue, ops, extack); |
Eric Dumazet | 166ee5b | 2016-08-24 09:39:02 -0700 | [diff] [blame] | 1001 | if (IS_ERR(sch)) { |
| 1002 | module_put(ops->owner); |
| 1003 | return NULL; |
| 1004 | } |
Patrick McHardy | 9f9afec | 2006-11-29 17:35:18 -0800 | [diff] [blame] | 1005 | sch->parent = parentid; |
Thomas Graf | 3d54b82 | 2005-07-05 14:15:09 -0700 | [diff] [blame] | 1006 | |
Cong Wang | f5a7833 | 2020-05-26 21:35:25 -0700 | [diff] [blame] | 1007 | if (!ops->init || ops->init(sch, NULL, extack) == 0) { |
| 1008 | trace_qdisc_create(ops, dev_queue->dev, parentid); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1009 | return sch; |
Cong Wang | f5a7833 | 2020-05-26 21:35:25 -0700 | [diff] [blame] | 1010 | } |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1011 | |
Vlad Buslov | 86bd446 | 2018-09-24 19:22:50 +0300 | [diff] [blame] | 1012 | qdisc_put(sch); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1013 | return NULL; |
| 1014 | } |
Patrick McHardy | 62e3ba1 | 2008-01-22 22:10:23 -0800 | [diff] [blame] | 1015 | EXPORT_SYMBOL(qdisc_create_dflt); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1016 | |
David S. Miller | 5fb6622 | 2008-08-02 20:02:43 -0700 | [diff] [blame] | 1017 | /* Under qdisc_lock(qdisc) and BH! */ |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1018 | |
| 1019 | void qdisc_reset(struct Qdisc *qdisc) |
| 1020 | { |
Eric Dumazet | 20fea08 | 2007-11-14 01:44:41 -0800 | [diff] [blame] | 1021 | const struct Qdisc_ops *ops = qdisc->ops; |
John Fastabend | a53851e | 2017-12-07 09:55:45 -0800 | [diff] [blame] | 1022 | struct sk_buff *skb, *tmp; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1023 | |
Cong Wang | a34dac0 | 2020-05-26 21:35:24 -0700 | [diff] [blame] | 1024 | trace_qdisc_reset(qdisc); |
| 1025 | |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1026 | if (ops->reset) |
| 1027 | ops->reset(qdisc); |
Jarek Poplawski | 67305eb | 2008-11-03 02:52:50 -0800 | [diff] [blame] | 1028 | |
John Fastabend | a53851e | 2017-12-07 09:55:45 -0800 | [diff] [blame] | 1029 | skb_queue_walk_safe(&qdisc->gso_skb, skb, tmp) { |
| 1030 | __skb_unlink(skb, &qdisc->gso_skb); |
| 1031 | kfree_skb_list(skb); |
Krishna Kumar | bbd8a0d | 2009-08-06 01:44:21 +0000 | [diff] [blame] | 1032 | } |
John Fastabend | a53851e | 2017-12-07 09:55:45 -0800 | [diff] [blame] | 1033 | |
John Fastabend | 70e57d5 | 2017-12-07 09:56:23 -0800 | [diff] [blame] | 1034 | skb_queue_walk_safe(&qdisc->skb_bad_txq, skb, tmp) { |
| 1035 | __skb_unlink(skb, &qdisc->skb_bad_txq); |
| 1036 | kfree_skb_list(skb); |
| 1037 | } |
| 1038 | |
Eric Dumazet | 4d202a0 | 2016-06-21 23:16:52 -0700 | [diff] [blame] | 1039 | qdisc->q.qlen = 0; |
Konstantin Khlebnikov | c8e1812 | 2017-09-20 15:45:36 +0300 | [diff] [blame] | 1040 | qdisc->qstats.backlog = 0; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1041 | } |
Patrick McHardy | 62e3ba1 | 2008-01-22 22:10:23 -0800 | [diff] [blame] | 1042 | EXPORT_SYMBOL(qdisc_reset); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1043 | |
Daniel Borkmann | 81d947e | 2018-01-15 23:12:09 +0100 | [diff] [blame] | 1044 | void qdisc_free(struct Qdisc *qdisc) |
Eric Dumazet | 5d944c6 | 2010-03-31 07:06:04 +0000 | [diff] [blame] | 1045 | { |
John Fastabend | 73c20a8 | 2016-01-05 09:11:36 -0800 | [diff] [blame] | 1046 | if (qdisc_is_percpu_stats(qdisc)) { |
John Fastabend | 22e0f8b | 2014-09-28 11:52:56 -0700 | [diff] [blame] | 1047 | free_percpu(qdisc->cpu_bstats); |
John Fastabend | 73c20a8 | 2016-01-05 09:11:36 -0800 | [diff] [blame] | 1048 | free_percpu(qdisc->cpu_qstats); |
| 1049 | } |
John Fastabend | 22e0f8b | 2014-09-28 11:52:56 -0700 | [diff] [blame] | 1050 | |
Eric Dumazet | 846e463 | 2020-10-07 09:51:11 -0700 | [diff] [blame] | 1051 | kfree(qdisc); |
Eric Dumazet | 5d944c6 | 2010-03-31 07:06:04 +0000 | [diff] [blame] | 1052 | } |
| 1053 | |
Wei Yongjun | 5362700 | 2018-09-27 14:47:56 +0000 | [diff] [blame] | 1054 | static void qdisc_free_cb(struct rcu_head *head) |
Vlad Buslov | 3a7d0d0 | 2018-09-24 19:22:51 +0300 | [diff] [blame] | 1055 | { |
| 1056 | struct Qdisc *q = container_of(head, struct Qdisc, rcu); |
| 1057 | |
| 1058 | qdisc_free(q); |
| 1059 | } |
| 1060 | |
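| | /* Final teardown: unhash the qdisc, drop its stab and rate estimator, reset |
| | * it to free any queued skbs, invoke ops->destroy(), then free the memory |
| | * after an RCU grace period via call_rcu(). |
| | */ |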
Vlad Buslov | 86bd446 | 2018-09-24 19:22:50 +0300 | [diff] [blame] | 1061 | static void qdisc_destroy(struct Qdisc *qdisc) |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1062 | { |
Eric Dumazet | 20fea08 | 2007-11-14 01:44:41 -0800 | [diff] [blame] | 1063 | const struct Qdisc_ops *ops = qdisc->ops; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1064 | |
David S. Miller | 3a682fb | 2008-07-20 18:13:01 -0700 | [diff] [blame] | 1065 | #ifdef CONFIG_NET_SCHED |
Jiri Kosina | 59cc1f6 | 2016-08-10 11:05:15 +0200 | [diff] [blame] | 1066 | qdisc_hash_del(qdisc); |
Jarek Poplawski | f6e0b23 | 2008-08-22 03:24:05 -0700 | [diff] [blame] | 1067 | |
Eric Dumazet | a2da570 | 2011-01-20 03:48:19 +0000 | [diff] [blame] | 1068 | qdisc_put_stab(rtnl_dereference(qdisc->stab)); |
David S. Miller | 3a682fb | 2008-07-20 18:13:01 -0700 | [diff] [blame] | 1069 | #endif |
Eric Dumazet | 1c0d32f | 2016-12-04 09:48:16 -0800 | [diff] [blame] | 1070 | gen_kill_estimator(&qdisc->rate_est); |
Cong Wang | 4909dab | 2020-05-26 21:35:23 -0700 | [diff] [blame] | 1071 | |
| 1072 | qdisc_reset(qdisc); |
| 1073 | |
Patrick McHardy | 85670cc | 2006-09-27 16:45:45 -0700 | [diff] [blame] | 1074 | if (ops->destroy) |
| 1075 | ops->destroy(qdisc); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1076 | |
Patrick McHardy | 85670cc | 2006-09-27 16:45:45 -0700 | [diff] [blame] | 1077 | module_put(ops->owner); |
Eric Dumazet | 606509f | 2021-12-04 20:22:13 -0800 | [diff] [blame] | 1078 | dev_put_track(qdisc_dev(qdisc), &qdisc->dev_tracker); |
David S. Miller | 8a34c5d | 2008-07-17 00:47:45 -0700 | [diff] [blame] | 1079 | |
Cong Wang | a34dac0 | 2020-05-26 21:35:24 -0700 | [diff] [blame] | 1080 | trace_qdisc_destroy(qdisc); |
| 1081 | |
Vlad Buslov | 3a7d0d0 | 2018-09-24 19:22:51 +0300 | [diff] [blame] | 1082 | call_rcu(&qdisc->rcu, qdisc_free_cb); |
David S. Miller | 8a34c5d | 2008-07-17 00:47:45 -0700 | [diff] [blame] | 1083 | } |
Vlad Buslov | 86bd446 | 2018-09-24 19:22:50 +0300 | [diff] [blame] | 1084 | |
| 1085 | void qdisc_put(struct Qdisc *qdisc) |
| 1086 | { |
Cong Wang | 6efb971 | 2019-09-12 10:22:30 -0700 | [diff] [blame] | 1087 | if (!qdisc) |
| 1088 | return; |
| 1089 | |
Vlad Buslov | 86bd446 | 2018-09-24 19:22:50 +0300 | [diff] [blame] | 1090 | if (qdisc->flags & TCQ_F_BUILTIN || |
| 1091 | !refcount_dec_and_test(&qdisc->refcnt)) |
| 1092 | return; |
| 1093 | |
| 1094 | qdisc_destroy(qdisc); |
| 1095 | } |
| 1096 | EXPORT_SYMBOL(qdisc_put); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1097 | |
Vlad Buslov | 3a7d0d0 | 2018-09-24 19:22:51 +0300 | [diff] [blame] | 1098 | /* Version of qdisc_put() that is called with rtnl mutex unlocked. |
| 1099 | * Intended as an optimization, this function only takes the rtnl lock if |
| 1100 | * the qdisc reference counter has reached zero. |
| 1101 | */ |
| 1102 | |
| 1103 | void qdisc_put_unlocked(struct Qdisc *qdisc) |
| 1104 | { |
| 1105 | if (qdisc->flags & TCQ_F_BUILTIN || |
| 1106 | !refcount_dec_and_rtnl_lock(&qdisc->refcnt)) |
| 1107 | return; |
| 1108 | |
| 1109 | qdisc_destroy(qdisc); |
| 1110 | rtnl_unlock(); |
| 1111 | } |
| 1112 | EXPORT_SYMBOL(qdisc_put_unlocked); |
| 1113 | |
Patrick McHardy | 589983c | 2009-09-04 06:41:20 +0000 | [diff] [blame] | 1114 | /* Attach toplevel qdisc to device queue. */ |
| 1115 | struct Qdisc *dev_graft_qdisc(struct netdev_queue *dev_queue, |
| 1116 | struct Qdisc *qdisc) |
| 1117 | { |
| 1118 | struct Qdisc *oqdisc = dev_queue->qdisc_sleeping; |
| 1119 | spinlock_t *root_lock; |
| 1120 | |
| 1121 | root_lock = qdisc_lock(oqdisc); |
| 1122 | spin_lock_bh(root_lock); |
| 1123 | |
Patrick McHardy | 589983c | 2009-09-04 06:41:20 +0000 | [diff] [blame] | 1124 | /* ... and graft new one */ |
| 1125 | if (qdisc == NULL) |
| 1126 | qdisc = &noop_qdisc; |
| 1127 | dev_queue->qdisc_sleeping = qdisc; |
| 1128 | rcu_assign_pointer(dev_queue->qdisc, &noop_qdisc); |
| 1129 | |
| 1130 | spin_unlock_bh(root_lock); |
| 1131 | |
| 1132 | return oqdisc; |
| 1133 | } |
John Fastabend | b8970f0 | 2011-01-17 08:06:09 +0000 | [diff] [blame] | 1134 | EXPORT_SYMBOL(dev_graft_qdisc); |
Patrick McHardy | 589983c | 2009-09-04 06:41:20 +0000 | [diff] [blame] | 1135 | |
David S. Miller | e8a0464 | 2008-07-17 00:34:19 -0700 | [diff] [blame] | 1136 | static void attach_one_default_qdisc(struct net_device *dev, |
| 1137 | struct netdev_queue *dev_queue, |
| 1138 | void *_unused) |
| 1139 | { |
Phil Sutter | 3e692f2 | 2015-08-27 21:21:39 +0200 | [diff] [blame] | 1140 | struct Qdisc *qdisc; |
| 1141 | const struct Qdisc_ops *ops = default_qdisc_ops; |
David S. Miller | e8a0464 | 2008-07-17 00:34:19 -0700 | [diff] [blame] | 1142 | |
Phil Sutter | 3e692f2 | 2015-08-27 21:21:39 +0200 | [diff] [blame] | 1143 | if (dev->priv_flags & IFF_NO_QUEUE) |
| 1144 | ops = &noqueue_qdisc_ops; |
Vincent Prince | 546b85b | 2019-10-23 15:44:20 +0200 | [diff] [blame] | 1145 | else if (dev->type == ARPHRD_CAN) |
| 1146 | ops = &pfifo_fast_ops; |
Phil Sutter | 3e692f2 | 2015-08-27 21:21:39 +0200 | [diff] [blame] | 1147 | |
Alexander Aring | a38a9882 | 2017-12-20 12:35:21 -0500 | [diff] [blame] | 1148 | qdisc = qdisc_create_dflt(dev_queue, ops, TC_H_ROOT, NULL); |
Jesper Dangaard Brouer | bf6dba7 | 2020-04-30 13:42:22 +0200 | [diff] [blame] | 1149 | if (!qdisc) |
Phil Sutter | 3e692f2 | 2015-08-27 21:21:39 +0200 | [diff] [blame] | 1150 | return; |
Jesper Dangaard Brouer | bf6dba7 | 2020-04-30 13:42:22 +0200 | [diff] [blame] | 1151 | |
Phil Sutter | 3e692f2 | 2015-08-27 21:21:39 +0200 | [diff] [blame] | 1152 | if (!netif_is_multiqueue(dev)) |
Eric Dumazet | 4eaf3b8 | 2015-12-01 20:08:51 -0800 | [diff] [blame] | 1153 | qdisc->flags |= TCQ_F_ONETXQUEUE | TCQ_F_NOPARENT; |
David S. Miller | e8a0464 | 2008-07-17 00:34:19 -0700 | [diff] [blame] | 1154 | dev_queue->qdisc_sleeping = qdisc; |
| 1155 | } |
| 1156 | |
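| | /* Pick a root qdisc for the device: single-queue (or IFF_NO_QUEUE) devices |
| | * get the per-queue default directly, multiqueue devices get an mq root |
| | * qdisc whose ->attach() sets up the per-queue children. |
| | */ |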
David S. Miller | 6ec1c69 | 2009-09-06 01:58:51 -0700 | [diff] [blame] | 1157 | static void attach_default_qdiscs(struct net_device *dev) |
| 1158 | { |
| 1159 | struct netdev_queue *txq; |
| 1160 | struct Qdisc *qdisc; |
| 1161 | |
| 1162 | txq = netdev_get_tx_queue(dev, 0); |
| 1163 | |
Phil Sutter | 4b46995 | 2015-08-13 19:01:07 +0200 | [diff] [blame] | 1164 | if (!netif_is_multiqueue(dev) || |
Phil Sutter | 4b46995 | 2015-08-13 19:01:07 +0200 | [diff] [blame] | 1165 | dev->priv_flags & IFF_NO_QUEUE) { |
David S. Miller | 6ec1c69 | 2009-09-06 01:58:51 -0700 | [diff] [blame] | 1166 | netdev_for_each_tx_queue(dev, attach_one_default_qdisc, NULL); |
| 1167 | dev->qdisc = txq->qdisc_sleeping; |
Eric Dumazet | 551143d | 2017-08-24 21:12:28 -0700 | [diff] [blame] | 1168 | qdisc_refcount_inc(dev->qdisc); |
David S. Miller | 6ec1c69 | 2009-09-06 01:58:51 -0700 | [diff] [blame] | 1169 | } else { |
Alexander Aring | a38a9882 | 2017-12-20 12:35:21 -0500 | [diff] [blame] | 1170 | qdisc = qdisc_create_dflt(txq, &mq_qdisc_ops, TC_H_ROOT, NULL); |
David S. Miller | 6ec1c69 | 2009-09-06 01:58:51 -0700 | [diff] [blame] | 1171 | if (qdisc) { |
David S. Miller | 6ec1c69 | 2009-09-06 01:58:51 -0700 | [diff] [blame] | 1172 | dev->qdisc = qdisc; |
Eric Dumazet | e57a784 | 2013-12-12 15:41:56 -0800 | [diff] [blame] | 1173 | qdisc->ops->attach(qdisc); |
David S. Miller | 6ec1c69 | 2009-09-06 01:58:51 -0700 | [diff] [blame] | 1174 | } |
| 1175 | } |
Jesper Dangaard Brouer | bf6dba7 | 2020-04-30 13:42:22 +0200 | [diff] [blame] | 1176 | |
| 1177 | /* Detect default qdisc setup/init failed and fallback to "noqueue" */ |
| 1178 | if (dev->qdisc == &noop_qdisc) { |
| 1179 | netdev_warn(dev, "default qdisc (%s) fail, fallback to %s\n", |
| 1180 | default_qdisc_ops->id, noqueue_qdisc_ops.id); |
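| | 		/* Temporarily set IFF_NO_QUEUE so attach_one_default_qdisc() |
| | 		 * picks noqueue; the bit is cleared again right after. |
| | 		 */ |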
| 1181 | dev->priv_flags |= IFF_NO_QUEUE; |
| 1182 | netdev_for_each_tx_queue(dev, attach_one_default_qdisc, NULL); |
| 1183 | dev->qdisc = txq->qdisc_sleeping; |
| 1184 | qdisc_refcount_inc(dev->qdisc); |
| 1185 | dev->priv_flags ^= IFF_NO_QUEUE; |
| 1186 | } |
| 1187 | |
Jiri Kosina | 59cc1f6 | 2016-08-10 11:05:15 +0200 | [diff] [blame] | 1188 | #ifdef CONFIG_NET_SCHED |
WANG Cong | 92f9170 | 2017-04-04 18:51:30 -0700 | [diff] [blame] | 1189 | if (dev->qdisc != &noop_qdisc) |
Jiri Kosina | 49b4997 | 2017-03-08 16:03:32 +0100 | [diff] [blame] | 1190 | qdisc_hash_add(dev->qdisc, false); |
Jiri Kosina | 59cc1f6 | 2016-08-10 11:05:15 +0200 | [diff] [blame] | 1191 | #endif |
David S. Miller | 6ec1c69 | 2009-09-06 01:58:51 -0700 | [diff] [blame] | 1192 | } |
| 1193 | |
David S. Miller | e8a0464 | 2008-07-17 00:34:19 -0700 | [diff] [blame] | 1194 | static void transition_one_qdisc(struct net_device *dev, |
| 1195 | struct netdev_queue *dev_queue, |
| 1196 | void *_need_watchdog) |
| 1197 | { |
David S. Miller | 83874000 | 2008-07-17 00:53:03 -0700 | [diff] [blame] | 1198 | struct Qdisc *new_qdisc = dev_queue->qdisc_sleeping; |
David S. Miller | e8a0464 | 2008-07-17 00:34:19 -0700 | [diff] [blame] | 1199 | int *need_watchdog_p = _need_watchdog; |
| 1200 | |
David S. Miller | a9312ae | 2008-08-17 21:51:03 -0700 | [diff] [blame] | 1201 | if (!(new_qdisc->flags & TCQ_F_BUILTIN)) |
| 1202 | clear_bit(__QDISC_STATE_DEACTIVATED, &new_qdisc->state); |
| 1203 | |
David S. Miller | 83874000 | 2008-07-17 00:53:03 -0700 | [diff] [blame] | 1204 | rcu_assign_pointer(dev_queue->qdisc, new_qdisc); |
Phil Sutter | 3e692f2 | 2015-08-27 21:21:39 +0200 | [diff] [blame] | 1205 | if (need_watchdog_p) { |
Eric Dumazet | 5337824 | 2021-11-16 19:29:22 -0800 | [diff] [blame] | 1206 | WRITE_ONCE(dev_queue->trans_start, 0); |
David S. Miller | e8a0464 | 2008-07-17 00:34:19 -0700 | [diff] [blame] | 1207 | *need_watchdog_p = 1; |
Eric Dumazet | 9d21493 | 2009-05-17 20:55:16 -0700 | [diff] [blame] | 1208 | } |
David S. Miller | e8a0464 | 2008-07-17 00:34:19 -0700 | [diff] [blame] | 1209 | } |
| 1210 | |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1211 | void dev_activate(struct net_device *dev) |
| 1212 | { |
David S. Miller | e8a0464 | 2008-07-17 00:34:19 -0700 | [diff] [blame] | 1213 | int need_watchdog; |
David S. Miller | b0e1e64 | 2008-07-08 17:42:10 -0700 | [diff] [blame] | 1214 | |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1215 | /* No queueing discipline is attached to the device; |
stephen hemminger | 6da7c8f | 2013-08-27 16:19:08 -0700 | [diff] [blame] | 1216 | * create a default one for devices which need queueing, |
| 1217 | * and noqueue_qdisc for virtual interfaces. |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1218 | */ |
| 1219 | |
David S. Miller | 6ec1c69 | 2009-09-06 01:58:51 -0700 | [diff] [blame] | 1220 | if (dev->qdisc == &noop_qdisc) |
| 1221 | attach_default_qdiscs(dev); |
Patrick McHardy | af356af | 2009-09-04 06:41:18 +0000 | [diff] [blame] | 1222 | |
Tommy S. Christensen | cacaddf | 2005-05-03 16:18:52 -0700 | [diff] [blame] | 1223 | if (!netif_carrier_ok(dev)) |
| 1224 | /* Delay activation until next carrier-on event */ |
| 1225 | return; |
| 1226 | |
David S. Miller | e8a0464 | 2008-07-17 00:34:19 -0700 | [diff] [blame] | 1227 | need_watchdog = 0; |
| 1228 | netdev_for_each_tx_queue(dev, transition_one_qdisc, &need_watchdog); |
Eric Dumazet | 24824a0 | 2010-10-02 06:11:55 +0000 | [diff] [blame] | 1229 | if (dev_ingress_queue(dev)) |
| 1230 | transition_one_qdisc(dev, dev_ingress_queue(dev), NULL); |
David S. Miller | e8a0464 | 2008-07-17 00:34:19 -0700 | [diff] [blame] | 1231 | |
| 1232 | if (need_watchdog) { |
Florian Westphal | 860e953 | 2016-05-03 16:33:13 +0200 | [diff] [blame] | 1233 | netif_trans_update(dev); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1234 | dev_watchdog_up(dev); |
| 1235 | } |
David S. Miller | b0e1e64 | 2008-07-08 17:42:10 -0700 | [diff] [blame] | 1236 | } |
John Fastabend | b8970f0 | 2011-01-17 08:06:09 +0000 | [diff] [blame] | 1237 | EXPORT_SYMBOL(dev_activate); |
David S. Miller | b0e1e64 | 2008-07-08 17:42:10 -0700 | [diff] [blame] | 1238 | |
Cong Wang | 70f5096 | 2020-05-26 21:35:26 -0700 | [diff] [blame] | 1239 | static void qdisc_deactivate(struct Qdisc *qdisc) |
| 1240 | { |
Cong Wang | 70f5096 | 2020-05-26 21:35:26 -0700 | [diff] [blame] | 1241 | if (qdisc->flags & TCQ_F_BUILTIN) |
| 1242 | return; |
Cong Wang | 70f5096 | 2020-05-26 21:35:26 -0700 | [diff] [blame] | 1243 | |
| 1244 | set_bit(__QDISC_STATE_DEACTIVATED, &qdisc->state); |
Cong Wang | 70f5096 | 2020-05-26 21:35:26 -0700 | [diff] [blame] | 1245 | } |
| 1246 | |
David S. Miller | e8a0464 | 2008-07-17 00:34:19 -0700 | [diff] [blame] | 1247 | static void dev_deactivate_queue(struct net_device *dev, |
| 1248 | struct netdev_queue *dev_queue, |
| 1249 | void *_qdisc_default) |
David S. Miller | b0e1e64 | 2008-07-08 17:42:10 -0700 | [diff] [blame] | 1250 | { |
David S. Miller | e8a0464 | 2008-07-17 00:34:19 -0700 | [diff] [blame] | 1251 | struct Qdisc *qdisc_default = _qdisc_default; |
David S. Miller | 970565b | 2008-07-08 23:10:33 -0700 | [diff] [blame] | 1252 | struct Qdisc *qdisc; |
David S. Miller | b0e1e64 | 2008-07-08 17:42:10 -0700 | [diff] [blame] | 1253 | |
John Fastabend | 46e5da40a | 2014-09-12 20:04:52 -0700 | [diff] [blame] | 1254 | qdisc = rtnl_dereference(dev_queue->qdisc); |
David S. Miller | b0e1e64 | 2008-07-08 17:42:10 -0700 | [diff] [blame] | 1255 | if (qdisc) { |
Cong Wang | 70f5096 | 2020-05-26 21:35:26 -0700 | [diff] [blame] | 1256 | qdisc_deactivate(qdisc); |
Jarek Poplawski | f7a54c1 | 2008-08-27 02:22:07 -0700 | [diff] [blame] | 1257 | rcu_assign_pointer(dev_queue->qdisc, qdisc_default); |
David S. Miller | b0e1e64 | 2008-07-08 17:42:10 -0700 | [diff] [blame] | 1258 | } |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1259 | } |
| 1260 | |
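| | /* Purge the sleeping qdisc once traffic has been diverted to noop_qdisc. |
| | * For NOLOCK qdiscs the seqlock is taken as well and the MISSED/DRAINING |
| | * bits are cleared so no stale reschedule is left behind. |
| | */ |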
Yunsheng Lin | 2fb541c | 2020-09-08 19:02:34 +0800 | [diff] [blame] | 1261 | static void dev_reset_queue(struct net_device *dev, |
| 1262 | struct netdev_queue *dev_queue, |
| 1263 | void *_unused) |
| 1264 | { |
| 1265 | struct Qdisc *qdisc; |
| 1266 | bool nolock; |
| 1267 | |
| 1268 | qdisc = dev_queue->qdisc_sleeping; |
| 1269 | if (!qdisc) |
| 1270 | return; |
| 1271 | |
| 1272 | nolock = qdisc->flags & TCQ_F_NOLOCK; |
| 1273 | |
| 1274 | if (nolock) |
| 1275 | spin_lock_bh(&qdisc->seqlock); |
| 1276 | spin_lock_bh(qdisc_lock(qdisc)); |
| 1277 | |
| 1278 | qdisc_reset(qdisc); |
| 1279 | |
| 1280 | spin_unlock_bh(qdisc_lock(qdisc)); |
Yunsheng Lin | 102b55e | 2021-05-14 11:17:00 +0800 | [diff] [blame] | 1281 | if (nolock) { |
| 1282 | clear_bit(__QDISC_STATE_MISSED, &qdisc->state); |
Yunsheng Lin | c4fef01 | 2021-06-22 14:49:56 +0800 | [diff] [blame] | 1283 | clear_bit(__QDISC_STATE_DRAINING, &qdisc->state); |
Yunsheng Lin | 2fb541c | 2020-09-08 19:02:34 +0800 | [diff] [blame] | 1284 | spin_unlock_bh(&qdisc->seqlock); |
Yunsheng Lin | 102b55e | 2021-05-14 11:17:00 +0800 | [diff] [blame] | 1285 | } |
Yunsheng Lin | 2fb541c | 2020-09-08 19:02:34 +0800 | [diff] [blame] | 1286 | } |
| 1287 | |
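| | /* True if any tx queue's sleeping qdisc is still running or still scheduled |
| | * for net_tx_action(); used below to wait out in-flight qdisc_run() work. |
| | */ |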
David S. Miller | 4335cd2 | 2008-08-17 21:58:07 -0700 | [diff] [blame] | 1288 | static bool some_qdisc_is_busy(struct net_device *dev) |
David S. Miller | e8a0464 | 2008-07-17 00:34:19 -0700 | [diff] [blame] | 1289 | { |
| 1290 | unsigned int i; |
| 1291 | |
| 1292 | for (i = 0; i < dev->num_tx_queues; i++) { |
| 1293 | struct netdev_queue *dev_queue; |
David S. Miller | 7698b4f | 2008-07-16 01:42:40 -0700 | [diff] [blame] | 1294 | spinlock_t *root_lock; |
David S. Miller | e2627c8 | 2008-07-16 00:56:32 -0700 | [diff] [blame] | 1295 | struct Qdisc *q; |
David S. Miller | e8a0464 | 2008-07-17 00:34:19 -0700 | [diff] [blame] | 1296 | int val; |
| 1297 | |
| 1298 | dev_queue = netdev_get_tx_queue(dev, i); |
David S. Miller | b9a3b11 | 2008-08-13 15:18:38 -0700 | [diff] [blame] | 1299 | q = dev_queue->qdisc_sleeping; |
David S. Miller | e8a0464 | 2008-07-17 00:34:19 -0700 | [diff] [blame] | 1300 | |
Paolo Abeni | 32f7b44 | 2018-05-15 10:50:31 +0200 | [diff] [blame] | 1301 | root_lock = qdisc_lock(q); |
| 1302 | spin_lock_bh(root_lock); |
David S. Miller | e8a0464 | 2008-07-17 00:34:19 -0700 | [diff] [blame] | 1303 | |
Paolo Abeni | 32f7b44 | 2018-05-15 10:50:31 +0200 | [diff] [blame] | 1304 | val = (qdisc_is_running(q) || |
| 1305 | test_bit(__QDISC_STATE_SCHED, &q->state)); |
David S. Miller | e8a0464 | 2008-07-17 00:34:19 -0700 | [diff] [blame] | 1306 | |
Paolo Abeni | 32f7b44 | 2018-05-15 10:50:31 +0200 | [diff] [blame] | 1307 | spin_unlock_bh(root_lock); |
David S. Miller | e8a0464 | 2008-07-17 00:34:19 -0700 | [diff] [blame] | 1308 | |
| 1309 | if (val) |
| 1310 | return true; |
| 1311 | } |
| 1312 | return false; |
| 1313 | } |
| 1314 | |
Eric Dumazet | 3137663 | 2011-05-19 23:42:09 +0000 | [diff] [blame] | 1315 | /** |
| 1316 | * dev_deactivate_many - deactivate transmissions on several devices |
| 1317 | * @head: list of devices to deactivate |
| 1318 | * |
| 1319 | * This function returns only when all outstanding transmissions |
| 1320 | * have completed, unless all devices are in dismantle phase. |
| 1321 | */ |
Octavian Purdila | 4434572 | 2010-12-13 12:44:07 +0000 | [diff] [blame] | 1322 | void dev_deactivate_many(struct list_head *head) |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1323 | { |
Octavian Purdila | 4434572 | 2010-12-13 12:44:07 +0000 | [diff] [blame] | 1324 | struct net_device *dev; |
Herbert Xu | 41a23b0 | 2007-05-10 14:12:47 -0700 | [diff] [blame] | 1325 | |
Eric W. Biederman | 5cde282 | 2013-10-05 19:26:05 -0700 | [diff] [blame] | 1326 | list_for_each_entry(dev, head, close_list) { |
Octavian Purdila | 4434572 | 2010-12-13 12:44:07 +0000 | [diff] [blame] | 1327 | netdev_for_each_tx_queue(dev, dev_deactivate_queue, |
| 1328 | &noop_qdisc); |
| 1329 | if (dev_ingress_queue(dev)) |
| 1330 | dev_deactivate_queue(dev, dev_ingress_queue(dev), |
| 1331 | &noop_qdisc); |
| 1332 | |
| 1333 | dev_watchdog_down(dev); |
| 1334 | } |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1335 | |
Yunsheng Lin | 2fb541c | 2020-09-08 19:02:34 +0800 | [diff] [blame] | 1336 | /* Wait for outstanding qdisc-less dev_queue_xmit calls or |
| 1337 | * outstanding qdisc enqueuing calls. |
Eric Dumazet | 3137663 | 2011-05-19 23:42:09 +0000 | [diff] [blame] | 1338 | * This is avoided if all devices are in dismantle phase: |
| 1339 | * Caller will call synchronize_net() for us |
| 1340 | */ |
John Fastabend | 7bbde83 | 2017-12-07 09:56:04 -0800 | [diff] [blame] | 1341 | synchronize_net(); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1342 | |
Yunsheng Lin | 2fb541c | 2020-09-08 19:02:34 +0800 | [diff] [blame] | 1343 | list_for_each_entry(dev, head, close_list) { |
| 1344 | netdev_for_each_tx_queue(dev, dev_reset_queue, NULL); |
| 1345 | |
| 1346 | if (dev_ingress_queue(dev)) |
| 1347 | dev_reset_queue(dev, dev_ingress_queue(dev), NULL); |
| 1348 | } |
| 1349 | |
Herbert Xu | d4828d8 | 2006-06-22 02:28:18 -0700 | [diff] [blame] | 1350 | /* Wait for outstanding qdisc_run calls. */ |
John Fastabend | 7bbde83 | 2017-12-07 09:56:04 -0800 | [diff] [blame] | 1351 | list_for_each_entry(dev, head, close_list) { |
Marc Kleine-Budde | 4eab421 | 2019-10-16 10:28:33 +0200 | [diff] [blame] | 1352 | while (some_qdisc_is_busy(dev)) { |
| 1353 | /* wait_event() would avoid this sleep-loop but would |
| 1354 | * require expensive checks in the fast paths of packet |
| 1355 | * processing, which isn't worth it. |
| 1356 | */ |
| 1357 | schedule_timeout_uninterruptible(1); |
| 1358 | } |
John Fastabend | 7bbde83 | 2017-12-07 09:56:04 -0800 | [diff] [blame] | 1359 | } |
Octavian Purdila | 4434572 | 2010-12-13 12:44:07 +0000 | [diff] [blame] | 1360 | } |
| 1361 | |
| 1362 | void dev_deactivate(struct net_device *dev) |
| 1363 | { |
| 1364 | LIST_HEAD(single); |
| 1365 | |
Eric W. Biederman | 5cde282 | 2013-10-05 19:26:05 -0700 | [diff] [blame] | 1366 | list_add(&dev->close_list, &single); |
Octavian Purdila | 4434572 | 2010-12-13 12:44:07 +0000 | [diff] [blame] | 1367 | dev_deactivate_many(&single); |
Eric W. Biederman | 5f04d50 | 2011-02-20 11:49:45 -0800 | [diff] [blame] | 1368 | list_del(&single); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1369 | } |
John Fastabend | b8970f0 | 2011-01-17 08:06:09 +0000 | [diff] [blame] | 1370 | EXPORT_SYMBOL(dev_deactivate); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1371 | |
Cong Wang | 48bfd55 | 2018-01-25 18:26:23 -0800 | [diff] [blame] | 1372 | static int qdisc_change_tx_queue_len(struct net_device *dev, |
| 1373 | struct netdev_queue *dev_queue) |
| 1374 | { |
| 1375 | struct Qdisc *qdisc = dev_queue->qdisc_sleeping; |
| 1376 | const struct Qdisc_ops *ops = qdisc->ops; |
| 1377 | |
| 1378 | if (ops->change_tx_queue_len) |
| 1379 | return ops->change_tx_queue_len(qdisc, dev->tx_queue_len); |
| 1380 | return 0; |
| 1381 | } |
| 1382 | |
Jakub Kicinski | 1e080f1 | 2021-09-13 15:53:30 -0700 | [diff] [blame] | 1383 | void dev_qdisc_change_real_num_tx(struct net_device *dev, |
| 1384 | unsigned int new_real_tx) |
| 1385 | { |
| 1386 | struct Qdisc *qdisc = dev->qdisc; |
| 1387 | |
| 1388 | if (qdisc->ops->change_real_num_tx) |
| 1389 | qdisc->ops->change_real_num_tx(qdisc, new_real_tx); |
| 1390 | } |
| 1391 | |
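| | /* Keep the qdisc hash in sync when the real tx queue count changes: default |
| | * per-queue qdiscs beyond the new count are unhashed, and newly exposed |
| | * queues get their default qdiscs hashed again. |
| | */ |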
Jakub Kicinski | f7116fb | 2021-09-17 06:55:06 -0700 | [diff] [blame] | 1392 | void mq_change_real_num_tx(struct Qdisc *sch, unsigned int new_real_tx) |
| 1393 | { |
| 1394 | #ifdef CONFIG_NET_SCHED |
| 1395 | struct net_device *dev = qdisc_dev(sch); |
| 1396 | struct Qdisc *qdisc; |
| 1397 | unsigned int i; |
| 1398 | |
| 1399 | for (i = new_real_tx; i < dev->real_num_tx_queues; i++) { |
| 1400 | qdisc = netdev_get_tx_queue(dev, i)->qdisc_sleeping; |
| 1401 | /* Only update the default qdiscs we created; |
| 1402 | * qdiscs with handles are always hashed. |
| 1403 | */ |
| 1404 | if (qdisc != &noop_qdisc && !qdisc->handle) |
| 1405 | qdisc_hash_del(qdisc); |
| 1406 | } |
| 1407 | for (i = dev->real_num_tx_queues; i < new_real_tx; i++) { |
| 1408 | qdisc = netdev_get_tx_queue(dev, i)->qdisc_sleeping; |
| 1409 | if (qdisc != &noop_qdisc && !qdisc->handle) |
| 1410 | qdisc_hash_add(qdisc, false); |
| 1411 | } |
| 1412 | #endif |
| 1413 | } |
| 1414 | EXPORT_SYMBOL(mq_change_real_num_tx); |
| 1415 | |
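| | /* Propagate a new dev->tx_queue_len to every tx queue's qdisc via |
| | * ops->change_tx_queue_len(), deactivating the device around the update when |
| | * it is up. Stops at the first per-queue failure (see the TODO below). |
| | */ |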
Cong Wang | 48bfd55 | 2018-01-25 18:26:23 -0800 | [diff] [blame] | 1416 | int dev_qdisc_change_tx_queue_len(struct net_device *dev) |
| 1417 | { |
| 1418 | bool up = dev->flags & IFF_UP; |
| 1419 | unsigned int i; |
| 1420 | int ret = 0; |
| 1421 | |
| 1422 | if (up) |
| 1423 | dev_deactivate(dev); |
| 1424 | |
| 1425 | for (i = 0; i < dev->num_tx_queues; i++) { |
| 1426 | ret = qdisc_change_tx_queue_len(dev, &dev->_tx[i]); |
| 1427 | |
| 1428 | /* TODO: revert changes on a partial failure */ |
| 1429 | if (ret) |
| 1430 | break; |
| 1431 | } |
| 1432 | |
| 1433 | if (up) |
| 1434 | dev_activate(dev); |
| 1435 | return ret; |
| 1436 | } |
| 1437 | |
David S. Miller | b0e1e64 | 2008-07-08 17:42:10 -0700 | [diff] [blame] | 1438 | static void dev_init_scheduler_queue(struct net_device *dev, |
| 1439 | struct netdev_queue *dev_queue, |
David S. Miller | e8a0464 | 2008-07-17 00:34:19 -0700 | [diff] [blame] | 1440 | void *_qdisc) |
David S. Miller | b0e1e64 | 2008-07-08 17:42:10 -0700 | [diff] [blame] | 1441 | { |
David S. Miller | e8a0464 | 2008-07-17 00:34:19 -0700 | [diff] [blame] | 1442 | struct Qdisc *qdisc = _qdisc; |
| 1443 | |
John Fastabend | 46e5da40a | 2014-09-12 20:04:52 -0700 | [diff] [blame] | 1444 | rcu_assign_pointer(dev_queue->qdisc, qdisc); |
David S. Miller | b0e1e64 | 2008-07-08 17:42:10 -0700 | [diff] [blame] | 1445 | dev_queue->qdisc_sleeping = qdisc; |
David S. Miller | b0e1e64 | 2008-07-08 17:42:10 -0700 | [diff] [blame] | 1446 | } |
| 1447 | |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1448 | void dev_init_scheduler(struct net_device *dev) |
| 1449 | { |
Patrick McHardy | af356af | 2009-09-04 06:41:18 +0000 | [diff] [blame] | 1450 | dev->qdisc = &noop_qdisc; |
David S. Miller | e8a0464 | 2008-07-17 00:34:19 -0700 | [diff] [blame] | 1451 | netdev_for_each_tx_queue(dev, dev_init_scheduler_queue, &noop_qdisc); |
Eric Dumazet | 24824a0 | 2010-10-02 06:11:55 +0000 | [diff] [blame] | 1452 | if (dev_ingress_queue(dev)) |
| 1453 | dev_init_scheduler_queue(dev, dev_ingress_queue(dev), &noop_qdisc); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1454 | |
Kees Cook | cdeabbb | 2017-10-16 17:29:17 -0700 | [diff] [blame] | 1455 | timer_setup(&dev->watchdog_timer, dev_watchdog, 0); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1456 | } |
| 1457 | |
David S. Miller | e8a0464 | 2008-07-17 00:34:19 -0700 | [diff] [blame] | 1458 | static void shutdown_scheduler_queue(struct net_device *dev, |
| 1459 | struct netdev_queue *dev_queue, |
| 1460 | void *_qdisc_default) |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1461 | { |
David S. Miller | b0e1e64 | 2008-07-08 17:42:10 -0700 | [diff] [blame] | 1462 | struct Qdisc *qdisc = dev_queue->qdisc_sleeping; |
David S. Miller | e8a0464 | 2008-07-17 00:34:19 -0700 | [diff] [blame] | 1463 | struct Qdisc *qdisc_default = _qdisc_default; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1464 | |
David S. Miller | b0e1e64 | 2008-07-08 17:42:10 -0700 | [diff] [blame] | 1465 | if (qdisc) { |
Jarek Poplawski | f7a54c1 | 2008-08-27 02:22:07 -0700 | [diff] [blame] | 1466 | rcu_assign_pointer(dev_queue->qdisc, qdisc_default); |
David S. Miller | b0e1e64 | 2008-07-08 17:42:10 -0700 | [diff] [blame] | 1467 | dev_queue->qdisc_sleeping = qdisc_default; |
| 1468 | |
Vlad Buslov | 86bd446 | 2018-09-24 19:22:50 +0300 | [diff] [blame] | 1469 | qdisc_put(qdisc); |
YOSHIFUJI Hideaki | 10297b9 | 2007-02-09 23:25:16 +0900 | [diff] [blame] | 1470 | } |
David S. Miller | b0e1e64 | 2008-07-08 17:42:10 -0700 | [diff] [blame] | 1471 | } |
| 1472 | |
| 1473 | void dev_shutdown(struct net_device *dev) |
| 1474 | { |
David S. Miller | e8a0464 | 2008-07-17 00:34:19 -0700 | [diff] [blame] | 1475 | netdev_for_each_tx_queue(dev, shutdown_scheduler_queue, &noop_qdisc); |
Eric Dumazet | 24824a0 | 2010-10-02 06:11:55 +0000 | [diff] [blame] | 1476 | if (dev_ingress_queue(dev)) |
| 1477 | shutdown_scheduler_queue(dev, dev_ingress_queue(dev), &noop_qdisc); |
Vlad Buslov | 86bd446 | 2018-09-24 19:22:50 +0300 | [diff] [blame] | 1478 | qdisc_put(dev->qdisc); |
Patrick McHardy | af356af | 2009-09-04 06:41:18 +0000 | [diff] [blame] | 1479 | dev->qdisc = &noop_qdisc; |
| 1480 | |
Ilpo Järvinen | 547b792 | 2008-07-25 21:43:18 -0700 | [diff] [blame] | 1481 | WARN_ON(timer_pending(&dev->watchdog_timer)); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1482 | } |
Jiri Pirko | 292f1c7 | 2013-02-12 00:12:03 +0000 | [diff] [blame] | 1483 | |
Baowen Zheng | 2ffe039 | 2021-03-12 15:08:31 +0100 | [diff] [blame] | 1484 | /** |
| 1485 | * psched_ratecfg_precompute__() - Pre-compute values for reciprocal division |
| 1486 | * @rate: Rate to compute reciprocal division values of |
| 1487 | * @mult: Multiplier for reciprocal division |
| 1488 | * @shift: Shift for reciprocal division |
| 1489 | * |
| 1490 | * The multiplier and shift for reciprocal division by rate are stored |
| 1491 | * in mult and shift. |
| 1492 | * |
| 1493 | * The deal here is to replace a divide by a reciprocal one |
| 1494 | * in the fast path (a reciprocal divide is a multiply and a shift). |
| 1495 | * |
| 1496 | * The normal formula would be: |
| 1497 | * time_in_ns = (NSEC_PER_SEC * len) / rate_bps |
| 1498 | * |
| 1499 | * We compute mult/shift to use instead: |
| 1500 | * time_in_ns = (len * mult) >> shift; |
| 1501 | * |
| 1502 | * We try to get the highest possible mult value for accuracy, |
| 1503 | * but have to make sure no overflows will ever happen. |
| 1504 | * |
| 1505 | * reciprocal_value() is not used here because it doesn't handle 64-bit values. |
| 1506 | */ |
| 1507 | static void psched_ratecfg_precompute__(u64 rate, u32 *mult, u8 *shift) |
| 1508 | { |
| 1509 | u64 factor = NSEC_PER_SEC; |
| 1510 | |
| 1511 | *mult = 1; |
| 1512 | *shift = 0; |
| 1513 | |
| 1514 | if (rate <= 0) |
| 1515 | return; |
| 1516 | |
| 1517 | for (;;) { |
| 1518 | *mult = div64_u64(factor, rate); |
| 1519 | if (*mult & (1U << 31) || factor & (1ULL << 63)) |
| 1520 | break; |
| 1521 | factor <<= 1; |
| 1522 | (*shift)++; |
| 1523 | } |
| 1524 | } |
| 1525 | |
Eric Dumazet | 01cb71d | 2013-06-02 13:55:05 +0000 | [diff] [blame] | 1526 | void psched_ratecfg_precompute(struct psched_ratecfg *r, |
Eric Dumazet | 3e1e3aa | 2013-09-19 09:10:03 -0700 | [diff] [blame] | 1527 | const struct tc_ratespec *conf, |
| 1528 | u64 rate64) |
Jiri Pirko | 292f1c7 | 2013-02-12 00:12:03 +0000 | [diff] [blame] | 1529 | { |
Eric Dumazet | 01cb71d | 2013-06-02 13:55:05 +0000 | [diff] [blame] | 1530 | memset(r, 0, sizeof(*r)); |
| 1531 | r->overhead = conf->overhead; |
Eric Dumazet | 3e1e3aa | 2013-09-19 09:10:03 -0700 | [diff] [blame] | 1532 | r->rate_bytes_ps = max_t(u64, conf->rate, rate64); |
Jesper Dangaard Brouer | 8a8e3d8 | 2013-08-14 23:47:11 +0200 | [diff] [blame] | 1533 | r->linklayer = (conf->linklayer & TC_LINKLAYER_MASK); |
Baowen Zheng | 2ffe039 | 2021-03-12 15:08:31 +0100 | [diff] [blame] | 1534 | psched_ratecfg_precompute__(r->rate_bytes_ps, &r->mult, &r->shift); |
Jiri Pirko | 292f1c7 | 2013-02-12 00:12:03 +0000 | [diff] [blame] | 1535 | } |
| 1536 | EXPORT_SYMBOL(psched_ratecfg_precompute); |
Jiri Pirko | 4620940 | 2017-11-03 11:46:25 +0100 | [diff] [blame] | 1537 | |
Baowen Zheng | 2ffe039 | 2021-03-12 15:08:31 +0100 | [diff] [blame] | 1538 | void psched_ppscfg_precompute(struct psched_pktrate *r, u64 pktrate64) |
| 1539 | { |
| 1540 | r->rate_pkts_ps = pktrate64; |
| 1541 | psched_ratecfg_precompute__(r->rate_pkts_ps, &r->mult, &r->shift); |
| 1542 | } |
| 1543 | EXPORT_SYMBOL(psched_ppscfg_precompute); |
| 1544 | |
Jiri Pirko | 4620940 | 2017-11-03 11:46:25 +0100 | [diff] [blame] | 1545 | void mini_qdisc_pair_swap(struct mini_Qdisc_pair *miniqp, |
| 1546 | struct tcf_proto *tp_head) |
| 1547 | { |
Vlad Buslov | ed76f5e | 2019-02-11 10:55:38 +0200 | [diff] [blame] | 1548 | /* Protected with chain0->filter_chain_lock. |
| 1549 | * Can't access chain directly because tp_head can be NULL. |
| 1550 | */ |
| 1551 | struct mini_Qdisc *miniq_old = |
| 1552 | rcu_dereference_protected(*miniqp->p_miniq, 1); |
Jiri Pirko | 4620940 | 2017-11-03 11:46:25 +0100 | [diff] [blame] | 1553 | struct mini_Qdisc *miniq; |
| 1554 | |
| 1555 | if (!tp_head) { |
| 1556 | RCU_INIT_POINTER(*miniqp->p_miniq, NULL); |
Seth Forshee | 2674638 | 2021-10-26 08:06:59 -0500 | [diff] [blame] | 1557 | } else { |
Seth Forshee | 85c0c3e | 2021-10-26 13:37:21 -0500 | [diff] [blame] | 1558 | miniq = miniq_old != &miniqp->miniq1 ? |
Seth Forshee | 2674638 | 2021-10-26 08:06:59 -0500 | [diff] [blame] | 1559 | &miniqp->miniq1 : &miniqp->miniq2; |
| 1560 | |
| 1561 | /* We need to make sure that readers won't see the miniq |
| 1562 | * we are about to modify. So ensure that at least one RCU |
| 1563 | * grace period has elapsed since the miniq was made |
| 1564 | * inactive. |
| 1565 | */ |
| 1566 | if (IS_ENABLED(CONFIG_PREEMPT_RT)) |
| 1567 | cond_synchronize_rcu(miniq->rcu_state); |
| 1568 | else if (!poll_state_synchronize_rcu(miniq->rcu_state)) |
| 1569 | synchronize_rcu_expedited(); |
| 1570 | |
| 1571 | miniq->filter_list = tp_head; |
| 1572 | rcu_assign_pointer(*miniqp->p_miniq, miniq); |
Jiri Pirko | 4620940 | 2017-11-03 11:46:25 +0100 | [diff] [blame] | 1573 | } |
| 1574 | |
Jiri Pirko | 4620940 | 2017-11-03 11:46:25 +0100 | [diff] [blame] | 1575 | if (miniq_old) |
Seth Forshee | 2674638 | 2021-10-26 08:06:59 -0500 | [diff] [blame] | 1576 | /* This is the counterpart of the rcu sync above. We need to |
Jiri Pirko | 4620940 | 2017-11-03 11:46:25 +0100 | [diff] [blame] | 1577 | * block potential new users of miniq_old until no readers |
| 1578 | * can still see it. |
| 1579 | */ |
Seth Forshee | 2674638 | 2021-10-26 08:06:59 -0500 | [diff] [blame] | 1580 | miniq_old->rcu_state = start_poll_synchronize_rcu(); |
Jiri Pirko | 4620940 | 2017-11-03 11:46:25 +0100 | [diff] [blame] | 1581 | } |
| 1582 | EXPORT_SYMBOL(mini_qdisc_pair_swap); |
| 1583 | |
Paul Blakey | 7d17c54 | 2020-02-16 12:01:22 +0200 | [diff] [blame] | 1584 | void mini_qdisc_pair_block_init(struct mini_Qdisc_pair *miniqp, |
| 1585 | struct tcf_block *block) |
| 1586 | { |
| 1587 | miniqp->miniq1.block = block; |
| 1588 | miniqp->miniq2.block = block; |
| 1589 | } |
| 1590 | EXPORT_SYMBOL(mini_qdisc_pair_block_init); |
| 1591 | |
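| | /* Wire both mini_Qdisc buffers to the owning qdisc's per-CPU stats and seed |
| | * their rcu_state with the current grace-period cookie so a later swap can |
| | * often skip an explicit synchronize. |
| | */ |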
Jiri Pirko | 4620940 | 2017-11-03 11:46:25 +0100 | [diff] [blame] | 1592 | void mini_qdisc_pair_init(struct mini_Qdisc_pair *miniqp, struct Qdisc *qdisc, |
| 1593 | struct mini_Qdisc __rcu **p_miniq) |
| 1594 | { |
| 1595 | miniqp->miniq1.cpu_bstats = qdisc->cpu_bstats; |
| 1596 | miniqp->miniq1.cpu_qstats = qdisc->cpu_qstats; |
| 1597 | miniqp->miniq2.cpu_bstats = qdisc->cpu_bstats; |
| 1598 | miniqp->miniq2.cpu_qstats = qdisc->cpu_qstats; |
Seth Forshee | 2674638 | 2021-10-26 08:06:59 -0500 | [diff] [blame] | 1599 | miniqp->miniq1.rcu_state = get_state_synchronize_rcu(); |
| 1600 | miniqp->miniq2.rcu_state = miniqp->miniq1.rcu_state; |
Jiri Pirko | 4620940 | 2017-11-03 11:46:25 +0100 | [diff] [blame] | 1601 | miniqp->p_miniq = p_miniq; |
| 1602 | } |
| 1603 | EXPORT_SYMBOL(mini_qdisc_pair_init); |