/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __NET_SCHED_GENERIC_H
#define __NET_SCHED_GENERIC_H

#include <linux/netdevice.h>
#include <linux/types.h>
#include <linux/rcupdate.h>
#include <linux/pkt_sched.h>
#include <linux/pkt_cls.h>
#include <linux/percpu.h>
#include <linux/dynamic_queue_limits.h>
#include <linux/list.h>
#include <linux/refcount.h>
#include <linux/workqueue.h>
#include <linux/mutex.h>
#include <linux/rwsem.h>
#include <linux/atomic.h>
#include <linux/hashtable.h>
#include <net/gen_stats.h>
#include <net/rtnetlink.h>
#include <net/flow_offload.h>

struct Qdisc_ops;
struct qdisc_walker;
struct tcf_walker;
struct module;
struct bpf_flow_keys;

struct qdisc_rate_table {
	struct tc_ratespec	rate;
	u32			data[256];
	struct qdisc_rate_table *next;
	int			refcnt;
};

enum qdisc_state_t {
	__QDISC_STATE_SCHED,
	__QDISC_STATE_DEACTIVATED,
	__QDISC_STATE_MISSED,
	__QDISC_STATE_DRAINING,
};

#define QDISC_STATE_MISSED	BIT(__QDISC_STATE_MISSED)
#define QDISC_STATE_DRAINING	BIT(__QDISC_STATE_DRAINING)

#define QDISC_STATE_NON_EMPTY	(QDISC_STATE_MISSED | \
				 QDISC_STATE_DRAINING)

struct qdisc_size_table {
	struct rcu_head		rcu;
	struct list_head	list;
	struct tc_sizespec	szopts;
	int			refcnt;
	u16			data[];
};

/* similar to sk_buff_head, but skb->prev pointer is undefined. */
struct qdisc_skb_head {
	struct sk_buff	*head;
	struct sk_buff	*tail;
	__u32		qlen;
	spinlock_t	lock;
};

struct Qdisc {
	int 			(*enqueue)(struct sk_buff *skb,
					   struct Qdisc *sch,
					   struct sk_buff **to_free);
	struct sk_buff *	(*dequeue)(struct Qdisc *sch);
	unsigned int		flags;
#define TCQ_F_BUILTIN		1
#define TCQ_F_INGRESS		2
#define TCQ_F_CAN_BYPASS	4
#define TCQ_F_MQROOT		8
#define TCQ_F_ONETXQUEUE	0x10 /* dequeue_skb() can assume all skbs are for
				      * q->dev_queue : it can test
				      * netif_xmit_frozen_or_stopped() before
				      * dequeueing the next packet.
				      * It's true for MQ/MQPRIO slaves, or a
				      * non-multiqueue device.
				      */
#define TCQ_F_WARN_NONWC	(1 << 16)
#define TCQ_F_CPUSTATS		0x20 /* run using percpu statistics */
#define TCQ_F_NOPARENT		0x40 /* root of its hierarchy :
				      * qdisc_tree_decrease_qlen() should stop.
				      */
#define TCQ_F_INVISIBLE		0x80 /* invisible by default in dump */
#define TCQ_F_NOLOCK		0x100 /* qdisc does not require locking */
#define TCQ_F_OFFLOADED		0x200 /* qdisc is offloaded to HW */
	u32			limit;
	const struct Qdisc_ops	*ops;
	struct qdisc_size_table	__rcu *stab;
	struct hlist_node	hash;
	u32			handle;
	u32			parent;

	struct netdev_queue	*dev_queue;

	struct net_rate_estimator __rcu *rate_est;
	struct gnet_stats_basic_sync __percpu *cpu_bstats;
	struct gnet_stats_queue	__percpu *cpu_qstats;
	int			pad;
	refcount_t		refcnt;

	/*
	 * For performance's sake on SMP, we put frequently modified fields
	 * at the end.
	 */
	struct sk_buff_head	gso_skb ____cacheline_aligned_in_smp;
	struct qdisc_skb_head	q;
	struct gnet_stats_basic_sync bstats;
	seqcount_t		running;
	struct gnet_stats_queue	qstats;
	unsigned long		state;
	struct Qdisc		*next_sched;
	struct sk_buff_head	skb_bad_txq;

	spinlock_t		busylock ____cacheline_aligned_in_smp;
	spinlock_t		seqlock;

	struct rcu_head		rcu;

	/* private data */
	long			privdata[] ____cacheline_aligned;
};

static inline void qdisc_refcount_inc(struct Qdisc *qdisc)
{
	if (qdisc->flags & TCQ_F_BUILTIN)
		return;
	refcount_inc(&qdisc->refcnt);
}

/* Intended to be used by unlocked users, when concurrent qdisc release is
 * possible.
 */

static inline struct Qdisc *qdisc_refcount_inc_nz(struct Qdisc *qdisc)
{
	if (qdisc->flags & TCQ_F_BUILTIN)
		return qdisc;
	if (refcount_inc_not_zero(&qdisc->refcnt))
		return qdisc;
	return NULL;
}
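
/* A minimal usage sketch (illustrative only, assuming an RCU read-side
 * caller; qdisc_lookup_rcu() stands in for whatever lookup the caller
 * actually uses):
 *
 *	rcu_read_lock();
 *	q = qdisc_lookup_rcu(dev, handle);
 *	if (q)
 *		q = qdisc_refcount_inc_nz(q);
 *	rcu_read_unlock();
 *	if (q) {
 *		...			// use q; the reference pins it
 *		qdisc_put(q);		// drop the reference when done
 *	}
 */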

static inline bool qdisc_is_running(struct Qdisc *qdisc)
{
	if (qdisc->flags & TCQ_F_NOLOCK)
		return spin_is_locked(&qdisc->seqlock);
	return (raw_read_seqcount(&qdisc->running) & 1) ? true : false;
}

static inline bool nolock_qdisc_is_empty(const struct Qdisc *qdisc)
{
	return !(READ_ONCE(qdisc->state) & QDISC_STATE_NON_EMPTY);
}

static inline bool qdisc_is_percpu_stats(const struct Qdisc *q)
{
	return q->flags & TCQ_F_CPUSTATS;
}

static inline bool qdisc_is_empty(const struct Qdisc *qdisc)
{
	if (qdisc_is_percpu_stats(qdisc))
		return nolock_qdisc_is_empty(qdisc);
	return !READ_ONCE(qdisc->q.qlen);
}

static inline bool qdisc_run_begin(struct Qdisc *qdisc)
{
	if (qdisc->flags & TCQ_F_NOLOCK) {
		if (spin_trylock(&qdisc->seqlock))
			return true;

		/* Paired with smp_mb__after_atomic() to make sure
		 * STATE_MISSED checking is synchronized with clearing
		 * in pfifo_fast_dequeue().
		 */
		smp_mb__before_atomic();

		/* If the MISSED flag is set, it means another thread has
		 * set the MISSED flag before the second spin_trylock(), so
		 * we can return false here to avoid multiple CPUs doing
		 * the set_bit() and second spin_trylock() concurrently.
		 */
		if (test_bit(__QDISC_STATE_MISSED, &qdisc->state))
			return false;

		/* Set the MISSED flag before the second spin_trylock();
		 * if the second spin_trylock() returns false, it means
		 * the other CPU holding the lock will do the dequeuing
		 * for us, or it will see the MISSED flag set after
		 * releasing the lock and reschedule the net_tx_action()
		 * to do the dequeuing.
		 */
		set_bit(__QDISC_STATE_MISSED, &qdisc->state);

		/* spin_trylock() only has load-acquire semantics, so use
		 * smp_mb__after_atomic() to ensure STATE_MISSED is set
		 * before doing the second spin_trylock().
		 */
		smp_mb__after_atomic();

		/* Retry in case the other CPU did not see the new flag
		 * before it released the lock at the end of qdisc_run_end().
		 */
		return spin_trylock(&qdisc->seqlock);
	} else if (qdisc_is_running(qdisc)) {
		return false;
	}
	/* Variant of write_seqcount_begin() telling lockdep a trylock
	 * was attempted.
	 */
	raw_write_seqcount_begin(&qdisc->running);
	seqcount_acquire(&qdisc->running.dep_map, 0, 1, _RET_IP_);
	return true;
}

static inline void qdisc_run_end(struct Qdisc *qdisc)
{
	if (qdisc->flags & TCQ_F_NOLOCK) {
		spin_unlock(&qdisc->seqlock);

		if (unlikely(test_bit(__QDISC_STATE_MISSED,
				      &qdisc->state)))
			__netif_schedule(qdisc);
	} else {
		write_seqcount_end(&qdisc->running);
	}
}
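
/* A typical caller pairs these as a try-lock around the dequeue loop; this
 * sketch mirrors qdisc_run() in include/net/pkt_sched.h (shown here for
 * illustration, not redefined by this header):
 *
 *	if (qdisc_run_begin(q)) {
 *		__qdisc_run(q);
 *		qdisc_run_end(q);
 *	}
 */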

static inline bool qdisc_may_bulk(const struct Qdisc *qdisc)
{
	return qdisc->flags & TCQ_F_ONETXQUEUE;
}

static inline int qdisc_avail_bulklimit(const struct netdev_queue *txq)
{
#ifdef CONFIG_BQL
	/* Non-BQL migrated drivers will return 0, too. */
	return dql_avail(&txq->dql);
#else
	return 0;
#endif
}
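
/* Sketch of how a dequeue path can combine the two helpers above to decide
 * how many bytes may be bulk-dequeued (a hedged outline of the logic in
 * net/sched/sch_generic.c, not a definitive copy):
 *
 *	if (qdisc_may_bulk(q)) {
 *		int bytelimit = qdisc_avail_bulklimit(txq) - skb->len;
 *
 *		while (bytelimit > 0 && (nskb = q->dequeue(q)) != NULL)
 *			bytelimit -= nskb->len;	// chain nskb for xmit
 *	}
 */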

struct Qdisc_class_ops {
	unsigned int		flags;
	/* Child qdisc manipulation */
	struct netdev_queue *	(*select_queue)(struct Qdisc *, struct tcmsg *);
	int			(*graft)(struct Qdisc *, unsigned long cl,
					struct Qdisc *, struct Qdisc **,
					struct netlink_ext_ack *extack);
	struct Qdisc *		(*leaf)(struct Qdisc *, unsigned long cl);
	void			(*qlen_notify)(struct Qdisc *, unsigned long);

	/* Class manipulation routines */
	unsigned long		(*find)(struct Qdisc *, u32 classid);
	int			(*change)(struct Qdisc *, u32, u32,
					struct nlattr **, unsigned long *,
					struct netlink_ext_ack *);
	int			(*delete)(struct Qdisc *, unsigned long,
					  struct netlink_ext_ack *);
	void			(*walk)(struct Qdisc *, struct qdisc_walker * arg);

	/* Filter manipulation */
	struct tcf_block *	(*tcf_block)(struct Qdisc *sch,
					     unsigned long arg,
					     struct netlink_ext_ack *extack);
	unsigned long		(*bind_tcf)(struct Qdisc *, unsigned long,
					    u32 classid);
	void			(*unbind_tcf)(struct Qdisc *, unsigned long);

	/* rtnetlink specific */
	int			(*dump)(struct Qdisc *, unsigned long,
					struct sk_buff *skb, struct tcmsg*);
	int			(*dump_stats)(struct Qdisc *, unsigned long,
					struct gnet_dump *);
};

/* Qdisc_class_ops flag values */

/* Implements API that doesn't require rtnl lock */
enum qdisc_class_ops_flags {
	QDISC_CLASS_OPS_DOIT_UNLOCKED = 1,
};

struct Qdisc_ops {
	struct Qdisc_ops	*next;
	const struct Qdisc_class_ops	*cl_ops;
	char			id[IFNAMSIZ];
	int			priv_size;
	unsigned int		static_flags;

	int 			(*enqueue)(struct sk_buff *skb,
					   struct Qdisc *sch,
					   struct sk_buff **to_free);
	struct sk_buff *	(*dequeue)(struct Qdisc *);
	struct sk_buff *	(*peek)(struct Qdisc *);

	int			(*init)(struct Qdisc *sch, struct nlattr *arg,
					struct netlink_ext_ack *extack);
	void			(*reset)(struct Qdisc *);
	void			(*destroy)(struct Qdisc *);
	int			(*change)(struct Qdisc *sch,
					  struct nlattr *arg,
					  struct netlink_ext_ack *extack);
	void			(*attach)(struct Qdisc *sch);
	int			(*change_tx_queue_len)(struct Qdisc *, unsigned int);
	void			(*change_real_num_tx)(struct Qdisc *sch,
						      unsigned int new_real_tx);

	int			(*dump)(struct Qdisc *, struct sk_buff *);
	int			(*dump_stats)(struct Qdisc *, struct gnet_dump *);

	void			(*ingress_block_set)(struct Qdisc *sch,
						     u32 block_index);
	void			(*egress_block_set)(struct Qdisc *sch,
						    u32 block_index);
	u32			(*ingress_block_get)(struct Qdisc *sch);
	u32			(*egress_block_get)(struct Qdisc *sch);

	struct module		*owner;
};


struct tcf_result {
	union {
		struct {
			unsigned long	class;
			u32		classid;
		};
		const struct tcf_proto *goto_tp;

		/* used in the skb_tc_reinsert function */
		struct {
			bool		ingress;
			struct gnet_stats_queue *qstats;
		};
	};
};

struct tcf_chain;

struct tcf_proto_ops {
	struct list_head	head;
	char			kind[IFNAMSIZ];

	int			(*classify)(struct sk_buff *,
					    const struct tcf_proto *,
					    struct tcf_result *);
	int			(*init)(struct tcf_proto*);
	void			(*destroy)(struct tcf_proto *tp, bool rtnl_held,
					   struct netlink_ext_ack *extack);

	void*			(*get)(struct tcf_proto*, u32 handle);
	void			(*put)(struct tcf_proto *tp, void *f);
	int			(*change)(struct net *net, struct sk_buff *,
					struct tcf_proto*, unsigned long,
					u32 handle, struct nlattr **,
					void **, u32,
					struct netlink_ext_ack *);
	int			(*delete)(struct tcf_proto *tp, void *arg,
					  bool *last, bool rtnl_held,
					  struct netlink_ext_ack *);
	bool			(*delete_empty)(struct tcf_proto *tp);
	void			(*walk)(struct tcf_proto *tp,
					struct tcf_walker *arg, bool rtnl_held);
	int			(*reoffload)(struct tcf_proto *tp, bool add,
					     flow_setup_cb_t *cb, void *cb_priv,
					     struct netlink_ext_ack *extack);
	void			(*hw_add)(struct tcf_proto *tp,
					  void *type_data);
	void			(*hw_del)(struct tcf_proto *tp,
					  void *type_data);
	void			(*bind_class)(void *, u32, unsigned long,
					      void *, unsigned long);
	void *			(*tmplt_create)(struct net *net,
						struct tcf_chain *chain,
						struct nlattr **tca,
						struct netlink_ext_ack *extack);
	void			(*tmplt_destroy)(void *tmplt_priv);

	/* rtnetlink specific */
	int			(*dump)(struct net*, struct tcf_proto*, void *,
					struct sk_buff *skb, struct tcmsg*,
					bool);
	int			(*terse_dump)(struct net *net,
					      struct tcf_proto *tp, void *fh,
					      struct sk_buff *skb,
					      struct tcmsg *t, bool rtnl_held);
	int			(*tmplt_dump)(struct sk_buff *skb,
					      struct net *net,
					      void *tmplt_priv);

	struct module		*owner;
	int			flags;
};

/* Classifiers setting TCF_PROTO_OPS_DOIT_UNLOCKED in tcf_proto_ops->flags
 * are expected to implement tcf_proto_ops->delete_empty(), otherwise race
 * conditions can occur when filters are inserted/deleted simultaneously.
 */
enum tcf_proto_ops_flags {
	TCF_PROTO_OPS_DOIT_UNLOCKED = 1,
};

struct tcf_proto {
	/* Fast access part */
	struct tcf_proto __rcu	*next;
	void __rcu		*root;

	/* called under RCU BH lock */
	int			(*classify)(struct sk_buff *,
					    const struct tcf_proto *,
					    struct tcf_result *);
	__be16			protocol;

	/* All the rest */
	u32			prio;
	void			*data;
	const struct tcf_proto_ops	*ops;
	struct tcf_chain	*chain;
	/* Lock protects tcf_proto shared state and can be used by unlocked
	 * classifiers to protect their private data.
	 */
	spinlock_t		lock;
	bool			deleting;
	refcount_t		refcnt;
	struct rcu_head		rcu;
	struct hlist_node	destroy_ht_node;
};

struct qdisc_skb_cb {
	struct {
		unsigned int		pkt_len;
		u16			slave_dev_queue_mapping;
		u16			tc_classid;
	};
#define QDISC_CB_PRIV_LEN 20
	unsigned char		data[QDISC_CB_PRIV_LEN];
	u16			mru;
	bool			post_ct;
};

typedef void tcf_chain_head_change_t(struct tcf_proto *tp_head, void *priv);

struct tcf_chain {
	/* Protects filter_chain. */
	struct mutex filter_chain_lock;
	struct tcf_proto __rcu *filter_chain;
	struct list_head list;
	struct tcf_block *block;
	u32 index; /* chain index */
	unsigned int refcnt;
	unsigned int action_refcnt;
	bool explicitly_created;
	bool flushing;
	const struct tcf_proto_ops *tmplt_ops;
	void *tmplt_priv;
	struct rcu_head rcu;
};

struct tcf_block {
	/* Lock protects tcf_block and lifetime-management data of chains
	 * attached to the block (refcnt, action_refcnt, explicitly_created).
	 */
	struct mutex lock;
	struct list_head chain_list;
	u32 index; /* block index for shared blocks */
	u32 classid; /* which class this block belongs to */
	refcount_t refcnt;
	struct net *net;
	struct Qdisc *q;
	struct rw_semaphore cb_lock; /* protects cb_list and offload counters */
	struct flow_block flow_block;
	struct list_head owner_list;
	bool keep_dst;
	atomic_t offloadcnt; /* Number of offloaded filters */
	unsigned int nooffloaddevcnt; /* Number of devs unable to do offload */
	unsigned int lockeddevcnt; /* Number of devs that require rtnl lock. */
	struct {
		struct tcf_chain *chain;
		struct list_head filter_chain_list;
	} chain0;
	struct rcu_head rcu;
	DECLARE_HASHTABLE(proto_destroy_ht, 7);
	struct mutex proto_destroy_lock; /* Lock for proto_destroy hashtable. */
};

static inline bool lockdep_tcf_chain_is_locked(struct tcf_chain *chain)
{
	return lockdep_is_held(&chain->filter_chain_lock);
}

static inline bool lockdep_tcf_proto_is_locked(struct tcf_proto *tp)
{
	return lockdep_is_held(&tp->lock);
}

#define tcf_chain_dereference(p, chain) \
	rcu_dereference_protected(p, lockdep_tcf_chain_is_locked(chain))

#define tcf_proto_dereference(p, tp) \
	rcu_dereference_protected(p, lockdep_tcf_proto_is_locked(tp))

static inline void qdisc_cb_private_validate(const struct sk_buff *skb, int sz)
{
	struct qdisc_skb_cb *qcb;

	BUILD_BUG_ON(sizeof(skb->cb) < sizeof(*qcb));
	BUILD_BUG_ON(sizeof(qcb->data) < sz);
}
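
/* Typical use by a qdisc that stores private per-packet state in cb[]: the
 * sketch below follows the pattern used by e.g. netem (the struct and
 * helper names here are hypothetical):
 *
 *	struct my_skb_cb {
 *		u64 tstamp;
 *	};
 *
 *	static inline struct my_skb_cb *my_skb_cb(struct sk_buff *skb)
 *	{
 *		qdisc_cb_private_validate(skb, sizeof(struct my_skb_cb));
 *		return (struct my_skb_cb *)qdisc_skb_cb(skb)->data;
 *	}
 */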

static inline int qdisc_qlen_cpu(const struct Qdisc *q)
{
	return this_cpu_ptr(q->cpu_qstats)->qlen;
}

static inline int qdisc_qlen(const struct Qdisc *q)
{
	return q->q.qlen;
}

static inline int qdisc_qlen_sum(const struct Qdisc *q)
{
	__u32 qlen = q->qstats.qlen;
	int i;

	if (qdisc_is_percpu_stats(q)) {
		for_each_possible_cpu(i)
			qlen += per_cpu_ptr(q->cpu_qstats, i)->qlen;
	} else {
		qlen += q->q.qlen;
	}

	return qlen;
}

static inline struct qdisc_skb_cb *qdisc_skb_cb(const struct sk_buff *skb)
{
	return (struct qdisc_skb_cb *)skb->cb;
}

static inline spinlock_t *qdisc_lock(struct Qdisc *qdisc)
{
	return &qdisc->q.lock;
}

static inline struct Qdisc *qdisc_root(const struct Qdisc *qdisc)
{
	struct Qdisc *q = rcu_dereference_rtnl(qdisc->dev_queue->qdisc);

	return q;
}

static inline struct Qdisc *qdisc_root_bh(const struct Qdisc *qdisc)
{
	return rcu_dereference_bh(qdisc->dev_queue->qdisc);
}

static inline struct Qdisc *qdisc_root_sleeping(const struct Qdisc *qdisc)
{
	return qdisc->dev_queue->qdisc_sleeping;
}

/* The qdisc root lock is a mechanism by which the top level
 * of a qdisc tree can be locked from any qdisc node in the
 * forest. This allows changing the configuration of some
 * aspect of the qdisc tree while blocking out asynchronous
 * qdisc access in the packet processing paths.
 *
 * It is only legal to do this when the root will not change
 * on us. Otherwise we'll potentially lock the wrong qdisc
 * root. This is enforced by holding the RTNL semaphore, which
 * all users of this lock accessor must do.
 */
static inline spinlock_t *qdisc_root_lock(const struct Qdisc *qdisc)
{
	struct Qdisc *root = qdisc_root(qdisc);

	ASSERT_RTNL();
	return qdisc_lock(root);
}

static inline spinlock_t *qdisc_root_sleeping_lock(const struct Qdisc *qdisc)
{
	struct Qdisc *root = qdisc_root_sleeping(qdisc);

	ASSERT_RTNL();
	return qdisc_lock(root);
}

static inline seqcount_t *qdisc_root_sleeping_running(const struct Qdisc *qdisc)
{
	struct Qdisc *root = qdisc_root_sleeping(qdisc);

	ASSERT_RTNL();
	return &root->running;
}

static inline struct net_device *qdisc_dev(const struct Qdisc *qdisc)
{
	return qdisc->dev_queue->dev;
}

static inline void sch_tree_lock(struct Qdisc *q)
{
	if (q->flags & TCQ_F_MQROOT)
		spin_lock_bh(qdisc_lock(q));
	else
		spin_lock_bh(qdisc_root_sleeping_lock(q));
}

static inline void sch_tree_unlock(struct Qdisc *q)
{
	if (q->flags & TCQ_F_MQROOT)
		spin_unlock_bh(qdisc_lock(q));
	else
		spin_unlock_bh(qdisc_root_sleeping_lock(q));
}

extern struct Qdisc noop_qdisc;
extern struct Qdisc_ops noop_qdisc_ops;
extern struct Qdisc_ops pfifo_fast_ops;
extern struct Qdisc_ops mq_qdisc_ops;
extern struct Qdisc_ops noqueue_qdisc_ops;
extern const struct Qdisc_ops *default_qdisc_ops;
static inline const struct Qdisc_ops *
get_default_qdisc_ops(const struct net_device *dev, int ntx)
{
	return ntx < dev->real_num_tx_queues ?
			default_qdisc_ops : &pfifo_fast_ops;
}

struct Qdisc_class_common {
	u32			classid;
	struct hlist_node	hnode;
};

struct Qdisc_class_hash {
	struct hlist_head	*hash;
	unsigned int		hashsize;
	unsigned int		hashmask;
	unsigned int		hashelems;
};

static inline unsigned int qdisc_class_hash(u32 id, u32 mask)
{
	id ^= id >> 8;
	id ^= id >> 4;
	return id & mask;
}

static inline struct Qdisc_class_common *
qdisc_class_find(const struct Qdisc_class_hash *hash, u32 id)
{
	struct Qdisc_class_common *cl;
	unsigned int h;

	if (!id)
		return NULL;

	h = qdisc_class_hash(id, hash->hashmask);
	hlist_for_each_entry(cl, &hash->hash[h], hnode) {
		if (cl->classid == id)
			return cl;
	}
	return NULL;
}
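
/* Classful qdiscs embed Qdisc_class_common in their own class struct and
 * recover it with container_of(); a sketch along the lines of what htb and
 * friends do (struct and field names here are hypothetical):
 *
 *	static struct my_class *my_find(u32 handle, struct Qdisc *sch)
 *	{
 *		struct my_sched *q = qdisc_priv(sch);
 *		struct Qdisc_class_common *clc;
 *
 *		clc = qdisc_class_find(&q->clhash, handle);
 *		if (!clc)
 *			return NULL;
 *		return container_of(clc, struct my_class, common);
 *	}
 */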

static inline int tc_classid_to_hwtc(struct net_device *dev, u32 classid)
{
	u32 hwtc = TC_H_MIN(classid) - TC_H_MIN_PRIORITY;

	return (hwtc < netdev_get_num_tc(dev)) ? hwtc : -EINVAL;
}

int qdisc_class_hash_init(struct Qdisc_class_hash *);
void qdisc_class_hash_insert(struct Qdisc_class_hash *,
			     struct Qdisc_class_common *);
void qdisc_class_hash_remove(struct Qdisc_class_hash *,
			     struct Qdisc_class_common *);
void qdisc_class_hash_grow(struct Qdisc *, struct Qdisc_class_hash *);
void qdisc_class_hash_destroy(struct Qdisc_class_hash *);

int dev_qdisc_change_tx_queue_len(struct net_device *dev);
void dev_qdisc_change_real_num_tx(struct net_device *dev,
				  unsigned int new_real_tx);
void dev_init_scheduler(struct net_device *dev);
void dev_shutdown(struct net_device *dev);
void dev_activate(struct net_device *dev);
void dev_deactivate(struct net_device *dev);
void dev_deactivate_many(struct list_head *head);
struct Qdisc *dev_graft_qdisc(struct netdev_queue *dev_queue,
			      struct Qdisc *qdisc);
void qdisc_reset(struct Qdisc *qdisc);
void qdisc_put(struct Qdisc *qdisc);
void qdisc_put_unlocked(struct Qdisc *qdisc);
void qdisc_tree_reduce_backlog(struct Qdisc *qdisc, int n, int len);
#ifdef CONFIG_NET_SCHED
int qdisc_offload_dump_helper(struct Qdisc *q, enum tc_setup_type type,
			      void *type_data);
void qdisc_offload_graft_helper(struct net_device *dev, struct Qdisc *sch,
				struct Qdisc *new, struct Qdisc *old,
				enum tc_setup_type type, void *type_data,
				struct netlink_ext_ack *extack);
#else
static inline int
qdisc_offload_dump_helper(struct Qdisc *q, enum tc_setup_type type,
			  void *type_data)
{
	q->flags &= ~TCQ_F_OFFLOADED;
	return 0;
}

static inline void
qdisc_offload_graft_helper(struct net_device *dev, struct Qdisc *sch,
			   struct Qdisc *new, struct Qdisc *old,
			   enum tc_setup_type type, void *type_data,
			   struct netlink_ext_ack *extack)
{
}
#endif
struct Qdisc *qdisc_alloc(struct netdev_queue *dev_queue,
			  const struct Qdisc_ops *ops,
			  struct netlink_ext_ack *extack);
void qdisc_free(struct Qdisc *qdisc);
struct Qdisc *qdisc_create_dflt(struct netdev_queue *dev_queue,
				const struct Qdisc_ops *ops, u32 parentid,
				struct netlink_ext_ack *extack);
void __qdisc_calculate_pkt_len(struct sk_buff *skb,
			       const struct qdisc_size_table *stab);
int skb_do_redirect(struct sk_buff *);

static inline bool skb_at_tc_ingress(const struct sk_buff *skb)
{
#ifdef CONFIG_NET_CLS_ACT
	return skb->tc_at_ingress;
#else
	return false;
#endif
}

static inline bool skb_skip_tc_classify(struct sk_buff *skb)
{
#ifdef CONFIG_NET_CLS_ACT
	if (skb->tc_skip_classify) {
		skb->tc_skip_classify = 0;
		return true;
	}
#endif
	return false;
}

/* Reset all TX qdiscs of a device, starting at the given queue index. */
static inline void qdisc_reset_all_tx_gt(struct net_device *dev, unsigned int i)
{
	struct Qdisc *qdisc;

	for (; i < dev->num_tx_queues; i++) {
		qdisc = rtnl_dereference(netdev_get_tx_queue(dev, i)->qdisc);
		if (qdisc) {
			spin_lock_bh(qdisc_lock(qdisc));
			qdisc_reset(qdisc);
			spin_unlock_bh(qdisc_lock(qdisc));
		}
	}
}

/* Are all TX queues of the device empty? */
static inline bool qdisc_all_tx_empty(const struct net_device *dev)
{
	unsigned int i;

	rcu_read_lock();
	for (i = 0; i < dev->num_tx_queues; i++) {
		struct netdev_queue *txq = netdev_get_tx_queue(dev, i);
		const struct Qdisc *q = rcu_dereference(txq->qdisc);

		if (!qdisc_is_empty(q)) {
			rcu_read_unlock();
			return false;
		}
	}
	rcu_read_unlock();
	return true;
}

/* Are any of the TX qdiscs changing? */
static inline bool qdisc_tx_changing(const struct net_device *dev)
{
	unsigned int i;

	for (i = 0; i < dev->num_tx_queues; i++) {
		struct netdev_queue *txq = netdev_get_tx_queue(dev, i);
		if (rcu_access_pointer(txq->qdisc) != txq->qdisc_sleeping)
			return true;
	}
	return false;
}

/* Is the device using the noop qdisc on all queues? */
static inline bool qdisc_tx_is_noop(const struct net_device *dev)
{
	unsigned int i;

	for (i = 0; i < dev->num_tx_queues; i++) {
		struct netdev_queue *txq = netdev_get_tx_queue(dev, i);
		if (rcu_access_pointer(txq->qdisc) != &noop_qdisc)
			return false;
	}
	return true;
}

static inline unsigned int qdisc_pkt_len(const struct sk_buff *skb)
{
	return qdisc_skb_cb(skb)->pkt_len;
}

/* additional qdisc xmit flags (NET_XMIT_MASK in linux/netdevice.h) */
enum net_xmit_qdisc_t {
	__NET_XMIT_STOLEN = 0x00010000,
	__NET_XMIT_BYPASS = 0x00020000,
};

#ifdef CONFIG_NET_CLS_ACT
#define net_xmit_drop_count(e)	((e) & __NET_XMIT_STOLEN ? 0 : 1)
#else
#define net_xmit_drop_count(e)	(1)
#endif

static inline void qdisc_calculate_pkt_len(struct sk_buff *skb,
					   const struct Qdisc *sch)
{
#ifdef CONFIG_NET_SCHED
	struct qdisc_size_table *stab = rcu_dereference_bh(sch->stab);

	if (stab)
		__qdisc_calculate_pkt_len(skb, stab);
#endif
}

static inline int qdisc_enqueue(struct sk_buff *skb, struct Qdisc *sch,
				struct sk_buff **to_free)
{
	qdisc_calculate_pkt_len(skb, sch);
	return sch->enqueue(skb, sch, to_free);
}
| 851 | |
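/* Hedged example of a typical classful enqueue path built on
 * qdisc_enqueue() and net_xmit_drop_count() above (modelled loosely on
 * in-tree users such as sch_red; the helper and the @child argument are
 * illustrative): a __NET_XMIT_STOLEN verdict means the child kept the skb,
 * so the parent must not count a drop against itself.
 */
static inline int example_enqueue_to_child(struct sk_buff *skb,
					   struct Qdisc *sch,
					   struct Qdisc *child,
					   struct sk_buff **to_free)
{
	unsigned int len = qdisc_pkt_len(skb);
	int ret = qdisc_enqueue(skb, child, to_free);

	if (ret != NET_XMIT_SUCCESS) {
		/* open-coded qdisc_qstats_drop(), defined further below */
		if (net_xmit_drop_count(ret))
			sch->qstats.drops++;
		return ret;
	}
	/* open-coded qdisc_qstats_backlog_inc(), defined further below */
	sch->qstats.backlog += len;
	sch->q.qlen++;
	return NET_XMIT_SUCCESS;
}
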
Ahmed S. Darwish | 50dc9a8 | 2021-10-16 10:49:09 +0200 | [diff] [blame^] | 852 | static inline void _bstats_update(struct gnet_stats_basic_sync *bstats, |
Amir Vadai | 3804070 | 2016-05-13 12:55:35 +0000 | [diff] [blame] | 853 | __u64 bytes, __u32 packets) |
| 854 | { |
Ahmed S. Darwish | 67c9e6270 | 2021-10-16 10:49:07 +0200 | [diff] [blame] | 855 | u64_stats_update_begin(&bstats->syncp); |
Ahmed S. Darwish | 50dc9a8 | 2021-10-16 10:49:09 +0200 | [diff] [blame^] | 856 | u64_stats_add(&bstats->bytes, bytes); |
| 857 | u64_stats_add(&bstats->packets, packets); |
Ahmed S. Darwish | 67c9e6270 | 2021-10-16 10:49:07 +0200 | [diff] [blame] | 858 | u64_stats_update_end(&bstats->syncp); |
Amir Vadai | 3804070 | 2016-05-13 12:55:35 +0000 | [diff] [blame] | 859 | } |
| 860 | |
Ahmed S. Darwish | 50dc9a8 | 2021-10-16 10:49:09 +0200 | [diff] [blame^] | 861 | static inline void bstats_update(struct gnet_stats_basic_sync *bstats, |
Eric Dumazet | bfe0d02 | 2011-01-09 08:30:54 +0000 | [diff] [blame] | 862 | const struct sk_buff *skb) |
Krishna Kumar | bbd8a0d | 2009-08-06 01:44:21 +0000 | [diff] [blame] | 863 | { |
Amir Vadai | 3804070 | 2016-05-13 12:55:35 +0000 | [diff] [blame] | 864 | _bstats_update(bstats, |
| 865 | qdisc_pkt_len(skb), |
| 866 | skb_is_gso(skb) ? skb_shinfo(skb)->gso_segs : 1); |
| 867 | } |
| 868 | |
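/* Worked example (illustrative numbers): a TSO skb carrying 45 segments of
 * a 1448-byte MSS has qdisc_pkt_len() == 65160 and
 * skb_shinfo(skb)->gso_segs == 45, so bstats_update() accounts 65160 bytes
 * and 45 packets, keeping byte and packet counters consistent with what
 * the wire will eventually carry.
 */
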
Eric Dumazet | 24ea591 | 2015-07-06 05:18:03 -0700 | [diff] [blame] | 869 | static inline void qdisc_bstats_cpu_update(struct Qdisc *sch, |
| 870 | const struct sk_buff *skb) |
| 871 | { |
Ahmed S. Darwish | 50dc9a8 | 2021-10-16 10:49:09 +0200 | [diff] [blame^] | 872 | bstats_update(this_cpu_ptr(sch->cpu_bstats), skb); |
Eric Dumazet | 24ea591 | 2015-07-06 05:18:03 -0700 | [diff] [blame] | 873 | } |
| 874 | |
Eric Dumazet | bfe0d02 | 2011-01-09 08:30:54 +0000 | [diff] [blame] | 875 | static inline void qdisc_bstats_update(struct Qdisc *sch, |
| 876 | const struct sk_buff *skb) |
| 877 | { |
| 878 | bstats_update(&sch->bstats, skb); |
Krishna Kumar | bbd8a0d | 2009-08-06 01:44:21 +0000 | [diff] [blame] | 879 | } |
| 880 | |
John Fastabend | 25331d6 | 2014-09-28 11:53:29 -0700 | [diff] [blame] | 881 | static inline void qdisc_qstats_backlog_dec(struct Qdisc *sch, |
| 882 | const struct sk_buff *skb) |
| 883 | { |
| 884 | sch->qstats.backlog -= qdisc_pkt_len(skb); |
| 885 | } |
| 886 | |
John Fastabend | 40bd036 | 2017-12-07 09:55:07 -0800 | [diff] [blame] | 887 | static inline void qdisc_qstats_cpu_backlog_dec(struct Qdisc *sch, |
| 888 | const struct sk_buff *skb) |
| 889 | { |
| 890 | this_cpu_sub(sch->cpu_qstats->backlog, qdisc_pkt_len(skb)); |
| 891 | } |
| 892 | |
John Fastabend | 25331d6 | 2014-09-28 11:53:29 -0700 | [diff] [blame] | 893 | static inline void qdisc_qstats_backlog_inc(struct Qdisc *sch, |
| 894 | const struct sk_buff *skb) |
| 895 | { |
| 896 | sch->qstats.backlog += qdisc_pkt_len(skb); |
| 897 | } |
| 898 | |
John Fastabend | 40bd036 | 2017-12-07 09:55:07 -0800 | [diff] [blame] | 899 | static inline void qdisc_qstats_cpu_backlog_inc(struct Qdisc *sch, |
| 900 | const struct sk_buff *skb) |
| 901 | { |
| 902 | this_cpu_add(sch->cpu_qstats->backlog, qdisc_pkt_len(skb)); |
| 903 | } |
| 904 | |
Paolo Abeni | 73eb628 | 2019-04-10 14:32:41 +0200 | [diff] [blame] | 905 | static inline void qdisc_qstats_cpu_qlen_inc(struct Qdisc *sch) |
John Fastabend | 40bd036 | 2017-12-07 09:55:07 -0800 | [diff] [blame] | 906 | { |
Paolo Abeni | 73eb628 | 2019-04-10 14:32:41 +0200 | [diff] [blame] | 907 | this_cpu_inc(sch->cpu_qstats->qlen); |
John Fastabend | 40bd036 | 2017-12-07 09:55:07 -0800 | [diff] [blame] | 908 | } |
| 909 | |
Paolo Abeni | 73eb628 | 2019-04-10 14:32:41 +0200 | [diff] [blame] | 910 | static inline void qdisc_qstats_cpu_qlen_dec(struct Qdisc *sch) |
John Fastabend | 40bd036 | 2017-12-07 09:55:07 -0800 | [diff] [blame] | 911 | { |
Paolo Abeni | 73eb628 | 2019-04-10 14:32:41 +0200 | [diff] [blame] | 912 | this_cpu_dec(sch->cpu_qstats->qlen); |
John Fastabend | 40bd036 | 2017-12-07 09:55:07 -0800 | [diff] [blame] | 913 | } |
| 914 | |
| 915 | static inline void qdisc_qstats_cpu_requeues_inc(struct Qdisc *sch) |
| 916 | { |
| 917 | this_cpu_inc(sch->cpu_qstats->requeues); |
| 918 | } |
| 919 | |
John Fastabend | 25331d6 | 2014-09-28 11:53:29 -0700 | [diff] [blame] | 920 | static inline void __qdisc_qstats_drop(struct Qdisc *sch, int count) |
| 921 | { |
| 922 | sch->qstats.drops += count; |
| 923 | } |
| 924 | |
Eric Dumazet | 24ea591 | 2015-07-06 05:18:03 -0700 | [diff] [blame] | 925 | static inline void qstats_drop_inc(struct gnet_stats_queue *qstats) |
John Fastabend | 25331d6 | 2014-09-28 11:53:29 -0700 | [diff] [blame] | 926 | { |
Eric Dumazet | 24ea591 | 2015-07-06 05:18:03 -0700 | [diff] [blame] | 927 | qstats->drops++; |
John Fastabend | 25331d6 | 2014-09-28 11:53:29 -0700 | [diff] [blame] | 928 | } |
| 929 | |
Eric Dumazet | 24ea591 | 2015-07-06 05:18:03 -0700 | [diff] [blame] | 930 | static inline void qstats_overlimit_inc(struct gnet_stats_queue *qstats) |
John Fastabend | b0ab6f9 | 2014-09-28 11:54:24 -0700 | [diff] [blame] | 931 | { |
Eric Dumazet | 24ea591 | 2015-07-06 05:18:03 -0700 | [diff] [blame] | 932 | qstats->overlimits++; |
| 933 | } |
John Fastabend | b0ab6f9 | 2014-09-28 11:54:24 -0700 | [diff] [blame] | 934 | |
Eric Dumazet | 24ea591 | 2015-07-06 05:18:03 -0700 | [diff] [blame] | 935 | static inline void qdisc_qstats_drop(struct Qdisc *sch) |
| 936 | { |
| 937 | qstats_drop_inc(&sch->qstats); |
| 938 | } |
| 939 | |
| 940 | static inline void qdisc_qstats_cpu_drop(struct Qdisc *sch) |
| 941 | { |
Eric Dumazet | eb60a8d | 2016-08-24 10:23:34 -0700 | [diff] [blame] | 942 | this_cpu_inc(sch->cpu_qstats->drops); |
John Fastabend | b0ab6f9 | 2014-09-28 11:54:24 -0700 | [diff] [blame] | 943 | } |
| 944 | |
John Fastabend | 25331d6 | 2014-09-28 11:53:29 -0700 | [diff] [blame] | 945 | static inline void qdisc_qstats_overlimit(struct Qdisc *sch) |
| 946 | { |
| 947 | sch->qstats.overlimits++; |
| 948 | } |
| 949 | |
Paolo Abeni | 5dd431b | 2019-03-28 16:53:12 +0100 | [diff] [blame] | 950 | static inline int qdisc_qstats_copy(struct gnet_dump *d, struct Qdisc *sch) |
| 951 | { |
| 952 | __u32 qlen = qdisc_qlen_sum(sch); |
| 953 | |
| 954 | return gnet_stats_copy_queue(d, sch->cpu_qstats, &sch->qstats, qlen); |
| 955 | } |
| 956 | |
| 957 | static inline void qdisc_qstats_qlen_backlog(struct Qdisc *sch, __u32 *qlen, |
| 958 | __u32 *backlog) |
| 959 | { |
| 960 | struct gnet_stats_queue qstats = { 0 }; |
Paolo Abeni | 5dd431b | 2019-03-28 16:53:12 +0100 | [diff] [blame] | 961 | |
Sebastian Andrzej Siewior | 10940eb | 2021-10-16 10:49:05 +0200 | [diff] [blame] | 962 | gnet_stats_add_queue(&qstats, sch->cpu_qstats, &sch->qstats); |
| 963 | *qlen = qstats.qlen + qdisc_qlen(sch); |
Paolo Abeni | 5dd431b | 2019-03-28 16:53:12 +0100 | [diff] [blame] | 964 | *backlog = qstats.backlog; |
| 965 | } |
| 966 | |
Paolo Abeni | e5f0e8f | 2019-03-28 16:53:13 +0100 | [diff] [blame] | 967 | static inline void qdisc_tree_flush_backlog(struct Qdisc *sch) |
| 968 | { |
| 969 | __u32 qlen, backlog; |
| 970 | |
| 971 | qdisc_qstats_qlen_backlog(sch, &qlen, &backlog); |
| 972 | qdisc_tree_reduce_backlog(sch, qlen, backlog); |
| 973 | } |
| 974 | |
| 975 | static inline void qdisc_purge_queue(struct Qdisc *sch) |
| 976 | { |
| 977 | __u32 qlen, backlog; |
| 978 | |
| 979 | qdisc_qstats_qlen_backlog(sch, &qlen, &backlog); |
| 980 | qdisc_reset(sch); |
| 981 | qdisc_tree_reduce_backlog(sch, qlen, backlog); |
| 982 | } |
| 983 | |
Florian Westphal | 48da34b | 2016-09-18 00:57:34 +0200 | [diff] [blame] | 984 | static inline void qdisc_skb_head_init(struct qdisc_skb_head *qh) |
Thomas Graf | 9972b25 | 2005-06-18 22:57:26 -0700 | [diff] [blame] | 985 | { |
Florian Westphal | 48da34b | 2016-09-18 00:57:34 +0200 | [diff] [blame] | 986 | qh->head = NULL; |
| 987 | qh->tail = NULL; |
| 988 | qh->qlen = 0; |
| 989 | } |
| 990 | |
David S. Miller | aea890b | 2018-07-29 16:22:13 -0700 | [diff] [blame] | 991 | static inline void __qdisc_enqueue_tail(struct sk_buff *skb, |
| 992 | struct qdisc_skb_head *qh) |
Florian Westphal | 48da34b | 2016-09-18 00:57:34 +0200 | [diff] [blame] | 993 | { |
| 994 | struct sk_buff *last = qh->tail; |
| 995 | |
| 996 | if (last) { |
| 997 | skb->next = NULL; |
| 998 | last->next = skb; |
| 999 | qh->tail = skb; |
| 1000 | } else { |
| 1001 | qh->tail = skb; |
| 1002 | qh->head = skb; |
| 1003 | } |
| 1004 | qh->qlen++; |
Thomas Graf | 9972b25 | 2005-06-18 22:57:26 -0700 | [diff] [blame] | 1005 | } |
| 1006 | |
| 1007 | static inline int qdisc_enqueue_tail(struct sk_buff *skb, struct Qdisc *sch) |
| 1008 | { |
David S. Miller | aea890b | 2018-07-29 16:22:13 -0700 | [diff] [blame] | 1009 | __qdisc_enqueue_tail(skb, &sch->q); |
| 1010 | qdisc_qstats_backlog_inc(sch, skb); |
| 1011 | return NET_XMIT_SUCCESS; |
Thomas Graf | 9972b25 | 2005-06-18 22:57:26 -0700 | [diff] [blame] | 1012 | } |
| 1013 | |
David S. Miller | 5969773 | 2018-07-29 16:33:28 -0700 | [diff] [blame] | 1014 | static inline void __qdisc_enqueue_head(struct sk_buff *skb, |
| 1015 | struct qdisc_skb_head *qh) |
| 1016 | { |
| 1017 | skb->next = qh->head; |
| 1018 | |
| 1019 | if (!qh->head) |
| 1020 | qh->tail = skb; |
| 1021 | qh->head = skb; |
| 1022 | qh->qlen++; |
| 1023 | } |
| 1024 | |
Florian Westphal | 48da34b | 2016-09-18 00:57:34 +0200 | [diff] [blame] | 1025 | static inline struct sk_buff *__qdisc_dequeue_head(struct qdisc_skb_head *qh) |
Thomas Graf | 9972b25 | 2005-06-18 22:57:26 -0700 | [diff] [blame] | 1026 | { |
Florian Westphal | 48da34b | 2016-09-18 00:57:34 +0200 | [diff] [blame] | 1027 | struct sk_buff *skb = qh->head; |
| 1028 | |
| 1029 | if (likely(skb != NULL)) { |
| 1030 | qh->head = skb->next; |
| 1031 | qh->qlen--; |
| 1032 | if (qh->head == NULL) |
| 1033 | qh->tail = NULL; |
| 1034 | skb->next = NULL; |
| 1035 | } |
Thomas Graf | 9972b25 | 2005-06-18 22:57:26 -0700 | [diff] [blame] | 1036 | |
Florian Westphal | ec32336 | 2016-09-18 00:57:32 +0200 | [diff] [blame] | 1037 | return skb; |
| 1038 | } |
| 1039 | |
| 1040 | static inline struct sk_buff *qdisc_dequeue_head(struct Qdisc *sch) |
| 1041 | { |
| 1042 | struct sk_buff *skb = __qdisc_dequeue_head(&sch->q); |
| 1043 | |
Eric Dumazet | 9190b3b | 2011-01-20 23:31:33 -0800 | [diff] [blame] | 1044 | if (likely(skb != NULL)) { |
John Fastabend | 25331d6 | 2014-09-28 11:53:29 -0700 | [diff] [blame] | 1045 | qdisc_qstats_backlog_dec(sch, skb); |
Eric Dumazet | 9190b3b | 2011-01-20 23:31:33 -0800 | [diff] [blame] | 1046 | qdisc_bstats_update(sch, skb); |
| 1047 | } |
Thomas Graf | 9972b25 | 2005-06-18 22:57:26 -0700 | [diff] [blame] | 1048 | |
| 1049 | return skb; |
| 1050 | } |
| 1051 | |
Eric Dumazet | 520ac30 | 2016-06-21 23:16:49 -0700 | [diff] [blame] | 1052 | /* Instead of calling kfree_skb() while the root qdisc lock is held,
| 1053 |  * queue the skb for future freeing at the end of __dev_xmit_skb().
| 1054 |  */
| 1055 | static inline void __qdisc_drop(struct sk_buff *skb, struct sk_buff **to_free) |
| 1056 | { |
| 1057 | skb->next = *to_free; |
| 1058 | *to_free = skb; |
| 1059 | } |
| 1060 | |
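/* Hedged sketch of the consumer side (mirroring what __dev_xmit_skb() does
 * in net/core/dev.c; the helper name is illustrative): once the root lock
 * is released, the chain built up by __qdisc_drop() is freed in one pass.
 */
static inline void example_flush_to_free_list(struct sk_buff *to_free)
{
	if (unlikely(to_free))
		kfree_skb_list(to_free);
}
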
Alexey Kodanev | 35d889d | 2018-03-05 20:52:54 +0300 | [diff] [blame] | 1061 | static inline void __qdisc_drop_all(struct sk_buff *skb, |
| 1062 | struct sk_buff **to_free) |
| 1063 | { |
| 1064 | if (skb->prev) |
| 1065 | skb->prev->next = *to_free; |
| 1066 | else |
| 1067 | skb->next = *to_free; |
| 1068 | *to_free = skb; |
| 1069 | } |
| 1070 | |
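/* Note (hedged): __qdisc_drop_all() expects skb->prev to point at the tail
 * of a segment list, as netem builds one after software GSO segmentation;
 * linking the tail into *to_free first queues the whole run of segments
 * for freeing, not just @skb itself.
 */
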
Hagen Paul Pfeifer | 57dbb2d | 2010-01-24 12:30:59 +0000 | [diff] [blame] | 1071 | static inline unsigned int __qdisc_queue_drop_head(struct Qdisc *sch, |
Florian Westphal | 48da34b | 2016-09-18 00:57:34 +0200 | [diff] [blame] | 1072 | struct qdisc_skb_head *qh, |
Eric Dumazet | 520ac30 | 2016-06-21 23:16:49 -0700 | [diff] [blame] | 1073 | struct sk_buff **to_free) |
Hagen Paul Pfeifer | 57dbb2d | 2010-01-24 12:30:59 +0000 | [diff] [blame] | 1074 | { |
Florian Westphal | 48da34b | 2016-09-18 00:57:34 +0200 | [diff] [blame] | 1075 | struct sk_buff *skb = __qdisc_dequeue_head(qh); |
Hagen Paul Pfeifer | 57dbb2d | 2010-01-24 12:30:59 +0000 | [diff] [blame] | 1076 | |
| 1077 | if (likely(skb != NULL)) { |
| 1078 | unsigned int len = qdisc_pkt_len(skb); |
Eric Dumazet | 520ac30 | 2016-06-21 23:16:49 -0700 | [diff] [blame] | 1079 | |
John Fastabend | 25331d6 | 2014-09-28 11:53:29 -0700 | [diff] [blame] | 1080 | qdisc_qstats_backlog_dec(sch, skb); |
Eric Dumazet | 520ac30 | 2016-06-21 23:16:49 -0700 | [diff] [blame] | 1081 | __qdisc_drop(skb, to_free); |
Hagen Paul Pfeifer | 57dbb2d | 2010-01-24 12:30:59 +0000 | [diff] [blame] | 1082 | return len; |
| 1083 | } |
| 1084 | |
| 1085 | return 0; |
| 1086 | } |
| 1087 | |
Patrick McHardy | 48a8f51 | 2008-10-31 00:44:18 -0700 | [diff] [blame] | 1088 | static inline struct sk_buff *qdisc_peek_head(struct Qdisc *sch) |
| 1089 | { |
Florian Westphal | 48da34b | 2016-09-18 00:57:34 +0200 | [diff] [blame] | 1090 | const struct qdisc_skb_head *qh = &sch->q; |
| 1091 | |
| 1092 | return qh->head; |
Patrick McHardy | 48a8f51 | 2008-10-31 00:44:18 -0700 | [diff] [blame] | 1093 | } |
| 1094 | |
Jarek Poplawski | 77be155 | 2008-10-31 00:47:01 -0700 | [diff] [blame] | 1095 | /* generic pseudo peek method for non-work-conserving qdiscs */
| 1096 | static inline struct sk_buff *qdisc_peek_dequeued(struct Qdisc *sch) |
| 1097 | { |
John Fastabend | a53851e | 2017-12-07 09:55:45 -0800 | [diff] [blame] | 1098 | struct sk_buff *skb = skb_peek(&sch->gso_skb); |
| 1099 | |
Jarek Poplawski | 77be155 | 2008-10-31 00:47:01 -0700 | [diff] [blame] | 1100 | /* we can reuse ->gso_skb because peek isn't called for root qdiscs */ |
John Fastabend | a53851e | 2017-12-07 09:55:45 -0800 | [diff] [blame] | 1101 | if (!skb) { |
| 1102 | skb = sch->dequeue(sch); |
| 1103 | |
| 1104 | if (skb) { |
| 1105 | __skb_queue_head(&sch->gso_skb, skb); |
Jarek Poplawski | 61c9eaf | 2008-11-05 16:02:34 -0800 | [diff] [blame] | 1106 | /* it's still part of the queue */ |
John Fastabend | a53851e | 2017-12-07 09:55:45 -0800 | [diff] [blame] | 1107 | qdisc_qstats_backlog_inc(sch, skb); |
Jarek Poplawski | 61c9eaf | 2008-11-05 16:02:34 -0800 | [diff] [blame] | 1108 | sch->q.qlen++; |
WANG Cong | a27758f | 2016-06-03 15:05:57 -0700 | [diff] [blame] | 1109 | } |
Jarek Poplawski | 61c9eaf | 2008-11-05 16:02:34 -0800 | [diff] [blame] | 1110 | } |
Jarek Poplawski | 77be155 | 2008-10-31 00:47:01 -0700 | [diff] [blame] | 1111 | |
John Fastabend | a53851e | 2017-12-07 09:55:45 -0800 | [diff] [blame] | 1112 | return skb; |
Jarek Poplawski | 77be155 | 2008-10-31 00:47:01 -0700 | [diff] [blame] | 1113 | } |
| 1114 | |
Paolo Abeni | 8a53e61 | 2019-04-10 14:32:40 +0200 | [diff] [blame] | 1115 | static inline void qdisc_update_stats_at_dequeue(struct Qdisc *sch, |
| 1116 | struct sk_buff *skb) |
| 1117 | { |
| 1118 | if (qdisc_is_percpu_stats(sch)) { |
| 1119 | qdisc_qstats_cpu_backlog_dec(sch, skb); |
| 1120 | qdisc_bstats_cpu_update(sch, skb); |
Paolo Abeni | 73eb628 | 2019-04-10 14:32:41 +0200 | [diff] [blame] | 1121 | qdisc_qstats_cpu_qlen_dec(sch); |
Paolo Abeni | 8a53e61 | 2019-04-10 14:32:40 +0200 | [diff] [blame] | 1122 | } else { |
| 1123 | qdisc_qstats_backlog_dec(sch, skb); |
| 1124 | qdisc_bstats_update(sch, skb); |
| 1125 | sch->q.qlen--; |
| 1126 | } |
| 1127 | } |
| 1128 | |
| 1129 | static inline void qdisc_update_stats_at_enqueue(struct Qdisc *sch, |
| 1130 | unsigned int pkt_len) |
| 1131 | { |
| 1132 | if (qdisc_is_percpu_stats(sch)) { |
Paolo Abeni | 73eb628 | 2019-04-10 14:32:41 +0200 | [diff] [blame] | 1133 | qdisc_qstats_cpu_qlen_inc(sch); |
Paolo Abeni | 8a53e61 | 2019-04-10 14:32:40 +0200 | [diff] [blame] | 1134 | this_cpu_add(sch->cpu_qstats->backlog, pkt_len); |
| 1135 | } else { |
| 1136 | sch->qstats.backlog += pkt_len; |
| 1137 | sch->q.qlen++; |
| 1138 | } |
| 1139 | } |
| 1140 | |
Jarek Poplawski | 77be155 | 2008-10-31 00:47:01 -0700 | [diff] [blame] | 1141 | /* use instead of qdisc->dequeue() for all qdiscs queried with ->peek() */ |
| 1142 | static inline struct sk_buff *qdisc_dequeue_peeked(struct Qdisc *sch) |
| 1143 | { |
John Fastabend | a53851e | 2017-12-07 09:55:45 -0800 | [diff] [blame] | 1144 | struct sk_buff *skb = skb_peek(&sch->gso_skb); |
Jarek Poplawski | 77be155 | 2008-10-31 00:47:01 -0700 | [diff] [blame] | 1145 | |
Jarek Poplawski | 61c9eaf | 2008-11-05 16:02:34 -0800 | [diff] [blame] | 1146 | if (skb) { |
John Fastabend | a53851e | 2017-12-07 09:55:45 -0800 | [diff] [blame] | 1147 | skb = __skb_dequeue(&sch->gso_skb); |
Paolo Abeni | 9c01c9f | 2019-04-10 14:32:39 +0200 | [diff] [blame] | 1148 | if (qdisc_is_percpu_stats(sch)) { |
| 1149 | qdisc_qstats_cpu_backlog_dec(sch, skb); |
Paolo Abeni | 73eb628 | 2019-04-10 14:32:41 +0200 | [diff] [blame] | 1150 | qdisc_qstats_cpu_qlen_dec(sch); |
Paolo Abeni | 9c01c9f | 2019-04-10 14:32:39 +0200 | [diff] [blame] | 1151 | } else { |
| 1152 | qdisc_qstats_backlog_dec(sch, skb); |
| 1153 | sch->q.qlen--; |
| 1154 | } |
Jarek Poplawski | 61c9eaf | 2008-11-05 16:02:34 -0800 | [diff] [blame] | 1155 | } else { |
Jarek Poplawski | 77be155 | 2008-10-31 00:47:01 -0700 | [diff] [blame] | 1156 | skb = sch->dequeue(sch); |
Jarek Poplawski | 61c9eaf | 2008-11-05 16:02:34 -0800 | [diff] [blame] | 1157 | } |
Jarek Poplawski | 77be155 | 2008-10-31 00:47:01 -0700 | [diff] [blame] | 1158 | |
| 1159 | return skb; |
| 1160 | } |
| 1161 | |
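/* Hedged sketch of the peek/dequeue contract implemented above, loosely
 * modelled on token-bucket style shapers (the helper and the @may_send
 * flag are illustrative): look at the head packet first and only commit to
 * removing it once it may actually be sent.
 */
static inline struct sk_buff *example_shaper_dequeue(struct Qdisc *child,
						     bool may_send)
{
	struct sk_buff *skb = child->ops->peek(child);

	if (!skb || !may_send)
		return NULL;
	/* must pair with ->peek(), hence qdisc_dequeue_peeked() */
	return qdisc_dequeue_peeked(child);
}
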
Florian Westphal | 48da34b | 2016-09-18 00:57:34 +0200 | [diff] [blame] | 1162 | static inline void __qdisc_reset_queue(struct qdisc_skb_head *qh) |
Thomas Graf | 9972b25 | 2005-06-18 22:57:26 -0700 | [diff] [blame] | 1163 | { |
| 1164 | /*
| 1165 |  * We do not know the backlog in bytes of this list; it
| 1166 |  * is up to the caller to correct it.
| 1167 |  */
Florian Westphal | 48da34b | 2016-09-18 00:57:34 +0200 | [diff] [blame] | 1168 | ASSERT_RTNL(); |
| 1169 | if (qh->qlen) { |
| 1170 | rtnl_kfree_skbs(qh->head, qh->tail); |
| 1171 | |
| 1172 | qh->head = NULL; |
| 1173 | qh->tail = NULL; |
| 1174 | qh->qlen = 0; |
Eric Dumazet | 1b5c549 | 2016-06-13 20:21:50 -0700 | [diff] [blame] | 1175 | } |
Thomas Graf | 9972b25 | 2005-06-18 22:57:26 -0700 | [diff] [blame] | 1176 | } |
| 1177 | |
| 1178 | static inline void qdisc_reset_queue(struct Qdisc *sch) |
| 1179 | { |
Eric Dumazet | 1b5c549 | 2016-06-13 20:21:50 -0700 | [diff] [blame] | 1180 | __qdisc_reset_queue(&sch->q); |
Thomas Graf | 9972b25 | 2005-06-18 22:57:26 -0700 | [diff] [blame] | 1181 | sch->qstats.backlog = 0; |
| 1182 | } |
| 1183 | |
WANG Cong | 86a7996 | 2016-02-25 14:55:00 -0800 | [diff] [blame] | 1184 | static inline struct Qdisc *qdisc_replace(struct Qdisc *sch, struct Qdisc *new, |
| 1185 | struct Qdisc **pold) |
| 1186 | { |
| 1187 | struct Qdisc *old; |
| 1188 | |
| 1189 | sch_tree_lock(sch); |
| 1190 | old = *pold; |
| 1191 | *pold = new; |
Paolo Abeni | e5f0e8f | 2019-03-28 16:53:13 +0100 | [diff] [blame] | 1192 | if (old != NULL) |
Alexander Ovechkin | 938e0fc | 2021-02-01 23:00:49 +0300 | [diff] [blame] | 1193 | qdisc_purge_queue(old); |
WANG Cong | 86a7996 | 2016-02-25 14:55:00 -0800 | [diff] [blame] | 1194 | sch_tree_unlock(sch); |
| 1195 | |
| 1196 | return old; |
| 1197 | } |
| 1198 | |
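/* Hedged usage sketch (modelled on classful ->graft() handlers such as
 * sch_tbf's; struct example_sched_data and its qdisc field are
 * illustrative):
 *
 *	if (new == NULL)
 *		new = &noop_qdisc;
 *	*old = qdisc_replace(sch, new, &q->qdisc);
 *	return 0;
 *
 * qdisc_replace() takes care of purging the old child's queue and of
 * propagating the backlog change up the tree under sch_tree_lock().
 */
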
Eric Dumazet | 1b5c549 | 2016-06-13 20:21:50 -0700 | [diff] [blame] | 1199 | static inline void rtnl_qdisc_drop(struct sk_buff *skb, struct Qdisc *sch) |
| 1200 | { |
| 1201 | rtnl_kfree_skbs(skb, skb); |
| 1202 | qdisc_qstats_drop(sch); |
| 1203 | } |
| 1204 | |
John Fastabend | 40bd036 | 2017-12-07 09:55:07 -0800 | [diff] [blame] | 1205 | static inline int qdisc_drop_cpu(struct sk_buff *skb, struct Qdisc *sch, |
| 1206 | struct sk_buff **to_free) |
| 1207 | { |
| 1208 | __qdisc_drop(skb, to_free); |
| 1209 | qdisc_qstats_cpu_drop(sch); |
| 1210 | |
| 1211 | return NET_XMIT_DROP; |
| 1212 | } |
Eric Dumazet | 520ac30 | 2016-06-21 23:16:49 -0700 | [diff] [blame] | 1213 | |
| 1214 | static inline int qdisc_drop(struct sk_buff *skb, struct Qdisc *sch, |
| 1215 | struct sk_buff **to_free) |
Thomas Graf | 9972b25 | 2005-06-18 22:57:26 -0700 | [diff] [blame] | 1216 | { |
Eric Dumazet | 520ac30 | 2016-06-21 23:16:49 -0700 | [diff] [blame] | 1217 | __qdisc_drop(skb, to_free); |
John Fastabend | 25331d6 | 2014-09-28 11:53:29 -0700 | [diff] [blame] | 1218 | qdisc_qstats_drop(sch); |
Thomas Graf | 9972b25 | 2005-06-18 22:57:26 -0700 | [diff] [blame] | 1219 | |
| 1220 | return NET_XMIT_DROP; |
| 1221 | } |
| 1222 | |
Alexey Kodanev | 35d889d | 2018-03-05 20:52:54 +0300 | [diff] [blame] | 1223 | static inline int qdisc_drop_all(struct sk_buff *skb, struct Qdisc *sch, |
| 1224 | struct sk_buff **to_free) |
| 1225 | { |
| 1226 | __qdisc_drop_all(skb, to_free); |
| 1227 | qdisc_qstats_drop(sch); |
| 1228 | |
| 1229 | return NET_XMIT_DROP; |
| 1230 | } |
| 1231 | |
Jesper Dangaard Brouer | e9bef55 | 2007-09-12 16:35:24 +0200 | [diff] [blame] | 1232 | /* Length to Time (L2T) lookup in a qdisc_rate_table, to determine how
| 1233 |  * long it will take to send a packet given its size.
| 1234 |  */
| 1235 | static inline u32 qdisc_l2t(struct qdisc_rate_table *rtab, unsigned int pktlen)
| 1236 | { |
Jesper Dangaard Brouer | e08b099 | 2007-09-12 16:36:28 +0200 | [diff] [blame] | 1237 | int slot = pktlen + rtab->rate.cell_align + rtab->rate.overhead; |
| 1238 | if (slot < 0) |
| 1239 | slot = 0; |
Jesper Dangaard Brouer | e9bef55 | 2007-09-12 16:35:24 +0200 | [diff] [blame] | 1240 | slot >>= rtab->rate.cell_log; |
| 1241 | if (slot > 255) |
Eric Dumazet | a02cec2 | 2010-09-22 20:43:57 +0000 | [diff] [blame] | 1242 | return rtab->data[255] * (slot >> 8) + rtab->data[slot & 0xFF];
Jesper Dangaard Brouer | e9bef55 | 2007-09-12 16:35:24 +0200 | [diff] [blame] | 1243 | return rtab->data[slot]; |
| 1244 | } |
| 1245 | |
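/* Worked example (illustrative numbers): with cell_log == 3 (8-byte
 * cells), cell_align == 0 and overhead == 0, a 1000-byte packet maps to
 * slot 1000 >> 3 == 125 and costs rtab->data[125] time units. For slots
 * above 255 the cost is extrapolated from the top of the table:
 * rtab->data[255] multiplied by (slot >> 8), plus the
 * rtab->data[slot & 0xFF] remainder.
 */
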
Jiri Pirko | 292f1c7 | 2013-02-12 00:12:03 +0000 | [diff] [blame] | 1246 | struct psched_ratecfg { |
Eric Dumazet | 130d3d6 | 2013-06-06 13:56:19 -0700 | [diff] [blame] | 1247 | u64 rate_bytes_ps; /* bytes per second */ |
Eric Dumazet | 01cb71d | 2013-06-02 13:55:05 +0000 | [diff] [blame] | 1248 | u32 mult; |
| 1249 | u16 overhead; |
Jesper Dangaard Brouer | 8a8e3d8 | 2013-08-14 23:47:11 +0200 | [diff] [blame] | 1250 | u8 linklayer; |
Eric Dumazet | 01cb71d | 2013-06-02 13:55:05 +0000 | [diff] [blame] | 1251 | u8 shift; |
Jiri Pirko | 292f1c7 | 2013-02-12 00:12:03 +0000 | [diff] [blame] | 1252 | }; |
| 1253 | |
| 1254 | static inline u64 psched_l2t_ns(const struct psched_ratecfg *r, |
| 1255 | unsigned int len) |
| 1256 | { |
Jesper Dangaard Brouer | 8a8e3d8 | 2013-08-14 23:47:11 +0200 | [diff] [blame] | 1257 | len += r->overhead; |
| 1258 | |
| 1259 | if (unlikely(r->linklayer == TC_LINKLAYER_ATM)) |
| 1260 | return ((u64)(DIV_ROUND_UP(len, 48) * 53) * r->mult) >> r->shift;
| 1261 | |
| 1262 | return ((u64)len * r->mult) >> r->shift; |
Jiri Pirko | 292f1c7 | 2013-02-12 00:12:03 +0000 | [diff] [blame] | 1263 | } |
| 1264 | |
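/* Worked example (illustrative numbers): for a 1 Gbit/s Ethernet rate,
 * rate_bytes_ps == 125000000, and psched_ratecfg_precompute() (below)
 * chooses @mult and @shift so that len * mult >> shift approximates
 * len * NSEC_PER_SEC / rate_bytes_ps, i.e. 8 ns per byte. A 1500-byte
 * packet with no overhead then costs roughly 12000 ns on the wire.
 */
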
Joe Perches | 5c15257 | 2013-07-30 22:47:13 -0700 | [diff] [blame] | 1265 | void psched_ratecfg_precompute(struct psched_ratecfg *r, |
Eric Dumazet | 3e1e3aa | 2013-09-19 09:10:03 -0700 | [diff] [blame] | 1266 | const struct tc_ratespec *conf, |
| 1267 | u64 rate64); |
Jiri Pirko | 292f1c7 | 2013-02-12 00:12:03 +0000 | [diff] [blame] | 1268 | |
Eric Dumazet | 01cb71d | 2013-06-02 13:55:05 +0000 | [diff] [blame] | 1269 | static inline void psched_ratecfg_getrate(struct tc_ratespec *res, |
| 1270 | const struct psched_ratecfg *r) |
Jiri Pirko | 292f1c7 | 2013-02-12 00:12:03 +0000 | [diff] [blame] | 1271 | { |
Eric Dumazet | 01cb71d | 2013-06-02 13:55:05 +0000 | [diff] [blame] | 1272 | memset(res, 0, sizeof(*res)); |
Eric Dumazet | 3e1e3aa | 2013-09-19 09:10:03 -0700 | [diff] [blame] | 1273 | |
| 1274 | /* The legacy struct tc_ratespec has a 32bit @rate field;
| 1275 |  * a Qdisc using a 64bit rate should add new attributes
| 1276 |  * in order to maintain compatibility.
| 1277 |  */
| 1278 | res->rate = min_t(u64, r->rate_bytes_ps, ~0U); |
| 1279 | |
Eric Dumazet | 01cb71d | 2013-06-02 13:55:05 +0000 | [diff] [blame] | 1280 | res->overhead = r->overhead; |
Jesper Dangaard Brouer | 8a8e3d8 | 2013-08-14 23:47:11 +0200 | [diff] [blame] | 1281 | res->linklayer = (r->linklayer & TC_LINKLAYER_MASK); |
Jiri Pirko | 292f1c7 | 2013-02-12 00:12:03 +0000 | [diff] [blame] | 1282 | } |
| 1283 | |
Baowen Zheng | 2ffe039 | 2021-03-12 15:08:31 +0100 | [diff] [blame] | 1284 | struct psched_pktrate { |
| 1285 | u64 rate_pkts_ps; /* packets per second */ |
| 1286 | u32 mult; |
| 1287 | u8 shift; |
| 1288 | }; |
| 1289 | |
| 1290 | static inline u64 psched_pkt2t_ns(const struct psched_pktrate *r, |
| 1291 | unsigned int pkt_num) |
| 1292 | { |
| 1293 | return ((u64)pkt_num * r->mult) >> r->shift; |
| 1294 | } |
| 1295 | |
| 1296 | void psched_ppscfg_precompute(struct psched_pktrate *r, u64 pktrate64); |
| 1297 | |
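/* Worked example (illustrative numbers): at a policed rate of one million
 * packets per second, each packet accounts for roughly 1000 ns, so
 * psched_pkt2t_ns(r, 5) evaluates to about 5000 ns once
 * psched_ppscfg_precompute() has filled in @mult and @shift.
 */
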
Jiri Pirko | 4620940 | 2017-11-03 11:46:25 +0100 | [diff] [blame] | 1298 | /* The Mini Qdisc serves the specific needs of the ingress/clsact Qdisc.
| 1299 |  * Its fast path only needs to access the filter list and to update stats.
| 1300 |  */
| 1301 | struct mini_Qdisc { |
| 1302 | struct tcf_proto *filter_list; |
Paul Blakey | 7d17c54 | 2020-02-16 12:01:22 +0200 | [diff] [blame] | 1303 | struct tcf_block *block; |
Ahmed S. Darwish | 50dc9a8 | 2021-10-16 10:49:09 +0200 | [diff] [blame^] | 1304 | struct gnet_stats_basic_sync __percpu *cpu_bstats; |
Jiri Pirko | 4620940 | 2017-11-03 11:46:25 +0100 | [diff] [blame] | 1305 | struct gnet_stats_queue __percpu *cpu_qstats; |
| 1306 | struct rcu_head rcu; |
| 1307 | }; |
| 1308 | |
| 1309 | static inline void mini_qdisc_bstats_cpu_update(struct mini_Qdisc *miniq, |
| 1310 | const struct sk_buff *skb) |
| 1311 | { |
Ahmed S. Darwish | 50dc9a8 | 2021-10-16 10:49:09 +0200 | [diff] [blame^] | 1312 | bstats_update(this_cpu_ptr(miniq->cpu_bstats), skb); |
Jiri Pirko | 4620940 | 2017-11-03 11:46:25 +0100 | [diff] [blame] | 1313 | } |
| 1314 | |
| 1315 | static inline void mini_qdisc_qstats_cpu_drop(struct mini_Qdisc *miniq) |
| 1316 | { |
| 1317 | this_cpu_inc(miniq->cpu_qstats->drops); |
| 1318 | } |
| 1319 | |
| 1320 | struct mini_Qdisc_pair { |
| 1321 | struct mini_Qdisc miniq1; |
| 1322 | struct mini_Qdisc miniq2; |
| 1323 | struct mini_Qdisc __rcu **p_miniq; |
| 1324 | }; |
| 1325 | |
| 1326 | void mini_qdisc_pair_swap(struct mini_Qdisc_pair *miniqp, |
| 1327 | struct tcf_proto *tp_head); |
| 1328 | void mini_qdisc_pair_init(struct mini_Qdisc_pair *miniqp, struct Qdisc *qdisc, |
| 1329 | struct mini_Qdisc __rcu **p_miniq); |
Paul Blakey | 7d17c54 | 2020-02-16 12:01:22 +0200 | [diff] [blame] | 1330 | void mini_qdisc_pair_block_init(struct mini_Qdisc_pair *miniqp, |
| 1331 | struct tcf_block *block); |
Jiri Pirko | 4620940 | 2017-11-03 11:46:25 +0100 | [diff] [blame] | 1332 | |
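/* Hedged usage sketch (modelled on sch_ingress; the miniqp field follows
 * the in-tree ingress qdisc, other names are illustrative): the owner
 * embeds a mini_Qdisc_pair in its private data, wires it up once at init
 * time and then swaps the active mini qdisc whenever the filter chain
 * head changes:
 *
 *	mini_qdisc_pair_init(&q->miniqp, sch, &dev->miniq_ingress);
 *	...
 *	mini_qdisc_pair_swap(&q->miniqp, tp_head);
 */
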
Jakub Kicinski | f7116fb | 2021-09-17 06:55:06 -0700 | [diff] [blame] | 1333 | void mq_change_real_num_tx(struct Qdisc *sch, unsigned int new_real_tx); |
| 1334 | |
wenxu | c129412 | 2020-11-25 12:01:23 +0800 | [diff] [blame] | 1335 | int sch_frag_xmit_hook(struct sk_buff *skb, int (*xmit)(struct sk_buff *skb)); |
Paolo Abeni | cd11b164 | 2018-07-30 14:30:44 +0200 | [diff] [blame] | 1336 | |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1337 | #endif |