/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __NET_SCHED_GENERIC_H
#define __NET_SCHED_GENERIC_H

#include <linux/netdevice.h>
#include <linux/types.h>
#include <linux/rcupdate.h>
#include <linux/pkt_sched.h>
#include <linux/pkt_cls.h>
#include <linux/percpu.h>
#include <linux/dynamic_queue_limits.h>
#include <linux/list.h>
#include <linux/refcount.h>
#include <linux/workqueue.h>
#include <net/gen_stats.h>
#include <net/rtnetlink.h>

struct Qdisc_ops;
struct qdisc_walker;
struct tcf_walker;
struct module;

typedef int tc_setup_cb_t(enum tc_setup_type type,
			  void *type_data, void *cb_priv);

struct qdisc_rate_table {
	struct tc_ratespec	rate;
	u32			data[256];
	struct qdisc_rate_table *next;
	int			refcnt;
};

enum qdisc_state_t {
	__QDISC_STATE_SCHED,
	__QDISC_STATE_DEACTIVATED,
};

struct qdisc_size_table {
	struct rcu_head		rcu;
	struct list_head	list;
	struct tc_sizespec	szopts;
	int			refcnt;
	u16			data[];
};

/* similar to sk_buff_head, but skb->prev pointer is undefined. */
struct qdisc_skb_head {
	struct sk_buff	*head;
	struct sk_buff	*tail;
	__u32		qlen;
	spinlock_t	lock;
};

struct Qdisc {
	int 			(*enqueue)(struct sk_buff *skb,
					   struct Qdisc *sch,
					   struct sk_buff **to_free);
	struct sk_buff *	(*dequeue)(struct Qdisc *sch);
	unsigned int		flags;
#define TCQ_F_BUILTIN		1
#define TCQ_F_INGRESS		2
#define TCQ_F_CAN_BYPASS	4
#define TCQ_F_MQROOT		8
#define TCQ_F_ONETXQUEUE	0x10 /* dequeue_skb() can assume all skbs are for
				      * q->dev_queue : It can test
				      * netif_xmit_frozen_or_stopped() before
				      * dequeueing next packet.
				      * It's true for MQ/MQPRIO slaves, or non
				      * multiqueue device.
				      */
#define TCQ_F_WARN_NONWC	(1 << 16)
#define TCQ_F_CPUSTATS		0x20 /* run using percpu statistics */
#define TCQ_F_NOPARENT		0x40 /* root of its hierarchy:
				      * qdisc_tree_decrease_qlen() should stop.
				      */
#define TCQ_F_INVISIBLE		0x80 /* invisible by default in dump */
#define TCQ_F_NOLOCK		0x100 /* qdisc does not require locking */
#define TCQ_F_OFFLOADED		0x200 /* qdisc is offloaded to HW */
	u32			limit;
	const struct Qdisc_ops	*ops;
	struct qdisc_size_table	__rcu *stab;
	struct hlist_node	hash;
	u32			handle;
	u32			parent;

	struct netdev_queue	*dev_queue;

	struct net_rate_estimator __rcu *rate_est;
	struct gnet_stats_basic_cpu __percpu *cpu_bstats;
	struct gnet_stats_queue	__percpu *cpu_qstats;
	int			padded;
	refcount_t		refcnt;

	/*
	 * For performance's sake on SMP, we put highly modified fields at the end
	 */
	struct sk_buff_head	gso_skb ____cacheline_aligned_in_smp;
	struct qdisc_skb_head	q;
	struct gnet_stats_basic_packed bstats;
	seqcount_t		running;
	struct gnet_stats_queue	qstats;
	unsigned long		state;
	struct Qdisc		*next_sched;
	struct sk_buff_head	skb_bad_txq;

	spinlock_t		busylock ____cacheline_aligned_in_smp;
	spinlock_t		seqlock;
	struct rcu_head		rcu;
};

static inline void qdisc_refcount_inc(struct Qdisc *qdisc)
{
	if (qdisc->flags & TCQ_F_BUILTIN)
		return;
	refcount_inc(&qdisc->refcnt);
}
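
/*
 * Usage sketch (illustrative, not part of the original header): a caller
 * that stores a qdisc pointer takes a reference first and drops it with
 * qdisc_put() when done. Builtin qdiscs such as noop_qdisc are never
 * freed, which is why qdisc_refcount_inc() skips them.
 *
 *	qdisc_refcount_inc(q);
 *	...
 *	qdisc_put(q);
 */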

static inline bool qdisc_is_running(struct Qdisc *qdisc)
{
	if (qdisc->flags & TCQ_F_NOLOCK)
		return spin_is_locked(&qdisc->seqlock);
	return (raw_read_seqcount(&qdisc->running) & 1) ? true : false;
}

static inline bool qdisc_run_begin(struct Qdisc *qdisc)
{
	if (qdisc->flags & TCQ_F_NOLOCK) {
		if (!spin_trylock(&qdisc->seqlock))
			return false;
	} else if (qdisc_is_running(qdisc)) {
		return false;
	}
	/* Variant of write_seqcount_begin() telling lockdep a trylock
	 * was attempted.
	 */
	raw_write_seqcount_begin(&qdisc->running);
	seqcount_acquire(&qdisc->running.dep_map, 0, 1, _RET_IP_);
	return true;
}

static inline void qdisc_run_end(struct Qdisc *qdisc)
{
	write_seqcount_end(&qdisc->running);
	if (qdisc->flags & TCQ_F_NOLOCK)
		spin_unlock(&qdisc->seqlock);
}
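
/*
 * Typical calling pattern (a sketch of how the core scheduler uses these
 * helpers; see qdisc_run() in include/net/pkt_sched.h for the real thing):
 *
 *	if (qdisc_run_begin(q)) {
 *		__qdisc_run(q);		// dequeue/transmit loop
 *		qdisc_run_end(q);
 *	}
 *
 * Only one CPU at a time "owns" the qdisc between begin and end; other
 * CPUs see qdisc_run_begin() fail and back off.
 */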

static inline bool qdisc_may_bulk(const struct Qdisc *qdisc)
{
	return qdisc->flags & TCQ_F_ONETXQUEUE;
}

static inline int qdisc_avail_bulklimit(const struct netdev_queue *txq)
{
#ifdef CONFIG_BQL
	/* Non-BQL migrated drivers will return 0, too. */
	return dql_avail(&txq->dql);
#else
	return 0;
#endif
}
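
/*
 * Illustrative sketch (not from this header): the dequeue path can bulk
 * several packets toward the driver when the qdisc owns a single TX queue
 * and BQL still has budget, along these lines:
 *
 *	if (qdisc_may_bulk(q) && qdisc_avail_bulklimit(txq) > 0)
 *		// keep dequeuing and chain skbs for a single xmit call
 *
 * try_bulk_dequeue_skb() in net/sched/sch_generic.c implements this idea.
 */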

struct Qdisc_class_ops {
	/* Child qdisc manipulation */
	struct netdev_queue *	(*select_queue)(struct Qdisc *, struct tcmsg *);
	int			(*graft)(struct Qdisc *, unsigned long cl,
					 struct Qdisc *, struct Qdisc **,
					 struct netlink_ext_ack *extack);
	struct Qdisc *		(*leaf)(struct Qdisc *, unsigned long cl);
	void			(*qlen_notify)(struct Qdisc *, unsigned long);

	/* Class manipulation routines */
	unsigned long		(*find)(struct Qdisc *, u32 classid);
	int			(*change)(struct Qdisc *, u32, u32,
					  struct nlattr **, unsigned long *,
					  struct netlink_ext_ack *);
	int			(*delete)(struct Qdisc *, unsigned long);
	void			(*walk)(struct Qdisc *, struct qdisc_walker * arg);

	/* Filter manipulation */
	struct tcf_block *	(*tcf_block)(struct Qdisc *sch,
					     unsigned long arg,
					     struct netlink_ext_ack *extack);
	unsigned long		(*bind_tcf)(struct Qdisc *, unsigned long,
					    u32 classid);
	void			(*unbind_tcf)(struct Qdisc *, unsigned long);

	/* rtnetlink specific */
	int			(*dump)(struct Qdisc *, unsigned long,
					struct sk_buff *skb, struct tcmsg*);
	int			(*dump_stats)(struct Qdisc *, unsigned long,
					      struct gnet_dump *);
};

struct Qdisc_ops {
	struct Qdisc_ops	*next;
	const struct Qdisc_class_ops	*cl_ops;
	char			id[IFNAMSIZ];
	int			priv_size;
	unsigned int		static_flags;

	int 			(*enqueue)(struct sk_buff *skb,
					   struct Qdisc *sch,
					   struct sk_buff **to_free);
	struct sk_buff *	(*dequeue)(struct Qdisc *);
	struct sk_buff *	(*peek)(struct Qdisc *);

	int			(*init)(struct Qdisc *sch, struct nlattr *arg,
					struct netlink_ext_ack *extack);
	void			(*reset)(struct Qdisc *);
	void			(*destroy)(struct Qdisc *);
	int			(*change)(struct Qdisc *sch,
					  struct nlattr *arg,
					  struct netlink_ext_ack *extack);
	void			(*attach)(struct Qdisc *sch);
	int			(*change_tx_queue_len)(struct Qdisc *, unsigned int);

	int			(*dump)(struct Qdisc *, struct sk_buff *);
	int			(*dump_stats)(struct Qdisc *, struct gnet_dump *);

	void			(*ingress_block_set)(struct Qdisc *sch,
						     u32 block_index);
	void			(*egress_block_set)(struct Qdisc *sch,
						    u32 block_index);
	u32			(*ingress_block_get)(struct Qdisc *sch);
	u32			(*egress_block_get)(struct Qdisc *sch);

	struct module		*owner;
};
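
/*
 * Illustrative sketch (not part of the original header): a minimal
 * classless qdisc fills in the mandatory hooks and registers the ops
 * table; "myfifo", my_enqueue() and my_init() are hypothetical names.
 *
 *	static struct Qdisc_ops my_fifo_ops __read_mostly = {
 *		.id		= "myfifo",
 *		.priv_size	= 0,
 *		.enqueue	= my_enqueue,
 *		.dequeue	= qdisc_dequeue_head,
 *		.peek		= qdisc_peek_head,
 *		.init		= my_init,
 *		.owner		= THIS_MODULE,
 *	};
 *
 *	register_qdisc(&my_fifo_ops);	// declared in net/pkt_sched.h
 */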

struct tcf_result {
	union {
		struct {
			unsigned long	class;
			u32		classid;
		};
		const struct tcf_proto *goto_tp;

		/* used by the TC_ACT_REINSERT action */
		struct {
			bool		ingress;
			struct gnet_stats_queue *qstats;
		};
	};
};

struct tcf_chain;

struct tcf_proto_ops {
	struct list_head	head;
	char			kind[IFNAMSIZ];

	int			(*classify)(struct sk_buff *,
					    const struct tcf_proto *,
					    struct tcf_result *);
	int			(*init)(struct tcf_proto*);
	void			(*destroy)(struct tcf_proto *tp,
					   struct netlink_ext_ack *extack);

	void*			(*get)(struct tcf_proto*, u32 handle);
	int			(*change)(struct net *net, struct sk_buff *,
					struct tcf_proto*, unsigned long,
					u32 handle, struct nlattr **,
					void **, bool,
					struct netlink_ext_ack *);
	int			(*delete)(struct tcf_proto *tp, void *arg,
					  bool *last,
					  struct netlink_ext_ack *);
	void			(*walk)(struct tcf_proto*, struct tcf_walker *arg);
	int			(*reoffload)(struct tcf_proto *tp, bool add,
					     tc_setup_cb_t *cb, void *cb_priv,
					     struct netlink_ext_ack *extack);
	void			(*bind_class)(void *, u32, unsigned long);
	void *			(*tmplt_create)(struct net *net,
						struct tcf_chain *chain,
						struct nlattr **tca,
						struct netlink_ext_ack *extack);
	void			(*tmplt_destroy)(void *tmplt_priv);

	/* rtnetlink specific */
	int			(*dump)(struct net*, struct tcf_proto*, void *,
					struct sk_buff *skb, struct tcmsg*);
	int			(*tmplt_dump)(struct sk_buff *skb,
					      struct net *net,
					      void *tmplt_priv);

	struct module		*owner;
};

struct tcf_proto {
	/* Fast access part */
	struct tcf_proto __rcu	*next;
	void __rcu		*root;

	/* called under RCU BH lock */
	int			(*classify)(struct sk_buff *,
					    const struct tcf_proto *,
					    struct tcf_result *);
	__be16			protocol;

	/* All the rest */
	u32			prio;
	void			*data;
	const struct tcf_proto_ops	*ops;
	struct tcf_chain	*chain;
	struct rcu_head		rcu;
};

struct qdisc_skb_cb {
	unsigned int		pkt_len;
	u16			slave_dev_queue_mapping;
	u16			tc_classid;
#define QDISC_CB_PRIV_LEN 20
	unsigned char		data[QDISC_CB_PRIV_LEN];
};

typedef void tcf_chain_head_change_t(struct tcf_proto *tp_head, void *priv);

struct tcf_chain {
	struct tcf_proto __rcu *filter_chain;
	struct list_head list;
	struct tcf_block *block;
	u32 index; /* chain index */
	unsigned int refcnt;
	unsigned int action_refcnt;
	bool explicitly_created;
	const struct tcf_proto_ops *tmplt_ops;
	void *tmplt_priv;
};

struct tcf_block {
	struct list_head chain_list;
	u32 index; /* block index for shared blocks */
	unsigned int refcnt;
	struct net *net;
	struct Qdisc *q;
	struct list_head cb_list;
	struct list_head owner_list;
	bool keep_dst;
	unsigned int offloadcnt; /* Number of offloaded filters */
	unsigned int nooffloaddevcnt; /* Number of devs unable to do offload */
	struct {
		struct tcf_chain *chain;
		struct list_head filter_chain_list;
	} chain0;
};

static inline void tcf_block_offload_inc(struct tcf_block *block, u32 *flags)
{
	if (*flags & TCA_CLS_FLAGS_IN_HW)
		return;
	*flags |= TCA_CLS_FLAGS_IN_HW;
	block->offloadcnt++;
}

static inline void tcf_block_offload_dec(struct tcf_block *block, u32 *flags)
{
	if (!(*flags & TCA_CLS_FLAGS_IN_HW))
		return;
	*flags &= ~TCA_CLS_FLAGS_IN_HW;
	block->offloadcnt--;
}

static inline void
tc_cls_offload_cnt_update(struct tcf_block *block, u32 *cnt,
			  u32 *flags, bool add)
{
	if (add) {
		if (!*cnt)
			tcf_block_offload_inc(block, flags);
		(*cnt)++;
	} else {
		(*cnt)--;
		if (!*cnt)
			tcf_block_offload_dec(block, flags);
	}
}
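
/*
 * Usage sketch (illustrative): a classifier tracking how many devices
 * accepted a hardware offload bumps a per-filter counter and lets the
 * helper keep block->offloadcnt and the TCA_CLS_FLAGS_IN_HW flag in sync:
 *
 *	if (!err)	// one more device offloaded the filter
 *		tc_cls_offload_cnt_update(block, &f->in_hw_count,
 *					  &f->flags, true);
 *
 * f->in_hw_count and f->flags name fields of a hypothetical filter struct.
 */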

static inline void qdisc_cb_private_validate(const struct sk_buff *skb, int sz)
{
	struct qdisc_skb_cb *qcb;

	BUILD_BUG_ON(sizeof(skb->cb) < offsetof(struct qdisc_skb_cb, data) + sz);
	BUILD_BUG_ON(sizeof(qcb->data) < sz);
}
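
/*
 * Usage sketch (illustrative): a qdisc that stashes private per-packet
 * state in the cb[] area validates the size once per accessor, e.g.:
 *
 *	struct my_skb_cb {			// hypothetical
 *		u64 time_to_send;
 *	};
 *
 *	static struct my_skb_cb *my_skb_cb(struct sk_buff *skb)
 *	{
 *		qdisc_cb_private_validate(skb, sizeof(struct my_skb_cb));
 *		return (struct my_skb_cb *)qdisc_skb_cb(skb)->data;
 *	}
 *
 * sch_netem uses this pattern for its per-skb timestamps.
 */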

static inline int qdisc_qlen_cpu(const struct Qdisc *q)
{
	return this_cpu_ptr(q->cpu_qstats)->qlen;
}

static inline int qdisc_qlen(const struct Qdisc *q)
{
	return q->q.qlen;
}

static inline int qdisc_qlen_sum(const struct Qdisc *q)
{
	__u32 qlen = q->qstats.qlen;
	int i;

	if (q->flags & TCQ_F_NOLOCK) {
		for_each_possible_cpu(i)
			qlen += per_cpu_ptr(q->cpu_qstats, i)->qlen;
	} else {
		qlen += q->q.qlen;
	}

	return qlen;
}

static inline struct qdisc_skb_cb *qdisc_skb_cb(const struct sk_buff *skb)
{
	return (struct qdisc_skb_cb *)skb->cb;
}

static inline spinlock_t *qdisc_lock(struct Qdisc *qdisc)
{
	return &qdisc->q.lock;
}

static inline struct Qdisc *qdisc_root(const struct Qdisc *qdisc)
{
	struct Qdisc *q = rcu_dereference_rtnl(qdisc->dev_queue->qdisc);

	return q;
}

static inline struct Qdisc *qdisc_root_sleeping(const struct Qdisc *qdisc)
{
	return qdisc->dev_queue->qdisc_sleeping;
}

/* The qdisc root lock is a mechanism by which the top level
 * of a qdisc tree can be locked from any qdisc node in the
 * forest. This allows changing the configuration of some
 * aspect of the qdisc tree while blocking out asynchronous
 * qdisc access in the packet processing paths.
 *
 * It is only legal to do this when the root will not change
 * on us. Otherwise we'll potentially lock the wrong qdisc
 * root. This is enforced by holding the RTNL semaphore, which
 * all users of this lock accessor must do.
 */
static inline spinlock_t *qdisc_root_lock(const struct Qdisc *qdisc)
{
	struct Qdisc *root = qdisc_root(qdisc);

	ASSERT_RTNL();
	return qdisc_lock(root);
}

static inline spinlock_t *qdisc_root_sleeping_lock(const struct Qdisc *qdisc)
{
	struct Qdisc *root = qdisc_root_sleeping(qdisc);

	ASSERT_RTNL();
	return qdisc_lock(root);
}

static inline seqcount_t *qdisc_root_sleeping_running(const struct Qdisc *qdisc)
{
	struct Qdisc *root = qdisc_root_sleeping(qdisc);

	ASSERT_RTNL();
	return &root->running;
}

static inline struct net_device *qdisc_dev(const struct Qdisc *qdisc)
{
	return qdisc->dev_queue->dev;
}

static inline void sch_tree_lock(const struct Qdisc *q)
{
	spin_lock_bh(qdisc_root_sleeping_lock(q));
}

static inline void sch_tree_unlock(const struct Qdisc *q)
{
	spin_unlock_bh(qdisc_root_sleeping_lock(q));
}
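
/*
 * Usage sketch (illustrative): qdisc ->change() handlers take the tree
 * lock around updates that race with the datapath, e.g.:
 *
 *	sch_tree_lock(sch);
 *	q->limit = new_limit;	// hypothetical private field
 *	sch_tree_unlock(sch);
 *
 * RTNL must already be held, since these helpers resolve the root qdisc.
 */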

extern struct Qdisc noop_qdisc;
extern struct Qdisc_ops noop_qdisc_ops;
extern struct Qdisc_ops pfifo_fast_ops;
extern struct Qdisc_ops mq_qdisc_ops;
extern struct Qdisc_ops noqueue_qdisc_ops;
extern const struct Qdisc_ops *default_qdisc_ops;
static inline const struct Qdisc_ops *
get_default_qdisc_ops(const struct net_device *dev, int ntx)
{
	return ntx < dev->real_num_tx_queues ?
		default_qdisc_ops : &pfifo_fast_ops;
}

struct Qdisc_class_common {
	u32			classid;
	struct hlist_node	hnode;
};

struct Qdisc_class_hash {
	struct hlist_head	*hash;
	unsigned int		hashsize;
	unsigned int		hashmask;
	unsigned int		hashelems;
};

static inline unsigned int qdisc_class_hash(u32 id, u32 mask)
{
	id ^= id >> 8;
	id ^= id >> 4;
	return id & mask;
}

static inline struct Qdisc_class_common *
qdisc_class_find(const struct Qdisc_class_hash *hash, u32 id)
{
	struct Qdisc_class_common *cl;
	unsigned int h;

	if (!id)
		return NULL;

	h = qdisc_class_hash(id, hash->hashmask);
	hlist_for_each_entry(cl, &hash->hash[h], hnode) {
		if (cl->classid == id)
			return cl;
	}
	return NULL;
}
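
/*
 * Usage sketch (illustrative): classful qdiscs embed Qdisc_class_common
 * in their class struct and recover it with container_of(), e.g.:
 *
 *	struct my_class {			// hypothetical
 *		struct Qdisc_class_common common;
 *		...
 *	};
 *
 *	struct Qdisc_class_common *cc = qdisc_class_find(&q->clhash, classid);
 *	struct my_class *cl = cc ? container_of(cc, struct my_class, common)
 *				 : NULL;
 *
 * HTB and HFSC follow this pattern for their class lookups.
 */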

static inline int tc_classid_to_hwtc(struct net_device *dev, u32 classid)
{
	u32 hwtc = TC_H_MIN(classid) - TC_H_MIN_PRIORITY;

	return (hwtc < netdev_get_num_tc(dev)) ? hwtc : -EINVAL;
}

int qdisc_class_hash_init(struct Qdisc_class_hash *);
void qdisc_class_hash_insert(struct Qdisc_class_hash *,
			     struct Qdisc_class_common *);
void qdisc_class_hash_remove(struct Qdisc_class_hash *,
			     struct Qdisc_class_common *);
void qdisc_class_hash_grow(struct Qdisc *, struct Qdisc_class_hash *);
void qdisc_class_hash_destroy(struct Qdisc_class_hash *);

int dev_qdisc_change_tx_queue_len(struct net_device *dev);
void dev_init_scheduler(struct net_device *dev);
void dev_shutdown(struct net_device *dev);
void dev_activate(struct net_device *dev);
void dev_deactivate(struct net_device *dev);
void dev_deactivate_many(struct list_head *head);
struct Qdisc *dev_graft_qdisc(struct netdev_queue *dev_queue,
			      struct Qdisc *qdisc);
void qdisc_reset(struct Qdisc *qdisc);
void qdisc_put(struct Qdisc *qdisc);
void qdisc_put_unlocked(struct Qdisc *qdisc);
void qdisc_tree_reduce_backlog(struct Qdisc *qdisc, unsigned int n,
			       unsigned int len);
struct Qdisc *qdisc_alloc(struct netdev_queue *dev_queue,
			  const struct Qdisc_ops *ops,
			  struct netlink_ext_ack *extack);
void qdisc_free(struct Qdisc *qdisc);
struct Qdisc *qdisc_create_dflt(struct netdev_queue *dev_queue,
				const struct Qdisc_ops *ops, u32 parentid,
				struct netlink_ext_ack *extack);
void __qdisc_calculate_pkt_len(struct sk_buff *skb,
			       const struct qdisc_size_table *stab);
int skb_do_redirect(struct sk_buff *);

static inline void skb_reset_tc(struct sk_buff *skb)
{
#ifdef CONFIG_NET_CLS_ACT
	skb->tc_redirected = 0;
#endif
}

static inline bool skb_is_tc_redirected(const struct sk_buff *skb)
{
#ifdef CONFIG_NET_CLS_ACT
	return skb->tc_redirected;
#else
	return false;
#endif
}

static inline bool skb_at_tc_ingress(const struct sk_buff *skb)
{
#ifdef CONFIG_NET_CLS_ACT
	return skb->tc_at_ingress;
#else
	return false;
#endif
}

static inline bool skb_skip_tc_classify(struct sk_buff *skb)
{
#ifdef CONFIG_NET_CLS_ACT
	if (skb->tc_skip_classify) {
		skb->tc_skip_classify = 0;
		return true;
	}
#endif
	return false;
}

/* Reset all TX qdiscs greater than index of a device.  */
static inline void qdisc_reset_all_tx_gt(struct net_device *dev, unsigned int i)
{
	struct Qdisc *qdisc;

	for (; i < dev->num_tx_queues; i++) {
		qdisc = rtnl_dereference(netdev_get_tx_queue(dev, i)->qdisc);
		if (qdisc) {
			spin_lock_bh(qdisc_lock(qdisc));
			qdisc_reset(qdisc);
			spin_unlock_bh(qdisc_lock(qdisc));
		}
	}
}

static inline void qdisc_reset_all_tx(struct net_device *dev)
{
	qdisc_reset_all_tx_gt(dev, 0);
}

/* Are all TX queues of the device empty?  */
static inline bool qdisc_all_tx_empty(const struct net_device *dev)
{
	unsigned int i;

	rcu_read_lock();
	for (i = 0; i < dev->num_tx_queues; i++) {
		struct netdev_queue *txq = netdev_get_tx_queue(dev, i);
		const struct Qdisc *q = rcu_dereference(txq->qdisc);

		if (q->q.qlen) {
			rcu_read_unlock();
			return false;
		}
	}
	rcu_read_unlock();
	return true;
}

/* Are any of the TX qdiscs changing?  */
static inline bool qdisc_tx_changing(const struct net_device *dev)
{
	unsigned int i;

	for (i = 0; i < dev->num_tx_queues; i++) {
		struct netdev_queue *txq = netdev_get_tx_queue(dev, i);
		if (rcu_access_pointer(txq->qdisc) != txq->qdisc_sleeping)
			return true;
	}
	return false;
}

/* Is the device using the noop qdisc on all queues?  */
static inline bool qdisc_tx_is_noop(const struct net_device *dev)
{
	unsigned int i;

	for (i = 0; i < dev->num_tx_queues; i++) {
		struct netdev_queue *txq = netdev_get_tx_queue(dev, i);
		if (rcu_access_pointer(txq->qdisc) != &noop_qdisc)
			return false;
	}
	return true;
}

static inline unsigned int qdisc_pkt_len(const struct sk_buff *skb)
{
	return qdisc_skb_cb(skb)->pkt_len;
}

/* additional qdisc xmit flags (NET_XMIT_MASK in linux/netdevice.h) */
enum net_xmit_qdisc_t {
	__NET_XMIT_STOLEN = 0x00010000,
	__NET_XMIT_BYPASS = 0x00020000,
};

#ifdef CONFIG_NET_CLS_ACT
#define net_xmit_drop_count(e)	((e) & __NET_XMIT_STOLEN ? 0 : 1)
#else
#define net_xmit_drop_count(e)	(1)
#endif

static inline void qdisc_calculate_pkt_len(struct sk_buff *skb,
					   const struct Qdisc *sch)
{
#ifdef CONFIG_NET_SCHED
	struct qdisc_size_table *stab = rcu_dereference_bh(sch->stab);

	if (stab)
		__qdisc_calculate_pkt_len(skb, stab);
#endif
}

static inline int qdisc_enqueue(struct sk_buff *skb, struct Qdisc *sch,
				struct sk_buff **to_free)
{
	qdisc_calculate_pkt_len(skb, sch);
	return sch->enqueue(skb, sch, to_free);
}
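
/*
 * Usage sketch (illustrative): a parent qdisc enqueues into a child and
 * frees any dropped packets outside the qdisc lock:
 *
 *	struct sk_buff *to_free = NULL;
 *	int ret = qdisc_enqueue(skb, child, &to_free);
 *
 *	if (to_free)
 *		kfree_skb_list(to_free);
 *
 * "child" stands for any inner qdisc; the to_free chain is built by
 * __qdisc_drop() below.
 */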

static inline bool qdisc_is_percpu_stats(const struct Qdisc *q)
{
	return q->flags & TCQ_F_CPUSTATS;
}

static inline void _bstats_update(struct gnet_stats_basic_packed *bstats,
				  __u64 bytes, __u32 packets)
{
	bstats->bytes += bytes;
	bstats->packets += packets;
}

static inline void bstats_update(struct gnet_stats_basic_packed *bstats,
				 const struct sk_buff *skb)
{
	_bstats_update(bstats,
		       qdisc_pkt_len(skb),
		       skb_is_gso(skb) ? skb_shinfo(skb)->gso_segs : 1);
}

static inline void _bstats_cpu_update(struct gnet_stats_basic_cpu *bstats,
				      __u64 bytes, __u32 packets)
{
	u64_stats_update_begin(&bstats->syncp);
	_bstats_update(&bstats->bstats, bytes, packets);
	u64_stats_update_end(&bstats->syncp);
}

static inline void bstats_cpu_update(struct gnet_stats_basic_cpu *bstats,
				     const struct sk_buff *skb)
{
	u64_stats_update_begin(&bstats->syncp);
	bstats_update(&bstats->bstats, skb);
	u64_stats_update_end(&bstats->syncp);
}

static inline void qdisc_bstats_cpu_update(struct Qdisc *sch,
					   const struct sk_buff *skb)
{
	bstats_cpu_update(this_cpu_ptr(sch->cpu_bstats), skb);
}

static inline void qdisc_bstats_update(struct Qdisc *sch,
				       const struct sk_buff *skb)
{
	bstats_update(&sch->bstats, skb);
}

static inline void qdisc_qstats_backlog_dec(struct Qdisc *sch,
					    const struct sk_buff *skb)
{
	sch->qstats.backlog -= qdisc_pkt_len(skb);
}

static inline void qdisc_qstats_cpu_backlog_dec(struct Qdisc *sch,
						const struct sk_buff *skb)
{
	this_cpu_sub(sch->cpu_qstats->backlog, qdisc_pkt_len(skb));
}

static inline void qdisc_qstats_backlog_inc(struct Qdisc *sch,
					    const struct sk_buff *skb)
{
	sch->qstats.backlog += qdisc_pkt_len(skb);
}

static inline void qdisc_qstats_cpu_backlog_inc(struct Qdisc *sch,
						const struct sk_buff *skb)
{
	this_cpu_add(sch->cpu_qstats->backlog, qdisc_pkt_len(skb));
}

static inline void qdisc_qstats_cpu_qlen_inc(struct Qdisc *sch)
{
	this_cpu_inc(sch->cpu_qstats->qlen);
}

static inline void qdisc_qstats_cpu_qlen_dec(struct Qdisc *sch)
{
	this_cpu_dec(sch->cpu_qstats->qlen);
}

static inline void qdisc_qstats_cpu_requeues_inc(struct Qdisc *sch)
{
	this_cpu_inc(sch->cpu_qstats->requeues);
}

static inline void __qdisc_qstats_drop(struct Qdisc *sch, int count)
{
	sch->qstats.drops += count;
}

static inline void qstats_drop_inc(struct gnet_stats_queue *qstats)
{
	qstats->drops++;
}

static inline void qstats_overlimit_inc(struct gnet_stats_queue *qstats)
{
	qstats->overlimits++;
}

static inline void qdisc_qstats_drop(struct Qdisc *sch)
{
	qstats_drop_inc(&sch->qstats);
}

static inline void qdisc_qstats_cpu_drop(struct Qdisc *sch)
{
	this_cpu_inc(sch->cpu_qstats->drops);
}

static inline void qdisc_qstats_overlimit(struct Qdisc *sch)
{
	sch->qstats.overlimits++;
}

static inline void qdisc_skb_head_init(struct qdisc_skb_head *qh)
{
	qh->head = NULL;
	qh->tail = NULL;
	qh->qlen = 0;
}

static inline void __qdisc_enqueue_tail(struct sk_buff *skb,
					struct qdisc_skb_head *qh)
{
	struct sk_buff *last = qh->tail;

	if (last) {
		skb->next = NULL;
		last->next = skb;
		qh->tail = skb;
	} else {
		qh->tail = skb;
		qh->head = skb;
	}
	qh->qlen++;
}

static inline int qdisc_enqueue_tail(struct sk_buff *skb, struct Qdisc *sch)
{
	__qdisc_enqueue_tail(skb, &sch->q);
	qdisc_qstats_backlog_inc(sch, skb);
	return NET_XMIT_SUCCESS;
}

static inline void __qdisc_enqueue_head(struct sk_buff *skb,
					struct qdisc_skb_head *qh)
{
	skb->next = qh->head;

	if (!qh->head)
		qh->tail = skb;
	qh->head = skb;
	qh->qlen++;
}

static inline struct sk_buff *__qdisc_dequeue_head(struct qdisc_skb_head *qh)
{
	struct sk_buff *skb = qh->head;

	if (likely(skb != NULL)) {
		qh->head = skb->next;
		qh->qlen--;
		if (qh->head == NULL)
			qh->tail = NULL;
		skb->next = NULL;
	}

	return skb;
}

static inline struct sk_buff *qdisc_dequeue_head(struct Qdisc *sch)
{
	struct sk_buff *skb = __qdisc_dequeue_head(&sch->q);

	if (likely(skb != NULL)) {
		qdisc_qstats_backlog_dec(sch, skb);
		qdisc_bstats_update(sch, skb);
	}

	return skb;
}
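
/*
 * These helpers make a plain FIFO nearly free to write: enqueue at the
 * tail, dequeue at the head, and the backlog/bstats bookkeeping comes
 * along. A sketch of a bounded-FIFO enqueue (pfifo in net/sched/sch_fifo.c
 * works along these lines):
 *
 *	if (likely(sch->q.qlen < sch->limit))
 *		return qdisc_enqueue_tail(skb, sch);
 *	return qdisc_drop(skb, sch, to_free);
 *
 * qdisc_drop(), defined later in sch_generic.h, chains the skb onto the
 * to_free list and bumps the drop statistics.
 */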

/* Instead of calling kfree_skb() while root qdisc lock is held,
 * queue the skb for future freeing at end of __dev_xmit_skb()
 */
static inline void __qdisc_drop(struct sk_buff *skb, struct sk_buff **to_free)
{
	skb->next = *to_free;
	*to_free = skb;
}

static inline void __qdisc_drop_all(struct sk_buff *skb,
				    struct sk_buff **to_free)
{
	if (skb->prev)
		skb->prev->next = *to_free;
	else
		skb->next = *to_free;
	*to_free = skb;
}

static inline unsigned int __qdisc_queue_drop_head(struct Qdisc *sch,
						   struct qdisc_skb_head *qh,
						   struct sk_buff **to_free)
{
	struct sk_buff *skb = __qdisc_dequeue_head(qh);

	if (likely(skb != NULL)) {
		unsigned int len = qdisc_pkt_len(skb);

		qdisc_qstats_backlog_dec(sch, skb);
		__qdisc_drop(skb, to_free);
		return len;
	}

	return 0;
}

static inline unsigned int qdisc_queue_drop_head(struct Qdisc *sch,
						 struct sk_buff **to_free)
{
	return __qdisc_queue_drop_head(sch, &sch->q, to_free);
}
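
/* Example: a head-dropping FIFO uses this to make room for a new packet
 * instead of rejecting it.  A sketch in the spirit of pfifo_head_drop;
 * the function name is illustrative:
 *
 *	static int example_head_drop_enqueue(struct sk_buff *skb,
 *					     struct Qdisc *sch,
 *					     struct sk_buff **to_free)
 *	{
 *		if (likely(sch->q.qlen < sch->limit))
 *			return qdisc_enqueue_tail(skb, sch);
 *
 *		__qdisc_queue_drop_head(sch, &sch->q, to_free);
 *		qdisc_qstats_drop(sch);
 *		qdisc_enqueue_tail(skb, sch);
 *		return NET_XMIT_CN;
 *	}
 */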

static inline struct sk_buff *qdisc_peek_head(struct Qdisc *sch)
{
	const struct qdisc_skb_head *qh = &sch->q;

	return qh->head;
}

/* generic pseudo peek method for non-work-conserving qdiscs */
static inline struct sk_buff *qdisc_peek_dequeued(struct Qdisc *sch)
{
	struct sk_buff *skb = skb_peek(&sch->gso_skb);

	/* we can reuse ->gso_skb because peek isn't called for root qdiscs */
	if (!skb) {
		skb = sch->dequeue(sch);

		if (skb) {
			__skb_queue_head(&sch->gso_skb, skb);
			/* it's still part of the queue */
			qdisc_qstats_backlog_inc(sch, skb);
			sch->q.qlen++;
		}
	}

	return skb;
}

/* use instead of qdisc->dequeue() for all qdiscs queried with ->peek() */
static inline struct sk_buff *qdisc_dequeue_peeked(struct Qdisc *sch)
{
	struct sk_buff *skb = skb_peek(&sch->gso_skb);

	if (skb) {
		skb = __skb_dequeue(&sch->gso_skb);
		qdisc_qstats_backlog_dec(sch, skb);
		sch->q.qlen--;
	} else {
		skb = sch->dequeue(sch);
	}

	return skb;
}
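
/* Example: a shaper typically peeks first and only commits the dequeue once
 * its rate allows the packet to be sent.  A sketch loosely following tbf;
 * example_sched_data and tokens_available() are hypothetical:
 *
 *	static struct sk_buff *example_shaper_dequeue(struct Qdisc *sch)
 *	{
 *		struct example_sched_data *q = qdisc_priv(sch);
 *		struct sk_buff *skb = q->qdisc->ops->peek(q->qdisc);
 *
 *		if (skb && tokens_available(q, qdisc_pkt_len(skb)))
 *			return qdisc_dequeue_peeked(q->qdisc);
 *
 *		return NULL;
 *	}
 */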

static inline void __qdisc_reset_queue(struct qdisc_skb_head *qh)
{
	/*
	 * We do not know the backlog in bytes of this list;
	 * it is up to the caller to correct it
	 */
	ASSERT_RTNL();
	if (qh->qlen) {
		rtnl_kfree_skbs(qh->head, qh->tail);

		qh->head = NULL;
		qh->tail = NULL;
		qh->qlen = 0;
	}
}

static inline void qdisc_reset_queue(struct Qdisc *sch)
{
	__qdisc_reset_queue(&sch->q);
	sch->qstats.backlog = 0;
}

static inline struct Qdisc *qdisc_replace(struct Qdisc *sch, struct Qdisc *new,
					  struct Qdisc **pold)
{
	struct Qdisc *old;

	sch_tree_lock(sch);
	old = *pold;
	*pold = new;
	if (old != NULL) {
		unsigned int qlen = old->q.qlen;
		unsigned int backlog = old->qstats.backlog;

		qdisc_reset(old);
		qdisc_tree_reduce_backlog(old, qlen, backlog);
	}
	sch_tree_unlock(sch);

	return old;
}
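
/* Example: qdisc_replace() typically implements the heart of a classful
 * qdisc's ->graft() operation.  A sketch with the netlink extack argument
 * omitted; example_sched_data and its @child member are hypothetical:
 *
 *	static int example_graft(struct Qdisc *sch, unsigned long arg,
 *				 struct Qdisc *new, struct Qdisc **old)
 *	{
 *		struct example_sched_data *q = qdisc_priv(sch);
 *
 *		if (!new)
 *			new = &noop_qdisc;
 *		*old = qdisc_replace(sch, new, &q->child);
 *		return 0;
 *	}
 */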

static inline void rtnl_qdisc_drop(struct sk_buff *skb, struct Qdisc *sch)
{
	rtnl_kfree_skbs(skb, skb);
	qdisc_qstats_drop(sch);
}

static inline int qdisc_drop_cpu(struct sk_buff *skb, struct Qdisc *sch,
				 struct sk_buff **to_free)
{
	__qdisc_drop(skb, to_free);
	qdisc_qstats_cpu_drop(sch);

	return NET_XMIT_DROP;
}

static inline int qdisc_drop(struct sk_buff *skb, struct Qdisc *sch,
			     struct sk_buff **to_free)
{
	__qdisc_drop(skb, to_free);
	qdisc_qstats_drop(sch);

	return NET_XMIT_DROP;
}

static inline int qdisc_drop_all(struct sk_buff *skb, struct Qdisc *sch,
				 struct sk_buff **to_free)
{
	__qdisc_drop_all(skb, to_free);
	qdisc_qstats_drop(sch);

	return NET_XMIT_DROP;
}
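
/* Example: qdisc_drop_all() exists for enqueue paths that may hold a whole
 * segment chain rather than a single skb.  A sketch of the pattern used by
 * sch_netem, which links segments via skb->next and points skb->prev at the
 * last one before enqueueing:
 *
 *	if (unlikely(sch->q.qlen >= sch->limit))
 *		return qdisc_drop_all(skb, sch, to_free);
 */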

/* Length to Time (L2T) lookup in a qdisc_rate_table, to determine how
 * long it will take to send a packet given its size.
 */
static inline u32 qdisc_l2t(struct qdisc_rate_table *rtab, unsigned int pktlen)
{
	int slot = pktlen + rtab->rate.cell_align + rtab->rate.overhead;

	if (slot < 0)
		slot = 0;
	slot >>= rtab->rate.cell_log;
	if (slot > 255)
		return rtab->data[255] * (slot >> 8) + rtab->data[slot & 0xFF];
	return rtab->data[slot];
}
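
/* For instance, with cell_log = 3 and no align or overhead, a 1000 byte
 * packet falls in slot 1000 >> 3 = 125 and the lookup returns
 * rtab->data[125], the precomputed time to transmit a packet of that size.
 */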

struct psched_ratecfg {
	u64	rate_bytes_ps; /* bytes per second */
	u32	mult;
	u16	overhead;
	u8	linklayer;
	u8	shift;
};

static inline u64 psched_l2t_ns(const struct psched_ratecfg *r,
				unsigned int len)
{
	len += r->overhead;

	if (unlikely(r->linklayer == TC_LINKLAYER_ATM))
		return ((u64)(DIV_ROUND_UP(len, 48) * 53) * r->mult) >> r->shift;

	return ((u64)len * r->mult) >> r->shift;
}

void psched_ratecfg_precompute(struct psched_ratecfg *r,
			       const struct tc_ratespec *conf,
			       u64 rate64);

static inline void psched_ratecfg_getrate(struct tc_ratespec *res,
					  const struct psched_ratecfg *r)
{
	memset(res, 0, sizeof(*res));

	/* legacy struct tc_ratespec has a 32bit @rate field;
	 * Qdiscs using a 64bit rate should add new attributes
	 * in order to maintain compatibility.
	 */
	res->rate = min_t(u64, r->rate_bytes_ps, ~0U);

	res->overhead = r->overhead;
	res->linklayer = (r->linklayer & TC_LINKLAYER_MASK);
}
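
/* Example: precompute the rate once at configuration time, then convert
 * packet lengths to transmission times in the fast path.  A sketch with an
 * illustrative rate of 12500000 bytes per second (100 Mbit/s); at that rate
 * psched_l2t_ns() reports roughly 120000 ns for a 1500 byte packet:
 *
 *	struct psched_ratecfg r;
 *	struct tc_ratespec conf = { .rate = 12500000 };
 *	u64 ns;
 *
 *	psched_ratecfg_precompute(&r, &conf, 0);
 *	ns = psched_l2t_ns(&r, 1500);
 */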

/* Mini Qdisc serves the specific needs of the ingress/clsact Qdiscs.
 * The fast path only needs to access the filter list and to update stats.
 */
struct mini_Qdisc {
	struct tcf_proto *filter_list;
	struct gnet_stats_basic_cpu __percpu *cpu_bstats;
	struct gnet_stats_queue __percpu *cpu_qstats;
	struct rcu_head rcu;
};

static inline void mini_qdisc_bstats_cpu_update(struct mini_Qdisc *miniq,
						const struct sk_buff *skb)
{
	bstats_cpu_update(this_cpu_ptr(miniq->cpu_bstats), skb);
}

static inline void mini_qdisc_qstats_cpu_drop(struct mini_Qdisc *miniq)
{
	this_cpu_inc(miniq->cpu_qstats->drops);
}

struct mini_Qdisc_pair {
	struct mini_Qdisc miniq1;
	struct mini_Qdisc miniq2;
	struct mini_Qdisc __rcu **p_miniq;
};

void mini_qdisc_pair_swap(struct mini_Qdisc_pair *miniqp,
			  struct tcf_proto *tp_head);
void mini_qdisc_pair_init(struct mini_Qdisc_pair *miniqp, struct Qdisc *qdisc,
			  struct mini_Qdisc __rcu **p_miniq);
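
/* Example: the ingress fast path dereferences the active mini_Qdisc under
 * RCU and touches nothing but the filter list and the per-cpu stats.  A
 * condensed sketch of sch_handle_ingress(), with most action handling
 * elided:
 *
 *	struct mini_Qdisc *miniq = rcu_dereference_bh(skb->dev->miniq_ingress);
 *	struct tcf_result cl_res;
 *
 *	if (!miniq)
 *		return skb;
 *
 *	mini_qdisc_bstats_cpu_update(miniq, skb);
 *	switch (tcf_classify(skb, miniq->filter_list, &cl_res, false)) {
 *	case TC_ACT_SHOT:
 *		mini_qdisc_qstats_cpu_drop(miniq);
 *		kfree_skb(skb);
 *		return NULL;
 *	...
 *	}
 */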

static inline void skb_tc_reinsert(struct sk_buff *skb, struct tcf_result *res)
{
	struct gnet_stats_queue *stats = res->qstats;
	int ret;

	if (res->ingress)
		ret = netif_receive_skb(skb);
	else
		ret = dev_queue_xmit(skb);
	if (ret && stats)
		qstats_overlimit_inc(stats);
}

#endif