/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __NET_PKT_SCHED_H
#define __NET_PKT_SCHED_H

#include <linux/jiffies.h>
#include <linux/ktime.h>
#include <linux/if_vlan.h>
#include <linux/netdevice.h>
#include <net/sch_generic.h>
#include <net/net_namespace.h>
#include <uapi/linux/pkt_sched.h>

#define DEFAULT_TX_QUEUE_LEN	1000
#define STAB_SIZE_LOG_MAX	30

struct qdisc_walker {
	int	stop;
	int	skip;
	int	count;
	int	(*fn)(struct Qdisc *, unsigned long cl, struct qdisc_walker *);
};

static inline void *qdisc_priv(struct Qdisc *q)
{
	return &q->privdata;
}

static inline struct Qdisc *qdisc_from_priv(void *priv)
{
	return container_of(priv, struct Qdisc, privdata);
}

/*
   Timer resolution MUST BE < 10% of min_schedulable_packet_size/bandwidth

   A normal IP packet is ~512 bytes, hence:

   0.5 Kbyte / 1 Mbyte/sec = 0.5 msec, so we need a 50 usec timer for
   10 Mbit ethernet.

   10 msec resolution -> < 50 Kbit/sec.

   The result: a [34]86 is not a good choice for a QoS router :-(

   Things are not so bad, though, because we can use an artificial
   clock, evaluated by integrating the network data flow at the most
   critical places.
 */
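
/*
 * Worked example of the bound above (illustrative only, not part of the
 * kernel API). The helper name is hypothetical; div64_u64() is real but
 * would need <linux/math64.h>.
 *
 *	static inline u64 required_timer_res_ns(u64 min_pkt_bytes,
 *						u64 bytes_per_sec)
 *	{
 *		// serialization delay of the smallest schedulable packet
 *		u64 delay_ns = div64_u64(min_pkt_bytes * NSEC_PER_SEC,
 *					 bytes_per_sec);
 *
 *		// keep the resolution below 10% of that delay
 *		return div64_u64(delay_ns, 10);
 *	}
 *
 * For 512 bytes at 1 Mbyte/sec this yields ~51 usec, matching the
 * 50 usec figure quoted above.
 */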

typedef u64	psched_time_t;
typedef long	psched_tdiff_t;

/* Avoid doing 64 bit divide */
#define PSCHED_SHIFT			6
#define PSCHED_TICKS2NS(x)		((s64)(x) << PSCHED_SHIFT)
#define PSCHED_NS2TICKS(x)		((x) >> PSCHED_SHIFT)

#define PSCHED_TICKS_PER_SEC		PSCHED_NS2TICKS(NSEC_PER_SEC)
#define PSCHED_PASTPERFECT		0

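/*
 * Example (illustrative): with PSCHED_SHIFT == 6 a psched tick is 64 ns,
 * so PSCHED_TICKS_PER_SEC is NSEC_PER_SEC >> 6 == 15625000. Converting
 * ns -> ticks -> ns truncates the value to a multiple of 64 ns:
 *
 *	u64 ns = 1000;				// 1 usec
 *	psched_time_t t = PSCHED_NS2TICKS(ns);	// 15 ticks
 *	u64 back = PSCHED_TICKS2NS(t);		// 960 ns
 */
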
static inline psched_time_t psched_get_time(void)
{
	return PSCHED_NS2TICKS(ktime_get_ns());
}

static inline psched_tdiff_t
psched_tdiff_bounded(psched_time_t tv1, psched_time_t tv2, psched_time_t bound)
{
	return min(tv1 - tv2, bound);
}

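/*
 * Usage sketch (q->t_c and q->buffer are hypothetical fields, in the
 * style of token-bucket shapers): bound the elapsed time so that a long
 * idle period cannot accumulate an unlimited transmission credit.
 *
 *	psched_tdiff_t toks = psched_tdiff_bounded(psched_get_time(),
 *						   q->t_c, q->buffer);
 */
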
struct qdisc_watchdog {
	u64		last_expires;
	struct hrtimer	timer;
	struct Qdisc	*qdisc;
};

void qdisc_watchdog_init_clockid(struct qdisc_watchdog *wd, struct Qdisc *qdisc,
				 clockid_t clockid);
void qdisc_watchdog_init(struct qdisc_watchdog *wd, struct Qdisc *qdisc);

void qdisc_watchdog_schedule_range_ns(struct qdisc_watchdog *wd, u64 expires,
				      u64 delta_ns);

static inline void qdisc_watchdog_schedule_ns(struct qdisc_watchdog *wd,
					      u64 expires)
{
	return qdisc_watchdog_schedule_range_ns(wd, expires, 0ULL);
}

static inline void qdisc_watchdog_schedule(struct qdisc_watchdog *wd,
					   psched_time_t expires)
{
	qdisc_watchdog_schedule_ns(wd, PSCHED_TICKS2NS(expires));
}

void qdisc_watchdog_cancel(struct qdisc_watchdog *wd);

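/*
 * Typical watchdog life cycle (sketch; struct my_sched_data and
 * next_tx_time_ns are hypothetical, the pattern mirrors in-tree shapers
 * such as sch_tbf):
 *
 *	// in ->init():
 *	qdisc_watchdog_init(&q->watchdog, sch);
 *
 *	// in ->dequeue(), when the head packet may not be sent yet:
 *	qdisc_watchdog_schedule_ns(&q->watchdog, next_tx_time_ns);
 *
 *	// in ->reset()/->destroy():
 *	qdisc_watchdog_cancel(&q->watchdog);
 */
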
extern struct Qdisc_ops pfifo_qdisc_ops;
extern struct Qdisc_ops bfifo_qdisc_ops;
extern struct Qdisc_ops pfifo_head_drop_qdisc_ops;

int fifo_set_limit(struct Qdisc *q, unsigned int limit);
struct Qdisc *fifo_create_dflt(struct Qdisc *sch, struct Qdisc_ops *ops,
			       unsigned int limit,
			       struct netlink_ext_ack *extack);

int register_qdisc(struct Qdisc_ops *qops);
int unregister_qdisc(struct Qdisc_ops *qops);
void qdisc_get_default(char *id, size_t len);
int qdisc_set_default(const char *id);

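/*
 * Registration sketch for a scheduler module (my_qdisc_ops and the
 * init/exit names are hypothetical):
 *
 *	static int __init my_qdisc_module_init(void)
 *	{
 *		return register_qdisc(&my_qdisc_ops);
 *	}
 *
 *	static void __exit my_qdisc_module_exit(void)
 *	{
 *		unregister_qdisc(&my_qdisc_ops);
 *	}
 *
 *	module_init(my_qdisc_module_init);
 *	module_exit(my_qdisc_module_exit);
 */
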
void qdisc_hash_add(struct Qdisc *q, bool invisible);
void qdisc_hash_del(struct Qdisc *q);
struct Qdisc *qdisc_lookup(struct net_device *dev, u32 handle);
struct Qdisc *qdisc_lookup_rcu(struct net_device *dev, u32 handle);
struct qdisc_rate_table *qdisc_get_rtab(struct tc_ratespec *r,
					struct nlattr *tab,
					struct netlink_ext_ack *extack);
void qdisc_put_rtab(struct qdisc_rate_table *tab);
void qdisc_put_stab(struct qdisc_size_table *tab);
void qdisc_warn_nonwc(const char *txt, struct Qdisc *qdisc);
bool sch_direct_xmit(struct sk_buff *skb, struct Qdisc *q,
		     struct net_device *dev, struct netdev_queue *txq,
		     spinlock_t *root_lock, bool validate);

void __qdisc_run(struct Qdisc *q);

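/*
 * qdisc_run_begin() returns false when another CPU already owns this
 * qdisc's running state; the owning CPU will notice and transmit the
 * packets we enqueued, so losing the race is harmless.
 */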
static inline void qdisc_run(struct Qdisc *q)
{
	if (qdisc_run_begin(q)) {
		__qdisc_run(q);
		qdisc_run_end(q);
	}
}

/* Calculate the maximal size of a packet seen by the hard_start_xmit
   routine of this device.
 */
static inline unsigned int psched_mtu(const struct net_device *dev)
{
	return dev->mtu + dev->hard_header_len;
}

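/*
 * Example: for an Ethernet device with the default 1500 byte MTU and a
 * 14 byte hard header (ETH_HLEN), psched_mtu() returns 1514.
 */
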
static inline struct net *qdisc_net(struct Qdisc *q)
{
	return dev_net(q->dev_queue->dev);
}

struct tc_cbs_qopt_offload {
	u8 enable;
	s32 queue;
	s32 hicredit;
	s32 locredit;
	s32 idleslope;
	s32 sendslope;
};

struct tc_etf_qopt_offload {
	u8 enable;
	s32 queue;
};

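/*
 * Driver-side sketch (my_disable_cbs()/my_program_cbs() are hypothetical):
 * a driver advertising CBS offload receives tc_cbs_qopt_offload through
 * ->ndo_setup_tc() with type TC_SETUP_QDISC_CBS; ETF offload follows the
 * same pattern with TC_SETUP_QDISC_ETF and tc_etf_qopt_offload.
 *
 *	static int my_setup_tc(struct net_device *dev,
 *			       enum tc_setup_type type, void *type_data)
 *	{
 *		struct tc_cbs_qopt_offload *cbs = type_data;
 *
 *		if (type != TC_SETUP_QDISC_CBS)
 *			return -EOPNOTSUPP;
 *		if (!cbs->enable)
 *			return my_disable_cbs(dev, cbs->queue);
 *		return my_program_cbs(dev, cbs->queue, cbs->idleslope,
 *				      cbs->sendslope, cbs->hicredit,
 *				      cbs->locredit);
 *	}
 */
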
struct tc_taprio_sched_entry {
	u8 command; /* TC_TAPRIO_CMD_* */

	/* The gate_mask in the offloading side refers to traffic classes */
	u32 gate_mask;
	u32 interval;
};

struct tc_taprio_qopt_offload {
	u8 enable;
	ktime_t base_time;
	u64 cycle_time;
	u64 cycle_time_extension;

	size_t num_entries;
	struct tc_taprio_sched_entry entries[];
};

/* Reference counting */
struct tc_taprio_qopt_offload *taprio_offload_get(struct tc_taprio_qopt_offload
						  *offload);
void taprio_offload_free(struct tc_taprio_qopt_offload *offload);

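/*
 * Refcount sketch: a driver that keeps the taprio schedule around after
 * ->ndo_setup_tc() returns must hold its own reference (priv is a
 * hypothetical driver-private struct):
 *
 *	priv->taprio = taprio_offload_get(offload);
 *	// ... program hardware from priv->taprio ...
 *	taprio_offload_free(priv->taprio);	// when done with it
 */
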
/* Ensure skb_mstamp_ns, which might have been populated with the txtime, is
 * not mistaken for a software timestamp, because this will otherwise prevent
 * the dispatch of hardware timestamps to the socket.
 */
static inline void skb_txtime_consumed(struct sk_buff *skb)
{
	skb->tstamp = ktime_set(0, 0);
}

struct tc_skb_cb {
	struct qdisc_skb_cb qdisc_cb;

	u16 mru;
	bool post_ct;
	u16 zone; /* Only valid if post_ct = true */
};

static inline struct tc_skb_cb *tc_skb_cb(const struct sk_buff *skb)
{
	struct tc_skb_cb *cb = (struct tc_skb_cb *)skb->cb;

	BUILD_BUG_ON(sizeof(*cb) > sizeof_field(struct sk_buff, cb));
	return cb;
}

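/*
 * Access sketch: TC code reads and writes its private cb area through
 * tc_skb_cb(); the BUILD_BUG_ON above guarantees at compile time that the
 * layout still fits in skb->cb. E.g. (ct_zone_id is hypothetical):
 *
 *	tc_skb_cb(skb)->post_ct = true;
 *	tc_skb_cb(skb)->zone = ct_zone_id;
 */
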
#endif