/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __NET_PKT_SCHED_H
#define __NET_PKT_SCHED_H

#include <linux/jiffies.h>
#include <linux/ktime.h>
#include <linux/if_vlan.h>
#include <linux/netdevice.h>
#include <net/sch_generic.h>
#include <net/net_namespace.h>
#include <uapi/linux/pkt_sched.h>

#define DEFAULT_TX_QUEUE_LEN	1000
#define STAB_SIZE_LOG_MAX	30

struct qdisc_walker {
	int	stop;
	int	skip;
	int	count;
	int	(*fn)(struct Qdisc *, unsigned long cl, struct qdisc_walker *);
};

static inline void *qdisc_priv(struct Qdisc *q)
{
	return &q->privdata;
}

static inline struct Qdisc *qdisc_from_priv(void *priv)
{
	return container_of(priv, struct Qdisc, privdata);
}

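/*
 * qdisc_priv() and qdisc_from_priv() are exact inverses: for any qdisc
 * q, qdisc_from_priv(qdisc_priv(q)) == q, since container_of() simply
 * subtracts the offset of ->privdata back off the pointer.
 */
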
/*
   Timer resolution MUST BE < 10% of min_schedulable_packet_size/bandwidth

   Normal IP packet size ~ 512 bytes, hence:

   0.5Kbyte/1Mbyte/sec = 0.5msec, so we need a 50usec timer for
   10Mbit ethernet.

   10msec resolution -> <50Kbit/sec.

   The result: a [34]86 is not a good choice for a QoS router :-(

   Things are not so bad, because we may use an artificial
   clock evaluated by integration of network data flow
   in the most critical places.
 */
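
/*
 * The same rule of thumb at modern speeds: at 1Gbit/sec a 512 byte
 * packet lasts ~4usec, so the 10% bound above asks for ~0.4usec
 * resolution; the nanosecond clock behind psched_get_time() below is
 * comfortably within that.
 */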

typedef u64	psched_time_t;
typedef long	psched_tdiff_t;

/* Avoid doing 64 bit divide */
#define PSCHED_SHIFT			6
#define PSCHED_TICKS2NS(x)		((s64)(x) << PSCHED_SHIFT)
#define PSCHED_NS2TICKS(x)		((x) >> PSCHED_SHIFT)

#define PSCHED_TICKS_PER_SEC		PSCHED_NS2TICKS(NSEC_PER_SEC)
#define PSCHED_PASTPERFECT	0

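/*
 * Worked example: with PSCHED_SHIFT == 6, one psched tick is 2^6 = 64ns
 * and PSCHED_TICKS_PER_SEC is 10^9 >> 6 = 15,625,000.  Converting a
 * time through PSCHED_NS2TICKS() and back via PSCHED_TICKS2NS() only
 * clears the low 6 bits, i.e. costs less than 64ns of precision.
 */
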
static inline psched_time_t psched_get_time(void)
{
	return PSCHED_NS2TICKS(ktime_get_ns());
}

static inline psched_tdiff_t
psched_tdiff_bounded(psched_time_t tv1, psched_time_t tv2, psched_time_t bound)
{
	return min(tv1 - tv2, bound);
}

struct qdisc_watchdog {
	u64		last_expires;
	struct hrtimer	timer;
	struct Qdisc	*qdisc;
};

void qdisc_watchdog_init_clockid(struct qdisc_watchdog *wd, struct Qdisc *qdisc,
				 clockid_t clockid);
void qdisc_watchdog_init(struct qdisc_watchdog *wd, struct Qdisc *qdisc);

void qdisc_watchdog_schedule_range_ns(struct qdisc_watchdog *wd, u64 expires,
				      u64 delta_ns);

static inline void qdisc_watchdog_schedule_ns(struct qdisc_watchdog *wd,
					      u64 expires)
{
	return qdisc_watchdog_schedule_range_ns(wd, expires, 0ULL);
}

static inline void qdisc_watchdog_schedule(struct qdisc_watchdog *wd,
					   psched_time_t expires)
{
	qdisc_watchdog_schedule_ns(wd, PSCHED_TICKS2NS(expires));
}

void qdisc_watchdog_cancel(struct qdisc_watchdog *wd);
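
/*
 * Typical usage (a hypothetical sketch; names like foo_dequeue and
 * foo_sched_data are illustrative only): a rate-limiting qdisc arms the
 * watchdog from its ->dequeue() when the head packet is not yet allowed
 * out, and the hrtimer reschedules the qdisc once the clock catches up:
 *
 *	static struct sk_buff *foo_dequeue(struct Qdisc *sch)
 *	{
 *		struct foo_sched_data *q = qdisc_priv(sch);
 *
 *		if (ktime_get_ns() < q->next_tx_ns) {
 *			qdisc_watchdog_schedule_ns(&q->watchdog,
 *						   q->next_tx_ns);
 *			return NULL;
 *		}
 *		return qdisc_dequeue_head(sch);
 *	}
 *
 * with qdisc_watchdog_init() called from ->init() and
 * qdisc_watchdog_cancel() from ->reset()/->destroy().
 */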

extern struct Qdisc_ops pfifo_qdisc_ops;
extern struct Qdisc_ops bfifo_qdisc_ops;
extern struct Qdisc_ops pfifo_head_drop_qdisc_ops;

int fifo_set_limit(struct Qdisc *q, unsigned int limit);
struct Qdisc *fifo_create_dflt(struct Qdisc *sch, struct Qdisc_ops *ops,
			       unsigned int limit,
			       struct netlink_ext_ack *extack);

int register_qdisc(struct Qdisc_ops *qops);
int unregister_qdisc(struct Qdisc_ops *qops);
void qdisc_get_default(char *id, size_t len);
int qdisc_set_default(const char *id);

void qdisc_hash_add(struct Qdisc *q, bool invisible);
void qdisc_hash_del(struct Qdisc *q);
struct Qdisc *qdisc_lookup(struct net_device *dev, u32 handle);
struct Qdisc *qdisc_lookup_rcu(struct net_device *dev, u32 handle);
struct qdisc_rate_table *qdisc_get_rtab(struct tc_ratespec *r,
					struct nlattr *tab,
					struct netlink_ext_ack *extack);
void qdisc_put_rtab(struct qdisc_rate_table *tab);
void qdisc_put_stab(struct qdisc_size_table *tab);
void qdisc_warn_nonwc(const char *txt, struct Qdisc *qdisc);
bool sch_direct_xmit(struct sk_buff *skb, struct Qdisc *q,
		     struct net_device *dev, struct netdev_queue *txq,
		     spinlock_t *root_lock, bool validate);

void __qdisc_run(struct Qdisc *q);

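/*
 * qdisc_run_begin() returns true only for the one caller that manages
 * to claim the qdisc; concurrent callers see false and simply back
 * off, relying on the running CPU to drain any packets they have
 * queued in the meantime.
 */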
static inline void qdisc_run(struct Qdisc *q)
{
	if (qdisc_run_begin(q)) {
		__qdisc_run(q);
		qdisc_run_end(q);
	}
}

/* Calculate the maximal size of a packet seen by the hard_start_xmit
   routine of this device.
 */
static inline unsigned int psched_mtu(const struct net_device *dev)
{
	return dev->mtu + dev->hard_header_len;
}
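
/*
 * e.g. on plain Ethernet (dev->mtu == 1500, dev->hard_header_len == 14,
 * i.e. ETH_HLEN) psched_mtu() yields 1514 bytes, the largest frame the
 * hard_start_xmit routine can be handed before VLAN tags are considered.
 */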

static inline struct net *qdisc_net(struct Qdisc *q)
{
	return dev_net(q->dev_queue->dev);
}

struct tc_cbs_qopt_offload {
	u8 enable;
	s32 queue;
	s32 hicredit;
	s32 locredit;
	s32 idleslope;
	s32 sendslope;
};

struct tc_etf_qopt_offload {
	u8 enable;
	s32 queue;
};

struct tc_taprio_sched_entry {
	u8 command; /* TC_TAPRIO_CMD_* */

	/* The gate_mask in the offloading side refers to traffic classes */
	u32 gate_mask;
	u32 interval;
};

struct tc_taprio_qopt_offload {
	u8 enable;
	ktime_t base_time;
	u64 cycle_time;
	u64 cycle_time_extension;

	size_t num_entries;
	struct tc_taprio_sched_entry entries[];
};

/* Reference counting */
struct tc_taprio_qopt_offload *taprio_offload_get(struct tc_taprio_qopt_offload
						  *offload);
void taprio_offload_free(struct tc_taprio_qopt_offload *offload);
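
/*
 * Usage sketch (illustrative; the exact flow is up to the driver): a
 * driver that wants to keep the offload description alive past its
 * ndo_setup_tc() call takes a reference with taprio_offload_get() and
 * drops it with taprio_offload_free() once the hardware has been
 * programmed, e.g. from a deferred work item.
 */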

/* Ensure skb_mstamp_ns, which might have been populated with the txtime, is
 * not mistaken for a software timestamp, because this will otherwise prevent
 * the dispatch of hardware timestamps to the socket.
 */
static inline void skb_txtime_consumed(struct sk_buff *skb)
{
	skb->tstamp = ktime_set(0, 0);
}

struct tc_skb_cb {
	struct qdisc_skb_cb qdisc_cb;

	u16 mru;
	bool post_ct;
	u16 zone; /* Only valid if post_ct = true */
};

static inline struct tc_skb_cb *tc_skb_cb(const struct sk_buff *skb)
{
	struct tc_skb_cb *cb = (struct tc_skb_cb *)skb->cb;

	BUILD_BUG_ON(sizeof(*cb) > sizeof_field(struct sk_buff, cb));
	return cb;
}

#endif