// SPDX-License-Identifier: GPL-2.0

/* net/sched/sch_etf.c  Earliest TxTime First queueing discipline.
 *
 * Authors:	Jesus Sanchez-Palencia <jesus.sanchez-palencia@intel.com>
 *		Vinicius Costa Gomes <vinicius.gomes@intel.com>
 */
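
/* Illustrative usage (a sketch only; the parent handle, device and values
 * below are examples, not requirements):
 *
 *	tc qdisc replace dev eth0 parent 100:1 etf \
 *		clockid CLOCK_TAI delta 300000 offload
 *
 * Applications opt in per socket with setsockopt(SO_TXTIME) and pass the
 * desired transmission time of each packet in an SCM_TXTIME cmsg; that
 * txtime reaches this qdisc in skb->tstamp.
 */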

#include <linux/module.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/errno.h>
#include <linux/errqueue.h>
#include <linux/rbtree.h>
#include <linux/skbuff.h>
#include <linux/posix-timers.h>
#include <net/netlink.h>
#include <net/sch_generic.h>
#include <net/pkt_sched.h>
#include <net/sock.h>

#define DEADLINE_MODE_IS_ON(x) ((x)->flags & TC_ETF_DEADLINE_MODE_ON)
#define OFFLOAD_IS_ON(x) ((x)->flags & TC_ETF_OFFLOAD_ON)

struct etf_sched_data {
	bool offload;
	bool deadline_mode;
	int clockid;
	int queue;
	s32 delta; /* in ns */
	ktime_t last; /* The txtime of the last skb sent to the netdevice. */
	struct rb_root head;
	struct qdisc_watchdog watchdog;
	ktime_t (*get_time)(void);
};

static const struct nla_policy etf_policy[TCA_ETF_MAX + 1] = {
	[TCA_ETF_PARMS]	= { .len = sizeof(struct tc_etf_qopt) },
};

static inline int validate_input_params(struct tc_etf_qopt *qopt,
					struct netlink_ext_ack *extack)
{
	/* Check if the parameters comply with the following rules:
	 *	* Clockid and delta must be valid.
	 *
	 *	* Dynamic clockids are not supported.
	 *
	 *	* Delta must be a positive integer.
	 *
	 * Also note that for the HW offload case, we expect the system
	 * clock to have been synchronized to the PHC.
	 */
	if (qopt->clockid < 0) {
		NL_SET_ERR_MSG(extack, "Dynamic clockids are not supported");
		return -ENOTSUPP;
	}

	if (qopt->clockid != CLOCK_TAI) {
		NL_SET_ERR_MSG(extack, "Invalid clockid. CLOCK_TAI must be used");
		return -EINVAL;
	}

	if (qopt->delta < 0) {
		NL_SET_ERR_MSG(extack, "Delta must be positive");
		return -EINVAL;
	}

	return 0;
}

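/* A packet is accepted for queueing only if its socket opted in to SO_TXTIME
 * with a clockid and deadline mode matching this qdisc's, and its txtime is
 * neither already in the past nor behind the last packet handed to the
 * device.
 */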
static bool is_packet_valid(struct Qdisc *sch, struct sk_buff *nskb)
{
	struct etf_sched_data *q = qdisc_priv(sch);
	ktime_t txtime = nskb->tstamp;
	struct sock *sk = nskb->sk;
	ktime_t now;

	if (!sk)
		return false;

	if (!sock_flag(sk, SOCK_TXTIME))
		return false;

	/* We don't perform crosstimestamping.
	 * Drop if packet's clockid differs from qdisc's.
	 */
	if (sk->sk_clockid != q->clockid)
		return false;

	if (sk->sk_txtime_deadline_mode != q->deadline_mode)
		return false;

	now = q->get_time();
	if (ktime_before(txtime, now) || ktime_before(txtime, q->last))
		return false;

	return true;
}

static struct sk_buff *etf_peek_timesortedlist(struct Qdisc *sch)
{
	struct etf_sched_data *q = qdisc_priv(sch);
	struct rb_node *p;

	p = rb_first(&q->head);
	if (!p)
		return NULL;

	return rb_to_skb(p);
}

static void reset_watchdog(struct Qdisc *sch)
{
	struct etf_sched_data *q = qdisc_priv(sch);
	struct sk_buff *skb = etf_peek_timesortedlist(sch);
	ktime_t next;

	if (!skb) {
		qdisc_watchdog_cancel(&q->watchdog);
		return;
	}

	next = ktime_sub_ns(skb->tstamp, q->delta);
	qdisc_watchdog_schedule_ns(&q->watchdog, ktime_to_ns(next));
}

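/* When the socket asked for error reporting (sk_txtime_report_errors), queue
 * a cloned skb on its error queue with SO_EE_ORIGIN_TXTIME and the requested
 * txtime split across ee_data (high 32 bits) and ee_info (low 32 bits).
 */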
static void report_sock_error(struct sk_buff *skb, u32 err, u8 code)
{
	struct sock_exterr_skb *serr;
	struct sk_buff *clone;
	ktime_t txtime = skb->tstamp;

	if (!skb->sk || !(skb->sk->sk_txtime_report_errors))
		return;

	clone = skb_clone(skb, GFP_ATOMIC);
	if (!clone)
		return;

	serr = SKB_EXT_ERR(clone);
	serr->ee.ee_errno = err;
	serr->ee.ee_origin = SO_EE_ORIGIN_TXTIME;
	serr->ee.ee_type = 0;
	serr->ee.ee_code = code;
	serr->ee.ee_pad = 0;
	serr->ee.ee_data = (txtime >> 32); /* high part of tstamp */
	serr->ee.ee_info = txtime; /* low part of tstamp */

	if (sock_queue_err_skb(skb->sk, clone))
		kfree_skb(clone);
}

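/* Enqueue keeps skbs in an rbtree ordered by txtime (skb->tstamp), so the
 * packet with the earliest transmission time is always at rb_first().
 * Invalid packets are dropped and reported back to the sender.
 */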
static int etf_enqueue_timesortedlist(struct sk_buff *nskb, struct Qdisc *sch,
				      struct sk_buff **to_free)
{
	struct etf_sched_data *q = qdisc_priv(sch);
	struct rb_node **p = &q->head.rb_node, *parent = NULL;
	ktime_t txtime = nskb->tstamp;

	if (!is_packet_valid(sch, nskb)) {
		report_sock_error(nskb, EINVAL,
				  SO_EE_CODE_TXTIME_INVALID_PARAM);
		return qdisc_drop(nskb, sch, to_free);
	}

	while (*p) {
		struct sk_buff *skb;

		parent = *p;
		skb = rb_to_skb(parent);
		if (ktime_after(txtime, skb->tstamp))
			p = &parent->rb_right;
		else
			p = &parent->rb_left;
	}
	rb_link_node(&nskb->rbnode, parent, p);
	rb_insert_color(&nskb->rbnode, &q->head);

	qdisc_qstats_backlog_inc(sch, nskb);
	sch->q.qlen++;

	/* Now we may need to re-arm the qdisc watchdog for the next packet. */
	reset_watchdog(sch);

	return NET_XMIT_SUCCESS;
}

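/* Remove an skb from the rbtree. On drop, the missed txtime is reported to
 * the socket and counted as an overlimit; otherwise the dequeue is accounted
 * in the byte/packet stats and q->last records its txtime.
 */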
static void timesortedlist_erase(struct Qdisc *sch, struct sk_buff *skb,
				 bool drop)
{
	struct etf_sched_data *q = qdisc_priv(sch);

	rb_erase(&skb->rbnode, &q->head);

	/* The rbnode field in the skb re-uses these fields; now that
	 * we are done with the rbnode, reset them.
	 */
	skb->next = NULL;
	skb->prev = NULL;
	skb->dev = qdisc_dev(sch);

	qdisc_qstats_backlog_dec(sch, skb);

	if (drop) {
		struct sk_buff *to_free = NULL;

		report_sock_error(skb, ECANCELED, SO_EE_CODE_TXTIME_MISSED);

		qdisc_drop(skb, sch, &to_free);
		kfree_skb_list(to_free);
		qdisc_qstats_overlimit(sch);
	} else {
		qdisc_bstats_update(sch, skb);

		q->last = skb->tstamp;
	}

	sch->q.qlen--;
}

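/* Dequeue the earliest packet: drop it if its txtime has already passed,
 * hand it out immediately in deadline mode, or release it only once we are
 * within delta ns of its txtime; otherwise return NULL and let the watchdog
 * re-arm for the next packet.
 */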
static struct sk_buff *etf_dequeue_timesortedlist(struct Qdisc *sch)
{
	struct etf_sched_data *q = qdisc_priv(sch);
	struct sk_buff *skb;
	ktime_t now, next;

	skb = etf_peek_timesortedlist(sch);
	if (!skb)
		return NULL;

	now = q->get_time();

	/* Drop if packet has expired while in queue. */
	if (ktime_before(skb->tstamp, now)) {
		timesortedlist_erase(sch, skb, true);
		skb = NULL;
		goto out;
	}

	/* When in deadline mode, dequeue as soon as possible and change the
	 * txtime from the deadline to the current time.
	 */
	if (q->deadline_mode) {
		timesortedlist_erase(sch, skb, false);
		skb->tstamp = now;
		goto out;
	}

	next = ktime_sub_ns(skb->tstamp, q->delta);

	/* Dequeue only if now is within the [txtime - delta, txtime] range. */
	if (ktime_after(now, next))
		timesortedlist_erase(sch, skb, false);
	else
		skb = NULL;

out:
	/* Now we may need to re-arm the qdisc watchdog for the next packet. */
	reset_watchdog(sch);

	return skb;
}

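/* HW offload is toggled per tx queue through ndo_setup_tc(TC_SETUP_QDISC_ETF),
 * so the driver can enable or disable launch-time support for that queue.
 */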
static void etf_disable_offload(struct net_device *dev,
				struct etf_sched_data *q)
{
	struct tc_etf_qopt_offload etf = { };
	const struct net_device_ops *ops;
	int err;

	if (!q->offload)
		return;

	ops = dev->netdev_ops;
	if (!ops->ndo_setup_tc)
		return;

	etf.queue = q->queue;
	etf.enable = 0;

	err = ops->ndo_setup_tc(dev, TC_SETUP_QDISC_ETF, &etf);
	if (err < 0)
		pr_warn("Couldn't disable ETF offload for queue %d\n",
			etf.queue);
}

static int etf_enable_offload(struct net_device *dev, struct etf_sched_data *q,
			      struct netlink_ext_ack *extack)
{
	const struct net_device_ops *ops = dev->netdev_ops;
	struct tc_etf_qopt_offload etf = { };
	int err;

	if (q->offload)
		return 0;

	if (!ops->ndo_setup_tc) {
		NL_SET_ERR_MSG(extack, "Specified device does not support ETF offload");
		return -EOPNOTSUPP;
	}

	etf.queue = q->queue;
	etf.enable = 1;

	err = ops->ndo_setup_tc(dev, TC_SETUP_QDISC_ETF, &etf);
	if (err < 0) {
		NL_SET_ERR_MSG(extack, "Specified device failed to setup ETF hardware offload");
		return err;
	}

	return 0;
}

static int etf_init(struct Qdisc *sch, struct nlattr *opt,
		    struct netlink_ext_ack *extack)
{
	struct etf_sched_data *q = qdisc_priv(sch);
	struct net_device *dev = qdisc_dev(sch);
	struct nlattr *tb[TCA_ETF_MAX + 1];
	struct tc_etf_qopt *qopt;
	int err;

	if (!opt) {
		NL_SET_ERR_MSG(extack,
			       "Missing ETF qdisc options which are mandatory");
		return -EINVAL;
	}

	err = nla_parse_nested(tb, TCA_ETF_MAX, opt, etf_policy, extack);
	if (err < 0)
		return err;

	if (!tb[TCA_ETF_PARMS]) {
		NL_SET_ERR_MSG(extack, "Missing mandatory ETF parameters");
		return -EINVAL;
	}

	qopt = nla_data(tb[TCA_ETF_PARMS]);

	pr_debug("delta %d clockid %d offload %s deadline %s\n",
		 qopt->delta, qopt->clockid,
		 OFFLOAD_IS_ON(qopt) ? "on" : "off",
		 DEADLINE_MODE_IS_ON(qopt) ? "on" : "off");

	err = validate_input_params(qopt, extack);
	if (err < 0)
		return err;

	q->queue = sch->dev_queue - netdev_get_tx_queue(dev, 0);

	if (OFFLOAD_IS_ON(qopt)) {
		err = etf_enable_offload(dev, q, extack);
		if (err < 0)
			return err;
	}

	/* Everything went OK; save the parameters used. */
	q->delta = qopt->delta;
	q->clockid = qopt->clockid;
	q->offload = OFFLOAD_IS_ON(qopt);
	q->deadline_mode = DEADLINE_MODE_IS_ON(qopt);

	switch (q->clockid) {
	case CLOCK_REALTIME:
		q->get_time = ktime_get_real;
		break;
	case CLOCK_MONOTONIC:
		q->get_time = ktime_get;
		break;
	case CLOCK_BOOTTIME:
		q->get_time = ktime_get_boottime;
		break;
	case CLOCK_TAI:
		q->get_time = ktime_get_clocktai;
		break;
	default:
		NL_SET_ERR_MSG(extack, "Clockid is not supported");
		return -ENOTSUPP;
	}

	qdisc_watchdog_init_clockid(&q->watchdog, sch, q->clockid);

	return 0;
}

static void timesortedlist_clear(struct Qdisc *sch)
{
	struct etf_sched_data *q = qdisc_priv(sch);
	struct rb_node *p = rb_first(&q->head);

	while (p) {
		struct sk_buff *skb = rb_to_skb(p);

		p = rb_next(p);

		rb_erase(&skb->rbnode, &q->head);
		rtnl_kfree_skbs(skb, skb);
		sch->q.qlen--;
	}
}

static void etf_reset(struct Qdisc *sch)
{
	struct etf_sched_data *q = qdisc_priv(sch);

	/* Only cancel watchdog if it's been initialized. */
	if (q->watchdog.qdisc == sch)
		qdisc_watchdog_cancel(&q->watchdog);

	/* No matter which mode we are on, it's safe to clear both lists. */
	timesortedlist_clear(sch);
	__qdisc_reset_queue(&sch->q);

	sch->qstats.backlog = 0;
	sch->q.qlen = 0;

	q->last = 0;
}

static void etf_destroy(struct Qdisc *sch)
{
	struct etf_sched_data *q = qdisc_priv(sch);
	struct net_device *dev = qdisc_dev(sch);

	/* Only cancel watchdog if it's been initialized. */
	if (q->watchdog.qdisc == sch)
		qdisc_watchdog_cancel(&q->watchdog);

	etf_disable_offload(dev, q);
}

static int etf_dump(struct Qdisc *sch, struct sk_buff *skb)
{
	struct etf_sched_data *q = qdisc_priv(sch);
	struct tc_etf_qopt opt = { };
	struct nlattr *nest;

	nest = nla_nest_start(skb, TCA_OPTIONS);
	if (!nest)
		goto nla_put_failure;

	opt.delta = q->delta;
	opt.clockid = q->clockid;
	if (q->offload)
		opt.flags |= TC_ETF_OFFLOAD_ON;

	if (q->deadline_mode)
		opt.flags |= TC_ETF_DEADLINE_MODE_ON;

	if (nla_put(skb, TCA_ETF_PARMS, sizeof(opt), &opt))
		goto nla_put_failure;

	return nla_nest_end(skb, nest);

nla_put_failure:
	nla_nest_cancel(skb, nest);
	return -1;
}

static struct Qdisc_ops etf_qdisc_ops __read_mostly = {
	.id		= "etf",
	.priv_size	= sizeof(struct etf_sched_data),
	.enqueue	= etf_enqueue_timesortedlist,
	.dequeue	= etf_dequeue_timesortedlist,
	.peek		= etf_peek_timesortedlist,
	.init		= etf_init,
	.reset		= etf_reset,
	.destroy	= etf_destroy,
	.dump		= etf_dump,
	.owner		= THIS_MODULE,
};

static int __init etf_module_init(void)
{
	return register_qdisc(&etf_qdisc_ops);
}

static void __exit etf_module_exit(void)
{
	unregister_qdisc(&etf_qdisc_ops);
}
module_init(etf_module_init)
module_exit(etf_module_exit)
MODULE_LICENSE("GPL");