// SPDX-License-Identifier: GPL-2.0

/* net/sched/sch_taprio.c	Time Aware Priority Scheduler
 *
 * Authors:	Vinicius Costa Gomes <vinicius.gomes@intel.com>
 *
 */
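
/* Illustrative configuration example (not taken from this file): a taprio
 * schedule with three traffic classes and three gate windows can be set up
 * from user space with an iproute2 command roughly like the one below. The
 * exact option names depend on the tc version in use, so treat this as a
 * sketch rather than a reference:
 *
 *   tc qdisc replace dev eth0 parent root handle 100 taprio \
 *           num_tc 3 map 2 2 1 0 2 2 2 2 2 2 2 2 2 2 2 2 \
 *           queues 1@0 1@1 2@2 \
 *           base-time 1528743495910289987 \
 *           sched-entry S 01 300000 \
 *           sched-entry S 02 300000 \
 *           sched-entry S 04 400000 \
 *           clockid CLOCK_TAI
 *
 * Each "sched-entry S <gate mask> <interval in ns>" becomes one sched_entry
 * below; the gate mask is indexed by traffic class, not by hardware queue.
 */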

#include <linux/types.h>
#include <linux/slab.h>
#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/list.h>
#include <linux/errno.h>
#include <linux/skbuff.h>
#include <linux/math64.h>
#include <linux/module.h>
#include <linux/spinlock.h>
#include <linux/rcupdate.h>
#include <net/netlink.h>
#include <net/pkt_sched.h>
#include <net/pkt_cls.h>
#include <net/sch_generic.h>
#include <net/sock.h>
#include <net/tcp.h>

static LIST_HEAD(taprio_list);
static DEFINE_SPINLOCK(taprio_list_lock);

#define TAPRIO_ALL_GATES_OPEN -1

#define TXTIME_ASSIST_IS_ENABLED(flags) ((flags) & TCA_TAPRIO_ATTR_FLAG_TXTIME_ASSIST)
#define FULL_OFFLOAD_IS_ENABLED(flags) ((flags) & TCA_TAPRIO_ATTR_FLAG_FULL_OFFLOAD)
#define TAPRIO_FLAGS_INVALID U32_MAX

struct sched_entry {
	struct list_head list;

	/* The instant that this entry "closes" and the next one
	 * should open. The qdisc will make some effort so that no
	 * packet leaves after this time.
	 */
	ktime_t close_time;
	ktime_t next_txtime;
	atomic_t budget;
	int index;
	u32 gate_mask;
	u32 interval;
	u8 command;
};

struct sched_gate_list {
	struct rcu_head rcu;
	struct list_head entries;
	size_t num_entries;
	ktime_t cycle_close_time;
	s64 cycle_time;
	s64 cycle_time_extension;
	s64 base_time;
};

struct taprio_sched {
	struct Qdisc **qdiscs;
	struct Qdisc *root;
	u32 flags;
	enum tk_offsets tk_offset;
	int clockid;
	atomic64_t picos_per_byte; /* Using picoseconds because for 10Gbps+
				    * speeds it's sub-nanoseconds per byte
				    */

	/* Protects the update side of the RCU protected current_entry */
	spinlock_t current_entry_lock;
	struct sched_entry __rcu *current_entry;
	struct sched_gate_list __rcu *oper_sched;
	struct sched_gate_list __rcu *admin_sched;
	struct hrtimer advance_timer;
	struct list_head taprio_list;
	struct sk_buff *(*dequeue)(struct Qdisc *sch);
	struct sk_buff *(*peek)(struct Qdisc *sch);
	u32 txtime_delay;
};

struct __tc_taprio_qopt_offload {
	refcount_t users;
	struct tc_taprio_qopt_offload offload;
};

static ktime_t sched_base_time(const struct sched_gate_list *sched)
{
	if (!sched)
		return KTIME_MAX;

	return ns_to_ktime(sched->base_time);
}

static ktime_t taprio_get_time(struct taprio_sched *q)
{
	ktime_t mono = ktime_get();

	switch (q->tk_offset) {
	case TK_OFFS_MAX:
		return mono;
	default:
		return ktime_mono_to_any(mono, q->tk_offset);
	}

	return KTIME_MAX;
}

static void taprio_free_sched_cb(struct rcu_head *head)
{
	struct sched_gate_list *sched = container_of(head, struct sched_gate_list, rcu);
	struct sched_entry *entry, *n;

	if (!sched)
		return;

	list_for_each_entry_safe(entry, n, &sched->entries, list) {
		list_del(&entry->list);
		kfree(entry);
	}

	kfree(sched);
}

static void switch_schedules(struct taprio_sched *q,
			     struct sched_gate_list **admin,
			     struct sched_gate_list **oper)
{
	rcu_assign_pointer(q->oper_sched, *admin);
	rcu_assign_pointer(q->admin_sched, NULL);

	if (*oper)
		call_rcu(&(*oper)->rcu, taprio_free_sched_cb);

	*oper = *admin;
	*admin = NULL;
}

/* Get how much time has already elapsed in the current cycle. */
static s32 get_cycle_time_elapsed(struct sched_gate_list *sched, ktime_t time)
{
	ktime_t time_since_sched_start;
	s32 time_elapsed;

	time_since_sched_start = ktime_sub(time, sched->base_time);
	div_s64_rem(time_since_sched_start, sched->cycle_time, &time_elapsed);

	return time_elapsed;
}
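
/* Worked example (illustrative): with base_time = 0 and cycle_time = 1 ms,
 * calling this at time = 2.3 ms returns 0.3 ms, i.e. how far the schedule
 * currently is into its running cycle.
 */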

static ktime_t get_interval_end_time(struct sched_gate_list *sched,
				     struct sched_gate_list *admin,
				     struct sched_entry *entry,
				     ktime_t intv_start)
{
	s32 cycle_elapsed = get_cycle_time_elapsed(sched, intv_start);
	ktime_t intv_end, cycle_ext_end, cycle_end;

	cycle_end = ktime_add_ns(intv_start, sched->cycle_time - cycle_elapsed);
	intv_end = ktime_add_ns(intv_start, entry->interval);
	cycle_ext_end = ktime_add(cycle_end, sched->cycle_time_extension);

	if (ktime_before(intv_end, cycle_end))
		return intv_end;
	else if (admin && admin != sched &&
		 ktime_after(admin->base_time, cycle_end) &&
		 ktime_before(admin->base_time, cycle_ext_end))
		return admin->base_time;
	else
		return cycle_end;
}

static int length_to_duration(struct taprio_sched *q, int len)
{
	return div_u64(len * atomic64_read(&q->picos_per_byte), 1000);
}
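
/* Worked example (illustrative, following the arithmetic above): at 1 Gb/s
 * taprio_set_picos_per_byte() stores picos_per_byte = 8000, so a 1500 byte
 * frame maps to 1500 * 8000 / 1000 = 12000 ns (12 us) of wire time. This is
 * the duration compared against the gate close time and budget on dequeue.
 */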

/* Returns the entry corresponding to the next available interval. If
 * validate_interval is set, it only validates whether the timestamp occurs
 * when the gate corresponding to the skb's traffic class is open.
 */
static struct sched_entry *find_entry_to_transmit(struct sk_buff *skb,
						  struct Qdisc *sch,
						  struct sched_gate_list *sched,
						  struct sched_gate_list *admin,
						  ktime_t time,
						  ktime_t *interval_start,
						  ktime_t *interval_end,
						  bool validate_interval)
{
	ktime_t curr_intv_start, curr_intv_end, cycle_end, packet_transmit_time;
	ktime_t earliest_txtime = KTIME_MAX, txtime, cycle, transmit_end_time;
	struct sched_entry *entry = NULL, *entry_found = NULL;
	struct taprio_sched *q = qdisc_priv(sch);
	struct net_device *dev = qdisc_dev(sch);
	bool entry_available = false;
	s32 cycle_elapsed;
	int tc, n;

	tc = netdev_get_prio_tc_map(dev, skb->priority);
	packet_transmit_time = length_to_duration(q, qdisc_pkt_len(skb));

	*interval_start = 0;
	*interval_end = 0;

	if (!sched)
		return NULL;

	cycle = sched->cycle_time;
	cycle_elapsed = get_cycle_time_elapsed(sched, time);
	curr_intv_end = ktime_sub_ns(time, cycle_elapsed);
	cycle_end = ktime_add_ns(curr_intv_end, cycle);

	list_for_each_entry(entry, &sched->entries, list) {
		curr_intv_start = curr_intv_end;
		curr_intv_end = get_interval_end_time(sched, admin, entry,
						      curr_intv_start);

		if (ktime_after(curr_intv_start, cycle_end))
			break;

		if (!(entry->gate_mask & BIT(tc)) ||
		    packet_transmit_time > entry->interval)
			continue;

		txtime = entry->next_txtime;

		if (ktime_before(txtime, time) || validate_interval) {
			transmit_end_time = ktime_add_ns(time, packet_transmit_time);
			if ((ktime_before(curr_intv_start, time) &&
			     ktime_before(transmit_end_time, curr_intv_end)) ||
			    (ktime_after(curr_intv_start, time) && !validate_interval)) {
				entry_found = entry;
				*interval_start = curr_intv_start;
				*interval_end = curr_intv_end;
				break;
			} else if (!entry_available && !validate_interval) {
				/* Here, we are just trying to find out the
				 * first available interval in the next cycle.
				 */
				entry_available = 1;
				entry_found = entry;
				*interval_start = ktime_add_ns(curr_intv_start, cycle);
				*interval_end = ktime_add_ns(curr_intv_end, cycle);
			}
		} else if (ktime_before(txtime, earliest_txtime) &&
			   !entry_available) {
			earliest_txtime = txtime;
			entry_found = entry;
			n = div_s64(ktime_sub(txtime, curr_intv_start), cycle);
			*interval_start = ktime_add(curr_intv_start, n * cycle);
			*interval_end = ktime_add(curr_intv_end, n * cycle);
		}
	}

	return entry_found;
}

static bool is_valid_interval(struct sk_buff *skb, struct Qdisc *sch)
{
	struct taprio_sched *q = qdisc_priv(sch);
	struct sched_gate_list *sched, *admin;
	ktime_t interval_start, interval_end;
	struct sched_entry *entry;

	rcu_read_lock();
	sched = rcu_dereference(q->oper_sched);
	admin = rcu_dereference(q->admin_sched);

	entry = find_entry_to_transmit(skb, sch, sched, admin, skb->tstamp,
				       &interval_start, &interval_end, true);
	rcu_read_unlock();

	return entry;
}

static bool taprio_flags_valid(u32 flags)
{
	/* Make sure no other flag bits are set. */
	if (flags & ~(TCA_TAPRIO_ATTR_FLAG_TXTIME_ASSIST |
		      TCA_TAPRIO_ATTR_FLAG_FULL_OFFLOAD))
		return false;
	/* txtime-assist and full offload are mutually exclusive */
	if ((flags & TCA_TAPRIO_ATTR_FLAG_TXTIME_ASSIST) &&
	    (flags & TCA_TAPRIO_ATTR_FLAG_FULL_OFFLOAD))
		return false;
	return true;
}
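
/* Note (illustrative): either flag may be used on its own, but a request
 * that sets both txtime-assist and full offload, or any undefined bit, is
 * rejected by the checks above before the rest of the configuration is
 * looked at.
 */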

/* This returns the tstamp value set by TCP in terms of the set clock. */
static ktime_t get_tcp_tstamp(struct taprio_sched *q, struct sk_buff *skb)
{
	unsigned int offset = skb_network_offset(skb);
	const struct ipv6hdr *ipv6h;
	const struct iphdr *iph;
	struct ipv6hdr _ipv6h;

	ipv6h = skb_header_pointer(skb, offset, sizeof(_ipv6h), &_ipv6h);
	if (!ipv6h)
		return 0;

	if (ipv6h->version == 4) {
		iph = (struct iphdr *)ipv6h;
		offset += iph->ihl * 4;

		/* special-case 6in4 tunnelling, as that is a common way to get
		 * v6 connectivity in the home
		 */
		if (iph->protocol == IPPROTO_IPV6) {
			ipv6h = skb_header_pointer(skb, offset,
						   sizeof(_ipv6h), &_ipv6h);

			if (!ipv6h || ipv6h->nexthdr != IPPROTO_TCP)
				return 0;
		} else if (iph->protocol != IPPROTO_TCP) {
			return 0;
		}
	} else if (ipv6h->version == 6 && ipv6h->nexthdr != IPPROTO_TCP) {
		return 0;
	}

	return ktime_mono_to_any(skb->skb_mstamp_ns, q->tk_offset);
}

/* There are a few scenarios where we will have to modify the txtime from
 * what is read from next_txtime in sched_entry. They are:
 * 1. If txtime is in the past,
 *    a. If the gate for the traffic class is currently open and the packet
 *       can be transmitted before it closes, schedule the packet right away.
 *    b. If the gate corresponding to the traffic class is going to open
 *       later in the cycle, set the txtime of the packet to the interval
 *       start.
 * 2. If txtime is in the future, there are packets corresponding to the
 *    current traffic class waiting to be transmitted. So, the following
 *    possibilities exist:
 *    a. We can transmit the packet before the window containing the txtime
 *       closes.
 *    b. The window might close before the transmission can be completed
 *       successfully. So, schedule the packet in the next open window.
 */
static long get_packet_txtime(struct sk_buff *skb, struct Qdisc *sch)
{
	ktime_t transmit_end_time, interval_end, interval_start, tcp_tstamp;
	struct taprio_sched *q = qdisc_priv(sch);
	struct sched_gate_list *sched, *admin;
	ktime_t minimum_time, now, txtime;
	int len, packet_transmit_time;
	struct sched_entry *entry;
	bool sched_changed;

	now = taprio_get_time(q);
	minimum_time = ktime_add_ns(now, q->txtime_delay);

	tcp_tstamp = get_tcp_tstamp(q, skb);
	minimum_time = max_t(ktime_t, minimum_time, tcp_tstamp);

	rcu_read_lock();
	admin = rcu_dereference(q->admin_sched);
	sched = rcu_dereference(q->oper_sched);
	if (admin && ktime_after(minimum_time, admin->base_time))
		switch_schedules(q, &admin, &sched);

	/* Until the schedule starts, all the queues are open */
	if (!sched || ktime_before(minimum_time, sched->base_time)) {
		txtime = minimum_time;
		goto done;
	}

	len = qdisc_pkt_len(skb);
	packet_transmit_time = length_to_duration(q, len);

	do {
		sched_changed = 0;

		entry = find_entry_to_transmit(skb, sch, sched, admin,
					       minimum_time,
					       &interval_start, &interval_end,
					       false);
		if (!entry) {
			txtime = 0;
			goto done;
		}

		txtime = entry->next_txtime;
		txtime = max_t(ktime_t, txtime, minimum_time);
		txtime = max_t(ktime_t, txtime, interval_start);

		if (admin && admin != sched &&
		    ktime_after(txtime, admin->base_time)) {
			sched = admin;
			sched_changed = 1;
			continue;
		}

		transmit_end_time = ktime_add(txtime, packet_transmit_time);
		minimum_time = transmit_end_time;

		/* Update the txtime of the current entry to the next time its
		 * interval starts.
		 */
		if (ktime_after(transmit_end_time, interval_end))
			entry->next_txtime = ktime_add(interval_start, sched->cycle_time);
	} while (sched_changed || ktime_after(transmit_end_time, interval_end));

	entry->next_txtime = transmit_end_time;

done:
	rcu_read_unlock();
	return txtime;
}

static int taprio_enqueue(struct sk_buff *skb, struct Qdisc *sch,
			  struct sk_buff **to_free)
{
	struct taprio_sched *q = qdisc_priv(sch);
	struct Qdisc *child;
	int queue;

	queue = skb_get_queue_mapping(skb);

	child = q->qdiscs[queue];
	if (unlikely(!child))
		return qdisc_drop(skb, sch, to_free);

	if (skb->sk && sock_flag(skb->sk, SOCK_TXTIME)) {
		if (!is_valid_interval(skb, sch))
			return qdisc_drop(skb, sch, to_free);
	} else if (TXTIME_ASSIST_IS_ENABLED(q->flags)) {
		skb->tstamp = get_packet_txtime(skb, sch);
		if (!skb->tstamp)
			return qdisc_drop(skb, sch, to_free);
	}

	qdisc_qstats_backlog_inc(sch, skb);
	sch->q.qlen++;

	return qdisc_enqueue(skb, child, to_free);
}

static struct sk_buff *taprio_peek_soft(struct Qdisc *sch)
{
	struct taprio_sched *q = qdisc_priv(sch);
	struct net_device *dev = qdisc_dev(sch);
	struct sched_entry *entry;
	struct sk_buff *skb;
	u32 gate_mask;
	int i;

	rcu_read_lock();
	entry = rcu_dereference(q->current_entry);
	gate_mask = entry ? entry->gate_mask : TAPRIO_ALL_GATES_OPEN;
	rcu_read_unlock();

	if (!gate_mask)
		return NULL;

	for (i = 0; i < dev->num_tx_queues; i++) {
		struct Qdisc *child = q->qdiscs[i];
		int prio;
		u8 tc;

		if (unlikely(!child))
			continue;

		skb = child->ops->peek(child);
		if (!skb)
			continue;

		if (TXTIME_ASSIST_IS_ENABLED(q->flags))
			return skb;

		prio = skb->priority;
		tc = netdev_get_prio_tc_map(dev, prio);

		if (!(gate_mask & BIT(tc)))
			continue;

		return skb;
	}

	return NULL;
}

static struct sk_buff *taprio_peek_offload(struct Qdisc *sch)
{
	struct taprio_sched *q = qdisc_priv(sch);
	struct net_device *dev = qdisc_dev(sch);
	struct sk_buff *skb;
	int i;

	for (i = 0; i < dev->num_tx_queues; i++) {
		struct Qdisc *child = q->qdiscs[i];

		if (unlikely(!child))
			continue;

		skb = child->ops->peek(child);
		if (!skb)
			continue;

		return skb;
	}

	return NULL;
}

static struct sk_buff *taprio_peek(struct Qdisc *sch)
{
	struct taprio_sched *q = qdisc_priv(sch);

	return q->peek(sch);
}

static void taprio_set_budget(struct taprio_sched *q, struct sched_entry *entry)
{
	atomic_set(&entry->budget,
		   div64_u64((u64)entry->interval * 1000,
			     atomic64_read(&q->picos_per_byte)));
}
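
/* Worked example (illustrative): an entry with interval = 100000 ns on a
 * 1 Gb/s link (picos_per_byte = 8000) gets a budget of
 * 100000 * 1000 / 8000 = 12500 bytes, i.e. roughly the amount of data that
 * can be serialized while this gate is open. The budget is decremented on
 * every dequeue and a packet that would drive it negative is held back.
 */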

static struct sk_buff *taprio_dequeue_soft(struct Qdisc *sch)
{
	struct taprio_sched *q = qdisc_priv(sch);
	struct net_device *dev = qdisc_dev(sch);
	struct sk_buff *skb = NULL;
	struct sched_entry *entry;
	u32 gate_mask;
	int i;

	rcu_read_lock();
	entry = rcu_dereference(q->current_entry);
	/* if there's no entry, it means that the schedule didn't
	 * start yet, so force all gates to be open, this is in
	 * accordance with IEEE 802.1Qbv-2015 Section 8.6.9.4.5
	 * "AdminGateStates"
	 */
	gate_mask = entry ? entry->gate_mask : TAPRIO_ALL_GATES_OPEN;

	if (!gate_mask)
		goto done;

	for (i = 0; i < dev->num_tx_queues; i++) {
		struct Qdisc *child = q->qdiscs[i];
		ktime_t guard;
		int prio;
		int len;
		u8 tc;

		if (unlikely(!child))
			continue;

		if (TXTIME_ASSIST_IS_ENABLED(q->flags)) {
			skb = child->ops->dequeue(child);
			if (!skb)
				continue;
			goto skb_found;
		}

		skb = child->ops->peek(child);
		if (!skb)
			continue;

		prio = skb->priority;
		tc = netdev_get_prio_tc_map(dev, prio);

		if (!(gate_mask & BIT(tc)))
			continue;

		len = qdisc_pkt_len(skb);
		guard = ktime_add_ns(taprio_get_time(q),
				     length_to_duration(q, len));

		/* In the case that there's no gate entry, there's no
		 * guard band ...
		 */
		if (gate_mask != TAPRIO_ALL_GATES_OPEN &&
		    ktime_after(guard, entry->close_time))
			continue;

		/* ... and no budget. */
		if (gate_mask != TAPRIO_ALL_GATES_OPEN &&
		    atomic_sub_return(len, &entry->budget) < 0)
			continue;

		skb = child->ops->dequeue(child);
		if (unlikely(!skb))
			goto done;

skb_found:
		qdisc_bstats_update(sch, skb);
		qdisc_qstats_backlog_dec(sch, skb);
		sch->q.qlen--;

		goto done;
	}

done:
	rcu_read_unlock();

	return skb;
}

static struct sk_buff *taprio_dequeue_offload(struct Qdisc *sch)
{
	struct taprio_sched *q = qdisc_priv(sch);
	struct net_device *dev = qdisc_dev(sch);
	struct sk_buff *skb;
	int i;

	for (i = 0; i < dev->num_tx_queues; i++) {
		struct Qdisc *child = q->qdiscs[i];

		if (unlikely(!child))
			continue;

		skb = child->ops->dequeue(child);
		if (unlikely(!skb))
			continue;

		qdisc_bstats_update(sch, skb);
		qdisc_qstats_backlog_dec(sch, skb);
		sch->q.qlen--;

		return skb;
	}

	return NULL;
}

static struct sk_buff *taprio_dequeue(struct Qdisc *sch)
{
	struct taprio_sched *q = qdisc_priv(sch);

	return q->dequeue(sch);
}

static bool should_restart_cycle(const struct sched_gate_list *oper,
				 const struct sched_entry *entry)
{
	if (list_is_last(&entry->list, &oper->entries))
		return true;

	if (ktime_compare(entry->close_time, oper->cycle_close_time) == 0)
		return true;

	return false;
}

static bool should_change_schedules(const struct sched_gate_list *admin,
				    const struct sched_gate_list *oper,
				    ktime_t close_time)
{
	ktime_t next_base_time, extension_time;

	if (!admin)
		return false;

	next_base_time = sched_base_time(admin);

	/* This is the simple case: the close_time would fall after
	 * the next schedule base_time.
	 */
	if (ktime_compare(next_base_time, close_time) <= 0)
		return true;

	/* This is the cycle_time_extension case: if the close_time
	 * plus the amount that can be extended would fall after the
	 * next schedule base_time, we can extend the current schedule
	 * for that amount.
	 */
	extension_time = ktime_add_ns(close_time, oper->cycle_time_extension);

	/* FIXME: the IEEE 802.1Q-2018 Specification isn't clear about
	 * how precisely the extension should be made. So after
	 * conformance testing, this logic may change.
	 */
	if (ktime_compare(next_base_time, extension_time) <= 0)
		return true;

	return false;
}

static enum hrtimer_restart advance_sched(struct hrtimer *timer)
{
	struct taprio_sched *q = container_of(timer, struct taprio_sched,
					      advance_timer);
	struct sched_gate_list *oper, *admin;
	struct sched_entry *entry, *next;
	struct Qdisc *sch = q->root;
	ktime_t close_time;

	spin_lock(&q->current_entry_lock);
	entry = rcu_dereference_protected(q->current_entry,
					  lockdep_is_held(&q->current_entry_lock));
	oper = rcu_dereference_protected(q->oper_sched,
					 lockdep_is_held(&q->current_entry_lock));
	admin = rcu_dereference_protected(q->admin_sched,
					  lockdep_is_held(&q->current_entry_lock));

	if (!oper)
		switch_schedules(q, &admin, &oper);

	/* This can happen in two cases: 1. this is the very first run
	 * of this function (i.e. we weren't running any schedule
	 * previously); 2. The previous schedule just ended. The first
	 * entry of each schedule is pre-calculated during the
	 * schedule initialization.
	 */
	if (unlikely(!entry || entry->close_time == oper->base_time)) {
		next = list_first_entry(&oper->entries, struct sched_entry,
					list);
		close_time = next->close_time;
		goto first_run;
	}

	if (should_restart_cycle(oper, entry)) {
		next = list_first_entry(&oper->entries, struct sched_entry,
					list);
		oper->cycle_close_time = ktime_add_ns(oper->cycle_close_time,
						      oper->cycle_time);
	} else {
		next = list_next_entry(entry, list);
	}

	close_time = ktime_add_ns(entry->close_time, next->interval);
	close_time = min_t(ktime_t, close_time, oper->cycle_close_time);

	if (should_change_schedules(admin, oper, close_time)) {
		/* Set things so the next time this runs, the new
		 * schedule runs.
		 */
		close_time = sched_base_time(admin);
		switch_schedules(q, &admin, &oper);
	}

	next->close_time = close_time;
	taprio_set_budget(q, next);

first_run:
	rcu_assign_pointer(q->current_entry, next);
	spin_unlock(&q->current_entry_lock);

	hrtimer_set_expires(&q->advance_timer, close_time);

	rcu_read_lock();
	__netif_schedule(sch);
	rcu_read_unlock();

	return HRTIMER_RESTART;
}

static const struct nla_policy entry_policy[TCA_TAPRIO_SCHED_ENTRY_MAX + 1] = {
	[TCA_TAPRIO_SCHED_ENTRY_INDEX]	   = { .type = NLA_U32 },
	[TCA_TAPRIO_SCHED_ENTRY_CMD]	   = { .type = NLA_U8 },
	[TCA_TAPRIO_SCHED_ENTRY_GATE_MASK] = { .type = NLA_U32 },
	[TCA_TAPRIO_SCHED_ENTRY_INTERVAL]  = { .type = NLA_U32 },
};

static const struct nla_policy taprio_policy[TCA_TAPRIO_ATTR_MAX + 1] = {
	[TCA_TAPRIO_ATTR_PRIOMAP] = {
		.len = sizeof(struct tc_mqprio_qopt)
	},
	[TCA_TAPRIO_ATTR_SCHED_ENTRY_LIST]           = { .type = NLA_NESTED },
	[TCA_TAPRIO_ATTR_SCHED_BASE_TIME]            = { .type = NLA_S64 },
	[TCA_TAPRIO_ATTR_SCHED_SINGLE_ENTRY]         = { .type = NLA_NESTED },
	[TCA_TAPRIO_ATTR_SCHED_CLOCKID]              = { .type = NLA_S32 },
	[TCA_TAPRIO_ATTR_SCHED_CYCLE_TIME]           = { .type = NLA_S64 },
	[TCA_TAPRIO_ATTR_SCHED_CYCLE_TIME_EXTENSION] = { .type = NLA_S64 },
	[TCA_TAPRIO_ATTR_FLAGS]                      = { .type = NLA_U32 },
};

static int fill_sched_entry(struct nlattr **tb, struct sched_entry *entry,
			    struct netlink_ext_ack *extack)
{
	u32 interval = 0;

	if (tb[TCA_TAPRIO_SCHED_ENTRY_CMD])
		entry->command = nla_get_u8(
			tb[TCA_TAPRIO_SCHED_ENTRY_CMD]);

	if (tb[TCA_TAPRIO_SCHED_ENTRY_GATE_MASK])
		entry->gate_mask = nla_get_u32(
			tb[TCA_TAPRIO_SCHED_ENTRY_GATE_MASK]);

	if (tb[TCA_TAPRIO_SCHED_ENTRY_INTERVAL])
		interval = nla_get_u32(
			tb[TCA_TAPRIO_SCHED_ENTRY_INTERVAL]);

	if (interval == 0) {
		NL_SET_ERR_MSG(extack, "Invalid interval for schedule entry");
		return -EINVAL;
	}

	entry->interval = interval;

	return 0;
}

static int parse_sched_entry(struct nlattr *n, struct sched_entry *entry,
			     int index, struct netlink_ext_ack *extack)
{
	struct nlattr *tb[TCA_TAPRIO_SCHED_ENTRY_MAX + 1] = { };
	int err;

	err = nla_parse_nested_deprecated(tb, TCA_TAPRIO_SCHED_ENTRY_MAX, n,
					  entry_policy, NULL);
	if (err < 0) {
		NL_SET_ERR_MSG(extack, "Could not parse nested entry");
		return -EINVAL;
	}

	entry->index = index;

	return fill_sched_entry(tb, entry, extack);
}

static int parse_sched_list(struct nlattr *list,
			    struct sched_gate_list *sched,
			    struct netlink_ext_ack *extack)
{
	struct nlattr *n;
	int err, rem;
	int i = 0;

	if (!list)
		return -EINVAL;

	nla_for_each_nested(n, list, rem) {
		struct sched_entry *entry;

		if (nla_type(n) != TCA_TAPRIO_SCHED_ENTRY) {
			NL_SET_ERR_MSG(extack, "Attribute is not of type 'entry'");
			continue;
		}

		entry = kzalloc(sizeof(*entry), GFP_KERNEL);
		if (!entry) {
			NL_SET_ERR_MSG(extack, "Not enough memory for entry");
			return -ENOMEM;
		}

		err = parse_sched_entry(n, entry, i, extack);
		if (err < 0) {
			kfree(entry);
			return err;
		}

		list_add_tail(&entry->list, &sched->entries);
		i++;
	}

	sched->num_entries = i;

	return i;
}

static int parse_taprio_schedule(struct nlattr **tb,
				 struct sched_gate_list *new,
				 struct netlink_ext_ack *extack)
{
	int err = 0;

	if (tb[TCA_TAPRIO_ATTR_SCHED_SINGLE_ENTRY]) {
		NL_SET_ERR_MSG(extack, "Adding a single entry is not supported");
		return -ENOTSUPP;
	}

	if (tb[TCA_TAPRIO_ATTR_SCHED_BASE_TIME])
		new->base_time = nla_get_s64(tb[TCA_TAPRIO_ATTR_SCHED_BASE_TIME]);

	if (tb[TCA_TAPRIO_ATTR_SCHED_CYCLE_TIME_EXTENSION])
		new->cycle_time_extension = nla_get_s64(tb[TCA_TAPRIO_ATTR_SCHED_CYCLE_TIME_EXTENSION]);

	if (tb[TCA_TAPRIO_ATTR_SCHED_CYCLE_TIME])
		new->cycle_time = nla_get_s64(tb[TCA_TAPRIO_ATTR_SCHED_CYCLE_TIME]);

	if (tb[TCA_TAPRIO_ATTR_SCHED_ENTRY_LIST])
		err = parse_sched_list(
			tb[TCA_TAPRIO_ATTR_SCHED_ENTRY_LIST], new, extack);
	if (err < 0)
		return err;

	if (!new->cycle_time) {
		struct sched_entry *entry;
		ktime_t cycle = 0;

		list_for_each_entry(entry, &new->entries, list)
			cycle = ktime_add_ns(cycle, entry->interval);
		new->cycle_time = cycle;
	}

	return 0;
}

static int taprio_parse_mqprio_opt(struct net_device *dev,
				   struct tc_mqprio_qopt *qopt,
				   struct netlink_ext_ack *extack,
				   u32 taprio_flags)
{
	int i, j;

	if (!qopt && !dev->num_tc) {
		NL_SET_ERR_MSG(extack, "'mqprio' configuration is necessary");
		return -EINVAL;
	}

	/* If num_tc is already set, it means that the user already
	 * configured the mqprio part
	 */
	if (dev->num_tc)
		return 0;

	/* Verify num_tc is not out of max range */
	if (qopt->num_tc > TC_MAX_QUEUE) {
		NL_SET_ERR_MSG(extack, "Number of traffic classes is outside valid range");
		return -EINVAL;
	}

	/* taprio imposes that traffic classes map 1:n to tx queues */
	if (qopt->num_tc > dev->num_tx_queues) {
		NL_SET_ERR_MSG(extack, "Number of traffic classes is greater than number of HW queues");
		return -EINVAL;
	}

	/* Verify priority mapping uses valid tcs */
	for (i = 0; i <= TC_BITMASK; i++) {
		if (qopt->prio_tc_map[i] >= qopt->num_tc) {
			NL_SET_ERR_MSG(extack, "Invalid traffic class in priority to traffic class mapping");
			return -EINVAL;
		}
	}

	for (i = 0; i < qopt->num_tc; i++) {
		unsigned int last = qopt->offset[i] + qopt->count[i];

		/* Verify the queue count is in tx range; being equal to
		 * real_num_tx_queues indicates the last queue is in use.
		 */
940 if (qopt->offset[i] >= dev->num_tx_queues ||
941 !qopt->count[i] ||
942 last > dev->real_num_tx_queues) {
943 NL_SET_ERR_MSG(extack, "Invalid queue in traffic class to queue mapping");
944 return -EINVAL;
945 }
946
Vedang Patel4cfd5772019-06-25 15:07:17 -0700947 if (TXTIME_ASSIST_IS_ENABLED(taprio_flags))
948 continue;
949
Vinicius Costa Gomes5a781cc2018-09-28 17:59:43 -0700950 /* Verify that the offset and counts do not overlap */
951 for (j = i + 1; j < qopt->num_tc; j++) {
952 if (last > qopt->offset[j]) {
953 NL_SET_ERR_MSG(extack, "Detected overlap in the traffic class to queue mapping");
954 return -EINVAL;
955 }
956 }
957 }
958
959 return 0;
960}
961
Vinicius Costa Gomesa3d43c02019-04-29 15:48:31 -0700962static int taprio_get_start_time(struct Qdisc *sch,
963 struct sched_gate_list *sched,
964 ktime_t *start)
Vinicius Costa Gomes5a781cc2018-09-28 17:59:43 -0700965{
966 struct taprio_sched *q = qdisc_priv(sch);
Vinicius Costa Gomes5a781cc2018-09-28 17:59:43 -0700967 ktime_t now, base, cycle;
968 s64 n;
969
Vinicius Costa Gomesa3d43c02019-04-29 15:48:31 -0700970 base = sched_base_time(sched);
Vedang Patel7ede7b02019-06-25 15:07:18 -0700971 now = taprio_get_time(q);
Andre Guedes85990992019-04-23 12:44:21 -0700972
973 if (ktime_after(base, now)) {
974 *start = base;
975 return 0;
976 }
Vinicius Costa Gomes5a781cc2018-09-28 17:59:43 -0700977
Vedang Patel037be032019-06-25 15:07:15 -0700978 cycle = sched->cycle_time;
Vinicius Costa Gomes5a781cc2018-09-28 17:59:43 -0700979
Andre Guedes85990992019-04-23 12:44:21 -0700980 /* The qdisc is expected to have at least one sched_entry. Moreover,
981 * any entry must have 'interval' > 0. Thus if the cycle time is zero,
982 * something went really wrong. In that case, we should warn about this
983 * inconsistent state and return error.
984 */
985 if (WARN_ON(!cycle))
986 return -EFAULT;
Vinicius Costa Gomes5a781cc2018-09-28 17:59:43 -0700987
988 /* Schedule the start time for the beginning of the next
989 * cycle.
990 */
991 n = div64_s64(ktime_sub_ns(now, base), cycle);
Andre Guedes85990992019-04-23 12:44:21 -0700992 *start = ktime_add_ns(base, (n + 1) * cycle);
993 return 0;
Vinicius Costa Gomes5a781cc2018-09-28 17:59:43 -0700994}
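
/* Worked example (illustrative): with base = 10 s, cycle = 1 ms and
 * now = 10.0035 s, n = 3 and *start = 10 s + 4 * 1 ms = 10.004 s, i.e. a
 * schedule whose base_time is already in the past is (re)started at the
 * beginning of the next full cycle rather than somewhere in the middle of
 * one.
 */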

static void setup_first_close_time(struct taprio_sched *q,
				   struct sched_gate_list *sched, ktime_t base)
{
	struct sched_entry *first;
	ktime_t cycle;

	first = list_first_entry(&sched->entries,
				 struct sched_entry, list);

	cycle = sched->cycle_time;

	/* FIXME: find a better place to do this */
	sched->cycle_close_time = ktime_add_ns(base, cycle);

	first->close_time = ktime_add_ns(base, first->interval);
	taprio_set_budget(q, first);
	rcu_assign_pointer(q->current_entry, NULL);
}

static void taprio_start_sched(struct Qdisc *sch,
			       ktime_t start, struct sched_gate_list *new)
{
	struct taprio_sched *q = qdisc_priv(sch);
	ktime_t expires;

	if (FULL_OFFLOAD_IS_ENABLED(q->flags))
		return;

	expires = hrtimer_get_expires(&q->advance_timer);
	if (expires == 0)
		expires = KTIME_MAX;

	/* If the new schedule starts before the next expiration, we
	 * reprogram it to the earliest one, so we change the admin
	 * schedule to the operational one at the right time.
	 */
	start = min_t(ktime_t, start, expires);

	hrtimer_start(&q->advance_timer, start, HRTIMER_MODE_ABS);
}

static void taprio_set_picos_per_byte(struct net_device *dev,
				      struct taprio_sched *q)
{
	struct ethtool_link_ksettings ecmd;
	int speed = SPEED_10;
	int picos_per_byte;
	int err;

	err = __ethtool_get_link_ksettings(dev, &ecmd);
	if (err < 0)
		goto skip;

	if (ecmd.base.speed && ecmd.base.speed != SPEED_UNKNOWN)
		speed = ecmd.base.speed;

skip:
	picos_per_byte = (USEC_PER_SEC * 8) / speed;

	atomic64_set(&q->picos_per_byte, picos_per_byte);
	netdev_dbg(dev, "taprio: set %s's picos_per_byte to: %lld, linkspeed: %d\n",
		   dev->name, (long long)atomic64_read(&q->picos_per_byte),
		   ecmd.base.speed);
}
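
/* Worked example (illustrative): ecmd.base.speed is expressed in Mbit/s, so
 * a 1 Gb/s link gives picos_per_byte = (1000000 * 8) / 1000 = 8000 and a
 * 10 Gb/s link gives 800. When the link speed is unknown, the SPEED_10
 * fallback above makes every byte look as expensive as on a 10 Mb/s link,
 * which errs on the conservative side for the guard-band calculations.
 */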

static int taprio_dev_notifier(struct notifier_block *nb, unsigned long event,
			       void *ptr)
{
	struct net_device *dev = netdev_notifier_info_to_dev(ptr);
	struct net_device *qdev;
	struct taprio_sched *q;
	bool found = false;

	ASSERT_RTNL();

	if (event != NETDEV_UP && event != NETDEV_CHANGE)
		return NOTIFY_DONE;

	spin_lock(&taprio_list_lock);
	list_for_each_entry(q, &taprio_list, taprio_list) {
		qdev = qdisc_dev(q->root);
		if (qdev == dev) {
			found = true;
			break;
		}
	}
	spin_unlock(&taprio_list_lock);

	if (found)
		taprio_set_picos_per_byte(dev, q);

	return NOTIFY_DONE;
}

static void setup_txtime(struct taprio_sched *q,
			 struct sched_gate_list *sched, ktime_t base)
{
	struct sched_entry *entry;
	u32 interval = 0;

	list_for_each_entry(entry, &sched->entries, list) {
		entry->next_txtime = ktime_add_ns(base, interval);
		interval += entry->interval;
	}
}
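
/* Illustrative effect of the loop above: for a schedule with intervals of
 * 300 us, 300 us and 400 us, the entries' initial next_txtime values become
 * base, base + 300 us and base + 600 us respectively, so each entry starts
 * out owning the slice of the first cycle that follows the entries before
 * it.
 */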

static struct tc_taprio_qopt_offload *taprio_offload_alloc(int num_entries)
{
	size_t size = sizeof(struct tc_taprio_sched_entry) * num_entries +
		      sizeof(struct __tc_taprio_qopt_offload);
	struct __tc_taprio_qopt_offload *__offload;

	__offload = kzalloc(size, GFP_KERNEL);
	if (!__offload)
		return NULL;

	refcount_set(&__offload->users, 1);

	return &__offload->offload;
}

struct tc_taprio_qopt_offload *taprio_offload_get(struct tc_taprio_qopt_offload
						  *offload)
{
	struct __tc_taprio_qopt_offload *__offload;

	__offload = container_of(offload, struct __tc_taprio_qopt_offload,
				 offload);

	refcount_inc(&__offload->users);

	return offload;
}
EXPORT_SYMBOL_GPL(taprio_offload_get);

void taprio_offload_free(struct tc_taprio_qopt_offload *offload)
{
	struct __tc_taprio_qopt_offload *__offload;

	__offload = container_of(offload, struct __tc_taprio_qopt_offload,
				 offload);

	if (!refcount_dec_and_test(&__offload->users))
		return;

	kfree(__offload);
}
EXPORT_SYMBOL_GPL(taprio_offload_free);

/* This function only serves to keep the pointers to the "oper" and "admin"
 * schedules valid in relation to their base times, so that when dump() is
 * called the user looks at the right schedules.
 * When using full offload, the admin configuration is promoted to oper at the
 * base_time in the PHC time domain. But because the system time is not
 * necessarily in sync with that, we can't just trigger a hrtimer to call
 * switch_schedules at the right hardware time.
 * At the moment we call this by hand right away from taprio, but in the future
 * it will be useful to create a mechanism for drivers to notify taprio of the
 * offload state (PENDING, ACTIVE, INACTIVE) so it can be visible in dump().
 * This is left as TODO.
 */
static void taprio_offload_config_changed(struct taprio_sched *q)
{
	struct sched_gate_list *oper, *admin;

	spin_lock(&q->current_entry_lock);

	oper = rcu_dereference_protected(q->oper_sched,
					 lockdep_is_held(&q->current_entry_lock));
	admin = rcu_dereference_protected(q->admin_sched,
					  lockdep_is_held(&q->current_entry_lock));

	switch_schedules(q, &admin, &oper);

	spin_unlock(&q->current_entry_lock);
}

static void taprio_sched_to_offload(struct taprio_sched *q,
				    struct sched_gate_list *sched,
				    const struct tc_mqprio_qopt *mqprio,
				    struct tc_taprio_qopt_offload *offload)
{
	struct sched_entry *entry;
	int i = 0;

	offload->base_time = sched->base_time;
	offload->cycle_time = sched->cycle_time;
	offload->cycle_time_extension = sched->cycle_time_extension;

	list_for_each_entry(entry, &sched->entries, list) {
		struct tc_taprio_sched_entry *e = &offload->entries[i];

		e->command = entry->command;
		e->interval = entry->interval;
		e->gate_mask = entry->gate_mask;
		i++;
	}

	offload->num_entries = i;
}

static int taprio_enable_offload(struct net_device *dev,
				 struct tc_mqprio_qopt *mqprio,
				 struct taprio_sched *q,
				 struct sched_gate_list *sched,
				 struct netlink_ext_ack *extack)
{
	const struct net_device_ops *ops = dev->netdev_ops;
	struct tc_taprio_qopt_offload *offload;
	int err = 0;

	if (!ops->ndo_setup_tc) {
		NL_SET_ERR_MSG(extack,
			       "Device does not support taprio offload");
		return -EOPNOTSUPP;
	}

	offload = taprio_offload_alloc(sched->num_entries);
	if (!offload) {
		NL_SET_ERR_MSG(extack,
			       "Not enough memory for enabling offload mode");
		return -ENOMEM;
	}
	offload->enable = 1;
	taprio_sched_to_offload(q, sched, mqprio, offload);

	err = ops->ndo_setup_tc(dev, TC_SETUP_QDISC_TAPRIO, offload);
	if (err < 0) {
		NL_SET_ERR_MSG(extack,
			       "Device failed to setup taprio offload");
		goto done;
	}

done:
	taprio_offload_free(offload);

	return err;
}

static int taprio_disable_offload(struct net_device *dev,
				  struct taprio_sched *q,
				  struct netlink_ext_ack *extack)
{
	const struct net_device_ops *ops = dev->netdev_ops;
	struct tc_taprio_qopt_offload *offload;
	int err;

	if (!FULL_OFFLOAD_IS_ENABLED(q->flags))
		return 0;

	if (!ops->ndo_setup_tc)
		return -EOPNOTSUPP;

	offload = taprio_offload_alloc(0);
	if (!offload) {
		NL_SET_ERR_MSG(extack,
			       "Not enough memory to disable offload mode");
		return -ENOMEM;
	}
	offload->enable = 0;

	err = ops->ndo_setup_tc(dev, TC_SETUP_QDISC_TAPRIO, offload);
	if (err < 0) {
		NL_SET_ERR_MSG(extack,
			       "Device failed to disable offload");
		goto out;
	}

out:
	taprio_offload_free(offload);

	return err;
}

/* If full offload is enabled, the only possible clockid is the net device's
 * PHC. For that reason, specifying a clockid through netlink is incorrect.
 * For txtime-assist, it is implicitly assumed that the device's PHC is kept
 * in sync with the specified clockid via a user space daemon such as phc2sys.
 * For both software taprio and txtime-assist, the clockid is used for the
 * hrtimer that advances the schedule and is hence mandatory.
 */
static int taprio_parse_clockid(struct Qdisc *sch, struct nlattr **tb,
				struct netlink_ext_ack *extack)
{
	struct taprio_sched *q = qdisc_priv(sch);
	struct net_device *dev = qdisc_dev(sch);
	int err = -EINVAL;

	if (FULL_OFFLOAD_IS_ENABLED(q->flags)) {
		const struct ethtool_ops *ops = dev->ethtool_ops;
		struct ethtool_ts_info info = {
			.cmd = ETHTOOL_GET_TS_INFO,
			.phc_index = -1,
		};

		if (tb[TCA_TAPRIO_ATTR_SCHED_CLOCKID]) {
			NL_SET_ERR_MSG(extack,
				       "The 'clockid' cannot be specified for full offload");
			goto out;
		}

		if (ops && ops->get_ts_info)
			err = ops->get_ts_info(dev, &info);

		if (err || info.phc_index < 0) {
			NL_SET_ERR_MSG(extack,
				       "Device does not have a PTP clock");
			err = -ENOTSUPP;
			goto out;
		}
	} else if (tb[TCA_TAPRIO_ATTR_SCHED_CLOCKID]) {
		int clockid = nla_get_s32(tb[TCA_TAPRIO_ATTR_SCHED_CLOCKID]);

		/* We only support static clockids and we don't allow
		 * the clockid to be modified after the first init.
		 */
		if (clockid < 0 ||
		    (q->clockid != -1 && q->clockid != clockid)) {
			NL_SET_ERR_MSG(extack,
				       "Changing the 'clockid' of a running schedule is not supported");
			err = -ENOTSUPP;
			goto out;
		}

		switch (clockid) {
		case CLOCK_REALTIME:
			q->tk_offset = TK_OFFS_REAL;
			break;
		case CLOCK_MONOTONIC:
			q->tk_offset = TK_OFFS_MAX;
			break;
		case CLOCK_BOOTTIME:
			q->tk_offset = TK_OFFS_BOOT;
			break;
		case CLOCK_TAI:
			q->tk_offset = TK_OFFS_TAI;
			break;
		default:
			NL_SET_ERR_MSG(extack, "Invalid 'clockid'");
			err = -EINVAL;
			goto out;
		}

		q->clockid = clockid;
	} else {
		NL_SET_ERR_MSG(extack, "Specifying a 'clockid' is mandatory");
		goto out;
	}

	/* Everything went ok, return success. */
	err = 0;

out:
	return err;
}

static int taprio_mqprio_cmp(const struct net_device *dev,
			     const struct tc_mqprio_qopt *mqprio)
{
	int i;

	if (!mqprio || mqprio->num_tc != dev->num_tc)
		return -1;

	for (i = 0; i < mqprio->num_tc; i++)
		if (dev->tc_to_txq[i].count != mqprio->count[i] ||
		    dev->tc_to_txq[i].offset != mqprio->offset[i])
			return -1;

	for (i = 0; i <= TC_BITMASK; i++)
		if (dev->prio_tc_map[i] != mqprio->prio_tc_map[i])
			return -1;

	return 0;
}

/* The semantics of the 'flags' argument in relation to 'change()'
 * requests are interpreted following two rules (which are applied in
 * this order): (1) an omitted 'flags' argument is interpreted as
 * zero; (2) the 'flags' of a "running" taprio instance cannot be
 * changed.
 */
1378static int taprio_new_flags(const struct nlattr *attr, u32 old,
1379 struct netlink_ext_ack *extack)
1380{
1381 u32 new = 0;
1382
1383 if (attr)
1384 new = nla_get_u32(attr);
1385
1386 if (old != TAPRIO_FLAGS_INVALID && old != new) {
1387 NL_SET_ERR_MSG_MOD(extack, "Changing 'flags' of a running schedule is not supported");
1388 return -EOPNOTSUPP;
1389 }
1390
1391 if (!taprio_flags_valid(new)) {
1392 NL_SET_ERR_MSG_MOD(extack, "Specified 'flags' are not valid");
1393 return -EINVAL;
1394 }
1395
1396 return new;
1397}
1398
Vinicius Costa Gomes5a781cc2018-09-28 17:59:43 -07001399static int taprio_change(struct Qdisc *sch, struct nlattr *opt,
1400 struct netlink_ext_ack *extack)
1401{
1402 struct nlattr *tb[TCA_TAPRIO_ATTR_MAX + 1] = { };
Vinicius Costa Gomesa3d43c02019-04-29 15:48:31 -07001403 struct sched_gate_list *oper, *admin, *new_admin;
Vinicius Costa Gomes5a781cc2018-09-28 17:59:43 -07001404 struct taprio_sched *q = qdisc_priv(sch);
1405 struct net_device *dev = qdisc_dev(sch);
1406 struct tc_mqprio_qopt *mqprio = NULL;
Vinicius Costa Gomesa3d43c02019-04-29 15:48:31 -07001407 unsigned long flags;
Vinicius Costa Gomes5a781cc2018-09-28 17:59:43 -07001408 ktime_t start;
Vinicius Costa Gomes9c66d152019-09-15 04:59:58 +03001409 int i, err;
Vinicius Costa Gomes5a781cc2018-09-28 17:59:43 -07001410
Johannes Berg8cb08172019-04-26 14:07:28 +02001411 err = nla_parse_nested_deprecated(tb, TCA_TAPRIO_ATTR_MAX, opt,
1412 taprio_policy, extack);
Vinicius Costa Gomes5a781cc2018-09-28 17:59:43 -07001413 if (err < 0)
1414 return err;
1415
Vinicius Costa Gomes5a781cc2018-09-28 17:59:43 -07001416 if (tb[TCA_TAPRIO_ATTR_PRIOMAP])
1417 mqprio = nla_data(tb[TCA_TAPRIO_ATTR_PRIOMAP]);
1418
Vinicius Costa Gomesa9d62272020-02-06 13:46:07 -08001419 err = taprio_new_flags(tb[TCA_TAPRIO_ATTR_FLAGS],
1420 q->flags, extack);
1421 if (err < 0)
1422 return err;
Vedang Patel4cfd5772019-06-25 15:07:17 -07001423
Vinicius Costa Gomesa9d62272020-02-06 13:46:07 -08001424 q->flags = err;
Vedang Patel4cfd5772019-06-25 15:07:17 -07001425
Vinicius Costa Gomesa9d62272020-02-06 13:46:07 -08001426 err = taprio_parse_mqprio_opt(dev, mqprio, extack, q->flags);
Vinicius Costa Gomes5a781cc2018-09-28 17:59:43 -07001427 if (err < 0)
1428 return err;
1429
Vinicius Costa Gomesa3d43c02019-04-29 15:48:31 -07001430 new_admin = kzalloc(sizeof(*new_admin), GFP_KERNEL);
1431 if (!new_admin) {
1432 NL_SET_ERR_MSG(extack, "Not enough memory for a new schedule");
1433 return -ENOMEM;
1434 }
1435 INIT_LIST_HEAD(&new_admin->entries);
Vinicius Costa Gomes5a781cc2018-09-28 17:59:43 -07001436
Vinicius Costa Gomesa3d43c02019-04-29 15:48:31 -07001437 rcu_read_lock();
1438 oper = rcu_dereference(q->oper_sched);
1439 admin = rcu_dereference(q->admin_sched);
1440 rcu_read_unlock();
Vinicius Costa Gomes5a781cc2018-09-28 17:59:43 -07001441
Ivan Khoronzhukb5a0faa2019-11-19 02:23:12 +02001442 /* no changes - no new mqprio settings */
1443 if (!taprio_mqprio_cmp(dev, mqprio))
1444 mqprio = NULL;
1445
Vinicius Costa Gomesa3d43c02019-04-29 15:48:31 -07001446 if (mqprio && (oper || admin)) {
1447 NL_SET_ERR_MSG(extack, "Changing the traffic mapping of a running schedule is not supported");
1448 err = -ENOTSUPP;
1449 goto free_sched;
Vinicius Costa Gomes5a781cc2018-09-28 17:59:43 -07001450 }
1451
Vinicius Costa Gomesa3d43c02019-04-29 15:48:31 -07001452 err = parse_taprio_schedule(tb, new_admin, extack);
1453 if (err < 0)
1454 goto free_sched;
Vinicius Costa Gomes5a781cc2018-09-28 17:59:43 -07001455
Vinicius Costa Gomesa3d43c02019-04-29 15:48:31 -07001456 if (new_admin->num_entries == 0) {
1457 NL_SET_ERR_MSG(extack, "There should be at least one entry in the schedule");
1458 err = -EINVAL;
1459 goto free_sched;
1460 }
Vinicius Costa Gomes5a781cc2018-09-28 17:59:43 -07001461
Vinicius Costa Gomes9c66d152019-09-15 04:59:58 +03001462 err = taprio_parse_clockid(sch, tb, extack);
1463 if (err < 0)
		goto free_sched;

	taprio_set_picos_per_byte(dev, q);

	if (mqprio) {
		netdev_set_num_tc(dev, mqprio->num_tc);
		for (i = 0; i < mqprio->num_tc; i++)
			netdev_set_tc_queue(dev, i,
					    mqprio->count[i],
					    mqprio->offset[i]);

		/* Always use supplied priority mappings */
		for (i = 0; i <= TC_BITMASK; i++)
			netdev_set_prio_tc_map(dev, i,
					       mqprio->prio_tc_map[i]);
	}

	if (FULL_OFFLOAD_IS_ENABLED(q->flags))
		err = taprio_enable_offload(dev, mqprio, q, new_admin, extack);
	else
		err = taprio_disable_offload(dev, q, extack);
	if (err)
		goto free_sched;

	/* Protects against enqueue()/dequeue() */
	spin_lock_bh(qdisc_lock(sch));

	if (tb[TCA_TAPRIO_ATTR_TXTIME_DELAY]) {
		if (!TXTIME_ASSIST_IS_ENABLED(q->flags)) {
			NL_SET_ERR_MSG_MOD(extack, "txtime-delay can only be set when txtime-assist mode is enabled");
			err = -EINVAL;
			goto unlock;
		}

		q->txtime_delay = nla_get_u32(tb[TCA_TAPRIO_ATTR_TXTIME_DELAY]);
	}

	if (!TXTIME_ASSIST_IS_ENABLED(q->flags) &&
	    !FULL_OFFLOAD_IS_ENABLED(q->flags) &&
	    !hrtimer_active(&q->advance_timer)) {
		hrtimer_init(&q->advance_timer, q->clockid, HRTIMER_MODE_ABS);
		q->advance_timer.function = advance_sched;
	}

	if (FULL_OFFLOAD_IS_ENABLED(q->flags)) {
		q->dequeue = taprio_dequeue_offload;
		q->peek = taprio_peek_offload;
	} else {
		/* Be sure to always keep the function pointers
		 * in a consistent state.
		 */
		q->dequeue = taprio_dequeue_soft;
		q->peek = taprio_peek_soft;
	}

	err = taprio_get_start_time(sch, new_admin, &start);
	if (err < 0) {
		NL_SET_ERR_MSG(extack, "Internal error: failed get start time");
		goto unlock;
	}

	setup_txtime(q, new_admin, start);

	if (TXTIME_ASSIST_IS_ENABLED(q->flags)) {
		if (!oper) {
			rcu_assign_pointer(q->oper_sched, new_admin);
			err = 0;
			new_admin = NULL;
			goto unlock;
		}

		rcu_assign_pointer(q->admin_sched, new_admin);
		if (admin)
			call_rcu(&admin->rcu, taprio_free_sched_cb);
	} else {
		setup_first_close_time(q, new_admin, start);

		/* Protects against advance_sched() */
		spin_lock_irqsave(&q->current_entry_lock, flags);

		taprio_start_sched(sch, start, new_admin);

		rcu_assign_pointer(q->admin_sched, new_admin);
		if (admin)
			call_rcu(&admin->rcu, taprio_free_sched_cb);

		spin_unlock_irqrestore(&q->current_entry_lock, flags);

		if (FULL_OFFLOAD_IS_ENABLED(q->flags))
			taprio_offload_config_changed(q);
	}

	new_admin = NULL;
	err = 0;

unlock:
	spin_unlock_bh(qdisc_lock(sch));

free_sched:
	if (new_admin)
		call_rcu(&new_admin->rcu, taprio_free_sched_cb);

	return err;
}

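/* Tear down the qdisc: unlink it from the global taprio list, stop the
 * schedule-advance timer, drop any hardware offload state, release the
 * per-queue child qdiscs and both the operational and administrative
 * schedules (via RCU), and clear the device's traffic-class mapping.
 */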
static void taprio_destroy(struct Qdisc *sch)
{
	struct taprio_sched *q = qdisc_priv(sch);
	struct net_device *dev = qdisc_dev(sch);
	unsigned int i;

	spin_lock(&taprio_list_lock);
	list_del(&q->taprio_list);
	spin_unlock(&taprio_list_lock);

	hrtimer_cancel(&q->advance_timer);

	taprio_disable_offload(dev, q, NULL);

	if (q->qdiscs) {
		for (i = 0; i < dev->num_tx_queues && q->qdiscs[i]; i++)
			qdisc_put(q->qdiscs[i]);

		kfree(q->qdiscs);
	}
	q->qdiscs = NULL;

	netdev_reset_tc(dev);

	if (q->oper_sched)
		call_rcu(&q->oper_sched->rcu, taprio_free_sched_cb);

	if (q->admin_sched)
		call_rcu(&q->admin_sched->rcu, taprio_free_sched_cb);
}

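/* Create the qdisc with a safe default state: software dequeue/peek,
 * an invalid clockid and flags (the real values arrive via netlink in
 * taprio_change()), and one pfifo child per hardware TX queue so that
 * attachment later cannot fail.
 *
 * Illustrative iproute2 invocation (option names depend on the tc
 * version; values are examples only):
 *
 *   tc qdisc replace dev eth0 parent root handle 100 taprio \
 *       num_tc 3 map 2 2 1 0 2 2 2 2 2 2 2 2 2 2 2 2 \
 *       queues 1@0 1@1 2@2 \
 *       base-time 1528743495910289987 \
 *       sched-entry S 01 300000 \
 *       sched-entry S 02 300000 \
 *       sched-entry S 04 400000 \
 *       clockid CLOCK_TAI
 */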
static int taprio_init(struct Qdisc *sch, struct nlattr *opt,
		       struct netlink_ext_ack *extack)
{
	struct taprio_sched *q = qdisc_priv(sch);
	struct net_device *dev = qdisc_dev(sch);
	int i;

	spin_lock_init(&q->current_entry_lock);

	hrtimer_init(&q->advance_timer, CLOCK_TAI, HRTIMER_MODE_ABS);
	q->advance_timer.function = advance_sched;

	q->dequeue = taprio_dequeue_soft;
	q->peek = taprio_peek_soft;

	q->root = sch;

	/* We only support static clockids. Use an invalid value as default
	 * and get the valid one on taprio_change().
	 */
	q->clockid = -1;
	q->flags = TAPRIO_FLAGS_INVALID;

	spin_lock(&taprio_list_lock);
	list_add(&q->taprio_list, &taprio_list);
	spin_unlock(&taprio_list_lock);

	if (sch->parent != TC_H_ROOT)
		return -EOPNOTSUPP;

	if (!netif_is_multiqueue(dev))
		return -EOPNOTSUPP;

	/* pre-allocate qdisc, attachment can't fail */
	q->qdiscs = kcalloc(dev->num_tx_queues,
			    sizeof(q->qdiscs[0]),
			    GFP_KERNEL);

	if (!q->qdiscs)
		return -ENOMEM;

	if (!opt)
		return -EINVAL;

	for (i = 0; i < dev->num_tx_queues; i++) {
		struct netdev_queue *dev_queue;
		struct Qdisc *qdisc;

		dev_queue = netdev_get_tx_queue(dev, i);
		qdisc = qdisc_create_dflt(dev_queue,
					  &pfifo_qdisc_ops,
					  TC_H_MAKE(TC_H_MAJ(sch->handle),
						    TC_H_MIN(i + 1)),
					  extack);
		if (!qdisc)
			return -ENOMEM;

		if (i < dev->real_num_tx_queues)
			qdisc_hash_add(qdisc, false);

		q->qdiscs[i] = qdisc;
	}

	return taprio_change(sch, opt, extack);
}

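/* Class IDs are 1-based; map class 'cl' to the corresponding TX queue,
 * or return NULL if it is out of range.
 */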
static struct netdev_queue *taprio_queue_get(struct Qdisc *sch,
					     unsigned long cl)
{
	struct net_device *dev = qdisc_dev(sch);
	unsigned long ntx = cl - 1;

	if (ntx >= dev->num_tx_queues)
		return NULL;

	return netdev_get_tx_queue(dev, ntx);
}

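/* Replace the child qdisc of one TX queue. The device is briefly
 * deactivated while the pointer is swapped so no packets are in flight
 * through the old child.
 */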
static int taprio_graft(struct Qdisc *sch, unsigned long cl,
			struct Qdisc *new, struct Qdisc **old,
			struct netlink_ext_ack *extack)
{
	struct taprio_sched *q = qdisc_priv(sch);
	struct net_device *dev = qdisc_dev(sch);
	struct netdev_queue *dev_queue = taprio_queue_get(sch, cl);

	if (!dev_queue)
		return -EINVAL;

	if (dev->flags & IFF_UP)
		dev_deactivate(dev);

	*old = q->qdiscs[cl - 1];
	q->qdiscs[cl - 1] = new;

	if (new)
		new->flags |= TCQ_F_ONETXQUEUE | TCQ_F_NOPARENT;

	if (dev->flags & IFF_UP)
		dev_activate(dev);

	return 0;
}

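/* Emit a single schedule entry (index, command, gate mask, interval)
 * as a nested TCA_TAPRIO_SCHED_ENTRY netlink attribute.
 */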
static int dump_entry(struct sk_buff *msg,
		      const struct sched_entry *entry)
{
	struct nlattr *item;

	item = nla_nest_start_noflag(msg, TCA_TAPRIO_SCHED_ENTRY);
	if (!item)
		return -ENOSPC;

	if (nla_put_u32(msg, TCA_TAPRIO_SCHED_ENTRY_INDEX, entry->index))
		goto nla_put_failure;

	if (nla_put_u8(msg, TCA_TAPRIO_SCHED_ENTRY_CMD, entry->command))
		goto nla_put_failure;

	if (nla_put_u32(msg, TCA_TAPRIO_SCHED_ENTRY_GATE_MASK,
			entry->gate_mask))
		goto nla_put_failure;

	if (nla_put_u32(msg, TCA_TAPRIO_SCHED_ENTRY_INTERVAL,
			entry->interval))
		goto nla_put_failure;

	return nla_nest_end(msg, item);

nla_put_failure:
	nla_nest_cancel(msg, item);
	return -1;
}

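/* Emit a whole schedule: base-time, cycle-time, cycle-time-extension
 * and the nested list of entries.
 */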
static int dump_schedule(struct sk_buff *msg,
			 const struct sched_gate_list *root)
{
	struct nlattr *entry_list;
	struct sched_entry *entry;

	if (nla_put_s64(msg, TCA_TAPRIO_ATTR_SCHED_BASE_TIME,
			root->base_time, TCA_TAPRIO_PAD))
		return -1;

	if (nla_put_s64(msg, TCA_TAPRIO_ATTR_SCHED_CYCLE_TIME,
			root->cycle_time, TCA_TAPRIO_PAD))
		return -1;

	if (nla_put_s64(msg, TCA_TAPRIO_ATTR_SCHED_CYCLE_TIME_EXTENSION,
			root->cycle_time_extension, TCA_TAPRIO_PAD))
		return -1;

	entry_list = nla_nest_start_noflag(msg,
					   TCA_TAPRIO_ATTR_SCHED_ENTRY_LIST);
	if (!entry_list)
		goto error_nest;

	list_for_each_entry(entry, &root->entries, list) {
		if (dump_entry(msg, entry) < 0)
			goto error_nest;
	}

	nla_nest_end(msg, entry_list);
	return 0;

error_nest:
	nla_nest_cancel(msg, entry_list);
	return -1;
}

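/* Dump the current configuration: the mqprio-style priority map, the
 * clockid (only reported without full offload), flags, txtime-delay,
 * the operational schedule and, if present, the pending administrative
 * schedule under TCA_TAPRIO_ATTR_ADMIN_SCHED.
 */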
static int taprio_dump(struct Qdisc *sch, struct sk_buff *skb)
{
	struct taprio_sched *q = qdisc_priv(sch);
	struct net_device *dev = qdisc_dev(sch);
	struct sched_gate_list *oper, *admin;
	struct tc_mqprio_qopt opt = { 0 };
	struct nlattr *nest, *sched_nest;
	unsigned int i;

	rcu_read_lock();
	oper = rcu_dereference(q->oper_sched);
	admin = rcu_dereference(q->admin_sched);

	opt.num_tc = netdev_get_num_tc(dev);
	memcpy(opt.prio_tc_map, dev->prio_tc_map, sizeof(opt.prio_tc_map));

	for (i = 0; i < netdev_get_num_tc(dev); i++) {
		opt.count[i] = dev->tc_to_txq[i].count;
		opt.offset[i] = dev->tc_to_txq[i].offset;
	}

	nest = nla_nest_start_noflag(skb, TCA_OPTIONS);
	if (!nest)
		goto start_error;

	if (nla_put(skb, TCA_TAPRIO_ATTR_PRIOMAP, sizeof(opt), &opt))
		goto options_error;

	if (!FULL_OFFLOAD_IS_ENABLED(q->flags) &&
	    nla_put_s32(skb, TCA_TAPRIO_ATTR_SCHED_CLOCKID, q->clockid))
		goto options_error;

	if (q->flags && nla_put_u32(skb, TCA_TAPRIO_ATTR_FLAGS, q->flags))
		goto options_error;

	if (q->txtime_delay &&
	    nla_put_u32(skb, TCA_TAPRIO_ATTR_TXTIME_DELAY, q->txtime_delay))
		goto options_error;

	if (oper && dump_schedule(skb, oper))
		goto options_error;

	if (!admin)
		goto done;

	sched_nest = nla_nest_start_noflag(skb, TCA_TAPRIO_ATTR_ADMIN_SCHED);
	if (!sched_nest)
		goto options_error;

	if (dump_schedule(skb, admin))
		goto admin_error;

	nla_nest_end(skb, sched_nest);

done:
	rcu_read_unlock();

	return nla_nest_end(skb, nest);

admin_error:
	nla_nest_cancel(skb, sched_nest);

options_error:
	nla_nest_cancel(skb, nest);

start_error:
	rcu_read_unlock();
	return -ENOSPC;
}

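/* Return the qdisc currently attached to the TX queue backing class 'cl'. */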
static struct Qdisc *taprio_leaf(struct Qdisc *sch, unsigned long cl)
{
	struct netdev_queue *dev_queue = taprio_queue_get(sch, cl);

	if (!dev_queue)
		return NULL;

	return dev_queue->qdisc_sleeping;
}

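/* A class exists for every TX queue; classid minor numbers map 1:1 to
 * queues, so just validate the range.
 */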
static unsigned long taprio_find(struct Qdisc *sch, u32 classid)
{
	unsigned int ntx = TC_H_MIN(classid);

	if (!taprio_queue_get(sch, ntx))
		return 0;
	return ntx;
}

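/* Report a per-queue class: parented at the root, with the handle of
 * the qdisc attached to that queue.
 */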
static int taprio_dump_class(struct Qdisc *sch, unsigned long cl,
			     struct sk_buff *skb, struct tcmsg *tcm)
{
	struct netdev_queue *dev_queue = taprio_queue_get(sch, cl);

	tcm->tcm_parent = TC_H_ROOT;
	tcm->tcm_handle |= TC_H_MIN(cl);
	tcm->tcm_info = dev_queue->qdisc_sleeping->handle;

	return 0;
}

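/* Per-class statistics come straight from the child qdisc attached to
 * the corresponding TX queue.
 */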
static int taprio_dump_class_stats(struct Qdisc *sch, unsigned long cl,
				   struct gnet_dump *d)
	__releases(d->lock)
	__acquires(d->lock)
{
	struct netdev_queue *dev_queue = taprio_queue_get(sch, cl);

	sch = dev_queue->qdisc_sleeping;
	if (gnet_stats_copy_basic(&sch->running, d, NULL, &sch->bstats) < 0 ||
	    qdisc_qstats_copy(d, sch) < 0)
		return -1;
	return 0;
}

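/* Walk all per-queue classes, honouring the skip/stop protocol of
 * struct qdisc_walker.
 */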
static void taprio_walk(struct Qdisc *sch, struct qdisc_walker *arg)
{
	struct net_device *dev = qdisc_dev(sch);
	unsigned long ntx;

	if (arg->stop)
		return;

	arg->count = arg->skip;
	for (ntx = arg->skip; ntx < dev->num_tx_queues; ntx++) {
		if (arg->fn(sch, ntx + 1, arg) < 0) {
			arg->stop = 1;
			break;
		}
		arg->count++;
	}
}

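/* Map a parent classid to its backing TX queue, for operations
 * (e.g. grafting) that need a netdev_queue.
 */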
static struct netdev_queue *taprio_select_queue(struct Qdisc *sch,
						struct tcmsg *tcm)
{
	return taprio_queue_get(sch, TC_H_MIN(tcm->tcm_parent));
}

static const struct Qdisc_class_ops taprio_class_ops = {
	.graft		= taprio_graft,
	.leaf		= taprio_leaf,
	.find		= taprio_find,
	.walk		= taprio_walk,
	.dump		= taprio_dump_class,
	.dump_stats	= taprio_dump_class_stats,
	.select_queue	= taprio_select_queue,
};

static struct Qdisc_ops taprio_qdisc_ops __read_mostly = {
	.cl_ops		= &taprio_class_ops,
	.id		= "taprio",
	.priv_size	= sizeof(struct taprio_sched),
	.init		= taprio_init,
	.change		= taprio_change,
	.destroy	= taprio_destroy,
	.peek		= taprio_peek,
	.dequeue	= taprio_dequeue,
	.enqueue	= taprio_enqueue,
	.dump		= taprio_dump,
	.owner		= THIS_MODULE,
};

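/* The notifier is registered at module init; it is how taprio learns
 * about device events (e.g. link speed changes) so that the per-byte
 * transmission time estimate can be kept up to date.
 */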
static struct notifier_block taprio_device_notifier = {
	.notifier_call = taprio_dev_notifier,
};

static int __init taprio_module_init(void)
{
	int err = register_netdevice_notifier(&taprio_device_notifier);

	if (err)
		return err;

	return register_qdisc(&taprio_qdisc_ops);
}

static void __exit taprio_module_exit(void)
{
	unregister_qdisc(&taprio_qdisc_ops);
	unregister_netdevice_notifier(&taprio_device_notifier);
}

module_init(taprio_module_init);
module_exit(taprio_module_exit);
MODULE_LICENSE("GPL");