/*
 * net/sched/sch_fq.c Fair Queue Packet Scheduler (per flow pacing)
 *
 * Copyright (C) 2013-2015 Eric Dumazet <edumazet@google.com>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 *
 * Meant to be mostly used for locally generated traffic :
 * Fast classification depends on skb->sk being set before reaching us.
 * If not (router workload), we use rxhash as fallback, with a 32 bit wide hash.
 * All packets belonging to a socket are considered as a 'flow'.
 *
 * Flows are dynamically allocated and stored in a hash table of RB trees.
 * They are also part of one Round Robin 'queue' (new or old flows).
 *
 * Burst avoidance (aka pacing) capability :
 *
 * Transport (eg TCP) can set a rate in sk->sk_pacing_rate, enqueue a
 * bunch of packets, and this packet scheduler adds delay between
 * packets to respect the rate limitation.
 *
 * enqueue() :
 *   - lookup one RB tree (out of 1024 or more) to find the flow.
 *     If the flow is non-existent, create it and add it to the tree.
 *     Add skb to the per flow list of skb (fifo).
 *   - Use a special fifo for high prio packets
 *
 * dequeue() : serves flows in Round Robin
 * Note : When a flow becomes empty, we do not immediately remove it from
 * rb trees, for performance reasons (it's expected to send additional packets,
 * or SLAB cache will reuse the socket for another flow)
 */
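
/* Example setup (illustrative only, see tc-fq(8) for the full parameter list) :
 *
 *	tc qdisc replace dev eth0 root fq
 *	tc qdisc change dev eth0 root fq maxrate 1gbit
 *
 * Pacing itself is driven by sk->sk_pacing_rate (or SO_MAX_PACING_RATE) ;
 * the maxrate parameter only caps it per flow.
 */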

#include <linux/module.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/jiffies.h>
#include <linux/string.h>
#include <linux/in.h>
#include <linux/errno.h>
#include <linux/init.h>
#include <linux/skbuff.h>
#include <linux/slab.h>
#include <linux/rbtree.h>
#include <linux/hash.h>
#include <linux/prefetch.h>
#include <linux/vmalloc.h>
#include <net/netlink.h>
#include <net/pkt_sched.h>
#include <net/sock.h>
#include <net/tcp_states.h>
#include <net/tcp.h>

/*
 * Per flow structure, dynamically allocated
 */
struct fq_flow {
	struct sk_buff *head;		/* list of skbs for this flow : first skb */
	union {
		struct sk_buff *tail;	/* last skb in the list */
		unsigned long  age;	/* jiffies when flow was emptied, for gc */
	};
	struct rb_node fq_node;		/* anchor in fq_root[] trees */
	struct sock *sk;
	int qlen;			/* number of packets in flow queue */
	int credit;
	u32 socket_hash;		/* sk_hash */
	struct fq_flow *next;		/* next pointer in RR lists, or &detached */

	struct rb_node rate_node;	/* anchor in q->delayed tree */
	u64 time_next_packet;
};

struct fq_flow_head {
	struct fq_flow *first;
	struct fq_flow *last;
};

struct fq_sched_data {
	struct fq_flow_head new_flows;

	struct fq_flow_head old_flows;

	struct rb_root delayed;		/* for rate limited flows */
	u64 time_next_delayed_flow;
	unsigned long unthrottle_latency_ns;

	struct fq_flow internal;	/* for non classified or high prio packets */
	u32 quantum;
	u32 initial_quantum;
	u32 flow_refill_delay;
	u32 flow_plimit;		/* max packets per flow */
	unsigned long flow_max_rate;	/* optional max rate per flow */
	u64 ce_threshold;
	u32 orphan_mask;		/* mask for orphaned skb */
	u32 low_rate_threshold;
	struct rb_root *fq_root;
	u8 rate_enable;
	u8 fq_trees_log;

	u32 flows;
	u32 inactive_flows;
	u32 throttled_flows;

	u64 stat_gc_flows;
	u64 stat_internal_packets;
	u64 stat_throttled;
	u64 stat_ce_mark;
	u64 stat_flows_plimit;
	u64 stat_pkts_too_long;
	u64 stat_allocation_errors;
	struct qdisc_watchdog watchdog;
};

/* special value to mark a detached flow (not on old/new list) */
static struct fq_flow detached, throttled;

static void fq_flow_set_detached(struct fq_flow *f)
{
	f->next = &detached;
	f->age = jiffies;
}

static bool fq_flow_is_detached(const struct fq_flow *f)
{
	return f->next == &detached;
}

static bool fq_flow_is_throttled(const struct fq_flow *f)
{
	return f->next == &throttled;
}

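/* Append @flow at the tail of one of the Round Robin lists
 * (q->new_flows or q->old_flows).
 */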
static void fq_flow_add_tail(struct fq_flow_head *head, struct fq_flow *flow)
{
	if (head->first)
		head->last->next = flow;
	else
		head->first = flow;
	head->last = flow;
	flow->next = NULL;
}

static void fq_flow_unset_throttled(struct fq_sched_data *q, struct fq_flow *f)
{
	rb_erase(&f->rate_node, &q->delayed);
	q->throttled_flows--;
	fq_flow_add_tail(&q->old_flows, f);
}

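/* Park a rate limited flow into the q->delayed rbtree, keyed by
 * f->time_next_packet, and lower q->time_next_delayed_flow if this flow
 * must be served earlier than the current target.
 */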
static void fq_flow_set_throttled(struct fq_sched_data *q, struct fq_flow *f)
{
	struct rb_node **p = &q->delayed.rb_node, *parent = NULL;

	while (*p) {
		struct fq_flow *aux;

		parent = *p;
		aux = rb_entry(parent, struct fq_flow, rate_node);
		if (f->time_next_packet >= aux->time_next_packet)
			p = &parent->rb_right;
		else
			p = &parent->rb_left;
	}
	rb_link_node(&f->rate_node, parent, p);
	rb_insert_color(&f->rate_node, &q->delayed);
	q->throttled_flows++;
	q->stat_throttled++;

	f->next = &throttled;
	if (q->time_next_delayed_flow > f->time_next_packet)
		q->time_next_delayed_flow = f->time_next_packet;
}


static struct kmem_cache *fq_flow_cachep __read_mostly;


/* limit number of collected flows per round */
#define FQ_GC_MAX 8
#define FQ_GC_AGE (3*HZ)

static bool fq_gc_candidate(const struct fq_flow *f)
{
	return fq_flow_is_detached(f) &&
	       time_after(jiffies, f->age + FQ_GC_AGE);
}

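/* Garbage collect at most FQ_GC_MAX flows that have been detached for more
 * than FQ_GC_AGE jiffies, while walking the rbtree bucket towards @sk.
 */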
static void fq_gc(struct fq_sched_data *q,
		  struct rb_root *root,
		  struct sock *sk)
{
	struct fq_flow *f, *tofree[FQ_GC_MAX];
	struct rb_node **p, *parent;
	int fcnt = 0;

	p = &root->rb_node;
	parent = NULL;
	while (*p) {
		parent = *p;

		f = rb_entry(parent, struct fq_flow, fq_node);
		if (f->sk == sk)
			break;

		if (fq_gc_candidate(f)) {
			tofree[fcnt++] = f;
			if (fcnt == FQ_GC_MAX)
				break;
		}

		if (f->sk > sk)
			p = &parent->rb_right;
		else
			p = &parent->rb_left;
	}

	q->flows -= fcnt;
	q->inactive_flows -= fcnt;
	q->stat_gc_flows += fcnt;
	while (fcnt) {
		struct fq_flow *f = tofree[--fcnt];

		rb_erase(&f->fq_node, root);
		kmem_cache_free(fq_flow_cachep, f);
	}
}

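/* Find (or create) the flow an skb belongs to.
 * Local packets are keyed by skb->sk ; unconnected or forwarded packets fall
 * back to a packet hash masked by q->orphan_mask. May return &q->internal
 * for TC_PRIO_CONTROL packets or on allocation failure.
 */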
static struct fq_flow *fq_classify(struct sk_buff *skb, struct fq_sched_data *q)
{
	struct rb_node **p, *parent;
	struct sock *sk = skb->sk;
	struct rb_root *root;
	struct fq_flow *f;

	/* warning: no starvation prevention... */
	if (unlikely((skb->priority & TC_PRIO_MAX) == TC_PRIO_CONTROL))
		return &q->internal;

	/* SYNACK messages are attached to a TCP_NEW_SYN_RECV request socket
	 * or a listener (SYNCOOKIE mode)
	 * 1) request sockets are not full blown,
	 *    they do not contain sk_pacing_rate
	 * 2) They are not part of a 'flow' yet
	 * 3) We do not want to rate limit them (eg SYNFLOOD attack),
	 *    especially if the listener set SO_MAX_PACING_RATE
	 * 4) We pretend they are orphaned
	 */
	if (!sk || sk_listener(sk)) {
		unsigned long hash = skb_get_hash(skb) & q->orphan_mask;

		/* By forcing low order bit to 1, we make sure to not
		 * collide with a local flow (socket pointers are word aligned)
		 */
		sk = (struct sock *)((hash << 1) | 1UL);
		skb_orphan(skb);
	}

	root = &q->fq_root[hash_ptr(sk, q->fq_trees_log)];

	if (q->flows >= (2U << q->fq_trees_log) &&
	    q->inactive_flows > q->flows/2)
		fq_gc(q, root, sk);

	p = &root->rb_node;
	parent = NULL;
	while (*p) {
		parent = *p;

		f = rb_entry(parent, struct fq_flow, fq_node);
		if (f->sk == sk) {
			/* socket might have been reallocated, so check
			 * if its sk_hash is the same.
			 * If not, we need to refill credit with
			 * initial quantum
			 */
			if (unlikely(skb->sk &&
				     f->socket_hash != sk->sk_hash)) {
				f->credit = q->initial_quantum;
				f->socket_hash = sk->sk_hash;
				if (fq_flow_is_throttled(f))
					fq_flow_unset_throttled(q, f);
				f->time_next_packet = 0ULL;
			}
			return f;
		}
		if (f->sk > sk)
			p = &parent->rb_right;
		else
			p = &parent->rb_left;
	}

	f = kmem_cache_zalloc(fq_flow_cachep, GFP_ATOMIC | __GFP_NOWARN);
	if (unlikely(!f)) {
		q->stat_allocation_errors++;
		return &q->internal;
	}
	fq_flow_set_detached(f);
	f->sk = sk;
	if (skb->sk)
		f->socket_hash = sk->sk_hash;
	f->credit = q->initial_quantum;

	rb_link_node(&f->fq_node, parent, p);
	rb_insert_color(&f->fq_node, root);

	q->flows++;
	q->inactive_flows++;
	return f;
}


/* remove one skb from head of flow queue */
static struct sk_buff *fq_dequeue_head(struct Qdisc *sch, struct fq_flow *flow)
{
	struct sk_buff *skb = flow->head;

	if (skb) {
		flow->head = skb->next;
		skb_mark_not_on_list(skb);
		flow->qlen--;
		qdisc_qstats_backlog_dec(sch, skb);
		sch->q.qlen--;
	}
	return skb;
}

static void flow_queue_add(struct fq_flow *flow, struct sk_buff *skb)
{
	struct sk_buff *head = flow->head;

	skb->next = NULL;
	if (!head)
		flow->head = skb;
	else
		flow->tail->next = skb;

	flow->tail = skb;
}

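/* Qdisc enqueue : drop if the global or per-flow packet limit is hit,
 * otherwise classify the skb, append it to its flow fifo, and (re)attach the
 * flow to the new_flows list if it was detached.
 */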
static int fq_enqueue(struct sk_buff *skb, struct Qdisc *sch,
		      struct sk_buff **to_free)
{
	struct fq_sched_data *q = qdisc_priv(sch);
	struct fq_flow *f;

	if (unlikely(sch->q.qlen >= sch->limit))
		return qdisc_drop(skb, sch, to_free);

	f = fq_classify(skb, q);
	if (unlikely(f->qlen >= q->flow_plimit && f != &q->internal)) {
		q->stat_flows_plimit++;
		return qdisc_drop(skb, sch, to_free);
	}

	f->qlen++;
	qdisc_qstats_backlog_inc(sch, skb);
	if (fq_flow_is_detached(f)) {
		struct sock *sk = skb->sk;

		fq_flow_add_tail(&q->new_flows, f);
		if (time_after(jiffies, f->age + q->flow_refill_delay))
			f->credit = max_t(u32, f->credit, q->quantum);
		if (sk && q->rate_enable) {
			if (unlikely(smp_load_acquire(&sk->sk_pacing_status) !=
				     SK_PACING_FQ))
				smp_store_release(&sk->sk_pacing_status,
						  SK_PACING_FQ);
		}
		q->inactive_flows--;
	}

	/* Note: this overwrites f->age */
	flow_queue_add(f, skb);

	if (unlikely(f == &q->internal)) {
		q->stat_internal_packets++;
	}
	sch->q.qlen++;

	return NET_XMIT_SUCCESS;
}

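/* Move back to old_flows every throttled flow whose pacing deadline
 * (f->time_next_packet) has expired, and remember the earliest pending one
 * in q->time_next_delayed_flow.
 */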
static void fq_check_throttled(struct fq_sched_data *q, u64 now)
{
	unsigned long sample;
	struct rb_node *p;

	if (q->time_next_delayed_flow > now)
		return;

	/* Update unthrottle latency EWMA.
	 * This is cheap and can help diagnosing timer/latency problems.
	 */
	sample = (unsigned long)(now - q->time_next_delayed_flow);
	q->unthrottle_latency_ns -= q->unthrottle_latency_ns >> 3;
	q->unthrottle_latency_ns += sample >> 3;

	q->time_next_delayed_flow = ~0ULL;
	while ((p = rb_first(&q->delayed)) != NULL) {
		struct fq_flow *f = rb_entry(p, struct fq_flow, rate_node);

		if (f->time_next_packet > now) {
			q->time_next_delayed_flow = f->time_next_packet;
			break;
		}
		fq_flow_unset_throttled(q, f);
	}
}

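/* Round Robin dequeue over new_flows then old_flows, with per flow pacing :
 * a flow whose next allowed departure time is still in the future is moved
 * to the delayed tree instead of being served. The gap between packets is
 * roughly plen * NSEC_PER_SEC / rate, plen being at least q->quantum for
 * fast flows ; for example ~3000 bytes (two MTU quantum) at a 1 MByte/s
 * pacing rate yields a ~3 ms gap.
 */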
static struct sk_buff *fq_dequeue(struct Qdisc *sch)
{
	struct fq_sched_data *q = qdisc_priv(sch);
	u64 now = ktime_get_ns();
	struct fq_flow_head *head;
	struct sk_buff *skb;
	struct fq_flow *f;
	unsigned long rate;
	u32 plen;

	skb = fq_dequeue_head(sch, &q->internal);
	if (skb)
		goto out;
	fq_check_throttled(q, now);
begin:
	head = &q->new_flows;
	if (!head->first) {
		head = &q->old_flows;
		if (!head->first) {
			if (q->time_next_delayed_flow != ~0ULL)
				qdisc_watchdog_schedule_ns(&q->watchdog,
							   q->time_next_delayed_flow);
			return NULL;
		}
	}
	f = head->first;

	if (f->credit <= 0) {
		f->credit += q->quantum;
		head->first = f->next;
		fq_flow_add_tail(&q->old_flows, f);
		goto begin;
	}

	skb = f->head;
	if (skb) {
		u64 time_next_packet = max_t(u64, ktime_to_ns(skb->tstamp),
					     f->time_next_packet);

		if (now < time_next_packet) {
			head->first = f->next;
			f->time_next_packet = time_next_packet;
			fq_flow_set_throttled(q, f);
			goto begin;
		}
		if (time_next_packet &&
		    (s64)(now - time_next_packet - q->ce_threshold) > 0) {
			INET_ECN_set_ce(skb);
			q->stat_ce_mark++;
		}
	}

	skb = fq_dequeue_head(sch, f);
	if (!skb) {
		head->first = f->next;
		/* force a pass through old_flows to prevent starvation */
		if ((head == &q->new_flows) && q->old_flows.first) {
			fq_flow_add_tail(&q->old_flows, f);
		} else {
			fq_flow_set_detached(f);
			q->inactive_flows++;
		}
		goto begin;
	}
	prefetch(&skb->end);
	f->credit -= qdisc_pkt_len(skb);

	if (ktime_to_ns(skb->tstamp) || !q->rate_enable)
		goto out;

	rate = q->flow_max_rate;
	if (skb->sk)
		rate = min(skb->sk->sk_pacing_rate, rate);

	if (rate <= q->low_rate_threshold) {
		f->credit = 0;
		plen = qdisc_pkt_len(skb);
	} else {
		plen = max(qdisc_pkt_len(skb), q->quantum);
		if (f->credit > 0)
			goto out;
	}
	if (rate != ~0UL) {
		u64 len = (u64)plen * NSEC_PER_SEC;

		if (likely(rate))
			len = div64_ul(len, rate);
		/* Since socket rate can change later,
		 * clamp the delay to 1 second.
		 * Really, providers of too big packets should be fixed !
		 */
		if (unlikely(len > NSEC_PER_SEC)) {
			len = NSEC_PER_SEC;
			q->stat_pkts_too_long++;
		}
		/* Account for schedule/timers drifts.
		 * f->time_next_packet was set when prior packet was sent,
		 * and current time (@now) can be too late by tens of us.
		 */
		if (f->time_next_packet)
			len -= min(len/2, now - f->time_next_packet);
		f->time_next_packet = now + len;
	}
out:
	qdisc_bstats_update(sch, skb);
	return skb;
}

static void fq_flow_purge(struct fq_flow *flow)
{
	rtnl_kfree_skbs(flow->head, flow->tail);
	flow->head = NULL;
	flow->qlen = 0;
}

static void fq_reset(struct Qdisc *sch)
{
	struct fq_sched_data *q = qdisc_priv(sch);
	struct rb_root *root;
	struct rb_node *p;
	struct fq_flow *f;
	unsigned int idx;

	sch->q.qlen = 0;
	sch->qstats.backlog = 0;

	fq_flow_purge(&q->internal);

	if (!q->fq_root)
		return;

	for (idx = 0; idx < (1U << q->fq_trees_log); idx++) {
		root = &q->fq_root[idx];
		while ((p = rb_first(root)) != NULL) {
			f = rb_entry(p, struct fq_flow, fq_node);
			rb_erase(p, root);

			fq_flow_purge(f);

			kmem_cache_free(fq_flow_cachep, f);
		}
	}
	q->new_flows.first = NULL;
	q->old_flows.first = NULL;
	q->delayed = RB_ROOT;
	q->flows = 0;
	q->inactive_flows = 0;
	q->throttled_flows = 0;
}

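/* Migrate all flows from the old hash array to the new one (called from
 * fq_resize() under sch_tree_lock). Flows that became gc candidates are
 * freed instead of being re-inserted.
 */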
static void fq_rehash(struct fq_sched_data *q,
		      struct rb_root *old_array, u32 old_log,
		      struct rb_root *new_array, u32 new_log)
{
	struct rb_node *op, **np, *parent;
	struct rb_root *oroot, *nroot;
	struct fq_flow *of, *nf;
	int fcnt = 0;
	u32 idx;

	for (idx = 0; idx < (1U << old_log); idx++) {
		oroot = &old_array[idx];
		while ((op = rb_first(oroot)) != NULL) {
			rb_erase(op, oroot);
			of = rb_entry(op, struct fq_flow, fq_node);
			if (fq_gc_candidate(of)) {
				fcnt++;
				kmem_cache_free(fq_flow_cachep, of);
				continue;
			}
			nroot = &new_array[hash_ptr(of->sk, new_log)];

			np = &nroot->rb_node;
			parent = NULL;
			while (*np) {
				parent = *np;

				nf = rb_entry(parent, struct fq_flow, fq_node);
				BUG_ON(nf->sk == of->sk);

				if (nf->sk > of->sk)
					np = &parent->rb_right;
				else
					np = &parent->rb_left;
			}

			rb_link_node(&of->fq_node, parent, np);
			rb_insert_color(&of->fq_node, nroot);
		}
	}
	q->flows -= fcnt;
	q->inactive_flows -= fcnt;
	q->stat_gc_flows += fcnt;
}

static void fq_free(void *addr)
{
	kvfree(addr);
}

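/* (Re)allocate the array of 1 << log rbtree roots and rehash existing flows
 * into it. Called at init time and when TCA_FQ_BUCKETS_LOG changes.
 */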
static int fq_resize(struct Qdisc *sch, u32 log)
{
	struct fq_sched_data *q = qdisc_priv(sch);
	struct rb_root *array;
	void *old_fq_root;
	u32 idx;

	if (q->fq_root && log == q->fq_trees_log)
		return 0;

	/* If XPS was setup, we can allocate memory on right NUMA node */
	array = kvmalloc_node(sizeof(struct rb_root) << log, GFP_KERNEL | __GFP_RETRY_MAYFAIL,
			      netdev_queue_numa_node_read(sch->dev_queue));
	if (!array)
		return -ENOMEM;

	for (idx = 0; idx < (1U << log); idx++)
		array[idx] = RB_ROOT;

	sch_tree_lock(sch);

	old_fq_root = q->fq_root;
	if (old_fq_root)
		fq_rehash(q, old_fq_root, q->fq_trees_log, array, log);

	q->fq_root = array;
	q->fq_trees_log = log;

	sch_tree_unlock(sch);

	fq_free(old_fq_root);

	return 0;
}

static const struct nla_policy fq_policy[TCA_FQ_MAX + 1] = {
	[TCA_FQ_PLIMIT]			= { .type = NLA_U32 },
	[TCA_FQ_FLOW_PLIMIT]		= { .type = NLA_U32 },
	[TCA_FQ_QUANTUM]		= { .type = NLA_U32 },
	[TCA_FQ_INITIAL_QUANTUM]	= { .type = NLA_U32 },
	[TCA_FQ_RATE_ENABLE]		= { .type = NLA_U32 },
	[TCA_FQ_FLOW_DEFAULT_RATE]	= { .type = NLA_U32 },
	[TCA_FQ_FLOW_MAX_RATE]		= { .type = NLA_U32 },
	[TCA_FQ_BUCKETS_LOG]		= { .type = NLA_U32 },
	[TCA_FQ_FLOW_REFILL_DELAY]	= { .type = NLA_U32 },
	[TCA_FQ_LOW_RATE_THRESHOLD]	= { .type = NLA_U32 },
	[TCA_FQ_CE_THRESHOLD]		= { .type = NLA_U32 },
};

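/* Netlink control path : parse TCA_FQ_* attributes, apply them under
 * sch_tree_lock, resize the hash table if needed, then drop packets
 * exceeding a possibly reduced sch->limit.
 */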
static int fq_change(struct Qdisc *sch, struct nlattr *opt,
		     struct netlink_ext_ack *extack)
{
	struct fq_sched_data *q = qdisc_priv(sch);
	struct nlattr *tb[TCA_FQ_MAX + 1];
	int err, drop_count = 0;
	unsigned drop_len = 0;
	u32 fq_log;

	if (!opt)
		return -EINVAL;

	err = nla_parse_nested(tb, TCA_FQ_MAX, opt, fq_policy, NULL);
	if (err < 0)
		return err;

	sch_tree_lock(sch);

	fq_log = q->fq_trees_log;

	if (tb[TCA_FQ_BUCKETS_LOG]) {
		u32 nval = nla_get_u32(tb[TCA_FQ_BUCKETS_LOG]);

		if (nval >= 1 && nval <= ilog2(256*1024))
			fq_log = nval;
		else
			err = -EINVAL;
	}
	if (tb[TCA_FQ_PLIMIT])
		sch->limit = nla_get_u32(tb[TCA_FQ_PLIMIT]);

	if (tb[TCA_FQ_FLOW_PLIMIT])
		q->flow_plimit = nla_get_u32(tb[TCA_FQ_FLOW_PLIMIT]);

	if (tb[TCA_FQ_QUANTUM]) {
		u32 quantum = nla_get_u32(tb[TCA_FQ_QUANTUM]);

		if (quantum > 0)
			q->quantum = quantum;
		else
			err = -EINVAL;
	}

	if (tb[TCA_FQ_INITIAL_QUANTUM])
		q->initial_quantum = nla_get_u32(tb[TCA_FQ_INITIAL_QUANTUM]);

	if (tb[TCA_FQ_FLOW_DEFAULT_RATE])
		pr_warn_ratelimited("sch_fq: defrate %u ignored.\n",
				    nla_get_u32(tb[TCA_FQ_FLOW_DEFAULT_RATE]));

	if (tb[TCA_FQ_FLOW_MAX_RATE]) {
		u32 rate = nla_get_u32(tb[TCA_FQ_FLOW_MAX_RATE]);

		q->flow_max_rate = (rate == ~0U) ? ~0UL : rate;
	}
	if (tb[TCA_FQ_LOW_RATE_THRESHOLD])
		q->low_rate_threshold =
			nla_get_u32(tb[TCA_FQ_LOW_RATE_THRESHOLD]);

	if (tb[TCA_FQ_RATE_ENABLE]) {
		u32 enable = nla_get_u32(tb[TCA_FQ_RATE_ENABLE]);

		if (enable <= 1)
			q->rate_enable = enable;
		else
			err = -EINVAL;
	}

	if (tb[TCA_FQ_FLOW_REFILL_DELAY]) {
		u32 usecs_delay = nla_get_u32(tb[TCA_FQ_FLOW_REFILL_DELAY]);

		q->flow_refill_delay = usecs_to_jiffies(usecs_delay);
	}

	if (tb[TCA_FQ_ORPHAN_MASK])
		q->orphan_mask = nla_get_u32(tb[TCA_FQ_ORPHAN_MASK]);

	if (tb[TCA_FQ_CE_THRESHOLD])
		q->ce_threshold = (u64)NSEC_PER_USEC *
				  nla_get_u32(tb[TCA_FQ_CE_THRESHOLD]);

	if (!err) {
		sch_tree_unlock(sch);
		err = fq_resize(sch, fq_log);
		sch_tree_lock(sch);
	}
	while (sch->q.qlen > sch->limit) {
		struct sk_buff *skb = fq_dequeue(sch);

		if (!skb)
			break;
		drop_len += qdisc_pkt_len(skb);
		rtnl_kfree_skbs(skb, skb);
		drop_count++;
	}
	qdisc_tree_reduce_backlog(sch, drop_count, drop_len);

	sch_tree_unlock(sch);
	return err;
}

static void fq_destroy(struct Qdisc *sch)
{
	struct fq_sched_data *q = qdisc_priv(sch);

	fq_reset(sch);
	fq_free(q->fq_root);
	qdisc_watchdog_cancel(&q->watchdog);
}

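/* Qdisc init : install defaults (10000 packet limit, 100 packets per flow,
 * quantum of 2 MTU, 1024 buckets, pacing enabled), then apply netlink
 * attributes if any and allocate the hash table.
 */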
static int fq_init(struct Qdisc *sch, struct nlattr *opt,
		   struct netlink_ext_ack *extack)
{
	struct fq_sched_data *q = qdisc_priv(sch);
	int err;

	sch->limit = 10000;
	q->flow_plimit = 100;
	q->quantum = 2 * psched_mtu(qdisc_dev(sch));
	q->initial_quantum = 10 * psched_mtu(qdisc_dev(sch));
	q->flow_refill_delay = msecs_to_jiffies(40);
	q->flow_max_rate = ~0UL;
	q->time_next_delayed_flow = ~0ULL;
	q->rate_enable = 1;
	q->new_flows.first = NULL;
	q->old_flows.first = NULL;
	q->delayed = RB_ROOT;
	q->fq_root = NULL;
	q->fq_trees_log = ilog2(1024);
	q->orphan_mask = 1024 - 1;
	q->low_rate_threshold = 550000 / 8;

	/* Default ce_threshold of 4294 seconds */
	q->ce_threshold = (u64)NSEC_PER_USEC * ~0U;

	qdisc_watchdog_init_clockid(&q->watchdog, sch, CLOCK_MONOTONIC);

	if (opt)
		err = fq_change(sch, opt, extack);
	else
		err = fq_resize(sch, q->fq_trees_log);

	return err;
}

static int fq_dump(struct Qdisc *sch, struct sk_buff *skb)
{
	struct fq_sched_data *q = qdisc_priv(sch);
	u64 ce_threshold = q->ce_threshold;
	struct nlattr *opts;

	opts = nla_nest_start(skb, TCA_OPTIONS);
	if (opts == NULL)
		goto nla_put_failure;

	/* TCA_FQ_FLOW_DEFAULT_RATE is not used anymore */

	do_div(ce_threshold, NSEC_PER_USEC);

	if (nla_put_u32(skb, TCA_FQ_PLIMIT, sch->limit) ||
	    nla_put_u32(skb, TCA_FQ_FLOW_PLIMIT, q->flow_plimit) ||
	    nla_put_u32(skb, TCA_FQ_QUANTUM, q->quantum) ||
	    nla_put_u32(skb, TCA_FQ_INITIAL_QUANTUM, q->initial_quantum) ||
	    nla_put_u32(skb, TCA_FQ_RATE_ENABLE, q->rate_enable) ||
	    nla_put_u32(skb, TCA_FQ_FLOW_MAX_RATE,
			min_t(unsigned long, q->flow_max_rate, ~0U)) ||
	    nla_put_u32(skb, TCA_FQ_FLOW_REFILL_DELAY,
			jiffies_to_usecs(q->flow_refill_delay)) ||
	    nla_put_u32(skb, TCA_FQ_ORPHAN_MASK, q->orphan_mask) ||
	    nla_put_u32(skb, TCA_FQ_LOW_RATE_THRESHOLD,
			q->low_rate_threshold) ||
	    nla_put_u32(skb, TCA_FQ_CE_THRESHOLD, (u32)ce_threshold) ||
	    nla_put_u32(skb, TCA_FQ_BUCKETS_LOG, q->fq_trees_log))
		goto nla_put_failure;

	return nla_nest_end(skb, opts);

nla_put_failure:
	return -1;
}

static int fq_dump_stats(struct Qdisc *sch, struct gnet_dump *d)
{
	struct fq_sched_data *q = qdisc_priv(sch);
	struct tc_fq_qd_stats st;

	sch_tree_lock(sch);

	st.gc_flows = q->stat_gc_flows;
	st.highprio_packets = q->stat_internal_packets;
	st.tcp_retrans = 0;
	st.throttled = q->stat_throttled;
	st.flows_plimit = q->stat_flows_plimit;
	st.pkts_too_long = q->stat_pkts_too_long;
	st.allocation_errors = q->stat_allocation_errors;
	st.time_next_delayed_flow = q->time_next_delayed_flow - ktime_get_ns();
	st.flows = q->flows;
	st.inactive_flows = q->inactive_flows;
	st.throttled_flows = q->throttled_flows;
	st.unthrottle_latency_ns = min_t(unsigned long,
					 q->unthrottle_latency_ns, ~0U);
	st.ce_mark = q->stat_ce_mark;
	sch_tree_unlock(sch);

	return gnet_stats_copy_app(d, &st, sizeof(st));
}

static struct Qdisc_ops fq_qdisc_ops __read_mostly = {
	.id		= "fq",
	.priv_size	= sizeof(struct fq_sched_data),

	.enqueue	= fq_enqueue,
	.dequeue	= fq_dequeue,
	.peek		= qdisc_peek_dequeued,
	.init		= fq_init,
	.reset		= fq_reset,
	.destroy	= fq_destroy,
	.change		= fq_change,
	.dump		= fq_dump,
	.dump_stats	= fq_dump_stats,
	.owner		= THIS_MODULE,
};

static int __init fq_module_init(void)
{
	int ret;

	fq_flow_cachep = kmem_cache_create("fq_flow_cache",
					   sizeof(struct fq_flow),
					   0, 0, NULL);
	if (!fq_flow_cachep)
		return -ENOMEM;

	ret = register_qdisc(&fq_qdisc_ops);
	if (ret)
		kmem_cache_destroy(fq_flow_cachep);
	return ret;
}

static void __exit fq_module_exit(void)
{
	unregister_qdisc(&fq_qdisc_ops);
	kmem_cache_destroy(fq_flow_cachep);
}

module_init(fq_module_init)
module_exit(fq_module_exit)
MODULE_AUTHOR("Eric Dumazet");
MODULE_LICENSE("GPL");