blob: e5646614e88d83733acf9ccd105885358fe0a12e [file] [log] [blame]
Linus Torvalds1da177e2005-04-16 15:20:36 -07001/*
2 * net/sched/sch_api.c Packet scheduler API.
3 *
4 * This program is free software; you can redistribute it and/or
5 * modify it under the terms of the GNU General Public License
6 * as published by the Free Software Foundation; either version
7 * 2 of the License, or (at your option) any later version.
8 *
9 * Authors: Alexey Kuznetsov, <kuznet@ms2.inr.ac.ru>
10 *
11 * Fixes:
12 *
13 * Rani Assaf <rani@magic.metawire.com> :980802: JIFFIES and CPU clock sources are repaired.
14 * Eduardo J. Blanco <ejbs@netlabs.com.uy> :990222: kmod support
15 * Jamal Hadi Salim <hadi@nortelnetworks.com>: 990601: ingress support
16 */
17
Linus Torvalds1da177e2005-04-16 15:20:36 -070018#include <linux/module.h>
19#include <linux/types.h>
20#include <linux/kernel.h>
Linus Torvalds1da177e2005-04-16 15:20:36 -070021#include <linux/string.h>
Linus Torvalds1da177e2005-04-16 15:20:36 -070022#include <linux/errno.h>
Linus Torvalds1da177e2005-04-16 15:20:36 -070023#include <linux/skbuff.h>
Linus Torvalds1da177e2005-04-16 15:20:36 -070024#include <linux/init.h>
25#include <linux/proc_fs.h>
26#include <linux/seq_file.h>
27#include <linux/kmod.h>
28#include <linux/list.h>
Patrick McHardy41794772007-03-16 01:19:15 -070029#include <linux/hrtimer.h>
Jarek Poplawski25bfcd52008-08-18 20:53:34 -070030#include <linux/lockdep.h>
Linus Torvalds1da177e2005-04-16 15:20:36 -070031
Eric W. Biederman457c4cb2007-09-12 12:01:34 +020032#include <net/net_namespace.h>
Denis V. Lunevb8542722007-12-01 00:21:31 +110033#include <net/sock.h>
Arnaldo Carvalho de Melodc5fc572007-03-25 23:06:12 -070034#include <net/netlink.h>
Linus Torvalds1da177e2005-04-16 15:20:36 -070035#include <net/pkt_sched.h>
36
Linus Torvalds1da177e2005-04-16 15:20:36 -070037static int qdisc_notify(struct sk_buff *oskb, struct nlmsghdr *n, u32 clid,
38 struct Qdisc *old, struct Qdisc *new);
39static int tclass_notify(struct sk_buff *oskb, struct nlmsghdr *n,
40 struct Qdisc *q, unsigned long cl, int event);
41
42/*
43
44 Short review.
45 -------------
46
47 This file consists of two interrelated parts:
48
49 1. queueing disciplines manager frontend.
50 2. traffic classes manager frontend.
51
52 Generally, queueing discipline ("qdisc") is a black box,
53 which is able to enqueue packets and to dequeue them (when
54 device is ready to send something) in order and at times
55 determined by algorithm hidden in it.
56
   qdiscs are divided into two categories:
58 - "queues", which have no internal structure visible from outside.
59 - "schedulers", which split all the packets to "traffic classes",
60 using "packet classifiers" (look at cls_api.c)
61
62 In turn, classes may have child qdiscs (as rule, queues)
63 attached to them etc. etc. etc.
64
65 The goal of the routines in this file is to translate
66 information supplied by user in the form of handles
67 to more intelligible for kernel form, to make some sanity
68 checks and part of work, which is common to all qdiscs
69 and to provide rtnetlink notifications.
70
71 All real intelligent work is done inside qdisc modules.
72
73
74
75 Every discipline has two major routines: enqueue and dequeue.
76
77 ---dequeue
78
79 dequeue usually returns a skb to send. It is allowed to return NULL,
80 but it does not mean that queue is empty, it just means that
81 discipline does not want to send anything this time.
82 Queue is really empty if q->q.qlen == 0.
83 For complicated disciplines with multiple queues q->q is not
84 real packet queue, but however q->q.qlen must be valid.
85
86 ---enqueue
87
88 enqueue returns 0, if packet was enqueued successfully.
89 If packet (this one or another one) was dropped, it returns
90 not zero error code.
91 NET_XMIT_DROP - this packet dropped
92 Expected action: do not backoff, but wait until queue will clear.
93 NET_XMIT_CN - probably this packet enqueued, but another one dropped.
94 Expected action: backoff or ignore
95 NET_XMIT_POLICED - dropped by police.
96 Expected action: backoff or error to real-time apps.
97
98 Auxiliary routines:
99
100 ---requeue
101
102 requeues once dequeued packet. It is used for non-standard or
David S. Millere65d22e2008-07-08 16:46:01 -0700103 just buggy devices, which can defer output even if netif_queue_stopped()=0.
Linus Torvalds1da177e2005-04-16 15:20:36 -0700104
Jarek Poplawski99c0db22008-10-31 00:45:27 -0700105 ---peek
106
107 like dequeue but without removing a packet from the queue
108
Linus Torvalds1da177e2005-04-16 15:20:36 -0700109 ---reset
110
111 returns qdisc to initial state: purge all buffers, clear all
112 timers, counters (except for statistics) etc.
113
114 ---init
115
116 initializes newly created qdisc.
117
118 ---destroy
119
120 destroys resources allocated by init and during lifetime of qdisc.
121
122 ---change
123
124 changes qdisc parameters.
125 */
126
/* Protects the list of registered TC modules. It is a pure SMP lock. */
static DEFINE_RWLOCK(qdisc_mod_lock);


/************************************************
 *	Queueing disciplines manipulation.	*
 ************************************************/


/* The list of all installed queueing disciplines. */

static struct Qdisc_ops *qdisc_base;
139
/* Register/unregister queueing discipline */
141
142int register_qdisc(struct Qdisc_ops *qops)
143{
144 struct Qdisc_ops *q, **qp;
145 int rc = -EEXIST;
146
147 write_lock(&qdisc_mod_lock);
148 for (qp = &qdisc_base; (q = *qp) != NULL; qp = &q->next)
149 if (!strcmp(qops->id, q->id))
150 goto out;
151
152 if (qops->enqueue == NULL)
153 qops->enqueue = noop_qdisc_ops.enqueue;
154 if (qops->requeue == NULL)
155 qops->requeue = noop_qdisc_ops.requeue;
Jarek Poplawski99c0db22008-10-31 00:45:27 -0700156 if (qops->peek == NULL) {
157 if (qops->dequeue == NULL) {
158 qops->peek = noop_qdisc_ops.peek;
159 } else {
160 rc = -EINVAL;
161 goto out;
162 }
163 }
Linus Torvalds1da177e2005-04-16 15:20:36 -0700164 if (qops->dequeue == NULL)
165 qops->dequeue = noop_qdisc_ops.dequeue;
166
167 qops->next = NULL;
168 *qp = qops;
169 rc = 0;
170out:
171 write_unlock(&qdisc_mod_lock);
172 return rc;
173}
Patrick McHardy62e3ba12008-01-22 22:10:23 -0800174EXPORT_SYMBOL(register_qdisc);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700175
176int unregister_qdisc(struct Qdisc_ops *qops)
177{
178 struct Qdisc_ops *q, **qp;
179 int err = -ENOENT;
180
181 write_lock(&qdisc_mod_lock);
182 for (qp = &qdisc_base; (q=*qp)!=NULL; qp = &q->next)
183 if (q == qops)
184 break;
185 if (q) {
186 *qp = q->next;
187 q->next = NULL;
188 err = 0;
189 }
190 write_unlock(&qdisc_mod_lock);
191 return err;
192}
Patrick McHardy62e3ba12008-01-22 22:10:23 -0800193EXPORT_SYMBOL(unregister_qdisc);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700194
195/* We know handle. Find qdisc among all qdisc's attached to device
196 (root qdisc, all its children, children of children etc.)
197 */
198
David S. Miller8123b422008-08-08 23:23:39 -0700199struct Qdisc *qdisc_match_from_root(struct Qdisc *root, u32 handle)
200{
201 struct Qdisc *q;
202
203 if (!(root->flags & TCQ_F_BUILTIN) &&
204 root->handle == handle)
205 return root;
206
207 list_for_each_entry(q, &root->list, list) {
208 if (q->handle == handle)
209 return q;
210 }
211 return NULL;
212}
213
Jarek Poplawskif6e0b232008-08-22 03:24:05 -0700214/*
215 * This lock is needed until some qdiscs stop calling qdisc_tree_decrease_qlen()
216 * without rtnl_lock(); currently hfsc_dequeue(), netem_dequeue(), tbf_dequeue()
217 */
218static DEFINE_SPINLOCK(qdisc_list_lock);
219
220static void qdisc_list_add(struct Qdisc *q)
221{
222 if ((q->parent != TC_H_ROOT) && !(q->flags & TCQ_F_INGRESS)) {
223 spin_lock_bh(&qdisc_list_lock);
224 list_add_tail(&q->list, &qdisc_root_sleeping(q)->list);
225 spin_unlock_bh(&qdisc_list_lock);
226 }
227}
228
229void qdisc_list_del(struct Qdisc *q)
230{
231 if ((q->parent != TC_H_ROOT) && !(q->flags & TCQ_F_INGRESS)) {
232 spin_lock_bh(&qdisc_list_lock);
233 list_del(&q->list);
234 spin_unlock_bh(&qdisc_list_lock);
235 }
236}
237EXPORT_SYMBOL(qdisc_list_del);
238
David S. Milleread81cc2008-07-17 00:50:32 -0700239struct Qdisc *qdisc_lookup(struct net_device *dev, u32 handle)
Patrick McHardy43effa12006-11-29 17:35:48 -0800240{
David S. Miller30723672008-07-18 22:50:15 -0700241 unsigned int i;
Jarek Poplawskif6e0b232008-08-22 03:24:05 -0700242 struct Qdisc *q;
243
244 spin_lock_bh(&qdisc_list_lock);
Patrick McHardy43effa12006-11-29 17:35:48 -0800245
David S. Miller30723672008-07-18 22:50:15 -0700246 for (i = 0; i < dev->num_tx_queues; i++) {
247 struct netdev_queue *txq = netdev_get_tx_queue(dev, i);
Jarek Poplawskif6e0b232008-08-22 03:24:05 -0700248 struct Qdisc *txq_root = txq->qdisc_sleeping;
David S. Miller30723672008-07-18 22:50:15 -0700249
David S. Miller8123b422008-08-08 23:23:39 -0700250 q = qdisc_match_from_root(txq_root, handle);
251 if (q)
Jarek Poplawskif6e0b232008-08-22 03:24:05 -0700252 goto unlock;
Patrick McHardy43effa12006-11-29 17:35:48 -0800253 }
Jarek Poplawskif6e0b232008-08-22 03:24:05 -0700254
255 q = qdisc_match_from_root(dev->rx_queue.qdisc_sleeping, handle);
256
257unlock:
258 spin_unlock_bh(&qdisc_list_lock);
259
260 return q;
Patrick McHardy43effa12006-11-29 17:35:48 -0800261}
262
Linus Torvalds1da177e2005-04-16 15:20:36 -0700263static struct Qdisc *qdisc_leaf(struct Qdisc *p, u32 classid)
264{
265 unsigned long cl;
266 struct Qdisc *leaf;
Eric Dumazet20fea082007-11-14 01:44:41 -0800267 const struct Qdisc_class_ops *cops = p->ops->cl_ops;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700268
269 if (cops == NULL)
270 return NULL;
271 cl = cops->get(p, classid);
272
273 if (cl == 0)
274 return NULL;
275 leaf = cops->leaf(p, cl);
276 cops->put(p, cl);
277 return leaf;
278}
279
280/* Find queueing discipline by name */
281
Patrick McHardy1e904742008-01-22 22:11:17 -0800282static struct Qdisc_ops *qdisc_lookup_ops(struct nlattr *kind)
Linus Torvalds1da177e2005-04-16 15:20:36 -0700283{
284 struct Qdisc_ops *q = NULL;
285
286 if (kind) {
287 read_lock(&qdisc_mod_lock);
288 for (q = qdisc_base; q; q = q->next) {
Patrick McHardy1e904742008-01-22 22:11:17 -0800289 if (nla_strcmp(kind, q->id) == 0) {
Linus Torvalds1da177e2005-04-16 15:20:36 -0700290 if (!try_module_get(q->owner))
291 q = NULL;
292 break;
293 }
294 }
295 read_unlock(&qdisc_mod_lock);
296 }
297 return q;
298}
299
300static struct qdisc_rate_table *qdisc_rtab_list;
301
Patrick McHardy1e904742008-01-22 22:11:17 -0800302struct qdisc_rate_table *qdisc_get_rtab(struct tc_ratespec *r, struct nlattr *tab)
Linus Torvalds1da177e2005-04-16 15:20:36 -0700303{
304 struct qdisc_rate_table *rtab;
305
306 for (rtab = qdisc_rtab_list; rtab; rtab = rtab->next) {
307 if (memcmp(&rtab->rate, r, sizeof(struct tc_ratespec)) == 0) {
308 rtab->refcnt++;
309 return rtab;
310 }
311 }
312
Patrick McHardy5feb5e12008-01-23 20:35:19 -0800313 if (tab == NULL || r->rate == 0 || r->cell_log == 0 ||
314 nla_len(tab) != TC_RTAB_SIZE)
Linus Torvalds1da177e2005-04-16 15:20:36 -0700315 return NULL;
316
317 rtab = kmalloc(sizeof(*rtab), GFP_KERNEL);
318 if (rtab) {
319 rtab->rate = *r;
320 rtab->refcnt = 1;
Patrick McHardy1e904742008-01-22 22:11:17 -0800321 memcpy(rtab->data, nla_data(tab), 1024);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700322 rtab->next = qdisc_rtab_list;
323 qdisc_rtab_list = rtab;
324 }
325 return rtab;
326}
Patrick McHardy62e3ba12008-01-22 22:10:23 -0800327EXPORT_SYMBOL(qdisc_get_rtab);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700328
329void qdisc_put_rtab(struct qdisc_rate_table *tab)
330{
331 struct qdisc_rate_table *rtab, **rtabp;
332
333 if (!tab || --tab->refcnt)
334 return;
335
336 for (rtabp = &qdisc_rtab_list; (rtab=*rtabp) != NULL; rtabp = &rtab->next) {
337 if (rtab == tab) {
338 *rtabp = rtab->next;
339 kfree(rtab);
340 return;
341 }
342 }
343}
Patrick McHardy62e3ba12008-01-22 22:10:23 -0800344EXPORT_SYMBOL(qdisc_put_rtab);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700345
Jussi Kivilinna175f9c12008-07-20 00:08:47 -0700346static LIST_HEAD(qdisc_stab_list);
347static DEFINE_SPINLOCK(qdisc_stab_lock);
348
349static const struct nla_policy stab_policy[TCA_STAB_MAX + 1] = {
350 [TCA_STAB_BASE] = { .len = sizeof(struct tc_sizespec) },
351 [TCA_STAB_DATA] = { .type = NLA_BINARY },
352};
353
354static struct qdisc_size_table *qdisc_get_stab(struct nlattr *opt)
355{
356 struct nlattr *tb[TCA_STAB_MAX + 1];
357 struct qdisc_size_table *stab;
358 struct tc_sizespec *s;
359 unsigned int tsize = 0;
360 u16 *tab = NULL;
361 int err;
362
363 err = nla_parse_nested(tb, TCA_STAB_MAX, opt, stab_policy);
364 if (err < 0)
365 return ERR_PTR(err);
366 if (!tb[TCA_STAB_BASE])
367 return ERR_PTR(-EINVAL);
368
369 s = nla_data(tb[TCA_STAB_BASE]);
370
371 if (s->tsize > 0) {
372 if (!tb[TCA_STAB_DATA])
373 return ERR_PTR(-EINVAL);
374 tab = nla_data(tb[TCA_STAB_DATA]);
375 tsize = nla_len(tb[TCA_STAB_DATA]) / sizeof(u16);
376 }
377
378 if (!s || tsize != s->tsize || (!tab && tsize > 0))
379 return ERR_PTR(-EINVAL);
380
David S. Millerf3b96052008-08-18 22:33:05 -0700381 spin_lock(&qdisc_stab_lock);
Jussi Kivilinna175f9c12008-07-20 00:08:47 -0700382
383 list_for_each_entry(stab, &qdisc_stab_list, list) {
384 if (memcmp(&stab->szopts, s, sizeof(*s)))
385 continue;
386 if (tsize > 0 && memcmp(stab->data, tab, tsize * sizeof(u16)))
387 continue;
388 stab->refcnt++;
David S. Millerf3b96052008-08-18 22:33:05 -0700389 spin_unlock(&qdisc_stab_lock);
Jussi Kivilinna175f9c12008-07-20 00:08:47 -0700390 return stab;
391 }
392
David S. Millerf3b96052008-08-18 22:33:05 -0700393 spin_unlock(&qdisc_stab_lock);
Jussi Kivilinna175f9c12008-07-20 00:08:47 -0700394
395 stab = kmalloc(sizeof(*stab) + tsize * sizeof(u16), GFP_KERNEL);
396 if (!stab)
397 return ERR_PTR(-ENOMEM);
398
399 stab->refcnt = 1;
400 stab->szopts = *s;
401 if (tsize > 0)
402 memcpy(stab->data, tab, tsize * sizeof(u16));
403
David S. Millerf3b96052008-08-18 22:33:05 -0700404 spin_lock(&qdisc_stab_lock);
Jussi Kivilinna175f9c12008-07-20 00:08:47 -0700405 list_add_tail(&stab->list, &qdisc_stab_list);
David S. Millerf3b96052008-08-18 22:33:05 -0700406 spin_unlock(&qdisc_stab_lock);
Jussi Kivilinna175f9c12008-07-20 00:08:47 -0700407
408 return stab;
409}
410
411void qdisc_put_stab(struct qdisc_size_table *tab)
412{
413 if (!tab)
414 return;
415
David S. Millerf3b96052008-08-18 22:33:05 -0700416 spin_lock(&qdisc_stab_lock);
Jussi Kivilinna175f9c12008-07-20 00:08:47 -0700417
418 if (--tab->refcnt == 0) {
419 list_del(&tab->list);
420 kfree(tab);
421 }
422
David S. Millerf3b96052008-08-18 22:33:05 -0700423 spin_unlock(&qdisc_stab_lock);
Jussi Kivilinna175f9c12008-07-20 00:08:47 -0700424}
425EXPORT_SYMBOL(qdisc_put_stab);
426
427static int qdisc_dump_stab(struct sk_buff *skb, struct qdisc_size_table *stab)
428{
429 struct nlattr *nest;
430
431 nest = nla_nest_start(skb, TCA_STAB);
432 NLA_PUT(skb, TCA_STAB_BASE, sizeof(stab->szopts), &stab->szopts);
433 nla_nest_end(skb, nest);
434
435 return skb->len;
436
437nla_put_failure:
438 return -1;
439}
440
441void qdisc_calculate_pkt_len(struct sk_buff *skb, struct qdisc_size_table *stab)
442{
443 int pkt_len, slot;
444
445 pkt_len = skb->len + stab->szopts.overhead;
446 if (unlikely(!stab->szopts.tsize))
447 goto out;
448
449 slot = pkt_len + stab->szopts.cell_align;
450 if (unlikely(slot < 0))
451 slot = 0;
452
453 slot >>= stab->szopts.cell_log;
454 if (likely(slot < stab->szopts.tsize))
455 pkt_len = stab->data[slot];
456 else
457 pkt_len = stab->data[stab->szopts.tsize - 1] *
458 (slot / stab->szopts.tsize) +
459 stab->data[slot % stab->szopts.tsize];
460
461 pkt_len <<= stab->szopts.size_log;
462out:
463 if (unlikely(pkt_len < 1))
464 pkt_len = 1;
465 qdisc_skb_cb(skb)->pkt_len = pkt_len;
466}
467EXPORT_SYMBOL(qdisc_calculate_pkt_len);
468
Patrick McHardy41794772007-03-16 01:19:15 -0700469static enum hrtimer_restart qdisc_watchdog(struct hrtimer *timer)
470{
471 struct qdisc_watchdog *wd = container_of(timer, struct qdisc_watchdog,
472 timer);
473
474 wd->qdisc->flags &= ~TCQ_F_THROTTLED;
Stephen Hemminger11274e52007-03-22 12:17:42 -0700475 smp_wmb();
David S. Miller8608db02008-08-18 20:51:18 -0700476 __netif_schedule(qdisc_root(wd->qdisc));
Stephen Hemminger19365022007-03-22 12:18:35 -0700477
Patrick McHardy41794772007-03-16 01:19:15 -0700478 return HRTIMER_NORESTART;
479}
480
481void qdisc_watchdog_init(struct qdisc_watchdog *wd, struct Qdisc *qdisc)
482{
483 hrtimer_init(&wd->timer, CLOCK_MONOTONIC, HRTIMER_MODE_ABS);
484 wd->timer.function = qdisc_watchdog;
485 wd->qdisc = qdisc;
486}
487EXPORT_SYMBOL(qdisc_watchdog_init);
488
489void qdisc_watchdog_schedule(struct qdisc_watchdog *wd, psched_time_t expires)
490{
491 ktime_t time;
492
Jarek Poplawski2540e052008-08-21 05:11:14 -0700493 if (test_bit(__QDISC_STATE_DEACTIVATED,
494 &qdisc_root_sleeping(wd->qdisc)->state))
495 return;
496
Patrick McHardy41794772007-03-16 01:19:15 -0700497 wd->qdisc->flags |= TCQ_F_THROTTLED;
498 time = ktime_set(0, 0);
499 time = ktime_add_ns(time, PSCHED_US2NS(expires));
500 hrtimer_start(&wd->timer, time, HRTIMER_MODE_ABS);
501}
502EXPORT_SYMBOL(qdisc_watchdog_schedule);
503
504void qdisc_watchdog_cancel(struct qdisc_watchdog *wd)
505{
506 hrtimer_cancel(&wd->timer);
507 wd->qdisc->flags &= ~TCQ_F_THROTTLED;
508}
509EXPORT_SYMBOL(qdisc_watchdog_cancel);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700510
Adrian Bunka94f7792008-07-22 14:20:11 -0700511static struct hlist_head *qdisc_class_hash_alloc(unsigned int n)
Patrick McHardy6fe1c7a2008-07-05 23:21:31 -0700512{
513 unsigned int size = n * sizeof(struct hlist_head), i;
514 struct hlist_head *h;
515
516 if (size <= PAGE_SIZE)
517 h = kmalloc(size, GFP_KERNEL);
518 else
519 h = (struct hlist_head *)
520 __get_free_pages(GFP_KERNEL, get_order(size));
521
522 if (h != NULL) {
523 for (i = 0; i < n; i++)
524 INIT_HLIST_HEAD(&h[i]);
525 }
526 return h;
527}
528
529static void qdisc_class_hash_free(struct hlist_head *h, unsigned int n)
530{
531 unsigned int size = n * sizeof(struct hlist_head);
532
533 if (size <= PAGE_SIZE)
534 kfree(h);
535 else
536 free_pages((unsigned long)h, get_order(size));
537}
538
539void qdisc_class_hash_grow(struct Qdisc *sch, struct Qdisc_class_hash *clhash)
540{
541 struct Qdisc_class_common *cl;
542 struct hlist_node *n, *next;
543 struct hlist_head *nhash, *ohash;
544 unsigned int nsize, nmask, osize;
545 unsigned int i, h;
546
547 /* Rehash when load factor exceeds 0.75 */
548 if (clhash->hashelems * 4 <= clhash->hashsize * 3)
549 return;
550 nsize = clhash->hashsize * 2;
551 nmask = nsize - 1;
552 nhash = qdisc_class_hash_alloc(nsize);
553 if (nhash == NULL)
554 return;
555
556 ohash = clhash->hash;
557 osize = clhash->hashsize;
558
559 sch_tree_lock(sch);
560 for (i = 0; i < osize; i++) {
561 hlist_for_each_entry_safe(cl, n, next, &ohash[i], hnode) {
562 h = qdisc_class_hash(cl->classid, nmask);
563 hlist_add_head(&cl->hnode, &nhash[h]);
564 }
565 }
566 clhash->hash = nhash;
567 clhash->hashsize = nsize;
568 clhash->hashmask = nmask;
569 sch_tree_unlock(sch);
570
571 qdisc_class_hash_free(ohash, osize);
572}
573EXPORT_SYMBOL(qdisc_class_hash_grow);
574
575int qdisc_class_hash_init(struct Qdisc_class_hash *clhash)
576{
577 unsigned int size = 4;
578
579 clhash->hash = qdisc_class_hash_alloc(size);
580 if (clhash->hash == NULL)
581 return -ENOMEM;
582 clhash->hashsize = size;
583 clhash->hashmask = size - 1;
584 clhash->hashelems = 0;
585 return 0;
586}
587EXPORT_SYMBOL(qdisc_class_hash_init);
588
589void qdisc_class_hash_destroy(struct Qdisc_class_hash *clhash)
590{
591 qdisc_class_hash_free(clhash->hash, clhash->hashsize);
592}
593EXPORT_SYMBOL(qdisc_class_hash_destroy);
594
595void qdisc_class_hash_insert(struct Qdisc_class_hash *clhash,
596 struct Qdisc_class_common *cl)
597{
598 unsigned int h;
599
600 INIT_HLIST_NODE(&cl->hnode);
601 h = qdisc_class_hash(cl->classid, clhash->hashmask);
602 hlist_add_head(&cl->hnode, &clhash->hash[h]);
603 clhash->hashelems++;
604}
605EXPORT_SYMBOL(qdisc_class_hash_insert);
606
607void qdisc_class_hash_remove(struct Qdisc_class_hash *clhash,
608 struct Qdisc_class_common *cl)
609{
610 hlist_del(&cl->hnode);
611 clhash->hashelems--;
612}
613EXPORT_SYMBOL(qdisc_class_hash_remove);
614
Linus Torvalds1da177e2005-04-16 15:20:36 -0700615/* Allocate an unique handle from space managed by kernel */
616
617static u32 qdisc_alloc_handle(struct net_device *dev)
618{
619 int i = 0x10000;
620 static u32 autohandle = TC_H_MAKE(0x80000000U, 0);
621
622 do {
623 autohandle += TC_H_MAKE(0x10000U, 0);
624 if (autohandle == TC_H_MAKE(TC_H_ROOT, 0))
625 autohandle = TC_H_MAKE(0x80000000U, 0);
626 } while (qdisc_lookup(dev, autohandle) && --i > 0);
627
628 return i>0 ? autohandle : 0;
629}
630
David S. Miller99194cf2008-07-17 04:54:10 -0700631/* Attach toplevel qdisc to device queue. */
Linus Torvalds1da177e2005-04-16 15:20:36 -0700632
David S. Miller99194cf2008-07-17 04:54:10 -0700633static struct Qdisc *dev_graft_qdisc(struct netdev_queue *dev_queue,
634 struct Qdisc *qdisc)
Linus Torvalds1da177e2005-04-16 15:20:36 -0700635{
David S. Miller8d50b532008-07-30 02:37:46 -0700636 struct Qdisc *oqdisc = dev_queue->qdisc_sleeping;
David S. Miller53049972008-07-16 03:00:19 -0700637 spinlock_t *root_lock;
David S. Miller53049972008-07-16 03:00:19 -0700638
Jarek Poplawski666d9bb2008-08-27 02:12:52 -0700639 root_lock = qdisc_lock(oqdisc);
David S. Miller53049972008-07-16 03:00:19 -0700640 spin_lock_bh(root_lock);
641
David S. Miller8d50b532008-07-30 02:37:46 -0700642 /* Prune old scheduler */
643 if (oqdisc && atomic_read(&oqdisc->refcnt) <= 1)
644 qdisc_reset(oqdisc);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700645
David S. Miller8d50b532008-07-30 02:37:46 -0700646 /* ... and graft new one */
647 if (qdisc == NULL)
648 qdisc = &noop_qdisc;
649 dev_queue->qdisc_sleeping = qdisc;
Jarek Poplawskif7a54c12008-08-27 02:22:07 -0700650 rcu_assign_pointer(dev_queue->qdisc, &noop_qdisc);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700651
David S. Miller53049972008-07-16 03:00:19 -0700652 spin_unlock_bh(root_lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700653
Linus Torvalds1da177e2005-04-16 15:20:36 -0700654 return oqdisc;
655}
656
Patrick McHardy43effa12006-11-29 17:35:48 -0800657void qdisc_tree_decrease_qlen(struct Qdisc *sch, unsigned int n)
658{
Eric Dumazet20fea082007-11-14 01:44:41 -0800659 const struct Qdisc_class_ops *cops;
Patrick McHardy43effa12006-11-29 17:35:48 -0800660 unsigned long cl;
661 u32 parentid;
662
663 if (n == 0)
664 return;
665 while ((parentid = sch->parent)) {
Jarek Poplawski066a3b52008-04-14 15:10:42 -0700666 if (TC_H_MAJ(parentid) == TC_H_MAJ(TC_H_INGRESS))
667 return;
668
David S. Miller5ce2d482008-07-08 17:06:30 -0700669 sch = qdisc_lookup(qdisc_dev(sch), TC_H_MAJ(parentid));
Patrick McHardyffc8fef2007-07-30 17:11:50 -0700670 if (sch == NULL) {
671 WARN_ON(parentid != TC_H_ROOT);
672 return;
673 }
Patrick McHardy43effa12006-11-29 17:35:48 -0800674 cops = sch->ops->cl_ops;
675 if (cops->qlen_notify) {
676 cl = cops->get(sch, parentid);
677 cops->qlen_notify(sch, cl);
678 cops->put(sch, cl);
679 }
680 sch->q.qlen -= n;
681 }
682}
683EXPORT_SYMBOL(qdisc_tree_decrease_qlen);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700684
David S. Miller99194cf2008-07-17 04:54:10 -0700685static void notify_and_destroy(struct sk_buff *skb, struct nlmsghdr *n, u32 clid,
686 struct Qdisc *old, struct Qdisc *new)
687{
688 if (new || old)
689 qdisc_notify(skb, n, clid, old, new);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700690
David S. Miller4d8863a2008-08-18 21:03:15 -0700691 if (old)
David S. Miller99194cf2008-07-17 04:54:10 -0700692 qdisc_destroy(old);
David S. Miller99194cf2008-07-17 04:54:10 -0700693}
694
695/* Graft qdisc "new" to class "classid" of qdisc "parent" or
696 * to device "dev".
697 *
698 * When appropriate send a netlink notification using 'skb'
699 * and "n".
700 *
701 * On success, destroy old qdisc.
Linus Torvalds1da177e2005-04-16 15:20:36 -0700702 */
703
704static int qdisc_graft(struct net_device *dev, struct Qdisc *parent,
David S. Miller99194cf2008-07-17 04:54:10 -0700705 struct sk_buff *skb, struct nlmsghdr *n, u32 classid,
706 struct Qdisc *new, struct Qdisc *old)
Linus Torvalds1da177e2005-04-16 15:20:36 -0700707{
David S. Miller99194cf2008-07-17 04:54:10 -0700708 struct Qdisc *q = old;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700709 int err = 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700710
YOSHIFUJI Hideaki10297b92007-02-09 23:25:16 +0900711 if (parent == NULL) {
David S. Miller99194cf2008-07-17 04:54:10 -0700712 unsigned int i, num_q, ingress;
713
714 ingress = 0;
715 num_q = dev->num_tx_queues;
David S. Miller8d50b532008-07-30 02:37:46 -0700716 if ((q && q->flags & TCQ_F_INGRESS) ||
717 (new && new->flags & TCQ_F_INGRESS)) {
David S. Miller99194cf2008-07-17 04:54:10 -0700718 num_q = 1;
719 ingress = 1;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700720 }
David S. Miller99194cf2008-07-17 04:54:10 -0700721
722 if (dev->flags & IFF_UP)
723 dev_deactivate(dev);
724
725 for (i = 0; i < num_q; i++) {
726 struct netdev_queue *dev_queue = &dev->rx_queue;
727
728 if (!ingress)
729 dev_queue = netdev_get_tx_queue(dev, i);
730
David S. Miller8d50b532008-07-30 02:37:46 -0700731 old = dev_graft_qdisc(dev_queue, new);
732 if (new && i > 0)
733 atomic_inc(&new->refcnt);
734
David S. Miller99194cf2008-07-17 04:54:10 -0700735 notify_and_destroy(skb, n, classid, old, new);
736 }
737
738 if (dev->flags & IFF_UP)
739 dev_activate(dev);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700740 } else {
Eric Dumazet20fea082007-11-14 01:44:41 -0800741 const struct Qdisc_class_ops *cops = parent->ops->cl_ops;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700742
743 err = -EINVAL;
744
745 if (cops) {
746 unsigned long cl = cops->get(parent, classid);
747 if (cl) {
David S. Miller99194cf2008-07-17 04:54:10 -0700748 err = cops->graft(parent, cl, new, &old);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700749 cops->put(parent, cl);
750 }
751 }
David S. Miller99194cf2008-07-17 04:54:10 -0700752 if (!err)
753 notify_and_destroy(skb, n, classid, old, new);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700754 }
755 return err;
756}
757
Jarek Poplawski25bfcd52008-08-18 20:53:34 -0700758/* lockdep annotation is needed for ingress; egress gets it only for name */
759static struct lock_class_key qdisc_tx_lock;
760static struct lock_class_key qdisc_rx_lock;
761
Linus Torvalds1da177e2005-04-16 15:20:36 -0700762/*
763 Allocate and initialize new qdisc.
764
765 Parameters are passed via opt.
766 */
767
768static struct Qdisc *
David S. Millerbb949fb2008-07-08 16:55:56 -0700769qdisc_create(struct net_device *dev, struct netdev_queue *dev_queue,
770 u32 parent, u32 handle, struct nlattr **tca, int *errp)
Linus Torvalds1da177e2005-04-16 15:20:36 -0700771{
772 int err;
Patrick McHardy1e904742008-01-22 22:11:17 -0800773 struct nlattr *kind = tca[TCA_KIND];
Linus Torvalds1da177e2005-04-16 15:20:36 -0700774 struct Qdisc *sch;
775 struct Qdisc_ops *ops;
Jussi Kivilinna175f9c12008-07-20 00:08:47 -0700776 struct qdisc_size_table *stab;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700777
778 ops = qdisc_lookup_ops(kind);
Johannes Berg95a5afc2008-10-16 15:24:51 -0700779#ifdef CONFIG_MODULES
Linus Torvalds1da177e2005-04-16 15:20:36 -0700780 if (ops == NULL && kind != NULL) {
781 char name[IFNAMSIZ];
Patrick McHardy1e904742008-01-22 22:11:17 -0800782 if (nla_strlcpy(name, kind, IFNAMSIZ) < IFNAMSIZ) {
Linus Torvalds1da177e2005-04-16 15:20:36 -0700783 /* We dropped the RTNL semaphore in order to
784 * perform the module load. So, even if we
785 * succeeded in loading the module we have to
786 * tell the caller to replay the request. We
787 * indicate this using -EAGAIN.
788 * We replay the request because the device may
789 * go away in the mean time.
790 */
791 rtnl_unlock();
792 request_module("sch_%s", name);
793 rtnl_lock();
794 ops = qdisc_lookup_ops(kind);
795 if (ops != NULL) {
796 /* We will try again qdisc_lookup_ops,
797 * so don't keep a reference.
798 */
799 module_put(ops->owner);
800 err = -EAGAIN;
801 goto err_out;
802 }
803 }
804 }
805#endif
806
Jamal Hadi Salimb9e2cc02006-08-03 16:36:51 -0700807 err = -ENOENT;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700808 if (ops == NULL)
809 goto err_out;
810
David S. Miller5ce2d482008-07-08 17:06:30 -0700811 sch = qdisc_alloc(dev_queue, ops);
Thomas Graf3d54b822005-07-05 14:15:09 -0700812 if (IS_ERR(sch)) {
813 err = PTR_ERR(sch);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700814 goto err_out2;
Thomas Graf3d54b822005-07-05 14:15:09 -0700815 }
Linus Torvalds1da177e2005-04-16 15:20:36 -0700816
Patrick McHardyffc8fef2007-07-30 17:11:50 -0700817 sch->parent = parent;
818
Thomas Graf3d54b822005-07-05 14:15:09 -0700819 if (handle == TC_H_INGRESS) {
Linus Torvalds1da177e2005-04-16 15:20:36 -0700820 sch->flags |= TCQ_F_INGRESS;
Thomas Graf3d54b822005-07-05 14:15:09 -0700821 handle = TC_H_MAKE(TC_H_INGRESS, 0);
Jarek Poplawski25bfcd52008-08-18 20:53:34 -0700822 lockdep_set_class(qdisc_lock(sch), &qdisc_rx_lock);
Patrick McHardyfd44de72007-04-16 17:07:08 -0700823 } else {
Patrick McHardyfd44de72007-04-16 17:07:08 -0700824 if (handle == 0) {
825 handle = qdisc_alloc_handle(dev);
826 err = -ENOMEM;
827 if (handle == 0)
828 goto err_out3;
829 }
Jarek Poplawski25bfcd52008-08-18 20:53:34 -0700830 lockdep_set_class(qdisc_lock(sch), &qdisc_tx_lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700831 }
832
Thomas Graf3d54b822005-07-05 14:15:09 -0700833 sch->handle = handle;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700834
Patrick McHardy1e904742008-01-22 22:11:17 -0800835 if (!ops->init || (err = ops->init(sch, tca[TCA_OPTIONS])) == 0) {
Jussi Kivilinna175f9c12008-07-20 00:08:47 -0700836 if (tca[TCA_STAB]) {
837 stab = qdisc_get_stab(tca[TCA_STAB]);
838 if (IS_ERR(stab)) {
839 err = PTR_ERR(stab);
840 goto err_out3;
841 }
842 sch->stab = stab;
843 }
Patrick McHardy1e904742008-01-22 22:11:17 -0800844 if (tca[TCA_RATE]) {
Jarek Poplawskif6f9b932008-08-27 02:25:17 -0700845 spinlock_t *root_lock;
846
847 if ((sch->parent != TC_H_ROOT) &&
848 !(sch->flags & TCQ_F_INGRESS))
849 root_lock = qdisc_root_sleeping_lock(sch);
850 else
851 root_lock = qdisc_lock(sch);
852
Thomas Graf023e09a2005-07-05 14:15:53 -0700853 err = gen_new_estimator(&sch->bstats, &sch->rate_est,
Jarek Poplawskif6f9b932008-08-27 02:25:17 -0700854 root_lock, tca[TCA_RATE]);
Thomas Graf023e09a2005-07-05 14:15:53 -0700855 if (err) {
856 /*
857 * Any broken qdiscs that would require
858 * a ops->reset() here? The qdisc was never
859 * in action so it shouldn't be necessary.
860 */
861 if (ops->destroy)
862 ops->destroy(sch);
863 goto err_out3;
864 }
865 }
Jarek Poplawskif6e0b232008-08-22 03:24:05 -0700866
867 qdisc_list_add(sch);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700868
Linus Torvalds1da177e2005-04-16 15:20:36 -0700869 return sch;
870 }
871err_out3:
Jussi Kivilinna175f9c12008-07-20 00:08:47 -0700872 qdisc_put_stab(sch->stab);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700873 dev_put(dev);
Thomas Graf3d54b822005-07-05 14:15:09 -0700874 kfree((char *) sch - sch->padded);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700875err_out2:
876 module_put(ops->owner);
877err_out:
878 *errp = err;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700879 return NULL;
880}
881
Patrick McHardy1e904742008-01-22 22:11:17 -0800882static int qdisc_change(struct Qdisc *sch, struct nlattr **tca)
Linus Torvalds1da177e2005-04-16 15:20:36 -0700883{
Jussi Kivilinna175f9c12008-07-20 00:08:47 -0700884 struct qdisc_size_table *stab = NULL;
885 int err = 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700886
Jussi Kivilinna175f9c12008-07-20 00:08:47 -0700887 if (tca[TCA_OPTIONS]) {
Linus Torvalds1da177e2005-04-16 15:20:36 -0700888 if (sch->ops->change == NULL)
889 return -EINVAL;
Patrick McHardy1e904742008-01-22 22:11:17 -0800890 err = sch->ops->change(sch, tca[TCA_OPTIONS]);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700891 if (err)
892 return err;
893 }
Jussi Kivilinna175f9c12008-07-20 00:08:47 -0700894
895 if (tca[TCA_STAB]) {
896 stab = qdisc_get_stab(tca[TCA_STAB]);
897 if (IS_ERR(stab))
898 return PTR_ERR(stab);
899 }
900
901 qdisc_put_stab(sch->stab);
902 sch->stab = stab;
903
Patrick McHardy1e904742008-01-22 22:11:17 -0800904 if (tca[TCA_RATE])
Linus Torvalds1da177e2005-04-16 15:20:36 -0700905 gen_replace_estimator(&sch->bstats, &sch->rate_est,
Jarek Poplawskif6f9b932008-08-27 02:25:17 -0700906 qdisc_root_sleeping_lock(sch),
907 tca[TCA_RATE]);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700908 return 0;
909}
910
/*
 * Walker state used by check_loop() to detect grafting cycles:
 * 'p' is the qdisc being grafted, 'depth' the current nesting level.
 * The embedded qdisc_walker must remain the first member because
 * check_loop_fn() casts the walker pointer back to this struct.
 */
struct check_loop_arg
{
	struct qdisc_walker 	w;	/* generic class-walk header (must be first) */
	struct Qdisc		*p;	/* qdisc that must not be reachable */
	int			depth;	/* recursion depth, capped in check_loop_fn() */
};

static int check_loop_fn(struct Qdisc *q, unsigned long cl, struct qdisc_walker *w);
919
920static int check_loop(struct Qdisc *q, struct Qdisc *p, int depth)
921{
922 struct check_loop_arg arg;
923
924 if (q->ops->cl_ops == NULL)
925 return 0;
926
927 arg.w.stop = arg.w.skip = arg.w.count = 0;
928 arg.w.fn = check_loop_fn;
929 arg.depth = depth;
930 arg.p = p;
931 q->ops->cl_ops->walk(q, &arg.w);
932 return arg.w.stop ? -ELOOP : 0;
933}
934
935static int
936check_loop_fn(struct Qdisc *q, unsigned long cl, struct qdisc_walker *w)
937{
938 struct Qdisc *leaf;
Eric Dumazet20fea082007-11-14 01:44:41 -0800939 const struct Qdisc_class_ops *cops = q->ops->cl_ops;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700940 struct check_loop_arg *arg = (struct check_loop_arg *)w;
941
942 leaf = cops->leaf(q, cl);
943 if (leaf) {
944 if (leaf == arg->p || arg->depth > 7)
945 return -ELOOP;
946 return check_loop(leaf, arg->p, arg->depth + 1);
947 }
948 return 0;
949}
950
951/*
952 * Delete/get qdisc.
953 */
954
/*
 * Handle RTM_DELQDISC (delete a qdisc) and RTM_GETQDISC (report a qdisc).
 * The target is located either via its parent id (tcm_parent/clid) or
 * directly via its handle; returns 0 or a negative errno.
 */
static int tc_get_qdisc(struct sk_buff *skb, struct nlmsghdr *n, void *arg)
{
	struct net *net = sock_net(skb->sk);
	struct tcmsg *tcm = NLMSG_DATA(n);
	struct nlattr *tca[TCA_MAX + 1];
	struct net_device *dev;
	u32 clid = tcm->tcm_parent;
	struct Qdisc *q = NULL;
	struct Qdisc *p = NULL;
	int err;

	/* Packet scheduling is not namespace-aware here: init_net only. */
	if (net != &init_net)
		return -EINVAL;

	if ((dev = __dev_get_by_index(&init_net, tcm->tcm_ifindex)) == NULL)
		return -ENODEV;

	err = nlmsg_parse(n, sizeof(*tcm), tca, TCA_MAX, NULL);
	if (err < 0)
		return err;

	if (clid) {
		if (clid != TC_H_ROOT) {
			if (TC_H_MAJ(clid) != TC_H_MAJ(TC_H_INGRESS)) {
				/* Parent given: find it and take its leaf child. */
				if ((p = qdisc_lookup(dev, TC_H_MAJ(clid))) == NULL)
					return -ENOENT;
				q = qdisc_leaf(p, clid);
			} else { /* ingress */
				q = dev->rx_queue.qdisc_sleeping;
			}
		} else {
			/* Root: the qdisc attached to TX queue 0. */
			struct netdev_queue *dev_queue;
			dev_queue = netdev_get_tx_queue(dev, 0);
			q = dev_queue->qdisc_sleeping;
		}
		if (!q)
			return -ENOENT;

		/* A handle, when supplied, must agree with the qdisc found. */
		if (tcm->tcm_handle && q->handle != tcm->tcm_handle)
			return -EINVAL;
	} else {
		/* No parent given: look the qdisc up by handle alone. */
		if ((q = qdisc_lookup(dev, tcm->tcm_handle)) == NULL)
			return -ENOENT;
	}

	if (tca[TCA_KIND] && nla_strcmp(tca[TCA_KIND], q->ops->id))
		return -EINVAL;

	if (n->nlmsg_type == RTM_DELQDISC) {
		/* Deletion needs an explicit parent and must not target a
		 * default (handle 0) qdisc. */
		if (!clid)
			return -EINVAL;
		if (q->handle == 0)
			return -ENOENT;
		/* Graft NULL in place of q: detaches and destroys it. */
		if ((err = qdisc_graft(dev, p, skb, n, clid, NULL, q)) != 0)
			return err;
	} else {
		qdisc_notify(skb, n, clid, NULL, q);
	}
	return 0;
}
1015
1016/*
1017 Create/change qdisc.
1018 */
1019
/*
 * Handle RTM_NEWQDISC: create a new qdisc, replace an existing one, or
 * change an existing qdisc's parameters, depending on the NLM_F_* flags
 * and whether a matching qdisc already exists.  May restart from
 * 'replay' when qdisc_create() returns -EAGAIN (e.g. after module load).
 */
static int tc_modify_qdisc(struct sk_buff *skb, struct nlmsghdr *n, void *arg)
{
	struct net *net = sock_net(skb->sk);
	struct tcmsg *tcm;
	struct nlattr *tca[TCA_MAX + 1];
	struct net_device *dev;
	u32 clid;
	struct Qdisc *q, *p;
	int err;

	/* Packet scheduling is not namespace-aware here: init_net only. */
	if (net != &init_net)
		return -EINVAL;

replay:
	/* Reinit, just in case something touches this. */
	tcm = NLMSG_DATA(n);
	clid = tcm->tcm_parent;
	q = p = NULL;

	if ((dev = __dev_get_by_index(&init_net, tcm->tcm_ifindex)) == NULL)
		return -ENODEV;

	err = nlmsg_parse(n, sizeof(*tcm), tca, TCA_MAX, NULL);
	if (err < 0)
		return err;

	if (clid) {
		if (clid != TC_H_ROOT) {
			if (clid != TC_H_INGRESS) {
				/* Parent given: locate it and its leaf child. */
				if ((p = qdisc_lookup(dev, TC_H_MAJ(clid))) == NULL)
					return -ENOENT;
				q = qdisc_leaf(p, clid);
			} else { /*ingress */
				q = dev->rx_queue.qdisc_sleeping;
			}
		} else {
			struct netdev_queue *dev_queue;
			dev_queue = netdev_get_tx_queue(dev, 0);
			q = dev_queue->qdisc_sleeping;
		}

		/* It may be default qdisc, ignore it */
		if (q && q->handle == 0)
			q = NULL;

		if (!q || !tcm->tcm_handle || q->handle != tcm->tcm_handle) {
			if (tcm->tcm_handle) {
				if (q && !(n->nlmsg_flags&NLM_F_REPLACE))
					return -EEXIST;
				/* Handles are major-only: X:0. */
				if (TC_H_MIN(tcm->tcm_handle))
					return -EINVAL;
				if ((q = qdisc_lookup(dev, tcm->tcm_handle)) == NULL)
					goto create_n_graft;
				if (n->nlmsg_flags&NLM_F_EXCL)
					return -EEXIST;
				if (tca[TCA_KIND] && nla_strcmp(tca[TCA_KIND], q->ops->id))
					return -EINVAL;
				/* Moving q under p must not create a cycle. */
				if (q == p ||
				    (p && check_loop(q, p, 0)))
					return -ELOOP;
				/* Extra ref for the graft; dropped on failure below. */
				atomic_inc(&q->refcnt);
				goto graft;
			} else {
				if (q == NULL)
					goto create_n_graft;

				/* This magic test requires explanation.
				 *
				 * We know, that some child q is already
				 * attached to this parent and have choice:
				 * either to change it or to create/graft new one.
				 *
				 * 1. We are allowed to create/graft only
				 * if CREATE and REPLACE flags are set.
				 *
				 * 2. If EXCL is set, requestor wanted to say,
				 * that qdisc tcm_handle is not expected
				 * to exist, so that we choose create/graft too.
				 *
				 * 3. The last case is when no flags are set.
				 * Alas, it is sort of hole in API, we
				 * cannot decide what to do unambiguously.
				 * For now we select create/graft, if
				 * user gave KIND, which does not match existing.
				 */
				if ((n->nlmsg_flags&NLM_F_CREATE) &&
				    (n->nlmsg_flags&NLM_F_REPLACE) &&
				    ((n->nlmsg_flags&NLM_F_EXCL) ||
				     (tca[TCA_KIND] &&
				      nla_strcmp(tca[TCA_KIND], q->ops->id))))
					goto create_n_graft;
			}
		}
	} else {
		/* No parent: the handle itself identifies the qdisc. */
		if (!tcm->tcm_handle)
			return -EINVAL;
		q = qdisc_lookup(dev, tcm->tcm_handle);
	}

	/* Change qdisc parameters */
	if (q == NULL)
		return -ENOENT;
	if (n->nlmsg_flags&NLM_F_EXCL)
		return -EEXIST;
	if (tca[TCA_KIND] && nla_strcmp(tca[TCA_KIND], q->ops->id))
		return -EINVAL;
	err = qdisc_change(q, tca);
	if (err == 0)
		qdisc_notify(skb, n, clid, NULL, q);
	return err;

create_n_graft:
	if (!(n->nlmsg_flags&NLM_F_CREATE))
		return -ENOENT;
	if (clid == TC_H_INGRESS)
		/* Ingress qdiscs hang off the rx queue; parent doubles as handle. */
		q = qdisc_create(dev, &dev->rx_queue,
				 tcm->tcm_parent, tcm->tcm_parent,
				 tca, &err);
	else
		q = qdisc_create(dev, netdev_get_tx_queue(dev, 0),
				 tcm->tcm_parent, tcm->tcm_handle,
				 tca, &err);
	if (q == NULL) {
		/* -EAGAIN: qdisc module was loaded; retry the whole request. */
		if (err == -EAGAIN)
			goto replay;
		return err;
	}

graft:
	err = qdisc_graft(dev, p, skb, n, clid, q, NULL);
	if (err) {
		if (q)
			qdisc_destroy(q);
		return err;
	}

	return 0;
}
1158
/*
 * Serialize one qdisc into an RTM_NEWQDISC/RTM_DELQDISC netlink message:
 * tcmsg header, kind string, qdisc options, size table and statistics.
 * Returns skb->len on success, -1 on overflow (the partial message is
 * trimmed).  NLMSG_NEW jumps to nlmsg_failure when the skb is full.
 */
static int tc_fill_qdisc(struct sk_buff *skb, struct Qdisc *q, u32 clid,
			 u32 pid, u32 seq, u16 flags, int event)
{
	struct tcmsg *tcm;
	struct nlmsghdr *nlh;
	unsigned char *b = skb_tail_pointer(skb);	/* rollback point on failure */
	struct gnet_dump d;

	nlh = NLMSG_NEW(skb, pid, seq, event, sizeof(*tcm), flags);
	tcm = NLMSG_DATA(nlh);
	tcm->tcm_family = AF_UNSPEC;
	tcm->tcm__pad1 = 0;
	tcm->tcm__pad2 = 0;
	tcm->tcm_ifindex = qdisc_dev(q)->ifindex;
	tcm->tcm_parent = clid;
	tcm->tcm_handle = q->handle;
	/* tcm_info carries the refcount for qdisc dumps. */
	tcm->tcm_info = atomic_read(&q->refcnt);
	NLA_PUT_STRING(skb, TCA_KIND, q->ops->id);
	if (q->ops->dump && q->ops->dump(q, skb) < 0)
		goto nla_put_failure;
	/* Refresh the queue length snapshot before copying stats. */
	q->qstats.qlen = q->q.qlen;

	if (q->stab && qdisc_dump_stab(skb, q->stab) < 0)
		goto nla_put_failure;

	if (gnet_stats_start_copy_compat(skb, TCA_STATS2, TCA_STATS, TCA_XSTATS,
					 qdisc_root_sleeping_lock(q), &d) < 0)
		goto nla_put_failure;

	if (q->ops->dump_stats && q->ops->dump_stats(q, &d) < 0)
		goto nla_put_failure;

	if (gnet_stats_copy_basic(&d, &q->bstats) < 0 ||
	    gnet_stats_copy_rate_est(&d, &q->rate_est) < 0 ||
	    gnet_stats_copy_queue(&d, &q->qstats) < 0)
		goto nla_put_failure;

	if (gnet_stats_finish_copy(&d) < 0)
		goto nla_put_failure;

	/* Patch the final message length now that all attributes are in. */
	nlh->nlmsg_len = skb_tail_pointer(skb) - b;
	return skb->len;

nlmsg_failure:
nla_put_failure:
	nlmsg_trim(skb, b);
	return -1;
}
1207
1208static int qdisc_notify(struct sk_buff *oskb, struct nlmsghdr *n,
1209 u32 clid, struct Qdisc *old, struct Qdisc *new)
1210{
1211 struct sk_buff *skb;
1212 u32 pid = oskb ? NETLINK_CB(oskb).pid : 0;
1213
1214 skb = alloc_skb(NLMSG_GOODSIZE, GFP_KERNEL);
1215 if (!skb)
1216 return -ENOBUFS;
1217
1218 if (old && old->handle) {
1219 if (tc_fill_qdisc(skb, old, clid, pid, n->nlmsg_seq, 0, RTM_DELQDISC) < 0)
1220 goto err_out;
1221 }
1222 if (new) {
1223 if (tc_fill_qdisc(skb, new, clid, pid, n->nlmsg_seq, old ? NLM_F_REPLACE : 0, RTM_NEWQDISC) < 0)
1224 goto err_out;
1225 }
1226
1227 if (skb->len)
Denis V. Lunev97c53ca2007-11-19 22:26:51 -08001228 return rtnetlink_send(skb, &init_net, pid, RTNLGRP_TC, n->nlmsg_flags&NLM_F_ECHO);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001229
1230err_out:
1231 kfree_skb(skb);
1232 return -EINVAL;
1233}
1234
David S. Miller30723672008-07-18 22:50:15 -07001235static bool tc_qdisc_dump_ignore(struct Qdisc *q)
1236{
1237 return (q->flags & TCQ_F_BUILTIN) ? true : false;
1238}
1239
1240static int tc_dump_qdisc_root(struct Qdisc *root, struct sk_buff *skb,
1241 struct netlink_callback *cb,
1242 int *q_idx_p, int s_q_idx)
1243{
1244 int ret = 0, q_idx = *q_idx_p;
1245 struct Qdisc *q;
1246
1247 if (!root)
1248 return 0;
1249
1250 q = root;
1251 if (q_idx < s_q_idx) {
1252 q_idx++;
1253 } else {
1254 if (!tc_qdisc_dump_ignore(q) &&
1255 tc_fill_qdisc(skb, q, q->parent, NETLINK_CB(cb->skb).pid,
1256 cb->nlh->nlmsg_seq, NLM_F_MULTI, RTM_NEWQDISC) <= 0)
1257 goto done;
1258 q_idx++;
1259 }
1260 list_for_each_entry(q, &root->list, list) {
1261 if (q_idx < s_q_idx) {
1262 q_idx++;
1263 continue;
1264 }
1265 if (!tc_qdisc_dump_ignore(q) &&
1266 tc_fill_qdisc(skb, q, q->parent, NETLINK_CB(cb->skb).pid,
1267 cb->nlh->nlmsg_seq, NLM_F_MULTI, RTM_NEWQDISC) <= 0)
1268 goto done;
1269 q_idx++;
1270 }
1271
1272out:
1273 *q_idx_p = q_idx;
1274 return ret;
1275done:
1276 ret = -1;
1277 goto out;
1278}
1279
/*
 * Netlink dump callback for RTM_GETQDISC: walk every device in init_net
 * and dump the TX root qdisc tree and the ingress qdisc tree of each.
 * cb->args[0]/args[1] carry the device/qdisc resume cursor between calls.
 */
static int tc_dump_qdisc(struct sk_buff *skb, struct netlink_callback *cb)
{
	struct net *net = sock_net(skb->sk);
	int idx, q_idx;
	int s_idx, s_q_idx;
	struct net_device *dev;

	/* Packet scheduling is not namespace-aware here: init_net only. */
	if (net != &init_net)
		return 0;

	s_idx = cb->args[0];
	s_q_idx = q_idx = cb->args[1];
	read_lock(&dev_base_lock);
	idx = 0;
	for_each_netdev(&init_net, dev) {
		struct netdev_queue *dev_queue;

		if (idx < s_idx)
			goto cont;
		/* Past the resume device: restart its qdisc cursor at 0. */
		if (idx > s_idx)
			s_q_idx = 0;
		q_idx = 0;

		dev_queue = netdev_get_tx_queue(dev, 0);
		if (tc_dump_qdisc_root(dev_queue->qdisc_sleeping, skb, cb, &q_idx, s_q_idx) < 0)
			goto done;

		dev_queue = &dev->rx_queue;
		if (tc_dump_qdisc_root(dev_queue->qdisc_sleeping, skb, cb, &q_idx, s_q_idx) < 0)
			goto done;

cont:
		idx++;
	}

done:
	read_unlock(&dev_base_lock);

	/* Save the cursor so the next invocation resumes where we stopped. */
	cb->args[0] = idx;
	cb->args[1] = q_idx;

	return skb->len;
}
1323
1324
1325
1326/************************************************
1327 * Traffic classes manipulation. *
1328 ************************************************/
1329
1330
1331
/*
 * Handle RTM_NEWTCLASS/RTM_DELTCLASS/RTM_GETTCLASS: resolve the owning
 * qdisc from the parent/handle encoding described below, then create,
 * change, delete or report the class via the qdisc's class ops.
 */
static int tc_ctl_tclass(struct sk_buff *skb, struct nlmsghdr *n, void *arg)
{
	struct net *net = sock_net(skb->sk);
	struct netdev_queue *dev_queue;
	struct tcmsg *tcm = NLMSG_DATA(n);
	struct nlattr *tca[TCA_MAX + 1];
	struct net_device *dev;
	struct Qdisc *q = NULL;
	const struct Qdisc_class_ops *cops;
	unsigned long cl = 0;		/* class cookie held across the call */
	unsigned long new_cl;
	u32 pid = tcm->tcm_parent;
	u32 clid = tcm->tcm_handle;
	u32 qid = TC_H_MAJ(clid);
	int err;

	/* Packet scheduling is not namespace-aware here: init_net only. */
	if (net != &init_net)
		return -EINVAL;

	if ((dev = __dev_get_by_index(&init_net, tcm->tcm_ifindex)) == NULL)
		return -ENODEV;

	err = nlmsg_parse(n, sizeof(*tcm), tca, TCA_MAX, NULL);
	if (err < 0)
		return err;

	/*
	   parent == TC_H_UNSPEC - unspecified parent.
	   parent == TC_H_ROOT - class is root, which has no parent.
	   parent == X:0 - parent is root class.
	   parent == X:Y - parent is a node in hierarchy.
	   parent == 0:Y - parent is X:Y, where X:0 is qdisc.

	   handle == 0:0 - generate handle from kernel pool.
	   handle == 0:Y - class is X:Y, where X:0 is qdisc.
	   handle == X:Y - clear.
	   handle == X:0 - root class.
	 */

	/* Step 1. Determine qdisc handle X:0 */

	dev_queue = netdev_get_tx_queue(dev, 0);
	if (pid != TC_H_ROOT) {
		u32 qid1 = TC_H_MAJ(pid);

		if (qid && qid1) {
			/* If both majors are known, they must be identical. */
			if (qid != qid1)
				return -EINVAL;
		} else if (qid1) {
			qid = qid1;
		} else if (qid == 0)
			qid = dev_queue->qdisc_sleeping->handle;

		/* Now qid is genuine qdisc handle consistent
		   both with parent and child.

		   TC_H_MAJ(pid) still may be unspecified, complete it now.
		 */
		if (pid)
			pid = TC_H_MAKE(qid, pid);
	} else {
		if (qid == 0)
			qid = dev_queue->qdisc_sleeping->handle;
	}

	/* OK. Locate qdisc */
	if ((q = qdisc_lookup(dev, qid)) == NULL)
		return -ENOENT;

	/* And check that it supports classes */
	cops = q->ops->cl_ops;
	if (cops == NULL)
		return -EINVAL;

	/* Now try to get class */
	if (clid == 0) {
		if (pid == TC_H_ROOT)
			clid = qid;
	} else
		clid = TC_H_MAKE(qid, clid);

	if (clid)
		cl = cops->get(q, clid);

	if (cl == 0) {
		/* Class not found: only RTM_NEWTCLASS with NLM_F_CREATE may
		 * proceed (to create it); everything else is -ENOENT. */
		err = -ENOENT;
		if (n->nlmsg_type != RTM_NEWTCLASS || !(n->nlmsg_flags&NLM_F_CREATE))
			goto out;
	} else {
		switch (n->nlmsg_type) {
		case RTM_NEWTCLASS:
			err = -EEXIST;
			if (n->nlmsg_flags&NLM_F_EXCL)
				goto out;
			break;
		case RTM_DELTCLASS:
			err = cops->delete(q, cl);
			if (err == 0)
				tclass_notify(skb, n, q, cl, RTM_DELTCLASS);
			goto out;
		case RTM_GETTCLASS:
			err = tclass_notify(skb, n, q, cl, RTM_NEWTCLASS);
			goto out;
		default:
			err = -EINVAL;
			goto out;
		}
	}

	/* Create or change: ->change may allocate and return a new cookie. */
	new_cl = cl;
	err = cops->change(q, clid, pid, tca, &new_cl);
	if (err == 0)
		tclass_notify(skb, n, q, new_cl, RTM_NEWTCLASS);

out:
	/* Drop the reference taken by cops->get() above. */
	if (cl)
		cops->put(q, cl);

	return err;
}
1453
1454
/*
 * Serialize one traffic class into an RTM_*TCLASS netlink message:
 * tcmsg header, qdisc kind, class-specific dump and statistics.
 * Returns skb->len on success, -1 on overflow (partial message trimmed);
 * NLMSG_NEW jumps to nlmsg_failure when the skb is full.
 */
static int tc_fill_tclass(struct sk_buff *skb, struct Qdisc *q,
			  unsigned long cl,
			  u32 pid, u32 seq, u16 flags, int event)
{
	struct tcmsg *tcm;
	struct nlmsghdr *nlh;
	unsigned char *b = skb_tail_pointer(skb);	/* rollback point on failure */
	struct gnet_dump d;
	const struct Qdisc_class_ops *cl_ops = q->ops->cl_ops;

	nlh = NLMSG_NEW(skb, pid, seq, event, sizeof(*tcm), flags);
	tcm = NLMSG_DATA(nlh);
	tcm->tcm_family = AF_UNSPEC;
	tcm->tcm_ifindex = qdisc_dev(q)->ifindex;
	tcm->tcm_parent = q->handle;
	tcm->tcm_handle = q->handle;
	tcm->tcm_info = 0;
	NLA_PUT_STRING(skb, TCA_KIND, q->ops->id);
	/* Class-specific attributes; may also overwrite tcm fields. */
	if (cl_ops->dump && cl_ops->dump(q, cl, skb, tcm) < 0)
		goto nla_put_failure;

	if (gnet_stats_start_copy_compat(skb, TCA_STATS2, TCA_STATS, TCA_XSTATS,
					 qdisc_root_sleeping_lock(q), &d) < 0)
		goto nla_put_failure;

	if (cl_ops->dump_stats && cl_ops->dump_stats(q, cl, &d) < 0)
		goto nla_put_failure;

	if (gnet_stats_finish_copy(&d) < 0)
		goto nla_put_failure;

	/* Patch the final message length now that all attributes are in. */
	nlh->nlmsg_len = skb_tail_pointer(skb) - b;
	return skb->len;

nlmsg_failure:
nla_put_failure:
	nlmsg_trim(skb, b);
	return -1;
}
1494
1495static int tclass_notify(struct sk_buff *oskb, struct nlmsghdr *n,
1496 struct Qdisc *q, unsigned long cl, int event)
1497{
1498 struct sk_buff *skb;
1499 u32 pid = oskb ? NETLINK_CB(oskb).pid : 0;
1500
1501 skb = alloc_skb(NLMSG_GOODSIZE, GFP_KERNEL);
1502 if (!skb)
1503 return -ENOBUFS;
1504
1505 if (tc_fill_tclass(skb, q, cl, pid, n->nlmsg_seq, 0, event) < 0) {
1506 kfree_skb(skb);
1507 return -EINVAL;
1508 }
1509
Denis V. Lunev97c53ca2007-11-19 22:26:51 -08001510 return rtnetlink_send(skb, &init_net, pid, RTNLGRP_TC, n->nlmsg_flags&NLM_F_ECHO);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001511}
1512
/*
 * Walker state for dumping all classes of a qdisc: carries the target
 * skb and the netlink dump callback.  The embedded qdisc_walker must
 * remain first so qdisc_class_dump() can cast the walker pointer back.
 */
struct qdisc_dump_args
{
	struct qdisc_walker 	w;	/* generic class-walk header (must be first) */
	struct sk_buff 		*skb;	/* message buffer being filled */
	struct netlink_callback	*cb;	/* dump context (pid, seq, cursor) */
};
1519
1520static int qdisc_class_dump(struct Qdisc *q, unsigned long cl, struct qdisc_walker *arg)
1521{
1522 struct qdisc_dump_args *a = (struct qdisc_dump_args *)arg;
1523
1524 return tc_fill_tclass(a->skb, q, cl, NETLINK_CB(a->cb->skb).pid,
1525 a->cb->nlh->nlmsg_seq, NLM_F_MULTI, RTM_NEWTCLASS);
1526}
1527
David S. Miller30723672008-07-18 22:50:15 -07001528static int tc_dump_tclass_qdisc(struct Qdisc *q, struct sk_buff *skb,
1529 struct tcmsg *tcm, struct netlink_callback *cb,
1530 int *t_p, int s_t)
1531{
1532 struct qdisc_dump_args arg;
1533
1534 if (tc_qdisc_dump_ignore(q) ||
1535 *t_p < s_t || !q->ops->cl_ops ||
1536 (tcm->tcm_parent &&
1537 TC_H_MAJ(tcm->tcm_parent) != q->handle)) {
1538 (*t_p)++;
1539 return 0;
1540 }
1541 if (*t_p > s_t)
1542 memset(&cb->args[1], 0, sizeof(cb->args)-sizeof(cb->args[0]));
1543 arg.w.fn = qdisc_class_dump;
1544 arg.skb = skb;
1545 arg.cb = cb;
1546 arg.w.stop = 0;
1547 arg.w.skip = cb->args[1];
1548 arg.w.count = 0;
1549 q->ops->cl_ops->walk(q, &arg.w);
1550 cb->args[1] = arg.w.count;
1551 if (arg.w.stop)
1552 return -1;
1553 (*t_p)++;
1554 return 0;
1555}
1556
1557static int tc_dump_tclass_root(struct Qdisc *root, struct sk_buff *skb,
1558 struct tcmsg *tcm, struct netlink_callback *cb,
1559 int *t_p, int s_t)
1560{
1561 struct Qdisc *q;
1562
1563 if (!root)
1564 return 0;
1565
1566 if (tc_dump_tclass_qdisc(root, skb, tcm, cb, t_p, s_t) < 0)
1567 return -1;
1568
1569 list_for_each_entry(q, &root->list, list) {
1570 if (tc_dump_tclass_qdisc(q, skb, tcm, cb, t_p, s_t) < 0)
1571 return -1;
1572 }
1573
1574 return 0;
1575}
1576
/*
 * Netlink dump callback for RTM_GETTCLASS: dump the classes of the TX
 * root qdisc tree and the ingress qdisc tree of the requested device.
 * cb->args[0] carries the qdisc resume cursor between invocations.
 */
static int tc_dump_tclass(struct sk_buff *skb, struct netlink_callback *cb)
{
	struct tcmsg *tcm = (struct tcmsg*)NLMSG_DATA(cb->nlh);
	struct net *net = sock_net(skb->sk);
	struct netdev_queue *dev_queue;
	struct net_device *dev;
	int t, s_t;

	/* Packet scheduling is not namespace-aware here: init_net only. */
	if (net != &init_net)
		return 0;

	if (cb->nlh->nlmsg_len < NLMSG_LENGTH(sizeof(*tcm)))
		return 0;
	/* Takes a device reference; released via dev_put() below. */
	if ((dev = dev_get_by_index(&init_net, tcm->tcm_ifindex)) == NULL)
		return 0;

	s_t = cb->args[0];
	t = 0;

	dev_queue = netdev_get_tx_queue(dev, 0);
	if (tc_dump_tclass_root(dev_queue->qdisc_sleeping, skb, tcm, cb, &t, s_t) < 0)
		goto done;

	dev_queue = &dev->rx_queue;
	if (tc_dump_tclass_root(dev_queue->qdisc_sleeping, skb, tcm, cb, &t, s_t) < 0)
		goto done;

done:
	/* Save the cursor so the next invocation resumes where we stopped. */
	cb->args[0] = t;

	dev_put(dev);
	return skb->len;
}
1610
1611/* Main classifier routine: scans classifier chain attached
1612 to this qdisc, (optionally) tests for protocol and asks
1613 specific classifiers.
1614 */
Patrick McHardy73ca4912007-07-15 00:02:31 -07001615int tc_classify_compat(struct sk_buff *skb, struct tcf_proto *tp,
1616 struct tcf_result *res)
1617{
1618 __be16 protocol = skb->protocol;
1619 int err = 0;
1620
1621 for (; tp; tp = tp->next) {
1622 if ((tp->protocol == protocol ||
1623 tp->protocol == htons(ETH_P_ALL)) &&
1624 (err = tp->classify(skb, tp, res)) >= 0) {
1625#ifdef CONFIG_NET_CLS_ACT
1626 if (err != TC_ACT_RECLASSIFY && skb->tc_verd)
1627 skb->tc_verd = SET_TC_VERD(skb->tc_verd, 0);
1628#endif
1629 return err;
1630 }
1631 }
1632 return -1;
1633}
1634EXPORT_SYMBOL(tc_classify_compat);
1635
/*
 * Full classifier entry point: like tc_classify_compat(), but honours
 * TC_ACT_RECLASSIFY verdicts by restarting the chain from the top, with
 * a loop bound of MAX_REC_LOOP passes to prevent livelock on
 * misconfigured rule sets.
 *
 * Fixes vs. previous version: removed the 'protocol' local, which was
 * assigned from skb->protocol but never read (tc_classify_compat()
 * fetches the protocol itself); added a log level to the printk.
 */
int tc_classify(struct sk_buff *skb, struct tcf_proto *tp,
		struct tcf_result *res)
{
	int err = 0;
#ifdef CONFIG_NET_CLS_ACT
	struct tcf_proto *otp = tp;	/* head of chain, for reclassify restart */
reclassify:
#endif

	err = tc_classify_compat(skb, tp, res);
#ifdef CONFIG_NET_CLS_ACT
	if (err == TC_ACT_RECLASSIFY) {
		u32 verd = G_TC_VERD(skb->tc_verd);
		tp = otp;

		/* Bound the number of reclassification passes. */
		if (verd++ >= MAX_REC_LOOP) {
			printk(KERN_ERR
			       "rule prio %u protocol %02x reclassify loop, "
			       "packet dropped\n",
			       tp->prio&0xffff, ntohs(tp->protocol));
			return TC_ACT_SHOT;
		}
		skb->tc_verd = SET_TC_VERD(skb->tc_verd, verd);
		goto reclassify;
	}
#endif
	return err;
}
EXPORT_SYMBOL(tc_classify);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001666
Patrick McHardya48b5a62007-03-23 11:29:43 -07001667void tcf_destroy(struct tcf_proto *tp)
1668{
1669 tp->ops->destroy(tp);
1670 module_put(tp->ops->owner);
1671 kfree(tp);
1672}
1673
Patrick McHardyff31ab52008-07-01 19:52:38 -07001674void tcf_destroy_chain(struct tcf_proto **fl)
Patrick McHardya48b5a62007-03-23 11:29:43 -07001675{
1676 struct tcf_proto *tp;
1677
Patrick McHardyff31ab52008-07-01 19:52:38 -07001678 while ((tp = *fl) != NULL) {
1679 *fl = tp->next;
Patrick McHardya48b5a62007-03-23 11:29:43 -07001680 tcf_destroy(tp);
1681 }
1682}
1683EXPORT_SYMBOL(tcf_destroy_chain);
1684
Linus Torvalds1da177e2005-04-16 15:20:36 -07001685#ifdef CONFIG_PROC_FS
1686static int psched_show(struct seq_file *seq, void *v)
1687{
Patrick McHardy3c0cfc12007-10-10 16:32:41 -07001688 struct timespec ts;
1689
1690 hrtimer_get_res(CLOCK_MONOTONIC, &ts);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001691 seq_printf(seq, "%08x %08x %08x %08x\n",
Patrick McHardy641b9e02007-03-16 01:18:42 -07001692 (u32)NSEC_PER_USEC, (u32)PSCHED_US2NS(1),
Patrick McHardy514bca32007-03-16 12:34:52 -07001693 1000000,
Patrick McHardy3c0cfc12007-10-10 16:32:41 -07001694 (u32)NSEC_PER_SEC/(u32)ktime_to_ns(timespec_to_ktime(ts)));
Linus Torvalds1da177e2005-04-16 15:20:36 -07001695
1696 return 0;
1697}
1698
1699static int psched_open(struct inode *inode, struct file *file)
1700{
1701 return single_open(file, psched_show, PDE(inode)->data);
1702}
1703
/* File operations for /proc/net/psched (read-only, seq_file based). */
static const struct file_operations psched_fops = {
	.owner = THIS_MODULE,
	.open = psched_open,
	.read  = seq_read,
	.llseek = seq_lseek,
	.release = single_release,
};
Linus Torvalds1da177e2005-04-16 15:20:36 -07001711#endif
1712
/*
 * Subsystem init: register the built-in fifo qdiscs, create the
 * /proc/net/psched file, and hook the rtnetlink message types for
 * qdisc and class manipulation into their handlers.
 */
static int __init pktsched_init(void)
{
	register_qdisc(&pfifo_qdisc_ops);
	register_qdisc(&bfifo_qdisc_ops);
	proc_net_fops_create(&init_net, "psched", 0, &psched_fops);

	/* Qdisc new/del/get plus the qdisc dump callback. */
	rtnl_register(PF_UNSPEC, RTM_NEWQDISC, tc_modify_qdisc, NULL);
	rtnl_register(PF_UNSPEC, RTM_DELQDISC, tc_get_qdisc, NULL);
	rtnl_register(PF_UNSPEC, RTM_GETQDISC, tc_get_qdisc, tc_dump_qdisc);
	/* Class new/del/get plus the class dump callback. */
	rtnl_register(PF_UNSPEC, RTM_NEWTCLASS, tc_ctl_tclass, NULL);
	rtnl_register(PF_UNSPEC, RTM_DELTCLASS, tc_ctl_tclass, NULL);
	rtnl_register(PF_UNSPEC, RTM_GETTCLASS, tc_ctl_tclass, tc_dump_tclass);

	return 0;
}

subsys_initcall(pktsched_init);