// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * net/sched/sch_blackhole.c	Black hole queue
 *
 * Authors:	Thomas Graf <tgraf@suug.ch>
 *
 * Note: Quantum tunneling is not supported.
 */
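
/*
 * Usage sketch (illustrative, not part of the original source): the qdisc
 * registers under the id "blackhole", so it can be attached from userspace
 * with the tc tool; the device name below is only an example:
 *
 *	tc qdisc add dev eth0 root blackhole
 *
 * Once attached, every packet handed to this qdisc is silently dropped.
 */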

#include <linux/init.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/skbuff.h>
#include <net/pkt_sched.h>

/*
 * Drop every packet on enqueue.  NET_XMIT_SUCCESS keeps the caller from
 * treating the drop as an error; __NET_XMIT_BYPASS tells parent qdiscs
 * the packet was never queued, keeping their qlen/backlog consistent.
 */
static int blackhole_enqueue(struct sk_buff *skb, struct Qdisc *sch,
			     struct sk_buff **to_free)
{
	qdisc_drop(skb, sch, to_free);
	return NET_XMIT_SUCCESS | __NET_XMIT_BYPASS;
}

/* Nothing is ever queued, so there is never anything to dequeue. */
static struct sk_buff *blackhole_dequeue(struct Qdisc *sch)
{
	return NULL;
}

static struct Qdisc_ops blackhole_qdisc_ops __read_mostly = {
	.id		= "blackhole",
	.priv_size	= 0,
	.enqueue	= blackhole_enqueue,
	.dequeue	= blackhole_dequeue,
	.peek		= blackhole_dequeue,
	.owner		= THIS_MODULE,
};

static int __init blackhole_init(void)
{
	return register_qdisc(&blackhole_qdisc_ops);
}
device_initcall(blackhole_init)