blob: 1dbece34496e5457e5d0a4758a8f5f04cf107efc [file] [log] [blame]
Thomas Gleixner2874c5f2019-05-27 08:55:01 +02001// SPDX-License-Identifier: GPL-2.0-or-later
Pavel Emelyanov52b7c592011-12-09 06:23:51 +00002/*
3 * udp_diag.c Module for monitoring UDP transport protocols sockets.
4 *
5 * Authors: Pavel Emelyanov, <xemul@parallels.com>
Pavel Emelyanov52b7c592011-12-09 06:23:51 +00006 */
7
8
9#include <linux/module.h>
10#include <linux/inet_diag.h>
11#include <linux/udp.h>
12#include <net/udp.h>
13#include <net/udplite.h>
Pavel Emelyanov52b7c592011-12-09 06:23:51 +000014#include <linux/sock_diag.h>
15
Pavel Emelyanovb6d640c2011-12-09 06:24:21 +000016static int sk_diag_dump(struct sock *sk, struct sk_buff *skb,
Eric Dumazet34160ea2015-03-10 07:15:54 -070017 struct netlink_callback *cb,
18 const struct inet_diag_req_v2 *req,
Lorenzo Colittid545cac2016-09-08 00:42:25 +090019 struct nlattr *bc, bool net_admin)
Pavel Emelyanovb6d640c2011-12-09 06:24:21 +000020{
21 if (!inet_diag_bc_sk(bc, sk))
22 return 0;
23
Martin KaFai Lau5682d392020-02-25 15:04:09 -080024 return inet_sk_diag_fill(sk, NULL, skb, cb, req, NLM_F_MULTI,
25 net_admin);
Pavel Emelyanovb6d640c2011-12-09 06:24:21 +000026}
27
/* Look up the single UDP socket described by @req in @tbl and unicast
 * one diag record back to the requesting netlink socket.
 *
 * Returns 0 on success, -ENOENT when no live socket matches,
 * -ENOMEM when the reply skb cannot be allocated, or another
 * negative errno from cookie check / fill / unicast.
 */
static int udp_dump_one(struct udp_table *tbl,
			struct netlink_callback *cb,
			const struct inet_diag_req_v2 *req)
{
	struct sk_buff *in_skb = cb->skb;
	int err = -EINVAL;
	struct sock *sk = NULL;
	struct sk_buff *rep;
	struct net *net = sock_net(in_skb->sk);

	/* The hash chains are RCU-protected; the whole lookup runs
	 * under rcu_read_lock().
	 */
	rcu_read_lock();
	if (req->sdiag_family == AF_INET)
		/* src and dst are swapped for historical reasons */
		sk = __udp4_lib_lookup(net,
				req->id.idiag_src[0], req->id.idiag_sport,
				req->id.idiag_dst[0], req->id.idiag_dport,
				req->id.idiag_if, 0, tbl, NULL);
#if IS_ENABLED(CONFIG_IPV6)
	else if (req->sdiag_family == AF_INET6)
		sk = __udp6_lib_lookup(net,
				(struct in6_addr *)req->id.idiag_src,
				req->id.idiag_sport,
				(struct in6_addr *)req->id.idiag_dst,
				req->id.idiag_dport,
				req->id.idiag_if, 0, tbl, NULL);
#endif
	/* Pin the socket before leaving the RCU section.  A refcount
	 * that already dropped to zero means the socket is being
	 * freed, so treat it as not found.
	 */
	if (sk && !refcount_inc_not_zero(&sk->sk_refcnt))
		sk = NULL;
	rcu_read_unlock();
	err = -ENOENT;
	if (!sk)
		goto out_nosk;

	err = sock_diag_check_cookie(sk, req->id.idiag_cookie);
	if (err)
		goto out;

	err = -ENOMEM;
	rep = nlmsg_new(nla_total_size(sizeof(struct inet_diag_msg)) +
			inet_diag_msg_attrs_size() +
			nla_total_size(sizeof(struct inet_diag_meminfo)) + 64,
			GFP_KERNEL);
	if (!rep)
		goto out;

	err = inet_sk_diag_fill(sk, NULL, rep, cb, req, 0,
				netlink_net_capable(in_skb, CAP_NET_ADMIN));
	if (err < 0) {
		/* The reply skb was sized for one record; overflow here
		 * indicates a sizing bug, not a runtime condition.
		 */
		WARN_ON(err == -EMSGSIZE);
		kfree_skb(rep);
		goto out;
	}
	err = netlink_unicast(net->diag_nlsk, rep, NETLINK_CB(in_skb).portid,
			      MSG_DONTWAIT);
	if (err > 0)
		err = 0;
out:
	if (sk)
		sock_put(sk);
out_nosk:
	return err;
}
90
/* Walk every slot of @table and dump all sockets matching the filter
 * in @r into @skb, resuming from the cursor saved in cb->args[]
 * (args[0] = hash slot, args[1] = index within the slot) so a dump
 * that fills @skb can be continued by the next callback invocation.
 */
static void udp_dump(struct udp_table *table, struct sk_buff *skb,
		     struct netlink_callback *cb,
		     const struct inet_diag_req_v2 *r)
{
	bool net_admin = netlink_net_capable(cb->skb, CAP_NET_ADMIN);
	struct net *net = sock_net(skb->sk);
	struct inet_diag_dump_data *cb_data;
	int num, s_num, slot, s_slot;
	struct nlattr *bc;

	cb_data = cb->data;
	bc = cb_data->inet_diag_nla_bc;
	/* Resume position from the previous invocation. */
	s_slot = cb->args[0];
	num = s_num = cb->args[1];

	for (slot = s_slot; slot <= table->mask; s_num = 0, slot++) {
		struct udp_hslot *hslot = &table->hash[slot];
		struct sock *sk;

		num = 0;

		if (hlist_empty(&hslot->head))
			continue;

		/* Chain walked under the per-slot lock, BH disabled. */
		spin_lock_bh(&hslot->lock);
		sk_for_each(sk, &hslot->head) {
			struct inet_sock *inet = inet_sk(sk);

			if (!net_eq(sock_net(sk), net))
				continue;
			/* Skip entries already emitted in an earlier pass
			 * over this slot.
			 */
			if (num < s_num)
				goto next;
			if (!(r->idiag_states & (1 << sk->sk_state)))
				goto next;
			if (r->sdiag_family != AF_UNSPEC &&
			    sk->sk_family != r->sdiag_family)
				goto next;
			if (r->id.idiag_sport != inet->inet_sport &&
			    r->id.idiag_sport)
				goto next;
			if (r->id.idiag_dport != inet->inet_dport &&
			    r->id.idiag_dport)
				goto next;

			/* skb full (or error): save the cursor and stop. */
			if (sk_diag_dump(sk, skb, cb, r, bc, net_admin) < 0) {
				spin_unlock_bh(&hslot->lock);
				goto done;
			}
next:
			num++;
		}
		spin_unlock_bh(&hslot->lock);
	}
done:
	cb->args[0] = slot;
	cb->args[1] = num;
}
148
149static void udp_diag_dump(struct sk_buff *skb, struct netlink_callback *cb,
Martin KaFai Lau0df6d322020-02-25 15:04:15 -0800150 const struct inet_diag_req_v2 *r)
Pavel Emelyanov52b7c592011-12-09 06:23:51 +0000151{
Martin KaFai Lau0df6d322020-02-25 15:04:15 -0800152 udp_dump(&udp_table, skb, cb, r);
Pavel Emelyanov52b7c592011-12-09 06:23:51 +0000153}
154
Martin KaFai Lau5682d392020-02-25 15:04:09 -0800155static int udp_diag_dump_one(struct netlink_callback *cb,
Eric Dumazet34160ea2015-03-10 07:15:54 -0700156 const struct inet_diag_req_v2 *req)
Pavel Emelyanov52b7c592011-12-09 06:23:51 +0000157{
Martin KaFai Lau5682d392020-02-25 15:04:09 -0800158 return udp_dump_one(&udp_table, cb, req);
Pavel Emelyanov52b7c592011-12-09 06:23:51 +0000159}
160
Shan Wei62ad6fc2012-04-24 18:15:41 +0000161static void udp_diag_get_info(struct sock *sk, struct inet_diag_msg *r,
162 void *info)
163{
Paolo Abeni6c206b22018-06-08 11:35:40 +0200164 r->idiag_rqueue = udp_rqueue_get(sk);
Shan Wei62ad6fc2012-04-24 18:15:41 +0000165 r->idiag_wqueue = sk_wmem_alloc_get(sk);
166}
167
David Ahern5d77dca2016-08-23 21:06:33 -0700168#ifdef CONFIG_INET_DIAG_DESTROY
/* Locate the socket described by @req in @tbl and forcibly abort it
 * (SOCK_DESTROY).  Note dst/dport and src/sport are passed to the
 * lookups swapped relative to udp_dump_one() above.
 *
 * Returns 0 on success, -ENOENT when no live socket matches or the
 * cookie check fails, -EINVAL for an unsupported address family, or
 * the error from sock_diag_destroy().
 */
static int __udp_diag_destroy(struct sk_buff *in_skb,
			      const struct inet_diag_req_v2 *req,
			      struct udp_table *tbl)
{
	struct net *net = sock_net(in_skb->sk);
	struct sock *sk;
	int err;

	rcu_read_lock();

	if (req->sdiag_family == AF_INET)
		sk = __udp4_lib_lookup(net,
				req->id.idiag_dst[0], req->id.idiag_dport,
				req->id.idiag_src[0], req->id.idiag_sport,
				req->id.idiag_if, 0, tbl, NULL);
#if IS_ENABLED(CONFIG_IPV6)
	else if (req->sdiag_family == AF_INET6) {
		/* v4-mapped v6 addresses: the socket lives in the IPv4
		 * table, keyed by the low 32 bits of the address.
		 */
		if (ipv6_addr_v4mapped((struct in6_addr *)req->id.idiag_dst) &&
		    ipv6_addr_v4mapped((struct in6_addr *)req->id.idiag_src))
			sk = __udp4_lib_lookup(net,
					req->id.idiag_dst[3], req->id.idiag_dport,
					req->id.idiag_src[3], req->id.idiag_sport,
					req->id.idiag_if, 0, tbl, NULL);

		else
			sk = __udp6_lib_lookup(net,
					(struct in6_addr *)req->id.idiag_dst,
					req->id.idiag_dport,
					(struct in6_addr *)req->id.idiag_src,
					req->id.idiag_sport,
					req->id.idiag_if, 0, tbl, NULL);
	}
#endif
	else {
		rcu_read_unlock();
		return -EINVAL;
	}

	/* Pin the socket before dropping RCU protection; a refcount
	 * already at zero means it is mid-free, so treat as not found.
	 */
	if (sk && !refcount_inc_not_zero(&sk->sk_refcnt))
		sk = NULL;

	rcu_read_unlock();

	if (!sk)
		return -ENOENT;

	if (sock_diag_check_cookie(sk, req->id.idiag_cookie)) {
		sock_put(sk);
		return -ENOENT;
	}

	err = sock_diag_destroy(sk, ECONNABORTED);

	sock_put(sk);

	return err;
}
226
227static int udp_diag_destroy(struct sk_buff *in_skb,
228 const struct inet_diag_req_v2 *req)
229{
230 return __udp_diag_destroy(in_skb, req, &udp_table);
231}
232
233static int udplite_diag_destroy(struct sk_buff *in_skb,
234 const struct inet_diag_req_v2 *req)
235{
236 return __udp_diag_destroy(in_skb, req, &udplite_table);
237}
238
239#endif
240
/* inet_diag handler registration for IPPROTO_UDP. */
static const struct inet_diag_handler udp_diag_handler = {
	.dump		 = udp_diag_dump,
	.dump_one	 = udp_diag_dump_one,
	.idiag_get_info	 = udp_diag_get_info,
	.idiag_type	 = IPPROTO_UDP,
	.idiag_info_size = 0,	/* no protocol-specific info attribute */
#ifdef CONFIG_INET_DIAG_DESTROY
	.destroy	 = udp_diag_destroy,
#endif
};
251
252static void udplite_diag_dump(struct sk_buff *skb, struct netlink_callback *cb,
Martin KaFai Lau0df6d322020-02-25 15:04:15 -0800253 const struct inet_diag_req_v2 *r)
Pavel Emelyanov52b7c592011-12-09 06:23:51 +0000254{
Martin KaFai Lau0df6d322020-02-25 15:04:15 -0800255 udp_dump(&udplite_table, skb, cb, r);
Pavel Emelyanov52b7c592011-12-09 06:23:51 +0000256}
257
Martin KaFai Lau5682d392020-02-25 15:04:09 -0800258static int udplite_diag_dump_one(struct netlink_callback *cb,
Eric Dumazet34160ea2015-03-10 07:15:54 -0700259 const struct inet_diag_req_v2 *req)
Pavel Emelyanov52b7c592011-12-09 06:23:51 +0000260{
Martin KaFai Lau5682d392020-02-25 15:04:09 -0800261 return udp_dump_one(&udplite_table, cb, req);
Pavel Emelyanov52b7c592011-12-09 06:23:51 +0000262}
263
/* inet_diag handler registration for IPPROTO_UDPLITE. */
static const struct inet_diag_handler udplite_diag_handler = {
	.dump		 = udplite_diag_dump,
	.dump_one	 = udplite_diag_dump_one,
	.idiag_get_info	 = udp_diag_get_info,
	.idiag_type	 = IPPROTO_UDPLITE,
	.idiag_info_size = 0,	/* no protocol-specific info attribute */
#ifdef CONFIG_INET_DIAG_DESTROY
	.destroy	 = udplite_diag_destroy,
#endif
};
274
275static int __init udp_diag_init(void)
276{
277 int err;
278
279 err = inet_diag_register(&udp_diag_handler);
280 if (err)
281 goto out;
282 err = inet_diag_register(&udplite_diag_handler);
283 if (err)
284 goto out_lite;
285out:
286 return err;
287out_lite:
288 inet_diag_unregister(&udp_diag_handler);
289 goto out;
290}
291
/* Unregister both handlers, in reverse order of registration. */
static void __exit udp_diag_exit(void)
{
	inet_diag_unregister(&udplite_diag_handler);
	inet_diag_unregister(&udp_diag_handler);
}
297
module_init(udp_diag_init);
module_exit(udp_diag_exit);
MODULE_LICENSE("GPL");
/* Autoload this module on NETLINK_SOCK_DIAG requests for the
 * (family, protocol) pairs below.
 */
MODULE_ALIAS_NET_PF_PROTO_TYPE(PF_NETLINK, NETLINK_SOCK_DIAG, 2-17 /* AF_INET - IPPROTO_UDP */);
MODULE_ALIAS_NET_PF_PROTO_TYPE(PF_NETLINK, NETLINK_SOCK_DIAG, 2-136 /* AF_INET - IPPROTO_UDPLITE */);