// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * udp_diag.c	Module for monitoring UDP transport protocols sockets.
 *
 * Authors:	Pavel Emelyanov, <xemul@parallels.com>
 */

8
9#include <linux/module.h>
10#include <linux/inet_diag.h>
11#include <linux/udp.h>
12#include <net/udp.h>
13#include <net/udplite.h>
Pavel Emelyanov52b7c592011-12-09 06:23:51 +000014#include <linux/sock_diag.h>
15
Pavel Emelyanovb6d640c2011-12-09 06:24:21 +000016static int sk_diag_dump(struct sock *sk, struct sk_buff *skb,
Eric Dumazet34160ea2015-03-10 07:15:54 -070017 struct netlink_callback *cb,
18 const struct inet_diag_req_v2 *req,
Lorenzo Colittid545cac2016-09-08 00:42:25 +090019 struct nlattr *bc, bool net_admin)
Pavel Emelyanovb6d640c2011-12-09 06:24:21 +000020{
21 if (!inet_diag_bc_sk(bc, sk))
22 return 0;
23
Martin KaFai Lau5682d392020-02-25 15:04:09 -080024 return inet_sk_diag_fill(sk, NULL, skb, cb, req, NLM_F_MULTI,
25 net_admin);
Pavel Emelyanovb6d640c2011-12-09 06:24:21 +000026}
27
/* Look up the single socket described by @req in @tbl and unicast one
 * inet_diag record for it back to the requesting netlink socket.
 *
 * Returns 0 on success, -ENOENT when no live socket matches (or the
 * cookie check fails via sock_diag_check_cookie()), -ENOMEM when the
 * reply skb cannot be allocated, or the error from inet_sk_diag_fill()
 * / nlmsg_unicast().
 */
static int udp_dump_one(struct udp_table *tbl,
			struct netlink_callback *cb,
			const struct inet_diag_req_v2 *req)
{
	struct sk_buff *in_skb = cb->skb;
	int err;
	struct sock *sk = NULL;
	struct sk_buff *rep;
	struct net *net = sock_net(in_skb->sk);

	/* The lookup walks RCU-protected hash chains. */
	rcu_read_lock();
	if (req->sdiag_family == AF_INET)
		/* src and dst are swapped for historical reasons */
		sk = __udp4_lib_lookup(net,
				req->id.idiag_src[0], req->id.idiag_sport,
				req->id.idiag_dst[0], req->id.idiag_dport,
				req->id.idiag_if, 0, tbl, NULL);
#if IS_ENABLED(CONFIG_IPV6)
	else if (req->sdiag_family == AF_INET6)
		sk = __udp6_lib_lookup(net,
				(struct in6_addr *)req->id.idiag_src,
				req->id.idiag_sport,
				(struct in6_addr *)req->id.idiag_dst,
				req->id.idiag_dport,
				req->id.idiag_if, 0, tbl, NULL);
#endif
	/* Pin the socket before leaving the RCU section; a socket whose
	 * refcount already dropped to zero is being freed and must be
	 * treated as not found. */
	if (sk && !refcount_inc_not_zero(&sk->sk_refcnt))
		sk = NULL;
	rcu_read_unlock();
	err = -ENOENT;
	if (!sk)
		goto out_nosk;

	err = sock_diag_check_cookie(sk, req->id.idiag_cookie);
	if (err)
		goto out;

	err = -ENOMEM;
	/* Sized for one diag record: header, standard attrs, meminfo,
	 * plus 64 bytes of slack. */
	rep = nlmsg_new(nla_total_size(sizeof(struct inet_diag_msg)) +
			inet_diag_msg_attrs_size() +
			nla_total_size(sizeof(struct inet_diag_meminfo)) + 64,
			GFP_KERNEL);
	if (!rep)
		goto out;

	err = inet_sk_diag_fill(sk, NULL, rep, cb, req, 0,
				netlink_net_capable(in_skb, CAP_NET_ADMIN));
	if (err < 0) {
		/* The skb was sized to fit one record, so -EMSGSIZE here
		 * would indicate a sizing bug rather than a transient
		 * condition. */
		WARN_ON(err == -EMSGSIZE);
		kfree_skb(rep);
		goto out;
	}
	err = nlmsg_unicast(net->diag_nlsk, rep, NETLINK_CB(in_skb).portid);

out:
	if (sk)
		sock_put(sk);	/* drop the reference taken above */
out_nosk:
	return err;
}
88
/* Walk every hash slot of @table and append one diag record per socket
 * that matches the filter @r (state mask, family, source/destination
 * ports) and the optional bytecode filter from the dump request.
 *
 * Dump position is persisted across callbacks in cb->args[0] (slot)
 * and cb->args[1] (socket index within the slot) so a partially filled
 * skb can be resumed on the next netlink dump invocation.
 */
static void udp_dump(struct udp_table *table, struct sk_buff *skb,
		     struct netlink_callback *cb,
		     const struct inet_diag_req_v2 *r)
{
	bool net_admin = netlink_net_capable(cb->skb, CAP_NET_ADMIN);
	struct net *net = sock_net(skb->sk);
	struct inet_diag_dump_data *cb_data;
	int num, s_num, slot, s_slot;
	struct nlattr *bc;

	cb_data = cb->data;
	bc = cb_data->inet_diag_nla_bc;
	/* Resume from the position saved by a previous callback. */
	s_slot = cb->args[0];
	num = s_num = cb->args[1];

	/* s_num only applies to the first (resumed) slot; reset it to 0
	 * for every subsequent slot. */
	for (slot = s_slot; slot <= table->mask; s_num = 0, slot++) {
		struct udp_hslot *hslot = &table->hash[slot];
		struct sock *sk;

		num = 0;

		if (hlist_empty(&hslot->head))
			continue;

		/* Chain walk is done under the per-slot lock. */
		spin_lock_bh(&hslot->lock);
		sk_for_each(sk, &hslot->head) {
			struct inet_sock *inet = inet_sk(sk);

			if (!net_eq(sock_net(sk), net))
				continue;
			/* Skip entries already dumped in a prior pass. */
			if (num < s_num)
				goto next;
			if (!(r->idiag_states & (1 << sk->sk_state)))
				goto next;
			if (r->sdiag_family != AF_UNSPEC &&
			    sk->sk_family != r->sdiag_family)
				goto next;
			if (r->id.idiag_sport != inet->inet_sport &&
			    r->id.idiag_sport)
				goto next;
			if (r->id.idiag_dport != inet->inet_dport &&
			    r->id.idiag_dport)
				goto next;

			/* A negative return means the skb is full; stop
			 * and record the position so the dump resumes
			 * at this socket. */
			if (sk_diag_dump(sk, skb, cb, r, bc, net_admin) < 0) {
				spin_unlock_bh(&hslot->lock);
				goto done;
			}
next:
			num++;
		}
		spin_unlock_bh(&hslot->lock);
	}
done:
	cb->args[0] = slot;
	cb->args[1] = num;
}
146
/* inet_diag .dump callback for UDP: dump from the global UDP hash. */
static void udp_diag_dump(struct sk_buff *skb, struct netlink_callback *cb,
			  const struct inet_diag_req_v2 *r)
{
	udp_dump(&udp_table, skb, cb, r);
}
152
/* inet_diag .dump_one callback for UDP: answer an exact-socket query. */
static int udp_diag_dump_one(struct netlink_callback *cb,
			     const struct inet_diag_req_v2 *req)
{
	return udp_dump_one(&udp_table, cb, req);
}
158
Shan Wei62ad6fc2012-04-24 18:15:41 +0000159static void udp_diag_get_info(struct sock *sk, struct inet_diag_msg *r,
160 void *info)
161{
Paolo Abeni6c206b22018-06-08 11:35:40 +0200162 r->idiag_rqueue = udp_rqueue_get(sk);
Shan Wei62ad6fc2012-04-24 18:15:41 +0000163 r->idiag_wqueue = sk_wmem_alloc_get(sk);
164}
165
David Ahern5d77dca2016-08-23 21:06:33 -0700166#ifdef CONFIG_INET_DIAG_DESTROY
/* Find the socket described by @req in @tbl and forcibly close it via
 * sock_diag_destroy() with ECONNABORTED.
 *
 * Note the lookup argument order differs from udp_dump_one(): here the
 * request's dst/dport are passed as the lookup's source side (the dump
 * path swaps them "for historical reasons", so destroy must not).
 *
 * Returns 0 on success, -EINVAL for an unsupported address family,
 * -ENOENT when no live socket matches (or the cookie check fails), or
 * the error from sock_diag_destroy().
 */
static int __udp_diag_destroy(struct sk_buff *in_skb,
			      const struct inet_diag_req_v2 *req,
			      struct udp_table *tbl)
{
	struct net *net = sock_net(in_skb->sk);
	struct sock *sk;
	int err;

	rcu_read_lock();

	if (req->sdiag_family == AF_INET)
		sk = __udp4_lib_lookup(net,
				req->id.idiag_dst[0], req->id.idiag_dport,
				req->id.idiag_src[0], req->id.idiag_sport,
				req->id.idiag_if, 0, tbl, NULL);
#if IS_ENABLED(CONFIG_IPV6)
	else if (req->sdiag_family == AF_INET6) {
		/* A v4-mapped pair must be looked up in the IPv4 hash;
		 * word 3 of the in6 address holds the IPv4 address. */
		if (ipv6_addr_v4mapped((struct in6_addr *)req->id.idiag_dst) &&
		    ipv6_addr_v4mapped((struct in6_addr *)req->id.idiag_src))
			sk = __udp4_lib_lookup(net,
					req->id.idiag_dst[3], req->id.idiag_dport,
					req->id.idiag_src[3], req->id.idiag_sport,
					req->id.idiag_if, 0, tbl, NULL);

		else
			sk = __udp6_lib_lookup(net,
					(struct in6_addr *)req->id.idiag_dst,
					req->id.idiag_dport,
					(struct in6_addr *)req->id.idiag_src,
					req->id.idiag_sport,
					req->id.idiag_if, 0, tbl, NULL);
	}
#endif
	else {
		rcu_read_unlock();
		return -EINVAL;
	}

	/* Pin the socket before leaving the RCU section; a zero refcount
	 * means it is already being freed. */
	if (sk && !refcount_inc_not_zero(&sk->sk_refcnt))
		sk = NULL;

	rcu_read_unlock();

	if (!sk)
		return -ENOENT;

	/* Reject stale requests aimed at a recycled socket slot. */
	if (sock_diag_check_cookie(sk, req->id.idiag_cookie)) {
		sock_put(sk);
		return -ENOENT;
	}

	err = sock_diag_destroy(sk, ECONNABORTED);

	sock_put(sk);

	return err;
}
224
/* inet_diag .destroy callback for UDP sockets. */
static int udp_diag_destroy(struct sk_buff *in_skb,
			    const struct inet_diag_req_v2 *req)
{
	return __udp_diag_destroy(in_skb, req, &udp_table);
}
230
/* inet_diag .destroy callback for UDP-Lite sockets. */
static int udplite_diag_destroy(struct sk_buff *in_skb,
				const struct inet_diag_req_v2 *req)
{
	return __udp_diag_destroy(in_skb, req, &udplite_table);
}
236
237#endif
238
Pavel Emelyanov52b7c592011-12-09 06:23:51 +0000239static const struct inet_diag_handler udp_diag_handler = {
240 .dump = udp_diag_dump,
241 .dump_one = udp_diag_dump_one,
Shan Wei62ad6fc2012-04-24 18:15:41 +0000242 .idiag_get_info = udp_diag_get_info,
Pavel Emelyanov52b7c592011-12-09 06:23:51 +0000243 .idiag_type = IPPROTO_UDP,
Craig Gallek3fd22af2015-06-15 11:26:19 -0400244 .idiag_info_size = 0,
David Ahern5d77dca2016-08-23 21:06:33 -0700245#ifdef CONFIG_INET_DIAG_DESTROY
246 .destroy = udp_diag_destroy,
247#endif
Pavel Emelyanov52b7c592011-12-09 06:23:51 +0000248};
249
/* inet_diag .dump callback for UDP-Lite: dump from the UDP-Lite hash. */
static void udplite_diag_dump(struct sk_buff *skb, struct netlink_callback *cb,
			      const struct inet_diag_req_v2 *r)
{
	udp_dump(&udplite_table, skb, cb, r);
}
255
/* inet_diag .dump_one callback for UDP-Lite: answer an exact-socket query. */
static int udplite_diag_dump_one(struct netlink_callback *cb,
				 const struct inet_diag_req_v2 *req)
{
	return udp_dump_one(&udplite_table, cb, req);
}
261
262static const struct inet_diag_handler udplite_diag_handler = {
263 .dump = udplite_diag_dump,
264 .dump_one = udplite_diag_dump_one,
Shan Wei62ad6fc2012-04-24 18:15:41 +0000265 .idiag_get_info = udp_diag_get_info,
Pavel Emelyanov52b7c592011-12-09 06:23:51 +0000266 .idiag_type = IPPROTO_UDPLITE,
Craig Gallek3fd22af2015-06-15 11:26:19 -0400267 .idiag_info_size = 0,
David Ahern5d77dca2016-08-23 21:06:33 -0700268#ifdef CONFIG_INET_DIAG_DESTROY
269 .destroy = udplite_diag_destroy,
270#endif
Pavel Emelyanov52b7c592011-12-09 06:23:51 +0000271};
272
273static int __init udp_diag_init(void)
274{
275 int err;
276
277 err = inet_diag_register(&udp_diag_handler);
278 if (err)
279 goto out;
280 err = inet_diag_register(&udplite_diag_handler);
281 if (err)
282 goto out_lite;
283out:
284 return err;
285out_lite:
286 inet_diag_unregister(&udp_diag_handler);
287 goto out;
288}
289
/* Unregister both diag handlers, in reverse order of registration. */
static void __exit udp_diag_exit(void)
{
	inet_diag_unregister(&udplite_diag_handler);
	inet_diag_unregister(&udp_diag_handler);
}
295
module_init(udp_diag_init);
module_exit(udp_diag_exit);
MODULE_LICENSE("GPL");
/* Allow autoloading when a NETLINK_SOCK_DIAG request arrives for
 * AF_INET(2) + IPPROTO_UDP(17) or AF_INET(2) + IPPROTO_UDPLITE(136). */
MODULE_ALIAS_NET_PF_PROTO_TYPE(PF_NETLINK, NETLINK_SOCK_DIAG, 2-17 /* AF_INET - IPPROTO_UDP */);
MODULE_ALIAS_NET_PF_PROTO_TYPE(PF_NETLINK, NETLINK_SOCK_DIAG, 2-136 /* AF_INET - IPPROTO_UDPLITE */);