blob: a2933eeabd917b614fc6fabe5b8ee78a367ca21b [file] [log] [blame]
Thomas Gleixner09c434b2019-05-19 13:08:20 +01001// SPDX-License-Identifier: GPL-2.0-only
Cyrill Gorcunov432490f2016-10-21 13:03:44 +03002#include <linux/module.h>
3
4#include <linux/inet_diag.h>
5#include <linux/sock_diag.h>
6
Arnd Bergmannf8da9772016-10-25 17:53:22 +02007#include <net/inet_sock.h>
Cyrill Gorcunov432490f2016-10-21 13:03:44 +03008#include <net/raw.h>
9#include <net/rawv6.h>
10
/* Prefix all pr_*() output from this module with its name. */
#ifdef pr_fmt
# undef pr_fmt
#endif

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
17static struct raw_hashinfo *
18raw_get_hashinfo(const struct inet_diag_req_v2 *r)
19{
20 if (r->sdiag_family == AF_INET) {
21 return &raw_v4_hashinfo;
22#if IS_ENABLED(CONFIG_IPV6)
23 } else if (r->sdiag_family == AF_INET6) {
24 return &raw_v6_hashinfo;
25#endif
26 } else {
Cyrill Gorcunov432490f2016-10-21 13:03:44 +030027 return ERR_PTR(-EINVAL);
28 }
29}
30
31/*
32 * Due to requirement of not breaking user API we can't simply
33 * rename @pad field in inet_diag_req_v2 structure, instead
34 * use helper to figure it out.
35 */
36
37static struct sock *raw_lookup(struct net *net, struct sock *from,
38 const struct inet_diag_req_v2 *req)
39{
40 struct inet_diag_req_raw *r = (void *)req;
41 struct sock *sk = NULL;
42
43 if (r->sdiag_family == AF_INET)
44 sk = __raw_v4_lookup(net, from, r->sdiag_raw_protocol,
45 r->id.idiag_dst[0],
46 r->id.idiag_src[0],
David Ahern67359932017-08-07 08:44:18 -070047 r->id.idiag_if, 0);
Cyrill Gorcunov432490f2016-10-21 13:03:44 +030048#if IS_ENABLED(CONFIG_IPV6)
49 else
50 sk = __raw_v6_lookup(net, from, r->sdiag_raw_protocol,
51 (const struct in6_addr *)r->id.idiag_src,
52 (const struct in6_addr *)r->id.idiag_dst,
David Ahern5108ab42017-08-07 08:44:22 -070053 r->id.idiag_if, 0);
Cyrill Gorcunov432490f2016-10-21 13:03:44 +030054#endif
55 return sk;
56}
57
58static struct sock *raw_sock_get(struct net *net, const struct inet_diag_req_v2 *r)
59{
60 struct raw_hashinfo *hashinfo = raw_get_hashinfo(r);
61 struct sock *sk = NULL, *s;
62 int slot;
63
64 if (IS_ERR(hashinfo))
65 return ERR_CAST(hashinfo);
66
67 read_lock(&hashinfo->lock);
68 for (slot = 0; slot < RAW_HTABLE_SIZE; slot++) {
69 sk_for_each(s, &hashinfo->ht[slot]) {
70 sk = raw_lookup(net, s, r);
71 if (sk) {
72 /*
73 * Grab it and keep until we fill
74 * diag meaage to be reported, so
75 * caller should call sock_put then.
76 * We can do that because we're keeping
77 * hashinfo->lock here.
78 */
79 sock_hold(sk);
Cyrill Gorcunov99993702016-11-02 15:36:32 +030080 goto out_unlock;
Cyrill Gorcunov432490f2016-10-21 13:03:44 +030081 }
82 }
83 }
Cyrill Gorcunov99993702016-11-02 15:36:32 +030084out_unlock:
Cyrill Gorcunov432490f2016-10-21 13:03:44 +030085 read_unlock(&hashinfo->lock);
86
87 return sk ? sk : ERR_PTR(-ENOENT);
88}
89
Martin KaFai Lau5682d392020-02-25 15:04:09 -080090static int raw_diag_dump_one(struct netlink_callback *cb,
Cyrill Gorcunov432490f2016-10-21 13:03:44 +030091 const struct inet_diag_req_v2 *r)
92{
Martin KaFai Lau5682d392020-02-25 15:04:09 -080093 struct sk_buff *in_skb = cb->skb;
Cyrill Gorcunov432490f2016-10-21 13:03:44 +030094 struct sk_buff *rep;
95 struct sock *sk;
Martin KaFai Lau5682d392020-02-25 15:04:09 -080096 struct net *net;
Cyrill Gorcunov432490f2016-10-21 13:03:44 +030097 int err;
98
Martin KaFai Lau5682d392020-02-25 15:04:09 -080099 net = sock_net(in_skb->sk);
Cyrill Gorcunov432490f2016-10-21 13:03:44 +0300100 sk = raw_sock_get(net, r);
101 if (IS_ERR(sk))
102 return PTR_ERR(sk);
103
104 rep = nlmsg_new(sizeof(struct inet_diag_msg) +
105 sizeof(struct inet_diag_meminfo) + 64,
106 GFP_KERNEL);
107 if (!rep) {
108 sock_put(sk);
109 return -ENOMEM;
110 }
111
Martin KaFai Lau5682d392020-02-25 15:04:09 -0800112 err = inet_sk_diag_fill(sk, NULL, rep, cb, r, 0,
Cyrill Gorcunov432490f2016-10-21 13:03:44 +0300113 netlink_net_capable(in_skb, CAP_NET_ADMIN));
114 sock_put(sk);
115
116 if (err < 0) {
117 kfree_skb(rep);
118 return err;
119 }
120
121 err = netlink_unicast(net->diag_nlsk, rep,
122 NETLINK_CB(in_skb).portid,
123 MSG_DONTWAIT);
124 if (err > 0)
125 err = 0;
126 return err;
127}
128
129static int sk_diag_dump(struct sock *sk, struct sk_buff *skb,
130 struct netlink_callback *cb,
131 const struct inet_diag_req_v2 *r,
132 struct nlattr *bc, bool net_admin)
133{
134 if (!inet_diag_bc_sk(bc, sk))
135 return 0;
136
Martin KaFai Lau5682d392020-02-25 15:04:09 -0800137 return inet_sk_diag_fill(sk, NULL, skb, cb, r, NLM_F_MULTI, net_admin);
Cyrill Gorcunov432490f2016-10-21 13:03:44 +0300138}
139
140static void raw_diag_dump(struct sk_buff *skb, struct netlink_callback *cb,
141 const struct inet_diag_req_v2 *r, struct nlattr *bc)
142{
143 bool net_admin = netlink_net_capable(cb->skb, CAP_NET_ADMIN);
144 struct raw_hashinfo *hashinfo = raw_get_hashinfo(r);
145 struct net *net = sock_net(skb->sk);
146 int num, s_num, slot, s_slot;
147 struct sock *sk = NULL;
148
149 if (IS_ERR(hashinfo))
150 return;
151
152 s_slot = cb->args[0];
153 num = s_num = cb->args[1];
154
155 read_lock(&hashinfo->lock);
156 for (slot = s_slot; slot < RAW_HTABLE_SIZE; s_num = 0, slot++) {
157 num = 0;
158
159 sk_for_each(sk, &hashinfo->ht[slot]) {
160 struct inet_sock *inet = inet_sk(sk);
161
162 if (!net_eq(sock_net(sk), net))
163 continue;
164 if (num < s_num)
165 goto next;
166 if (sk->sk_family != r->sdiag_family)
167 goto next;
168 if (r->id.idiag_sport != inet->inet_sport &&
169 r->id.idiag_sport)
170 goto next;
171 if (r->id.idiag_dport != inet->inet_dport &&
172 r->id.idiag_dport)
173 goto next;
174 if (sk_diag_dump(sk, skb, cb, r, bc, net_admin) < 0)
175 goto out_unlock;
176next:
177 num++;
178 }
179 }
180
181out_unlock:
182 read_unlock(&hashinfo->lock);
183
184 cb->args[0] = slot;
185 cb->args[1] = num;
186}
187
188static void raw_diag_get_info(struct sock *sk, struct inet_diag_msg *r,
189 void *info)
190{
191 r->idiag_rqueue = sk_rmem_alloc_get(sk);
192 r->idiag_wqueue = sk_wmem_alloc_get(sk);
193}
194
#ifdef CONFIG_INET_DIAG_DESTROY
/*
 * SOCK_DESTROY handler: find the matching raw socket and abort it
 * with ECONNABORTED.  Returns 0 on success or a negative errno.
 */
static int raw_diag_destroy(struct sk_buff *in_skb,
			    const struct inet_diag_req_v2 *r)
{
	struct net *net = sock_net(in_skb->sk);
	struct sock *sk;
	int err;

	sk = raw_sock_get(net, r);
	if (IS_ERR(sk))
		return PTR_ERR(sk);
	err = sock_diag_destroy(sk, ECONNABORTED);
	/* Drop the reference taken by raw_sock_get(). */
	sock_put(sk);
	return err;
}
#endif
211
212static const struct inet_diag_handler raw_diag_handler = {
213 .dump = raw_diag_dump,
214 .dump_one = raw_diag_dump_one,
215 .idiag_get_info = raw_diag_get_info,
216 .idiag_type = IPPROTO_RAW,
217 .idiag_info_size = 0,
218#ifdef CONFIG_INET_DIAG_DESTROY
219 .destroy = raw_diag_destroy,
220#endif
221};
222
223static void __always_unused __check_inet_diag_req_raw(void)
224{
225 /*
226 * Make sure the two structures are identical,
227 * except the @pad field.
228 */
229#define __offset_mismatch(m1, m2) \
230 (offsetof(struct inet_diag_req_v2, m1) != \
231 offsetof(struct inet_diag_req_raw, m2))
232
233 BUILD_BUG_ON(sizeof(struct inet_diag_req_v2) !=
234 sizeof(struct inet_diag_req_raw));
235 BUILD_BUG_ON(__offset_mismatch(sdiag_family, sdiag_family));
236 BUILD_BUG_ON(__offset_mismatch(sdiag_protocol, sdiag_protocol));
237 BUILD_BUG_ON(__offset_mismatch(idiag_ext, idiag_ext));
238 BUILD_BUG_ON(__offset_mismatch(pad, sdiag_raw_protocol));
239 BUILD_BUG_ON(__offset_mismatch(idiag_states, idiag_states));
240 BUILD_BUG_ON(__offset_mismatch(id, id));
241#undef __offset_mismatch
242}
243
244static int __init raw_diag_init(void)
245{
246 return inet_diag_register(&raw_diag_handler);
247}
248
249static void __exit raw_diag_exit(void)
250{
251 inet_diag_unregister(&raw_diag_handler);
252}
253
254module_init(raw_diag_init);
255module_exit(raw_diag_exit);
256MODULE_LICENSE("GPL");
257MODULE_ALIAS_NET_PF_PROTO_TYPE(PF_NETLINK, NETLINK_SOCK_DIAG, 2-255 /* AF_INET - IPPROTO_RAW */);
258MODULE_ALIAS_NET_PF_PROTO_TYPE(PF_NETLINK, NETLINK_SOCK_DIAG, 10-255 /* AF_INET6 - IPPROTO_RAW */);