// SPDX-License-Identifier: GPL-2.0-only
#include <linux/types.h>
#include <linux/spinlock.h>
#include <linux/sock_diag.h>
#include <linux/unix_diag.h>
#include <linux/skbuff.h>
#include <linux/module.h>
#include <linux/uidgid.h>
#include <net/netlink.h>
#include <net/af_unix.h>
#include <net/tcp_states.h>
#include <net/sock.h>

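/* Report the socket's bound address (sun_path), if any, as UNIX_DIAG_NAME. */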
static int sk_diag_dump_name(struct sock *sk, struct sk_buff *nlskb)
{
	/* might or might not have unix_table_locks */
	struct unix_address *addr = smp_load_acquire(&unix_sk(sk)->addr);

	if (!addr)
		return 0;

	return nla_put(nlskb, UNIX_DIAG_NAME,
		       addr->len - offsetof(struct sockaddr_un, sun_path),
		       addr->name->sun_path);
}

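/* For filesystem-bound sockets, report the backing inode and device as
 * UNIX_DIAG_VFS.
 */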
static int sk_diag_dump_vfs(struct sock *sk, struct sk_buff *nlskb)
{
	struct dentry *dentry = unix_sk(sk)->path.dentry;

	if (dentry) {
		struct unix_diag_vfs uv = {
			.udiag_vfs_ino = d_backing_inode(dentry)->i_ino,
			.udiag_vfs_dev = dentry->d_sb->s_dev,
		};

		return nla_put(nlskb, UNIX_DIAG_VFS, sizeof(uv), &uv);
	}

	return 0;
}

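/* Report the connected peer's inode number as UNIX_DIAG_PEER. */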
static int sk_diag_dump_peer(struct sock *sk, struct sk_buff *nlskb)
{
	struct sock *peer;
	int ino;

	peer = unix_peer_get(sk);
	if (peer) {
		unix_state_lock(peer);
		ino = sock_i_ino(peer);
		unix_state_unlock(peer);
		sock_put(peer);

		return nla_put_u32(nlskb, UNIX_DIAG_PEER, ino);
	}

	return 0;
}

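/* For listening sockets, report the inode numbers of the peers of all
 * pending (not yet accepted) connections as a UNIX_DIAG_ICONS array.
 */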
static int sk_diag_dump_icons(struct sock *sk, struct sk_buff *nlskb)
{
	struct sk_buff *skb;
	struct nlattr *attr;
	u32 *buf;
	int i;

	if (sk->sk_state == TCP_LISTEN) {
		spin_lock(&sk->sk_receive_queue.lock);

		attr = nla_reserve(nlskb, UNIX_DIAG_ICONS,
				   sk->sk_receive_queue.qlen * sizeof(u32));
		if (!attr)
			goto errout;

		buf = nla_data(attr);
		i = 0;
		skb_queue_walk(&sk->sk_receive_queue, skb) {
			struct sock *req, *peer;

			req = skb->sk;
			/*
			 * The state lock is outer for the same sk's
			 * queue lock. With the other's queue locked it's
			 * OK to lock the state.
			 */
			unix_state_lock_nested(req);
			peer = unix_sk(req)->peer;
			buf[i++] = (peer ? sock_i_ino(peer) : 0);
			unix_state_unlock(req);
		}
		spin_unlock(&sk->sk_receive_queue.lock);
	}

	return 0;

errout:
	spin_unlock(&sk->sk_receive_queue.lock);
	return -EMSGSIZE;
}

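/* Report queue lengths as UNIX_DIAG_RQLEN: pending connections and the
 * backlog limit for listeners, queued inbound/outbound bytes otherwise.
 */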
static int sk_diag_show_rqlen(struct sock *sk, struct sk_buff *nlskb)
{
	struct unix_diag_rqlen rql;

	if (sk->sk_state == TCP_LISTEN) {
		rql.udiag_rqueue = sk->sk_receive_queue.qlen;
		rql.udiag_wqueue = sk->sk_max_ack_backlog;
	} else {
		rql.udiag_rqueue = (u32) unix_inq_len(sk);
		rql.udiag_wqueue = (u32) unix_outq_len(sk);
	}

	return nla_put(nlskb, UNIX_DIAG_RQLEN, sizeof(rql), &rql);
}

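/* Report the socket owner's UID, mapped into the requesting socket's user
 * namespace, as UNIX_DIAG_UID.
 */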
static int sk_diag_dump_uid(struct sock *sk, struct sk_buff *nlskb)
{
	uid_t uid = from_kuid_munged(sk_user_ns(nlskb->sk), sock_i_uid(sk));
	return nla_put(nlskb, UNIX_DIAG_UID, sizeof(uid_t), &uid);
}

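/* Fill one unix_diag_msg reply for @sk, appending the attributes requested
 * in req->udiag_show.  Returns -EMSGSIZE when the skb runs out of room.
 */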
static int sk_diag_fill(struct sock *sk, struct sk_buff *skb, struct unix_diag_req *req,
			u32 portid, u32 seq, u32 flags, int sk_ino)
{
	struct nlmsghdr *nlh;
	struct unix_diag_msg *rep;

	nlh = nlmsg_put(skb, portid, seq, SOCK_DIAG_BY_FAMILY, sizeof(*rep),
			flags);
	if (!nlh)
		return -EMSGSIZE;

	rep = nlmsg_data(nlh);
	rep->udiag_family = AF_UNIX;
	rep->udiag_type = sk->sk_type;
	rep->udiag_state = sk->sk_state;
	rep->pad = 0;
	rep->udiag_ino = sk_ino;
	sock_diag_save_cookie(sk, rep->udiag_cookie);

	if ((req->udiag_show & UDIAG_SHOW_NAME) &&
	    sk_diag_dump_name(sk, skb))
		goto out_nlmsg_trim;

	if ((req->udiag_show & UDIAG_SHOW_VFS) &&
	    sk_diag_dump_vfs(sk, skb))
		goto out_nlmsg_trim;

	if ((req->udiag_show & UDIAG_SHOW_PEER) &&
	    sk_diag_dump_peer(sk, skb))
		goto out_nlmsg_trim;

	if ((req->udiag_show & UDIAG_SHOW_ICONS) &&
	    sk_diag_dump_icons(sk, skb))
		goto out_nlmsg_trim;

	if ((req->udiag_show & UDIAG_SHOW_RQLEN) &&
	    sk_diag_show_rqlen(sk, skb))
		goto out_nlmsg_trim;

	if ((req->udiag_show & UDIAG_SHOW_MEMINFO) &&
	    sock_diag_put_meminfo(sk, skb, UNIX_DIAG_MEMINFO))
		goto out_nlmsg_trim;

	if (nla_put_u8(skb, UNIX_DIAG_SHUTDOWN, sk->sk_shutdown))
		goto out_nlmsg_trim;

	if ((req->udiag_show & UDIAG_SHOW_UID) &&
	    sk_diag_dump_uid(sk, skb))
		goto out_nlmsg_trim;

	nlmsg_end(skb, nlh);
	return 0;

out_nlmsg_trim:
	nlmsg_cancel(skb, nlh);
	return -EMSGSIZE;
}

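/* Dump one socket for a table dump; sockets not yet attached to an inode
 * are skipped.
 */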
static int sk_diag_dump(struct sock *sk, struct sk_buff *skb, struct unix_diag_req *req,
			u32 portid, u32 seq, u32 flags)
{
	int sk_ino;

	unix_state_lock(sk);
	sk_ino = sock_i_ino(sk);
	unix_state_unlock(sk);

	if (!sk_ino)
		return 0;

	return sk_diag_fill(sk, skb, req, portid, seq, flags, sk_ino);
}

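/* Netlink dump callback: walk every unix_socket_table hash slot, resuming
 * from the slot and entry saved in cb->args[].
 */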
static int unix_diag_dump(struct sk_buff *skb, struct netlink_callback *cb)
{
	struct unix_diag_req *req;
	int num, s_num, slot, s_slot;
	struct net *net = sock_net(skb->sk);

	req = nlmsg_data(cb->nlh);

	s_slot = cb->args[0];
	num = s_num = cb->args[1];

	for (slot = s_slot;
	     slot < ARRAY_SIZE(unix_socket_table);
	     s_num = 0, slot++) {
		struct sock *sk;

		num = 0;
		spin_lock(&unix_table_locks[slot]);
		sk_for_each(sk, &unix_socket_table[slot]) {
			if (!net_eq(sock_net(sk), net))
				continue;
			if (num < s_num)
				goto next;
			if (!(req->udiag_states & (1 << sk->sk_state)))
				goto next;
			if (sk_diag_dump(sk, skb, req,
					 NETLINK_CB(cb->skb).portid,
					 cb->nlh->nlmsg_seq,
					 NLM_F_MULTI) < 0) {
				spin_unlock(&unix_table_locks[slot]);
				goto done;
			}
next:
			num++;
		}
		spin_unlock(&unix_table_locks[slot]);
	}
done:
	cb->args[0] = slot;
	cb->args[1] = num;

	return skb->len;
}

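/* Look up a unix socket by inode number and take a reference on it. */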
static struct sock *unix_lookup_by_ino(unsigned int ino)
{
	struct sock *sk;
	int i;

	for (i = 0; i < ARRAY_SIZE(unix_socket_table); i++) {
		spin_lock(&unix_table_locks[i]);
		sk_for_each(sk, &unix_socket_table[i])
			if (ino == sock_i_ino(sk)) {
				sock_hold(sk);
				spin_unlock(&unix_table_locks[i]);
				return sk;
			}
		spin_unlock(&unix_table_locks[i]);
	}
	return NULL;
}

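/* Answer a request for a single socket identified by inode (and cookie);
 * the reply buffer is grown and the fill retried while it does not fit.
 */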
static int unix_diag_get_exact(struct sk_buff *in_skb,
			       const struct nlmsghdr *nlh,
			       struct unix_diag_req *req)
{
	int err = -EINVAL;
	struct sock *sk;
	struct sk_buff *rep;
	unsigned int extra_len;
	struct net *net = sock_net(in_skb->sk);

	if (req->udiag_ino == 0)
		goto out_nosk;

	sk = unix_lookup_by_ino(req->udiag_ino);
	err = -ENOENT;
	if (sk == NULL)
		goto out_nosk;
	if (!net_eq(sock_net(sk), net))
		goto out;

	err = sock_diag_check_cookie(sk, req->udiag_cookie);
	if (err)
		goto out;

	extra_len = 256;
again:
	err = -ENOMEM;
	rep = nlmsg_new(sizeof(struct unix_diag_msg) + extra_len, GFP_KERNEL);
	if (!rep)
		goto out;

	err = sk_diag_fill(sk, rep, req, NETLINK_CB(in_skb).portid,
			   nlh->nlmsg_seq, 0, req->udiag_ino);
	if (err < 0) {
		nlmsg_free(rep);
		extra_len += 256;
		if (extra_len >= PAGE_SIZE)
			goto out;

		goto again;
	}
	err = nlmsg_unicast(net->diag_nlsk, rep, NETLINK_CB(in_skb).portid);

out:
	if (sk)
		sock_put(sk);
out_nosk:
	return err;
}

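/* sock_diag entry point: start a table dump for NLM_F_DUMP requests,
 * otherwise report the single requested socket.
 */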
static int unix_diag_handler_dump(struct sk_buff *skb, struct nlmsghdr *h)
{
	int hdrlen = sizeof(struct unix_diag_req);
	struct net *net = sock_net(skb->sk);

	if (nlmsg_len(h) < hdrlen)
		return -EINVAL;

	if (h->nlmsg_flags & NLM_F_DUMP) {
		struct netlink_dump_control c = {
			.dump = unix_diag_dump,
		};
		return netlink_dump_start(net->diag_nlsk, skb, h, &c);
	} else
		return unix_diag_get_exact(skb, h, nlmsg_data(h));
}

static const struct sock_diag_handler unix_diag_handler = {
	.family = AF_UNIX,
	.dump = unix_diag_handler_dump,
};

static int __init unix_diag_init(void)
{
	return sock_diag_register(&unix_diag_handler);
}

static void __exit unix_diag_exit(void)
{
	sock_diag_unregister(&unix_diag_handler);
}

module_init(unix_diag_init);
module_exit(unix_diag_exit);
MODULE_LICENSE("GPL");
MODULE_ALIAS_NET_PF_PROTO_TYPE(PF_NETLINK, NETLINK_SOCK_DIAG, 1 /* AF_LOCAL */);