Thomas Gleixner | 09c434b | 2019-05-19 13:08:20 +0100 | [diff] [blame] | 1 | // SPDX-License-Identifier: GPL-2.0-only |
Pavel Emelyanov | 22931d3 | 2011-12-15 02:44:35 +0000 | [diff] [blame] | 2 | #include <linux/types.h> |
| 3 | #include <linux/spinlock.h> |
| 4 | #include <linux/sock_diag.h> |
| 5 | #include <linux/unix_diag.h> |
| 6 | #include <linux/skbuff.h> |
Cyrill Gorcunov | 2ea744a | 2011-12-20 04:33:03 +0000 | [diff] [blame] | 7 | #include <linux/module.h> |
Felipe Gasper | cae9910 | 2019-05-20 19:43:51 -0500 | [diff] [blame] | 8 | #include <linux/uidgid.h> |
Pavel Emelyanov | 22931d3 | 2011-12-15 02:44:35 +0000 | [diff] [blame] | 9 | #include <net/netlink.h> |
| 10 | #include <net/af_unix.h> |
| 11 | #include <net/tcp_states.h> |
Felipe Gasper | cae9910 | 2019-05-20 19:43:51 -0500 | [diff] [blame] | 12 | #include <net/sock.h> |
Pavel Emelyanov | 22931d3 | 2011-12-15 02:44:35 +0000 | [diff] [blame] | 13 | |
Pavel Emelyanov | f5248b4 | 2011-12-15 02:45:24 +0000 | [diff] [blame] | 14 | static int sk_diag_dump_name(struct sock *sk, struct sk_buff *nlskb) |
| 15 | { |
Kuniyuki Iwashima | afd20b9 | 2021-11-24 11:14:30 +0900 | [diff] [blame] | 16 | /* might or might not have unix_table_locks */ |
Al Viro | ae3b564 | 2019-02-15 20:09:35 +0000 | [diff] [blame] | 17 | struct unix_address *addr = smp_load_acquire(&unix_sk(sk)->addr); |
Pavel Emelyanov | f5248b4 | 2011-12-15 02:45:24 +0000 | [diff] [blame] | 18 | |
Thomas Graf | 4245375 | 2012-06-26 23:36:10 +0000 | [diff] [blame] | 19 | if (!addr) |
| 20 | return 0; |
Pavel Emelyanov | f5248b4 | 2011-12-15 02:45:24 +0000 | [diff] [blame] | 21 | |
Kuniyuki Iwashima | 755662c | 2021-11-24 11:14:19 +0900 | [diff] [blame] | 22 | return nla_put(nlskb, UNIX_DIAG_NAME, |
| 23 | addr->len - offsetof(struct sockaddr_un, sun_path), |
Thomas Graf | 4245375 | 2012-06-26 23:36:10 +0000 | [diff] [blame] | 24 | addr->name->sun_path); |
Pavel Emelyanov | f5248b4 | 2011-12-15 02:45:24 +0000 | [diff] [blame] | 25 | } |
| 26 | |
Pavel Emelyanov | 5f7b056 | 2011-12-15 02:45:43 +0000 | [diff] [blame] | 27 | static int sk_diag_dump_vfs(struct sock *sk, struct sk_buff *nlskb) |
| 28 | { |
Al Viro | 40ffe67 | 2012-03-14 21:54:32 -0400 | [diff] [blame] | 29 | struct dentry *dentry = unix_sk(sk)->path.dentry; |
Pavel Emelyanov | 5f7b056 | 2011-12-15 02:45:43 +0000 | [diff] [blame] | 30 | |
| 31 | if (dentry) { |
Thomas Graf | 4245375 | 2012-06-26 23:36:10 +0000 | [diff] [blame] | 32 | struct unix_diag_vfs uv = { |
David Howells | a25b376 | 2015-03-17 22:26:21 +0000 | [diff] [blame] | 33 | .udiag_vfs_ino = d_backing_inode(dentry)->i_ino, |
Thomas Graf | 4245375 | 2012-06-26 23:36:10 +0000 | [diff] [blame] | 34 | .udiag_vfs_dev = dentry->d_sb->s_dev, |
| 35 | }; |
| 36 | |
| 37 | return nla_put(nlskb, UNIX_DIAG_VFS, sizeof(uv), &uv); |
Pavel Emelyanov | 5f7b056 | 2011-12-15 02:45:43 +0000 | [diff] [blame] | 38 | } |
| 39 | |
| 40 | return 0; |
Pavel Emelyanov | 5f7b056 | 2011-12-15 02:45:43 +0000 | [diff] [blame] | 41 | } |
| 42 | |
Pavel Emelyanov | ac02be8 | 2011-12-15 02:45:58 +0000 | [diff] [blame] | 43 | static int sk_diag_dump_peer(struct sock *sk, struct sk_buff *nlskb) |
| 44 | { |
| 45 | struct sock *peer; |
| 46 | int ino; |
| 47 | |
| 48 | peer = unix_peer_get(sk); |
| 49 | if (peer) { |
| 50 | unix_state_lock(peer); |
| 51 | ino = sock_i_ino(peer); |
| 52 | unix_state_unlock(peer); |
| 53 | sock_put(peer); |
| 54 | |
Thomas Graf | 4245375 | 2012-06-26 23:36:10 +0000 | [diff] [blame] | 55 | return nla_put_u32(nlskb, UNIX_DIAG_PEER, ino); |
Pavel Emelyanov | ac02be8 | 2011-12-15 02:45:58 +0000 | [diff] [blame] | 56 | } |
| 57 | |
| 58 | return 0; |
Pavel Emelyanov | ac02be8 | 2011-12-15 02:45:58 +0000 | [diff] [blame] | 59 | } |
| 60 | |
/* For a listening socket, emit a UNIX_DIAG_ICONS attribute: an array of
 * u32 inode numbers, one per skb queued on the receive queue (each such
 * skb carries a pending connection's socket in skb->sk; 0 is stored when
 * that socket has no peer).  Non-listening sockets contribute nothing.
 * Returns 0 on success or -EMSGSIZE if the attribute does not fit.
 */
static int sk_diag_dump_icons(struct sock *sk, struct sk_buff *nlskb)
{
	struct sk_buff *skb;
	struct nlattr *attr;
	u32 *buf;
	int i;

	if (sk->sk_state == TCP_LISTEN) {
		/* Hold the queue lock across both sizing (qlen) and the
		 * walk so the reserved attribute exactly fits the queue.
		 */
		spin_lock(&sk->sk_receive_queue.lock);

		attr = nla_reserve(nlskb, UNIX_DIAG_ICONS,
				   sk->sk_receive_queue.qlen * sizeof(u32));
		if (!attr)
			goto errout;

		buf = nla_data(attr);
		i = 0;
		skb_queue_walk(&sk->sk_receive_queue, skb) {
			struct sock *req, *peer;

			req = skb->sk;
			/*
			 * The state lock is outer for the same sk's
			 * queue lock. With the other's queue locked it's
			 * OK to lock the state.
			 */
			unix_state_lock_nested(req);
			peer = unix_sk(req)->peer;
			buf[i++] = (peer ? sock_i_ino(peer) : 0);
			unix_state_unlock(req);
		}
		spin_unlock(&sk->sk_receive_queue.lock);
	}

	return 0;

errout:
	/* nla_reserve() failed with the queue lock held; release it. */
	spin_unlock(&sk->sk_receive_queue.lock);
	return -EMSGSIZE;
}
| 101 | |
Pavel Emelyanov | cbf3919 | 2011-12-15 02:46:31 +0000 | [diff] [blame] | 102 | static int sk_diag_show_rqlen(struct sock *sk, struct sk_buff *nlskb) |
| 103 | { |
Thomas Graf | 4245375 | 2012-06-26 23:36:10 +0000 | [diff] [blame] | 104 | struct unix_diag_rqlen rql; |
Pavel Emelyanov | c9da99e | 2011-12-30 00:54:39 +0000 | [diff] [blame] | 105 | |
| 106 | if (sk->sk_state == TCP_LISTEN) { |
Thomas Graf | 4245375 | 2012-06-26 23:36:10 +0000 | [diff] [blame] | 107 | rql.udiag_rqueue = sk->sk_receive_queue.qlen; |
| 108 | rql.udiag_wqueue = sk->sk_max_ack_backlog; |
Pavel Emelyanov | c9da99e | 2011-12-30 00:54:39 +0000 | [diff] [blame] | 109 | } else { |
Thomas Graf | 4245375 | 2012-06-26 23:36:10 +0000 | [diff] [blame] | 110 | rql.udiag_rqueue = (u32) unix_inq_len(sk); |
| 111 | rql.udiag_wqueue = (u32) unix_outq_len(sk); |
Pavel Emelyanov | c9da99e | 2011-12-30 00:54:39 +0000 | [diff] [blame] | 112 | } |
| 113 | |
Thomas Graf | 4245375 | 2012-06-26 23:36:10 +0000 | [diff] [blame] | 114 | return nla_put(nlskb, UNIX_DIAG_RQLEN, sizeof(rql), &rql); |
Pavel Emelyanov | cbf3919 | 2011-12-15 02:46:31 +0000 | [diff] [blame] | 115 | } |
| 116 | |
Felipe Gasper | cae9910 | 2019-05-20 19:43:51 -0500 | [diff] [blame] | 117 | static int sk_diag_dump_uid(struct sock *sk, struct sk_buff *nlskb) |
| 118 | { |
| 119 | uid_t uid = from_kuid_munged(sk_user_ns(nlskb->sk), sock_i_uid(sk)); |
| 120 | return nla_put(nlskb, UNIX_DIAG_UID, sizeof(uid_t), &uid); |
| 121 | } |
| 122 | |
/* Fill one complete SOCK_DIAG_BY_FAMILY netlink message for @sk into
 * @skb: the fixed unix_diag_msg header plus every optional attribute
 * selected by req->udiag_show (NAME, VFS, PEER, ICONS, RQLEN, MEMINFO,
 * UID), and an unconditional UNIX_DIAG_SHUTDOWN attribute.
 * On any overflow the partially-built message is cancelled and
 * -EMSGSIZE is returned, so the caller can retry with a larger skb.
 */
static int sk_diag_fill(struct sock *sk, struct sk_buff *skb, struct unix_diag_req *req,
			u32 portid, u32 seq, u32 flags, int sk_ino)
{
	struct nlmsghdr *nlh;
	struct unix_diag_msg *rep;

	nlh = nlmsg_put(skb, portid, seq, SOCK_DIAG_BY_FAMILY, sizeof(*rep),
			flags);
	if (!nlh)
		return -EMSGSIZE;

	/* Fixed header: family/type/state, inode, and the identity cookie. */
	rep = nlmsg_data(nlh);
	rep->udiag_family = AF_UNIX;
	rep->udiag_type = sk->sk_type;
	rep->udiag_state = sk->sk_state;
	rep->pad = 0;	/* don't leak uninitialized kernel memory to userspace */
	rep->udiag_ino = sk_ino;
	sock_diag_save_cookie(sk, rep->udiag_cookie);

	/* Each sk_diag_* helper returns non-zero only when the skb is full. */
	if ((req->udiag_show & UDIAG_SHOW_NAME) &&
	    sk_diag_dump_name(sk, skb))
		goto out_nlmsg_trim;

	if ((req->udiag_show & UDIAG_SHOW_VFS) &&
	    sk_diag_dump_vfs(sk, skb))
		goto out_nlmsg_trim;

	if ((req->udiag_show & UDIAG_SHOW_PEER) &&
	    sk_diag_dump_peer(sk, skb))
		goto out_nlmsg_trim;

	if ((req->udiag_show & UDIAG_SHOW_ICONS) &&
	    sk_diag_dump_icons(sk, skb))
		goto out_nlmsg_trim;

	if ((req->udiag_show & UDIAG_SHOW_RQLEN) &&
	    sk_diag_show_rqlen(sk, skb))
		goto out_nlmsg_trim;

	if ((req->udiag_show & UDIAG_SHOW_MEMINFO) &&
	    sock_diag_put_meminfo(sk, skb, UNIX_DIAG_MEMINFO))
		goto out_nlmsg_trim;

	if (nla_put_u8(skb, UNIX_DIAG_SHUTDOWN, sk->sk_shutdown))
		goto out_nlmsg_trim;

	if ((req->udiag_show & UDIAG_SHOW_UID) &&
	    sk_diag_dump_uid(sk, skb))
		goto out_nlmsg_trim;

	nlmsg_end(skb, nlh);
	return 0;

out_nlmsg_trim:
	/* Roll back everything added since nlmsg_put(). */
	nlmsg_cancel(skb, nlh);
	return -EMSGSIZE;
}
| 180 | |
| 181 | static int sk_diag_dump(struct sock *sk, struct sk_buff *skb, struct unix_diag_req *req, |
Eric W. Biederman | 15e4730 | 2012-09-07 20:12:54 +0000 | [diff] [blame] | 182 | u32 portid, u32 seq, u32 flags) |
Pavel Emelyanov | 45a96b9 | 2011-12-15 02:44:52 +0000 | [diff] [blame] | 183 | { |
| 184 | int sk_ino; |
| 185 | |
| 186 | unix_state_lock(sk); |
| 187 | sk_ino = sock_i_ino(sk); |
| 188 | unix_state_unlock(sk); |
| 189 | |
| 190 | if (!sk_ino) |
| 191 | return 0; |
| 192 | |
Eric W. Biederman | 15e4730 | 2012-09-07 20:12:54 +0000 | [diff] [blame] | 193 | return sk_diag_fill(sk, skb, req, portid, seq, flags, sk_ino); |
Pavel Emelyanov | 45a96b9 | 2011-12-15 02:44:52 +0000 | [diff] [blame] | 194 | } |
| 195 | |
/* Netlink dump callback: walk every slot of unix_socket_table and emit
 * one message per socket in the requesting net whose state matches
 * req->udiag_states.  The (slot, num) cursor is saved in cb->args[0/1]
 * so a dump that fills the skb resumes at the same position on the
 * next invocation.
 */
static int unix_diag_dump(struct sk_buff *skb, struct netlink_callback *cb)
{
	struct unix_diag_req *req;
	int num, s_num, slot, s_slot;
	struct net *net = sock_net(skb->sk);

	req = nlmsg_data(cb->nlh);

	/* Resume position from the previous partial dump (0,0 on start). */
	s_slot = cb->args[0];
	num = s_num = cb->args[1];

	for (slot = s_slot;
	     slot < ARRAY_SIZE(unix_socket_table);
	     s_num = 0, slot++) {	/* within-slot cursor resets per slot */
		struct sock *sk;

		num = 0;
		spin_lock(&unix_table_locks[slot]);
		sk_for_each(sk, &unix_socket_table[slot]) {
			if (!net_eq(sock_net(sk), net))
				continue;	/* not counted toward the cursor */
			if (num < s_num)
				goto next;	/* already emitted last round */
			if (!(req->udiag_states & (1 << sk->sk_state)))
				goto next;
			if (sk_diag_dump(sk, skb, req,
					 NETLINK_CB(cb->skb).portid,
					 cb->nlh->nlmsg_seq,
					 NLM_F_MULTI) < 0) {
				/* skb full: drop the lock and save the cursor */
				spin_unlock(&unix_table_locks[slot]);
				goto done;
			}
next:
			num++;
		}
		spin_unlock(&unix_table_locks[slot]);
	}
done:
	cb->args[0] = slot;
	cb->args[1] = num;

	return skb->len;
}
| 239 | |
/* Linear scan of every unix_socket_table slot for the socket with inode
 * number @ino.  On a hit, a reference is taken (sock_hold) before the
 * slot lock is dropped, so the returned socket cannot be freed under
 * the caller; the caller must sock_put() it.  Returns NULL if no
 * socket matches.
 */
static struct sock *unix_lookup_by_ino(unsigned int ino)
{
	struct sock *sk;
	int i;

	for (i = 0; i < ARRAY_SIZE(unix_socket_table); i++) {
		spin_lock(&unix_table_locks[i]);
		sk_for_each(sk, &unix_socket_table[i])
			if (ino == sock_i_ino(sk)) {
				sock_hold(sk);
				spin_unlock(&unix_table_locks[i]);
				return sk;
			}
		spin_unlock(&unix_table_locks[i]);
	}
	return NULL;
}
| 257 | |
/* Handle a non-dump request for a single socket identified by
 * req->udiag_ino (and optionally the cookie).  Builds the reply in a
 * freshly allocated skb and unicasts it to the requester.  Because the
 * reply size is not known up front, the fill is retried with the
 * payload grown by 256 bytes each time, up to PAGE_SIZE.
 * Returns 0 on success or a negative errno (-EINVAL, -ENOENT, -ENOMEM,
 * or a cookie/unicast error).
 */
static int unix_diag_get_exact(struct sk_buff *in_skb,
			       const struct nlmsghdr *nlh,
			       struct unix_diag_req *req)
{
	int err = -EINVAL;
	struct sock *sk;
	struct sk_buff *rep;
	unsigned int extra_len;
	struct net *net = sock_net(in_skb->sk);

	if (req->udiag_ino == 0)
		goto out_nosk;

	sk = unix_lookup_by_ino(req->udiag_ino);
	err = -ENOENT;
	if (sk == NULL)
		goto out_nosk;
	/* A socket from another net namespace is treated as not found. */
	if (!net_eq(sock_net(sk), net))
		goto out;

	err = sock_diag_check_cookie(sk, req->udiag_cookie);
	if (err)
		goto out;

	extra_len = 256;
again:
	err = -ENOMEM;
	rep = nlmsg_new(sizeof(struct unix_diag_msg) + extra_len, GFP_KERNEL);
	if (!rep)
		goto out;

	err = sk_diag_fill(sk, rep, req, NETLINK_CB(in_skb).portid,
			   nlh->nlmsg_seq, 0, req->udiag_ino);
	if (err < 0) {
		/* Reply didn't fit (-EMSGSIZE): free and retry larger. */
		nlmsg_free(rep);
		extra_len += 256;
		if (extra_len >= PAGE_SIZE)
			goto out;

		goto again;
	}
	/* nlmsg_unicast() consumes @rep regardless of outcome. */
	err = nlmsg_unicast(net->diag_nlsk, rep, NETLINK_CB(in_skb).portid);

out:
	if (sk)
		sock_put(sk);	/* pairs with sock_hold() in unix_lookup_by_ino() */
out_nosk:
	return err;
}
| 307 | |
| 308 | static int unix_diag_handler_dump(struct sk_buff *skb, struct nlmsghdr *h) |
| 309 | { |
| 310 | int hdrlen = sizeof(struct unix_diag_req); |
Andrey Vagin | 51d7ccc | 2012-07-16 04:28:49 +0000 | [diff] [blame] | 311 | struct net *net = sock_net(skb->sk); |
Pavel Emelyanov | 22931d3 | 2011-12-15 02:44:35 +0000 | [diff] [blame] | 312 | |
| 313 | if (nlmsg_len(h) < hdrlen) |
| 314 | return -EINVAL; |
| 315 | |
Pablo Neira Ayuso | 80d326f | 2012-02-24 14:30:15 +0000 | [diff] [blame] | 316 | if (h->nlmsg_flags & NLM_F_DUMP) { |
| 317 | struct netlink_dump_control c = { |
| 318 | .dump = unix_diag_dump, |
| 319 | }; |
Andrey Vagin | 51d7ccc | 2012-07-16 04:28:49 +0000 | [diff] [blame] | 320 | return netlink_dump_start(net->diag_nlsk, skb, h, &c); |
Pablo Neira Ayuso | 80d326f | 2012-02-24 14:30:15 +0000 | [diff] [blame] | 321 | } else |
David S. Miller | b61bb01 | 2012-06-26 21:41:00 -0700 | [diff] [blame] | 322 | return unix_diag_get_exact(skb, h, nlmsg_data(h)); |
Pavel Emelyanov | 22931d3 | 2011-12-15 02:44:35 +0000 | [diff] [blame] | 323 | } |
| 324 | |
/* sock_diag handler registration for AF_UNIX diag requests. */
static const struct sock_diag_handler unix_diag_handler = {
	.family = AF_UNIX,
	.dump = unix_diag_handler_dump,
};

static int __init unix_diag_init(void)
{
	return sock_diag_register(&unix_diag_handler);
}

static void __exit unix_diag_exit(void)
{
	sock_diag_unregister(&unix_diag_handler);
}

module_init(unix_diag_init);
module_exit(unix_diag_exit);
MODULE_LICENSE("GPL");
/* Autoload this module when a NETLINK_SOCK_DIAG request for AF_LOCAL arrives. */
MODULE_ALIAS_NET_PF_PROTO_TYPE(PF_NETLINK, NETLINK_SOCK_DIAG, 1 /* AF_LOCAL */);