/* /proc/net/ support for AF_RXRPC
 *
 * Copyright (C) 2007 Red Hat, Inc. All Rights Reserved.
 * Written by David Howells (dhowells@redhat.com)
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */

#include <linux/module.h>
#include <net/sock.h>
#include <net/af_rxrpc.h>
#include "ar-internal.h"

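/*
 * Human-readable connection state names, indexed by connection state.  Each
 * string is padded to eight characters so that the State column printed by
 * rxrpc_connection_seq_show() below stays aligned.
 */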
static const char *const rxrpc_conn_states[RXRPC_CONN__NR_STATES] = {
	[RXRPC_CONN_UNUSED]			= "Unused  ",
	[RXRPC_CONN_CLIENT]			= "Client  ",
	[RXRPC_CONN_SERVICE_PREALLOC]		= "SvPrealc",
	[RXRPC_CONN_SERVICE_UNSECURED]		= "SvUnsec ",
	[RXRPC_CONN_SERVICE_CHALLENGING]	= "SvChall ",
	[RXRPC_CONN_SERVICE]			= "SvSecure",
	[RXRPC_CONN_REMOTELY_ABORTED]		= "RmtAbort",
	[RXRPC_CONN_LOCALLY_ABORTED]		= "LocAbort",
};

/*
 * generate a list of extant and dead calls in /proc/net/rxrpc/calls
 */
static void *rxrpc_call_seq_start(struct seq_file *seq, loff_t *_pos)
	__acquires(rcu)
	__acquires(rxnet->call_lock)
{
	struct rxrpc_net *rxnet = rxrpc_net(seq_file_net(seq));

	rcu_read_lock();
	read_lock(&rxnet->call_lock);
	return seq_list_start_head(&rxnet->calls, *_pos);
}

static void *rxrpc_call_seq_next(struct seq_file *seq, void *v, loff_t *pos)
{
	struct rxrpc_net *rxnet = rxrpc_net(seq_file_net(seq));

	return seq_list_next(v, &rxnet->calls, pos);
}

static void rxrpc_call_seq_stop(struct seq_file *seq, void *v)
	__releases(rxnet->call_lock)
	__releases(rcu)
{
	struct rxrpc_net *rxnet = rxrpc_net(seq_file_net(seq));

	read_unlock(&rxnet->call_lock);
	rcu_read_unlock();
}

static int rxrpc_call_seq_show(struct seq_file *seq, void *v)
{
	struct rxrpc_local *local;
	struct rxrpc_sock *rx;
	struct rxrpc_peer *peer;
	struct rxrpc_call *call;
	struct rxrpc_net *rxnet = rxrpc_net(seq_file_net(seq));
	unsigned long timeout = 0;
	rxrpc_seq_t tx_hard_ack, rx_hard_ack;
	char lbuff[50], rbuff[50];

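	/*
	 * seq_list_start_head() hands back the list head itself for the first
	 * iteration, so this branch prints the column headings exactly once.
	 */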
	if (v == &rxnet->calls) {
		seq_puts(seq,
			 "Proto Local                                          "
			 " Remote                                              "
			 " SvID ConnID   CallID   End Use State    Abort   "
			 " UserID           TxSeq    TW RxSeq    RW RxSerial RxTimo\n");
		return 0;
	}

	call = list_entry(v, struct rxrpc_call, link);

	rx = rcu_dereference(call->socket);
	if (rx) {
		local = READ_ONCE(rx->local);
		if (local)
			sprintf(lbuff, "%pISpc", &local->srx.transport);
		else
			strcpy(lbuff, "no_local");
	} else {
		strcpy(lbuff, "no_socket");
	}

	peer = call->peer;
	if (peer)
		sprintf(rbuff, "%pISpc", &peer->srx.transport);
	else
		strcpy(rbuff, "no_connection");

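	/*
	 * Work out how long, in jiffies, the call has left before its
	 * expect-Rx timer fires; preallocated service calls have no timer
	 * armed yet, so their timeout is shown as zero.
	 */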
	if (call->state != RXRPC_CALL_SERVER_PREALLOC) {
		timeout = READ_ONCE(call->expect_rx_by);
		timeout -= jiffies;
	}

	tx_hard_ack = READ_ONCE(call->tx_hard_ack);
	rx_hard_ack = READ_ONCE(call->rx_hard_ack);
	seq_printf(seq,
		   "UDP   %-47.47s %-47.47s %4x %08x %08x %s %3u"
		   " %-8.8s %08x %lx %08x %02x %08x %02x %08x %06lx\n",
		   lbuff,
		   rbuff,
		   call->service_id,
		   call->cid,
		   call->call_id,
		   rxrpc_is_service_call(call) ? "Svc" : "Clt",
		   atomic_read(&call->usage),
		   rxrpc_call_states[call->state],
		   call->abort_code,
		   call->user_call_ID,
		   tx_hard_ack, READ_ONCE(call->tx_top) - tx_hard_ack,
		   rx_hard_ack, READ_ONCE(call->rx_top) - rx_hard_ack,
		   call->rx_serial,
		   timeout);

	return 0;
}

const struct seq_operations rxrpc_call_seq_ops = {
	.start = rxrpc_call_seq_start,
	.next  = rxrpc_call_seq_next,
	.stop  = rxrpc_call_seq_stop,
	.show  = rxrpc_call_seq_show,
};

/*
 * generate a list of extant virtual connections in /proc/net/rxrpc/conns
 */
static void *rxrpc_connection_seq_start(struct seq_file *seq, loff_t *_pos)
	__acquires(rxnet->conn_lock)
{
	struct rxrpc_net *rxnet = rxrpc_net(seq_file_net(seq));

	read_lock(&rxnet->conn_lock);
	return seq_list_start_head(&rxnet->conn_proc_list, *_pos);
}

static void *rxrpc_connection_seq_next(struct seq_file *seq, void *v,
				       loff_t *pos)
{
	struct rxrpc_net *rxnet = rxrpc_net(seq_file_net(seq));

	return seq_list_next(v, &rxnet->conn_proc_list, pos);
}

static void rxrpc_connection_seq_stop(struct seq_file *seq, void *v)
	__releases(rxnet->conn_lock)
{
	struct rxrpc_net *rxnet = rxrpc_net(seq_file_net(seq));

	read_unlock(&rxnet->conn_lock);
}

static int rxrpc_connection_seq_show(struct seq_file *seq, void *v)
{
	struct rxrpc_connection *conn;
	struct rxrpc_net *rxnet = rxrpc_net(seq_file_net(seq));
	char lbuff[50], rbuff[50];

	if (v == &rxnet->conn_proc_list) {
		seq_puts(seq,
			 "Proto Local                                          "
			 " Remote                                              "
			 " SvID ConnID   End Use State    Key     "
			 " Serial   ISerial\n"
			 );
		return 0;
	}

	conn = list_entry(v, struct rxrpc_connection, proc_link);
	if (conn->state == RXRPC_CONN_SERVICE_PREALLOC) {
		strcpy(lbuff, "no_local");
		strcpy(rbuff, "no_connection");
		goto print;
	}

	sprintf(lbuff, "%pISpc", &conn->params.local->srx.transport);

	sprintf(rbuff, "%pISpc", &conn->params.peer->srx.transport);
print:
	seq_printf(seq,
		   "UDP   %-47.47s %-47.47s %4x %08x %s %3u"
		   " %s %08x %08x %08x %08x %08x %08x %08x\n",
		   lbuff,
		   rbuff,
		   conn->service_id,
		   conn->proto.cid,
		   rxrpc_conn_is_service(conn) ? "Svc" : "Clt",
		   atomic_read(&conn->usage),
		   rxrpc_conn_states[conn->state],
		   key_serial(conn->params.key),
		   atomic_read(&conn->serial),
		   conn->hi_serial,
		   conn->channels[0].call_id,
		   conn->channels[1].call_id,
		   conn->channels[2].call_id,
		   conn->channels[3].call_id);

	return 0;
}

const struct seq_operations rxrpc_connection_seq_ops = {
	.start = rxrpc_connection_seq_start,
	.next  = rxrpc_connection_seq_next,
	.stop  = rxrpc_connection_seq_stop,
	.show  = rxrpc_connection_seq_show,
};

/*
 * generate a list of extant virtual peers in /proc/net/rxrpc/peers
 */
static int rxrpc_peer_seq_show(struct seq_file *seq, void *v)
{
	struct rxrpc_peer *peer;
	time64_t now;
	char lbuff[50], rbuff[50];

	if (v == SEQ_START_TOKEN) {
		seq_puts(seq,
			 "Proto Local                                          "
			 " Remote                                              "
			 " Use CW  MTU   LastUse          RTT Rc\n"
			 );
		return 0;
	}

	peer = list_entry(v, struct rxrpc_peer, hash_link);

	sprintf(lbuff, "%pISpc", &peer->local->srx.transport);

	sprintf(rbuff, "%pISpc", &peer->srx.transport);

	now = ktime_get_seconds();
	seq_printf(seq,
		   "UDP   %-47.47s %-47.47s %3u"
		   " %3u %5u %6llus %12llu %2u\n",
		   lbuff,
		   rbuff,
		   atomic_read(&peer->usage),
		   peer->cong_cwnd,
		   peer->mtu,
		   now - peer->last_tx_at,
		   peer->rtt,
		   peer->rtt_cursor);

	return 0;
}

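/*
 * The peer table is a hash table rather than a list, so the seq_file
 * position encodes both coordinates: the bucket index lives in the high
 * bits of *_pos and a 1-based slot index within that bucket lives in the
 * low bits.  Slot 0 of bucket 0 stands for SEQ_START_TOKEN (the header
 * row), and UINT_MAX marks the end of the iteration.
 */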
static void *rxrpc_peer_seq_start(struct seq_file *seq, loff_t *_pos)
	__acquires(rcu)
{
	struct rxrpc_net *rxnet = rxrpc_net(seq_file_net(seq));
	unsigned int bucket, n;
	unsigned int shift = 32 - HASH_BITS(rxnet->peer_hash);
	void *p;

	rcu_read_lock();

	if (*_pos >= UINT_MAX)
		return NULL;

	n = *_pos & ((1U << shift) - 1);
	bucket = *_pos >> shift;
	for (;;) {
		if (bucket >= HASH_SIZE(rxnet->peer_hash)) {
			*_pos = UINT_MAX;
			return NULL;
		}
		if (n == 0) {
			if (bucket == 0)
				return SEQ_START_TOKEN;
			*_pos += 1;
			n++;
		}

		p = seq_hlist_start_rcu(&rxnet->peer_hash[bucket], n - 1);
		if (p)
			return p;
		bucket++;
		n = 1;
		*_pos = (bucket << shift) | n;
	}
}

static void *rxrpc_peer_seq_next(struct seq_file *seq, void *v, loff_t *_pos)
{
	struct rxrpc_net *rxnet = rxrpc_net(seq_file_net(seq));
	unsigned int bucket, n;
	unsigned int shift = 32 - HASH_BITS(rxnet->peer_hash);
	void *p;

	if (*_pos >= UINT_MAX)
		return NULL;

	bucket = *_pos >> shift;

	p = seq_hlist_next_rcu(v, &rxnet->peer_hash[bucket], _pos);
	if (p)
		return p;

	for (;;) {
		bucket++;
		n = 1;
		*_pos = (bucket << shift) | n;

		if (bucket >= HASH_SIZE(rxnet->peer_hash)) {
			*_pos = UINT_MAX;
			return NULL;
		}
		if (n == 0) {
			*_pos += 1;
			n++;
		}

		p = seq_hlist_start_rcu(&rxnet->peer_hash[bucket], n - 1);
		if (p)
			return p;
	}
}

static void rxrpc_peer_seq_stop(struct seq_file *seq, void *v)
	__releases(rcu)
{
	rcu_read_unlock();
}

const struct seq_operations rxrpc_peer_seq_ops = {
	.start = rxrpc_peer_seq_start,
	.next  = rxrpc_peer_seq_next,
	.stop  = rxrpc_peer_seq_stop,
	.show  = rxrpc_peer_seq_show,
};
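
/*
 * Note: nothing in this file creates the proc entries itself.  In current
 * trees that is done from the per-network-namespace init code (assumed here
 * to live in net/rxrpc/net_ns.c), roughly along these lines -- a sketch
 * only, not part of this file:
 *
 *	proc_create_net("calls", 0444, rxnet->proc_net, &rxrpc_call_seq_ops,
 *			sizeof(struct seq_net_private));
 *	proc_create_net("conns", 0444, rxnet->proc_net, &rxrpc_connection_seq_ops,
 *			sizeof(struct seq_net_private));
 *	proc_create_net("peers", 0444, rxnet->proc_net, &rxrpc_peer_seq_ops,
 *			sizeof(struct seq_net_private));
 *
 * proc_create_net() supplies a struct seq_net_private as the seq_file
 * private data, which is what the seq_file_net() calls above rely on.
 */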