// SPDX-License-Identifier: GPL-2.0-or-later
/* /proc/net/ support for AF_RXRPC
 *
 * Copyright (C) 2007 Red Hat, Inc. All Rights Reserved.
 * Written by David Howells (dhowells@redhat.com)
 */

#include <linux/module.h>
#include <net/sock.h>
#include <net/af_rxrpc.h>
#include "ar-internal.h"

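/*
 * Human-readable names for the connection states, as shown in the State
 * column of the connections proc file.  Each name is padded to eight
 * characters so the column stays aligned (the show function prints them
 * with a plain "%s" rather than a fixed-width conversion).
 */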
static const char *const rxrpc_conn_states[RXRPC_CONN__NR_STATES] = {
	[RXRPC_CONN_UNUSED]			= "Unused ",
	[RXRPC_CONN_CLIENT]			= "Client ",
	[RXRPC_CONN_SERVICE_PREALLOC]		= "SvPrealc",
	[RXRPC_CONN_SERVICE_UNSECURED]		= "SvUnsec ",
	[RXRPC_CONN_SERVICE_CHALLENGING]	= "SvChall ",
	[RXRPC_CONN_SERVICE]			= "SvSecure",
	[RXRPC_CONN_REMOTELY_ABORTED]		= "RmtAbort",
	[RXRPC_CONN_LOCALLY_ABORTED]		= "LocAbort",
};

/*
 * generate a list of extant and dead calls in /proc/net/rxrpc_calls
 */
static void *rxrpc_call_seq_start(struct seq_file *seq, loff_t *_pos)
	__acquires(rcu)
	__acquires(rxnet->call_lock)
{
	struct rxrpc_net *rxnet = rxrpc_net(seq_file_net(seq));

	rcu_read_lock();
	read_lock(&rxnet->call_lock);
	return seq_list_start_head(&rxnet->calls, *_pos);
}

static void *rxrpc_call_seq_next(struct seq_file *seq, void *v, loff_t *pos)
{
	struct rxrpc_net *rxnet = rxrpc_net(seq_file_net(seq));

	return seq_list_next(v, &rxnet->calls, pos);
}

static void rxrpc_call_seq_stop(struct seq_file *seq, void *v)
	__releases(rxnet->call_lock)
	__releases(rcu)
{
	struct rxrpc_net *rxnet = rxrpc_net(seq_file_net(seq));

	read_unlock(&rxnet->call_lock);
	rcu_read_unlock();
}

static int rxrpc_call_seq_show(struct seq_file *seq, void *v)
{
	struct rxrpc_local *local;
	struct rxrpc_sock *rx;
	struct rxrpc_peer *peer;
	struct rxrpc_call *call;
	struct rxrpc_net *rxnet = rxrpc_net(seq_file_net(seq));
	unsigned long timeout = 0;
	rxrpc_seq_t tx_hard_ack, rx_hard_ack;
	char lbuff[50], rbuff[50];

	if (v == &rxnet->calls) {
		seq_puts(seq,
			 "Proto Local                                          "
			 " Remote                                         "
			 " SvID ConnID   CallID   End Use State    Abort   "
			 " DebugId TxSeq    TW RxSeq    RW RxSerial RxTimo\n");
		return 0;
	}

	call = list_entry(v, struct rxrpc_call, link);

	rx = rcu_dereference(call->socket);
	if (rx) {
		local = READ_ONCE(rx->local);
		if (local)
			sprintf(lbuff, "%pISpc", &local->srx.transport);
		else
			strcpy(lbuff, "no_local");
	} else {
		strcpy(lbuff, "no_socket");
	}

	peer = call->peer;
	if (peer)
		sprintf(rbuff, "%pISpc", &peer->srx.transport);
	else
		strcpy(rbuff, "no_connection");

	if (call->state != RXRPC_CALL_SERVER_PREALLOC) {
		timeout = READ_ONCE(call->expect_rx_by);
		timeout -= jiffies;
	}

	tx_hard_ack = READ_ONCE(call->tx_hard_ack);
	rx_hard_ack = READ_ONCE(call->rx_hard_ack);
	seq_printf(seq,
		   "UDP   %-47.47s %-47.47s %4x %08x %08x %s %3u"
		   " %-8.8s %08x %08x %08x %02x %08x %02x %08x %06lx\n",
		   lbuff,
		   rbuff,
		   call->service_id,
		   call->cid,
		   call->call_id,
		   rxrpc_is_service_call(call) ? "Svc" : "Clt",
		   atomic_read(&call->usage),
		   rxrpc_call_states[call->state],
		   call->abort_code,
		   call->debug_id,
		   tx_hard_ack, READ_ONCE(call->tx_top) - tx_hard_ack,
		   rx_hard_ack, READ_ONCE(call->rx_top) - rx_hard_ack,
		   call->rx_serial,
		   timeout);

	return 0;
}

const struct seq_operations rxrpc_call_seq_ops = {
	.start	= rxrpc_call_seq_start,
	.next	= rxrpc_call_seq_next,
	.stop	= rxrpc_call_seq_stop,
	.show	= rxrpc_call_seq_show,
};
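
/*
 * A sketch of how a table like rxrpc_call_seq_ops is typically hooked up to
 * /proc (illustrative only: the actual registration lives in the per-netns
 * setup code, and the "calls" file name and rxnet->proc_net directory are
 * assumptions here rather than anything taken from this file):
 *
 *	if (!proc_create_net("calls", 0444, rxnet->proc_net,
 *			     &rxrpc_call_seq_ops,
 *			     sizeof(struct seq_net_private)))
 *		return -ENOMEM;
 *
 * proc_create_net() allocates a struct seq_net_private for each open file,
 * which is what allows seq_file_net() in the iterators above to recover the
 * right network namespace.
 */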

/*
 * generate a list of extant virtual connections in /proc/net/rxrpc_conns
 */
static void *rxrpc_connection_seq_start(struct seq_file *seq, loff_t *_pos)
	__acquires(rxnet->conn_lock)
{
	struct rxrpc_net *rxnet = rxrpc_net(seq_file_net(seq));

	read_lock(&rxnet->conn_lock);
	return seq_list_start_head(&rxnet->conn_proc_list, *_pos);
}

static void *rxrpc_connection_seq_next(struct seq_file *seq, void *v,
				       loff_t *pos)
{
	struct rxrpc_net *rxnet = rxrpc_net(seq_file_net(seq));

	return seq_list_next(v, &rxnet->conn_proc_list, pos);
}

static void rxrpc_connection_seq_stop(struct seq_file *seq, void *v)
	__releases(rxnet->conn_lock)
{
	struct rxrpc_net *rxnet = rxrpc_net(seq_file_net(seq));

	read_unlock(&rxnet->conn_lock);
}

static int rxrpc_connection_seq_show(struct seq_file *seq, void *v)
{
	struct rxrpc_connection *conn;
	struct rxrpc_net *rxnet = rxrpc_net(seq_file_net(seq));
	char lbuff[50], rbuff[50];

	if (v == &rxnet->conn_proc_list) {
		seq_puts(seq,
			 "Proto Local                                          "
			 " Remote                                         "
			 " SvID ConnID   End Use State    Key     "
			 " Serial   ISerial  CallId0  CallId1  CallId2  CallId3\n"
			 );
		return 0;
	}

	conn = list_entry(v, struct rxrpc_connection, proc_link);
	if (conn->state == RXRPC_CONN_SERVICE_PREALLOC) {
		strcpy(lbuff, "no_local");
		strcpy(rbuff, "no_connection");
		goto print;
	}

	sprintf(lbuff, "%pISpc", &conn->params.local->srx.transport);

	sprintf(rbuff, "%pISpc", &conn->params.peer->srx.transport);
print:
	seq_printf(seq,
		   "UDP   %-47.47s %-47.47s %4x %08x %s %3u"
		   " %s %08x %08x %08x %08x %08x %08x %08x\n",
		   lbuff,
		   rbuff,
		   conn->service_id,
		   conn->proto.cid,
		   rxrpc_conn_is_service(conn) ? "Svc" : "Clt",
		   atomic_read(&conn->usage),
		   rxrpc_conn_states[conn->state],
		   key_serial(conn->params.key),
		   atomic_read(&conn->serial),
		   conn->hi_serial,
		   conn->channels[0].call_id,
		   conn->channels[1].call_id,
		   conn->channels[2].call_id,
		   conn->channels[3].call_id);

	return 0;
}

const struct seq_operations rxrpc_connection_seq_ops = {
	.start	= rxrpc_connection_seq_start,
	.next	= rxrpc_connection_seq_next,
	.stop	= rxrpc_connection_seq_stop,
	.show	= rxrpc_connection_seq_show,
};

/*
 * generate a list of extant virtual peers in /proc/net/rxrpc/peers
 */
static int rxrpc_peer_seq_show(struct seq_file *seq, void *v)
{
	struct rxrpc_peer *peer;
	time64_t now;
	char lbuff[50], rbuff[50];

	if (v == SEQ_START_TOKEN) {
		seq_puts(seq,
			 "Proto Local                                          "
			 " Remote                                         "
			 " Use CW  MTU   LastUse      RTT      RTO\n"
			 );
		return 0;
	}

	peer = list_entry(v, struct rxrpc_peer, hash_link);

	sprintf(lbuff, "%pISpc", &peer->local->srx.transport);

	sprintf(rbuff, "%pISpc", &peer->srx.transport);

	now = ktime_get_seconds();
	seq_printf(seq,
		   "UDP   %-47.47s %-47.47s %3u"
		   " %3u %5u %6llus %8u %8u\n",
		   lbuff,
		   rbuff,
		   atomic_read(&peer->usage),
		   peer->cong_cwnd,
		   peer->mtu,
		   now - peer->last_tx_at,
		   peer->srtt_us >> 3,
		   jiffies_to_usecs(peer->rto_j));

	return 0;
}

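/*
 * The peer table is a hash table rather than a list, so the seq_file
 * position cannot be a simple element index.  Instead, *_pos is treated as
 * a cursor: the low (32 - HASH_BITS(peer_hash)) bits hold a 1-based index
 * into the current bucket and the remaining high bits hold the bucket
 * number, with position 0 reserved for the header line (SEQ_START_TOKEN)
 * and UINT_MAX used as an end-of-table marker.  For example, if the table
 * had 2^10 buckets, the shift would be 22 and a cursor of (3 << 22) | 5
 * would mean "the fifth peer in bucket 3".
 */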
static void *rxrpc_peer_seq_start(struct seq_file *seq, loff_t *_pos)
	__acquires(rcu)
{
	struct rxrpc_net *rxnet = rxrpc_net(seq_file_net(seq));
	unsigned int bucket, n;
	unsigned int shift = 32 - HASH_BITS(rxnet->peer_hash);
	void *p;

	rcu_read_lock();

	if (*_pos >= UINT_MAX)
		return NULL;

	n = *_pos & ((1U << shift) - 1);
	bucket = *_pos >> shift;
	for (;;) {
		if (bucket >= HASH_SIZE(rxnet->peer_hash)) {
			*_pos = UINT_MAX;
			return NULL;
		}
		if (n == 0) {
			if (bucket == 0)
				return SEQ_START_TOKEN;
			*_pos += 1;
			n++;
		}

		p = seq_hlist_start_rcu(&rxnet->peer_hash[bucket], n - 1);
		if (p)
			return p;
		bucket++;
		n = 1;
		*_pos = (bucket << shift) | n;
	}
}

static void *rxrpc_peer_seq_next(struct seq_file *seq, void *v, loff_t *_pos)
{
	struct rxrpc_net *rxnet = rxrpc_net(seq_file_net(seq));
	unsigned int bucket, n;
	unsigned int shift = 32 - HASH_BITS(rxnet->peer_hash);
	void *p;

	if (*_pos >= UINT_MAX)
		return NULL;

	bucket = *_pos >> shift;

	p = seq_hlist_next_rcu(v, &rxnet->peer_hash[bucket], _pos);
	if (p)
		return p;

	for (;;) {
		bucket++;
		n = 1;
		*_pos = (bucket << shift) | n;

		if (bucket >= HASH_SIZE(rxnet->peer_hash)) {
			*_pos = UINT_MAX;
			return NULL;
		}
		if (n == 0) {
			*_pos += 1;
			n++;
		}

		p = seq_hlist_start_rcu(&rxnet->peer_hash[bucket], n - 1);
		if (p)
			return p;
	}
}

static void rxrpc_peer_seq_stop(struct seq_file *seq, void *v)
	__releases(rcu)
{
	rcu_read_unlock();
}


const struct seq_operations rxrpc_peer_seq_ops = {
	.start	= rxrpc_peer_seq_start,
	.next	= rxrpc_peer_seq_next,
	.stop	= rxrpc_peer_seq_stop,
	.show	= rxrpc_peer_seq_show,
};