/* incoming call handling
 *
 * Copyright (C) 2007 Red Hat, Inc. All Rights Reserved.
 * Written by David Howells (dhowells@redhat.com)
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/module.h>
#include <linux/net.h>
#include <linux/skbuff.h>
#include <linux/errqueue.h>
#include <linux/udp.h>
#include <linux/in.h>
#include <linux/in6.h>
#include <linux/icmp.h>
#include <linux/gfp.h>
#include <linux/circ_buf.h>
#include <net/sock.h>
#include <net/af_rxrpc.h>
#include <net/ip.h>
#include "ar-internal.h"

/*
 * Preallocate a single service call, connection and peer and, if possible,
 * give them a user ID and attach the user's side of the ID to them.
 */
static int rxrpc_service_prealloc_one(struct rxrpc_sock *rx,
				      struct rxrpc_backlog *b,
				      rxrpc_notify_rx_t notify_rx,
				      rxrpc_user_attach_call_t user_attach_call,
				      unsigned long user_call_ID, gfp_t gfp)
{
	const void *here = __builtin_return_address(0);
	struct rxrpc_call *call;
	int max, tmp;
	unsigned int size = RXRPC_BACKLOG_MAX;
	unsigned int head, tail, call_head, call_tail;

	max = rx->sk.sk_max_ack_backlog;
	tmp = rx->sk.sk_ack_backlog;
	if (tmp >= max) {
		_leave(" = -ENOBUFS [full %u]", max);
		return -ENOBUFS;
	}
	max -= tmp;

	/* We don't need more conns and peers than we have calls, but on the
	 * other hand, we shouldn't ever use more peers than conns or conns
	 * than calls.
	 */
	call_head = b->call_backlog_head;
	call_tail = READ_ONCE(b->call_backlog_tail);
	tmp = CIRC_CNT(call_head, call_tail, size);
	if (tmp >= max) {
		_leave(" = -ENOBUFS [enough %u]", tmp);
		return -ENOBUFS;
	}
	max = tmp + 1;

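	/* Each backlog ring is a power-of-two circular buffer.  The
	 * preallocator here advances the head with smp_store_release() after
	 * filling a slot and the consumer advances the tail, so
	 * CIRC_CNT(head, tail, size) is the number of preallocated items not
	 * yet taken.
	 */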
	head = b->peer_backlog_head;
	tail = READ_ONCE(b->peer_backlog_tail);
	if (CIRC_CNT(head, tail, size) < max) {
		struct rxrpc_peer *peer = rxrpc_alloc_peer(rx->local, gfp);
		if (!peer)
			return -ENOMEM;
		b->peer_backlog[head] = peer;
		smp_store_release(&b->peer_backlog_head,
				  (head + 1) & (size - 1));
	}

	head = b->conn_backlog_head;
	tail = READ_ONCE(b->conn_backlog_tail);
	if (CIRC_CNT(head, tail, size) < max) {
		struct rxrpc_connection *conn;

		conn = rxrpc_prealloc_service_connection(gfp);
		if (!conn)
			return -ENOMEM;
		b->conn_backlog[head] = conn;
		smp_store_release(&b->conn_backlog_head,
				  (head + 1) & (size - 1));
	}

	/* Now it gets complicated, because calls get registered with the
	 * socket here, particularly if a user ID is preassigned by the user.
	 */
	call = rxrpc_alloc_call(gfp);
	if (!call)
		return -ENOMEM;
	call->flags |= (1 << RXRPC_CALL_IS_SERVICE);
	call->state = RXRPC_CALL_SERVER_PREALLOC;

	trace_rxrpc_call(call, rxrpc_call_new_service,
			 atomic_read(&call->usage),
			 here, (const void *)user_call_ID);

	write_lock(&rx->call_lock);
	if (user_attach_call) {
		struct rxrpc_call *xcall;
		struct rb_node *parent, **pp;

		/* Check the user ID isn't already in use */
		pp = &rx->calls.rb_node;
		parent = NULL;
		while (*pp) {
			parent = *pp;
			xcall = rb_entry(parent, struct rxrpc_call, sock_node);
			if (user_call_ID < xcall->user_call_ID)
				pp = &(*pp)->rb_left;
			else if (user_call_ID > xcall->user_call_ID)
				pp = &(*pp)->rb_right;
			else
				goto id_in_use;
		}

		call->user_call_ID = user_call_ID;
		call->notify_rx = notify_rx;
		rxrpc_get_call(call, rxrpc_call_got);
		user_attach_call(call, user_call_ID);
		rxrpc_get_call(call, rxrpc_call_got_userid);
		rb_link_node(&call->sock_node, parent, pp);
		rb_insert_color(&call->sock_node, &rx->calls);
		set_bit(RXRPC_CALL_HAS_USERID, &call->flags);
	}

	write_unlock(&rx->call_lock);

	write_lock(&rxrpc_call_lock);
	list_add_tail(&call->link, &rxrpc_calls);
	write_unlock(&rxrpc_call_lock);

	b->call_backlog[call_head] = call;
	smp_store_release(&b->call_backlog_head, (call_head + 1) & (size - 1));
	_leave(" = 0 [%d -> %lx]", call->debug_id, user_call_ID);
	return 0;

id_in_use:
	write_unlock(&rx->call_lock);
	rxrpc_cleanup_call(call);
	_leave(" = -EBADSLT");
	return -EBADSLT;
}

/*
 * Preallocate sufficient service connections, calls and peers to cover the
 * entire backlog of a socket.  When a new call comes in, if we don't have
 * enough of each available, the call gets rejected as busy or ignored.
 *
 * The backlog is replenished when a connection is accepted or rejected.
 */
int rxrpc_service_prealloc(struct rxrpc_sock *rx, gfp_t gfp)
{
	struct rxrpc_backlog *b = rx->backlog;

	if (!b) {
		b = kzalloc(sizeof(struct rxrpc_backlog), gfp);
		if (!b)
			return -ENOMEM;
		rx->backlog = b;
	}

	if (rx->discard_new_call)
		return 0;

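	/* Top up until the rings are full (-ENOBUFS) or an allocation fails
	 * (-ENOMEM).
	 */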
	while (rxrpc_service_prealloc_one(rx, b, NULL, NULL, 0, gfp) == 0)
		;

	return 0;
}

/*
 * Discard the preallocation on a service.
 */
void rxrpc_discard_prealloc(struct rxrpc_sock *rx)
{
	struct rxrpc_backlog *b = rx->backlog;
	unsigned int size = RXRPC_BACKLOG_MAX, head, tail;

	if (!b)
		return;
	rx->backlog = NULL;

	head = b->peer_backlog_head;
	tail = b->peer_backlog_tail;
	while (CIRC_CNT(head, tail, size) > 0) {
		struct rxrpc_peer *peer = b->peer_backlog[tail];
		kfree(peer);
		tail = (tail + 1) & (size - 1);
	}

	head = b->conn_backlog_head;
	tail = b->conn_backlog_tail;
	while (CIRC_CNT(head, tail, size) > 0) {
		struct rxrpc_connection *conn = b->conn_backlog[tail];
		write_lock(&rxrpc_connection_lock);
		list_del(&conn->link);
		list_del(&conn->proc_link);
		write_unlock(&rxrpc_connection_lock);
		kfree(conn);
		tail = (tail + 1) & (size - 1);
	}

	head = b->call_backlog_head;
	tail = b->call_backlog_tail;
	while (CIRC_CNT(head, tail, size) > 0) {
		struct rxrpc_call *call = b->call_backlog[tail];
		if (rx->discard_new_call) {
			_debug("discard %lx", call->user_call_ID);
			rx->discard_new_call(call, call->user_call_ID);
		}
		rxrpc_call_completed(call);
		rxrpc_release_call(rx, call);
		rxrpc_put_call(call, rxrpc_call_put);
		tail = (tail + 1) & (size - 1);
	}

	kfree(b);
}

/*
 * Reject an incoming call by responding with a BUSY packet.
 */
static int rxrpc_busy(struct rxrpc_local *local, struct sockaddr_rxrpc *srx,
		      struct rxrpc_wire_header *whdr)
{
	struct msghdr msg;
	struct kvec iov[1];
	size_t len;
	int ret;

	_enter("%d,,", local->debug_id);

	whdr->type = RXRPC_PACKET_TYPE_BUSY;
	whdr->serial = htonl(1);

	msg.msg_name = &srx->transport.sin;
	msg.msg_namelen = sizeof(srx->transport.sin);
	msg.msg_control = NULL;
	msg.msg_controllen = 0;
	msg.msg_flags = 0;

	iov[0].iov_base = whdr;
	iov[0].iov_len = sizeof(*whdr);

	len = iov[0].iov_len;

	_proto("Tx BUSY %%1");

	ret = kernel_sendmsg(local->socket, &msg, iov, 1, len);
	if (ret < 0) {
		_leave(" = -EAGAIN [sendmsg failed: %d]", ret);
		return -EAGAIN;
	}

	_leave(" = 0");
	return 0;
}

/*
 * accept an incoming call that needs peer, transport and/or connection
 * setting up
 */
static int rxrpc_accept_incoming_call(struct rxrpc_local *local,
				      struct rxrpc_sock *rx,
				      struct sk_buff *skb,
				      struct sockaddr_rxrpc *srx)
{
	struct rxrpc_connection *conn;
	struct rxrpc_skb_priv *sp, *nsp;
	struct rxrpc_call *call;
	struct sk_buff *notification;
	int ret;

	_enter("");

	sp = rxrpc_skb(skb);

	/* get a notification message to send to the server app */
	notification = alloc_skb(0, GFP_NOFS);
	if (!notification) {
		_debug("no memory");
		ret = -ENOMEM;
		goto error_nofree;
	}
	rxrpc_new_skb(notification);
	notification->mark = RXRPC_SKB_MARK_NEW_CALL;

	conn = rxrpc_incoming_connection(local, srx, skb);
	if (IS_ERR(conn)) {
		_debug("no conn");
		ret = PTR_ERR(conn);
		goto error;
	}

	call = rxrpc_incoming_call(rx, conn, skb);
	rxrpc_put_connection(conn);
	if (IS_ERR(call)) {
		_debug("no call");
		ret = PTR_ERR(call);
		goto error;
	}

	/* attach the call to the socket */
	read_lock_bh(&local->services_lock);
	if (rx->sk.sk_state == RXRPC_CLOSE)
		goto invalid_service;

	write_lock(&rx->call_lock);
	if (!test_and_set_bit(RXRPC_CALL_INIT_ACCEPT, &call->flags)) {
		rxrpc_get_call(call, rxrpc_call_got);

		spin_lock(&call->conn->state_lock);
		if (sp->hdr.securityIndex > 0 &&
		    call->conn->state == RXRPC_CONN_SERVICE_UNSECURED) {
			_debug("await conn sec");
			list_add_tail(&call->accept_link, &rx->secureq);
			call->conn->state = RXRPC_CONN_SERVICE_CHALLENGING;
			set_bit(RXRPC_CONN_EV_CHALLENGE, &call->conn->events);
			rxrpc_queue_conn(call->conn);
		} else {
			_debug("conn ready");
			call->state = RXRPC_CALL_SERVER_ACCEPTING;
			list_add_tail(&call->accept_link, &rx->acceptq);
			rxrpc_get_call_for_skb(call, notification);
			nsp = rxrpc_skb(notification);
			nsp->call = call;

			ASSERTCMP(atomic_read(&call->usage), >=, 3);

			_debug("notify");
			spin_lock(&call->lock);
			ret = rxrpc_queue_rcv_skb(call, notification, true,
						  false);
			spin_unlock(&call->lock);
			notification = NULL;
			BUG_ON(ret < 0);
		}
		spin_unlock(&call->conn->state_lock);

		_debug("queued");
	}
	write_unlock(&rx->call_lock);

	_debug("process");
	rxrpc_fast_process_packet(call, skb);

	_debug("done");
	read_unlock_bh(&local->services_lock);
	rxrpc_free_skb(notification);
	rxrpc_put_call(call, rxrpc_call_put);
	_leave(" = 0");
	return 0;

invalid_service:
	_debug("invalid");
	read_unlock_bh(&local->services_lock);

	rxrpc_release_call(rx, call);
	rxrpc_put_call(call, rxrpc_call_put);
	ret = -ECONNREFUSED;
error:
	rxrpc_free_skb(notification);
error_nofree:
	_leave(" = %d", ret);
	return ret;
}

/*
 * accept incoming calls that need peer, transport and/or connection setting up
 * - the packets we get are all incoming client DATA packets that have seq == 1
 */
void rxrpc_accept_incoming_calls(struct rxrpc_local *local)
{
	struct rxrpc_skb_priv *sp;
	struct sockaddr_rxrpc srx;
	struct rxrpc_sock *rx;
	struct rxrpc_wire_header whdr;
	struct sk_buff *skb;
	int ret;

	_enter("%d", local->debug_id);

	skb = skb_dequeue(&local->accept_queue);
	if (!skb) {
		_leave("\n");
		return;
	}

	_net("incoming call skb %p", skb);

	rxrpc_see_skb(skb);
	sp = rxrpc_skb(skb);

	/* Set up a response packet header in case we need it */
	whdr.epoch = htonl(sp->hdr.epoch);
	whdr.cid = htonl(sp->hdr.cid);
	whdr.callNumber = htonl(sp->hdr.callNumber);
	whdr.seq = htonl(sp->hdr.seq);
	whdr.serial = 0;
	whdr.flags = 0;
	whdr.type = 0;
	whdr.userStatus = 0;
	whdr.securityIndex = sp->hdr.securityIndex;
	whdr._rsvd = 0;
	whdr.serviceId = htons(sp->hdr.serviceId);

	if (rxrpc_extract_addr_from_skb(&srx, skb) < 0)
		goto drop;

	/* get the socket providing the service */
	read_lock_bh(&local->services_lock);
	hlist_for_each_entry(rx, &local->services, listen_link) {
		if (rx->srx.srx_service == sp->hdr.serviceId &&
		    rx->sk.sk_state != RXRPC_CLOSE)
			goto found_service;
	}
	read_unlock_bh(&local->services_lock);
	goto invalid_service;

found_service:
	_debug("found service %hd", rx->srx.srx_service);
	if (sk_acceptq_is_full(&rx->sk))
		goto backlog_full;
	sk_acceptq_added(&rx->sk);
	read_unlock_bh(&local->services_lock);

	ret = rxrpc_accept_incoming_call(local, rx, skb, &srx);
	if (ret < 0)
		sk_acceptq_removed(&rx->sk);
	switch (ret) {
	case -ECONNRESET: /* old calls are ignored */
	case -ECONNABORTED: /* aborted calls are reaborted or ignored */
	case 0:
		return;
	case -ECONNREFUSED:
		goto invalid_service;
	case -EBUSY:
		goto busy;
	case -EKEYREJECTED:
		goto security_mismatch;
	default:
		BUG();
	}

backlog_full:
	read_unlock_bh(&local->services_lock);
busy:
	rxrpc_busy(local, &srx, &whdr);
	rxrpc_free_skb(skb);
	return;

drop:
	rxrpc_free_skb(skb);
	return;

invalid_service:
	skb->priority = RX_INVALID_OPERATION;
	rxrpc_reject_packet(local, skb);
	return;

	/* can't change connection security type mid-flow */
security_mismatch:
	skb->priority = RX_PROTOCOL_ERROR;
	rxrpc_reject_packet(local, skb);
	return;
}

/*
 * handle acceptance of a call by userspace
 * - assign the user call ID to the call at the front of the queue
 */
struct rxrpc_call *rxrpc_accept_call(struct rxrpc_sock *rx,
				     unsigned long user_call_ID,
				     rxrpc_notify_rx_t notify_rx)
{
	struct rxrpc_call *call;
	struct rb_node *parent, **pp;
	int ret;

	_enter(",%lx", user_call_ID);

	ASSERT(!irqs_disabled());

	write_lock(&rx->call_lock);

	ret = -ENODATA;
	if (list_empty(&rx->acceptq))
		goto out;

	/* check the user ID isn't already in use */
	ret = -EBADSLT;
	pp = &rx->calls.rb_node;
	parent = NULL;
	while (*pp) {
		parent = *pp;
		call = rb_entry(parent, struct rxrpc_call, sock_node);

		if (user_call_ID < call->user_call_ID)
			pp = &(*pp)->rb_left;
		else if (user_call_ID > call->user_call_ID)
			pp = &(*pp)->rb_right;
		else
			goto out;
	}

	/* dequeue the first call and check it's still valid */
	call = list_entry(rx->acceptq.next, struct rxrpc_call, accept_link);
	list_del_init(&call->accept_link);
	sk_acceptq_removed(&rx->sk);
	rxrpc_see_call(call);

	write_lock_bh(&call->state_lock);
	switch (call->state) {
	case RXRPC_CALL_SERVER_ACCEPTING:
		call->state = RXRPC_CALL_SERVER_RECV_REQUEST;
		break;
	case RXRPC_CALL_COMPLETE:
		ret = call->error;
		goto out_release;
	default:
		BUG();
	}

	/* formalise the acceptance */
	rxrpc_get_call(call, rxrpc_call_got_userid);
	call->notify_rx = notify_rx;
	call->user_call_ID = user_call_ID;
	rb_link_node(&call->sock_node, parent, pp);
	rb_insert_color(&call->sock_node, &rx->calls);
	if (test_and_set_bit(RXRPC_CALL_HAS_USERID, &call->flags))
		BUG();
	if (test_and_set_bit(RXRPC_CALL_EV_ACCEPTED, &call->events))
		BUG();

	write_unlock_bh(&call->state_lock);
	write_unlock(&rx->call_lock);
	rxrpc_queue_call(call);
	_leave(" = %p{%d}", call, call->debug_id);
	return call;

out_release:
	write_unlock_bh(&call->state_lock);
	write_unlock(&rx->call_lock);
	_debug("release %p", call);
	rxrpc_release_call(rx, call);
	_leave(" = %d", ret);
	return ERR_PTR(ret);
out:
	write_unlock(&rx->call_lock);
	_leave(" = %d", ret);
	return ERR_PTR(ret);
}

/*
 * Handle rejection of a call by userspace
 * - reject the call at the front of the queue
 */
int rxrpc_reject_call(struct rxrpc_sock *rx)
{
	struct rxrpc_call *call;
	int ret;

	_enter("");

	ASSERT(!irqs_disabled());

	write_lock(&rx->call_lock);

	ret = -ENODATA;
	if (list_empty(&rx->acceptq)) {
		write_unlock(&rx->call_lock);
		_leave(" = -ENODATA");
		return -ENODATA;
	}

	/* dequeue the first call and check it's still valid */
	call = list_entry(rx->acceptq.next, struct rxrpc_call, accept_link);
	list_del_init(&call->accept_link);
	sk_acceptq_removed(&rx->sk);
	rxrpc_see_call(call);

	write_lock_bh(&call->state_lock);
	switch (call->state) {
	case RXRPC_CALL_SERVER_ACCEPTING:
		__rxrpc_set_call_completion(call, RXRPC_CALL_SERVER_BUSY,
					    0, ECONNABORTED);
		if (test_and_set_bit(RXRPC_CALL_EV_REJECT_BUSY, &call->events))
			rxrpc_queue_call(call);
		ret = 0;
		break;
	case RXRPC_CALL_COMPLETE:
		ret = call->error;
		break;
	default:
		BUG();
	}

	write_unlock_bh(&call->state_lock);
	write_unlock(&rx->call_lock);
	rxrpc_release_call(rx, call);
	_leave(" = %d", ret);
	return ret;
}

/**
 * rxrpc_kernel_accept_call - Allow a kernel service to accept an incoming call
 * @sock: The socket on which the impending call is waiting
 * @user_call_ID: The tag to attach to the call
 * @notify_rx: Where to send notifications instead of socket queue
 *
 * Allow a kernel service to accept an incoming call, assuming the incoming
 * call is still valid.  The caller should immediately trigger their own
 * notification as there must be data waiting.
 */
struct rxrpc_call *rxrpc_kernel_accept_call(struct socket *sock,
					    unsigned long user_call_ID,
					    rxrpc_notify_rx_t notify_rx)
{
	struct rxrpc_call *call;

	_enter(",%lx", user_call_ID);
	call = rxrpc_accept_call(rxrpc_sk(sock->sk), user_call_ID, notify_rx);
	_leave(" = %p", call);
	return call;
}
EXPORT_SYMBOL(rxrpc_kernel_accept_call);
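
/*
 * A minimal sketch of how a kernel service might use the above, assuming the
 * rxrpc_notify_rx_t signature declared in af_rxrpc.h; my_sock, my_tag and
 * my_notify_rx are illustrative names, not part of this API:
 *
 *	static void my_notify_rx(struct sock *sk, struct rxrpc_call *rxcall,
 *				 unsigned long user_call_ID);
 *
 *	call = rxrpc_kernel_accept_call(my_sock, my_tag, my_notify_rx);
 *	if (IS_ERR(call))
 *		return PTR_ERR(call);
 *
 * On success, the service should immediately go and process the data already
 * queued on the call, as noted in the kernel-doc above.
 */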

/**
 * rxrpc_kernel_reject_call - Allow a kernel service to reject an incoming call
 * @sock: The socket on which the impending call is waiting
 *
 * Allow a kernel service to reject an incoming call with a BUSY message,
 * assuming the incoming call is still valid.
 */
int rxrpc_kernel_reject_call(struct socket *sock)
{
	int ret;

	_enter("");
	ret = rxrpc_reject_call(rxrpc_sk(sock->sk));
	_leave(" = %d", ret);
	return ret;
}
EXPORT_SYMBOL(rxrpc_kernel_reject_call);

/**
 * rxrpc_kernel_charge_accept - Charge up socket with preallocated calls
 * @sock: The socket on which to preallocate
 * @notify_rx: Event notification function for the call
 * @user_attach_call: Func to attach call to user_call_ID
 * @user_call_ID: The tag to attach to the preallocated call
 * @gfp: The allocation conditions.
 *
 * Charge up the socket with preallocated calls, each with a user ID.  A
 * function should be provided to effect the attachment from the user's side.
 * The user is given a ref to hold on the call.
 *
 * Note that the call may become connected before this function returns.
 */
int rxrpc_kernel_charge_accept(struct socket *sock,
			       rxrpc_notify_rx_t notify_rx,
			       rxrpc_user_attach_call_t user_attach_call,
			       unsigned long user_call_ID, gfp_t gfp)
{
	struct rxrpc_sock *rx = rxrpc_sk(sock->sk);
	struct rxrpc_backlog *b = rx->backlog;

	if (sock->sk->sk_state == RXRPC_CLOSE)
		return -ESHUTDOWN;

	return rxrpc_service_prealloc_one(rx, b, notify_rx,
					  user_attach_call, user_call_ID,
					  gfp);
}
EXPORT_SYMBOL(rxrpc_kernel_charge_accept);
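
/*
 * A minimal sketch of keeping a socket's backlog charged, on the assumption
 * that the service reruns this whenever a call is accepted or rejected;
 * my_sock, my_op, my_attach and my_notify_rx are illustrative names, not
 * part of this API:
 *
 *	static void my_attach(struct rxrpc_call *call,
 *			      unsigned long user_call_ID)
 *	{
 *		struct my_op *op = (struct my_op *)user_call_ID;
 *
 *		op->rxcall = call;
 *	}
 *
 * (my_attach takes over the ref on the call that the user is given.)
 *
 *	op = kzalloc(sizeof(*op), GFP_KERNEL);
 *	if (rxrpc_kernel_charge_accept(my_sock, my_notify_rx, my_attach,
 *				       (unsigned long)op, GFP_KERNEL) < 0)
 *		kfree(op);
 *
 * Each successful charge preallocates one service call bound to the given
 * user ID; it fails with -ENOBUFS once the backlog is full and -ENOMEM if a
 * preallocation can't be made.
 */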