// SPDX-License-Identifier: GPL-2.0-or-later
/* incoming call handling
 *
 * Copyright (C) 2007 Red Hat, Inc. All Rights Reserved.
 * Written by David Howells (dhowells@redhat.com)
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/module.h>
#include <linux/net.h>
#include <linux/skbuff.h>
#include <linux/errqueue.h>
#include <linux/udp.h>
#include <linux/in.h>
#include <linux/in6.h>
#include <linux/icmp.h>
#include <linux/gfp.h>
#include <linux/circ_buf.h>
#include <net/sock.h>
#include <net/af_rxrpc.h>
#include <net/ip.h>
#include "ar-internal.h"

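/*
 * Discarded preallocated calls get their ->notify_rx pointer replaced with
 * this no-op so that any notification raised after ->discard_new_call() has
 * run is silently dropped (see rxrpc_discard_prealloc() below).
 */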
static void rxrpc_dummy_notify(struct sock *sk, struct rxrpc_call *call,
			       unsigned long user_call_ID)
{
}
29
David Howells17926a72007-04-26 15:48:28 -070030/*
David Howells00e90712016-09-08 11:10:12 +010031 * Preallocate a single service call, connection and peer and, if possible,
32 * give them a user ID and attach the user's side of the ID to them.
33 */
34static int rxrpc_service_prealloc_one(struct rxrpc_sock *rx,
35 struct rxrpc_backlog *b,
36 rxrpc_notify_rx_t notify_rx,
37 rxrpc_user_attach_call_t user_attach_call,
David Howellsa25e21f2018-03-27 23:03:00 +010038 unsigned long user_call_ID, gfp_t gfp,
39 unsigned int debug_id)
David Howells00e90712016-09-08 11:10:12 +010040{
41 const void *here = __builtin_return_address(0);
David Howells2d914c12020-09-30 21:27:18 +010042 struct rxrpc_call *call, *xcall;
David Howells2baec2c2017-05-24 17:02:32 +010043 struct rxrpc_net *rxnet = rxrpc_net(sock_net(&rx->sk));
David Howells2d914c12020-09-30 21:27:18 +010044 struct rb_node *parent, **pp;
David Howells00e90712016-09-08 11:10:12 +010045 int max, tmp;
46 unsigned int size = RXRPC_BACKLOG_MAX;
47 unsigned int head, tail, call_head, call_tail;
48
49 max = rx->sk.sk_max_ack_backlog;
50 tmp = rx->sk.sk_ack_backlog;
51 if (tmp >= max) {
52 _leave(" = -ENOBUFS [full %u]", max);
53 return -ENOBUFS;
54 }
55 max -= tmp;
56
57 /* We don't need more conns and peers than we have calls, but on the
58 * other hand, we shouldn't ever use more peers than conns or conns
59 * than calls.
60 */
61 call_head = b->call_backlog_head;
62 call_tail = READ_ONCE(b->call_backlog_tail);
63 tmp = CIRC_CNT(call_head, call_tail, size);
64 if (tmp >= max) {
65 _leave(" = -ENOBUFS [enough %u]", tmp);
66 return -ENOBUFS;
67 }
68 max = tmp + 1;
69
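	/* Each backlog ring has RXRPC_BACKLOG_MAX slots, a power of two, so
	 * the CIRC_CNT() calculations and the "& (size - 1)" wraps below
	 * implement the standard single-producer/single-consumer circular
	 * buffer from <linux/circ_buf.h>: the producer publishes a slot by
	 * advancing *_head with smp_store_release(), which pairs with the
	 * smp_load_acquire() of *_head in rxrpc_alloc_incoming_call().
	 */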
	head = b->peer_backlog_head;
	tail = READ_ONCE(b->peer_backlog_tail);
	if (CIRC_CNT(head, tail, size) < max) {
		struct rxrpc_peer *peer = rxrpc_alloc_peer(rx->local, gfp);
		if (!peer)
			return -ENOMEM;
		b->peer_backlog[head] = peer;
		smp_store_release(&b->peer_backlog_head,
				  (head + 1) & (size - 1));
	}

	head = b->conn_backlog_head;
	tail = READ_ONCE(b->conn_backlog_tail);
	if (CIRC_CNT(head, tail, size) < max) {
		struct rxrpc_connection *conn;

		conn = rxrpc_prealloc_service_connection(rxnet, gfp);
		if (!conn)
			return -ENOMEM;
		b->conn_backlog[head] = conn;
		smp_store_release(&b->conn_backlog_head,
				  (head + 1) & (size - 1));

		trace_rxrpc_conn(conn->debug_id, rxrpc_conn_new_service,
				 atomic_read(&conn->usage), here);
	}

	/* Now it gets complicated, because calls get registered with the
	 * socket here, with a user ID preassigned by the user.
	 */
	call = rxrpc_alloc_call(rx, gfp, debug_id);
	if (!call)
		return -ENOMEM;
	call->flags |= (1 << RXRPC_CALL_IS_SERVICE);
	call->state = RXRPC_CALL_SERVER_PREALLOC;
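	/* The call sits in the SERVER_PREALLOC state in the ring until
	 * rxrpc_alloc_incoming_call() pulls it out and it is made live for
	 * an incoming packet.
	 */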

	trace_rxrpc_call(call->debug_id, rxrpc_call_new_service,
			 atomic_read(&call->usage),
			 here, (const void *)user_call_ID);

	write_lock(&rx->call_lock);

	/* Check the user ID isn't already in use */
	pp = &rx->calls.rb_node;
	parent = NULL;
	while (*pp) {
		parent = *pp;
		xcall = rb_entry(parent, struct rxrpc_call, sock_node);
		if (user_call_ID < xcall->user_call_ID)
			pp = &(*pp)->rb_left;
		else if (user_call_ID > xcall->user_call_ID)
			pp = &(*pp)->rb_right;
		else
			goto id_in_use;
	}

	call->user_call_ID = user_call_ID;
	call->notify_rx = notify_rx;
	if (user_attach_call) {
		rxrpc_get_call(call, rxrpc_call_got_kernel);
		user_attach_call(call, user_call_ID);
	}

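	/* The user ID tree holds its own reference on the call (tagged
	 * rxrpc_call_got_userid), in addition to any ref just handed to a
	 * kernel service by user_attach_call().
	 */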
	rxrpc_get_call(call, rxrpc_call_got_userid);
	rb_link_node(&call->sock_node, parent, pp);
	rb_insert_color(&call->sock_node, &rx->calls);
	set_bit(RXRPC_CALL_HAS_USERID, &call->flags);

	list_add(&call->sock_link, &rx->sock_calls);

	write_unlock(&rx->call_lock);

	rxnet = call->rxnet;
	write_lock(&rxnet->call_lock);
	list_add_tail(&call->link, &rxnet->calls);
	write_unlock(&rxnet->call_lock);

	b->call_backlog[call_head] = call;
	smp_store_release(&b->call_backlog_head, (call_head + 1) & (size - 1));
	_leave(" = 0 [%d -> %lx]", call->debug_id, user_call_ID);
	return 0;

id_in_use:
	write_unlock(&rx->call_lock);
	rxrpc_cleanup_call(call);
	_leave(" = -EBADSLT");
	return -EBADSLT;
}

/*
 * Allocate the preallocation buffers for incoming service calls.  These must
 * be charged manually.
 */
int rxrpc_service_prealloc(struct rxrpc_sock *rx, gfp_t gfp)
{
	struct rxrpc_backlog *b = rx->backlog;

	if (!b) {
		b = kzalloc(sizeof(struct rxrpc_backlog), gfp);
		if (!b)
			return -ENOMEM;
		rx->backlog = b;
	}

	return 0;
}

/*
 * Discard the preallocation on a service.
 */
void rxrpc_discard_prealloc(struct rxrpc_sock *rx)
{
	struct rxrpc_backlog *b = rx->backlog;
	struct rxrpc_net *rxnet = rxrpc_net(sock_net(&rx->sk));
	unsigned int size = RXRPC_BACKLOG_MAX, head, tail;

	if (!b)
		return;
	rx->backlog = NULL;

	/* Make sure that there aren't any incoming calls in progress before we
	 * clear the preallocation buffers.
	 */
	spin_lock_bh(&rx->incoming_lock);
	spin_unlock_bh(&rx->incoming_lock);

	head = b->peer_backlog_head;
	tail = b->peer_backlog_tail;
	while (CIRC_CNT(head, tail, size) > 0) {
		struct rxrpc_peer *peer = b->peer_backlog[tail];
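		/* An unused preallocated peer still holds a ref on the local
		 * endpoint; that must be put before the peer is freed.
		 */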
		rxrpc_put_local(peer->local);
		kfree(peer);
		tail = (tail + 1) & (size - 1);
	}

	head = b->conn_backlog_head;
	tail = b->conn_backlog_tail;
	while (CIRC_CNT(head, tail, size) > 0) {
		struct rxrpc_connection *conn = b->conn_backlog[tail];
		write_lock(&rxnet->conn_lock);
		list_del(&conn->link);
		list_del(&conn->proc_link);
		write_unlock(&rxnet->conn_lock);
		kfree(conn);
		if (atomic_dec_and_test(&rxnet->nr_conns))
			wake_up_var(&rxnet->nr_conns);
		tail = (tail + 1) & (size - 1);
	}

	head = b->call_backlog_head;
	tail = b->call_backlog_tail;
	while (CIRC_CNT(head, tail, size) > 0) {
		struct rxrpc_call *call = b->call_backlog[tail];
		rcu_assign_pointer(call->socket, rx);
		if (rx->discard_new_call) {
			_debug("discard %lx", call->user_call_ID);
			rx->discard_new_call(call, call->user_call_ID);
			if (call->notify_rx)
				call->notify_rx = rxrpc_dummy_notify;
			rxrpc_put_call(call, rxrpc_call_put_kernel);
		}
		rxrpc_call_completed(call);
		rxrpc_release_call(rx, call);
		rxrpc_put_call(call, rxrpc_call_put);
		tail = (tail + 1) & (size - 1);
	}

	kfree(b);
}

/*
 * Ping the other end to fill our RTT cache and to retrieve the rwind
 * and MTU parameters.
 */
static void rxrpc_send_ping(struct rxrpc_call *call, struct sk_buff *skb)
{
	struct rxrpc_skb_priv *sp = rxrpc_skb(skb);
	ktime_t now = skb->tstamp;

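	/* Solicit a ping ACK if we have fewer than three RTT samples for
	 * this peer or if the last RTT probe was sent more than a second
	 * ago.
	 */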
	if (call->peer->rtt_count < 3 ||
	    ktime_before(ktime_add_ms(call->peer->rtt_last_req, 1000), now))
		rxrpc_propose_ACK(call, RXRPC_ACK_PING, sp->hdr.serial,
				  true, true,
				  rxrpc_propose_ack_ping_for_params);
}

/*
 * Allocate a new incoming call from the prealloc pool, along with a connection
 * and a peer as necessary.
 */
static struct rxrpc_call *rxrpc_alloc_incoming_call(struct rxrpc_sock *rx,
						    struct rxrpc_local *local,
						    struct rxrpc_peer *peer,
						    struct rxrpc_connection *conn,
						    const struct rxrpc_security *sec,
						    struct key *key,
						    struct sk_buff *skb)
{
	struct rxrpc_backlog *b = rx->backlog;
	struct rxrpc_call *call;
	unsigned short call_head, conn_head, peer_head;
	unsigned short call_tail, conn_tail, peer_tail;
	unsigned short call_count, conn_count;

	/* #calls >= #conns >= #peers must hold true. */
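	/* The _head indices are read with smp_load_acquire() so that the
	 * ring-slot contents written before the corresponding
	 * smp_store_release() in rxrpc_service_prealloc_one() are visible.
	 */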
	call_head = smp_load_acquire(&b->call_backlog_head);
	call_tail = b->call_backlog_tail;
	call_count = CIRC_CNT(call_head, call_tail, RXRPC_BACKLOG_MAX);
	conn_head = smp_load_acquire(&b->conn_backlog_head);
	conn_tail = b->conn_backlog_tail;
	conn_count = CIRC_CNT(conn_head, conn_tail, RXRPC_BACKLOG_MAX);
	ASSERTCMP(conn_count, >=, call_count);
	peer_head = smp_load_acquire(&b->peer_backlog_head);
	peer_tail = b->peer_backlog_tail;
	ASSERTCMP(CIRC_CNT(peer_head, peer_tail, RXRPC_BACKLOG_MAX), >=,
		  conn_count);

	if (call_count == 0)
		return NULL;

	if (!conn) {
		if (peer && !rxrpc_get_peer_maybe(peer))
			peer = NULL;
		if (!peer) {
			peer = b->peer_backlog[peer_tail];
			if (rxrpc_extract_addr_from_skb(&peer->srx, skb) < 0)
				return NULL;
			b->peer_backlog[peer_tail] = NULL;
			smp_store_release(&b->peer_backlog_tail,
					  (peer_tail + 1) &
					  (RXRPC_BACKLOG_MAX - 1));

			rxrpc_new_incoming_peer(rx, local, peer);
		}

		/* Now allocate and set up the connection */
		conn = b->conn_backlog[conn_tail];
		b->conn_backlog[conn_tail] = NULL;
		smp_store_release(&b->conn_backlog_tail,
				  (conn_tail + 1) & (RXRPC_BACKLOG_MAX - 1));
		conn->params.local = rxrpc_get_local(local);
		conn->params.peer = peer;
		rxrpc_see_connection(conn);
		rxrpc_new_incoming_connection(rx, conn, sec, key, skb);
	} else {
		rxrpc_get_connection(conn);
	}

	/* And now we can allocate and set up a new call */
	call = b->call_backlog[call_tail];
	b->call_backlog[call_tail] = NULL;
	smp_store_release(&b->call_backlog_tail,
			  (call_tail + 1) & (RXRPC_BACKLOG_MAX - 1));

	rxrpc_see_call(call);
	call->conn = conn;
	call->security = conn->security;
	call->security_ix = conn->security_ix;
	call->peer = rxrpc_get_peer(conn->params.peer);
	call->cong_cwnd = call->peer->cong_cwnd;
	return call;
}

/*
 * Set up a new incoming call.  Called in BH context with the RCU read lock
 * held.
 *
 * If this is for a kernel service, when we allocate the call, it will have
 * three refs on it: (1) the kernel service, (2) the user_call_ID tree, (3) the
 * retainer ref obtained from the backlog buffer.  Prealloc calls for userspace
 * services only have the ref from the backlog buffer.  We want to pass this
 * ref to non-BH context to dispose of.
 *
 * If we want to report an error, we mark the skb with the packet type and
 * abort code and return NULL.
 *
 * The call is returned with the user access mutex held.
 */
struct rxrpc_call *rxrpc_new_incoming_call(struct rxrpc_local *local,
					   struct rxrpc_sock *rx,
					   struct sk_buff *skb)
{
	struct rxrpc_skb_priv *sp = rxrpc_skb(skb);
	const struct rxrpc_security *sec = NULL;
	struct rxrpc_connection *conn;
	struct rxrpc_peer *peer = NULL;
	struct rxrpc_call *call = NULL;
	struct key *key = NULL;

	_enter("");

	spin_lock(&rx->incoming_lock);
	if (rx->sk.sk_state == RXRPC_SERVER_LISTEN_DISABLED ||
	    rx->sk.sk_state == RXRPC_CLOSE) {
		trace_rxrpc_abort(0, "CLS", sp->hdr.cid, sp->hdr.callNumber,
				  sp->hdr.seq, RX_INVALID_OPERATION, ESHUTDOWN);
		skb->mark = RXRPC_SKB_MARK_REJECT_ABORT;
		skb->priority = RX_INVALID_OPERATION;
		goto no_call;
	}

	/* The peer, connection and call may all have sprung into existence due
	 * to a duplicate packet being handled on another CPU in parallel, so
	 * we have to recheck the routing.  However, we're now holding
	 * rx->incoming_lock, so the values should remain stable.
	 */
	conn = rxrpc_find_connection_rcu(local, skb, &peer);

	if (!conn && !rxrpc_look_up_server_security(local, rx, &sec, &key, skb))
		goto no_call;

	call = rxrpc_alloc_incoming_call(rx, local, peer, conn, sec, key, skb);
	key_put(key);
	if (!call) {
		skb->mark = RXRPC_SKB_MARK_REJECT_BUSY;
		goto no_call;
	}

	trace_rxrpc_receive(call, rxrpc_receive_incoming,
			    sp->hdr.serial, sp->hdr.seq);

	/* Make the call live. */
	rxrpc_incoming_call(rx, call, skb);
	conn = call->conn;

	if (rx->notify_new_call)
		rx->notify_new_call(&rx->sk, call, call->user_call_ID);

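	/* Drive the connection's state machine: a new service connection
	 * needs a security challenge sent before the call can advance,
	 * whereas an already-aborted connection completes the call with the
	 * stored abort immediately.
	 */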
	spin_lock(&conn->state_lock);
	switch (conn->state) {
	case RXRPC_CONN_SERVICE_UNSECURED:
		conn->state = RXRPC_CONN_SERVICE_CHALLENGING;
		set_bit(RXRPC_CONN_EV_CHALLENGE, &call->conn->events);
		rxrpc_queue_conn(call->conn);
		break;

	case RXRPC_CONN_SERVICE:
		write_lock(&call->state_lock);
		if (call->state < RXRPC_CALL_COMPLETE)
			call->state = RXRPC_CALL_SERVER_RECV_REQUEST;
		write_unlock(&call->state_lock);
		break;

	case RXRPC_CONN_REMOTELY_ABORTED:
		rxrpc_set_call_completion(call, RXRPC_CALL_REMOTELY_ABORTED,
					  conn->abort_code, conn->error);
		break;
	case RXRPC_CONN_LOCALLY_ABORTED:
		rxrpc_abort_call("CON", call, sp->hdr.seq,
				 conn->abort_code, conn->error);
		break;
	default:
		BUG();
	}
	spin_unlock(&conn->state_lock);
	spin_unlock(&rx->incoming_lock);

	rxrpc_send_ping(call, skb);

	/* We have to discard the prealloc queue's ref here and rely on a
	 * combination of the RCU read lock and refs held either by the socket
	 * (recvmsg queue, to-be-accepted queue or user ID tree) or the kernel
	 * service to prevent the call from being deallocated too early.
	 */
	rxrpc_put_call(call, rxrpc_call_put);

	_leave(" = %p{%d}", call, call->debug_id);
	return call;

no_call:
	spin_unlock(&rx->incoming_lock);
	_leave(" = NULL [%u]", skb->mark);
	return NULL;
}

/*
 * Charge up socket with preallocated calls, attaching user call IDs.
 */
int rxrpc_user_charge_accept(struct rxrpc_sock *rx, unsigned long user_call_ID)
{
	struct rxrpc_backlog *b = rx->backlog;

	if (rx->sk.sk_state == RXRPC_CLOSE)
		return -ESHUTDOWN;

	return rxrpc_service_prealloc_one(rx, b, NULL, NULL, user_call_ID,
					  GFP_KERNEL,
					  atomic_inc_return(&rxrpc_debug_id));
}

/*
 * rxrpc_kernel_charge_accept - Charge up socket with preallocated calls
 * @sock: The socket on which to preallocate
 * @notify_rx: Event notification function for the call
 * @user_attach_call: Func to attach call to user_call_ID
 * @user_call_ID: The tag to attach to the preallocated call
 * @gfp: The allocation conditions.
 * @debug_id: The tracing debug ID.
 *
 * Charge up the socket with preallocated calls, each with a user ID.  A
 * function should be provided to effect the attachment from the user's side.
 * The user is given a ref to hold on the call.
 *
 * Note that the call may become connected before this function returns.
 */
int rxrpc_kernel_charge_accept(struct socket *sock,
			       rxrpc_notify_rx_t notify_rx,
			       rxrpc_user_attach_call_t user_attach_call,
			       unsigned long user_call_ID, gfp_t gfp,
			       unsigned int debug_id)
{
	struct rxrpc_sock *rx = rxrpc_sk(sock->sk);
	struct rxrpc_backlog *b = rx->backlog;

	if (sock->sk->sk_state == RXRPC_CLOSE)
		return -ESHUTDOWN;

	return rxrpc_service_prealloc_one(rx, b, notify_rx,
					  user_attach_call, user_call_ID,
					  gfp, debug_id);
}
EXPORT_SYMBOL(rxrpc_kernel_charge_accept);
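
/*
 * Example (illustrative sketch only, not part of this file): a kernel
 * service might keep the backlog topped up by charging one call per
 * expected incoming call, using its own (hypothetical) attach helper:
 *
 *	static void my_rx_attach(struct rxrpc_call *call, unsigned long ID)
 *	{
 *		struct my_call *c = (struct my_call *)ID;
 *
 *		c->rxcall = call;
 *	}
 *
 *	ret = rxrpc_kernel_charge_accept(socket, my_rx_notify, my_rx_attach,
 *					 (unsigned long)c, GFP_KERNEL,
 *					 atomic_inc_return(&my_debug_ids));
 *
 * Here my_rx_notify, my_rx_attach, struct my_call and my_debug_ids are all
 * assumed names belonging to the calling service, not to rxrpc itself.
 */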