blob: e1966dfc915274a79daaafa710e3fd69419b8f94 [file] [log] [blame]
// SPDX-License-Identifier: GPL-2.0-or-later
/* Service connection management
 *
 * Copyright (C) 2016 Red Hat, Inc. All Rights Reserved.
 * Written by David Howells (dhowells@redhat.com)
 */

#include <linux/slab.h>
#include "ar-internal.h"
10
David Howells245500d2020-07-01 11:15:32 +010011static struct rxrpc_bundle rxrpc_service_dummy_bundle = {
12 .usage = ATOMIC_INIT(1),
13 .debug_id = UINT_MAX,
14 .channel_lock = __SPIN_LOCK_UNLOCKED(&rxrpc_service_dummy_bundle.channel_lock),
15};
16
David Howells7877a4a2016-04-04 14:00:40 +010017/*
David Howells8496af52016-07-01 07:51:50 +010018 * Find a service connection under RCU conditions.
19 *
20 * We could use a hash table, but that is subject to bucket stuffing by an
21 * attacker as the client gets to pick the epoch and cid values and would know
22 * the hash function. So, instead, we use a hash table for the peer and from
23 * that an rbtree to find the service connection. Under ordinary circumstances
24 * it might be slower than a large hash table, but it is at least limited in
25 * depth.
26 */
27struct rxrpc_connection *rxrpc_find_service_conn_rcu(struct rxrpc_peer *peer,
28 struct sk_buff *skb)
29{
30 struct rxrpc_connection *conn = NULL;
31 struct rxrpc_conn_proto k;
32 struct rxrpc_skb_priv *sp = rxrpc_skb(skb);
33 struct rb_node *p;
34 unsigned int seq = 0;
35
36 k.epoch = sp->hdr.epoch;
37 k.cid = sp->hdr.cid & RXRPC_CIDMASK;
38
39 do {
40 /* Unfortunately, rbtree walking doesn't give reliable results
41 * under just the RCU read lock, so we have to check for
42 * changes.
43 */
44 read_seqbegin_or_lock(&peer->service_conn_lock, &seq);
45
46 p = rcu_dereference_raw(peer->service_conns.rb_node);
47 while (p) {
48 conn = rb_entry(p, struct rxrpc_connection, service_node);
49
50 if (conn->proto.index_key < k.index_key)
51 p = rcu_dereference_raw(p->rb_left);
52 else if (conn->proto.index_key > k.index_key)
53 p = rcu_dereference_raw(p->rb_right);
54 else
David Howellsfdade4f2017-09-04 15:28:28 +010055 break;
David Howells8496af52016-07-01 07:51:50 +010056 conn = NULL;
57 }
58 } while (need_seqretry(&peer->service_conn_lock, seq));
59
David Howells8496af52016-07-01 07:51:50 +010060 done_seqretry(&peer->service_conn_lock, seq);
61 _leave(" = %d", conn ? conn->debug_id : -1);
62 return conn;
63}
64
65/*
66 * Insert a service connection into a peer's tree, thereby making it a target
67 * for incoming packets.
68 */
David Howells248f2192016-09-08 11:10:12 +010069static void rxrpc_publish_service_conn(struct rxrpc_peer *peer,
70 struct rxrpc_connection *conn)
David Howells8496af52016-07-01 07:51:50 +010071{
72 struct rxrpc_connection *cursor = NULL;
73 struct rxrpc_conn_proto k = conn->proto;
74 struct rb_node **pp, *parent;
75
76 write_seqlock_bh(&peer->service_conn_lock);
77
78 pp = &peer->service_conns.rb_node;
79 parent = NULL;
80 while (*pp) {
81 parent = *pp;
82 cursor = rb_entry(parent,
83 struct rxrpc_connection, service_node);
84
85 if (cursor->proto.index_key < k.index_key)
86 pp = &(*pp)->rb_left;
87 else if (cursor->proto.index_key > k.index_key)
88 pp = &(*pp)->rb_right;
89 else
90 goto found_extant_conn;
91 }
92
93 rb_link_node_rcu(&conn->service_node, parent, pp);
94 rb_insert_color(&conn->service_node, &peer->service_conns);
95conn_published:
96 set_bit(RXRPC_CONN_IN_SERVICE_CONNS, &conn->flags);
97 write_sequnlock_bh(&peer->service_conn_lock);
98 _leave(" = %d [new]", conn->debug_id);
David Howells248f2192016-09-08 11:10:12 +010099 return;
David Howells8496af52016-07-01 07:51:50 +0100100
101found_extant_conn:
102 if (atomic_read(&cursor->usage) == 0)
103 goto replace_old_connection;
104 write_sequnlock_bh(&peer->service_conn_lock);
105 /* We should not be able to get here. rxrpc_incoming_connection() is
106 * called in a non-reentrant context, so there can't be a race to
107 * insert a new connection.
108 */
109 BUG();
110
111replace_old_connection:
112 /* The old connection is from an outdated epoch. */
113 _debug("replace conn");
114 rb_replace_node_rcu(&cursor->service_node,
115 &conn->service_node,
116 &peer->service_conns);
117 clear_bit(RXRPC_CONN_IN_SERVICE_CONNS, &cursor->flags);
118 goto conn_published;
119}
120
121/*
David Howells00e90712016-09-08 11:10:12 +0100122 * Preallocate a service connection. The connection is placed on the proc and
123 * reap lists so that we don't have to get the lock from BH context.
124 */
David Howells2baec2c2017-05-24 17:02:32 +0100125struct rxrpc_connection *rxrpc_prealloc_service_connection(struct rxrpc_net *rxnet,
126 gfp_t gfp)
David Howells00e90712016-09-08 11:10:12 +0100127{
128 struct rxrpc_connection *conn = rxrpc_alloc_connection(gfp);
129
130 if (conn) {
131 /* We maintain an extra ref on the connection whilst it is on
132 * the rxrpc_connections list.
133 */
134 conn->state = RXRPC_CONN_SERVICE_PREALLOC;
135 atomic_set(&conn->usage, 2);
David Howells245500d2020-07-01 11:15:32 +0100136 conn->bundle = rxrpc_get_bundle(&rxrpc_service_dummy_bundle);
David Howells00e90712016-09-08 11:10:12 +0100137
David Howells31f5f9a162018-03-30 21:05:33 +0100138 atomic_inc(&rxnet->nr_conns);
David Howells2baec2c2017-05-24 17:02:32 +0100139 write_lock(&rxnet->conn_lock);
140 list_add_tail(&conn->link, &rxnet->service_conns);
141 list_add_tail(&conn->proc_link, &rxnet->conn_proc_list);
142 write_unlock(&rxnet->conn_lock);
David Howells363deea2016-09-17 10:49:14 +0100143
David Howells4c1295d2019-10-07 10:58:29 +0100144 trace_rxrpc_conn(conn->debug_id, rxrpc_conn_new_service,
David Howells363deea2016-09-17 10:49:14 +0100145 atomic_read(&conn->usage),
146 __builtin_return_address(0));
David Howells00e90712016-09-08 11:10:12 +0100147 }
148
149 return conn;
150}
151
152/*
David Howells248f2192016-09-08 11:10:12 +0100153 * Set up an incoming connection. This is called in BH context with the RCU
154 * read lock held.
David Howells7877a4a2016-04-04 14:00:40 +0100155 */
David Howells47229742017-06-05 14:30:49 +0100156void rxrpc_new_incoming_connection(struct rxrpc_sock *rx,
157 struct rxrpc_connection *conn,
David Howells063c60d2019-12-20 16:17:16 +0000158 const struct rxrpc_security *sec,
David Howells248f2192016-09-08 11:10:12 +0100159 struct sk_buff *skb)
David Howells7877a4a2016-04-04 14:00:40 +0100160{
David Howells7877a4a2016-04-04 14:00:40 +0100161 struct rxrpc_skb_priv *sp = rxrpc_skb(skb);
David Howells7877a4a2016-04-04 14:00:40 +0100162
163 _enter("");
164
David Howells8496af52016-07-01 07:51:50 +0100165 conn->proto.epoch = sp->hdr.epoch;
166 conn->proto.cid = sp->hdr.cid & RXRPC_CIDMASK;
David Howells8496af52016-07-01 07:51:50 +0100167 conn->params.service_id = sp->hdr.serviceId;
David Howells68d6d1a2017-06-05 14:30:49 +0100168 conn->service_id = sp->hdr.serviceId;
David Howells8496af52016-07-01 07:51:50 +0100169 conn->security_ix = sp->hdr.securityIndex;
170 conn->out_clientflag = 0;
David Howells063c60d2019-12-20 16:17:16 +0000171 conn->security = sec;
David Howells248f2192016-09-08 11:10:12 +0100172 if (conn->security_ix)
David Howells8496af52016-07-01 07:51:50 +0100173 conn->state = RXRPC_CONN_SERVICE_UNSECURED;
David Howells248f2192016-09-08 11:10:12 +0100174 else
175 conn->state = RXRPC_CONN_SERVICE;
David Howells7877a4a2016-04-04 14:00:40 +0100176
David Howells47229742017-06-05 14:30:49 +0100177 /* See if we should upgrade the service. This can only happen on the
178 * first packet on a new connection. Once done, it applies to all
179 * subsequent calls on that connection.
180 */
181 if (sp->hdr.userStatus == RXRPC_USERSTATUS_SERVICE_UPGRADE &&
182 conn->service_id == rx->service_upgrade.from)
183 conn->service_id = rx->service_upgrade.to;
184
David Howells8496af52016-07-01 07:51:50 +0100185 /* Make the connection a target for incoming packets. */
David Howells248f2192016-09-08 11:10:12 +0100186 rxrpc_publish_service_conn(conn->params.peer, conn);
David Howells8496af52016-07-01 07:51:50 +0100187
David Howells248f2192016-09-08 11:10:12 +0100188 _net("CONNECTION new %d {%x}", conn->debug_id, conn->proto.cid);
David Howells7877a4a2016-04-04 14:00:40 +0100189}
David Howells001c1122016-06-30 10:45:22 +0100190
191/*
192 * Remove the service connection from the peer's tree, thereby removing it as a
193 * target for incoming packets.
194 */
195void rxrpc_unpublish_service_conn(struct rxrpc_connection *conn)
196{
197 struct rxrpc_peer *peer = conn->params.peer;
198
David Howells8496af52016-07-01 07:51:50 +0100199 write_seqlock_bh(&peer->service_conn_lock);
David Howells001c1122016-06-30 10:45:22 +0100200 if (test_and_clear_bit(RXRPC_CONN_IN_SERVICE_CONNS, &conn->flags))
201 rb_erase(&conn->service_node, &peer->service_conns);
David Howells8496af52016-07-01 07:51:50 +0100202 write_sequnlock_bh(&peer->service_conn_lock);
David Howells001c1122016-06-30 10:45:22 +0100203}