blob: 7c27aa629af191d44cd1c278b888846d482f1205 [file] [log] [blame]
Thomas Gleixner457c8992019-05-19 13:08:55 +01001// SPDX-License-Identifier: GPL-2.0-only
Dave Watson734942c2017-06-14 11:37:14 -07002/*
3 * Pluggable TCP upper layer protocol support.
4 *
5 * Copyright (c) 2016-2017, Mellanox Technologies. All rights reserved.
6 * Copyright (c) 2016-2017, Dave Watson <davejwatson@fb.com>. All rights reserved.
7 *
8 */
9
Daniel Borkmann1243a512018-10-13 02:45:57 +020010#include <linux/module.h>
Dave Watson734942c2017-06-14 11:37:14 -070011#include <linux/mm.h>
12#include <linux/types.h>
13#include <linux/list.h>
14#include <linux/gfp.h>
15#include <net/tcp.h>
16
17static DEFINE_SPINLOCK(tcp_ulp_list_lock);
18static LIST_HEAD(tcp_ulp_list);
19
/* Simple linear search, don't expect many entries! */
static struct tcp_ulp_ops *tcp_ulp_find(const char *name)
{
	struct tcp_ulp_ops *e;

	/* Callers must hold either rcu_read_lock() or tcp_ulp_list_lock;
	 * the lockdep cookie below tells list_for_each_entry_rcu() that
	 * holding the spinlock is also a valid way to walk the list.
	 */
	list_for_each_entry_rcu(e, &tcp_ulp_list, list,
				lockdep_is_held(&tcp_ulp_list_lock)) {
		if (strcmp(e->name, name) == 0)
			return e;
	}

	return NULL;
}
33
/* Look up a ULP by name, auto-loading the "tcp-ulp-<name>" module if
 * needed and permitted.  On success the owner module's refcount is
 * held; returns NULL if the ULP is unknown or its module is unloading.
 */
static const struct tcp_ulp_ops *__tcp_ulp_find_autoload(const char *name)
{
	const struct tcp_ulp_ops *ulp = NULL;

	rcu_read_lock();
	ulp = tcp_ulp_find(name);

#ifdef CONFIG_MODULES
	/* Only privileged callers may trigger a module load.  Drop the
	 * RCU read lock around request_module(), which may sleep.
	 */
	if (!ulp && capable(CAP_NET_ADMIN)) {
		rcu_read_unlock();
		request_module("tcp-ulp-%s", name);
		rcu_read_lock();
		ulp = tcp_ulp_find(name);
	}
#endif
	/* Pin the owner module before leaving the RCU section so the
	 * ops cannot disappear out from under the caller.
	 */
	if (!ulp || !try_module_get(ulp->owner))
		ulp = NULL;

	rcu_read_unlock();
	return ulp;
}
55
56/* Attach new upper layer protocol to the list
57 * of available protocols.
58 */
59int tcp_register_ulp(struct tcp_ulp_ops *ulp)
60{
61 int ret = 0;
62
63 spin_lock(&tcp_ulp_list_lock);
John Fastabendb11a6322018-02-05 10:17:43 -080064 if (tcp_ulp_find(ulp->name))
Dave Watson734942c2017-06-14 11:37:14 -070065 ret = -EEXIST;
John Fastabendb11a6322018-02-05 10:17:43 -080066 else
Dave Watson734942c2017-06-14 11:37:14 -070067 list_add_tail_rcu(&ulp->list, &tcp_ulp_list);
Dave Watson734942c2017-06-14 11:37:14 -070068 spin_unlock(&tcp_ulp_list_lock);
69
70 return ret;
71}
72EXPORT_SYMBOL_GPL(tcp_register_ulp);
73
/* Remove a ULP from the list of available protocols.
 * synchronize_rcu() guarantees that no RCU reader still sees @ulp
 * when this returns, so the caller may safely free or unload it.
 */
void tcp_unregister_ulp(struct tcp_ulp_ops *ulp)
{
	spin_lock(&tcp_ulp_list_lock);
	list_del_rcu(&ulp->list);
	spin_unlock(&tcp_ulp_list_lock);

	synchronize_rcu();
}
EXPORT_SYMBOL_GPL(tcp_unregister_ulp);
83
/* Build string with list of available upper layer protocol values.
 * Fills @buf (of size @maxlen) with the registered ULP names,
 * space-separated; result is always NUL-terminated.
 */
void tcp_get_available_ulp(char *buf, size_t maxlen)
{
	struct tcp_ulp_ops *ulp_ops;
	size_t offs = 0;

	*buf = '\0';
	rcu_read_lock();
	list_for_each_entry_rcu(ulp_ops, &tcp_ulp_list, list) {
		offs += snprintf(buf + offs, maxlen - offs,
				 "%s%s",
				 offs == 0 ? "" : " ", ulp_ops->name);

		/* snprintf() returns the would-be length, so offs can
		 * exceed maxlen when the buffer is too small; bail out
		 * before buf + offs runs past the end of the buffer.
		 */
		if (WARN_ON_ONCE(offs >= maxlen))
			break;
	}
	rcu_read_unlock();
}
102
John Fastabend33bfe202020-01-11 06:12:01 +0000103void tcp_update_ulp(struct sock *sk, struct proto *proto,
104 void (*write_space)(struct sock *sk))
John Fastabend95fa1452019-07-19 10:29:22 -0700105{
106 struct inet_connection_sock *icsk = inet_csk(sk);
107
John Fastabend95fa1452019-07-19 10:29:22 -0700108 if (icsk->icsk_ulp_ops->update)
John Fastabend33bfe202020-01-11 06:12:01 +0000109 icsk->icsk_ulp_ops->update(sk, proto, write_space);
John Fastabend95fa1452019-07-19 10:29:22 -0700110}
111
Dave Watson734942c2017-06-14 11:37:14 -0700112void tcp_cleanup_ulp(struct sock *sk)
113{
114 struct inet_connection_sock *icsk = inet_csk(sk);
115
Daniel Borkmannaadd4352018-10-16 21:31:35 +0200116 /* No sock_owned_by_me() check here as at the time the
117 * stack calls this function, the socket is dead and
118 * about to be destroyed.
119 */
Dave Watson734942c2017-06-14 11:37:14 -0700120 if (!icsk->icsk_ulp_ops)
121 return;
122
123 if (icsk->icsk_ulp_ops->release)
124 icsk->icsk_ulp_ops->release(sk);
125 module_put(icsk->icsk_ulp_ops->owner);
Daniel Borkmann90545cd2018-08-16 21:49:07 +0200126
127 icsk->icsk_ulp_ops = NULL;
Dave Watson734942c2017-06-14 11:37:14 -0700128}
129
Daniel Borkmann1243a512018-10-13 02:45:57 +0200130static int __tcp_set_ulp(struct sock *sk, const struct tcp_ulp_ops *ulp_ops)
Dave Watson734942c2017-06-14 11:37:14 -0700131{
132 struct inet_connection_sock *icsk = inet_csk(sk);
Daniel Borkmann1243a512018-10-13 02:45:57 +0200133 int err;
134
135 err = -EEXIST;
136 if (icsk->icsk_ulp_ops)
137 goto out_err;
138
139 err = ulp_ops->init(sk);
140 if (err)
141 goto out_err;
142
143 icsk->icsk_ulp_ops = ulp_ops;
144 return 0;
145out_err:
146 module_put(ulp_ops->owner);
147 return err;
148}
149
150int tcp_set_ulp(struct sock *sk, const char *name)
151{
Dave Watson734942c2017-06-14 11:37:14 -0700152 const struct tcp_ulp_ops *ulp_ops;
Dave Watson734942c2017-06-14 11:37:14 -0700153
Daniel Borkmann8b9088f2018-10-13 02:45:56 +0200154 sock_owned_by_me(sk);
Dave Watson734942c2017-06-14 11:37:14 -0700155
156 ulp_ops = __tcp_ulp_find_autoload(name);
157 if (!ulp_ops)
Sabrina Dubroca539a06b2017-08-14 18:04:24 +0200158 return -ENOENT;
Dave Watson734942c2017-06-14 11:37:14 -0700159
Daniel Borkmann1243a512018-10-13 02:45:57 +0200160 return __tcp_set_ulp(sk, ulp_ops);
Dave Watson734942c2017-06-14 11:37:14 -0700161}