/******************************************************************************

(c) 2007 Network Appliance, Inc. All Rights Reserved.
(c) 2009 NetApp. All Rights Reserved.

NetApp provides this source code under the GPL v2 License.
The GPL v2 license is available at
http://opensource.org/licenses/gpl-license.php.

THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

******************************************************************************/

#include <linux/tcp.h>
#include <linux/slab.h>
#include <linux/sunrpc/xprt.h>
#include <linux/export.h>
#include <linux/sunrpc/bc_xprt.h>

#if IS_ENABLED(CONFIG_SUNRPC_DEBUG)
#define RPCDBG_FACILITY RPCDBG_TRANS
#endif

/*
 * Helper routines that track the number of preallocation elements
 * on the transport.
 */
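/*
 * xprt->bc_alloc_count counts the rpc_rqst entries currently preallocated
 * on the transport, while xprt->bc_free_slots counts the free backchannel
 * slots. A freed request is put back on the preallocation list only while
 * bc_alloc_count is below bc_free_slots (see xprt_free_bc_rqst()).
 */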
static inline int xprt_need_to_requeue(struct rpc_xprt *xprt)
{
	return xprt->bc_alloc_count < atomic_read(&xprt->bc_free_slots);
}

static inline void xprt_inc_alloc_count(struct rpc_xprt *xprt, unsigned int n)
{
	atomic_add(n, &xprt->bc_free_slots);
	xprt->bc_alloc_count += n;
}

static inline int xprt_dec_alloc_count(struct rpc_xprt *xprt, unsigned int n)
{
	atomic_sub(n, &xprt->bc_free_slots);
	return xprt->bc_alloc_count -= n;
}

/*
 * Free the preallocated rpc_rqst structure and the memory
 * buffers hanging off of it.
 */
static void xprt_free_allocation(struct rpc_rqst *req)
{
	struct xdr_buf *xbufp;

	dprintk("RPC: free allocations for req= %p\n", req);
	WARN_ON_ONCE(test_bit(RPC_BC_PA_IN_USE, &req->rq_bc_pa_state));
	xbufp = &req->rq_rcv_buf;
	free_page((unsigned long)xbufp->head[0].iov_base);
	xbufp = &req->rq_snd_buf;
	free_page((unsigned long)xbufp->head[0].iov_base);
	kfree(req);
}

static int xprt_alloc_xdr_buf(struct xdr_buf *buf, gfp_t gfp_flags)
{
	struct page *page;
	/* Preallocate one page-sized XDR buffer */
	page = alloc_page(gfp_flags);
	if (page == NULL)
		return -ENOMEM;
	xdr_buf_init(buf, page_address(page), PAGE_SIZE);
	return 0;
}

static
struct rpc_rqst *xprt_alloc_bc_req(struct rpc_xprt *xprt, gfp_t gfp_flags)
{
	struct rpc_rqst *req;

	/* Pre-allocate one backchannel rpc_rqst */
	req = kzalloc(sizeof(*req), gfp_flags);
	if (req == NULL)
		return NULL;

	req->rq_xprt = xprt;
	INIT_LIST_HEAD(&req->rq_bc_list);

	/* Preallocate one XDR receive buffer */
	if (xprt_alloc_xdr_buf(&req->rq_rcv_buf, gfp_flags) < 0) {
		printk(KERN_ERR "Failed to create bc receive xbuf\n");
		goto out_free;
	}
	req->rq_rcv_buf.len = PAGE_SIZE;

	/* Preallocate one XDR send buffer */
	if (xprt_alloc_xdr_buf(&req->rq_snd_buf, gfp_flags) < 0) {
		printk(KERN_ERR "Failed to create bc snd xbuf\n");
		goto out_free;
	}
	return req;
out_free:
	xprt_free_allocation(req);
	return NULL;
}

/*
 * Preallocate up to min_reqs structures and related buffers for use
 * by the backchannel. This function can be called multiple times
 * when creating new sessions that use the same rpc_xprt. The
 * preallocated buffers are added to the pool of resources used by
 * the rpc_xprt. Any one of these resources may be used by an
 * incoming callback request. It's up to the higher levels in the
 * stack to enforce that the maximum number of session slots is not
 * being exceeded.
 *
 * Some callback arguments can be large. For example, a pNFS server
 * using multiple deviceids. The list can be unbounded, but the client
 * has the ability to tell the server the maximum size of the callback
 * requests. Each deviceID is 16 bytes, so one page of arguments has
 * room for up to 256 of them. The NFS client indicates to the pNFS
 * server that its callback requests can be up to 4096 bytes in size.
 */
int xprt_setup_backchannel(struct rpc_xprt *xprt, unsigned int min_reqs)
{
	if (!xprt->ops->bc_setup)
		return 0;
	return xprt->ops->bc_setup(xprt, min_reqs);
}
EXPORT_SYMBOL_GPL(xprt_setup_backchannel);

int xprt_setup_bc(struct rpc_xprt *xprt, unsigned int min_reqs)
{
	struct rpc_rqst *req;
	struct list_head tmp_list;
	int i;

	dprintk("RPC: setup backchannel transport\n");

	/*
	 * We use a temporary list to keep track of the preallocated
	 * buffers. Once we're done building the list we splice it
	 * into the backchannel preallocation list off of the rpc_xprt
	 * struct. This helps minimize the amount of time the list
	 * lock is held on the rpc_xprt struct. It also makes cleanup
	 * easier in case of memory allocation errors.
	 */
	INIT_LIST_HEAD(&tmp_list);
	for (i = 0; i < min_reqs; i++) {
		/* Pre-allocate one backchannel rpc_rqst */
		req = xprt_alloc_bc_req(xprt, GFP_KERNEL);
		if (req == NULL) {
			printk(KERN_ERR "Failed to create bc rpc_rqst\n");
			goto out_free;
		}

		/* Add the allocated buffer to the tmp list */
		dprintk("RPC: adding req= %p\n", req);
		list_add(&req->rq_bc_pa_list, &tmp_list);
	}

	/*
	 * Add the temporary list to the backchannel preallocation list
	 */
	spin_lock(&xprt->bc_pa_lock);
	list_splice(&tmp_list, &xprt->bc_pa_list);
	xprt_inc_alloc_count(xprt, min_reqs);
	spin_unlock(&xprt->bc_pa_lock);

	dprintk("RPC: setup backchannel transport done\n");
	return 0;

out_free:
	/*
	 * Memory allocation failed, free the temporary list
	 */
	while (!list_empty(&tmp_list)) {
		req = list_first_entry(&tmp_list,
				struct rpc_rqst,
				rq_bc_pa_list);
		list_del(&req->rq_bc_pa_list);
		xprt_free_allocation(req);
	}

	dprintk("RPC: setup backchannel transport failed\n");
	return -ENOMEM;
}

/**
 * xprt_destroy_backchannel - Destroys the backchannel preallocated structures.
 * @xprt: the transport holding the preallocated structures
 * @max_reqs: the maximum number of preallocated structures to destroy
 *
 * Since these structures may have been allocated by multiple calls
 * to xprt_setup_backchannel, we only destroy up to the maximum number
 * of reqs specified by the caller.
 */
void xprt_destroy_backchannel(struct rpc_xprt *xprt, unsigned int max_reqs)
{
	if (xprt->ops->bc_destroy)
		xprt->ops->bc_destroy(xprt, max_reqs);
}
EXPORT_SYMBOL_GPL(xprt_destroy_backchannel);

void xprt_destroy_bc(struct rpc_xprt *xprt, unsigned int max_reqs)
{
	struct rpc_rqst *req = NULL, *tmp = NULL;

	dprintk("RPC: destroy backchannel transport\n");

	if (max_reqs == 0)
		goto out;

	spin_lock_bh(&xprt->bc_pa_lock);
	xprt_dec_alloc_count(xprt, max_reqs);
	list_for_each_entry_safe(req, tmp, &xprt->bc_pa_list, rq_bc_pa_list) {
		dprintk("RPC: req=%p\n", req);
		list_del(&req->rq_bc_pa_list);
		xprt_free_allocation(req);
		if (--max_reqs == 0)
			break;
	}
	spin_unlock_bh(&xprt->bc_pa_lock);

out:
	dprintk("RPC: backchannel list empty= %s\n",
		list_empty(&xprt->bc_pa_list) ? "true" : "false");
}

static struct rpc_rqst *xprt_alloc_bc_request(struct rpc_xprt *xprt, __be32 xid)
{
	struct rpc_rqst *req = NULL;

	dprintk("RPC: allocate a backchannel request\n");
	if (atomic_read(&xprt->bc_free_slots) <= 0)
		goto not_found;
	if (list_empty(&xprt->bc_pa_list)) {
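		/*
		 * The preallocated entries are exhausted. This path runs in
		 * soft interrupt context under xprt->bc_pa_lock (see
		 * xprt_lookup_bc_request() below), so the extra request
		 * must be allocated without sleeping.
		 */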
		req = xprt_alloc_bc_req(xprt, GFP_ATOMIC);
		if (!req)
			goto not_found;
		list_add_tail(&req->rq_bc_pa_list, &xprt->bc_pa_list);
		xprt->bc_alloc_count++;
	}
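	/*
	 * Reinitialize the entry for this callback: reset the reply state,
	 * refresh the private receive buffer from rq_rcv_buf, and record
	 * the new XID and connect cookie.
	 */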
	req = list_first_entry(&xprt->bc_pa_list, struct rpc_rqst,
				rq_bc_pa_list);
	req->rq_reply_bytes_recvd = 0;
	memcpy(&req->rq_private_buf, &req->rq_rcv_buf,
			sizeof(req->rq_private_buf));
	req->rq_xid = xid;
	req->rq_connect_cookie = xprt->connect_cookie;
not_found:
	dprintk("RPC: backchannel req=%p\n", req);
	return req;
}

/*
 * Return the preallocated rpc_rqst structure and XDR buffers
 * associated with this rpc_task.
 */
void xprt_free_bc_request(struct rpc_rqst *req)
{
	struct rpc_xprt *xprt = req->rq_xprt;

	xprt->ops->bc_free_rqst(req);
}

void xprt_free_bc_rqst(struct rpc_rqst *req)
{
	struct rpc_xprt *xprt = req->rq_xprt;

	dprintk("RPC: free backchannel req=%p\n", req);

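	/*
	 * Mark the connect cookie stale so that xprt_lookup_bc_request()
	 * will not match this entry until xprt_alloc_bc_request() has
	 * reinitialized it for a new callback.
	 */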
	req->rq_connect_cookie = xprt->connect_cookie - 1;
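	/*
	 * clear_bit() does not imply ordering on its own; the barriers
	 * ensure the update above is visible before the request is
	 * marked as no longer in use.
	 */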
	smp_mb__before_atomic();
	clear_bit(RPC_BC_PA_IN_USE, &req->rq_bc_pa_state);
	smp_mb__after_atomic();

	/*
	 * Return it to the list of preallocations so that it
	 * may be reused by a new callback request.
	 */
	spin_lock_bh(&xprt->bc_pa_lock);
	if (xprt_need_to_requeue(xprt)) {
		list_add_tail(&req->rq_bc_pa_list, &xprt->bc_pa_list);
		xprt->bc_alloc_count++;
		req = NULL;
	}
	spin_unlock_bh(&xprt->bc_pa_lock);
	if (req != NULL) {
		/*
		 * The last remaining session was destroyed while this
		 * entry was in use. Free the entry and don't attempt
		 * to add it back to the list because there is no need
		 * for any more preallocated entries.
		 */
		dprintk("RPC: Last session removed req=%p\n", req);
		xprt_free_allocation(req);
		return;
	}
}

/*
 * One or more rpc_rqst structures have been preallocated during the
 * backchannel setup. Buffer space for the send and private XDR buffers
 * has been preallocated as well. Use xprt_alloc_bc_request() to allocate
 * one of these requests and xprt_free_bc_request() to return it.
 *
 * We know that we're called in soft interrupt context, so we take the
 * plain spin_lock() since there is no need for the bottom-half
 * spin_lock_bh() variant.
 *
 * Return an available rpc_rqst, or NULL if none are available.
 */
struct rpc_rqst *xprt_lookup_bc_request(struct rpc_xprt *xprt, __be32 xid)
{
	struct rpc_rqst *req;

	spin_lock(&xprt->bc_pa_lock);
	list_for_each_entry(req, &xprt->bc_pa_list, rq_bc_pa_list) {
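		/*
		 * Only consider entries initialized for the current
		 * connection; freed or stale entries carry a non-matching
		 * connect cookie and must not be matched by XID.
		 */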
		if (req->rq_connect_cookie != xprt->connect_cookie)
			continue;
		if (req->rq_xid == xid)
			goto found;
	}
	req = xprt_alloc_bc_request(xprt, xid);
found:
	spin_unlock(&xprt->bc_pa_lock);
	return req;
}

/*
 * Add callback request to callback list. The callback
 * service sleeps on the sv_cb_waitq waiting for new
 * requests. Wake it up after enqueuing the request.
 */
void xprt_complete_bc_request(struct rpc_rqst *req, uint32_t copied)
{
	struct rpc_xprt *xprt = req->rq_xprt;
	struct svc_serv *bc_serv = xprt->bc_serv;

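	/*
	 * Take the request off the preallocation list; it now belongs to
	 * the callback service until xprt_free_bc_request() returns it.
	 */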
	spin_lock(&xprt->bc_pa_lock);
	list_del(&req->rq_bc_pa_list);
	xprt_dec_alloc_count(xprt, 1);
	spin_unlock(&xprt->bc_pa_lock);

	req->rq_private_buf.len = copied;
	set_bit(RPC_BC_PA_IN_USE, &req->rq_bc_pa_state);

	dprintk("RPC: add callback request to list\n");
	spin_lock(&bc_serv->sv_cb_lock);
	list_add(&req->rq_bc_list, &bc_serv->sv_cb_list);
	wake_up(&bc_serv->sv_cb_waitq);
	spin_unlock(&bc_serv->sv_cb_lock);
}