// SPDX-License-Identifier: GPL-2.0
/*
 * Multipath support for RPC
 *
 * Copyright (c) 2015, 2016, Primary Data, Inc. All rights reserved.
 *
 * Trond Myklebust <trond.myklebust@primarydata.com>
 *
 */
#include <linux/types.h>
#include <linux/kref.h>
#include <linux/list.h>
#include <linux/rcupdate.h>
#include <linux/rculist.h>
#include <linux/slab.h>
#include <asm/cmpxchg.h>
#include <linux/spinlock.h>
#include <linux/sunrpc/xprt.h>
#include <linux/sunrpc/addr.h>
#include <linux/sunrpc/xprtmultipath.h>

#include "sysfs.h"

typedef struct rpc_xprt *(*xprt_switch_find_xprt_t)(struct rpc_xprt_switch *xps,
		const struct rpc_xprt *cur);

static const struct rpc_xprt_iter_ops rpc_xprt_iter_singular;
static const struct rpc_xprt_iter_ops rpc_xprt_iter_roundrobin;
static const struct rpc_xprt_iter_ops rpc_xprt_iter_listall;

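/*
 * Add @xprt to @xps. The caller is expected to have exclusive access to
 * @xps, either by holding xps->xps_lock or because the switch is not yet
 * visible to other threads (as in xprt_switch_alloc()).
 */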
static void xprt_switch_add_xprt_locked(struct rpc_xprt_switch *xps,
		struct rpc_xprt *xprt)
{
	if (unlikely(xprt_get(xprt) == NULL))
		return;
	list_add_tail_rcu(&xprt->xprt_switch, &xps->xps_xprt_list);
	smp_wmb();
	if (xps->xps_nxprts == 0)
		xps->xps_net = xprt->xprt_net;
	xps->xps_nxprts++;
	xps->xps_nactive++;
}

/**
 * rpc_xprt_switch_add_xprt - Add a new rpc_xprt to an rpc_xprt_switch
 * @xps: pointer to struct rpc_xprt_switch
 * @xprt: pointer to struct rpc_xprt
 *
 * Adds xprt to the end of the list of struct rpc_xprt in xps.
 */
void rpc_xprt_switch_add_xprt(struct rpc_xprt_switch *xps,
		struct rpc_xprt *xprt)
{
	if (xprt == NULL)
		return;
	spin_lock(&xps->xps_lock);
	if (xps->xps_net == xprt->xprt_net || xps->xps_net == NULL)
		xprt_switch_add_xprt_locked(xps, xprt);
	spin_unlock(&xps->xps_lock);
	rpc_sysfs_xprt_setup(xps, xprt, GFP_KERNEL);
}

static void xprt_switch_remove_xprt_locked(struct rpc_xprt_switch *xps,
		struct rpc_xprt *xprt)
{
	if (unlikely(xprt == NULL))
		return;
	if (!test_bit(XPRT_OFFLINE, &xprt->state))
		xps->xps_nactive--;
	xps->xps_nxprts--;
	if (xps->xps_nxprts == 0)
		xps->xps_net = NULL;
	smp_wmb();
	list_del_rcu(&xprt->xprt_switch);
}

/**
 * rpc_xprt_switch_remove_xprt - Removes an rpc_xprt from a rpc_xprt_switch
 * @xps: pointer to struct rpc_xprt_switch
 * @xprt: pointer to struct rpc_xprt
 *
 * Removes xprt from the list of struct rpc_xprt in xps.
 */
void rpc_xprt_switch_remove_xprt(struct rpc_xprt_switch *xps,
		struct rpc_xprt *xprt)
{
	spin_lock(&xps->xps_lock);
	xprt_switch_remove_xprt_locked(xps, xprt);
	spin_unlock(&xps->xps_lock);
	xprt_put(xprt);
}

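/*
 * Each rpc_xprt_switch is assigned a unique id from this IDA. The id is
 * stored in xps->xps_id and gives the switch a stable identifier (it is
 * used, for instance, when naming the switch's sysfs entry).
 */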
static DEFINE_IDA(rpc_xprtswitch_ids);

void xprt_multipath_cleanup_ids(void)
{
	ida_destroy(&rpc_xprtswitch_ids);
}

static int xprt_switch_alloc_id(struct rpc_xprt_switch *xps, gfp_t gfp_flags)
{
	int id;

	id = ida_simple_get(&rpc_xprtswitch_ids, 0, 0, gfp_flags);
	if (id < 0)
		return id;

	xps->xps_id = id;
	return 0;
}

static void xprt_switch_free_id(struct rpc_xprt_switch *xps)
{
	ida_simple_remove(&rpc_xprtswitch_ids, xps->xps_id);
}

/**
 * xprt_switch_alloc - Allocate a new struct rpc_xprt_switch
 * @xprt: pointer to struct rpc_xprt
 * @gfp_flags: allocation flags
 *
 * On success, returns an initialised struct rpc_xprt_switch, containing
 * the entry xprt. Returns NULL on failure.
 */
struct rpc_xprt_switch *xprt_switch_alloc(struct rpc_xprt *xprt,
		gfp_t gfp_flags)
{
	struct rpc_xprt_switch *xps;

	xps = kmalloc(sizeof(*xps), gfp_flags);
	if (xps != NULL) {
		spin_lock_init(&xps->xps_lock);
		kref_init(&xps->xps_kref);
		xprt_switch_alloc_id(xps, gfp_flags);
		xps->xps_nxprts = xps->xps_nactive = 0;
		atomic_long_set(&xps->xps_queuelen, 0);
		xps->xps_net = NULL;
		INIT_LIST_HEAD(&xps->xps_xprt_list);
		xps->xps_iter_ops = &rpc_xprt_iter_singular;
		rpc_sysfs_xprt_switch_setup(xps, xprt, gfp_flags);
		xprt_switch_add_xprt_locked(xps, xprt);
		rpc_sysfs_xprt_setup(xps, xprt, gfp_flags);
	}

	return xps;
}

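/*
 * Drop every remaining xprt from the switch, releasing the switch's
 * reference to each one. xps->xps_lock is released around each call to
 * xprt_put() so that the final put never happens under the spinlock.
 */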
static void xprt_switch_free_entries(struct rpc_xprt_switch *xps)
{
	spin_lock(&xps->xps_lock);
	while (!list_empty(&xps->xps_xprt_list)) {
		struct rpc_xprt *xprt;

		xprt = list_first_entry(&xps->xps_xprt_list,
				struct rpc_xprt, xprt_switch);
		xprt_switch_remove_xprt_locked(xps, xprt);
		spin_unlock(&xps->xps_lock);
		xprt_put(xprt);
		spin_lock(&xps->xps_lock);
	}
	spin_unlock(&xps->xps_lock);
}

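/* kref release callback, invoked from xprt_switch_put() on the last put */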
static void xprt_switch_free(struct kref *kref)
{
	struct rpc_xprt_switch *xps = container_of(kref,
			struct rpc_xprt_switch, xps_kref);

	xprt_switch_free_entries(xps);
	rpc_sysfs_xprt_switch_destroy(xps);
	xprt_switch_free_id(xps);
	kfree_rcu(xps, xps_rcu);
}

/**
 * xprt_switch_get - Return a reference to a rpc_xprt_switch
 * @xps: pointer to struct rpc_xprt_switch
 *
 * Returns a reference to xps unless the refcount is already zero.
 */
struct rpc_xprt_switch *xprt_switch_get(struct rpc_xprt_switch *xps)
{
	if (xps != NULL && kref_get_unless_zero(&xps->xps_kref))
		return xps;
	return NULL;
}

/**
 * xprt_switch_put - Release a reference to a rpc_xprt_switch
 * @xps: pointer to struct rpc_xprt_switch
 *
 * Release the reference to xps, and free it once the refcount is zero.
 */
void xprt_switch_put(struct rpc_xprt_switch *xps)
{
	if (xps != NULL)
		kref_put(&xps->xps_kref, xprt_switch_free);
}

/**
 * rpc_xprt_switch_set_roundrobin - Set a round-robin policy on rpc_xprt_switch
 * @xps: pointer to struct rpc_xprt_switch
 *
 * Sets a round-robin default policy for iterators acting on xps.
 */
void rpc_xprt_switch_set_roundrobin(struct rpc_xprt_switch *xps)
{
	if (READ_ONCE(xps->xps_iter_ops) != &rpc_xprt_iter_roundrobin)
		WRITE_ONCE(xps->xps_iter_ops, &rpc_xprt_iter_roundrobin);
}

static
const struct rpc_xprt_iter_ops *xprt_iter_ops(const struct rpc_xprt_iter *xpi)
{
	if (xpi->xpi_ops != NULL)
		return xpi->xpi_ops;
	return rcu_dereference(xpi->xpi_xpswitch)->xps_iter_ops;
}

static
void xprt_iter_no_rewind(struct rpc_xprt_iter *xpi)
{
}

static
void xprt_iter_default_rewind(struct rpc_xprt_iter *xpi)
{
	WRITE_ONCE(xpi->xpi_cursor, NULL);
}

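/*
 * A transport is considered active while it still holds references and has
 * not been marked XPRT_OFFLINE. Inactive entries are skipped by the
 * iterators below.
 */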
static
bool xprt_is_active(const struct rpc_xprt *xprt)
{
	return (kref_read(&xprt->kref) != 0 &&
		!test_bit(XPRT_OFFLINE, &xprt->state));
}

static
struct rpc_xprt *xprt_switch_find_first_entry(struct list_head *head)
{
	struct rpc_xprt *pos;

	list_for_each_entry_rcu(pos, head, xprt_switch) {
		if (xprt_is_active(pos))
			return pos;
	}
	return NULL;
}

static
struct rpc_xprt *xprt_iter_first_entry(struct rpc_xprt_iter *xpi)
{
	struct rpc_xprt_switch *xps = rcu_dereference(xpi->xpi_xpswitch);

	if (xps == NULL)
		return NULL;
	return xprt_switch_find_first_entry(&xps->xps_xprt_list);
}

static
struct rpc_xprt *xprt_switch_find_current_entry(struct list_head *head,
		const struct rpc_xprt *cur)
{
	struct rpc_xprt *pos;
	bool found = false;

	list_for_each_entry_rcu(pos, head, xprt_switch) {
		if (cur == pos)
			found = true;
		if (found && xprt_is_active(pos))
			return pos;
	}
	return NULL;
}

static
struct rpc_xprt *xprt_iter_current_entry(struct rpc_xprt_iter *xpi)
{
	struct rpc_xprt_switch *xps = rcu_dereference(xpi->xpi_xpswitch);
	struct list_head *head;

	if (xps == NULL)
		return NULL;
	head = &xps->xps_xprt_list;
	if (xpi->xpi_cursor == NULL || xps->xps_nxprts < 2)
		return xprt_switch_find_first_entry(head);
	return xprt_switch_find_current_entry(head, xpi->xpi_cursor);
}

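/**
 * rpc_xprt_switch_has_addr - Check whether an address is already in the switch
 * @xps: pointer to struct rpc_xprt_switch
 * @sap: address (and port) to look for
 *
 * Returns true if a transport matching both the address and port of @sap is
 * already present in @xps, and false otherwise.
 */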
bool rpc_xprt_switch_has_addr(struct rpc_xprt_switch *xps,
		const struct sockaddr *sap)
{
	struct list_head *head;
	struct rpc_xprt *pos;

	if (xps == NULL || sap == NULL)
		return false;

	head = &xps->xps_xprt_list;
	list_for_each_entry_rcu(pos, head, xprt_switch) {
		if (rpc_cmp_addr_port(sap, (struct sockaddr *)&pos->addr)) {
			pr_info("RPC: addr %s already in xprt switch\n",
				pos->address_strings[RPC_DISPLAY_ADDR]);
			return true;
		}
	}
	return false;
}

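/*
 * Return the first active transport that follows @cur in @head, or NULL if
 * @cur has no active successor. A NULL @cur yields the first active entry
 * in the list.
 */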
static
struct rpc_xprt *xprt_switch_find_next_entry(struct list_head *head,
		const struct rpc_xprt *cur)
{
	struct rpc_xprt *pos, *prev = NULL;
	bool found = false;

	list_for_each_entry_rcu(pos, head, xprt_switch) {
		if (cur == prev)
			found = true;
		if (found && xprt_is_active(pos))
			return pos;
		prev = pos;
	}
	return NULL;
}

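/*
 * Advance @cursor to the entry chosen by @find_next. The cursor is read
 * with acquire semantics and updated with release semantics so that a
 * concurrent reader always observes a consistent cursor value.
 */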
static
struct rpc_xprt *xprt_switch_set_next_cursor(struct rpc_xprt_switch *xps,
		struct rpc_xprt **cursor,
		xprt_switch_find_xprt_t find_next)
{
	struct rpc_xprt *pos, *old;

	old = smp_load_acquire(cursor);
	pos = find_next(xps, old);
	smp_store_release(cursor, pos);
	return pos;
}

static
struct rpc_xprt *xprt_iter_next_entry_multiple(struct rpc_xprt_iter *xpi,
		xprt_switch_find_xprt_t find_next)
{
	struct rpc_xprt_switch *xps = rcu_dereference(xpi->xpi_xpswitch);

	if (xps == NULL)
		return NULL;
	return xprt_switch_set_next_cursor(xps, &xpi->xpi_cursor, find_next);
}

static
struct rpc_xprt *__xprt_switch_find_next_entry_roundrobin(struct list_head *head,
		const struct rpc_xprt *cur)
{
	struct rpc_xprt *ret;

	ret = xprt_switch_find_next_entry(head, cur);
	if (ret != NULL)
		return ret;
	return xprt_switch_find_first_entry(head);
}

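/*
 * Round-robin with a simple load-balancing heuristic: keep advancing past
 * transports whose queue length exceeds the per-transport average, i.e.
 * xps_queuelen / nactive. The check is written as
 * xprt_queuelen * nactive <= xps_queuelen to avoid the division.
 */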
static
struct rpc_xprt *xprt_switch_find_next_entry_roundrobin(struct rpc_xprt_switch *xps,
		const struct rpc_xprt *cur)
{
	struct list_head *head = &xps->xps_xprt_list;
	struct rpc_xprt *xprt;
	unsigned int nactive;

	for (;;) {
		unsigned long xprt_queuelen, xps_queuelen;

		xprt = __xprt_switch_find_next_entry_roundrobin(head, cur);
		if (!xprt)
			break;
		xprt_queuelen = atomic_long_read(&xprt->queuelen);
		xps_queuelen = atomic_long_read(&xps->xps_queuelen);
		nactive = READ_ONCE(xps->xps_nactive);
		/* Exit loop if xprt_queuelen <= average queue length */
		if (xprt_queuelen * nactive <= xps_queuelen)
			break;
		cur = xprt;
	}
	return xprt;
}

static
struct rpc_xprt *xprt_iter_next_entry_roundrobin(struct rpc_xprt_iter *xpi)
{
	return xprt_iter_next_entry_multiple(xpi,
			xprt_switch_find_next_entry_roundrobin);
}

static
struct rpc_xprt *xprt_switch_find_next_entry_all(struct rpc_xprt_switch *xps,
		const struct rpc_xprt *cur)
{
	return xprt_switch_find_next_entry(&xps->xps_xprt_list, cur);
}

static
struct rpc_xprt *xprt_iter_next_entry_all(struct rpc_xprt_iter *xpi)
{
	return xprt_iter_next_entry_multiple(xpi,
			xprt_switch_find_next_entry_all);
}

/*
 * xprt_iter_rewind - Resets the xprt iterator
 * @xpi: pointer to rpc_xprt_iter
 *
 * Resets xpi to ensure that it points to the first entry in the list
 * of transports.
 */
static
void xprt_iter_rewind(struct rpc_xprt_iter *xpi)
{
	rcu_read_lock();
	xprt_iter_ops(xpi)->xpi_rewind(xpi);
	rcu_read_unlock();
}

static void __xprt_iter_init(struct rpc_xprt_iter *xpi,
		struct rpc_xprt_switch *xps,
		const struct rpc_xprt_iter_ops *ops)
{
	rcu_assign_pointer(xpi->xpi_xpswitch, xprt_switch_get(xps));
	xpi->xpi_cursor = NULL;
	xpi->xpi_ops = ops;
}

/**
 * xprt_iter_init - Initialise an xprt iterator
 * @xpi: pointer to rpc_xprt_iter
 * @xps: pointer to rpc_xprt_switch
 *
 * Initialises the iterator to use the default iterator ops
 * as set in xps. This function is mainly intended for internal
 * use in the rpc_client.
 */
void xprt_iter_init(struct rpc_xprt_iter *xpi,
		struct rpc_xprt_switch *xps)
{
	__xprt_iter_init(xpi, xps, NULL);
}

/**
 * xprt_iter_init_listall - Initialise an xprt iterator
 * @xpi: pointer to rpc_xprt_iter
 * @xps: pointer to rpc_xprt_switch
 *
 * Initialises the iterator to iterate once through the entire list
 * of entries in xps.
 */
void xprt_iter_init_listall(struct rpc_xprt_iter *xpi,
		struct rpc_xprt_switch *xps)
{
	__xprt_iter_init(xpi, xps, &rpc_xprt_iter_listall);
}

/**
 * xprt_iter_xchg_switch - Atomically swap out the rpc_xprt_switch
 * @xpi: pointer to rpc_xprt_iter
 * @newswitch: pointer to a new rpc_xprt_switch or NULL
 *
 * Swaps out the existing xpi->xpi_xpswitch with a new value.
 */
struct rpc_xprt_switch *xprt_iter_xchg_switch(struct rpc_xprt_iter *xpi,
		struct rpc_xprt_switch *newswitch)
{
	struct rpc_xprt_switch __rcu *oldswitch;

	/* Atomically swap out the old xpswitch */
	oldswitch = xchg(&xpi->xpi_xpswitch, RCU_INITIALIZER(newswitch));
	if (newswitch != NULL)
		xprt_iter_rewind(xpi);
	return rcu_dereference_protected(oldswitch, true);
}

/**
 * xprt_iter_destroy - Destroys the xprt iterator
 * @xpi: pointer to rpc_xprt_iter
 */
void xprt_iter_destroy(struct rpc_xprt_iter *xpi)
{
	xprt_switch_put(xprt_iter_xchg_switch(xpi, NULL));
}

/**
 * xprt_iter_xprt - Returns the rpc_xprt pointed to by the cursor
 * @xpi: pointer to rpc_xprt_iter
 *
 * Returns a pointer to the struct rpc_xprt that is currently
 * pointed to by the cursor.
 * Caller must be holding rcu_read_lock().
 */
struct rpc_xprt *xprt_iter_xprt(struct rpc_xprt_iter *xpi)
{
	WARN_ON_ONCE(!rcu_read_lock_held());
	return xprt_iter_ops(xpi)->xpi_xprt(xpi);
}

static
struct rpc_xprt *xprt_iter_get_helper(struct rpc_xprt_iter *xpi,
		struct rpc_xprt *(*fn)(struct rpc_xprt_iter *))
{
	struct rpc_xprt *ret;

	do {
		ret = fn(xpi);
		if (ret == NULL)
			break;
		ret = xprt_get(ret);
	} while (ret == NULL);
	return ret;
}

/**
 * xprt_iter_get_xprt - Returns the rpc_xprt pointed to by the cursor
 * @xpi: pointer to rpc_xprt_iter
 *
 * Returns a reference to the struct rpc_xprt that is currently
 * pointed to by the cursor.
 */
struct rpc_xprt *xprt_iter_get_xprt(struct rpc_xprt_iter *xpi)
{
	struct rpc_xprt *xprt;

	rcu_read_lock();
	xprt = xprt_iter_get_helper(xpi, xprt_iter_ops(xpi)->xpi_xprt);
	rcu_read_unlock();
	return xprt;
}

/**
 * xprt_iter_get_next - Returns the next rpc_xprt following the cursor
 * @xpi: pointer to rpc_xprt_iter
 *
 * Returns a reference to the struct rpc_xprt that immediately follows the
 * entry pointed to by the cursor.
 */
struct rpc_xprt *xprt_iter_get_next(struct rpc_xprt_iter *xpi)
{
	struct rpc_xprt *xprt;

	rcu_read_lock();
	xprt = xprt_iter_get_helper(xpi, xprt_iter_ops(xpi)->xpi_next);
	rcu_read_unlock();
	return xprt;
}

/* Policy for always returning the first entry in the rpc_xprt_switch */
static
const struct rpc_xprt_iter_ops rpc_xprt_iter_singular = {
	.xpi_rewind = xprt_iter_no_rewind,
	.xpi_xprt = xprt_iter_first_entry,
	.xpi_next = xprt_iter_first_entry,
};

/* Policy for round-robin iteration of entries in the rpc_xprt_switch */
static
const struct rpc_xprt_iter_ops rpc_xprt_iter_roundrobin = {
	.xpi_rewind = xprt_iter_default_rewind,
	.xpi_xprt = xprt_iter_current_entry,
	.xpi_next = xprt_iter_next_entry_roundrobin,
};

/* Policy for once-through iteration of entries in the rpc_xprt_switch */
static
const struct rpc_xprt_iter_ops rpc_xprt_iter_listall = {
	.xpi_rewind = xprt_iter_default_rewind,
	.xpi_xprt = xprt_iter_current_entry,
	.xpi_next = xprt_iter_next_entry_all,
};
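
/*
 * Example usage (illustrative sketch only; do_something() is a placeholder,
 * not part of this API): walk every active transport on a switch once,
 * using the list-all policy. Each xprt returned by xprt_iter_get_next()
 * carries a reference and must be released with xprt_put(); the iterator
 * drops its own reference to the switch in xprt_iter_destroy().
 *
 *	struct rpc_xprt_iter xpi;
 *	struct rpc_xprt *xprt;
 *
 *	xprt_iter_init_listall(&xpi, xps);
 *	while ((xprt = xprt_iter_get_next(&xpi)) != NULL) {
 *		do_something(xprt);
 *		xprt_put(xprt);
 *	}
 *	xprt_iter_destroy(&xpi);
 */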