/*
 * Copyright (c) 2007 Oracle. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses. You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 * Redistribution and use in source and binary forms, with or
 * without modification, are permitted provided that the following
 * conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 *
 */
#include <linux/slab.h>
#include <linux/types.h>
#include <linux/rbtree.h>
#include <linux/bitops.h>
#include <linux/export.h>

#include "rds.h"

/*
 * This file implements the receive side of the unconventional congestion
 * management in RDS.
 *
 * Messages waiting in the receive queue on the receiving socket are accounted
 * against the socket's SO_RCVBUF option value. Only the payload bytes in the
 * message are accounted for. If the number of bytes queued equals or exceeds
 * rcvbuf then the socket is congested. All sends attempted to this socket's
 * address should then block, or return -EWOULDBLOCK for non-blocking sends.
 *
 * Applications are expected to be reasonably tuned such that this situation
 * very rarely occurs. Encountering this "back-pressure" is considered a sign
 * of an application bug.
 *
 * This is implemented by having each node maintain bitmaps which indicate
 * which ports on bound addresses are congested. As the bitmap changes it is
 * sent through all the connections which terminate in the local address of the
 * bitmap which changed.
 *
 * The bitmaps are allocated as connections are brought up. This avoids
 * allocation in the interrupt handling path which queues messages on sockets.
 * The dense bitmaps let transports send the entire bitmap on any bitmap change
 * reasonably efficiently. This is much easier to implement than some
 * finer-grained communication of per-port congestion. The sender does a very
 * inexpensive bit test to check whether the port it's about to send to is
 * congested or not.
 */

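/*
 * A minimal sketch of the accounting rule described above, assuming the
 * caller already knows the queued payload byte count (hypothetical helper;
 * the real bookkeeping lives in the receive path, not here):
 */
static inline bool rds_cong_example_over_rcvbuf(size_t queued_payload,
                                                size_t so_rcvbuf)
{
        /* Congested once queued payload bytes equal or exceed rcvbuf. */
        return queued_payload >= so_rcvbuf;
}
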
/*
 * Interaction with poll is a tad tricky. We want all processes stuck in
 * poll to wake up and check whether a congested destination became uncongested.
 * The really sad thing is we have no idea which destinations the application
 * wants to send to - we don't even know which rds_connections are involved.
 * So until we implement a more flexible rds poll interface, we have to make
 * do with this:
 * We maintain a global counter that is incremented each time a congestion map
 * update is received. Each rds socket tracks this value, and if rds_poll
 * finds that the saved generation number is smaller than the global generation
 * number, it wakes up the process.
 */
static atomic_t rds_cong_generation = ATOMIC_INIT(0);

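/*
 * Sketch of how the generation counter is meant to be consumed on the
 * poll side, assuming a per-socket saved generation along the lines of
 * rs->rs_cong_track (the real consumer is rds_poll(), which lives
 * elsewhere; this helper is illustrative only):
 */
static inline bool rds_cong_example_poll_check(unsigned long *saved_gen)
{
        /* rds_cong_updated_since() refreshes *saved_gen and returns
         * nonzero when a congestion update arrived since the last poll. */
        return rds_cong_updated_since(saved_gen) != 0;
}
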
/*
 * Congestion monitoring
 */
static LIST_HEAD(rds_cong_monitor);
static DEFINE_RWLOCK(rds_cong_monitor_lock);

/*
 * Yes, a global lock. It's used so infrequently that it's worth keeping it
 * global to simplify the locking. It's only used in the following
 * circumstances:
 *
 * - on connection buildup to associate a conn with its maps
 * - on map changes to inform conns of a new map to send
 *
 * It's sadly ordered under the socket callback lock and the connection lock.
 * Receive paths can mark ports congested from interrupt context so the
 * lock masks interrupts.
 */
static DEFINE_SPINLOCK(rds_cong_lock);
static struct rb_root rds_cong_tree = RB_ROOT;

static struct rds_cong_map *rds_cong_tree_walk(__be32 addr,
                                               struct rds_cong_map *insert)
{
        struct rb_node **p = &rds_cong_tree.rb_node;
        struct rb_node *parent = NULL;
        struct rds_cong_map *map;

        while (*p) {
                parent = *p;
                map = rb_entry(parent, struct rds_cong_map, m_rb_node);

                if (addr < map->m_addr)
                        p = &(*p)->rb_left;
                else if (addr > map->m_addr)
                        p = &(*p)->rb_right;
                else
                        return map;
        }

        if (insert) {
                rb_link_node(&insert->m_rb_node, parent, p);
                rb_insert_color(&insert->m_rb_node, &rds_cong_tree);
        }
        return NULL;
}

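/*
 * Usage note for rds_cong_tree_walk(): with insert == NULL it is a pure
 * lookup (see rds_cong_remove_socket()); with a pre-built map it either
 * inserts it and returns NULL, or returns the map that already exists so
 * the caller can free its duplicate (see rds_cong_from_addr() below).
 */
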
/*
 * There is only ever one bitmap for any address. Connections try to allocate
 * these bitmaps in the process of getting pointers to them. The bitmaps are
 * only ever freed as the module is removed after all connections have been
 * freed.
 */
static struct rds_cong_map *rds_cong_from_addr(__be32 addr)
{
        struct rds_cong_map *map;
        struct rds_cong_map *ret = NULL;
        unsigned long zp;
        unsigned long i;
        unsigned long flags;

        map = kzalloc(sizeof(struct rds_cong_map), GFP_KERNEL);
        if (!map)
                return NULL;

        map->m_addr = addr;
        init_waitqueue_head(&map->m_waitq);
        INIT_LIST_HEAD(&map->m_conn_list);

        for (i = 0; i < RDS_CONG_MAP_PAGES; i++) {
                zp = get_zeroed_page(GFP_KERNEL);
                if (zp == 0)
                        goto out;
                map->m_page_addrs[i] = zp;
        }

        spin_lock_irqsave(&rds_cong_lock, flags);
        ret = rds_cong_tree_walk(addr, map);
        spin_unlock_irqrestore(&rds_cong_lock, flags);

        if (!ret) {
                ret = map;
                map = NULL;
        }

out:
        if (map) {
                for (i = 0; i < RDS_CONG_MAP_PAGES && map->m_page_addrs[i]; i++)
                        free_page(map->m_page_addrs[i]);
                kfree(map);
        }

        rdsdebug("map %p for addr %x\n", ret, be32_to_cpu(addr));

        return ret;
}

/*
 * Put the conn on its local map's list. This is called when the conn is
 * really added to the hash. It's nested under the rds_conn_lock, sadly.
 */
void rds_cong_add_conn(struct rds_connection *conn)
{
        unsigned long flags;

        rdsdebug("conn %p now on map %p\n", conn, conn->c_lcong);
        spin_lock_irqsave(&rds_cong_lock, flags);
        list_add_tail(&conn->c_map_item, &conn->c_lcong->m_conn_list);
        spin_unlock_irqrestore(&rds_cong_lock, flags);
}

void rds_cong_remove_conn(struct rds_connection *conn)
{
        unsigned long flags;

        rdsdebug("removing conn %p from map %p\n", conn, conn->c_lcong);
        spin_lock_irqsave(&rds_cong_lock, flags);
        list_del_init(&conn->c_map_item);
        spin_unlock_irqrestore(&rds_cong_lock, flags);
}

int rds_cong_get_maps(struct rds_connection *conn)
{
        conn->c_lcong = rds_cong_from_addr(conn->c_laddr);
        conn->c_fcong = rds_cong_from_addr(conn->c_faddr);

        if (!(conn->c_lcong && conn->c_fcong))
                return -ENOMEM;

        return 0;
}

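/*
 * Ask every connection terminating at this map's address to transmit the
 * updated bitmap. The actual send is deferred to the send worker; see the
 * comment inside the loop below.
 */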
void rds_cong_queue_updates(struct rds_cong_map *map)
{
        struct rds_connection *conn;
        unsigned long flags;

        spin_lock_irqsave(&rds_cong_lock, flags);

        list_for_each_entry(conn, &map->m_conn_list, c_map_item) {
                struct rds_conn_path *cp = &conn->c_path[0];

                rcu_read_lock();
                if (!test_and_set_bit(0, &conn->c_map_queued) &&
                    !rds_destroy_pending(cp->cp_conn)) {
                        rds_stats_inc(s_cong_update_queued);
                        /* We cannot inline the call to rds_send_xmit() here
                         * for two reasons (both pertaining to a TCP transport):
                         * 1. When we get here from the receive path, we
                         *    are already holding the sock_lock (held by
                         *    tcp_v4_rcv()). So inlining calls to
                         *    tcp_setsockopt and/or tcp_sendmsg will deadlock
                         *    when they try to get the sock_lock().
                         * 2. Interrupts are masked so that we can mark
                         *    the port congested from both send and recv paths.
                         *    (See comment around declaration of rds_cong_lock).
                         *    An attempt to get the sock_lock() here will
                         *    therefore trigger warnings.
                         * Defer the xmit to rds_send_worker() instead.
                         */
                        queue_delayed_work(rds_wq, &cp->cp_send_w, 0);
                }
                rcu_read_unlock();
        }

        spin_unlock_irqrestore(&rds_cong_lock, flags);
}

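/*
 * Called when an updated congestion bitmap has been received for this
 * map's address: bump the generation counter, wake anyone blocked in
 * rds_cong_wait() or in poll, and notify monitoring sockets whose ports
 * appear in portmask.
 */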
void rds_cong_map_updated(struct rds_cong_map *map, uint64_t portmask)
{
        rdsdebug("waking map %p for %pI4\n",
                 map, &map->m_addr);
        rds_stats_inc(s_cong_update_received);
        atomic_inc(&rds_cong_generation);
        if (waitqueue_active(&map->m_waitq))
                wake_up(&map->m_waitq);
        if (waitqueue_active(&rds_poll_waitq))
                wake_up_all(&rds_poll_waitq);

        if (portmask && !list_empty(&rds_cong_monitor)) {
                unsigned long flags;
                struct rds_sock *rs;

                read_lock_irqsave(&rds_cong_monitor_lock, flags);
                list_for_each_entry(rs, &rds_cong_monitor, rs_cong_list) {
                        spin_lock(&rs->rs_lock);
                        rs->rs_cong_notify |= (rs->rs_cong_mask & portmask);
                        rs->rs_cong_mask &= ~portmask;
                        spin_unlock(&rs->rs_lock);
                        if (rs->rs_cong_notify)
                                rds_wake_sk_sleep(rs);
                }
                read_unlock_irqrestore(&rds_cong_monitor_lock, flags);
        }
}
EXPORT_SYMBOL_GPL(rds_cong_map_updated);

int rds_cong_updated_since(unsigned long *recent)
{
        unsigned long gen = atomic_read(&rds_cong_generation);

        if (likely(*recent == gen))
                return 0;
        *recent = gen;
        return 1;
}

/*
 * We're called under the locking that protects the socket's receive buffer
 * consumption. This makes it a lot easier for the caller to only call us
 * when it knows that an existing set bit needs to be cleared, and vice versa.
 * We can't block and we need to deal with concurrent sockets working against
 * the same per-address map.
 */
void rds_cong_set_bit(struct rds_cong_map *map, __be16 port)
{
        unsigned long i;
        unsigned long off;

        rdsdebug("setting congestion for %pI4:%u in map %p\n",
                 &map->m_addr, ntohs(port), map);

        i = be16_to_cpu(port) / RDS_CONG_MAP_PAGE_BITS;
        off = be16_to_cpu(port) % RDS_CONG_MAP_PAGE_BITS;

        set_bit_le(off, (void *)map->m_page_addrs[i]);
}

void rds_cong_clear_bit(struct rds_cong_map *map, __be16 port)
{
        unsigned long i;
        unsigned long off;

        rdsdebug("clearing congestion for %pI4:%u in map %p\n",
                 &map->m_addr, ntohs(port), map);

        i = be16_to_cpu(port) / RDS_CONG_MAP_PAGE_BITS;
        off = be16_to_cpu(port) % RDS_CONG_MAP_PAGE_BITS;

        clear_bit_le(off, (void *)map->m_page_addrs[i]);
}

static int rds_cong_test_bit(struct rds_cong_map *map, __be16 port)
{
        unsigned long i;
        unsigned long off;

        i = be16_to_cpu(port) / RDS_CONG_MAP_PAGE_BITS;
        off = be16_to_cpu(port) % RDS_CONG_MAP_PAGE_BITS;

        return test_bit_le(off, (void *)map->m_page_addrs[i]);
}

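/*
 * Worked example of the page/bit mapping used by the three helpers above,
 * assuming 4K pages (so RDS_CONG_MAP_PAGE_BITS == 32768): port 40000
 * lands in page i = 40000 / 32768 = 1, at bit off = 40000 % 32768 = 7232.
 */
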
void rds_cong_add_socket(struct rds_sock *rs)
{
        unsigned long flags;

        write_lock_irqsave(&rds_cong_monitor_lock, flags);
        if (list_empty(&rs->rs_cong_list))
                list_add(&rs->rs_cong_list, &rds_cong_monitor);
        write_unlock_irqrestore(&rds_cong_monitor_lock, flags);
}

void rds_cong_remove_socket(struct rds_sock *rs)
{
        unsigned long flags;
        struct rds_cong_map *map;

        write_lock_irqsave(&rds_cong_monitor_lock, flags);
        list_del_init(&rs->rs_cong_list);
        write_unlock_irqrestore(&rds_cong_monitor_lock, flags);

        /* update congestion map for now-closed port */
        spin_lock_irqsave(&rds_cong_lock, flags);
        map = rds_cong_tree_walk(rs->rs_bound_addr, NULL);
        spin_unlock_irqrestore(&rds_cong_lock, flags);

        if (map && rds_cong_test_bit(map, rs->rs_bound_port)) {
                rds_cong_clear_bit(map, rs->rs_bound_port);
                rds_cong_queue_updates(map);
        }
}

int rds_cong_wait(struct rds_cong_map *map, __be16 port, int nonblock,
                  struct rds_sock *rs)
{
        if (!rds_cong_test_bit(map, port))
                return 0;
        if (nonblock) {
                if (rs && rs->rs_cong_monitor) {
                        unsigned long flags;

                        /* It would have been nice to have an atomic set_bit on
                         * a uint64_t. */
                        spin_lock_irqsave(&rs->rs_lock, flags);
                        rs->rs_cong_mask |= RDS_CONG_MONITOR_MASK(ntohs(port));
                        spin_unlock_irqrestore(&rs->rs_lock, flags);

                        /* Test again - a congestion update may have arrived in
                         * the meantime. */
                        if (!rds_cong_test_bit(map, port))
                                return 0;
                }
                rds_stats_inc(s_cong_send_error);
                return -ENOBUFS;
        }

        rds_stats_inc(s_cong_send_blocked);
        rdsdebug("waiting on map %p for port %u\n", map, be16_to_cpu(port));

        return wait_event_interruptible(map->m_waitq,
                                        !rds_cong_test_bit(map, port));
}

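/*
 * Sketch of the expected send-path usage of rds_cong_wait(), assuming a
 * caller shaped roughly like the sendmsg path (illustrative helper only,
 * not the actual send code):
 */
static inline int rds_cong_example_send_check(struct rds_connection *conn,
                                              __be16 dport, int nonblock,
                                              struct rds_sock *rs)
{
        /* 0 when the destination port is clear, -ENOBUFS for a congested
         * non-blocking send, -ERESTARTSYS if a blocking wait was
         * interrupted by a signal. */
        return rds_cong_wait(conn->c_fcong, dport, nonblock, rs);
}
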
void rds_cong_exit(void)
{
        struct rb_node *node;
        struct rds_cong_map *map;
        unsigned long i;

        while ((node = rb_first(&rds_cong_tree))) {
                map = rb_entry(node, struct rds_cong_map, m_rb_node);
                rdsdebug("freeing map %p\n", map);
                rb_erase(&map->m_rb_node, &rds_cong_tree);
                for (i = 0; i < RDS_CONG_MAP_PAGES && map->m_page_addrs[i]; i++)
                        free_page(map->m_page_addrs[i]);
                kfree(map);
        }
}

/*
 * Allocate an RDS message containing a congestion update.
 */
struct rds_message *rds_cong_update_alloc(struct rds_connection *conn)
{
        struct rds_cong_map *map = conn->c_lcong;
        struct rds_message *rm;

        rm = rds_message_map_pages(map->m_page_addrs, RDS_CONG_MAP_BYTES);
        if (!IS_ERR(rm))
                rm->m_inc.i_hdr.h_flags = RDS_FLAG_CONG_BITMAP;

        return rm;
}