/*
 * Copyright (c) 2006 Oracle.  All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 *
 */
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/rculist.h>
#include <linux/llist.h>

#include "ib_mr.h"

struct workqueue_struct *rds_ib_mr_wq;
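
/*
 * Each CPU sets CLEAN_LIST_BUSY_BIT in its clean_list_grace word while
 * it is pulling an MR off a pool's clean_list.  The flush path spins in
 * wait_clean_list_grace() until every CPU has cleared its bit before it
 * adds nodes back, so a concurrent llist_del_first() can never race
 * with the re-insertion of the node it is operating on.
 */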
static DEFINE_PER_CPU(unsigned long, clean_list_grace);
#define CLEAN_LIST_BUSY_BIT 0
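
/*
 * Find the rds_ib_device bound to the given IP address, under RCU.
 * On success the device is returned with its refcount raised; the
 * caller must drop it with rds_ib_dev_put().
 */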
static struct rds_ib_device *rds_ib_get_device(__be32 ipaddr)
{
	struct rds_ib_device *rds_ibdev;
	struct rds_ib_ipaddr *i_ipaddr;

	rcu_read_lock();
	list_for_each_entry_rcu(rds_ibdev, &rds_ib_devices, list) {
		list_for_each_entry_rcu(i_ipaddr, &rds_ibdev->ipaddr_list, list) {
			if (i_ipaddr->ipaddr == ipaddr) {
				atomic_inc(&rds_ibdev->refcount);
				rcu_read_unlock();
				return rds_ibdev;
			}
		}
	}
	rcu_read_unlock();

	return NULL;
}

static int rds_ib_add_ipaddr(struct rds_ib_device *rds_ibdev, __be32 ipaddr)
{
	struct rds_ib_ipaddr *i_ipaddr;

	i_ipaddr = kmalloc(sizeof(*i_ipaddr), GFP_KERNEL);
	if (!i_ipaddr)
		return -ENOMEM;

	i_ipaddr->ipaddr = ipaddr;

	spin_lock_irq(&rds_ibdev->spinlock);
	list_add_tail_rcu(&i_ipaddr->list, &rds_ibdev->ipaddr_list);
	spin_unlock_irq(&rds_ibdev->spinlock);

	return 0;
}

static void rds_ib_remove_ipaddr(struct rds_ib_device *rds_ibdev, __be32 ipaddr)
{
	struct rds_ib_ipaddr *i_ipaddr;
	struct rds_ib_ipaddr *to_free = NULL;

	spin_lock_irq(&rds_ibdev->spinlock);
	list_for_each_entry_rcu(i_ipaddr, &rds_ibdev->ipaddr_list, list) {
		if (i_ipaddr->ipaddr == ipaddr) {
			list_del_rcu(&i_ipaddr->list);
			to_free = i_ipaddr;
			break;
		}
	}
	spin_unlock_irq(&rds_ibdev->spinlock);

	if (to_free)
		kfree_rcu(to_free, rcu);
}
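
/*
 * Move an IP address binding to rds_ibdev, removing it first from
 * whichever device currently owns it (if any).
 */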
int rds_ib_update_ipaddr(struct rds_ib_device *rds_ibdev, __be32 ipaddr)
{
	struct rds_ib_device *rds_ibdev_old;

	rds_ibdev_old = rds_ib_get_device(ipaddr);
	if (!rds_ibdev_old)
		return rds_ib_add_ipaddr(rds_ibdev, ipaddr);

	if (rds_ibdev_old != rds_ibdev) {
		rds_ib_remove_ipaddr(rds_ibdev_old, ipaddr);
		rds_ib_dev_put(rds_ibdev_old);
		return rds_ib_add_ipaddr(rds_ibdev, ipaddr);
	}
	rds_ib_dev_put(rds_ibdev_old);

	return 0;
}
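
/*
 * Bind a connection to its IB device: move it from the global
 * ib_nodev_conns list onto the device's conn_list, and take a device
 * reference that rds_ib_remove_conn() later drops.
 */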
void rds_ib_add_conn(struct rds_ib_device *rds_ibdev, struct rds_connection *conn)
{
	struct rds_ib_connection *ic = conn->c_transport_data;

	/* conn was previously on the nodev_conns_list */
	spin_lock_irq(&ib_nodev_conns_lock);
	BUG_ON(list_empty(&ib_nodev_conns));
	BUG_ON(list_empty(&ic->ib_node));
	list_del(&ic->ib_node);

	spin_lock(&rds_ibdev->spinlock);
	list_add_tail(&ic->ib_node, &rds_ibdev->conn_list);
	spin_unlock(&rds_ibdev->spinlock);
	spin_unlock_irq(&ib_nodev_conns_lock);

	ic->rds_ibdev = rds_ibdev;
	atomic_inc(&rds_ibdev->refcount);
}

void rds_ib_remove_conn(struct rds_ib_device *rds_ibdev, struct rds_connection *conn)
{
	struct rds_ib_connection *ic = conn->c_transport_data;

	/* place conn on nodev_conns_list */
	spin_lock(&ib_nodev_conns_lock);

	spin_lock_irq(&rds_ibdev->spinlock);
	BUG_ON(list_empty(&ic->ib_node));
	list_del(&ic->ib_node);
	spin_unlock_irq(&rds_ibdev->spinlock);

	list_add_tail(&ic->ib_node, &ib_nodev_conns);

	spin_unlock(&ib_nodev_conns_lock);

	ic->rds_ibdev = NULL;
	rds_ib_dev_put(rds_ibdev);
}
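
/*
 * Destroy every connection that is not currently bound to an IB
 * device.  The list is spliced to a private head first so that
 * rds_conn_destroy() is not called with the lock held and interrupts
 * disabled.
 */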
void rds_ib_destroy_nodev_conns(void)
{
	struct rds_ib_connection *ic, *_ic;
	LIST_HEAD(tmp_list);

	/* avoid calling conn_destroy with irqs off */
	spin_lock_irq(&ib_nodev_conns_lock);
	list_splice(&ib_nodev_conns, &tmp_list);
	spin_unlock_irq(&ib_nodev_conns_lock);

	list_for_each_entry_safe(ic, _ic, &tmp_list, ib_node)
		rds_conn_destroy(ic->conn);
}

void rds_ib_get_mr_info(struct rds_ib_device *rds_ibdev, struct rds_info_rdma_connection *iinfo)
{
	struct rds_ib_mr_pool *pool_1m = rds_ibdev->mr_1m_pool;

	iinfo->rdma_mr_max = pool_1m->max_items;
	iinfo->rdma_mr_size = pool_1m->fmr_attr.max_pages;
}
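
/*
 * Try to grab an MR from the pool's clean_list.  The per-CPU
 * CLEAN_LIST_BUSY_BIT is held (with preemption disabled) around
 * llist_del_first() so the flush path can wait for us to finish
 * before it re-inserts nodes into the list.
 */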
struct rds_ib_mr *rds_ib_reuse_mr(struct rds_ib_mr_pool *pool)
{
	struct rds_ib_mr *ibmr = NULL;
	struct llist_node *ret;
	unsigned long *flag;

	preempt_disable();
	flag = this_cpu_ptr(&clean_list_grace);
	set_bit(CLEAN_LIST_BUSY_BIT, flag);
	ret = llist_del_first(&pool->clean_list);
	if (ret)
		ibmr = llist_entry(ret, struct rds_ib_mr, llnode);

	clear_bit(CLEAN_LIST_BUSY_BIT, flag);
	preempt_enable();
	return ibmr;
}
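
/*
 * Spin until no CPU has its CLEAN_LIST_BUSY_BIT set.  Once this
 * returns, any CPU that was inside llist_del_first() on a clean_list
 * when we started has finished, so removed nodes can safely be added
 * back.
 */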
static inline void wait_clean_list_grace(void)
{
	int cpu;
	unsigned long *flag;

	for_each_online_cpu(cpu) {
		flag = &per_cpu(clean_list_grace, cpu);
		while (test_bit(CLEAN_LIST_BUSY_BIT, flag))
			cpu_relax();
	}
}
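
/*
 * DMA-sync an MR's pages for CPU or device access.  The region is
 * mapped bidirectionally, so both cases sync with DMA_BIDIRECTIONAL
 * regardless of the direction hint.
 */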
void rds_ib_sync_mr(void *trans_private, int direction)
{
	struct rds_ib_mr *ibmr = trans_private;
	struct rds_ib_device *rds_ibdev = ibmr->device;

	switch (direction) {
	case DMA_FROM_DEVICE:
		ib_dma_sync_sg_for_cpu(rds_ibdev->dev, ibmr->sg,
			ibmr->sg_dma_len, DMA_BIDIRECTIONAL);
		break;
	case DMA_TO_DEVICE:
		ib_dma_sync_sg_for_device(rds_ibdev->dev, ibmr->sg,
			ibmr->sg_dma_len, DMA_BIDIRECTIONAL);
		break;
	}
}
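
/*
 * Unmap an MR's scatterlist and release its pinned pages.  Pages are
 * dirtied before release because the remote side may have written to
 * them; rds_ib_teardown_mr() accounts the unpinned pages against the
 * pool.
 */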
void __rds_ib_teardown_mr(struct rds_ib_mr *ibmr)
{
	struct rds_ib_device *rds_ibdev = ibmr->device;

	if (ibmr->sg_dma_len) {
		ib_dma_unmap_sg(rds_ibdev->dev,
				ibmr->sg, ibmr->sg_len,
				DMA_BIDIRECTIONAL);
		ibmr->sg_dma_len = 0;
	}

	/* Release the s/g list */
	if (ibmr->sg_len) {
		unsigned int i;

		for (i = 0; i < ibmr->sg_len; ++i) {
			struct page *page = sg_page(&ibmr->sg[i]);

			/* FIXME we need a way to tell a r/w MR
			 * from a r/o MR */
			WARN_ON(!page->mapping && irqs_disabled());
			set_page_dirty(page);
			put_page(page);
		}
		kfree(ibmr->sg);

		ibmr->sg = NULL;
		ibmr->sg_len = 0;
	}
}

void rds_ib_teardown_mr(struct rds_ib_mr *ibmr)
{
	unsigned int pinned = ibmr->sg_len;

	__rds_ib_teardown_mr(ibmr);
	if (pinned) {
		struct rds_ib_mr_pool *pool = ibmr->pool;

		atomic_sub(pinned, &pool->free_pinned);
	}
}

static inline unsigned int rds_ib_flush_goal(struct rds_ib_mr_pool *pool, int free_all)
{
	unsigned int item_count;

	item_count = atomic_read(&pool->item_count);
	if (free_all)
		return item_count;

	return 0;
}

/*
 * Given an llist of MRs, move them all onto the list_head for further
 * processing.  Returns the number of MRs moved.
 */
static unsigned int llist_append_to_list(struct llist_head *llist,
					 struct list_head *list)
{
	struct rds_ib_mr *ibmr;
	struct llist_node *node;
	struct llist_node *next;
	unsigned int count = 0;

	node = llist_del_all(llist);
	while (node) {
		next = node->next;
		ibmr = llist_entry(node, struct rds_ib_mr, llnode);
		list_add_tail(&ibmr->unmap_list, list);
		node = next;
		count++;
	}
	return count;
}

/*
 * Walk a list_head of MRs and chain their llist nodes together,
 * returning the head and tail of the run so the caller can splice the
 * whole thing onto the clean_list with llist_add_batch().
 */
static void list_to_llist_nodes(struct rds_ib_mr_pool *pool,
				struct list_head *list,
				struct llist_node **nodes_head,
				struct llist_node **nodes_tail)
{
	struct rds_ib_mr *ibmr;
	struct llist_node *cur = NULL;
	struct llist_node **next = nodes_head;

	list_for_each_entry(ibmr, list, unmap_list) {
		cur = &ibmr->llnode;
		*next = cur;
		next = &cur->next;
	}
	*next = NULL;
	*nodes_tail = cur;
}

/*
 * Flush our pool of MRs.
 * At a minimum, all currently unused MRs are unmapped.
 * If the number of MRs allocated exceeds the limit, we also try
 * to free as many MRs as needed to get back to this limit.
 */
int rds_ib_flush_mr_pool(struct rds_ib_mr_pool *pool,
			 int free_all, struct rds_ib_mr **ibmr_ret)
{
	struct rds_ib_mr *ibmr;
	struct llist_node *clean_nodes;
	struct llist_node *clean_tail;
	LIST_HEAD(unmap_list);
	unsigned long unpinned = 0;
	unsigned int nfreed = 0, dirty_to_clean = 0, free_goal;

	if (pool->pool_type == RDS_IB_MR_8K_POOL)
		rds_ib_stats_inc(s_ib_rdma_mr_8k_pool_flush);
	else
		rds_ib_stats_inc(s_ib_rdma_mr_1m_pool_flush);

	if (ibmr_ret) {
		DEFINE_WAIT(wait);

		while (!mutex_trylock(&pool->flush_lock)) {
			ibmr = rds_ib_reuse_mr(pool);
			if (ibmr) {
				*ibmr_ret = ibmr;
				finish_wait(&pool->flush_wait, &wait);
				goto out_nolock;
			}

			prepare_to_wait(&pool->flush_wait, &wait,
					TASK_UNINTERRUPTIBLE);
			if (llist_empty(&pool->clean_list))
				schedule();

			ibmr = rds_ib_reuse_mr(pool);
			if (ibmr) {
				*ibmr_ret = ibmr;
				finish_wait(&pool->flush_wait, &wait);
				goto out_nolock;
			}
		}
		finish_wait(&pool->flush_wait, &wait);
	} else {
		mutex_lock(&pool->flush_lock);
	}

	if (ibmr_ret) {
		ibmr = rds_ib_reuse_mr(pool);
		if (ibmr) {
			*ibmr_ret = ibmr;
			goto out;
		}
	}

	/* Get the list of all MRs to be dropped. Ordering matters -
	 * we want to put drop_list ahead of free_list.
	 */
	dirty_to_clean = llist_append_to_list(&pool->drop_list, &unmap_list);
	dirty_to_clean += llist_append_to_list(&pool->free_list, &unmap_list);
	if (free_all)
		llist_append_to_list(&pool->clean_list, &unmap_list);

	free_goal = rds_ib_flush_goal(pool, free_all);

	if (list_empty(&unmap_list))
		goto out;

	rds_ib_unreg_fmr(&unmap_list, &nfreed, &unpinned, free_goal);

	if (!list_empty(&unmap_list)) {
		/* we have to make sure that none of the things we're about
		 * to put on the clean list would race with other cpus trying
		 * to pull items off.  The llist would explode if we managed to
		 * remove something from the clean list and then add it back again
		 * while another CPU was spinning on that same item in llist_del_first.
		 *
		 * This is pretty unlikely, but just in case wait for an llist grace period
		 * here before adding anything back into the clean list.
		 */
		wait_clean_list_grace();

		list_to_llist_nodes(pool, &unmap_list, &clean_nodes, &clean_tail);
		if (ibmr_ret)
			*ibmr_ret = llist_entry(clean_nodes, struct rds_ib_mr, llnode);

		/* more than one entry in llist nodes */
		if (clean_nodes->next)
			llist_add_batch(clean_nodes->next, clean_tail, &pool->clean_list);
	}

	atomic_sub(unpinned, &pool->free_pinned);
	atomic_sub(dirty_to_clean, &pool->dirty_count);
	atomic_sub(nfreed, &pool->item_count);

out:
	mutex_unlock(&pool->flush_lock);
	if (waitqueue_active(&pool->flush_wait))
		wake_up(&pool->flush_wait);
out_nolock:
	return 0;
}
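
/*
 * Get an MR for use, preferring a clean one from the pool.  Returns a
 * reusable MR on success; returns NULL (with item_count already
 * charged) when the caller should allocate a fresh MR; returns
 * ERR_PTR(-EAGAIN) if the pool is still depleted after two flush
 * attempts.
 */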
struct rds_ib_mr *rds_ib_try_reuse_ibmr(struct rds_ib_mr_pool *pool)
{
	struct rds_ib_mr *ibmr = NULL;
	int iter = 0;

	if (atomic_read(&pool->dirty_count) >= pool->max_items_soft / 10)
		queue_delayed_work(rds_ib_mr_wq, &pool->flush_worker, 10);

	while (1) {
		ibmr = rds_ib_reuse_mr(pool);
		if (ibmr)
			return ibmr;

		if (atomic_inc_return(&pool->item_count) <= pool->max_items)
			break;

		atomic_dec(&pool->item_count);

		if (++iter > 2) {
			if (pool->pool_type == RDS_IB_MR_8K_POOL)
				rds_ib_stats_inc(s_ib_rdma_mr_8k_pool_depleted);
			else
				rds_ib_stats_inc(s_ib_rdma_mr_1m_pool_depleted);
			return ERR_PTR(-EAGAIN);
		}

		/* We do have some empty MRs. Flush them out. */
		if (pool->pool_type == RDS_IB_MR_8K_POOL)
			rds_ib_stats_inc(s_ib_rdma_mr_8k_pool_wait);
		else
			rds_ib_stats_inc(s_ib_rdma_mr_1m_pool_wait);

		rds_ib_flush_mr_pool(pool, 0, &ibmr);
		if (ibmr)
			return ibmr;
	}

	return ibmr;
}

static void rds_ib_mr_pool_flush_worker(struct work_struct *work)
{
	struct rds_ib_mr_pool *pool = container_of(work, struct rds_ib_mr_pool, flush_worker.work);

	rds_ib_flush_mr_pool(pool, 0, NULL);
}
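
/*
 * Return an MR to its pool when the caller is done with it.  The MR is
 * queued on the pool's free or drop list rather than unmapped
 * immediately; a flush is scheduled (or run synchronously when
 * invalidate is set and we are not in interrupt context) once too many
 * pages are pinned or dirty.
 */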
void rds_ib_free_mr(void *trans_private, int invalidate)
{
	struct rds_ib_mr *ibmr = trans_private;
	struct rds_ib_mr_pool *pool = ibmr->pool;
	struct rds_ib_device *rds_ibdev = ibmr->device;

	rdsdebug("RDS/IB: free_mr nents %u\n", ibmr->sg_len);

	/* Return it to the pool's free list */
	rds_ib_free_fmr_list(ibmr);

	atomic_add(ibmr->sg_len, &pool->free_pinned);
	atomic_inc(&pool->dirty_count);

	/* If we've pinned too many pages, request a flush */
	if (atomic_read(&pool->free_pinned) >= pool->max_free_pinned ||
	    atomic_read(&pool->dirty_count) >= pool->max_items / 5)
		queue_delayed_work(rds_ib_mr_wq, &pool->flush_worker, 10);

	if (invalidate) {
		if (likely(!in_interrupt())) {
			rds_ib_flush_mr_pool(pool, 0, NULL);
		} else {
			/* We get here if the user created a MR marked
			 * as use_once and invalidate at the same time.
			 */
			queue_delayed_work(rds_ib_mr_wq,
					   &pool->flush_worker, 10);
		}
	}

	rds_ib_dev_put(rds_ibdev);
}

void rds_ib_flush_mrs(void)
{
	struct rds_ib_device *rds_ibdev;

	down_read(&rds_ib_devices_lock);
	list_for_each_entry(rds_ibdev, &rds_ib_devices, list) {
		if (rds_ibdev->mr_8k_pool)
			rds_ib_flush_mr_pool(rds_ibdev->mr_8k_pool, 0, NULL);

		if (rds_ibdev->mr_1m_pool)
			rds_ib_flush_mr_pool(rds_ibdev->mr_1m_pool, 0, NULL);
	}
	up_read(&rds_ib_devices_lock);
}
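
/*
 * Top-level entry point for MR registration: look up the device for
 * the socket's bound address and register the scatterlist with its
 * FMR pool.  *key_ret receives the key that identifies this MR.
 */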
void *rds_ib_get_mr(struct scatterlist *sg, unsigned long nents,
		    struct rds_sock *rs, u32 *key_ret)
{
	struct rds_ib_device *rds_ibdev;
	struct rds_ib_mr *ibmr = NULL;
	int ret;

	rds_ibdev = rds_ib_get_device(rs->rs_bound_addr);
	if (!rds_ibdev) {
		ret = -ENODEV;
		goto out;
	}

	if (!rds_ibdev->mr_8k_pool || !rds_ibdev->mr_1m_pool) {
		ret = -ENODEV;
		goto out;
	}

	ibmr = rds_ib_reg_fmr(rds_ibdev, sg, nents, key_ret);
	if (ibmr)
		rds_ibdev = NULL;

 out:
	if (!ibmr)
		pr_warn("RDS/IB: rds_ib_get_mr failed (errno=%d)\n", ret);

	if (rds_ibdev)
		rds_ib_dev_put(rds_ibdev);

	return ibmr;
}

void rds_ib_destroy_mr_pool(struct rds_ib_mr_pool *pool)
{
	cancel_delayed_work_sync(&pool->flush_worker);
	rds_ib_flush_mr_pool(pool, 1, NULL);
	WARN_ON(atomic_read(&pool->item_count));
	WARN_ON(atomic_read(&pool->free_pinned));
	kfree(pool);
}
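
/*
 * Allocate and initialize an MR pool.  The 8K pool serves small RDMAs
 * and the 1M pool serves large ones.  fmr_attr.max_pages gets one
 * extra page to cover unaligned requests, and max_items_soft lets the
 * background flush kick in before the hard max_items cap is reached.
 */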
struct rds_ib_mr_pool *rds_ib_create_mr_pool(struct rds_ib_device *rds_ibdev,
					     int pool_type)
{
	struct rds_ib_mr_pool *pool;

	pool = kzalloc(sizeof(*pool), GFP_KERNEL);
	if (!pool)
		return ERR_PTR(-ENOMEM);

	pool->pool_type = pool_type;
	init_llist_head(&pool->free_list);
	init_llist_head(&pool->drop_list);
	init_llist_head(&pool->clean_list);
	mutex_init(&pool->flush_lock);
	init_waitqueue_head(&pool->flush_wait);
	INIT_DELAYED_WORK(&pool->flush_worker, rds_ib_mr_pool_flush_worker);

	if (pool_type == RDS_IB_MR_1M_POOL) {
		/* +1 allows for unaligned MRs */
		pool->fmr_attr.max_pages = RDS_MR_1M_MSG_SIZE + 1;
		pool->max_items = RDS_MR_1M_POOL_SIZE;
	} else {
		/* pool_type == RDS_IB_MR_8K_POOL */
		pool->fmr_attr.max_pages = RDS_MR_8K_MSG_SIZE + 1;
		pool->max_items = RDS_MR_8K_POOL_SIZE;
	}

	pool->max_free_pinned = pool->max_items * pool->fmr_attr.max_pages / 4;
	pool->fmr_attr.max_maps = rds_ibdev->fmr_max_remaps;
	pool->fmr_attr.page_shift = PAGE_SHIFT;
	pool->max_items_soft = rds_ibdev->max_mrs * 3 / 4;

	return pool;
}

int rds_ib_mr_init(void)
{
	rds_ib_mr_wq = create_workqueue("rds_mr_flushd");
	if (!rds_ib_mr_wq)
		return -ENOMEM;
	return 0;
}

/* By the time this is called all the IB devices should have been torn down and
 * had their pools freed.  As each pool is freed its work struct is waited on,
 * so the pool flushing work queue should be idle by the time we get here.
 */
void rds_ib_mr_exit(void)
{
	destroy_workqueue(rds_ib_mr_wq);
}