/*
 * Copyright (c) 2006 Oracle.  All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 *
 */
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/rculist.h>
#include <linux/llist.h>

#include "rds.h"
#include "ib.h"

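/*
 * A CPU sets CLEAN_LIST_BUSY_BIT in its clean_list_grace word while it is
 * popping an entry off a pool's clean_list (see rds_ib_reuse_fmr()), and
 * wait_clean_list_grace() spins until every CPU has cleared the bit before
 * reclaimed nodes are linked back onto the clean_list.
 */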
static DEFINE_PER_CPU(unsigned long, clean_list_grace);
#define CLEAN_LIST_BUSY_BIT 0

/*
 * This is stored as mr->r_trans_private.
 */
struct rds_ib_mr {
	struct rds_ib_device *device;
	struct rds_ib_mr_pool *pool;
	struct ib_fmr *fmr;

	struct llist_node llnode;

	/* unmap_list is for freeing */
	struct list_head unmap_list;
	unsigned int remap_count;

	struct scatterlist *sg;
	unsigned int sg_len;
	u64 *dma;
	int sg_dma_len;
};

/*
 * Our own little FMR pool
 */
struct rds_ib_mr_pool {
	struct mutex flush_lock;		/* serialize fmr invalidate */
	struct delayed_work flush_worker;	/* flush worker */

	atomic_t item_count;			/* total # of MRs */
	atomic_t dirty_count;			/* # of dirty MRs */

	struct llist_head drop_list;		/* MRs that have reached their max_maps limit */
	struct llist_head free_list;		/* unused MRs */
	struct llist_head clean_list;		/* global unused & unmapped MRs */
	wait_queue_head_t flush_wait;

	atomic_t free_pinned;			/* memory pinned by free MRs */
	unsigned long max_items;
	unsigned long max_items_soft;
	unsigned long max_free_pinned;
	struct ib_fmr_attr fmr_attr;
};

struct workqueue_struct *rds_ib_fmr_wq;

int rds_ib_fmr_init(void)
{
	rds_ib_fmr_wq = create_workqueue("rds_fmr_flushd");
	if (!rds_ib_fmr_wq)
		return -ENOMEM;
	return 0;
}

/* By the time this is called all the IB devices should have been torn down and
 * had their pools freed.  As each pool is freed its work struct is waited on,
 * so the pool flushing work queue should be idle by the time we get here.
 */
void rds_ib_fmr_exit(void)
{
	destroy_workqueue(rds_ib_fmr_wq);
}

static int rds_ib_flush_mr_pool(struct rds_ib_mr_pool *pool, int free_all, struct rds_ib_mr **);
static void rds_ib_teardown_mr(struct rds_ib_mr *ibmr);
static void rds_ib_mr_pool_flush_worker(struct work_struct *work);

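/*
 * Look up the rds_ib_device that has @ipaddr on its ipaddr_list and take a
 * reference on it.  Callers drop the reference with rds_ib_dev_put().
 */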
static struct rds_ib_device *rds_ib_get_device(__be32 ipaddr)
{
	struct rds_ib_device *rds_ibdev;
	struct rds_ib_ipaddr *i_ipaddr;

	rcu_read_lock();
	list_for_each_entry_rcu(rds_ibdev, &rds_ib_devices, list) {
		list_for_each_entry_rcu(i_ipaddr, &rds_ibdev->ipaddr_list, list) {
			if (i_ipaddr->ipaddr == ipaddr) {
				atomic_inc(&rds_ibdev->refcount);
				rcu_read_unlock();
				return rds_ibdev;
			}
		}
	}
	rcu_read_unlock();

	return NULL;
}

static int rds_ib_add_ipaddr(struct rds_ib_device *rds_ibdev, __be32 ipaddr)
{
	struct rds_ib_ipaddr *i_ipaddr;

	i_ipaddr = kmalloc(sizeof *i_ipaddr, GFP_KERNEL);
	if (!i_ipaddr)
		return -ENOMEM;

	i_ipaddr->ipaddr = ipaddr;

	spin_lock_irq(&rds_ibdev->spinlock);
	list_add_tail_rcu(&i_ipaddr->list, &rds_ibdev->ipaddr_list);
	spin_unlock_irq(&rds_ibdev->spinlock);

	return 0;
}

static void rds_ib_remove_ipaddr(struct rds_ib_device *rds_ibdev, __be32 ipaddr)
{
	struct rds_ib_ipaddr *i_ipaddr;
	struct rds_ib_ipaddr *to_free = NULL;

	spin_lock_irq(&rds_ibdev->spinlock);
	list_for_each_entry_rcu(i_ipaddr, &rds_ibdev->ipaddr_list, list) {
		if (i_ipaddr->ipaddr == ipaddr) {
			list_del_rcu(&i_ipaddr->list);
			to_free = i_ipaddr;
			break;
		}
	}
	spin_unlock_irq(&rds_ibdev->spinlock);

	if (to_free)
		kfree_rcu(to_free, rcu);
}

int rds_ib_update_ipaddr(struct rds_ib_device *rds_ibdev, __be32 ipaddr)
{
	struct rds_ib_device *rds_ibdev_old;

	rds_ibdev_old = rds_ib_get_device(ipaddr);
	if (!rds_ibdev_old)
		return rds_ib_add_ipaddr(rds_ibdev, ipaddr);

	if (rds_ibdev_old != rds_ibdev) {
		rds_ib_remove_ipaddr(rds_ibdev_old, ipaddr);
		rds_ib_dev_put(rds_ibdev_old);
		return rds_ib_add_ipaddr(rds_ibdev, ipaddr);
	}
	rds_ib_dev_put(rds_ibdev_old);

	return 0;
}

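/*
 * A connection created before its underlying device is known sits on
 * ib_nodev_conns; once the device has been resolved the connection is
 * moved onto that device's conn_list here.
 */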
void rds_ib_add_conn(struct rds_ib_device *rds_ibdev, struct rds_connection *conn)
{
	struct rds_ib_connection *ic = conn->c_transport_data;

	/* conn was previously on the nodev_conns_list */
	spin_lock_irq(&ib_nodev_conns_lock);
	BUG_ON(list_empty(&ib_nodev_conns));
	BUG_ON(list_empty(&ic->ib_node));
	list_del(&ic->ib_node);

	spin_lock(&rds_ibdev->spinlock);
	list_add_tail(&ic->ib_node, &rds_ibdev->conn_list);
	spin_unlock(&rds_ibdev->spinlock);
	spin_unlock_irq(&ib_nodev_conns_lock);

	ic->rds_ibdev = rds_ibdev;
	atomic_inc(&rds_ibdev->refcount);
}

void rds_ib_remove_conn(struct rds_ib_device *rds_ibdev, struct rds_connection *conn)
{
	struct rds_ib_connection *ic = conn->c_transport_data;

	/* place conn on nodev_conns_list */
	spin_lock(&ib_nodev_conns_lock);

	spin_lock_irq(&rds_ibdev->spinlock);
	BUG_ON(list_empty(&ic->ib_node));
	list_del(&ic->ib_node);
	spin_unlock_irq(&rds_ibdev->spinlock);

	list_add_tail(&ic->ib_node, &ib_nodev_conns);

	spin_unlock(&ib_nodev_conns_lock);

	ic->rds_ibdev = NULL;
	rds_ib_dev_put(rds_ibdev);
}

void rds_ib_destroy_nodev_conns(void)
{
	struct rds_ib_connection *ic, *_ic;
	LIST_HEAD(tmp_list);

	/* avoid calling conn_destroy with irqs off */
	spin_lock_irq(&ib_nodev_conns_lock);
	list_splice(&ib_nodev_conns, &tmp_list);
	spin_unlock_irq(&ib_nodev_conns_lock);

	list_for_each_entry_safe(ic, _ic, &tmp_list, ib_node)
		rds_conn_destroy(ic->conn);
}

struct rds_ib_mr_pool *rds_ib_create_mr_pool(struct rds_ib_device *rds_ibdev)
{
	struct rds_ib_mr_pool *pool;

	pool = kzalloc(sizeof(*pool), GFP_KERNEL);
	if (!pool)
		return ERR_PTR(-ENOMEM);

	init_llist_head(&pool->free_list);
	init_llist_head(&pool->drop_list);
	init_llist_head(&pool->clean_list);
	mutex_init(&pool->flush_lock);
	init_waitqueue_head(&pool->flush_wait);
	INIT_DELAYED_WORK(&pool->flush_worker, rds_ib_mr_pool_flush_worker);

	pool->fmr_attr.max_pages = fmr_message_size;
	pool->fmr_attr.max_maps = rds_ibdev->fmr_max_remaps;
	pool->fmr_attr.page_shift = PAGE_SHIFT;
	pool->max_free_pinned = rds_ibdev->max_fmrs * fmr_message_size / 4;

	/* We never allow more than max_items MRs to be allocated.
	 * Once we exceed max_items_soft, we start freeing items
	 * more aggressively.
	 * Make sure that max_items > max_items_soft > max_items / 2.
	 */
	pool->max_items_soft = rds_ibdev->max_fmrs * 3 / 4;
	pool->max_items = rds_ibdev->max_fmrs;

	return pool;
}

void rds_ib_get_mr_info(struct rds_ib_device *rds_ibdev, struct rds_info_rdma_connection *iinfo)
{
	struct rds_ib_mr_pool *pool = rds_ibdev->mr_pool;

	iinfo->rdma_mr_max = pool->max_items;
	iinfo->rdma_mr_size = pool->fmr_attr.max_pages;
}

void rds_ib_destroy_mr_pool(struct rds_ib_mr_pool *pool)
{
	cancel_delayed_work_sync(&pool->flush_worker);
	rds_ib_flush_mr_pool(pool, 1, NULL);
	WARN_ON(atomic_read(&pool->item_count));
	WARN_ON(atomic_read(&pool->free_pinned));
	kfree(pool);
}

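/*
 * Pop an unused MR off the pool's clean_list, if one is available.  The
 * per-cpu CLEAN_LIST_BUSY_BIT is held around llist_del_first() so that a
 * concurrent flush can wait for us in wait_clean_list_grace().
 */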
static inline struct rds_ib_mr *rds_ib_reuse_fmr(struct rds_ib_mr_pool *pool)
{
	struct rds_ib_mr *ibmr = NULL;
	struct llist_node *ret;
	unsigned long *flag;

	preempt_disable();
	flag = this_cpu_ptr(&clean_list_grace);
	set_bit(CLEAN_LIST_BUSY_BIT, flag);
	ret = llist_del_first(&pool->clean_list);
	if (ret)
		ibmr = llist_entry(ret, struct rds_ib_mr, llnode);

	clear_bit(CLEAN_LIST_BUSY_BIT, flag);
	preempt_enable();
	return ibmr;
}

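/*
 * Wait until no CPU is in the middle of pulling an entry off the
 * clean_list, i.e. until every CPU has cleared CLEAN_LIST_BUSY_BIT.
 */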
static inline void wait_clean_list_grace(void)
{
	int cpu;
	unsigned long *flag;

	for_each_online_cpu(cpu) {
		flag = &per_cpu(clean_list_grace, cpu);
		while (test_bit(CLEAN_LIST_BUSY_BIT, flag))
			cpu_relax();
	}
}

static struct rds_ib_mr *rds_ib_alloc_fmr(struct rds_ib_device *rds_ibdev)
{
	struct rds_ib_mr_pool *pool = rds_ibdev->mr_pool;
	struct rds_ib_mr *ibmr = NULL;
	int err = 0, iter = 0;

	if (atomic_read(&pool->dirty_count) >= pool->max_items / 10)
		schedule_delayed_work(&pool->flush_worker, 10);

	while (1) {
		ibmr = rds_ib_reuse_fmr(pool);
		if (ibmr)
			return ibmr;

		/* No clean MRs - now we have the choice of either
		 * allocating a fresh MR up to the limit imposed by the
		 * driver, or flushing any dirty unused MRs.
		 * We try to avoid stalling in the send path if possible,
		 * so we allocate as long as we're allowed to.
		 *
		 * We're fussy with enforcing the FMR limit, though. If the driver
		 * tells us we can't use more than N fmrs, we shouldn't start
		 * arguing with it */
		if (atomic_inc_return(&pool->item_count) <= pool->max_items)
			break;

		atomic_dec(&pool->item_count);

		if (++iter > 2) {
			rds_ib_stats_inc(s_ib_rdma_mr_pool_depleted);
			return ERR_PTR(-EAGAIN);
		}

		/* We do have some empty MRs. Flush them out. */
		rds_ib_stats_inc(s_ib_rdma_mr_pool_wait);
		rds_ib_flush_mr_pool(pool, 0, &ibmr);
		if (ibmr)
			return ibmr;
	}

	ibmr = kzalloc_node(sizeof(*ibmr), GFP_KERNEL, rdsibdev_to_node(rds_ibdev));
	if (!ibmr) {
		err = -ENOMEM;
		goto out_no_cigar;
	}

	ibmr->fmr = ib_alloc_fmr(rds_ibdev->pd,
			(IB_ACCESS_LOCAL_WRITE |
			 IB_ACCESS_REMOTE_READ |
			 IB_ACCESS_REMOTE_WRITE |
			 IB_ACCESS_REMOTE_ATOMIC),
			&pool->fmr_attr);
	if (IS_ERR(ibmr->fmr)) {
		err = PTR_ERR(ibmr->fmr);
		ibmr->fmr = NULL;
		printk(KERN_WARNING "RDS/IB: ib_alloc_fmr failed (err=%d)\n", err);
		goto out_no_cigar;
	}

	rds_ib_stats_inc(s_ib_rdma_mr_alloc);
	return ibmr;

out_no_cigar:
	if (ibmr) {
		if (ibmr->fmr)
			ib_dealloc_fmr(ibmr->fmr);
		kfree(ibmr);
	}
	atomic_dec(&pool->item_count);
	return ERR_PTR(err);
}

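/*
 * DMA-map the scatterlist and program the FMR with the resulting page list.
 * Every entry must start on a page boundary except the first and end on one
 * except the last, and the total page count may not exceed fmr_message_size.
 */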
static int rds_ib_map_fmr(struct rds_ib_device *rds_ibdev, struct rds_ib_mr *ibmr,
			  struct scatterlist *sg, unsigned int nents)
{
	struct ib_device *dev = rds_ibdev->dev;
	struct scatterlist *scat = sg;
	u64 io_addr = 0;
	u64 *dma_pages;
	u32 len;
	int page_cnt, sg_dma_len;
	int i, j;
	int ret;

	sg_dma_len = ib_dma_map_sg(dev, sg, nents,
				   DMA_BIDIRECTIONAL);
	if (unlikely(!sg_dma_len)) {
		printk(KERN_WARNING "RDS/IB: dma_map_sg failed!\n");
		return -EBUSY;
	}

	len = 0;
	page_cnt = 0;

	for (i = 0; i < sg_dma_len; ++i) {
		unsigned int dma_len = ib_sg_dma_len(dev, &scat[i]);
		u64 dma_addr = ib_sg_dma_address(dev, &scat[i]);

		if (dma_addr & ~PAGE_MASK) {
			if (i > 0)
				return -EINVAL;
			else
				++page_cnt;
		}
		if ((dma_addr + dma_len) & ~PAGE_MASK) {
			if (i < sg_dma_len - 1)
				return -EINVAL;
			else
				++page_cnt;
		}

		len += dma_len;
	}

	page_cnt += len >> PAGE_SHIFT;
	if (page_cnt > fmr_message_size)
		return -EINVAL;

	dma_pages = kmalloc_node(sizeof(u64) * page_cnt, GFP_ATOMIC,
				 rdsibdev_to_node(rds_ibdev));
	if (!dma_pages)
		return -ENOMEM;

	page_cnt = 0;
	for (i = 0; i < sg_dma_len; ++i) {
		unsigned int dma_len = ib_sg_dma_len(dev, &scat[i]);
		u64 dma_addr = ib_sg_dma_address(dev, &scat[i]);

		for (j = 0; j < dma_len; j += PAGE_SIZE)
			dma_pages[page_cnt++] =
				(dma_addr & PAGE_MASK) + j;
	}

	ret = ib_map_phys_fmr(ibmr->fmr,
			      dma_pages, page_cnt, io_addr);
	if (ret)
		goto out;

	/* Success - we successfully remapped the MR, so we can
	 * safely tear down the old mapping. */
	rds_ib_teardown_mr(ibmr);

	ibmr->sg = scat;
	ibmr->sg_len = nents;
	ibmr->sg_dma_len = sg_dma_len;
	ibmr->remap_count++;

	rds_ib_stats_inc(s_ib_rdma_mr_used);
	ret = 0;

out:
	kfree(dma_pages);

	return ret;
}

void rds_ib_sync_mr(void *trans_private, int direction)
{
	struct rds_ib_mr *ibmr = trans_private;
	struct rds_ib_device *rds_ibdev = ibmr->device;

	switch (direction) {
	case DMA_FROM_DEVICE:
		ib_dma_sync_sg_for_cpu(rds_ibdev->dev, ibmr->sg,
			ibmr->sg_dma_len, DMA_BIDIRECTIONAL);
		break;
	case DMA_TO_DEVICE:
		ib_dma_sync_sg_for_device(rds_ibdev->dev, ibmr->sg,
			ibmr->sg_dma_len, DMA_BIDIRECTIONAL);
		break;
	}
}

static void __rds_ib_teardown_mr(struct rds_ib_mr *ibmr)
{
	struct rds_ib_device *rds_ibdev = ibmr->device;

	if (ibmr->sg_dma_len) {
		ib_dma_unmap_sg(rds_ibdev->dev,
				ibmr->sg, ibmr->sg_len,
				DMA_BIDIRECTIONAL);
		ibmr->sg_dma_len = 0;
	}

	/* Release the s/g list */
	if (ibmr->sg_len) {
		unsigned int i;

		for (i = 0; i < ibmr->sg_len; ++i) {
			struct page *page = sg_page(&ibmr->sg[i]);

			/* FIXME we need a way to tell a r/w MR
			 * from a r/o MR */
			WARN_ON(!page->mapping && irqs_disabled());
			set_page_dirty(page);
			put_page(page);
		}
		kfree(ibmr->sg);

		ibmr->sg = NULL;
		ibmr->sg_len = 0;
	}
}

static void rds_ib_teardown_mr(struct rds_ib_mr *ibmr)
{
	unsigned int pinned = ibmr->sg_len;

	__rds_ib_teardown_mr(ibmr);
	if (pinned) {
		struct rds_ib_device *rds_ibdev = ibmr->device;
		struct rds_ib_mr_pool *pool = rds_ibdev->mr_pool;

		atomic_sub(pinned, &pool->free_pinned);
	}
}

static inline unsigned int rds_ib_flush_goal(struct rds_ib_mr_pool *pool, int free_all)
{
	unsigned int item_count;

	item_count = atomic_read(&pool->item_count);
	if (free_all)
		return item_count;

	return 0;
}

/*
 * given an llist of mrs, put them all into the list_head for more processing
 */
static unsigned int llist_append_to_list(struct llist_head *llist,
					 struct list_head *list)
{
	struct rds_ib_mr *ibmr;
	struct llist_node *node;
	struct llist_node *next;
	unsigned int count = 0;

	node = llist_del_all(llist);
	while (node) {
		next = node->next;
		ibmr = llist_entry(node, struct rds_ib_mr, llnode);
		list_add_tail(&ibmr->unmap_list, list);
		node = next;
		count++;
	}
	return count;
}

/*
 * this takes a list head of mrs and chains them together as llist nodes
 * that are ready to be put back on the pool's clean_list; the head and
 * tail of the resulting chain are returned through nodes_head/nodes_tail.
 */
static void list_to_llist_nodes(struct rds_ib_mr_pool *pool,
				struct list_head *list,
				struct llist_node **nodes_head,
				struct llist_node **nodes_tail)
{
	struct rds_ib_mr *ibmr;
	struct llist_node *cur = NULL;
	struct llist_node **next = nodes_head;

	list_for_each_entry(ibmr, list, unmap_list) {
		cur = &ibmr->llnode;
		*next = cur;
		next = &cur->next;
	}
	*next = NULL;
	*nodes_tail = cur;
}

/*
 * Flush our pool of MRs.
 * At a minimum, all currently unused MRs are unmapped.
 * If the number of MRs allocated exceeds the limit, we also try
 * to free as many MRs as needed to get back to this limit.
 */
static int rds_ib_flush_mr_pool(struct rds_ib_mr_pool *pool,
				int free_all, struct rds_ib_mr **ibmr_ret)
{
	struct rds_ib_mr *ibmr, *next;
	struct llist_node *clean_nodes;
	struct llist_node *clean_tail;
	LIST_HEAD(unmap_list);
	LIST_HEAD(fmr_list);
	unsigned long unpinned = 0;
	unsigned int nfreed = 0, dirty_to_clean = 0, free_goal;
	int ret = 0;

	rds_ib_stats_inc(s_ib_rdma_mr_pool_flush);

	if (ibmr_ret) {
		DEFINE_WAIT(wait);
		while (!mutex_trylock(&pool->flush_lock)) {
			ibmr = rds_ib_reuse_fmr(pool);
			if (ibmr) {
				*ibmr_ret = ibmr;
				finish_wait(&pool->flush_wait, &wait);
				goto out_nolock;
			}

			prepare_to_wait(&pool->flush_wait, &wait,
					TASK_UNINTERRUPTIBLE);
			if (llist_empty(&pool->clean_list))
				schedule();

			ibmr = rds_ib_reuse_fmr(pool);
			if (ibmr) {
				*ibmr_ret = ibmr;
				finish_wait(&pool->flush_wait, &wait);
				goto out_nolock;
			}
		}
		finish_wait(&pool->flush_wait, &wait);
	} else
		mutex_lock(&pool->flush_lock);

	if (ibmr_ret) {
		ibmr = rds_ib_reuse_fmr(pool);
		if (ibmr) {
			*ibmr_ret = ibmr;
			goto out;
		}
	}

	/* Get the list of all MRs to be dropped. Ordering matters -
	 * we want to put drop_list ahead of free_list.
	 */
	dirty_to_clean = llist_append_to_list(&pool->drop_list, &unmap_list);
	dirty_to_clean += llist_append_to_list(&pool->free_list, &unmap_list);
	if (free_all)
		llist_append_to_list(&pool->clean_list, &unmap_list);

	free_goal = rds_ib_flush_goal(pool, free_all);

	if (list_empty(&unmap_list))
		goto out;

	/* String all ib_mr's onto one list and hand them to ib_unmap_fmr */
	list_for_each_entry(ibmr, &unmap_list, unmap_list)
		list_add(&ibmr->fmr->list, &fmr_list);

	ret = ib_unmap_fmr(&fmr_list);
	if (ret)
		printk(KERN_WARNING "RDS/IB: ib_unmap_fmr failed (err=%d)\n", ret);

	/* Now we can destroy the DMA mapping and unpin any pages */
	list_for_each_entry_safe(ibmr, next, &unmap_list, unmap_list) {
		unpinned += ibmr->sg_len;
		__rds_ib_teardown_mr(ibmr);
		if (nfreed < free_goal || ibmr->remap_count >= pool->fmr_attr.max_maps) {
			rds_ib_stats_inc(s_ib_rdma_mr_free);
			list_del(&ibmr->unmap_list);
			ib_dealloc_fmr(ibmr->fmr);
			kfree(ibmr);
			nfreed++;
		}
	}

	if (!list_empty(&unmap_list)) {
		/* we have to make sure that none of the things we're about
		 * to put on the clean list would race with other cpus trying
		 * to pull items off.  The llist would explode if we managed to
		 * remove something from the clean list and then add it back again
		 * while another CPU was spinning on that same item in llist_del_first.
		 *
		 * This is pretty unlikely, but just in case wait for an llist grace period
		 * here before adding anything back into the clean list.
		 */
		wait_clean_list_grace();

		list_to_llist_nodes(pool, &unmap_list, &clean_nodes, &clean_tail);
		if (ibmr_ret)
			*ibmr_ret = llist_entry(clean_nodes, struct rds_ib_mr, llnode);

		/* more than one entry in llist nodes */
		if (clean_nodes->next)
			llist_add_batch(clean_nodes->next, clean_tail, &pool->clean_list);

	}

	atomic_sub(unpinned, &pool->free_pinned);
	atomic_sub(dirty_to_clean, &pool->dirty_count);
	atomic_sub(nfreed, &pool->item_count);

out:
	mutex_unlock(&pool->flush_lock);
	if (waitqueue_active(&pool->flush_wait))
		wake_up(&pool->flush_wait);
out_nolock:
	return ret;
}

static void rds_ib_mr_pool_flush_worker(struct work_struct *work)
{
	struct rds_ib_mr_pool *pool = container_of(work, struct rds_ib_mr_pool, flush_worker.work);

	rds_ib_flush_mr_pool(pool, 0, NULL);
}

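/*
 * Called when the owner releases an MR.  The MR is not destroyed here; it
 * is put back on the pool's drop_list or free_list, and a delayed flush is
 * queued once enough dirty MRs or pinned pages have accumulated.
 */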
void rds_ib_free_mr(void *trans_private, int invalidate)
{
	struct rds_ib_mr *ibmr = trans_private;
	struct rds_ib_device *rds_ibdev = ibmr->device;
	struct rds_ib_mr_pool *pool = rds_ibdev->mr_pool;

	rdsdebug("RDS/IB: free_mr nents %u\n", ibmr->sg_len);

	/* Return it to the pool's free list */
	if (ibmr->remap_count >= pool->fmr_attr.max_maps)
		llist_add(&ibmr->llnode, &pool->drop_list);
	else
		llist_add(&ibmr->llnode, &pool->free_list);

	atomic_add(ibmr->sg_len, &pool->free_pinned);
	atomic_inc(&pool->dirty_count);

	/* If we've pinned too many pages, request a flush */
	if (atomic_read(&pool->free_pinned) >= pool->max_free_pinned ||
	    atomic_read(&pool->dirty_count) >= pool->max_items / 5)
		queue_delayed_work(rds_ib_fmr_wq, &pool->flush_worker, 10);

	if (invalidate) {
		if (likely(!in_interrupt())) {
			rds_ib_flush_mr_pool(pool, 0, NULL);
		} else {
			/* We get here if the user created a MR marked
			 * as use_once and invalidate at the same time.
			 */
			queue_delayed_work(rds_ib_fmr_wq,
					   &pool->flush_worker, 10);
		}
	}

	rds_ib_dev_put(rds_ibdev);
}

void rds_ib_flush_mrs(void)
{
	struct rds_ib_device *rds_ibdev;

	down_read(&rds_ib_devices_lock);
	list_for_each_entry(rds_ibdev, &rds_ib_devices, list) {
		struct rds_ib_mr_pool *pool = rds_ibdev->mr_pool;

		if (pool)
			rds_ib_flush_mr_pool(pool, 0, NULL);
	}
	up_read(&rds_ib_devices_lock);
}

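/*
 * Map a scatterlist of pinned pages with an FMR and return the MR's rkey
 * through *key_ret.  On error an ERR_PTR() is returned and any partially
 * initialised MR is handed back to the pool.
 */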
void *rds_ib_get_mr(struct scatterlist *sg, unsigned long nents,
		    struct rds_sock *rs, u32 *key_ret)
{
	struct rds_ib_device *rds_ibdev;
	struct rds_ib_mr *ibmr = NULL;
	int ret;

	rds_ibdev = rds_ib_get_device(rs->rs_bound_addr);
	if (!rds_ibdev) {
		ret = -ENODEV;
		goto out;
	}

	if (!rds_ibdev->mr_pool) {
		ret = -ENODEV;
		goto out;
	}

	ibmr = rds_ib_alloc_fmr(rds_ibdev);
	if (IS_ERR(ibmr)) {
		rds_ib_dev_put(rds_ibdev);
		return ibmr;
	}

	ret = rds_ib_map_fmr(rds_ibdev, ibmr, sg, nents);
	if (ret == 0)
		*key_ret = ibmr->fmr->rkey;
	else
		printk(KERN_WARNING "RDS/IB: map_fmr failed (errno=%d)\n", ret);

	ibmr->device = rds_ibdev;
	rds_ibdev = NULL;

 out:
	if (ret) {
		if (ibmr)
			rds_ib_free_mr(ibmr, 0);
		ibmr = ERR_PTR(ret);
	}
	if (rds_ibdev)
		rds_ib_dev_put(rds_ibdev);
	return ibmr;
}