/*
 * Copyright (c) 2006, 2018 Oracle and/or its affiliates. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses. You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 * Redistribution and use in source and binary forms, with or
 * without modification, are permitted provided that the following
 * conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 *
 */
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/rculist.h>
#include <linux/llist.h>

#include "rds_single_path.h"
#include "ib_mr.h"
#include "rds.h"

struct workqueue_struct *rds_ib_mr_wq;
struct rds_ib_dereg_odp_mr {
	struct work_struct work;
	struct ib_mr *mr;
};

static void rds_ib_odp_mr_worker(struct work_struct *work);

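/* Look up the rds_ib_device whose ipaddr_list contains @ipaddr.  The
 * walk is RCU-protected, and the returned device has its refcount
 * elevated, so the caller must drop it with rds_ib_dev_put().
 */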
static struct rds_ib_device *rds_ib_get_device(__be32 ipaddr)
{
	struct rds_ib_device *rds_ibdev;
	struct rds_ib_ipaddr *i_ipaddr;

	rcu_read_lock();
	list_for_each_entry_rcu(rds_ibdev, &rds_ib_devices, list) {
		list_for_each_entry_rcu(i_ipaddr, &rds_ibdev->ipaddr_list, list) {
			if (i_ipaddr->ipaddr == ipaddr) {
				refcount_inc(&rds_ibdev->refcount);
				rcu_read_unlock();
				return rds_ibdev;
			}
		}
	}
	rcu_read_unlock();

	return NULL;
}

static int rds_ib_add_ipaddr(struct rds_ib_device *rds_ibdev, __be32 ipaddr)
{
	struct rds_ib_ipaddr *i_ipaddr;

	i_ipaddr = kmalloc(sizeof *i_ipaddr, GFP_KERNEL);
	if (!i_ipaddr)
		return -ENOMEM;

	i_ipaddr->ipaddr = ipaddr;

	spin_lock_irq(&rds_ibdev->spinlock);
	list_add_tail_rcu(&i_ipaddr->list, &rds_ibdev->ipaddr_list);
	spin_unlock_irq(&rds_ibdev->spinlock);

	return 0;
}

static void rds_ib_remove_ipaddr(struct rds_ib_device *rds_ibdev, __be32 ipaddr)
{
	struct rds_ib_ipaddr *i_ipaddr;
	struct rds_ib_ipaddr *to_free = NULL;

	spin_lock_irq(&rds_ibdev->spinlock);
	list_for_each_entry_rcu(i_ipaddr, &rds_ibdev->ipaddr_list, list) {
		if (i_ipaddr->ipaddr == ipaddr) {
			list_del_rcu(&i_ipaddr->list);
			to_free = i_ipaddr;
			break;
		}
	}
	spin_unlock_irq(&rds_ibdev->spinlock);

	if (to_free)
		kfree_rcu(to_free, rcu);
}

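/* Bind @ipaddr to @rds_ibdev, migrating it from whichever device owned
 * it before.  Only the low 32 bits (s6_addr32[3], the IPv4 address
 * carried in an IPv4-mapped IPv6 address) participate in the lookup.
 */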
int rds_ib_update_ipaddr(struct rds_ib_device *rds_ibdev,
			 struct in6_addr *ipaddr)
{
	struct rds_ib_device *rds_ibdev_old;

	rds_ibdev_old = rds_ib_get_device(ipaddr->s6_addr32[3]);
	if (!rds_ibdev_old)
		return rds_ib_add_ipaddr(rds_ibdev, ipaddr->s6_addr32[3]);

	if (rds_ibdev_old != rds_ibdev) {
		rds_ib_remove_ipaddr(rds_ibdev_old, ipaddr->s6_addr32[3]);
		rds_ib_dev_put(rds_ibdev_old);
		return rds_ib_add_ipaddr(rds_ibdev, ipaddr->s6_addr32[3]);
	}
	rds_ib_dev_put(rds_ibdev_old);

	return 0;
}

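/* Move @conn from the global ib_nodev_conns list onto @rds_ibdev's
 * conn_list.  Both locks are held across the move (nodev lock outer,
 * device spinlock inner), so the connection is never on neither list.
 */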
void rds_ib_add_conn(struct rds_ib_device *rds_ibdev, struct rds_connection *conn)
{
	struct rds_ib_connection *ic = conn->c_transport_data;

	/* conn was previously on the nodev_conns_list */
	spin_lock_irq(&ib_nodev_conns_lock);
	BUG_ON(list_empty(&ib_nodev_conns));
	BUG_ON(list_empty(&ic->ib_node));
	list_del(&ic->ib_node);

	spin_lock(&rds_ibdev->spinlock);
	list_add_tail(&ic->ib_node, &rds_ibdev->conn_list);
	spin_unlock(&rds_ibdev->spinlock);
	spin_unlock_irq(&ib_nodev_conns_lock);

	ic->rds_ibdev = rds_ibdev;
	refcount_inc(&rds_ibdev->refcount);
}

void rds_ib_remove_conn(struct rds_ib_device *rds_ibdev, struct rds_connection *conn)
{
	struct rds_ib_connection *ic = conn->c_transport_data;

	/* place conn on nodev_conns_list */
	spin_lock(&ib_nodev_conns_lock);

	spin_lock_irq(&rds_ibdev->spinlock);
	BUG_ON(list_empty(&ic->ib_node));
	list_del(&ic->ib_node);
	spin_unlock_irq(&rds_ibdev->spinlock);

	list_add_tail(&ic->ib_node, &ib_nodev_conns);

	spin_unlock(&ib_nodev_conns_lock);

	ic->rds_ibdev = NULL;
	rds_ib_dev_put(rds_ibdev);
}

void rds_ib_destroy_nodev_conns(void)
{
	struct rds_ib_connection *ic, *_ic;
	LIST_HEAD(tmp_list);

	/* avoid calling conn_destroy with irqs off */
	spin_lock_irq(&ib_nodev_conns_lock);
	list_splice(&ib_nodev_conns, &tmp_list);
	spin_unlock_irq(&ib_nodev_conns_lock);

	list_for_each_entry_safe(ic, _ic, &tmp_list, ib_node)
		rds_conn_destroy(ic->conn);
}

void rds_ib_get_mr_info(struct rds_ib_device *rds_ibdev, struct rds_info_rdma_connection *iinfo)
{
	struct rds_ib_mr_pool *pool_1m = rds_ibdev->mr_1m_pool;

	iinfo->rdma_mr_max = pool_1m->max_items;
	iinfo->rdma_mr_size = pool_1m->max_pages;
}

#if IS_ENABLED(CONFIG_IPV6)
void rds6_ib_get_mr_info(struct rds_ib_device *rds_ibdev,
			 struct rds6_info_rdma_connection *iinfo6)
{
	struct rds_ib_mr_pool *pool_1m = rds_ibdev->mr_1m_pool;

	iinfo6->rdma_mr_max = pool_1m->max_items;
	iinfo6->rdma_mr_size = pool_1m->max_pages;
}
#endif

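/* Fast path for MR allocation: pop one previously registered MR off the
 * pool's lock-protected clean_list, if any are available.
 */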
struct rds_ib_mr *rds_ib_reuse_mr(struct rds_ib_mr_pool *pool)
{
	struct rds_ib_mr *ibmr = NULL;
	struct llist_node *ret;
	unsigned long flags;

	spin_lock_irqsave(&pool->clean_lock, flags);
	ret = llist_del_first(&pool->clean_list);
	spin_unlock_irqrestore(&pool->clean_lock, flags);
	if (ret) {
		ibmr = llist_entry(ret, struct rds_ib_mr, llnode);
		if (pool->pool_type == RDS_IB_MR_8K_POOL)
			rds_ib_stats_inc(s_ib_rdma_mr_8k_reused);
		else
			rds_ib_stats_inc(s_ib_rdma_mr_1m_reused);
	}

	return ibmr;
}

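/* DMA-sync the MR's scatterlist before CPU or device access.  ODP MRs
 * have no pinned scatterlist behind them, so there is nothing to sync.
 */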
void rds_ib_sync_mr(void *trans_private, int direction)
{
	struct rds_ib_mr *ibmr = trans_private;
	struct rds_ib_device *rds_ibdev = ibmr->device;

	if (ibmr->odp)
		return;

	switch (direction) {
	case DMA_FROM_DEVICE:
		ib_dma_sync_sg_for_cpu(rds_ibdev->dev, ibmr->sg,
				       ibmr->sg_dma_len, DMA_BIDIRECTIONAL);
		break;
	case DMA_TO_DEVICE:
		ib_dma_sync_sg_for_device(rds_ibdev->dev, ibmr->sg,
					  ibmr->sg_dma_len, DMA_BIDIRECTIONAL);
		break;
	}
}

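/* Drop the DMA mappings and the page pins backing @ibmr.  Pages are
 * marked dirty before release because the remote side may have written
 * to them (see the FIXME below about telling r/w from r/o MRs).
 */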
void __rds_ib_teardown_mr(struct rds_ib_mr *ibmr)
{
	struct rds_ib_device *rds_ibdev = ibmr->device;

	if (ibmr->sg_dma_len) {
		ib_dma_unmap_sg(rds_ibdev->dev,
				ibmr->sg, ibmr->sg_len,
				DMA_BIDIRECTIONAL);
		ibmr->sg_dma_len = 0;
	}

	/* Release the s/g list */
	if (ibmr->sg_len) {
		unsigned int i;

		for (i = 0; i < ibmr->sg_len; ++i) {
			struct page *page = sg_page(&ibmr->sg[i]);

			/* FIXME we need a way to tell a r/w MR
			 * from a r/o MR */
			WARN_ON(!page->mapping && irqs_disabled());
			set_page_dirty(page);
			put_page(page);
		}
		kfree(ibmr->sg);

		ibmr->sg = NULL;
		ibmr->sg_len = 0;
	}
}

void rds_ib_teardown_mr(struct rds_ib_mr *ibmr)
{
	unsigned int pinned = ibmr->sg_len;

	__rds_ib_teardown_mr(ibmr);
	if (pinned) {
		struct rds_ib_mr_pool *pool = ibmr->pool;

		atomic_sub(pinned, &pool->free_pinned);
	}
}

static inline unsigned int rds_ib_flush_goal(struct rds_ib_mr_pool *pool, int free_all)
{
	unsigned int item_count;

	item_count = atomic_read(&pool->item_count);
	if (free_all)
		return item_count;

	return 0;
}

/*
 * given an llist of mrs, put them all into the list_head for more processing
 */
static unsigned int llist_append_to_list(struct llist_head *llist,
					 struct list_head *list)
{
	struct rds_ib_mr *ibmr;
	struct llist_node *node;
	struct llist_node *next;
	unsigned int count = 0;

	node = llist_del_all(llist);
	while (node) {
		next = node->next;
		ibmr = llist_entry(node, struct rds_ib_mr, llnode);
		list_add_tail(&ibmr->unmap_list, list);
		node = next;
		count++;
	}
	return count;
}


/*
 * this takes a list head of mrs and chains their llist nodes together,
 * returning the head and tail of the chain so the whole batch can be
 * spliced onto a pool's clean_list with a single llist_add_batch()
 */
static void list_to_llist_nodes(struct list_head *list,
				struct llist_node **nodes_head,
				struct llist_node **nodes_tail)
{
	struct rds_ib_mr *ibmr;
	struct llist_node *cur = NULL;
	struct llist_node **next = nodes_head;

	list_for_each_entry(ibmr, list, unmap_list) {
		cur = &ibmr->llnode;
		*next = cur;
		next = &cur->next;
	}
	*next = NULL;
	*nodes_tail = cur;
}


/*
 * Flush our pool of MRs.
 * At a minimum, all currently unused MRs are unmapped.
 * If the number of MRs allocated exceeds the limit, we also try
 * to free as many MRs as needed to get back to this limit.
 */
int rds_ib_flush_mr_pool(struct rds_ib_mr_pool *pool,
			 int free_all, struct rds_ib_mr **ibmr_ret)
{
	struct rds_ib_mr *ibmr;
	struct llist_node *clean_nodes;
	struct llist_node *clean_tail;
	LIST_HEAD(unmap_list);
	unsigned long unpinned = 0;
	unsigned int nfreed = 0, dirty_to_clean = 0, free_goal;

	if (pool->pool_type == RDS_IB_MR_8K_POOL)
		rds_ib_stats_inc(s_ib_rdma_mr_8k_pool_flush);
	else
		rds_ib_stats_inc(s_ib_rdma_mr_1m_pool_flush);

	if (ibmr_ret) {
		DEFINE_WAIT(wait);
		while (!mutex_trylock(&pool->flush_lock)) {
			ibmr = rds_ib_reuse_mr(pool);
			if (ibmr) {
				*ibmr_ret = ibmr;
				finish_wait(&pool->flush_wait, &wait);
				goto out_nolock;
			}

			prepare_to_wait(&pool->flush_wait, &wait,
					TASK_UNINTERRUPTIBLE);
			if (llist_empty(&pool->clean_list))
				schedule();

			ibmr = rds_ib_reuse_mr(pool);
			if (ibmr) {
				*ibmr_ret = ibmr;
				finish_wait(&pool->flush_wait, &wait);
				goto out_nolock;
			}
		}
		finish_wait(&pool->flush_wait, &wait);
	} else
		mutex_lock(&pool->flush_lock);

	if (ibmr_ret) {
		ibmr = rds_ib_reuse_mr(pool);
		if (ibmr) {
			*ibmr_ret = ibmr;
			goto out;
		}
	}

	/* Get the list of all MRs to be dropped. Ordering matters -
	 * we want to put drop_list ahead of free_list.
	 */
	dirty_to_clean = llist_append_to_list(&pool->drop_list, &unmap_list);
	dirty_to_clean += llist_append_to_list(&pool->free_list, &unmap_list);
	if (free_all) {
		unsigned long flags;

		spin_lock_irqsave(&pool->clean_lock, flags);
		llist_append_to_list(&pool->clean_list, &unmap_list);
		spin_unlock_irqrestore(&pool->clean_lock, flags);
	}

	free_goal = rds_ib_flush_goal(pool, free_all);

	if (list_empty(&unmap_list))
		goto out;

	rds_ib_unreg_frmr(&unmap_list, &nfreed, &unpinned, free_goal);

	if (!list_empty(&unmap_list)) {
		unsigned long flags;

		list_to_llist_nodes(&unmap_list, &clean_nodes, &clean_tail);
		if (ibmr_ret) {
			*ibmr_ret = llist_entry(clean_nodes, struct rds_ib_mr, llnode);
			clean_nodes = clean_nodes->next;
		}
		/* more than one entry in llist nodes */
		if (clean_nodes) {
			spin_lock_irqsave(&pool->clean_lock, flags);
			llist_add_batch(clean_nodes, clean_tail,
					&pool->clean_list);
			spin_unlock_irqrestore(&pool->clean_lock, flags);
		}
	}

	atomic_sub(unpinned, &pool->free_pinned);
	atomic_sub(dirty_to_clean, &pool->dirty_count);
	atomic_sub(nfreed, &pool->item_count);

out:
	mutex_unlock(&pool->flush_lock);
	if (waitqueue_active(&pool->flush_wait))
		wake_up(&pool->flush_wait);
out_nolock:
	return 0;
}

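/* Get an MR for a new mapping: reuse a clean one if possible, otherwise
 * reserve headroom in item_count and return NULL so the caller registers
 * a fresh MR.  A full pool is flushed up to two times before it is
 * declared depleted.
 */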
struct rds_ib_mr *rds_ib_try_reuse_ibmr(struct rds_ib_mr_pool *pool)
{
	struct rds_ib_mr *ibmr = NULL;
	int iter = 0;

	while (1) {
		ibmr = rds_ib_reuse_mr(pool);
		if (ibmr)
			return ibmr;

		if (atomic_inc_return(&pool->item_count) <= pool->max_items)
			break;

		atomic_dec(&pool->item_count);

		if (++iter > 2) {
			if (pool->pool_type == RDS_IB_MR_8K_POOL)
				rds_ib_stats_inc(s_ib_rdma_mr_8k_pool_depleted);
			else
				rds_ib_stats_inc(s_ib_rdma_mr_1m_pool_depleted);
			break;
		}

		/* We do have some empty MRs. Flush them out. */
		if (pool->pool_type == RDS_IB_MR_8K_POOL)
			rds_ib_stats_inc(s_ib_rdma_mr_8k_pool_wait);
		else
			rds_ib_stats_inc(s_ib_rdma_mr_1m_pool_wait);

		rds_ib_flush_mr_pool(pool, 0, &ibmr);
		if (ibmr)
			return ibmr;
	}

	return NULL;
}

static void rds_ib_mr_pool_flush_worker(struct work_struct *work)
{
	struct rds_ib_mr_pool *pool = container_of(work, struct rds_ib_mr_pool, flush_worker.work);

	rds_ib_flush_mr_pool(pool, 0, NULL);
}

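/* Transport hook for releasing an MR.  ODP MRs are deregistered from a
 * workqueue; pooled FRMRs go back onto the free list and are reclaimed
 * lazily by the flush worker.
 */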
void rds_ib_free_mr(void *trans_private, int invalidate)
{
	struct rds_ib_mr *ibmr = trans_private;
	struct rds_ib_mr_pool *pool = ibmr->pool;
	struct rds_ib_device *rds_ibdev = ibmr->device;

	rdsdebug("RDS/IB: free_mr nents %u\n", ibmr->sg_len);

	if (ibmr->odp) {
		/* An MR created and marked as use_once. We use delayed
		 * work, because there is a chance that we are in interrupt
		 * context and can't call ib_dereg_mr() directly.
		 */
497 INIT_DELAYED_WORK(&ibmr->work, rds_ib_odp_mr_worker);
498 queue_delayed_work(rds_ib_mr_wq, &ibmr->work, 0);
499 return;
500 }
501
Andy Grover08b48a12009-02-24 15:30:32 +0000502 /* Return it to the pool's free list */
Max Gurtovoy07549ee2020-05-28 16:45:45 -0300503 rds_ib_free_frmr_list(ibmr);
Andy Grover08b48a12009-02-24 15:30:32 +0000504
505 atomic_add(ibmr->sg_len, &pool->free_pinned);
506 atomic_inc(&pool->dirty_count);
Andy Grover08b48a12009-02-24 15:30:32 +0000507
508 /* If we've pinned too many pages, request a flush */
Joe Perchesf64f9e72009-11-29 16:55:45 -0800509 if (atomic_read(&pool->free_pinned) >= pool->max_free_pinned ||
santosh.shilimkar@oracle.comef5217a2015-08-25 12:02:02 -0700510 atomic_read(&pool->dirty_count) >= pool->max_items / 5)
santosh.shilimkar@oracle.comf6df6832016-03-01 15:20:46 -0800511 queue_delayed_work(rds_ib_mr_wq, &pool->flush_worker, 10);
Andy Grover08b48a12009-02-24 15:30:32 +0000512
513 if (invalidate) {
514 if (likely(!in_interrupt())) {
Chris Mason6fa70da2010-06-11 11:17:59 -0700515 rds_ib_flush_mr_pool(pool, 0, NULL);
Andy Grover08b48a12009-02-24 15:30:32 +0000516 } else {
517 /* We get here if the user created a MR marked
santosh.shilimkar@oracle.comad1d7dc2015-08-25 12:02:01 -0700518 * as use_once and invalidate at the same time.
519 */
santosh.shilimkar@oracle.comf6df6832016-03-01 15:20:46 -0800520 queue_delayed_work(rds_ib_mr_wq,
santosh.shilimkar@oracle.comad1d7dc2015-08-25 12:02:01 -0700521 &pool->flush_worker, 10);
Andy Grover08b48a12009-02-24 15:30:32 +0000522 }
523 }
Zach Brown3e0249f2010-05-18 15:48:51 -0700524
525 rds_ib_dev_put(rds_ibdev);
Andy Grover08b48a12009-02-24 15:30:32 +0000526}

void rds_ib_flush_mrs(void)
{
	struct rds_ib_device *rds_ibdev;

	down_read(&rds_ib_devices_lock);
	list_for_each_entry(rds_ibdev, &rds_ib_devices, list) {
		if (rds_ibdev->mr_8k_pool)
			rds_ib_flush_mr_pool(rds_ibdev->mr_8k_pool, 0, NULL);

		if (rds_ibdev->mr_1m_pool)
			rds_ib_flush_mr_pool(rds_ibdev->mr_1m_pool, 0, NULL);
	}
	up_read(&rds_ib_devices_lock);
}

u32 rds_ib_get_lkey(void *trans_private)
{
	struct rds_ib_mr *ibmr = trans_private;

	return ibmr->u.mr->lkey;
}

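/* Register a userspace buffer for RDMA (reached via the RDS_GET_MR and
 * RDS_GET_MR_FOR_DEST socket options, see rdma.c).  ODP requests are
 * registered directly with ib_reg_user_mr(), zero-based or virtually
 * addressed; everything else gets a fast-registration MR from the
 * device's 8K or 1M pool via rds_ib_reg_frmr().
 */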
void *rds_ib_get_mr(struct scatterlist *sg, unsigned long nents,
		    struct rds_sock *rs, u32 *key_ret,
		    struct rds_connection *conn,
		    u64 start, u64 length, int need_odp)
{
	struct rds_ib_device *rds_ibdev;
	struct rds_ib_mr *ibmr = NULL;
	struct rds_ib_connection *ic = NULL;
	int ret;

	rds_ibdev = rds_ib_get_device(rs->rs_bound_addr.s6_addr32[3]);
	if (!rds_ibdev) {
		ret = -ENODEV;
		goto out;
	}

	if (need_odp == ODP_ZEROBASED || need_odp == ODP_VIRTUAL) {
		u64 virt_addr = need_odp == ODP_ZEROBASED ? 0 : start;
		int access_flags =
			(IB_ACCESS_LOCAL_WRITE | IB_ACCESS_REMOTE_READ |
			 IB_ACCESS_REMOTE_WRITE | IB_ACCESS_REMOTE_ATOMIC |
			 IB_ACCESS_ON_DEMAND);
		struct ib_sge sge = {};
		struct ib_mr *ib_mr;

		if (!rds_ibdev->odp_capable) {
			ret = -EOPNOTSUPP;
			goto out;
		}

		ib_mr = ib_reg_user_mr(rds_ibdev->pd, start, length, virt_addr,
				       access_flags);

		if (IS_ERR(ib_mr)) {
			rdsdebug("ib_reg_user_mr returned %ld\n",
				 PTR_ERR(ib_mr));
			ret = PTR_ERR(ib_mr);
			goto out;
		}
		if (key_ret)
			*key_ret = ib_mr->rkey;

		ibmr = kzalloc(sizeof(*ibmr), GFP_KERNEL);
		if (!ibmr) {
			ib_dereg_mr(ib_mr);
			ret = -ENOMEM;
			goto out;
		}
		ibmr->u.mr = ib_mr;
		ibmr->odp = 1;

		sge.addr = virt_addr;
		sge.length = length;
		sge.lkey = ib_mr->lkey;

		ib_advise_mr(rds_ibdev->pd,
			     IB_UVERBS_ADVISE_MR_ADVICE_PREFETCH_WRITE,
			     IB_UVERBS_ADVISE_MR_FLAG_FLUSH, &sge, 1);
		return ibmr;
	}

	if (conn)
		ic = conn->c_transport_data;

	if (!rds_ibdev->mr_8k_pool || !rds_ibdev->mr_1m_pool) {
		ret = -ENODEV;
		goto out;
	}

	ibmr = rds_ib_reg_frmr(rds_ibdev, ic, sg, nents, key_ret);
	if (IS_ERR(ibmr)) {
		ret = PTR_ERR(ibmr);
		pr_warn("RDS/IB: rds_ib_get_mr failed (errno=%d)\n", ret);
	} else {
		return ibmr;
	}

 out:
	if (rds_ibdev)
		rds_ib_dev_put(rds_ibdev);

	return ERR_PTR(ret);
}


void rds_ib_destroy_mr_pool(struct rds_ib_mr_pool *pool)
{
	cancel_delayed_work_sync(&pool->flush_worker);
	rds_ib_flush_mr_pool(pool, 1, NULL);
	WARN_ON(atomic_read(&pool->item_count));
	WARN_ON(atomic_read(&pool->free_pinned));
	kfree(pool);
}

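/* Allocate one MR pool.  Each device carries two: an 8K pool for small
 * mappings and a 1M pool for large ones; max_pages is padded by one
 * page to cover unaligned requests.
 */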
struct rds_ib_mr_pool *rds_ib_create_mr_pool(struct rds_ib_device *rds_ibdev,
					     int pool_type)
{
	struct rds_ib_mr_pool *pool;

	pool = kzalloc(sizeof(*pool), GFP_KERNEL);
	if (!pool)
		return ERR_PTR(-ENOMEM);

	pool->pool_type = pool_type;
	init_llist_head(&pool->free_list);
	init_llist_head(&pool->drop_list);
	init_llist_head(&pool->clean_list);
	spin_lock_init(&pool->clean_lock);
	mutex_init(&pool->flush_lock);
	init_waitqueue_head(&pool->flush_wait);
	INIT_DELAYED_WORK(&pool->flush_worker, rds_ib_mr_pool_flush_worker);

	if (pool_type == RDS_IB_MR_1M_POOL) {
		/* +1 allows for unaligned MRs */
		pool->max_pages = RDS_MR_1M_MSG_SIZE + 1;
		pool->max_items = rds_ibdev->max_1m_mrs;
	} else {
		/* pool_type == RDS_IB_MR_8K_POOL */
		pool->max_pages = RDS_MR_8K_MSG_SIZE + 1;
		pool->max_items = rds_ibdev->max_8k_mrs;
	}

	pool->max_free_pinned = pool->max_items * pool->max_pages / 4;
	pool->max_items_soft = rds_ibdev->max_mrs * 3 / 4;

	return pool;
}

int rds_ib_mr_init(void)
{
	rds_ib_mr_wq = alloc_workqueue("rds_mr_flushd", WQ_MEM_RECLAIM, 0);
	if (!rds_ib_mr_wq)
		return -ENOMEM;
	return 0;
}

/* By the time this is called all the IB devices should have been torn down and
 * had their pools freed. As each pool is freed its work struct is waited on,
 * so the pool flushing work queue should be idle by the time we get here.
 */
void rds_ib_mr_exit(void)
{
	destroy_workqueue(rds_ib_mr_wq);
}

static void rds_ib_odp_mr_worker(struct work_struct *work)
{
	struct rds_ib_mr *ibmr;

	ibmr = container_of(work, struct rds_ib_mr, work.work);
	ib_dereg_mr(ibmr->u.mr);
	kfree(ibmr);
}