/*
 * Copyright (c) 2013-2015, Mellanox Technologies. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */


#include <linux/kref.h>
#include <linux/random.h>
#include <linux/debugfs.h>
#include <linux/export.h>
#include <linux/delay.h>
#include <rdma/ib_umem.h>
#include <rdma/ib_umem_odp.h>
#include <rdma/ib_verbs.h>
#include "mlx5_ib.h"

enum {
	MAX_PENDING_REG_MR = 8,
};

#define MLX5_UMR_ALIGN 2048
#ifdef CONFIG_INFINIBAND_ON_DEMAND_PAGING
static __be64 mlx5_ib_update_mtt_emergency_buffer[
		MLX5_UMR_MTT_MIN_CHUNK_SIZE/sizeof(__be64)]
	__aligned(MLX5_UMR_ALIGN);
static DEFINE_MUTEX(mlx5_ib_update_mtt_emergency_buffer_mutex);
#endif

static int clean_mr(struct mlx5_ib_mr *mr);

static int destroy_mkey(struct mlx5_ib_dev *dev, struct mlx5_ib_mr *mr)
{
	int err = mlx5_core_destroy_mkey(dev->mdev, &mr->mmr);

#ifdef CONFIG_INFINIBAND_ON_DEMAND_PAGING
	/* Wait until all page fault handlers using the mr complete. */
	synchronize_srcu(&dev->mr_srcu);
#endif

	return err;
}

static int order2idx(struct mlx5_ib_dev *dev, int order)
{
	struct mlx5_mr_cache *cache = &dev->cache;

	if (order < cache->ent[0].order)
		return 0;
	else
		return order - cache->ent[0].order;
}

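/*
 * Completion callback for the asynchronous mkey creation requests issued by
 * add_keys().  On success the MR gets a fresh variable key byte, is added to
 * the cache entry's free list and inserted into the device mkey radix tree;
 * on failure the MR is freed and further cache filling is delayed for a
 * second via dev->fill_delay and the delay timer.
 */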
static void reg_mr_callback(int status, void *context)
{
	struct mlx5_ib_mr *mr = context;
	struct mlx5_ib_dev *dev = mr->dev;
	struct mlx5_mr_cache *cache = &dev->cache;
	int c = order2idx(dev, mr->order);
	struct mlx5_cache_ent *ent = &cache->ent[c];
	u8 key;
	unsigned long flags;
	struct mlx5_mr_table *table = &dev->mdev->priv.mr_table;
	int err;

	spin_lock_irqsave(&ent->lock, flags);
	ent->pending--;
	spin_unlock_irqrestore(&ent->lock, flags);
	if (status) {
		mlx5_ib_warn(dev, "async reg mr failed. status %d\n", status);
		kfree(mr);
		dev->fill_delay = 1;
		mod_timer(&dev->delay_timer, jiffies + HZ);
		return;
	}

	if (mr->out.hdr.status) {
		mlx5_ib_warn(dev, "failed - status %d, syndrome 0x%x\n",
			     mr->out.hdr.status,
			     be32_to_cpu(mr->out.hdr.syndrome));
		kfree(mr);
		dev->fill_delay = 1;
		mod_timer(&dev->delay_timer, jiffies + HZ);
		return;
	}

	spin_lock_irqsave(&dev->mdev->priv.mkey_lock, flags);
	key = dev->mdev->priv.mkey_key++;
	spin_unlock_irqrestore(&dev->mdev->priv.mkey_lock, flags);
	mr->mmr.key = mlx5_idx_to_mkey(be32_to_cpu(mr->out.mkey) & 0xffffff) | key;

	cache->last_add = jiffies;

	spin_lock_irqsave(&ent->lock, flags);
	list_add_tail(&mr->list, &ent->head);
	ent->cur++;
	ent->size++;
	spin_unlock_irqrestore(&ent->lock, flags);

	write_lock_irqsave(&table->lock, flags);
	err = radix_tree_insert(&table->tree, mlx5_base_mkey(mr->mmr.key),
				&mr->mmr);
	if (err)
		pr_err("Error inserting to mr tree. 0x%x\n", -err);
	write_unlock_irqrestore(&table->lock, flags);
}

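/*
 * Post up to @num asynchronous mkey creation commands for cache entry @c.
 * At most MAX_PENDING_REG_MR requests may be outstanding per entry; beyond
 * that -EAGAIN is returned so the caller can retry later.
 */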
static int add_keys(struct mlx5_ib_dev *dev, int c, int num)
{
	struct mlx5_mr_cache *cache = &dev->cache;
	struct mlx5_cache_ent *ent = &cache->ent[c];
	struct mlx5_create_mkey_mbox_in *in;
	struct mlx5_ib_mr *mr;
	int npages = 1 << ent->order;
	int err = 0;
	int i;

	in = kzalloc(sizeof(*in), GFP_KERNEL);
	if (!in)
		return -ENOMEM;

	for (i = 0; i < num; i++) {
		if (ent->pending >= MAX_PENDING_REG_MR) {
			err = -EAGAIN;
			break;
		}

		mr = kzalloc(sizeof(*mr), GFP_KERNEL);
		if (!mr) {
			err = -ENOMEM;
			break;
		}
		mr->order = ent->order;
		mr->umred = 1;
		mr->dev = dev;
		in->seg.status = MLX5_MKEY_STATUS_FREE;
		in->seg.xlt_oct_size = cpu_to_be32((npages + 1) / 2);
		in->seg.qpn_mkey7_0 = cpu_to_be32(0xffffff << 8);
		in->seg.flags = MLX5_ACCESS_MODE_MTT | MLX5_PERM_UMR_EN;
		in->seg.log2_page_size = 12;

		spin_lock_irq(&ent->lock);
		ent->pending++;
		spin_unlock_irq(&ent->lock);
		err = mlx5_core_create_mkey(dev->mdev, &mr->mmr, in,
					    sizeof(*in), reg_mr_callback,
					    mr, &mr->out);
		if (err) {
			spin_lock_irq(&ent->lock);
			ent->pending--;
			spin_unlock_irq(&ent->lock);
			mlx5_ib_warn(dev, "create mkey failed %d\n", err);
			kfree(mr);
			break;
		}
	}

	kfree(in);
	return err;
}

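/*
 * Remove up to @num MRs from the free list of cache entry @c and destroy
 * their mkeys.  Stops early once the list is empty.
 */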
static void remove_keys(struct mlx5_ib_dev *dev, int c, int num)
{
	struct mlx5_mr_cache *cache = &dev->cache;
	struct mlx5_cache_ent *ent = &cache->ent[c];
	struct mlx5_ib_mr *mr;
	int err;
	int i;

	for (i = 0; i < num; i++) {
		spin_lock_irq(&ent->lock);
		if (list_empty(&ent->head)) {
			spin_unlock_irq(&ent->lock);
			return;
		}
		mr = list_first_entry(&ent->head, struct mlx5_ib_mr, list);
		list_del(&mr->list);
		ent->cur--;
		ent->size--;
		spin_unlock_irq(&ent->lock);
		err = destroy_mkey(dev, mr);
		if (err)
			mlx5_ib_warn(dev, "failed destroy mkey\n");
		else
			kfree(mr);
	}
}

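/*
 * debugfs write/read handlers for the per-entry "size" and "limit" files.
 * Writing "size" grows or shrinks the entry to the requested number of
 * cached MRs; writing "limit" adjusts the low-water mark and tops the entry
 * up when it is currently below the new limit.
 */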
static ssize_t size_write(struct file *filp, const char __user *buf,
			  size_t count, loff_t *pos)
{
	struct mlx5_cache_ent *ent = filp->private_data;
	struct mlx5_ib_dev *dev = ent->dev;
	char lbuf[20];
	u32 var;
	int err;
	int c;

	if (copy_from_user(lbuf, buf, sizeof(lbuf)))
		return -EFAULT;

	c = order2idx(dev, ent->order);
	lbuf[sizeof(lbuf) - 1] = 0;

	if (sscanf(lbuf, "%u", &var) != 1)
		return -EINVAL;

	if (var < ent->limit)
		return -EINVAL;

	if (var > ent->size) {
		do {
			err = add_keys(dev, c, var - ent->size);
			if (err && err != -EAGAIN)
				return err;

			usleep_range(3000, 5000);
		} while (err);
	} else if (var < ent->size) {
		remove_keys(dev, c, ent->size - var);
	}

	return count;
}

static ssize_t size_read(struct file *filp, char __user *buf, size_t count,
			 loff_t *pos)
{
	struct mlx5_cache_ent *ent = filp->private_data;
	char lbuf[20];
	int err;

	if (*pos)
		return 0;

	err = snprintf(lbuf, sizeof(lbuf), "%d\n", ent->size);
	if (err < 0)
		return err;

	if (copy_to_user(buf, lbuf, err))
		return -EFAULT;

	*pos += err;

	return err;
}

static const struct file_operations size_fops = {
	.owner	= THIS_MODULE,
	.open	= simple_open,
	.write	= size_write,
	.read	= size_read,
};

static ssize_t limit_write(struct file *filp, const char __user *buf,
			   size_t count, loff_t *pos)
{
	struct mlx5_cache_ent *ent = filp->private_data;
	struct mlx5_ib_dev *dev = ent->dev;
	char lbuf[20];
	u32 var;
	int err;
	int c;

	if (copy_from_user(lbuf, buf, sizeof(lbuf)))
		return -EFAULT;

	c = order2idx(dev, ent->order);
	lbuf[sizeof(lbuf) - 1] = 0;

	if (sscanf(lbuf, "%u", &var) != 1)
		return -EINVAL;

	if (var > ent->size)
		return -EINVAL;

	ent->limit = var;

	if (ent->cur < ent->limit) {
		err = add_keys(dev, c, 2 * ent->limit - ent->cur);
		if (err)
			return err;
	}

	return count;
}

static ssize_t limit_read(struct file *filp, char __user *buf, size_t count,
			  loff_t *pos)
{
	struct mlx5_cache_ent *ent = filp->private_data;
	char lbuf[20];
	int err;

	if (*pos)
		return 0;

	err = snprintf(lbuf, sizeof(lbuf), "%d\n", ent->limit);
	if (err < 0)
		return err;

	if (copy_to_user(buf, lbuf, err))
		return -EFAULT;

	*pos += err;

	return err;
}

static const struct file_operations limit_fops = {
	.owner	= THIS_MODULE,
	.open	= simple_open,
	.write	= limit_write,
	.read	= limit_read,
};

static int someone_adding(struct mlx5_mr_cache *cache)
{
	int i;

	for (i = 0; i < MAX_MR_CACHE_ENTRIES; i++) {
		if (cache->ent[i].cur < cache->ent[i].limit)
			return 1;
	}

	return 0;
}

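/*
 * Per-entry cache maintenance, run from the cache workqueue.  Fills an
 * entry back up towards 2 * limit, backing off with delayed work when the
 * create command is throttled or fails, and trims it when it holds more
 * than 2 * limit MRs, no entry is below its limit and nothing has been
 * added to the cache for 300 seconds.
 */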
static void __cache_work_func(struct mlx5_cache_ent *ent)
{
	struct mlx5_ib_dev *dev = ent->dev;
	struct mlx5_mr_cache *cache = &dev->cache;
	int i = order2idx(dev, ent->order);
	int err;

	if (cache->stopped)
		return;

	ent = &dev->cache.ent[i];
	if (ent->cur < 2 * ent->limit && !dev->fill_delay) {
		err = add_keys(dev, i, 1);
		if (ent->cur < 2 * ent->limit) {
			if (err == -EAGAIN) {
				mlx5_ib_dbg(dev, "returned eagain, order %d\n",
					    i + 2);
				queue_delayed_work(cache->wq, &ent->dwork,
						   msecs_to_jiffies(3));
			} else if (err) {
				mlx5_ib_warn(dev, "command failed order %d, err %d\n",
					     i + 2, err);
				queue_delayed_work(cache->wq, &ent->dwork,
						   msecs_to_jiffies(1000));
			} else {
				queue_work(cache->wq, &ent->work);
			}
		}
	} else if (ent->cur > 2 * ent->limit) {
		if (!someone_adding(cache) &&
		    time_after(jiffies, cache->last_add + 300 * HZ)) {
			remove_keys(dev, i, 1);
			if (ent->cur > ent->limit)
				queue_work(cache->wq, &ent->work);
		} else {
			queue_delayed_work(cache->wq, &ent->dwork, 300 * HZ);
		}
	}
}

static void delayed_cache_work_func(struct work_struct *work)
{
	struct mlx5_cache_ent *ent;

	ent = container_of(work, struct mlx5_cache_ent, dwork.work);
	__cache_work_func(ent);
}

static void cache_work_func(struct work_struct *work)
{
	struct mlx5_cache_ent *ent;

	ent = container_of(work, struct mlx5_cache_ent, work);
	__cache_work_func(ent);
}

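/*
 * Take an MR of at least the requested order from the cache, searching
 * higher orders if the exact one is empty.  Entries found empty are queued
 * for a refill; NULL is returned (and the miss counter bumped) if no order
 * in range has a free MR.
 */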
static struct mlx5_ib_mr *alloc_cached_mr(struct mlx5_ib_dev *dev, int order)
{
	struct mlx5_mr_cache *cache = &dev->cache;
	struct mlx5_ib_mr *mr = NULL;
	struct mlx5_cache_ent *ent;
	int c;
	int i;

	c = order2idx(dev, order);
	if (c < 0 || c >= MAX_MR_CACHE_ENTRIES) {
		mlx5_ib_warn(dev, "order %d, cache index %d\n", order, c);
		return NULL;
	}

	for (i = c; i < MAX_MR_CACHE_ENTRIES; i++) {
		ent = &cache->ent[i];

		mlx5_ib_dbg(dev, "order %d, cache index %d\n", ent->order, i);

		spin_lock_irq(&ent->lock);
		if (!list_empty(&ent->head)) {
			mr = list_first_entry(&ent->head, struct mlx5_ib_mr,
					      list);
			list_del(&mr->list);
			ent->cur--;
			spin_unlock_irq(&ent->lock);
			if (ent->cur < ent->limit)
				queue_work(cache->wq, &ent->work);
			break;
		}
		spin_unlock_irq(&ent->lock);

		queue_work(cache->wq, &ent->work);
	}

	if (!mr)
		cache->ent[c].miss++;

	return mr;
}

static void free_cached_mr(struct mlx5_ib_dev *dev, struct mlx5_ib_mr *mr)
{
	struct mlx5_mr_cache *cache = &dev->cache;
	struct mlx5_cache_ent *ent;
	int shrink = 0;
	int c;

	c = order2idx(dev, mr->order);
	if (c < 0 || c >= MAX_MR_CACHE_ENTRIES) {
		mlx5_ib_warn(dev, "order %d, cache index %d\n", mr->order, c);
		return;
	}
	ent = &cache->ent[c];
	spin_lock_irq(&ent->lock);
	list_add_tail(&mr->list, &ent->head);
	ent->cur++;
	if (ent->cur > 2 * ent->limit)
		shrink = 1;
	spin_unlock_irq(&ent->lock);

	if (shrink)
		queue_work(cache->wq, &ent->work);
}

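/*
 * Drain cache entry @c completely, destroying every cached mkey.  Used on
 * teardown after the cache has been stopped.
 */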
static void clean_keys(struct mlx5_ib_dev *dev, int c)
{
	struct mlx5_mr_cache *cache = &dev->cache;
	struct mlx5_cache_ent *ent = &cache->ent[c];
	struct mlx5_ib_mr *mr;
	int err;

	cancel_delayed_work(&ent->dwork);
	while (1) {
		spin_lock_irq(&ent->lock);
		if (list_empty(&ent->head)) {
			spin_unlock_irq(&ent->lock);
			return;
		}
		mr = list_first_entry(&ent->head, struct mlx5_ib_mr, list);
		list_del(&mr->list);
		ent->cur--;
		ent->size--;
		spin_unlock_irq(&ent->lock);
		err = destroy_mkey(dev, mr);
		if (err)
			mlx5_ib_warn(dev, "failed destroy mkey\n");
		else
			kfree(mr);
	}
}

static int mlx5_mr_cache_debugfs_init(struct mlx5_ib_dev *dev)
{
	struct mlx5_mr_cache *cache = &dev->cache;
	struct mlx5_cache_ent *ent;
	int i;

	if (!mlx5_debugfs_root)
		return 0;

	cache->root = debugfs_create_dir("mr_cache", dev->mdev->priv.dbg_root);
	if (!cache->root)
		return -ENOMEM;

	for (i = 0; i < MAX_MR_CACHE_ENTRIES; i++) {
		ent = &cache->ent[i];
		sprintf(ent->name, "%d", ent->order);
		ent->dir = debugfs_create_dir(ent->name, cache->root);
		if (!ent->dir)
			return -ENOMEM;

		ent->fsize = debugfs_create_file("size", 0600, ent->dir, ent,
						 &size_fops);
		if (!ent->fsize)
			return -ENOMEM;

		ent->flimit = debugfs_create_file("limit", 0600, ent->dir, ent,
						  &limit_fops);
		if (!ent->flimit)
			return -ENOMEM;

		ent->fcur = debugfs_create_u32("cur", 0400, ent->dir,
					       &ent->cur);
		if (!ent->fcur)
			return -ENOMEM;

		ent->fmiss = debugfs_create_u32("miss", 0600, ent->dir,
						&ent->miss);
		if (!ent->fmiss)
			return -ENOMEM;
	}

	return 0;
}

static void mlx5_mr_cache_debugfs_cleanup(struct mlx5_ib_dev *dev)
{
	if (!mlx5_debugfs_root)
		return;

	debugfs_remove_recursive(dev->cache.root);
}

static void delay_time_func(unsigned long ctx)
{
	struct mlx5_ib_dev *dev = (struct mlx5_ib_dev *)ctx;

	dev->fill_delay = 0;
}

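/*
 * Set up the MR cache: one workqueue, one delay timer and one entry per
 * supported order (starting at order 2).  Each entry's initial limit comes
 * from the device profile when MLX5_PROF_MASK_MR_CACHE is set, and the
 * initial fill is kicked off asynchronously.
 */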
int mlx5_mr_cache_init(struct mlx5_ib_dev *dev)
{
	struct mlx5_mr_cache *cache = &dev->cache;
	struct mlx5_cache_ent *ent;
	int limit;
	int err;
	int i;

	cache->wq = create_singlethread_workqueue("mkey_cache");
	if (!cache->wq) {
		mlx5_ib_warn(dev, "failed to create work queue\n");
		return -ENOMEM;
	}

	setup_timer(&dev->delay_timer, delay_time_func, (unsigned long)dev);
	for (i = 0; i < MAX_MR_CACHE_ENTRIES; i++) {
		INIT_LIST_HEAD(&cache->ent[i].head);
		spin_lock_init(&cache->ent[i].lock);

		ent = &cache->ent[i];
		INIT_LIST_HEAD(&ent->head);
		spin_lock_init(&ent->lock);
		ent->order = i + 2;
		ent->dev = dev;

		if (dev->mdev->profile->mask & MLX5_PROF_MASK_MR_CACHE)
			limit = dev->mdev->profile->mr_cache[i].limit;
		else
			limit = 0;

		INIT_WORK(&ent->work, cache_work_func);
		INIT_DELAYED_WORK(&ent->dwork, delayed_cache_work_func);
		ent->limit = limit;
		queue_work(cache->wq, &ent->work);
	}

	err = mlx5_mr_cache_debugfs_init(dev);
	if (err)
		mlx5_ib_warn(dev, "cache debugfs failure\n");

	return 0;
}

int mlx5_mr_cache_cleanup(struct mlx5_ib_dev *dev)
{
	int i;

	dev->cache.stopped = 1;
	flush_workqueue(dev->cache.wq);

	mlx5_mr_cache_debugfs_cleanup(dev);

	for (i = 0; i < MAX_MR_CACHE_ENTRIES; i++)
		clean_keys(dev, i);

	destroy_workqueue(dev->cache.wq);
	del_timer_sync(&dev->delay_timer);

	return 0;
}

struct ib_mr *mlx5_ib_get_dma_mr(struct ib_pd *pd, int acc)
{
	struct mlx5_ib_dev *dev = to_mdev(pd->device);
	struct mlx5_core_dev *mdev = dev->mdev;
	struct mlx5_create_mkey_mbox_in *in;
	struct mlx5_mkey_seg *seg;
	struct mlx5_ib_mr *mr;
	int err;

	mr = kzalloc(sizeof(*mr), GFP_KERNEL);
	if (!mr)
		return ERR_PTR(-ENOMEM);

	in = kzalloc(sizeof(*in), GFP_KERNEL);
	if (!in) {
		err = -ENOMEM;
		goto err_free;
	}

	seg = &in->seg;
	seg->flags = convert_access(acc) | MLX5_ACCESS_MODE_PA;
	seg->flags_pd = cpu_to_be32(to_mpd(pd)->pdn | MLX5_MKEY_LEN64);
	seg->qpn_mkey7_0 = cpu_to_be32(0xffffff << 8);
	seg->start_addr = 0;

	err = mlx5_core_create_mkey(mdev, &mr->mmr, in, sizeof(*in), NULL, NULL,
				    NULL);
	if (err)
		goto err_in;

	kfree(in);
	mr->ibmr.lkey = mr->mmr.key;
	mr->ibmr.rkey = mr->mmr.key;
	mr->umem = NULL;

	return &mr->ibmr;

err_in:
	kfree(in);

err_free:
	kfree(mr);

	return ERR_PTR(err);
}

static int get_octo_len(u64 addr, u64 len, int page_size)
{
	u64 offset;
	int npages;

	offset = addr & (page_size - 1);
	npages = ALIGN(len + offset, page_size) >> ilog2(page_size);
	return (npages + 1) / 2;
}

static int use_umr(int order)
{
	return order <= MLX5_MAX_UMR_SHIFT;
}

static void prep_umr_reg_wqe(struct ib_pd *pd, struct ib_send_wr *wr,
			     struct ib_sge *sg, u64 dma, int n, u32 key,
			     int page_shift, u64 virt_addr, u64 len,
			     int access_flags)
{
	struct mlx5_ib_dev *dev = to_mdev(pd->device);
	struct mlx5_umr_wr *umrwr = umr_wr(wr);

	sg->addr = dma;
	sg->length = ALIGN(sizeof(u64) * n, 64);
	sg->lkey = dev->umrc.pd->local_dma_lkey;

	wr->next = NULL;
	wr->send_flags = 0;
	wr->sg_list = sg;
	if (n)
		wr->num_sge = 1;
	else
		wr->num_sge = 0;

	wr->opcode = MLX5_IB_WR_UMR;

	umrwr->npages = n;
	umrwr->page_shift = page_shift;
	umrwr->mkey = key;
	umrwr->target.virt_addr = virt_addr;
	umrwr->length = len;
	umrwr->access_flags = access_flags;
	umrwr->pd = pd;
}

static void prep_umr_unreg_wqe(struct mlx5_ib_dev *dev,
			       struct ib_send_wr *wr, u32 key)
{
	struct mlx5_umr_wr *umrwr = umr_wr(wr);

	wr->send_flags = MLX5_IB_SEND_UMR_UNREG | MLX5_IB_SEND_UMR_FAIL_IF_FREE;
	wr->opcode = MLX5_IB_WR_UMR;
	umrwr->mkey = key;
}

void mlx5_umr_cq_handler(struct ib_cq *cq, void *cq_context)
{
	struct mlx5_ib_umr_context *context;
	struct ib_wc wc;
	int err;

	while (1) {
		err = ib_poll_cq(cq, 1, &wc);
		if (err < 0) {
			pr_warn("poll cq error %d\n", err);
			return;
		}
		if (err == 0)
			break;

		context = (struct mlx5_ib_umr_context *) (unsigned long) wc.wr_id;
		context->status = wc.status;
		complete(&context->done);
	}
	ib_req_notify_cq(cq, IB_CQ_NEXT_COMP);
}

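/*
 * Register a user MR through the UMR QP using an MR taken from the cache.
 * The page list is staged in a 2k-aligned, DMA-mapped buffer and the UMR
 * work request is posted and waited for synchronously under the umrc
 * semaphore.  Returns ERR_PTR(-EAGAIN) when the cache cannot supply an MR.
 */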
static struct mlx5_ib_mr *reg_umr(struct ib_pd *pd, struct ib_umem *umem,
				  u64 virt_addr, u64 len, int npages,
				  int page_shift, int order, int access_flags)
{
	struct mlx5_ib_dev *dev = to_mdev(pd->device);
	struct device *ddev = dev->ib_dev.dma_device;
	struct umr_common *umrc = &dev->umrc;
	struct mlx5_ib_umr_context umr_context;
	struct mlx5_umr_wr umrwr;
	struct ib_send_wr *bad;
	struct mlx5_ib_mr *mr;
	struct ib_sge sg;
	int size;
	__be64 *mr_pas;
	__be64 *pas;
	dma_addr_t dma;
	int err = 0;
	int i;

	for (i = 0; i < 1; i++) {
		mr = alloc_cached_mr(dev, order);
		if (mr)
			break;

		err = add_keys(dev, order2idx(dev, order), 1);
		if (err && err != -EAGAIN) {
			mlx5_ib_warn(dev, "add_keys failed, err %d\n", err);
			break;
		}
	}

	if (!mr)
		return ERR_PTR(-EAGAIN);

	/* UMR copies MTTs in units of MLX5_UMR_MTT_ALIGNMENT bytes.
	 * To avoid copying garbage after the pas array, we allocate
	 * a little more. */
	size = ALIGN(sizeof(u64) * npages, MLX5_UMR_MTT_ALIGNMENT);
	mr_pas = kmalloc(size + MLX5_UMR_ALIGN - 1, GFP_KERNEL);
	if (!mr_pas) {
		err = -ENOMEM;
		goto free_mr;
	}

	pas = PTR_ALIGN(mr_pas, MLX5_UMR_ALIGN);
	mlx5_ib_populate_pas(dev, umem, page_shift, pas, MLX5_IB_MTT_PRESENT);
	/* Clear padding after the actual pages. */
	memset(pas + npages, 0, size - npages * sizeof(u64));

	dma = dma_map_single(ddev, pas, size, DMA_TO_DEVICE);
	if (dma_mapping_error(ddev, dma)) {
		err = -ENOMEM;
		goto free_pas;
	}

	memset(&umrwr, 0, sizeof(umrwr));
	umrwr.wr.wr_id = (u64)(unsigned long)&umr_context;
	prep_umr_reg_wqe(pd, &umrwr.wr, &sg, dma, npages, mr->mmr.key,
			 page_shift, virt_addr, len, access_flags);

	mlx5_ib_init_umr_context(&umr_context);
	down(&umrc->sem);
	err = ib_post_send(umrc->qp, &umrwr.wr, &bad);
	if (err) {
		mlx5_ib_warn(dev, "post send failed, err %d\n", err);
		goto unmap_dma;
	} else {
		wait_for_completion(&umr_context.done);
		if (umr_context.status != IB_WC_SUCCESS) {
			mlx5_ib_warn(dev, "reg umr failed\n");
			err = -EFAULT;
		}
	}

	mr->mmr.iova = virt_addr;
	mr->mmr.size = len;
	mr->mmr.pd = to_mpd(pd)->pdn;

	mr->live = 1;

unmap_dma:
	up(&umrc->sem);
	dma_unmap_single(ddev, dma, size, DMA_TO_DEVICE);

free_pas:
	kfree(mr_pas);

free_mr:
	if (err) {
		free_cached_mr(dev, mr);
		return ERR_PTR(err);
	}

	return mr;
}

#ifdef CONFIG_INFINIBAND_ON_DEMAND_PAGING
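/*
 * Update the MTT entries of an existing (ODP) MR for @npages pages starting
 * at @start_page_index, either repopulating them from the umem or zapping
 * them when @zap is set.  Works in MLX5_UMR_MTT_ALIGNMENT-sized chunks via
 * the UMR QP; a pre-allocated emergency buffer is used if a page cannot be
 * allocated with GFP_ATOMIC.
 */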
int mlx5_ib_update_mtt(struct mlx5_ib_mr *mr, u64 start_page_index, int npages,
		       int zap)
{
	struct mlx5_ib_dev *dev = mr->dev;
	struct device *ddev = dev->ib_dev.dma_device;
	struct umr_common *umrc = &dev->umrc;
	struct mlx5_ib_umr_context umr_context;
	struct ib_umem *umem = mr->umem;
	int size;
	__be64 *pas;
	dma_addr_t dma;
	struct ib_send_wr *bad;
	struct mlx5_umr_wr wr;
	struct ib_sge sg;
	int err = 0;
	const int page_index_alignment = MLX5_UMR_MTT_ALIGNMENT / sizeof(u64);
	const int page_index_mask = page_index_alignment - 1;
	size_t pages_mapped = 0;
	size_t pages_to_map = 0;
	size_t pages_iter = 0;
	int use_emergency_buf = 0;

	/* UMR copies MTTs in units of MLX5_UMR_MTT_ALIGNMENT bytes,
	 * so we need to align the offset and length accordingly */
	if (start_page_index & page_index_mask) {
		npages += start_page_index & page_index_mask;
		start_page_index &= ~page_index_mask;
	}

	pages_to_map = ALIGN(npages, page_index_alignment);

	if (start_page_index + pages_to_map > MLX5_MAX_UMR_PAGES)
		return -EINVAL;

	size = sizeof(u64) * pages_to_map;
	size = min_t(int, PAGE_SIZE, size);
	/* We allocate with GFP_ATOMIC to avoid recursion into page-reclaim
	 * code, when we are called from an invalidation. The pas buffer must
	 * be 2k-aligned for Connect-IB. */
	pas = (__be64 *)get_zeroed_page(GFP_ATOMIC);
	if (!pas) {
		mlx5_ib_warn(dev, "unable to allocate memory during MTT update, falling back to slower chunked mechanism.\n");
		pas = mlx5_ib_update_mtt_emergency_buffer;
		size = MLX5_UMR_MTT_MIN_CHUNK_SIZE;
		use_emergency_buf = 1;
		mutex_lock(&mlx5_ib_update_mtt_emergency_buffer_mutex);
		memset(pas, 0, size);
	}
	pages_iter = size / sizeof(u64);
	dma = dma_map_single(ddev, pas, size, DMA_TO_DEVICE);
	if (dma_mapping_error(ddev, dma)) {
		mlx5_ib_err(dev, "unable to map DMA during MTT update.\n");
		err = -ENOMEM;
		goto free_pas;
	}

	for (pages_mapped = 0;
	     pages_mapped < pages_to_map && !err;
	     pages_mapped += pages_iter, start_page_index += pages_iter) {
		dma_sync_single_for_cpu(ddev, dma, size, DMA_TO_DEVICE);

		npages = min_t(size_t,
			       pages_iter,
			       ib_umem_num_pages(umem) - start_page_index);

		if (!zap) {
			__mlx5_ib_populate_pas(dev, umem, PAGE_SHIFT,
					       start_page_index, npages, pas,
					       MLX5_IB_MTT_PRESENT);
			/* Clear padding after the pages brought from the
			 * umem. */
			memset(pas + npages, 0, size - npages * sizeof(u64));
		}

		dma_sync_single_for_device(ddev, dma, size, DMA_TO_DEVICE);

		memset(&wr, 0, sizeof(wr));
		wr.wr.wr_id = (u64)(unsigned long)&umr_context;

		sg.addr = dma;
		sg.length = ALIGN(npages * sizeof(u64),
				  MLX5_UMR_MTT_ALIGNMENT);
		sg.lkey = dev->umrc.pd->local_dma_lkey;

		wr.wr.send_flags = MLX5_IB_SEND_UMR_FAIL_IF_FREE |
				   MLX5_IB_SEND_UMR_UPDATE_MTT;
		wr.wr.sg_list = &sg;
		wr.wr.num_sge = 1;
		wr.wr.opcode = MLX5_IB_WR_UMR;
		wr.npages = sg.length / sizeof(u64);
		wr.page_shift = PAGE_SHIFT;
		wr.mkey = mr->mmr.key;
		wr.target.offset = start_page_index;

		mlx5_ib_init_umr_context(&umr_context);
		down(&umrc->sem);
		err = ib_post_send(umrc->qp, &wr.wr, &bad);
		if (err) {
			mlx5_ib_err(dev, "UMR post send failed, err %d\n", err);
		} else {
			wait_for_completion(&umr_context.done);
			if (umr_context.status != IB_WC_SUCCESS) {
				mlx5_ib_err(dev, "UMR completion failed, code %d\n",
					    umr_context.status);
				err = -EFAULT;
			}
		}
		up(&umrc->sem);
	}
	dma_unmap_single(ddev, dma, size, DMA_TO_DEVICE);

free_pas:
	if (!use_emergency_buf)
		free_page((unsigned long)pas);
	else
		mutex_unlock(&mlx5_ib_update_mtt_emergency_buffer_mutex);

	return err;
}
#endif

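/*
 * Register an MR with a regular (non-UMR) firmware CREATE_MKEY command.
 * Used when the region is too large for the UMR path or the cache could
 * not supply an MR.
 */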
static struct mlx5_ib_mr *reg_create(struct ib_pd *pd, u64 virt_addr,
				     u64 length, struct ib_umem *umem,
				     int npages, int page_shift,
				     int access_flags)
{
	struct mlx5_ib_dev *dev = to_mdev(pd->device);
	struct mlx5_create_mkey_mbox_in *in;
	struct mlx5_ib_mr *mr;
	int inlen;
	int err;
	bool pg_cap = !!(MLX5_CAP_GEN(dev->mdev, pg));

	mr = kzalloc(sizeof(*mr), GFP_KERNEL);
	if (!mr)
		return ERR_PTR(-ENOMEM);

	inlen = sizeof(*in) + sizeof(*in->pas) * ((npages + 1) / 2) * 2;
	in = mlx5_vzalloc(inlen);
	if (!in) {
		err = -ENOMEM;
		goto err_1;
	}
	mlx5_ib_populate_pas(dev, umem, page_shift, in->pas,
			     pg_cap ? MLX5_IB_MTT_PRESENT : 0);

	/* The MLX5_MKEY_INBOX_PG_ACCESS bit allows setting the access flags
	 * in the page list submitted with the command. */
	in->flags = pg_cap ? cpu_to_be32(MLX5_MKEY_INBOX_PG_ACCESS) : 0;
	in->seg.flags = convert_access(access_flags) |
		MLX5_ACCESS_MODE_MTT;
	in->seg.flags_pd = cpu_to_be32(to_mpd(pd)->pdn);
	in->seg.start_addr = cpu_to_be64(virt_addr);
	in->seg.len = cpu_to_be64(length);
	in->seg.bsfs_octo_size = 0;
	in->seg.xlt_oct_size = cpu_to_be32(get_octo_len(virt_addr, length, 1 << page_shift));
	in->seg.log2_page_size = page_shift;
	in->seg.qpn_mkey7_0 = cpu_to_be32(0xffffff << 8);
	in->xlat_oct_act_size = cpu_to_be32(get_octo_len(virt_addr, length,
							 1 << page_shift));
	err = mlx5_core_create_mkey(dev->mdev, &mr->mmr, in, inlen, NULL,
				    NULL, NULL);
	if (err) {
		mlx5_ib_warn(dev, "create mkey failed\n");
		goto err_2;
	}
	mr->umem = umem;
	mr->dev = dev;
	mr->live = 1;
	kvfree(in);

	mlx5_ib_dbg(dev, "mkey = 0x%x\n", mr->mmr.key);

	return mr;

err_2:
	kvfree(in);

err_1:
	kfree(mr);

	return ERR_PTR(err);
}

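/*
 * ib_reg_user_mr() entry point: pin the user memory, take the UMR/cached
 * path when the page order allows it, and otherwise fall back to a regular
 * CREATE_MKEY registration.  ODP MRs beyond the UMR size limit are rejected.
 */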
struct ib_mr *mlx5_ib_reg_user_mr(struct ib_pd *pd, u64 start, u64 length,
				  u64 virt_addr, int access_flags,
				  struct ib_udata *udata)
{
	struct mlx5_ib_dev *dev = to_mdev(pd->device);
	struct mlx5_ib_mr *mr = NULL;
	struct ib_umem *umem;
	int page_shift;
	int npages;
	int ncont;
	int order;
	int err;

	mlx5_ib_dbg(dev, "start 0x%llx, virt_addr 0x%llx, length 0x%llx, access_flags 0x%x\n",
		    start, virt_addr, length, access_flags);
	umem = ib_umem_get(pd->uobject->context, start, length, access_flags,
			   0);
	if (IS_ERR(umem)) {
		mlx5_ib_dbg(dev, "umem get failed (%ld)\n", PTR_ERR(umem));
		return (void *)umem;
	}

	mlx5_ib_cont_pages(umem, start, &npages, &page_shift, &ncont, &order);
	if (!npages) {
		mlx5_ib_warn(dev, "avoid zero region\n");
		err = -EINVAL;
		goto error;
	}

	mlx5_ib_dbg(dev, "npages %d, ncont %d, order %d, page_shift %d\n",
		    npages, ncont, order, page_shift);

	if (use_umr(order)) {
		mr = reg_umr(pd, umem, virt_addr, length, ncont, page_shift,
			     order, access_flags);
		if (PTR_ERR(mr) == -EAGAIN) {
			mlx5_ib_dbg(dev, "cache empty for order %d", order);
			mr = NULL;
		}
	} else if (access_flags & IB_ACCESS_ON_DEMAND) {
		err = -EINVAL;
		pr_err("Got MR registration for ODP MR > 512MB, not supported for Connect-IB");
		goto error;
	}

	if (!mr)
		mr = reg_create(pd, virt_addr, length, umem, ncont, page_shift,
				access_flags);

	if (IS_ERR(mr)) {
		err = PTR_ERR(mr);
		goto error;
	}

	mlx5_ib_dbg(dev, "mkey 0x%x\n", mr->mmr.key);

	mr->umem = umem;
	mr->npages = npages;
	atomic_add(npages, &dev->mdev->priv.reg_pages);
	mr->ibmr.lkey = mr->mmr.key;
	mr->ibmr.rkey = mr->mmr.key;

#ifdef CONFIG_INFINIBAND_ON_DEMAND_PAGING
	if (umem->odp_data) {
		/*
		 * This barrier prevents the compiler from moving the
		 * setting of umem->odp_data->private to point to our
		 * MR, before reg_umr finished, to ensure that the MR
		 * initialization has finished before starting to
		 * handle invalidations.
		 */
		smp_wmb();
		mr->umem->odp_data->private = mr;
		/*
		 * Make sure we will see the new
		 * umem->odp_data->private value in the invalidation
		 * routines, before we can get page faults on the
		 * MR. Page faults can happen once we put the MR in
		 * the tree, below this line. Without the barrier,
		 * there can be a fault handling and an invalidation
		 * before umem->odp_data->private == mr is visible to
		 * the invalidation handler.
		 */
		smp_wmb();
	}
#endif

	return &mr->ibmr;

error:
	ib_umem_release(umem);
	return ERR_PTR(err);
}

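/*
 * Release a UMR-registered mkey back to the "free" state by posting an
 * unregister work request on the UMR QP and waiting for its completion.
 */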
static int unreg_umr(struct mlx5_ib_dev *dev, struct mlx5_ib_mr *mr)
{
	struct umr_common *umrc = &dev->umrc;
	struct mlx5_ib_umr_context umr_context;
	struct mlx5_umr_wr umrwr;
	struct ib_send_wr *bad;
	int err;

	memset(&umrwr.wr, 0, sizeof(umrwr));
	umrwr.wr.wr_id = (u64)(unsigned long)&umr_context;
	prep_umr_unreg_wqe(dev, &umrwr.wr, mr->mmr.key);

	mlx5_ib_init_umr_context(&umr_context);
	down(&umrc->sem);
	err = ib_post_send(umrc->qp, &umrwr.wr, &bad);
	if (err) {
		up(&umrc->sem);
		mlx5_ib_dbg(dev, "err %d\n", err);
		goto error;
	} else {
		wait_for_completion(&umr_context.done);
		up(&umrc->sem);
	}
	if (umr_context.status != IB_WC_SUCCESS) {
		mlx5_ib_warn(dev, "unreg umr failed\n");
		err = -EFAULT;
		goto error;
	}
	return 0;

error:
	return err;
}

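/*
 * Tear down the HW state of an MR: destroy its signature PSVs if present,
 * then either destroy the mkey (non-cached MRs) or unregister it via UMR
 * and return it to the cache.
 */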
static int clean_mr(struct mlx5_ib_mr *mr)
{
	struct mlx5_ib_dev *dev = to_mdev(mr->ibmr.device);
	int umred = mr->umred;
	int err;

	if (mr->sig) {
		if (mlx5_core_destroy_psv(dev->mdev,
					  mr->sig->psv_memory.psv_idx))
			mlx5_ib_warn(dev, "failed to destroy mem psv %d\n",
				     mr->sig->psv_memory.psv_idx);
		if (mlx5_core_destroy_psv(dev->mdev,
					  mr->sig->psv_wire.psv_idx))
			mlx5_ib_warn(dev, "failed to destroy wire psv %d\n",
				     mr->sig->psv_wire.psv_idx);
		kfree(mr->sig);
		mr->sig = NULL;
	}

	if (!umred) {
		err = destroy_mkey(dev, mr);
		if (err) {
			mlx5_ib_warn(dev, "failed to destroy mkey 0x%x (%d)\n",
				     mr->mmr.key, err);
			return err;
		}
	} else {
		err = unreg_umr(dev, mr);
		if (err) {
			mlx5_ib_warn(dev, "failed unregister\n");
			return err;
		}
		free_cached_mr(dev, mr);
	}

	if (!umred)
		kfree(mr);

	return 0;
}

int mlx5_ib_dereg_mr(struct ib_mr *ibmr)
{
	struct mlx5_ib_dev *dev = to_mdev(ibmr->device);
	struct mlx5_ib_mr *mr = to_mmr(ibmr);
	int npages = mr->npages;
	struct ib_umem *umem = mr->umem;

#ifdef CONFIG_INFINIBAND_ON_DEMAND_PAGING
	if (umem && umem->odp_data) {
		/* Prevent new page faults from succeeding */
		mr->live = 0;
		/* Wait for all running page-fault handlers to finish. */
		synchronize_srcu(&dev->mr_srcu);
		/* Destroy all page mappings */
		mlx5_ib_invalidate_range(umem, ib_umem_start(umem),
					 ib_umem_end(umem));
		/*
		 * We kill the umem before the MR for ODP,
		 * so that there will not be any invalidations in
		 * flight, looking at the *mr struct.
		 */
		ib_umem_release(umem);
		atomic_sub(npages, &dev->mdev->priv.reg_pages);

		/* Avoid double-freeing the umem. */
		umem = NULL;
	}
#endif

	clean_mr(mr);

	if (umem) {
		ib_umem_release(umem);
		atomic_sub(npages, &dev->mdev->priv.reg_pages);
	}

	return 0;
}

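/*
 * Allocate a fast-registration MR of up to @max_num_sg entries.  Regular
 * IB_MR_TYPE_MEM_REG MRs use MTT translation; IB_MR_TYPE_SIGNATURE MRs
 * additionally get memory/wire PSVs and KLM access mode for signature
 * offload.
 */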
struct ib_mr *mlx5_ib_alloc_mr(struct ib_pd *pd,
			       enum ib_mr_type mr_type,
			       u32 max_num_sg)
{
	struct mlx5_ib_dev *dev = to_mdev(pd->device);
	struct mlx5_create_mkey_mbox_in *in;
	struct mlx5_ib_mr *mr;
	int access_mode, err;
	int ndescs = roundup(max_num_sg, 4);

	mr = kzalloc(sizeof(*mr), GFP_KERNEL);
	if (!mr)
		return ERR_PTR(-ENOMEM);

	in = kzalloc(sizeof(*in), GFP_KERNEL);
	if (!in) {
		err = -ENOMEM;
		goto err_free;
	}

	in->seg.status = MLX5_MKEY_STATUS_FREE;
	in->seg.xlt_oct_size = cpu_to_be32(ndescs);
	in->seg.qpn_mkey7_0 = cpu_to_be32(0xffffff << 8);
	in->seg.flags_pd = cpu_to_be32(to_mpd(pd)->pdn);

	if (mr_type == IB_MR_TYPE_MEM_REG) {
		access_mode = MLX5_ACCESS_MODE_MTT;
		in->seg.log2_page_size = PAGE_SHIFT;
	} else if (mr_type == IB_MR_TYPE_SIGNATURE) {
		u32 psv_index[2];

		in->seg.flags_pd = cpu_to_be32(be32_to_cpu(in->seg.flags_pd) |
					       MLX5_MKEY_BSF_EN);
		in->seg.bsfs_octo_size = cpu_to_be32(MLX5_MKEY_BSF_OCTO_SIZE);
		mr->sig = kzalloc(sizeof(*mr->sig), GFP_KERNEL);
		if (!mr->sig) {
			err = -ENOMEM;
			goto err_free_in;
		}

		/* create mem & wire PSVs */
		err = mlx5_core_create_psv(dev->mdev, to_mpd(pd)->pdn,
					   2, psv_index);
		if (err)
			goto err_free_sig;

		access_mode = MLX5_ACCESS_MODE_KLM;
		mr->sig->psv_memory.psv_idx = psv_index[0];
		mr->sig->psv_wire.psv_idx = psv_index[1];

		mr->sig->sig_status_checked = true;
		mr->sig->sig_err_exists = false;
		/* Next UMR, Arm SIGERR */
		++mr->sig->sigerr_count;
	} else {
		mlx5_ib_warn(dev, "Invalid mr type %d\n", mr_type);
		err = -EINVAL;
		goto err_free_in;
	}

	in->seg.flags = MLX5_PERM_UMR_EN | access_mode;
	err = mlx5_core_create_mkey(dev->mdev, &mr->mmr, in, sizeof(*in),
				    NULL, NULL, NULL);
	if (err)
		goto err_destroy_psv;

	mr->ibmr.lkey = mr->mmr.key;
	mr->ibmr.rkey = mr->mmr.key;
	mr->umem = NULL;
	kfree(in);

	return &mr->ibmr;

err_destroy_psv:
	if (mr->sig) {
		if (mlx5_core_destroy_psv(dev->mdev,
					  mr->sig->psv_memory.psv_idx))
			mlx5_ib_warn(dev, "failed to destroy mem psv %d\n",
				     mr->sig->psv_memory.psv_idx);
		if (mlx5_core_destroy_psv(dev->mdev,
					  mr->sig->psv_wire.psv_idx))
			mlx5_ib_warn(dev, "failed to destroy wire psv %d\n",
				     mr->sig->psv_wire.psv_idx);
	}
err_free_sig:
	kfree(mr->sig);
err_free_in:
	kfree(in);
err_free:
	kfree(mr);
	return ERR_PTR(err);
}

struct ib_fast_reg_page_list *mlx5_ib_alloc_fast_reg_page_list(struct ib_device *ibdev,
							       int page_list_len)
{
	struct mlx5_ib_fast_reg_page_list *mfrpl;
	int size = page_list_len * sizeof(u64);

	mfrpl = kmalloc(sizeof(*mfrpl), GFP_KERNEL);
	if (!mfrpl)
		return ERR_PTR(-ENOMEM);

	mfrpl->ibfrpl.page_list = kmalloc(size, GFP_KERNEL);
	if (!mfrpl->ibfrpl.page_list)
		goto err_free;

	mfrpl->mapped_page_list = dma_alloc_coherent(ibdev->dma_device,
						     size, &mfrpl->map,
						     GFP_KERNEL);
	if (!mfrpl->mapped_page_list)
		goto err_free;

	WARN_ON(mfrpl->map & 0x3f);

	return &mfrpl->ibfrpl;

err_free:
	kfree(mfrpl->ibfrpl.page_list);
	kfree(mfrpl);
	return ERR_PTR(-ENOMEM);
}

void mlx5_ib_free_fast_reg_page_list(struct ib_fast_reg_page_list *page_list)
{
	struct mlx5_ib_fast_reg_page_list *mfrpl = to_mfrpl(page_list);
	struct mlx5_ib_dev *dev = to_mdev(page_list->device);
	int size = page_list->max_page_list_len * sizeof(u64);

	dma_free_coherent(&dev->mdev->pdev->dev, size, mfrpl->mapped_page_list,
			  mfrpl->map);
	kfree(mfrpl->ibfrpl.page_list);
	kfree(mfrpl);
}

int mlx5_ib_check_mr_status(struct ib_mr *ibmr, u32 check_mask,
			    struct ib_mr_status *mr_status)
{
	struct mlx5_ib_mr *mmr = to_mmr(ibmr);
	int ret = 0;

	if (check_mask & ~IB_MR_CHECK_SIG_STATUS) {
		pr_err("Invalid status check mask\n");
		ret = -EINVAL;
		goto done;
	}

	mr_status->fail_status = 0;
	if (check_mask & IB_MR_CHECK_SIG_STATUS) {
		if (!mmr->sig) {
			ret = -EINVAL;
			pr_err("signature status check requested on a non-signature enabled MR\n");
			goto done;
		}

		mmr->sig->sig_status_checked = true;
		if (!mmr->sig->sig_err_exists)
			goto done;

		if (ibmr->lkey == mmr->sig->err_item.key)
			memcpy(&mr_status->sig_err, &mmr->sig->err_item,
			       sizeof(mr_status->sig_err));
		else {
			mr_status->sig_err.err_type = IB_SIG_BAD_GUARD;
			mr_status->sig_err.sig_err_offset = 0;
			mr_status->sig_err.key = mmr->sig->err_item.key;
		}

		mmr->sig->sig_err_exists = false;
		mr_status->fail_status |= IB_MR_CHECK_SIG_STATUS;
	}

done:
	return ret;
}