/*
 * Copyright (c) 2013-2015, Mellanox Technologies. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses. You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */


#include <linux/kref.h>
#include <linux/random.h>
#include <linux/debugfs.h>
#include <linux/export.h>
#include <linux/delay.h>
#include <rdma/ib_umem.h>
#include <rdma/ib_umem_odp.h>
#include <rdma/ib_verbs.h>
#include "mlx5_ib.h"

enum {
	MAX_PENDING_REG_MR = 8,
};

#define MLX5_UMR_ALIGN 2048

static void clean_mr(struct mlx5_ib_dev *dev, struct mlx5_ib_mr *mr);
static void dereg_mr(struct mlx5_ib_dev *dev, struct mlx5_ib_mr *mr);
static int mr_cache_max_order(struct mlx5_ib_dev *dev);
static int unreg_umr(struct mlx5_ib_dev *dev, struct mlx5_ib_mr *mr);

static bool umr_can_use_indirect_mkey(struct mlx5_ib_dev *dev)
{
	return !MLX5_CAP_GEN(dev->mdev, umr_indirect_mkey_disabled);
}

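/*
 * Destroy the hardware mkey behind an MR. With ODP enabled, wait for an
 * SRCU grace period so no page-fault handler can still be using it.
 */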
static int destroy_mkey(struct mlx5_ib_dev *dev, struct mlx5_ib_mr *mr)
{
	int err = mlx5_core_destroy_mkey(dev->mdev, &mr->mmkey);

	if (IS_ENABLED(CONFIG_INFINIBAND_ON_DEMAND_PAGING))
		/* Wait until all page fault handlers using the mr complete. */
		synchronize_srcu(&dev->mr_srcu);

	return err;
}

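/* Map an MR order to its index in the MR cache entry array. */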
static int order2idx(struct mlx5_ib_dev *dev, int order)
{
	struct mlx5_mr_cache *cache = &dev->cache;

	if (order < cache->ent[0].order)
		return 0;
	else
		return order - cache->ent[0].order;
}

static bool use_umr_mtt_update(struct mlx5_ib_mr *mr, u64 start, u64 length)
{
	return ((u64)1 << mr->order) * MLX5_ADAPTER_PAGE_SIZE >=
		length + (start & (MLX5_ADAPTER_PAGE_SIZE - 1));
}

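/*
 * Completion callback for the asynchronous mkey creation issued by
 * add_keys(): build the final mkey (index | variable key byte), add the
 * MR to its cache entry and publish it in the device mkey table.
 */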
static void reg_mr_callback(int status, struct mlx5_async_work *context)
{
	struct mlx5_ib_mr *mr =
		container_of(context, struct mlx5_ib_mr, cb_work);
	struct mlx5_ib_dev *dev = mr->dev;
	struct mlx5_mr_cache *cache = &dev->cache;
	int c = order2idx(dev, mr->order);
	struct mlx5_cache_ent *ent = &cache->ent[c];
	u8 key;
	unsigned long flags;
	struct xarray *mkeys = &dev->mdev->priv.mkey_table;
	int err;

	spin_lock_irqsave(&ent->lock, flags);
	ent->pending--;
	spin_unlock_irqrestore(&ent->lock, flags);
	if (status) {
		mlx5_ib_warn(dev, "async reg mr failed. status %d\n", status);
		kfree(mr);
		dev->fill_delay = 1;
		mod_timer(&dev->delay_timer, jiffies + HZ);
		return;
	}

	mr->mmkey.type = MLX5_MKEY_MR;
	spin_lock_irqsave(&dev->mdev->priv.mkey_lock, flags);
	key = dev->mdev->priv.mkey_key++;
	spin_unlock_irqrestore(&dev->mdev->priv.mkey_lock, flags);
	mr->mmkey.key = mlx5_idx_to_mkey(MLX5_GET(create_mkey_out, mr->out, mkey_index)) | key;

	cache->last_add = jiffies;

	spin_lock_irqsave(&ent->lock, flags);
	list_add_tail(&mr->list, &ent->head);
	ent->cur++;
	ent->size++;
	spin_unlock_irqrestore(&ent->lock, flags);

	xa_lock_irqsave(mkeys, flags);
	err = xa_err(__xa_store(mkeys, mlx5_base_mkey(mr->mmkey.key),
				&mr->mmkey, GFP_ATOMIC));
	xa_unlock_irqrestore(mkeys, flags);
	if (err)
		pr_err("Error inserting to mkey tree. 0x%x\n", -err);

	if (!completion_done(&ent->compl))
		complete(&ent->compl);
}

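/*
 * Asynchronously create up to @num mkeys for cache entry @c. Stops with
 * -EAGAIN once MAX_PENDING_REG_MR creations are already in flight.
 */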
static int add_keys(struct mlx5_ib_dev *dev, int c, int num)
{
	struct mlx5_mr_cache *cache = &dev->cache;
	struct mlx5_cache_ent *ent = &cache->ent[c];
	int inlen = MLX5_ST_SZ_BYTES(create_mkey_in);
	struct mlx5_ib_mr *mr;
	void *mkc;
	u32 *in;
	int err = 0;
	int i;

	in = kzalloc(inlen, GFP_KERNEL);
	if (!in)
		return -ENOMEM;

	mkc = MLX5_ADDR_OF(create_mkey_in, in, memory_key_mkey_entry);
	for (i = 0; i < num; i++) {
		if (ent->pending >= MAX_PENDING_REG_MR) {
			err = -EAGAIN;
			break;
		}

		mr = kzalloc(sizeof(*mr), GFP_KERNEL);
		if (!mr) {
			err = -ENOMEM;
			break;
		}
		mr->order = ent->order;
		mr->allocated_from_cache = 1;
		mr->dev = dev;

		MLX5_SET(mkc, mkc, free, 1);
		MLX5_SET(mkc, mkc, umr_en, 1);
		MLX5_SET(mkc, mkc, access_mode_1_0, ent->access_mode & 0x3);
		MLX5_SET(mkc, mkc, access_mode_4_2,
			 (ent->access_mode >> 2) & 0x7);

		MLX5_SET(mkc, mkc, qpn, 0xffffff);
		MLX5_SET(mkc, mkc, translations_octword_size, ent->xlt);
		MLX5_SET(mkc, mkc, log_page_size, ent->page);

		spin_lock_irq(&ent->lock);
		ent->pending++;
		spin_unlock_irq(&ent->lock);
		err = mlx5_core_create_mkey_cb(dev->mdev, &mr->mmkey,
					       &dev->async_ctx, in, inlen,
					       mr->out, sizeof(mr->out),
					       reg_mr_callback, &mr->cb_work);
		if (err) {
			spin_lock_irq(&ent->lock);
			ent->pending--;
			spin_unlock_irq(&ent->lock);
			mlx5_ib_warn(dev, "create mkey failed %d\n", err);
			kfree(mr);
			break;
		}
	}

	kfree(in);
	return err;
}

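/*
 * Remove up to @num MRs from cache entry @c and destroy their mkeys.
 * With ODP enabled, the structs are freed only after an SRCU grace period.
 */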
static void remove_keys(struct mlx5_ib_dev *dev, int c, int num)
{
	struct mlx5_mr_cache *cache = &dev->cache;
	struct mlx5_cache_ent *ent = &cache->ent[c];
	struct mlx5_ib_mr *tmp_mr;
	struct mlx5_ib_mr *mr;
	LIST_HEAD(del_list);
	int i;

	for (i = 0; i < num; i++) {
		spin_lock_irq(&ent->lock);
		if (list_empty(&ent->head)) {
			spin_unlock_irq(&ent->lock);
			break;
		}
		mr = list_first_entry(&ent->head, struct mlx5_ib_mr, list);
		list_move(&mr->list, &del_list);
		ent->cur--;
		ent->size--;
		spin_unlock_irq(&ent->lock);
		mlx5_core_destroy_mkey(dev->mdev, &mr->mmkey);
	}

	if (IS_ENABLED(CONFIG_INFINIBAND_ON_DEMAND_PAGING))
		synchronize_srcu(&dev->mr_srcu);

	list_for_each_entry_safe(mr, tmp_mr, &del_list, list) {
		list_del(&mr->list);
		kfree(mr);
	}
}

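/*
 * debugfs "size" attribute: grow the entry with add_keys() or shrink it
 * with remove_keys() until it holds the requested number of MRs.
 */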
static ssize_t size_write(struct file *filp, const char __user *buf,
			  size_t count, loff_t *pos)
{
	struct mlx5_cache_ent *ent = filp->private_data;
	struct mlx5_ib_dev *dev = ent->dev;
	char lbuf[20] = {0};
	u32 var;
	int err;
	int c;

	count = min(count, sizeof(lbuf) - 1);
	if (copy_from_user(lbuf, buf, count))
		return -EFAULT;

	c = order2idx(dev, ent->order);

	if (sscanf(lbuf, "%u", &var) != 1)
		return -EINVAL;

	if (var < ent->limit)
		return -EINVAL;

	if (var > ent->size) {
		do {
			err = add_keys(dev, c, var - ent->size);
			if (err && err != -EAGAIN)
				return err;

			usleep_range(3000, 5000);
		} while (err);
	} else if (var < ent->size) {
		remove_keys(dev, c, ent->size - var);
	}

	return count;
}

static ssize_t size_read(struct file *filp, char __user *buf, size_t count,
			 loff_t *pos)
{
	struct mlx5_cache_ent *ent = filp->private_data;
	char lbuf[20];
	int err;

	err = snprintf(lbuf, sizeof(lbuf), "%d\n", ent->size);
	if (err < 0)
		return err;

	return simple_read_from_buffer(buf, count, pos, lbuf, err);
}

static const struct file_operations size_fops = {
	.owner	= THIS_MODULE,
	.open	= simple_open,
	.write	= size_write,
	.read	= size_read,
};

static ssize_t limit_write(struct file *filp, const char __user *buf,
			   size_t count, loff_t *pos)
{
	struct mlx5_cache_ent *ent = filp->private_data;
	struct mlx5_ib_dev *dev = ent->dev;
	char lbuf[20] = {0};
	u32 var;
	int err;
	int c;

	count = min(count, sizeof(lbuf) - 1);
	if (copy_from_user(lbuf, buf, count))
		return -EFAULT;

	c = order2idx(dev, ent->order);

	if (sscanf(lbuf, "%u", &var) != 1)
		return -EINVAL;

	if (var > ent->size)
		return -EINVAL;

	ent->limit = var;

	if (ent->cur < ent->limit) {
		err = add_keys(dev, c, 2 * ent->limit - ent->cur);
		if (err)
			return err;
	}

	return count;
}

static ssize_t limit_read(struct file *filp, char __user *buf, size_t count,
			  loff_t *pos)
{
	struct mlx5_cache_ent *ent = filp->private_data;
	char lbuf[20];
	int err;

	err = snprintf(lbuf, sizeof(lbuf), "%d\n", ent->limit);
	if (err < 0)
		return err;

	return simple_read_from_buffer(buf, count, pos, lbuf, err);
}

static const struct file_operations limit_fops = {
	.owner	= THIS_MODULE,
	.open	= simple_open,
	.write	= limit_write,
	.read	= limit_read,
};

static int someone_adding(struct mlx5_mr_cache *cache)
{
	int i;

	for (i = 0; i < MAX_MR_CACHE_ENTRIES; i++) {
		if (cache->ent[i].cur < cache->ent[i].limit)
			return 1;
	}

	return 0;
}

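/*
 * Per-entry background worker: refill an entry toward 2 * limit while
 * creation is allowed, and lazily trim it back once it grows beyond that
 * watermark (trimming is treated as low-priority garbage collection).
 */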
static void __cache_work_func(struct mlx5_cache_ent *ent)
{
	struct mlx5_ib_dev *dev = ent->dev;
	struct mlx5_mr_cache *cache = &dev->cache;
	int i = order2idx(dev, ent->order);
	int err;

	if (cache->stopped)
		return;

	ent = &dev->cache.ent[i];
	if (ent->cur < 2 * ent->limit && !dev->fill_delay) {
		err = add_keys(dev, i, 1);
		if (ent->cur < 2 * ent->limit) {
			if (err == -EAGAIN) {
				mlx5_ib_dbg(dev, "returned eagain, order %d\n",
					    i + 2);
				queue_delayed_work(cache->wq, &ent->dwork,
						   msecs_to_jiffies(3));
			} else if (err) {
				mlx5_ib_warn(dev, "command failed order %d, err %d\n",
					     i + 2, err);
				queue_delayed_work(cache->wq, &ent->dwork,
						   msecs_to_jiffies(1000));
			} else {
				queue_work(cache->wq, &ent->work);
			}
		}
	} else if (ent->cur > 2 * ent->limit) {
		/*
		 * The remove_keys() logic is performed as a garbage
		 * collection task. Such a task is intended to run when no
		 * other active processes are running.
		 *
		 * need_resched() returns true if there are user tasks to be
		 * activated in the near future.
		 *
		 * In that case, we don't execute remove_keys() and postpone
		 * the garbage collection work to the next cycle, in order to
		 * free CPU resources to other tasks.
		 */
		if (!need_resched() && !someone_adding(cache) &&
		    time_after(jiffies, cache->last_add + 300 * HZ)) {
			remove_keys(dev, i, 1);
			if (ent->cur > ent->limit)
				queue_work(cache->wq, &ent->work);
		} else {
			queue_delayed_work(cache->wq, &ent->dwork, 300 * HZ);
		}
	}
}

static void delayed_cache_work_func(struct work_struct *work)
{
	struct mlx5_cache_ent *ent;

	ent = container_of(work, struct mlx5_cache_ent, dwork.work);
	__cache_work_func(ent);
}

static void cache_work_func(struct work_struct *work)
{
	struct mlx5_cache_ent *ent;

	ent = container_of(work, struct mlx5_cache_ent, work);
	__cache_work_func(ent);
}

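/*
 * Allocate an MR from a specific cache entry, triggering an asynchronous
 * refill and waiting on the entry completion if it is currently empty.
 */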
struct mlx5_ib_mr *mlx5_mr_cache_alloc(struct mlx5_ib_dev *dev, int entry)
{
	struct mlx5_mr_cache *cache = &dev->cache;
	struct mlx5_cache_ent *ent;
	struct mlx5_ib_mr *mr;
	int err;

	if (entry < 0 || entry >= MAX_MR_CACHE_ENTRIES) {
		mlx5_ib_err(dev, "cache entry %d is out of range\n", entry);
		return NULL;
	}

	ent = &cache->ent[entry];
	while (1) {
		spin_lock_irq(&ent->lock);
		if (list_empty(&ent->head)) {
			spin_unlock_irq(&ent->lock);

			err = add_keys(dev, entry, 1);
			if (err && err != -EAGAIN)
				return ERR_PTR(err);

			wait_for_completion(&ent->compl);
		} else {
			mr = list_first_entry(&ent->head, struct mlx5_ib_mr,
					      list);
			list_del(&mr->list);
			ent->cur--;
			spin_unlock_irq(&ent->lock);
			if (ent->cur < ent->limit)
				queue_work(cache->wq, &ent->work);
			return mr;
		}
	}
}

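/*
 * Take an MR of at least @order from the cache, scanning larger entries
 * when the exact-fit entry is empty and counting a miss if none is found.
 */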
static struct mlx5_ib_mr *alloc_cached_mr(struct mlx5_ib_dev *dev, int order)
{
	struct mlx5_mr_cache *cache = &dev->cache;
	struct mlx5_ib_mr *mr = NULL;
	struct mlx5_cache_ent *ent;
	int last_umr_cache_entry;
	int c;
	int i;

	c = order2idx(dev, order);
	last_umr_cache_entry = order2idx(dev, mr_cache_max_order(dev));
	if (c < 0 || c > last_umr_cache_entry) {
		mlx5_ib_warn(dev, "order %d, cache index %d\n", order, c);
		return NULL;
	}

	for (i = c; i <= last_umr_cache_entry; i++) {
		ent = &cache->ent[i];

		mlx5_ib_dbg(dev, "order %d, cache index %d\n", ent->order, i);

		spin_lock_irq(&ent->lock);
		if (!list_empty(&ent->head)) {
			mr = list_first_entry(&ent->head, struct mlx5_ib_mr,
					      list);
			list_del(&mr->list);
			ent->cur--;
			spin_unlock_irq(&ent->lock);
			if (ent->cur < ent->limit)
				queue_work(cache->wq, &ent->work);
			break;
		}
		spin_unlock_irq(&ent->lock);

		queue_work(cache->wq, &ent->work);
	}

	if (!mr)
		cache->ent[c].miss++;

	return mr;
}

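/*
 * Return a cache-allocated MR to its entry. If the mkey cannot be
 * invalidated via UMR, destroy it instead of recycling it.
 */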
void mlx5_mr_cache_free(struct mlx5_ib_dev *dev, struct mlx5_ib_mr *mr)
{
	struct mlx5_mr_cache *cache = &dev->cache;
	struct mlx5_cache_ent *ent;
	int shrink = 0;
	int c;

	if (!mr->allocated_from_cache)
		return;

	c = order2idx(dev, mr->order);
	WARN_ON(c < 0 || c >= MAX_MR_CACHE_ENTRIES);

	if (unreg_umr(dev, mr)) {
		mr->allocated_from_cache = false;
		destroy_mkey(dev, mr);
		ent = &cache->ent[c];
		if (ent->cur < ent->limit)
			queue_work(cache->wq, &ent->work);
		return;
	}

	ent = &cache->ent[c];
	spin_lock_irq(&ent->lock);
	list_add_tail(&mr->list, &ent->head);
	ent->cur++;
	if (ent->cur > 2 * ent->limit)
		shrink = 1;
	spin_unlock_irq(&ent->lock);

	if (shrink)
		queue_work(cache->wq, &ent->work);
}

static void clean_keys(struct mlx5_ib_dev *dev, int c)
{
	struct mlx5_mr_cache *cache = &dev->cache;
	struct mlx5_cache_ent *ent = &cache->ent[c];
	struct mlx5_ib_mr *tmp_mr;
	struct mlx5_ib_mr *mr;
	LIST_HEAD(del_list);

	cancel_delayed_work(&ent->dwork);
	while (1) {
		spin_lock_irq(&ent->lock);
		if (list_empty(&ent->head)) {
			spin_unlock_irq(&ent->lock);
			break;
		}
		mr = list_first_entry(&ent->head, struct mlx5_ib_mr, list);
		list_move(&mr->list, &del_list);
		ent->cur--;
		ent->size--;
		spin_unlock_irq(&ent->lock);
		mlx5_core_destroy_mkey(dev->mdev, &mr->mmkey);
	}

#ifdef CONFIG_INFINIBAND_ON_DEMAND_PAGING
	synchronize_srcu(&dev->mr_srcu);
#endif

	list_for_each_entry_safe(mr, tmp_mr, &del_list, list) {
		list_del(&mr->list);
		kfree(mr);
	}
}

static void mlx5_mr_cache_debugfs_cleanup(struct mlx5_ib_dev *dev)
{
	if (!mlx5_debugfs_root || dev->is_rep)
		return;

	debugfs_remove_recursive(dev->cache.root);
	dev->cache.root = NULL;
}

static void mlx5_mr_cache_debugfs_init(struct mlx5_ib_dev *dev)
{
	struct mlx5_mr_cache *cache = &dev->cache;
	struct mlx5_cache_ent *ent;
	struct dentry *dir;
	int i;

	if (!mlx5_debugfs_root || dev->is_rep)
		return;

	cache->root = debugfs_create_dir("mr_cache", dev->mdev->priv.dbg_root);

	for (i = 0; i < MAX_MR_CACHE_ENTRIES; i++) {
		ent = &cache->ent[i];
		sprintf(ent->name, "%d", ent->order);
		dir = debugfs_create_dir(ent->name, cache->root);
		debugfs_create_file("size", 0600, dir, ent, &size_fops);
		debugfs_create_file("limit", 0600, dir, ent, &limit_fops);
		debugfs_create_u32("cur", 0400, dir, &ent->cur);
		debugfs_create_u32("miss", 0600, dir, &ent->miss);
	}
}

static void delay_time_func(struct timer_list *t)
{
	struct mlx5_ib_dev *dev = from_timer(dev, t, delay_timer);

	dev->fill_delay = 0;
}

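/*
 * Set up the MR cache: ordered workqueue, fill-delay timer and one entry
 * per order starting at 2; entries above MR_CACHE_LAST_STD_ENTRY are
 * reserved for ODP use.
 */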
int mlx5_mr_cache_init(struct mlx5_ib_dev *dev)
{
	struct mlx5_mr_cache *cache = &dev->cache;
	struct mlx5_cache_ent *ent;
	int i;

	mutex_init(&dev->slow_path_mutex);
	cache->wq = alloc_ordered_workqueue("mkey_cache", WQ_MEM_RECLAIM);
	if (!cache->wq) {
		mlx5_ib_warn(dev, "failed to create work queue\n");
		return -ENOMEM;
	}

	mlx5_cmd_init_async_ctx(dev->mdev, &dev->async_ctx);
	timer_setup(&dev->delay_timer, delay_time_func, 0);
	for (i = 0; i < MAX_MR_CACHE_ENTRIES; i++) {
		ent = &cache->ent[i];
		INIT_LIST_HEAD(&ent->head);
		spin_lock_init(&ent->lock);
		ent->order = i + 2;
		ent->dev = dev;
		ent->limit = 0;

		init_completion(&ent->compl);
		INIT_WORK(&ent->work, cache_work_func);
		INIT_DELAYED_WORK(&ent->dwork, delayed_cache_work_func);

		if (i > MR_CACHE_LAST_STD_ENTRY) {
			mlx5_odp_init_mr_cache_entry(ent);
			continue;
		}

		if (ent->order > mr_cache_max_order(dev))
			continue;

		ent->page = PAGE_SHIFT;
		ent->xlt = (1 << ent->order) * sizeof(struct mlx5_mtt) /
			   MLX5_IB_UMR_OCTOWORD;
		ent->access_mode = MLX5_MKC_ACCESS_MODE_MTT;
		if ((dev->mdev->profile->mask & MLX5_PROF_MASK_MR_CACHE) &&
		    !dev->is_rep &&
		    mlx5_core_is_pf(dev->mdev))
			ent->limit = dev->mdev->profile->mr_cache[i].limit;
		else
			ent->limit = 0;
		queue_work(cache->wq, &ent->work);
	}

	mlx5_mr_cache_debugfs_init(dev);

	return 0;
}

int mlx5_mr_cache_cleanup(struct mlx5_ib_dev *dev)
{
	int i;

	if (!dev->cache.wq)
		return 0;

	dev->cache.stopped = 1;
	flush_workqueue(dev->cache.wq);

	mlx5_mr_cache_debugfs_cleanup(dev);
	mlx5_cmd_cleanup_async_ctx(&dev->async_ctx);

	for (i = 0; i < MAX_MR_CACHE_ENTRIES; i++)
		clean_keys(dev, i);

	destroy_workqueue(dev->cache.wq);
	del_timer_sync(&dev->delay_timer);

	return 0;
}

struct ib_mr *mlx5_ib_get_dma_mr(struct ib_pd *pd, int acc)
{
	struct mlx5_ib_dev *dev = to_mdev(pd->device);
	int inlen = MLX5_ST_SZ_BYTES(create_mkey_in);
	struct mlx5_core_dev *mdev = dev->mdev;
	struct mlx5_ib_mr *mr;
	void *mkc;
	u32 *in;
	int err;

	mr = kzalloc(sizeof(*mr), GFP_KERNEL);
	if (!mr)
		return ERR_PTR(-ENOMEM);

	in = kzalloc(inlen, GFP_KERNEL);
	if (!in) {
		err = -ENOMEM;
		goto err_free;
	}

	mkc = MLX5_ADDR_OF(create_mkey_in, in, memory_key_mkey_entry);

	MLX5_SET(mkc, mkc, access_mode_1_0, MLX5_MKC_ACCESS_MODE_PA);
	MLX5_SET(mkc, mkc, a, !!(acc & IB_ACCESS_REMOTE_ATOMIC));
	MLX5_SET(mkc, mkc, rw, !!(acc & IB_ACCESS_REMOTE_WRITE));
	MLX5_SET(mkc, mkc, rr, !!(acc & IB_ACCESS_REMOTE_READ));
	MLX5_SET(mkc, mkc, lw, !!(acc & IB_ACCESS_LOCAL_WRITE));
	MLX5_SET(mkc, mkc, lr, 1);

	MLX5_SET(mkc, mkc, length64, 1);
	MLX5_SET(mkc, mkc, pd, to_mpd(pd)->pdn);
	MLX5_SET(mkc, mkc, qpn, 0xffffff);
	MLX5_SET64(mkc, mkc, start_addr, 0);

	err = mlx5_core_create_mkey(mdev, &mr->mmkey, in, inlen);
	if (err)
		goto err_in;

	kfree(in);
	mr->mmkey.type = MLX5_MKEY_MR;
	mr->ibmr.lkey = mr->mmkey.key;
	mr->ibmr.rkey = mr->mmkey.key;
	mr->umem = NULL;

	return &mr->ibmr;

err_in:
	kfree(in);

err_free:
	kfree(mr);

	return ERR_PTR(err);
}

static int get_octo_len(u64 addr, u64 len, int page_shift)
{
	u64 page_size = 1ULL << page_shift;
	u64 offset;
	int npages;

	offset = addr & (page_size - 1);
	npages = ALIGN(len + offset, page_size) >> page_shift;
	return (npages + 1) / 2;
}

static int mr_cache_max_order(struct mlx5_ib_dev *dev)
{
	if (MLX5_CAP_GEN(dev->mdev, umr_extended_translation_offset))
		return MR_CACHE_LAST_STD_ENTRY + 2;
	return MLX5_MAX_UMR_SHIFT;
}

static int mr_umem_get(struct mlx5_ib_dev *dev, struct ib_udata *udata,
		       u64 start, u64 length, int access_flags,
		       struct ib_umem **umem, int *npages, int *page_shift,
		       int *ncont, int *order)
{
	struct ib_umem *u;

	*umem = NULL;

	if (access_flags & IB_ACCESS_ON_DEMAND) {
		struct ib_umem_odp *odp;

		odp = ib_umem_odp_get(udata, start, length, access_flags);
		if (IS_ERR(odp)) {
			mlx5_ib_dbg(dev, "umem get failed (%ld)\n",
				    PTR_ERR(odp));
			return PTR_ERR(odp);
		}

		u = &odp->umem;

		*page_shift = odp->page_shift;
		*ncont = ib_umem_odp_num_pages(odp);
		*npages = *ncont << (*page_shift - PAGE_SHIFT);
		if (order)
			*order = ilog2(roundup_pow_of_two(*ncont));
	} else {
		u = ib_umem_get(udata, start, length, access_flags, 0);
		if (IS_ERR(u)) {
			mlx5_ib_dbg(dev, "umem get failed (%ld)\n", PTR_ERR(u));
			return PTR_ERR(u);
		}

		mlx5_ib_cont_pages(u, start, MLX5_MKEY_PAGE_SHIFT_MASK, npages,
				   page_shift, ncont, order);
	}

	if (!*npages) {
		mlx5_ib_warn(dev, "avoid zero region\n");
		ib_umem_release(u);
		return -EINVAL;
	}

	*umem = u;

	mlx5_ib_dbg(dev, "npages %d, ncont %d, order %d, page_shift %d\n",
		    *npages, *ncont, *order, *page_shift);

	return 0;
}

static void mlx5_ib_umr_done(struct ib_cq *cq, struct ib_wc *wc)
{
	struct mlx5_ib_umr_context *context =
		container_of(wc->wr_cqe, struct mlx5_ib_umr_context, cqe);

	context->status = wc->status;
	complete(&context->done);
}

static inline void mlx5_ib_init_umr_context(struct mlx5_ib_umr_context *context)
{
	context->cqe.done = mlx5_ib_umr_done;
	context->status = -1;
	init_completion(&context->done);
}

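/*
 * Post a UMR work request on the dedicated UMR QP and sleep until its
 * completion arrives; a failed completion status is reported as -EFAULT.
 */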
static int mlx5_ib_post_send_wait(struct mlx5_ib_dev *dev,
				  struct mlx5_umr_wr *umrwr)
{
	struct umr_common *umrc = &dev->umrc;
	const struct ib_send_wr *bad;
	int err;
	struct mlx5_ib_umr_context umr_context;

	mlx5_ib_init_umr_context(&umr_context);
	umrwr->wr.wr_cqe = &umr_context.cqe;

	down(&umrc->sem);
	err = ib_post_send(umrc->qp, &umrwr->wr, &bad);
	if (err) {
		mlx5_ib_warn(dev, "UMR post send failed, err %d\n", err);
	} else {
		wait_for_completion(&umr_context.done);
		if (umr_context.status != IB_WC_SUCCESS) {
			mlx5_ib_warn(dev, "reg umr failed (%u)\n",
				     umr_context.status);
			err = -EFAULT;
		}
	}
	up(&umrc->sem);
	return err;
}

static struct mlx5_ib_mr *alloc_mr_from_cache(
				  struct ib_pd *pd, struct ib_umem *umem,
				  u64 virt_addr, u64 len, int npages,
				  int page_shift, int order, int access_flags)
{
	struct mlx5_ib_dev *dev = to_mdev(pd->device);
	struct mlx5_ib_mr *mr;
	int err = 0;
	int i;

	for (i = 0; i < 1; i++) {
		mr = alloc_cached_mr(dev, order);
		if (mr)
			break;

		err = add_keys(dev, order2idx(dev, order), 1);
		if (err && err != -EAGAIN) {
			mlx5_ib_warn(dev, "add_keys failed, err %d\n", err);
			break;
		}
	}

	if (!mr)
		return ERR_PTR(-EAGAIN);

	mr->ibmr.pd = pd;
	mr->umem = umem;
	mr->access_flags = access_flags;
	mr->desc_size = sizeof(struct mlx5_mtt);
	mr->mmkey.iova = virt_addr;
	mr->mmkey.size = len;
	mr->mmkey.pd = to_mpd(pd)->pdn;

	return mr;
}

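/*
 * Fill one chunk of the XLT staging buffer: KLMs for indirect (ODP)
 * mkeys, otherwise MTTs taken from the umem, with trailing padding
 * cleared.
 */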
static inline int populate_xlt(struct mlx5_ib_mr *mr, int idx, int npages,
			       void *xlt, int page_shift, size_t size,
			       int flags)
{
	struct mlx5_ib_dev *dev = mr->dev;
	struct ib_umem *umem = mr->umem;

	if (flags & MLX5_IB_UPD_XLT_INDIRECT) {
		if (!umr_can_use_indirect_mkey(dev))
			return -EPERM;
		mlx5_odp_populate_klm(xlt, idx, npages, mr, flags);
		return npages;
	}

	npages = min_t(size_t, npages, ib_umem_num_pages(umem) - idx);

	if (!(flags & MLX5_IB_UPD_XLT_ZAP)) {
		__mlx5_ib_populate_pas(dev, umem, page_shift,
				       idx, npages, xlt,
				       MLX5_IB_MTT_PRESENT);
		/* Clear padding after the pages
		 * brought from the umem.
		 */
		memset(xlt + (npages * sizeof(struct mlx5_mtt)), 0,
		       size - npages * sizeof(struct mlx5_mtt));
	}

	return npages;
}

#define MLX5_MAX_UMR_CHUNK ((1 << (MLX5_MAX_UMR_SHIFT + 4)) - \
			    MLX5_UMR_MTT_ALIGNMENT)
#define MLX5_SPARE_UMR_CHUNK 0x10000

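/*
 * Push the MR translation entries to the device with UMR writes, chunked
 * to at most MLX5_MAX_UMR_CHUNK bytes; falls back to a smaller spare
 * buffer or the global emergency page when allocation fails.
 */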
int mlx5_ib_update_xlt(struct mlx5_ib_mr *mr, u64 idx, int npages,
		       int page_shift, int flags)
{
	struct mlx5_ib_dev *dev = mr->dev;
	struct device *ddev = dev->ib_dev.dev.parent;
	int size;
	void *xlt;
	dma_addr_t dma;
	struct mlx5_umr_wr wr;
	struct ib_sge sg;
	int err = 0;
	int desc_size = (flags & MLX5_IB_UPD_XLT_INDIRECT)
			? sizeof(struct mlx5_klm)
			: sizeof(struct mlx5_mtt);
	const int page_align = MLX5_UMR_MTT_ALIGNMENT / desc_size;
	const int page_mask = page_align - 1;
	size_t pages_mapped = 0;
	size_t pages_to_map = 0;
	size_t pages_iter = 0;
	gfp_t gfp;
	bool use_emergency_page = false;

	if ((flags & MLX5_IB_UPD_XLT_INDIRECT) &&
	    !umr_can_use_indirect_mkey(dev))
		return -EPERM;

	/* UMR copies MTTs in units of MLX5_UMR_MTT_ALIGNMENT bytes,
	 * so we need to align the offset and length accordingly.
	 */
	if (idx & page_mask) {
		npages += idx & page_mask;
		idx &= ~page_mask;
	}

	gfp = flags & MLX5_IB_UPD_XLT_ATOMIC ? GFP_ATOMIC : GFP_KERNEL;
	gfp |= __GFP_ZERO | __GFP_NOWARN;

	pages_to_map = ALIGN(npages, page_align);
	size = desc_size * pages_to_map;
	size = min_t(int, size, MLX5_MAX_UMR_CHUNK);

	xlt = (void *)__get_free_pages(gfp, get_order(size));
	if (!xlt && size > MLX5_SPARE_UMR_CHUNK) {
		mlx5_ib_dbg(dev, "Failed to allocate %d bytes of order %d. fallback to spare UMR allocation of %d bytes\n",
			    size, get_order(size), MLX5_SPARE_UMR_CHUNK);

		size = MLX5_SPARE_UMR_CHUNK;
		xlt = (void *)__get_free_pages(gfp, get_order(size));
	}

	if (!xlt) {
		mlx5_ib_warn(dev, "Using XLT emergency buffer\n");
		xlt = (void *)mlx5_ib_get_xlt_emergency_page();
		size = PAGE_SIZE;
		memset(xlt, 0, size);
		use_emergency_page = true;
	}
	pages_iter = size / desc_size;
	dma = dma_map_single(ddev, xlt, size, DMA_TO_DEVICE);
	if (dma_mapping_error(ddev, dma)) {
		mlx5_ib_err(dev, "unable to map DMA during XLT update.\n");
		err = -ENOMEM;
		goto free_xlt;
	}

	sg.addr = dma;
	sg.lkey = dev->umrc.pd->local_dma_lkey;

	memset(&wr, 0, sizeof(wr));
	wr.wr.send_flags = MLX5_IB_SEND_UMR_UPDATE_XLT;
	if (!(flags & MLX5_IB_UPD_XLT_ENABLE))
		wr.wr.send_flags |= MLX5_IB_SEND_UMR_FAIL_IF_FREE;
	wr.wr.sg_list = &sg;
	wr.wr.num_sge = 1;
	wr.wr.opcode = MLX5_IB_WR_UMR;

	wr.pd = mr->ibmr.pd;
	wr.mkey = mr->mmkey.key;
	wr.length = mr->mmkey.size;
	wr.virt_addr = mr->mmkey.iova;
	wr.access_flags = mr->access_flags;
	wr.page_shift = page_shift;

	for (pages_mapped = 0;
	     pages_mapped < pages_to_map && !err;
	     pages_mapped += pages_iter, idx += pages_iter) {
		npages = min_t(int, pages_iter, pages_to_map - pages_mapped);
		dma_sync_single_for_cpu(ddev, dma, size, DMA_TO_DEVICE);
		npages = populate_xlt(mr, idx, npages, xlt,
				      page_shift, size, flags);

		dma_sync_single_for_device(ddev, dma, size, DMA_TO_DEVICE);

		sg.length = ALIGN(npages * desc_size,
				  MLX5_UMR_MTT_ALIGNMENT);

		if (pages_mapped + pages_iter >= pages_to_map) {
			if (flags & MLX5_IB_UPD_XLT_ENABLE)
				wr.wr.send_flags |=
					MLX5_IB_SEND_UMR_ENABLE_MR |
					MLX5_IB_SEND_UMR_UPDATE_PD_ACCESS |
					MLX5_IB_SEND_UMR_UPDATE_TRANSLATION;
			if (flags & MLX5_IB_UPD_XLT_PD ||
			    flags & MLX5_IB_UPD_XLT_ACCESS)
				wr.wr.send_flags |=
					MLX5_IB_SEND_UMR_UPDATE_PD_ACCESS;
			if (flags & MLX5_IB_UPD_XLT_ADDR)
				wr.wr.send_flags |=
					MLX5_IB_SEND_UMR_UPDATE_TRANSLATION;
		}

		wr.offset = idx * desc_size;
		wr.xlt_size = sg.length;

		err = mlx5_ib_post_send_wait(dev, &wr);
	}
	dma_unmap_single(ddev, dma, size, DMA_TO_DEVICE);

free_xlt:
	if (use_emergency_page)
		mlx5_ib_put_xlt_emergency_page();
	else
		free_pages((unsigned long)xlt, get_order(size));

	return err;
}

/*
 * If ibmr is NULL it will be allocated by reg_create.
 * Else, the given ibmr will be used.
 */
static struct mlx5_ib_mr *reg_create(struct ib_mr *ibmr, struct ib_pd *pd,
				     u64 virt_addr, u64 length,
				     struct ib_umem *umem, int npages,
				     int page_shift, int access_flags,
				     bool populate)
{
	struct mlx5_ib_dev *dev = to_mdev(pd->device);
	struct mlx5_ib_mr *mr;
	__be64 *pas;
	void *mkc;
	int inlen;
	u32 *in;
	int err;
	bool pg_cap = !!(MLX5_CAP_GEN(dev->mdev, pg));

	mr = ibmr ? to_mmr(ibmr) : kzalloc(sizeof(*mr), GFP_KERNEL);
	if (!mr)
		return ERR_PTR(-ENOMEM);

	mr->ibmr.pd = pd;
	mr->access_flags = access_flags;

	inlen = MLX5_ST_SZ_BYTES(create_mkey_in);
	if (populate)
		inlen += sizeof(*pas) * roundup(npages, 2);
	in = kvzalloc(inlen, GFP_KERNEL);
	if (!in) {
		err = -ENOMEM;
		goto err_1;
	}
	pas = (__be64 *)MLX5_ADDR_OF(create_mkey_in, in, klm_pas_mtt);
	if (populate && !(access_flags & IB_ACCESS_ON_DEMAND))
		mlx5_ib_populate_pas(dev, umem, page_shift, pas,
				     pg_cap ? MLX5_IB_MTT_PRESENT : 0);

	/* The pg_access bit allows setting the access flags
	 * in the page list submitted with the command. */
	MLX5_SET(create_mkey_in, in, pg_access, !!(pg_cap));

	mkc = MLX5_ADDR_OF(create_mkey_in, in, memory_key_mkey_entry);
	MLX5_SET(mkc, mkc, free, !populate);
	MLX5_SET(mkc, mkc, access_mode_1_0, MLX5_MKC_ACCESS_MODE_MTT);
	MLX5_SET(mkc, mkc, a, !!(access_flags & IB_ACCESS_REMOTE_ATOMIC));
	MLX5_SET(mkc, mkc, rw, !!(access_flags & IB_ACCESS_REMOTE_WRITE));
	MLX5_SET(mkc, mkc, rr, !!(access_flags & IB_ACCESS_REMOTE_READ));
	MLX5_SET(mkc, mkc, lw, !!(access_flags & IB_ACCESS_LOCAL_WRITE));
	MLX5_SET(mkc, mkc, lr, 1);
	MLX5_SET(mkc, mkc, umr_en, 1);

	MLX5_SET64(mkc, mkc, start_addr, virt_addr);
	MLX5_SET64(mkc, mkc, len, length);
	MLX5_SET(mkc, mkc, pd, to_mpd(pd)->pdn);
	MLX5_SET(mkc, mkc, bsf_octword_size, 0);
	MLX5_SET(mkc, mkc, translations_octword_size,
		 get_octo_len(virt_addr, length, page_shift));
	MLX5_SET(mkc, mkc, log_page_size, page_shift);
	MLX5_SET(mkc, mkc, qpn, 0xffffff);
	if (populate) {
		MLX5_SET(create_mkey_in, in, translations_octword_actual_size,
			 get_octo_len(virt_addr, length, page_shift));
	}

	err = mlx5_core_create_mkey(dev->mdev, &mr->mmkey, in, inlen);
	if (err) {
		mlx5_ib_warn(dev, "create mkey failed\n");
		goto err_2;
	}
	mr->mmkey.type = MLX5_MKEY_MR;
	mr->desc_size = sizeof(struct mlx5_mtt);
	mr->dev = dev;
	kvfree(in);

	mlx5_ib_dbg(dev, "mkey = 0x%x\n", mr->mmkey.key);

	return mr;

err_2:
	kvfree(in);

err_1:
	if (!ibmr)
		kfree(mr);

	return ERR_PTR(err);
}

static void set_mr_fields(struct mlx5_ib_dev *dev, struct mlx5_ib_mr *mr,
			  int npages, u64 length, int access_flags)
{
	mr->npages = npages;
	atomic_add(npages, &dev->mdev->priv.reg_pages);
	mr->ibmr.lkey = mr->mmkey.key;
	mr->ibmr.rkey = mr->mmkey.key;
	mr->ibmr.length = length;
	mr->access_flags = access_flags;
}

static struct ib_mr *mlx5_ib_get_dm_mr(struct ib_pd *pd, u64 start_addr,
				       u64 length, int acc, int mode)
{
	struct mlx5_ib_dev *dev = to_mdev(pd->device);
	int inlen = MLX5_ST_SZ_BYTES(create_mkey_in);
	struct mlx5_core_dev *mdev = dev->mdev;
	struct mlx5_ib_mr *mr;
	void *mkc;
	u32 *in;
	int err;

	mr = kzalloc(sizeof(*mr), GFP_KERNEL);
	if (!mr)
		return ERR_PTR(-ENOMEM);

	in = kzalloc(inlen, GFP_KERNEL);
	if (!in) {
		err = -ENOMEM;
		goto err_free;
	}

	mkc = MLX5_ADDR_OF(create_mkey_in, in, memory_key_mkey_entry);

	MLX5_SET(mkc, mkc, access_mode_1_0, mode & 0x3);
	MLX5_SET(mkc, mkc, access_mode_4_2, (mode >> 2) & 0x7);
	MLX5_SET(mkc, mkc, a, !!(acc & IB_ACCESS_REMOTE_ATOMIC));
	MLX5_SET(mkc, mkc, rw, !!(acc & IB_ACCESS_REMOTE_WRITE));
	MLX5_SET(mkc, mkc, rr, !!(acc & IB_ACCESS_REMOTE_READ));
	MLX5_SET(mkc, mkc, lw, !!(acc & IB_ACCESS_LOCAL_WRITE));
	MLX5_SET(mkc, mkc, lr, 1);

	MLX5_SET64(mkc, mkc, len, length);
	MLX5_SET(mkc, mkc, pd, to_mpd(pd)->pdn);
	MLX5_SET(mkc, mkc, qpn, 0xffffff);
	MLX5_SET64(mkc, mkc, start_addr, start_addr);

	err = mlx5_core_create_mkey(mdev, &mr->mmkey, in, inlen);
	if (err)
		goto err_in;

	kfree(in);

	mr->umem = NULL;
	set_mr_fields(dev, mr, 0, length, acc);

	return &mr->ibmr;

err_in:
	kfree(in);

err_free:
	kfree(mr);

	return ERR_PTR(err);
}

int mlx5_ib_advise_mr(struct ib_pd *pd,
		      enum ib_uverbs_advise_mr_advice advice,
		      u32 flags,
		      struct ib_sge *sg_list,
		      u32 num_sge,
		      struct uverbs_attr_bundle *attrs)
{
	if (advice != IB_UVERBS_ADVISE_MR_ADVICE_PREFETCH &&
	    advice != IB_UVERBS_ADVISE_MR_ADVICE_PREFETCH_WRITE)
		return -EOPNOTSUPP;

	return mlx5_ib_advise_mr_prefetch(pd, advice, flags,
					  sg_list, num_sge);
}

struct ib_mr *mlx5_ib_reg_dm_mr(struct ib_pd *pd, struct ib_dm *dm,
				struct ib_dm_mr_attr *attr,
				struct uverbs_attr_bundle *attrs)
{
	struct mlx5_ib_dm *mdm = to_mdm(dm);
	struct mlx5_core_dev *dev = to_mdev(dm->device)->mdev;
	u64 start_addr = mdm->dev_addr + attr->offset;
	int mode;

	switch (mdm->type) {
	case MLX5_IB_UAPI_DM_TYPE_MEMIC:
		if (attr->access_flags & ~MLX5_IB_DM_MEMIC_ALLOWED_ACCESS)
			return ERR_PTR(-EINVAL);

		mode = MLX5_MKC_ACCESS_MODE_MEMIC;
		start_addr -= pci_resource_start(dev->pdev, 0);
		break;
	case MLX5_IB_UAPI_DM_TYPE_STEERING_SW_ICM:
	case MLX5_IB_UAPI_DM_TYPE_HEADER_MODIFY_SW_ICM:
		if (attr->access_flags & ~MLX5_IB_DM_SW_ICM_ALLOWED_ACCESS)
			return ERR_PTR(-EINVAL);

		mode = MLX5_MKC_ACCESS_MODE_SW_ICM;
		break;
	default:
		return ERR_PTR(-EINVAL);
	}

	return mlx5_ib_get_dm_mr(pd, start_addr, attr->length,
				 attr->access_flags, mode);
}

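/*
 * Main verbs registration path: pin the user memory, prefer an MR from
 * the cache finalized through UMR, and fall back to a blocking
 * create_mkey command (reg_create) when UMR cannot be used.
 */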
Eli Cohene126ba92013-07-07 17:25:49 +03001250struct ib_mr *mlx5_ib_reg_user_mr(struct ib_pd *pd, u64 start, u64 length,
1251 u64 virt_addr, int access_flags,
1252 struct ib_udata *udata)
1253{
1254 struct mlx5_ib_dev *dev = to_mdev(pd->device);
1255 struct mlx5_ib_mr *mr = NULL;
Guy Levie5366d32019-07-31 11:19:29 +03001256 bool use_umr;
Eli Cohene126ba92013-07-07 17:25:49 +03001257 struct ib_umem *umem;
1258 int page_shift;
1259 int npages;
1260 int ncont;
1261 int order;
1262 int err;
1263
Arnd Bergmann1b19b9512017-12-11 12:45:44 +01001264 if (!IS_ENABLED(CONFIG_INFINIBAND_USER_MEM))
Leon Romanovskyea30f012018-03-13 15:29:25 +02001265 return ERR_PTR(-EOPNOTSUPP);
Arnd Bergmann1b19b9512017-12-11 12:45:44 +01001266
Eli Cohen900a6d72014-09-14 16:47:51 +03001267 mlx5_ib_dbg(dev, "start 0x%llx, virt_addr 0x%llx, length 0x%llx, access_flags 0x%x\n",
1268 start, virt_addr, length, access_flags);
Artemy Kovalyov81713d32017-01-18 16:58:11 +02001269
Leon Romanovsky13859d5d2019-01-08 16:07:26 +02001270 if (IS_ENABLED(CONFIG_INFINIBAND_ON_DEMAND_PAGING) && !start &&
1271 length == U64_MAX) {
Artemy Kovalyov81713d32017-01-18 16:58:11 +02001272 if (!(access_flags & IB_ACCESS_ON_DEMAND) ||
1273 !(dev->odp_caps.general_caps & IB_ODP_SUPPORT_IMPLICIT))
1274 return ERR_PTR(-EINVAL);
1275
Jason Gunthorpeb0ea0fa2019-01-09 11:15:16 +02001276 mr = mlx5_ib_alloc_implicit_mr(to_mpd(pd), udata, access_flags);
Leon Romanovsky42898612018-03-13 15:29:24 +02001277 if (IS_ERR(mr))
1278 return ERR_CAST(mr);
Artemy Kovalyov81713d32017-01-18 16:58:11 +02001279 return &mr->ibmr;
1280 }
Artemy Kovalyov81713d32017-01-18 16:58:11 +02001281
Jason Gunthorpeb0ea0fa2019-01-09 11:15:16 +02001282 err = mr_umem_get(dev, udata, start, length, access_flags, &umem,
1283 &npages, &page_shift, &ncont, &order);
Noa Osherovich395a8e42016-02-29 16:46:50 +02001284
Ilya Lesokhinff740ae2017-08-17 15:52:30 +03001285 if (err < 0)
Arnd Bergmann14ab8892016-10-24 22:48:21 +02001286 return ERR_PTR(err);
Eli Cohene126ba92013-07-07 17:25:49 +03001287
Moni Shoua0e6613b2019-08-15 11:38:31 +03001288 use_umr = mlx5_ib_can_use_umr(dev, true);
Guy Levie5366d32019-07-31 11:19:29 +03001289
1290 if (order <= mr_cache_max_order(dev) && use_umr) {
Ilya Lesokhinff740ae2017-08-17 15:52:30 +03001291 mr = alloc_mr_from_cache(pd, umem, virt_addr, length, ncont,
1292 page_shift, order, access_flags);
Eli Cohene126ba92013-07-07 17:25:49 +03001293 if (PTR_ERR(mr) == -EAGAIN) {
Arvind Yadavd23a8ba2017-09-26 12:20:01 +05301294 mlx5_ib_dbg(dev, "cache empty for order %d\n", order);
Eli Cohene126ba92013-07-07 17:25:49 +03001295 mr = NULL;
1296 }
Ilya Lesokhinff740ae2017-08-17 15:52:30 +03001297 } else if (!MLX5_CAP_GEN(dev->mdev, umr_extended_translation_offset)) {
1298 if (access_flags & IB_ACCESS_ON_DEMAND) {
1299 err = -EINVAL;
Arvind Yadavd23a8ba2017-09-26 12:20:01 +05301300 pr_err("Got MR registration for ODP MR > 512MB, not supported for Connect-IB\n");
Ilya Lesokhinff740ae2017-08-17 15:52:30 +03001301 goto error;
1302 }
Guy Levie5366d32019-07-31 11:19:29 +03001303 use_umr = false;
Eli Cohene126ba92013-07-07 17:25:49 +03001304 }
1305
Moshe Lazer6bc1a652016-10-27 16:36:42 +03001306 if (!mr) {
1307 mutex_lock(&dev->slow_path_mutex);
Noa Osherovich395a8e42016-02-29 16:46:50 +02001308 mr = reg_create(NULL, pd, virt_addr, length, umem, ncont,
Guy Levie5366d32019-07-31 11:19:29 +03001309 page_shift, access_flags, !use_umr);
Moshe Lazer6bc1a652016-10-27 16:36:42 +03001310 mutex_unlock(&dev->slow_path_mutex);
1311 }
Eli Cohene126ba92013-07-07 17:25:49 +03001312
1313 if (IS_ERR(mr)) {
1314 err = PTR_ERR(mr);
1315 goto error;
1316 }
1317
Matan Baraka606b0f2016-02-29 18:05:28 +02001318 mlx5_ib_dbg(dev, "mkey 0x%x\n", mr->mmkey.key);
Eli Cohene126ba92013-07-07 17:25:49 +03001319
1320 mr->umem = umem;
Gal Pressmanac2f7e62018-12-18 17:57:32 +02001321 set_mr_fields(dev, mr, npages, length, access_flags);
Eli Cohene126ba92013-07-07 17:25:49 +03001322
Guy Levie5366d32019-07-31 11:19:29 +03001323 if (use_umr) {
Ilya Lesokhinff740ae2017-08-17 15:52:30 +03001324 int update_xlt_flags = MLX5_IB_UPD_XLT_ENABLE;
Eli Cohene126ba92013-07-07 17:25:49 +03001325
Ilya Lesokhinff740ae2017-08-17 15:52:30 +03001326 if (access_flags & IB_ACCESS_ON_DEMAND)
1327 update_xlt_flags |= MLX5_IB_UPD_XLT_ZAP;
1328
1329 err = mlx5_ib_update_xlt(mr, 0, ncont, page_shift,
1330 update_xlt_flags);
Ilya Lesokhinfbcd4982017-09-24 21:46:35 +03001331
Ilya Lesokhinff740ae2017-08-17 15:52:30 +03001332 if (err) {
Ilya Lesokhinfbcd4982017-09-24 21:46:35 +03001333 dereg_mr(dev, mr);
Ilya Lesokhinff740ae2017-08-17 15:52:30 +03001334 return ERR_PTR(err);
1335 }
1336 }
1337
Jason Gunthorpeaa603812019-10-01 12:38:20 -03001338 if (is_odp_mr(mr)) {
1339 to_ib_umem_odp(mr->umem)->private = mr;
Moni Shouaa6bc3872019-02-17 16:08:22 +02001340 atomic_set(&mr->num_pending_prefetch, 0);
1341 }
Jason Gunthorpeaa603812019-10-01 12:38:20 -03001342 if (IS_ENABLED(CONFIG_INFINIBAND_ON_DEMAND_PAGING))
1343 smp_store_release(&mr->live, 1);
Leon Romanovsky13859d5d2019-01-08 16:07:26 +02001344
Ilya Lesokhinff740ae2017-08-17 15:52:30 +03001345 return &mr->ibmr;
Eli Cohene126ba92013-07-07 17:25:49 +03001346error:
1347 ib_umem_release(umem);
1348 return ERR_PTR(err);
1349}
1350
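/*
 * unreg_umr() - post a UMR WQE that moves the mkey back to the free state
 * so a cached mkey can be reused. Skipped when the device is in internal
 * error state, since no WQEs can complete then.
 */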
1351static int unreg_umr(struct mlx5_ib_dev *dev, struct mlx5_ib_mr *mr)
1352{
Maor Gottlieb89ea94a72016-06-17 15:01:38 +03001353 struct mlx5_core_dev *mdev = dev->mdev;
Doug Ledford0025b0b2016-03-03 11:23:37 -05001354 struct mlx5_umr_wr umrwr = {};
Eli Cohene126ba92013-07-07 17:25:49 +03001355
Maor Gottlieb89ea94a72016-06-17 15:01:38 +03001356 if (mdev->state == MLX5_DEVICE_STATE_INTERNAL_ERROR)
1357 return 0;
1358
Yishai Hadas9ec44832019-07-23 09:57:27 +03001359 umrwr.wr.send_flags = MLX5_IB_SEND_UMR_DISABLE_MR |
1360 MLX5_IB_SEND_UMR_UPDATE_PD_ACCESS;
Artemy Kovalyov7d0cc6e2017-01-02 11:37:44 +02001361 umrwr.wr.opcode = MLX5_IB_WR_UMR;
Yishai Hadas9ec44832019-07-23 09:57:27 +03001362 umrwr.pd = dev->umrc.pd;
Artemy Kovalyov7d0cc6e2017-01-02 11:37:44 +02001363 umrwr.mkey = mr->mmkey.key;
Yishai Hadas6a053952019-07-23 09:57:25 +03001364 umrwr.ignore_free_state = 1;
Eli Cohene126ba92013-07-07 17:25:49 +03001365
Binoy Jayand5ea2df2017-01-02 11:37:40 +02001366 return mlx5_ib_post_send_wait(dev, &umrwr);
Eli Cohene126ba92013-07-07 17:25:49 +03001367}
1368
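/*
 * rereg_umr() - post a UMR WQE that updates the PD and/or access flags of
 * an existing mkey in place, for the IB_MR_REREG_PD/ACCESS cases that do
 * not require rewriting the translation table.
 */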
Artemy Kovalyov7d0cc6e2017-01-02 11:37:44 +02001369static int rereg_umr(struct ib_pd *pd, struct mlx5_ib_mr *mr,
Noa Osherovich56e11d62016-02-29 16:46:51 +02001370 int access_flags, int flags)
1371{
1372 struct mlx5_ib_dev *dev = to_mdev(pd->device);
Noa Osherovich56e11d62016-02-29 16:46:51 +02001373 struct mlx5_umr_wr umrwr = {};
Noa Osherovich56e11d62016-02-29 16:46:51 +02001374 int err;
1375
Noa Osherovich56e11d62016-02-29 16:46:51 +02001376 umrwr.wr.send_flags = MLX5_IB_SEND_UMR_FAIL_IF_FREE;
1377
Artemy Kovalyov7d0cc6e2017-01-02 11:37:44 +02001378 umrwr.wr.opcode = MLX5_IB_WR_UMR;
1379 umrwr.mkey = mr->mmkey.key;
Noa Osherovich56e11d62016-02-29 16:46:51 +02001380
Artemy Kovalyov31616252017-01-02 11:37:42 +02001381 if (flags & IB_MR_REREG_PD || flags & IB_MR_REREG_ACCESS) {
Noa Osherovich56e11d62016-02-29 16:46:51 +02001382 umrwr.pd = pd;
Noa Osherovich56e11d62016-02-29 16:46:51 +02001383 umrwr.access_flags = access_flags;
Artemy Kovalyov31616252017-01-02 11:37:42 +02001384 umrwr.wr.send_flags |= MLX5_IB_SEND_UMR_UPDATE_PD_ACCESS;
Noa Osherovich56e11d62016-02-29 16:46:51 +02001385 }
1386
Binoy Jayand5ea2df2017-01-02 11:37:40 +02001387 err = mlx5_ib_post_send_wait(dev, &umrwr);
Noa Osherovich56e11d62016-02-29 16:46:51 +02001388
Noa Osherovich56e11d62016-02-29 16:46:51 +02001389 return err;
1390}
1391
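/*
 * mlx5_ib_rereg_user_mr() - handle ibv_rereg_mr(). Depending on the
 * requested flags this either pins a new umem, replaces the mkey entirely
 * via reg_create(), or updates the existing mkey in place with UMR
 * (mlx5_ib_update_xlt()/rereg_umr()). ODP MRs are rejected.
 */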
1392int mlx5_ib_rereg_user_mr(struct ib_mr *ib_mr, int flags, u64 start,
1393 u64 length, u64 virt_addr, int new_access_flags,
1394 struct ib_pd *new_pd, struct ib_udata *udata)
1395{
1396 struct mlx5_ib_dev *dev = to_mdev(ib_mr->device);
1397 struct mlx5_ib_mr *mr = to_mmr(ib_mr);
1398 struct ib_pd *pd = (flags & IB_MR_REREG_PD) ? new_pd : ib_mr->pd;
1399 int access_flags = flags & IB_MR_REREG_ACCESS ?
1400 new_access_flags :
1401 mr->access_flags;
Noa Osherovich56e11d62016-02-29 16:46:51 +02001402 int page_shift = 0;
Artemy Kovalyov7d0cc6e2017-01-02 11:37:44 +02001403 int upd_flags = 0;
Noa Osherovich56e11d62016-02-29 16:46:51 +02001404 int npages = 0;
1405 int ncont = 0;
1406 int order = 0;
Leon Romanovskyb4bd7012018-04-23 17:01:52 +03001407 u64 addr, len;
Noa Osherovich56e11d62016-02-29 16:46:51 +02001408 int err;
1409
1410 mlx5_ib_dbg(dev, "start 0x%llx, virt_addr 0x%llx, length 0x%llx, access_flags 0x%x\n",
1411 start, virt_addr, length, access_flags);
1412
Artemy Kovalyov7d0cc6e2017-01-02 11:37:44 +02001413 atomic_sub(mr->npages, &dev->mdev->priv.reg_pages);
1414
Leon Romanovskyb4bd7012018-04-23 17:01:52 +03001415 if (!mr->umem)
1416 return -EINVAL;
1417
Jason Gunthorpe880505c2019-10-01 12:38:16 -03001418 if (is_odp_mr(mr))
1419 return -EOPNOTSUPP;
1420
Leon Romanovskyb4bd7012018-04-23 17:01:52 +03001421 if (flags & IB_MR_REREG_TRANS) {
1422 addr = virt_addr;
1423 len = length;
1424 } else {
1425 addr = mr->umem->address;
1426 len = mr->umem->length;
1427 }
1428
Noa Osherovich56e11d62016-02-29 16:46:51 +02001429 if (flags != IB_MR_REREG_PD) {
1430 /*
1431 * Replace umem. This needs to be done whether or not UMR is
1432 * used.
1433 */
1434 flags |= IB_MR_REREG_TRANS;
1435 ib_umem_release(mr->umem);
Leon Romanovskyb4bd7012018-04-23 17:01:52 +03001436 mr->umem = NULL;
Jason Gunthorpeb0ea0fa2019-01-09 11:15:16 +02001437 err = mr_umem_get(dev, udata, addr, len, access_flags,
1438 &mr->umem, &npages, &page_shift, &ncont,
1439 &order);
Leon Romanovsky4638a3b2018-03-13 15:29:26 +02001440 if (err)
1441 goto err;
Noa Osherovich56e11d62016-02-29 16:46:51 +02001442 }
1443
Moni Shoua25a45172019-08-15 11:38:33 +03001444 if (!mlx5_ib_can_use_umr(dev, true) ||
1445 (flags & IB_MR_REREG_TRANS && !use_umr_mtt_update(mr, addr, len))) {
Noa Osherovich56e11d62016-02-29 16:46:51 +02001446 /*
1447 * UMR can't be used - MKey needs to be replaced.
1448 */
Leon Romanovskyeeea6952018-03-13 15:29:28 +02001449 if (mr->allocated_from_cache)
Noa Osherovich56e11d62016-02-29 16:46:51 +02001450 err = unreg_umr(dev, mr);
Leon Romanovskyeeea6952018-03-13 15:29:28 +02001451 else
Noa Osherovich56e11d62016-02-29 16:46:51 +02001452 err = destroy_mkey(dev, mr);
Noa Osherovich56e11d62016-02-29 16:46:51 +02001453 if (err)
Leon Romanovsky4638a3b2018-03-13 15:29:26 +02001454 goto err;
Noa Osherovich56e11d62016-02-29 16:46:51 +02001455
1456 mr = reg_create(ib_mr, pd, addr, len, mr->umem, ncont,
Ilya Lesokhinff740ae2017-08-17 15:52:30 +03001457 page_shift, access_flags, true);
Noa Osherovich56e11d62016-02-29 16:46:51 +02001458
Leon Romanovsky4638a3b2018-03-13 15:29:26 +02001459 if (IS_ERR(mr)) {
1460 err = PTR_ERR(mr);
1461 mr = to_mmr(ib_mr);
1462 goto err;
1463 }
Noa Osherovich56e11d62016-02-29 16:46:51 +02001464
Ilya Lesokhin8b7ff7f2017-08-17 15:52:29 +03001465 mr->allocated_from_cache = 0;
Noa Osherovich56e11d62016-02-29 16:46:51 +02001466 } else {
1467 /*
1468 * Send a UMR WQE
1469 */
Artemy Kovalyov7d0cc6e2017-01-02 11:37:44 +02001470 mr->ibmr.pd = pd;
1471 mr->access_flags = access_flags;
1472 mr->mmkey.iova = addr;
1473 mr->mmkey.size = len;
1474 mr->mmkey.pd = to_mpd(pd)->pdn;
1475
1476 if (flags & IB_MR_REREG_TRANS) {
1477 upd_flags = MLX5_IB_UPD_XLT_ADDR;
1478 if (flags & IB_MR_REREG_PD)
1479 upd_flags |= MLX5_IB_UPD_XLT_PD;
1480 if (flags & IB_MR_REREG_ACCESS)
1481 upd_flags |= MLX5_IB_UPD_XLT_ACCESS;
1482 err = mlx5_ib_update_xlt(mr, 0, npages, page_shift,
1483 upd_flags);
1484 } else {
1485 err = rereg_umr(pd, mr, access_flags, flags);
1486 }
1487
Leon Romanovsky4638a3b2018-03-13 15:29:26 +02001488 if (err)
1489 goto err;
Noa Osherovich56e11d62016-02-29 16:46:51 +02001490 }
1491
Gal Pressmanac2f7e62018-12-18 17:57:32 +02001492 set_mr_fields(dev, mr, npages, len, access_flags);
Noa Osherovich56e11d62016-02-29 16:46:51 +02001493
Noa Osherovich56e11d62016-02-29 16:46:51 +02001494 return 0;
Leon Romanovsky4638a3b2018-03-13 15:29:26 +02001495
1496err:
Leon Romanovsky836a0fb2019-06-16 15:05:20 +03001497 ib_umem_release(mr->umem);
1498 mr->umem = NULL;
1499
Leon Romanovsky4638a3b2018-03-13 15:29:26 +02001500 clean_mr(dev, mr);
1501 return err;
Noa Osherovich56e11d62016-02-29 16:46:51 +02001502}
1503
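/*
 * Allocate the driver-private descriptor buffer (MTTs or KLMs) used by
 * fast-registration MRs. The buffer is over-allocated so the descriptors
 * can be aligned to MLX5_UMR_ALIGN, and is DMA-mapped towards the device.
 */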
Sagi Grimberg8a187ee2015-10-13 19:11:26 +03001504static int
1505mlx5_alloc_priv_descs(struct ib_device *device,
1506 struct mlx5_ib_mr *mr,
1507 int ndescs,
1508 int desc_size)
1509{
1510 int size = ndescs * desc_size;
1511 int add_size;
1512 int ret;
1513
1514 add_size = max_t(int, MLX5_UMR_ALIGN - ARCH_KMALLOC_MINALIGN, 0);
1515
1516 mr->descs_alloc = kzalloc(size + add_size, GFP_KERNEL);
1517 if (!mr->descs_alloc)
1518 return -ENOMEM;
1519
1520 mr->descs = PTR_ALIGN(mr->descs_alloc, MLX5_UMR_ALIGN);
1521
Bart Van Assche9b0c2892017-01-20 13:04:21 -08001522 mr->desc_map = dma_map_single(device->dev.parent, mr->descs,
Sagi Grimberg8a187ee2015-10-13 19:11:26 +03001523 size, DMA_TO_DEVICE);
Bart Van Assche9b0c2892017-01-20 13:04:21 -08001524 if (dma_mapping_error(device->dev.parent, mr->desc_map)) {
Sagi Grimberg8a187ee2015-10-13 19:11:26 +03001525 ret = -ENOMEM;
1526 goto err;
1527 }
1528
1529 return 0;
1530err:
1531 kfree(mr->descs_alloc);
1532
1533 return ret;
1534}
1535
1536static void
1537mlx5_free_priv_descs(struct mlx5_ib_mr *mr)
1538{
1539 if (mr->descs) {
1540 struct ib_device *device = mr->ibmr.device;
1541 int size = mr->max_descs * mr->desc_size;
1542
Bart Van Assche9b0c2892017-01-20 13:04:21 -08001543 dma_unmap_single(device->dev.parent, mr->desc_map,
Sagi Grimberg8a187ee2015-10-13 19:11:26 +03001544 size, DMA_TO_DEVICE);
1545 kfree(mr->descs_alloc);
1546 mr->descs = NULL;
1547 }
1548}
1549
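/*
 * clean_mr() - destroy the signature PSVs (if any) and, for mkeys that did
 * not come from the MR cache, destroy the mkey and free the private
 * descriptors. Cache mkeys stay alive and are handed back to the cache by
 * the caller via mlx5_mr_cache_free().
 */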
Leon Romanovskyeeea6952018-03-13 15:29:28 +02001550static void clean_mr(struct mlx5_ib_dev *dev, struct mlx5_ib_mr *mr)
Eli Cohene126ba92013-07-07 17:25:49 +03001551{
Ilya Lesokhin8b7ff7f2017-08-17 15:52:29 +03001552 int allocated_from_cache = mr->allocated_from_cache;
Eli Cohene126ba92013-07-07 17:25:49 +03001553
Sagi Grimberg8b91ffc2015-07-30 10:32:34 +03001554 if (mr->sig) {
1555 if (mlx5_core_destroy_psv(dev->mdev,
1556 mr->sig->psv_memory.psv_idx))
1557 mlx5_ib_warn(dev, "failed to destroy mem psv %d\n",
1558 mr->sig->psv_memory.psv_idx);
1559 if (mlx5_core_destroy_psv(dev->mdev,
1560 mr->sig->psv_wire.psv_idx))
1561 mlx5_ib_warn(dev, "failed to destroy wire psv %d\n",
1562 mr->sig->psv_wire.psv_idx);
1563 kfree(mr->sig);
1564 mr->sig = NULL;
1565 }
1566
Yishai Hadasb9332da2019-07-23 09:57:28 +03001567 if (!allocated_from_cache) {
Leon Romanovskyeeea6952018-03-13 15:29:28 +02001568 destroy_mkey(dev, mr);
Yishai Hadasb9332da2019-07-23 09:57:28 +03001569 mlx5_free_priv_descs(mr);
1570 }
Eli Cohene126ba92013-07-07 17:25:49 +03001571}
1572
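/*
 * dereg_mr() - full MR teardown. For ODP MRs this first marks the MR dead,
 * waits out running page-fault handlers (SRCU) and pending prefetch work,
 * invalidates all page mappings (or frees the implicit-ODP tree) and
 * releases the ODP umem before the mkey itself is destroyed.
 */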
Leon Romanovskyeeea6952018-03-13 15:29:28 +02001573static void dereg_mr(struct mlx5_ib_dev *dev, struct mlx5_ib_mr *mr)
Haggai Eran6aec21f2014-12-11 17:04:23 +02001574{
Haggai Eran6aec21f2014-12-11 17:04:23 +02001575 int npages = mr->npages;
1576 struct ib_umem *umem = mr->umem;
1577
Leon Romanovsky8b4d5bc2019-01-08 16:07:25 +02001578 if (is_odp_mr(mr)) {
Jason Gunthorpe597ecc52018-09-16 20:48:06 +03001579 struct ib_umem_odp *umem_odp = to_ib_umem_odp(umem);
1580
Moni Shouaa6bc3872019-02-17 16:08:22 +02001581 /* Prevent new page faults and
1582 * prefetch requests from succeeding
1583 */
Jason Gunthorpeaa603812019-10-01 12:38:20 -03001584 WRITE_ONCE(mr->live, 0);
Moni Shouaa6bc3872019-02-17 16:08:22 +02001585
Jason Gunthorpeaa116b82019-10-01 12:38:19 -03001586 /* Wait for all running page-fault handlers to finish. */
1587 synchronize_srcu(&dev->mr_srcu);
1588
Moni Shouaa6bc3872019-02-17 16:08:22 +02001589 /* dequeue pending prefetch requests for the mr */
1590 if (atomic_read(&mr->num_pending_prefetch))
1591 flush_workqueue(system_unbound_wq);
1592 WARN_ON(atomic_read(&mr->num_pending_prefetch));
1593
Haggai Eranb4cfe442014-12-11 17:04:26 +02001594 /* Destroy all page mappings */
Jason Gunthorpefd7dbf02019-08-19 14:17:01 +03001595 if (!umem_odp->is_implicit_odp)
Jason Gunthorped2183c62019-05-20 09:05:25 +03001596 mlx5_ib_invalidate_range(umem_odp,
1597 ib_umem_start(umem_odp),
1598 ib_umem_end(umem_odp));
Artemy Kovalyov81713d32017-01-18 16:58:11 +02001599 else
1600 mlx5_ib_free_implicit_mr(mr);
Haggai Eranb4cfe442014-12-11 17:04:26 +02001601 /*
 1602		 * We release the umem before the MR for ODP, so that no
 1603		 * invalidation handlers can still be running and looking
 1604		 * at the *mr struct.
1605 */
Jason Gunthorpe0446cad2019-08-19 14:17:05 +03001606 ib_umem_odp_release(umem_odp);
Haggai Eranb4cfe442014-12-11 17:04:26 +02001607 atomic_sub(npages, &dev->mdev->priv.reg_pages);
1608
1609 /* Avoid double-freeing the umem. */
1610 umem = NULL;
1611 }
Leon Romanovsky8b4d5bc2019-01-08 16:07:25 +02001612
Ilya Lesokhinfbcd4982017-09-24 21:46:35 +03001613 clean_mr(dev, mr);
Haggai Eran6aec21f2014-12-11 17:04:23 +02001614
Valentine Fatievdd9a4032018-10-10 09:56:25 +03001615 /*
 1616	 * We should unregister the DMA address from the HCA before
 1617	 * removing the DMA mapping.
1618 */
1619 mlx5_mr_cache_free(dev, mr);
Leon Romanovsky836a0fb2019-06-16 15:05:20 +03001620 ib_umem_release(umem);
1621 if (umem)
Haggai Eran6aec21f2014-12-11 17:04:23 +02001622 atomic_sub(npages, &dev->mdev->priv.reg_pages);
Leon Romanovsky836a0fb2019-06-16 15:05:20 +03001623
Leon Romanovskyf3f134f2018-03-12 21:26:37 +02001624 if (!mr->allocated_from_cache)
1625 kfree(mr);
Haggai Eran6aec21f2014-12-11 17:04:23 +02001626}
1627
Shamir Rabinovitchc4367a22019-03-31 19:10:05 +03001628int mlx5_ib_dereg_mr(struct ib_mr *ibmr, struct ib_udata *udata)
Ilya Lesokhinfbcd4982017-09-24 21:46:35 +03001629{
Max Gurtovoy6c984472019-06-11 18:52:42 +03001630 struct mlx5_ib_mr *mmr = to_mmr(ibmr);
1631
Israel Rukshinde0ae952019-06-11 18:52:55 +03001632 if (ibmr->type == IB_MR_TYPE_INTEGRITY) {
1633 dereg_mr(to_mdev(mmr->mtt_mr->ibmr.device), mmr->mtt_mr);
1634 dereg_mr(to_mdev(mmr->klm_mr->ibmr.device), mmr->klm_mr);
1635 }
Max Gurtovoy6c984472019-06-11 18:52:42 +03001636
1637 dereg_mr(to_mdev(ibmr->device), mmr);
1638
Leon Romanovskyeeea6952018-03-13 15:29:28 +02001639 return 0;
Ilya Lesokhinfbcd4982017-09-24 21:46:35 +03001640}
1641
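/*
 * Fill the mkey context for a UMR-manageable mkey that is created in the
 * "free" state: translation size, access mode, page shift and owning PD.
 * Used by the memory-registration, SG-gaps and integrity MR flavours below.
 */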
Max Gurtovoy7796d2a2019-06-11 18:52:57 +03001642static void mlx5_set_umr_free_mkey(struct ib_pd *pd, u32 *in, int ndescs,
1643 int access_mode, int page_shift)
1644{
1645 void *mkc;
1646
1647 mkc = MLX5_ADDR_OF(create_mkey_in, in, memory_key_mkey_entry);
1648
1649 MLX5_SET(mkc, mkc, free, 1);
1650 MLX5_SET(mkc, mkc, qpn, 0xffffff);
1651 MLX5_SET(mkc, mkc, pd, to_mpd(pd)->pdn);
1652 MLX5_SET(mkc, mkc, translations_octword_size, ndescs);
1653 MLX5_SET(mkc, mkc, access_mode_1_0, access_mode & 0x3);
1654 MLX5_SET(mkc, mkc, access_mode_4_2, (access_mode >> 2) & 0x7);
1655 MLX5_SET(mkc, mkc, umr_en, 1);
1656 MLX5_SET(mkc, mkc, log_page_size, page_shift);
1657}
1658
1659static int _mlx5_alloc_mkey_descs(struct ib_pd *pd, struct mlx5_ib_mr *mr,
1660 int ndescs, int desc_size, int page_shift,
1661 int access_mode, u32 *in, int inlen)
1662{
1663 struct mlx5_ib_dev *dev = to_mdev(pd->device);
1664 int err;
1665
1666 mr->access_mode = access_mode;
1667 mr->desc_size = desc_size;
1668 mr->max_descs = ndescs;
1669
1670 err = mlx5_alloc_priv_descs(pd->device, mr, ndescs, desc_size);
1671 if (err)
1672 return err;
1673
1674 mlx5_set_umr_free_mkey(pd, in, ndescs, access_mode, page_shift);
1675
1676 err = mlx5_core_create_mkey(dev->mdev, &mr->mmkey, in, inlen);
1677 if (err)
1678 goto err_free_descs;
1679
1680 mr->mmkey.type = MLX5_MKEY_MR;
1681 mr->ibmr.lkey = mr->mmkey.key;
1682 mr->ibmr.rkey = mr->mmkey.key;
1683
1684 return 0;
1685
1686err_free_descs:
1687 mlx5_free_priv_descs(mr);
1688 return err;
1689}
1690
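/*
 * Allocate an internal protection-information MR (KLM- or MTT-based) that
 * backs an IB_MR_TYPE_INTEGRITY MR. It owns its own descriptor buffer and
 * mkey but is never exposed directly to the caller.
 */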
Max Gurtovoy6c984472019-06-11 18:52:42 +03001691static struct mlx5_ib_mr *mlx5_ib_alloc_pi_mr(struct ib_pd *pd,
Israel Rukshinde0ae952019-06-11 18:52:55 +03001692 u32 max_num_sg, u32 max_num_meta_sg,
1693 int desc_size, int access_mode)
Sagi Grimberg3121e3c2014-02-23 14:19:06 +02001694{
Saeed Mahameedec22eb52016-07-16 06:28:36 +03001695 int inlen = MLX5_ST_SZ_BYTES(create_mkey_in);
Max Gurtovoy6c984472019-06-11 18:52:42 +03001696 int ndescs = ALIGN(max_num_sg + max_num_meta_sg, 4);
Max Gurtovoy7796d2a2019-06-11 18:52:57 +03001697 int page_shift = 0;
Saeed Mahameedec22eb52016-07-16 06:28:36 +03001698 struct mlx5_ib_mr *mr;
Saeed Mahameedec22eb52016-07-16 06:28:36 +03001699 u32 *in;
Sagi Grimbergb005d312016-02-29 19:07:33 +02001700 int err;
Sagi Grimberg3121e3c2014-02-23 14:19:06 +02001701
1702 mr = kzalloc(sizeof(*mr), GFP_KERNEL);
1703 if (!mr)
1704 return ERR_PTR(-ENOMEM);
1705
Max Gurtovoy7796d2a2019-06-11 18:52:57 +03001706 mr->ibmr.pd = pd;
1707 mr->ibmr.device = pd->device;
1708
Saeed Mahameedec22eb52016-07-16 06:28:36 +03001709 in = kzalloc(inlen, GFP_KERNEL);
Sagi Grimberg3121e3c2014-02-23 14:19:06 +02001710 if (!in) {
1711 err = -ENOMEM;
1712 goto err_free;
1713 }
1714
Israel Rukshinde0ae952019-06-11 18:52:55 +03001715 if (access_mode == MLX5_MKC_ACCESS_MODE_MTT)
Max Gurtovoy7796d2a2019-06-11 18:52:57 +03001716 page_shift = PAGE_SHIFT;
Sagi Grimberg3121e3c2014-02-23 14:19:06 +02001717
Max Gurtovoy7796d2a2019-06-11 18:52:57 +03001718 err = _mlx5_alloc_mkey_descs(pd, mr, ndescs, desc_size, page_shift,
1719 access_mode, in, inlen);
Max Gurtovoy6c984472019-06-11 18:52:42 +03001720 if (err)
1721 goto err_free_in;
Max Gurtovoy6c984472019-06-11 18:52:42 +03001722
Max Gurtovoy6c984472019-06-11 18:52:42 +03001723 mr->umem = NULL;
1724 kfree(in);
1725
1726 return mr;
1727
Max Gurtovoy6c984472019-06-11 18:52:42 +03001728err_free_in:
1729 kfree(in);
1730err_free:
1731 kfree(mr);
1732 return ERR_PTR(err);
1733}
1734
Max Gurtovoy7796d2a2019-06-11 18:52:57 +03001735static int mlx5_alloc_mem_reg_descs(struct ib_pd *pd, struct mlx5_ib_mr *mr,
1736 int ndescs, u32 *in, int inlen)
1737{
1738 return _mlx5_alloc_mkey_descs(pd, mr, ndescs, sizeof(struct mlx5_mtt),
1739 PAGE_SHIFT, MLX5_MKC_ACCESS_MODE_MTT, in,
1740 inlen);
1741}
1742
1743static int mlx5_alloc_sg_gaps_descs(struct ib_pd *pd, struct mlx5_ib_mr *mr,
1744 int ndescs, u32 *in, int inlen)
1745{
1746 return _mlx5_alloc_mkey_descs(pd, mr, ndescs, sizeof(struct mlx5_klm),
1747 0, MLX5_MKC_ACCESS_MODE_KLMS, in, inlen);
1748}
1749
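/*
 * Build everything an IB_MR_TYPE_INTEGRITY MR needs: memory and wire PSVs
 * for signature offload, an internal KLM MR and an internal MTT MR for the
 * data/metadata mappings, and finally the signature-enabled (bsf_en) mkey
 * of the exposed MR itself.
 */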
1750static int mlx5_alloc_integrity_descs(struct ib_pd *pd, struct mlx5_ib_mr *mr,
1751 int max_num_sg, int max_num_meta_sg,
1752 u32 *in, int inlen)
1753{
1754 struct mlx5_ib_dev *dev = to_mdev(pd->device);
1755 u32 psv_index[2];
1756 void *mkc;
1757 int err;
1758
1759 mr->sig = kzalloc(sizeof(*mr->sig), GFP_KERNEL);
1760 if (!mr->sig)
1761 return -ENOMEM;
1762
1763 /* create mem & wire PSVs */
1764 err = mlx5_core_create_psv(dev->mdev, to_mpd(pd)->pdn, 2, psv_index);
1765 if (err)
1766 goto err_free_sig;
1767
1768 mr->sig->psv_memory.psv_idx = psv_index[0];
1769 mr->sig->psv_wire.psv_idx = psv_index[1];
1770
1771 mr->sig->sig_status_checked = true;
1772 mr->sig->sig_err_exists = false;
 1773	/* Arm SIGERR on the next UMR */
1774 ++mr->sig->sigerr_count;
1775 mr->klm_mr = mlx5_ib_alloc_pi_mr(pd, max_num_sg, max_num_meta_sg,
1776 sizeof(struct mlx5_klm),
1777 MLX5_MKC_ACCESS_MODE_KLMS);
1778 if (IS_ERR(mr->klm_mr)) {
1779 err = PTR_ERR(mr->klm_mr);
1780 goto err_destroy_psv;
1781 }
1782 mr->mtt_mr = mlx5_ib_alloc_pi_mr(pd, max_num_sg, max_num_meta_sg,
1783 sizeof(struct mlx5_mtt),
1784 MLX5_MKC_ACCESS_MODE_MTT);
1785 if (IS_ERR(mr->mtt_mr)) {
1786 err = PTR_ERR(mr->mtt_mr);
1787 goto err_free_klm_mr;
1788 }
1789
1790 /* Set bsf descriptors for mkey */
1791 mkc = MLX5_ADDR_OF(create_mkey_in, in, memory_key_mkey_entry);
1792 MLX5_SET(mkc, mkc, bsf_en, 1);
1793 MLX5_SET(mkc, mkc, bsf_octword_size, MLX5_MKEY_BSF_OCTO_SIZE);
1794
1795 err = _mlx5_alloc_mkey_descs(pd, mr, 4, sizeof(struct mlx5_klm), 0,
1796 MLX5_MKC_ACCESS_MODE_KLMS, in, inlen);
1797 if (err)
1798 goto err_free_mtt_mr;
1799
1800 return 0;
1801
1802err_free_mtt_mr:
1803 dereg_mr(to_mdev(mr->mtt_mr->ibmr.device), mr->mtt_mr);
1804 mr->mtt_mr = NULL;
1805err_free_klm_mr:
1806 dereg_mr(to_mdev(mr->klm_mr->ibmr.device), mr->klm_mr);
1807 mr->klm_mr = NULL;
1808err_destroy_psv:
1809 if (mlx5_core_destroy_psv(dev->mdev, mr->sig->psv_memory.psv_idx))
1810 mlx5_ib_warn(dev, "failed to destroy mem psv %d\n",
1811 mr->sig->psv_memory.psv_idx);
1812 if (mlx5_core_destroy_psv(dev->mdev, mr->sig->psv_wire.psv_idx))
1813 mlx5_ib_warn(dev, "failed to destroy wire psv %d\n",
1814 mr->sig->psv_wire.psv_idx);
1815err_free_sig:
1816 kfree(mr->sig);
1817
1818 return err;
1819}
1820
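/*
 * Common allocation path for ib_alloc_mr()/ib_alloc_mr_integrity():
 * dispatch on the MR type to set up MTT descriptors (MEM_REG), KLM
 * descriptors (SG_GAPS) or the full integrity construction above.
 */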
Max Gurtovoy6c984472019-06-11 18:52:42 +03001821static struct ib_mr *__mlx5_ib_alloc_mr(struct ib_pd *pd,
1822 enum ib_mr_type mr_type, u32 max_num_sg,
1823 u32 max_num_meta_sg)
1824{
1825 struct mlx5_ib_dev *dev = to_mdev(pd->device);
1826 int inlen = MLX5_ST_SZ_BYTES(create_mkey_in);
1827 int ndescs = ALIGN(max_num_sg, 4);
1828 struct mlx5_ib_mr *mr;
Max Gurtovoy6c984472019-06-11 18:52:42 +03001829 u32 *in;
1830 int err;
1831
1832 mr = kzalloc(sizeof(*mr), GFP_KERNEL);
1833 if (!mr)
1834 return ERR_PTR(-ENOMEM);
1835
1836 in = kzalloc(inlen, GFP_KERNEL);
1837 if (!in) {
1838 err = -ENOMEM;
1839 goto err_free;
1840 }
1841
Max Gurtovoy7796d2a2019-06-11 18:52:57 +03001842 mr->ibmr.device = pd->device;
1843 mr->umem = NULL;
Max Gurtovoy6c984472019-06-11 18:52:42 +03001844
Max Gurtovoy7796d2a2019-06-11 18:52:57 +03001845 switch (mr_type) {
1846 case IB_MR_TYPE_MEM_REG:
1847 err = mlx5_alloc_mem_reg_descs(pd, mr, ndescs, in, inlen);
1848 break;
1849 case IB_MR_TYPE_SG_GAPS:
1850 err = mlx5_alloc_sg_gaps_descs(pd, mr, ndescs, in, inlen);
1851 break;
1852 case IB_MR_TYPE_INTEGRITY:
1853 err = mlx5_alloc_integrity_descs(pd, mr, max_num_sg,
1854 max_num_meta_sg, in, inlen);
1855 break;
1856 default:
Sagi Grimberg9bee1782015-07-30 10:32:35 +03001857 mlx5_ib_warn(dev, "Invalid mr type %d\n", mr_type);
1858 err = -EINVAL;
Sagi Grimberg3121e3c2014-02-23 14:19:06 +02001859 }
1860
Sagi Grimberg3121e3c2014-02-23 14:19:06 +02001861 if (err)
Max Gurtovoy7796d2a2019-06-11 18:52:57 +03001862 goto err_free_in;
Sagi Grimberg3121e3c2014-02-23 14:19:06 +02001863
Sagi Grimberg3121e3c2014-02-23 14:19:06 +02001864 kfree(in);
1865
1866 return &mr->ibmr;
1867
Sagi Grimberg3121e3c2014-02-23 14:19:06 +02001868err_free_in:
1869 kfree(in);
1870err_free:
1871 kfree(mr);
1872 return ERR_PTR(err);
1873}
1874
Max Gurtovoy6c984472019-06-11 18:52:42 +03001875struct ib_mr *mlx5_ib_alloc_mr(struct ib_pd *pd, enum ib_mr_type mr_type,
1876 u32 max_num_sg, struct ib_udata *udata)
1877{
1878 return __mlx5_ib_alloc_mr(pd, mr_type, max_num_sg, 0);
1879}
1880
1881struct ib_mr *mlx5_ib_alloc_mr_integrity(struct ib_pd *pd,
1882 u32 max_num_sg, u32 max_num_meta_sg)
1883{
1884 return __mlx5_ib_alloc_mr(pd, IB_MR_TYPE_INTEGRITY, max_num_sg,
1885 max_num_meta_sg);
1886}
1887
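/*
 * mlx5_ib_alloc_mw() - allocate a memory window. The MW is implemented as
 * a KLM-mode mkey created in the free state; the number of translation
 * entries is taken from the user's num_klms request, rounded up to a
 * multiple of 4.
 */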
Matan Barakd2370e02016-02-29 18:05:30 +02001888struct ib_mw *mlx5_ib_alloc_mw(struct ib_pd *pd, enum ib_mw_type type,
1889 struct ib_udata *udata)
1890{
1891 struct mlx5_ib_dev *dev = to_mdev(pd->device);
Saeed Mahameedec22eb52016-07-16 06:28:36 +03001892 int inlen = MLX5_ST_SZ_BYTES(create_mkey_in);
Matan Barakd2370e02016-02-29 18:05:30 +02001893 struct mlx5_ib_mw *mw = NULL;
Saeed Mahameedec22eb52016-07-16 06:28:36 +03001894 u32 *in = NULL;
1895 void *mkc;
Matan Barakd2370e02016-02-29 18:05:30 +02001896 int ndescs;
1897 int err;
1898 struct mlx5_ib_alloc_mw req = {};
1899 struct {
1900 __u32 comp_mask;
1901 __u32 response_length;
1902 } resp = {};
1903
1904 err = ib_copy_from_udata(&req, udata, min(udata->inlen, sizeof(req)));
1905 if (err)
1906 return ERR_PTR(err);
1907
1908 if (req.comp_mask || req.reserved1 || req.reserved2)
1909 return ERR_PTR(-EOPNOTSUPP);
1910
1911 if (udata->inlen > sizeof(req) &&
1912 !ib_is_udata_cleared(udata, sizeof(req),
1913 udata->inlen - sizeof(req)))
1914 return ERR_PTR(-EOPNOTSUPP);
1915
1916 ndescs = req.num_klms ? roundup(req.num_klms, 4) : roundup(1, 4);
1917
1918 mw = kzalloc(sizeof(*mw), GFP_KERNEL);
Saeed Mahameedec22eb52016-07-16 06:28:36 +03001919 in = kzalloc(inlen, GFP_KERNEL);
Matan Barakd2370e02016-02-29 18:05:30 +02001920 if (!mw || !in) {
1921 err = -ENOMEM;
1922 goto free;
1923 }
1924
Saeed Mahameedec22eb52016-07-16 06:28:36 +03001925 mkc = MLX5_ADDR_OF(create_mkey_in, in, memory_key_mkey_entry);
Matan Barakd2370e02016-02-29 18:05:30 +02001926
Saeed Mahameedec22eb52016-07-16 06:28:36 +03001927 MLX5_SET(mkc, mkc, free, 1);
1928 MLX5_SET(mkc, mkc, translations_octword_size, ndescs);
1929 MLX5_SET(mkc, mkc, pd, to_mpd(pd)->pdn);
1930 MLX5_SET(mkc, mkc, umr_en, 1);
1931 MLX5_SET(mkc, mkc, lr, 1);
Ariel Levkovichcdbd0d22018-04-05 18:53:28 +03001932 MLX5_SET(mkc, mkc, access_mode_1_0, MLX5_MKC_ACCESS_MODE_KLMS);
Saeed Mahameedec22eb52016-07-16 06:28:36 +03001933 MLX5_SET(mkc, mkc, en_rinval, !!((type == IB_MW_TYPE_2)));
1934 MLX5_SET(mkc, mkc, qpn, 0xffffff);
1935
1936 err = mlx5_core_create_mkey(dev->mdev, &mw->mmkey, in, inlen);
Matan Barakd2370e02016-02-29 18:05:30 +02001937 if (err)
1938 goto free;
1939
Artemy Kovalyovaa8e08d2017-01-02 11:37:48 +02001940 mw->mmkey.type = MLX5_MKEY_MW;
Matan Barakd2370e02016-02-29 18:05:30 +02001941 mw->ibmw.rkey = mw->mmkey.key;
Artemy Kovalyovdb570d72017-04-05 09:23:59 +03001942 mw->ndescs = ndescs;
Matan Barakd2370e02016-02-29 18:05:30 +02001943
1944 resp.response_length = min(offsetof(typeof(resp), response_length) +
1945 sizeof(resp.response_length), udata->outlen);
1946 if (resp.response_length) {
1947 err = ib_copy_to_udata(udata, &resp, resp.response_length);
1948 if (err) {
1949 mlx5_core_destroy_mkey(dev->mdev, &mw->mmkey);
1950 goto free;
1951 }
1952 }
1953
1954 kfree(in);
1955 return &mw->ibmw;
1956
1957free:
1958 kfree(mw);
1959 kfree(in);
1960 return ERR_PTR(err);
1961}
1962
1963int mlx5_ib_dealloc_mw(struct ib_mw *mw)
1964{
1965 struct mlx5_ib_mw *mmw = to_mmw(mw);
1966 int err;
1967
1968 err = mlx5_core_destroy_mkey((to_mdev(mw->device))->mdev,
1969 &mmw->mmkey);
1970 if (!err)
1971 kfree(mmw);
1972 return err;
1973}
1974
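/*
 * mlx5_ib_check_mr_status() - report (and clear) signature errors latched
 * on a signature-enabled MR.
 *
 * Usage sketch (not from this file; handle_sig_err() is an illustrative
 * placeholder): after a signature handover completes, a ULP would
 * typically do
 *
 *	struct ib_mr_status st;
 *
 *	if (!ib_check_mr_status(ibmr, IB_MR_CHECK_SIG_STATUS, &st) &&
 *	    (st.fail_status & IB_MR_CHECK_SIG_STATUS))
 *		handle_sig_err(&st.sig_err);
 *
 * which reaches this function through the check_mr_status verb.
 */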
Sagi Grimbergd5436ba2014-02-23 14:19:12 +02001975int mlx5_ib_check_mr_status(struct ib_mr *ibmr, u32 check_mask,
1976 struct ib_mr_status *mr_status)
1977{
1978 struct mlx5_ib_mr *mmr = to_mmr(ibmr);
1979 int ret = 0;
1980
1981 if (check_mask & ~IB_MR_CHECK_SIG_STATUS) {
1982 pr_err("Invalid status check mask\n");
1983 ret = -EINVAL;
1984 goto done;
1985 }
1986
1987 mr_status->fail_status = 0;
1988 if (check_mask & IB_MR_CHECK_SIG_STATUS) {
1989 if (!mmr->sig) {
1990 ret = -EINVAL;
1991 pr_err("signature status check requested on a non-signature enabled MR\n");
1992 goto done;
1993 }
1994
1995 mmr->sig->sig_status_checked = true;
1996 if (!mmr->sig->sig_err_exists)
1997 goto done;
1998
1999 if (ibmr->lkey == mmr->sig->err_item.key)
2000 memcpy(&mr_status->sig_err, &mmr->sig->err_item,
2001 sizeof(mr_status->sig_err));
2002 else {
2003 mr_status->sig_err.err_type = IB_SIG_BAD_GUARD;
2004 mr_status->sig_err.sig_err_offset = 0;
2005 mr_status->sig_err.key = mmr->sig->err_item.key;
2006 }
2007
2008 mmr->sig->sig_err_exists = false;
2009 mr_status->fail_status |= IB_MR_CHECK_SIG_STATUS;
2010 }
2011
2012done:
2013 return ret;
2014}
Sagi Grimberg8a187ee2015-10-13 19:11:26 +03002015
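/*
 * Fast path for map_mr_sg_pi: when the data (and, if present, metadata)
 * scatterlist is a single DMA-contiguous entry, record its address and
 * length directly on the MR so the buffers can be described with
 * local_dma_lkey and no UMR is needed.
 */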
Sagi Grimbergb005d312016-02-29 19:07:33 +02002016static int
Max Gurtovoy2563e2f2019-06-11 18:52:56 +03002017mlx5_ib_map_pa_mr_sg_pi(struct ib_mr *ibmr, struct scatterlist *data_sg,
2018 int data_sg_nents, unsigned int *data_sg_offset,
2019 struct scatterlist *meta_sg, int meta_sg_nents,
2020 unsigned int *meta_sg_offset)
2021{
2022 struct mlx5_ib_mr *mr = to_mmr(ibmr);
2023 unsigned int sg_offset = 0;
2024 int n = 0;
2025
2026 mr->meta_length = 0;
2027 if (data_sg_nents == 1) {
2028 n++;
2029 mr->ndescs = 1;
2030 if (data_sg_offset)
2031 sg_offset = *data_sg_offset;
2032 mr->data_length = sg_dma_len(data_sg) - sg_offset;
2033 mr->data_iova = sg_dma_address(data_sg) + sg_offset;
2034 if (meta_sg_nents == 1) {
2035 n++;
2036 mr->meta_ndescs = 1;
2037 if (meta_sg_offset)
2038 sg_offset = *meta_sg_offset;
2039 else
2040 sg_offset = 0;
2041 mr->meta_length = sg_dma_len(meta_sg) - sg_offset;
2042 mr->pi_iova = sg_dma_address(meta_sg) + sg_offset;
2043 }
2044 ibmr->length = mr->data_length + mr->meta_length;
2045 }
2046
2047 return n;
2048}
2049
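/*
 * Translate the data (and optionally metadata) scatterlists into KLM
 * descriptors that all point at the PD's local_dma_lkey, updating the
 * MR's ndescs/meta_ndescs and length accounting as it goes.
 */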
2050static int
Sagi Grimbergb005d312016-02-29 19:07:33 +02002051mlx5_ib_sg_to_klms(struct mlx5_ib_mr *mr,
2052 struct scatterlist *sgl,
Christoph Hellwigff2ba992016-05-03 18:01:04 +02002053 unsigned short sg_nents,
Max Gurtovoy6c984472019-06-11 18:52:42 +03002054 unsigned int *sg_offset_p,
2055 struct scatterlist *meta_sgl,
2056 unsigned short meta_sg_nents,
2057 unsigned int *meta_sg_offset_p)
Sagi Grimbergb005d312016-02-29 19:07:33 +02002058{
2059 struct scatterlist *sg = sgl;
2060 struct mlx5_klm *klms = mr->descs;
Bart Van Assche9aa8b322016-05-12 10:49:15 -07002061 unsigned int sg_offset = sg_offset_p ? *sg_offset_p : 0;
Sagi Grimbergb005d312016-02-29 19:07:33 +02002062 u32 lkey = mr->ibmr.pd->local_dma_lkey;
Max Gurtovoy6c984472019-06-11 18:52:42 +03002063 int i, j = 0;
Sagi Grimbergb005d312016-02-29 19:07:33 +02002064
Christoph Hellwigff2ba992016-05-03 18:01:04 +02002065 mr->ibmr.iova = sg_dma_address(sg) + sg_offset;
Sagi Grimbergb005d312016-02-29 19:07:33 +02002066 mr->ibmr.length = 0;
Sagi Grimbergb005d312016-02-29 19:07:33 +02002067
2068 for_each_sg(sgl, sg, sg_nents, i) {
Bart Van Assche99975cd2017-04-24 15:15:28 -07002069 if (unlikely(i >= mr->max_descs))
Sagi Grimbergb005d312016-02-29 19:07:33 +02002070 break;
Christoph Hellwigff2ba992016-05-03 18:01:04 +02002071 klms[i].va = cpu_to_be64(sg_dma_address(sg) + sg_offset);
2072 klms[i].bcount = cpu_to_be32(sg_dma_len(sg) - sg_offset);
Sagi Grimbergb005d312016-02-29 19:07:33 +02002073 klms[i].key = cpu_to_be32(lkey);
Sagi Grimberg0a49f2c2017-04-23 14:31:42 +03002074 mr->ibmr.length += sg_dma_len(sg) - sg_offset;
Christoph Hellwigff2ba992016-05-03 18:01:04 +02002075
2076 sg_offset = 0;
Sagi Grimbergb005d312016-02-29 19:07:33 +02002077 }
2078
Bart Van Assche9aa8b322016-05-12 10:49:15 -07002079 if (sg_offset_p)
2080 *sg_offset_p = sg_offset;
2081
Max Gurtovoy6c984472019-06-11 18:52:42 +03002082 mr->ndescs = i;
2083 mr->data_length = mr->ibmr.length;
2084
2085 if (meta_sg_nents) {
2086 sg = meta_sgl;
2087 sg_offset = meta_sg_offset_p ? *meta_sg_offset_p : 0;
2088 for_each_sg(meta_sgl, sg, meta_sg_nents, j) {
2089 if (unlikely(i + j >= mr->max_descs))
2090 break;
2091 klms[i + j].va = cpu_to_be64(sg_dma_address(sg) +
2092 sg_offset);
2093 klms[i + j].bcount = cpu_to_be32(sg_dma_len(sg) -
2094 sg_offset);
2095 klms[i + j].key = cpu_to_be32(lkey);
2096 mr->ibmr.length += sg_dma_len(sg) - sg_offset;
2097
2098 sg_offset = 0;
2099 }
2100 if (meta_sg_offset_p)
2101 *meta_sg_offset_p = sg_offset;
2102
2103 mr->meta_ndescs = j;
2104 mr->meta_length = mr->ibmr.length - mr->data_length;
2105 }
2106
2107 return i + j;
Sagi Grimbergb005d312016-02-29 19:07:33 +02002108}
2109
Sagi Grimberg8a187ee2015-10-13 19:11:26 +03002110static int mlx5_set_page(struct ib_mr *ibmr, u64 addr)
2111{
2112 struct mlx5_ib_mr *mr = to_mmr(ibmr);
2113 __be64 *descs;
2114
2115 if (unlikely(mr->ndescs == mr->max_descs))
2116 return -ENOMEM;
2117
2118 descs = mr->descs;
2119 descs[mr->ndescs++] = cpu_to_be64(addr | MLX5_EN_RD | MLX5_EN_WR);
2120
2121 return 0;
2122}
2123
Israel Rukshinde0ae952019-06-11 18:52:55 +03002124static int mlx5_set_page_pi(struct ib_mr *ibmr, u64 addr)
2125{
2126 struct mlx5_ib_mr *mr = to_mmr(ibmr);
2127 __be64 *descs;
2128
2129 if (unlikely(mr->ndescs + mr->meta_ndescs == mr->max_descs))
2130 return -ENOMEM;
2131
2132 descs = mr->descs;
2133 descs[mr->ndescs + mr->meta_ndescs++] =
2134 cpu_to_be64(addr | MLX5_EN_RD | MLX5_EN_WR);
2135
2136 return 0;
2137}
2138
2139static int
2140mlx5_ib_map_mtt_mr_sg_pi(struct ib_mr *ibmr, struct scatterlist *data_sg,
Max Gurtovoy6c984472019-06-11 18:52:42 +03002141 int data_sg_nents, unsigned int *data_sg_offset,
2142 struct scatterlist *meta_sg, int meta_sg_nents,
2143 unsigned int *meta_sg_offset)
2144{
2145 struct mlx5_ib_mr *mr = to_mmr(ibmr);
Israel Rukshinde0ae952019-06-11 18:52:55 +03002146 struct mlx5_ib_mr *pi_mr = mr->mtt_mr;
Max Gurtovoy6c984472019-06-11 18:52:42 +03002147 int n;
2148
Israel Rukshinde0ae952019-06-11 18:52:55 +03002149 pi_mr->ndescs = 0;
2150 pi_mr->meta_ndescs = 0;
2151 pi_mr->meta_length = 0;
2152
2153 ib_dma_sync_single_for_cpu(ibmr->device, pi_mr->desc_map,
2154 pi_mr->desc_size * pi_mr->max_descs,
2155 DMA_TO_DEVICE);
2156
2157 pi_mr->ibmr.page_size = ibmr->page_size;
2158 n = ib_sg_to_pages(&pi_mr->ibmr, data_sg, data_sg_nents, data_sg_offset,
2159 mlx5_set_page);
2160 if (n != data_sg_nents)
2161 return n;
2162
Max Gurtovoy2563e2f2019-06-11 18:52:56 +03002163 pi_mr->data_iova = pi_mr->ibmr.iova;
Israel Rukshinde0ae952019-06-11 18:52:55 +03002164 pi_mr->data_length = pi_mr->ibmr.length;
2165 pi_mr->ibmr.length = pi_mr->data_length;
2166 ibmr->length = pi_mr->data_length;
2167
2168 if (meta_sg_nents) {
2169 u64 page_mask = ~((u64)ibmr->page_size - 1);
Max Gurtovoy2563e2f2019-06-11 18:52:56 +03002170 u64 iova = pi_mr->data_iova;
Israel Rukshinde0ae952019-06-11 18:52:55 +03002171
2172 n += ib_sg_to_pages(&pi_mr->ibmr, meta_sg, meta_sg_nents,
2173 meta_sg_offset, mlx5_set_page_pi);
2174
2175 pi_mr->meta_length = pi_mr->ibmr.length;
2176 /*
2177 * PI address for the HW is the offset of the metadata address
2178 * relative to the first data page address.
 2179		 * It equals the first data page address + the size of the data
 2180		 * pages + the metadata offset within the first metadata page.
2181 */
2182 pi_mr->pi_iova = (iova & page_mask) +
2183 pi_mr->ndescs * ibmr->page_size +
2184 (pi_mr->ibmr.iova & ~page_mask);
2185 /*
 2186		 * In order to use one MTT MR for both data and metadata, we also
 2187		 * register the gaps between the end of the data and the start of
 2188		 * the metadata (the sig MR will verify that the HW accesses the
 2189		 * right addresses). This mapping is safe because we use an
 2190		 * internal mkey for the registration.
2191 */
2192 pi_mr->ibmr.length = pi_mr->pi_iova + pi_mr->meta_length - iova;
2193 pi_mr->ibmr.iova = iova;
2194 ibmr->length += pi_mr->meta_length;
2195 }
2196
2197 ib_dma_sync_single_for_device(ibmr->device, pi_mr->desc_map,
2198 pi_mr->desc_size * pi_mr->max_descs,
2199 DMA_TO_DEVICE);
2200
2201 return n;
2202}
2203
2204static int
2205mlx5_ib_map_klm_mr_sg_pi(struct ib_mr *ibmr, struct scatterlist *data_sg,
2206 int data_sg_nents, unsigned int *data_sg_offset,
2207 struct scatterlist *meta_sg, int meta_sg_nents,
2208 unsigned int *meta_sg_offset)
2209{
2210 struct mlx5_ib_mr *mr = to_mmr(ibmr);
2211 struct mlx5_ib_mr *pi_mr = mr->klm_mr;
2212 int n;
Max Gurtovoy6c984472019-06-11 18:52:42 +03002213
2214 pi_mr->ndescs = 0;
2215 pi_mr->meta_ndescs = 0;
2216 pi_mr->meta_length = 0;
2217
2218 ib_dma_sync_single_for_cpu(ibmr->device, pi_mr->desc_map,
2219 pi_mr->desc_size * pi_mr->max_descs,
2220 DMA_TO_DEVICE);
2221
2222 n = mlx5_ib_sg_to_klms(pi_mr, data_sg, data_sg_nents, data_sg_offset,
2223 meta_sg, meta_sg_nents, meta_sg_offset);
2224
Max Gurtovoy6c984472019-06-11 18:52:42 +03002225 ib_dma_sync_single_for_device(ibmr->device, pi_mr->desc_map,
2226 pi_mr->desc_size * pi_mr->max_descs,
2227 DMA_TO_DEVICE);
2228
Israel Rukshinde0ae952019-06-11 18:52:55 +03002229	/* This is a zero-based memory region */
Max Gurtovoy2563e2f2019-06-11 18:52:56 +03002230 pi_mr->data_iova = 0;
Israel Rukshinde0ae952019-06-11 18:52:55 +03002231 pi_mr->ibmr.iova = 0;
2232 pi_mr->pi_iova = pi_mr->data_length;
2233 ibmr->length = pi_mr->ibmr.length;
2234
2235 return n;
2236}
2237
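/*
 * mlx5_ib_map_mr_sg_pi() - map data + protection-information scatterlists
 * onto an IB_MR_TYPE_INTEGRITY MR. Tries the cheapest mapping first:
 * direct PA descriptors, then the internal MTT MR, and only then the
 * internal KLM MR (indirect access).
 */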
2238int mlx5_ib_map_mr_sg_pi(struct ib_mr *ibmr, struct scatterlist *data_sg,
2239 int data_sg_nents, unsigned int *data_sg_offset,
2240 struct scatterlist *meta_sg, int meta_sg_nents,
2241 unsigned int *meta_sg_offset)
2242{
2243 struct mlx5_ib_mr *mr = to_mmr(ibmr);
Max Gurtovoy2563e2f2019-06-11 18:52:56 +03002244 struct mlx5_ib_mr *pi_mr = NULL;
Israel Rukshinde0ae952019-06-11 18:52:55 +03002245 int n;
2246
2247 WARN_ON(ibmr->type != IB_MR_TYPE_INTEGRITY);
2248
Max Gurtovoy2563e2f2019-06-11 18:52:56 +03002249 mr->ndescs = 0;
2250 mr->data_length = 0;
2251 mr->data_iova = 0;
2252 mr->meta_ndescs = 0;
2253 mr->pi_iova = 0;
2254 /*
2255 * As a performance optimization, if possible, there is no need to
 2256	 * perform a UMR operation to register the data/metadata buffers.
 2257	 * First try to map the sg lists to PA descriptors with local_dma_lkey.
 2258	 * Fall back to UMR only in case of a failure.
2259 */
2260 n = mlx5_ib_map_pa_mr_sg_pi(ibmr, data_sg, data_sg_nents,
2261 data_sg_offset, meta_sg, meta_sg_nents,
2262 meta_sg_offset);
2263 if (n == data_sg_nents + meta_sg_nents)
2264 goto out;
Israel Rukshinde0ae952019-06-11 18:52:55 +03002265 /*
2266 * As a performance optimization, if possible, there is no need to map
2267 * the sg lists to KLM descriptors. First try to map the sg lists to MTT
 2268	 * descriptors and fall back to KLM only in case of a failure.
 2269	 * It's more efficient for the HW to work with MTT descriptors
 2270	 * (especially under high load).
2271 * Use KLM (indirect access) only if it's mandatory.
2272 */
Max Gurtovoy2563e2f2019-06-11 18:52:56 +03002273 pi_mr = mr->mtt_mr;
Israel Rukshinde0ae952019-06-11 18:52:55 +03002274 n = mlx5_ib_map_mtt_mr_sg_pi(ibmr, data_sg, data_sg_nents,
2275 data_sg_offset, meta_sg, meta_sg_nents,
2276 meta_sg_offset);
2277 if (n == data_sg_nents + meta_sg_nents)
2278 goto out;
2279
2280 pi_mr = mr->klm_mr;
2281 n = mlx5_ib_map_klm_mr_sg_pi(ibmr, data_sg, data_sg_nents,
2282 data_sg_offset, meta_sg, meta_sg_nents,
2283 meta_sg_offset);
Max Gurtovoy6c984472019-06-11 18:52:42 +03002284 if (unlikely(n != data_sg_nents + meta_sg_nents))
2285 return -ENOMEM;
2286
Israel Rukshinde0ae952019-06-11 18:52:55 +03002287out:
 2288	/* This is a zero-based memory region */
2289 ibmr->iova = 0;
2290 mr->pi_mr = pi_mr;
Max Gurtovoy2563e2f2019-06-11 18:52:56 +03002291 if (pi_mr)
2292 ibmr->sig_attrs->meta_length = pi_mr->meta_length;
2293 else
2294 ibmr->sig_attrs->meta_length = mr->meta_length;
Israel Rukshinde0ae952019-06-11 18:52:55 +03002295
Max Gurtovoy6c984472019-06-11 18:52:42 +03002296 return 0;
2297}
2298
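/*
 * mlx5_ib_map_mr_sg() - the .map_mr_sg verb. ULPs normally reach it
 * through ib_map_mr_sg(ibmr, sg, sg_nents, &sg_offset, page_size) before
 * posting an IB_WR_REG_MR work request (that call is only a sketch of the
 * typical caller, not code from this driver). KLM-mode MRs (SG_GAPS) are
 * mapped via mlx5_ib_sg_to_klms(), all others via ib_sg_to_pages().
 */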
Christoph Hellwigff2ba992016-05-03 18:01:04 +02002299int mlx5_ib_map_mr_sg(struct ib_mr *ibmr, struct scatterlist *sg, int sg_nents,
Bart Van Assche9aa8b322016-05-12 10:49:15 -07002300 unsigned int *sg_offset)
Sagi Grimberg8a187ee2015-10-13 19:11:26 +03002301{
2302 struct mlx5_ib_mr *mr = to_mmr(ibmr);
2303 int n;
2304
2305 mr->ndescs = 0;
2306
2307 ib_dma_sync_single_for_cpu(ibmr->device, mr->desc_map,
2308 mr->desc_size * mr->max_descs,
2309 DMA_TO_DEVICE);
2310
Saeed Mahameedec22eb52016-07-16 06:28:36 +03002311 if (mr->access_mode == MLX5_MKC_ACCESS_MODE_KLMS)
Max Gurtovoy6c984472019-06-11 18:52:42 +03002312 n = mlx5_ib_sg_to_klms(mr, sg, sg_nents, sg_offset, NULL, 0,
2313 NULL);
Sagi Grimbergb005d312016-02-29 19:07:33 +02002314 else
Christoph Hellwigff2ba992016-05-03 18:01:04 +02002315 n = ib_sg_to_pages(ibmr, sg, sg_nents, sg_offset,
2316 mlx5_set_page);
Sagi Grimberg8a187ee2015-10-13 19:11:26 +03002317
2318 ib_dma_sync_single_for_device(ibmr->device, mr->desc_map,
2319 mr->desc_size * mr->max_descs,
2320 DMA_TO_DEVICE);
2321
2322 return n;
2323}