/*
 * Copyright (c) 2013-2015, Mellanox Technologies. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses. You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/debugfs.h>
#include <linux/highmem.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/errno.h>
#include <linux/pci.h>
#include <linux/dma-mapping.h>
#include <linux/slab.h>
#include <linux/bitmap.h>
#if defined(CONFIG_X86)
#include <asm/pat.h>
#endif
#include <linux/sched.h>
#include <linux/sched/mm.h>
#include <linux/sched/task.h>
#include <linux/delay.h>
#include <rdma/ib_user_verbs.h>
#include <rdma/ib_addr.h>
#include <rdma/ib_cache.h>
#include <linux/mlx5/port.h>
#include <linux/mlx5/vport.h>
#include <linux/mlx5/fs.h>
#include <linux/list.h>
#include <rdma/ib_smi.h>
#include <rdma/ib_umem.h>
#include <linux/in.h>
#include <linux/etherdevice.h>
#include "mlx5_ib.h"
#include "ib_rep.h"
#include "cmd.h"
#include "srq.h"
#include <linux/mlx5/fs_helpers.h>
#include <linux/mlx5/accel.h>
#include <rdma/uverbs_std_types.h>
#include <rdma/mlx5_user_ioctl_verbs.h>
#include <rdma/mlx5_user_ioctl_cmds.h>

#define UVERBS_MODULE_NAME mlx5_ib
#include <rdma/uverbs_named_ioctl.h>

#define DRIVER_NAME "mlx5_ib"
#define DRIVER_VERSION "5.0-0"

MODULE_AUTHOR("Eli Cohen <eli@mellanox.com>");
MODULE_DESCRIPTION("Mellanox Connect-IB HCA IB driver");
MODULE_LICENSE("Dual BSD/GPL");

static char mlx5_version[] =
	DRIVER_NAME ": Mellanox Connect-IB Infiniband driver v"
	DRIVER_VERSION "\n";

struct mlx5_ib_event_work {
	struct work_struct work;
	union {
		struct mlx5_ib_dev *dev;
		struct mlx5_ib_multiport_info *mpi;
	};
	bool is_slave;
	unsigned int event;
	void *param;
};

enum {
	MLX5_ATOMIC_SIZE_QP_8BYTES = 1 << 3,
};

static struct workqueue_struct *mlx5_ib_event_wq;
static LIST_HEAD(mlx5_ib_unaffiliated_port_list);
static LIST_HEAD(mlx5_ib_dev_list);
/*
 * This mutex should be held when accessing either of the above lists
 */
static DEFINE_MUTEX(mlx5_ib_multiport_mutex);

/* We can't use an array for xlt_emergency_page because dma_map_single
 * doesn't work on kernel modules memory
 */
static unsigned long xlt_emergency_page;
static struct mutex xlt_emergency_page_mutex;

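/* Return the ib_dev currently bound to a multiport info entry, taking the
 * multiport mutex so the pointer cannot change underneath us.
 */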
struct mlx5_ib_dev *mlx5_ib_get_ibdev_from_mpi(struct mlx5_ib_multiport_info *mpi)
{
	struct mlx5_ib_dev *dev;

	mutex_lock(&mlx5_ib_multiport_mutex);
	dev = mpi->ibdev;
	mutex_unlock(&mlx5_ib_multiport_mutex);
	return dev;
}

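/* Map the FW port_type capability onto the RDMA link layer reported to the
 * IB core.
 */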
static enum rdma_link_layer
mlx5_port_type_cap_to_rdma_ll(int port_type_cap)
{
	switch (port_type_cap) {
	case MLX5_CAP_PORT_TYPE_IB:
		return IB_LINK_LAYER_INFINIBAND;
	case MLX5_CAP_PORT_TYPE_ETH:
		return IB_LINK_LAYER_ETHERNET;
	default:
		return IB_LINK_LAYER_UNSPECIFIED;
	}
}

static enum rdma_link_layer
mlx5_ib_port_link_layer(struct ib_device *device, u8 port_num)
{
	struct mlx5_ib_dev *dev = to_mdev(device);
	int port_type_cap = MLX5_CAP_GEN(dev->mdev, port_type);

	return mlx5_port_type_cap_to_rdma_ll(port_type_cap);
}

static int get_port_state(struct ib_device *ibdev,
			  u8 port_num,
			  enum ib_port_state *state)
{
	struct ib_port_attr attr;
	int ret;

	memset(&attr, 0, sizeof(attr));
	ret = ibdev->ops.query_port(ibdev, port_num, &attr);
	if (!ret)
		*state = attr.state;
	return ret;
}

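/* Netdev notifier: track which netdev backs each RoCE port and translate
 * carrier/state changes into IB port events.
 */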
static int mlx5_netdev_event(struct notifier_block *this,
			     unsigned long event, void *ptr)
{
	struct mlx5_roce *roce = container_of(this, struct mlx5_roce, nb);
	struct net_device *ndev = netdev_notifier_info_to_dev(ptr);
	u8 port_num = roce->native_port_num;
	struct mlx5_core_dev *mdev;
	struct mlx5_ib_dev *ibdev;

	ibdev = roce->dev;
	mdev = mlx5_ib_get_native_port_mdev(ibdev, port_num, NULL);
	if (!mdev)
		return NOTIFY_DONE;

	switch (event) {
	case NETDEV_REGISTER:
	case NETDEV_UNREGISTER:
		write_lock(&roce->netdev_lock);
		if (ibdev->rep) {
			struct mlx5_eswitch *esw = ibdev->mdev->priv.eswitch;
			struct net_device *rep_ndev;

			rep_ndev = mlx5_ib_get_rep_netdev(esw,
							  ibdev->rep->vport);
			if (rep_ndev == ndev)
				roce->netdev = (event == NETDEV_UNREGISTER) ?
					NULL : ndev;
		} else if (ndev->dev.parent == &mdev->pdev->dev) {
			roce->netdev = (event == NETDEV_UNREGISTER) ?
				NULL : ndev;
		}
		write_unlock(&roce->netdev_lock);
		break;

	case NETDEV_CHANGE:
	case NETDEV_UP:
	case NETDEV_DOWN: {
		struct net_device *lag_ndev = mlx5_lag_get_roce_netdev(mdev);
		struct net_device *upper = NULL;

		if (lag_ndev) {
			upper = netdev_master_upper_dev_get(lag_ndev);
			dev_put(lag_ndev);
		}

		if ((upper == ndev || (!upper && ndev == roce->netdev))
		    && ibdev->ib_active) {
			struct ib_event ibev = { };
			enum ib_port_state port_state;

			if (get_port_state(&ibdev->ib_dev, port_num,
					   &port_state))
				goto done;

			if (roce->last_port_state == port_state)
				goto done;

			roce->last_port_state = port_state;
			ibev.device = &ibdev->ib_dev;
			if (port_state == IB_PORT_DOWN)
				ibev.event = IB_EVENT_PORT_ERR;
			else if (port_state == IB_PORT_ACTIVE)
				ibev.event = IB_EVENT_PORT_ACTIVE;
			else
				goto done;

			ibev.element.port_num = port_num;
			ib_dispatch_event(&ibev);
		}
		break;
	}

	default:
		break;
	}
done:
	mlx5_ib_put_native_port_mdev(ibdev, port_num);
	return NOTIFY_DONE;
}

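/* Return the netdev backing an IB port, with a reference held for the caller. */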
static struct net_device *mlx5_ib_get_netdev(struct ib_device *device,
					     u8 port_num)
{
	struct mlx5_ib_dev *ibdev = to_mdev(device);
	struct net_device *ndev;
	struct mlx5_core_dev *mdev;

	mdev = mlx5_ib_get_native_port_mdev(ibdev, port_num, NULL);
	if (!mdev)
		return NULL;

	ndev = mlx5_lag_get_roce_netdev(mdev);
	if (ndev)
		goto out;

	/* Ensure ndev does not disappear before we invoke dev_hold()
	 */
	read_lock(&ibdev->roce[port_num - 1].netdev_lock);
	ndev = ibdev->roce[port_num - 1].netdev;
	if (ndev)
		dev_hold(ndev);
	read_unlock(&ibdev->roce[port_num - 1].netdev_lock);

out:
	mlx5_ib_put_native_port_mdev(ibdev, port_num);
	return ndev;
}

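/* Resolve the mlx5_core_dev that owns an IB port. For slave ports of a
 * multiport device a reference is taken; drop it with
 * mlx5_ib_put_native_port_mdev().
 */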
struct mlx5_core_dev *mlx5_ib_get_native_port_mdev(struct mlx5_ib_dev *ibdev,
						   u8 ib_port_num,
						   u8 *native_port_num)
{
	enum rdma_link_layer ll = mlx5_ib_port_link_layer(&ibdev->ib_dev,
							  ib_port_num);
	struct mlx5_core_dev *mdev = NULL;
	struct mlx5_ib_multiport_info *mpi;
	struct mlx5_ib_port *port;

	if (!mlx5_core_mp_enabled(ibdev->mdev) ||
	    ll != IB_LINK_LAYER_ETHERNET) {
		if (native_port_num)
			*native_port_num = ib_port_num;
		return ibdev->mdev;
	}

	if (native_port_num)
		*native_port_num = 1;

	port = &ibdev->port[ib_port_num - 1];
	if (!port)
		return NULL;

	spin_lock(&port->mp.mpi_lock);
	mpi = ibdev->port[ib_port_num - 1].mp.mpi;
	if (mpi && !mpi->unaffiliate) {
		mdev = mpi->mdev;
		/* If it's the master no need to refcount, it'll exist
		 * as long as the ib_dev exists.
		 */
		if (!mpi->is_master)
			mpi->mdev_refcnt++;
	}
	spin_unlock(&port->mp.mpi_lock);

	return mdev;
}

void mlx5_ib_put_native_port_mdev(struct mlx5_ib_dev *ibdev, u8 port_num)
{
	enum rdma_link_layer ll = mlx5_ib_port_link_layer(&ibdev->ib_dev,
							  port_num);
	struct mlx5_ib_multiport_info *mpi;
	struct mlx5_ib_port *port;

	if (!mlx5_core_mp_enabled(ibdev->mdev) || ll != IB_LINK_LAYER_ETHERNET)
		return;

	port = &ibdev->port[port_num - 1];

	spin_lock(&port->mp.mpi_lock);
	mpi = ibdev->port[port_num - 1].mp.mpi;
	if (mpi->is_master)
		goto out;

	mpi->mdev_refcnt--;
	if (mpi->unaffiliate)
		complete(&mpi->unref_comp);
out:
	spin_unlock(&port->mp.mpi_lock);
}

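/* Convert the Ethernet proto_oper mask reported by FW into the equivalent
 * IB speed and width.
 */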
static int translate_eth_proto_oper(u32 eth_proto_oper, u8 *active_speed,
				    u8 *active_width)
{
	switch (eth_proto_oper) {
	case MLX5E_PROT_MASK(MLX5E_1000BASE_CX_SGMII):
	case MLX5E_PROT_MASK(MLX5E_1000BASE_KX):
	case MLX5E_PROT_MASK(MLX5E_100BASE_TX):
	case MLX5E_PROT_MASK(MLX5E_1000BASE_T):
		*active_width = IB_WIDTH_1X;
		*active_speed = IB_SPEED_SDR;
		break;
	case MLX5E_PROT_MASK(MLX5E_10GBASE_T):
	case MLX5E_PROT_MASK(MLX5E_10GBASE_CX4):
	case MLX5E_PROT_MASK(MLX5E_10GBASE_KX4):
	case MLX5E_PROT_MASK(MLX5E_10GBASE_KR):
	case MLX5E_PROT_MASK(MLX5E_10GBASE_CR):
	case MLX5E_PROT_MASK(MLX5E_10GBASE_SR):
	case MLX5E_PROT_MASK(MLX5E_10GBASE_ER):
		*active_width = IB_WIDTH_1X;
		*active_speed = IB_SPEED_QDR;
		break;
	case MLX5E_PROT_MASK(MLX5E_25GBASE_CR):
	case MLX5E_PROT_MASK(MLX5E_25GBASE_KR):
	case MLX5E_PROT_MASK(MLX5E_25GBASE_SR):
		*active_width = IB_WIDTH_1X;
		*active_speed = IB_SPEED_EDR;
		break;
	case MLX5E_PROT_MASK(MLX5E_40GBASE_CR4):
	case MLX5E_PROT_MASK(MLX5E_40GBASE_KR4):
	case MLX5E_PROT_MASK(MLX5E_40GBASE_SR4):
	case MLX5E_PROT_MASK(MLX5E_40GBASE_LR4):
		*active_width = IB_WIDTH_4X;
		*active_speed = IB_SPEED_QDR;
		break;
	case MLX5E_PROT_MASK(MLX5E_50GBASE_CR2):
	case MLX5E_PROT_MASK(MLX5E_50GBASE_KR2):
	case MLX5E_PROT_MASK(MLX5E_50GBASE_SR2):
		*active_width = IB_WIDTH_1X;
		*active_speed = IB_SPEED_HDR;
		break;
	case MLX5E_PROT_MASK(MLX5E_56GBASE_R4):
		*active_width = IB_WIDTH_4X;
		*active_speed = IB_SPEED_FDR;
		break;
	case MLX5E_PROT_MASK(MLX5E_100GBASE_CR4):
	case MLX5E_PROT_MASK(MLX5E_100GBASE_SR4):
	case MLX5E_PROT_MASK(MLX5E_100GBASE_KR4):
	case MLX5E_PROT_MASK(MLX5E_100GBASE_LR4):
		*active_width = IB_WIDTH_4X;
		*active_speed = IB_SPEED_EDR;
		break;
	default:
		return -EINVAL;
	}

	return 0;
}

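/* Fill ib_port_attr for an Ethernet (RoCE) port, deriving state and MTU from
 * the backing netdev.
 */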
static int mlx5_query_port_roce(struct ib_device *device, u8 port_num,
				struct ib_port_attr *props)
{
	struct mlx5_ib_dev *dev = to_mdev(device);
	struct mlx5_core_dev *mdev;
	struct net_device *ndev, *upper;
	enum ib_mtu ndev_ib_mtu;
	bool put_mdev = true;
	u16 qkey_viol_cntr;
	u32 eth_prot_oper;
	u8 mdev_port_num;
	int err;

	mdev = mlx5_ib_get_native_port_mdev(dev, port_num, &mdev_port_num);
	if (!mdev) {
		/* This means the port isn't affiliated yet. Get the
		 * info for the master port instead.
		 */
		put_mdev = false;
		mdev = dev->mdev;
		mdev_port_num = 1;
		port_num = 1;
	}

	/* Possible bad flows are checked before filling out props so in case
	 * of an error it will still be zeroed out.
	 */
	err = mlx5_query_port_eth_proto_oper(mdev, &eth_prot_oper,
					     mdev_port_num);
	if (err)
		goto out;

	props->active_width = IB_WIDTH_4X;
	props->active_speed = IB_SPEED_QDR;

	translate_eth_proto_oper(eth_prot_oper, &props->active_speed,
				 &props->active_width);

	props->port_cap_flags |= IB_PORT_CM_SUP;
	props->ip_gids = true;

	props->gid_tbl_len = MLX5_CAP_ROCE(dev->mdev,
					   roce_address_table_size);
	props->max_mtu = IB_MTU_4096;
	props->max_msg_sz = 1 << MLX5_CAP_GEN(dev->mdev, log_max_msg);
	props->pkey_tbl_len = 1;
	props->state = IB_PORT_DOWN;
	props->phys_state = 3;

	mlx5_query_nic_vport_qkey_viol_cntr(mdev, &qkey_viol_cntr);
	props->qkey_viol_cntr = qkey_viol_cntr;

	/* If this is a stub query for an unaffiliated port stop here */
	if (!put_mdev)
		goto out;

	ndev = mlx5_ib_get_netdev(device, port_num);
	if (!ndev)
		goto out;

	if (mlx5_lag_is_active(dev->mdev)) {
		rcu_read_lock();
		upper = netdev_master_upper_dev_get_rcu(ndev);
		if (upper) {
			dev_put(ndev);
			ndev = upper;
			dev_hold(ndev);
		}
		rcu_read_unlock();
	}

	if (netif_running(ndev) && netif_carrier_ok(ndev)) {
		props->state = IB_PORT_ACTIVE;
		props->phys_state = 5;
	}

	ndev_ib_mtu = iboe_get_mtu(ndev->mtu);

	dev_put(ndev);

	props->active_mtu = min(props->max_mtu, ndev_ib_mtu);
out:
	if (put_mdev)
		mlx5_ib_put_native_port_mdev(dev, port_num);
	return err;
}

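/* Program one RoCE GID table entry (GID, MAC, VLAN and RoCE version) into
 * the HCA.
 */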
static int set_roce_addr(struct mlx5_ib_dev *dev, u8 port_num,
			 unsigned int index, const union ib_gid *gid,
			 const struct ib_gid_attr *attr)
{
	enum ib_gid_type gid_type = IB_GID_TYPE_IB;
	u8 roce_version = 0;
	u8 roce_l3_type = 0;
	bool vlan = false;
	u8 mac[ETH_ALEN];
	u16 vlan_id = 0;

	if (gid) {
		gid_type = attr->gid_type;
		ether_addr_copy(mac, attr->ndev->dev_addr);

		if (is_vlan_dev(attr->ndev)) {
			vlan = true;
			vlan_id = vlan_dev_vlan_id(attr->ndev);
		}
	}

	switch (gid_type) {
	case IB_GID_TYPE_IB:
		roce_version = MLX5_ROCE_VERSION_1;
		break;
	case IB_GID_TYPE_ROCE_UDP_ENCAP:
		roce_version = MLX5_ROCE_VERSION_2;
		if (ipv6_addr_v4mapped((void *)gid))
			roce_l3_type = MLX5_ROCE_L3_TYPE_IPV4;
		else
			roce_l3_type = MLX5_ROCE_L3_TYPE_IPV6;
		break;

	default:
		mlx5_ib_warn(dev, "Unexpected GID type %u\n", gid_type);
	}

	return mlx5_core_roce_gid_set(dev->mdev, index, roce_version,
				      roce_l3_type, gid->raw, mac, vlan,
				      vlan_id, port_num);
}

static int mlx5_ib_add_gid(const struct ib_gid_attr *attr,
			   __always_unused void **context)
{
	return set_roce_addr(to_mdev(attr->device), attr->port_num,
			     attr->index, &attr->gid, attr);
}

static int mlx5_ib_del_gid(const struct ib_gid_attr *attr,
			   __always_unused void **context)
{
	return set_roce_addr(to_mdev(attr->device), attr->port_num,
			     attr->index, NULL, NULL);
}

__be16 mlx5_get_roce_udp_sport(struct mlx5_ib_dev *dev,
			       const struct ib_gid_attr *attr)
{
	if (attr->gid_type != IB_GID_TYPE_ROCE_UDP_ENCAP)
		return 0;

	return cpu_to_be16(MLX5_CAP_ROCE(dev->mdev, r_roce_min_src_udp_port));
}

static int mlx5_use_mad_ifc(struct mlx5_ib_dev *dev)
{
	if (MLX5_CAP_GEN(dev->mdev, port_type) == MLX5_CAP_PORT_TYPE_IB)
		return !MLX5_CAP_GEN(dev->mdev, ib_virt);
	return 0;
}

enum {
	MLX5_VPORT_ACCESS_METHOD_MAD,
	MLX5_VPORT_ACCESS_METHOD_HCA,
	MLX5_VPORT_ACCESS_METHOD_NIC,
};

static int mlx5_get_vport_access_method(struct ib_device *ibdev)
{
	if (mlx5_use_mad_ifc(to_mdev(ibdev)))
		return MLX5_VPORT_ACCESS_METHOD_MAD;

	if (mlx5_ib_port_link_layer(ibdev, 1) ==
	    IB_LINK_LAYER_ETHERNET)
		return MLX5_VPORT_ACCESS_METHOD_NIC;

	return MLX5_VPORT_ACCESS_METHOD_HCA;
}

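/* Report IB_ATOMIC_HCA only when FW supports 8-byte cmp/swap and fetch/add
 * with responses in host endianness.
 */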
static void get_atomic_caps(struct mlx5_ib_dev *dev,
			    u8 atomic_size_qp,
			    struct ib_device_attr *props)
{
	u8 tmp;
	u8 atomic_operations = MLX5_CAP_ATOMIC(dev->mdev, atomic_operations);
	u8 atomic_req_8B_endianness_mode =
		MLX5_CAP_ATOMIC(dev->mdev, atomic_req_8B_endianness_mode);

	/* Check if HW supports 8 bytes standard atomic operations and capable
	 * of host endianness respond
	 */
	tmp = MLX5_ATOMIC_OPS_CMP_SWAP | MLX5_ATOMIC_OPS_FETCH_ADD;
	if (((atomic_operations & tmp) == tmp) &&
	    (atomic_size_qp & MLX5_ATOMIC_SIZE_QP_8BYTES) &&
	    (atomic_req_8B_endianness_mode)) {
		props->atomic_cap = IB_ATOMIC_HCA;
	} else {
		props->atomic_cap = IB_ATOMIC_NONE;
	}
}

static void get_atomic_caps_qp(struct mlx5_ib_dev *dev,
			       struct ib_device_attr *props)
{
	u8 atomic_size_qp = MLX5_CAP_ATOMIC(dev->mdev, atomic_size_qp);

	get_atomic_caps(dev, atomic_size_qp, props);
}

static void get_atomic_caps_dc(struct mlx5_ib_dev *dev,
			       struct ib_device_attr *props)
{
	u8 atomic_size_qp = MLX5_CAP_ATOMIC(dev->mdev, atomic_size_dc);

	get_atomic_caps(dev, atomic_size_qp, props);
}

bool mlx5_ib_dc_atomic_is_supported(struct mlx5_ib_dev *dev)
{
	struct ib_device_attr props = {};

	get_atomic_caps_dc(dev, &props);
	return (props.atomic_cap == IB_ATOMIC_HCA) ? true : false;
}
static int mlx5_query_system_image_guid(struct ib_device *ibdev,
					__be64 *sys_image_guid)
{
	struct mlx5_ib_dev *dev = to_mdev(ibdev);
	struct mlx5_core_dev *mdev = dev->mdev;
	u64 tmp;
	int err;

	switch (mlx5_get_vport_access_method(ibdev)) {
	case MLX5_VPORT_ACCESS_METHOD_MAD:
		return mlx5_query_mad_ifc_system_image_guid(ibdev,
							    sys_image_guid);

	case MLX5_VPORT_ACCESS_METHOD_HCA:
		err = mlx5_query_hca_vport_system_image_guid(mdev, &tmp);
		break;

	case MLX5_VPORT_ACCESS_METHOD_NIC:
		err = mlx5_query_nic_vport_system_image_guid(mdev, &tmp);
		break;

	default:
		return -EINVAL;
	}

	if (!err)
		*sys_image_guid = cpu_to_be64(tmp);

	return err;

}

static int mlx5_query_max_pkeys(struct ib_device *ibdev,
				u16 *max_pkeys)
{
	struct mlx5_ib_dev *dev = to_mdev(ibdev);
	struct mlx5_core_dev *mdev = dev->mdev;

	switch (mlx5_get_vport_access_method(ibdev)) {
	case MLX5_VPORT_ACCESS_METHOD_MAD:
		return mlx5_query_mad_ifc_max_pkeys(ibdev, max_pkeys);

	case MLX5_VPORT_ACCESS_METHOD_HCA:
	case MLX5_VPORT_ACCESS_METHOD_NIC:
		*max_pkeys = mlx5_to_sw_pkey_sz(MLX5_CAP_GEN(mdev,
						pkey_table_size));
		return 0;

	default:
		return -EINVAL;
	}
}

static int mlx5_query_vendor_id(struct ib_device *ibdev,
				u32 *vendor_id)
{
	struct mlx5_ib_dev *dev = to_mdev(ibdev);

	switch (mlx5_get_vport_access_method(ibdev)) {
	case MLX5_VPORT_ACCESS_METHOD_MAD:
		return mlx5_query_mad_ifc_vendor_id(ibdev, vendor_id);

	case MLX5_VPORT_ACCESS_METHOD_HCA:
	case MLX5_VPORT_ACCESS_METHOD_NIC:
		return mlx5_core_query_vendor_id(dev->mdev, vendor_id);

	default:
		return -EINVAL;
	}
}

static int mlx5_query_node_guid(struct mlx5_ib_dev *dev,
				__be64 *node_guid)
{
	u64 tmp;
	int err;

	switch (mlx5_get_vport_access_method(&dev->ib_dev)) {
	case MLX5_VPORT_ACCESS_METHOD_MAD:
		return mlx5_query_mad_ifc_node_guid(dev, node_guid);

	case MLX5_VPORT_ACCESS_METHOD_HCA:
		err = mlx5_query_hca_vport_node_guid(dev->mdev, &tmp);
		break;

	case MLX5_VPORT_ACCESS_METHOD_NIC:
		err = mlx5_query_nic_vport_node_guid(dev->mdev, &tmp);
		break;

	default:
		return -EINVAL;
	}

	if (!err)
		*node_guid = cpu_to_be64(tmp);

	return err;
}

struct mlx5_reg_node_desc {
	u8 desc[IB_DEVICE_NODE_DESC_MAX];
};

static int mlx5_query_node_desc(struct mlx5_ib_dev *dev, char *node_desc)
{
	struct mlx5_reg_node_desc in;

	if (mlx5_use_mad_ifc(dev))
		return mlx5_query_mad_ifc_node_desc(dev, node_desc);

	memset(&in, 0, sizeof(in));

	return mlx5_core_access_reg(dev->mdev, &in, sizeof(in), node_desc,
				    sizeof(struct mlx5_reg_node_desc),
				    MLX5_REG_NODE_DESC, 0, 0);
}

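/* query_device verb: report generic device attributes and, when user space
 * provides room, the mlx5-specific capability response.
 */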
static int mlx5_ib_query_device(struct ib_device *ibdev,
				struct ib_device_attr *props,
				struct ib_udata *uhw)
{
	struct mlx5_ib_dev *dev = to_mdev(ibdev);
	struct mlx5_core_dev *mdev = dev->mdev;
	int err = -ENOMEM;
	int max_sq_desc;
	int max_rq_sg;
	int max_sq_sg;
	u64 min_page_size = 1ull << MLX5_CAP_GEN(mdev, log_pg_sz);
	bool raw_support = !mlx5_core_mp_enabled(mdev);
	struct mlx5_ib_query_device_resp resp = {};
	size_t resp_len;
	u64 max_tso;

	resp_len = sizeof(resp.comp_mask) + sizeof(resp.response_length);
	if (uhw->outlen && uhw->outlen < resp_len)
		return -EINVAL;
	else
		resp.response_length = resp_len;

	if (uhw->inlen && !ib_is_udata_cleared(uhw, 0, uhw->inlen))
		return -EINVAL;

	memset(props, 0, sizeof(*props));
	err = mlx5_query_system_image_guid(ibdev,
					   &props->sys_image_guid);
	if (err)
		return err;

	err = mlx5_query_max_pkeys(ibdev, &props->max_pkeys);
	if (err)
		return err;

	err = mlx5_query_vendor_id(ibdev, &props->vendor_id);
	if (err)
		return err;

	props->fw_ver = ((u64)fw_rev_maj(dev->mdev) << 32) |
		(fw_rev_min(dev->mdev) << 16) |
		fw_rev_sub(dev->mdev);
	props->device_cap_flags = IB_DEVICE_CHANGE_PHY_PORT |
		IB_DEVICE_PORT_ACTIVE_EVENT |
		IB_DEVICE_SYS_IMAGE_GUID |
		IB_DEVICE_RC_RNR_NAK_GEN;

	if (MLX5_CAP_GEN(mdev, pkv))
		props->device_cap_flags |= IB_DEVICE_BAD_PKEY_CNTR;
	if (MLX5_CAP_GEN(mdev, qkv))
		props->device_cap_flags |= IB_DEVICE_BAD_QKEY_CNTR;
	if (MLX5_CAP_GEN(mdev, apm))
		props->device_cap_flags |= IB_DEVICE_AUTO_PATH_MIG;
	if (MLX5_CAP_GEN(mdev, xrc))
		props->device_cap_flags |= IB_DEVICE_XRC;
	if (MLX5_CAP_GEN(mdev, imaicl)) {
		props->device_cap_flags |= IB_DEVICE_MEM_WINDOW |
					   IB_DEVICE_MEM_WINDOW_TYPE_2B;
		props->max_mw = 1 << MLX5_CAP_GEN(mdev, log_max_mkey);
		/* We support 'Gappy' memory registration too */
		props->device_cap_flags |= IB_DEVICE_SG_GAPS_REG;
	}
	props->device_cap_flags |= IB_DEVICE_MEM_MGT_EXTENSIONS;
	if (MLX5_CAP_GEN(mdev, sho)) {
		props->device_cap_flags |= IB_DEVICE_SIGNATURE_HANDOVER;
		/* At this stage no support for signature handover */
		props->sig_prot_cap = IB_PROT_T10DIF_TYPE_1 |
				      IB_PROT_T10DIF_TYPE_2 |
				      IB_PROT_T10DIF_TYPE_3;
		props->sig_guard_cap = IB_GUARD_T10DIF_CRC |
				       IB_GUARD_T10DIF_CSUM;
	}
	if (MLX5_CAP_GEN(mdev, block_lb_mc))
		props->device_cap_flags |= IB_DEVICE_BLOCK_MULTICAST_LOOPBACK;

	if (MLX5_CAP_GEN(dev->mdev, eth_net_offloads) && raw_support) {
		if (MLX5_CAP_ETH(mdev, csum_cap)) {
			/* Legacy bit to support old userspace libraries */
			props->device_cap_flags |= IB_DEVICE_RAW_IP_CSUM;
			props->raw_packet_caps |= IB_RAW_PACKET_CAP_IP_CSUM;
		}

		if (MLX5_CAP_ETH(dev->mdev, vlan_cap))
			props->raw_packet_caps |=
				IB_RAW_PACKET_CAP_CVLAN_STRIPPING;

		if (field_avail(typeof(resp), tso_caps, uhw->outlen)) {
			max_tso = MLX5_CAP_ETH(mdev, max_lso_cap);
			if (max_tso) {
				resp.tso_caps.max_tso = 1 << max_tso;
				resp.tso_caps.supported_qpts |=
					1 << IB_QPT_RAW_PACKET;
				resp.response_length += sizeof(resp.tso_caps);
			}
		}

		if (field_avail(typeof(resp), rss_caps, uhw->outlen)) {
			resp.rss_caps.rx_hash_function =
						MLX5_RX_HASH_FUNC_TOEPLITZ;
			resp.rss_caps.rx_hash_fields_mask =
						MLX5_RX_HASH_SRC_IPV4 |
						MLX5_RX_HASH_DST_IPV4 |
						MLX5_RX_HASH_SRC_IPV6 |
						MLX5_RX_HASH_DST_IPV6 |
						MLX5_RX_HASH_SRC_PORT_TCP |
						MLX5_RX_HASH_DST_PORT_TCP |
						MLX5_RX_HASH_SRC_PORT_UDP |
						MLX5_RX_HASH_DST_PORT_UDP |
						MLX5_RX_HASH_INNER;
			if (mlx5_accel_ipsec_device_caps(dev->mdev) &
			    MLX5_ACCEL_IPSEC_CAP_DEVICE)
				resp.rss_caps.rx_hash_fields_mask |=
					MLX5_RX_HASH_IPSEC_SPI;
			resp.response_length += sizeof(resp.rss_caps);
		}
	} else {
		if (field_avail(typeof(resp), tso_caps, uhw->outlen))
			resp.response_length += sizeof(resp.tso_caps);
		if (field_avail(typeof(resp), rss_caps, uhw->outlen))
			resp.response_length += sizeof(resp.rss_caps);
	}

	if (MLX5_CAP_GEN(mdev, ipoib_basic_offloads)) {
		props->device_cap_flags |= IB_DEVICE_UD_IP_CSUM;
		props->device_cap_flags |= IB_DEVICE_UD_TSO;
	}

	if (MLX5_CAP_GEN(dev->mdev, rq_delay_drop) &&
	    MLX5_CAP_GEN(dev->mdev, general_notification_event) &&
	    raw_support)
		props->raw_packet_caps |= IB_RAW_PACKET_CAP_DELAY_DROP;

	if (MLX5_CAP_GEN(mdev, ipoib_enhanced_offloads) &&
	    MLX5_CAP_IPOIB_ENHANCED(mdev, csum_cap))
		props->device_cap_flags |= IB_DEVICE_UD_IP_CSUM;

	if (MLX5_CAP_GEN(dev->mdev, eth_net_offloads) &&
	    MLX5_CAP_ETH(dev->mdev, scatter_fcs) &&
	    raw_support) {
		/* Legacy bit to support old userspace libraries */
		props->device_cap_flags |= IB_DEVICE_RAW_SCATTER_FCS;
		props->raw_packet_caps |= IB_RAW_PACKET_CAP_SCATTER_FCS;
	}

	if (MLX5_CAP_DEV_MEM(mdev, memic)) {
		props->max_dm_size =
			MLX5_CAP_DEV_MEM(mdev, max_memic_size);
	}

	if (mlx5_get_flow_namespace(dev->mdev, MLX5_FLOW_NAMESPACE_BYPASS))
		props->device_cap_flags |= IB_DEVICE_MANAGED_FLOW_STEERING;

	if (MLX5_CAP_GEN(mdev, end_pad))
		props->device_cap_flags |= IB_DEVICE_PCI_WRITE_END_PADDING;

	props->vendor_part_id = mdev->pdev->device;
	props->hw_ver = mdev->pdev->revision;

	props->max_mr_size = ~0ull;
	props->page_size_cap = ~(min_page_size - 1);
	props->max_qp = 1 << MLX5_CAP_GEN(mdev, log_max_qp);
	props->max_qp_wr = 1 << MLX5_CAP_GEN(mdev, log_max_qp_sz);
	max_rq_sg = MLX5_CAP_GEN(mdev, max_wqe_sz_rq) /
		     sizeof(struct mlx5_wqe_data_seg);
	max_sq_desc = min_t(int, MLX5_CAP_GEN(mdev, max_wqe_sz_sq), 512);
	max_sq_sg = (max_sq_desc - sizeof(struct mlx5_wqe_ctrl_seg) -
		     sizeof(struct mlx5_wqe_raddr_seg)) /
		sizeof(struct mlx5_wqe_data_seg);
	props->max_send_sge = max_sq_sg;
	props->max_recv_sge = max_rq_sg;
	props->max_sge_rd = MLX5_MAX_SGE_RD;
	props->max_cq = 1 << MLX5_CAP_GEN(mdev, log_max_cq);
	props->max_cqe = (1 << MLX5_CAP_GEN(mdev, log_max_cq_sz)) - 1;
	props->max_mr = 1 << MLX5_CAP_GEN(mdev, log_max_mkey);
	props->max_pd = 1 << MLX5_CAP_GEN(mdev, log_max_pd);
	props->max_qp_rd_atom = 1 << MLX5_CAP_GEN(mdev, log_max_ra_req_qp);
	props->max_qp_init_rd_atom = 1 << MLX5_CAP_GEN(mdev, log_max_ra_res_qp);
	props->max_srq = 1 << MLX5_CAP_GEN(mdev, log_max_srq);
	props->max_srq_wr = (1 << MLX5_CAP_GEN(mdev, log_max_srq_sz)) - 1;
	props->local_ca_ack_delay = MLX5_CAP_GEN(mdev, local_ca_ack_delay);
	props->max_res_rd_atom = props->max_qp_rd_atom * props->max_qp;
	props->max_srq_sge = max_rq_sg - 1;
	props->max_fast_reg_page_list_len =
		1 << MLX5_CAP_GEN(mdev, log_max_klm_list_size);
	get_atomic_caps_qp(dev, props);
	props->masked_atomic_cap = IB_ATOMIC_NONE;
	props->max_mcast_grp = 1 << MLX5_CAP_GEN(mdev, log_max_mcg);
	props->max_mcast_qp_attach = MLX5_CAP_GEN(mdev, max_qp_mcg);
	props->max_total_mcast_qp_attach = props->max_mcast_qp_attach *
					   props->max_mcast_grp;
	props->max_map_per_fmr = INT_MAX; /* no limit in ConnectIB */
	props->max_ah = INT_MAX;
	props->hca_core_clock = MLX5_CAP_GEN(mdev, device_frequency_khz);
	props->timestamp_mask = 0x7FFFFFFFFFFFFFFFULL;

#ifdef CONFIG_INFINIBAND_ON_DEMAND_PAGING
	if (MLX5_CAP_GEN(mdev, pg))
		props->device_cap_flags |= IB_DEVICE_ON_DEMAND_PAGING;
	props->odp_caps = dev->odp_caps;
#endif

	if (MLX5_CAP_GEN(mdev, cd))
		props->device_cap_flags |= IB_DEVICE_CROSS_CHANNEL;

	if (!mlx5_core_is_pf(mdev))
		props->device_cap_flags |= IB_DEVICE_VIRTUAL_FUNCTION;

	if (mlx5_ib_port_link_layer(ibdev, 1) ==
	    IB_LINK_LAYER_ETHERNET && raw_support) {
		props->rss_caps.max_rwq_indirection_tables =
			1 << MLX5_CAP_GEN(dev->mdev, log_max_rqt);
		props->rss_caps.max_rwq_indirection_table_size =
			1 << MLX5_CAP_GEN(dev->mdev, log_max_rqt_size);
		props->rss_caps.supported_qpts = 1 << IB_QPT_RAW_PACKET;
		props->max_wq_type_rq =
			1 << MLX5_CAP_GEN(dev->mdev, log_max_rq);
	}

	if (MLX5_CAP_GEN(mdev, tag_matching)) {
		props->tm_caps.max_rndv_hdr_size = MLX5_TM_MAX_RNDV_MSG_SIZE;
		props->tm_caps.max_num_tags =
			(1 << MLX5_CAP_GEN(mdev, log_tag_matching_list_sz)) - 1;
		props->tm_caps.flags = IB_TM_CAP_RC;
		props->tm_caps.max_ops =
			1 << MLX5_CAP_GEN(mdev, log_max_qp_sz);
		props->tm_caps.max_sge = MLX5_TM_MAX_SGE;
	}

	if (MLX5_CAP_GEN(dev->mdev, cq_moderation)) {
		props->cq_caps.max_cq_moderation_count =
						MLX5_MAX_CQ_COUNT;
		props->cq_caps.max_cq_moderation_period =
						MLX5_MAX_CQ_PERIOD;
	}

	if (field_avail(typeof(resp), cqe_comp_caps, uhw->outlen)) {
		resp.response_length += sizeof(resp.cqe_comp_caps);

		if (MLX5_CAP_GEN(dev->mdev, cqe_compression)) {
			resp.cqe_comp_caps.max_num =
				MLX5_CAP_GEN(dev->mdev,
					     cqe_compression_max_num);

			resp.cqe_comp_caps.supported_format =
				MLX5_IB_CQE_RES_FORMAT_HASH |
				MLX5_IB_CQE_RES_FORMAT_CSUM;

			if (MLX5_CAP_GEN(dev->mdev, mini_cqe_resp_stride_index))
				resp.cqe_comp_caps.supported_format |=
					MLX5_IB_CQE_RES_FORMAT_CSUM_STRIDX;
		}
	}

	if (field_avail(typeof(resp), packet_pacing_caps, uhw->outlen) &&
	    raw_support) {
		if (MLX5_CAP_QOS(mdev, packet_pacing) &&
		    MLX5_CAP_GEN(mdev, qos)) {
			resp.packet_pacing_caps.qp_rate_limit_max =
				MLX5_CAP_QOS(mdev, packet_pacing_max_rate);
			resp.packet_pacing_caps.qp_rate_limit_min =
				MLX5_CAP_QOS(mdev, packet_pacing_min_rate);
			resp.packet_pacing_caps.supported_qpts |=
				1 << IB_QPT_RAW_PACKET;
			if (MLX5_CAP_QOS(mdev, packet_pacing_burst_bound) &&
			    MLX5_CAP_QOS(mdev, packet_pacing_typical_size))
				resp.packet_pacing_caps.cap_flags |=
					MLX5_IB_PP_SUPPORT_BURST;
		}
		resp.response_length += sizeof(resp.packet_pacing_caps);
	}

	if (field_avail(typeof(resp), mlx5_ib_support_multi_pkt_send_wqes,
			uhw->outlen)) {
		if (MLX5_CAP_ETH(mdev, multi_pkt_send_wqe))
			resp.mlx5_ib_support_multi_pkt_send_wqes =
				MLX5_IB_ALLOW_MPW;

		if (MLX5_CAP_ETH(mdev, enhanced_multi_pkt_send_wqe))
			resp.mlx5_ib_support_multi_pkt_send_wqes |=
				MLX5_IB_SUPPORT_EMPW;

		resp.response_length +=
			sizeof(resp.mlx5_ib_support_multi_pkt_send_wqes);
	}

	if (field_avail(typeof(resp), flags, uhw->outlen)) {
		resp.response_length += sizeof(resp.flags);

		if (MLX5_CAP_GEN(mdev, cqe_compression_128))
			resp.flags |=
				MLX5_IB_QUERY_DEV_RESP_FLAGS_CQE_128B_COMP;

		if (MLX5_CAP_GEN(mdev, cqe_128_always))
			resp.flags |= MLX5_IB_QUERY_DEV_RESP_FLAGS_CQE_128B_PAD;
		if (MLX5_CAP_GEN(mdev, qp_packet_based))
			resp.flags |=
				MLX5_IB_QUERY_DEV_RESP_PACKET_BASED_CREDIT_MODE;
	}

	if (field_avail(typeof(resp), sw_parsing_caps,
			uhw->outlen)) {
		resp.response_length += sizeof(resp.sw_parsing_caps);
		if (MLX5_CAP_ETH(mdev, swp)) {
			resp.sw_parsing_caps.sw_parsing_offloads |=
				MLX5_IB_SW_PARSING;

			if (MLX5_CAP_ETH(mdev, swp_csum))
				resp.sw_parsing_caps.sw_parsing_offloads |=
					MLX5_IB_SW_PARSING_CSUM;

			if (MLX5_CAP_ETH(mdev, swp_lso))
				resp.sw_parsing_caps.sw_parsing_offloads |=
					MLX5_IB_SW_PARSING_LSO;

			if (resp.sw_parsing_caps.sw_parsing_offloads)
				resp.sw_parsing_caps.supported_qpts =
					BIT(IB_QPT_RAW_PACKET);
		}
	}

	if (field_avail(typeof(resp), striding_rq_caps, uhw->outlen) &&
	    raw_support) {
		resp.response_length += sizeof(resp.striding_rq_caps);
		if (MLX5_CAP_GEN(mdev, striding_rq)) {
			resp.striding_rq_caps.min_single_stride_log_num_of_bytes =
				MLX5_MIN_SINGLE_STRIDE_LOG_NUM_BYTES;
			resp.striding_rq_caps.max_single_stride_log_num_of_bytes =
				MLX5_MAX_SINGLE_STRIDE_LOG_NUM_BYTES;
			resp.striding_rq_caps.min_single_wqe_log_num_of_strides =
				MLX5_MIN_SINGLE_WQE_LOG_NUM_STRIDES;
			resp.striding_rq_caps.max_single_wqe_log_num_of_strides =
				MLX5_MAX_SINGLE_WQE_LOG_NUM_STRIDES;
			resp.striding_rq_caps.supported_qpts =
				BIT(IB_QPT_RAW_PACKET);
		}
	}

	if (field_avail(typeof(resp), tunnel_offloads_caps,
			uhw->outlen)) {
		resp.response_length += sizeof(resp.tunnel_offloads_caps);
		if (MLX5_CAP_ETH(mdev, tunnel_stateless_vxlan))
			resp.tunnel_offloads_caps |=
				MLX5_IB_TUNNELED_OFFLOADS_VXLAN;
		if (MLX5_CAP_ETH(mdev, tunnel_stateless_geneve_rx))
			resp.tunnel_offloads_caps |=
				MLX5_IB_TUNNELED_OFFLOADS_GENEVE;
		if (MLX5_CAP_ETH(mdev, tunnel_stateless_gre))
			resp.tunnel_offloads_caps |=
				MLX5_IB_TUNNELED_OFFLOADS_GRE;
		if (MLX5_CAP_GEN(mdev, flex_parser_protocols) &
		    MLX5_FLEX_PROTO_CW_MPLS_GRE)
			resp.tunnel_offloads_caps |=
				MLX5_IB_TUNNELED_OFFLOADS_MPLS_GRE;
		if (MLX5_CAP_GEN(mdev, flex_parser_protocols) &
		    MLX5_FLEX_PROTO_CW_MPLS_UDP)
			resp.tunnel_offloads_caps |=
				MLX5_IB_TUNNELED_OFFLOADS_MPLS_UDP;
	}

	if (uhw->outlen) {
		err = ib_copy_to_udata(uhw, &resp, resp.response_length);

		if (err)
			return err;
	}

	return 0;
}

enum mlx5_ib_width {
	MLX5_IB_WIDTH_1X = 1 << 0,
	MLX5_IB_WIDTH_2X = 1 << 1,
	MLX5_IB_WIDTH_4X = 1 << 2,
	MLX5_IB_WIDTH_8X = 1 << 3,
	MLX5_IB_WIDTH_12X = 1 << 4
};

static void translate_active_width(struct ib_device *ibdev, u8 active_width,
				   u8 *ib_width)
{
	struct mlx5_ib_dev *dev = to_mdev(ibdev);

	if (active_width & MLX5_IB_WIDTH_1X)
		*ib_width = IB_WIDTH_1X;
	else if (active_width & MLX5_IB_WIDTH_2X)
		*ib_width = IB_WIDTH_2X;
	else if (active_width & MLX5_IB_WIDTH_4X)
		*ib_width = IB_WIDTH_4X;
	else if (active_width & MLX5_IB_WIDTH_8X)
		*ib_width = IB_WIDTH_8X;
	else if (active_width & MLX5_IB_WIDTH_12X)
		*ib_width = IB_WIDTH_12X;
	else {
		mlx5_ib_dbg(dev, "Invalid active_width %d, setting width to default value: 4x\n",
			    (int)active_width);
		*ib_width = IB_WIDTH_4X;
	}

	return;
}

static int mlx5_mtu_to_ib_mtu(int mtu)
{
	switch (mtu) {
	case 256: return 1;
	case 512: return 2;
	case 1024: return 3;
	case 2048: return 4;
	case 4096: return 5;
	default:
		pr_warn("invalid mtu\n");
		return -1;
	}
}

enum ib_max_vl_num {
	__IB_MAX_VL_0		= 1,
	__IB_MAX_VL_0_1		= 2,
	__IB_MAX_VL_0_3		= 3,
	__IB_MAX_VL_0_7		= 4,
	__IB_MAX_VL_0_14	= 5,
};

enum mlx5_vl_hw_cap {
	MLX5_VL_HW_0	= 1,
	MLX5_VL_HW_0_1	= 2,
	MLX5_VL_HW_0_2	= 3,
	MLX5_VL_HW_0_3	= 4,
	MLX5_VL_HW_0_4	= 5,
	MLX5_VL_HW_0_5	= 6,
	MLX5_VL_HW_0_6	= 7,
	MLX5_VL_HW_0_7	= 8,
	MLX5_VL_HW_0_14	= 15
};

static int translate_max_vl_num(struct ib_device *ibdev, u8 vl_hw_cap,
				u8 *max_vl_num)
{
	switch (vl_hw_cap) {
	case MLX5_VL_HW_0:
		*max_vl_num = __IB_MAX_VL_0;
		break;
	case MLX5_VL_HW_0_1:
		*max_vl_num = __IB_MAX_VL_0_1;
		break;
	case MLX5_VL_HW_0_3:
		*max_vl_num = __IB_MAX_VL_0_3;
		break;
	case MLX5_VL_HW_0_7:
		*max_vl_num = __IB_MAX_VL_0_7;
		break;
	case MLX5_VL_HW_0_14:
		*max_vl_num = __IB_MAX_VL_0_14;
		break;

	default:
		return -EINVAL;
	}

	return 0;
}

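/* Fill ib_port_attr for an InfiniBand port from the HCA vport context. */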
static int mlx5_query_hca_port(struct ib_device *ibdev, u8 port,
			       struct ib_port_attr *props)
{
	struct mlx5_ib_dev *dev = to_mdev(ibdev);
	struct mlx5_core_dev *mdev = dev->mdev;
	struct mlx5_hca_vport_context *rep;
	u16 max_mtu;
	u16 oper_mtu;
	int err;
	u8 ib_link_width_oper;
	u8 vl_hw_cap;

	rep = kzalloc(sizeof(*rep), GFP_KERNEL);
	if (!rep) {
		err = -ENOMEM;
		goto out;
	}

	/* props being zeroed by the caller, avoid zeroing it here */

	err = mlx5_query_hca_vport_context(mdev, 0, port, 0, rep);
	if (err)
		goto out;

	props->lid		= rep->lid;
	props->lmc		= rep->lmc;
	props->sm_lid		= rep->sm_lid;
	props->sm_sl		= rep->sm_sl;
	props->state		= rep->vport_state;
	props->phys_state	= rep->port_physical_state;
	props->port_cap_flags	= rep->cap_mask1;
	props->gid_tbl_len	= mlx5_get_gid_table_len(MLX5_CAP_GEN(mdev, gid_table_size));
	props->max_msg_sz	= 1 << MLX5_CAP_GEN(mdev, log_max_msg);
	props->pkey_tbl_len	= mlx5_to_sw_pkey_sz(MLX5_CAP_GEN(mdev, pkey_table_size));
	props->bad_pkey_cntr	= rep->pkey_violation_counter;
	props->qkey_viol_cntr	= rep->qkey_violation_counter;
	props->subnet_timeout	= rep->subnet_timeout;
	props->init_type_reply	= rep->init_type_reply;

	if (props->port_cap_flags & IB_PORT_CAP_MASK2_SUP)
		props->port_cap_flags2 = rep->cap_mask2;

	err = mlx5_query_port_link_width_oper(mdev, &ib_link_width_oper, port);
	if (err)
		goto out;

	translate_active_width(ibdev, ib_link_width_oper, &props->active_width);

	err = mlx5_query_port_ib_proto_oper(mdev, &props->active_speed, port);
	if (err)
		goto out;

	mlx5_query_port_max_mtu(mdev, &max_mtu, port);

	props->max_mtu = mlx5_mtu_to_ib_mtu(max_mtu);

	mlx5_query_port_oper_mtu(mdev, &oper_mtu, port);

	props->active_mtu = mlx5_mtu_to_ib_mtu(oper_mtu);

	err = mlx5_query_port_vl_hw_cap(mdev, &vl_hw_cap, port);
	if (err)
		goto out;

	err = translate_max_vl_num(ibdev, vl_hw_cap,
				   &props->max_vl_num);
out:
	kfree(rep);
	return err;
}

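/* query_port verb: dispatch by vport access method and subtract the GIDs
 * reserved by the core from the reported table length.
 */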
1260int mlx5_ib_query_port(struct ib_device *ibdev, u8 port,
1261 struct ib_port_attr *props)
1262{
Ilan Tayari095b0922017-05-14 16:04:30 +03001263 unsigned int count;
1264 int ret;
1265
Majd Dibbiny1b5daf12015-06-04 19:30:46 +03001266 switch (mlx5_get_vport_access_method(ibdev)) {
1267 case MLX5_VPORT_ACCESS_METHOD_MAD:
Ilan Tayari095b0922017-05-14 16:04:30 +03001268 ret = mlx5_query_mad_ifc_port(ibdev, port, props);
1269 break;
Eli Cohene126ba92013-07-07 17:25:49 +03001270
Majd Dibbiny1b5daf12015-06-04 19:30:46 +03001271 case MLX5_VPORT_ACCESS_METHOD_HCA:
Ilan Tayari095b0922017-05-14 16:04:30 +03001272 ret = mlx5_query_hca_port(ibdev, port, props);
1273 break;
Majd Dibbiny1b5daf12015-06-04 19:30:46 +03001274
Achiad Shochat3f89a642015-12-23 18:47:21 +02001275 case MLX5_VPORT_ACCESS_METHOD_NIC:
Ilan Tayari095b0922017-05-14 16:04:30 +03001276 ret = mlx5_query_port_roce(ibdev, port, props);
1277 break;
Achiad Shochat3f89a642015-12-23 18:47:21 +02001278
Majd Dibbiny1b5daf12015-06-04 19:30:46 +03001279 default:
Ilan Tayari095b0922017-05-14 16:04:30 +03001280 ret = -EINVAL;
Eli Cohene126ba92013-07-07 17:25:49 +03001281 }
Ilan Tayari095b0922017-05-14 16:04:30 +03001282
1283 if (!ret && props) {
Daniel Jurgensb3cbd6f2018-01-04 17:25:38 +02001284 struct mlx5_ib_dev *dev = to_mdev(ibdev);
1285 struct mlx5_core_dev *mdev;
1286 bool put_mdev = true;
1287
1288 mdev = mlx5_ib_get_native_port_mdev(dev, port, NULL);
1289 if (!mdev) {
1290 /* If the port isn't affiliated yet query the master.
1291 * The master and slave will have the same values.
1292 */
1293 mdev = dev->mdev;
1294 port = 1;
1295 put_mdev = false;
1296 }
1297 count = mlx5_core_reserved_gids_count(mdev);
1298 if (put_mdev)
1299 mlx5_ib_put_native_port_mdev(dev, port);
Ilan Tayari095b0922017-05-14 16:04:30 +03001300 props->gid_tbl_len -= count;
1301 }
1302 return ret;
Eli Cohene126ba92013-07-07 17:25:49 +03001303}
1304
Mark Bloch8e6efa32017-11-06 12:22:13 +00001305static int mlx5_ib_rep_query_port(struct ib_device *ibdev, u8 port,
1306 struct ib_port_attr *props)
1307{
1308 int ret;
1309
1310 /* Only link layer == ethernet is valid for representors */
1311 ret = mlx5_query_port_roce(ibdev, port, props);
1312 if (ret || !props)
1313 return ret;
1314
1315 /* We don't support GIDS */
1316 props->gid_tbl_len = 0;
1317
1318 return ret;
1319}
1320
Eli Cohene126ba92013-07-07 17:25:49 +03001321static int mlx5_ib_query_gid(struct ib_device *ibdev, u8 port, int index,
1322 union ib_gid *gid)
1323{
Majd Dibbiny1b5daf12015-06-04 19:30:46 +03001324 struct mlx5_ib_dev *dev = to_mdev(ibdev);
1325 struct mlx5_core_dev *mdev = dev->mdev;
Eli Cohene126ba92013-07-07 17:25:49 +03001326
Majd Dibbiny1b5daf12015-06-04 19:30:46 +03001327 switch (mlx5_get_vport_access_method(ibdev)) {
1328 case MLX5_VPORT_ACCESS_METHOD_MAD:
1329 return mlx5_query_mad_ifc_gids(ibdev, port, index, gid);
Eli Cohene126ba92013-07-07 17:25:49 +03001330
Majd Dibbiny1b5daf12015-06-04 19:30:46 +03001331 case MLX5_VPORT_ACCESS_METHOD_HCA:
1332 return mlx5_query_hca_vport_gid(mdev, 0, port, 0, index, gid);
Eli Cohene126ba92013-07-07 17:25:49 +03001333
Majd Dibbiny1b5daf12015-06-04 19:30:46 +03001334 default:
1335 return -EINVAL;
1336 }
Eli Cohene126ba92013-07-07 17:25:49 +03001337
Eli Cohene126ba92013-07-07 17:25:49 +03001338}
1339
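/* Query a PKey through the native port's mdev, falling back to the master
 * device (port 1) while the port is not yet affiliated.
 */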
Daniel Jurgensb3cbd6f2018-01-04 17:25:38 +02001340static int mlx5_query_hca_nic_pkey(struct ib_device *ibdev, u8 port,
1341 u16 index, u16 *pkey)
1342{
1343 struct mlx5_ib_dev *dev = to_mdev(ibdev);
1344 struct mlx5_core_dev *mdev;
1345 bool put_mdev = true;
1346 u8 mdev_port_num;
1347 int err;
1348
1349 mdev = mlx5_ib_get_native_port_mdev(dev, port, &mdev_port_num);
1350 if (!mdev) {
 1351		/* The port isn't affiliated yet; get the PKey from the master
1352 * port. For RoCE the PKey tables will be the same.
1353 */
1354 put_mdev = false;
1355 mdev = dev->mdev;
1356 mdev_port_num = 1;
1357 }
1358
1359 err = mlx5_query_hca_vport_pkey(mdev, 0, mdev_port_num, 0,
1360 index, pkey);
1361 if (put_mdev)
1362 mlx5_ib_put_native_port_mdev(dev, port);
1363
1364 return err;
1365}
1366
Eli Cohene126ba92013-07-07 17:25:49 +03001367static int mlx5_ib_query_pkey(struct ib_device *ibdev, u8 port, u16 index,
1368 u16 *pkey)
1369{
Majd Dibbiny1b5daf12015-06-04 19:30:46 +03001370 switch (mlx5_get_vport_access_method(ibdev)) {
1371 case MLX5_VPORT_ACCESS_METHOD_MAD:
1372 return mlx5_query_mad_ifc_pkey(ibdev, port, index, pkey);
Eli Cohene126ba92013-07-07 17:25:49 +03001373
Majd Dibbiny1b5daf12015-06-04 19:30:46 +03001374 case MLX5_VPORT_ACCESS_METHOD_HCA:
1375 case MLX5_VPORT_ACCESS_METHOD_NIC:
Daniel Jurgensb3cbd6f2018-01-04 17:25:38 +02001376 return mlx5_query_hca_nic_pkey(ibdev, port, index, pkey);
Majd Dibbiny1b5daf12015-06-04 19:30:46 +03001377 default:
1378 return -EINVAL;
1379 }
Eli Cohene126ba92013-07-07 17:25:49 +03001380}
1381
Eli Cohene126ba92013-07-07 17:25:49 +03001382static int mlx5_ib_modify_device(struct ib_device *ibdev, int mask,
1383 struct ib_device_modify *props)
1384{
1385 struct mlx5_ib_dev *dev = to_mdev(ibdev);
1386 struct mlx5_reg_node_desc in;
1387 struct mlx5_reg_node_desc out;
1388 int err;
1389
1390 if (mask & ~IB_DEVICE_MODIFY_NODE_DESC)
1391 return -EOPNOTSUPP;
1392
1393 if (!(mask & IB_DEVICE_MODIFY_NODE_DESC))
1394 return 0;
1395
1396 /*
 1397	 * If possible, pass the node description to the FW so it can
 1398	 * generate a Trap 144 notification. If the command fails, just ignore it.
1399 */
Yuval Shaiabd99fde2016-08-25 10:57:07 -07001400 memcpy(&in, props->node_desc, IB_DEVICE_NODE_DESC_MAX);
Jack Morgenstein9603b612014-07-28 23:30:22 +03001401 err = mlx5_core_access_reg(dev->mdev, &in, sizeof(in), &out,
Eli Cohene126ba92013-07-07 17:25:49 +03001402 sizeof(out), MLX5_REG_NODE_DESC, 0, 1);
1403 if (err)
1404 return err;
1405
Yuval Shaiabd99fde2016-08-25 10:57:07 -07001406 memcpy(ibdev->node_desc, props->node_desc, IB_DEVICE_NODE_DESC_MAX);
Eli Cohene126ba92013-07-07 17:25:49 +03001407
1408 return err;
1409}
1410
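/* Update the port capability bits selected by @mask through the HCA vport
 * context, rejecting bits the firmware does not permit to change.
 */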
Eli Cohencdbe33d2017-02-14 07:25:38 +02001411static int set_port_caps_atomic(struct mlx5_ib_dev *dev, u8 port_num, u32 mask,
1412 u32 value)
1413{
1414 struct mlx5_hca_vport_context ctx = {};
Daniel Jurgensb3cbd6f2018-01-04 17:25:38 +02001415 struct mlx5_core_dev *mdev;
1416 u8 mdev_port_num;
Eli Cohencdbe33d2017-02-14 07:25:38 +02001417 int err;
1418
Daniel Jurgensb3cbd6f2018-01-04 17:25:38 +02001419 mdev = mlx5_ib_get_native_port_mdev(dev, port_num, &mdev_port_num);
1420 if (!mdev)
1421 return -ENODEV;
1422
1423 err = mlx5_query_hca_vport_context(mdev, 0, mdev_port_num, 0, &ctx);
Eli Cohencdbe33d2017-02-14 07:25:38 +02001424 if (err)
Daniel Jurgensb3cbd6f2018-01-04 17:25:38 +02001425 goto out;
Eli Cohencdbe33d2017-02-14 07:25:38 +02001426
1427 if (~ctx.cap_mask1_perm & mask) {
1428 mlx5_ib_warn(dev, "trying to change bitmask 0x%X but change supported 0x%X\n",
1429 mask, ctx.cap_mask1_perm);
Daniel Jurgensb3cbd6f2018-01-04 17:25:38 +02001430 err = -EINVAL;
1431 goto out;
Eli Cohencdbe33d2017-02-14 07:25:38 +02001432 }
1433
1434 ctx.cap_mask1 = value;
1435 ctx.cap_mask1_perm = mask;
Daniel Jurgensb3cbd6f2018-01-04 17:25:38 +02001436 err = mlx5_core_modify_hca_vport_context(mdev, 0, mdev_port_num,
1437 0, &ctx);
1438
1439out:
1440 mlx5_ib_put_native_port_mdev(dev, port_num);
Eli Cohencdbe33d2017-02-14 07:25:38 +02001441
1442 return err;
1443}
1444
Eli Cohene126ba92013-07-07 17:25:49 +03001445static int mlx5_ib_modify_port(struct ib_device *ibdev, u8 port, int mask,
1446 struct ib_port_modify *props)
1447{
1448 struct mlx5_ib_dev *dev = to_mdev(ibdev);
1449 struct ib_port_attr attr;
1450 u32 tmp;
1451 int err;
Eli Cohencdbe33d2017-02-14 07:25:38 +02001452 u32 change_mask;
1453 u32 value;
1454 bool is_ib = (mlx5_ib_port_link_layer(ibdev, port) ==
1455 IB_LINK_LAYER_INFINIBAND);
1456
Majd Dibbinyec255872017-08-23 08:35:42 +03001457 /* CM layer calls ib_modify_port() regardless of the link layer. For
 1458	 * Ethernet ports, QKey violations and port capabilities are meaningless.
1459 */
1460 if (!is_ib)
1461 return 0;
1462
Eli Cohencdbe33d2017-02-14 07:25:38 +02001463 if (MLX5_CAP_GEN(dev->mdev, ib_virt) && is_ib) {
1464 change_mask = props->clr_port_cap_mask | props->set_port_cap_mask;
1465 value = ~props->clr_port_cap_mask | props->set_port_cap_mask;
1466 return set_port_caps_atomic(dev, port, change_mask, value);
1467 }
Eli Cohene126ba92013-07-07 17:25:49 +03001468
1469 mutex_lock(&dev->cap_mask_mutex);
1470
Or Gerlitzc4550c62017-01-24 13:02:39 +02001471 err = ib_query_port(ibdev, port, &attr);
Eli Cohene126ba92013-07-07 17:25:49 +03001472 if (err)
1473 goto out;
1474
1475 tmp = (attr.port_cap_flags | props->set_port_cap_mask) &
1476 ~props->clr_port_cap_mask;
1477
Jack Morgenstein9603b612014-07-28 23:30:22 +03001478 err = mlx5_set_port_caps(dev->mdev, port, tmp);
Eli Cohene126ba92013-07-07 17:25:49 +03001479
1480out:
1481 mutex_unlock(&dev->cap_mask_mutex);
1482 return err;
1483}
1484
Eli Cohen30aa60b2017-01-03 23:55:27 +02001485static void print_lib_caps(struct mlx5_ib_dev *dev, u64 caps)
1486{
1487 mlx5_ib_dbg(dev, "MLX5_LIB_CAP_4K_UAR = %s\n",
1488 caps & MLX5_LIB_CAP_4K_UAR ? "y" : "n");
1489}
1490
Yishai Hadas31a78a52017-12-24 16:31:34 +02001491static u16 calc_dynamic_bfregs(int uars_per_sys_page)
1492{
 1493	/* A large PAGE_SIZE without 4K-UAR support might limit the dynamic size */
1494 if (uars_per_sys_page == 1 && PAGE_SIZE > 4096)
1495 return MLX5_MIN_DYN_BFREGS;
1496
1497 return MLX5_MAX_DYN_BFREGS;
1498}
1499
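/* Work out the static and dynamic bfreg/UAR layout for a new ucontext from
 * the user's request: updates req->total_num_bfregs and fills @bfregi.
 */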
Eli Cohenb037c292017-01-03 23:55:26 +02001500static int calc_total_bfregs(struct mlx5_ib_dev *dev, bool lib_uar_4k,
1501 struct mlx5_ib_alloc_ucontext_req_v2 *req,
Yishai Hadas31a78a52017-12-24 16:31:34 +02001502 struct mlx5_bfreg_info *bfregi)
Eli Cohenb037c292017-01-03 23:55:26 +02001503{
1504 int uars_per_sys_page;
1505 int bfregs_per_sys_page;
1506 int ref_bfregs = req->total_num_bfregs;
1507
1508 if (req->total_num_bfregs == 0)
1509 return -EINVAL;
1510
1511 BUILD_BUG_ON(MLX5_MAX_BFREGS % MLX5_NON_FP_BFREGS_IN_PAGE);
1512 BUILD_BUG_ON(MLX5_MAX_BFREGS < MLX5_NON_FP_BFREGS_IN_PAGE);
1513
1514 if (req->total_num_bfregs > MLX5_MAX_BFREGS)
1515 return -ENOMEM;
1516
1517 uars_per_sys_page = get_uars_per_sys_page(dev, lib_uar_4k);
1518 bfregs_per_sys_page = uars_per_sys_page * MLX5_NON_FP_BFREGS_PER_UAR;
Yishai Hadas31a78a52017-12-24 16:31:34 +02001519	/* This holds the static allocation requested by the user */
Eli Cohenb037c292017-01-03 23:55:26 +02001520 req->total_num_bfregs = ALIGN(req->total_num_bfregs, bfregs_per_sys_page);
Eli Cohenb037c292017-01-03 23:55:26 +02001521 if (req->num_low_latency_bfregs > req->total_num_bfregs - 1)
1522 return -EINVAL;
1523
Yishai Hadas31a78a52017-12-24 16:31:34 +02001524 bfregi->num_static_sys_pages = req->total_num_bfregs / bfregs_per_sys_page;
1525 bfregi->num_dyn_bfregs = ALIGN(calc_dynamic_bfregs(uars_per_sys_page), bfregs_per_sys_page);
1526 bfregi->total_num_bfregs = req->total_num_bfregs + bfregi->num_dyn_bfregs;
1527 bfregi->num_sys_pages = bfregi->total_num_bfregs / bfregs_per_sys_page;
1528
1529 mlx5_ib_dbg(dev, "uar_4k: fw support %s, lib support %s, user requested %d bfregs, allocated %d, total bfregs %d, using %d sys pages\n",
Eli Cohenb037c292017-01-03 23:55:26 +02001530 MLX5_CAP_GEN(dev->mdev, uar_4k) ? "yes" : "no",
1531 lib_uar_4k ? "yes" : "no", ref_bfregs,
Yishai Hadas31a78a52017-12-24 16:31:34 +02001532 req->total_num_bfregs, bfregi->total_num_bfregs,
1533 bfregi->num_sys_pages);
Eli Cohenb037c292017-01-03 23:55:26 +02001534
1535 return 0;
1536}
1537
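/* Allocate firmware UARs for the statically mapped system pages and mark
 * the dynamic pages invalid until they are allocated at mmap time.
 */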
1538static int allocate_uars(struct mlx5_ib_dev *dev, struct mlx5_ib_ucontext *context)
1539{
1540 struct mlx5_bfreg_info *bfregi;
1541 int err;
1542 int i;
1543
1544 bfregi = &context->bfregi;
Yishai Hadas31a78a52017-12-24 16:31:34 +02001545 for (i = 0; i < bfregi->num_static_sys_pages; i++) {
Eli Cohenb037c292017-01-03 23:55:26 +02001546 err = mlx5_cmd_alloc_uar(dev->mdev, &bfregi->sys_pages[i]);
1547 if (err)
1548 goto error;
1549
1550 mlx5_ib_dbg(dev, "allocated uar %d\n", bfregi->sys_pages[i]);
1551 }
Yishai Hadas4ed131d2017-12-24 16:31:35 +02001552
1553 for (i = bfregi->num_static_sys_pages; i < bfregi->num_sys_pages; i++)
1554 bfregi->sys_pages[i] = MLX5_IB_INVALID_UAR_INDEX;
1555
Eli Cohenb037c292017-01-03 23:55:26 +02001556 return 0;
1557
1558error:
1559 for (--i; i >= 0; i--)
1560 if (mlx5_cmd_free_uar(dev->mdev, bfregi->sys_pages[i]))
1561 mlx5_ib_warn(dev, "failed to free uar %d\n", i);
1562
1563 return err;
1564}
1565
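/* Free every UAR that was actually allocated: all static pages plus any
 * dynamic page holding a valid index.
 */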
Leon Romanovsky15177992018-06-27 10:44:24 +03001566static void deallocate_uars(struct mlx5_ib_dev *dev,
1567 struct mlx5_ib_ucontext *context)
Eli Cohenb037c292017-01-03 23:55:26 +02001568{
1569 struct mlx5_bfreg_info *bfregi;
Eli Cohenb037c292017-01-03 23:55:26 +02001570 int i;
1571
1572 bfregi = &context->bfregi;
Leon Romanovsky15177992018-06-27 10:44:24 +03001573 for (i = 0; i < bfregi->num_sys_pages; i++)
Yishai Hadas4ed131d2017-12-24 16:31:35 +02001574 if (i < bfregi->num_static_sys_pages ||
Leon Romanovsky15177992018-06-27 10:44:24 +03001575 bfregi->sys_pages[i] != MLX5_IB_INVALID_UAR_INDEX)
1576 mlx5_cmd_free_uar(dev->mdev, bfregi->sys_pages[i]);
Eli Cohenb037c292017-01-03 23:55:26 +02001577}
1578
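/* Track transport-domain and QP users of local loopback and enable it on
 * the NIC vport once a second TD user or the first QP user appears.
 */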
Mark Bloch0042f9e2018-09-17 13:30:49 +03001579int mlx5_ib_enable_lb(struct mlx5_ib_dev *dev, bool td, bool qp)
Mark Blocha560f1d2018-09-17 13:30:47 +03001580{
1581 int err = 0;
1582
1583 mutex_lock(&dev->lb.mutex);
Mark Bloch0042f9e2018-09-17 13:30:49 +03001584 if (td)
1585 dev->lb.user_td++;
1586 if (qp)
1587 dev->lb.qps++;
Mark Blocha560f1d2018-09-17 13:30:47 +03001588
Mark Bloch0042f9e2018-09-17 13:30:49 +03001589 if (dev->lb.user_td == 2 ||
1590 dev->lb.qps == 1) {
1591 if (!dev->lb.enabled) {
1592 err = mlx5_nic_vport_update_local_lb(dev->mdev, true);
1593 dev->lb.enabled = true;
1594 }
1595 }
Mark Blocha560f1d2018-09-17 13:30:47 +03001596
1597 mutex_unlock(&dev->lb.mutex);
1598
1599 return err;
1600}
1601
Mark Bloch0042f9e2018-09-17 13:30:49 +03001602void mlx5_ib_disable_lb(struct mlx5_ib_dev *dev, bool td, bool qp)
Mark Blocha560f1d2018-09-17 13:30:47 +03001603{
1604 mutex_lock(&dev->lb.mutex);
Mark Bloch0042f9e2018-09-17 13:30:49 +03001605 if (td)
1606 dev->lb.user_td--;
1607 if (qp)
1608 dev->lb.qps--;
Mark Blocha560f1d2018-09-17 13:30:47 +03001609
Mark Bloch0042f9e2018-09-17 13:30:49 +03001610 if (dev->lb.user_td == 1 &&
1611 dev->lb.qps == 0) {
1612 if (dev->lb.enabled) {
1613 mlx5_nic_vport_update_local_lb(dev->mdev, false);
1614 dev->lb.enabled = false;
1615 }
1616 }
Mark Blocha560f1d2018-09-17 13:30:47 +03001617
1618 mutex_unlock(&dev->lb.mutex);
1619}
1620
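/* Allocate a transport domain for a ucontext; on Ethernet ports that can
 * disable local loopback, also account the TD as a loopback user.
 */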
Yishai Hadasd2d19122018-09-20 21:39:32 +03001621static int mlx5_ib_alloc_transport_domain(struct mlx5_ib_dev *dev, u32 *tdn,
1622 u16 uid)
Huy Nguyenc85023e2017-05-30 09:42:54 +03001623{
1624 int err;
1625
Leon Romanovskycfdeb892018-06-19 10:39:06 +03001626 if (!MLX5_CAP_GEN(dev->mdev, log_max_transport_domain))
1627 return 0;
1628
Yishai Hadasd2d19122018-09-20 21:39:32 +03001629 err = mlx5_cmd_alloc_transport_domain(dev->mdev, tdn, uid);
Huy Nguyenc85023e2017-05-30 09:42:54 +03001630 if (err)
1631 return err;
1632
1633 if ((MLX5_CAP_GEN(dev->mdev, port_type) != MLX5_CAP_PORT_TYPE_ETH) ||
Eran Ben Elisha8978cc92018-01-09 11:41:10 +02001634 (!MLX5_CAP_GEN(dev->mdev, disable_local_lb_uc) &&
1635 !MLX5_CAP_GEN(dev->mdev, disable_local_lb_mc)))
Huy Nguyenc85023e2017-05-30 09:42:54 +03001636 return err;
1637
Mark Bloch0042f9e2018-09-17 13:30:49 +03001638 return mlx5_ib_enable_lb(dev, true, false);
Huy Nguyenc85023e2017-05-30 09:42:54 +03001639}
1640
Yishai Hadasd2d19122018-09-20 21:39:32 +03001641static void mlx5_ib_dealloc_transport_domain(struct mlx5_ib_dev *dev, u32 tdn,
1642 u16 uid)
Huy Nguyenc85023e2017-05-30 09:42:54 +03001643{
Leon Romanovskycfdeb892018-06-19 10:39:06 +03001644 if (!MLX5_CAP_GEN(dev->mdev, log_max_transport_domain))
1645 return;
1646
Yishai Hadasd2d19122018-09-20 21:39:32 +03001647 mlx5_cmd_dealloc_transport_domain(dev->mdev, tdn, uid);
Huy Nguyenc85023e2017-05-30 09:42:54 +03001648
1649 if ((MLX5_CAP_GEN(dev->mdev, port_type) != MLX5_CAP_PORT_TYPE_ETH) ||
Eran Ben Elisha8978cc92018-01-09 11:41:10 +02001650 (!MLX5_CAP_GEN(dev->mdev, disable_local_lb_uc) &&
1651 !MLX5_CAP_GEN(dev->mdev, disable_local_lb_mc)))
Huy Nguyenc85023e2017-05-30 09:42:54 +03001652 return;
1653
Mark Bloch0042f9e2018-09-17 13:30:49 +03001654 mlx5_ib_disable_lb(dev, true, false);
Huy Nguyenc85023e2017-05-30 09:42:54 +03001655}
1656
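/* Create a user context: parse the (v2) request, report device limits and
 * capabilities back to userspace, set up bfregs/UARs, optionally create a
 * DEVX uid and allocate the context's transport domain.
 */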
Eli Cohene126ba92013-07-07 17:25:49 +03001657static struct ib_ucontext *mlx5_ib_alloc_ucontext(struct ib_device *ibdev,
1658 struct ib_udata *udata)
1659{
1660 struct mlx5_ib_dev *dev = to_mdev(ibdev);
Matan Barakb368d7c2015-12-15 20:30:12 +02001661 struct mlx5_ib_alloc_ucontext_req_v2 req = {};
1662 struct mlx5_ib_alloc_ucontext_resp resp = {};
Feras Daoud5c99eae2018-01-16 20:08:41 +02001663 struct mlx5_core_dev *mdev = dev->mdev;
Eli Cohene126ba92013-07-07 17:25:49 +03001664 struct mlx5_ib_ucontext *context;
Eli Cohen2f5ff262017-01-03 23:55:21 +02001665 struct mlx5_bfreg_info *bfregi;
Eli Cohen78c0f982014-01-30 13:49:48 +02001666 int ver;
Eli Cohene126ba92013-07-07 17:25:49 +03001667 int err;
Majd Dibbinya168a41c2016-01-28 17:51:47 +02001668 size_t min_req_v2 = offsetof(struct mlx5_ib_alloc_ucontext_req_v2,
1669 max_cqe_version);
Yonatan Cohen25bb36e2018-06-19 08:47:24 +03001670 u32 dump_fill_mkey;
Eli Cohenb037c292017-01-03 23:55:26 +02001671 bool lib_uar_4k;
Eli Cohene126ba92013-07-07 17:25:49 +03001672
1673 if (!dev->ib_active)
1674 return ERR_PTR(-EAGAIN);
1675
Amrani, Rame0931112017-06-27 17:04:42 +03001676 if (udata->inlen == sizeof(struct mlx5_ib_alloc_ucontext_req))
Eli Cohen78c0f982014-01-30 13:49:48 +02001677 ver = 0;
Amrani, Rame0931112017-06-27 17:04:42 +03001678 else if (udata->inlen >= min_req_v2)
Eli Cohen78c0f982014-01-30 13:49:48 +02001679 ver = 2;
1680 else
1681 return ERR_PTR(-EINVAL);
1682
Amrani, Rame0931112017-06-27 17:04:42 +03001683 err = ib_copy_from_udata(&req, udata, min(udata->inlen, sizeof(req)));
Eli Cohene126ba92013-07-07 17:25:49 +03001684 if (err)
1685 return ERR_PTR(err);
1686
Yishai Hadasa8b92ca2018-06-17 12:59:57 +03001687 if (req.flags & ~MLX5_IB_ALLOC_UCTX_DEVX)
1688 return ERR_PTR(-EOPNOTSUPP);
Eli Cohen78c0f982014-01-30 13:49:48 +02001689
Haggai Abramovskyf72300c2016-01-14 19:12:58 +02001690 if (req.comp_mask || req.reserved0 || req.reserved1 || req.reserved2)
Matan Barakb368d7c2015-12-15 20:30:12 +02001691 return ERR_PTR(-EOPNOTSUPP);
1692
Eli Cohen2f5ff262017-01-03 23:55:21 +02001693 req.total_num_bfregs = ALIGN(req.total_num_bfregs,
1694 MLX5_NON_FP_BFREGS_PER_UAR);
1695 if (req.num_low_latency_bfregs > req.total_num_bfregs - 1)
Eli Cohene126ba92013-07-07 17:25:49 +03001696 return ERR_PTR(-EINVAL);
1697
Saeed Mahameed938fe832015-05-28 22:28:41 +03001698 resp.qp_tab_size = 1 << MLX5_CAP_GEN(dev->mdev, log_max_qp);
Noa Osherovich2cc6ad52016-06-04 15:15:33 +03001699 if (mlx5_core_is_pf(dev->mdev) && MLX5_CAP_GEN(dev->mdev, bf))
1700 resp.bf_reg_size = 1 << MLX5_CAP_GEN(dev->mdev, log_bf_reg_size);
Daniel Jurgensb47bd6e2016-10-25 18:36:24 +03001701 resp.cache_line_size = cache_line_size();
Saeed Mahameed938fe832015-05-28 22:28:41 +03001702 resp.max_sq_desc_sz = MLX5_CAP_GEN(dev->mdev, max_wqe_sz_sq);
1703 resp.max_rq_desc_sz = MLX5_CAP_GEN(dev->mdev, max_wqe_sz_rq);
1704 resp.max_send_wqebb = 1 << MLX5_CAP_GEN(dev->mdev, log_max_qp_sz);
1705 resp.max_recv_wr = 1 << MLX5_CAP_GEN(dev->mdev, log_max_qp_sz);
1706 resp.max_srq_recv_wr = 1 << MLX5_CAP_GEN(dev->mdev, log_max_srq_sz);
Haggai Abramovskyf72300c2016-01-14 19:12:58 +02001707 resp.cqe_version = min_t(__u8,
1708 (__u8)MLX5_CAP_GEN(dev->mdev, cqe_version),
1709 req.max_cqe_version);
Eli Cohen30aa60b2017-01-03 23:55:27 +02001710 resp.log_uar_size = MLX5_CAP_GEN(dev->mdev, uar_4k) ?
1711 MLX5_ADAPTER_PAGE_SHIFT : PAGE_SHIFT;
1712 resp.num_uars_per_page = MLX5_CAP_GEN(dev->mdev, uar_4k) ?
1713 MLX5_CAP_GEN(dev->mdev, num_of_uars_per_page) : 1;
Matan Barakb368d7c2015-12-15 20:30:12 +02001714 resp.response_length = min(offsetof(typeof(resp), response_length) +
1715 sizeof(resp.response_length), udata->outlen);
Eli Cohene126ba92013-07-07 17:25:49 +03001716
Matan Barakc03faa52018-03-28 09:27:54 +03001717 if (mlx5_accel_ipsec_device_caps(dev->mdev) & MLX5_ACCEL_IPSEC_CAP_DEVICE) {
1718 if (mlx5_get_flow_namespace(dev->mdev, MLX5_FLOW_NAMESPACE_EGRESS))
1719 resp.flow_action_flags |= MLX5_USER_ALLOC_UCONTEXT_FLOW_ACTION_FLAGS_ESP_AES_GCM;
1720 if (mlx5_accel_ipsec_device_caps(dev->mdev) & MLX5_ACCEL_IPSEC_CAP_REQUIRED_METADATA)
1721 resp.flow_action_flags |= MLX5_USER_ALLOC_UCONTEXT_FLOW_ACTION_FLAGS_ESP_AES_GCM_REQ_METADATA;
1722 if (MLX5_CAP_FLOWTABLE(dev->mdev, flow_table_properties_nic_receive.ft_field_support.outer_esp_spi))
1723 resp.flow_action_flags |= MLX5_USER_ALLOC_UCONTEXT_FLOW_ACTION_FLAGS_ESP_AES_GCM_SPI_STEERING;
1724 if (mlx5_accel_ipsec_device_caps(dev->mdev) & MLX5_ACCEL_IPSEC_CAP_TX_IV_IS_ESN)
1725 resp.flow_action_flags |= MLX5_USER_ALLOC_UCONTEXT_FLOW_ACTION_FLAGS_ESP_AES_GCM_TX_IV_IS_ESN;
1726 /* MLX5_USER_ALLOC_UCONTEXT_FLOW_ACTION_FLAGS_ESP_AES_GCM_FULL_OFFLOAD is currently always 0 */
1727 }
1728
Eli Cohene126ba92013-07-07 17:25:49 +03001729 context = kzalloc(sizeof(*context), GFP_KERNEL);
1730 if (!context)
1731 return ERR_PTR(-ENOMEM);
1732
Eli Cohen30aa60b2017-01-03 23:55:27 +02001733 lib_uar_4k = req.lib_caps & MLX5_LIB_CAP_4K_UAR;
Eli Cohen2f5ff262017-01-03 23:55:21 +02001734 bfregi = &context->bfregi;
Eli Cohenb037c292017-01-03 23:55:26 +02001735
1736 /* updates req->total_num_bfregs */
Yishai Hadas31a78a52017-12-24 16:31:34 +02001737 err = calc_total_bfregs(dev, lib_uar_4k, &req, bfregi);
Eli Cohenb037c292017-01-03 23:55:26 +02001738 if (err)
1739 goto out_ctx;
1740
Eli Cohen2f5ff262017-01-03 23:55:21 +02001741 mutex_init(&bfregi->lock);
Eli Cohenb037c292017-01-03 23:55:26 +02001742 bfregi->lib_uar_4k = lib_uar_4k;
Yishai Hadas31a78a52017-12-24 16:31:34 +02001743 bfregi->count = kcalloc(bfregi->total_num_bfregs, sizeof(*bfregi->count),
Eli Cohenb037c292017-01-03 23:55:26 +02001744 GFP_KERNEL);
1745 if (!bfregi->count) {
Eli Cohene126ba92013-07-07 17:25:49 +03001746 err = -ENOMEM;
1747 goto out_ctx;
1748 }
1749
Eli Cohenb037c292017-01-03 23:55:26 +02001750 bfregi->sys_pages = kcalloc(bfregi->num_sys_pages,
1751 sizeof(*bfregi->sys_pages),
1752 GFP_KERNEL);
1753 if (!bfregi->sys_pages) {
Eli Cohene126ba92013-07-07 17:25:49 +03001754 err = -ENOMEM;
Eli Cohenb037c292017-01-03 23:55:26 +02001755 goto out_count;
Eli Cohene126ba92013-07-07 17:25:49 +03001756 }
1757
Eli Cohenb037c292017-01-03 23:55:26 +02001758 err = allocate_uars(dev, context);
1759 if (err)
1760 goto out_sys_pages;
Eli Cohene126ba92013-07-07 17:25:49 +03001761
Haggai Eranb4cfe442014-12-11 17:04:26 +02001762#ifdef CONFIG_INFINIBAND_ON_DEMAND_PAGING
1763 context->ibucontext.invalidate_range = &mlx5_ib_invalidate_range;
1764#endif
1765
Yishai Hadasa8b92ca2018-06-17 12:59:57 +03001766 if (req.flags & MLX5_IB_ALLOC_UCTX_DEVX) {
Yishai Hadasfb981532018-11-26 08:28:36 +02001767 err = mlx5_ib_devx_create(dev, true);
Yishai Hadas76dc5a82018-09-20 21:45:19 +03001768 if (err < 0)
Yishai Hadasd2d19122018-09-20 21:39:32 +03001769 goto out_uars;
Yishai Hadas76dc5a82018-09-20 21:45:19 +03001770 context->devx_uid = err;
Yishai Hadasa8b92ca2018-06-17 12:59:57 +03001771 }
1772
Yishai Hadasd2d19122018-09-20 21:39:32 +03001773 err = mlx5_ib_alloc_transport_domain(dev, &context->tdn,
1774 context->devx_uid);
1775 if (err)
1776 goto out_devx;
1777
Yonatan Cohen25bb36e2018-06-19 08:47:24 +03001778 if (MLX5_CAP_GEN(dev->mdev, dump_fill_mkey)) {
1779 err = mlx5_cmd_dump_fill_mkey(dev->mdev, &dump_fill_mkey);
1780 if (err)
Jason Gunthorpe8193abb2018-07-04 13:19:46 -06001781 goto out_mdev;
Yonatan Cohen25bb36e2018-06-19 08:47:24 +03001782 }
1783
Eli Cohene126ba92013-07-07 17:25:49 +03001784 INIT_LIST_HEAD(&context->db_page_list);
1785 mutex_init(&context->db_page_mutex);
1786
Eli Cohen2f5ff262017-01-03 23:55:21 +02001787 resp.tot_bfregs = req.total_num_bfregs;
Daniel Jurgens508562d2018-01-04 17:25:34 +02001788 resp.num_ports = dev->num_ports;
Matan Barakb368d7c2015-12-15 20:30:12 +02001789
Haggai Abramovskyf72300c2016-01-14 19:12:58 +02001790 if (field_avail(typeof(resp), cqe_version, udata->outlen))
1791 resp.response_length += sizeof(resp.cqe_version);
Matan Barakb368d7c2015-12-15 20:30:12 +02001792
Bodong Wang402ca532016-06-17 15:02:20 +03001793 if (field_avail(typeof(resp), cmds_supp_uhw, udata->outlen)) {
Moni Shoua6ad279c52016-11-23 08:23:23 +02001794 resp.cmds_supp_uhw |= MLX5_USER_CMDS_SUPP_UHW_QUERY_DEVICE |
1795 MLX5_USER_CMDS_SUPP_UHW_CREATE_AH;
Bodong Wang402ca532016-06-17 15:02:20 +03001796 resp.response_length += sizeof(resp.cmds_supp_uhw);
1797 }
1798
Or Gerlitz78984892016-11-30 20:33:33 +02001799 if (field_avail(typeof(resp), eth_min_inline, udata->outlen)) {
1800 if (mlx5_ib_port_link_layer(ibdev, 1) == IB_LINK_LAYER_ETHERNET) {
1801 mlx5_query_min_inline(dev->mdev, &resp.eth_min_inline);
1802 resp.eth_min_inline++;
1803 }
1804 resp.response_length += sizeof(resp.eth_min_inline);
1805 }
1806
Feras Daoud5c99eae2018-01-16 20:08:41 +02001807 if (field_avail(typeof(resp), clock_info_versions, udata->outlen)) {
1808 if (mdev->clock_info)
1809 resp.clock_info_versions = BIT(MLX5_IB_CLOCK_INFO_V1);
1810 resp.response_length += sizeof(resp.clock_info_versions);
1811 }
1812
Noa Osherovichbc5c6ee2016-06-04 15:15:31 +03001813 /*
1814 * We don't want to expose information from the PCI bar that is located
1815 * after 4096 bytes, so if the arch only supports larger pages, let's
1816 * pretend we don't support reading the HCA's core clock. This is also
 1817	 * enforced by the mmap function.
1818 */
Eli Cohende8d6e02017-01-03 23:55:19 +02001819 if (field_avail(typeof(resp), hca_core_clock_offset, udata->outlen)) {
1820 if (PAGE_SIZE <= 4096) {
1821 resp.comp_mask |=
1822 MLX5_IB_ALLOC_UCONTEXT_RESP_MASK_CORE_CLOCK_OFFSET;
1823 resp.hca_core_clock_offset =
1824 offsetof(struct mlx5_init_seg, internal_timer_h) % PAGE_SIZE;
1825 }
Feras Daoud5c99eae2018-01-16 20:08:41 +02001826 resp.response_length += sizeof(resp.hca_core_clock_offset);
Matan Barakb368d7c2015-12-15 20:30:12 +02001827 }
1828
Eli Cohen30aa60b2017-01-03 23:55:27 +02001829 if (field_avail(typeof(resp), log_uar_size, udata->outlen))
1830 resp.response_length += sizeof(resp.log_uar_size);
1831
1832 if (field_avail(typeof(resp), num_uars_per_page, udata->outlen))
1833 resp.response_length += sizeof(resp.num_uars_per_page);
1834
Yishai Hadas31a78a52017-12-24 16:31:34 +02001835 if (field_avail(typeof(resp), num_dyn_bfregs, udata->outlen)) {
1836 resp.num_dyn_bfregs = bfregi->num_dyn_bfregs;
1837 resp.response_length += sizeof(resp.num_dyn_bfregs);
1838 }
1839
Yonatan Cohen25bb36e2018-06-19 08:47:24 +03001840 if (field_avail(typeof(resp), dump_fill_mkey, udata->outlen)) {
1841 if (MLX5_CAP_GEN(dev->mdev, dump_fill_mkey)) {
1842 resp.dump_fill_mkey = dump_fill_mkey;
1843 resp.comp_mask |=
1844 MLX5_IB_ALLOC_UCONTEXT_RESP_MASK_DUMP_FILL_MKEY;
1845 }
1846 resp.response_length += sizeof(resp.dump_fill_mkey);
1847 }
1848
Matan Barakb368d7c2015-12-15 20:30:12 +02001849 err = ib_copy_to_udata(udata, &resp, resp.response_length);
Eli Cohene126ba92013-07-07 17:25:49 +03001850 if (err)
Yishai Hadasa8b92ca2018-06-17 12:59:57 +03001851 goto out_mdev;
Eli Cohene126ba92013-07-07 17:25:49 +03001852
Eli Cohen2f5ff262017-01-03 23:55:21 +02001853 bfregi->ver = ver;
1854 bfregi->num_low_latency_bfregs = req.num_low_latency_bfregs;
Haggai Abramovskyf72300c2016-01-14 19:12:58 +02001855 context->cqe_version = resp.cqe_version;
Eli Cohen30aa60b2017-01-03 23:55:27 +02001856 context->lib_caps = req.lib_caps;
1857 print_lib_caps(dev, context->lib_caps);
Haggai Abramovskyf72300c2016-01-14 19:12:58 +02001858
Majd Dibbinyc6a21c32018-08-28 14:29:05 +03001859 if (mlx5_lag_is_active(dev->mdev)) {
1860 u8 port = mlx5_core_native_port_num(dev->mdev);
1861
1862 atomic_set(&context->tx_port_affinity,
1863 atomic_add_return(
1864 1, &dev->roce[port].tx_port_affinity));
1865 }
1866
Eli Cohene126ba92013-07-07 17:25:49 +03001867 return &context->ibucontext;
1868
Yishai Hadasa8b92ca2018-06-17 12:59:57 +03001869out_mdev:
Yishai Hadasd2d19122018-09-20 21:39:32 +03001870 mlx5_ib_dealloc_transport_domain(dev, context->tdn, context->devx_uid);
1871out_devx:
Yishai Hadasa8b92ca2018-06-17 12:59:57 +03001872 if (req.flags & MLX5_IB_ALLOC_UCTX_DEVX)
Yishai Hadas76dc5a82018-09-20 21:45:19 +03001873 mlx5_ib_devx_destroy(dev, context->devx_uid);
majd@mellanox.com146d2f12016-01-14 19:13:02 +02001874
Eli Cohene126ba92013-07-07 17:25:49 +03001875out_uars:
Eli Cohenb037c292017-01-03 23:55:26 +02001876 deallocate_uars(dev, context);
1877
1878out_sys_pages:
1879 kfree(bfregi->sys_pages);
1880
Eli Cohene126ba92013-07-07 17:25:49 +03001881out_count:
Eli Cohen2f5ff262017-01-03 23:55:21 +02001882 kfree(bfregi->count);
Eli Cohene126ba92013-07-07 17:25:49 +03001883
Eli Cohene126ba92013-07-07 17:25:49 +03001884out_ctx:
1885 kfree(context);
Eli Cohenb037c292017-01-03 23:55:26 +02001886
Eli Cohene126ba92013-07-07 17:25:49 +03001887 return ERR_PTR(err);
1888}
1889
1890static int mlx5_ib_dealloc_ucontext(struct ib_ucontext *ibcontext)
1891{
1892 struct mlx5_ib_ucontext *context = to_mucontext(ibcontext);
1893 struct mlx5_ib_dev *dev = to_mdev(ibcontext->device);
Eli Cohenb037c292017-01-03 23:55:26 +02001894 struct mlx5_bfreg_info *bfregi;
Eli Cohene126ba92013-07-07 17:25:49 +03001895
Jason Gunthorpef27a0d52018-09-16 20:48:08 +03001896#ifdef CONFIG_INFINIBAND_ON_DEMAND_PAGING
 1897	/* All umems must be destroyed before destroying the ucontext. */
1898 mutex_lock(&ibcontext->per_mm_list_lock);
1899 WARN_ON(!list_empty(&ibcontext->per_mm_list));
1900 mutex_unlock(&ibcontext->per_mm_list_lock);
1901#endif
Yishai Hadasa8b92ca2018-06-17 12:59:57 +03001902
Eli Cohenb037c292017-01-03 23:55:26 +02001903 bfregi = &context->bfregi;
Yishai Hadasd2d19122018-09-20 21:39:32 +03001904 mlx5_ib_dealloc_transport_domain(dev, context->tdn, context->devx_uid);
1905
Eli Cohenb037c292017-01-03 23:55:26 +02001906 if (context->devx_uid)
Yishai Hadas76dc5a82018-09-20 21:45:19 +03001907 mlx5_ib_devx_destroy(dev, context->devx_uid);
Eli Cohene126ba92013-07-07 17:25:49 +03001908
1909 deallocate_uars(dev, context);
Eli Cohen2f5ff262017-01-03 23:55:21 +02001910 kfree(bfregi->sys_pages);
1911 kfree(bfregi->count);
1912 kfree(context);
Eli Cohene126ba92013-07-07 17:25:49 +03001913
1914 return 0;
1915}
1916
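/* Translate a UAR index into the PFN of its page in BAR 0, taking into
 * account how many firmware UARs share one kernel page.
 */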
1917static phys_addr_t uar_index2pfn(struct mlx5_ib_dev *dev,
1918 int uar_idx)
1919{
Eli Cohenb037c292017-01-03 23:55:26 +02001920 int fw_uars_per_page;
1921
1922 fw_uars_per_page = MLX5_CAP_GEN(dev->mdev, uar_4k) ? MLX5_UARS_IN_PAGE : 1;
1923
Yishai Hadas4ed131d2017-12-24 16:31:35 +02001924 return (pci_resource_start(dev->mdev->pdev, 0) >> PAGE_SHIFT) + uar_idx / fw_uars_per_page;
Eli Cohene126ba92013-07-07 17:25:49 +03001925}
1926
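/* The mmap offset encodes a command in the bits above MLX5_IB_MMAP_CMD_SHIFT
 * and an argument (e.g. a UAR or page index) in the bits below it.
 */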
1927static int get_command(unsigned long offset)
1928{
1929 return (offset >> MLX5_IB_MMAP_CMD_SHIFT) & MLX5_IB_MMAP_CMD_MASK;
1930}
1931
1932static int get_arg(unsigned long offset)
1933{
1934 return offset & ((1 << MLX5_IB_MMAP_CMD_SHIFT) - 1);
1935}
1936
1937static int get_index(unsigned long offset)
1938{
1939 return get_arg(offset);
1940}
1941
Yishai Hadas4ed131d2017-12-24 16:31:35 +02001942/* The index resides in an extra byte to enable values larger than 255 */
1943static int get_extended_index(unsigned long offset)
1944{
1945 return get_arg(offset) | ((offset >> 16) & 0xff) << 8;
1946}
1947
Maor Gottlieb7c2344c2016-06-17 14:56:44 +03001948
1949static void mlx5_ib_disassociate_ucontext(struct ib_ucontext *ibcontext)
1950{
Maor Gottlieb7c2344c2016-06-17 14:56:44 +03001951}
1952
Guy Levi37aa5c32016-04-27 16:49:50 +03001953static inline char *mmap_cmd2str(enum mlx5_ib_mmap_cmd cmd)
1954{
1955 switch (cmd) {
1956 case MLX5_IB_MMAP_WC_PAGE:
1957 return "WC";
1958 case MLX5_IB_MMAP_REGULAR_PAGE:
1959 return "best effort WC";
1960 case MLX5_IB_MMAP_NC_PAGE:
1961 return "NC";
Ariel Levkovich24da0012018-04-05 18:53:27 +03001962 case MLX5_IB_MMAP_DEVICE_MEM:
1963 return "Device Memory";
Guy Levi37aa5c32016-04-27 16:49:50 +03001964 default:
1965 return NULL;
1966 }
1967}
1968
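/* Map the device's clock info page read-only into userspace; only version
 * MLX5_IB_CLOCK_INFO_V1 is supported and writable mappings are rejected.
 */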
Feras Daoud5c99eae2018-01-16 20:08:41 +02001969static int mlx5_ib_mmap_clock_info_page(struct mlx5_ib_dev *dev,
1970 struct vm_area_struct *vma,
1971 struct mlx5_ib_ucontext *context)
1972{
Feras Daoud5c99eae2018-01-16 20:08:41 +02001973 if (vma->vm_end - vma->vm_start != PAGE_SIZE)
1974 return -EINVAL;
1975
1976 if (get_index(vma->vm_pgoff) != MLX5_IB_CLOCK_INFO_V1)
1977 return -EOPNOTSUPP;
1978
1979 if (vma->vm_flags & VM_WRITE)
1980 return -EPERM;
1981
1982 if (!dev->mdev->clock_info_page)
1983 return -EOPNOTSUPP;
1984
Jason Gunthorpee2cd1d12018-09-16 20:43:10 +03001985 return rdma_user_mmap_page(&context->ibucontext, vma,
1986 dev->mdev->clock_info_page, PAGE_SIZE);
Feras Daoud5c99eae2018-01-16 20:08:41 +02001987}
1988
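/* Map one UAR page to userspace with the requested caching attributes.
 * MLX5_IB_MMAP_ALLOC_WC additionally allocates a dynamic UAR on demand and
 * records it in bfregi->sys_pages.
 */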
Guy Levi37aa5c32016-04-27 16:49:50 +03001989static int uar_mmap(struct mlx5_ib_dev *dev, enum mlx5_ib_mmap_cmd cmd,
Maor Gottlieb7c2344c2016-06-17 14:56:44 +03001990 struct vm_area_struct *vma,
1991 struct mlx5_ib_ucontext *context)
Guy Levi37aa5c32016-04-27 16:49:50 +03001992{
Eli Cohen2f5ff262017-01-03 23:55:21 +02001993 struct mlx5_bfreg_info *bfregi = &context->bfregi;
Guy Levi37aa5c32016-04-27 16:49:50 +03001994 int err;
1995 unsigned long idx;
Kamal Heibaa09ea62018-07-19 00:05:32 +03001996 phys_addr_t pfn;
Guy Levi37aa5c32016-04-27 16:49:50 +03001997 pgprot_t prot;
Yishai Hadas4ed131d2017-12-24 16:31:35 +02001998 u32 bfreg_dyn_idx = 0;
1999 u32 uar_index;
2000 int dyn_uar = (cmd == MLX5_IB_MMAP_ALLOC_WC);
2001 int max_valid_idx = dyn_uar ? bfregi->num_sys_pages :
2002 bfregi->num_static_sys_pages;
Eli Cohenb037c292017-01-03 23:55:26 +02002003
2004 if (vma->vm_end - vma->vm_start != PAGE_SIZE)
2005 return -EINVAL;
2006
Yishai Hadas4ed131d2017-12-24 16:31:35 +02002007 if (dyn_uar)
2008 idx = get_extended_index(vma->vm_pgoff) + bfregi->num_static_sys_pages;
2009 else
2010 idx = get_index(vma->vm_pgoff);
2011
2012 if (idx >= max_valid_idx) {
2013 mlx5_ib_warn(dev, "invalid uar index %lu, max=%d\n",
2014 idx, max_valid_idx);
Eli Cohenb037c292017-01-03 23:55:26 +02002015 return -EINVAL;
2016 }
Guy Levi37aa5c32016-04-27 16:49:50 +03002017
2018 switch (cmd) {
2019 case MLX5_IB_MMAP_WC_PAGE:
Yishai Hadas4ed131d2017-12-24 16:31:35 +02002020 case MLX5_IB_MMAP_ALLOC_WC:
Guy Levi37aa5c32016-04-27 16:49:50 +03002021/* Some architectures don't support WC memory */
2022#if defined(CONFIG_X86)
2023 if (!pat_enabled())
2024 return -EPERM;
2025#elif !(defined(CONFIG_PPC) || (defined(CONFIG_ARM) && defined(CONFIG_MMU)))
2026 return -EPERM;
2027#endif
2028 /* fall through */
2029 case MLX5_IB_MMAP_REGULAR_PAGE:
 2030	/* For MLX5_IB_MMAP_REGULAR_PAGE make a best effort to get WC */
2031 prot = pgprot_writecombine(vma->vm_page_prot);
2032 break;
2033 case MLX5_IB_MMAP_NC_PAGE:
2034 prot = pgprot_noncached(vma->vm_page_prot);
2035 break;
2036 default:
2037 return -EINVAL;
2038 }
2039
Yishai Hadas4ed131d2017-12-24 16:31:35 +02002040 if (dyn_uar) {
2041 int uars_per_page;
2042
2043 uars_per_page = get_uars_per_sys_page(dev, bfregi->lib_uar_4k);
2044 bfreg_dyn_idx = idx * (uars_per_page * MLX5_NON_FP_BFREGS_PER_UAR);
2045 if (bfreg_dyn_idx >= bfregi->total_num_bfregs) {
2046 mlx5_ib_warn(dev, "invalid bfreg_dyn_idx %u, max=%u\n",
2047 bfreg_dyn_idx, bfregi->total_num_bfregs);
2048 return -EINVAL;
2049 }
2050
2051 mutex_lock(&bfregi->lock);
 2052		/* Fail if the UAR is already allocated; the first bfreg index of
 2053		 * each page holds its count.
2054 */
2055 if (bfregi->count[bfreg_dyn_idx]) {
2056 mlx5_ib_warn(dev, "wrong offset, idx %lu is busy, bfregn=%u\n", idx, bfreg_dyn_idx);
2057 mutex_unlock(&bfregi->lock);
2058 return -EINVAL;
2059 }
2060
2061 bfregi->count[bfreg_dyn_idx]++;
2062 mutex_unlock(&bfregi->lock);
2063
2064 err = mlx5_cmd_alloc_uar(dev->mdev, &uar_index);
2065 if (err) {
2066 mlx5_ib_warn(dev, "UAR alloc failed\n");
2067 goto free_bfreg;
2068 }
2069 } else {
2070 uar_index = bfregi->sys_pages[idx];
2071 }
2072
2073 pfn = uar_index2pfn(dev, uar_index);
Guy Levi37aa5c32016-04-27 16:49:50 +03002074 mlx5_ib_dbg(dev, "uar idx 0x%lx, pfn %pa\n", idx, &pfn);
2075
Jason Gunthorpee2cd1d12018-09-16 20:43:10 +03002076 err = rdma_user_mmap_io(&context->ibucontext, vma, pfn, PAGE_SIZE,
2077 prot);
Guy Levi37aa5c32016-04-27 16:49:50 +03002078 if (err) {
Leon Romanovsky8f062282018-05-22 08:31:03 +03002079 mlx5_ib_err(dev,
Jason Gunthorpee2cd1d12018-09-16 20:43:10 +03002080 "rdma_user_mmap_io failed with error=%d, mmap_cmd=%s\n",
Leon Romanovsky8f062282018-05-22 08:31:03 +03002081 err, mmap_cmd2str(cmd));
Yishai Hadas4ed131d2017-12-24 16:31:35 +02002082 goto err;
Guy Levi37aa5c32016-04-27 16:49:50 +03002083 }
2084
Yishai Hadas4ed131d2017-12-24 16:31:35 +02002085 if (dyn_uar)
2086 bfregi->sys_pages[idx] = uar_index;
2087 return 0;
2088
2089err:
2090 if (!dyn_uar)
2091 return err;
2092
2093 mlx5_cmd_free_uar(dev->mdev, idx);
2094
2095free_bfreg:
2096 mlx5_ib_free_bfreg(dev, bfregi, bfreg_dyn_idx);
2097
2098 return err;
Guy Levi37aa5c32016-04-27 16:49:50 +03002099}
2100
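/* Map device memory (MEMIC) pages to userspace after verifying that every
 * requested page was previously allocated to this ucontext.
 */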
Ariel Levkovich24da0012018-04-05 18:53:27 +03002101static int dm_mmap(struct ib_ucontext *context, struct vm_area_struct *vma)
2102{
2103 struct mlx5_ib_ucontext *mctx = to_mucontext(context);
2104 struct mlx5_ib_dev *dev = to_mdev(context->device);
2105 u16 page_idx = get_extended_index(vma->vm_pgoff);
2106 size_t map_size = vma->vm_end - vma->vm_start;
2107 u32 npages = map_size >> PAGE_SHIFT;
2108 phys_addr_t pfn;
Ariel Levkovich24da0012018-04-05 18:53:27 +03002109
2110 if (find_next_zero_bit(mctx->dm_pages, page_idx + npages, page_idx) !=
2111 page_idx + npages)
2112 return -EINVAL;
2113
2114 pfn = ((pci_resource_start(dev->mdev->pdev, 0) +
2115 MLX5_CAP64_DEV_MEM(dev->mdev, memic_bar_start_addr)) >>
2116 PAGE_SHIFT) +
2117 page_idx;
Jason Gunthorpee2cd1d12018-09-16 20:43:10 +03002118 return rdma_user_mmap_io(context, vma, pfn, map_size,
2119 pgprot_writecombine(vma->vm_page_prot));
Ariel Levkovich24da0012018-04-05 18:53:27 +03002120}
2121
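/* Top-level mmap dispatcher: decode the command from vm_pgoff and route it
 * to the UAR, core clock, clock info or device memory mapping helpers.
 */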
Eli Cohene126ba92013-07-07 17:25:49 +03002122static int mlx5_ib_mmap(struct ib_ucontext *ibcontext, struct vm_area_struct *vma)
2123{
2124 struct mlx5_ib_ucontext *context = to_mucontext(ibcontext);
2125 struct mlx5_ib_dev *dev = to_mdev(ibcontext->device);
Eli Cohene126ba92013-07-07 17:25:49 +03002126 unsigned long command;
Eli Cohene126ba92013-07-07 17:25:49 +03002127 phys_addr_t pfn;
2128
2129 command = get_command(vma->vm_pgoff);
2130 switch (command) {
Guy Levi37aa5c32016-04-27 16:49:50 +03002131 case MLX5_IB_MMAP_WC_PAGE:
2132 case MLX5_IB_MMAP_NC_PAGE:
Eli Cohene126ba92013-07-07 17:25:49 +03002133 case MLX5_IB_MMAP_REGULAR_PAGE:
Yishai Hadas4ed131d2017-12-24 16:31:35 +02002134 case MLX5_IB_MMAP_ALLOC_WC:
Maor Gottlieb7c2344c2016-06-17 14:56:44 +03002135 return uar_mmap(dev, command, vma, context);
Eli Cohene126ba92013-07-07 17:25:49 +03002136
2137 case MLX5_IB_MMAP_GET_CONTIGUOUS_PAGES:
2138 return -ENOSYS;
2139
Matan Barakd69e3bc2015-12-15 20:30:13 +02002140 case MLX5_IB_MMAP_CORE_CLOCK:
Matan Barakd69e3bc2015-12-15 20:30:13 +02002141 if (vma->vm_end - vma->vm_start != PAGE_SIZE)
2142 return -EINVAL;
2143
Matan Barak6cbac1e2016-04-14 16:52:10 +03002144 if (vma->vm_flags & VM_WRITE)
Matan Barakd69e3bc2015-12-15 20:30:13 +02002145 return -EPERM;
2146
2147 /* Don't expose to user-space information it shouldn't have */
2148 if (PAGE_SIZE > 4096)
2149 return -EOPNOTSUPP;
2150
2151 vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
2152 pfn = (dev->mdev->iseg_base +
2153 offsetof(struct mlx5_init_seg, internal_timer_h)) >>
2154 PAGE_SHIFT;
2155 if (io_remap_pfn_range(vma, vma->vm_start, pfn,
2156 PAGE_SIZE, vma->vm_page_prot))
2157 return -EAGAIN;
Matan Barakd69e3bc2015-12-15 20:30:13 +02002158 break;
Feras Daoud5c99eae2018-01-16 20:08:41 +02002159 case MLX5_IB_MMAP_CLOCK_INFO:
2160 return mlx5_ib_mmap_clock_info_page(dev, vma, context);
Matan Barakd69e3bc2015-12-15 20:30:13 +02002161
Ariel Levkovich24da0012018-04-05 18:53:27 +03002162 case MLX5_IB_MMAP_DEVICE_MEM:
2163 return dm_mmap(ibcontext, vma);
2164
Eli Cohene126ba92013-07-07 17:25:49 +03002165 default:
2166 return -EINVAL;
2167 }
2168
2169 return 0;
2170}
2171
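/* Allocate device memory (MEMIC), return the start offset and page index to
 * userspace and mark the pages as owned by this ucontext.
 */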
Ariel Levkovich24da0012018-04-05 18:53:27 +03002172struct ib_dm *mlx5_ib_alloc_dm(struct ib_device *ibdev,
2173 struct ib_ucontext *context,
2174 struct ib_dm_alloc_attr *attr,
2175 struct uverbs_attr_bundle *attrs)
2176{
2177 u64 act_size = roundup(attr->length, MLX5_MEMIC_BASE_SIZE);
2178 struct mlx5_memic *memic = &to_mdev(ibdev)->memic;
2179 phys_addr_t memic_addr;
2180 struct mlx5_ib_dm *dm;
2181 u64 start_offset;
2182 u32 page_idx;
2183 int err;
2184
2185 dm = kzalloc(sizeof(*dm), GFP_KERNEL);
2186 if (!dm)
2187 return ERR_PTR(-ENOMEM);
2188
2189 mlx5_ib_dbg(to_mdev(ibdev), "alloc_memic req: user_length=0x%llx act_length=0x%llx log_alignment=%d\n",
2190 attr->length, act_size, attr->alignment);
2191
2192 err = mlx5_cmd_alloc_memic(memic, &memic_addr,
2193 act_size, attr->alignment);
2194 if (err)
2195 goto err_free;
2196
2197 start_offset = memic_addr & ~PAGE_MASK;
2198 page_idx = (memic_addr - pci_resource_start(memic->dev->pdev, 0) -
2199 MLX5_CAP64_DEV_MEM(memic->dev, memic_bar_start_addr)) >>
2200 PAGE_SHIFT;
2201
2202 err = uverbs_copy_to(attrs,
2203 MLX5_IB_ATTR_ALLOC_DM_RESP_START_OFFSET,
2204 &start_offset, sizeof(start_offset));
2205 if (err)
2206 goto err_dealloc;
2207
2208 err = uverbs_copy_to(attrs,
2209 MLX5_IB_ATTR_ALLOC_DM_RESP_PAGE_INDEX,
2210 &page_idx, sizeof(page_idx));
2211 if (err)
2212 goto err_dealloc;
2213
2214 bitmap_set(to_mucontext(context)->dm_pages, page_idx,
2215 DIV_ROUND_UP(act_size, PAGE_SIZE));
2216
2217 dm->dev_addr = memic_addr;
2218
2219 return &dm->ibdm;
2220
2221err_dealloc:
2222 mlx5_cmd_dealloc_memic(memic, memic_addr,
2223 act_size);
2224err_free:
2225 kfree(dm);
2226 return ERR_PTR(err);
2227}
2228
2229int mlx5_ib_dealloc_dm(struct ib_dm *ibdm)
2230{
2231 struct mlx5_memic *memic = &to_mdev(ibdm->device)->memic;
2232 struct mlx5_ib_dm *dm = to_mdm(ibdm);
2233 u64 act_size = roundup(dm->ibdm.length, MLX5_MEMIC_BASE_SIZE);
2234 u32 page_idx;
2235 int ret;
2236
2237 ret = mlx5_cmd_dealloc_memic(memic, dm->dev_addr, act_size);
2238 if (ret)
2239 return ret;
2240
2241 page_idx = (dm->dev_addr - pci_resource_start(memic->dev->pdev, 0) -
2242 MLX5_CAP64_DEV_MEM(memic->dev, memic_bar_start_addr)) >>
2243 PAGE_SHIFT;
2244 bitmap_clear(to_mucontext(ibdm->uobject->context)->dm_pages,
2245 page_idx,
2246 DIV_ROUND_UP(act_size, PAGE_SIZE));
2247
2248 kfree(dm);
2249
2250 return 0;
2251}
2252
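/* Allocate a protection domain, tagging the firmware command with the DEVX
 * uid of the issuing user context when one exists.
 */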
Eli Cohene126ba92013-07-07 17:25:49 +03002253static struct ib_pd *mlx5_ib_alloc_pd(struct ib_device *ibdev,
2254 struct ib_ucontext *context,
2255 struct ib_udata *udata)
2256{
2257 struct mlx5_ib_alloc_pd_resp resp;
2258 struct mlx5_ib_pd *pd;
2259 int err;
Yishai Hadasa1069c12018-09-20 21:39:19 +03002260 u32 out[MLX5_ST_SZ_DW(alloc_pd_out)] = {};
2261 u32 in[MLX5_ST_SZ_DW(alloc_pd_in)] = {};
2262 u16 uid = 0;
Eli Cohene126ba92013-07-07 17:25:49 +03002263
2264 pd = kmalloc(sizeof(*pd), GFP_KERNEL);
2265 if (!pd)
2266 return ERR_PTR(-ENOMEM);
2267
Yishai Hadas58895f02018-09-20 21:39:29 +03002268 uid = context ? to_mucontext(context)->devx_uid : 0;
Yishai Hadasa1069c12018-09-20 21:39:19 +03002269 MLX5_SET(alloc_pd_in, in, opcode, MLX5_CMD_OP_ALLOC_PD);
2270 MLX5_SET(alloc_pd_in, in, uid, uid);
2271 err = mlx5_cmd_exec(to_mdev(ibdev)->mdev, in, sizeof(in),
2272 out, sizeof(out));
Eli Cohene126ba92013-07-07 17:25:49 +03002273 if (err) {
2274 kfree(pd);
2275 return ERR_PTR(err);
2276 }
2277
Yishai Hadasa1069c12018-09-20 21:39:19 +03002278 pd->pdn = MLX5_GET(alloc_pd_out, out, pd);
2279 pd->uid = uid;
Eli Cohene126ba92013-07-07 17:25:49 +03002280 if (context) {
2281 resp.pdn = pd->pdn;
2282 if (ib_copy_to_udata(udata, &resp, sizeof(resp))) {
Yishai Hadasa1069c12018-09-20 21:39:19 +03002283 mlx5_cmd_dealloc_pd(to_mdev(ibdev)->mdev, pd->pdn, uid);
Eli Cohene126ba92013-07-07 17:25:49 +03002284 kfree(pd);
2285 return ERR_PTR(-EFAULT);
2286 }
Eli Cohene126ba92013-07-07 17:25:49 +03002287 }
2288
2289 return &pd->ibpd;
2290}
2291
2292static int mlx5_ib_dealloc_pd(struct ib_pd *pd)
2293{
2294 struct mlx5_ib_dev *mdev = to_mdev(pd->device);
2295 struct mlx5_ib_pd *mpd = to_mpd(pd);
2296
Yishai Hadasa1069c12018-09-20 21:39:19 +03002297 mlx5_cmd_dealloc_pd(mdev->mdev, mpd->pdn, mpd->uid);
Eli Cohene126ba92013-07-07 17:25:49 +03002298 kfree(mpd);
2299
2300 return 0;
2301}
2302
Maor Gottlieb466fa6d2016-08-30 16:58:36 +03002303enum {
2304 MATCH_CRITERIA_ENABLE_OUTER_BIT,
2305 MATCH_CRITERIA_ENABLE_MISC_BIT,
Ariel Levkovich71c6e862018-05-13 14:33:34 +03002306 MATCH_CRITERIA_ENABLE_INNER_BIT,
2307 MATCH_CRITERIA_ENABLE_MISC2_BIT
Maor Gottlieb466fa6d2016-08-30 16:58:36 +03002308};
Maor Gottlieb038d2ef2016-01-11 10:26:07 +02002309
Maor Gottlieb466fa6d2016-08-30 16:58:36 +03002310#define HEADER_IS_ZERO(match_criteria, headers) \
2311 !(memchr_inv(MLX5_ADDR_OF(fte_match_param, match_criteria, headers), \
2312 0, MLX5_FLD_SZ_BYTES(fte_match_param, headers))) \
2313
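/* Build the match_criteria_enable bitmask from whichever parameter groups
 * (outer, misc, inner, misc2) are non-zero in the match criteria.
 */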
2314static u8 get_match_criteria_enable(u32 *match_criteria)
2315{
2316 u8 match_criteria_enable;
2317
2318 match_criteria_enable =
2319 (!HEADER_IS_ZERO(match_criteria, outer_headers)) <<
2320 MATCH_CRITERIA_ENABLE_OUTER_BIT;
2321 match_criteria_enable |=
2322 (!HEADER_IS_ZERO(match_criteria, misc_parameters)) <<
2323 MATCH_CRITERIA_ENABLE_MISC_BIT;
2324 match_criteria_enable |=
2325 (!HEADER_IS_ZERO(match_criteria, inner_headers)) <<
2326 MATCH_CRITERIA_ENABLE_INNER_BIT;
Ariel Levkovich71c6e862018-05-13 14:33:34 +03002327 match_criteria_enable |=
2328 (!HEADER_IS_ZERO(match_criteria, misc_parameters_2)) <<
2329 MATCH_CRITERIA_ENABLE_MISC2_BIT;
Maor Gottlieb466fa6d2016-08-30 16:58:36 +03002330
2331 return match_criteria_enable;
Maor Gottlieb038d2ef2016-01-11 10:26:07 +02002332}
2333
Maor Gottliebca0d4752016-08-30 16:58:35 +03002334static void set_proto(void *outer_c, void *outer_v, u8 mask, u8 val)
2335{
2336 MLX5_SET(fte_match_set_lyr_2_4, outer_c, ip_protocol, mask);
2337 MLX5_SET(fte_match_set_lyr_2_4, outer_v, ip_protocol, val);
2338}
2339
Daria Velikovsky37da2a02018-05-07 10:20:02 +03002340static void set_flow_label(void *misc_c, void *misc_v, u32 mask, u32 val,
Moses Reuben2d1e6972016-11-14 19:04:52 +02002341 bool inner)
2342{
2343 if (inner) {
2344 MLX5_SET(fte_match_set_misc,
2345 misc_c, inner_ipv6_flow_label, mask);
2346 MLX5_SET(fte_match_set_misc,
2347 misc_v, inner_ipv6_flow_label, val);
2348 } else {
2349 MLX5_SET(fte_match_set_misc,
2350 misc_c, outer_ipv6_flow_label, mask);
2351 MLX5_SET(fte_match_set_misc,
2352 misc_v, outer_ipv6_flow_label, val);
2353 }
2354}
2355
Maor Gottliebca0d4752016-08-30 16:58:35 +03002356static void set_tos(void *outer_c, void *outer_v, u8 mask, u8 val)
2357{
2358 MLX5_SET(fte_match_set_lyr_2_4, outer_c, ip_ecn, mask);
2359 MLX5_SET(fte_match_set_lyr_2_4, outer_v, ip_ecn, val);
2360 MLX5_SET(fte_match_set_lyr_2_4, outer_c, ip_dscp, mask >> 2);
2361 MLX5_SET(fte_match_set_lyr_2_4, outer_v, ip_dscp, val >> 2);
2362}
2363
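/* Reject an MPLS match mask that sets fields the device cannot match on. */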
Ariel Levkovich71c6e862018-05-13 14:33:34 +03002364static int check_mpls_supp_fields(u32 field_support, const __be32 *set_mask)
2365{
2366 if (MLX5_GET(fte_match_mpls, set_mask, mpls_label) &&
2367 !(field_support & MLX5_FIELD_SUPPORT_MPLS_LABEL))
2368 return -EOPNOTSUPP;
2369
2370 if (MLX5_GET(fte_match_mpls, set_mask, mpls_exp) &&
2371 !(field_support & MLX5_FIELD_SUPPORT_MPLS_EXP))
2372 return -EOPNOTSUPP;
2373
2374 if (MLX5_GET(fte_match_mpls, set_mask, mpls_s_bos) &&
2375 !(field_support & MLX5_FIELD_SUPPORT_MPLS_S_BOS))
2376 return -EOPNOTSUPP;
2377
2378 if (MLX5_GET(fte_match_mpls, set_mask, mpls_ttl) &&
2379 !(field_support & MLX5_FIELD_SUPPORT_MPLS_TTL))
2380 return -EOPNOTSUPP;
2381
2382 return 0;
2383}
2384
Maor Gottliebc47ac6a2016-08-30 16:58:31 +03002385#define LAST_ETH_FIELD vlan_tag
2386#define LAST_IB_FIELD sl
Maor Gottliebca0d4752016-08-30 16:58:35 +03002387#define LAST_IPV4_FIELD tos
Maor Gottlieb466fa6d2016-08-30 16:58:36 +03002388#define LAST_IPV6_FIELD traffic_class
Maor Gottliebc47ac6a2016-08-30 16:58:31 +03002389#define LAST_TCP_UDP_FIELD src_port
Moses Reubenffb30d82016-11-14 19:04:50 +02002390#define LAST_TUNNEL_FIELD tunnel_id
Moses Reuben2ac693f2017-01-18 14:59:50 +02002391#define LAST_FLOW_TAG_FIELD tag_id
Slava Shwartsmana22ed862017-04-03 13:13:52 +03002392#define LAST_DROP_FIELD size
Raed Salem3b3233f2018-05-31 16:43:39 +03002393#define LAST_COUNTERS_FIELD counters
Maor Gottliebc47ac6a2016-08-30 16:58:31 +03002394
2395/* Field is the last supported field */
2396#define FIELDS_NOT_SUPPORTED(filter, field)\
2397 memchr_inv((void *)&filter.field +\
2398 sizeof(filter.field), 0,\
2399 sizeof(filter) -\
2400 offsetof(typeof(filter), field) -\
2401 sizeof(filter.field))
2402
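/* Translate an IB flow action (IPsec ESP or a driver-specific raw action:
 * modify header, decap or packet reformat) into mlx5_flow_act flags,
 * rejecting conflicting combinations.
 */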
Mark Bloch2ea26202018-09-06 17:27:03 +03002403int parse_flow_flow_action(struct mlx5_ib_flow_action *maction,
2404 bool is_egress,
2405 struct mlx5_flow_act *action)
Aviad Yehezkel802c2122018-03-28 09:27:53 +03002406{
Aviad Yehezkel802c2122018-03-28 09:27:53 +03002407
2408 switch (maction->ib_action.type) {
2409 case IB_FLOW_ACTION_ESP:
Mark Bloch501f14e2018-09-06 17:27:04 +03002410 if (action->action & (MLX5_FLOW_CONTEXT_ACTION_ENCRYPT |
2411 MLX5_FLOW_CONTEXT_ACTION_DECRYPT))
2412 return -EINVAL;
Aviad Yehezkel802c2122018-03-28 09:27:53 +03002413 /* Currently only AES_GCM keymat is supported by the driver */
2414 action->esp_id = (uintptr_t)maction->esp_aes_gcm.ctx;
Mark Bloch2ea26202018-09-06 17:27:03 +03002415 action->action |= is_egress ?
Aviad Yehezkel802c2122018-03-28 09:27:53 +03002416 MLX5_FLOW_CONTEXT_ACTION_ENCRYPT :
2417 MLX5_FLOW_CONTEXT_ACTION_DECRYPT;
2418 return 0;
Mark Blochb1085be2018-09-02 12:51:32 +03002419 case IB_FLOW_ACTION_UNSPECIFIED:
2420 if (maction->flow_action_raw.sub_type ==
2421 MLX5_IB_FLOW_ACTION_MODIFY_HEADER) {
Mark Bloch501f14e2018-09-06 17:27:04 +03002422 if (action->action & MLX5_FLOW_CONTEXT_ACTION_MOD_HDR)
2423 return -EINVAL;
Mark Blochb1085be2018-09-02 12:51:32 +03002424 action->action |= MLX5_FLOW_CONTEXT_ACTION_MOD_HDR;
2425 action->modify_id = maction->flow_action_raw.action_id;
2426 return 0;
2427 }
Mark Bloch10a30892018-09-02 12:51:34 +03002428 if (maction->flow_action_raw.sub_type ==
2429 MLX5_IB_FLOW_ACTION_DECAP) {
Mark Bloch501f14e2018-09-06 17:27:04 +03002430 if (action->action & MLX5_FLOW_CONTEXT_ACTION_DECAP)
2431 return -EINVAL;
Mark Bloch10a30892018-09-02 12:51:34 +03002432 action->action |= MLX5_FLOW_CONTEXT_ACTION_DECAP;
2433 return 0;
2434 }
Mark Bloche806f932018-09-02 12:51:36 +03002435 if (maction->flow_action_raw.sub_type ==
2436 MLX5_IB_FLOW_ACTION_PACKET_REFORMAT) {
Mark Bloch501f14e2018-09-06 17:27:04 +03002437 if (action->action &
2438 MLX5_FLOW_CONTEXT_ACTION_PACKET_REFORMAT)
2439 return -EINVAL;
Mark Bloche806f932018-09-02 12:51:36 +03002440 action->action |=
2441 MLX5_FLOW_CONTEXT_ACTION_PACKET_REFORMAT;
2442 action->reformat_id =
2443 maction->flow_action_raw.action_id;
2444 return 0;
2445 }
Mark Blochb1085be2018-09-02 12:51:32 +03002446 /* fall through */
Aviad Yehezkel802c2122018-03-28 09:27:53 +03002447 default:
2448 return -EOPNOTSUPP;
2449 }
2450}
2451
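/* Translate one ib_flow_spec into the mlx5 match_c/match_v buffers and flow
 * action, selecting inner or outer headers according to the spec type.
 */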
Ariel Levkovich19cc7522017-04-03 13:11:03 +03002452static int parse_flow_attr(struct mlx5_core_dev *mdev, u32 *match_c,
2453 u32 *match_v, const union ib_flow_spec *ib_spec,
Aviad Yehezkel802c2122018-03-28 09:27:53 +03002454 const struct ib_flow_attr *flow_attr,
Ariel Levkovich71c6e862018-05-13 14:33:34 +03002455 struct mlx5_flow_act *action, u32 prev_type)
Maor Gottlieb038d2ef2016-01-11 10:26:07 +02002456{
Maor Gottlieb466fa6d2016-08-30 16:58:36 +03002457 void *misc_params_c = MLX5_ADDR_OF(fte_match_param, match_c,
2458 misc_parameters);
2459 void *misc_params_v = MLX5_ADDR_OF(fte_match_param, match_v,
2460 misc_parameters);
Ariel Levkovich71c6e862018-05-13 14:33:34 +03002461 void *misc_params2_c = MLX5_ADDR_OF(fte_match_param, match_c,
2462 misc_parameters_2);
2463 void *misc_params2_v = MLX5_ADDR_OF(fte_match_param, match_v,
2464 misc_parameters_2);
Moses Reuben2d1e6972016-11-14 19:04:52 +02002465 void *headers_c;
2466 void *headers_v;
Ariel Levkovich19cc7522017-04-03 13:11:03 +03002467 int match_ipv;
Aviad Yehezkel802c2122018-03-28 09:27:53 +03002468 int ret;
Maor Gottlieb466fa6d2016-08-30 16:58:36 +03002469
Moses Reuben2d1e6972016-11-14 19:04:52 +02002470 if (ib_spec->type & IB_FLOW_SPEC_INNER) {
2471 headers_c = MLX5_ADDR_OF(fte_match_param, match_c,
2472 inner_headers);
2473 headers_v = MLX5_ADDR_OF(fte_match_param, match_v,
2474 inner_headers);
Ariel Levkovich19cc7522017-04-03 13:11:03 +03002475 match_ipv = MLX5_CAP_FLOWTABLE_NIC_RX(mdev,
2476 ft_field_support.inner_ip_version);
Moses Reuben2d1e6972016-11-14 19:04:52 +02002477 } else {
2478 headers_c = MLX5_ADDR_OF(fte_match_param, match_c,
2479 outer_headers);
2480 headers_v = MLX5_ADDR_OF(fte_match_param, match_v,
2481 outer_headers);
Ariel Levkovich19cc7522017-04-03 13:11:03 +03002482 match_ipv = MLX5_CAP_FLOWTABLE_NIC_RX(mdev,
2483 ft_field_support.outer_ip_version);
Moses Reuben2d1e6972016-11-14 19:04:52 +02002484 }
2485
2486 switch (ib_spec->type & ~IB_FLOW_SPEC_INNER) {
Maor Gottlieb038d2ef2016-01-11 10:26:07 +02002487 case IB_FLOW_SPEC_ETH:
Maor Gottliebc47ac6a2016-08-30 16:58:31 +03002488 if (FIELDS_NOT_SUPPORTED(ib_spec->eth.mask, LAST_ETH_FIELD))
Leon Romanovsky1ffd3a22017-01-18 14:59:51 +02002489 return -EOPNOTSUPP;
Maor Gottlieb038d2ef2016-01-11 10:26:07 +02002490
Moses Reuben2d1e6972016-11-14 19:04:52 +02002491 ether_addr_copy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_c,
Maor Gottlieb038d2ef2016-01-11 10:26:07 +02002492 dmac_47_16),
2493 ib_spec->eth.mask.dst_mac);
Moses Reuben2d1e6972016-11-14 19:04:52 +02002494 ether_addr_copy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v,
Maor Gottlieb038d2ef2016-01-11 10:26:07 +02002495 dmac_47_16),
2496 ib_spec->eth.val.dst_mac);
2497
Moses Reuben2d1e6972016-11-14 19:04:52 +02002498 ether_addr_copy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_c,
Maor Gottliebee3da802016-09-12 19:16:24 +03002499 smac_47_16),
2500 ib_spec->eth.mask.src_mac);
Moses Reuben2d1e6972016-11-14 19:04:52 +02002501 ether_addr_copy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v,
Maor Gottliebee3da802016-09-12 19:16:24 +03002502 smac_47_16),
2503 ib_spec->eth.val.src_mac);
2504
Maor Gottlieb038d2ef2016-01-11 10:26:07 +02002505 if (ib_spec->eth.mask.vlan_tag) {
Moses Reuben2d1e6972016-11-14 19:04:52 +02002506 MLX5_SET(fte_match_set_lyr_2_4, headers_c,
Mohamad Haj Yahia10543362016-10-09 16:25:43 +03002507 cvlan_tag, 1);
Moses Reuben2d1e6972016-11-14 19:04:52 +02002508 MLX5_SET(fte_match_set_lyr_2_4, headers_v,
Mohamad Haj Yahia10543362016-10-09 16:25:43 +03002509 cvlan_tag, 1);
Maor Gottlieb038d2ef2016-01-11 10:26:07 +02002510
Moses Reuben2d1e6972016-11-14 19:04:52 +02002511 MLX5_SET(fte_match_set_lyr_2_4, headers_c,
Maor Gottlieb038d2ef2016-01-11 10:26:07 +02002512 first_vid, ntohs(ib_spec->eth.mask.vlan_tag));
Moses Reuben2d1e6972016-11-14 19:04:52 +02002513 MLX5_SET(fte_match_set_lyr_2_4, headers_v,
Maor Gottlieb038d2ef2016-01-11 10:26:07 +02002514 first_vid, ntohs(ib_spec->eth.val.vlan_tag));
2515
Moses Reuben2d1e6972016-11-14 19:04:52 +02002516 MLX5_SET(fte_match_set_lyr_2_4, headers_c,
Maor Gottlieb038d2ef2016-01-11 10:26:07 +02002517 first_cfi,
2518 ntohs(ib_spec->eth.mask.vlan_tag) >> 12);
Moses Reuben2d1e6972016-11-14 19:04:52 +02002519 MLX5_SET(fte_match_set_lyr_2_4, headers_v,
Maor Gottlieb038d2ef2016-01-11 10:26:07 +02002520 first_cfi,
2521 ntohs(ib_spec->eth.val.vlan_tag) >> 12);
2522
Moses Reuben2d1e6972016-11-14 19:04:52 +02002523 MLX5_SET(fte_match_set_lyr_2_4, headers_c,
Maor Gottlieb038d2ef2016-01-11 10:26:07 +02002524 first_prio,
2525 ntohs(ib_spec->eth.mask.vlan_tag) >> 13);
Moses Reuben2d1e6972016-11-14 19:04:52 +02002526 MLX5_SET(fte_match_set_lyr_2_4, headers_v,
Maor Gottlieb038d2ef2016-01-11 10:26:07 +02002527 first_prio,
2528 ntohs(ib_spec->eth.val.vlan_tag) >> 13);
2529 }
Moses Reuben2d1e6972016-11-14 19:04:52 +02002530 MLX5_SET(fte_match_set_lyr_2_4, headers_c,
Maor Gottlieb038d2ef2016-01-11 10:26:07 +02002531 ethertype, ntohs(ib_spec->eth.mask.ether_type));
Moses Reuben2d1e6972016-11-14 19:04:52 +02002532 MLX5_SET(fte_match_set_lyr_2_4, headers_v,
Maor Gottlieb038d2ef2016-01-11 10:26:07 +02002533 ethertype, ntohs(ib_spec->eth.val.ether_type));
2534 break;
2535 case IB_FLOW_SPEC_IPV4:
Maor Gottliebc47ac6a2016-08-30 16:58:31 +03002536 if (FIELDS_NOT_SUPPORTED(ib_spec->ipv4.mask, LAST_IPV4_FIELD))
Leon Romanovsky1ffd3a22017-01-18 14:59:51 +02002537 return -EOPNOTSUPP;
Maor Gottlieb038d2ef2016-01-11 10:26:07 +02002538
Ariel Levkovich19cc7522017-04-03 13:11:03 +03002539 if (match_ipv) {
2540 MLX5_SET(fte_match_set_lyr_2_4, headers_c,
2541 ip_version, 0xf);
2542 MLX5_SET(fte_match_set_lyr_2_4, headers_v,
Boris Pismenny3346c482017-08-20 15:13:08 +03002543 ip_version, MLX5_FS_IPV4_VERSION);
Ariel Levkovich19cc7522017-04-03 13:11:03 +03002544 } else {
2545 MLX5_SET(fte_match_set_lyr_2_4, headers_c,
2546 ethertype, 0xffff);
2547 MLX5_SET(fte_match_set_lyr_2_4, headers_v,
2548 ethertype, ETH_P_IP);
2549 }
Maor Gottlieb038d2ef2016-01-11 10:26:07 +02002550
Moses Reuben2d1e6972016-11-14 19:04:52 +02002551 memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_c,
Maor Gottlieb038d2ef2016-01-11 10:26:07 +02002552 src_ipv4_src_ipv6.ipv4_layout.ipv4),
2553 &ib_spec->ipv4.mask.src_ip,
2554 sizeof(ib_spec->ipv4.mask.src_ip));
Moses Reuben2d1e6972016-11-14 19:04:52 +02002555 memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v,
Maor Gottlieb038d2ef2016-01-11 10:26:07 +02002556 src_ipv4_src_ipv6.ipv4_layout.ipv4),
2557 &ib_spec->ipv4.val.src_ip,
2558 sizeof(ib_spec->ipv4.val.src_ip));
Moses Reuben2d1e6972016-11-14 19:04:52 +02002559 memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_c,
Maor Gottlieb038d2ef2016-01-11 10:26:07 +02002560 dst_ipv4_dst_ipv6.ipv4_layout.ipv4),
2561 &ib_spec->ipv4.mask.dst_ip,
2562 sizeof(ib_spec->ipv4.mask.dst_ip));
Moses Reuben2d1e6972016-11-14 19:04:52 +02002563 memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v,
Maor Gottlieb038d2ef2016-01-11 10:26:07 +02002564 dst_ipv4_dst_ipv6.ipv4_layout.ipv4),
2565 &ib_spec->ipv4.val.dst_ip,
2566 sizeof(ib_spec->ipv4.val.dst_ip));
Maor Gottliebca0d4752016-08-30 16:58:35 +03002567
Moses Reuben2d1e6972016-11-14 19:04:52 +02002568 set_tos(headers_c, headers_v,
Maor Gottliebca0d4752016-08-30 16:58:35 +03002569 ib_spec->ipv4.mask.tos, ib_spec->ipv4.val.tos);
2570
Moses Reuben2d1e6972016-11-14 19:04:52 +02002571 set_proto(headers_c, headers_v,
Maor Gottliebca0d4752016-08-30 16:58:35 +03002572 ib_spec->ipv4.mask.proto, ib_spec->ipv4.val.proto);
Maor Gottlieb038d2ef2016-01-11 10:26:07 +02002573 break;
Maor Gottlieb026bae02016-06-17 15:14:51 +03002574 case IB_FLOW_SPEC_IPV6:
Maor Gottliebc47ac6a2016-08-30 16:58:31 +03002575 if (FIELDS_NOT_SUPPORTED(ib_spec->ipv6.mask, LAST_IPV6_FIELD))
Leon Romanovsky1ffd3a22017-01-18 14:59:51 +02002576 return -EOPNOTSUPP;
Maor Gottlieb026bae02016-06-17 15:14:51 +03002577
Ariel Levkovich19cc7522017-04-03 13:11:03 +03002578 if (match_ipv) {
2579 MLX5_SET(fte_match_set_lyr_2_4, headers_c,
2580 ip_version, 0xf);
2581 MLX5_SET(fte_match_set_lyr_2_4, headers_v,
Boris Pismenny3346c482017-08-20 15:13:08 +03002582 ip_version, MLX5_FS_IPV6_VERSION);
Ariel Levkovich19cc7522017-04-03 13:11:03 +03002583 } else {
2584 MLX5_SET(fte_match_set_lyr_2_4, headers_c,
2585 ethertype, 0xffff);
2586 MLX5_SET(fte_match_set_lyr_2_4, headers_v,
2587 ethertype, ETH_P_IPV6);
2588 }
Maor Gottlieb026bae02016-06-17 15:14:51 +03002589
Moses Reuben2d1e6972016-11-14 19:04:52 +02002590 memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_c,
Maor Gottlieb026bae02016-06-17 15:14:51 +03002591 src_ipv4_src_ipv6.ipv6_layout.ipv6),
2592 &ib_spec->ipv6.mask.src_ip,
2593 sizeof(ib_spec->ipv6.mask.src_ip));
Moses Reuben2d1e6972016-11-14 19:04:52 +02002594 memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v,
Maor Gottlieb026bae02016-06-17 15:14:51 +03002595 src_ipv4_src_ipv6.ipv6_layout.ipv6),
2596 &ib_spec->ipv6.val.src_ip,
2597 sizeof(ib_spec->ipv6.val.src_ip));
Moses Reuben2d1e6972016-11-14 19:04:52 +02002598 memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_c,
Maor Gottlieb026bae02016-06-17 15:14:51 +03002599 dst_ipv4_dst_ipv6.ipv6_layout.ipv6),
2600 &ib_spec->ipv6.mask.dst_ip,
2601 sizeof(ib_spec->ipv6.mask.dst_ip));
Moses Reuben2d1e6972016-11-14 19:04:52 +02002602 memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v,
Maor Gottlieb026bae02016-06-17 15:14:51 +03002603 dst_ipv4_dst_ipv6.ipv6_layout.ipv6),
2604 &ib_spec->ipv6.val.dst_ip,
2605 sizeof(ib_spec->ipv6.val.dst_ip));
Maor Gottlieb466fa6d2016-08-30 16:58:36 +03002606
Moses Reuben2d1e6972016-11-14 19:04:52 +02002607 set_tos(headers_c, headers_v,
Maor Gottlieb466fa6d2016-08-30 16:58:36 +03002608 ib_spec->ipv6.mask.traffic_class,
2609 ib_spec->ipv6.val.traffic_class);
2610
Moses Reuben2d1e6972016-11-14 19:04:52 +02002611 set_proto(headers_c, headers_v,
Maor Gottlieb466fa6d2016-08-30 16:58:36 +03002612 ib_spec->ipv6.mask.next_hdr,
2613 ib_spec->ipv6.val.next_hdr);
2614
Moses Reuben2d1e6972016-11-14 19:04:52 +02002615 set_flow_label(misc_params_c, misc_params_v,
2616 ntohl(ib_spec->ipv6.mask.flow_label),
2617 ntohl(ib_spec->ipv6.val.flow_label),
2618 ib_spec->type & IB_FLOW_SPEC_INNER);
Aviad Yehezkel802c2122018-03-28 09:27:53 +03002619 break;
2620 case IB_FLOW_SPEC_ESP:
2621 if (ib_spec->esp.mask.seq)
2622 return -EOPNOTSUPP;
Moses Reuben2d1e6972016-11-14 19:04:52 +02002623
Aviad Yehezkel802c2122018-03-28 09:27:53 +03002624 MLX5_SET(fte_match_set_misc, misc_params_c, outer_esp_spi,
2625 ntohl(ib_spec->esp.mask.spi));
2626 MLX5_SET(fte_match_set_misc, misc_params_v, outer_esp_spi,
2627 ntohl(ib_spec->esp.val.spi));
Maor Gottlieb026bae02016-06-17 15:14:51 +03002628 break;
Maor Gottlieb038d2ef2016-01-11 10:26:07 +02002629 case IB_FLOW_SPEC_TCP:
Maor Gottliebc47ac6a2016-08-30 16:58:31 +03002630 if (FIELDS_NOT_SUPPORTED(ib_spec->tcp_udp.mask,
2631 LAST_TCP_UDP_FIELD))
Leon Romanovsky1ffd3a22017-01-18 14:59:51 +02002632 return -EOPNOTSUPP;
Maor Gottlieb038d2ef2016-01-11 10:26:07 +02002633
Moses Reuben2d1e6972016-11-14 19:04:52 +02002634 MLX5_SET(fte_match_set_lyr_2_4, headers_c, ip_protocol,
Maor Gottlieb038d2ef2016-01-11 10:26:07 +02002635 0xff);
Moses Reuben2d1e6972016-11-14 19:04:52 +02002636 MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_protocol,
Maor Gottlieb038d2ef2016-01-11 10:26:07 +02002637 IPPROTO_TCP);
2638
Moses Reuben2d1e6972016-11-14 19:04:52 +02002639 MLX5_SET(fte_match_set_lyr_2_4, headers_c, tcp_sport,
Maor Gottlieb038d2ef2016-01-11 10:26:07 +02002640 ntohs(ib_spec->tcp_udp.mask.src_port));
Moses Reuben2d1e6972016-11-14 19:04:52 +02002641 MLX5_SET(fte_match_set_lyr_2_4, headers_v, tcp_sport,
Maor Gottlieb038d2ef2016-01-11 10:26:07 +02002642 ntohs(ib_spec->tcp_udp.val.src_port));
2643
Moses Reuben2d1e6972016-11-14 19:04:52 +02002644 MLX5_SET(fte_match_set_lyr_2_4, headers_c, tcp_dport,
Maor Gottlieb038d2ef2016-01-11 10:26:07 +02002645 ntohs(ib_spec->tcp_udp.mask.dst_port));
Moses Reuben2d1e6972016-11-14 19:04:52 +02002646 MLX5_SET(fte_match_set_lyr_2_4, headers_v, tcp_dport,
Maor Gottlieb038d2ef2016-01-11 10:26:07 +02002647 ntohs(ib_spec->tcp_udp.val.dst_port));
2648 break;
2649 case IB_FLOW_SPEC_UDP:
Maor Gottliebc47ac6a2016-08-30 16:58:31 +03002650 if (FIELDS_NOT_SUPPORTED(ib_spec->tcp_udp.mask,
2651 LAST_TCP_UDP_FIELD))
Leon Romanovsky1ffd3a22017-01-18 14:59:51 +02002652 return -EOPNOTSUPP;
Maor Gottlieb038d2ef2016-01-11 10:26:07 +02002653
Moses Reuben2d1e6972016-11-14 19:04:52 +02002654 MLX5_SET(fte_match_set_lyr_2_4, headers_c, ip_protocol,
Maor Gottlieb038d2ef2016-01-11 10:26:07 +02002655 0xff);
Moses Reuben2d1e6972016-11-14 19:04:52 +02002656 MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_protocol,
Maor Gottlieb038d2ef2016-01-11 10:26:07 +02002657 IPPROTO_UDP);
2658
Moses Reuben2d1e6972016-11-14 19:04:52 +02002659 MLX5_SET(fte_match_set_lyr_2_4, headers_c, udp_sport,
Maor Gottlieb038d2ef2016-01-11 10:26:07 +02002660 ntohs(ib_spec->tcp_udp.mask.src_port));
Moses Reuben2d1e6972016-11-14 19:04:52 +02002661 MLX5_SET(fte_match_set_lyr_2_4, headers_v, udp_sport,
Maor Gottlieb038d2ef2016-01-11 10:26:07 +02002662 ntohs(ib_spec->tcp_udp.val.src_port));
2663
Moses Reuben2d1e6972016-11-14 19:04:52 +02002664 MLX5_SET(fte_match_set_lyr_2_4, headers_c, udp_dport,
Maor Gottlieb038d2ef2016-01-11 10:26:07 +02002665 ntohs(ib_spec->tcp_udp.mask.dst_port));
Moses Reuben2d1e6972016-11-14 19:04:52 +02002666 MLX5_SET(fte_match_set_lyr_2_4, headers_v, udp_dport,
Maor Gottlieb038d2ef2016-01-11 10:26:07 +02002667 ntohs(ib_spec->tcp_udp.val.dst_port));
2668 break;
Ariel Levkovichda2f22a2018-05-13 14:33:33 +03002669 case IB_FLOW_SPEC_GRE:
2670 if (ib_spec->gre.mask.c_ks_res0_ver)
2671 return -EOPNOTSUPP;
2672
2673 MLX5_SET(fte_match_set_lyr_2_4, headers_c, ip_protocol,
2674 0xff);
2675 MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_protocol,
2676 IPPROTO_GRE);
2677
2678 MLX5_SET(fte_match_set_misc, misc_params_c, gre_protocol,
Maor Gottlieba93b6322018-07-01 15:50:17 +03002679 ntohs(ib_spec->gre.mask.protocol));
Ariel Levkovichda2f22a2018-05-13 14:33:33 +03002680 MLX5_SET(fte_match_set_misc, misc_params_v, gre_protocol,
2681 ntohs(ib_spec->gre.val.protocol));
2682
2683 memcpy(MLX5_ADDR_OF(fte_match_set_misc, misc_params_c,
2684 gre_key_h),
2685 &ib_spec->gre.mask.key,
2686 sizeof(ib_spec->gre.mask.key));
2687 memcpy(MLX5_ADDR_OF(fte_match_set_misc, misc_params_v,
2688 gre_key_h),
2689 &ib_spec->gre.val.key,
2690 sizeof(ib_spec->gre.val.key));
2691 break;
Ariel Levkovich71c6e862018-05-13 14:33:34 +03002692 case IB_FLOW_SPEC_MPLS:
2693 switch (prev_type) {
2694 case IB_FLOW_SPEC_UDP:
2695 if (check_mpls_supp_fields(MLX5_CAP_FLOWTABLE_NIC_RX(mdev,
2696 ft_field_support.outer_first_mpls_over_udp),
2697 &ib_spec->mpls.mask.tag))
2698 return -EOPNOTSUPP;
2699
2700 memcpy(MLX5_ADDR_OF(fte_match_set_misc2, misc_params2_v,
2701 outer_first_mpls_over_udp),
2702 &ib_spec->mpls.val.tag,
2703 sizeof(ib_spec->mpls.val.tag));
2704 memcpy(MLX5_ADDR_OF(fte_match_set_misc2, misc_params2_c,
2705 outer_first_mpls_over_udp),
2706 &ib_spec->mpls.mask.tag,
2707 sizeof(ib_spec->mpls.mask.tag));
2708 break;
2709 case IB_FLOW_SPEC_GRE:
2710 if (check_mpls_supp_fields(MLX5_CAP_FLOWTABLE_NIC_RX(mdev,
2711 ft_field_support.outer_first_mpls_over_gre),
2712 &ib_spec->mpls.mask.tag))
2713 return -EOPNOTSUPP;
2714
2715 memcpy(MLX5_ADDR_OF(fte_match_set_misc2, misc_params2_v,
2716 outer_first_mpls_over_gre),
2717 &ib_spec->mpls.val.tag,
2718 sizeof(ib_spec->mpls.val.tag));
2719 memcpy(MLX5_ADDR_OF(fte_match_set_misc2, misc_params2_c,
2720 outer_first_mpls_over_gre),
2721 &ib_spec->mpls.mask.tag,
2722 sizeof(ib_spec->mpls.mask.tag));
2723 break;
2724 default:
2725 if (ib_spec->type & IB_FLOW_SPEC_INNER) {
2726 if (check_mpls_supp_fields(MLX5_CAP_FLOWTABLE_NIC_RX(mdev,
2727 ft_field_support.inner_first_mpls),
2728 &ib_spec->mpls.mask.tag))
2729 return -EOPNOTSUPP;
2730
2731 memcpy(MLX5_ADDR_OF(fte_match_set_misc2, misc_params2_v,
2732 inner_first_mpls),
2733 &ib_spec->mpls.val.tag,
2734 sizeof(ib_spec->mpls.val.tag));
2735 memcpy(MLX5_ADDR_OF(fte_match_set_misc2, misc_params2_c,
2736 inner_first_mpls),
2737 &ib_spec->mpls.mask.tag,
2738 sizeof(ib_spec->mpls.mask.tag));
2739 } else {
2740 if (check_mpls_supp_fields(MLX5_CAP_FLOWTABLE_NIC_RX(mdev,
2741 ft_field_support.outer_first_mpls),
2742 &ib_spec->mpls.mask.tag))
2743 return -EOPNOTSUPP;
2744
2745 memcpy(MLX5_ADDR_OF(fte_match_set_misc2, misc_params2_v,
2746 outer_first_mpls),
2747 &ib_spec->mpls.val.tag,
2748 sizeof(ib_spec->mpls.val.tag));
2749 memcpy(MLX5_ADDR_OF(fte_match_set_misc2, misc_params2_c,
2750 outer_first_mpls),
2751 &ib_spec->mpls.mask.tag,
2752 sizeof(ib_spec->mpls.mask.tag));
2753 }
2754 }
2755 break;
Moses Reubenffb30d82016-11-14 19:04:50 +02002756 case IB_FLOW_SPEC_VXLAN_TUNNEL:
2757 if (FIELDS_NOT_SUPPORTED(ib_spec->tunnel.mask,
2758 LAST_TUNNEL_FIELD))
Leon Romanovsky1ffd3a22017-01-18 14:59:51 +02002759 return -EOPNOTSUPP;
Moses Reubenffb30d82016-11-14 19:04:50 +02002760
2761 MLX5_SET(fte_match_set_misc, misc_params_c, vxlan_vni,
2762 ntohl(ib_spec->tunnel.mask.tunnel_id));
2763 MLX5_SET(fte_match_set_misc, misc_params_v, vxlan_vni,
2764 ntohl(ib_spec->tunnel.val.tunnel_id));
2765 break;
Moses Reuben2ac693f2017-01-18 14:59:50 +02002766 case IB_FLOW_SPEC_ACTION_TAG:
2767 if (FIELDS_NOT_SUPPORTED(ib_spec->flow_tag,
2768 LAST_FLOW_TAG_FIELD))
2769 return -EOPNOTSUPP;
2770 if (ib_spec->flow_tag.tag_id >= BIT(24))
2771 return -EINVAL;
2772
Boris Pismenny075572d2017-08-16 09:33:30 +03002773 action->flow_tag = ib_spec->flow_tag.tag_id;
Paul Blakeyd5634fe2018-09-20 12:17:48 +02002774 action->flags |= FLOW_ACT_HAS_TAG;
Moses Reuben2ac693f2017-01-18 14:59:50 +02002775 break;
Slava Shwartsmana22ed862017-04-03 13:13:52 +03002776 case IB_FLOW_SPEC_ACTION_DROP:
2777 if (FIELDS_NOT_SUPPORTED(ib_spec->drop,
2778 LAST_DROP_FIELD))
2779 return -EOPNOTSUPP;
Boris Pismenny075572d2017-08-16 09:33:30 +03002780 action->action |= MLX5_FLOW_CONTEXT_ACTION_DROP;
Slava Shwartsmana22ed862017-04-03 13:13:52 +03002781 break;
Aviad Yehezkel802c2122018-03-28 09:27:53 +03002782 case IB_FLOW_SPEC_ACTION_HANDLE:
Mark Bloch2ea26202018-09-06 17:27:03 +03002783 ret = parse_flow_flow_action(to_mflow_act(ib_spec->action.act),
2784 flow_attr->flags & IB_FLOW_ATTR_FLAGS_EGRESS, action);
Aviad Yehezkel802c2122018-03-28 09:27:53 +03002785 if (ret)
2786 return ret;
2787 break;
Raed Salem3b3233f2018-05-31 16:43:39 +03002788 case IB_FLOW_SPEC_ACTION_COUNT:
2789 if (FIELDS_NOT_SUPPORTED(ib_spec->flow_count,
2790 LAST_COUNTERS_FIELD))
2791 return -EOPNOTSUPP;
2792
2793		/* for now, only one counters spec per flow is supported */
2794 if (action->action & MLX5_FLOW_CONTEXT_ACTION_COUNT)
2795 return -EINVAL;
2796
2797 action->counters = ib_spec->flow_count.counters;
2798 action->action |= MLX5_FLOW_CONTEXT_ACTION_COUNT;
2799 break;
Maor Gottlieb038d2ef2016-01-11 10:26:07 +02002800 default:
2801 return -EINVAL;
2802 }
2803
2804 return 0;
2805}
2806
2807/* If a flow could catch both multicast and unicast packets,
2808 * it is not placed in the multicast flow steering table, as such a
2809 * rule could steal other multicast packets.
2810 */
Yishai Hadasa550ddf2017-08-17 15:52:33 +03002811static bool flow_is_multicast_only(const struct ib_flow_attr *ib_attr)
Maor Gottlieb038d2ef2016-01-11 10:26:07 +02002812{
Yishai Hadas81e30882017-06-08 16:15:09 +03002813 union ib_flow_spec *flow_spec;
Maor Gottlieb038d2ef2016-01-11 10:26:07 +02002814
2815 if (ib_attr->type != IB_FLOW_ATTR_NORMAL ||
Maor Gottlieb038d2ef2016-01-11 10:26:07 +02002816 ib_attr->num_of_specs < 1)
2817 return false;
2818
Yishai Hadas81e30882017-06-08 16:15:09 +03002819 flow_spec = (union ib_flow_spec *)(ib_attr + 1);
2820 if (flow_spec->type == IB_FLOW_SPEC_IPV4) {
2821 struct ib_flow_spec_ipv4 *ipv4_spec;
Maor Gottlieb038d2ef2016-01-11 10:26:07 +02002822
Yishai Hadas81e30882017-06-08 16:15:09 +03002823 ipv4_spec = (struct ib_flow_spec_ipv4 *)flow_spec;
2824 if (ipv4_is_multicast(ipv4_spec->val.dst_ip))
2825 return true;
2826
2827 return false;
2828 }
2829
2830 if (flow_spec->type == IB_FLOW_SPEC_ETH) {
2831 struct ib_flow_spec_eth *eth_spec;
2832
2833 eth_spec = (struct ib_flow_spec_eth *)flow_spec;
2834 return is_multicast_ether_addr(eth_spec->mask.dst_mac) &&
2835 is_multicast_ether_addr(eth_spec->val.dst_mac);
2836 }
2837
2838 return false;
Maor Gottlieb038d2ef2016-01-11 10:26:07 +02002839}
2840
Aviad Yehezkel802c2122018-03-28 09:27:53 +03002841enum valid_spec {
2842 VALID_SPEC_INVALID,
2843 VALID_SPEC_VALID,
2844 VALID_SPEC_NA,
2845};
2846
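/* Classify a flow spec for ESP/AES-GCM offload: VALID_SPEC_NA means the
 * crypto checks do not apply to this rule, while VALID/INVALID report
 * whether a crypto rule satisfies the egress restrictions below.
 */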
2847static enum valid_spec
2848is_valid_esp_aes_gcm(struct mlx5_core_dev *mdev,
2849 const struct mlx5_flow_spec *spec,
2850 const struct mlx5_flow_act *flow_act,
2851 bool egress)
2852{
2853 const u32 *match_c = spec->match_criteria;
2854 bool is_crypto =
2855 (flow_act->action & (MLX5_FLOW_CONTEXT_ACTION_ENCRYPT |
2856 MLX5_FLOW_CONTEXT_ACTION_DECRYPT));
2857 bool is_ipsec = mlx5_fs_is_ipsec_flow(match_c);
2858 bool is_drop = flow_act->action & MLX5_FLOW_CONTEXT_ACTION_DROP;
2859
2860 /*
2861	 * Currently only crypto rules are supported in egress; when regular
2862	 * egress rules become supported, keep returning VALID_SPEC_NA for them.
2863 */
2864 if (!is_crypto)
Mark Bloch78dd0c42018-09-02 12:51:31 +03002865 return VALID_SPEC_NA;
Aviad Yehezkel802c2122018-03-28 09:27:53 +03002866
2867 return is_crypto && is_ipsec &&
Paul Blakeyd5634fe2018-09-20 12:17:48 +02002868 (!egress || (!is_drop && !(flow_act->flags & FLOW_ACT_HAS_TAG))) ?
Aviad Yehezkel802c2122018-03-28 09:27:53 +03002869 VALID_SPEC_VALID : VALID_SPEC_INVALID;
2870}
2871
2872static bool is_valid_spec(struct mlx5_core_dev *mdev,
2873 const struct mlx5_flow_spec *spec,
2874 const struct mlx5_flow_act *flow_act,
2875 bool egress)
2876{
2877	/* We currently only support IPsec egress flows */
2878 return is_valid_esp_aes_gcm(mdev, spec, flow_act, egress) != VALID_SPEC_INVALID;
2879}
2880
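/* Verify that an L2 ethertype match in the flow attribute is consistent
 * with the IPv4/IPv6 spec that accompanies it, taking the device's
 * ip_version matching capability into account.
 */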
Ariel Levkovich19cc7522017-04-03 13:11:03 +03002881static bool is_valid_ethertype(struct mlx5_core_dev *mdev,
2882 const struct ib_flow_attr *flow_attr,
Ariel Levkovich0f750962017-04-03 13:11:02 +03002883 bool check_inner)
Maor Gottlieb038d2ef2016-01-11 10:26:07 +02002884{
2885 union ib_flow_spec *ib_spec = (union ib_flow_spec *)(flow_attr + 1);
Ariel Levkovich19cc7522017-04-03 13:11:03 +03002886 int match_ipv = check_inner ?
2887 MLX5_CAP_FLOWTABLE_NIC_RX(mdev,
2888 ft_field_support.inner_ip_version) :
2889 MLX5_CAP_FLOWTABLE_NIC_RX(mdev,
2890 ft_field_support.outer_ip_version);
Ariel Levkovich0f750962017-04-03 13:11:02 +03002891 int inner_bit = check_inner ? IB_FLOW_SPEC_INNER : 0;
2892 bool ipv4_spec_valid, ipv6_spec_valid;
2893 unsigned int ip_spec_type = 0;
2894 bool has_ethertype = false;
Maor Gottlieb038d2ef2016-01-11 10:26:07 +02002895 unsigned int spec_index;
Ariel Levkovich0f750962017-04-03 13:11:02 +03002896 bool mask_valid = true;
2897 u16 eth_type = 0;
2898 bool type_valid;
Maor Gottlieb038d2ef2016-01-11 10:26:07 +02002899
2900 /* Validate that ethertype is correct */
2901 for (spec_index = 0; spec_index < flow_attr->num_of_specs; spec_index++) {
Ariel Levkovich0f750962017-04-03 13:11:02 +03002902 if ((ib_spec->type == (IB_FLOW_SPEC_ETH | inner_bit)) &&
Maor Gottlieb038d2ef2016-01-11 10:26:07 +02002903 ib_spec->eth.mask.ether_type) {
Ariel Levkovich0f750962017-04-03 13:11:02 +03002904 mask_valid = (ib_spec->eth.mask.ether_type ==
2905 htons(0xffff));
2906 has_ethertype = true;
2907 eth_type = ntohs(ib_spec->eth.val.ether_type);
2908 } else if ((ib_spec->type == (IB_FLOW_SPEC_IPV4 | inner_bit)) ||
2909 (ib_spec->type == (IB_FLOW_SPEC_IPV6 | inner_bit))) {
2910 ip_spec_type = ib_spec->type;
Maor Gottlieb038d2ef2016-01-11 10:26:07 +02002911 }
2912 ib_spec = (void *)ib_spec + ib_spec->size;
2913 }
Ariel Levkovich0f750962017-04-03 13:11:02 +03002914
2915 type_valid = (!has_ethertype) || (!ip_spec_type);
2916 if (!type_valid && mask_valid) {
2917 ipv4_spec_valid = (eth_type == ETH_P_IP) &&
2918 (ip_spec_type == (IB_FLOW_SPEC_IPV4 | inner_bit));
2919 ipv6_spec_valid = (eth_type == ETH_P_IPV6) &&
2920 (ip_spec_type == (IB_FLOW_SPEC_IPV6 | inner_bit));
Ariel Levkovich19cc7522017-04-03 13:11:03 +03002921
2922 type_valid = (ipv4_spec_valid) || (ipv6_spec_valid) ||
2923 (((eth_type == ETH_P_MPLS_UC) ||
2924 (eth_type == ETH_P_MPLS_MC)) && match_ipv);
Ariel Levkovich0f750962017-04-03 13:11:02 +03002925 }
2926
2927 return type_valid;
2928}
2929
Ariel Levkovich19cc7522017-04-03 13:11:03 +03002930static bool is_valid_attr(struct mlx5_core_dev *mdev,
2931 const struct ib_flow_attr *flow_attr)
Ariel Levkovich0f750962017-04-03 13:11:02 +03002932{
Ariel Levkovich19cc7522017-04-03 13:11:03 +03002933 return is_valid_ethertype(mdev, flow_attr, false) &&
2934 is_valid_ethertype(mdev, flow_attr, true);
Maor Gottlieb038d2ef2016-01-11 10:26:07 +02002935}
2936
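/* Drop a reference on a flow table priority; the flow table itself is
 * destroyed once the last flow referencing it has been removed.
 */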
2937static void put_flow_table(struct mlx5_ib_dev *dev,
2938 struct mlx5_ib_flow_prio *prio, bool ft_added)
2939{
2940 prio->refcount -= !!ft_added;
2941 if (!prio->refcount) {
2942 mlx5_destroy_flow_table(prio->flow_table);
2943 prio->flow_table = NULL;
2944 }
2945}
2946
Raed Salem3b3233f2018-05-31 16:43:39 +03002947static void counters_clear_description(struct ib_counters *counters)
2948{
2949 struct mlx5_ib_mcounters *mcounters = to_mcounters(counters);
2950
2951 mutex_lock(&mcounters->mcntrs_mutex);
2952 kfree(mcounters->counters_data);
2953 mcounters->counters_data = NULL;
2954 mcounters->cntrs_max_index = 0;
2955 mutex_unlock(&mcounters->mcntrs_mutex);
2956}
2957
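/* Destroy a flow and every handler chained to it, releasing the flow
 * tables and any counters object bound to the flow.
 */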
Maor Gottlieb038d2ef2016-01-11 10:26:07 +02002958static int mlx5_ib_destroy_flow(struct ib_flow *flow_id)
2959{
Maor Gottlieb038d2ef2016-01-11 10:26:07 +02002960 struct mlx5_ib_flow_handler *handler = container_of(flow_id,
2961 struct mlx5_ib_flow_handler,
2962 ibflow);
2963 struct mlx5_ib_flow_handler *iter, *tmp;
Yishai Hadasd4be3f42018-07-23 15:25:10 +03002964 struct mlx5_ib_dev *dev = handler->dev;
Maor Gottlieb038d2ef2016-01-11 10:26:07 +02002965
Mark Bloch9a4ca382018-01-16 14:42:35 +00002966 mutex_lock(&dev->flow_db->lock);
Maor Gottlieb038d2ef2016-01-11 10:26:07 +02002967
2968 list_for_each_entry_safe(iter, tmp, &handler->list, list) {
Mark Bloch74491de2016-08-31 11:24:25 +00002969 mlx5_del_flow_rules(iter->rule);
Maor Gottliebcc0e5d42016-08-28 14:16:34 +03002970 put_flow_table(dev, iter->prio, true);
Maor Gottlieb038d2ef2016-01-11 10:26:07 +02002971 list_del(&iter->list);
2972 kfree(iter);
2973 }
2974
Mark Bloch74491de2016-08-31 11:24:25 +00002975 mlx5_del_flow_rules(handler->rule);
Maor Gottlieb5497adc2016-08-28 14:16:31 +03002976 put_flow_table(dev, handler->prio, true);
Raed Salem3b3233f2018-05-31 16:43:39 +03002977 if (handler->ibcounters &&
2978 atomic_read(&handler->ibcounters->usecnt) == 1)
2979 counters_clear_description(handler->ibcounters);
Maor Gottlieb038d2ef2016-01-11 10:26:07 +02002980
Raed Salem3b3233f2018-05-31 16:43:39 +03002981 mutex_unlock(&dev->flow_db->lock);
Yishai Hadasd4be3f42018-07-23 15:25:10 +03002982 if (handler->flow_matcher)
2983 atomic_dec(&handler->flow_matcher->usecnt);
Maor Gottlieb038d2ef2016-01-11 10:26:07 +02002984 kfree(handler);
2985
2986 return 0;
2987}
2988
Maor Gottlieb35d190112016-03-07 18:51:47 +02002989static int ib_prio_to_core_prio(unsigned int priority, bool dont_trap)
2990{
2991 priority *= 2;
2992 if (!dont_trap)
2993 priority++;
2994 return priority;
2995}
2996
Maor Gottliebcc0e5d42016-08-28 14:16:34 +03002997enum flow_table_type {
2998 MLX5_IB_FT_RX,
2999 MLX5_IB_FT_TX
3000};
3001
Maor Gottlieb00b7c2a2017-03-29 06:09:01 +03003002#define MLX5_FS_MAX_TYPES 6
3003#define MLX5_FS_MAX_ENTRIES BIT(16)
Yishai Hadasd4be3f42018-07-23 15:25:10 +03003004
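/* Create the auto-grouped flow table backing a priority slot and attach
 * it to the prio.
 */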
3005static struct mlx5_ib_flow_prio *_get_prio(struct mlx5_flow_namespace *ns,
3006 struct mlx5_ib_flow_prio *prio,
3007 int priority,
Mark Bloch4adda112018-09-02 12:51:33 +03003008 int num_entries, int num_groups,
3009 u32 flags)
Yishai Hadasd4be3f42018-07-23 15:25:10 +03003010{
3011 struct mlx5_flow_table *ft;
3012
3013 ft = mlx5_create_auto_grouped_flow_table(ns, priority,
3014 num_entries,
3015 num_groups,
Mark Bloch4adda112018-09-02 12:51:33 +03003016 0, flags);
Yishai Hadasd4be3f42018-07-23 15:25:10 +03003017 if (IS_ERR(ft))
3018 return ERR_CAST(ft);
3019
3020 prio->flow_table = ft;
3021 prio->refcount = 0;
3022 return prio;
3023}
3024
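/* Pick the flow steering namespace and priority slot for a flow attribute
 * (bypass/egress, leftovers or sniffer) and create the backing flow table
 * if it does not exist yet.
 */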
Maor Gottlieb038d2ef2016-01-11 10:26:07 +02003025static struct mlx5_ib_flow_prio *get_flow_table(struct mlx5_ib_dev *dev,
Maor Gottliebcc0e5d42016-08-28 14:16:34 +03003026 struct ib_flow_attr *flow_attr,
3027 enum flow_table_type ft_type)
Maor Gottlieb038d2ef2016-01-11 10:26:07 +02003028{
Maor Gottlieb35d190112016-03-07 18:51:47 +02003029 bool dont_trap = flow_attr->flags & IB_FLOW_ATTR_FLAGS_DONT_TRAP;
Maor Gottlieb038d2ef2016-01-11 10:26:07 +02003030 struct mlx5_flow_namespace *ns = NULL;
3031 struct mlx5_ib_flow_prio *prio;
3032 struct mlx5_flow_table *ft;
Maor Gottliebdac388e2017-03-29 06:09:00 +03003033 int max_table_size;
Maor Gottlieb038d2ef2016-01-11 10:26:07 +02003034 int num_entries;
3035 int num_groups;
Mark Bloch4adda112018-09-02 12:51:33 +03003036 u32 flags = 0;
Maor Gottlieb038d2ef2016-01-11 10:26:07 +02003037 int priority;
Maor Gottlieb038d2ef2016-01-11 10:26:07 +02003038
Maor Gottliebdac388e2017-03-29 06:09:00 +03003039 max_table_size = BIT(MLX5_CAP_FLOWTABLE_NIC_RX(dev->mdev,
3040 log_max_ft_size));
Maor Gottlieb038d2ef2016-01-11 10:26:07 +02003041 if (flow_attr->type == IB_FLOW_ATTR_NORMAL) {
Mark Bloch78dd0c42018-09-02 12:51:31 +03003042 enum mlx5_flow_namespace_type fn_type;
3043
3044 if (flow_is_multicast_only(flow_attr) &&
3045 !dont_trap)
Maor Gottlieb038d2ef2016-01-11 10:26:07 +02003046 priority = MLX5_IB_FLOW_MCAST_PRIO;
3047 else
Maor Gottlieb35d190112016-03-07 18:51:47 +02003048 priority = ib_prio_to_core_prio(flow_attr->priority,
3049 dont_trap);
Mark Bloch78dd0c42018-09-02 12:51:31 +03003050 if (ft_type == MLX5_IB_FT_RX) {
3051 fn_type = MLX5_FLOW_NAMESPACE_BYPASS;
3052 prio = &dev->flow_db->prios[priority];
Mark Bloch4adda112018-09-02 12:51:33 +03003053 if (!dev->rep &&
3054 MLX5_CAP_FLOWTABLE_NIC_RX(dev->mdev, decap))
3055 flags |= MLX5_FLOW_TABLE_TUNNEL_EN_DECAP;
Mark Bloch5c2db532018-09-02 12:51:35 +03003056 if (!dev->rep &&
3057 MLX5_CAP_FLOWTABLE_NIC_RX(dev->mdev,
3058 reformat_l3_tunnel_to_l2))
3059 flags |= MLX5_FLOW_TABLE_TUNNEL_EN_REFORMAT;
Mark Bloch78dd0c42018-09-02 12:51:31 +03003060 } else {
3061 max_table_size =
3062 BIT(MLX5_CAP_FLOWTABLE_NIC_TX(dev->mdev,
3063 log_max_ft_size));
3064 fn_type = MLX5_FLOW_NAMESPACE_EGRESS;
3065 prio = &dev->flow_db->egress_prios[priority];
Mark Bloch4adda112018-09-02 12:51:33 +03003066 if (!dev->rep &&
3067 MLX5_CAP_FLOWTABLE_NIC_TX(dev->mdev, reformat))
3068 flags |= MLX5_FLOW_TABLE_TUNNEL_EN_REFORMAT;
Mark Bloch78dd0c42018-09-02 12:51:31 +03003069 }
3070 ns = mlx5_get_flow_namespace(dev->mdev, fn_type);
Maor Gottlieb038d2ef2016-01-11 10:26:07 +02003071 num_entries = MLX5_FS_MAX_ENTRIES;
3072 num_groups = MLX5_FS_MAX_TYPES;
Maor Gottlieb038d2ef2016-01-11 10:26:07 +02003073 } else if (flow_attr->type == IB_FLOW_ATTR_ALL_DEFAULT ||
3074 flow_attr->type == IB_FLOW_ATTR_MC_DEFAULT) {
3075 ns = mlx5_get_flow_namespace(dev->mdev,
3076 MLX5_FLOW_NAMESPACE_LEFTOVERS);
3077 build_leftovers_ft_param(&priority,
3078 &num_entries,
3079 &num_groups);
Mark Bloch9a4ca382018-01-16 14:42:35 +00003080 prio = &dev->flow_db->prios[MLX5_IB_FLOW_LEFTOVERS_PRIO];
Maor Gottliebcc0e5d42016-08-28 14:16:34 +03003081 } else if (flow_attr->type == IB_FLOW_ATTR_SNIFFER) {
3082 if (!MLX5_CAP_FLOWTABLE(dev->mdev,
3083 allow_sniffer_and_nic_rx_shared_tir))
3084 return ERR_PTR(-ENOTSUPP);
3085
3086 ns = mlx5_get_flow_namespace(dev->mdev, ft_type == MLX5_IB_FT_RX ?
3087 MLX5_FLOW_NAMESPACE_SNIFFER_RX :
3088 MLX5_FLOW_NAMESPACE_SNIFFER_TX);
3089
Mark Bloch9a4ca382018-01-16 14:42:35 +00003090 prio = &dev->flow_db->sniffer[ft_type];
Maor Gottliebcc0e5d42016-08-28 14:16:34 +03003091 priority = 0;
3092 num_entries = 1;
3093 num_groups = 1;
Maor Gottlieb038d2ef2016-01-11 10:26:07 +02003094 }
3095
3096 if (!ns)
3097 return ERR_PTR(-ENOTSUPP);
3098
Maor Gottliebdac388e2017-03-29 06:09:00 +03003099 if (num_entries > max_table_size)
3100 return ERR_PTR(-ENOMEM);
3101
Maor Gottlieb038d2ef2016-01-11 10:26:07 +02003102 ft = prio->flow_table;
Yishai Hadasd4be3f42018-07-23 15:25:10 +03003103 if (!ft)
Mark Bloch4adda112018-09-02 12:51:33 +03003104 return _get_prio(ns, prio, priority, num_entries, num_groups,
3105 flags);
Maor Gottlieb038d2ef2016-01-11 10:26:07 +02003106
Yishai Hadasd4be3f42018-07-23 15:25:10 +03003107 return prio;
Maor Gottlieb038d2ef2016-01-11 10:26:07 +02003108}
3109
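/* Restrict a rule to a specific underlay QP by matching on the BTH
 * destination QP number, when a non-zero underlay QPN is given and the
 * device supports that match field.
 */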
Yishai Hadasa550ddf2017-08-17 15:52:33 +03003110static void set_underlay_qp(struct mlx5_ib_dev *dev,
3111 struct mlx5_flow_spec *spec,
3112 u32 underlay_qpn)
3113{
3114 void *misc_params_c = MLX5_ADDR_OF(fte_match_param,
3115 spec->match_criteria,
3116 misc_parameters);
3117 void *misc_params_v = MLX5_ADDR_OF(fte_match_param, spec->match_value,
3118 misc_parameters);
3119
3120 if (underlay_qpn &&
3121 MLX5_CAP_FLOWTABLE_NIC_RX(dev->mdev,
3122 ft_field_support.bth_dst_qp)) {
3123 MLX5_SET(fte_match_set_misc,
3124 misc_params_v, bth_dst_qp, underlay_qpn);
3125 MLX5_SET(fte_match_set_misc,
3126 misc_params_c, bth_dst_qp, 0xffffff);
3127 }
3128}
3129
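/* Read the packet and byte counts of a hardware flow counter into the
 * read_counters attribute output array.
 */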
Raed Salem5e95af52018-05-31 16:43:40 +03003130static int read_flow_counters(struct ib_device *ibdev,
3131 struct mlx5_read_counters_attr *read_attr)
3132{
3133 struct mlx5_fc *fc = read_attr->hw_cntrs_hndl;
3134 struct mlx5_ib_dev *dev = to_mdev(ibdev);
3135
3136 return mlx5_fc_query(dev->mdev, fc,
3137 &read_attr->out[IB_COUNTER_PACKETS],
3138 &read_attr->out[IB_COUNTER_BYTES]);
3139}
3140
3141/* flow counters currently expose two counters: packets and bytes */
3142#define FLOW_COUNTERS_NUM 2
Raed Salem3b3233f2018-05-31 16:43:39 +03003143static int counters_set_description(struct ib_counters *counters,
3144 enum mlx5_ib_counters_type counters_type,
3145 struct mlx5_ib_flow_counters_desc *desc_data,
3146 u32 ncounters)
3147{
3148 struct mlx5_ib_mcounters *mcounters = to_mcounters(counters);
3149 u32 cntrs_max_index = 0;
3150 int i;
3151
3152 if (counters_type != MLX5_IB_COUNTERS_FLOW)
3153 return -EINVAL;
3154
3155 /* init the fields for the object */
3156 mcounters->type = counters_type;
Raed Salem5e95af52018-05-31 16:43:40 +03003157 mcounters->read_counters = read_flow_counters;
3158 mcounters->counters_num = FLOW_COUNTERS_NUM;
Raed Salem3b3233f2018-05-31 16:43:39 +03003159 mcounters->ncounters = ncounters;
3160	/* each counter entry has both a description and an index */
3161 for (i = 0; i < ncounters; i++) {
3162 if (desc_data[i].description > IB_COUNTER_BYTES)
3163 return -EINVAL;
3164
3165 if (cntrs_max_index <= desc_data[i].index)
3166 cntrs_max_index = desc_data[i].index + 1;
3167 }
3168
3169 mutex_lock(&mcounters->mcntrs_mutex);
3170 mcounters->counters_data = desc_data;
3171 mcounters->cntrs_max_index = cntrs_max_index;
3172 mutex_unlock(&mcounters->mcntrs_mutex);
3173
3174 return 0;
3175}
3176
3177#define MAX_COUNTERS_NUM (USHRT_MAX / (sizeof(u32) * 2))
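/* Bind user supplied counter descriptions to an ib_counters object and
 * allocate the hardware flow counter on first use.
 */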
3178static int flow_counters_set_data(struct ib_counters *ibcounters,
3179 struct mlx5_ib_create_flow *ucmd)
3180{
3181 struct mlx5_ib_mcounters *mcounters = to_mcounters(ibcounters);
3182 struct mlx5_ib_flow_counters_data *cntrs_data = NULL;
3183 struct mlx5_ib_flow_counters_desc *desc_data = NULL;
3184 bool hw_hndl = false;
3185 int ret = 0;
3186
3187 if (ucmd && ucmd->ncounters_data != 0) {
3188 cntrs_data = ucmd->data;
3189 if (cntrs_data->ncounters > MAX_COUNTERS_NUM)
3190 return -EINVAL;
3191
3192 desc_data = kcalloc(cntrs_data->ncounters,
3193 sizeof(*desc_data),
3194 GFP_KERNEL);
3195 if (!desc_data)
3196 return -ENOMEM;
3197
3198 if (copy_from_user(desc_data,
3199 u64_to_user_ptr(cntrs_data->counters_data),
3200 sizeof(*desc_data) * cntrs_data->ncounters)) {
3201 ret = -EFAULT;
3202 goto free;
3203 }
3204 }
3205
3206 if (!mcounters->hw_cntrs_hndl) {
3207 mcounters->hw_cntrs_hndl = mlx5_fc_create(
3208 to_mdev(ibcounters->device)->mdev, false);
weiyongjun (A)e31abf72018-06-07 01:47:41 +00003209 if (IS_ERR(mcounters->hw_cntrs_hndl)) {
3210 ret = PTR_ERR(mcounters->hw_cntrs_hndl);
Raed Salem3b3233f2018-05-31 16:43:39 +03003211 goto free;
3212 }
3213 hw_hndl = true;
3214 }
3215
3216 if (desc_data) {
3217 /* counters already bound to at least one flow */
3218 if (mcounters->cntrs_max_index) {
3219 ret = -EINVAL;
3220 goto free_hndl;
3221 }
3222
3223 ret = counters_set_description(ibcounters,
3224 MLX5_IB_COUNTERS_FLOW,
3225 desc_data,
3226 cntrs_data->ncounters);
3227 if (ret)
3228 goto free_hndl;
3229
3230 } else if (!mcounters->cntrs_max_index) {
3231 /* counters not bound yet, must have udata passed */
3232 ret = -EINVAL;
3233 goto free_hndl;
3234 }
3235
3236 return 0;
3237
3238free_hndl:
3239 if (hw_hndl) {
3240 mlx5_fc_destroy(to_mdev(ibcounters->device)->mdev,
3241 mcounters->hw_cntrs_hndl);
3242 mcounters->hw_cntrs_hndl = NULL;
3243 }
3244free:
3245 kfree(desc_data);
3246 return ret;
3247}
3248
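/* Translate an ib_flow_attr into an mlx5 flow rule: parse every spec,
 * apply the egress, counter and drop handling, and add the rule to the
 * priority's flow table.
 */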
Yishai Hadasa550ddf2017-08-17 15:52:33 +03003249static struct mlx5_ib_flow_handler *_create_flow_rule(struct mlx5_ib_dev *dev,
3250 struct mlx5_ib_flow_prio *ft_prio,
3251 const struct ib_flow_attr *flow_attr,
3252 struct mlx5_flow_destination *dst,
Raed Salem3b3233f2018-05-31 16:43:39 +03003253 u32 underlay_qpn,
3254 struct mlx5_ib_create_flow *ucmd)
Maor Gottlieb038d2ef2016-01-11 10:26:07 +02003255{
3256 struct mlx5_flow_table *ft = ft_prio->flow_table;
3257 struct mlx5_ib_flow_handler *handler;
Boris Pismenny075572d2017-08-16 09:33:30 +03003258 struct mlx5_flow_act flow_act = {.flow_tag = MLX5_FS_DEFAULT_FLOW_TAG};
Maor Gottliebc5bb1732016-07-04 17:23:05 +03003259 struct mlx5_flow_spec *spec;
Raed Salem3b3233f2018-05-31 16:43:39 +03003260 struct mlx5_flow_destination dest_arr[2] = {};
3261 struct mlx5_flow_destination *rule_dst = dest_arr;
Maor Gottliebdd063d02016-08-28 14:16:32 +03003262 const void *ib_flow = (const void *)flow_attr + sizeof(*flow_attr);
Maor Gottlieb038d2ef2016-01-11 10:26:07 +02003263 unsigned int spec_index;
Ariel Levkovich71c6e862018-05-13 14:33:34 +03003264 u32 prev_type = 0;
Maor Gottlieb038d2ef2016-01-11 10:26:07 +02003265 int err = 0;
Raed Salem3b3233f2018-05-31 16:43:39 +03003266 int dest_num = 0;
Aviad Yehezkel802c2122018-03-28 09:27:53 +03003267 bool is_egress = flow_attr->flags & IB_FLOW_ATTR_FLAGS_EGRESS;
Maor Gottlieb038d2ef2016-01-11 10:26:07 +02003268
Ariel Levkovich19cc7522017-04-03 13:11:03 +03003269 if (!is_valid_attr(dev->mdev, flow_attr))
Maor Gottlieb038d2ef2016-01-11 10:26:07 +02003270 return ERR_PTR(-EINVAL);
3271
Mark Bloch78dd0c42018-09-02 12:51:31 +03003272 if (dev->rep && is_egress)
3273 return ERR_PTR(-EINVAL);
3274
Leon Romanovsky1b9a07e2017-05-10 21:32:18 +03003275 spec = kvzalloc(sizeof(*spec), GFP_KERNEL);
Maor Gottlieb038d2ef2016-01-11 10:26:07 +02003276 handler = kzalloc(sizeof(*handler), GFP_KERNEL);
Maor Gottliebc5bb1732016-07-04 17:23:05 +03003277 if (!handler || !spec) {
Maor Gottlieb038d2ef2016-01-11 10:26:07 +02003278 err = -ENOMEM;
3279 goto free;
3280 }
3281
3282 INIT_LIST_HEAD(&handler->list);
Raed Salem3b3233f2018-05-31 16:43:39 +03003283 if (dst) {
3284 memcpy(&dest_arr[0], dst, sizeof(*dst));
3285 dest_num++;
3286 }
Maor Gottlieb038d2ef2016-01-11 10:26:07 +02003287
3288 for (spec_index = 0; spec_index < flow_attr->num_of_specs; spec_index++) {
Ariel Levkovich19cc7522017-04-03 13:11:03 +03003289 err = parse_flow_attr(dev->mdev, spec->match_criteria,
Slava Shwartsmana22ed862017-04-03 13:13:52 +03003290 spec->match_value,
Ariel Levkovich71c6e862018-05-13 14:33:34 +03003291 ib_flow, flow_attr, &flow_act,
3292 prev_type);
Maor Gottlieb038d2ef2016-01-11 10:26:07 +02003293 if (err < 0)
3294 goto free;
3295
Ariel Levkovich71c6e862018-05-13 14:33:34 +03003296 prev_type = ((union ib_flow_spec *)ib_flow)->type;
Maor Gottlieb038d2ef2016-01-11 10:26:07 +02003297 ib_flow += ((union ib_flow_spec *)ib_flow)->size;
3298 }
3299
Yishai Hadasa550ddf2017-08-17 15:52:33 +03003300 if (!flow_is_multicast_only(flow_attr))
3301 set_underlay_qp(dev, spec, underlay_qpn);
3302
Mark Bloch018a94e2018-01-16 14:44:29 +00003303 if (dev->rep) {
3304 void *misc;
3305
3306 misc = MLX5_ADDR_OF(fte_match_param, spec->match_value,
3307 misc_parameters);
3308 MLX5_SET(fte_match_set_misc, misc, source_port,
3309 dev->rep->vport);
3310 misc = MLX5_ADDR_OF(fte_match_param, spec->match_criteria,
3311 misc_parameters);
3312 MLX5_SET_TO_ONES(fte_match_set_misc, misc, source_port);
3313 }
3314
Maor Gottlieb466fa6d2016-08-30 16:58:36 +03003315 spec->match_criteria_enable = get_match_criteria_enable(spec->match_criteria);
Aviad Yehezkel802c2122018-03-28 09:27:53 +03003316
3317 if (is_egress &&
3318 !is_valid_spec(dev->mdev, spec, &flow_act, is_egress)) {
3319 err = -EINVAL;
3320 goto free;
3321 }
3322
Raed Salem3b3233f2018-05-31 16:43:39 +03003323 if (flow_act.action & MLX5_FLOW_CONTEXT_ACTION_COUNT) {
Mark Bloch171c7625b2018-10-03 00:03:35 +00003324 struct mlx5_ib_mcounters *mcounters;
3325
Raed Salem3b3233f2018-05-31 16:43:39 +03003326 err = flow_counters_set_data(flow_act.counters, ucmd);
3327 if (err)
3328 goto free;
3329
Mark Bloch171c7625b2018-10-03 00:03:35 +00003330 mcounters = to_mcounters(flow_act.counters);
Raed Salem3b3233f2018-05-31 16:43:39 +03003331 handler->ibcounters = flow_act.counters;
3332 dest_arr[dest_num].type =
3333 MLX5_FLOW_DESTINATION_TYPE_COUNTER;
Mark Bloch171c7625b2018-10-03 00:03:35 +00003334 dest_arr[dest_num].counter_id =
3335 mlx5_fc_id(mcounters->hw_cntrs_hndl);
Raed Salem3b3233f2018-05-31 16:43:39 +03003336 dest_num++;
3337 }
3338
Boris Pismenny075572d2017-08-16 09:33:30 +03003339 if (flow_act.action & MLX5_FLOW_CONTEXT_ACTION_DROP) {
Raed Salem3b3233f2018-05-31 16:43:39 +03003340 if (!(flow_act.action & MLX5_FLOW_CONTEXT_ACTION_COUNT)) {
3341 rule_dst = NULL;
3342 dest_num = 0;
3343 }
Slava Shwartsmana22ed862017-04-03 13:13:52 +03003344 } else {
Aviad Yehezkel802c2122018-03-28 09:27:53 +03003345 if (is_egress)
3346 flow_act.action |= MLX5_FLOW_CONTEXT_ACTION_ALLOW;
3347 else
3348 flow_act.action |=
Raed Salem3b3233f2018-05-31 16:43:39 +03003349 dest_num ? MLX5_FLOW_CONTEXT_ACTION_FWD_DEST :
Aviad Yehezkel802c2122018-03-28 09:27:53 +03003350 MLX5_FLOW_CONTEXT_ACTION_FWD_NEXT_PRIO;
Slava Shwartsmana22ed862017-04-03 13:13:52 +03003351 }
Moses Reuben2ac693f2017-01-18 14:59:50 +02003352
Paul Blakeyd5634fe2018-09-20 12:17:48 +02003353 if ((flow_act.flags & FLOW_ACT_HAS_TAG) &&
Moses Reuben2ac693f2017-01-18 14:59:50 +02003354 (flow_attr->type == IB_FLOW_ATTR_ALL_DEFAULT ||
3355 flow_attr->type == IB_FLOW_ATTR_MC_DEFAULT)) {
3356 mlx5_ib_warn(dev, "Flow tag %u and attribute type %x isn't allowed in leftovers\n",
Boris Pismenny075572d2017-08-16 09:33:30 +03003357 flow_act.flow_tag, flow_attr->type);
Moses Reuben2ac693f2017-01-18 14:59:50 +02003358 err = -EINVAL;
3359 goto free;
3360 }
Mark Bloch74491de2016-08-31 11:24:25 +00003361 handler->rule = mlx5_add_flow_rules(ft, spec,
Hadar Hen Zion66958ed2016-11-07 15:14:45 +02003362 &flow_act,
Slava Shwartsmana22ed862017-04-03 13:13:52 +03003363 rule_dst, dest_num);
Maor Gottlieb038d2ef2016-01-11 10:26:07 +02003364
3365 if (IS_ERR(handler->rule)) {
3366 err = PTR_ERR(handler->rule);
3367 goto free;
3368 }
3369
Maor Gottliebd9d49802016-08-28 14:16:33 +03003370 ft_prio->refcount++;
Maor Gottlieb5497adc2016-08-28 14:16:31 +03003371 handler->prio = ft_prio;
Yishai Hadasd4be3f42018-07-23 15:25:10 +03003372 handler->dev = dev;
Maor Gottlieb038d2ef2016-01-11 10:26:07 +02003373
3374 ft_prio->flow_table = ft;
3375free:
Raed Salem3b3233f2018-05-31 16:43:39 +03003376 if (err && handler) {
3377 if (handler->ibcounters &&
3378 atomic_read(&handler->ibcounters->usecnt) == 1)
3379 counters_clear_description(handler->ibcounters);
Maor Gottlieb038d2ef2016-01-11 10:26:07 +02003380 kfree(handler);
Raed Salem3b3233f2018-05-31 16:43:39 +03003381 }
Maor Gottliebc5bb1732016-07-04 17:23:05 +03003382 kvfree(spec);
Maor Gottlieb038d2ef2016-01-11 10:26:07 +02003383 return err ? ERR_PTR(err) : handler;
3384}
3385
Yishai Hadasa550ddf2017-08-17 15:52:33 +03003386static struct mlx5_ib_flow_handler *create_flow_rule(struct mlx5_ib_dev *dev,
3387 struct mlx5_ib_flow_prio *ft_prio,
3388 const struct ib_flow_attr *flow_attr,
3389 struct mlx5_flow_destination *dst)
3390{
Raed Salem3b3233f2018-05-31 16:43:39 +03003391 return _create_flow_rule(dev, ft_prio, flow_attr, dst, 0, NULL);
Yishai Hadasa550ddf2017-08-17 15:52:33 +03003392}
3393
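/* A DONT_TRAP flow is installed as two chained rules: one without a
 * destination, which forwards matching packets to the next priority, and
 * one with the requested destination.
 */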
Maor Gottlieb35d190112016-03-07 18:51:47 +02003394static struct mlx5_ib_flow_handler *create_dont_trap_rule(struct mlx5_ib_dev *dev,
3395 struct mlx5_ib_flow_prio *ft_prio,
3396 struct ib_flow_attr *flow_attr,
3397 struct mlx5_flow_destination *dst)
3398{
3399 struct mlx5_ib_flow_handler *handler_dst = NULL;
3400 struct mlx5_ib_flow_handler *handler = NULL;
3401
3402 handler = create_flow_rule(dev, ft_prio, flow_attr, NULL);
3403 if (!IS_ERR(handler)) {
3404 handler_dst = create_flow_rule(dev, ft_prio,
3405 flow_attr, dst);
3406 if (IS_ERR(handler_dst)) {
Mark Bloch74491de2016-08-31 11:24:25 +00003407 mlx5_del_flow_rules(handler->rule);
Maor Gottliebd9d49802016-08-28 14:16:33 +03003408 ft_prio->refcount--;
Maor Gottlieb35d190112016-03-07 18:51:47 +02003409 kfree(handler);
3410 handler = handler_dst;
3411 } else {
3412 list_add(&handler_dst->list, &handler->list);
3413 }
3414 }
3415
3416 return handler;
3417}
Maor Gottlieb038d2ef2016-01-11 10:26:07 +02003418enum {
3419 LEFTOVERS_MC,
3420 LEFTOVERS_UC,
3421};
3422
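/* Leftovers rules catch packets that did not match any other rule; the
 * multicast spec matches on the multicast bit of the destination MAC,
 * and for ALL_DEFAULT a second rule is chained for unicast traffic.
 */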
3423static struct mlx5_ib_flow_handler *create_leftovers_rule(struct mlx5_ib_dev *dev,
3424 struct mlx5_ib_flow_prio *ft_prio,
3425 struct ib_flow_attr *flow_attr,
3426 struct mlx5_flow_destination *dst)
3427{
3428 struct mlx5_ib_flow_handler *handler_ucast = NULL;
3429 struct mlx5_ib_flow_handler *handler = NULL;
3430
3431 static struct {
3432 struct ib_flow_attr flow_attr;
3433 struct ib_flow_spec_eth eth_flow;
3434 } leftovers_specs[] = {
3435 [LEFTOVERS_MC] = {
3436 .flow_attr = {
3437 .num_of_specs = 1,
3438 .size = sizeof(leftovers_specs[0])
3439 },
3440 .eth_flow = {
3441 .type = IB_FLOW_SPEC_ETH,
3442 .size = sizeof(struct ib_flow_spec_eth),
3443 .mask = {.dst_mac = {0x1} },
3444 .val = {.dst_mac = {0x1} }
3445 }
3446 },
3447 [LEFTOVERS_UC] = {
3448 .flow_attr = {
3449 .num_of_specs = 1,
3450 .size = sizeof(leftovers_specs[0])
3451 },
3452 .eth_flow = {
3453 .type = IB_FLOW_SPEC_ETH,
3454 .size = sizeof(struct ib_flow_spec_eth),
3455 .mask = {.dst_mac = {0x1} },
3456 .val = {.dst_mac = {} }
3457 }
3458 }
3459 };
3460
3461 handler = create_flow_rule(dev, ft_prio,
3462 &leftovers_specs[LEFTOVERS_MC].flow_attr,
3463 dst);
3464 if (!IS_ERR(handler) &&
3465 flow_attr->type == IB_FLOW_ATTR_ALL_DEFAULT) {
3466 handler_ucast = create_flow_rule(dev, ft_prio,
3467 &leftovers_specs[LEFTOVERS_UC].flow_attr,
3468 dst);
3469 if (IS_ERR(handler_ucast)) {
Mark Bloch74491de2016-08-31 11:24:25 +00003470 mlx5_del_flow_rules(handler->rule);
Maor Gottliebd9d49802016-08-28 14:16:33 +03003471 ft_prio->refcount--;
Maor Gottlieb038d2ef2016-01-11 10:26:07 +02003472 kfree(handler);
3473 handler = handler_ucast;
3474 } else {
3475 list_add(&handler_ucast->list, &handler->list);
3476 }
3477 }
3478
3479 return handler;
3480}
3481
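/* A sniffer flow mirrors RX and TX traffic to the given destination by
 * installing a match-all rule in both the sniffer RX and TX flow tables.
 */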
Maor Gottliebcc0e5d42016-08-28 14:16:34 +03003482static struct mlx5_ib_flow_handler *create_sniffer_rule(struct mlx5_ib_dev *dev,
3483 struct mlx5_ib_flow_prio *ft_rx,
3484 struct mlx5_ib_flow_prio *ft_tx,
3485 struct mlx5_flow_destination *dst)
3486{
3487 struct mlx5_ib_flow_handler *handler_rx;
3488 struct mlx5_ib_flow_handler *handler_tx;
3489 int err;
3490 static const struct ib_flow_attr flow_attr = {
3491 .num_of_specs = 0,
3492 .size = sizeof(flow_attr)
3493 };
3494
3495 handler_rx = create_flow_rule(dev, ft_rx, &flow_attr, dst);
3496 if (IS_ERR(handler_rx)) {
3497 err = PTR_ERR(handler_rx);
3498 goto err;
3499 }
3500
3501 handler_tx = create_flow_rule(dev, ft_tx, &flow_attr, dst);
3502 if (IS_ERR(handler_tx)) {
3503 err = PTR_ERR(handler_tx);
3504 goto err_tx;
3505 }
3506
3507 list_add(&handler_tx->list, &handler_rx->list);
3508
3509 return handler_rx;
3510
3511err_tx:
Mark Bloch74491de2016-08-31 11:24:25 +00003512 mlx5_del_flow_rules(handler_rx->rule);
Maor Gottliebcc0e5d42016-08-28 14:16:34 +03003513 ft_rx->refcount--;
3514 kfree(handler_rx);
3515err:
3516 return ERR_PTR(err);
3517}
3518
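/* Entry point for ib_create_flow(): validate the optional user command
 * and the flow attribute, resolve the destination and install the rule(s)
 * according to the flow type.
 */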
Maor Gottlieb038d2ef2016-01-11 10:26:07 +02003519static struct ib_flow *mlx5_ib_create_flow(struct ib_qp *qp,
3520 struct ib_flow_attr *flow_attr,
Matan Barak59082a32018-05-31 16:43:35 +03003521 int domain,
3522 struct ib_udata *udata)
Maor Gottlieb038d2ef2016-01-11 10:26:07 +02003523{
3524 struct mlx5_ib_dev *dev = to_mdev(qp->device);
Yishai Hadasd9f88e52016-08-28 10:58:37 +03003525 struct mlx5_ib_qp *mqp = to_mqp(qp);
Maor Gottlieb038d2ef2016-01-11 10:26:07 +02003526 struct mlx5_ib_flow_handler *handler = NULL;
3527 struct mlx5_flow_destination *dst = NULL;
Maor Gottliebcc0e5d42016-08-28 14:16:34 +03003528 struct mlx5_ib_flow_prio *ft_prio_tx = NULL;
Maor Gottlieb038d2ef2016-01-11 10:26:07 +02003529 struct mlx5_ib_flow_prio *ft_prio;
Aviad Yehezkel802c2122018-03-28 09:27:53 +03003530 bool is_egress = flow_attr->flags & IB_FLOW_ATTR_FLAGS_EGRESS;
Raed Salem3b3233f2018-05-31 16:43:39 +03003531 struct mlx5_ib_create_flow *ucmd = NULL, ucmd_hdr;
3532 size_t min_ucmd_sz, required_ucmd_sz;
Maor Gottlieb038d2ef2016-01-11 10:26:07 +02003533 int err;
Yishai Hadasa550ddf2017-08-17 15:52:33 +03003534 int underlay_qpn;
Maor Gottlieb038d2ef2016-01-11 10:26:07 +02003535
Raed Salem3b3233f2018-05-31 16:43:39 +03003536 if (udata && udata->inlen) {
3537 min_ucmd_sz = offsetof(typeof(ucmd_hdr), reserved) +
3538 sizeof(ucmd_hdr.reserved);
3539 if (udata->inlen < min_ucmd_sz)
3540 return ERR_PTR(-EOPNOTSUPP);
3541
3542 err = ib_copy_from_udata(&ucmd_hdr, udata, min_ucmd_sz);
3543 if (err)
3544 return ERR_PTR(err);
3545
3546		/* currently only one counters data is supported */
3547 if (ucmd_hdr.ncounters_data > 1)
3548 return ERR_PTR(-EINVAL);
3549
3550 required_ucmd_sz = min_ucmd_sz +
3551 sizeof(struct mlx5_ib_flow_counters_data) *
3552 ucmd_hdr.ncounters_data;
3553 if (udata->inlen > required_ucmd_sz &&
3554 !ib_is_udata_cleared(udata, required_ucmd_sz,
3555 udata->inlen - required_ucmd_sz))
3556 return ERR_PTR(-EOPNOTSUPP);
3557
3558 ucmd = kzalloc(required_ucmd_sz, GFP_KERNEL);
3559 if (!ucmd)
3560 return ERR_PTR(-ENOMEM);
3561
3562 err = ib_copy_from_udata(ucmd, udata, required_ucmd_sz);
Gustavo A. R. Silva299eafe2018-06-07 14:19:15 -05003563 if (err)
3564 goto free_ucmd;
Raed Salem3b3233f2018-05-31 16:43:39 +03003565 }
Matan Barak59082a32018-05-31 16:43:35 +03003566
Gustavo A. R. Silva299eafe2018-06-07 14:19:15 -05003567 if (flow_attr->priority > MLX5_IB_FLOW_LAST_PRIO) {
3568 err = -ENOMEM;
3569 goto free_ucmd;
3570 }
Maor Gottlieb038d2ef2016-01-11 10:26:07 +02003571
3572 if (domain != IB_FLOW_DOMAIN_USER ||
Daniel Jurgens508562d2018-01-04 17:25:34 +02003573 flow_attr->port > dev->num_ports ||
Aviad Yehezkel802c2122018-03-28 09:27:53 +03003574 (flow_attr->flags & ~(IB_FLOW_ATTR_FLAGS_DONT_TRAP |
Gustavo A. R. Silva299eafe2018-06-07 14:19:15 -05003575 IB_FLOW_ATTR_FLAGS_EGRESS))) {
3576 err = -EINVAL;
3577 goto free_ucmd;
3578 }
Aviad Yehezkel802c2122018-03-28 09:27:53 +03003579
3580 if (is_egress &&
3581 (flow_attr->type == IB_FLOW_ATTR_ALL_DEFAULT ||
Gustavo A. R. Silva299eafe2018-06-07 14:19:15 -05003582 flow_attr->type == IB_FLOW_ATTR_MC_DEFAULT)) {
3583 err = -EINVAL;
3584 goto free_ucmd;
3585 }
Maor Gottlieb038d2ef2016-01-11 10:26:07 +02003586
3587 dst = kzalloc(sizeof(*dst), GFP_KERNEL);
Gustavo A. R. Silva299eafe2018-06-07 14:19:15 -05003588 if (!dst) {
3589 err = -ENOMEM;
3590 goto free_ucmd;
3591 }
Maor Gottlieb038d2ef2016-01-11 10:26:07 +02003592
Mark Bloch9a4ca382018-01-16 14:42:35 +00003593 mutex_lock(&dev->flow_db->lock);
Maor Gottlieb038d2ef2016-01-11 10:26:07 +02003594
Aviad Yehezkel802c2122018-03-28 09:27:53 +03003595 ft_prio = get_flow_table(dev, flow_attr,
3596 is_egress ? MLX5_IB_FT_TX : MLX5_IB_FT_RX);
Maor Gottlieb038d2ef2016-01-11 10:26:07 +02003597 if (IS_ERR(ft_prio)) {
3598 err = PTR_ERR(ft_prio);
3599 goto unlock;
3600 }
Maor Gottliebcc0e5d42016-08-28 14:16:34 +03003601 if (flow_attr->type == IB_FLOW_ATTR_SNIFFER) {
3602 ft_prio_tx = get_flow_table(dev, flow_attr, MLX5_IB_FT_TX);
3603 if (IS_ERR(ft_prio_tx)) {
3604 err = PTR_ERR(ft_prio_tx);
3605 ft_prio_tx = NULL;
3606 goto destroy_ft;
3607 }
3608 }
Maor Gottlieb038d2ef2016-01-11 10:26:07 +02003609
Aviad Yehezkel802c2122018-03-28 09:27:53 +03003610 if (is_egress) {
3611 dst->type = MLX5_FLOW_DESTINATION_TYPE_PORT;
3612 } else {
3613 dst->type = MLX5_FLOW_DESTINATION_TYPE_TIR;
3614 if (mqp->flags & MLX5_IB_QP_RSS)
3615 dst->tir_num = mqp->rss_qp.tirn;
3616 else
3617 dst->tir_num = mqp->raw_packet_qp.rq.tirn;
3618 }
Maor Gottlieb038d2ef2016-01-11 10:26:07 +02003619
3620 if (flow_attr->type == IB_FLOW_ATTR_NORMAL) {
Maor Gottlieb35d190112016-03-07 18:51:47 +02003621 if (flow_attr->flags & IB_FLOW_ATTR_FLAGS_DONT_TRAP) {
3622 handler = create_dont_trap_rule(dev, ft_prio,
3623 flow_attr, dst);
3624 } else {
Yishai Hadasa550ddf2017-08-17 15:52:33 +03003625 underlay_qpn = (mqp->flags & MLX5_IB_QP_UNDERLAY) ?
3626 mqp->underlay_qpn : 0;
3627 handler = _create_flow_rule(dev, ft_prio, flow_attr,
Raed Salem3b3233f2018-05-31 16:43:39 +03003628 dst, underlay_qpn, ucmd);
Maor Gottlieb35d190112016-03-07 18:51:47 +02003629 }
Maor Gottlieb038d2ef2016-01-11 10:26:07 +02003630 } else if (flow_attr->type == IB_FLOW_ATTR_ALL_DEFAULT ||
3631 flow_attr->type == IB_FLOW_ATTR_MC_DEFAULT) {
3632 handler = create_leftovers_rule(dev, ft_prio, flow_attr,
3633 dst);
Maor Gottliebcc0e5d42016-08-28 14:16:34 +03003634 } else if (flow_attr->type == IB_FLOW_ATTR_SNIFFER) {
3635 handler = create_sniffer_rule(dev, ft_prio, ft_prio_tx, dst);
Maor Gottlieb038d2ef2016-01-11 10:26:07 +02003636 } else {
3637 err = -EINVAL;
3638 goto destroy_ft;
3639 }
3640
3641 if (IS_ERR(handler)) {
3642 err = PTR_ERR(handler);
3643 handler = NULL;
3644 goto destroy_ft;
3645 }
3646
Mark Bloch9a4ca382018-01-16 14:42:35 +00003647 mutex_unlock(&dev->flow_db->lock);
Maor Gottlieb038d2ef2016-01-11 10:26:07 +02003648 kfree(dst);
Raed Salem3b3233f2018-05-31 16:43:39 +03003649 kfree(ucmd);
Maor Gottlieb038d2ef2016-01-11 10:26:07 +02003650
3651 return &handler->ibflow;
3652
3653destroy_ft:
3654 put_flow_table(dev, ft_prio, false);
Maor Gottliebcc0e5d42016-08-28 14:16:34 +03003655 if (ft_prio_tx)
3656 put_flow_table(dev, ft_prio_tx, false);
Maor Gottlieb038d2ef2016-01-11 10:26:07 +02003657unlock:
Mark Bloch9a4ca382018-01-16 14:42:35 +00003658 mutex_unlock(&dev->flow_db->lock);
Maor Gottlieb038d2ef2016-01-11 10:26:07 +02003659 kfree(dst);
Gustavo A. R. Silva299eafe2018-06-07 14:19:15 -05003660free_ucmd:
Raed Salem3b3233f2018-05-31 16:43:39 +03003661 kfree(ucmd);
Maor Gottlieb038d2ef2016-01-11 10:26:07 +02003662 return ERR_PTR(err);
3663}
3664
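/* Select the priority slot for a raw (matcher based) flow and, on first
 * use, create its flow table after checking that the device can hold a
 * table of MLX5_FS_MAX_ENTRIES entries.
 */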
Mark Blochb47fd4f2018-09-06 17:27:07 +03003665static struct mlx5_ib_flow_prio *
3666_get_flow_table(struct mlx5_ib_dev *dev,
3667 struct mlx5_ib_flow_matcher *fs_matcher,
3668 bool mcast)
Yishai Hadasd4be3f42018-07-23 15:25:10 +03003669{
Yishai Hadasd4be3f42018-07-23 15:25:10 +03003670 struct mlx5_flow_namespace *ns = NULL;
3671 struct mlx5_ib_flow_prio *prio;
Mark Blochb47fd4f2018-09-06 17:27:07 +03003672 int max_table_size;
3673 u32 flags = 0;
3674 int priority;
Yishai Hadasd4be3f42018-07-23 15:25:10 +03003675
Mark Blochb47fd4f2018-09-06 17:27:07 +03003676 if (fs_matcher->ns_type == MLX5_FLOW_NAMESPACE_BYPASS) {
3677 max_table_size = BIT(MLX5_CAP_FLOWTABLE_NIC_RX(dev->mdev,
3678 log_max_ft_size));
3679 if (MLX5_CAP_FLOWTABLE_NIC_RX(dev->mdev, decap))
3680 flags |= MLX5_FLOW_TABLE_TUNNEL_EN_DECAP;
3681 if (MLX5_CAP_FLOWTABLE_NIC_RX(dev->mdev,
3682 reformat_l3_tunnel_to_l2))
3683 flags |= MLX5_FLOW_TABLE_TUNNEL_EN_REFORMAT;
3684 } else { /* Can only be MLX5_FLOW_NAMESPACE_EGRESS */
3685 max_table_size = BIT(MLX5_CAP_FLOWTABLE_NIC_TX(dev->mdev,
3686 log_max_ft_size));
3687 if (MLX5_CAP_FLOWTABLE_NIC_TX(dev->mdev, reformat))
3688 flags |= MLX5_FLOW_TABLE_TUNNEL_EN_REFORMAT;
3689 }
3690
Yishai Hadasd4be3f42018-07-23 15:25:10 +03003691 if (max_table_size < MLX5_FS_MAX_ENTRIES)
3692 return ERR_PTR(-ENOMEM);
3693
3694 if (mcast)
3695 priority = MLX5_IB_FLOW_MCAST_PRIO;
3696 else
Mark Blochb47fd4f2018-09-06 17:27:07 +03003697 priority = ib_prio_to_core_prio(fs_matcher->priority, false);
Yishai Hadasd4be3f42018-07-23 15:25:10 +03003698
Mark Blochb47fd4f2018-09-06 17:27:07 +03003699 ns = mlx5_get_flow_namespace(dev->mdev, fs_matcher->ns_type);
Yishai Hadasd4be3f42018-07-23 15:25:10 +03003700 if (!ns)
3701 return ERR_PTR(-ENOTSUPP);
3702
Mark Blochb47fd4f2018-09-06 17:27:07 +03003703 if (fs_matcher->ns_type == MLX5_FLOW_NAMESPACE_BYPASS)
3704 prio = &dev->flow_db->prios[priority];
3705 else
3706 prio = &dev->flow_db->egress_prios[priority];
Yishai Hadasd4be3f42018-07-23 15:25:10 +03003707
3708 if (prio->flow_table)
3709 return prio;
3710
3711 return _get_prio(ns, prio, priority, MLX5_FS_MAX_ENTRIES,
Mark Blochb47fd4f2018-09-06 17:27:07 +03003712 MLX5_FS_MAX_TYPES, flags);
Yishai Hadasd4be3f42018-07-23 15:25:10 +03003713}
3714
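/* Install a raw flow rule: the match value comes from the user supplied
 * command buffer and the match criteria from the flow matcher object.
 */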
3715static struct mlx5_ib_flow_handler *
3716_create_raw_flow_rule(struct mlx5_ib_dev *dev,
3717 struct mlx5_ib_flow_prio *ft_prio,
3718 struct mlx5_flow_destination *dst,
3719 struct mlx5_ib_flow_matcher *fs_matcher,
Mark Blochb823dd62018-09-06 17:27:05 +03003720 struct mlx5_flow_act *flow_act,
Mark Blochbfc5d832018-11-20 20:31:08 +02003721 void *cmd_in, int inlen,
3722 int dst_num)
Yishai Hadasd4be3f42018-07-23 15:25:10 +03003723{
3724 struct mlx5_ib_flow_handler *handler;
Yishai Hadasd4be3f42018-07-23 15:25:10 +03003725 struct mlx5_flow_spec *spec;
3726 struct mlx5_flow_table *ft = ft_prio->flow_table;
3727 int err = 0;
3728
3729 spec = kvzalloc(sizeof(*spec), GFP_KERNEL);
3730 handler = kzalloc(sizeof(*handler), GFP_KERNEL);
3731 if (!handler || !spec) {
3732 err = -ENOMEM;
3733 goto free;
3734 }
3735
3736 INIT_LIST_HEAD(&handler->list);
3737
3738 memcpy(spec->match_value, cmd_in, inlen);
3739 memcpy(spec->match_criteria, fs_matcher->matcher_mask.match_params,
3740 fs_matcher->mask_len);
3741 spec->match_criteria_enable = fs_matcher->match_criteria_enable;
3742
Yishai Hadasd4be3f42018-07-23 15:25:10 +03003743 handler->rule = mlx5_add_flow_rules(ft, spec,
Mark Blochbfc5d832018-11-20 20:31:08 +02003744 flow_act, dst, dst_num);
Yishai Hadasd4be3f42018-07-23 15:25:10 +03003745
3746 if (IS_ERR(handler->rule)) {
3747 err = PTR_ERR(handler->rule);
3748 goto free;
3749 }
3750
3751 ft_prio->refcount++;
3752 handler->prio = ft_prio;
3753 handler->dev = dev;
3754 ft_prio->flow_table = ft;
3755
3756free:
3757 if (err)
3758 kfree(handler);
3759 kvfree(spec);
3760 return err ? ERR_PTR(err) : handler;
3761}
3762
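/* Decide whether a raw flow matches only multicast traffic, by checking
 * the destination MAC or the destination IPv4 address in both the mask
 * and the value.
 */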
3763static bool raw_fs_is_multicast(struct mlx5_ib_flow_matcher *fs_matcher,
3764 void *match_v)
3765{
3766 void *match_c;
3767 void *match_v_set_lyr_2_4, *match_c_set_lyr_2_4;
3768 void *dmac, *dmac_mask;
3769 void *ipv4, *ipv4_mask;
3770
3771 if (!(fs_matcher->match_criteria_enable &
3772 (1 << MATCH_CRITERIA_ENABLE_OUTER_BIT)))
3773 return false;
3774
3775 match_c = fs_matcher->matcher_mask.match_params;
3776 match_v_set_lyr_2_4 = MLX5_ADDR_OF(fte_match_param, match_v,
3777 outer_headers);
3778 match_c_set_lyr_2_4 = MLX5_ADDR_OF(fte_match_param, match_c,
3779 outer_headers);
3780
3781 dmac = MLX5_ADDR_OF(fte_match_set_lyr_2_4, match_v_set_lyr_2_4,
3782 dmac_47_16);
3783 dmac_mask = MLX5_ADDR_OF(fte_match_set_lyr_2_4, match_c_set_lyr_2_4,
3784 dmac_47_16);
3785
3786 if (is_multicast_ether_addr(dmac) &&
3787 is_multicast_ether_addr(dmac_mask))
3788 return true;
3789
3790 ipv4 = MLX5_ADDR_OF(fte_match_set_lyr_2_4, match_v_set_lyr_2_4,
3791 dst_ipv4_dst_ipv6.ipv4_layout.ipv4);
3792
3793 ipv4_mask = MLX5_ADDR_OF(fte_match_set_lyr_2_4, match_c_set_lyr_2_4,
3794 dst_ipv4_dst_ipv6.ipv4_layout.ipv4);
3795
3796 if (ipv4_is_multicast(*(__be32 *)(ipv4)) &&
3797 ipv4_is_multicast(*(__be32 *)(ipv4_mask)))
3798 return true;
3799
3800 return false;
3801}
3802
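/* Install a raw flow rule for a flow matcher: resolve the destination
 * (TIR, flow table or port), append a counter destination when the COUNT
 * action is set and add the rule under the matcher's priority.
 */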
Yishai Hadas32269442018-07-23 15:25:09 +03003803struct mlx5_ib_flow_handler *
3804mlx5_ib_raw_fs_rule_add(struct mlx5_ib_dev *dev,
3805 struct mlx5_ib_flow_matcher *fs_matcher,
Mark Blochb823dd62018-09-06 17:27:05 +03003806 struct mlx5_flow_act *flow_act,
Mark Blochbfc5d832018-11-20 20:31:08 +02003807 u32 counter_id,
Yishai Hadas32269442018-07-23 15:25:09 +03003808 void *cmd_in, int inlen, int dest_id,
3809 int dest_type)
3810{
Yishai Hadasd4be3f42018-07-23 15:25:10 +03003811 struct mlx5_flow_destination *dst;
3812 struct mlx5_ib_flow_prio *ft_prio;
Yishai Hadasd4be3f42018-07-23 15:25:10 +03003813 struct mlx5_ib_flow_handler *handler;
Mark Blochbfc5d832018-11-20 20:31:08 +02003814 int dst_num = 0;
Yishai Hadasd4be3f42018-07-23 15:25:10 +03003815 bool mcast;
3816 int err;
3817
3818 if (fs_matcher->flow_type != MLX5_IB_FLOW_TYPE_NORMAL)
3819 return ERR_PTR(-EOPNOTSUPP);
3820
3821 if (fs_matcher->priority > MLX5_IB_FLOW_LAST_PRIO)
3822 return ERR_PTR(-ENOMEM);
3823
Mark Blochbfc5d832018-11-20 20:31:08 +02003824 dst = kzalloc(sizeof(*dst) * 2, GFP_KERNEL);
Yishai Hadasd4be3f42018-07-23 15:25:10 +03003825 if (!dst)
3826 return ERR_PTR(-ENOMEM);
3827
3828 mcast = raw_fs_is_multicast(fs_matcher, cmd_in);
3829 mutex_lock(&dev->flow_db->lock);
3830
Mark Blochb47fd4f2018-09-06 17:27:07 +03003831 ft_prio = _get_flow_table(dev, fs_matcher, mcast);
Yishai Hadasd4be3f42018-07-23 15:25:10 +03003832 if (IS_ERR(ft_prio)) {
3833 err = PTR_ERR(ft_prio);
3834 goto unlock;
3835 }
3836
Yishai Hadas6346f0b2018-07-23 15:25:11 +03003837 if (dest_type == MLX5_FLOW_DESTINATION_TYPE_TIR) {
Mark Blochbfc5d832018-11-20 20:31:08 +02003838 dst[dst_num].type = dest_type;
3839 dst[dst_num].tir_num = dest_id;
Mark Blochb823dd62018-09-06 17:27:05 +03003840 flow_act->action |= MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;
Mark Blocha7ee18b2018-09-06 17:27:08 +03003841 } else if (dest_type == MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE) {
Mark Blochbfc5d832018-11-20 20:31:08 +02003842 dst[dst_num].type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE_NUM;
3843 dst[dst_num].ft_num = dest_id;
Mark Blochb823dd62018-09-06 17:27:05 +03003844 flow_act->action |= MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;
Mark Blocha7ee18b2018-09-06 17:27:08 +03003845 } else {
Mark Blochbfc5d832018-11-20 20:31:08 +02003846 dst[dst_num].type = MLX5_FLOW_DESTINATION_TYPE_PORT;
Mark Blocha7ee18b2018-09-06 17:27:08 +03003847 flow_act->action |= MLX5_FLOW_CONTEXT_ACTION_ALLOW;
Yishai Hadas6346f0b2018-07-23 15:25:11 +03003848 }
3849
Mark Blochbfc5d832018-11-20 20:31:08 +02003850 dst_num++;
3851
3852 if (flow_act->action & MLX5_FLOW_CONTEXT_ACTION_COUNT) {
3853 dst[dst_num].type = MLX5_FLOW_DESTINATION_TYPE_COUNTER;
3854 dst[dst_num].counter_id = counter_id;
3855 dst_num++;
3856 }
3857
Mark Blochb823dd62018-09-06 17:27:05 +03003858 handler = _create_raw_flow_rule(dev, ft_prio, dst, fs_matcher, flow_act,
Mark Blochbfc5d832018-11-20 20:31:08 +02003859 cmd_in, inlen, dst_num);
Yishai Hadasd4be3f42018-07-23 15:25:10 +03003860
3861 if (IS_ERR(handler)) {
3862 err = PTR_ERR(handler);
3863 goto destroy_ft;
3864 }
3865
3866 mutex_unlock(&dev->flow_db->lock);
3867 atomic_inc(&fs_matcher->usecnt);
3868 handler->flow_matcher = fs_matcher;
3869
3870 kfree(dst);
3871
3872 return handler;
3873
3874destroy_ft:
3875 put_flow_table(dev, ft_prio, false);
3876unlock:
3877 mutex_unlock(&dev->flow_db->lock);
3878 kfree(dst);
3879
3880 return ERR_PTR(err);
Yishai Hadas32269442018-07-23 15:25:09 +03003881}
3882
Aviad Yehezkelc6475a02018-03-28 09:27:50 +03003883static u32 mlx5_ib_flow_action_flags_to_accel_xfrm_flags(u32 mlx5_flags)
3884{
3885 u32 flags = 0;
3886
3887 if (mlx5_flags & MLX5_IB_UAPI_FLOW_ACTION_FLAGS_REQUIRE_METADATA)
3888 flags |= MLX5_ACCEL_XFRM_FLAG_REQUIRE_METADATA;
3889
3890 return flags;
3891}
3892
3893#define MLX5_FLOW_ACTION_ESP_CREATE_LAST_SUPPORTED MLX5_IB_UAPI_FLOW_ACTION_FLAGS_REQUIRE_METADATA
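/* Create an IPsec ESP flow action backed by an mlx5 accel xfrm context;
 * only the AES-GCM keymat subset described in the comment below is
 * accepted.
 */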
3894static struct ib_flow_action *
3895mlx5_ib_create_flow_action_esp(struct ib_device *device,
3896 const struct ib_flow_action_attrs_esp *attr,
3897 struct uverbs_attr_bundle *attrs)
3898{
3899 struct mlx5_ib_dev *mdev = to_mdev(device);
3900 struct ib_uverbs_flow_action_esp_keymat_aes_gcm *aes_gcm;
3901 struct mlx5_accel_esp_xfrm_attrs accel_attrs = {};
3902 struct mlx5_ib_flow_action *action;
3903 u64 action_flags;
3904 u64 flags;
3905 int err = 0;
3906
Jason Gunthorpebccd0622018-07-26 16:37:14 -06003907 err = uverbs_get_flags64(
3908 &action_flags, attrs, MLX5_IB_ATTR_CREATE_FLOW_ACTION_FLAGS,
3909 ((MLX5_FLOW_ACTION_ESP_CREATE_LAST_SUPPORTED << 1) - 1));
3910 if (err)
3911 return ERR_PTR(err);
Aviad Yehezkelc6475a02018-03-28 09:27:50 +03003912
3913 flags = mlx5_ib_flow_action_flags_to_accel_xfrm_flags(action_flags);
3914
3915	/* We currently only support a subset of the standard features. Only a
3916	 * keymat of type AES_GCM, with icv_len == 16, iv_algo == SEQ and esn
3917	 * (with overlap) is supported. Full offload mode isn't supported.
3918 */
3919 if (!attr->keymat || attr->replay || attr->encap ||
3920 attr->spi || attr->seq || attr->tfc_pad ||
3921 attr->hard_limit_pkts ||
3922 (attr->flags & ~(IB_FLOW_ACTION_ESP_FLAGS_ESN_TRIGGERED |
3923 IB_UVERBS_FLOW_ACTION_ESP_FLAGS_ENCRYPT)))
3924 return ERR_PTR(-EOPNOTSUPP);
3925
3926 if (attr->keymat->protocol !=
3927 IB_UVERBS_FLOW_ACTION_ESP_KEYMAT_AES_GCM)
3928 return ERR_PTR(-EOPNOTSUPP);
3929
3930 aes_gcm = &attr->keymat->keymat.aes_gcm;
3931
3932 if (aes_gcm->icv_len != 16 ||
3933 aes_gcm->iv_algo != IB_UVERBS_FLOW_ACTION_IV_ALGO_SEQ)
3934 return ERR_PTR(-EOPNOTSUPP);
3935
3936 action = kmalloc(sizeof(*action), GFP_KERNEL);
3937 if (!action)
3938 return ERR_PTR(-ENOMEM);
3939
3940 action->esp_aes_gcm.ib_flags = attr->flags;
3941 memcpy(&accel_attrs.keymat.aes_gcm.aes_key, &aes_gcm->aes_key,
3942 sizeof(accel_attrs.keymat.aes_gcm.aes_key));
3943 accel_attrs.keymat.aes_gcm.key_len = aes_gcm->key_len * 8;
3944 memcpy(&accel_attrs.keymat.aes_gcm.salt, &aes_gcm->salt,
3945 sizeof(accel_attrs.keymat.aes_gcm.salt));
3946 memcpy(&accel_attrs.keymat.aes_gcm.seq_iv, &aes_gcm->iv,
3947 sizeof(accel_attrs.keymat.aes_gcm.seq_iv));
3948 accel_attrs.keymat.aes_gcm.icv_len = aes_gcm->icv_len * 8;
3949 accel_attrs.keymat.aes_gcm.iv_algo = MLX5_ACCEL_ESP_AES_GCM_IV_ALGO_SEQ;
3950 accel_attrs.keymat_type = MLX5_ACCEL_ESP_KEYMAT_AES_GCM;
3951
3952 accel_attrs.esn = attr->esn;
3953 if (attr->flags & IB_FLOW_ACTION_ESP_FLAGS_ESN_TRIGGERED)
3954 accel_attrs.flags |= MLX5_ACCEL_ESP_FLAGS_ESN_TRIGGERED;
3955 if (attr->flags & IB_UVERBS_FLOW_ACTION_ESP_FLAGS_ESN_NEW_WINDOW)
3956 accel_attrs.flags |= MLX5_ACCEL_ESP_FLAGS_ESN_STATE_OVERLAP;
3957
3958 if (attr->flags & IB_UVERBS_FLOW_ACTION_ESP_FLAGS_ENCRYPT)
3959 accel_attrs.action |= MLX5_ACCEL_ESP_ACTION_ENCRYPT;
3960
3961 action->esp_aes_gcm.ctx =
3962 mlx5_accel_esp_create_xfrm(mdev->mdev, &accel_attrs, flags);
3963 if (IS_ERR(action->esp_aes_gcm.ctx)) {
3964 err = PTR_ERR(action->esp_aes_gcm.ctx);
3965 goto err_parse;
3966 }
3967
3968 action->esp_aes_gcm.ib_flags = attr->flags;
3969
3970 return &action->ib_action;
3971
3972err_parse:
3973 kfree(action);
3974 return ERR_PTR(err);
3975}
3976
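/*
 * Modify an existing ESP flow action. Only the ESN value and the ESN
 * new-window (state overlap) flag may change, and only if the action was
 * created with ESN triggered.
 */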
Matan Barak349705c2018-03-28 09:27:51 +03003977static int
3978mlx5_ib_modify_flow_action_esp(struct ib_flow_action *action,
3979 const struct ib_flow_action_attrs_esp *attr,
3980 struct uverbs_attr_bundle *attrs)
3981{
3982 struct mlx5_ib_flow_action *maction = to_mflow_act(action);
3983 struct mlx5_accel_esp_xfrm_attrs accel_attrs;
3984 int err = 0;
3985
3986 if (attr->keymat || attr->replay || attr->encap ||
3987 attr->spi || attr->seq || attr->tfc_pad ||
3988 attr->hard_limit_pkts ||
3989 (attr->flags & ~(IB_FLOW_ACTION_ESP_FLAGS_ESN_TRIGGERED |
3990 IB_FLOW_ACTION_ESP_FLAGS_MOD_ESP_ATTRS |
3991 IB_UVERBS_FLOW_ACTION_ESP_FLAGS_ESN_NEW_WINDOW)))
3992 return -EOPNOTSUPP;
3993
3994 /* Only the ESN value or the MLX5_ACCEL_ESP_FLAGS_ESN_STATE_OVERLAP can
3995 * be modified.
3996 */
3997 if (!(maction->esp_aes_gcm.ib_flags &
3998 IB_FLOW_ACTION_ESP_FLAGS_ESN_TRIGGERED) &&
3999 attr->flags & (IB_FLOW_ACTION_ESP_FLAGS_ESN_TRIGGERED |
4000 IB_UVERBS_FLOW_ACTION_ESP_FLAGS_ESN_NEW_WINDOW))
4001 return -EINVAL;
4002
4003 memcpy(&accel_attrs, &maction->esp_aes_gcm.ctx->attrs,
4004 sizeof(accel_attrs));
4005
4006 accel_attrs.esn = attr->esn;
4007 if (attr->flags & IB_UVERBS_FLOW_ACTION_ESP_FLAGS_ESN_NEW_WINDOW)
4008 accel_attrs.flags |= MLX5_ACCEL_ESP_FLAGS_ESN_STATE_OVERLAP;
4009 else
4010 accel_attrs.flags &= ~MLX5_ACCEL_ESP_FLAGS_ESN_STATE_OVERLAP;
4011
4012 err = mlx5_accel_esp_modify_xfrm(maction->esp_aes_gcm.ctx,
4013 &accel_attrs);
4014 if (err)
4015 return err;
4016
4017 maction->esp_aes_gcm.ib_flags &=
4018 ~IB_UVERBS_FLOW_ACTION_ESP_FLAGS_ESN_NEW_WINDOW;
4019 maction->esp_aes_gcm.ib_flags |=
4020 attr->flags & IB_UVERBS_FLOW_ACTION_ESP_FLAGS_ESN_NEW_WINDOW;
4021
4022 return 0;
4023}
4024
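/* Tear down the hardware resources behind a flow action and free it. */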
Aviad Yehezkelc6475a02018-03-28 09:27:50 +03004025static int mlx5_ib_destroy_flow_action(struct ib_flow_action *action)
4026{
4027 struct mlx5_ib_flow_action *maction = to_mflow_act(action);
4028
4029 switch (action->type) {
4030 case IB_FLOW_ACTION_ESP:
4031 /*
4032 * We only support aes_gcm for now, so we implicitly know this is
4033 * the underlying crypto.
4034 */
4035 mlx5_accel_esp_destroy_xfrm(maction->esp_aes_gcm.ctx);
4036 break;
Mark Blochb4749bf2018-08-28 14:18:51 +03004037 case IB_FLOW_ACTION_UNSPECIFIED:
4038 mlx5_ib_destroy_flow_action_raw(maction);
4039 break;
Aviad Yehezkelc6475a02018-03-28 09:27:50 +03004040 default:
4041 WARN_ON(true);
4042 break;
4043 }
4044
4045 kfree(maction);
4046 return 0;
4047}
4048
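/* Attach a QP to a multicast group; underlay QPs are rejected. */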
Eli Cohene126ba92013-07-07 17:25:49 +03004049static int mlx5_ib_mcg_attach(struct ib_qp *ibqp, union ib_gid *gid, u16 lid)
4050{
4051 struct mlx5_ib_dev *dev = to_mdev(ibqp->device);
Yishai Hadas81e30882017-06-08 16:15:09 +03004052 struct mlx5_ib_qp *mqp = to_mqp(ibqp);
Eli Cohene126ba92013-07-07 17:25:49 +03004053 int err;
Yishai Hadas539ec982018-09-20 21:39:25 +03004054 u16 uid;
4055
4056 uid = ibqp->pd ?
4057 to_mpd(ibqp->pd)->uid : 0;
Eli Cohene126ba92013-07-07 17:25:49 +03004058
Yishai Hadas81e30882017-06-08 16:15:09 +03004059 if (mqp->flags & MLX5_IB_QP_UNDERLAY) {
4060 mlx5_ib_dbg(dev, "Attaching a multicast group to an underlay QP is not supported\n");
4061 return -EOPNOTSUPP;
4062 }
4063
Yishai Hadas539ec982018-09-20 21:39:25 +03004064 err = mlx5_cmd_attach_mcg(dev->mdev, gid, ibqp->qp_num, uid);
Eli Cohene126ba92013-07-07 17:25:49 +03004065 if (err)
4066 mlx5_ib_warn(dev, "failed attaching QPN 0x%x, MGID %pI6\n",
4067 ibqp->qp_num, gid->raw);
4068
4069 return err;
4070}
4071
4072static int mlx5_ib_mcg_detach(struct ib_qp *ibqp, union ib_gid *gid, u16 lid)
4073{
4074 struct mlx5_ib_dev *dev = to_mdev(ibqp->device);
4075 int err;
Yishai Hadas539ec982018-09-20 21:39:25 +03004076 u16 uid;
Eli Cohene126ba92013-07-07 17:25:49 +03004077
Yishai Hadas539ec982018-09-20 21:39:25 +03004078 uid = ibqp->pd ?
4079 to_mpd(ibqp->pd)->uid : 0;
4080 err = mlx5_cmd_detach_mcg(dev->mdev, gid, ibqp->qp_num, uid);
Eli Cohene126ba92013-07-07 17:25:49 +03004081 if (err)
4082 mlx5_ib_warn(dev, "failed detaching QPN 0x%x, MGID %pI6\n",
4083 ibqp->qp_num, gid->raw);
4084
4085 return err;
4086}
4087
4088static int init_node_data(struct mlx5_ib_dev *dev)
4089{
Majd Dibbiny1b5daf12015-06-04 19:30:46 +03004090 int err;
Eli Cohene126ba92013-07-07 17:25:49 +03004091
Majd Dibbiny1b5daf12015-06-04 19:30:46 +03004092 err = mlx5_query_node_desc(dev, dev->ib_dev.node_desc);
Eli Cohene126ba92013-07-07 17:25:49 +03004093 if (err)
Majd Dibbiny1b5daf12015-06-04 19:30:46 +03004094 return err;
Eli Cohene126ba92013-07-07 17:25:49 +03004095
Majd Dibbiny1b5daf12015-06-04 19:30:46 +03004096 dev->mdev->rev_id = dev->mdev->pdev->revision;
Eli Cohene126ba92013-07-07 17:25:49 +03004097
Majd Dibbiny1b5daf12015-06-04 19:30:46 +03004098 return mlx5_query_node_guid(dev, &dev->ib_dev.node_guid);
Eli Cohene126ba92013-07-07 17:25:49 +03004099}
4100
Parav Pandit508a5232018-10-11 22:31:54 +03004101static ssize_t fw_pages_show(struct device *device,
4102 struct device_attribute *attr, char *buf)
Eli Cohene126ba92013-07-07 17:25:49 +03004103{
4104 struct mlx5_ib_dev *dev =
4105 container_of(device, struct mlx5_ib_dev, ib_dev.dev);
4106
Jack Morgenstein9603b612014-07-28 23:30:22 +03004107 return sprintf(buf, "%d\n", dev->mdev->priv.fw_pages);
Eli Cohene126ba92013-07-07 17:25:49 +03004108}
Parav Pandit508a5232018-10-11 22:31:54 +03004109static DEVICE_ATTR_RO(fw_pages);
Eli Cohene126ba92013-07-07 17:25:49 +03004110
Parav Pandit508a5232018-10-11 22:31:54 +03004111static ssize_t reg_pages_show(struct device *device,
Eli Cohene126ba92013-07-07 17:25:49 +03004112 struct device_attribute *attr, char *buf)
4113{
4114 struct mlx5_ib_dev *dev =
4115 container_of(device, struct mlx5_ib_dev, ib_dev.dev);
4116
Haggai Eran6aec21f2014-12-11 17:04:23 +02004117 return sprintf(buf, "%d\n", atomic_read(&dev->mdev->priv.reg_pages));
Eli Cohene126ba92013-07-07 17:25:49 +03004118}
Parav Pandit508a5232018-10-11 22:31:54 +03004119static DEVICE_ATTR_RO(reg_pages);
Eli Cohene126ba92013-07-07 17:25:49 +03004120
Parav Pandit508a5232018-10-11 22:31:54 +03004121static ssize_t hca_type_show(struct device *device,
4122 struct device_attribute *attr, char *buf)
Eli Cohene126ba92013-07-07 17:25:49 +03004123{
4124 struct mlx5_ib_dev *dev =
4125 container_of(device, struct mlx5_ib_dev, ib_dev.dev);
Jack Morgenstein9603b612014-07-28 23:30:22 +03004126 return sprintf(buf, "MT%d\n", dev->mdev->pdev->device);
Eli Cohene126ba92013-07-07 17:25:49 +03004127}
Parav Pandit508a5232018-10-11 22:31:54 +03004128static DEVICE_ATTR_RO(hca_type);
Eli Cohene126ba92013-07-07 17:25:49 +03004129
Parav Pandit508a5232018-10-11 22:31:54 +03004130static ssize_t hw_rev_show(struct device *device,
4131 struct device_attribute *attr, char *buf)
Eli Cohene126ba92013-07-07 17:25:49 +03004132{
4133 struct mlx5_ib_dev *dev =
4134 container_of(device, struct mlx5_ib_dev, ib_dev.dev);
Jack Morgenstein9603b612014-07-28 23:30:22 +03004135 return sprintf(buf, "%x\n", dev->mdev->rev_id);
Eli Cohene126ba92013-07-07 17:25:49 +03004136}
Parav Pandit508a5232018-10-11 22:31:54 +03004137static DEVICE_ATTR_RO(hw_rev);
Eli Cohene126ba92013-07-07 17:25:49 +03004138
Parav Pandit508a5232018-10-11 22:31:54 +03004139static ssize_t board_id_show(struct device *device,
4140 struct device_attribute *attr, char *buf)
Eli Cohene126ba92013-07-07 17:25:49 +03004141{
4142 struct mlx5_ib_dev *dev =
4143 container_of(device, struct mlx5_ib_dev, ib_dev.dev);
4144 return sprintf(buf, "%.*s\n", MLX5_BOARD_ID_LEN,
Jack Morgenstein9603b612014-07-28 23:30:22 +03004145 dev->mdev->board_id);
Eli Cohene126ba92013-07-07 17:25:49 +03004146}
Parav Pandit508a5232018-10-11 22:31:54 +03004147static DEVICE_ATTR_RO(board_id);
Eli Cohene126ba92013-07-07 17:25:49 +03004148
Parav Pandit508a5232018-10-11 22:31:54 +03004149static struct attribute *mlx5_class_attributes[] = {
4150 &dev_attr_hw_rev.attr,
4151 &dev_attr_hca_type.attr,
4152 &dev_attr_board_id.attr,
4153 &dev_attr_fw_pages.attr,
4154 &dev_attr_reg_pages.attr,
4155 NULL,
4156};
Eli Cohene126ba92013-07-07 17:25:49 +03004157
Parav Pandit508a5232018-10-11 22:31:54 +03004158static const struct attribute_group mlx5_attr_group = {
4159 .attrs = mlx5_class_attributes,
Eli Cohene126ba92013-07-07 17:25:49 +03004160};
4161
Haggai Eran7722f472016-02-29 15:45:07 +02004162static void pkey_change_handler(struct work_struct *work)
4163{
4164 struct mlx5_ib_port_resources *ports =
4165 container_of(work, struct mlx5_ib_port_resources,
4166 pkey_change_work);
4167
4168 mutex_lock(&ports->devr->mutex);
4169 mlx5_ib_gsi_pkey_change(ports->gsi);
4170 mutex_unlock(&ports->devr->mutex);
4171}
4172
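/*
 * Flush handling for a fatal device error: walk all QPs that have
 * outstanding work and schedule a completion on their send/recv CQs so
 * their completion handlers run and consumers can drain them.
 */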
Maor Gottlieb89ea94a72016-06-17 15:01:38 +03004173static void mlx5_ib_handle_internal_error(struct mlx5_ib_dev *ibdev)
4174{
4175 struct mlx5_ib_qp *mqp;
4176 struct mlx5_ib_cq *send_mcq, *recv_mcq;
4177 struct mlx5_core_cq *mcq;
4178 struct list_head cq_armed_list;
4179 unsigned long flags_qp;
4180 unsigned long flags_cq;
4181 unsigned long flags;
4182
4183 INIT_LIST_HEAD(&cq_armed_list);
4184
4185 /* Go over the qp list residing on that ibdev, syncing with create/destroy qp. */
4186 spin_lock_irqsave(&ibdev->reset_flow_resource_lock, flags);
4187 list_for_each_entry(mqp, &ibdev->qp_list, qps_list) {
4188 spin_lock_irqsave(&mqp->sq.lock, flags_qp);
4189 if (mqp->sq.tail != mqp->sq.head) {
4190 send_mcq = to_mcq(mqp->ibqp.send_cq);
4191 spin_lock_irqsave(&send_mcq->lock, flags_cq);
4192 if (send_mcq->mcq.comp &&
4193 mqp->ibqp.send_cq->comp_handler) {
4194 if (!send_mcq->mcq.reset_notify_added) {
4195 send_mcq->mcq.reset_notify_added = 1;
4196 list_add_tail(&send_mcq->mcq.reset_notify,
4197 &cq_armed_list);
4198 }
4199 }
4200 spin_unlock_irqrestore(&send_mcq->lock, flags_cq);
4201 }
4202 spin_unlock_irqrestore(&mqp->sq.lock, flags_qp);
4203 spin_lock_irqsave(&mqp->rq.lock, flags_qp);
4204 /* no handling is needed for SRQ */
4205 if (!mqp->ibqp.srq) {
4206 if (mqp->rq.tail != mqp->rq.head) {
4207 recv_mcq = to_mcq(mqp->ibqp.recv_cq);
4208 spin_lock_irqsave(&recv_mcq->lock, flags_cq);
4209 if (recv_mcq->mcq.comp &&
4210 mqp->ibqp.recv_cq->comp_handler) {
4211 if (!recv_mcq->mcq.reset_notify_added) {
4212 recv_mcq->mcq.reset_notify_added = 1;
4213 list_add_tail(&recv_mcq->mcq.reset_notify,
4214 &cq_armed_list);
4215 }
4216 }
4217 spin_unlock_irqrestore(&recv_mcq->lock,
4218 flags_cq);
4219 }
4220 }
4221 spin_unlock_irqrestore(&mqp->rq.lock, flags_qp);
4222 }
4223 /* At this point all inflight post_send operations have been observed, as we
4224 * locked/unlocked the locks above. Now arm all involved CQs.
4225 */
4226 list_for_each_entry(mcq, &cq_armed_list, reset_notify) {
4227 mcq->comp(mcq);
4228 }
4229 spin_unlock_irqrestore(&ibdev->reset_flow_resource_lock, flags);
4230}
4231
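/*
 * Re-arm the delay-drop timeout in firmware; scheduled from
 * handle_general_event() when a DELAY_DROP_TIMEOUT event arrives.
 */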
Maor Gottlieb03404e82017-05-30 10:29:13 +03004232static void delay_drop_handler(struct work_struct *work)
4233{
4234 int err;
4235 struct mlx5_ib_delay_drop *delay_drop =
4236 container_of(work, struct mlx5_ib_delay_drop,
4237 delay_drop_work);
4238
Maor Gottliebfe248c32017-05-30 10:29:14 +03004239 atomic_inc(&delay_drop->events_cnt);
4240
Maor Gottlieb03404e82017-05-30 10:29:13 +03004241 mutex_lock(&delay_drop->lock);
4242 err = mlx5_core_set_delay_drop(delay_drop->dev->mdev,
4243 delay_drop->timeout);
4244 if (err) {
4245 mlx5_ib_warn(delay_drop->dev, "Failed to set delay drop, timeout=%u\n",
4246 delay_drop->timeout);
4247 delay_drop->activate = false;
4248 }
4249 mutex_unlock(&delay_drop->lock);
4250}
4251
Saeed Mahameed09e574f2018-11-26 14:39:04 -08004252static void handle_general_event(struct mlx5_ib_dev *ibdev, struct mlx5_eqe *eqe,
4253 struct ib_event *ibev)
4254{
4255 switch (eqe->sub_type) {
4256 case MLX5_GENERAL_SUBTYPE_DELAY_DROP_TIMEOUT:
4257 schedule_work(&ibdev->delay_drop.delay_drop_work);
4258 break;
4259 default: /* do nothing */
4260 return;
4261 }
4262}
4263
Saeed Mahameed134e9342018-11-26 14:39:02 -08004264static int handle_port_change(struct mlx5_ib_dev *ibdev, struct mlx5_eqe *eqe,
4265 struct ib_event *ibev)
4266{
4267 u8 port = (eqe->data.port.port >> 4) & 0xf;
4268
4269 ibev->element.port_num = port;
4270
4271 switch (eqe->sub_type) {
4272 case MLX5_PORT_CHANGE_SUBTYPE_ACTIVE:
4273 case MLX5_PORT_CHANGE_SUBTYPE_DOWN:
4274 case MLX5_PORT_CHANGE_SUBTYPE_INITIALIZED:
4275 /* In RoCE, port up/down events are handled in
4276 * mlx5_netdev_event().
4277 */
4278 if (mlx5_ib_port_link_layer(&ibdev->ib_dev, port) ==
4279 IB_LINK_LAYER_ETHERNET)
4280 return -EINVAL;
4281
4282 ibev->event = (eqe->sub_type == MLX5_PORT_CHANGE_SUBTYPE_ACTIVE) ?
4283 IB_EVENT_PORT_ACTIVE : IB_EVENT_PORT_ERR;
4284 break;
4285
4286 case MLX5_PORT_CHANGE_SUBTYPE_LID:
4287 ibev->event = IB_EVENT_LID_CHANGE;
4288 break;
4289
4290 case MLX5_PORT_CHANGE_SUBTYPE_PKEY:
4291 ibev->event = IB_EVENT_PKEY_CHANGE;
4292 schedule_work(&ibdev->devr.ports[port - 1].pkey_change_work);
4293 break;
4294
4295 case MLX5_PORT_CHANGE_SUBTYPE_GUID:
4296 ibev->event = IB_EVENT_GID_CHANGE;
4297 break;
4298
4299 case MLX5_PORT_CHANGE_SUBTYPE_CLIENT_REREG:
4300 ibev->event = IB_EVENT_CLIENT_REREGISTER;
4301 break;
4302 default:
4303 return -EINVAL;
4304 }
4305
4306 return 0;
4307}
4308
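/*
 * Deferred (workqueue) event handler: translate a core device event into
 * an ib_event and dispatch it to registered clients. Fatal errors also
 * mark the device inactive.
 */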
Daniel Jurgensd69a24e2018-01-04 17:25:37 +02004309static void mlx5_ib_handle_event(struct work_struct *_work)
Eli Cohene126ba92013-07-07 17:25:49 +03004310{
Daniel Jurgensd69a24e2018-01-04 17:25:37 +02004311 struct mlx5_ib_event_work *work =
4312 container_of(_work, struct mlx5_ib_event_work, work);
4313 struct mlx5_ib_dev *ibdev;
Eli Cohene126ba92013-07-07 17:25:49 +03004314 struct ib_event ibev;
Eli Cohendbaaff22016-10-27 16:36:44 +03004315 bool fatal = false;
Eli Cohene126ba92013-07-07 17:25:49 +03004316
Saeed Mahameeddf097a22018-11-26 14:39:00 -08004317 if (work->is_slave) {
4318 ibdev = mlx5_ib_get_ibdev_from_mpi(work->mpi);
Daniel Jurgensd69a24e2018-01-04 17:25:37 +02004319 if (!ibdev)
4320 goto out;
4321 } else {
Saeed Mahameeddf097a22018-11-26 14:39:00 -08004322 ibdev = work->dev;
Daniel Jurgensd69a24e2018-01-04 17:25:37 +02004323 }
4324
4325 switch (work->event) {
Eli Cohene126ba92013-07-07 17:25:49 +03004326 case MLX5_DEV_EVENT_SYS_ERROR:
Eli Cohene126ba92013-07-07 17:25:49 +03004327 ibev.event = IB_EVENT_DEVICE_FATAL;
Maor Gottlieb89ea94a72016-06-17 15:01:38 +03004328 mlx5_ib_handle_internal_error(ibdev);
Saeed Mahameed134e9342018-11-26 14:39:02 -08004329 ibev.element.port_num = (u8)(unsigned long)work->param;
Eli Cohendbaaff22016-10-27 16:36:44 +03004330 fatal = true;
Eli Cohene126ba92013-07-07 17:25:49 +03004331 break;
Saeed Mahameed134e9342018-11-26 14:39:02 -08004332 case MLX5_EVENT_TYPE_PORT_CHANGE:
4333 if (handle_port_change(ibdev, work->param, &ibev))
Daniel Jurgensd69a24e2018-01-04 17:25:37 +02004334 goto out;
Eli Cohene126ba92013-07-07 17:25:49 +03004335 break;
Saeed Mahameed09e574f2018-11-26 14:39:04 -08004336 case MLX5_EVENT_TYPE_GENERAL_EVENT:
4337 handle_general_event(ibdev, work->param, &ibev);
4338 /* fall through */
Saeed Mahameedbdc37922016-09-29 19:35:38 +03004339 default:
Maor Gottlieb03404e82017-05-30 10:29:13 +03004340 goto out;
Eli Cohene126ba92013-07-07 17:25:49 +03004341 }
4342
Saeed Mahameed134e9342018-11-26 14:39:02 -08004343 ibev.device = &ibdev->ib_dev;
Eli Cohene126ba92013-07-07 17:25:49 +03004344
Saeed Mahameed134e9342018-11-26 14:39:02 -08004345 if (!rdma_is_port_valid(&ibdev->ib_dev, ibev.element.port_num)) {
4346 mlx5_ib_warn(ibdev, "warning: event on port %d\n", ibev.element.port_num);
Maor Gottlieb03404e82017-05-30 10:29:13 +03004347 goto out;
Eli Cohena0c84c32013-09-11 16:35:27 +03004348 }
4349
Eli Cohene126ba92013-07-07 17:25:49 +03004350 if (ibdev->ib_active)
4351 ib_dispatch_event(&ibev);
Eli Cohendbaaff22016-10-27 16:36:44 +03004352
4353 if (fatal)
4354 ibdev->ib_active = false;
Maor Gottlieb03404e82017-05-30 10:29:13 +03004355out:
Daniel Jurgensd69a24e2018-01-04 17:25:37 +02004356 kfree(work);
4357}
4358
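/*
 * Allocate the work item with GFP_ATOMIC, since the notifier may be
 * invoked from atomic context, and defer the real handling to
 * mlx5_ib_event_wq.
 */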
Saeed Mahameeddf097a22018-11-26 14:39:00 -08004359static int mlx5_ib_event(struct notifier_block *nb,
4360 unsigned long event, void *param)
Daniel Jurgensd69a24e2018-01-04 17:25:37 +02004361{
4362 struct mlx5_ib_event_work *work;
4363
4364 work = kmalloc(sizeof(*work), GFP_ATOMIC);
Leon Romanovsky10bea9c2018-01-19 13:07:11 +02004365 if (!work)
Saeed Mahameeddf097a22018-11-26 14:39:00 -08004366 return NOTIFY_DONE;
Daniel Jurgensd69a24e2018-01-04 17:25:37 +02004367
Leon Romanovsky10bea9c2018-01-19 13:07:11 +02004368 INIT_WORK(&work->work, mlx5_ib_handle_event);
Saeed Mahameeddf097a22018-11-26 14:39:00 -08004369 work->dev = container_of(nb, struct mlx5_ib_dev, mdev_events);
4370 work->is_slave = false;
Leon Romanovsky10bea9c2018-01-19 13:07:11 +02004371 work->param = param;
Leon Romanovsky10bea9c2018-01-19 13:07:11 +02004372 work->event = event;
4373
4374 queue_work(mlx5_ib_event_wq, &work->work);
Saeed Mahameeddf097a22018-11-26 14:39:00 -08004375
4376 return NOTIFY_OK;
4377}
4378
4379static int mlx5_ib_event_slave_port(struct notifier_block *nb,
4380 unsigned long event, void *param)
4381{
4382 struct mlx5_ib_event_work *work;
4383
4384 work = kmalloc(sizeof(*work), GFP_ATOMIC);
4385 if (!work)
4386 return NOTIFY_DONE;
4387
4388 INIT_WORK(&work->work, mlx5_ib_handle_event);
4389 work->mpi = container_of(nb, struct mlx5_ib_multiport_info, mdev_events);
4390 work->is_slave = true;
4391 work->param = param;
4392 work->event = event;
4393 queue_work(mlx5_ib_event_wq, &work->work);
4394
4395 return NOTIFY_OK;
Eli Cohene126ba92013-07-07 17:25:49 +03004396}
4397
Maor Gottliebc43f1112017-01-18 14:10:33 +02004398static int set_has_smi_cap(struct mlx5_ib_dev *dev)
4399{
4400 struct mlx5_hca_vport_context vport_ctx;
4401 int err;
4402 int port;
4403
Daniel Jurgens508562d2018-01-04 17:25:34 +02004404 for (port = 1; port <= dev->num_ports; port++) {
Maor Gottliebc43f1112017-01-18 14:10:33 +02004405 dev->mdev->port_caps[port - 1].has_smi = false;
4406 if (MLX5_CAP_GEN(dev->mdev, port_type) ==
4407 MLX5_CAP_PORT_TYPE_IB) {
4408 if (MLX5_CAP_GEN(dev->mdev, ib_virt)) {
4409 err = mlx5_query_hca_vport_context(dev->mdev, 0,
4410 port, 0,
4411 &vport_ctx);
4412 if (err) {
4413 mlx5_ib_err(dev, "query_hca_vport_context for port=%d failed %d\n",
4414 port, err);
4415 return err;
4416 }
4417 dev->mdev->port_caps[port - 1].has_smi =
4418 vport_ctx.has_smi;
4419 } else {
4420 dev->mdev->port_caps[port - 1].has_smi = true;
4421 }
4422 }
4423 }
4424 return 0;
4425}
4426
Eli Cohene126ba92013-07-07 17:25:49 +03004427static void get_ext_port_caps(struct mlx5_ib_dev *dev)
4428{
4429 int port;
4430
Daniel Jurgens508562d2018-01-04 17:25:34 +02004431 for (port = 1; port <= dev->num_ports; port++)
Eli Cohene126ba92013-07-07 17:25:49 +03004432 mlx5_query_ext_port_caps(dev, port);
4433}
4434
Daniel Jurgens32f69e42018-01-04 17:25:36 +02004435static int get_port_caps(struct mlx5_ib_dev *dev, u8 port)
Eli Cohene126ba92013-07-07 17:25:49 +03004436{
4437 struct ib_device_attr *dprops = NULL;
4438 struct ib_port_attr *pprops = NULL;
Dan Carpenterf614fc12015-01-12 11:56:58 +03004439 int err = -ENOMEM;
Matan Barak2528e332015-06-11 16:35:25 +03004440 struct ib_udata uhw = {.inlen = 0, .outlen = 0};
Eli Cohene126ba92013-07-07 17:25:49 +03004441
4442 pprops = kmalloc(sizeof(*pprops), GFP_KERNEL);
4443 if (!pprops)
4444 goto out;
4445
4446 dprops = kmalloc(sizeof(*dprops), GFP_KERNEL);
4447 if (!dprops)
4448 goto out;
4449
Maor Gottliebc43f1112017-01-18 14:10:33 +02004450 err = set_has_smi_cap(dev);
4451 if (err)
4452 goto out;
4453
Matan Barak2528e332015-06-11 16:35:25 +03004454 err = mlx5_ib_query_device(&dev->ib_dev, dprops, &uhw);
Eli Cohene126ba92013-07-07 17:25:49 +03004455 if (err) {
4456 mlx5_ib_warn(dev, "query_device failed %d\n", err);
4457 goto out;
4458 }
4459
Daniel Jurgens32f69e42018-01-04 17:25:36 +02004460 memset(pprops, 0, sizeof(*pprops));
4461 err = mlx5_ib_query_port(&dev->ib_dev, port, pprops);
4462 if (err) {
4463 mlx5_ib_warn(dev, "query_port %d failed %d\n",
4464 port, err);
4465 goto out;
Eli Cohene126ba92013-07-07 17:25:49 +03004466 }
4467
Daniel Jurgens32f69e42018-01-04 17:25:36 +02004468 dev->mdev->port_caps[port - 1].pkey_table_len =
4469 dprops->max_pkeys;
4470 dev->mdev->port_caps[port - 1].gid_table_len =
4471 pprops->gid_tbl_len;
4472 mlx5_ib_dbg(dev, "port %d: pkey_table_len %d, gid_table_len %d\n",
4473 port, dprops->max_pkeys, pprops->gid_tbl_len);
4474
Eli Cohene126ba92013-07-07 17:25:49 +03004475out:
4476 kfree(pprops);
4477 kfree(dprops);
4478
4479 return err;
4480}
4481
4482static void destroy_umrc_res(struct mlx5_ib_dev *dev)
4483{
4484 int err;
4485
4486 err = mlx5_mr_cache_cleanup(dev);
4487 if (err)
4488 mlx5_ib_warn(dev, "mr cache cleanup failed\n");
4489
Mark Bloch32927e22018-03-20 15:45:37 +02004490 if (dev->umrc.qp)
4491 mlx5_ib_destroy_qp(dev->umrc.qp);
4492 if (dev->umrc.cq)
4493 ib_free_cq(dev->umrc.cq);
4494 if (dev->umrc.pd)
4495 ib_dealloc_pd(dev->umrc.pd);
Eli Cohene126ba92013-07-07 17:25:49 +03004496}
4497
4498enum {
4499 MAX_UMR_WR = 128,
4500};
4501
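/*
 * Create the internal UMR resources: a PD, a CQ and a REG_UMR QP that is
 * manually transitioned INIT->RTR->RTS and used to post
 * memory-registration work requests.
 */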
4502static int create_umr_res(struct mlx5_ib_dev *dev)
4503{
4504 struct ib_qp_init_attr *init_attr = NULL;
4505 struct ib_qp_attr *attr = NULL;
4506 struct ib_pd *pd;
4507 struct ib_cq *cq;
4508 struct ib_qp *qp;
Eli Cohene126ba92013-07-07 17:25:49 +03004509 int ret;
4510
4511 attr = kzalloc(sizeof(*attr), GFP_KERNEL);
4512 init_attr = kzalloc(sizeof(*init_attr), GFP_KERNEL);
4513 if (!attr || !init_attr) {
4514 ret = -ENOMEM;
4515 goto error_0;
4516 }
4517
Christoph Hellwiged082d32016-09-05 12:56:17 +02004518 pd = ib_alloc_pd(&dev->ib_dev, 0);
Eli Cohene126ba92013-07-07 17:25:49 +03004519 if (IS_ERR(pd)) {
4520 mlx5_ib_dbg(dev, "Couldn't create PD for sync UMR QP\n");
4521 ret = PTR_ERR(pd);
4522 goto error_0;
4523 }
4524
Christoph Hellwigadd08d72016-03-03 09:38:22 +01004525 cq = ib_alloc_cq(&dev->ib_dev, NULL, 128, 0, IB_POLL_SOFTIRQ);
Eli Cohene126ba92013-07-07 17:25:49 +03004526 if (IS_ERR(cq)) {
4527 mlx5_ib_dbg(dev, "Couldn't create CQ for sync UMR QP\n");
4528 ret = PTR_ERR(cq);
4529 goto error_2;
4530 }
Eli Cohene126ba92013-07-07 17:25:49 +03004531
4532 init_attr->send_cq = cq;
4533 init_attr->recv_cq = cq;
4534 init_attr->sq_sig_type = IB_SIGNAL_ALL_WR;
4535 init_attr->cap.max_send_wr = MAX_UMR_WR;
4536 init_attr->cap.max_send_sge = 1;
4537 init_attr->qp_type = MLX5_IB_QPT_REG_UMR;
4538 init_attr->port_num = 1;
4539 qp = mlx5_ib_create_qp(pd, init_attr, NULL);
4540 if (IS_ERR(qp)) {
4541 mlx5_ib_dbg(dev, "Couldn't create sync UMR QP\n");
4542 ret = PTR_ERR(qp);
4543 goto error_3;
4544 }
4545 qp->device = &dev->ib_dev;
4546 qp->real_qp = qp;
4547 qp->uobject = NULL;
4548 qp->qp_type = MLX5_IB_QPT_REG_UMR;
Majd Dibbiny31fde032017-10-30 14:23:13 +02004549 qp->send_cq = init_attr->send_cq;
4550 qp->recv_cq = init_attr->recv_cq;
Eli Cohene126ba92013-07-07 17:25:49 +03004551
4552 attr->qp_state = IB_QPS_INIT;
4553 attr->port_num = 1;
4554 ret = mlx5_ib_modify_qp(qp, attr, IB_QP_STATE | IB_QP_PKEY_INDEX |
4555 IB_QP_PORT, NULL);
4556 if (ret) {
4557 mlx5_ib_dbg(dev, "Couldn't modify UMR QP\n");
4558 goto error_4;
4559 }
4560
4561 memset(attr, 0, sizeof(*attr));
4562 attr->qp_state = IB_QPS_RTR;
4563 attr->path_mtu = IB_MTU_256;
4564
4565 ret = mlx5_ib_modify_qp(qp, attr, IB_QP_STATE, NULL);
4566 if (ret) {
4567 mlx5_ib_dbg(dev, "Couldn't modify umr QP to rtr\n");
4568 goto error_4;
4569 }
4570
4571 memset(attr, 0, sizeof(*attr));
4572 attr->qp_state = IB_QPS_RTS;
4573 ret = mlx5_ib_modify_qp(qp, attr, IB_QP_STATE, NULL);
4574 if (ret) {
4575 mlx5_ib_dbg(dev, "Couldn't modify umr QP to rts\n");
4576 goto error_4;
4577 }
4578
4579 dev->umrc.qp = qp;
4580 dev->umrc.cq = cq;
Eli Cohene126ba92013-07-07 17:25:49 +03004581 dev->umrc.pd = pd;
4582
4583 sema_init(&dev->umrc.sem, MAX_UMR_WR);
4584 ret = mlx5_mr_cache_init(dev);
4585 if (ret) {
4586 mlx5_ib_warn(dev, "mr cache init failed %d\n", ret);
4587 goto error_4;
4588 }
4589
4590 kfree(attr);
4591 kfree(init_attr);
4592
4593 return 0;
4594
4595error_4:
4596 mlx5_ib_destroy_qp(qp);
Mark Bloch32927e22018-03-20 15:45:37 +02004597 dev->umrc.qp = NULL;
Eli Cohene126ba92013-07-07 17:25:49 +03004598
4599error_3:
Christoph Hellwigadd08d72016-03-03 09:38:22 +01004600 ib_free_cq(cq);
Mark Bloch32927e22018-03-20 15:45:37 +02004601 dev->umrc.cq = NULL;
Eli Cohene126ba92013-07-07 17:25:49 +03004602
4603error_2:
Eli Cohene126ba92013-07-07 17:25:49 +03004604 ib_dealloc_pd(pd);
Mark Bloch32927e22018-03-20 15:45:37 +02004605 dev->umrc.pd = NULL;
Eli Cohene126ba92013-07-07 17:25:49 +03004606
4607error_0:
4608 kfree(attr);
4609 kfree(init_attr);
4610 return ret;
4611}
4612
Max Gurtovoy6e8484c2017-05-28 10:53:11 +03004613static u8 mlx5_get_umr_fence(u8 umr_fence_cap)
4614{
4615 switch (umr_fence_cap) {
4616 case MLX5_CAP_UMR_FENCE_NONE:
4617 return MLX5_FENCE_MODE_NONE;
4618 case MLX5_CAP_UMR_FENCE_SMALL:
4619 return MLX5_FENCE_MODE_INITIATOR_SMALL;
4620 default:
4621 return MLX5_FENCE_MODE_STRONG_ORDERING;
4622 }
4623}
4624
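/*
 * Allocate the driver's shared device resources: PD p0, CQ c0, XRC
 * domains x0/x1, SRQs s0 (XRC) and s1 (basic), and the per-port P_Key
 * change work items.
 */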
Eli Cohene126ba92013-07-07 17:25:49 +03004625static int create_dev_resources(struct mlx5_ib_resources *devr)
4626{
4627 struct ib_srq_init_attr attr;
4628 struct mlx5_ib_dev *dev;
Matan Barakbcf4c1e2015-06-11 16:35:20 +03004629 struct ib_cq_init_attr cq_attr = {.cqe = 1};
Haggai Eran7722f472016-02-29 15:45:07 +02004630 int port;
Eli Cohene126ba92013-07-07 17:25:49 +03004631 int ret = 0;
4632
4633 dev = container_of(devr, struct mlx5_ib_dev, devr);
4634
Haggai Erand16e91d2016-02-29 15:45:05 +02004635 mutex_init(&devr->mutex);
4636
Eli Cohene126ba92013-07-07 17:25:49 +03004637 devr->p0 = mlx5_ib_alloc_pd(&dev->ib_dev, NULL, NULL);
4638 if (IS_ERR(devr->p0)) {
4639 ret = PTR_ERR(devr->p0);
4640 goto error0;
4641 }
4642 devr->p0->device = &dev->ib_dev;
4643 devr->p0->uobject = NULL;
4644 atomic_set(&devr->p0->usecnt, 0);
4645
Matan Barakbcf4c1e2015-06-11 16:35:20 +03004646 devr->c0 = mlx5_ib_create_cq(&dev->ib_dev, &cq_attr, NULL, NULL);
Eli Cohene126ba92013-07-07 17:25:49 +03004647 if (IS_ERR(devr->c0)) {
4648 ret = PTR_ERR(devr->c0);
4649 goto error1;
4650 }
4651 devr->c0->device = &dev->ib_dev;
4652 devr->c0->uobject = NULL;
4653 devr->c0->comp_handler = NULL;
4654 devr->c0->event_handler = NULL;
4655 devr->c0->cq_context = NULL;
4656 atomic_set(&devr->c0->usecnt, 0);
4657
4658 devr->x0 = mlx5_ib_alloc_xrcd(&dev->ib_dev, NULL, NULL);
4659 if (IS_ERR(devr->x0)) {
4660 ret = PTR_ERR(devr->x0);
4661 goto error2;
4662 }
4663 devr->x0->device = &dev->ib_dev;
4664 devr->x0->inode = NULL;
4665 atomic_set(&devr->x0->usecnt, 0);
4666 mutex_init(&devr->x0->tgt_qp_mutex);
4667 INIT_LIST_HEAD(&devr->x0->tgt_qp_list);
4668
4669 devr->x1 = mlx5_ib_alloc_xrcd(&dev->ib_dev, NULL, NULL);
4670 if (IS_ERR(devr->x1)) {
4671 ret = PTR_ERR(devr->x1);
4672 goto error3;
4673 }
4674 devr->x1->device = &dev->ib_dev;
4675 devr->x1->inode = NULL;
4676 atomic_set(&devr->x1->usecnt, 0);
4677 mutex_init(&devr->x1->tgt_qp_mutex);
4678 INIT_LIST_HEAD(&devr->x1->tgt_qp_list);
4679
4680 memset(&attr, 0, sizeof(attr));
4681 attr.attr.max_sge = 1;
4682 attr.attr.max_wr = 1;
4683 attr.srq_type = IB_SRQT_XRC;
Artemy Kovalyov1a56ff62017-08-17 15:52:04 +03004684 attr.ext.cq = devr->c0;
Eli Cohene126ba92013-07-07 17:25:49 +03004685 attr.ext.xrc.xrcd = devr->x0;
4686
4687 devr->s0 = mlx5_ib_create_srq(devr->p0, &attr, NULL);
4688 if (IS_ERR(devr->s0)) {
4689 ret = PTR_ERR(devr->s0);
4690 goto error4;
4691 }
4692 devr->s0->device = &dev->ib_dev;
4693 devr->s0->pd = devr->p0;
4694 devr->s0->uobject = NULL;
4695 devr->s0->event_handler = NULL;
4696 devr->s0->srq_context = NULL;
4697 devr->s0->srq_type = IB_SRQT_XRC;
4698 devr->s0->ext.xrc.xrcd = devr->x0;
Artemy Kovalyov1a56ff62017-08-17 15:52:04 +03004699 devr->s0->ext.cq = devr->c0;
Eli Cohene126ba92013-07-07 17:25:49 +03004700 atomic_inc(&devr->s0->ext.xrc.xrcd->usecnt);
Artemy Kovalyov1a56ff62017-08-17 15:52:04 +03004701 atomic_inc(&devr->s0->ext.cq->usecnt);
Eli Cohene126ba92013-07-07 17:25:49 +03004702 atomic_inc(&devr->p0->usecnt);
4703 atomic_set(&devr->s0->usecnt, 0);
4704
Haggai Abramonvsky4aa17b22015-06-04 19:30:48 +03004705 memset(&attr, 0, sizeof(attr));
4706 attr.attr.max_sge = 1;
4707 attr.attr.max_wr = 1;
4708 attr.srq_type = IB_SRQT_BASIC;
4709 devr->s1 = mlx5_ib_create_srq(devr->p0, &attr, NULL);
4710 if (IS_ERR(devr->s1)) {
4711 ret = PTR_ERR(devr->s1);
4712 goto error5;
4713 }
4714 devr->s1->device = &dev->ib_dev;
4715 devr->s1->pd = devr->p0;
4716 devr->s1->uobject = NULL;
4717 devr->s1->event_handler = NULL;
4718 devr->s1->srq_context = NULL;
4719 devr->s1->srq_type = IB_SRQT_BASIC;
Artemy Kovalyov1a56ff62017-08-17 15:52:04 +03004720 devr->s1->ext.cq = devr->c0;
Haggai Abramonvsky4aa17b22015-06-04 19:30:48 +03004721 atomic_inc(&devr->p0->usecnt);
Artemy Kovalyov1a56ff62017-08-17 15:52:04 +03004722 atomic_set(&devr->s1->usecnt, 0);
Haggai Abramonvsky4aa17b22015-06-04 19:30:48 +03004723
Haggai Eran7722f472016-02-29 15:45:07 +02004724 for (port = 0; port < ARRAY_SIZE(devr->ports); ++port) {
4725 INIT_WORK(&devr->ports[port].pkey_change_work,
4726 pkey_change_handler);
4727 devr->ports[port].devr = devr;
4728 }
4729
Eli Cohene126ba92013-07-07 17:25:49 +03004730 return 0;
4731
Haggai Abramonvsky4aa17b22015-06-04 19:30:48 +03004732error5:
4733 mlx5_ib_destroy_srq(devr->s0);
Eli Cohene126ba92013-07-07 17:25:49 +03004734error4:
4735 mlx5_ib_dealloc_xrcd(devr->x1);
4736error3:
4737 mlx5_ib_dealloc_xrcd(devr->x0);
4738error2:
4739 mlx5_ib_destroy_cq(devr->c0);
4740error1:
4741 mlx5_ib_dealloc_pd(devr->p0);
4742error0:
4743 return ret;
4744}
4745
4746static void destroy_dev_resources(struct mlx5_ib_resources *devr)
4747{
Haggai Eran7722f472016-02-29 15:45:07 +02004748 struct mlx5_ib_dev *dev =
4749 container_of(devr, struct mlx5_ib_dev, devr);
4750 int port;
4751
Haggai Abramonvsky4aa17b22015-06-04 19:30:48 +03004752 mlx5_ib_destroy_srq(devr->s1);
Eli Cohene126ba92013-07-07 17:25:49 +03004753 mlx5_ib_destroy_srq(devr->s0);
4754 mlx5_ib_dealloc_xrcd(devr->x0);
4755 mlx5_ib_dealloc_xrcd(devr->x1);
4756 mlx5_ib_destroy_cq(devr->c0);
4757 mlx5_ib_dealloc_pd(devr->p0);
Haggai Eran7722f472016-02-29 15:45:07 +02004758
4759 /* Make sure no P_Key change work items are still executing */
4760 for (port = 0; port < dev->num_ports; ++port)
4761 cancel_work_sync(&devr->ports[port].pkey_change_work);
Eli Cohene126ba92013-07-07 17:25:49 +03004762}
4763
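/*
 * Build the RDMA core port capability flags from the link layer, the
 * device's RoCE capabilities and whether GRH is required on the vport.
 */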
Artemy Kovalyovb02289b2018-07-04 15:57:50 +03004764static u32 get_core_cap_flags(struct ib_device *ibdev,
4765 struct mlx5_hca_vport_context *rep)
Achiad Shochate53505a2015-12-23 18:47:25 +02004766{
4767 struct mlx5_ib_dev *dev = to_mdev(ibdev);
4768 enum rdma_link_layer ll = mlx5_ib_port_link_layer(ibdev, 1);
4769 u8 l3_type_cap = MLX5_CAP_ROCE(dev->mdev, l3_type);
4770 u8 roce_version_cap = MLX5_CAP_ROCE(dev->mdev, roce_version);
Daniel Jurgens85c7c012018-01-04 17:25:43 +02004771 bool raw_support = !mlx5_core_mp_enabled(dev->mdev);
Achiad Shochate53505a2015-12-23 18:47:25 +02004772 u32 ret = 0;
4773
Artemy Kovalyovb02289b2018-07-04 15:57:50 +03004774 if (rep->grh_required)
4775 ret |= RDMA_CORE_CAP_IB_GRH_REQUIRED;
4776
Achiad Shochate53505a2015-12-23 18:47:25 +02004777 if (ll == IB_LINK_LAYER_INFINIBAND)
Artemy Kovalyovb02289b2018-07-04 15:57:50 +03004778 return ret | RDMA_CORE_PORT_IBA_IB;
Achiad Shochate53505a2015-12-23 18:47:25 +02004779
Daniel Jurgens85c7c012018-01-04 17:25:43 +02004780 if (raw_support)
Artemy Kovalyovb02289b2018-07-04 15:57:50 +03004781 ret |= RDMA_CORE_PORT_RAW_PACKET;
Or Gerlitz72cd5712017-01-24 13:02:36 +02004782
Achiad Shochate53505a2015-12-23 18:47:25 +02004783 if (!(l3_type_cap & MLX5_ROCE_L3_TYPE_IPV4_CAP))
Or Gerlitz72cd5712017-01-24 13:02:36 +02004784 return ret;
Achiad Shochate53505a2015-12-23 18:47:25 +02004785
4786 if (!(l3_type_cap & MLX5_ROCE_L3_TYPE_IPV6_CAP))
Or Gerlitz72cd5712017-01-24 13:02:36 +02004787 return ret;
Achiad Shochate53505a2015-12-23 18:47:25 +02004788
4789 if (roce_version_cap & MLX5_ROCE_VERSION_1_CAP)
4790 ret |= RDMA_CORE_PORT_IBA_ROCE;
4791
4792 if (roce_version_cap & MLX5_ROCE_VERSION_2_CAP)
4793 ret |= RDMA_CORE_PORT_IBA_ROCE_UDP_ENCAP;
4794
4795 return ret;
4796}
4797
Ira Weiny77386132015-05-13 20:02:58 -04004798static int mlx5_port_immutable(struct ib_device *ibdev, u8 port_num,
4799 struct ib_port_immutable *immutable)
4800{
4801 struct ib_port_attr attr;
Or Gerlitzca5b91d2016-11-27 16:51:36 +02004802 struct mlx5_ib_dev *dev = to_mdev(ibdev);
4803 enum rdma_link_layer ll = mlx5_ib_port_link_layer(ibdev, port_num);
Artemy Kovalyovb02289b2018-07-04 15:57:50 +03004804 struct mlx5_hca_vport_context rep = {0};
Ira Weiny77386132015-05-13 20:02:58 -04004805 int err;
4806
Or Gerlitzc4550c62017-01-24 13:02:39 +02004807 err = ib_query_port(ibdev, port_num, &attr);
Ira Weiny77386132015-05-13 20:02:58 -04004808 if (err)
4809 return err;
4810
Artemy Kovalyovb02289b2018-07-04 15:57:50 +03004811 if (ll == IB_LINK_LAYER_INFINIBAND) {
4812 err = mlx5_query_hca_vport_context(dev->mdev, 0, port_num, 0,
4813 &rep);
4814 if (err)
4815 return err;
4816 }
4817
Ira Weiny77386132015-05-13 20:02:58 -04004818 immutable->pkey_tbl_len = attr.pkey_tbl_len;
4819 immutable->gid_tbl_len = attr.gid_tbl_len;
Artemy Kovalyovb02289b2018-07-04 15:57:50 +03004820 immutable->core_cap_flags = get_core_cap_flags(ibdev, &rep);
Or Gerlitzca5b91d2016-11-27 16:51:36 +02004821 if ((ll == IB_LINK_LAYER_INFINIBAND) || MLX5_CAP_GEN(dev->mdev, roce))
4822 immutable->max_mad_size = IB_MGMT_MAD_SIZE;
Ira Weiny77386132015-05-13 20:02:58 -04004823
4824 return 0;
4825}
4826
Mark Bloch8e6efa32017-11-06 12:22:13 +00004827static int mlx5_port_rep_immutable(struct ib_device *ibdev, u8 port_num,
4828 struct ib_port_immutable *immutable)
4829{
4830 struct ib_port_attr attr;
4831 int err;
4832
4833 immutable->core_cap_flags = RDMA_CORE_PORT_RAW_PACKET;
4834
4835 err = ib_query_port(ibdev, port_num, &attr);
4836 if (err)
4837 return err;
4838
4839 immutable->pkey_tbl_len = attr.pkey_tbl_len;
4840 immutable->gid_tbl_len = attr.gid_tbl_len;
4841 immutable->core_cap_flags = RDMA_CORE_PORT_RAW_PACKET;
4842
4843 return 0;
4844}
4845
Leon Romanovsky9abb0d12017-06-27 16:49:53 +03004846static void get_dev_fw_str(struct ib_device *ibdev, char *str)
Ira Weinyc7342822016-06-15 02:22:01 -04004847{
4848 struct mlx5_ib_dev *dev =
4849 container_of(ibdev, struct mlx5_ib_dev, ib_dev);
Leon Romanovsky9abb0d12017-06-27 16:49:53 +03004850 snprintf(str, IB_FW_VERSION_NAME_MAX, "%d.%d.%04d",
4851 fw_rev_maj(dev->mdev), fw_rev_min(dev->mdev),
4852 fw_rev_sub(dev->mdev));
Ira Weinyc7342822016-06-15 02:22:01 -04004853}
4854
Or Gerlitz45f95ac2016-11-27 16:51:35 +02004855static int mlx5_eth_lag_init(struct mlx5_ib_dev *dev)
Aviv Heller9ef9c642016-09-18 20:48:01 +03004856{
4857 struct mlx5_core_dev *mdev = dev->mdev;
4858 struct mlx5_flow_namespace *ns = mlx5_get_flow_namespace(mdev,
4859 MLX5_FLOW_NAMESPACE_LAG);
4860 struct mlx5_flow_table *ft;
4861 int err;
4862
4863 if (!ns || !mlx5_lag_is_active(mdev))
4864 return 0;
4865
4866 err = mlx5_cmd_create_vport_lag(mdev);
4867 if (err)
4868 return err;
4869
4870 ft = mlx5_create_lag_demux_flow_table(ns, 0, 0);
4871 if (IS_ERR(ft)) {
4872 err = PTR_ERR(ft);
4873 goto err_destroy_vport_lag;
4874 }
4875
Mark Bloch9a4ca382018-01-16 14:42:35 +00004876 dev->flow_db->lag_demux_ft = ft;
Aviv Heller9ef9c642016-09-18 20:48:01 +03004877 return 0;
4878
4879err_destroy_vport_lag:
4880 mlx5_cmd_destroy_vport_lag(mdev);
4881 return err;
4882}
4883
Or Gerlitz45f95ac2016-11-27 16:51:35 +02004884static void mlx5_eth_lag_cleanup(struct mlx5_ib_dev *dev)
Aviv Heller9ef9c642016-09-18 20:48:01 +03004885{
4886 struct mlx5_core_dev *mdev = dev->mdev;
4887
Mark Bloch9a4ca382018-01-16 14:42:35 +00004888 if (dev->flow_db->lag_demux_ft) {
4889 mlx5_destroy_flow_table(dev->flow_db->lag_demux_ft);
4890 dev->flow_db->lag_demux_ft = NULL;
Aviv Heller9ef9c642016-09-18 20:48:01 +03004891
4892 mlx5_cmd_destroy_vport_lag(mdev);
4893 }
4894}
4895
Daniel Jurgens7fd8aef2018-01-04 17:25:35 +02004896static int mlx5_add_netdev_notifier(struct mlx5_ib_dev *dev, u8 port_num)
Achiad Shochatfc24fc52015-12-23 18:47:17 +02004897{
Achiad Shochate53505a2015-12-23 18:47:25 +02004898 int err;
4899
Daniel Jurgens7fd8aef2018-01-04 17:25:35 +02004900 dev->roce[port_num].nb.notifier_call = mlx5_netdev_event;
4901 err = register_netdevice_notifier(&dev->roce[port_num].nb);
Aviv Heller5ec8c832016-09-18 20:48:00 +03004902 if (err) {
Daniel Jurgens7fd8aef2018-01-04 17:25:35 +02004903 dev->roce[port_num].nb.notifier_call = NULL;
Achiad Shochate53505a2015-12-23 18:47:25 +02004904 return err;
Aviv Heller5ec8c832016-09-18 20:48:00 +03004905 }
Achiad Shochate53505a2015-12-23 18:47:25 +02004906
Or Gerlitzd012f5d2016-11-27 16:51:34 +02004907 return 0;
4908}
Achiad Shochate53505a2015-12-23 18:47:25 +02004909
Daniel Jurgens7fd8aef2018-01-04 17:25:35 +02004910static void mlx5_remove_netdev_notifier(struct mlx5_ib_dev *dev, u8 port_num)
Eli Cohene126ba92013-07-07 17:25:49 +03004911{
Daniel Jurgens7fd8aef2018-01-04 17:25:35 +02004912 if (dev->roce[port_num].nb.notifier_call) {
4913 unregister_netdevice_notifier(&dev->roce[port_num].nb);
4914 dev->roce[port_num].nb.notifier_call = NULL;
Eli Cohene126ba92013-07-07 17:25:49 +03004915 }
4916}
4917
Leon Romanovskye3f1ed12018-07-08 12:55:43 +03004918static int mlx5_enable_eth(struct mlx5_ib_dev *dev)
Eli Cohene126ba92013-07-07 17:25:49 +03004919{
Eli Cohene126ba92013-07-07 17:25:49 +03004920 int err;
4921
Or Gerlitzca5b91d2016-11-27 16:51:36 +02004922 if (MLX5_CAP_GEN(dev->mdev, roce)) {
4923 err = mlx5_nic_vport_enable_roce(dev->mdev);
4924 if (err)
Mark Bloch8e6efa32017-11-06 12:22:13 +00004925 return err;
Or Gerlitzca5b91d2016-11-27 16:51:36 +02004926 }
Achiad Shochate53505a2015-12-23 18:47:25 +02004927
Or Gerlitz45f95ac2016-11-27 16:51:35 +02004928 err = mlx5_eth_lag_init(dev);
Aviv Heller9ef9c642016-09-18 20:48:01 +03004929 if (err)
4930 goto err_disable_roce;
4931
Achiad Shochate53505a2015-12-23 18:47:25 +02004932 return 0;
4933
Aviv Heller9ef9c642016-09-18 20:48:01 +03004934err_disable_roce:
Or Gerlitzca5b91d2016-11-27 16:51:36 +02004935 if (MLX5_CAP_GEN(dev->mdev, roce))
4936 mlx5_nic_vport_disable_roce(dev->mdev);
Aviv Heller9ef9c642016-09-18 20:48:01 +03004937
Achiad Shochate53505a2015-12-23 18:47:25 +02004938 return err;
Achiad Shochatfc24fc52015-12-23 18:47:17 +02004939}
4940
Or Gerlitz45f95ac2016-11-27 16:51:35 +02004941static void mlx5_disable_eth(struct mlx5_ib_dev *dev)
Achiad Shochatfc24fc52015-12-23 18:47:17 +02004942{
Or Gerlitz45f95ac2016-11-27 16:51:35 +02004943 mlx5_eth_lag_cleanup(dev);
Or Gerlitzca5b91d2016-11-27 16:51:36 +02004944 if (MLX5_CAP_GEN(dev->mdev, roce))
4945 mlx5_nic_vport_disable_roce(dev->mdev);
Achiad Shochatfc24fc52015-12-23 18:47:17 +02004946}
4947
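/*
 * Counter descriptor: human-readable name plus the byte offset of the
 * value inside the matching query mailbox or register layout.
 */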
Parav Pandite1f24a72017-04-16 07:29:29 +03004948struct mlx5_ib_counter {
Kamal Heib7c16f472017-01-18 15:25:09 +02004949 const char *name;
4950 size_t offset;
4951};
4952
4953#define INIT_Q_COUNTER(_name) \
4954 { .name = #_name, .offset = MLX5_BYTE_OFF(query_q_counter_out, _name)}
4955
Parav Pandite1f24a72017-04-16 07:29:29 +03004956static const struct mlx5_ib_counter basic_q_cnts[] = {
Kamal Heib7c16f472017-01-18 15:25:09 +02004957 INIT_Q_COUNTER(rx_write_requests),
4958 INIT_Q_COUNTER(rx_read_requests),
4959 INIT_Q_COUNTER(rx_atomic_requests),
4960 INIT_Q_COUNTER(out_of_buffer),
4961};
4962
Parav Pandite1f24a72017-04-16 07:29:29 +03004963static const struct mlx5_ib_counter out_of_seq_q_cnts[] = {
Kamal Heib7c16f472017-01-18 15:25:09 +02004964 INIT_Q_COUNTER(out_of_sequence),
4965};
4966
Parav Pandite1f24a72017-04-16 07:29:29 +03004967static const struct mlx5_ib_counter retrans_q_cnts[] = {
Kamal Heib7c16f472017-01-18 15:25:09 +02004968 INIT_Q_COUNTER(duplicate_request),
4969 INIT_Q_COUNTER(rnr_nak_retry_err),
4970 INIT_Q_COUNTER(packet_seq_err),
4971 INIT_Q_COUNTER(implied_nak_seq_err),
4972 INIT_Q_COUNTER(local_ack_timeout_err),
4973};
4974
Parav Pandite1f24a72017-04-16 07:29:29 +03004975#define INIT_CONG_COUNTER(_name) \
4976 { .name = #_name, .offset = \
4977 MLX5_BYTE_OFF(query_cong_statistics_out, _name ## _high)}
4978
4979static const struct mlx5_ib_counter cong_cnts[] = {
4980 INIT_CONG_COUNTER(rp_cnp_ignored),
4981 INIT_CONG_COUNTER(rp_cnp_handled),
4982 INIT_CONG_COUNTER(np_ecn_marked_roce_packets),
4983 INIT_CONG_COUNTER(np_cnp_sent),
4984};
4985
Parav Pandit58dcb602017-06-19 07:19:37 +03004986static const struct mlx5_ib_counter extended_err_cnts[] = {
4987 INIT_Q_COUNTER(resp_local_length_error),
4988 INIT_Q_COUNTER(resp_cqe_error),
4989 INIT_Q_COUNTER(req_cqe_error),
4990 INIT_Q_COUNTER(req_remote_invalid_request),
4991 INIT_Q_COUNTER(req_remote_access_errors),
4992 INIT_Q_COUNTER(resp_remote_access_errors),
4993 INIT_Q_COUNTER(resp_cqe_flush_error),
4994 INIT_Q_COUNTER(req_cqe_flush_error),
4995};
4996
Talat Batheesh9f876f32018-06-21 15:37:56 +03004997#define INIT_EXT_PPCNT_COUNTER(_name) \
4998 { .name = #_name, .offset = \
4999 MLX5_BYTE_OFF(ppcnt_reg, \
5000 counter_set.eth_extended_cntrs_grp_data_layout._name##_high)}
5001
5002static const struct mlx5_ib_counter ext_ppcnt_cnts[] = {
5003 INIT_EXT_PPCNT_COUNTER(rx_icrc_encapsulated),
5004};
5005
Parav Pandite1f24a72017-04-16 07:29:29 +03005006static void mlx5_ib_dealloc_counters(struct mlx5_ib_dev *dev)
Mark Bloch0837e862016-06-17 15:10:55 +03005007{
Daniel Jurgensaac44922018-01-04 17:25:40 +02005008 int i;
Mark Bloch0837e862016-06-17 15:10:55 +03005009
Kamal Heib7c16f472017-01-18 15:25:09 +02005010 for (i = 0; i < dev->num_ports; i++) {
Parav Pandit921c0f52018-07-08 13:40:30 +03005011 if (dev->port[i].cnts.set_id_valid)
Daniel Jurgensaac44922018-01-04 17:25:40 +02005012 mlx5_core_dealloc_q_counter(dev->mdev,
5013 dev->port[i].cnts.set_id);
Parav Pandite1f24a72017-04-16 07:29:29 +03005014 kfree(dev->port[i].cnts.names);
5015 kfree(dev->port[i].cnts.offsets);
Kamal Heib7c16f472017-01-18 15:25:09 +02005016 }
5017}
5018
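/*
 * Size the per-port name/offset arrays according to which optional
 * counter groups (out-of-sequence, retransmission, extended errors,
 * congestion, extended PPCNT) the device exposes.
 */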
Parav Pandite1f24a72017-04-16 07:29:29 +03005019static int __mlx5_ib_alloc_counters(struct mlx5_ib_dev *dev,
5020 struct mlx5_ib_counters *cnts)
Kamal Heib7c16f472017-01-18 15:25:09 +02005021{
5022 u32 num_counters;
5023
5024 num_counters = ARRAY_SIZE(basic_q_cnts);
5025
5026 if (MLX5_CAP_GEN(dev->mdev, out_of_seq_cnt))
5027 num_counters += ARRAY_SIZE(out_of_seq_q_cnts);
5028
5029 if (MLX5_CAP_GEN(dev->mdev, retransmission_q_counters))
5030 num_counters += ARRAY_SIZE(retrans_q_cnts);
Parav Pandit58dcb602017-06-19 07:19:37 +03005031
5032 if (MLX5_CAP_GEN(dev->mdev, enhanced_error_q_counters))
5033 num_counters += ARRAY_SIZE(extended_err_cnts);
5034
Parav Pandite1f24a72017-04-16 07:29:29 +03005035 cnts->num_q_counters = num_counters;
Kamal Heib7c16f472017-01-18 15:25:09 +02005036
Parav Pandite1f24a72017-04-16 07:29:29 +03005037 if (MLX5_CAP_GEN(dev->mdev, cc_query_allowed)) {
5038 cnts->num_cong_counters = ARRAY_SIZE(cong_cnts);
5039 num_counters += ARRAY_SIZE(cong_cnts);
5040 }
Talat Batheesh9f876f32018-06-21 15:37:56 +03005041 if (MLX5_CAP_PCAM_FEATURE(dev->mdev, rx_icrc_encapsulated_counter)) {
5042 cnts->num_ext_ppcnt_counters = ARRAY_SIZE(ext_ppcnt_cnts);
5043 num_counters += ARRAY_SIZE(ext_ppcnt_cnts);
5044 }
Parav Pandite1f24a72017-04-16 07:29:29 +03005045 cnts->names = kcalloc(num_counters, sizeof(cnts->names), GFP_KERNEL);
5046 if (!cnts->names)
Kamal Heib7c16f472017-01-18 15:25:09 +02005047 return -ENOMEM;
5048
Parav Pandite1f24a72017-04-16 07:29:29 +03005049 cnts->offsets = kcalloc(num_counters,
5050 sizeof(cnts->offsets), GFP_KERNEL);
5051 if (!cnts->offsets)
Kamal Heib7c16f472017-01-18 15:25:09 +02005052 goto err_names;
5053
Kamal Heib7c16f472017-01-18 15:25:09 +02005054 return 0;
5055
5056err_names:
Parav Pandite1f24a72017-04-16 07:29:29 +03005057 kfree(cnts->names);
Daniel Jurgensaac44922018-01-04 17:25:40 +02005058 cnts->names = NULL;
Kamal Heib7c16f472017-01-18 15:25:09 +02005059 return -ENOMEM;
5060}
5061
Parav Pandite1f24a72017-04-16 07:29:29 +03005062static void mlx5_ib_fill_counters(struct mlx5_ib_dev *dev,
5063 const char **names,
5064 size_t *offsets)
Kamal Heib7c16f472017-01-18 15:25:09 +02005065{
5066 int i;
5067 int j = 0;
5068
5069 for (i = 0; i < ARRAY_SIZE(basic_q_cnts); i++, j++) {
5070 names[j] = basic_q_cnts[i].name;
5071 offsets[j] = basic_q_cnts[i].offset;
5072 }
5073
5074 if (MLX5_CAP_GEN(dev->mdev, out_of_seq_cnt)) {
5075 for (i = 0; i < ARRAY_SIZE(out_of_seq_q_cnts); i++, j++) {
5076 names[j] = out_of_seq_q_cnts[i].name;
5077 offsets[j] = out_of_seq_q_cnts[i].offset;
5078 }
5079 }
5080
5081 if (MLX5_CAP_GEN(dev->mdev, retransmission_q_counters)) {
5082 for (i = 0; i < ARRAY_SIZE(retrans_q_cnts); i++, j++) {
5083 names[j] = retrans_q_cnts[i].name;
5084 offsets[j] = retrans_q_cnts[i].offset;
5085 }
5086 }
Parav Pandite1f24a72017-04-16 07:29:29 +03005087
Parav Pandit58dcb602017-06-19 07:19:37 +03005088 if (MLX5_CAP_GEN(dev->mdev, enhanced_error_q_counters)) {
5089 for (i = 0; i < ARRAY_SIZE(extended_err_cnts); i++, j++) {
5090 names[j] = extended_err_cnts[i].name;
5091 offsets[j] = extended_err_cnts[i].offset;
5092 }
5093 }
5094
Parav Pandite1f24a72017-04-16 07:29:29 +03005095 if (MLX5_CAP_GEN(dev->mdev, cc_query_allowed)) {
5096 for (i = 0; i < ARRAY_SIZE(cong_cnts); i++, j++) {
5097 names[j] = cong_cnts[i].name;
5098 offsets[j] = cong_cnts[i].offset;
5099 }
5100 }
Talat Batheesh9f876f32018-06-21 15:37:56 +03005101
5102 if (MLX5_CAP_PCAM_FEATURE(dev->mdev, rx_icrc_encapsulated_counter)) {
5103 for (i = 0; i < ARRAY_SIZE(ext_ppcnt_cnts); i++, j++) {
5104 names[j] = ext_ppcnt_cnts[i].name;
5105 offsets[j] = ext_ppcnt_cnts[i].offset;
5106 }
5107 }
Mark Bloch0837e862016-06-17 15:10:55 +03005108}
5109
Parav Pandite1f24a72017-04-16 07:29:29 +03005110static int mlx5_ib_alloc_counters(struct mlx5_ib_dev *dev)
Mark Bloch0837e862016-06-17 15:10:55 +03005111{
Daniel Jurgensaac44922018-01-04 17:25:40 +02005112 int err = 0;
Mark Bloch0837e862016-06-17 15:10:55 +03005113 int i;
Mark Bloch0837e862016-06-17 15:10:55 +03005114
5115 for (i = 0; i < dev->num_ports; i++) {
Daniel Jurgensaac44922018-01-04 17:25:40 +02005116 err = __mlx5_ib_alloc_counters(dev, &dev->port[i].cnts);
5117 if (err)
5118 goto err_alloc;
Kamal Heib7c16f472017-01-18 15:25:09 +02005119
Daniel Jurgensaac44922018-01-04 17:25:40 +02005120 mlx5_ib_fill_counters(dev, dev->port[i].cnts.names,
5121 dev->port[i].cnts.offsets);
5122
5123 err = mlx5_core_alloc_q_counter(dev->mdev,
5124 &dev->port[i].cnts.set_id);
5125 if (err) {
Mark Bloch0837e862016-06-17 15:10:55 +03005126 mlx5_ib_warn(dev,
5127 "couldn't allocate queue counter for port %d, err %d\n",
Daniel Jurgensaac44922018-01-04 17:25:40 +02005128 i + 1, err);
5129 goto err_alloc;
Mark Bloch0837e862016-06-17 15:10:55 +03005130 }
Daniel Jurgensaac44922018-01-04 17:25:40 +02005131 dev->port[i].cnts.set_id_valid = true;
Mark Bloch0837e862016-06-17 15:10:55 +03005132 }
5133
5134 return 0;
5135
Daniel Jurgensaac44922018-01-04 17:25:40 +02005136err_alloc:
5137 mlx5_ib_dealloc_counters(dev);
5138 return err;
Mark Bloch0837e862016-06-17 15:10:55 +03005139}
5140
Mark Bloch0ad17a82016-06-17 15:10:56 +03005141static struct rdma_hw_stats *mlx5_ib_alloc_hw_stats(struct ib_device *ibdev,
5142 u8 port_num)
5143{
Kamal Heib7c16f472017-01-18 15:25:09 +02005144 struct mlx5_ib_dev *dev = to_mdev(ibdev);
5145 struct mlx5_ib_port *port = &dev->port[port_num - 1];
Mark Bloch0ad17a82016-06-17 15:10:56 +03005146
5147 /* We support only per-port stats */
5148 if (port_num == 0)
5149 return NULL;
5150
Parav Pandite1f24a72017-04-16 07:29:29 +03005151 return rdma_alloc_hw_stats_struct(port->cnts.names,
5152 port->cnts.num_q_counters +
Talat Batheesh9f876f32018-06-21 15:37:56 +03005153 port->cnts.num_cong_counters +
5154 port->cnts.num_ext_ppcnt_counters,
Mark Bloch0ad17a82016-06-17 15:10:56 +03005155 RDMA_HW_STATS_DEFAULT_LIFESPAN);
5156}
5157
Daniel Jurgensaac44922018-01-04 17:25:40 +02005158static int mlx5_ib_query_q_counters(struct mlx5_core_dev *mdev,
Parav Pandite1f24a72017-04-16 07:29:29 +03005159 struct mlx5_ib_port *port,
5160 struct rdma_hw_stats *stats)
5161{
5162 int outlen = MLX5_ST_SZ_BYTES(query_q_counter_out);
5163 void *out;
5164 __be32 val;
5165 int ret, i;
5166
Leon Romanovsky1b9a07e2017-05-10 21:32:18 +03005167 out = kvzalloc(outlen, GFP_KERNEL);
Parav Pandite1f24a72017-04-16 07:29:29 +03005168 if (!out)
5169 return -ENOMEM;
5170
Daniel Jurgensaac44922018-01-04 17:25:40 +02005171 ret = mlx5_core_query_q_counter(mdev,
Parav Pandite1f24a72017-04-16 07:29:29 +03005172 port->cnts.set_id, 0,
5173 out, outlen);
5174 if (ret)
5175 goto free;
5176
5177 for (i = 0; i < port->cnts.num_q_counters; i++) {
5178 val = *(__be32 *)(out + port->cnts.offsets[i]);
5179 stats->value[i] = (u64)be32_to_cpu(val);
5180 }
5181
5182free:
5183 kvfree(out);
5184 return ret;
5185}
5186
Talat Batheesh9f876f32018-06-21 15:37:56 +03005187static int mlx5_ib_query_ext_ppcnt_counters(struct mlx5_ib_dev *dev,
5188 struct mlx5_ib_port *port,
5189 struct rdma_hw_stats *stats)
5190{
5191 int offset = port->cnts.num_q_counters + port->cnts.num_cong_counters;
5192 int sz = MLX5_ST_SZ_BYTES(ppcnt_reg);
5193 int ret, i;
5194 void *out;
5195
5196 out = kvzalloc(sz, GFP_KERNEL);
5197 if (!out)
5198 return -ENOMEM;
5199
5200 ret = mlx5_cmd_query_ext_ppcnt_counters(dev->mdev, out);
5201 if (ret)
5202 goto free;
5203
5204 for (i = 0; i < port->cnts.num_ext_ppcnt_counters; i++) {
5205 stats->value[i + offset] =
5206 be64_to_cpup((__be64 *)(out +
5207 port->cnts.offsets[i + offset]));
5208 }
5209
5210free:
5211 kvfree(out);
5212 return ret;
5213}
5214
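/*
 * get_hw_stats hook: q counters are read from the master mdev, extended
 * PPCNT counters from the PPCNT register, and congestion counters from
 * the native port mdev (which may differ under multiport/LAG).
 */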
Mark Bloch0ad17a82016-06-17 15:10:56 +03005215static int mlx5_ib_get_hw_stats(struct ib_device *ibdev,
5216 struct rdma_hw_stats *stats,
Kamal Heib7c16f472017-01-18 15:25:09 +02005217 u8 port_num, int index)
Mark Bloch0ad17a82016-06-17 15:10:56 +03005218{
5219 struct mlx5_ib_dev *dev = to_mdev(ibdev);
Kamal Heib7c16f472017-01-18 15:25:09 +02005220 struct mlx5_ib_port *port = &dev->port[port_num - 1];
Daniel Jurgensaac44922018-01-04 17:25:40 +02005221 struct mlx5_core_dev *mdev;
Parav Pandite1f24a72017-04-16 07:29:29 +03005222 int ret, num_counters;
Daniel Jurgensaac44922018-01-04 17:25:40 +02005223 u8 mdev_port_num;
Mark Bloch0ad17a82016-06-17 15:10:56 +03005224
Kamal Heib7c16f472017-01-18 15:25:09 +02005225 if (!stats)
Parav Pandite1f24a72017-04-16 07:29:29 +03005226 return -EINVAL;
Mark Bloch0ad17a82016-06-17 15:10:56 +03005227
Talat Batheesh9f876f32018-06-21 15:37:56 +03005228 num_counters = port->cnts.num_q_counters +
5229 port->cnts.num_cong_counters +
5230 port->cnts.num_ext_ppcnt_counters;
Daniel Jurgensaac44922018-01-04 17:25:40 +02005231
5232 /* q_counters are per IB device, query the master mdev */
5233 ret = mlx5_ib_query_q_counters(dev->mdev, port, stats);
Mark Bloch0ad17a82016-06-17 15:10:56 +03005234 if (ret)
Parav Pandite1f24a72017-04-16 07:29:29 +03005235 return ret;
Mark Bloch0ad17a82016-06-17 15:10:56 +03005236
Talat Batheesh9f876f32018-06-21 15:37:56 +03005237 if (MLX5_CAP_PCAM_FEATURE(dev->mdev, rx_icrc_encapsulated_counter)) {
5238 ret = mlx5_ib_query_ext_ppcnt_counters(dev, port, stats);
5239 if (ret)
5240 return ret;
5241 }
5242
Parav Pandite1f24a72017-04-16 07:29:29 +03005243 if (MLX5_CAP_GEN(dev->mdev, cc_query_allowed)) {
Daniel Jurgensaac44922018-01-04 17:25:40 +02005244 mdev = mlx5_ib_get_native_port_mdev(dev, port_num,
5245 &mdev_port_num);
5246 if (!mdev) {
5247 /* If the port is not affiliated yet, it is in the down state
5248 * and has no counters yet, so the values would all be
5249 * zero. No need to read from the HCA.
5250 */
5251 goto done;
5252 }
Majd Dibbiny71a0ff62017-12-21 17:38:26 +02005253 ret = mlx5_lag_query_cong_counters(dev->mdev,
5254 stats->value +
5255 port->cnts.num_q_counters,
5256 port->cnts.num_cong_counters,
5257 port->cnts.offsets +
5258 port->cnts.num_q_counters);
Daniel Jurgensaac44922018-01-04 17:25:40 +02005259
5260 mlx5_ib_put_native_port_mdev(dev, port_num);
Parav Pandite1f24a72017-04-16 07:29:29 +03005261 if (ret)
5262 return ret;
Mark Bloch0ad17a82016-06-17 15:10:56 +03005263 }
Kamal Heib7c16f472017-01-18 15:25:09 +02005264
Daniel Jurgensaac44922018-01-04 17:25:40 +02005265done:
Parav Pandite1f24a72017-04-16 07:29:29 +03005266 return num_counters;
Mark Bloch0ad17a82016-06-17 15:10:56 +03005267}
5268
Denis Drozdovf6a8a192018-08-14 14:08:51 +03005269static int mlx5_ib_rn_get_params(struct ib_device *device, u8 port_num,
5270 enum rdma_netdev_t type,
5271 struct rdma_netdev_alloc_params *params)
Erez Shitrit693dfd52017-04-27 17:01:34 +03005272{
5273 if (type != RDMA_NETDEV_IPOIB)
Denis Drozdovf6a8a192018-08-14 14:08:51 +03005274 return -EOPNOTSUPP;
Erez Shitrit693dfd52017-04-27 17:01:34 +03005275
Denis Drozdovf6a8a192018-08-14 14:08:51 +03005276 return mlx5_rdma_rn_get_params(to_mdev(device)->mdev, device, params);
Erez Shitrit693dfd52017-04-27 17:01:34 +03005277}
5278
Maor Gottliebfe248c32017-05-30 10:29:14 +03005279static void delay_drop_debugfs_cleanup(struct mlx5_ib_dev *dev)
5280{
5281 if (!dev->delay_drop.dbg)
5282 return;
5283 debugfs_remove_recursive(dev->delay_drop.dbg->dir_debugfs);
5284 kfree(dev->delay_drop.dbg);
5285 dev->delay_drop.dbg = NULL;
5286}
5287
Maor Gottlieb03404e82017-05-30 10:29:13 +03005288static void cancel_delay_drop(struct mlx5_ib_dev *dev)
5289{
5290 if (!(dev->ib_dev.attrs.raw_packet_caps & IB_RAW_PACKET_CAP_DELAY_DROP))
5291 return;
5292
5293 cancel_work_sync(&dev->delay_drop.delay_drop_work);
Maor Gottliebfe248c32017-05-30 10:29:14 +03005294 delay_drop_debugfs_cleanup(dev);
5295}
5296
5297static ssize_t delay_drop_timeout_read(struct file *filp, char __user *buf,
5298 size_t count, loff_t *pos)
5299{
5300 struct mlx5_ib_delay_drop *delay_drop = filp->private_data;
5301 char lbuf[20];
5302 int len;
5303
5304 len = snprintf(lbuf, sizeof(lbuf), "%u\n", delay_drop->timeout);
5305 return simple_read_from_buffer(buf, count, pos, lbuf, len);
5306}
5307
5308static ssize_t delay_drop_timeout_write(struct file *filp, const char __user *buf,
5309 size_t count, loff_t *pos)
5310{
5311 struct mlx5_ib_delay_drop *delay_drop = filp->private_data;
5312 u32 timeout;
5313 u32 var;
5314
5315 if (kstrtouint_from_user(buf, count, 0, &var))
5316 return -EFAULT;
5317
5318 timeout = min_t(u32, roundup(var, 100), MLX5_MAX_DELAY_DROP_TIMEOUT_MS *
5319 1000);
5320 if (timeout != var)
5321 mlx5_ib_dbg(delay_drop->dev, "Round delay drop timeout to %u usec\n",
5322 timeout);
5323
5324 delay_drop->timeout = timeout;
5325
5326 return count;
5327}
5328
5329static const struct file_operations fops_delay_drop_timeout = {
5330 .owner = THIS_MODULE,
5331 .open = simple_open,
5332 .write = delay_drop_timeout_write,
5333 .read = delay_drop_timeout_read,
5334};
5335
5336static int delay_drop_debugfs_init(struct mlx5_ib_dev *dev)
5337{
5338 struct mlx5_ib_dbg_delay_drop *dbg;
5339
5340 if (!mlx5_debugfs_root)
5341 return 0;
5342
5343 dbg = kzalloc(sizeof(*dbg), GFP_KERNEL);
5344 if (!dbg)
5345 return -ENOMEM;
5346
Sudip Mukherjeecbafad82017-09-18 12:28:48 +01005347 dev->delay_drop.dbg = dbg;
5348
Maor Gottliebfe248c32017-05-30 10:29:14 +03005349 dbg->dir_debugfs =
5350 debugfs_create_dir("delay_drop",
5351 dev->mdev->priv.dbg_root);
5352 if (!dbg->dir_debugfs)
Sudip Mukherjeecbafad82017-09-18 12:28:48 +01005353 goto out_debugfs;
Maor Gottliebfe248c32017-05-30 10:29:14 +03005354
5355 dbg->events_cnt_debugfs =
5356 debugfs_create_atomic_t("num_timeout_events", 0400,
5357 dbg->dir_debugfs,
5358 &dev->delay_drop.events_cnt);
5359 if (!dbg->events_cnt_debugfs)
5360 goto out_debugfs;
5361
5362 dbg->rqs_cnt_debugfs =
5363 debugfs_create_atomic_t("num_rqs", 0400,
5364 dbg->dir_debugfs,
5365 &dev->delay_drop.rqs_cnt);
5366 if (!dbg->rqs_cnt_debugfs)
5367 goto out_debugfs;
5368
5369 dbg->timeout_debugfs =
5370 debugfs_create_file("timeout", 0600,
5371 dbg->dir_debugfs,
5372 &dev->delay_drop,
5373 &fops_delay_drop_timeout);
5374 if (!dbg->timeout_debugfs)
5375 goto out_debugfs;
5376
5377 return 0;
5378
5379out_debugfs:
5380 delay_drop_debugfs_cleanup(dev);
5381 return -ENOMEM;
Maor Gottlieb03404e82017-05-30 10:29:13 +03005382}
5383
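/*
 * Initialize the delay-drop context (lock, work item, default timeout and
 * counters) when the device reports IB_RAW_PACKET_CAP_DELAY_DROP, then hook
 * it up to debugfs; a debugfs failure is only warned about.
 */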
5384static void init_delay_drop(struct mlx5_ib_dev *dev)
5385{
5386 if (!(dev->ib_dev.attrs.raw_packet_caps & IB_RAW_PACKET_CAP_DELAY_DROP))
5387 return;
5388
5389 mutex_init(&dev->delay_drop.lock);
5390 dev->delay_drop.dev = dev;
5391 dev->delay_drop.activate = false;
5392 dev->delay_drop.timeout = MLX5_MAX_DELAY_DROP_TIMEOUT_MS * 1000;
5393 INIT_WORK(&dev->delay_drop.delay_drop_work, delay_drop_handler);
Maor Gottliebfe248c32017-05-30 10:29:14 +03005394 atomic_set(&dev->delay_drop.rqs_cnt, 0);
5395 atomic_set(&dev->delay_drop.events_cnt, 0);
5396
5397 if (delay_drop_debugfs_init(dev))
5398 mlx5_ib_warn(dev, "Failed to init delay drop debugfs\n");
Maor Gottlieb03404e82017-05-30 10:29:13 +03005399}
5400
Daniel Jurgens32f69e42018-01-04 17:25:36 +02005401/* The mlx5_ib_multiport_mutex should be held when calling this function */
5402static void mlx5_ib_unbind_slave_port(struct mlx5_ib_dev *ibdev,
5403 struct mlx5_ib_multiport_info *mpi)
5404{
5405 u8 port_num = mlx5_core_native_port_num(mpi->mdev) - 1;
5406 struct mlx5_ib_port *port = &ibdev->port[port_num];
5407 int comps;
5408 int err;
5409 int i;
5410
Parav Pandita9e546e2018-01-04 17:25:39 +02005411 mlx5_ib_cleanup_cong_debugfs(ibdev, port_num);
5412
Daniel Jurgens32f69e42018-01-04 17:25:36 +02005413 spin_lock(&port->mp.mpi_lock);
5414 if (!mpi->ibdev) {
5415 spin_unlock(&port->mp.mpi_lock);
5416 return;
5417 }
Saeed Mahameeddf097a22018-11-26 14:39:00 -08005418
5419 if (mpi->mdev_events.notifier_call)
5420 mlx5_notifier_unregister(mpi->mdev, &mpi->mdev_events);
5421 mpi->mdev_events.notifier_call = NULL;
5422
Daniel Jurgens32f69e42018-01-04 17:25:36 +02005423 mpi->ibdev = NULL;
5424
5425 spin_unlock(&port->mp.mpi_lock);
5426 mlx5_remove_netdev_notifier(ibdev, port_num);
5427 spin_lock(&port->mp.mpi_lock);
5428
5429 comps = mpi->mdev_refcnt;
5430 if (comps) {
5431 mpi->unaffiliate = true;
5432 init_completion(&mpi->unref_comp);
5433 spin_unlock(&port->mp.mpi_lock);
5434
5435 for (i = 0; i < comps; i++)
5436 wait_for_completion(&mpi->unref_comp);
5437
5438 spin_lock(&port->mp.mpi_lock);
5439 mpi->unaffiliate = false;
5440 }
5441
5442 port->mp.mpi = NULL;
5443
5444 list_add_tail(&mpi->list, &mlx5_ib_unaffiliated_port_list);
5445
5446 spin_unlock(&port->mp.mpi_lock);
5447
5448 err = mlx5_nic_vport_unaffiliate_multiport(mpi->mdev);
5449
5450 mlx5_ib_dbg(ibdev, "unaffiliated port %d\n", port_num + 1);
5451	/* Only log an error if unaffiliation failed; the pointers above still
5452	 * had to be cleaned up and the port added back to the unaffiliated list.
5453	 */
5454 if (err)
5455 mlx5_ib_err(ibdev, "Failed to unaffiliate port %u\n",
5456 port_num + 1);
5457
5458 ibdev->roce[port_num].last_port_state = IB_PORT_DOWN;
5459}
5460
5461/* The mlx5_ib_multiport_mutex should be held when calling this function */
5462static bool mlx5_ib_bind_slave_port(struct mlx5_ib_dev *ibdev,
5463 struct mlx5_ib_multiport_info *mpi)
5464{
5465 u8 port_num = mlx5_core_native_port_num(mpi->mdev) - 1;
5466 int err;
5467
5468 spin_lock(&ibdev->port[port_num].mp.mpi_lock);
5469 if (ibdev->port[port_num].mp.mpi) {
Qing Huang25771882018-07-23 14:15:08 -07005470 mlx5_ib_dbg(ibdev, "port %d already affiliated.\n",
5471 port_num + 1);
Daniel Jurgens32f69e42018-01-04 17:25:36 +02005472 spin_unlock(&ibdev->port[port_num].mp.mpi_lock);
5473 return false;
5474 }
5475
5476 ibdev->port[port_num].mp.mpi = mpi;
5477 mpi->ibdev = ibdev;
Saeed Mahameeddf097a22018-11-26 14:39:00 -08005478 mpi->mdev_events.notifier_call = NULL;
Daniel Jurgens32f69e42018-01-04 17:25:36 +02005479 spin_unlock(&ibdev->port[port_num].mp.mpi_lock);
5480
5481 err = mlx5_nic_vport_affiliate_multiport(ibdev->mdev, mpi->mdev);
5482 if (err)
5483 goto unbind;
5484
5485 err = get_port_caps(ibdev, mlx5_core_native_port_num(mpi->mdev));
5486 if (err)
5487 goto unbind;
5488
5489 err = mlx5_add_netdev_notifier(ibdev, port_num);
5490 if (err) {
5491 mlx5_ib_err(ibdev, "failed adding netdev notifier for port %u\n",
5492 port_num + 1);
5493 goto unbind;
5494 }
5495
Saeed Mahameeddf097a22018-11-26 14:39:00 -08005496 mpi->mdev_events.notifier_call = mlx5_ib_event_slave_port;
5497 mlx5_notifier_register(mpi->mdev, &mpi->mdev_events);
5498
Parav Pandita9e546e2018-01-04 17:25:39 +02005499 err = mlx5_ib_init_cong_debugfs(ibdev, port_num);
5500 if (err)
5501 goto unbind;
5502
Daniel Jurgens32f69e42018-01-04 17:25:36 +02005503 return true;
5504
5505unbind:
5506 mlx5_ib_unbind_slave_port(ibdev, mpi);
5507 return false;
5508}
5509
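/*
 * On a multiport master device with an Ethernet link layer: enable RoCE,
 * build a stub multiport info entry for the native port and try to bind every
 * unaffiliated slave port that shares this device's system image GUID.
 */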
5510static int mlx5_ib_init_multiport_master(struct mlx5_ib_dev *dev)
5511{
5512 int port_num = mlx5_core_native_port_num(dev->mdev) - 1;
5513 enum rdma_link_layer ll = mlx5_ib_port_link_layer(&dev->ib_dev,
5514 port_num + 1);
5515 struct mlx5_ib_multiport_info *mpi;
5516 int err;
5517 int i;
5518
5519 if (!mlx5_core_is_mp_master(dev->mdev) || ll != IB_LINK_LAYER_ETHERNET)
5520 return 0;
5521
5522 err = mlx5_query_nic_vport_system_image_guid(dev->mdev,
5523 &dev->sys_image_guid);
5524 if (err)
5525 return err;
5526
5527 err = mlx5_nic_vport_enable_roce(dev->mdev);
5528 if (err)
5529 return err;
5530
5531 mutex_lock(&mlx5_ib_multiport_mutex);
5532 for (i = 0; i < dev->num_ports; i++) {
5533 bool bound = false;
5534
5535 /* build a stub multiport info struct for the native port. */
5536 if (i == port_num) {
5537 mpi = kzalloc(sizeof(*mpi), GFP_KERNEL);
5538 if (!mpi) {
5539 mutex_unlock(&mlx5_ib_multiport_mutex);
5540 mlx5_nic_vport_disable_roce(dev->mdev);
5541 return -ENOMEM;
5542 }
5543
5544 mpi->is_master = true;
5545 mpi->mdev = dev->mdev;
5546 mpi->sys_image_guid = dev->sys_image_guid;
5547 dev->port[i].mp.mpi = mpi;
5548 mpi->ibdev = dev;
5549 mpi = NULL;
5550 continue;
5551 }
5552
5553 list_for_each_entry(mpi, &mlx5_ib_unaffiliated_port_list,
5554 list) {
5555 if (dev->sys_image_guid == mpi->sys_image_guid &&
5556 (mlx5_core_native_port_num(mpi->mdev) - 1) == i) {
5557 bound = mlx5_ib_bind_slave_port(dev, mpi);
5558 }
5559
5560 if (bound) {
5561 dev_dbg(&mpi->mdev->pdev->dev, "removing port from unaffiliated list.\n");
5562 mlx5_ib_dbg(dev, "port %d bound\n", i + 1);
5563 list_del(&mpi->list);
5564 break;
5565 }
5566 }
5567 if (!bound) {
5568 get_port_caps(dev, i + 1);
5569 mlx5_ib_dbg(dev, "no free port found for port %d\n",
5570 i + 1);
5571 }
5572 }
5573
5574 list_add_tail(&dev->ib_dev_list, &mlx5_ib_dev_list);
5575 mutex_unlock(&mlx5_ib_multiport_mutex);
5576 return err;
5577}
5578
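/*
 * Undo mlx5_ib_init_multiport_master(): free the native port stub, unbind any
 * affiliated slave ports and disable RoCE on the master device.
 */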
5579static void mlx5_ib_cleanup_multiport_master(struct mlx5_ib_dev *dev)
5580{
5581 int port_num = mlx5_core_native_port_num(dev->mdev) - 1;
5582 enum rdma_link_layer ll = mlx5_ib_port_link_layer(&dev->ib_dev,
5583 port_num + 1);
5584 int i;
5585
5586 if (!mlx5_core_is_mp_master(dev->mdev) || ll != IB_LINK_LAYER_ETHERNET)
5587 return;
5588
5589 mutex_lock(&mlx5_ib_multiport_mutex);
5590 for (i = 0; i < dev->num_ports; i++) {
5591 if (dev->port[i].mp.mpi) {
5592 /* Destroy the native port stub */
5593 if (i == port_num) {
5594 kfree(dev->port[i].mp.mpi);
5595 dev->port[i].mp.mpi = NULL;
5596 } else {
5597 mlx5_ib_dbg(dev, "unbinding port_num: %d\n", i + 1);
5598 mlx5_ib_unbind_slave_port(dev, dev->port[i].mp.mpi);
5599 }
5600 }
5601 }
5602
5603 mlx5_ib_dbg(dev, "removing from devlist\n");
5604 list_del(&dev->ib_dev_list);
5605 mutex_unlock(&mlx5_ib_multiport_mutex);
5606
5607 mlx5_nic_vport_disable_roce(dev->mdev);
5608}
5609
Jason Gunthorpe9a119cd2018-07-04 08:50:28 +03005610ADD_UVERBS_ATTRIBUTES_SIMPLE(
5611 mlx5_ib_dm,
5612 UVERBS_OBJECT_DM,
5613 UVERBS_METHOD_DM_ALLOC,
5614 UVERBS_ATTR_PTR_OUT(MLX5_IB_ATTR_ALLOC_DM_RESP_START_OFFSET,
5615 UVERBS_ATTR_TYPE(u64),
Jason Gunthorpe83bb4442018-07-04 08:50:29 +03005616 UA_MANDATORY),
Jason Gunthorpe9a119cd2018-07-04 08:50:28 +03005617 UVERBS_ATTR_PTR_OUT(MLX5_IB_ATTR_ALLOC_DM_RESP_PAGE_INDEX,
5618 UVERBS_ATTR_TYPE(u16),
Jason Gunthorpe83bb4442018-07-04 08:50:29 +03005619 UA_MANDATORY));
Ariel Levkovich24da0012018-04-05 18:53:27 +03005620
Jason Gunthorpe9a119cd2018-07-04 08:50:28 +03005621ADD_UVERBS_ATTRIBUTES_SIMPLE(
5622 mlx5_ib_flow_action,
5623 UVERBS_OBJECT_FLOW_ACTION,
5624 UVERBS_METHOD_FLOW_ACTION_ESP_CREATE,
Jason Gunthorpebccd0622018-07-26 16:37:14 -06005625 UVERBS_ATTR_FLAGS_IN(MLX5_IB_ATTR_CREATE_FLOW_ACTION_FLAGS,
5626 enum mlx5_ib_uapi_flow_action_flags));
Aviad Yehezkelc6475a02018-03-28 09:27:50 +03005627
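/*
 * Driver-specific uverbs API definitions: the DEVX and flow chains (only when
 * user access is enabled) plus the DM and flow-action attributes added above.
 */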
Jason Gunthorpe0cbf4322018-11-12 22:59:50 +02005628static const struct uapi_definition mlx5_ib_defs[] = {
5629#if IS_ENABLED(CONFIG_INFINIBAND_USER_ACCESS)
Jason Gunthorpe36e235c2018-11-12 22:59:53 +02005630 UAPI_DEF_CHAIN(mlx5_ib_devx_defs),
Jason Gunthorpe0cbf4322018-11-12 22:59:50 +02005631 UAPI_DEF_CHAIN(mlx5_ib_flow_defs),
5632#endif
5633
5634 UAPI_DEF_CHAIN_OBJ_TREE(UVERBS_OBJECT_FLOW_ACTION,
5635 &mlx5_ib_flow_action),
5636 UAPI_DEF_CHAIN_OBJ_TREE(UVERBS_OBJECT_DM, &mlx5_ib_dm),
5637 {}
5638};
5639
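/*
 * Read the hardware flow counters attached to an ib_counters object and
 * scatter them into the caller's buffer according to the description/index
 * pairs recorded when the counters were bound.
 */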
Raed Salem1a1e03d2018-05-31 16:43:41 +03005640static int mlx5_ib_read_counters(struct ib_counters *counters,
5641 struct ib_counters_read_attr *read_attr,
5642 struct uverbs_attr_bundle *attrs)
5643{
5644 struct mlx5_ib_mcounters *mcounters = to_mcounters(counters);
5645 struct mlx5_read_counters_attr mread_attr = {};
5646 struct mlx5_ib_flow_counters_desc *desc;
5647 int ret, i;
5648
5649 mutex_lock(&mcounters->mcntrs_mutex);
5650 if (mcounters->cntrs_max_index > read_attr->ncounters) {
5651 ret = -EINVAL;
5652 goto err_bound;
5653 }
5654
5655 mread_attr.out = kcalloc(mcounters->counters_num, sizeof(u64),
5656 GFP_KERNEL);
5657 if (!mread_attr.out) {
5658 ret = -ENOMEM;
5659 goto err_bound;
5660 }
5661
5662 mread_attr.hw_cntrs_hndl = mcounters->hw_cntrs_hndl;
5663 mread_attr.flags = read_attr->flags;
5664 ret = mcounters->read_counters(counters->device, &mread_attr);
5665 if (ret)
5666 goto err_read;
5667
5668	/* Walk the counters data array and accumulate each hardware counter
5669	 * into the user buffer slot given by its description/index pair.
5670	 */
5671 desc = mcounters->counters_data;
5672 for (i = 0; i < mcounters->ncounters; i++)
5673 read_attr->counters_buff[desc[i].index] += mread_attr.out[desc[i].description];
5674
5675err_read:
5676 kfree(mread_attr.out);
5677err_bound:
5678 mutex_unlock(&mcounters->mcntrs_mutex);
5679 return ret;
5680}
5681
Raed Salemb29e2a12018-05-31 16:43:38 +03005682static int mlx5_ib_destroy_counters(struct ib_counters *counters)
5683{
5684 struct mlx5_ib_mcounters *mcounters = to_mcounters(counters);
5685
Raed Salem3b3233f2018-05-31 16:43:39 +03005686 counters_clear_description(counters);
5687 if (mcounters->hw_cntrs_hndl)
5688 mlx5_fc_destroy(to_mdev(counters->device)->mdev,
5689 mcounters->hw_cntrs_hndl);
5690
Raed Salemb29e2a12018-05-31 16:43:38 +03005691 kfree(mcounters);
5692
5693 return 0;
5694}
5695
5696static struct ib_counters *mlx5_ib_create_counters(struct ib_device *device,
5697 struct uverbs_attr_bundle *attrs)
5698{
5699 struct mlx5_ib_mcounters *mcounters;
5700
5701 mcounters = kzalloc(sizeof(*mcounters), GFP_KERNEL);
5702 if (!mcounters)
5703 return ERR_PTR(-ENOMEM);
5704
Raed Salem3b3233f2018-05-31 16:43:39 +03005705 mutex_init(&mcounters->mcntrs_mutex);
5706
Raed Salemb29e2a12018-05-31 16:43:38 +03005707 return &mcounters->ibcntrs;
5708}
5709
Mark Blochb5ca15a2018-01-23 11:16:30 +00005710void mlx5_ib_stage_init_cleanup(struct mlx5_ib_dev *dev)
Eli Cohene126ba92013-07-07 17:25:49 +03005711{
Daniel Jurgens32f69e42018-01-04 17:25:36 +02005712 mlx5_ib_cleanup_multiport_master(dev);
Mark Bloch3cc297d2018-01-01 13:07:03 +02005713#ifdef CONFIG_INFINIBAND_ON_DEMAND_PAGING
5714 cleanup_srcu_struct(&dev->mr_srcu);
5715#endif
Mark Bloch16c19752018-01-01 13:06:58 +02005716 kfree(dev->port);
5717}
5718
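/*
 * First profile stage: allocate the per-port array, set up multiport
 * affiliation, query port caps and initialize the generic ib_device fields,
 * locks and lists used by the later stages.
 */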
Mark Blochb5ca15a2018-01-23 11:16:30 +00005719int mlx5_ib_stage_init_init(struct mlx5_ib_dev *dev)
Mark Bloch16c19752018-01-01 13:06:58 +02005720{
5721 struct mlx5_core_dev *mdev = dev->mdev;
Eli Cohene126ba92013-07-07 17:25:49 +03005722 int err;
Daniel Jurgens32f69e42018-01-04 17:25:36 +02005723 int i;
Eli Cohene126ba92013-07-07 17:25:49 +03005724
Daniel Jurgens508562d2018-01-04 17:25:34 +02005725 dev->port = kcalloc(dev->num_ports, sizeof(*dev->port),
Mark Bloch0837e862016-06-17 15:10:55 +03005726 GFP_KERNEL);
5727 if (!dev->port)
Mark Bloch16c19752018-01-01 13:06:58 +02005728 return -ENOMEM;
Mark Bloch0837e862016-06-17 15:10:55 +03005729
Daniel Jurgens32f69e42018-01-04 17:25:36 +02005730 for (i = 0; i < dev->num_ports; i++) {
5731 spin_lock_init(&dev->port[i].mp.mpi_lock);
5732 rwlock_init(&dev->roce[i].netdev_lock);
5733 }
5734
5735 err = mlx5_ib_init_multiport_master(dev);
Eli Cohene126ba92013-07-07 17:25:49 +03005736 if (err)
Mark Bloch0837e862016-06-17 15:10:55 +03005737 goto err_free_port;
Eli Cohene126ba92013-07-07 17:25:49 +03005738
Daniel Jurgens32f69e42018-01-04 17:25:36 +02005739 if (!mlx5_core_mp_enabled(mdev)) {
Daniel Jurgens32f69e42018-01-04 17:25:36 +02005740 for (i = 1; i <= dev->num_ports; i++) {
5741 err = get_port_caps(dev, i);
5742 if (err)
5743 break;
5744 }
5745 } else {
5746 err = get_port_caps(dev, mlx5_core_native_port_num(mdev));
5747 }
5748 if (err)
5749 goto err_mp;
5750
Majd Dibbiny1b5daf12015-06-04 19:30:46 +03005751 if (mlx5_use_mad_ifc(dev))
5752 get_ext_port_caps(dev);
Eli Cohene126ba92013-07-07 17:25:49 +03005753
Eli Cohene126ba92013-07-07 17:25:49 +03005754 dev->ib_dev.owner = THIS_MODULE;
5755 dev->ib_dev.node_type = RDMA_NODE_IB_CA;
Sagi Grimbergc6790aa2015-09-24 10:34:23 +03005756 dev->ib_dev.local_dma_lkey = 0 /* not supported for now */;
Daniel Jurgens508562d2018-01-04 17:25:34 +02005757 dev->ib_dev.phys_port_cnt = dev->num_ports;
Saeed Mahameedf2f3df52018-11-19 10:52:38 -08005758 dev->ib_dev.num_comp_vectors = mlx5_comp_vectors_count(mdev);
Bart Van Assche9b0c2892017-01-20 13:04:21 -08005759 dev->ib_dev.dev.parent = &mdev->pdev->dev;
Eli Cohene126ba92013-07-07 17:25:49 +03005760
Mark Bloch3cc297d2018-01-01 13:07:03 +02005761 mutex_init(&dev->cap_mask_mutex);
5762 INIT_LIST_HEAD(&dev->qp_list);
5763 spin_lock_init(&dev->reset_flow_resource_lock);
5764
Ariel Levkovich24da0012018-04-05 18:53:27 +03005765 spin_lock_init(&dev->memic.memic_lock);
5766 dev->memic.dev = mdev;
5767
Mark Bloch3cc297d2018-01-01 13:07:03 +02005768#ifdef CONFIG_INFINIBAND_ON_DEMAND_PAGING
5769 err = init_srcu_struct(&dev->mr_srcu);
5770 if (err)
5771 goto err_free_port;
5772#endif
5773
Mark Bloch16c19752018-01-01 13:06:58 +02005774 return 0;
Daniel Jurgens32f69e42018-01-04 17:25:36 +02005775err_mp:
5776 mlx5_ib_cleanup_multiport_master(dev);
Mark Bloch16c19752018-01-01 13:06:58 +02005777
5778err_free_port:
5779 kfree(dev->port);
5780
5781	return err;
5782}
5783
Mark Bloch9a4ca382018-01-16 14:42:35 +00005784static int mlx5_ib_stage_flow_db_init(struct mlx5_ib_dev *dev)
5785{
5786 dev->flow_db = kzalloc(sizeof(*dev->flow_db), GFP_KERNEL);
5787
5788 if (!dev->flow_db)
5789 return -ENOMEM;
5790
5791 mutex_init(&dev->flow_db->lock);
5792
5793 return 0;
5794}
5795
Mark Blochb5ca15a2018-01-23 11:16:30 +00005796int mlx5_ib_stage_rep_flow_db_init(struct mlx5_ib_dev *dev)
5797{
5798 struct mlx5_ib_dev *nic_dev;
5799
5800 nic_dev = mlx5_ib_get_uplink_ibdev(dev->mdev->priv.eswitch);
5801
5802 if (!nic_dev)
5803 return -EINVAL;
5804
5805 dev->flow_db = nic_dev->flow_db;
5806
5807 return 0;
5808}
5809
Mark Bloch9a4ca382018-01-16 14:42:35 +00005810static void mlx5_ib_stage_flow_db_cleanup(struct mlx5_ib_dev *dev)
5811{
5812 kfree(dev->flow_db);
5813}
5814
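/*
 * Core verbs dispatch table shared by all profiles; the optional op tables
 * below are added in mlx5_ib_stage_caps_init() only when the corresponding
 * capability or kernel config is present.
 */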
Kamal Heib96458233e2018-12-10 21:09:38 +02005815static const struct ib_device_ops mlx5_ib_dev_ops = {
5816 .add_gid = mlx5_ib_add_gid,
5817 .alloc_mr = mlx5_ib_alloc_mr,
5818 .alloc_pd = mlx5_ib_alloc_pd,
5819 .alloc_ucontext = mlx5_ib_alloc_ucontext,
5820 .attach_mcast = mlx5_ib_mcg_attach,
5821 .check_mr_status = mlx5_ib_check_mr_status,
5822 .create_ah = mlx5_ib_create_ah,
5823 .create_counters = mlx5_ib_create_counters,
5824 .create_cq = mlx5_ib_create_cq,
5825 .create_flow = mlx5_ib_create_flow,
5826 .create_qp = mlx5_ib_create_qp,
5827 .create_srq = mlx5_ib_create_srq,
5828 .dealloc_pd = mlx5_ib_dealloc_pd,
5829 .dealloc_ucontext = mlx5_ib_dealloc_ucontext,
5830 .del_gid = mlx5_ib_del_gid,
5831 .dereg_mr = mlx5_ib_dereg_mr,
5832 .destroy_ah = mlx5_ib_destroy_ah,
5833 .destroy_counters = mlx5_ib_destroy_counters,
5834 .destroy_cq = mlx5_ib_destroy_cq,
5835 .destroy_flow = mlx5_ib_destroy_flow,
5836 .destroy_flow_action = mlx5_ib_destroy_flow_action,
5837 .destroy_qp = mlx5_ib_destroy_qp,
5838 .destroy_srq = mlx5_ib_destroy_srq,
5839 .detach_mcast = mlx5_ib_mcg_detach,
5840 .disassociate_ucontext = mlx5_ib_disassociate_ucontext,
5841 .drain_rq = mlx5_ib_drain_rq,
5842 .drain_sq = mlx5_ib_drain_sq,
5843 .get_dev_fw_str = get_dev_fw_str,
5844 .get_dma_mr = mlx5_ib_get_dma_mr,
5845 .get_link_layer = mlx5_ib_port_link_layer,
5846 .map_mr_sg = mlx5_ib_map_mr_sg,
5847 .mmap = mlx5_ib_mmap,
5848 .modify_cq = mlx5_ib_modify_cq,
5849 .modify_device = mlx5_ib_modify_device,
5850 .modify_port = mlx5_ib_modify_port,
5851 .modify_qp = mlx5_ib_modify_qp,
5852 .modify_srq = mlx5_ib_modify_srq,
5853 .poll_cq = mlx5_ib_poll_cq,
5854 .post_recv = mlx5_ib_post_recv,
5855 .post_send = mlx5_ib_post_send,
5856 .post_srq_recv = mlx5_ib_post_srq_recv,
5857 .process_mad = mlx5_ib_process_mad,
5858 .query_ah = mlx5_ib_query_ah,
5859 .query_device = mlx5_ib_query_device,
5860 .query_gid = mlx5_ib_query_gid,
5861 .query_pkey = mlx5_ib_query_pkey,
5862 .query_qp = mlx5_ib_query_qp,
5863 .query_srq = mlx5_ib_query_srq,
5864 .read_counters = mlx5_ib_read_counters,
5865 .reg_user_mr = mlx5_ib_reg_user_mr,
5866 .req_notify_cq = mlx5_ib_arm_cq,
5867 .rereg_user_mr = mlx5_ib_rereg_user_mr,
5868 .resize_cq = mlx5_ib_resize_cq,
5869};
5870
5871static const struct ib_device_ops mlx5_ib_dev_flow_ipsec_ops = {
5872 .create_flow_action_esp = mlx5_ib_create_flow_action_esp,
5873 .modify_flow_action_esp = mlx5_ib_modify_flow_action_esp,
5874};
5875
5876static const struct ib_device_ops mlx5_ib_dev_ipoib_enhanced_ops = {
5877 .rdma_netdev_get_params = mlx5_ib_rn_get_params,
5878};
5879
5880static const struct ib_device_ops mlx5_ib_dev_sriov_ops = {
5881 .get_vf_config = mlx5_ib_get_vf_config,
5882 .get_vf_stats = mlx5_ib_get_vf_stats,
5883 .set_vf_guid = mlx5_ib_set_vf_guid,
5884 .set_vf_link_state = mlx5_ib_set_vf_link_state,
5885};
5886
5887static const struct ib_device_ops mlx5_ib_dev_mw_ops = {
5888 .alloc_mw = mlx5_ib_alloc_mw,
5889 .dealloc_mw = mlx5_ib_dealloc_mw,
5890};
5891
5892static const struct ib_device_ops mlx5_ib_dev_xrc_ops = {
5893 .alloc_xrcd = mlx5_ib_alloc_xrcd,
5894 .dealloc_xrcd = mlx5_ib_dealloc_xrcd,
5895};
5896
5897static const struct ib_device_ops mlx5_ib_dev_dm_ops = {
5898 .alloc_dm = mlx5_ib_alloc_dm,
5899 .dealloc_dm = mlx5_ib_dealloc_dm,
5900 .reg_dm_mr = mlx5_ib_reg_dm_mr,
5901};
5902
Mark Blochb5ca15a2018-01-23 11:16:30 +00005903int mlx5_ib_stage_caps_init(struct mlx5_ib_dev *dev)
Mark Bloch16c19752018-01-01 13:06:58 +02005904{
5905 struct mlx5_core_dev *mdev = dev->mdev;
Mark Bloch16c19752018-01-01 13:06:58 +02005906 int err;
5907
Eli Cohene126ba92013-07-07 17:25:49 +03005908 dev->ib_dev.uverbs_abi_ver = MLX5_IB_UVERBS_ABI_VERSION;
5909 dev->ib_dev.uverbs_cmd_mask =
5910 (1ull << IB_USER_VERBS_CMD_GET_CONTEXT) |
5911 (1ull << IB_USER_VERBS_CMD_QUERY_DEVICE) |
5912 (1ull << IB_USER_VERBS_CMD_QUERY_PORT) |
5913 (1ull << IB_USER_VERBS_CMD_ALLOC_PD) |
5914 (1ull << IB_USER_VERBS_CMD_DEALLOC_PD) |
Moni Shoua41c450f2016-11-23 08:23:26 +02005915 (1ull << IB_USER_VERBS_CMD_CREATE_AH) |
5916 (1ull << IB_USER_VERBS_CMD_DESTROY_AH) |
Eli Cohene126ba92013-07-07 17:25:49 +03005917 (1ull << IB_USER_VERBS_CMD_REG_MR) |
Noa Osherovich56e11d62016-02-29 16:46:51 +02005918 (1ull << IB_USER_VERBS_CMD_REREG_MR) |
Eli Cohene126ba92013-07-07 17:25:49 +03005919 (1ull << IB_USER_VERBS_CMD_DEREG_MR) |
5920 (1ull << IB_USER_VERBS_CMD_CREATE_COMP_CHANNEL) |
5921 (1ull << IB_USER_VERBS_CMD_CREATE_CQ) |
5922 (1ull << IB_USER_VERBS_CMD_RESIZE_CQ) |
5923 (1ull << IB_USER_VERBS_CMD_DESTROY_CQ) |
5924 (1ull << IB_USER_VERBS_CMD_CREATE_QP) |
5925 (1ull << IB_USER_VERBS_CMD_MODIFY_QP) |
5926 (1ull << IB_USER_VERBS_CMD_QUERY_QP) |
5927 (1ull << IB_USER_VERBS_CMD_DESTROY_QP) |
5928 (1ull << IB_USER_VERBS_CMD_ATTACH_MCAST) |
5929 (1ull << IB_USER_VERBS_CMD_DETACH_MCAST) |
5930 (1ull << IB_USER_VERBS_CMD_CREATE_SRQ) |
5931 (1ull << IB_USER_VERBS_CMD_MODIFY_SRQ) |
5932 (1ull << IB_USER_VERBS_CMD_QUERY_SRQ) |
5933 (1ull << IB_USER_VERBS_CMD_DESTROY_SRQ) |
5934 (1ull << IB_USER_VERBS_CMD_CREATE_XSRQ) |
5935 (1ull << IB_USER_VERBS_CMD_OPEN_QP);
Haggai Eran1707cb42015-02-08 13:28:52 +02005936 dev->ib_dev.uverbs_ex_cmd_mask =
Matan Barakd4584dd2016-01-28 17:51:46 +02005937 (1ull << IB_USER_VERBS_EX_CMD_QUERY_DEVICE) |
5938 (1ull << IB_USER_VERBS_EX_CMD_CREATE_CQ) |
Bodong Wang7d29f342016-12-01 13:43:16 +02005939 (1ull << IB_USER_VERBS_EX_CMD_CREATE_QP) |
Yonatan Cohenb0e9df62017-11-13 10:51:15 +02005940 (1ull << IB_USER_VERBS_EX_CMD_MODIFY_QP) |
Kamal Heib96458233e2018-12-10 21:09:38 +02005941 (1ull << IB_USER_VERBS_EX_CMD_MODIFY_CQ) |
5942 (1ull << IB_USER_VERBS_EX_CMD_CREATE_FLOW) |
5943 (1ull << IB_USER_VERBS_EX_CMD_DESTROY_FLOW);
Eli Cohene126ba92013-07-07 17:25:49 +03005944
Denis Drozdovf6a8a192018-08-14 14:08:51 +03005945 if (MLX5_CAP_GEN(mdev, ipoib_enhanced_offloads) &&
5946 IS_ENABLED(CONFIG_MLX5_CORE_IPOIB))
Kamal Heib96458233e2018-12-10 21:09:38 +02005947 ib_set_device_ops(&dev->ib_dev,
5948 &mlx5_ib_dev_ipoib_enhanced_ops);
Niranjana Vishwanathapura8e959602017-06-30 13:14:46 -07005949
Kamal Heib96458233e2018-12-10 21:09:38 +02005950 if (mlx5_core_is_pf(mdev))
5951 ib_set_device_ops(&dev->ib_dev, &mlx5_ib_dev_sriov_ops);
Maor Gottlieb7c2344c2016-06-17 14:56:44 +03005952
Max Gurtovoy6e8484c2017-05-28 10:53:11 +03005953 dev->umr_fence = mlx5_get_umr_fence(MLX5_CAP_GEN(mdev, umr_fence));
5954
Matan Barakd2370e02016-02-29 18:05:30 +02005955 if (MLX5_CAP_GEN(mdev, imaicl)) {
Matan Barakd2370e02016-02-29 18:05:30 +02005956 dev->ib_dev.uverbs_cmd_mask |=
5957 (1ull << IB_USER_VERBS_CMD_ALLOC_MW) |
5958 (1ull << IB_USER_VERBS_CMD_DEALLOC_MW);
Kamal Heib96458233e2018-12-10 21:09:38 +02005959 ib_set_device_ops(&dev->ib_dev, &mlx5_ib_dev_mw_ops);
Matan Barakd2370e02016-02-29 18:05:30 +02005960 }
5961
Saeed Mahameed938fe832015-05-28 22:28:41 +03005962 if (MLX5_CAP_GEN(mdev, xrc)) {
Eli Cohene126ba92013-07-07 17:25:49 +03005963 dev->ib_dev.uverbs_cmd_mask |=
5964 (1ull << IB_USER_VERBS_CMD_OPEN_XRCD) |
5965 (1ull << IB_USER_VERBS_CMD_CLOSE_XRCD);
Kamal Heib96458233e2018-12-10 21:09:38 +02005966 ib_set_device_ops(&dev->ib_dev, &mlx5_ib_dev_xrc_ops);
Eli Cohene126ba92013-07-07 17:25:49 +03005967 }
5968
Kamal Heib96458233e2018-12-10 21:09:38 +02005969 if (MLX5_CAP_DEV_MEM(mdev, memic))
5970 ib_set_device_ops(&dev->ib_dev, &mlx5_ib_dev_dm_ops);
Ariel Levkovich24da0012018-04-05 18:53:27 +03005971
Jason Gunthorpedfb631a2018-11-12 22:59:49 +02005972 if (mlx5_accel_ipsec_device_caps(dev->mdev) &
Kamal Heib96458233e2018-12-10 21:09:38 +02005973 MLX5_ACCEL_IPSEC_CAP_DEVICE)
5974 ib_set_device_ops(&dev->ib_dev, &mlx5_ib_dev_flow_ipsec_ops);
Matan Barak0ede73b2018-03-19 15:02:34 +02005975 dev->ib_dev.driver_id = RDMA_DRIVER_MLX5;
Kamal Heib96458233e2018-12-10 21:09:38 +02005976 ib_set_device_ops(&dev->ib_dev, &mlx5_ib_dev_ops);
Yishai Hadas81e30882017-06-08 16:15:09 +03005977
Jason Gunthorpe36e235c2018-11-12 22:59:53 +02005978 if (IS_ENABLED(CONFIG_INFINIBAND_USER_ACCESS))
5979 dev->ib_dev.driver_def = mlx5_ib_defs;
5980
Eli Cohene126ba92013-07-07 17:25:49 +03005981 err = init_node_data(dev);
5982 if (err)
Mark Bloch16c19752018-01-01 13:06:58 +02005983 return err;
Eli Cohene126ba92013-07-07 17:25:49 +03005984
Mark Blochc8b89922018-01-01 13:07:02 +02005985 if ((MLX5_CAP_GEN(dev->mdev, port_type) == MLX5_CAP_PORT_TYPE_ETH) &&
Jason Gunthorpee7996a92018-01-29 13:26:40 -07005986 (MLX5_CAP_GEN(dev->mdev, disable_local_lb_uc) ||
5987 MLX5_CAP_GEN(dev->mdev, disable_local_lb_mc)))
Mark Blocha560f1d2018-09-17 13:30:47 +03005988 mutex_init(&dev->lb.mutex);
Mark Blochc8b89922018-01-01 13:07:02 +02005989
Mark Bloch16c19752018-01-01 13:06:58 +02005990 return 0;
5991}
5992
Kamal Heib96458233e2018-12-10 21:09:38 +02005993static const struct ib_device_ops mlx5_ib_dev_port_ops = {
5994 .get_port_immutable = mlx5_port_immutable,
5995 .query_port = mlx5_ib_query_port,
5996};
5997
Mark Bloch8e6efa32017-11-06 12:22:13 +00005998static int mlx5_ib_stage_non_default_cb(struct mlx5_ib_dev *dev)
5999{
Kamal Heib96458233e2018-12-10 21:09:38 +02006000 ib_set_device_ops(&dev->ib_dev, &mlx5_ib_dev_port_ops);
Mark Bloch8e6efa32017-11-06 12:22:13 +00006001 return 0;
6002}
6003
Kamal Heib96458233e2018-12-10 21:09:38 +02006004static const struct ib_device_ops mlx5_ib_dev_port_rep_ops = {
6005 .get_port_immutable = mlx5_port_rep_immutable,
6006 .query_port = mlx5_ib_rep_query_port,
6007};
6008
Mark Blochb5ca15a2018-01-23 11:16:30 +00006009int mlx5_ib_stage_rep_non_default_cb(struct mlx5_ib_dev *dev)
Mark Bloch8e6efa32017-11-06 12:22:13 +00006010{
Kamal Heib96458233e2018-12-10 21:09:38 +02006011 ib_set_device_ops(&dev->ib_dev, &mlx5_ib_dev_port_rep_ops);
Mark Bloch8e6efa32017-11-06 12:22:13 +00006012 return 0;
6013}
6014
Kamal Heib96458233e2018-12-10 21:09:38 +02006015static const struct ib_device_ops mlx5_ib_dev_common_roce_ops = {
6016 .create_rwq_ind_table = mlx5_ib_create_rwq_ind_table,
6017 .create_wq = mlx5_ib_create_wq,
6018 .destroy_rwq_ind_table = mlx5_ib_destroy_rwq_ind_table,
6019 .destroy_wq = mlx5_ib_destroy_wq,
6020 .get_netdev = mlx5_ib_get_netdev,
6021 .modify_wq = mlx5_ib_modify_wq,
6022};
6023
Leon Romanovskye3f1ed12018-07-08 12:55:43 +03006024static int mlx5_ib_stage_common_roce_init(struct mlx5_ib_dev *dev)
Mark Bloch8e6efa32017-11-06 12:22:13 +00006025{
Leon Romanovskye3f1ed12018-07-08 12:55:43 +03006026 u8 port_num;
Mark Bloch8e6efa32017-11-06 12:22:13 +00006027 int i;
6028
6029 for (i = 0; i < dev->num_ports; i++) {
6030 dev->roce[i].dev = dev;
6031 dev->roce[i].native_port_num = i + 1;
6032 dev->roce[i].last_port_state = IB_PORT_DOWN;
6033 }
6034
Mark Bloch8e6efa32017-11-06 12:22:13 +00006035 dev->ib_dev.uverbs_ex_cmd_mask |=
6036 (1ull << IB_USER_VERBS_EX_CMD_CREATE_WQ) |
6037 (1ull << IB_USER_VERBS_EX_CMD_MODIFY_WQ) |
6038 (1ull << IB_USER_VERBS_EX_CMD_DESTROY_WQ) |
6039 (1ull << IB_USER_VERBS_EX_CMD_CREATE_RWQ_IND_TBL) |
6040 (1ull << IB_USER_VERBS_EX_CMD_DESTROY_RWQ_IND_TBL);
Kamal Heib96458233e2018-12-10 21:09:38 +02006041 ib_set_device_ops(&dev->ib_dev, &mlx5_ib_dev_common_roce_ops);
Mark Bloch8e6efa32017-11-06 12:22:13 +00006042
Leon Romanovskye3f1ed12018-07-08 12:55:43 +03006043 port_num = mlx5_core_native_port_num(dev->mdev) - 1;
6044
Mark Bloch8e6efa32017-11-06 12:22:13 +00006045 return mlx5_add_netdev_notifier(dev, port_num);
6046}
6047
6048static void mlx5_ib_stage_common_roce_cleanup(struct mlx5_ib_dev *dev)
6049{
6050 u8 port_num = mlx5_core_native_port_num(dev->mdev) - 1;
6051
6052 mlx5_remove_netdev_notifier(dev, port_num);
6053}
6054
6055int mlx5_ib_stage_rep_roce_init(struct mlx5_ib_dev *dev)
6056{
6057 struct mlx5_core_dev *mdev = dev->mdev;
6058 enum rdma_link_layer ll;
6059 int port_type_cap;
6060 int err = 0;
Mark Bloch8e6efa32017-11-06 12:22:13 +00006061
Mark Bloch8e6efa32017-11-06 12:22:13 +00006062 port_type_cap = MLX5_CAP_GEN(mdev, port_type);
6063 ll = mlx5_port_type_cap_to_rdma_ll(port_type_cap);
6064
6065 if (ll == IB_LINK_LAYER_ETHERNET)
Leon Romanovskye3f1ed12018-07-08 12:55:43 +03006066 err = mlx5_ib_stage_common_roce_init(dev);
Mark Bloch8e6efa32017-11-06 12:22:13 +00006067
6068 return err;
6069}
6070
6071void mlx5_ib_stage_rep_roce_cleanup(struct mlx5_ib_dev *dev)
6072{
6073 mlx5_ib_stage_common_roce_cleanup(dev);
6074}
6075
Mark Bloch16c19752018-01-01 13:06:58 +02006076static int mlx5_ib_stage_roce_init(struct mlx5_ib_dev *dev)
6077{
6078 struct mlx5_core_dev *mdev = dev->mdev;
6079 enum rdma_link_layer ll;
6080 int port_type_cap;
6081 int err;
6082
6083 port_type_cap = MLX5_CAP_GEN(mdev, port_type);
6084 ll = mlx5_port_type_cap_to_rdma_ll(port_type_cap);
6085
Achiad Shochatfc24fc52015-12-23 18:47:17 +02006086 if (ll == IB_LINK_LAYER_ETHERNET) {
Leon Romanovskye3f1ed12018-07-08 12:55:43 +03006087 err = mlx5_ib_stage_common_roce_init(dev);
Achiad Shochatfc24fc52015-12-23 18:47:17 +02006088 if (err)
Mark Bloch16c19752018-01-01 13:06:58 +02006089 return err;
Mark Bloch8e6efa32017-11-06 12:22:13 +00006090
Leon Romanovskye3f1ed12018-07-08 12:55:43 +03006091 err = mlx5_enable_eth(dev);
Mark Bloch8e6efa32017-11-06 12:22:13 +00006092 if (err)
6093 goto cleanup;
Achiad Shochatfc24fc52015-12-23 18:47:17 +02006094 }
6095
Mark Bloch16c19752018-01-01 13:06:58 +02006096 return 0;
Mark Bloch8e6efa32017-11-06 12:22:13 +00006097cleanup:
6098 mlx5_ib_stage_common_roce_cleanup(dev);
6099
6100 return err;
Mark Bloch16c19752018-01-01 13:06:58 +02006101}
Eli Cohene126ba92013-07-07 17:25:49 +03006102
Mark Bloch16c19752018-01-01 13:06:58 +02006103static void mlx5_ib_stage_roce_cleanup(struct mlx5_ib_dev *dev)
6104{
6105 struct mlx5_core_dev *mdev = dev->mdev;
6106 enum rdma_link_layer ll;
6107 int port_type_cap;
Eli Cohene126ba92013-07-07 17:25:49 +03006108
Mark Bloch16c19752018-01-01 13:06:58 +02006109 port_type_cap = MLX5_CAP_GEN(mdev, port_type);
6110 ll = mlx5_port_type_cap_to_rdma_ll(port_type_cap);
6111
6112 if (ll == IB_LINK_LAYER_ETHERNET) {
6113 mlx5_disable_eth(dev);
Mark Bloch8e6efa32017-11-06 12:22:13 +00006114 mlx5_ib_stage_common_roce_cleanup(dev);
Kamal Heib45bded22017-01-18 14:10:32 +02006115 }
Mark Bloch16c19752018-01-01 13:06:58 +02006116}
Haggai Eran6aec21f2014-12-11 17:04:23 +02006117
Mark Blochb5ca15a2018-01-23 11:16:30 +00006118int mlx5_ib_stage_dev_res_init(struct mlx5_ib_dev *dev)
Mark Bloch16c19752018-01-01 13:06:58 +02006119{
6120 return create_dev_resources(&dev->devr);
6121}
Parav Pandit4a2da0b2017-05-30 10:05:15 +03006122
Mark Blochb5ca15a2018-01-23 11:16:30 +00006123void mlx5_ib_stage_dev_res_cleanup(struct mlx5_ib_dev *dev)
Mark Bloch16c19752018-01-01 13:06:58 +02006124{
6125 destroy_dev_resources(&dev->devr);
6126}
6127
6128static int mlx5_ib_stage_odp_init(struct mlx5_ib_dev *dev)
6129{
Mark Bloch07321b32018-01-01 13:07:00 +02006130 mlx5_ib_internal_fill_odp_caps(dev);
6131
Mark Bloch16c19752018-01-01 13:06:58 +02006132 return mlx5_ib_odp_init_one(dev);
6133}
6134
Saeed Mahameedd5d284b2018-11-19 10:52:41 -08006135void mlx5_ib_stage_odp_cleanup(struct mlx5_ib_dev *dev)
6136{
6137 mlx5_ib_odp_cleanup_one(dev);
6138}
6139
Kamal Heib96458233e2018-12-10 21:09:38 +02006140static const struct ib_device_ops mlx5_ib_dev_hw_stats_ops = {
6141 .alloc_hw_stats = mlx5_ib_alloc_hw_stats,
6142 .get_hw_stats = mlx5_ib_get_hw_stats,
6143};
6144
Mark Blochb5ca15a2018-01-23 11:16:30 +00006145int mlx5_ib_stage_counters_init(struct mlx5_ib_dev *dev)
Mark Bloch16c19752018-01-01 13:06:58 +02006146{
Mark Bloch5e1e7612018-01-01 13:07:01 +02006147 if (MLX5_CAP_GEN(dev->mdev, max_qp_cnt)) {
Kamal Heib96458233e2018-12-10 21:09:38 +02006148 ib_set_device_ops(&dev->ib_dev, &mlx5_ib_dev_hw_stats_ops);
Mark Bloch5e1e7612018-01-01 13:07:01 +02006149
6150 return mlx5_ib_alloc_counters(dev);
6151 }
Mark Bloch16c19752018-01-01 13:06:58 +02006152
6153 return 0;
6154}
6155
Mark Blochb5ca15a2018-01-23 11:16:30 +00006156void mlx5_ib_stage_counters_cleanup(struct mlx5_ib_dev *dev)
Mark Bloch16c19752018-01-01 13:06:58 +02006157{
6158 if (MLX5_CAP_GEN(dev->mdev, max_qp_cnt))
6159 mlx5_ib_dealloc_counters(dev);
6160}
6161
6162static int mlx5_ib_stage_cong_debugfs_init(struct mlx5_ib_dev *dev)
6163{
Parav Pandita9e546e2018-01-04 17:25:39 +02006164 return mlx5_ib_init_cong_debugfs(dev,
6165 mlx5_core_native_port_num(dev->mdev) - 1);
Mark Bloch16c19752018-01-01 13:06:58 +02006166}
6167
6168static void mlx5_ib_stage_cong_debugfs_cleanup(struct mlx5_ib_dev *dev)
6169{
Parav Pandita9e546e2018-01-04 17:25:39 +02006170 mlx5_ib_cleanup_cong_debugfs(dev,
6171 mlx5_core_native_port_num(dev->mdev) - 1);
Mark Bloch16c19752018-01-01 13:06:58 +02006172}
6173
6174static int mlx5_ib_stage_uar_init(struct mlx5_ib_dev *dev)
6175{
Eli Cohen5fe9dec2017-01-03 23:55:25 +02006176 dev->mdev->priv.uar = mlx5_get_uars_page(dev->mdev);
Leon Romanovsky444261c2018-04-23 17:01:56 +03006177 return PTR_ERR_OR_ZERO(dev->mdev->priv.uar);
Mark Bloch16c19752018-01-01 13:06:58 +02006178}
6179
6180static void mlx5_ib_stage_uar_cleanup(struct mlx5_ib_dev *dev)
6181{
6182 mlx5_put_uars_page(dev->mdev, dev->mdev->priv.uar);
6183}
6184
Mark Blochb5ca15a2018-01-23 11:16:30 +00006185int mlx5_ib_stage_bfrag_init(struct mlx5_ib_dev *dev)
Mark Bloch16c19752018-01-01 13:06:58 +02006186{
6187 int err;
Eli Cohen5fe9dec2017-01-03 23:55:25 +02006188
6189 err = mlx5_alloc_bfreg(dev->mdev, &dev->bfreg, false, false);
6190 if (err)
Mark Bloch16c19752018-01-01 13:06:58 +02006191 return err;
Eli Cohen5fe9dec2017-01-03 23:55:25 +02006192
6193 err = mlx5_alloc_bfreg(dev->mdev, &dev->fp_bfreg, false, true);
6194 if (err)
Mark Bloch16c19752018-01-01 13:06:58 +02006195 mlx5_free_bfreg(dev->mdev, &dev->fp_bfreg);
Eli Cohen5fe9dec2017-01-03 23:55:25 +02006196
Mark Bloch16c19752018-01-01 13:06:58 +02006197 return err;
6198}
Mark Bloch0837e862016-06-17 15:10:55 +03006199
Mark Blochb5ca15a2018-01-23 11:16:30 +00006200void mlx5_ib_stage_bfrag_cleanup(struct mlx5_ib_dev *dev)
Mark Bloch16c19752018-01-01 13:06:58 +02006201{
6202 mlx5_free_bfreg(dev->mdev, &dev->fp_bfreg);
6203 mlx5_free_bfreg(dev->mdev, &dev->bfreg);
6204}
Eli Cohene126ba92013-07-07 17:25:49 +03006205
Mark Blochb5ca15a2018-01-23 11:16:30 +00006206int mlx5_ib_stage_ib_reg_init(struct mlx5_ib_dev *dev)
Mark Bloch16c19752018-01-01 13:06:58 +02006207{
Jason Gunthorpee349f852018-09-25 16:58:09 -06006208 const char *name;
6209
Parav Pandit508a5232018-10-11 22:31:54 +03006210 rdma_set_device_sysfs_group(&dev->ib_dev, &mlx5_attr_group);
Jason Gunthorpee349f852018-09-25 16:58:09 -06006211 if (!mlx5_lag_is_active(dev->mdev))
6212 name = "mlx5_%d";
6213 else
6214 name = "mlx5_bond_%d";
6215 return ib_register_device(&dev->ib_dev, name, NULL);
Mark Bloch16c19752018-01-01 13:06:58 +02006216}
6217
David S. Miller03fe2de2018-03-23 11:24:57 -04006218void mlx5_ib_stage_pre_ib_reg_umr_cleanup(struct mlx5_ib_dev *dev)
Mark Bloch42cea832018-03-14 09:14:15 +02006219{
6220 destroy_umrc_res(dev);
6221}
6222
Mark Blochb5ca15a2018-01-23 11:16:30 +00006223void mlx5_ib_stage_ib_reg_cleanup(struct mlx5_ib_dev *dev)
Mark Bloch16c19752018-01-01 13:06:58 +02006224{
6225 ib_unregister_device(&dev->ib_dev);
6226}
6227
David S. Miller03fe2de2018-03-23 11:24:57 -04006228int mlx5_ib_stage_post_ib_reg_umr_init(struct mlx5_ib_dev *dev)
Mark Bloch16c19752018-01-01 13:06:58 +02006229{
6230 return create_umr_res(dev);
6231}
6232
Mark Bloch16c19752018-01-01 13:06:58 +02006233static int mlx5_ib_stage_delay_drop_init(struct mlx5_ib_dev *dev)
6234{
Maor Gottlieb03404e82017-05-30 10:29:13 +03006235 init_delay_drop(dev);
6236
Mark Bloch16c19752018-01-01 13:06:58 +02006237 return 0;
6238}
6239
6240static void mlx5_ib_stage_delay_drop_cleanup(struct mlx5_ib_dev *dev)
6241{
6242 cancel_delay_drop(dev);
6243}
6244
Mark Blochfc385b72018-01-16 14:34:48 +00006245static int mlx5_ib_stage_rep_reg_init(struct mlx5_ib_dev *dev)
6246{
6247 mlx5_ib_register_vport_reps(dev);
6248
6249 return 0;
6250}
6251
6252static void mlx5_ib_stage_rep_reg_cleanup(struct mlx5_ib_dev *dev)
6253{
6254 mlx5_ib_unregister_vport_reps(dev);
6255}
6256
Saeed Mahameeddf097a22018-11-26 14:39:00 -08006257static int mlx5_ib_stage_dev_notifier_init(struct mlx5_ib_dev *dev)
6258{
6259 dev->mdev_events.notifier_call = mlx5_ib_event;
6260 mlx5_notifier_register(dev->mdev, &dev->mdev_events);
6261 return 0;
6262}
6263
6264static void mlx5_ib_stage_dev_notifier_cleanup(struct mlx5_ib_dev *dev)
6265{
6266 mlx5_notifier_unregister(dev->mdev, &dev->mdev_events);
6267}
6268
Leon Romanovsky81773ce2018-11-28 20:53:39 +02006269static int mlx5_ib_stage_devx_init(struct mlx5_ib_dev *dev)
6270{
6271 int uid;
6272
Yishai Hadasfb981532018-11-26 08:28:36 +02006273 uid = mlx5_ib_devx_create(dev, false);
Leon Romanovsky81773ce2018-11-28 20:53:39 +02006274 if (uid > 0)
6275 dev->devx_whitelist_uid = uid;
6276
6277 return 0;
6278}

6279static void mlx5_ib_stage_devx_cleanup(struct mlx5_ib_dev *dev)
6280{
6281 if (dev->devx_whitelist_uid)
6282 mlx5_ib_devx_destroy(dev, dev->devx_whitelist_uid);
6283}
6284
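/*
 * Tear down the first @stage stages of @profile in reverse order and free the
 * ib_device.
 */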
Mark Blochb5ca15a2018-01-23 11:16:30 +00006285void __mlx5_ib_remove(struct mlx5_ib_dev *dev,
6286 const struct mlx5_ib_profile *profile,
6287 int stage)
Mark Bloch16c19752018-01-01 13:06:58 +02006288{
6289	/* Number of stages to clean up */
6290 while (stage) {
6291 stage--;
6292 if (profile->stage[stage].cleanup)
6293 profile->stage[stage].cleanup(dev);
6294 }
6295
6296 ib_dealloc_device((struct ib_device *)dev);
6297}
6298
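/*
 * Run the profile's init callbacks in stage order; on failure unwind the
 * stages that already completed via __mlx5_ib_remove().
 */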
Mark Blochb5ca15a2018-01-23 11:16:30 +00006299void *__mlx5_ib_add(struct mlx5_ib_dev *dev,
6300 const struct mlx5_ib_profile *profile)
Mark Bloch16c19752018-01-01 13:06:58 +02006301{
Mark Bloch16c19752018-01-01 13:06:58 +02006302 int err;
6303 int i;
Mark Bloch16c19752018-01-01 13:06:58 +02006304
Mark Bloch16c19752018-01-01 13:06:58 +02006305 for (i = 0; i < MLX5_IB_STAGE_MAX; i++) {
6306 if (profile->stage[i].init) {
6307 err = profile->stage[i].init(dev);
6308 if (err)
6309 goto err_out;
6310 }
6311 }
6312
6313 dev->profile = profile;
Eli Cohene126ba92013-07-07 17:25:49 +03006314 dev->ib_active = true;
6315
Jack Morgenstein9603b612014-07-28 23:30:22 +03006316 return dev;
Eli Cohene126ba92013-07-07 17:25:49 +03006317
Mark Bloch16c19752018-01-01 13:06:58 +02006318err_out:
6319 __mlx5_ib_remove(dev, profile, i);
Eli Cohene126ba92013-07-07 17:25:49 +03006320
Jack Morgenstein9603b612014-07-28 23:30:22 +03006321 return NULL;
Eli Cohene126ba92013-07-07 17:25:49 +03006322}
6323
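/* Default stage list, used when the device is not handled as an eswitch representor. */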
Mark Bloch16c19752018-01-01 13:06:58 +02006324static const struct mlx5_ib_profile pf_profile = {
6325 STAGE_CREATE(MLX5_IB_STAGE_INIT,
6326 mlx5_ib_stage_init_init,
6327 mlx5_ib_stage_init_cleanup),
Mark Bloch9a4ca382018-01-16 14:42:35 +00006328 STAGE_CREATE(MLX5_IB_STAGE_FLOW_DB,
6329 mlx5_ib_stage_flow_db_init,
6330 mlx5_ib_stage_flow_db_cleanup),
Mark Bloch16c19752018-01-01 13:06:58 +02006331 STAGE_CREATE(MLX5_IB_STAGE_CAPS,
6332 mlx5_ib_stage_caps_init,
6333 NULL),
Mark Bloch8e6efa32017-11-06 12:22:13 +00006334 STAGE_CREATE(MLX5_IB_STAGE_NON_DEFAULT_CB,
6335 mlx5_ib_stage_non_default_cb,
6336 NULL),
Mark Bloch16c19752018-01-01 13:06:58 +02006337 STAGE_CREATE(MLX5_IB_STAGE_ROCE,
6338 mlx5_ib_stage_roce_init,
6339 mlx5_ib_stage_roce_cleanup),
Leon Romanovskyf3da6572018-11-28 20:53:41 +02006340 STAGE_CREATE(MLX5_IB_STAGE_SRQ,
6341 mlx5_init_srq_table,
6342 mlx5_cleanup_srq_table),
Mark Bloch16c19752018-01-01 13:06:58 +02006343 STAGE_CREATE(MLX5_IB_STAGE_DEVICE_RESOURCES,
6344 mlx5_ib_stage_dev_res_init,
6345 mlx5_ib_stage_dev_res_cleanup),
Saeed Mahameeddf097a22018-11-26 14:39:00 -08006346 STAGE_CREATE(MLX5_IB_STAGE_DEVICE_NOTIFIER,
6347 mlx5_ib_stage_dev_notifier_init,
6348 mlx5_ib_stage_dev_notifier_cleanup),
Mark Bloch16c19752018-01-01 13:06:58 +02006349 STAGE_CREATE(MLX5_IB_STAGE_ODP,
6350 mlx5_ib_stage_odp_init,
Saeed Mahameedd5d284b2018-11-19 10:52:41 -08006351 mlx5_ib_stage_odp_cleanup),
Mark Bloch16c19752018-01-01 13:06:58 +02006352 STAGE_CREATE(MLX5_IB_STAGE_COUNTERS,
6353 mlx5_ib_stage_counters_init,
6354 mlx5_ib_stage_counters_cleanup),
6355 STAGE_CREATE(MLX5_IB_STAGE_CONG_DEBUGFS,
6356 mlx5_ib_stage_cong_debugfs_init,
6357 mlx5_ib_stage_cong_debugfs_cleanup),
6358 STAGE_CREATE(MLX5_IB_STAGE_UAR,
6359 mlx5_ib_stage_uar_init,
6360 mlx5_ib_stage_uar_cleanup),
6361 STAGE_CREATE(MLX5_IB_STAGE_BFREG,
6362 mlx5_ib_stage_bfrag_init,
6363 mlx5_ib_stage_bfrag_cleanup),
Mark Bloch42cea832018-03-14 09:14:15 +02006364 STAGE_CREATE(MLX5_IB_STAGE_PRE_IB_REG_UMR,
6365 NULL,
6366 mlx5_ib_stage_pre_ib_reg_umr_cleanup),
Leon Romanovsky81773ce2018-11-28 20:53:39 +02006367 STAGE_CREATE(MLX5_IB_STAGE_WHITELIST_UID,
6368 mlx5_ib_stage_devx_init,
6369 mlx5_ib_stage_devx_cleanup),
Mark Bloch16c19752018-01-01 13:06:58 +02006370 STAGE_CREATE(MLX5_IB_STAGE_IB_REG,
6371 mlx5_ib_stage_ib_reg_init,
6372 mlx5_ib_stage_ib_reg_cleanup),
Mark Bloch42cea832018-03-14 09:14:15 +02006373 STAGE_CREATE(MLX5_IB_STAGE_POST_IB_REG_UMR,
6374 mlx5_ib_stage_post_ib_reg_umr_init,
6375 NULL),
Mark Bloch16c19752018-01-01 13:06:58 +02006376 STAGE_CREATE(MLX5_IB_STAGE_DELAY_DROP,
6377 mlx5_ib_stage_delay_drop_init,
6378 mlx5_ib_stage_delay_drop_cleanup),
Mark Bloch16c19752018-01-01 13:06:58 +02006379};
6380
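/* Stage list used when the eswitch is in offloads mode and the IB device acts as a vport representor. */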
Mark Blochb5ca15a2018-01-23 11:16:30 +00006381static const struct mlx5_ib_profile nic_rep_profile = {
6382 STAGE_CREATE(MLX5_IB_STAGE_INIT,
6383 mlx5_ib_stage_init_init,
6384 mlx5_ib_stage_init_cleanup),
6385 STAGE_CREATE(MLX5_IB_STAGE_FLOW_DB,
6386 mlx5_ib_stage_flow_db_init,
6387 mlx5_ib_stage_flow_db_cleanup),
6388 STAGE_CREATE(MLX5_IB_STAGE_CAPS,
6389 mlx5_ib_stage_caps_init,
6390 NULL),
6391 STAGE_CREATE(MLX5_IB_STAGE_NON_DEFAULT_CB,
6392 mlx5_ib_stage_rep_non_default_cb,
6393 NULL),
6394 STAGE_CREATE(MLX5_IB_STAGE_ROCE,
6395 mlx5_ib_stage_rep_roce_init,
6396 mlx5_ib_stage_rep_roce_cleanup),
Leon Romanovskyf3da6572018-11-28 20:53:41 +02006397 STAGE_CREATE(MLX5_IB_STAGE_SRQ,
6398 mlx5_init_srq_table,
6399 mlx5_cleanup_srq_table),
Mark Blochb5ca15a2018-01-23 11:16:30 +00006400 STAGE_CREATE(MLX5_IB_STAGE_DEVICE_RESOURCES,
6401 mlx5_ib_stage_dev_res_init,
6402 mlx5_ib_stage_dev_res_cleanup),
Saeed Mahameeddf097a22018-11-26 14:39:00 -08006403 STAGE_CREATE(MLX5_IB_STAGE_DEVICE_NOTIFIER,
6404 mlx5_ib_stage_dev_notifier_init,
6405 mlx5_ib_stage_dev_notifier_cleanup),
Mark Blochb5ca15a2018-01-23 11:16:30 +00006406 STAGE_CREATE(MLX5_IB_STAGE_COUNTERS,
6407 mlx5_ib_stage_counters_init,
6408 mlx5_ib_stage_counters_cleanup),
6409 STAGE_CREATE(MLX5_IB_STAGE_UAR,
6410 mlx5_ib_stage_uar_init,
6411 mlx5_ib_stage_uar_cleanup),
6412 STAGE_CREATE(MLX5_IB_STAGE_BFREG,
6413 mlx5_ib_stage_bfrag_init,
6414 mlx5_ib_stage_bfrag_cleanup),
David S. Miller03fe2de2018-03-23 11:24:57 -04006415 STAGE_CREATE(MLX5_IB_STAGE_PRE_IB_REG_UMR,
6416 NULL,
6417 mlx5_ib_stage_pre_ib_reg_umr_cleanup),
Mark Blochb5ca15a2018-01-23 11:16:30 +00006418 STAGE_CREATE(MLX5_IB_STAGE_IB_REG,
6419 mlx5_ib_stage_ib_reg_init,
6420 mlx5_ib_stage_ib_reg_cleanup),
David S. Miller03fe2de2018-03-23 11:24:57 -04006421 STAGE_CREATE(MLX5_IB_STAGE_POST_IB_REG_UMR,
6422 mlx5_ib_stage_post_ib_reg_umr_init,
6423 NULL),
Mark Blochb5ca15a2018-01-23 11:16:30 +00006424 STAGE_CREATE(MLX5_IB_STAGE_REP_REG,
6425 mlx5_ib_stage_rep_reg_init,
6426 mlx5_ib_stage_rep_reg_cleanup),
6427};
6428
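/*
 * A multiport slave does not get its own ib_device: record it as multiport
 * info and bind it to a master with a matching system image GUID, or park it
 * on the unaffiliated list until such a master shows up.
 */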
Leon Romanovskye3f1ed12018-07-08 12:55:43 +03006429static void *mlx5_ib_add_slave_port(struct mlx5_core_dev *mdev)
Daniel Jurgens32f69e42018-01-04 17:25:36 +02006430{
6431 struct mlx5_ib_multiport_info *mpi;
6432 struct mlx5_ib_dev *dev;
6433 bool bound = false;
6434 int err;
6435
6436 mpi = kzalloc(sizeof(*mpi), GFP_KERNEL);
6437 if (!mpi)
6438 return NULL;
6439
6440 mpi->mdev = mdev;
6441
6442 err = mlx5_query_nic_vport_system_image_guid(mdev,
6443 &mpi->sys_image_guid);
6444 if (err) {
6445 kfree(mpi);
6446 return NULL;
6447 }
6448
6449 mutex_lock(&mlx5_ib_multiport_mutex);
6450 list_for_each_entry(dev, &mlx5_ib_dev_list, ib_dev_list) {
6451 if (dev->sys_image_guid == mpi->sys_image_guid)
6452 bound = mlx5_ib_bind_slave_port(dev, mpi);
6453
6454 if (bound) {
6455 rdma_roce_rescan_device(&dev->ib_dev);
6456 break;
6457 }
6458 }
6459
6460 if (!bound) {
6461 list_add_tail(&mpi->list, &mlx5_ib_unaffiliated_port_list);
6462 dev_dbg(&mdev->pdev->dev, "no suitable IB device found to bind to, added to unaffiliated list.\n");
Daniel Jurgens32f69e42018-01-04 17:25:36 +02006463 }
6464 mutex_unlock(&mlx5_ib_multiport_mutex);
6465
6466 return mpi;
6467}
6468
Mark Bloch16c19752018-01-01 13:06:58 +02006469static void *mlx5_ib_add(struct mlx5_core_dev *mdev)
6470{
Daniel Jurgens32f69e42018-01-04 17:25:36 +02006471 enum rdma_link_layer ll;
Mark Blochb5ca15a2018-01-23 11:16:30 +00006472 struct mlx5_ib_dev *dev;
Daniel Jurgens32f69e42018-01-04 17:25:36 +02006473 int port_type_cap;
6474
Mark Blochb5ca15a2018-01-23 11:16:30 +00006475 printk_once(KERN_INFO "%s", mlx5_version);
6476
Daniel Jurgens32f69e42018-01-04 17:25:36 +02006477 port_type_cap = MLX5_CAP_GEN(mdev, port_type);
6478 ll = mlx5_port_type_cap_to_rdma_ll(port_type_cap);
6479
Leon Romanovskye3f1ed12018-07-08 12:55:43 +03006480 if (mlx5_core_is_mp_slave(mdev) && ll == IB_LINK_LAYER_ETHERNET)
6481 return mlx5_ib_add_slave_port(mdev);
Daniel Jurgens32f69e42018-01-04 17:25:36 +02006482
Mark Blochb5ca15a2018-01-23 11:16:30 +00006483 dev = (struct mlx5_ib_dev *)ib_alloc_device(sizeof(*dev));
6484 if (!dev)
6485 return NULL;
6486
6487 dev->mdev = mdev;
6488 dev->num_ports = max(MLX5_CAP_GEN(mdev, num_ports),
6489 MLX5_CAP_GEN(mdev, num_vhca_ports));
6490
Or Gerlitzaff22522018-05-31 11:40:17 +03006491 if (MLX5_ESWITCH_MANAGER(mdev) &&
Mark Blochb5ca15a2018-01-23 11:16:30 +00006492 mlx5_ib_eswitch_mode(mdev->priv.eswitch) == SRIOV_OFFLOADS) {
6493 dev->rep = mlx5_ib_vport_rep(mdev->priv.eswitch, 0);
6494
6495 return __mlx5_ib_add(dev, &nic_rep_profile);
6496 }
6497
6498 return __mlx5_ib_add(dev, &pf_profile);
Mark Bloch16c19752018-01-01 13:06:58 +02006499}
6500
Jack Morgenstein9603b612014-07-28 23:30:22 +03006501static void mlx5_ib_remove(struct mlx5_core_dev *mdev, void *context)
Eli Cohene126ba92013-07-07 17:25:49 +03006502{
Daniel Jurgens32f69e42018-01-04 17:25:36 +02006503 struct mlx5_ib_multiport_info *mpi;
6504 struct mlx5_ib_dev *dev;
Haggai Eran6aec21f2014-12-11 17:04:23 +02006505
Daniel Jurgens32f69e42018-01-04 17:25:36 +02006506 if (mlx5_core_is_mp_slave(mdev)) {
6507 mpi = context;
6508 mutex_lock(&mlx5_ib_multiport_mutex);
6509 if (mpi->ibdev)
6510 mlx5_ib_unbind_slave_port(mpi->ibdev, mpi);
6511 list_del(&mpi->list);
6512 mutex_unlock(&mlx5_ib_multiport_mutex);
6513 return;
6514 }
6515
6516 dev = context;
Mark Bloch16c19752018-01-01 13:06:58 +02006517 __mlx5_ib_remove(dev, dev->profile, MLX5_IB_STAGE_MAX);
Eli Cohene126ba92013-07-07 17:25:49 +03006518}
6519
Jack Morgenstein9603b612014-07-28 23:30:22 +03006520static struct mlx5_interface mlx5_ib_interface = {
6521 .add = mlx5_ib_add,
6522 .remove = mlx5_ib_remove,
Saeed Mahameed64613d942015-04-02 17:07:34 +03006523 .protocol = MLX5_INTERFACE_PROTOCOL_IB,
Eli Cohene126ba92013-07-07 17:25:49 +03006524};
6525
Ilya Lesokhinc44ef992018-03-13 15:18:48 +02006526unsigned long mlx5_ib_get_xlt_emergency_page(void)
6527{
6528 mutex_lock(&xlt_emergency_page_mutex);
6529 return xlt_emergency_page;
6530}
6531
6532void mlx5_ib_put_xlt_emergency_page(void)
6533{
6534 mutex_unlock(&xlt_emergency_page_mutex);
6535}
6536
Eli Cohene126ba92013-07-07 17:25:49 +03006537static int __init mlx5_ib_init(void)
6538{
Haggai Eran6aec21f2014-12-11 17:04:23 +02006539 int err;
6540
Ilya Lesokhinc44ef992018-03-13 15:18:48 +02006541 xlt_emergency_page = __get_free_page(GFP_KERNEL);
6542 if (!xlt_emergency_page)
Daniel Jurgensd69a24e2018-01-04 17:25:37 +02006543 return -ENOMEM;
6544
Ilya Lesokhinc44ef992018-03-13 15:18:48 +02006545 mutex_init(&xlt_emergency_page_mutex);
6546
6547 mlx5_ib_event_wq = alloc_ordered_workqueue("mlx5_ib_event_wq", 0);
6548 if (!mlx5_ib_event_wq) {
6549 free_page(xlt_emergency_page);
6550 return -ENOMEM;
6551 }
6552
Artemy Kovalyov81713d32017-01-18 16:58:11 +02006553 mlx5_ib_odp_init();
Jack Morgenstein9603b612014-07-28 23:30:22 +03006554
Haggai Eran6aec21f2014-12-11 17:04:23 +02006555 err = mlx5_register_interface(&mlx5_ib_interface);
Haggai Eran6aec21f2014-12-11 17:04:23 +02006556
6557 return err;
Eli Cohene126ba92013-07-07 17:25:49 +03006558}
6559
6560static void __exit mlx5_ib_cleanup(void)
6561{
Jack Morgenstein9603b612014-07-28 23:30:22 +03006562 mlx5_unregister_interface(&mlx5_ib_interface);
Daniel Jurgensd69a24e2018-01-04 17:25:37 +02006563 destroy_workqueue(mlx5_ib_event_wq);
Ilya Lesokhinc44ef992018-03-13 15:18:48 +02006564 mutex_destroy(&xlt_emergency_page_mutex);
6565 free_page(xlt_emergency_page);
Eli Cohene126ba92013-07-07 17:25:49 +03006566}
6567
6568module_init(mlx5_ib_init);
6569module_exit(mlx5_ib_cleanup);