/*
 * Copyright (c) 2013-2015, Mellanox Technologies. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/highmem.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/errno.h>
#include <linux/pci.h>
#include <linux/dma-mapping.h>
#include <linux/slab.h>
#if defined(CONFIG_X86)
#include <asm/pat.h>
#endif
#include <linux/sched.h>
#include <linux/delay.h>
#include <rdma/ib_user_verbs.h>
#include <rdma/ib_addr.h>
#include <rdma/ib_cache.h>
#include <linux/mlx5/port.h>
#include <linux/mlx5/vport.h>
#include <linux/list.h>
#include <rdma/ib_smi.h>
#include <rdma/ib_umem.h>
#include <linux/in.h>
#include <linux/etherdevice.h>
#include <linux/mlx5/fs.h>
#include "mlx5_ib.h"

#define DRIVER_NAME "mlx5_ib"
#define DRIVER_VERSION "2.2-1"
#define DRIVER_RELDATE	"Feb 2014"

MODULE_AUTHOR("Eli Cohen <eli@mellanox.com>");
MODULE_DESCRIPTION("Mellanox Connect-IB HCA IB driver");
MODULE_LICENSE("Dual BSD/GPL");
MODULE_VERSION(DRIVER_VERSION);

static char mlx5_version[] =
	DRIVER_NAME ": Mellanox Connect-IB Infiniband driver v"
	DRIVER_VERSION " (" DRIVER_RELDATE ")\n";

enum {
	MLX5_ATOMIC_SIZE_QP_8BYTES = 1 << 3,
};

static enum rdma_link_layer
mlx5_port_type_cap_to_rdma_ll(int port_type_cap)
{
	switch (port_type_cap) {
	case MLX5_CAP_PORT_TYPE_IB:
		return IB_LINK_LAYER_INFINIBAND;
	case MLX5_CAP_PORT_TYPE_ETH:
		return IB_LINK_LAYER_ETHERNET;
	default:
		return IB_LINK_LAYER_UNSPECIFIED;
	}
}

static enum rdma_link_layer
mlx5_ib_port_link_layer(struct ib_device *device, u8 port_num)
{
	struct mlx5_ib_dev *dev = to_mdev(device);
	int port_type_cap = MLX5_CAP_GEN(dev->mdev, port_type);

	return mlx5_port_type_cap_to_rdma_ll(port_type_cap);
}

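/* Netdev notifier: tracks which net_device backs the RoCE port and
 * translates carrier changes (NETDEV_UP/DOWN) on that device - or on
 * its LAG master - into IB_EVENT_PORT_ACTIVE/IB_EVENT_PORT_ERR events.
 */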
static int mlx5_netdev_event(struct notifier_block *this,
			     unsigned long event, void *ptr)
{
	struct net_device *ndev = netdev_notifier_info_to_dev(ptr);
	struct mlx5_ib_dev *ibdev = container_of(this, struct mlx5_ib_dev,
						 roce.nb);

	switch (event) {
	case NETDEV_REGISTER:
	case NETDEV_UNREGISTER:
		write_lock(&ibdev->roce.netdev_lock);
		if (ndev->dev.parent == &ibdev->mdev->pdev->dev)
			ibdev->roce.netdev = (event == NETDEV_UNREGISTER) ?
					     NULL : ndev;
		write_unlock(&ibdev->roce.netdev_lock);
		break;

	case NETDEV_UP:
	case NETDEV_DOWN: {
		struct net_device *lag_ndev = mlx5_lag_get_roce_netdev(ibdev->mdev);
		struct net_device *upper = NULL;

		if (lag_ndev) {
			upper = netdev_master_upper_dev_get(lag_ndev);
			dev_put(lag_ndev);
		}

		if ((upper == ndev || (!upper && ndev == ibdev->roce.netdev))
		    && ibdev->ib_active) {
			struct ib_event ibev = { };

			ibev.device = &ibdev->ib_dev;
			ibev.event = (event == NETDEV_UP) ?
				     IB_EVENT_PORT_ACTIVE : IB_EVENT_PORT_ERR;
			ibev.element.port_num = 1;
			ib_dispatch_event(&ibev);
		}
		break;
	}

	default:
		break;
	}

	return NOTIFY_DONE;
}

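/* Return the net_device associated with the RoCE port, preferring the
 * LAG netdev when bonding is active. A reference is held on the
 * returned device; the caller must release it with dev_put().
 */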
static struct net_device *mlx5_ib_get_netdev(struct ib_device *device,
					     u8 port_num)
{
	struct mlx5_ib_dev *ibdev = to_mdev(device);
	struct net_device *ndev;

	ndev = mlx5_lag_get_roce_netdev(ibdev->mdev);
	if (ndev)
		return ndev;

	/* Ensure ndev does not disappear before we invoke dev_hold() */
	read_lock(&ibdev->roce.netdev_lock);
	ndev = ibdev->roce.netdev;
	if (ndev)
		dev_hold(ndev);
	read_unlock(&ibdev->roce.netdev_lock);

	return ndev;
}

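/* Build ib_port_attr for an Ethernet (RoCE) port. Port state and MTU
 * are derived from the underlying netdev (or its bond master under
 * LAG); active width/speed are hard-coded for now, as the TODOs note.
 */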
static int mlx5_query_port_roce(struct ib_device *device, u8 port_num,
				struct ib_port_attr *props)
{
	struct mlx5_ib_dev *dev = to_mdev(device);
	struct net_device *ndev, *upper;
	enum ib_mtu ndev_ib_mtu;
	u16 qkey_viol_cntr;

	memset(props, 0, sizeof(*props));

	props->port_cap_flags |= IB_PORT_CM_SUP;
	props->port_cap_flags |= IB_PORT_IP_BASED_GIDS;

	props->gid_tbl_len      = MLX5_CAP_ROCE(dev->mdev,
						roce_address_table_size);
	props->max_mtu          = IB_MTU_4096;
	props->max_msg_sz       = 1 << MLX5_CAP_GEN(dev->mdev, log_max_msg);
	props->pkey_tbl_len     = 1;
	props->state            = IB_PORT_DOWN;
	props->phys_state       = 3;

	mlx5_query_nic_vport_qkey_viol_cntr(dev->mdev, &qkey_viol_cntr);
	props->qkey_viol_cntr = qkey_viol_cntr;

	ndev = mlx5_ib_get_netdev(device, port_num);
	if (!ndev)
		return 0;

	if (mlx5_lag_is_active(dev->mdev)) {
		rcu_read_lock();
		upper = netdev_master_upper_dev_get_rcu(ndev);
		if (upper) {
			dev_put(ndev);
			ndev = upper;
			dev_hold(ndev);
		}
		rcu_read_unlock();
	}

	if (netif_running(ndev) && netif_carrier_ok(ndev)) {
		props->state      = IB_PORT_ACTIVE;
		props->phys_state = 5;
	}

	ndev_ib_mtu = iboe_get_mtu(ndev->mtu);

	dev_put(ndev);

	props->active_mtu	= min(props->max_mtu, ndev_ib_mtu);

	props->active_width	= IB_WIDTH_4X;  /* TODO */
	props->active_speed	= IB_SPEED_QDR; /* TODO */

	return 0;
}

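/* Translate an IB GID and its attributes (source MAC, VLAN, RoCE
 * version, IPv4/IPv6 L3 type) into the firmware's roce_addr_layout,
 * which set_roce_addr() below programs via SET_ROCE_ADDRESS.
 */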
static void ib_gid_to_mlx5_roce_addr(const union ib_gid *gid,
				     const struct ib_gid_attr *attr,
				     void *mlx5_addr)
{
#define MLX5_SET_RA(p, f, v) MLX5_SET(roce_addr_layout, p, f, v)
	char *mlx5_addr_l3_addr	= MLX5_ADDR_OF(roce_addr_layout, mlx5_addr,
					       source_l3_address);
	void *mlx5_addr_mac	= MLX5_ADDR_OF(roce_addr_layout, mlx5_addr,
					       source_mac_47_32);

	if (!gid)
		return;

	ether_addr_copy(mlx5_addr_mac, attr->ndev->dev_addr);

	if (is_vlan_dev(attr->ndev)) {
		MLX5_SET_RA(mlx5_addr, vlan_valid, 1);
		MLX5_SET_RA(mlx5_addr, vlan_id, vlan_dev_vlan_id(attr->ndev));
	}

	switch (attr->gid_type) {
	case IB_GID_TYPE_IB:
		MLX5_SET_RA(mlx5_addr, roce_version, MLX5_ROCE_VERSION_1);
		break;
	case IB_GID_TYPE_ROCE_UDP_ENCAP:
		MLX5_SET_RA(mlx5_addr, roce_version, MLX5_ROCE_VERSION_2);
		break;

	default:
		WARN_ON(true);
	}

	if (attr->gid_type != IB_GID_TYPE_IB) {
		if (ipv6_addr_v4mapped((void *)gid))
			MLX5_SET_RA(mlx5_addr, roce_l3_type,
				    MLX5_ROCE_L3_TYPE_IPV4);
		else
			MLX5_SET_RA(mlx5_addr, roce_l3_type,
				    MLX5_ROCE_L3_TYPE_IPV6);
	}

	if ((attr->gid_type == IB_GID_TYPE_IB) ||
	    !ipv6_addr_v4mapped((void *)gid))
		memcpy(mlx5_addr_l3_addr, gid, sizeof(*gid));
	else
		memcpy(&mlx5_addr_l3_addr[12], &gid->raw[12], 4);
}

static int set_roce_addr(struct ib_device *device, u8 port_num,
			 unsigned int index,
			 const union ib_gid *gid,
			 const struct ib_gid_attr *attr)
{
	struct mlx5_ib_dev *dev = to_mdev(device);
	u32  in[MLX5_ST_SZ_DW(set_roce_address_in)]  = {0};
	u32 out[MLX5_ST_SZ_DW(set_roce_address_out)] = {0};
	void *in_addr = MLX5_ADDR_OF(set_roce_address_in, in, roce_address);
	enum rdma_link_layer ll = mlx5_ib_port_link_layer(device, port_num);

	if (ll != IB_LINK_LAYER_ETHERNET)
		return -EINVAL;

	ib_gid_to_mlx5_roce_addr(gid, attr, in_addr);

	MLX5_SET(set_roce_address_in, in, roce_address_index, index);
	MLX5_SET(set_roce_address_in, in, opcode, MLX5_CMD_OP_SET_ROCE_ADDRESS);
	return mlx5_cmd_exec(dev->mdev, in, sizeof(in), out, sizeof(out));
}

static int mlx5_ib_add_gid(struct ib_device *device, u8 port_num,
			   unsigned int index, const union ib_gid *gid,
			   const struct ib_gid_attr *attr,
			   __always_unused void **context)
{
	return set_roce_addr(device, port_num, index, gid, attr);
}

static int mlx5_ib_del_gid(struct ib_device *device, u8 port_num,
			   unsigned int index, __always_unused void **context)
{
	return set_roce_addr(device, port_num, index, NULL, NULL);
}

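/* For RoCE v2 (UDP encapsulated) GIDs, return the device's minimum
 * source UDP port in network byte order; 0 for any other GID type.
 */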
__be16 mlx5_get_roce_udp_sport(struct mlx5_ib_dev *dev, u8 port_num,
			       int index)
{
	struct ib_gid_attr attr;
	union ib_gid gid;

	if (ib_get_cached_gid(&dev->ib_dev, port_num, index, &gid, &attr))
		return 0;

	if (!attr.ndev)
		return 0;

	dev_put(attr.ndev);

	if (attr.gid_type != IB_GID_TYPE_ROCE_UDP_ENCAP)
		return 0;

	return cpu_to_be16(MLX5_CAP_ROCE(dev->mdev, r_roce_min_src_udp_port));
}

int mlx5_get_roce_gid_type(struct mlx5_ib_dev *dev, u8 port_num,
			   int index, enum ib_gid_type *gid_type)
{
	struct ib_gid_attr attr;
	union ib_gid gid;
	int ret;

	ret = ib_get_cached_gid(&dev->ib_dev, port_num, index, &gid, &attr);
	if (ret)
		return ret;

	if (!attr.ndev)
		return -ENODEV;

	dev_put(attr.ndev);

	*gid_type = attr.gid_type;

	return 0;
}

static int mlx5_use_mad_ifc(struct mlx5_ib_dev *dev)
{
	if (MLX5_CAP_GEN(dev->mdev, port_type) == MLX5_CAP_PORT_TYPE_IB)
		return !MLX5_CAP_GEN(dev->mdev, ib_virt);
	return 0;
}

enum {
	MLX5_VPORT_ACCESS_METHOD_MAD,
	MLX5_VPORT_ACCESS_METHOD_HCA,
	MLX5_VPORT_ACCESS_METHOD_NIC,
};

static int mlx5_get_vport_access_method(struct ib_device *ibdev)
{
	if (mlx5_use_mad_ifc(to_mdev(ibdev)))
		return MLX5_VPORT_ACCESS_METHOD_MAD;

	if (mlx5_ib_port_link_layer(ibdev, 1) ==
	    IB_LINK_LAYER_ETHERNET)
		return MLX5_VPORT_ACCESS_METHOD_NIC;

	return MLX5_VPORT_ACCESS_METHOD_HCA;
}

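/* Advertise IB_ATOMIC_HCA only when firmware supports both 8-byte
 * compare-swap and fetch-add and can respond in host endianness;
 * otherwise advertise no atomic support at all.
 */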
static void get_atomic_caps(struct mlx5_ib_dev *dev,
			    struct ib_device_attr *props)
{
	u8 tmp;
	u8 atomic_operations = MLX5_CAP_ATOMIC(dev->mdev, atomic_operations);
	u8 atomic_size_qp = MLX5_CAP_ATOMIC(dev->mdev, atomic_size_qp);
	u8 atomic_req_8B_endianness_mode =
		MLX5_CAP_ATOMIC(dev->mdev, atomic_req_8B_endianess_mode);

	/* Check if HW supports 8 byte standard atomic operations and is
	 * capable of responding in host endianness.
	 */
	tmp = MLX5_ATOMIC_OPS_CMP_SWAP | MLX5_ATOMIC_OPS_FETCH_ADD;
	if (((atomic_operations & tmp) == tmp) &&
	    (atomic_size_qp & MLX5_ATOMIC_SIZE_QP_8BYTES) &&
	    (atomic_req_8B_endianness_mode)) {
		props->atomic_cap = IB_ATOMIC_HCA;
	} else {
		props->atomic_cap = IB_ATOMIC_NONE;
	}
}

static int mlx5_query_system_image_guid(struct ib_device *ibdev,
					__be64 *sys_image_guid)
{
	struct mlx5_ib_dev *dev = to_mdev(ibdev);
	struct mlx5_core_dev *mdev = dev->mdev;
	u64 tmp;
	int err;

	switch (mlx5_get_vport_access_method(ibdev)) {
	case MLX5_VPORT_ACCESS_METHOD_MAD:
		return mlx5_query_mad_ifc_system_image_guid(ibdev,
							    sys_image_guid);

	case MLX5_VPORT_ACCESS_METHOD_HCA:
		err = mlx5_query_hca_vport_system_image_guid(mdev, &tmp);
		break;

	case MLX5_VPORT_ACCESS_METHOD_NIC:
		err = mlx5_query_nic_vport_system_image_guid(mdev, &tmp);
		break;

	default:
		return -EINVAL;
	}

	if (!err)
		*sys_image_guid = cpu_to_be64(tmp);

	return err;
}

static int mlx5_query_max_pkeys(struct ib_device *ibdev,
				u16 *max_pkeys)
{
	struct mlx5_ib_dev *dev = to_mdev(ibdev);
	struct mlx5_core_dev *mdev = dev->mdev;

	switch (mlx5_get_vport_access_method(ibdev)) {
	case MLX5_VPORT_ACCESS_METHOD_MAD:
		return mlx5_query_mad_ifc_max_pkeys(ibdev, max_pkeys);

	case MLX5_VPORT_ACCESS_METHOD_HCA:
	case MLX5_VPORT_ACCESS_METHOD_NIC:
		*max_pkeys = mlx5_to_sw_pkey_sz(MLX5_CAP_GEN(mdev,
						pkey_table_size));
		return 0;

	default:
		return -EINVAL;
	}
}

static int mlx5_query_vendor_id(struct ib_device *ibdev,
				u32 *vendor_id)
{
	struct mlx5_ib_dev *dev = to_mdev(ibdev);

	switch (mlx5_get_vport_access_method(ibdev)) {
	case MLX5_VPORT_ACCESS_METHOD_MAD:
		return mlx5_query_mad_ifc_vendor_id(ibdev, vendor_id);

	case MLX5_VPORT_ACCESS_METHOD_HCA:
	case MLX5_VPORT_ACCESS_METHOD_NIC:
		return mlx5_core_query_vendor_id(dev->mdev, vendor_id);

	default:
		return -EINVAL;
	}
}

static int mlx5_query_node_guid(struct mlx5_ib_dev *dev,
				__be64 *node_guid)
{
	u64 tmp;
	int err;

	switch (mlx5_get_vport_access_method(&dev->ib_dev)) {
	case MLX5_VPORT_ACCESS_METHOD_MAD:
		return mlx5_query_mad_ifc_node_guid(dev, node_guid);

	case MLX5_VPORT_ACCESS_METHOD_HCA:
		err = mlx5_query_hca_vport_node_guid(dev->mdev, &tmp);
		break;

	case MLX5_VPORT_ACCESS_METHOD_NIC:
		err = mlx5_query_nic_vport_node_guid(dev->mdev, &tmp);
		break;

	default:
		return -EINVAL;
	}

	if (!err)
		*node_guid = cpu_to_be64(tmp);

	return err;
}

struct mlx5_reg_node_desc {
	u8	desc[IB_DEVICE_NODE_DESC_MAX];
};

static int mlx5_query_node_desc(struct mlx5_ib_dev *dev, char *node_desc)
{
	struct mlx5_reg_node_desc in;

	if (mlx5_use_mad_ifc(dev))
		return mlx5_query_mad_ifc_node_desc(dev, node_desc);

	memset(&in, 0, sizeof(in));

	return mlx5_core_access_reg(dev->mdev, &in, sizeof(in), node_desc,
				    sizeof(struct mlx5_reg_node_desc),
				    MLX5_REG_NODE_DESC, 0, 0);
}

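/* Translate firmware capability bits into ib_device_attr, and fill the
 * vendor-specific response (TSO, RSS, CQE compression, packet pacing)
 * only for fields that fit in the caller's output buffer, as gated by
 * field_avail() against uhw->outlen.
 */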
static int mlx5_ib_query_device(struct ib_device *ibdev,
				struct ib_device_attr *props,
				struct ib_udata *uhw)
{
	struct mlx5_ib_dev *dev = to_mdev(ibdev);
	struct mlx5_core_dev *mdev = dev->mdev;
	int err = -ENOMEM;
	int max_sq_desc;
	int max_rq_sg;
	int max_sq_sg;
	u64 min_page_size = 1ull << MLX5_CAP_GEN(mdev, log_pg_sz);
	struct mlx5_ib_query_device_resp resp = {};
	size_t resp_len;
	u64 max_tso;

	resp_len = sizeof(resp.comp_mask) + sizeof(resp.response_length);
	if (uhw->outlen && uhw->outlen < resp_len)
		return -EINVAL;
	else
		resp.response_length = resp_len;

	if (uhw->inlen && !ib_is_udata_cleared(uhw, 0, uhw->inlen))
		return -EINVAL;

	memset(props, 0, sizeof(*props));
	err = mlx5_query_system_image_guid(ibdev,
					   &props->sys_image_guid);
	if (err)
		return err;

	err = mlx5_query_max_pkeys(ibdev, &props->max_pkeys);
	if (err)
		return err;

	err = mlx5_query_vendor_id(ibdev, &props->vendor_id);
	if (err)
		return err;

	props->fw_ver = ((u64)fw_rev_maj(dev->mdev) << 32) |
		(fw_rev_min(dev->mdev) << 16) |
		fw_rev_sub(dev->mdev);
	props->device_cap_flags    = IB_DEVICE_CHANGE_PHY_PORT |
		IB_DEVICE_PORT_ACTIVE_EVENT |
		IB_DEVICE_SYS_IMAGE_GUID |
		IB_DEVICE_RC_RNR_NAK_GEN;

	if (MLX5_CAP_GEN(mdev, pkv))
		props->device_cap_flags |= IB_DEVICE_BAD_PKEY_CNTR;
	if (MLX5_CAP_GEN(mdev, qkv))
		props->device_cap_flags |= IB_DEVICE_BAD_QKEY_CNTR;
	if (MLX5_CAP_GEN(mdev, apm))
		props->device_cap_flags |= IB_DEVICE_AUTO_PATH_MIG;
	if (MLX5_CAP_GEN(mdev, xrc))
		props->device_cap_flags |= IB_DEVICE_XRC;
	if (MLX5_CAP_GEN(mdev, imaicl)) {
		props->device_cap_flags |= IB_DEVICE_MEM_WINDOW |
					   IB_DEVICE_MEM_WINDOW_TYPE_2B;
		props->max_mw = 1 << MLX5_CAP_GEN(mdev, log_max_mkey);
		/* We support 'Gappy' memory registration too */
		props->device_cap_flags |= IB_DEVICE_SG_GAPS_REG;
	}
	props->device_cap_flags |= IB_DEVICE_MEM_MGT_EXTENSIONS;
	if (MLX5_CAP_GEN(mdev, sho)) {
		props->device_cap_flags |= IB_DEVICE_SIGNATURE_HANDOVER;
		/* At this stage no support for signature handover */
		props->sig_prot_cap = IB_PROT_T10DIF_TYPE_1 |
				      IB_PROT_T10DIF_TYPE_2 |
				      IB_PROT_T10DIF_TYPE_3;
		props->sig_guard_cap = IB_GUARD_T10DIF_CRC |
				       IB_GUARD_T10DIF_CSUM;
	}
	if (MLX5_CAP_GEN(mdev, block_lb_mc))
		props->device_cap_flags |= IB_DEVICE_BLOCK_MULTICAST_LOOPBACK;

	if (MLX5_CAP_GEN(dev->mdev, eth_net_offloads)) {
		if (MLX5_CAP_ETH(mdev, csum_cap))
			props->device_cap_flags |= IB_DEVICE_RAW_IP_CSUM;

		if (field_avail(typeof(resp), tso_caps, uhw->outlen)) {
			max_tso = MLX5_CAP_ETH(mdev, max_lso_cap);
			if (max_tso) {
				resp.tso_caps.max_tso = 1 << max_tso;
				resp.tso_caps.supported_qpts |=
					1 << IB_QPT_RAW_PACKET;
				resp.response_length += sizeof(resp.tso_caps);
			}
		}

		if (field_avail(typeof(resp), rss_caps, uhw->outlen)) {
			resp.rss_caps.rx_hash_function =
						MLX5_RX_HASH_FUNC_TOEPLITZ;
			resp.rss_caps.rx_hash_fields_mask =
						MLX5_RX_HASH_SRC_IPV4 |
						MLX5_RX_HASH_DST_IPV4 |
						MLX5_RX_HASH_SRC_IPV6 |
						MLX5_RX_HASH_DST_IPV6 |
						MLX5_RX_HASH_SRC_PORT_TCP |
						MLX5_RX_HASH_DST_PORT_TCP |
						MLX5_RX_HASH_SRC_PORT_UDP |
						MLX5_RX_HASH_DST_PORT_UDP;
			resp.response_length += sizeof(resp.rss_caps);
		}
	} else {
		if (field_avail(typeof(resp), tso_caps, uhw->outlen))
			resp.response_length += sizeof(resp.tso_caps);
		if (field_avail(typeof(resp), rss_caps, uhw->outlen))
			resp.response_length += sizeof(resp.rss_caps);
	}

	if (MLX5_CAP_GEN(mdev, ipoib_basic_offloads)) {
		props->device_cap_flags |= IB_DEVICE_UD_IP_CSUM;
		props->device_cap_flags |= IB_DEVICE_UD_TSO;
	}

	if (MLX5_CAP_GEN(dev->mdev, eth_net_offloads) &&
	    MLX5_CAP_ETH(dev->mdev, scatter_fcs))
		props->device_cap_flags |= IB_DEVICE_RAW_SCATTER_FCS;

	if (mlx5_get_flow_namespace(dev->mdev, MLX5_FLOW_NAMESPACE_BYPASS))
		props->device_cap_flags |= IB_DEVICE_MANAGED_FLOW_STEERING;

	props->vendor_part_id	   = mdev->pdev->device;
	props->hw_ver		   = mdev->pdev->revision;

	props->max_mr_size	   = ~0ull;
	props->page_size_cap	   = ~(min_page_size - 1);
	props->max_qp		   = 1 << MLX5_CAP_GEN(mdev, log_max_qp);
	props->max_qp_wr	   = 1 << MLX5_CAP_GEN(mdev, log_max_qp_sz);
	max_rq_sg =  MLX5_CAP_GEN(mdev, max_wqe_sz_rq) /
		     sizeof(struct mlx5_wqe_data_seg);
	max_sq_desc = min_t(int, MLX5_CAP_GEN(mdev, max_wqe_sz_sq), 512);
	max_sq_sg = (max_sq_desc - sizeof(struct mlx5_wqe_ctrl_seg) -
		     sizeof(struct mlx5_wqe_raddr_seg)) /
		sizeof(struct mlx5_wqe_data_seg);
	props->max_sge = min(max_rq_sg, max_sq_sg);
	props->max_sge_rd	   = MLX5_MAX_SGE_RD;
	props->max_cq		   = 1 << MLX5_CAP_GEN(mdev, log_max_cq);
	props->max_cqe = (1 << MLX5_CAP_GEN(mdev, log_max_cq_sz)) - 1;
	props->max_mr		   = 1 << MLX5_CAP_GEN(mdev, log_max_mkey);
	props->max_pd		   = 1 << MLX5_CAP_GEN(mdev, log_max_pd);
	props->max_qp_rd_atom	   = 1 << MLX5_CAP_GEN(mdev, log_max_ra_req_qp);
	props->max_qp_init_rd_atom = 1 << MLX5_CAP_GEN(mdev, log_max_ra_res_qp);
	props->max_srq		   = 1 << MLX5_CAP_GEN(mdev, log_max_srq);
	props->max_srq_wr = (1 << MLX5_CAP_GEN(mdev, log_max_srq_sz)) - 1;
	props->local_ca_ack_delay  = MLX5_CAP_GEN(mdev, local_ca_ack_delay);
	props->max_res_rd_atom	   = props->max_qp_rd_atom * props->max_qp;
	props->max_srq_sge	   = max_rq_sg - 1;
	props->max_fast_reg_page_list_len =
		1 << MLX5_CAP_GEN(mdev, log_max_klm_list_size);
	get_atomic_caps(dev, props);
	props->masked_atomic_cap   = IB_ATOMIC_NONE;
	props->max_mcast_grp	   = 1 << MLX5_CAP_GEN(mdev, log_max_mcg);
	props->max_mcast_qp_attach = MLX5_CAP_GEN(mdev, max_qp_mcg);
	props->max_total_mcast_qp_attach = props->max_mcast_qp_attach *
					   props->max_mcast_grp;
	props->max_map_per_fmr = INT_MAX; /* no limit in ConnectIB */
	props->max_ah = INT_MAX;
	props->hca_core_clock = MLX5_CAP_GEN(mdev, device_frequency_khz);
	props->timestamp_mask = 0x7FFFFFFFFFFFFFFFULL;

#ifdef CONFIG_INFINIBAND_ON_DEMAND_PAGING
	if (MLX5_CAP_GEN(mdev, pg))
		props->device_cap_flags |= IB_DEVICE_ON_DEMAND_PAGING;
	props->odp_caps = dev->odp_caps;
#endif

	if (MLX5_CAP_GEN(mdev, cd))
		props->device_cap_flags |= IB_DEVICE_CROSS_CHANNEL;

	if (!mlx5_core_is_pf(mdev))
		props->device_cap_flags |= IB_DEVICE_VIRTUAL_FUNCTION;

	if (mlx5_ib_port_link_layer(ibdev, 1) ==
	    IB_LINK_LAYER_ETHERNET) {
		props->rss_caps.max_rwq_indirection_tables =
			1 << MLX5_CAP_GEN(dev->mdev, log_max_rqt);
		props->rss_caps.max_rwq_indirection_table_size =
			1 << MLX5_CAP_GEN(dev->mdev, log_max_rqt_size);
		props->rss_caps.supported_qpts = 1 << IB_QPT_RAW_PACKET;
		props->max_wq_type_rq =
			1 << MLX5_CAP_GEN(dev->mdev, log_max_rq);
	}

	if (field_avail(typeof(resp), cqe_comp_caps, uhw->outlen)) {
		resp.cqe_comp_caps.max_num =
			MLX5_CAP_GEN(dev->mdev, cqe_compression) ?
			MLX5_CAP_GEN(dev->mdev, cqe_compression_max_num) : 0;
		resp.cqe_comp_caps.supported_format =
			MLX5_IB_CQE_RES_FORMAT_HASH |
			MLX5_IB_CQE_RES_FORMAT_CSUM;
		resp.response_length += sizeof(resp.cqe_comp_caps);
	}

	if (field_avail(typeof(resp), packet_pacing_caps, uhw->outlen)) {
		if (MLX5_CAP_QOS(mdev, packet_pacing) &&
		    MLX5_CAP_GEN(mdev, qos)) {
			resp.packet_pacing_caps.qp_rate_limit_max =
				MLX5_CAP_QOS(mdev, packet_pacing_max_rate);
			resp.packet_pacing_caps.qp_rate_limit_min =
				MLX5_CAP_QOS(mdev, packet_pacing_min_rate);
			resp.packet_pacing_caps.supported_qpts |=
				1 << IB_QPT_RAW_PACKET;
		}
		resp.response_length += sizeof(resp.packet_pacing_caps);
	}

	if (field_avail(typeof(resp), mlx5_ib_support_multi_pkt_send_wqes,
			uhw->outlen)) {
		resp.mlx5_ib_support_multi_pkt_send_wqes =
			MLX5_CAP_ETH(mdev, multi_pkt_send_wqe);
		resp.response_length +=
			sizeof(resp.mlx5_ib_support_multi_pkt_send_wqes);
	}

	if (field_avail(typeof(resp), reserved, uhw->outlen))
		resp.response_length += sizeof(resp.reserved);

	if (uhw->outlen) {
		err = ib_copy_to_udata(uhw, &resp, resp.response_length);

		if (err)
			return err;
	}

	return 0;
}

enum mlx5_ib_width {
	MLX5_IB_WIDTH_1X	= 1 << 0,
	MLX5_IB_WIDTH_2X	= 1 << 1,
	MLX5_IB_WIDTH_4X	= 1 << 2,
	MLX5_IB_WIDTH_8X	= 1 << 3,
	MLX5_IB_WIDTH_12X	= 1 << 4
};

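/* Map the MLX5_IB_WIDTH_* bitmask reported by firmware to the IB_WIDTH_*
 * enum; 2X has no IB spec equivalent here and is rejected with -EINVAL.
 */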
static int translate_active_width(struct ib_device *ibdev, u8 active_width,
				  u8 *ib_width)
{
	struct mlx5_ib_dev *dev = to_mdev(ibdev);
	int err = 0;

	if (active_width & MLX5_IB_WIDTH_1X) {
		*ib_width = IB_WIDTH_1X;
	} else if (active_width & MLX5_IB_WIDTH_2X) {
		mlx5_ib_dbg(dev, "active_width %d is not supported by IB spec\n",
			    (int)active_width);
		err = -EINVAL;
	} else if (active_width & MLX5_IB_WIDTH_4X) {
		*ib_width = IB_WIDTH_4X;
	} else if (active_width & MLX5_IB_WIDTH_8X) {
		*ib_width = IB_WIDTH_8X;
	} else if (active_width & MLX5_IB_WIDTH_12X) {
		*ib_width = IB_WIDTH_12X;
	} else {
		mlx5_ib_dbg(dev, "Invalid active_width %d\n",
			    (int)active_width);
		err = -EINVAL;
	}

	return err;
}

static int mlx5_mtu_to_ib_mtu(int mtu)
{
	switch (mtu) {
	case 256: return 1;
	case 512: return 2;
	case 1024: return 3;
	case 2048: return 4;
	case 4096: return 5;
	default:
		pr_warn("invalid mtu\n");
		return -1;
	}
}

enum ib_max_vl_num {
	__IB_MAX_VL_0		= 1,
	__IB_MAX_VL_0_1		= 2,
	__IB_MAX_VL_0_3		= 3,
	__IB_MAX_VL_0_7		= 4,
	__IB_MAX_VL_0_14	= 5,
};

enum mlx5_vl_hw_cap {
	MLX5_VL_HW_0	= 1,
	MLX5_VL_HW_0_1	= 2,
	MLX5_VL_HW_0_2	= 3,
	MLX5_VL_HW_0_3	= 4,
	MLX5_VL_HW_0_4	= 5,
	MLX5_VL_HW_0_5	= 6,
	MLX5_VL_HW_0_6	= 7,
	MLX5_VL_HW_0_7	= 8,
	MLX5_VL_HW_0_14	= 15
};

static int translate_max_vl_num(struct ib_device *ibdev, u8 vl_hw_cap,
				u8 *max_vl_num)
{
	switch (vl_hw_cap) {
	case MLX5_VL_HW_0:
		*max_vl_num = __IB_MAX_VL_0;
		break;
	case MLX5_VL_HW_0_1:
		*max_vl_num = __IB_MAX_VL_0_1;
		break;
	case MLX5_VL_HW_0_3:
		*max_vl_num = __IB_MAX_VL_0_3;
		break;
	case MLX5_VL_HW_0_7:
		*max_vl_num = __IB_MAX_VL_0_7;
		break;
	case MLX5_VL_HW_0_14:
		*max_vl_num = __IB_MAX_VL_0_14;
		break;

	default:
		return -EINVAL;
	}

	return 0;
}

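/* Query port attributes through the HCA vport context: link-layer
 * fields come from mlx5_query_hca_vport_context(), while width, speed,
 * MTU and VL capabilities come from the port access registers.
 */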
static int mlx5_query_hca_port(struct ib_device *ibdev, u8 port,
			       struct ib_port_attr *props)
{
	struct mlx5_ib_dev *dev = to_mdev(ibdev);
	struct mlx5_core_dev *mdev = dev->mdev;
	struct mlx5_hca_vport_context *rep;
	u16 max_mtu;
	u16 oper_mtu;
	int err;
	u8 ib_link_width_oper;
	u8 vl_hw_cap;

	rep = kzalloc(sizeof(*rep), GFP_KERNEL);
	if (!rep) {
		err = -ENOMEM;
		goto out;
	}

	memset(props, 0, sizeof(*props));

	err = mlx5_query_hca_vport_context(mdev, 0, port, 0, rep);
	if (err)
		goto out;

	props->lid		= rep->lid;
	props->lmc		= rep->lmc;
	props->sm_lid		= rep->sm_lid;
	props->sm_sl		= rep->sm_sl;
	props->state		= rep->vport_state;
	props->phys_state	= rep->port_physical_state;
	props->port_cap_flags	= rep->cap_mask1;
	props->gid_tbl_len	= mlx5_get_gid_table_len(MLX5_CAP_GEN(mdev, gid_table_size));
	props->max_msg_sz	= 1 << MLX5_CAP_GEN(mdev, log_max_msg);
	props->pkey_tbl_len	= mlx5_to_sw_pkey_sz(MLX5_CAP_GEN(mdev, pkey_table_size));
	props->bad_pkey_cntr	= rep->pkey_violation_counter;
	props->qkey_viol_cntr	= rep->qkey_violation_counter;
	props->subnet_timeout	= rep->subnet_timeout;
	props->init_type_reply	= rep->init_type_reply;
	props->grh_required	= rep->grh_required;

	err = mlx5_query_port_link_width_oper(mdev, &ib_link_width_oper, port);
	if (err)
		goto out;

	err = translate_active_width(ibdev, ib_link_width_oper,
				     &props->active_width);
	if (err)
		goto out;
	err = mlx5_query_port_ib_proto_oper(mdev, &props->active_speed, port);
	if (err)
		goto out;

	mlx5_query_port_max_mtu(mdev, &max_mtu, port);

	props->max_mtu = mlx5_mtu_to_ib_mtu(max_mtu);

	mlx5_query_port_oper_mtu(mdev, &oper_mtu, port);

	props->active_mtu = mlx5_mtu_to_ib_mtu(oper_mtu);

	err = mlx5_query_port_vl_hw_cap(mdev, &vl_hw_cap, port);
	if (err)
		goto out;

	err = translate_max_vl_num(ibdev, vl_hw_cap,
				   &props->max_vl_num);
out:
	kfree(rep);
	return err;
}

int mlx5_ib_query_port(struct ib_device *ibdev, u8 port,
		       struct ib_port_attr *props)
{
	switch (mlx5_get_vport_access_method(ibdev)) {
	case MLX5_VPORT_ACCESS_METHOD_MAD:
		return mlx5_query_mad_ifc_port(ibdev, port, props);

	case MLX5_VPORT_ACCESS_METHOD_HCA:
		return mlx5_query_hca_port(ibdev, port, props);

	case MLX5_VPORT_ACCESS_METHOD_NIC:
		return mlx5_query_port_roce(ibdev, port, props);

	default:
		return -EINVAL;
	}
}

static int mlx5_ib_query_gid(struct ib_device *ibdev, u8 port, int index,
			     union ib_gid *gid)
{
	struct mlx5_ib_dev *dev = to_mdev(ibdev);
	struct mlx5_core_dev *mdev = dev->mdev;

	switch (mlx5_get_vport_access_method(ibdev)) {
	case MLX5_VPORT_ACCESS_METHOD_MAD:
		return mlx5_query_mad_ifc_gids(ibdev, port, index, gid);

	case MLX5_VPORT_ACCESS_METHOD_HCA:
		return mlx5_query_hca_vport_gid(mdev, 0, port, 0, index, gid);

	default:
		return -EINVAL;
	}
}

static int mlx5_ib_query_pkey(struct ib_device *ibdev, u8 port, u16 index,
			      u16 *pkey)
{
	struct mlx5_ib_dev *dev = to_mdev(ibdev);
	struct mlx5_core_dev *mdev = dev->mdev;

	switch (mlx5_get_vport_access_method(ibdev)) {
	case MLX5_VPORT_ACCESS_METHOD_MAD:
		return mlx5_query_mad_ifc_pkey(ibdev, port, index, pkey);

	case MLX5_VPORT_ACCESS_METHOD_HCA:
	case MLX5_VPORT_ACCESS_METHOD_NIC:
		return mlx5_query_hca_vport_pkey(mdev, 0, port, 0, index,
						 pkey);
	default:
		return -EINVAL;
	}
}

static int mlx5_ib_modify_device(struct ib_device *ibdev, int mask,
				 struct ib_device_modify *props)
{
	struct mlx5_ib_dev *dev = to_mdev(ibdev);
	struct mlx5_reg_node_desc in;
	struct mlx5_reg_node_desc out;
	int err;

	if (mask & ~IB_DEVICE_MODIFY_NODE_DESC)
		return -EOPNOTSUPP;

	if (!(mask & IB_DEVICE_MODIFY_NODE_DESC))
		return 0;

	/*
	 * If possible, pass node desc to FW, so it can generate
	 * a 144 trap. If cmd fails, just ignore.
	 */
	memcpy(&in, props->node_desc, IB_DEVICE_NODE_DESC_MAX);
	err = mlx5_core_access_reg(dev->mdev, &in, sizeof(in), &out,
				   sizeof(out), MLX5_REG_NODE_DESC, 0, 1);
	if (err)
		return err;

	memcpy(ibdev->node_desc, props->node_desc, IB_DEVICE_NODE_DESC_MAX);

	return err;
}

static int mlx5_ib_modify_port(struct ib_device *ibdev, u8 port, int mask,
			       struct ib_port_modify *props)
{
	struct mlx5_ib_dev *dev = to_mdev(ibdev);
	struct ib_port_attr attr;
	u32 tmp;
	int err;

	mutex_lock(&dev->cap_mask_mutex);

	err = mlx5_ib_query_port(ibdev, port, &attr);
	if (err)
		goto out;

	tmp = (attr.port_cap_flags | props->set_port_cap_mask) &
		~props->clr_port_cap_mask;

	err = mlx5_set_port_caps(dev->mdev, port, tmp);

out:
	mutex_unlock(&dev->cap_mask_mutex);
	return err;
}

static void print_lib_caps(struct mlx5_ib_dev *dev, u64 caps)
{
	mlx5_ib_dbg(dev, "MLX5_LIB_CAP_4K_UAR = %s\n",
		    caps & MLX5_LIB_CAP_4K_UAR ? "y" : "n");
}

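/* Round the requested number of blue-flame registers up to a whole
 * number of system pages (a page holds several UARs, each with
 * MLX5_NON_FP_BFREGS_PER_UAR bfregs) and report how many pages that is.
 */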
static int calc_total_bfregs(struct mlx5_ib_dev *dev, bool lib_uar_4k,
			     struct mlx5_ib_alloc_ucontext_req_v2 *req,
			     u32 *num_sys_pages)
{
	int uars_per_sys_page;
	int bfregs_per_sys_page;
	int ref_bfregs = req->total_num_bfregs;

	if (req->total_num_bfregs == 0)
		return -EINVAL;

	BUILD_BUG_ON(MLX5_MAX_BFREGS % MLX5_NON_FP_BFREGS_IN_PAGE);
	BUILD_BUG_ON(MLX5_MAX_BFREGS < MLX5_NON_FP_BFREGS_IN_PAGE);

	if (req->total_num_bfregs > MLX5_MAX_BFREGS)
		return -ENOMEM;

	uars_per_sys_page = get_uars_per_sys_page(dev, lib_uar_4k);
	bfregs_per_sys_page = uars_per_sys_page * MLX5_NON_FP_BFREGS_PER_UAR;
	req->total_num_bfregs = ALIGN(req->total_num_bfregs, bfregs_per_sys_page);
	*num_sys_pages = req->total_num_bfregs / bfregs_per_sys_page;

	if (req->num_low_latency_bfregs > req->total_num_bfregs - 1)
		return -EINVAL;

	mlx5_ib_dbg(dev, "uar_4k: fw support %s, lib support %s, user requested %d bfregs, allocated %d, using %d sys pages\n",
		    MLX5_CAP_GEN(dev->mdev, uar_4k) ? "yes" : "no",
		    lib_uar_4k ? "yes" : "no", ref_bfregs,
		    req->total_num_bfregs, *num_sys_pages);

	return 0;
}

static int allocate_uars(struct mlx5_ib_dev *dev, struct mlx5_ib_ucontext *context)
{
	struct mlx5_bfreg_info *bfregi;
	int err;
	int i;

	bfregi = &context->bfregi;
	for (i = 0; i < bfregi->num_sys_pages; i++) {
		err = mlx5_cmd_alloc_uar(dev->mdev, &bfregi->sys_pages[i]);
		if (err)
			goto error;

		mlx5_ib_dbg(dev, "allocated uar %d\n", bfregi->sys_pages[i]);
	}
	return 0;

error:
	for (--i; i >= 0; i--)
		if (mlx5_cmd_free_uar(dev->mdev, bfregi->sys_pages[i]))
			mlx5_ib_warn(dev, "failed to free uar %d\n", i);

	return err;
}

static int deallocate_uars(struct mlx5_ib_dev *dev, struct mlx5_ib_ucontext *context)
{
	struct mlx5_bfreg_info *bfregi;
	int err;
	int i;

	bfregi = &context->bfregi;
	for (i = 0; i < bfregi->num_sys_pages; i++) {
		err = mlx5_cmd_free_uar(dev->mdev, bfregi->sys_pages[i]);
		if (err) {
			mlx5_ib_warn(dev, "failed to free uar %d\n", i);
			return err;
		}
	}
	return 0;
}

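/* Allocate a verbs user context: parse the v0/v2 request, size and
 * allocate the bfreg/UAR pool, optionally allocate a transport domain,
 * and describe the resulting layout back to userspace in the response.
 */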
static struct ib_ucontext *mlx5_ib_alloc_ucontext(struct ib_device *ibdev,
						  struct ib_udata *udata)
{
	struct mlx5_ib_dev *dev = to_mdev(ibdev);
	struct mlx5_ib_alloc_ucontext_req_v2 req = {};
	struct mlx5_ib_alloc_ucontext_resp resp = {};
	struct mlx5_ib_ucontext *context;
	struct mlx5_bfreg_info *bfregi;
	int ver;
	int err;
	size_t reqlen;
	size_t min_req_v2 = offsetof(struct mlx5_ib_alloc_ucontext_req_v2,
				     max_cqe_version);
	bool lib_uar_4k;

	if (!dev->ib_active)
		return ERR_PTR(-EAGAIN);

	if (udata->inlen < sizeof(struct ib_uverbs_cmd_hdr))
		return ERR_PTR(-EINVAL);

	reqlen = udata->inlen - sizeof(struct ib_uverbs_cmd_hdr);
	if (reqlen == sizeof(struct mlx5_ib_alloc_ucontext_req))
		ver = 0;
	else if (reqlen >= min_req_v2)
		ver = 2;
	else
		return ERR_PTR(-EINVAL);

	err = ib_copy_from_udata(&req, udata, min(reqlen, sizeof(req)));
	if (err)
		return ERR_PTR(err);

	if (req.flags)
		return ERR_PTR(-EINVAL);

	if (req.comp_mask || req.reserved0 || req.reserved1 || req.reserved2)
		return ERR_PTR(-EOPNOTSUPP);

	req.total_num_bfregs = ALIGN(req.total_num_bfregs,
				     MLX5_NON_FP_BFREGS_PER_UAR);
	if (req.num_low_latency_bfregs > req.total_num_bfregs - 1)
		return ERR_PTR(-EINVAL);

	resp.qp_tab_size = 1 << MLX5_CAP_GEN(dev->mdev, log_max_qp);
	if (mlx5_core_is_pf(dev->mdev) && MLX5_CAP_GEN(dev->mdev, bf))
		resp.bf_reg_size = 1 << MLX5_CAP_GEN(dev->mdev, log_bf_reg_size);
	resp.cache_line_size = cache_line_size();
	resp.max_sq_desc_sz = MLX5_CAP_GEN(dev->mdev, max_wqe_sz_sq);
	resp.max_rq_desc_sz = MLX5_CAP_GEN(dev->mdev, max_wqe_sz_rq);
	resp.max_send_wqebb = 1 << MLX5_CAP_GEN(dev->mdev, log_max_qp_sz);
	resp.max_recv_wr = 1 << MLX5_CAP_GEN(dev->mdev, log_max_qp_sz);
	resp.max_srq_recv_wr = 1 << MLX5_CAP_GEN(dev->mdev, log_max_srq_sz);
	resp.cqe_version = min_t(__u8,
				 (__u8)MLX5_CAP_GEN(dev->mdev, cqe_version),
				 req.max_cqe_version);
	resp.log_uar_size = MLX5_CAP_GEN(dev->mdev, uar_4k) ?
				MLX5_ADAPTER_PAGE_SHIFT : PAGE_SHIFT;
	resp.num_uars_per_page = MLX5_CAP_GEN(dev->mdev, uar_4k) ?
				 MLX5_CAP_GEN(dev->mdev, num_of_uars_per_page) : 1;
	resp.response_length = min(offsetof(typeof(resp), response_length) +
				   sizeof(resp.response_length), udata->outlen);

	context = kzalloc(sizeof(*context), GFP_KERNEL);
	if (!context)
		return ERR_PTR(-ENOMEM);

	lib_uar_4k = req.lib_caps & MLX5_LIB_CAP_4K_UAR;
	bfregi = &context->bfregi;

	/* updates req->total_num_bfregs */
	err = calc_total_bfregs(dev, lib_uar_4k, &req, &bfregi->num_sys_pages);
	if (err)
		goto out_ctx;

	mutex_init(&bfregi->lock);
	bfregi->lib_uar_4k = lib_uar_4k;
	bfregi->count = kcalloc(req.total_num_bfregs, sizeof(*bfregi->count),
				GFP_KERNEL);
	if (!bfregi->count) {
		err = -ENOMEM;
		goto out_ctx;
	}

	bfregi->sys_pages = kcalloc(bfregi->num_sys_pages,
				    sizeof(*bfregi->sys_pages),
				    GFP_KERNEL);
	if (!bfregi->sys_pages) {
		err = -ENOMEM;
		goto out_count;
	}

	err = allocate_uars(dev, context);
	if (err)
		goto out_sys_pages;

#ifdef CONFIG_INFINIBAND_ON_DEMAND_PAGING
	context->ibucontext.invalidate_range = &mlx5_ib_invalidate_range;
#endif

	context->upd_xlt_page = __get_free_page(GFP_KERNEL);
	if (!context->upd_xlt_page) {
		err = -ENOMEM;
		goto out_uars;
	}
	mutex_init(&context->upd_xlt_page_mutex);

	if (MLX5_CAP_GEN(dev->mdev, log_max_transport_domain)) {
		err = mlx5_core_alloc_transport_domain(dev->mdev,
						       &context->tdn);
		if (err)
			goto out_page;
	}

	INIT_LIST_HEAD(&context->vma_private_list);
	INIT_LIST_HEAD(&context->db_page_list);
	mutex_init(&context->db_page_mutex);

	resp.tot_bfregs = req.total_num_bfregs;
	resp.num_ports = MLX5_CAP_GEN(dev->mdev, num_ports);

	if (field_avail(typeof(resp), cqe_version, udata->outlen))
		resp.response_length += sizeof(resp.cqe_version);

	if (field_avail(typeof(resp), cmds_supp_uhw, udata->outlen)) {
		resp.cmds_supp_uhw |= MLX5_USER_CMDS_SUPP_UHW_QUERY_DEVICE |
				      MLX5_USER_CMDS_SUPP_UHW_CREATE_AH;
		resp.response_length += sizeof(resp.cmds_supp_uhw);
	}

	/*
	 * We don't want to expose information from the PCI bar that is located
	 * after 4096 bytes, so if the arch only supports larger pages, let's
	 * pretend we don't support reading the HCA's core clock. This is also
	 * forced by mmap function.
	 */
	if (field_avail(typeof(resp), hca_core_clock_offset, udata->outlen)) {
		if (PAGE_SIZE <= 4096) {
			resp.comp_mask |=
				MLX5_IB_ALLOC_UCONTEXT_RESP_MASK_CORE_CLOCK_OFFSET;
			resp.hca_core_clock_offset =
				offsetof(struct mlx5_init_seg, internal_timer_h) % PAGE_SIZE;
		}
		resp.response_length += sizeof(resp.hca_core_clock_offset) +
					sizeof(resp.reserved2);
	}

	if (field_avail(typeof(resp), log_uar_size, udata->outlen))
		resp.response_length += sizeof(resp.log_uar_size);

	if (field_avail(typeof(resp), num_uars_per_page, udata->outlen))
		resp.response_length += sizeof(resp.num_uars_per_page);

	err = ib_copy_to_udata(udata, &resp, resp.response_length);
	if (err)
		goto out_td;

	bfregi->ver = ver;
	bfregi->num_low_latency_bfregs = req.num_low_latency_bfregs;
	context->cqe_version = resp.cqe_version;
	context->lib_caps = req.lib_caps;
	print_lib_caps(dev, context->lib_caps);

	return &context->ibucontext;

out_td:
	if (MLX5_CAP_GEN(dev->mdev, log_max_transport_domain))
		mlx5_core_dealloc_transport_domain(dev->mdev, context->tdn);

out_page:
	free_page(context->upd_xlt_page);

out_uars:
	deallocate_uars(dev, context);

out_sys_pages:
	kfree(bfregi->sys_pages);

out_count:
	kfree(bfregi->count);

out_ctx:
	kfree(context);

	return ERR_PTR(err);
}

static int mlx5_ib_dealloc_ucontext(struct ib_ucontext *ibcontext)
{
	struct mlx5_ib_ucontext *context = to_mucontext(ibcontext);
	struct mlx5_ib_dev *dev = to_mdev(ibcontext->device);
	struct mlx5_bfreg_info *bfregi;

	bfregi = &context->bfregi;
	if (MLX5_CAP_GEN(dev->mdev, log_max_transport_domain))
		mlx5_core_dealloc_transport_domain(dev->mdev, context->tdn);

	free_page(context->upd_xlt_page);
	deallocate_uars(dev, context);
	kfree(bfregi->sys_pages);
	kfree(bfregi->count);
	kfree(context);

	return 0;
}

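/* Convert a UAR index in BAR 0 to a host page frame number; with 4K
 * UAR support several firmware UARs share one host page.
 */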
static phys_addr_t uar_index2pfn(struct mlx5_ib_dev *dev,
				 struct mlx5_bfreg_info *bfregi,
				 int idx)
{
	int fw_uars_per_page;

	fw_uars_per_page = MLX5_CAP_GEN(dev->mdev, uar_4k) ? MLX5_UARS_IN_PAGE : 1;

	return (pci_resource_start(dev->mdev->pdev, 0) >> PAGE_SHIFT) +
	       bfregi->sys_pages[idx] / fw_uars_per_page;
}

static int get_command(unsigned long offset)
{
	return (offset >> MLX5_IB_MMAP_CMD_SHIFT) & MLX5_IB_MMAP_CMD_MASK;
}

static int get_arg(unsigned long offset)
{
	return offset & ((1 << MLX5_IB_MMAP_CMD_SHIFT) - 1);
}

static int get_index(unsigned long offset)
{
	return get_arg(offset);
}

static void mlx5_ib_vma_open(struct vm_area_struct *area)
{
	/* vma_open is called when a new VMA is created on top of our VMA.  This
	 * is done through either mremap flow or split_vma (usually due to
	 * mlock, madvise, munmap, etc.) We do not support a clone of the VMA,
	 * as this VMA is strongly hardware related.  Therefore we set the
	 * vm_ops of the newly created/cloned VMA to NULL, to prevent it from
	 * calling us again and trying to do incorrect actions.  We assume that
	 * the original VMA size is exactly a single page, and therefore all
	 * "splitting" operation will not happen to it.
	 */
	area->vm_ops = NULL;
}

static void mlx5_ib_vma_close(struct vm_area_struct *area)
{
	struct mlx5_ib_vma_private_data *mlx5_ib_vma_priv_data;

	/* It's guaranteed that all VMAs opened on a FD are closed before the
	 * file itself is closed, therefore no sync is needed with the regular
	 * closing flow. (e.g. mlx5 ib_dealloc_ucontext)
	 * However need a sync with accessing the vma as part of
	 * mlx5_ib_disassociate_ucontext.
	 * The close operation is usually called under mm->mmap_sem except when
	 * process is exiting.
	 * The exiting case is handled explicitly as part of
	 * mlx5_ib_disassociate_ucontext.
	 */
	mlx5_ib_vma_priv_data = (struct mlx5_ib_vma_private_data *)area->vm_private_data;

	/* setting the vma context pointer to null in the mlx5_ib driver's
	 * private data, to protect a race condition in
	 * mlx5_ib_disassociate_ucontext().
	 */
	mlx5_ib_vma_priv_data->vma = NULL;
	list_del(&mlx5_ib_vma_priv_data->list);
	kfree(mlx5_ib_vma_priv_data);
}

static const struct vm_operations_struct mlx5_ib_vm_ops = {
	.open = mlx5_ib_vma_open,
	.close = mlx5_ib_vma_close
};

static int mlx5_ib_set_vma_data(struct vm_area_struct *vma,
				struct mlx5_ib_ucontext *ctx)
{
	struct mlx5_ib_vma_private_data *vma_prv;
	struct list_head *vma_head = &ctx->vma_private_list;

	vma_prv = kzalloc(sizeof(*vma_prv), GFP_KERNEL);
	if (!vma_prv)
		return -ENOMEM;

	vma_prv->vma = vma;
	vma->vm_private_data = vma_prv;
	vma->vm_ops = &mlx5_ib_vm_ops;

	list_add(&vma_prv->list, vma_head);

	return 0;
}

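/* Called when the uverbs device goes away while userspace still holds
 * mappings: zap the PTEs of every VMA recorded on the context so later
 * accesses fault instead of touching freed hardware resources, and
 * handle the exiting-task case where mmap_sem cannot be taken.
 */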
static void mlx5_ib_disassociate_ucontext(struct ib_ucontext *ibcontext)
{
	int ret;
	struct vm_area_struct *vma;
	struct mlx5_ib_vma_private_data *vma_private, *n;
	struct mlx5_ib_ucontext *context = to_mucontext(ibcontext);
	struct task_struct *owning_process  = NULL;
	struct mm_struct   *owning_mm       = NULL;

	owning_process = get_pid_task(ibcontext->tgid, PIDTYPE_PID);
	if (!owning_process)
		return;

	owning_mm = get_task_mm(owning_process);
	if (!owning_mm) {
		pr_info("no mm, disassociate ucontext is pending task termination\n");
		while (1) {
			put_task_struct(owning_process);
			usleep_range(1000, 2000);
			owning_process = get_pid_task(ibcontext->tgid,
						      PIDTYPE_PID);
			if (!owning_process ||
			    owning_process->state == TASK_DEAD) {
				pr_info("disassociate ucontext done, task was terminated\n");
				/* in case task was dead need to release the
				 * task struct.
				 */
				if (owning_process)
					put_task_struct(owning_process);
				return;
			}
		}
	}

	/* need to protect from a race on closing the vma as part of
	 * mlx5_ib_vma_close.
	 */
	down_read(&owning_mm->mmap_sem);
	list_for_each_entry_safe(vma_private, n, &context->vma_private_list,
				 list) {
		vma = vma_private->vma;
		ret = zap_vma_ptes(vma, vma->vm_start,
				   PAGE_SIZE);
		WARN_ONCE(ret, "%s: zap_vma_ptes failed", __func__);
		/* context going to be destroyed, should
		 * not access ops any more.
		 */
		vma->vm_ops = NULL;
		list_del(&vma_private->list);
		kfree(vma_private);
	}
	up_read(&owning_mm->mmap_sem);
	mmput(owning_mm);
	put_task_struct(owning_process);
}

static inline char *mmap_cmd2str(enum mlx5_ib_mmap_cmd cmd)
{
	switch (cmd) {
	case MLX5_IB_MMAP_WC_PAGE:
		return "WC";
	case MLX5_IB_MMAP_REGULAR_PAGE:
		return "best effort WC";
	case MLX5_IB_MMAP_NC_PAGE:
		return "NC";
	default:
		return NULL;
	}
}

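/* Map one UAR/blue-flame system page into userspace with the caching
 * attribute implied by the mmap command: write-combining for WC pages
 * (where the architecture supports it), non-cached for NC pages.
 */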
1458static int uar_mmap(struct mlx5_ib_dev *dev, enum mlx5_ib_mmap_cmd cmd,
Maor Gottlieb7c2344c2016-06-17 14:56:44 +03001459 struct vm_area_struct *vma,
1460 struct mlx5_ib_ucontext *context)
Guy Levi37aa5c32016-04-27 16:49:50 +03001461{
Eli Cohen2f5ff262017-01-03 23:55:21 +02001462 struct mlx5_bfreg_info *bfregi = &context->bfregi;
Guy Levi37aa5c32016-04-27 16:49:50 +03001463 int err;
1464 unsigned long idx;
1465 phys_addr_t pfn, pa;
1466 pgprot_t prot;
Eli Cohenb037c292017-01-03 23:55:26 +02001467 int uars_per_page;
1468
1469 if (vma->vm_end - vma->vm_start != PAGE_SIZE)
1470 return -EINVAL;
1471
1472 uars_per_page = get_uars_per_sys_page(dev, bfregi->lib_uar_4k);
1473 idx = get_index(vma->vm_pgoff);
1474 if (idx % uars_per_page ||
1475 idx * uars_per_page >= bfregi->num_sys_pages) {
1476 mlx5_ib_warn(dev, "invalid uar index %lu\n", idx);
1477 return -EINVAL;
1478 }
Guy Levi37aa5c32016-04-27 16:49:50 +03001479
1480 switch (cmd) {
1481 case MLX5_IB_MMAP_WC_PAGE:
1482/* Some architectures don't support WC memory */
1483#if defined(CONFIG_X86)
1484 if (!pat_enabled())
1485 return -EPERM;
1486#elif !(defined(CONFIG_PPC) || (defined(CONFIG_ARM) && defined(CONFIG_MMU)))
1487 return -EPERM;
1488#endif
1489 /* fall through */
1490 case MLX5_IB_MMAP_REGULAR_PAGE:
1491 /* For MLX5_IB_MMAP_REGULAR_PAGE do the best effort to get WC */
1492 prot = pgprot_writecombine(vma->vm_page_prot);
1493 break;
1494 case MLX5_IB_MMAP_NC_PAGE:
1495 prot = pgprot_noncached(vma->vm_page_prot);
1496 break;
1497 default:
1498 return -EINVAL;
1499 }
1500
Eli Cohenb037c292017-01-03 23:55:26 +02001501 pfn = uar_index2pfn(dev, bfregi, idx);
Guy Levi37aa5c32016-04-27 16:49:50 +03001502 mlx5_ib_dbg(dev, "uar idx 0x%lx, pfn %pa\n", idx, &pfn);
1503
1504 vma->vm_page_prot = prot;
1505 err = io_remap_pfn_range(vma, vma->vm_start, pfn,
1506 PAGE_SIZE, vma->vm_page_prot);
1507 if (err) {
1508 mlx5_ib_err(dev, "io_remap_pfn_range failed with error=%d, vm_start=0x%lx, pfn=%pa, mmap_cmd=%s\n",
1509 err, vma->vm_start, &pfn, mmap_cmd2str(cmd));
1510 return -EAGAIN;
1511 }
1512
1513 pa = pfn << PAGE_SHIFT;
1514 mlx5_ib_dbg(dev, "mapped %s at 0x%lx, PA %pa\n", mmap_cmd2str(cmd),
1515 vma->vm_start, &pa);
1516
Maor Gottlieb7c2344c2016-06-17 14:56:44 +03001517 return mlx5_ib_set_vma_data(vma, context);
Guy Levi37aa5c32016-04-27 16:49:50 +03001518}
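
/*
 * Worked example for the index check above (illustrative, assuming 4KB
 * UARs packed four to a 16KB system page): get_uars_per_sys_page() returns
 * 4 and only idx values 0, 4, 8, ... pass the "idx % uars_per_page" test,
 * i.e. a one-page mapping must start on a system-page boundary; the second
 * clause additionally bounds idx against the allocated
 * bfregi->num_sys_pages.
 */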

static int mlx5_ib_mmap(struct ib_ucontext *ibcontext, struct vm_area_struct *vma)
{
	struct mlx5_ib_ucontext *context = to_mucontext(ibcontext);
	struct mlx5_ib_dev *dev = to_mdev(ibcontext->device);
	unsigned long command;
	phys_addr_t pfn;

	command = get_command(vma->vm_pgoff);
	switch (command) {
	case MLX5_IB_MMAP_WC_PAGE:
	case MLX5_IB_MMAP_NC_PAGE:
	case MLX5_IB_MMAP_REGULAR_PAGE:
		return uar_mmap(dev, command, vma, context);

	case MLX5_IB_MMAP_GET_CONTIGUOUS_PAGES:
		return -ENOSYS;

	case MLX5_IB_MMAP_CORE_CLOCK:
		if (vma->vm_end - vma->vm_start != PAGE_SIZE)
			return -EINVAL;

		if (vma->vm_flags & VM_WRITE)
			return -EPERM;

		/* Don't expose to user-space information it shouldn't have */
		if (PAGE_SIZE > 4096)
			return -EOPNOTSUPP;

		vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
		pfn = (dev->mdev->iseg_base +
		       offsetof(struct mlx5_init_seg, internal_timer_h)) >>
			PAGE_SHIFT;
		if (io_remap_pfn_range(vma, vma->vm_start, pfn,
				       PAGE_SIZE, vma->vm_page_prot))
			return -EAGAIN;

		mlx5_ib_dbg(dev, "mapped internal timer at 0x%lx, PA 0x%llx\n",
			    vma->vm_start,
			    (unsigned long long)pfn << PAGE_SHIFT);
		break;

	default:
		return -EINVAL;
	}

	return 0;
}
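
/*
 * Illustrative user-space view of the vm_pgoff encoding decoded by
 * get_command()/get_index() above (see the MLX5_IB_MMAP_CMD_SHIFT/MASK
 * definitions in mlx5_ib.h); the variable names here are assumptions for
 * the example only:
 *
 *   off = ((MLX5_IB_MMAP_WC_PAGE << MLX5_IB_MMAP_CMD_SHIFT) | idx)
 *         * page_size;
 *   buf = mmap(NULL, page_size, PROT_WRITE, MAP_SHARED, cmd_fd, off);
 */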

static struct ib_pd *mlx5_ib_alloc_pd(struct ib_device *ibdev,
				      struct ib_ucontext *context,
				      struct ib_udata *udata)
{
	struct mlx5_ib_alloc_pd_resp resp;
	struct mlx5_ib_pd *pd;
	int err;

	pd = kmalloc(sizeof(*pd), GFP_KERNEL);
	if (!pd)
		return ERR_PTR(-ENOMEM);

	err = mlx5_core_alloc_pd(to_mdev(ibdev)->mdev, &pd->pdn);
	if (err) {
		kfree(pd);
		return ERR_PTR(err);
	}

	if (context) {
		resp.pdn = pd->pdn;
		if (ib_copy_to_udata(udata, &resp, sizeof(resp))) {
			mlx5_core_dealloc_pd(to_mdev(ibdev)->mdev, pd->pdn);
			kfree(pd);
			return ERR_PTR(-EFAULT);
		}
	}

	return &pd->ibpd;
}

static int mlx5_ib_dealloc_pd(struct ib_pd *pd)
{
	struct mlx5_ib_dev *mdev = to_mdev(pd->device);
	struct mlx5_ib_pd *mpd = to_mpd(pd);

	mlx5_core_dealloc_pd(mdev->mdev, mpd->pdn);
	kfree(mpd);

	return 0;
}

enum {
	MATCH_CRITERIA_ENABLE_OUTER_BIT,
	MATCH_CRITERIA_ENABLE_MISC_BIT,
	MATCH_CRITERIA_ENABLE_INNER_BIT
};

#define HEADER_IS_ZERO(match_criteria, headers)			              \
	!(memchr_inv(MLX5_ADDR_OF(fte_match_param, match_criteria, headers), \
		     0, MLX5_FLD_SZ_BYTES(fte_match_param, headers)))

static u8 get_match_criteria_enable(u32 *match_criteria)
{
	u8 match_criteria_enable;

	match_criteria_enable =
		(!HEADER_IS_ZERO(match_criteria, outer_headers)) <<
		MATCH_CRITERIA_ENABLE_OUTER_BIT;
	match_criteria_enable |=
		(!HEADER_IS_ZERO(match_criteria, misc_parameters)) <<
		MATCH_CRITERIA_ENABLE_MISC_BIT;
	match_criteria_enable |=
		(!HEADER_IS_ZERO(match_criteria, inner_headers)) <<
		MATCH_CRITERIA_ENABLE_INNER_BIT;

	return match_criteria_enable;
}
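
/*
 * Example: a rule matching on outer L2/L3 headers plus an IPv6 flow label
 * (which lives in the misc block) yields
 * BIT(MATCH_CRITERIA_ENABLE_OUTER_BIT) | BIT(MATCH_CRITERIA_ENABLE_MISC_BIT),
 * telling firmware which parts of the match parameters to consider.
 */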

static void set_proto(void *outer_c, void *outer_v, u8 mask, u8 val)
{
	MLX5_SET(fte_match_set_lyr_2_4, outer_c, ip_protocol, mask);
	MLX5_SET(fte_match_set_lyr_2_4, outer_v, ip_protocol, val);
}

/* mask/val are u32: the IPv6 flow label is 20 bits wide, so the u8
 * parameters used previously truncated the upper bits.
 */
static void set_flow_label(void *misc_c, void *misc_v, u32 mask, u32 val,
			   bool inner)
{
	if (inner) {
		MLX5_SET(fte_match_set_misc,
			 misc_c, inner_ipv6_flow_label, mask);
		MLX5_SET(fte_match_set_misc,
			 misc_v, inner_ipv6_flow_label, val);
	} else {
		MLX5_SET(fte_match_set_misc,
			 misc_c, outer_ipv6_flow_label, mask);
		MLX5_SET(fte_match_set_misc,
			 misc_v, outer_ipv6_flow_label, val);
	}
}

static void set_tos(void *outer_c, void *outer_v, u8 mask, u8 val)
{
	MLX5_SET(fte_match_set_lyr_2_4, outer_c, ip_ecn, mask);
	MLX5_SET(fte_match_set_lyr_2_4, outer_v, ip_ecn, val);
	MLX5_SET(fte_match_set_lyr_2_4, outer_c, ip_dscp, mask >> 2);
	MLX5_SET(fte_match_set_lyr_2_4, outer_v, ip_dscp, val >> 2);
}

#define LAST_ETH_FIELD vlan_tag
#define LAST_IB_FIELD sl
#define LAST_IPV4_FIELD tos
#define LAST_IPV6_FIELD traffic_class
#define LAST_TCP_UDP_FIELD src_port
#define LAST_TUNNEL_FIELD tunnel_id
#define LAST_FLOW_TAG_FIELD tag_id

/* Field is the last supported field */
#define FIELDS_NOT_SUPPORTED(filter, field)\
	memchr_inv((void *)&filter.field +\
		   sizeof(filter.field), 0,\
		   sizeof(filter) -\
		   offsetof(typeof(filter), field) -\
		   sizeof(filter.field))
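
/*
 * Worked example (illustrative): with LAST_IPV4_FIELD == tos, a filter
 * from a newer user-space library that sets any field laid out after .tos
 * makes memchr_inv() find a non-zero byte in the trailing region, so
 * FIELDS_NOT_SUPPORTED() is true and the spec is rejected rather than
 * silently half-applied.
 */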

static int parse_flow_attr(u32 *match_c, u32 *match_v,
			   const union ib_flow_spec *ib_spec, u32 *tag_id)
{
	void *misc_params_c = MLX5_ADDR_OF(fte_match_param, match_c,
					   misc_parameters);
	void *misc_params_v = MLX5_ADDR_OF(fte_match_param, match_v,
					   misc_parameters);
	void *headers_c;
	void *headers_v;

	if (ib_spec->type & IB_FLOW_SPEC_INNER) {
		headers_c = MLX5_ADDR_OF(fte_match_param, match_c,
					 inner_headers);
		headers_v = MLX5_ADDR_OF(fte_match_param, match_v,
					 inner_headers);
	} else {
		headers_c = MLX5_ADDR_OF(fte_match_param, match_c,
					 outer_headers);
		headers_v = MLX5_ADDR_OF(fte_match_param, match_v,
					 outer_headers);
	}

	switch (ib_spec->type & ~IB_FLOW_SPEC_INNER) {
	case IB_FLOW_SPEC_ETH:
		if (FIELDS_NOT_SUPPORTED(ib_spec->eth.mask, LAST_ETH_FIELD))
			return -ENOTSUPP;

		ether_addr_copy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_c,
					     dmac_47_16),
				ib_spec->eth.mask.dst_mac);
		ether_addr_copy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v,
					     dmac_47_16),
				ib_spec->eth.val.dst_mac);

		ether_addr_copy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_c,
					     smac_47_16),
				ib_spec->eth.mask.src_mac);
		ether_addr_copy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v,
					     smac_47_16),
				ib_spec->eth.val.src_mac);

		if (ib_spec->eth.mask.vlan_tag) {
			MLX5_SET(fte_match_set_lyr_2_4, headers_c,
				 vlan_tag, 1);
			MLX5_SET(fte_match_set_lyr_2_4, headers_v,
				 vlan_tag, 1);

			MLX5_SET(fte_match_set_lyr_2_4, headers_c,
				 first_vid, ntohs(ib_spec->eth.mask.vlan_tag));
			MLX5_SET(fte_match_set_lyr_2_4, headers_v,
				 first_vid, ntohs(ib_spec->eth.val.vlan_tag));

			MLX5_SET(fte_match_set_lyr_2_4, headers_c,
				 first_cfi,
				 ntohs(ib_spec->eth.mask.vlan_tag) >> 12);
			MLX5_SET(fte_match_set_lyr_2_4, headers_v,
				 first_cfi,
				 ntohs(ib_spec->eth.val.vlan_tag) >> 12);

			MLX5_SET(fte_match_set_lyr_2_4, headers_c,
				 first_prio,
				 ntohs(ib_spec->eth.mask.vlan_tag) >> 13);
			MLX5_SET(fte_match_set_lyr_2_4, headers_v,
				 first_prio,
				 ntohs(ib_spec->eth.val.vlan_tag) >> 13);
		}
		MLX5_SET(fte_match_set_lyr_2_4, headers_c,
			 ethertype, ntohs(ib_spec->eth.mask.ether_type));
		MLX5_SET(fte_match_set_lyr_2_4, headers_v,
			 ethertype, ntohs(ib_spec->eth.val.ether_type));
		break;
	case IB_FLOW_SPEC_IPV4:
		if (FIELDS_NOT_SUPPORTED(ib_spec->ipv4.mask, LAST_IPV4_FIELD))
			return -ENOTSUPP;

		MLX5_SET(fte_match_set_lyr_2_4, headers_c,
			 ethertype, 0xffff);
		MLX5_SET(fte_match_set_lyr_2_4, headers_v,
			 ethertype, ETH_P_IP);

		memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_c,
				    src_ipv4_src_ipv6.ipv4_layout.ipv4),
		       &ib_spec->ipv4.mask.src_ip,
		       sizeof(ib_spec->ipv4.mask.src_ip));
		memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v,
				    src_ipv4_src_ipv6.ipv4_layout.ipv4),
		       &ib_spec->ipv4.val.src_ip,
		       sizeof(ib_spec->ipv4.val.src_ip));
		memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_c,
				    dst_ipv4_dst_ipv6.ipv4_layout.ipv4),
		       &ib_spec->ipv4.mask.dst_ip,
		       sizeof(ib_spec->ipv4.mask.dst_ip));
		memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v,
				    dst_ipv4_dst_ipv6.ipv4_layout.ipv4),
		       &ib_spec->ipv4.val.dst_ip,
		       sizeof(ib_spec->ipv4.val.dst_ip));

		set_tos(headers_c, headers_v,
			ib_spec->ipv4.mask.tos, ib_spec->ipv4.val.tos);

		set_proto(headers_c, headers_v,
			  ib_spec->ipv4.mask.proto, ib_spec->ipv4.val.proto);
		break;
	case IB_FLOW_SPEC_IPV6:
		if (FIELDS_NOT_SUPPORTED(ib_spec->ipv6.mask, LAST_IPV6_FIELD))
			return -ENOTSUPP;

		MLX5_SET(fte_match_set_lyr_2_4, headers_c,
			 ethertype, 0xffff);
		MLX5_SET(fte_match_set_lyr_2_4, headers_v,
			 ethertype, ETH_P_IPV6);

		memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_c,
				    src_ipv4_src_ipv6.ipv6_layout.ipv6),
		       &ib_spec->ipv6.mask.src_ip,
		       sizeof(ib_spec->ipv6.mask.src_ip));
		memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v,
				    src_ipv4_src_ipv6.ipv6_layout.ipv6),
		       &ib_spec->ipv6.val.src_ip,
		       sizeof(ib_spec->ipv6.val.src_ip));
		memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_c,
				    dst_ipv4_dst_ipv6.ipv6_layout.ipv6),
		       &ib_spec->ipv6.mask.dst_ip,
		       sizeof(ib_spec->ipv6.mask.dst_ip));
		memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v,
				    dst_ipv4_dst_ipv6.ipv6_layout.ipv6),
		       &ib_spec->ipv6.val.dst_ip,
		       sizeof(ib_spec->ipv6.val.dst_ip));

		set_tos(headers_c, headers_v,
			ib_spec->ipv6.mask.traffic_class,
			ib_spec->ipv6.val.traffic_class);

		set_proto(headers_c, headers_v,
			  ib_spec->ipv6.mask.next_hdr,
			  ib_spec->ipv6.val.next_hdr);

		set_flow_label(misc_params_c, misc_params_v,
			       ntohl(ib_spec->ipv6.mask.flow_label),
			       ntohl(ib_spec->ipv6.val.flow_label),
			       ib_spec->type & IB_FLOW_SPEC_INNER);

		break;
	case IB_FLOW_SPEC_TCP:
		if (FIELDS_NOT_SUPPORTED(ib_spec->tcp_udp.mask,
					 LAST_TCP_UDP_FIELD))
			return -ENOTSUPP;

		MLX5_SET(fte_match_set_lyr_2_4, headers_c, ip_protocol,
			 0xff);
		MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_protocol,
			 IPPROTO_TCP);

		MLX5_SET(fte_match_set_lyr_2_4, headers_c, tcp_sport,
			 ntohs(ib_spec->tcp_udp.mask.src_port));
		MLX5_SET(fte_match_set_lyr_2_4, headers_v, tcp_sport,
			 ntohs(ib_spec->tcp_udp.val.src_port));

		MLX5_SET(fte_match_set_lyr_2_4, headers_c, tcp_dport,
			 ntohs(ib_spec->tcp_udp.mask.dst_port));
		MLX5_SET(fte_match_set_lyr_2_4, headers_v, tcp_dport,
			 ntohs(ib_spec->tcp_udp.val.dst_port));
		break;
	case IB_FLOW_SPEC_UDP:
		if (FIELDS_NOT_SUPPORTED(ib_spec->tcp_udp.mask,
					 LAST_TCP_UDP_FIELD))
			return -ENOTSUPP;

		MLX5_SET(fte_match_set_lyr_2_4, headers_c, ip_protocol,
			 0xff);
		MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_protocol,
			 IPPROTO_UDP);

		MLX5_SET(fte_match_set_lyr_2_4, headers_c, udp_sport,
			 ntohs(ib_spec->tcp_udp.mask.src_port));
		MLX5_SET(fte_match_set_lyr_2_4, headers_v, udp_sport,
			 ntohs(ib_spec->tcp_udp.val.src_port));

		MLX5_SET(fte_match_set_lyr_2_4, headers_c, udp_dport,
			 ntohs(ib_spec->tcp_udp.mask.dst_port));
		MLX5_SET(fte_match_set_lyr_2_4, headers_v, udp_dport,
			 ntohs(ib_spec->tcp_udp.val.dst_port));
		break;
	case IB_FLOW_SPEC_VXLAN_TUNNEL:
		if (FIELDS_NOT_SUPPORTED(ib_spec->tunnel.mask,
					 LAST_TUNNEL_FIELD))
			return -ENOTSUPP;

		MLX5_SET(fte_match_set_misc, misc_params_c, vxlan_vni,
			 ntohl(ib_spec->tunnel.mask.tunnel_id));
		MLX5_SET(fte_match_set_misc, misc_params_v, vxlan_vni,
			 ntohl(ib_spec->tunnel.val.tunnel_id));
		break;
	case IB_FLOW_SPEC_ACTION_TAG:
		if (FIELDS_NOT_SUPPORTED(ib_spec->flow_tag,
					 LAST_FLOW_TAG_FIELD))
			return -EOPNOTSUPP;
		if (ib_spec->flow_tag.tag_id >= BIT(24))
			return -EINVAL;

		*tag_id = ib_spec->flow_tag.tag_id;
		break;
	default:
		return -EINVAL;
	}

	return 0;
}

/* If a flow could catch both multicast and unicast packets,
 * it won't fall into the multicast flow steering table and this rule
 * could steal other multicast packets.
 */
static bool flow_is_multicast_only(struct ib_flow_attr *ib_attr)
{
	struct ib_flow_spec_eth *eth_spec;

	if (ib_attr->type != IB_FLOW_ATTR_NORMAL ||
	    ib_attr->size < sizeof(struct ib_flow_attr) +
	    sizeof(struct ib_flow_spec_eth) ||
	    ib_attr->num_of_specs < 1)
		return false;

	eth_spec = (struct ib_flow_spec_eth *)(ib_attr + 1);
	if (eth_spec->type != IB_FLOW_SPEC_ETH ||
	    eth_spec->size != sizeof(*eth_spec))
		return false;

	return is_multicast_ether_addr(eth_spec->mask.dst_mac) &&
	       is_multicast_ether_addr(eth_spec->val.dst_mac);
}
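
/*
 * Note: is_multicast_ether_addr() only tests the group bit (bit 0 of the
 * first octet), so for example mask 01:00:00:00:00:00 with value
 * 01:00:5e:00:00:01 already satisfies this check.
 */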

static bool is_valid_attr(const struct ib_flow_attr *flow_attr)
{
	union ib_flow_spec *ib_spec = (union ib_flow_spec *)(flow_attr + 1);
	bool has_ipv4_spec = false;
	bool eth_type_ipv4 = true;
	unsigned int spec_index;

	/* Validate that ethertype is correct */
	for (spec_index = 0; spec_index < flow_attr->num_of_specs; spec_index++) {
		if (ib_spec->type == IB_FLOW_SPEC_ETH &&
		    ib_spec->eth.mask.ether_type) {
			if (!((ib_spec->eth.mask.ether_type == htons(0xffff)) &&
			      ib_spec->eth.val.ether_type == htons(ETH_P_IP)))
				eth_type_ipv4 = false;
		} else if (ib_spec->type == IB_FLOW_SPEC_IPV4) {
			has_ipv4_spec = true;
		}
		ib_spec = (void *)ib_spec + ib_spec->size;
	}
	return !has_ipv4_spec || eth_type_ipv4;
}
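
/*
 * Example of a rejected combination: an ETH spec whose ether_type
 * mask/value do not pin the frame exactly to ETH_P_IP (0x0800) together
 * with an IB_FLOW_SPEC_IPV4 spec, since no single packet could match both
 * constraints.
 */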

static void put_flow_table(struct mlx5_ib_dev *dev,
			   struct mlx5_ib_flow_prio *prio, bool ft_added)
{
	prio->refcount -= !!ft_added;
	if (!prio->refcount) {
		mlx5_destroy_flow_table(prio->flow_table);
		prio->flow_table = NULL;
	}
}

static int mlx5_ib_destroy_flow(struct ib_flow *flow_id)
{
	struct mlx5_ib_dev *dev = to_mdev(flow_id->qp->device);
	struct mlx5_ib_flow_handler *handler = container_of(flow_id,
							    struct mlx5_ib_flow_handler,
							    ibflow);
	struct mlx5_ib_flow_handler *iter, *tmp;

	mutex_lock(&dev->flow_db.lock);

	list_for_each_entry_safe(iter, tmp, &handler->list, list) {
		mlx5_del_flow_rules(iter->rule);
		put_flow_table(dev, iter->prio, true);
		list_del(&iter->list);
		kfree(iter);
	}

	mlx5_del_flow_rules(handler->rule);
	put_flow_table(dev, handler->prio, true);
	mutex_unlock(&dev->flow_db.lock);

	kfree(handler);

	return 0;
}

static int ib_prio_to_core_prio(unsigned int priority, bool dont_trap)
{
	priority *= 2;
	if (!dont_trap)
		priority++;
	return priority;
}
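
/*
 * Example mapping: user priority 0 becomes core priority 0 when
 * IB_FLOW_ATTR_FLAGS_DONT_TRAP is set and 1 otherwise, so every
 * "don't trap" rule sits one level above its trapping counterpart.
 */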

enum flow_table_type {
	MLX5_IB_FT_RX,
	MLX5_IB_FT_TX
};

#define MLX5_FS_MAX_TYPES	 10
#define MLX5_FS_MAX_ENTRIES	 32000UL
static struct mlx5_ib_flow_prio *get_flow_table(struct mlx5_ib_dev *dev,
						struct ib_flow_attr *flow_attr,
						enum flow_table_type ft_type)
{
	bool dont_trap = flow_attr->flags & IB_FLOW_ATTR_FLAGS_DONT_TRAP;
	struct mlx5_flow_namespace *ns = NULL;
	struct mlx5_ib_flow_prio *prio;
	struct mlx5_flow_table *ft;
	int num_entries;
	int num_groups;
	int priority;
	int err = 0;

	if (flow_attr->type == IB_FLOW_ATTR_NORMAL) {
		if (flow_is_multicast_only(flow_attr) &&
		    !dont_trap)
			priority = MLX5_IB_FLOW_MCAST_PRIO;
		else
			priority = ib_prio_to_core_prio(flow_attr->priority,
							dont_trap);
		ns = mlx5_get_flow_namespace(dev->mdev,
					     MLX5_FLOW_NAMESPACE_BYPASS);
		num_entries = MLX5_FS_MAX_ENTRIES;
		num_groups = MLX5_FS_MAX_TYPES;
		prio = &dev->flow_db.prios[priority];
	} else if (flow_attr->type == IB_FLOW_ATTR_ALL_DEFAULT ||
		   flow_attr->type == IB_FLOW_ATTR_MC_DEFAULT) {
		ns = mlx5_get_flow_namespace(dev->mdev,
					     MLX5_FLOW_NAMESPACE_LEFTOVERS);
		build_leftovers_ft_param(&priority,
					 &num_entries,
					 &num_groups);
		prio = &dev->flow_db.prios[MLX5_IB_FLOW_LEFTOVERS_PRIO];
	} else if (flow_attr->type == IB_FLOW_ATTR_SNIFFER) {
		if (!MLX5_CAP_FLOWTABLE(dev->mdev,
					allow_sniffer_and_nic_rx_shared_tir))
			return ERR_PTR(-ENOTSUPP);

		ns = mlx5_get_flow_namespace(dev->mdev, ft_type == MLX5_IB_FT_RX ?
					     MLX5_FLOW_NAMESPACE_SNIFFER_RX :
					     MLX5_FLOW_NAMESPACE_SNIFFER_TX);

		prio = &dev->flow_db.sniffer[ft_type];
		priority = 0;
		num_entries = 1;
		num_groups = 1;
	}

	if (!ns)
		return ERR_PTR(-ENOTSUPP);

	ft = prio->flow_table;
	if (!ft) {
		ft = mlx5_create_auto_grouped_flow_table(ns, priority,
							 num_entries,
							 num_groups,
							 0, 0);

		if (!IS_ERR(ft)) {
			prio->refcount = 0;
			prio->flow_table = ft;
		} else {
			err = PTR_ERR(ft);
		}
	}

	return err ? ERR_PTR(err) : prio;
}

static struct mlx5_ib_flow_handler *create_flow_rule(struct mlx5_ib_dev *dev,
						     struct mlx5_ib_flow_prio *ft_prio,
						     const struct ib_flow_attr *flow_attr,
						     struct mlx5_flow_destination *dst)
{
	struct mlx5_flow_table *ft = ft_prio->flow_table;
	struct mlx5_ib_flow_handler *handler;
	struct mlx5_flow_act flow_act = {0};
	struct mlx5_flow_spec *spec;
	const void *ib_flow = (const void *)flow_attr + sizeof(*flow_attr);
	unsigned int spec_index;
	u32 flow_tag = MLX5_FS_DEFAULT_FLOW_TAG;
	int err = 0;

	if (!is_valid_attr(flow_attr))
		return ERR_PTR(-EINVAL);

	spec = mlx5_vzalloc(sizeof(*spec));
	handler = kzalloc(sizeof(*handler), GFP_KERNEL);
	if (!handler || !spec) {
		err = -ENOMEM;
		goto free;
	}

	INIT_LIST_HEAD(&handler->list);

	for (spec_index = 0; spec_index < flow_attr->num_of_specs; spec_index++) {
		err = parse_flow_attr(spec->match_criteria,
				      spec->match_value, ib_flow, &flow_tag);
		if (err < 0)
			goto free;

		ib_flow += ((union ib_flow_spec *)ib_flow)->size;
	}

	spec->match_criteria_enable = get_match_criteria_enable(spec->match_criteria);
	flow_act.action = dst ? MLX5_FLOW_CONTEXT_ACTION_FWD_DEST :
		MLX5_FLOW_CONTEXT_ACTION_FWD_NEXT_PRIO;

	if (flow_tag != MLX5_FS_DEFAULT_FLOW_TAG &&
	    (flow_attr->type == IB_FLOW_ATTR_ALL_DEFAULT ||
	     flow_attr->type == IB_FLOW_ATTR_MC_DEFAULT)) {
		mlx5_ib_warn(dev, "Flow tag %u is not allowed for attribute type %x (leftovers rules)\n",
			     flow_tag, flow_attr->type);
		err = -EINVAL;
		goto free;
	}
	flow_act.flow_tag = flow_tag;
	handler->rule = mlx5_add_flow_rules(ft, spec,
					    &flow_act,
					    dst, 1);

	if (IS_ERR(handler->rule)) {
		err = PTR_ERR(handler->rule);
		goto free;
	}

	ft_prio->refcount++;
	handler->prio = ft_prio;

	ft_prio->flow_table = ft;
free:
	if (err)
		kfree(handler);
	kvfree(spec);
	return err ? ERR_PTR(err) : handler;
}

static struct mlx5_ib_flow_handler *create_dont_trap_rule(struct mlx5_ib_dev *dev,
							   struct mlx5_ib_flow_prio *ft_prio,
							   struct ib_flow_attr *flow_attr,
							   struct mlx5_flow_destination *dst)
{
	struct mlx5_ib_flow_handler *handler_dst = NULL;
	struct mlx5_ib_flow_handler *handler = NULL;

	handler = create_flow_rule(dev, ft_prio, flow_attr, NULL);
	if (!IS_ERR(handler)) {
		handler_dst = create_flow_rule(dev, ft_prio,
					       flow_attr, dst);
		if (IS_ERR(handler_dst)) {
			mlx5_del_flow_rules(handler->rule);
			ft_prio->refcount--;
			kfree(handler);
			handler = handler_dst;
		} else {
			list_add(&handler_dst->list, &handler->list);
		}
	}

	return handler;
}

enum {
	LEFTOVERS_MC,
	LEFTOVERS_UC,
};

static struct mlx5_ib_flow_handler *create_leftovers_rule(struct mlx5_ib_dev *dev,
							   struct mlx5_ib_flow_prio *ft_prio,
							   struct ib_flow_attr *flow_attr,
							   struct mlx5_flow_destination *dst)
{
	struct mlx5_ib_flow_handler *handler_ucast = NULL;
	struct mlx5_ib_flow_handler *handler = NULL;

	static struct {
		struct ib_flow_attr	flow_attr;
		struct ib_flow_spec_eth eth_flow;
	} leftovers_specs[] = {
		[LEFTOVERS_MC] = {
			.flow_attr = {
				.num_of_specs = 1,
				.size = sizeof(leftovers_specs[0])
			},
			.eth_flow = {
				.type = IB_FLOW_SPEC_ETH,
				.size = sizeof(struct ib_flow_spec_eth),
				.mask = {.dst_mac = {0x1} },
				.val =  {.dst_mac = {0x1} }
			}
		},
		[LEFTOVERS_UC] = {
			.flow_attr = {
				.num_of_specs = 1,
				.size = sizeof(leftovers_specs[0])
			},
			.eth_flow = {
				.type = IB_FLOW_SPEC_ETH,
				.size = sizeof(struct ib_flow_spec_eth),
				.mask = {.dst_mac = {0x1} },
				.val = {.dst_mac = {} }
			}
		}
	};

	handler = create_flow_rule(dev, ft_prio,
				   &leftovers_specs[LEFTOVERS_MC].flow_attr,
				   dst);
	if (!IS_ERR(handler) &&
	    flow_attr->type == IB_FLOW_ATTR_ALL_DEFAULT) {
		handler_ucast = create_flow_rule(dev, ft_prio,
						 &leftovers_specs[LEFTOVERS_UC].flow_attr,
						 dst);
		if (IS_ERR(handler_ucast)) {
			mlx5_del_flow_rules(handler->rule);
			ft_prio->refcount--;
			kfree(handler);
			handler = handler_ucast;
		} else {
			list_add(&handler_ucast->list, &handler->list);
		}
	}

	return handler;
}
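
/*
 * Note on the canned specs above: both entries mask only the multicast bit
 * of dst_mac (01:00:00:00:00:00); with value 01:... the rule catches all
 * leftover multicast frames, and with value 00:... all leftover unicast
 * frames.
 */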

static struct mlx5_ib_flow_handler *create_sniffer_rule(struct mlx5_ib_dev *dev,
							struct mlx5_ib_flow_prio *ft_rx,
							struct mlx5_ib_flow_prio *ft_tx,
							struct mlx5_flow_destination *dst)
{
	struct mlx5_ib_flow_handler *handler_rx;
	struct mlx5_ib_flow_handler *handler_tx;
	int err;
	static const struct ib_flow_attr flow_attr  = {
		.num_of_specs = 0,
		.size = sizeof(flow_attr)
	};

	handler_rx = create_flow_rule(dev, ft_rx, &flow_attr, dst);
	if (IS_ERR(handler_rx)) {
		err = PTR_ERR(handler_rx);
		goto err;
	}

	handler_tx = create_flow_rule(dev, ft_tx, &flow_attr, dst);
	if (IS_ERR(handler_tx)) {
		err = PTR_ERR(handler_tx);
		goto err_tx;
	}

	list_add(&handler_tx->list, &handler_rx->list);

	return handler_rx;

err_tx:
	mlx5_del_flow_rules(handler_rx->rule);
	ft_rx->refcount--;
	kfree(handler_rx);
err:
	return ERR_PTR(err);
}

static struct ib_flow *mlx5_ib_create_flow(struct ib_qp *qp,
					   struct ib_flow_attr *flow_attr,
					   int domain)
{
	struct mlx5_ib_dev *dev = to_mdev(qp->device);
	struct mlx5_ib_qp *mqp = to_mqp(qp);
	struct mlx5_ib_flow_handler *handler = NULL;
	struct mlx5_flow_destination *dst = NULL;
	struct mlx5_ib_flow_prio *ft_prio_tx = NULL;
	struct mlx5_ib_flow_prio *ft_prio;
	int err;

	if (flow_attr->priority > MLX5_IB_FLOW_LAST_PRIO)
		return ERR_PTR(-ENOSPC);

	if (domain != IB_FLOW_DOMAIN_USER ||
	    flow_attr->port > MLX5_CAP_GEN(dev->mdev, num_ports) ||
	    (flow_attr->flags & ~IB_FLOW_ATTR_FLAGS_DONT_TRAP))
		return ERR_PTR(-EINVAL);

	dst = kzalloc(sizeof(*dst), GFP_KERNEL);
	if (!dst)
		return ERR_PTR(-ENOMEM);

	mutex_lock(&dev->flow_db.lock);

	ft_prio = get_flow_table(dev, flow_attr, MLX5_IB_FT_RX);
	if (IS_ERR(ft_prio)) {
		err = PTR_ERR(ft_prio);
		goto unlock;
	}
	if (flow_attr->type == IB_FLOW_ATTR_SNIFFER) {
		ft_prio_tx = get_flow_table(dev, flow_attr, MLX5_IB_FT_TX);
		if (IS_ERR(ft_prio_tx)) {
			err = PTR_ERR(ft_prio_tx);
			ft_prio_tx = NULL;
			goto destroy_ft;
		}
	}

	dst->type = MLX5_FLOW_DESTINATION_TYPE_TIR;
	if (mqp->flags & MLX5_IB_QP_RSS)
		dst->tir_num = mqp->rss_qp.tirn;
	else
		dst->tir_num = mqp->raw_packet_qp.rq.tirn;

	if (flow_attr->type == IB_FLOW_ATTR_NORMAL) {
		if (flow_attr->flags & IB_FLOW_ATTR_FLAGS_DONT_TRAP) {
			handler = create_dont_trap_rule(dev, ft_prio,
							flow_attr, dst);
		} else {
			handler = create_flow_rule(dev, ft_prio, flow_attr,
						   dst);
		}
	} else if (flow_attr->type == IB_FLOW_ATTR_ALL_DEFAULT ||
		   flow_attr->type == IB_FLOW_ATTR_MC_DEFAULT) {
		handler = create_leftovers_rule(dev, ft_prio, flow_attr,
						dst);
	} else if (flow_attr->type == IB_FLOW_ATTR_SNIFFER) {
		handler = create_sniffer_rule(dev, ft_prio, ft_prio_tx, dst);
	} else {
		err = -EINVAL;
		goto destroy_ft;
	}

	if (IS_ERR(handler)) {
		err = PTR_ERR(handler);
		handler = NULL;
		goto destroy_ft;
	}

	mutex_unlock(&dev->flow_db.lock);
	kfree(dst);

	return &handler->ibflow;

destroy_ft:
	put_flow_table(dev, ft_prio, false);
	if (ft_prio_tx)
		put_flow_table(dev, ft_prio_tx, false);
unlock:
	mutex_unlock(&dev->flow_db.lock);
	kfree(dst);
	kfree(handler);
	return ERR_PTR(err);
}

static int mlx5_ib_mcg_attach(struct ib_qp *ibqp, union ib_gid *gid, u16 lid)
{
	struct mlx5_ib_dev *dev = to_mdev(ibqp->device);
	int err;

	err = mlx5_core_attach_mcg(dev->mdev, gid, ibqp->qp_num);
	if (err)
		mlx5_ib_warn(dev, "failed attaching QPN 0x%x, MGID %pI6\n",
			     ibqp->qp_num, gid->raw);

	return err;
}

static int mlx5_ib_mcg_detach(struct ib_qp *ibqp, union ib_gid *gid, u16 lid)
{
	struct mlx5_ib_dev *dev = to_mdev(ibqp->device);
	int err;

	err = mlx5_core_detach_mcg(dev->mdev, gid, ibqp->qp_num);
	if (err)
		mlx5_ib_warn(dev, "failed detaching QPN 0x%x, MGID %pI6\n",
			     ibqp->qp_num, gid->raw);

	return err;
}

static int init_node_data(struct mlx5_ib_dev *dev)
{
	int err;

	err = mlx5_query_node_desc(dev, dev->ib_dev.node_desc);
	if (err)
		return err;

	dev->mdev->rev_id = dev->mdev->pdev->revision;

	return mlx5_query_node_guid(dev, &dev->ib_dev.node_guid);
}

static ssize_t show_fw_pages(struct device *device, struct device_attribute *attr,
			     char *buf)
{
	struct mlx5_ib_dev *dev =
		container_of(device, struct mlx5_ib_dev, ib_dev.dev);

	return sprintf(buf, "%d\n", dev->mdev->priv.fw_pages);
}

static ssize_t show_reg_pages(struct device *device,
			      struct device_attribute *attr, char *buf)
{
	struct mlx5_ib_dev *dev =
		container_of(device, struct mlx5_ib_dev, ib_dev.dev);

	return sprintf(buf, "%d\n", atomic_read(&dev->mdev->priv.reg_pages));
}

static ssize_t show_hca(struct device *device, struct device_attribute *attr,
			char *buf)
{
	struct mlx5_ib_dev *dev =
		container_of(device, struct mlx5_ib_dev, ib_dev.dev);
	return sprintf(buf, "MT%d\n", dev->mdev->pdev->device);
}

static ssize_t show_rev(struct device *device, struct device_attribute *attr,
			char *buf)
{
	struct mlx5_ib_dev *dev =
		container_of(device, struct mlx5_ib_dev, ib_dev.dev);
	return sprintf(buf, "%x\n", dev->mdev->rev_id);
}

static ssize_t show_board(struct device *device, struct device_attribute *attr,
			  char *buf)
{
	struct mlx5_ib_dev *dev =
		container_of(device, struct mlx5_ib_dev, ib_dev.dev);
	return sprintf(buf, "%.*s\n", MLX5_BOARD_ID_LEN,
		       dev->mdev->board_id);
}

static DEVICE_ATTR(hw_rev,    S_IRUGO, show_rev,       NULL);
static DEVICE_ATTR(hca_type,  S_IRUGO, show_hca,       NULL);
static DEVICE_ATTR(board_id,  S_IRUGO, show_board,     NULL);
static DEVICE_ATTR(fw_pages,  S_IRUGO, show_fw_pages,  NULL);
static DEVICE_ATTR(reg_pages, S_IRUGO, show_reg_pages, NULL);

static struct device_attribute *mlx5_class_attributes[] = {
	&dev_attr_hw_rev,
	&dev_attr_hca_type,
	&dev_attr_board_id,
	&dev_attr_fw_pages,
	&dev_attr_reg_pages,
};

static void pkey_change_handler(struct work_struct *work)
{
	struct mlx5_ib_port_resources *ports =
		container_of(work, struct mlx5_ib_port_resources,
			     pkey_change_work);

	mutex_lock(&ports->devr->mutex);
	mlx5_ib_gsi_pkey_change(ports->gsi);
	mutex_unlock(&ports->devr->mutex);
}

static void mlx5_ib_handle_internal_error(struct mlx5_ib_dev *ibdev)
{
	struct mlx5_ib_qp *mqp;
	struct mlx5_ib_cq *send_mcq, *recv_mcq;
	struct mlx5_core_cq *mcq;
	struct list_head cq_armed_list;
	unsigned long flags_qp;
	unsigned long flags_cq;
	unsigned long flags;

	INIT_LIST_HEAD(&cq_armed_list);

	/* Go over the qp list residing on this ibdev, synchronizing with
	 * QP create/destroy.
	 */
	spin_lock_irqsave(&ibdev->reset_flow_resource_lock, flags);
	list_for_each_entry(mqp, &ibdev->qp_list, qps_list) {
		spin_lock_irqsave(&mqp->sq.lock, flags_qp);
		if (mqp->sq.tail != mqp->sq.head) {
			send_mcq = to_mcq(mqp->ibqp.send_cq);
			spin_lock_irqsave(&send_mcq->lock, flags_cq);
			if (send_mcq->mcq.comp &&
			    mqp->ibqp.send_cq->comp_handler) {
				if (!send_mcq->mcq.reset_notify_added) {
					send_mcq->mcq.reset_notify_added = 1;
					list_add_tail(&send_mcq->mcq.reset_notify,
						      &cq_armed_list);
				}
			}
			spin_unlock_irqrestore(&send_mcq->lock, flags_cq);
		}
		spin_unlock_irqrestore(&mqp->sq.lock, flags_qp);
		spin_lock_irqsave(&mqp->rq.lock, flags_qp);
		/* no handling is needed for SRQ */
		if (!mqp->ibqp.srq) {
			if (mqp->rq.tail != mqp->rq.head) {
				recv_mcq = to_mcq(mqp->ibqp.recv_cq);
				spin_lock_irqsave(&recv_mcq->lock, flags_cq);
				if (recv_mcq->mcq.comp &&
				    mqp->ibqp.recv_cq->comp_handler) {
					if (!recv_mcq->mcq.reset_notify_added) {
						recv_mcq->mcq.reset_notify_added = 1;
						list_add_tail(&recv_mcq->mcq.reset_notify,
							      &cq_armed_list);
					}
				}
				spin_unlock_irqrestore(&recv_mcq->lock,
						       flags_cq);
			}
		}
		spin_unlock_irqrestore(&mqp->rq.lock, flags_qp);
	}
	/* At this point all in-flight post-sends have been made visible by
	 * the lock/unlock cycles above; now arm all involved CQs.
	 */
	list_for_each_entry(mcq, &cq_armed_list, reset_notify) {
		mcq->comp(mcq);
	}
	spin_unlock_irqrestore(&ibdev->reset_flow_resource_lock, flags);
}

static void mlx5_ib_event(struct mlx5_core_dev *dev, void *context,
			  enum mlx5_dev_event event, unsigned long param)
{
	struct mlx5_ib_dev *ibdev = (struct mlx5_ib_dev *)context;
	struct ib_event ibev;
	bool fatal = false;
	u8 port = 0;

	switch (event) {
	case MLX5_DEV_EVENT_SYS_ERROR:
		ibev.event = IB_EVENT_DEVICE_FATAL;
		mlx5_ib_handle_internal_error(ibdev);
		fatal = true;
		break;

	case MLX5_DEV_EVENT_PORT_UP:
	case MLX5_DEV_EVENT_PORT_DOWN:
	case MLX5_DEV_EVENT_PORT_INITIALIZED:
		port = (u8)param;

		/* In RoCE, port up/down events are handled in
		 * mlx5_netdev_event().
		 */
		if (mlx5_ib_port_link_layer(&ibdev->ib_dev, port) ==
		    IB_LINK_LAYER_ETHERNET)
			return;

		ibev.event = (event == MLX5_DEV_EVENT_PORT_UP) ?
			     IB_EVENT_PORT_ACTIVE : IB_EVENT_PORT_ERR;
		break;

	case MLX5_DEV_EVENT_LID_CHANGE:
		ibev.event = IB_EVENT_LID_CHANGE;
		port = (u8)param;
		break;

	case MLX5_DEV_EVENT_PKEY_CHANGE:
		ibev.event = IB_EVENT_PKEY_CHANGE;
		port = (u8)param;

		schedule_work(&ibdev->devr.ports[port - 1].pkey_change_work);
		break;

	case MLX5_DEV_EVENT_GUID_CHANGE:
		ibev.event = IB_EVENT_GID_CHANGE;
		port = (u8)param;
		break;

	case MLX5_DEV_EVENT_CLIENT_REREG:
		ibev.event = IB_EVENT_CLIENT_REREGISTER;
		port = (u8)param;
		break;
	default:
		return;
	}

	ibev.device	      = &ibdev->ib_dev;
	ibev.element.port_num = port;

	if (port < 1 || port > ibdev->num_ports) {
		mlx5_ib_warn(ibdev, "warning: event on port %d\n", port);
		return;
	}

	if (ibdev->ib_active)
		ib_dispatch_event(&ibev);

	if (fatal)
		ibdev->ib_active = false;
}

static int set_has_smi_cap(struct mlx5_ib_dev *dev)
{
	struct mlx5_hca_vport_context vport_ctx;
	int err;
	int port;

	for (port = 1; port <= MLX5_CAP_GEN(dev->mdev, num_ports); port++) {
		dev->mdev->port_caps[port - 1].has_smi = false;
		if (MLX5_CAP_GEN(dev->mdev, port_type) ==
		    MLX5_CAP_PORT_TYPE_IB) {
			if (MLX5_CAP_GEN(dev->mdev, ib_virt)) {
				err = mlx5_query_hca_vport_context(dev->mdev, 0,
								   port, 0,
								   &vport_ctx);
				if (err) {
					mlx5_ib_err(dev, "query_hca_vport_context for port=%d failed %d\n",
						    port, err);
					return err;
				}
				dev->mdev->port_caps[port - 1].has_smi =
					vport_ctx.has_smi;
			} else {
				dev->mdev->port_caps[port - 1].has_smi = true;
			}
		}
	}
	return 0;
}

static void get_ext_port_caps(struct mlx5_ib_dev *dev)
{
	int port;

	for (port = 1; port <= MLX5_CAP_GEN(dev->mdev, num_ports); port++)
		mlx5_query_ext_port_caps(dev, port);
}

static int get_port_caps(struct mlx5_ib_dev *dev)
{
	struct ib_device_attr *dprops = NULL;
	struct ib_port_attr *pprops = NULL;
	int err = -ENOMEM;
	int port;
	struct ib_udata uhw = {.inlen = 0, .outlen = 0};

	pprops = kmalloc(sizeof(*pprops), GFP_KERNEL);
	if (!pprops)
		goto out;

	dprops = kmalloc(sizeof(*dprops), GFP_KERNEL);
	if (!dprops)
		goto out;

	err = set_has_smi_cap(dev);
	if (err)
		goto out;

	err = mlx5_ib_query_device(&dev->ib_dev, dprops, &uhw);
	if (err) {
		mlx5_ib_warn(dev, "query_device failed %d\n", err);
		goto out;
	}

	for (port = 1; port <= MLX5_CAP_GEN(dev->mdev, num_ports); port++) {
		err = mlx5_ib_query_port(&dev->ib_dev, port, pprops);
		if (err) {
			mlx5_ib_warn(dev, "query_port %d failed %d\n",
				     port, err);
			break;
		}
		dev->mdev->port_caps[port - 1].pkey_table_len =
			dprops->max_pkeys;
		dev->mdev->port_caps[port - 1].gid_table_len =
			pprops->gid_tbl_len;
		mlx5_ib_dbg(dev, "pkey_table_len %d, gid_table_len %d\n",
			    dprops->max_pkeys, pprops->gid_tbl_len);
	}

out:
	kfree(pprops);
	kfree(dprops);

	return err;
}

static void destroy_umrc_res(struct mlx5_ib_dev *dev)
{
	int err;

	err = mlx5_mr_cache_cleanup(dev);
	if (err)
		mlx5_ib_warn(dev, "mr cache cleanup failed\n");

	mlx5_ib_destroy_qp(dev->umrc.qp);
	ib_free_cq(dev->umrc.cq);
	ib_dealloc_pd(dev->umrc.pd);
}

enum {
	MAX_UMR_WR = 128,
};

static int create_umr_res(struct mlx5_ib_dev *dev)
{
	struct ib_qp_init_attr *init_attr = NULL;
	struct ib_qp_attr *attr = NULL;
	struct ib_pd *pd;
	struct ib_cq *cq;
	struct ib_qp *qp;
	int ret;

	attr = kzalloc(sizeof(*attr), GFP_KERNEL);
	init_attr = kzalloc(sizeof(*init_attr), GFP_KERNEL);
	if (!attr || !init_attr) {
		ret = -ENOMEM;
		goto error_0;
	}

	pd = ib_alloc_pd(&dev->ib_dev, 0);
	if (IS_ERR(pd)) {
		mlx5_ib_dbg(dev, "Couldn't create PD for sync UMR QP\n");
		ret = PTR_ERR(pd);
		goto error_0;
	}

	cq = ib_alloc_cq(&dev->ib_dev, NULL, 128, 0, IB_POLL_SOFTIRQ);
	if (IS_ERR(cq)) {
		mlx5_ib_dbg(dev, "Couldn't create CQ for sync UMR QP\n");
		ret = PTR_ERR(cq);
		goto error_2;
	}

	init_attr->send_cq = cq;
	init_attr->recv_cq = cq;
	init_attr->sq_sig_type = IB_SIGNAL_ALL_WR;
	init_attr->cap.max_send_wr = MAX_UMR_WR;
	init_attr->cap.max_send_sge = 1;
	init_attr->qp_type = MLX5_IB_QPT_REG_UMR;
	init_attr->port_num = 1;
	qp = mlx5_ib_create_qp(pd, init_attr, NULL);
	if (IS_ERR(qp)) {
		mlx5_ib_dbg(dev, "Couldn't create sync UMR QP\n");
		ret = PTR_ERR(qp);
		goto error_3;
	}
	qp->device     = &dev->ib_dev;
	qp->real_qp    = qp;
	qp->uobject    = NULL;
	qp->qp_type    = MLX5_IB_QPT_REG_UMR;

	attr->qp_state = IB_QPS_INIT;
	attr->port_num = 1;
	ret = mlx5_ib_modify_qp(qp, attr, IB_QP_STATE | IB_QP_PKEY_INDEX |
				IB_QP_PORT, NULL);
	if (ret) {
		mlx5_ib_dbg(dev, "Couldn't modify UMR QP\n");
		goto error_4;
	}

	memset(attr, 0, sizeof(*attr));
	attr->qp_state = IB_QPS_RTR;
	attr->path_mtu = IB_MTU_256;

	ret = mlx5_ib_modify_qp(qp, attr, IB_QP_STATE, NULL);
	if (ret) {
		mlx5_ib_dbg(dev, "Couldn't modify umr QP to rtr\n");
		goto error_4;
	}

	memset(attr, 0, sizeof(*attr));
	attr->qp_state = IB_QPS_RTS;
	ret = mlx5_ib_modify_qp(qp, attr, IB_QP_STATE, NULL);
	if (ret) {
		mlx5_ib_dbg(dev, "Couldn't modify umr QP to rts\n");
		goto error_4;
	}

	dev->umrc.qp = qp;
	dev->umrc.cq = cq;
	dev->umrc.pd = pd;

	sema_init(&dev->umrc.sem, MAX_UMR_WR);
	ret = mlx5_mr_cache_init(dev);
	if (ret) {
		mlx5_ib_warn(dev, "mr cache init failed %d\n", ret);
		goto error_4;
	}

	kfree(attr);
	kfree(init_attr);

	return 0;

error_4:
	mlx5_ib_destroy_qp(qp);

error_3:
	ib_free_cq(cq);

error_2:
	ib_dealloc_pd(pd);

error_0:
	kfree(attr);
	kfree(init_attr);
	return ret;
}

static int create_dev_resources(struct mlx5_ib_resources *devr)
{
	struct ib_srq_init_attr attr;
	struct mlx5_ib_dev *dev;
	struct ib_cq_init_attr cq_attr = {.cqe = 1};
	int port;
	int ret = 0;

	dev = container_of(devr, struct mlx5_ib_dev, devr);

	mutex_init(&devr->mutex);

	devr->p0 = mlx5_ib_alloc_pd(&dev->ib_dev, NULL, NULL);
	if (IS_ERR(devr->p0)) {
		ret = PTR_ERR(devr->p0);
		goto error0;
	}
	devr->p0->device  = &dev->ib_dev;
	devr->p0->uobject = NULL;
	atomic_set(&devr->p0->usecnt, 0);

	devr->c0 = mlx5_ib_create_cq(&dev->ib_dev, &cq_attr, NULL, NULL);
	if (IS_ERR(devr->c0)) {
		ret = PTR_ERR(devr->c0);
		goto error1;
	}
	devr->c0->device        = &dev->ib_dev;
	devr->c0->uobject       = NULL;
	devr->c0->comp_handler  = NULL;
	devr->c0->event_handler = NULL;
	devr->c0->cq_context    = NULL;
	atomic_set(&devr->c0->usecnt, 0);

	devr->x0 = mlx5_ib_alloc_xrcd(&dev->ib_dev, NULL, NULL);
	if (IS_ERR(devr->x0)) {
		ret = PTR_ERR(devr->x0);
		goto error2;
	}
	devr->x0->device = &dev->ib_dev;
	devr->x0->inode = NULL;
	atomic_set(&devr->x0->usecnt, 0);
	mutex_init(&devr->x0->tgt_qp_mutex);
	INIT_LIST_HEAD(&devr->x0->tgt_qp_list);

	devr->x1 = mlx5_ib_alloc_xrcd(&dev->ib_dev, NULL, NULL);
	if (IS_ERR(devr->x1)) {
		ret = PTR_ERR(devr->x1);
		goto error3;
	}
	devr->x1->device = &dev->ib_dev;
	devr->x1->inode = NULL;
	atomic_set(&devr->x1->usecnt, 0);
	mutex_init(&devr->x1->tgt_qp_mutex);
	INIT_LIST_HEAD(&devr->x1->tgt_qp_list);

	memset(&attr, 0, sizeof(attr));
	attr.attr.max_sge = 1;
	attr.attr.max_wr = 1;
	attr.srq_type = IB_SRQT_XRC;
	attr.ext.xrc.cq = devr->c0;
	attr.ext.xrc.xrcd = devr->x0;

	devr->s0 = mlx5_ib_create_srq(devr->p0, &attr, NULL);
	if (IS_ERR(devr->s0)) {
		ret = PTR_ERR(devr->s0);
		goto error4;
	}
	devr->s0->device	= &dev->ib_dev;
	devr->s0->pd		= devr->p0;
	devr->s0->uobject       = NULL;
	devr->s0->event_handler = NULL;
	devr->s0->srq_context   = NULL;
	devr->s0->srq_type      = IB_SRQT_XRC;
	devr->s0->ext.xrc.xrcd	= devr->x0;
	devr->s0->ext.xrc.cq	= devr->c0;
	atomic_inc(&devr->s0->ext.xrc.xrcd->usecnt);
	atomic_inc(&devr->s0->ext.xrc.cq->usecnt);
	atomic_inc(&devr->p0->usecnt);
	atomic_set(&devr->s0->usecnt, 0);

	memset(&attr, 0, sizeof(attr));
	attr.attr.max_sge = 1;
	attr.attr.max_wr = 1;
	attr.srq_type = IB_SRQT_BASIC;
	devr->s1 = mlx5_ib_create_srq(devr->p0, &attr, NULL);
	if (IS_ERR(devr->s1)) {
		ret = PTR_ERR(devr->s1);
		goto error5;
	}
	devr->s1->device	= &dev->ib_dev;
	devr->s1->pd		= devr->p0;
	devr->s1->uobject       = NULL;
	devr->s1->event_handler = NULL;
	devr->s1->srq_context   = NULL;
	devr->s1->srq_type      = IB_SRQT_BASIC;
	devr->s1->ext.xrc.cq	= devr->c0;
	atomic_inc(&devr->p0->usecnt);
	atomic_set(&devr->s1->usecnt, 0);

	for (port = 0; port < ARRAY_SIZE(devr->ports); ++port) {
		INIT_WORK(&devr->ports[port].pkey_change_work,
			  pkey_change_handler);
		devr->ports[port].devr = devr;
	}

	return 0;

error5:
	mlx5_ib_destroy_srq(devr->s0);
error4:
	mlx5_ib_dealloc_xrcd(devr->x1);
error3:
	mlx5_ib_dealloc_xrcd(devr->x0);
error2:
	mlx5_ib_destroy_cq(devr->c0);
error1:
	mlx5_ib_dealloc_pd(devr->p0);
error0:
	return ret;
}
2901
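/*
 * Undo create_dev_resources() in reverse dependency order, then make
 * sure no P_Key change work item is still running before its work
 * structs are freed along with the device.
 */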
static void destroy_dev_resources(struct mlx5_ib_resources *devr)
{
	struct mlx5_ib_dev *dev =
		container_of(devr, struct mlx5_ib_dev, devr);
	int port;

	mlx5_ib_destroy_srq(devr->s1);
	mlx5_ib_destroy_srq(devr->s0);
	mlx5_ib_dealloc_xrcd(devr->x0);
	mlx5_ib_dealloc_xrcd(devr->x1);
	mlx5_ib_destroy_cq(devr->c0);
	mlx5_ib_dealloc_pd(devr->p0);

	/* Make sure no change P_Key work items are still executing */
	for (port = 0; port < dev->num_ports; ++port)
		cancel_work_sync(&devr->ports[port].pkey_change_work);
}

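/*
 * Derive the RDMA core capability flags for a port. IB links are plain
 * IBA ports; for Ethernet links, RoCE v1/v2 support is reported only if
 * the firmware advertises both IPv4 and IPv6 L3 types plus the matching
 * roce_version bits.
 */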
static u32 get_core_cap_flags(struct ib_device *ibdev)
{
	struct mlx5_ib_dev *dev = to_mdev(ibdev);
	enum rdma_link_layer ll = mlx5_ib_port_link_layer(ibdev, 1);
	u8 l3_type_cap = MLX5_CAP_ROCE(dev->mdev, l3_type);
	u8 roce_version_cap = MLX5_CAP_ROCE(dev->mdev, roce_version);
	u32 ret = 0;

	if (ll == IB_LINK_LAYER_INFINIBAND)
		return RDMA_CORE_PORT_IBA_IB;

	if (!(l3_type_cap & MLX5_ROCE_L3_TYPE_IPV4_CAP))
		return 0;

	if (!(l3_type_cap & MLX5_ROCE_L3_TYPE_IPV6_CAP))
		return 0;

	if (roce_version_cap & MLX5_ROCE_VERSION_1_CAP)
		ret |= RDMA_CORE_PORT_IBA_ROCE;

	if (roce_version_cap & MLX5_ROCE_VERSION_2_CAP)
		ret |= RDMA_CORE_PORT_IBA_ROCE_UDP_ENCAP;

	return ret;
}

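/*
 * Report the port attributes that the IB core caches for the lifetime
 * of the device. max_mad_size is only set when the link is IB or the
 * firmware supports RoCE; it stays zero otherwise.
 */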
static int mlx5_port_immutable(struct ib_device *ibdev, u8 port_num,
			       struct ib_port_immutable *immutable)
{
	struct ib_port_attr attr;
	struct mlx5_ib_dev *dev = to_mdev(ibdev);
	enum rdma_link_layer ll = mlx5_ib_port_link_layer(ibdev, port_num);
	int err;

	err = mlx5_ib_query_port(ibdev, port_num, &attr);
	if (err)
		return err;

	immutable->pkey_tbl_len = attr.pkey_tbl_len;
	immutable->gid_tbl_len = attr.gid_tbl_len;
	immutable->core_cap_flags = get_core_cap_flags(ibdev);
	if ((ll == IB_LINK_LAYER_INFINIBAND) || MLX5_CAP_GEN(dev->mdev, roce))
		immutable->max_mad_size = IB_MGMT_MAD_SIZE;

	return 0;
}

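/* Expose the firmware version to the IB core as <major>.<minor>.<sub>. */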
static void get_dev_fw_str(struct ib_device *ibdev, char *str,
			   size_t str_len)
{
	struct mlx5_ib_dev *dev =
		container_of(ibdev, struct mlx5_ib_dev, ib_dev);

	snprintf(str, str_len, "%d.%d.%04d", fw_rev_maj(dev->mdev),
		 fw_rev_min(dev->mdev), fw_rev_sub(dev->mdev));
}

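/*
 * In a LAG (bonded) configuration both physical ports back a single IB
 * device, so received RoCE traffic has to be demultiplexed back to it.
 * Create the vport LAG context and a demux flow table for that; this is
 * a no-op when LAG is inactive. mlx5_eth_lag_cleanup() undoes both
 * steps.
 */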
static int mlx5_eth_lag_init(struct mlx5_ib_dev *dev)
{
	struct mlx5_core_dev *mdev = dev->mdev;
	struct mlx5_flow_namespace *ns = mlx5_get_flow_namespace(mdev,
								 MLX5_FLOW_NAMESPACE_LAG);
	struct mlx5_flow_table *ft;
	int err;

	if (!ns || !mlx5_lag_is_active(mdev))
		return 0;

	err = mlx5_cmd_create_vport_lag(mdev);
	if (err)
		return err;

	ft = mlx5_create_lag_demux_flow_table(ns, 0, 0);
	if (IS_ERR(ft)) {
		err = PTR_ERR(ft);
		goto err_destroy_vport_lag;
	}

	dev->flow_db.lag_demux_ft = ft;
	return 0;

err_destroy_vport_lag:
	mlx5_cmd_destroy_vport_lag(mdev);
	return err;
}

static void mlx5_eth_lag_cleanup(struct mlx5_ib_dev *dev)
{
	struct mlx5_core_dev *mdev = dev->mdev;

	if (dev->flow_db.lag_demux_ft) {
		mlx5_destroy_flow_table(dev->flow_db.lag_demux_ft);
		dev->flow_db.lag_demux_ft = NULL;

		mlx5_cmd_destroy_vport_lag(mdev);
	}
}

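/*
 * RoCE state tracking follows the matching netdevs, so a netdevice
 * notifier is (un)registered here. The notifier_call pointer doubles as
 * the "registered" flag for the removal path.
 */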
static int mlx5_add_netdev_notifier(struct mlx5_ib_dev *dev)
{
	int err;

	dev->roce.nb.notifier_call = mlx5_netdev_event;
	err = register_netdevice_notifier(&dev->roce.nb);
	if (err) {
		dev->roce.nb.notifier_call = NULL;
		return err;
	}

	return 0;
}

static void mlx5_remove_netdev_notifier(struct mlx5_ib_dev *dev)
{
	if (dev->roce.nb.notifier_call) {
		unregister_netdevice_notifier(&dev->roce.nb);
		dev->roce.nb.notifier_call = NULL;
	}
}

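/*
 * Ethernet (RoCE) bring-up: netdev notifier, RoCE in the NIC vport
 * context (when the firmware supports it), then LAG steering. Each step
 * is unwound in reverse if a later one fails.
 */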
static int mlx5_enable_eth(struct mlx5_ib_dev *dev)
{
	int err;

	err = mlx5_add_netdev_notifier(dev);
	if (err)
		return err;

	if (MLX5_CAP_GEN(dev->mdev, roce)) {
		err = mlx5_nic_vport_enable_roce(dev->mdev);
		if (err)
			goto err_unregister_netdevice_notifier;
	}

	err = mlx5_eth_lag_init(dev);
	if (err)
		goto err_disable_roce;

	return 0;

err_disable_roce:
	if (MLX5_CAP_GEN(dev->mdev, roce))
		mlx5_nic_vport_disable_roce(dev->mdev);

err_unregister_netdevice_notifier:
	mlx5_remove_netdev_notifier(dev);
	return err;
}

static void mlx5_disable_eth(struct mlx5_ib_dev *dev)
{
	mlx5_eth_lag_cleanup(dev);
	if (MLX5_CAP_GEN(dev->mdev, roce))
		mlx5_nic_vport_disable_roce(dev->mdev);
}

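/*
 * One hardware queue counter set is allocated per port; its ID is what
 * the rdma_hw_stats callbacks below query.
 */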
static void mlx5_ib_dealloc_q_counters(struct mlx5_ib_dev *dev)
{
	unsigned int i;

	for (i = 0; i < dev->num_ports; i++)
		mlx5_core_dealloc_q_counter(dev->mdev,
					    dev->port[i].q_cnt_id);
}

static int mlx5_ib_alloc_q_counters(struct mlx5_ib_dev *dev)
{
	int i;
	int ret;

	for (i = 0; i < dev->num_ports; i++) {
		ret = mlx5_core_alloc_q_counter(dev->mdev,
						&dev->port[i].q_cnt_id);
		if (ret) {
			mlx5_ib_warn(dev,
				     "couldn't allocate queue counter for port %d, err %d\n",
				     i + 1, ret);
			goto dealloc_counters;
		}
	}

	return 0;

dealloc_counters:
	while (--i >= 0)
		mlx5_core_dealloc_q_counter(dev->mdev,
					    dev->port[i].q_cnt_id);

	return ret;
}

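/*
 * Counter names exported to user space, and the byte offsets of the
 * matching fields in the QUERY_Q_COUNTER mailbox output. The two arrays
 * must stay in sync; mlx5_ib_alloc_hw_stats() enforces this at build
 * time.
 */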
static const char * const names[] = {
	"rx_write_requests",
	"rx_read_requests",
	"rx_atomic_requests",
	"out_of_buffer",
	"out_of_sequence",
	"duplicate_request",
	"rnr_nak_retry_err",
	"packet_seq_err",
	"implied_nak_seq_err",
	"local_ack_timeout_err",
};

static const size_t stats_offsets[] = {
	MLX5_BYTE_OFF(query_q_counter_out, rx_write_requests),
	MLX5_BYTE_OFF(query_q_counter_out, rx_read_requests),
	MLX5_BYTE_OFF(query_q_counter_out, rx_atomic_requests),
	MLX5_BYTE_OFF(query_q_counter_out, out_of_buffer),
	MLX5_BYTE_OFF(query_q_counter_out, out_of_sequence),
	MLX5_BYTE_OFF(query_q_counter_out, duplicate_request),
	MLX5_BYTE_OFF(query_q_counter_out, rnr_nak_retry_err),
	MLX5_BYTE_OFF(query_q_counter_out, packet_seq_err),
	MLX5_BYTE_OFF(query_q_counter_out, implied_nak_seq_err),
	MLX5_BYTE_OFF(query_q_counter_out, local_ack_timeout_err),
};

static struct rdma_hw_stats *mlx5_ib_alloc_hw_stats(struct ib_device *ibdev,
						    u8 port_num)
{
	BUILD_BUG_ON(ARRAY_SIZE(names) != ARRAY_SIZE(stats_offsets));

	/* Only per-port stats are supported, not device-wide ones */
	if (port_num == 0)
		return NULL;

	return rdma_alloc_hw_stats_struct(names, ARRAY_SIZE(names),
					  RDMA_HW_STATS_DEFAULT_LIFESPAN);
}

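/*
 * Read the port's queue counter set and widen the 32-bit big-endian
 * hardware values into the 64-bit stats array. Returns the number of
 * counters filled, or a negative errno.
 */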
static int mlx5_ib_get_hw_stats(struct ib_device *ibdev,
				struct rdma_hw_stats *stats,
				u8 port, int index)
{
	struct mlx5_ib_dev *dev = to_mdev(ibdev);
	int outlen = MLX5_ST_SZ_BYTES(query_q_counter_out);
	void *out;
	__be32 val;
	int ret;
	int i;

	if (!port || !stats)
		return -EINVAL;

	out = mlx5_vzalloc(outlen);
	if (!out)
		return -ENOMEM;

	ret = mlx5_core_query_q_counter(dev->mdev,
					dev->port[port - 1].q_cnt_id, 0,
					out, outlen);
	if (ret)
		goto free;

	for (i = 0; i < ARRAY_SIZE(names); i++) {
		val = *(__be32 *)(out + stats_offsets[i]);
		stats->value[i] = (u64)be32_to_cpu(val);
	}
	ret = ARRAY_SIZE(names);

free:
	kvfree(out);
	return ret;
}

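/*
 * Probe entry point, called by mlx5_core for each IB-capable function.
 * Builds the ib_device (verbs ops and command masks, gated on firmware
 * capabilities), enables RoCE/LAG on Ethernet links, then sets up
 * device resources, ODP, queue counters and UAR/blue-flame registers
 * before registering with the IB core. Every step is unwound in reverse
 * order on failure.
 */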
static void *mlx5_ib_add(struct mlx5_core_dev *mdev)
{
	struct mlx5_ib_dev *dev;
	enum rdma_link_layer ll;
	int port_type_cap;
	const char *name;
	int err;
	int i;

	port_type_cap = MLX5_CAP_GEN(mdev, port_type);
	ll = mlx5_port_type_cap_to_rdma_ll(port_type_cap);

	printk_once(KERN_INFO "%s", mlx5_version);

	dev = (struct mlx5_ib_dev *)ib_alloc_device(sizeof(*dev));
	if (!dev)
		return NULL;

	dev->mdev = mdev;

	dev->port = kcalloc(MLX5_CAP_GEN(mdev, num_ports), sizeof(*dev->port),
			    GFP_KERNEL);
	if (!dev->port)
		goto err_dealloc;

	rwlock_init(&dev->roce.netdev_lock);
	err = get_port_caps(dev);
	if (err)
		goto err_free_port;

	if (mlx5_use_mad_ifc(dev))
		get_ext_port_caps(dev);

	if (!mlx5_lag_is_active(mdev))
		name = "mlx5_%d";
	else
		name = "mlx5_bond_%d";

	strlcpy(dev->ib_dev.name, name, IB_DEVICE_NAME_MAX);
	dev->ib_dev.owner = THIS_MODULE;
	dev->ib_dev.node_type = RDMA_NODE_IB_CA;
	dev->ib_dev.local_dma_lkey = 0 /* not supported for now */;
	dev->num_ports = MLX5_CAP_GEN(mdev, num_ports);
	dev->ib_dev.phys_port_cnt = dev->num_ports;
	dev->ib_dev.num_comp_vectors =
		dev->mdev->priv.eq_table.num_comp_vectors;
	dev->ib_dev.dma_device = &mdev->pdev->dev;

	dev->ib_dev.uverbs_abi_ver = MLX5_IB_UVERBS_ABI_VERSION;
	dev->ib_dev.uverbs_cmd_mask =
		(1ull << IB_USER_VERBS_CMD_GET_CONTEXT) |
		(1ull << IB_USER_VERBS_CMD_QUERY_DEVICE) |
		(1ull << IB_USER_VERBS_CMD_QUERY_PORT) |
		(1ull << IB_USER_VERBS_CMD_ALLOC_PD) |
		(1ull << IB_USER_VERBS_CMD_DEALLOC_PD) |
		(1ull << IB_USER_VERBS_CMD_CREATE_AH) |
		(1ull << IB_USER_VERBS_CMD_DESTROY_AH) |
		(1ull << IB_USER_VERBS_CMD_REG_MR) |
		(1ull << IB_USER_VERBS_CMD_REREG_MR) |
		(1ull << IB_USER_VERBS_CMD_DEREG_MR) |
		(1ull << IB_USER_VERBS_CMD_CREATE_COMP_CHANNEL) |
		(1ull << IB_USER_VERBS_CMD_CREATE_CQ) |
		(1ull << IB_USER_VERBS_CMD_RESIZE_CQ) |
		(1ull << IB_USER_VERBS_CMD_DESTROY_CQ) |
		(1ull << IB_USER_VERBS_CMD_CREATE_QP) |
		(1ull << IB_USER_VERBS_CMD_MODIFY_QP) |
		(1ull << IB_USER_VERBS_CMD_QUERY_QP) |
		(1ull << IB_USER_VERBS_CMD_DESTROY_QP) |
		(1ull << IB_USER_VERBS_CMD_ATTACH_MCAST) |
		(1ull << IB_USER_VERBS_CMD_DETACH_MCAST) |
		(1ull << IB_USER_VERBS_CMD_CREATE_SRQ) |
		(1ull << IB_USER_VERBS_CMD_MODIFY_SRQ) |
		(1ull << IB_USER_VERBS_CMD_QUERY_SRQ) |
		(1ull << IB_USER_VERBS_CMD_DESTROY_SRQ) |
		(1ull << IB_USER_VERBS_CMD_CREATE_XSRQ) |
		(1ull << IB_USER_VERBS_CMD_OPEN_QP);
	dev->ib_dev.uverbs_ex_cmd_mask =
		(1ull << IB_USER_VERBS_EX_CMD_QUERY_DEVICE) |
		(1ull << IB_USER_VERBS_EX_CMD_CREATE_CQ) |
		(1ull << IB_USER_VERBS_EX_CMD_CREATE_QP) |
		(1ull << IB_USER_VERBS_EX_CMD_MODIFY_QP);

	dev->ib_dev.query_device = mlx5_ib_query_device;
	dev->ib_dev.query_port = mlx5_ib_query_port;
	dev->ib_dev.get_link_layer = mlx5_ib_port_link_layer;
	if (ll == IB_LINK_LAYER_ETHERNET)
		dev->ib_dev.get_netdev = mlx5_ib_get_netdev;
	dev->ib_dev.query_gid = mlx5_ib_query_gid;
	dev->ib_dev.add_gid = mlx5_ib_add_gid;
	dev->ib_dev.del_gid = mlx5_ib_del_gid;
	dev->ib_dev.query_pkey = mlx5_ib_query_pkey;
	dev->ib_dev.modify_device = mlx5_ib_modify_device;
	dev->ib_dev.modify_port = mlx5_ib_modify_port;
	dev->ib_dev.alloc_ucontext = mlx5_ib_alloc_ucontext;
	dev->ib_dev.dealloc_ucontext = mlx5_ib_dealloc_ucontext;
	dev->ib_dev.mmap = mlx5_ib_mmap;
	dev->ib_dev.alloc_pd = mlx5_ib_alloc_pd;
	dev->ib_dev.dealloc_pd = mlx5_ib_dealloc_pd;
	dev->ib_dev.create_ah = mlx5_ib_create_ah;
	dev->ib_dev.query_ah = mlx5_ib_query_ah;
	dev->ib_dev.destroy_ah = mlx5_ib_destroy_ah;
	dev->ib_dev.create_srq = mlx5_ib_create_srq;
	dev->ib_dev.modify_srq = mlx5_ib_modify_srq;
	dev->ib_dev.query_srq = mlx5_ib_query_srq;
	dev->ib_dev.destroy_srq = mlx5_ib_destroy_srq;
	dev->ib_dev.post_srq_recv = mlx5_ib_post_srq_recv;
	dev->ib_dev.create_qp = mlx5_ib_create_qp;
	dev->ib_dev.modify_qp = mlx5_ib_modify_qp;
	dev->ib_dev.query_qp = mlx5_ib_query_qp;
	dev->ib_dev.destroy_qp = mlx5_ib_destroy_qp;
	dev->ib_dev.post_send = mlx5_ib_post_send;
	dev->ib_dev.post_recv = mlx5_ib_post_recv;
	dev->ib_dev.create_cq = mlx5_ib_create_cq;
	dev->ib_dev.modify_cq = mlx5_ib_modify_cq;
	dev->ib_dev.resize_cq = mlx5_ib_resize_cq;
	dev->ib_dev.destroy_cq = mlx5_ib_destroy_cq;
	dev->ib_dev.poll_cq = mlx5_ib_poll_cq;
	dev->ib_dev.req_notify_cq = mlx5_ib_arm_cq;
	dev->ib_dev.get_dma_mr = mlx5_ib_get_dma_mr;
	dev->ib_dev.reg_user_mr = mlx5_ib_reg_user_mr;
	dev->ib_dev.rereg_user_mr = mlx5_ib_rereg_user_mr;
	dev->ib_dev.dereg_mr = mlx5_ib_dereg_mr;
	dev->ib_dev.attach_mcast = mlx5_ib_mcg_attach;
	dev->ib_dev.detach_mcast = mlx5_ib_mcg_detach;
	dev->ib_dev.process_mad = mlx5_ib_process_mad;
	dev->ib_dev.alloc_mr = mlx5_ib_alloc_mr;
	dev->ib_dev.map_mr_sg = mlx5_ib_map_mr_sg;
	dev->ib_dev.check_mr_status = mlx5_ib_check_mr_status;
	dev->ib_dev.get_port_immutable = mlx5_port_immutable;
	dev->ib_dev.get_dev_fw_str = get_dev_fw_str;
	if (mlx5_core_is_pf(mdev)) {
		dev->ib_dev.get_vf_config = mlx5_ib_get_vf_config;
		dev->ib_dev.set_vf_link_state = mlx5_ib_set_vf_link_state;
		dev->ib_dev.get_vf_stats = mlx5_ib_get_vf_stats;
		dev->ib_dev.set_vf_guid = mlx5_ib_set_vf_guid;
	}

	dev->ib_dev.disassociate_ucontext = mlx5_ib_disassociate_ucontext;

	mlx5_ib_internal_fill_odp_caps(dev);

	if (MLX5_CAP_GEN(mdev, imaicl)) {
		dev->ib_dev.alloc_mw = mlx5_ib_alloc_mw;
		dev->ib_dev.dealloc_mw = mlx5_ib_dealloc_mw;
		dev->ib_dev.uverbs_cmd_mask |=
			(1ull << IB_USER_VERBS_CMD_ALLOC_MW) |
			(1ull << IB_USER_VERBS_CMD_DEALLOC_MW);
	}

	if (MLX5_CAP_GEN(dev->mdev, out_of_seq_cnt) &&
	    MLX5_CAP_GEN(dev->mdev, retransmission_q_counters)) {
		dev->ib_dev.get_hw_stats = mlx5_ib_get_hw_stats;
		dev->ib_dev.alloc_hw_stats = mlx5_ib_alloc_hw_stats;
	}

	if (MLX5_CAP_GEN(mdev, xrc)) {
		dev->ib_dev.alloc_xrcd = mlx5_ib_alloc_xrcd;
		dev->ib_dev.dealloc_xrcd = mlx5_ib_dealloc_xrcd;
		dev->ib_dev.uverbs_cmd_mask |=
			(1ull << IB_USER_VERBS_CMD_OPEN_XRCD) |
			(1ull << IB_USER_VERBS_CMD_CLOSE_XRCD);
	}

	if (mlx5_ib_port_link_layer(&dev->ib_dev, 1) ==
	    IB_LINK_LAYER_ETHERNET) {
		dev->ib_dev.create_flow = mlx5_ib_create_flow;
		dev->ib_dev.destroy_flow = mlx5_ib_destroy_flow;
		dev->ib_dev.create_wq = mlx5_ib_create_wq;
		dev->ib_dev.modify_wq = mlx5_ib_modify_wq;
		dev->ib_dev.destroy_wq = mlx5_ib_destroy_wq;
		dev->ib_dev.create_rwq_ind_table = mlx5_ib_create_rwq_ind_table;
		dev->ib_dev.destroy_rwq_ind_table = mlx5_ib_destroy_rwq_ind_table;
		dev->ib_dev.uverbs_ex_cmd_mask |=
			(1ull << IB_USER_VERBS_EX_CMD_CREATE_FLOW) |
			(1ull << IB_USER_VERBS_EX_CMD_DESTROY_FLOW) |
			(1ull << IB_USER_VERBS_EX_CMD_CREATE_WQ) |
			(1ull << IB_USER_VERBS_EX_CMD_MODIFY_WQ) |
			(1ull << IB_USER_VERBS_EX_CMD_DESTROY_WQ) |
			(1ull << IB_USER_VERBS_EX_CMD_CREATE_RWQ_IND_TBL) |
			(1ull << IB_USER_VERBS_EX_CMD_DESTROY_RWQ_IND_TBL);
	}

	err = init_node_data(dev);
	if (err)
		goto err_free_port;

	mutex_init(&dev->flow_db.lock);
	mutex_init(&dev->cap_mask_mutex);
	INIT_LIST_HEAD(&dev->qp_list);
	spin_lock_init(&dev->reset_flow_resource_lock);

	if (ll == IB_LINK_LAYER_ETHERNET) {
		err = mlx5_enable_eth(dev);
		if (err)
			goto err_free_port;
	}

	err = create_dev_resources(&dev->devr);
	if (err)
		goto err_disable_eth;

	err = mlx5_ib_odp_init_one(dev);
	if (err)
		goto err_rsrc;

	if (MLX5_CAP_GEN(dev->mdev, max_qp_cnt)) {
		err = mlx5_ib_alloc_q_counters(dev);
		if (err)
			goto err_odp;
	}

	dev->mdev->priv.uar = mlx5_get_uars_page(dev->mdev);
	if (!dev->mdev->priv.uar)
		goto err_q_cnt;

	err = mlx5_alloc_bfreg(dev->mdev, &dev->bfreg, false, false);
	if (err)
		goto err_uar_page;

	err = mlx5_alloc_bfreg(dev->mdev, &dev->fp_bfreg, false, true);
	if (err)
		goto err_bfreg;

	err = ib_register_device(&dev->ib_dev, NULL);
	if (err)
		goto err_fp_bfreg;

	err = create_umr_res(dev);
	if (err)
		goto err_dev;

	for (i = 0; i < ARRAY_SIZE(mlx5_class_attributes); i++) {
		err = device_create_file(&dev->ib_dev.dev,
					 mlx5_class_attributes[i]);
		if (err)
			goto err_umrc;
	}

	dev->ib_active = true;

	return dev;

err_umrc:
	destroy_umrc_res(dev);

err_dev:
	ib_unregister_device(&dev->ib_dev);

err_fp_bfreg:
	mlx5_free_bfreg(dev->mdev, &dev->fp_bfreg);

err_bfreg:
	mlx5_free_bfreg(dev->mdev, &dev->bfreg);

err_uar_page:
	mlx5_put_uars_page(dev->mdev, dev->mdev->priv.uar);

err_q_cnt:
	if (MLX5_CAP_GEN(dev->mdev, max_qp_cnt))
		mlx5_ib_dealloc_q_counters(dev);

err_odp:
	mlx5_ib_odp_remove_one(dev);

err_rsrc:
	destroy_dev_resources(&dev->devr);

err_disable_eth:
	if (ll == IB_LINK_LAYER_ETHERNET) {
		mlx5_disable_eth(dev);
		mlx5_remove_netdev_notifier(dev);
	}

err_free_port:
	kfree(dev->port);

err_dealloc:
	ib_dealloc_device((struct ib_device *)dev);

	return NULL;
}

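/*
 * Removal path: unregister from the IB core first so no new verbs calls
 * can arrive, then release resources in roughly the reverse order of
 * mlx5_ib_add().
 */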
static void mlx5_ib_remove(struct mlx5_core_dev *mdev, void *context)
{
	struct mlx5_ib_dev *dev = context;
	enum rdma_link_layer ll = mlx5_ib_port_link_layer(&dev->ib_dev, 1);

	mlx5_remove_netdev_notifier(dev);
	ib_unregister_device(&dev->ib_dev);
	mlx5_free_bfreg(dev->mdev, &dev->fp_bfreg);
	mlx5_free_bfreg(dev->mdev, &dev->bfreg);
	mlx5_put_uars_page(dev->mdev, mdev->priv.uar);
	if (MLX5_CAP_GEN(dev->mdev, max_qp_cnt))
		mlx5_ib_dealloc_q_counters(dev);
	destroy_umrc_res(dev);
	mlx5_ib_odp_remove_one(dev);
	destroy_dev_resources(&dev->devr);
	if (ll == IB_LINK_LAYER_ETHERNET)
		mlx5_disable_eth(dev);
	kfree(dev->port);
	ib_dealloc_device(&dev->ib_dev);
}

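/*
 * Glue to mlx5_core: .add/.remove run at function probe/removal, .event
 * forwards core events to the IB layer, and the page-fault hook is
 * built in only when on-demand paging is configured.
 */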
static struct mlx5_interface mlx5_ib_interface = {
	.add      = mlx5_ib_add,
	.remove   = mlx5_ib_remove,
	.event    = mlx5_ib_event,
#ifdef CONFIG_INFINIBAND_ON_DEMAND_PAGING
	.pfault   = mlx5_ib_pfault,
#endif
	.protocol = MLX5_INTERFACE_PROTOCOL_IB,
};

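/* Module init/exit just (un)register the interface with mlx5_core. */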
static int __init mlx5_ib_init(void)
{
	return mlx5_register_interface(&mlx5_ib_interface);
}

static void __exit mlx5_ib_cleanup(void)
{
	mlx5_unregister_interface(&mlx5_ib_interface);
}

module_init(mlx5_ib_init);
module_exit(mlx5_ib_cleanup);