/*
 * Copyright (c) 2006, 2007 Cisco Systems, Inc. All rights reserved.
 * Copyright (c) 2007, 2008 Mellanox Technologies. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/module.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/errno.h>
#include <linux/netdevice.h>
#include <linux/inetdevice.h>
#include <linux/rtnetlink.h>
#include <linux/if_vlan.h>
#include <net/ipv6.h>
#include <net/addrconf.h>
#include <net/devlink.h>

#include <rdma/ib_smi.h>
#include <rdma/ib_user_verbs.h>
#include <rdma/ib_addr.h>
#include <rdma/ib_cache.h>

#include <net/bonding.h>

#include <linux/mlx4/driver.h>
#include <linux/mlx4/cmd.h>
#include <linux/mlx4/qp.h>

#include "mlx4_ib.h"
#include "user.h"

#define DRV_NAME	MLX4_IB_DRV_NAME
#define DRV_VERSION	"2.2-1"
#define DRV_RELDATE	"Feb 2014"

#define MLX4_IB_FLOW_MAX_PRIO 0xFFF
#define MLX4_IB_FLOW_QPN_MASK 0xFFFFFF
#define MLX4_IB_CARD_REV_A0   0xA0

MODULE_AUTHOR("Roland Dreier");
MODULE_DESCRIPTION("Mellanox ConnectX HCA InfiniBand driver");
MODULE_LICENSE("Dual BSD/GPL");
MODULE_VERSION(DRV_VERSION);

int mlx4_ib_sm_guid_assign = 0;
module_param_named(sm_guid_assign, mlx4_ib_sm_guid_assign, int, 0444);
MODULE_PARM_DESC(sm_guid_assign, "Enable SM alias_GUID assignment if sm_guid_assign > 0 (Default: 0)");

static const char mlx4_ib_version[] =
	DRV_NAME ": Mellanox ConnectX InfiniBand driver v"
	DRV_VERSION " (" DRV_RELDATE ")\n";

static void do_slave_init(struct mlx4_ib_dev *ibdev, int slave, int do_init);

static struct workqueue_struct *wq;

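/*
 * Initialize a query SMP: LID-routed subnet-management class, SubnGet()
 * method. Callers set attr_id/attr_mod before passing it to
 * mlx4_MAD_IFC().
 */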
static void init_query_mad(struct ib_smp *mad)
{
	mad->base_version = 1;
	mad->mgmt_class = IB_MGMT_CLASS_SUBN_LID_ROUTED;
	mad->class_version = 1;
	mad->method = IB_MGMT_METHOD_GET;
}

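/*
 * Device-managed flow steering (DMFS) is only reported when every
 * configured port type supports it: IB ports require DMFS_IPOIB and
 * Ethernet ports require FS_EN. It is also disabled for IB ports in a
 * multi-function (SR-IOV) environment.
 */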
static int check_flow_steering_support(struct mlx4_dev *dev)
{
	int eth_num_ports = 0;
	int ib_num_ports = 0;

	int dmfs = dev->caps.steering_mode == MLX4_STEERING_MODE_DEVICE_MANAGED;

	if (dmfs) {
		int i;
		mlx4_foreach_port(i, dev, MLX4_PORT_TYPE_ETH)
			eth_num_ports++;
		mlx4_foreach_port(i, dev, MLX4_PORT_TYPE_IB)
			ib_num_ports++;
		dmfs &= (!ib_num_ports ||
			 (dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_DMFS_IPOIB)) &&
			(!eth_num_ports ||
			 (dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_FS_EN));
		if (ib_num_ports && mlx4_is_mfunc(dev)) {
			pr_warn("Device managed flow steering is unavailable for IB port in multifunction env.\n");
			dmfs = 0;
		}
	}
	return dmfs;
}

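/* Count how many ports are currently configured as InfiniBand. */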
static int num_ib_ports(struct mlx4_dev *dev)
{
	int ib_ports = 0;
	int i;

	mlx4_foreach_port(i, dev, MLX4_PORT_TYPE_IB)
		ib_ports++;

	return ib_ports;
}

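/*
 * Return the netdev behind an Ethernet port, holding a reference for the
 * caller. On a bonded device, walk up to the bond master and prefer its
 * currently active slave so the result tracks the active link.
 */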
static struct net_device *mlx4_ib_get_netdev(struct ib_device *device, u8 port_num)
{
	struct mlx4_ib_dev *ibdev = to_mdev(device);
	struct net_device *dev;

	rcu_read_lock();
	dev = mlx4_get_protocol_dev(ibdev->dev, MLX4_PROT_ETH, port_num);

	if (dev) {
		if (mlx4_is_bonded(ibdev->dev)) {
			struct net_device *upper = NULL;

			upper = netdev_master_upper_dev_get_rcu(dev);
			if (upper) {
				struct net_device *active;

				active = bond_option_active_slave_get_rcu(netdev_priv(upper));
				if (active)
					dev = active;
			}
		}
	}
	if (dev)
		dev_hold(dev);

	rcu_read_unlock();
	return dev;
}

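/*
 * Program the port's whole GID table into firmware via SET_PORT. The v1
 * layout is a flat array of GIDs; a bonded device mirrors the same table
 * onto the second physical port.
 */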
static int mlx4_ib_update_gids_v1(struct gid_entry *gids,
				  struct mlx4_ib_dev *ibdev,
				  u8 port_num)
{
	struct mlx4_cmd_mailbox *mailbox;
	int err;
	struct mlx4_dev *dev = ibdev->dev;
	int i;
	union ib_gid *gid_tbl;

	mailbox = mlx4_alloc_cmd_mailbox(dev);
	if (IS_ERR(mailbox))
		return -ENOMEM;

	gid_tbl = mailbox->buf;

	for (i = 0; i < MLX4_MAX_PORT_GIDS; ++i)
		memcpy(&gid_tbl[i], &gids[i].gid, sizeof(union ib_gid));

	err = mlx4_cmd(dev, mailbox->dma,
		       MLX4_SET_PORT_GID_TABLE << 8 | port_num,
		       1, MLX4_CMD_SET_PORT, MLX4_CMD_TIME_CLASS_B,
		       MLX4_CMD_WRAPPED);
	if (mlx4_is_bonded(dev))
		err += mlx4_cmd(dev, mailbox->dma,
				MLX4_SET_PORT_GID_TABLE << 8 | 2,
				1, MLX4_CMD_SET_PORT, MLX4_CMD_TIME_CLASS_B,
				MLX4_CMD_WRAPPED);

	mlx4_free_cmd_mailbox(dev, mailbox);
	return err;
}

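/*
 * RoCE v1/v2-capable firmware takes a wider table entry that carries a
 * type and version per GID. RoCE v2 entries get version 2; IPv6 GIDs are
 * additionally marked type 1, while IPv4-mapped GIDs keep only the IPv4
 * address (the first 12 bytes of the entry are cleared).
 */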
static int mlx4_ib_update_gids_v1_v2(struct gid_entry *gids,
				     struct mlx4_ib_dev *ibdev,
				     u8 port_num)
{
	struct mlx4_cmd_mailbox *mailbox;
	int err;
	struct mlx4_dev *dev = ibdev->dev;
	int i;
	struct {
		union ib_gid	gid;
		__be32		rsrvd1[2];
		__be16		rsrvd2;
		u8		type;
		u8		version;
		__be32		rsrvd3;
	} *gid_tbl;

	mailbox = mlx4_alloc_cmd_mailbox(dev);
	if (IS_ERR(mailbox))
		return -ENOMEM;

	gid_tbl = mailbox->buf;
	for (i = 0; i < MLX4_MAX_PORT_GIDS; ++i) {
		memcpy(&gid_tbl[i].gid, &gids[i].gid, sizeof(union ib_gid));
		if (gids[i].gid_type == IB_GID_TYPE_ROCE_UDP_ENCAP) {
			gid_tbl[i].version = 2;
			if (!ipv6_addr_v4mapped((struct in6_addr *)&gids[i].gid))
				gid_tbl[i].type = 1;
			else
				memset(&gid_tbl[i].gid, 0, 12);
		}
	}

	err = mlx4_cmd(dev, mailbox->dma,
		       MLX4_SET_PORT_ROCE_ADDR << 8 | port_num,
		       1, MLX4_CMD_SET_PORT, MLX4_CMD_TIME_CLASS_B,
		       MLX4_CMD_WRAPPED);
	if (mlx4_is_bonded(dev))
		err += mlx4_cmd(dev, mailbox->dma,
				MLX4_SET_PORT_ROCE_ADDR << 8 | 2,
				1, MLX4_CMD_SET_PORT, MLX4_CMD_TIME_CLASS_B,
				MLX4_CMD_WRAPPED);

	mlx4_free_cmd_mailbox(dev, mailbox);
	return err;
}

static int mlx4_ib_update_gids(struct gid_entry *gids,
			       struct mlx4_ib_dev *ibdev,
			       u8 port_num)
{
	if (ibdev->dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_ROCE_V1_V2)
		return mlx4_ib_update_gids_v1_v2(gids, ibdev, port_num);

	return mlx4_ib_update_gids_v1(gids, ibdev, port_num);
}

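/*
 * GID cache add callback: reuse a matching entry (same GID and type) by
 * bumping its refcount, otherwise claim the first free slot. The table
 * snapshot is taken under the iboe lock and pushed to firmware after the
 * lock is dropped, since the firmware command may sleep.
 */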
static int mlx4_ib_add_gid(struct ib_device *device,
			   u8 port_num,
			   unsigned int index,
			   const union ib_gid *gid,
			   const struct ib_gid_attr *attr,
			   void **context)
{
	struct mlx4_ib_dev *ibdev = to_mdev(device);
	struct mlx4_ib_iboe *iboe = &ibdev->iboe;
	struct mlx4_port_gid_table *port_gid_table;
	int free = -1, found = -1;
	int ret = 0;
	int hw_update = 0;
	int i;
	struct gid_entry *gids = NULL;

	if (!rdma_cap_roce_gid_table(device, port_num))
		return -EINVAL;

	if (port_num > MLX4_MAX_PORTS)
		return -EINVAL;

	if (!context)
		return -EINVAL;

	port_gid_table = &iboe->gids[port_num - 1];
	spin_lock_bh(&iboe->lock);
	for (i = 0; i < MLX4_MAX_PORT_GIDS; ++i) {
		if (!memcmp(&port_gid_table->gids[i].gid, gid, sizeof(*gid)) &&
		    (port_gid_table->gids[i].gid_type == attr->gid_type)) {
			found = i;
			break;
		}
		if (free < 0 && !memcmp(&port_gid_table->gids[i].gid, &zgid, sizeof(*gid)))
			free = i; /* HW has space */
	}

	if (found < 0) {
		if (free < 0) {
			ret = -ENOSPC;
		} else {
			port_gid_table->gids[free].ctx = kmalloc(sizeof(*port_gid_table->gids[free].ctx), GFP_ATOMIC);
			if (!port_gid_table->gids[free].ctx) {
				ret = -ENOMEM;
			} else {
				*context = port_gid_table->gids[free].ctx;
				memcpy(&port_gid_table->gids[free].gid, gid, sizeof(*gid));
				port_gid_table->gids[free].gid_type = attr->gid_type;
				port_gid_table->gids[free].ctx->real_index = free;
				port_gid_table->gids[free].ctx->refcount = 1;
				hw_update = 1;
			}
		}
	} else {
		struct gid_cache_context *ctx = port_gid_table->gids[found].ctx;
		*context = ctx;
		ctx->refcount++;
	}
	if (!ret && hw_update) {
		gids = kmalloc(sizeof(*gids) * MLX4_MAX_PORT_GIDS, GFP_ATOMIC);
		if (!gids) {
			ret = -ENOMEM;
		} else {
			for (i = 0; i < MLX4_MAX_PORT_GIDS; i++) {
				memcpy(&gids[i].gid, &port_gid_table->gids[i].gid, sizeof(union ib_gid));
				gids[i].gid_type = port_gid_table->gids[i].gid_type;
			}
		}
	}
	spin_unlock_bh(&iboe->lock);

	if (!ret && hw_update) {
		ret = mlx4_ib_update_gids(gids, ibdev, port_num);
		kfree(gids);
	}

	return ret;
}

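/*
 * GID cache del callback: drop one reference on the cached entry; only
 * when the last user is gone is the slot zeroed and the table rewritten
 * in firmware.
 */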
static int mlx4_ib_del_gid(struct ib_device *device,
			   u8 port_num,
			   unsigned int index,
			   void **context)
{
	struct gid_cache_context *ctx = *context;
	struct mlx4_ib_dev *ibdev = to_mdev(device);
	struct mlx4_ib_iboe *iboe = &ibdev->iboe;
	struct mlx4_port_gid_table *port_gid_table;
	int ret = 0;
	int hw_update = 0;
	struct gid_entry *gids = NULL;

	if (!rdma_cap_roce_gid_table(device, port_num))
		return -EINVAL;

	if (port_num > MLX4_MAX_PORTS)
		return -EINVAL;

	port_gid_table = &iboe->gids[port_num - 1];
	spin_lock_bh(&iboe->lock);
	if (ctx) {
		ctx->refcount--;
		if (!ctx->refcount) {
			unsigned int real_index = ctx->real_index;

			memcpy(&port_gid_table->gids[real_index].gid, &zgid, sizeof(zgid));
			kfree(port_gid_table->gids[real_index].ctx);
			port_gid_table->gids[real_index].ctx = NULL;
			hw_update = 1;
		}
	}
	if (!ret && hw_update) {
		int i;

		gids = kmalloc(sizeof(*gids) * MLX4_MAX_PORT_GIDS, GFP_ATOMIC);
		if (!gids) {
			ret = -ENOMEM;
		} else {
			for (i = 0; i < MLX4_MAX_PORT_GIDS; i++)
				memcpy(&gids[i].gid, &port_gid_table->gids[i].gid, sizeof(union ib_gid));
		}
	}
	spin_unlock_bh(&iboe->lock);

	if (!ret && hw_update) {
		ret = mlx4_ib_update_gids(gids, ibdev, port_num);
		kfree(gids);
	}
	return ret;
}

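/*
 * Map an index in the core GID cache onto the slot actually programmed
 * in hardware. Bonded devices program everything through port 1, and
 * plain IB ports use the cache index unchanged.
 */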
int mlx4_ib_gid_index_to_real_index(struct mlx4_ib_dev *ibdev,
				    u8 port_num, int index)
{
	struct mlx4_ib_iboe *iboe = &ibdev->iboe;
	struct gid_cache_context *ctx = NULL;
	union ib_gid gid;
	struct mlx4_port_gid_table *port_gid_table;
	int real_index = -EINVAL;
	int i;
	int ret;
	unsigned long flags;
	struct ib_gid_attr attr;

	if (port_num > MLX4_MAX_PORTS)
		return -EINVAL;

	if (mlx4_is_bonded(ibdev->dev))
		port_num = 1;

	if (!rdma_cap_roce_gid_table(&ibdev->ib_dev, port_num))
		return index;

	ret = ib_get_cached_gid(&ibdev->ib_dev, port_num, index, &gid, &attr);
	if (ret)
		return ret;

	if (attr.ndev)
		dev_put(attr.ndev);

	if (!memcmp(&gid, &zgid, sizeof(gid)))
		return -EINVAL;

	spin_lock_irqsave(&iboe->lock, flags);
	port_gid_table = &iboe->gids[port_num - 1];

	for (i = 0; i < MLX4_MAX_PORT_GIDS; ++i)
		if (!memcmp(&port_gid_table->gids[i].gid, &gid, sizeof(gid)) &&
		    attr.gid_type == port_gid_table->gids[i].gid_type) {
			ctx = port_gid_table->gids[i].ctx;
			break;
		}
	if (ctx)
		real_index = ctx->real_index;
	spin_unlock_irqrestore(&iboe->lock, flags);
	return real_index;
}

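/*
 * ib_query_device() hook. Attributes come from a NodeInfo MAD plus
 * cached firmware capabilities. With an extended (uhw) request the
 * response is grown field by field via resp.response_length, optionally
 * reporting the HCA core-clock offset for user-space timestamp reads.
 */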
static int mlx4_ib_query_device(struct ib_device *ibdev,
				struct ib_device_attr *props,
				struct ib_udata *uhw)
{
	struct mlx4_ib_dev *dev = to_mdev(ibdev);
	struct ib_smp *in_mad = NULL;
	struct ib_smp *out_mad = NULL;
	int err = -ENOMEM;
	int have_ib_ports;
	struct mlx4_uverbs_ex_query_device cmd;
	struct mlx4_uverbs_ex_query_device_resp resp = {.comp_mask = 0};
	struct mlx4_clock_params clock_params;

	if (uhw->inlen) {
		if (uhw->inlen < sizeof(cmd))
			return -EINVAL;

		err = ib_copy_from_udata(&cmd, uhw, sizeof(cmd));
		if (err)
			return err;

		if (cmd.comp_mask)
			return -EINVAL;

		if (cmd.reserved)
			return -EINVAL;
	}

	resp.response_length = offsetof(typeof(resp), response_length) +
		sizeof(resp.response_length);
	in_mad = kzalloc(sizeof *in_mad, GFP_KERNEL);
	out_mad = kmalloc(sizeof *out_mad, GFP_KERNEL);
	if (!in_mad || !out_mad)
		goto out;

	init_query_mad(in_mad);
	in_mad->attr_id = IB_SMP_ATTR_NODE_INFO;

	err = mlx4_MAD_IFC(to_mdev(ibdev), MLX4_MAD_IFC_IGNORE_KEYS,
			   1, NULL, NULL, in_mad, out_mad);
	if (err)
		goto out;

	memset(props, 0, sizeof *props);

	have_ib_ports = num_ib_ports(dev->dev);

	props->fw_ver = dev->dev->caps.fw_ver;
	props->device_cap_flags = IB_DEVICE_CHANGE_PHY_PORT |
		IB_DEVICE_PORT_ACTIVE_EVENT |
		IB_DEVICE_SYS_IMAGE_GUID |
		IB_DEVICE_RC_RNR_NAK_GEN |
		IB_DEVICE_BLOCK_MULTICAST_LOOPBACK;
	if (dev->dev->caps.flags & MLX4_DEV_CAP_FLAG_BAD_PKEY_CNTR)
		props->device_cap_flags |= IB_DEVICE_BAD_PKEY_CNTR;
	if (dev->dev->caps.flags & MLX4_DEV_CAP_FLAG_BAD_QKEY_CNTR)
		props->device_cap_flags |= IB_DEVICE_BAD_QKEY_CNTR;
	if (dev->dev->caps.flags & MLX4_DEV_CAP_FLAG_APM && have_ib_ports)
		props->device_cap_flags |= IB_DEVICE_AUTO_PATH_MIG;
	if (dev->dev->caps.flags & MLX4_DEV_CAP_FLAG_UD_AV_PORT)
		props->device_cap_flags |= IB_DEVICE_UD_AV_PORT_ENFORCE;
	if (dev->dev->caps.flags & MLX4_DEV_CAP_FLAG_IPOIB_CSUM)
		props->device_cap_flags |= IB_DEVICE_UD_IP_CSUM;
	if (dev->dev->caps.max_gso_sz &&
	    (dev->dev->rev_id != MLX4_IB_CARD_REV_A0) &&
	    (dev->dev->caps.flags & MLX4_DEV_CAP_FLAG_BLH))
		props->device_cap_flags |= IB_DEVICE_UD_TSO;
	if (dev->dev->caps.bmme_flags & MLX4_BMME_FLAG_RESERVED_LKEY)
		props->device_cap_flags |= IB_DEVICE_LOCAL_DMA_LKEY;
	if ((dev->dev->caps.bmme_flags & MLX4_BMME_FLAG_LOCAL_INV) &&
	    (dev->dev->caps.bmme_flags & MLX4_BMME_FLAG_REMOTE_INV) &&
	    (dev->dev->caps.bmme_flags & MLX4_BMME_FLAG_FAST_REG_WR))
		props->device_cap_flags |= IB_DEVICE_MEM_MGT_EXTENSIONS;
	if (dev->dev->caps.flags & MLX4_DEV_CAP_FLAG_XRC)
		props->device_cap_flags |= IB_DEVICE_XRC;
	if (dev->dev->caps.flags & MLX4_DEV_CAP_FLAG_MEM_WINDOW)
		props->device_cap_flags |= IB_DEVICE_MEM_WINDOW;
	if (dev->dev->caps.bmme_flags & MLX4_BMME_FLAG_TYPE_2_WIN) {
		if (dev->dev->caps.bmme_flags & MLX4_BMME_FLAG_WIN_TYPE_2B)
			props->device_cap_flags |= IB_DEVICE_MEM_WINDOW_TYPE_2B;
		else
			props->device_cap_flags |= IB_DEVICE_MEM_WINDOW_TYPE_2A;
		if (dev->steering_support == MLX4_STEERING_MODE_DEVICE_MANAGED)
			props->device_cap_flags |= IB_DEVICE_MANAGED_FLOW_STEERING;
	}

	props->device_cap_flags |= IB_DEVICE_RAW_IP_CSUM;

	props->vendor_id = be32_to_cpup((__be32 *) (out_mad->data + 36)) &
		0xffffff;
	props->vendor_part_id = dev->dev->persist->pdev->device;
	props->hw_ver = be32_to_cpup((__be32 *) (out_mad->data + 32));
	memcpy(&props->sys_image_guid, out_mad->data + 4, 8);

	props->max_mr_size = ~0ull;
	props->page_size_cap = dev->dev->caps.page_size_cap;
	props->max_qp = dev->dev->quotas.qp;
	props->max_qp_wr = dev->dev->caps.max_wqes - MLX4_IB_SQ_MAX_SPARE;
	props->max_sge = min(dev->dev->caps.max_sq_sg,
			     dev->dev->caps.max_rq_sg);
	props->max_sge_rd = MLX4_MAX_SGE_RD;
	props->max_cq = dev->dev->quotas.cq;
	props->max_cqe = dev->dev->caps.max_cqes;
	props->max_mr = dev->dev->quotas.mpt;
	props->max_pd = dev->dev->caps.num_pds - dev->dev->caps.reserved_pds;
	props->max_qp_rd_atom = dev->dev->caps.max_qp_dest_rdma;
	props->max_qp_init_rd_atom = dev->dev->caps.max_qp_init_rdma;
	props->max_res_rd_atom = props->max_qp_rd_atom * props->max_qp;
	props->max_srq = dev->dev->quotas.srq;
	props->max_srq_wr = dev->dev->caps.max_srq_wqes - 1;
	props->max_srq_sge = dev->dev->caps.max_srq_sge;
	props->max_fast_reg_page_list_len = MLX4_MAX_FAST_REG_PAGES;
	props->local_ca_ack_delay = dev->dev->caps.local_ca_ack_delay;
	props->atomic_cap = dev->dev->caps.flags & MLX4_DEV_CAP_FLAG_ATOMIC ?
		IB_ATOMIC_HCA : IB_ATOMIC_NONE;
	props->masked_atomic_cap = props->atomic_cap;
	props->max_pkeys = dev->dev->caps.pkey_table_len[1];
	props->max_mcast_grp = dev->dev->caps.num_mgms + dev->dev->caps.num_amgms;
	props->max_mcast_qp_attach = dev->dev->caps.num_qp_per_mgm;
	props->max_total_mcast_qp_attach = props->max_mcast_qp_attach *
					   props->max_mcast_grp;
	props->max_map_per_fmr = dev->dev->caps.max_fmr_maps;
	props->hca_core_clock = dev->dev->caps.hca_core_clock * 1000UL;
	props->timestamp_mask = 0xFFFFFFFFFFFFULL;

	if (!mlx4_is_slave(dev->dev))
		err = mlx4_get_internal_clock_params(dev->dev, &clock_params);

	if (uhw->outlen >= resp.response_length + sizeof(resp.hca_core_clock_offset)) {
		resp.response_length += sizeof(resp.hca_core_clock_offset);
		if (!err && !mlx4_is_slave(dev->dev)) {
			resp.comp_mask |= QUERY_DEVICE_RESP_MASK_TIMESTAMP;
			resp.hca_core_clock_offset = clock_params.offset % PAGE_SIZE;
		}
	}

	if (uhw->outlen) {
		err = ib_copy_to_udata(uhw, &resp, resp.response_length);
		if (err)
			goto out;
	}
out:
	kfree(in_mad);
	kfree(out_mad);

	return err;
}

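/* Report whether a port is configured as InfiniBand or Ethernet (RoCE). */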
static enum rdma_link_layer
mlx4_ib_port_link_layer(struct ib_device *device, u8 port_num)
{
	struct mlx4_dev *dev = to_mdev(device)->dev;

	return dev->caps.port_mask[port_num] == MLX4_PORT_TYPE_IB ?
		IB_LINK_LAYER_INFINIBAND : IB_LINK_LAYER_ETHERNET;
}

static int ib_link_query_port(struct ib_device *ibdev, u8 port,
			      struct ib_port_attr *props, int netw_view)
{
	struct ib_smp *in_mad = NULL;
	struct ib_smp *out_mad = NULL;
	int ext_active_speed;
	int mad_ifc_flags = MLX4_MAD_IFC_IGNORE_KEYS;
	int err = -ENOMEM;

	in_mad = kzalloc(sizeof *in_mad, GFP_KERNEL);
	out_mad = kmalloc(sizeof *out_mad, GFP_KERNEL);
	if (!in_mad || !out_mad)
		goto out;

	init_query_mad(in_mad);
	in_mad->attr_id = IB_SMP_ATTR_PORT_INFO;
	in_mad->attr_mod = cpu_to_be32(port);

	if (mlx4_is_mfunc(to_mdev(ibdev)->dev) && netw_view)
		mad_ifc_flags |= MLX4_MAD_IFC_NET_VIEW;

	err = mlx4_MAD_IFC(to_mdev(ibdev), mad_ifc_flags, port, NULL, NULL,
			   in_mad, out_mad);
	if (err)
		goto out;

	props->lid = be16_to_cpup((__be16 *) (out_mad->data + 16));
	props->lmc = out_mad->data[34] & 0x7;
	props->sm_lid = be16_to_cpup((__be16 *) (out_mad->data + 18));
	props->sm_sl = out_mad->data[36] & 0xf;
	props->state = out_mad->data[32] & 0xf;
	props->phys_state = out_mad->data[33] >> 4;
	props->port_cap_flags = be32_to_cpup((__be32 *) (out_mad->data + 20));
	if (netw_view)
		props->gid_tbl_len = out_mad->data[50];
	else
		props->gid_tbl_len = to_mdev(ibdev)->dev->caps.gid_table_len[port];
	props->max_msg_sz = to_mdev(ibdev)->dev->caps.max_msg_sz;
	props->pkey_tbl_len = to_mdev(ibdev)->dev->caps.pkey_table_len[port];
	props->bad_pkey_cntr = be16_to_cpup((__be16 *) (out_mad->data + 46));
	props->qkey_viol_cntr = be16_to_cpup((__be16 *) (out_mad->data + 48));
	props->active_width = out_mad->data[31] & 0xf;
	props->active_speed = out_mad->data[35] >> 4;
	props->max_mtu = out_mad->data[41] & 0xf;
	props->active_mtu = out_mad->data[36] >> 4;
	props->subnet_timeout = out_mad->data[51] & 0x1f;
	props->max_vl_num = out_mad->data[37] >> 4;
	props->init_type_reply = out_mad->data[41] >> 4;

	/* Check if extended speeds (EDR/FDR/...) are supported */
	if (props->port_cap_flags & IB_PORT_EXTENDED_SPEEDS_SUP) {
		ext_active_speed = out_mad->data[62] >> 4;

		switch (ext_active_speed) {
		case 1:
			props->active_speed = IB_SPEED_FDR;
			break;
		case 2:
			props->active_speed = IB_SPEED_EDR;
			break;
		}
	}

	/* If the reported active speed is QDR, check if it is FDR-10 */
	if (props->active_speed == IB_SPEED_QDR) {
		init_query_mad(in_mad);
		in_mad->attr_id = MLX4_ATTR_EXTENDED_PORT_INFO;
		in_mad->attr_mod = cpu_to_be32(port);

		err = mlx4_MAD_IFC(to_mdev(ibdev), mad_ifc_flags, port,
				   NULL, NULL, in_mad, out_mad);
		if (err)
			goto out;

		/* Checking LinkSpeedActive for FDR-10 */
		if (out_mad->data[15] & 0x1)
			props->active_speed = IB_SPEED_FDR10;
	}

	/* Avoid wrong speed value returned by FW if the IB link is down. */
	if (props->state == IB_PORT_DOWN)
		props->active_speed = IB_SPEED_SDR;

out:
	kfree(in_mad);
	kfree(out_mad);
	return err;
}

static u8 state_to_phys_state(enum ib_port_state state)
{
	return state == IB_PORT_ACTIVE ? 5 : 3;
}

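/*
 * For RoCE ports the IB port attributes are synthesized: width and speed
 * come from QUERY_PORT, while state and active MTU are derived from the
 * underlying netdevice (the bond master when the device is bonded).
 */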
static int eth_link_query_port(struct ib_device *ibdev, u8 port,
			       struct ib_port_attr *props, int netw_view)
{
	struct mlx4_ib_dev *mdev = to_mdev(ibdev);
	struct mlx4_ib_iboe *iboe = &mdev->iboe;
	struct net_device *ndev;
	enum ib_mtu tmp;
	struct mlx4_cmd_mailbox *mailbox;
	int err = 0;
	int is_bonded = mlx4_is_bonded(mdev->dev);

	mailbox = mlx4_alloc_cmd_mailbox(mdev->dev);
	if (IS_ERR(mailbox))
		return PTR_ERR(mailbox);

	err = mlx4_cmd_box(mdev->dev, 0, mailbox->dma, port, 0,
			   MLX4_CMD_QUERY_PORT, MLX4_CMD_TIME_CLASS_B,
			   MLX4_CMD_WRAPPED);
	if (err)
		goto out;

	props->active_width = (((u8 *)mailbox->buf)[5] == 0x40) ?
		IB_WIDTH_4X : IB_WIDTH_1X;
	props->active_speed = IB_SPEED_QDR;
	props->port_cap_flags = IB_PORT_CM_SUP | IB_PORT_IP_BASED_GIDS;
	props->gid_tbl_len = mdev->dev->caps.gid_table_len[port];
	props->max_msg_sz = mdev->dev->caps.max_msg_sz;
	props->pkey_tbl_len = 1;
	props->max_mtu = IB_MTU_4096;
	props->max_vl_num = 2;
	props->state = IB_PORT_DOWN;
	props->phys_state = state_to_phys_state(props->state);
	props->active_mtu = IB_MTU_256;
	spin_lock_bh(&iboe->lock);
	ndev = iboe->netdevs[port - 1];
	if (ndev && is_bonded) {
		rcu_read_lock(); /* required to get upper dev */
		ndev = netdev_master_upper_dev_get_rcu(ndev);
		rcu_read_unlock();
	}
	if (!ndev)
		goto out_unlock;

	tmp = iboe_get_mtu(ndev->mtu);
	props->active_mtu = tmp ? min(props->max_mtu, tmp) : IB_MTU_256;

	props->state = (netif_running(ndev) && netif_carrier_ok(ndev)) ?
		IB_PORT_ACTIVE : IB_PORT_DOWN;
	props->phys_state = state_to_phys_state(props->state);
out_unlock:
	spin_unlock_bh(&iboe->lock);
out:
	mlx4_free_cmd_mailbox(mdev->dev, mailbox);
	return err;
}

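/*
 * Dispatch a port query to the IB or Ethernet flavor according to the
 * port's link layer. netw_view selects the network (SM) view instead of
 * the host view on multi-function devices.
 */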
int __mlx4_ib_query_port(struct ib_device *ibdev, u8 port,
			 struct ib_port_attr *props, int netw_view)
{
	int err;

	memset(props, 0, sizeof *props);

	err = mlx4_ib_port_link_layer(ibdev, port) == IB_LINK_LAYER_INFINIBAND ?
		ib_link_query_port(ibdev, port, props, netw_view) :
		eth_link_query_port(ibdev, port, props, netw_view);

	return err;
}

static int mlx4_ib_query_port(struct ib_device *ibdev, u8 port,
			      struct ib_port_attr *props)
{
	/* returns host view */
	return __mlx4_ib_query_port(ibdev, port, props, 0);
}

int __mlx4_ib_query_gid(struct ib_device *ibdev, u8 port, int index,
			union ib_gid *gid, int netw_view)
{
	struct ib_smp *in_mad = NULL;
	struct ib_smp *out_mad = NULL;
	int err = -ENOMEM;
	struct mlx4_ib_dev *dev = to_mdev(ibdev);
	int clear = 0;
	int mad_ifc_flags = MLX4_MAD_IFC_IGNORE_KEYS;

	in_mad = kzalloc(sizeof *in_mad, GFP_KERNEL);
	out_mad = kmalloc(sizeof *out_mad, GFP_KERNEL);
	if (!in_mad || !out_mad)
		goto out;

	init_query_mad(in_mad);
	in_mad->attr_id = IB_SMP_ATTR_PORT_INFO;
	in_mad->attr_mod = cpu_to_be32(port);

	if (mlx4_is_mfunc(dev->dev) && netw_view)
		mad_ifc_flags |= MLX4_MAD_IFC_NET_VIEW;

	err = mlx4_MAD_IFC(dev, mad_ifc_flags, port, NULL, NULL, in_mad, out_mad);
	if (err)
		goto out;

	memcpy(gid->raw, out_mad->data + 8, 8);

	if (mlx4_is_mfunc(dev->dev) && !netw_view) {
		if (index) {
			/* For any index > 0, return the null guid */
			err = 0;
			clear = 1;
			goto out;
		}
	}

	init_query_mad(in_mad);
	in_mad->attr_id = IB_SMP_ATTR_GUID_INFO;
	in_mad->attr_mod = cpu_to_be32(index / 8);

	err = mlx4_MAD_IFC(dev, mad_ifc_flags, port,
			   NULL, NULL, in_mad, out_mad);
	if (err)
		goto out;

	memcpy(gid->raw + 8, out_mad->data + (index % 8) * 8, 8);

out:
	if (clear)
		memset(gid->raw + 8, 0, 8);
	kfree(in_mad);
	kfree(out_mad);
	return err;
}

static int mlx4_ib_query_gid(struct ib_device *ibdev, u8 port, int index,
			     union ib_gid *gid)
{
	int ret;

	if (rdma_protocol_ib(ibdev, port))
		return __mlx4_ib_query_gid(ibdev, port, index, gid, 0);

	if (!rdma_protocol_roce(ibdev, port))
		return -ENODEV;

	if (!rdma_cap_roce_gid_table(ibdev, port))
		return -ENODEV;

	ret = ib_get_cached_gid(ibdev, port, index, gid, NULL);
	if (ret == -EAGAIN) {
		memcpy(gid, &zgid, sizeof(*gid));
		return 0;
	}

	return ret;
}

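/*
 * Read one P_Key through a PKeyTable MAD. The table is fetched in blocks
 * of 32 entries, so index / 32 selects the block and index % 32 the
 * entry within it.
 */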
int __mlx4_ib_query_pkey(struct ib_device *ibdev, u8 port, u16 index,
			 u16 *pkey, int netw_view)
{
	struct ib_smp *in_mad = NULL;
	struct ib_smp *out_mad = NULL;
	int mad_ifc_flags = MLX4_MAD_IFC_IGNORE_KEYS;
	int err = -ENOMEM;

	in_mad = kzalloc(sizeof *in_mad, GFP_KERNEL);
	out_mad = kmalloc(sizeof *out_mad, GFP_KERNEL);
	if (!in_mad || !out_mad)
		goto out;

	init_query_mad(in_mad);
	in_mad->attr_id = IB_SMP_ATTR_PKEY_TABLE;
	in_mad->attr_mod = cpu_to_be32(index / 32);

	if (mlx4_is_mfunc(to_mdev(ibdev)->dev) && netw_view)
		mad_ifc_flags |= MLX4_MAD_IFC_NET_VIEW;

	err = mlx4_MAD_IFC(to_mdev(ibdev), mad_ifc_flags, port, NULL, NULL,
			   in_mad, out_mad);
	if (err)
		goto out;

	*pkey = be16_to_cpu(((__be16 *) out_mad->data)[index % 32]);

out:
	kfree(in_mad);
	kfree(out_mad);
	return err;
}

static int mlx4_ib_query_pkey(struct ib_device *ibdev, u8 port, u16 index, u16 *pkey)
{
	return __mlx4_ib_query_pkey(ibdev, port, index, pkey, 0);
}

static int mlx4_ib_modify_device(struct ib_device *ibdev, int mask,
				 struct ib_device_modify *props)
{
	struct mlx4_cmd_mailbox *mailbox;
	unsigned long flags;

	if (mask & ~IB_DEVICE_MODIFY_NODE_DESC)
		return -EOPNOTSUPP;

	if (!(mask & IB_DEVICE_MODIFY_NODE_DESC))
		return 0;

	if (mlx4_is_slave(to_mdev(ibdev)->dev))
		return -EOPNOTSUPP;

	spin_lock_irqsave(&to_mdev(ibdev)->sm_lock, flags);
	memcpy(ibdev->node_desc, props->node_desc, 64);
	spin_unlock_irqrestore(&to_mdev(ibdev)->sm_lock, flags);

	/*
	 * If possible, pass node desc to FW, so it can generate
	 * a 144 trap.  If cmd fails, just ignore.
	 */
	mailbox = mlx4_alloc_cmd_mailbox(to_mdev(ibdev)->dev);
	if (IS_ERR(mailbox))
		return 0;

	memcpy(mailbox->buf, props->node_desc, 64);
	mlx4_cmd(to_mdev(ibdev)->dev, mailbox->dma, 1, 0,
		 MLX4_CMD_SET_NODE, MLX4_CMD_TIME_CLASS_A, MLX4_CMD_NATIVE);

	mlx4_free_cmd_mailbox(to_mdev(ibdev)->dev, mailbox);

	return 0;
}

static int mlx4_ib_SET_PORT(struct mlx4_ib_dev *dev, u8 port, int reset_qkey_viols,
			    u32 cap_mask)
{
	struct mlx4_cmd_mailbox *mailbox;
	int err;

	mailbox = mlx4_alloc_cmd_mailbox(dev->dev);
	if (IS_ERR(mailbox))
		return PTR_ERR(mailbox);

	if (dev->dev->flags & MLX4_FLAG_OLD_PORT_CMDS) {
		*(u8 *) mailbox->buf = !!reset_qkey_viols << 6;
		((__be32 *) mailbox->buf)[2] = cpu_to_be32(cap_mask);
	} else {
		((u8 *) mailbox->buf)[3] = !!reset_qkey_viols;
		((__be32 *) mailbox->buf)[1] = cpu_to_be32(cap_mask);
	}

	err = mlx4_cmd(dev->dev, mailbox->dma, port, MLX4_SET_PORT_IB_OPCODE,
		       MLX4_CMD_SET_PORT, MLX4_CMD_TIME_CLASS_B,
		       MLX4_CMD_WRAPPED);

	mlx4_free_cmd_mailbox(dev->dev, mailbox);
	return err;
}

static int mlx4_ib_modify_port(struct ib_device *ibdev, u8 port, int mask,
			       struct ib_port_modify *props)
{
	struct mlx4_ib_dev *mdev = to_mdev(ibdev);
	u8 is_eth = mdev->dev->caps.port_type[port] == MLX4_PORT_TYPE_ETH;
	struct ib_port_attr attr;
	u32 cap_mask;
	int err;

	/* return OK if this is RoCE. CM calls ib_modify_port() regardless
	 * of whether port link layer is ETH or IB. For ETH ports, qkey
	 * violations and port capabilities are not meaningful.
	 */
	if (is_eth)
		return 0;

	mutex_lock(&mdev->cap_mask_mutex);

	err = mlx4_ib_query_port(ibdev, port, &attr);
	if (err)
		goto out;

	cap_mask = (attr.port_cap_flags | props->set_port_cap_mask) &
		~props->clr_port_cap_mask;

	err = mlx4_ib_SET_PORT(mdev, port,
			       !!(mask & IB_PORT_RESET_QKEY_CNTR),
			       cap_mask);

out:
	mutex_unlock(&to_mdev(ibdev)->cap_mask_mutex);
	return err;
}

static struct ib_ucontext *mlx4_ib_alloc_ucontext(struct ib_device *ibdev,
						  struct ib_udata *udata)
{
	struct mlx4_ib_dev *dev = to_mdev(ibdev);
	struct mlx4_ib_ucontext *context;
	struct mlx4_ib_alloc_ucontext_resp_v3 resp_v3;
	struct mlx4_ib_alloc_ucontext_resp resp;
	int err;

	if (!dev->ib_active)
		return ERR_PTR(-EAGAIN);

	if (ibdev->uverbs_abi_ver == MLX4_IB_UVERBS_NO_DEV_CAPS_ABI_VERSION) {
		resp_v3.qp_tab_size = dev->dev->caps.num_qps;
		resp_v3.bf_reg_size = dev->dev->caps.bf_reg_size;
		resp_v3.bf_regs_per_page = dev->dev->caps.bf_regs_per_page;
	} else {
		resp.dev_caps = dev->dev->caps.userspace_caps;
		resp.qp_tab_size = dev->dev->caps.num_qps;
		resp.bf_reg_size = dev->dev->caps.bf_reg_size;
		resp.bf_regs_per_page = dev->dev->caps.bf_regs_per_page;
		resp.cqe_size = dev->dev->caps.cqe_size;
	}

	context = kzalloc(sizeof(*context), GFP_KERNEL);
	if (!context)
		return ERR_PTR(-ENOMEM);

	err = mlx4_uar_alloc(to_mdev(ibdev)->dev, &context->uar);
	if (err) {
		kfree(context);
		return ERR_PTR(err);
	}

	INIT_LIST_HEAD(&context->db_page_list);
	mutex_init(&context->db_page_mutex);

	if (ibdev->uverbs_abi_ver == MLX4_IB_UVERBS_NO_DEV_CAPS_ABI_VERSION)
		err = ib_copy_to_udata(udata, &resp_v3, sizeof(resp_v3));
	else
		err = ib_copy_to_udata(udata, &resp, sizeof(resp));

	if (err) {
		mlx4_uar_free(to_mdev(ibdev)->dev, &context->uar);
		kfree(context);
		return ERR_PTR(-EFAULT);
	}

	return &context->ibucontext;
}

static int mlx4_ib_dealloc_ucontext(struct ib_ucontext *ibcontext)
{
	struct mlx4_ib_ucontext *context = to_mucontext(ibcontext);

	mlx4_uar_free(to_mdev(ibcontext->device)->dev, &context->uar);
	kfree(context);

	return 0;
}

static void mlx4_ib_vma_open(struct vm_area_struct *area)
{
	/* vma_open is called when a new VMA is created on top of our VMA.
	 * This is done through either mremap flow or split_vma (usually due
	 * to mlock, madvise, munmap, etc.). We do not support a clone of
	 * the vma, as this VMA is strongly hardware related. Therefore we
	 * set the vm_ops of the newly created/cloned VMA to NULL, to
	 * prevent it from calling us again and trying to do incorrect
	 * actions. We assume that the original vma size is exactly a
	 * single page, and that no "splitting" operations will be
	 * performed on it.
	 */
	area->vm_ops = NULL;
}

static void mlx4_ib_vma_close(struct vm_area_struct *area)
{
	struct mlx4_ib_vma_private_data *mlx4_ib_vma_priv_data;

	/* It's guaranteed that all VMAs opened on a FD are closed before
	 * the file itself is closed, therefore no sync is needed with the
	 * regular closing flow (e.g. mlx4_ib_dealloc_ucontext). However, a
	 * sync is needed with accessing the vma as part of
	 * mlx4_ib_disassociate_ucontext. The close operation is usually
	 * called under mm->mmap_sem except when the process is exiting;
	 * the exiting case is handled explicitly as part of
	 * mlx4_ib_disassociate_ucontext.
	 */
	mlx4_ib_vma_priv_data = (struct mlx4_ib_vma_private_data *)
				area->vm_private_data;

	/* Set the vma context pointer to NULL in the driver's private data
	 * to protect against a race with mlx4_ib_disassociate_ucontext().
	 */
	mlx4_ib_vma_priv_data->vma = NULL;
}

static const struct vm_operations_struct mlx4_ib_vm_ops = {
	.open = mlx4_ib_vma_open,
	.close = mlx4_ib_vma_close
};

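/*
 * Disassociate a ucontext from its owning process, e.g. when the uverbs
 * device goes away while mappings still exist: wait out a dying owner if
 * its mm is already gone, then zap the PTEs of every mapped HW BAR page
 * and detach our vm_ops so later VMA teardown cannot touch freed context
 * state.
 */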
static void mlx4_ib_disassociate_ucontext(struct ib_ucontext *ibcontext)
{
	int i;
	int ret = 0;
	struct vm_area_struct *vma;
	struct mlx4_ib_ucontext *context = to_mucontext(ibcontext);
	struct task_struct *owning_process = NULL;
	struct mm_struct *owning_mm = NULL;

	owning_process = get_pid_task(ibcontext->tgid, PIDTYPE_PID);
	if (!owning_process)
		return;

	owning_mm = get_task_mm(owning_process);
	if (!owning_mm) {
		pr_info("no mm, disassociate ucontext is pending task termination\n");
		while (1) {
			/* Make sure the task is dead before returning; this
			 * prevents a rare case of the module going down in
			 * parallel with a call to mlx4_ib_vma_close.
			 */
			put_task_struct(owning_process);
			msleep(1);
			owning_process = get_pid_task(ibcontext->tgid,
						      PIDTYPE_PID);
			if (!owning_process ||
			    owning_process->state == TASK_DEAD) {
				pr_info("disassociate ucontext done, task was terminated\n");
				/* In case the task was dead, we need to
				 * release the task struct.
				 */
				if (owning_process)
					put_task_struct(owning_process);
				return;
			}
		}
	}

	/* Need to protect from a race on closing the vma as part of
	 * mlx4_ib_vma_close().
	 */
	down_read(&owning_mm->mmap_sem);
	for (i = 0; i < HW_BAR_COUNT; i++) {
		vma = context->hw_bar_info[i].vma;
		if (!vma)
			continue;

		ret = zap_vma_ptes(context->hw_bar_info[i].vma,
				   context->hw_bar_info[i].vma->vm_start,
				   PAGE_SIZE);
		if (ret) {
			pr_err("Error: zap_vma_ptes failed for index=%d, ret=%d\n", i, ret);
			BUG_ON(1);
		}

		/* context going to be destroyed, should not access ops any more */
		context->hw_bar_info[i].vma->vm_ops = NULL;
	}

	up_read(&owning_mm->mmap_sem);
	mmput(owning_mm);
	put_task_struct(owning_process);
}

static void mlx4_ib_set_vma_data(struct vm_area_struct *vma,
				 struct mlx4_ib_vma_private_data *vma_private_data)
{
	vma_private_data->vma = vma;
	vma->vm_private_data = vma_private_data;
	vma->vm_ops = &mlx4_ib_vm_ops;
}

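/*
 * mmap offsets exposed to user space: page offset 0 maps the UAR
 * doorbell page, offset 1 the BlueFlame page (when BlueFlame is
 * supported), and offset 3 the internal HCA clock page. Each BAR may
 * only be mapped once per ucontext.
 */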
static int mlx4_ib_mmap(struct ib_ucontext *context, struct vm_area_struct *vma)
{
	struct mlx4_ib_dev *dev = to_mdev(context->device);
	struct mlx4_ib_ucontext *mucontext = to_mucontext(context);

	if (vma->vm_end - vma->vm_start != PAGE_SIZE)
		return -EINVAL;

	if (vma->vm_pgoff == 0) {
		/* We prevent double mmapping on the same context */
		if (mucontext->hw_bar_info[HW_BAR_DB].vma)
			return -EINVAL;

		vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);

		if (io_remap_pfn_range(vma, vma->vm_start,
				       to_mucontext(context)->uar.pfn,
				       PAGE_SIZE, vma->vm_page_prot))
			return -EAGAIN;

		mlx4_ib_set_vma_data(vma, &mucontext->hw_bar_info[HW_BAR_DB]);

	} else if (vma->vm_pgoff == 1 && dev->dev->caps.bf_reg_size != 0) {
		/* We prevent double mmapping on the same context */
		if (mucontext->hw_bar_info[HW_BAR_BF].vma)
			return -EINVAL;

		vma->vm_page_prot = pgprot_writecombine(vma->vm_page_prot);

		if (io_remap_pfn_range(vma, vma->vm_start,
				       to_mucontext(context)->uar.pfn +
				       dev->dev->caps.num_uars,
				       PAGE_SIZE, vma->vm_page_prot))
			return -EAGAIN;

		mlx4_ib_set_vma_data(vma, &mucontext->hw_bar_info[HW_BAR_BF]);

	} else if (vma->vm_pgoff == 3) {
		struct mlx4_clock_params params;
		int ret;

		/* We prevent double mmapping on the same context */
		if (mucontext->hw_bar_info[HW_BAR_CLOCK].vma)
			return -EINVAL;

		ret = mlx4_get_internal_clock_params(dev->dev, &params);

		if (ret)
			return ret;

		vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
		if (io_remap_pfn_range(vma, vma->vm_start,
				       (pci_resource_start(dev->dev->persist->pdev,
							   params.bar) +
					params.offset)
				       >> PAGE_SHIFT,
				       PAGE_SIZE, vma->vm_page_prot))
			return -EAGAIN;

		mlx4_ib_set_vma_data(vma,
				     &mucontext->hw_bar_info[HW_BAR_CLOCK]);
	} else {
		return -EINVAL;
	}

	return 0;
}

static struct ib_pd *mlx4_ib_alloc_pd(struct ib_device *ibdev,
				      struct ib_ucontext *context,
				      struct ib_udata *udata)
{
	struct mlx4_ib_pd *pd;
	int err;

	pd = kmalloc(sizeof *pd, GFP_KERNEL);
	if (!pd)
		return ERR_PTR(-ENOMEM);

	err = mlx4_pd_alloc(to_mdev(ibdev)->dev, &pd->pdn);
	if (err) {
		kfree(pd);
		return ERR_PTR(err);
	}

	if (context)
		if (ib_copy_to_udata(udata, &pd->pdn, sizeof (__u32))) {
			mlx4_pd_free(to_mdev(ibdev)->dev, pd->pdn);
			kfree(pd);
			return ERR_PTR(-EFAULT);
		}

	return &pd->ibpd;
}

static int mlx4_ib_dealloc_pd(struct ib_pd *pd)
{
	mlx4_pd_free(to_mdev(pd->device)->dev, to_mpd(pd)->pdn);
	kfree(pd);

	return 0;
}

static struct ib_xrcd *mlx4_ib_alloc_xrcd(struct ib_device *ibdev,
					  struct ib_ucontext *context,
					  struct ib_udata *udata)
{
	struct mlx4_ib_xrcd *xrcd;
	struct ib_cq_init_attr cq_attr = {};
	int err;

	if (!(to_mdev(ibdev)->dev->caps.flags & MLX4_DEV_CAP_FLAG_XRC))
		return ERR_PTR(-ENOSYS);

	xrcd = kmalloc(sizeof *xrcd, GFP_KERNEL);
	if (!xrcd)
		return ERR_PTR(-ENOMEM);

	err = mlx4_xrcd_alloc(to_mdev(ibdev)->dev, &xrcd->xrcdn);
	if (err)
		goto err1;

	xrcd->pd = ib_alloc_pd(ibdev);
	if (IS_ERR(xrcd->pd)) {
		err = PTR_ERR(xrcd->pd);
		goto err2;
	}

	cq_attr.cqe = 1;
	xrcd->cq = ib_create_cq(ibdev, NULL, NULL, xrcd, &cq_attr);
	if (IS_ERR(xrcd->cq)) {
		err = PTR_ERR(xrcd->cq);
		goto err3;
	}

	return &xrcd->ibxrcd;

err3:
	ib_dealloc_pd(xrcd->pd);
err2:
	mlx4_xrcd_free(to_mdev(ibdev)->dev, xrcd->xrcdn);
err1:
	kfree(xrcd);
	return ERR_PTR(err);
}

static int mlx4_ib_dealloc_xrcd(struct ib_xrcd *xrcd)
{
	ib_destroy_cq(to_mxrcd(xrcd)->cq);
	ib_dealloc_pd(to_mxrcd(xrcd)->pd);
	mlx4_xrcd_free(to_mdev(xrcd->device)->dev, to_mxrcd(xrcd)->xrcdn);
	kfree(xrcd);

	return 0;
}

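/*
 * Remember a multicast GID on the QP's gid_list; ge->added records
 * whether a netdevice was present at attach time so detach can undo the
 * work later.
 */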
static int add_gid_entry(struct ib_qp *ibqp, union ib_gid *gid)
{
	struct mlx4_ib_qp *mqp = to_mqp(ibqp);
	struct mlx4_ib_dev *mdev = to_mdev(ibqp->device);
	struct mlx4_ib_gid_entry *ge;

	ge = kzalloc(sizeof *ge, GFP_KERNEL);
	if (!ge)
		return -ENOMEM;

	ge->gid = *gid;
	if (mlx4_ib_add_mc(mdev, mqp, gid)) {
		ge->port = mqp->port;
		ge->added = 1;
	}

	mutex_lock(&mqp->mutex);
	list_add_tail(&ge->list, &mqp->gid_list);
	mutex_unlock(&mqp->mutex);

	return 0;
}

static void mlx4_ib_delete_counters_table(struct mlx4_ib_dev *ibdev,
					  struct mlx4_ib_counters *ctr_table)
{
	struct counter_index *counter, *tmp_count;

	mutex_lock(&ctr_table->mutex);
	list_for_each_entry_safe(counter, tmp_count, &ctr_table->counters_list,
				 list) {
		if (counter->allocated)
			mlx4_counter_free(ibdev->dev, counter->index);
		list_del(&counter->list);
		kfree(counter);
	}
	mutex_unlock(&ctr_table->mutex);
}

int mlx4_ib_add_mc(struct mlx4_ib_dev *mdev, struct mlx4_ib_qp *mqp,
		   union ib_gid *gid)
{
	struct net_device *ndev;
	int ret = 0;

	if (!mqp->port)
		return 0;

	spin_lock_bh(&mdev->iboe.lock);
	ndev = mdev->iboe.netdevs[mqp->port - 1];
	if (ndev)
		dev_hold(ndev);
	spin_unlock_bh(&mdev->iboe.lock);

	if (ndev) {
		ret = 1;
		dev_put(ndev);
	}

	return ret;
}

struct mlx4_ib_steering {
	struct list_head list;
	struct mlx4_flow_reg_id reg_id;
	union ib_gid gid;
};

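/*
 * Translate one ib_flow_spec (ETH/IB/IPv4/TCP/UDP) into the equivalent
 * mlx4 hardware steering rule segment. Returns the segment size in
 * bytes, or -EINVAL for an unsupported spec type.
 */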
Hadar Hen Zionf77c0162013-08-14 13:58:31 +03001364static int parse_flow_attr(struct mlx4_dev *dev,
Matan Baraka37a1a42013-11-07 15:25:16 +02001365 u32 qp_num,
Hadar Hen Zionf77c0162013-08-14 13:58:31 +03001366 union ib_flow_spec *ib_spec,
1367 struct _rule_hw *mlx4_spec)
1368{
1369 enum mlx4_net_trans_rule_id type;
1370
1371 switch (ib_spec->type) {
1372 case IB_FLOW_SPEC_ETH:
1373 type = MLX4_NET_TRANS_RULE_ID_ETH;
1374 memcpy(mlx4_spec->eth.dst_mac, ib_spec->eth.val.dst_mac,
1375 ETH_ALEN);
1376 memcpy(mlx4_spec->eth.dst_mac_msk, ib_spec->eth.mask.dst_mac,
1377 ETH_ALEN);
1378 mlx4_spec->eth.vlan_tag = ib_spec->eth.val.vlan_tag;
1379 mlx4_spec->eth.vlan_tag_msk = ib_spec->eth.mask.vlan_tag;
1380 break;
Matan Baraka37a1a42013-11-07 15:25:16 +02001381 case IB_FLOW_SPEC_IB:
1382 type = MLX4_NET_TRANS_RULE_ID_IB;
1383 mlx4_spec->ib.l3_qpn =
1384 cpu_to_be32(qp_num);
1385 mlx4_spec->ib.qpn_mask =
1386 cpu_to_be32(MLX4_IB_FLOW_QPN_MASK);
1387 break;
1388
Hadar Hen Zionf77c0162013-08-14 13:58:31 +03001389
1390 case IB_FLOW_SPEC_IPV4:
1391 type = MLX4_NET_TRANS_RULE_ID_IPV4;
1392 mlx4_spec->ipv4.src_ip = ib_spec->ipv4.val.src_ip;
1393 mlx4_spec->ipv4.src_ip_msk = ib_spec->ipv4.mask.src_ip;
1394 mlx4_spec->ipv4.dst_ip = ib_spec->ipv4.val.dst_ip;
1395 mlx4_spec->ipv4.dst_ip_msk = ib_spec->ipv4.mask.dst_ip;
1396 break;
1397
1398 case IB_FLOW_SPEC_TCP:
1399 case IB_FLOW_SPEC_UDP:
1400 type = ib_spec->type == IB_FLOW_SPEC_TCP ?
1401 MLX4_NET_TRANS_RULE_ID_TCP :
1402 MLX4_NET_TRANS_RULE_ID_UDP;
1403 mlx4_spec->tcp_udp.dst_port = ib_spec->tcp_udp.val.dst_port;
1404 mlx4_spec->tcp_udp.dst_port_msk = ib_spec->tcp_udp.mask.dst_port;
1405 mlx4_spec->tcp_udp.src_port = ib_spec->tcp_udp.val.src_port;
1406 mlx4_spec->tcp_udp.src_port_msk = ib_spec->tcp_udp.mask.src_port;
1407 break;
1408
1409 default:
1410 return -EINVAL;
1411 }
1412 if (mlx4_map_sw_to_hw_steering_id(dev, type) < 0 ||
1413 mlx4_hw_rule_sz(dev, type) < 0)
1414 return -EINVAL;
1415 mlx4_spec->id = cpu_to_be16(mlx4_map_sw_to_hw_steering_id(dev, type));
1416 mlx4_spec->size = mlx4_hw_rule_sz(dev, type) >> 2;
1417 return mlx4_hw_rule_sz(dev, type);
1418}
1419
Matan Baraka37a1a42013-11-07 15:25:16 +02001420struct default_rules {
1421 __u32 mandatory_fields[IB_FLOW_SPEC_SUPPORT_LAYERS];
1422 __u32 mandatory_not_fields[IB_FLOW_SPEC_SUPPORT_LAYERS];
1423 __u32 rules_create_list[IB_FLOW_SPEC_SUPPORT_LAYERS];
1424 __u8 link_layer;
1425};
1426static const struct default_rules default_table[] = {
1427 {
1428 .mandatory_fields = {IB_FLOW_SPEC_IPV4},
1429 .mandatory_not_fields = {IB_FLOW_SPEC_ETH},
1430 .rules_create_list = {IB_FLOW_SPEC_IB},
1431 .link_layer = IB_LINK_LAYER_INFINIBAND
1432 }
1433};
1434
1435static int __mlx4_ib_default_rules_match(struct ib_qp *qp,
1436 struct ib_flow_attr *flow_attr)
1437{
1438 int i, j, k;
1439 void *ib_flow;
1440 const struct default_rules *pdefault_rules = default_table;
1441 u8 link_layer = rdma_port_get_link_layer(qp->device, flow_attr->port);
1442
Fabian Fredericka57f23f2014-08-12 19:20:07 -04001443 for (i = 0; i < ARRAY_SIZE(default_table); i++, pdefault_rules++) {
Matan Baraka37a1a42013-11-07 15:25:16 +02001444 __u32 field_types[IB_FLOW_SPEC_SUPPORT_LAYERS];
1445 memset(&field_types, 0, sizeof(field_types));
1446
1447 if (link_layer != pdefault_rules->link_layer)
1448 continue;
1449
1450 ib_flow = flow_attr + 1;
1451 /* we assume the specs are sorted */
1452 for (j = 0, k = 0; k < IB_FLOW_SPEC_SUPPORT_LAYERS &&
1453 j < flow_attr->num_of_specs; k++) {
1454 union ib_flow_spec *current_flow =
1455 (union ib_flow_spec *)ib_flow;
1456
1457 /* same layer but different type */
1458 if (((current_flow->type & IB_FLOW_SPEC_LAYER_MASK) ==
1459 (pdefault_rules->mandatory_fields[k] &
1460 IB_FLOW_SPEC_LAYER_MASK)) &&
1461 (current_flow->type !=
1462 pdefault_rules->mandatory_fields[k]))
1463 goto out;
1464
1465 /* same layer, try match next one */
1466 if (current_flow->type ==
1467 pdefault_rules->mandatory_fields[k]) {
1468 j++;
1469 ib_flow +=
1470 ((union ib_flow_spec *)ib_flow)->size;
1471 }
1472 }
1473
1474 ib_flow = flow_attr + 1;
1475 for (j = 0; j < flow_attr->num_of_specs;
1476 j++, ib_flow += ((union ib_flow_spec *)ib_flow)->size)
1477 for (k = 0; k < IB_FLOW_SPEC_SUPPORT_LAYERS; k++)
1478 /* same layer and same type */
1479 if (((union ib_flow_spec *)ib_flow)->type ==
1480 pdefault_rules->mandatory_not_fields[k])
1481 goto out;
1482
1483 return i;
1484 }
1485out:
1486 return -1;
1487}
1488
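/*
 * Append the rules named in the matched default entry's rules_create_list
 * to the firmware mailbox, each parsed with qpn 0.  Returns the number of
 * bytes added, or -EINVAL if a rule fails to parse.
 */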
1489static int __mlx4_ib_create_default_rules(
1490 struct mlx4_ib_dev *mdev,
1491 struct ib_qp *qp,
1492 const struct default_rules *pdefault_rules,
1493 struct _rule_hw *mlx4_spec) {
1494 int size = 0;
1495 int i;
1496
Fabian Fredericka57f23f2014-08-12 19:20:07 -04001497 for (i = 0; i < ARRAY_SIZE(pdefault_rules->rules_create_list); i++) {
Matan Baraka37a1a42013-11-07 15:25:16 +02001498 int ret;
1499 union ib_flow_spec ib_spec;
1500 switch (pdefault_rules->rules_create_list[i]) {
1501 case 0:
1502 /* no rule */
1503 continue;
1504 case IB_FLOW_SPEC_IB:
1505 ib_spec.type = IB_FLOW_SPEC_IB;
1506 ib_spec.size = sizeof(struct ib_flow_spec_ib);
1507
1508 break;
1509 default:
1510 /* invalid rule */
1511 return -EINVAL;
1512 }
 1513		/* We must put an empty rule; the qpn is ignored */
1514 ret = parse_flow_attr(mdev->dev, 0, &ib_spec,
1515 mlx4_spec);
1516 if (ret < 0) {
 1517			pr_info("failed to parse default flow rule\n");
1518 return -EINVAL;
1519 }
1520
1521 mlx4_spec = (void *)mlx4_spec + ret;
1522 size += ret;
1523 }
1524 return size;
1525}
1526
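/*
 * Build and post a MLX4_QP_FLOW_STEERING_ATTACH command: a control segment
 * (domain-based priority, steering mode, port and qpn), any default rules
 * matched for this QP, then the translated user specs.  On success the
 * firmware hands back the rule handle through *reg_id.
 */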
Hadar Hen Zionf77c0162013-08-14 13:58:31 +03001527static int __mlx4_ib_create_flow(struct ib_qp *qp, struct ib_flow_attr *flow_attr,
1528 int domain,
1529 enum mlx4_net_trans_promisc_mode flow_type,
1530 u64 *reg_id)
1531{
1532 int ret, i;
1533 int size = 0;
1534 void *ib_flow;
1535 struct mlx4_ib_dev *mdev = to_mdev(qp->device);
1536 struct mlx4_cmd_mailbox *mailbox;
1537 struct mlx4_net_trans_rule_hw_ctrl *ctrl;
Matan Baraka37a1a42013-11-07 15:25:16 +02001538 int default_flow;
Hadar Hen Zionf77c0162013-08-14 13:58:31 +03001539
1540 static const u16 __mlx4_domain[] = {
1541 [IB_FLOW_DOMAIN_USER] = MLX4_DOMAIN_UVERBS,
1542 [IB_FLOW_DOMAIN_ETHTOOL] = MLX4_DOMAIN_ETHTOOL,
1543 [IB_FLOW_DOMAIN_RFS] = MLX4_DOMAIN_RFS,
1544 [IB_FLOW_DOMAIN_NIC] = MLX4_DOMAIN_NIC,
1545 };
1546
1547 if (flow_attr->priority > MLX4_IB_FLOW_MAX_PRIO) {
1548 pr_err("Invalid priority value %d\n", flow_attr->priority);
1549 return -EINVAL;
1550 }
1551
1552 if (domain >= IB_FLOW_DOMAIN_NUM) {
1553 pr_err("Invalid domain value %d\n", domain);
1554 return -EINVAL;
1555 }
1556
1557 if (mlx4_map_sw_to_hw_steering_mode(mdev->dev, flow_type) < 0)
1558 return -EINVAL;
1559
1560 mailbox = mlx4_alloc_cmd_mailbox(mdev->dev);
1561 if (IS_ERR(mailbox))
1562 return PTR_ERR(mailbox);
Hadar Hen Zionf77c0162013-08-14 13:58:31 +03001563 ctrl = mailbox->buf;
1564
1565 ctrl->prio = cpu_to_be16(__mlx4_domain[domain] |
1566 flow_attr->priority);
1567 ctrl->type = mlx4_map_sw_to_hw_steering_mode(mdev->dev, flow_type);
1568 ctrl->port = flow_attr->port;
1569 ctrl->qpn = cpu_to_be32(qp->qp_num);
1570
1571 ib_flow = flow_attr + 1;
1572 size += sizeof(struct mlx4_net_trans_rule_hw_ctrl);
Matan Baraka37a1a42013-11-07 15:25:16 +02001573 /* Add default flows */
1574 default_flow = __mlx4_ib_default_rules_match(qp, flow_attr);
1575 if (default_flow >= 0) {
1576 ret = __mlx4_ib_create_default_rules(
1577 mdev, qp, default_table + default_flow,
1578 mailbox->buf + size);
1579 if (ret < 0) {
1580 mlx4_free_cmd_mailbox(mdev->dev, mailbox);
1581 return -EINVAL;
1582 }
1583 size += ret;
1584 }
Hadar Hen Zionf77c0162013-08-14 13:58:31 +03001585 for (i = 0; i < flow_attr->num_of_specs; i++) {
Matan Baraka37a1a42013-11-07 15:25:16 +02001586 ret = parse_flow_attr(mdev->dev, qp->qp_num, ib_flow,
1587 mailbox->buf + size);
Hadar Hen Zionf77c0162013-08-14 13:58:31 +03001588 if (ret < 0) {
1589 mlx4_free_cmd_mailbox(mdev->dev, mailbox);
1590 return -EINVAL;
1591 }
1592 ib_flow += ((union ib_flow_spec *) ib_flow)->size;
1593 size += ret;
1594 }
1595
1596 ret = mlx4_cmd_imm(mdev->dev, mailbox->dma, reg_id, size >> 2, 0,
1597 MLX4_QP_FLOW_STEERING_ATTACH, MLX4_CMD_TIME_CLASS_A,
Matan Barak48564132015-05-31 09:30:15 +03001598 MLX4_CMD_WRAPPED);
Hadar Hen Zionf77c0162013-08-14 13:58:31 +03001599 if (ret == -ENOMEM)
 1600		pr_err("mcg table is full. Failed to register network rule.\n");
 1601	else if (ret == -ENXIO)
 1602		pr_err("Device managed flow steering is disabled. Failed to register network rule.\n");
 1603	else if (ret)
 1604		pr_err("Invalid argument. Failed to register network rule.\n");
1605
1606 mlx4_free_cmd_mailbox(mdev->dev, mailbox);
1607 return ret;
1608}
1609
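/* Detach a steering rule previously attached under the given firmware handle. */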
1610static int __mlx4_ib_destroy_flow(struct mlx4_dev *dev, u64 reg_id)
1611{
1612 int err;
1613 err = mlx4_cmd(dev, reg_id, 0, 0,
1614 MLX4_QP_FLOW_STEERING_DETACH, MLX4_CMD_TIME_CLASS_A,
Matan Barak48564132015-05-31 09:30:15 +03001615 MLX4_CMD_WRAPPED);
Hadar Hen Zionf77c0162013-08-14 13:58:31 +03001616 if (err)
 1617		pr_err("Failed to detach network rule, registration id = 0x%llx\n",
1618 reg_id);
1619 return err;
1620}
1621
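/*
 * When the device runs VXLAN tunnel offload (and is not in static DMFS A0
 * mode), a flow consisting of a single ETH spec also needs a tunnel
 * steering rule keyed on the destination MAC; in every other case this is
 * a no-op.
 */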
Or Gerlitzd2fce8a2014-08-27 16:47:49 +03001622static int mlx4_ib_tunnel_steer_add(struct ib_qp *qp, struct ib_flow_attr *flow_attr,
1623 u64 *reg_id)
1624{
1625 void *ib_flow;
1626 union ib_flow_spec *ib_spec;
1627 struct mlx4_dev *dev = to_mdev(qp->device)->dev;
1628 int err = 0;
1629
Or Gerlitz5eff6da2015-01-15 15:28:54 +02001630 if (dev->caps.tunnel_offload_mode != MLX4_TUNNEL_OFFLOAD_MODE_VXLAN ||
1631 dev->caps.dmfs_high_steer_mode == MLX4_STEERING_DMFS_A0_STATIC)
Or Gerlitzd2fce8a2014-08-27 16:47:49 +03001632 return 0; /* do nothing */
1633
1634 ib_flow = flow_attr + 1;
1635 ib_spec = (union ib_flow_spec *)ib_flow;
1636
1637 if (ib_spec->type != IB_FLOW_SPEC_ETH || flow_attr->num_of_specs != 1)
1638 return 0; /* do nothing */
1639
1640 err = mlx4_tunnel_steer_add(to_mdev(qp->device)->dev, ib_spec->eth.val.dst_mac,
1641 flow_attr->port, qp->qp_num,
1642 MLX4_DOMAIN_UVERBS | (flow_attr->priority & 0xff),
1643 reg_id);
1644 return err;
1645}
1646
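/*
 * ib_device create_flow entry point.  Sniffer attributes expand into a
 * unicast and a multicast promiscuous rule; on a bonded device each rule
 * is mirrored on port 2, and NORMAL flows may get an extra tunnel steering
 * rule.  Every handle is recorded in mflow->reg_id[] so destroy_flow can
 * undo the registrations.
 */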
Hadar Hen Zionf77c0162013-08-14 13:58:31 +03001647static struct ib_flow *mlx4_ib_create_flow(struct ib_qp *qp,
1648 struct ib_flow_attr *flow_attr,
1649 int domain)
1650{
Moni Shoua146d6e12015-02-03 16:48:38 +02001651 int err = 0, i = 0, j = 0;
Hadar Hen Zionf77c0162013-08-14 13:58:31 +03001652 struct mlx4_ib_flow *mflow;
1653 enum mlx4_net_trans_promisc_mode type[2];
Moni Shoua146d6e12015-02-03 16:48:38 +02001654 struct mlx4_dev *dev = (to_mdev(qp->device))->dev;
1655 int is_bonded = mlx4_is_bonded(dev);
Hadar Hen Zionf77c0162013-08-14 13:58:31 +03001656
1657 memset(type, 0, sizeof(type));
1658
1659 mflow = kzalloc(sizeof(*mflow), GFP_KERNEL);
1660 if (!mflow) {
1661 err = -ENOMEM;
1662 goto err_free;
1663 }
1664
1665 switch (flow_attr->type) {
1666 case IB_FLOW_ATTR_NORMAL:
1667 type[0] = MLX4_FS_REGULAR;
1668 break;
1669
1670 case IB_FLOW_ATTR_ALL_DEFAULT:
1671 type[0] = MLX4_FS_ALL_DEFAULT;
1672 break;
1673
1674 case IB_FLOW_ATTR_MC_DEFAULT:
1675 type[0] = MLX4_FS_MC_DEFAULT;
1676 break;
1677
1678 case IB_FLOW_ATTR_SNIFFER:
1679 type[0] = MLX4_FS_UC_SNIFFER;
1680 type[1] = MLX4_FS_MC_SNIFFER;
1681 break;
1682
1683 default:
1684 err = -EINVAL;
1685 goto err_free;
1686 }
1687
1688 while (i < ARRAY_SIZE(type) && type[i]) {
1689 err = __mlx4_ib_create_flow(qp, flow_attr, domain, type[i],
Moni Shoua146d6e12015-02-03 16:48:38 +02001690 &mflow->reg_id[i].id);
Hadar Hen Zionf77c0162013-08-14 13:58:31 +03001691 if (err)
Or Gerlitz571e1b22014-10-30 15:59:28 +02001692 goto err_create_flow;
Moni Shoua146d6e12015-02-03 16:48:38 +02001693 if (is_bonded) {
Moni Shoua824c25c2015-02-08 11:49:33 +02001694			/* The application always sees one port, so the mirror rule
1695 * must be on port #2
1696 */
Moni Shoua146d6e12015-02-03 16:48:38 +02001697 flow_attr->port = 2;
1698 err = __mlx4_ib_create_flow(qp, flow_attr,
1699 domain, type[j],
1700 &mflow->reg_id[j].mirror);
1701 flow_attr->port = 1;
1702 if (err)
1703 goto err_create_flow;
1704 j++;
1705 }
1706
Roland Dreier11562562015-05-29 23:11:27 -07001707 i++;
Hadar Hen Zionf77c0162013-08-14 13:58:31 +03001708 }
1709
Or Gerlitzd2fce8a2014-08-27 16:47:49 +03001710 if (i < ARRAY_SIZE(type) && flow_attr->type == IB_FLOW_ATTR_NORMAL) {
Moni Shoua146d6e12015-02-03 16:48:38 +02001711 err = mlx4_ib_tunnel_steer_add(qp, flow_attr,
1712 &mflow->reg_id[i].id);
Or Gerlitzd2fce8a2014-08-27 16:47:49 +03001713 if (err)
Or Gerlitz571e1b22014-10-30 15:59:28 +02001714 goto err_create_flow;
Roland Dreier11562562015-05-29 23:11:27 -07001715
Moni Shoua146d6e12015-02-03 16:48:38 +02001716 if (is_bonded) {
1717 flow_attr->port = 2;
1718 err = mlx4_ib_tunnel_steer_add(qp, flow_attr,
1719 &mflow->reg_id[j].mirror);
1720 flow_attr->port = 1;
1721 if (err)
1722 goto err_create_flow;
1723 j++;
1724 }
 1725		/* account for the tunnel steering rule stored in reg_id[i] */
Roland Dreier11562562015-05-29 23:11:27 -07001726 i++;
Or Gerlitzd2fce8a2014-08-27 16:47:49 +03001727 }
1728
Hadar Hen Zionf77c0162013-08-14 13:58:31 +03001729 return &mflow->ibflow;
1730
Or Gerlitz571e1b22014-10-30 15:59:28 +02001731err_create_flow:
1732 while (i) {
Moni Shoua146d6e12015-02-03 16:48:38 +02001733 (void)__mlx4_ib_destroy_flow(to_mdev(qp->device)->dev,
1734 mflow->reg_id[i].id);
Or Gerlitz571e1b22014-10-30 15:59:28 +02001735 i--;
1736 }
Moni Shoua146d6e12015-02-03 16:48:38 +02001737
1738 while (j) {
1739 (void)__mlx4_ib_destroy_flow(to_mdev(qp->device)->dev,
1740 mflow->reg_id[j].mirror);
1741 j--;
1742 }
Hadar Hen Zionf77c0162013-08-14 13:58:31 +03001743err_free:
1744 kfree(mflow);
1745 return ERR_PTR(err);
1746}
1747
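/* Detach every rule (and bonded mirror) recorded for this flow, then free it. */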
1748static int mlx4_ib_destroy_flow(struct ib_flow *flow_id)
1749{
1750 int err, ret = 0;
1751 int i = 0;
1752 struct mlx4_ib_dev *mdev = to_mdev(flow_id->qp->device);
1753 struct mlx4_ib_flow *mflow = to_mflow(flow_id);
1754
Moni Shoua146d6e12015-02-03 16:48:38 +02001755 while (i < ARRAY_SIZE(mflow->reg_id) && mflow->reg_id[i].id) {
1756 err = __mlx4_ib_destroy_flow(mdev->dev, mflow->reg_id[i].id);
Hadar Hen Zionf77c0162013-08-14 13:58:31 +03001757 if (err)
1758 ret = err;
Moni Shoua146d6e12015-02-03 16:48:38 +02001759 if (mflow->reg_id[i].mirror) {
1760 err = __mlx4_ib_destroy_flow(mdev->dev,
1761 mflow->reg_id[i].mirror);
1762 if (err)
1763 ret = err;
1764 }
Hadar Hen Zionf77c0162013-08-14 13:58:31 +03001765 i++;
1766 }
1767
1768 kfree(mflow);
1769 return ret;
1770}
1771
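/*
 * Attach a QP to a multicast group.  Under device-managed flow steering
 * the returned reg_id is kept on the QP's steering_rules list so detach
 * can find it later; on a bonded device a mirror attach is issued on the
 * other physical port.
 */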
Roland Dreier225c7b12007-05-08 18:00:38 -07001772static int mlx4_ib_mcg_attach(struct ib_qp *ibqp, union ib_gid *gid, u16 lid)
1773{
Eli Cohenfa417f72010-10-24 21:08:52 -07001774 int err;
1775 struct mlx4_ib_dev *mdev = to_mdev(ibqp->device);
Moni Shoua146d6e12015-02-03 16:48:38 +02001776 struct mlx4_dev *dev = mdev->dev;
Eli Cohenfa417f72010-10-24 21:08:52 -07001777 struct mlx4_ib_qp *mqp = to_mqp(ibqp);
Hadar Hen Zion0ff1fb62012-07-05 04:03:46 +00001778 struct mlx4_ib_steering *ib_steering = NULL;
Or Gerlitze9a7faf2014-12-17 16:17:34 +02001779 enum mlx4_protocol prot = MLX4_PROT_IB_IPV6;
Moni Shoua146d6e12015-02-03 16:48:38 +02001780 struct mlx4_flow_reg_id reg_id;
Eli Cohenfa417f72010-10-24 21:08:52 -07001781
Hadar Hen Zion0ff1fb62012-07-05 04:03:46 +00001782 if (mdev->dev->caps.steering_mode ==
1783 MLX4_STEERING_MODE_DEVICE_MANAGED) {
1784 ib_steering = kmalloc(sizeof(*ib_steering), GFP_KERNEL);
1785 if (!ib_steering)
1786 return -ENOMEM;
1787 }
1788
1789 err = mlx4_multicast_attach(mdev->dev, &mqp->mqp, gid->raw, mqp->port,
1790 !!(mqp->flags &
1791 MLX4_IB_QP_BLOCK_MULTICAST_LOOPBACK),
Moni Shoua146d6e12015-02-03 16:48:38 +02001792 prot, &reg_id.id);
Or Gerlitze9a7faf2014-12-17 16:17:34 +02001793 if (err) {
1794 pr_err("multicast attach op failed, err %d\n", err);
Hadar Hen Zion0ff1fb62012-07-05 04:03:46 +00001795 goto err_malloc;
Or Gerlitze9a7faf2014-12-17 16:17:34 +02001796 }
Eli Cohenfa417f72010-10-24 21:08:52 -07001797
Moni Shoua146d6e12015-02-03 16:48:38 +02001798 reg_id.mirror = 0;
1799 if (mlx4_is_bonded(dev)) {
Moni Shoua824c25c2015-02-08 11:49:33 +02001800 err = mlx4_multicast_attach(mdev->dev, &mqp->mqp, gid->raw,
1801 (mqp->port == 1) ? 2 : 1,
Moni Shoua146d6e12015-02-03 16:48:38 +02001802 !!(mqp->flags &
1803 MLX4_IB_QP_BLOCK_MULTICAST_LOOPBACK),
1804 prot, &reg_id.mirror);
1805 if (err)
1806 goto err_add;
1807 }
1808
Eli Cohenfa417f72010-10-24 21:08:52 -07001809 err = add_gid_entry(ibqp, gid);
1810 if (err)
1811 goto err_add;
1812
Hadar Hen Zion0ff1fb62012-07-05 04:03:46 +00001813 if (ib_steering) {
1814 memcpy(ib_steering->gid.raw, gid->raw, 16);
1815 ib_steering->reg_id = reg_id;
1816 mutex_lock(&mqp->mutex);
1817 list_add(&ib_steering->list, &mqp->steering_rules);
1818 mutex_unlock(&mqp->mutex);
1819 }
Eli Cohenfa417f72010-10-24 21:08:52 -07001820 return 0;
1821
1822err_add:
Hadar Hen Zion0ff1fb62012-07-05 04:03:46 +00001823 mlx4_multicast_detach(mdev->dev, &mqp->mqp, gid->raw,
Moni Shoua146d6e12015-02-03 16:48:38 +02001824 prot, reg_id.id);
1825 if (reg_id.mirror)
1826 mlx4_multicast_detach(mdev->dev, &mqp->mqp, gid->raw,
1827 prot, reg_id.mirror);
Hadar Hen Zion0ff1fb62012-07-05 04:03:46 +00001828err_malloc:
1829 kfree(ib_steering);
1830
Eli Cohenfa417f72010-10-24 21:08:52 -07001831 return err;
1832}
1833
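/* Look up the entry in the QP's gid_list that matches the raw GID, if any. */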
1834static struct mlx4_ib_gid_entry *find_gid_entry(struct mlx4_ib_qp *qp, u8 *raw)
1835{
1836 struct mlx4_ib_gid_entry *ge;
1837 struct mlx4_ib_gid_entry *tmp;
1838 struct mlx4_ib_gid_entry *ret = NULL;
1839
1840 list_for_each_entry_safe(ge, tmp, &qp->gid_list, list) {
1841 if (!memcmp(raw, ge->gid.raw, 16)) {
1842 ret = ge;
1843 break;
1844 }
1845 }
1846
1847 return ret;
Roland Dreier225c7b12007-05-08 18:00:38 -07001848}
1849
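/*
 * Reverse of mlx4_ib_mcg_attach: recover the reg_id saved at attach time
 * (device-managed steering only), detach the main and, when bonded, the
 * mirror registration, then drop the QP's cached gid entry.
 */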
1850static int mlx4_ib_mcg_detach(struct ib_qp *ibqp, union ib_gid *gid, u16 lid)
1851{
Eli Cohenfa417f72010-10-24 21:08:52 -07001852 int err;
1853 struct mlx4_ib_dev *mdev = to_mdev(ibqp->device);
Moni Shoua146d6e12015-02-03 16:48:38 +02001854 struct mlx4_dev *dev = mdev->dev;
Eli Cohenfa417f72010-10-24 21:08:52 -07001855 struct mlx4_ib_qp *mqp = to_mqp(ibqp);
Eli Cohenfa417f72010-10-24 21:08:52 -07001856 struct net_device *ndev;
1857 struct mlx4_ib_gid_entry *ge;
Moni Shoua146d6e12015-02-03 16:48:38 +02001858 struct mlx4_flow_reg_id reg_id = {0, 0};
Or Gerlitze9a7faf2014-12-17 16:17:34 +02001859 enum mlx4_protocol prot = MLX4_PROT_IB_IPV6;
Eli Cohenfa417f72010-10-24 21:08:52 -07001860
Hadar Hen Zion0ff1fb62012-07-05 04:03:46 +00001861 if (mdev->dev->caps.steering_mode ==
1862 MLX4_STEERING_MODE_DEVICE_MANAGED) {
1863 struct mlx4_ib_steering *ib_steering;
1864
1865 mutex_lock(&mqp->mutex);
1866 list_for_each_entry(ib_steering, &mqp->steering_rules, list) {
1867 if (!memcmp(ib_steering->gid.raw, gid->raw, 16)) {
1868 list_del(&ib_steering->list);
1869 break;
1870 }
1871 }
1872 mutex_unlock(&mqp->mutex);
1873 if (&ib_steering->list == &mqp->steering_rules) {
1874 pr_err("Couldn't find reg_id for mgid. Steering rule is left attached\n");
1875 return -EINVAL;
1876 }
1877 reg_id = ib_steering->reg_id;
1878 kfree(ib_steering);
1879 }
1880
1881 err = mlx4_multicast_detach(mdev->dev, &mqp->mqp, gid->raw,
Moni Shoua146d6e12015-02-03 16:48:38 +02001882 prot, reg_id.id);
Eli Cohenfa417f72010-10-24 21:08:52 -07001883 if (err)
1884 return err;
1885
Moni Shoua146d6e12015-02-03 16:48:38 +02001886 if (mlx4_is_bonded(dev)) {
1887 err = mlx4_multicast_detach(mdev->dev, &mqp->mqp, gid->raw,
1888 prot, reg_id.mirror);
1889 if (err)
1890 return err;
1891 }
1892
Eli Cohenfa417f72010-10-24 21:08:52 -07001893 mutex_lock(&mqp->mutex);
1894 ge = find_gid_entry(mqp, gid->raw);
1895 if (ge) {
Jack Morgensteindba3ad22014-08-21 14:28:41 +03001896 spin_lock_bh(&mdev->iboe.lock);
Eli Cohenfa417f72010-10-24 21:08:52 -07001897 ndev = ge->added ? mdev->iboe.netdevs[ge->port - 1] : NULL;
1898 if (ndev)
1899 dev_hold(ndev);
Jack Morgensteindba3ad22014-08-21 14:28:41 +03001900 spin_unlock_bh(&mdev->iboe.lock);
Moni Shouad487ee72013-12-12 18:03:13 +02001901 if (ndev)
Eli Cohenfa417f72010-10-24 21:08:52 -07001902 dev_put(ndev);
Eli Cohenfa417f72010-10-24 21:08:52 -07001903 list_del(&ge->list);
1904 kfree(ge);
1905 } else
Shlomo Pongratz987c8f82012-04-29 17:04:26 +03001906 pr_warn("could not find mgid entry\n");
Eli Cohenfa417f72010-10-24 21:08:52 -07001907
1908 mutex_unlock(&mqp->mutex);
1909
1910 return 0;
Roland Dreier225c7b12007-05-08 18:00:38 -07001911}
1912
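/*
 * Fill in node_desc, node_guid and the device revision by issuing
 * NodeDescription and NodeInfo MAD queries through the firmware interface.
 */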
1913static int init_node_data(struct mlx4_ib_dev *dev)
1914{
1915 struct ib_smp *in_mad = NULL;
1916 struct ib_smp *out_mad = NULL;
Jack Morgenstein0a9a0182012-08-03 08:40:45 +00001917 int mad_ifc_flags = MLX4_MAD_IFC_IGNORE_KEYS;
Roland Dreier225c7b12007-05-08 18:00:38 -07001918 int err = -ENOMEM;
1919
1920 in_mad = kzalloc(sizeof *in_mad, GFP_KERNEL);
1921 out_mad = kmalloc(sizeof *out_mad, GFP_KERNEL);
1922 if (!in_mad || !out_mad)
1923 goto out;
1924
1925 init_query_mad(in_mad);
1926 in_mad->attr_id = IB_SMP_ATTR_NODE_DESC;
Jack Morgenstein0a9a0182012-08-03 08:40:45 +00001927 if (mlx4_is_master(dev->dev))
1928 mad_ifc_flags |= MLX4_MAD_IFC_NET_VIEW;
Roland Dreier225c7b12007-05-08 18:00:38 -07001929
Jack Morgenstein0a9a0182012-08-03 08:40:45 +00001930 err = mlx4_MAD_IFC(dev, mad_ifc_flags, 1, NULL, NULL, in_mad, out_mad);
Roland Dreier225c7b12007-05-08 18:00:38 -07001931 if (err)
1932 goto out;
1933
1934 memcpy(dev->ib_dev.node_desc, out_mad->data, 64);
1935
1936 in_mad->attr_id = IB_SMP_ATTR_NODE_INFO;
1937
Jack Morgenstein0a9a0182012-08-03 08:40:45 +00001938 err = mlx4_MAD_IFC(dev, mad_ifc_flags, 1, NULL, NULL, in_mad, out_mad);
Roland Dreier225c7b12007-05-08 18:00:38 -07001939 if (err)
1940 goto out;
1941
Jack Morgenstein992e8e6e2012-08-03 08:40:54 +00001942 dev->dev->rev_id = be32_to_cpup((__be32 *) (out_mad->data + 32));
Roland Dreier225c7b12007-05-08 18:00:38 -07001943 memcpy(&dev->ib_dev.node_guid, out_mad->data + 12, 8);
1944
1945out:
1946 kfree(in_mad);
1947 kfree(out_mad);
1948 return err;
1949}
1950
Tony Jonesf4e91eb2008-02-22 00:13:36 +01001951static ssize_t show_hca(struct device *device, struct device_attribute *attr,
1952 char *buf)
Jack Morgensteincd9281d2007-09-18 09:14:18 +02001953{
Tony Jonesf4e91eb2008-02-22 00:13:36 +01001954 struct mlx4_ib_dev *dev =
1955 container_of(device, struct mlx4_ib_dev, ib_dev.dev);
Yishai Hadas872bf2f2015-01-25 16:59:35 +02001956 return sprintf(buf, "MT%d\n", dev->dev->persist->pdev->device);
Jack Morgensteincd9281d2007-09-18 09:14:18 +02001957}
1958
Tony Jonesf4e91eb2008-02-22 00:13:36 +01001959static ssize_t show_fw_ver(struct device *device, struct device_attribute *attr,
1960 char *buf)
Jack Morgensteincd9281d2007-09-18 09:14:18 +02001961{
Tony Jonesf4e91eb2008-02-22 00:13:36 +01001962 struct mlx4_ib_dev *dev =
1963 container_of(device, struct mlx4_ib_dev, ib_dev.dev);
Jack Morgensteincd9281d2007-09-18 09:14:18 +02001964 return sprintf(buf, "%d.%d.%d\n", (int) (dev->dev->caps.fw_ver >> 32),
1965 (int) (dev->dev->caps.fw_ver >> 16) & 0xffff,
1966 (int) dev->dev->caps.fw_ver & 0xffff);
1967}
1968
Tony Jonesf4e91eb2008-02-22 00:13:36 +01001969static ssize_t show_rev(struct device *device, struct device_attribute *attr,
1970 char *buf)
Jack Morgensteincd9281d2007-09-18 09:14:18 +02001971{
Tony Jonesf4e91eb2008-02-22 00:13:36 +01001972 struct mlx4_ib_dev *dev =
1973 container_of(device, struct mlx4_ib_dev, ib_dev.dev);
Jack Morgensteincd9281d2007-09-18 09:14:18 +02001974 return sprintf(buf, "%x\n", dev->dev->rev_id);
1975}
1976
Tony Jonesf4e91eb2008-02-22 00:13:36 +01001977static ssize_t show_board(struct device *device, struct device_attribute *attr,
1978 char *buf)
Jack Morgensteincd9281d2007-09-18 09:14:18 +02001979{
Tony Jonesf4e91eb2008-02-22 00:13:36 +01001980 struct mlx4_ib_dev *dev =
1981 container_of(device, struct mlx4_ib_dev, ib_dev.dev);
1982 return sprintf(buf, "%.*s\n", MLX4_BOARD_ID_LEN,
1983 dev->dev->board_id);
Jack Morgensteincd9281d2007-09-18 09:14:18 +02001984}
1985
Tony Jonesf4e91eb2008-02-22 00:13:36 +01001986static DEVICE_ATTR(hw_rev, S_IRUGO, show_rev, NULL);
1987static DEVICE_ATTR(fw_ver, S_IRUGO, show_fw_ver, NULL);
1988static DEVICE_ATTR(hca_type, S_IRUGO, show_hca, NULL);
1989static DEVICE_ATTR(board_id, S_IRUGO, show_board, NULL);
Jack Morgensteincd9281d2007-09-18 09:14:18 +02001990
Tony Jonesf4e91eb2008-02-22 00:13:36 +01001991static struct device_attribute *mlx4_class_attributes[] = {
1992 &dev_attr_hw_rev,
1993 &dev_attr_fw_ver,
1994 &dev_attr_hca_type,
1995 &dev_attr_board_id
Jack Morgensteincd9281d2007-09-18 09:14:18 +02001996};
1997
Matan Barak9433c182014-05-15 15:29:28 +03001998#define MLX4_IB_INVALID_MAC ((u64)-1)
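/*
 * Cache the netdevice's current MAC for a RoCE port in iboe.mac[].  Under
 * SR-IOV the proxy QP1 of that port is also moved to the new source MAC
 * index, and whichever MAC is no longer needed is unregistered.
 */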
1999static void mlx4_ib_update_qps(struct mlx4_ib_dev *ibdev,
2000 struct net_device *dev,
2001 int port)
2002{
2003 u64 new_smac = 0;
2004 u64 release_mac = MLX4_IB_INVALID_MAC;
2005 struct mlx4_ib_qp *qp;
2006
2007 read_lock(&dev_base_lock);
2008 new_smac = mlx4_mac_to_u64(dev->dev_addr);
2009 read_unlock(&dev_base_lock);
2010
Jack Morgenstein3e0629c2014-09-11 14:11:17 +03002011 atomic64_set(&ibdev->iboe.mac[port - 1], new_smac);
2012
Jack Morgensteind24d9f42014-09-11 14:11:18 +03002013	/* no need to update QP1 or register a mac in non-SRIOV mode */
2014 if (!mlx4_is_mfunc(ibdev->dev))
2015 return;
2016
Matan Barak9433c182014-05-15 15:29:28 +03002017 mutex_lock(&ibdev->qp1_proxy_lock[port - 1]);
2018 qp = ibdev->qp1_proxy[port - 1];
2019 if (qp) {
2020 int new_smac_index;
Jack Morgenstein25476b02014-09-11 14:11:20 +03002021 u64 old_smac;
Matan Barak9433c182014-05-15 15:29:28 +03002022 struct mlx4_update_qp_params update_params;
2023
Jack Morgenstein25476b02014-09-11 14:11:20 +03002024 mutex_lock(&qp->mutex);
2025 old_smac = qp->pri.smac;
Matan Barak9433c182014-05-15 15:29:28 +03002026 if (new_smac == old_smac)
2027 goto unlock;
2028
2029 new_smac_index = mlx4_register_mac(ibdev->dev, port, new_smac);
2030
2031 if (new_smac_index < 0)
2032 goto unlock;
2033
2034 update_params.smac_index = new_smac_index;
Matan Barak09e05c32014-09-10 16:41:56 +03002035 if (mlx4_update_qp(ibdev->dev, qp->mqp.qpn, MLX4_UPDATE_QP_SMAC,
Matan Barak9433c182014-05-15 15:29:28 +03002036 &update_params)) {
2037 release_mac = new_smac;
2038 goto unlock;
2039 }
Jack Morgenstein25476b02014-09-11 14:11:20 +03002040 /* if old port was zero, no mac was yet registered for this QP */
2041 if (qp->pri.smac_port)
2042 release_mac = old_smac;
Matan Barak9433c182014-05-15 15:29:28 +03002043 qp->pri.smac = new_smac;
Jack Morgenstein25476b02014-09-11 14:11:20 +03002044 qp->pri.smac_port = port;
Matan Barak9433c182014-05-15 15:29:28 +03002045 qp->pri.smac_index = new_smac_index;
Matan Barak9433c182014-05-15 15:29:28 +03002046 }
2047
2048unlock:
Matan Barak9433c182014-05-15 15:29:28 +03002049 if (release_mac != MLX4_IB_INVALID_MAC)
2050 mlx4_unregister_mac(ibdev->dev, port, release_mac);
Jack Morgenstein25476b02014-09-11 14:11:20 +03002051 if (qp)
2052 mutex_unlock(&qp->mutex);
2053 mutex_unlock(&ibdev->qp1_proxy_lock[port - 1]);
Matan Barak9433c182014-05-15 15:29:28 +03002054}
2055
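/*
 * Refresh the cached net_device pointers for all IBoE ports and, when the
 * event signals an address or link change on one of them, kick off the
 * QP1 source MAC update for that port.  Runs under RTNL.
 */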
Matan Barak9433c182014-05-15 15:29:28 +03002056static void mlx4_ib_scan_netdevs(struct mlx4_ib_dev *ibdev,
2057 struct net_device *dev,
2058 unsigned long event)
2059
Moni Shouad487ee72013-12-12 18:03:13 +02002060{
2061 struct mlx4_ib_iboe *iboe;
Matan Barak9433c182014-05-15 15:29:28 +03002062 int update_qps_port = -1;
Moni Shouad487ee72013-12-12 18:03:13 +02002063 int port;
2064
Moni Shoua5070cd22015-07-30 18:33:30 +03002065 ASSERT_RTNL();
2066
Moni Shouad487ee72013-12-12 18:03:13 +02002067 iboe = &ibdev->iboe;
2068
Jack Morgensteindba3ad22014-08-21 14:28:41 +03002069 spin_lock_bh(&iboe->lock);
Moni Shouad487ee72013-12-12 18:03:13 +02002070 mlx4_foreach_ib_transport_port(port, ibdev->dev) {
Moni Shouaad4885d22014-02-05 15:13:02 +02002071
Moni Shouad487ee72013-12-12 18:03:13 +02002072 iboe->netdevs[port - 1] =
2073 mlx4_get_protocol_dev(ibdev->dev, MLX4_PROT_ETH, port);
Moni Shouad487ee72013-12-12 18:03:13 +02002074
Matan Barak9433c182014-05-15 15:29:28 +03002075 if (dev == iboe->netdevs[port - 1] &&
2076 (event == NETDEV_CHANGEADDR || event == NETDEV_REGISTER ||
2077 event == NETDEV_UP || event == NETDEV_CHANGE))
2078 update_qps_port = port;
2079
Moni Shouad487ee72013-12-12 18:03:13 +02002080 }
Jack Morgensteindba3ad22014-08-21 14:28:41 +03002081 spin_unlock_bh(&iboe->lock);
Matan Barak9433c182014-05-15 15:29:28 +03002082
2083 if (update_qps_port > 0)
2084 mlx4_ib_update_qps(ibdev, dev, update_qps_port);
Moni Shouad487ee72013-12-12 18:03:13 +02002085}
2086
2087static int mlx4_ib_netdev_event(struct notifier_block *this,
2088 unsigned long event, void *ptr)
2089{
Jiri Pirko351638e2013-05-28 01:30:21 +00002090 struct net_device *dev = netdev_notifier_info_to_dev(ptr);
Eli Cohenfa417f72010-10-24 21:08:52 -07002091 struct mlx4_ib_dev *ibdev;
Eli Cohenfa417f72010-10-24 21:08:52 -07002092
2093 if (!net_eq(dev_net(dev), &init_net))
2094 return NOTIFY_DONE;
2095
2096 ibdev = container_of(this, struct mlx4_ib_dev, iboe.nb);
Matan Barak9433c182014-05-15 15:29:28 +03002097 mlx4_ib_scan_netdevs(ibdev, dev, event);
Eli Cohenfa417f72010-10-24 21:08:52 -07002098
2099 return NOTIFY_DONE;
2100}
2101
Jack Morgenstein54679e12012-08-03 08:40:43 +00002102static void init_pkeys(struct mlx4_ib_dev *ibdev)
2103{
2104 int port;
2105 int slave;
2106 int i;
2107
2108 if (mlx4_is_master(ibdev->dev)) {
Yishai Hadas872bf2f2015-01-25 16:59:35 +02002109 for (slave = 0; slave <= ibdev->dev->persist->num_vfs;
2110 ++slave) {
Jack Morgenstein54679e12012-08-03 08:40:43 +00002111 for (port = 1; port <= ibdev->dev->caps.num_ports; ++port) {
2112 for (i = 0;
2113 i < ibdev->dev->phys_caps.pkey_phys_table_len[port];
2114 ++i) {
2115 ibdev->pkeys.virt2phys_pkey[slave][port - 1][i] =
2116 /* master has the identity virt2phys pkey mapping */
2117 (slave == mlx4_master_func_num(ibdev->dev) || !i) ? i :
2118 ibdev->dev->phys_caps.pkey_phys_table_len[port] - 1;
2119 mlx4_sync_pkey_table(ibdev->dev, slave, port, i,
2120 ibdev->pkeys.virt2phys_pkey[slave][port - 1][i]);
2121 }
2122 }
2123 }
2124 /* initialize pkey cache */
2125 for (port = 1; port <= ibdev->dev->caps.num_ports; ++port) {
2126 for (i = 0;
2127 i < ibdev->dev->phys_caps.pkey_phys_table_len[port];
2128 ++i)
2129 ibdev->pkeys.phys_pkey_cache[port-1][i] =
2130 (i) ? 0 : 0xFFFF;
2131 }
2132 }
2133}
2134
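/*
 * Carve dedicated completion EQs out of the device's interrupt vectors,
 * one group per port (vectors shared between ports are assigned only
 * once), and advertise the number actually obtained through
 * num_comp_vectors.
 */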
Shlomo Pongratze605b742012-04-29 17:04:27 +03002135static void mlx4_ib_alloc_eqs(struct mlx4_dev *dev, struct mlx4_ib_dev *ibdev)
2136{
Matan Barakc66fa192015-05-31 09:30:16 +03002137 int i, j, eq = 0, total_eqs = 0;
Shlomo Pongratze605b742012-04-29 17:04:27 +03002138
Matan Barakc66fa192015-05-31 09:30:16 +03002139 ibdev->eq_table = kcalloc(dev->caps.num_comp_vectors,
2140 sizeof(ibdev->eq_table[0]), GFP_KERNEL);
Shlomo Pongratze605b742012-04-29 17:04:27 +03002141 if (!ibdev->eq_table)
2142 return;
2143
Matan Barakc66fa192015-05-31 09:30:16 +03002144 for (i = 1; i <= dev->caps.num_ports; i++) {
2145 for (j = 0; j < mlx4_get_eqs_per_port(dev, i);
2146 j++, total_eqs++) {
2147 if (i > 1 && mlx4_is_eq_shared(dev, total_eqs))
2148 continue;
2149 ibdev->eq_table[eq] = total_eqs;
2150 if (!mlx4_assign_eq(dev, i,
2151 &ibdev->eq_table[eq]))
2152 eq++;
2153 else
2154 ibdev->eq_table[eq] = -1;
Shlomo Pongratze605b742012-04-29 17:04:27 +03002155 }
2156 }
2157
Matan Barakc66fa192015-05-31 09:30:16 +03002158 for (i = eq; i < dev->caps.num_comp_vectors;
2159 ibdev->eq_table[i++] = -1)
2160 ;
Shlomo Pongratze605b742012-04-29 17:04:27 +03002161
2162 /* Advertise the new number of EQs to clients */
Matan Barakc66fa192015-05-31 09:30:16 +03002163 ibdev->ib_dev.num_comp_vectors = eq;
Shlomo Pongratze605b742012-04-29 17:04:27 +03002164}
2165
2166static void mlx4_ib_free_eqs(struct mlx4_dev *dev, struct mlx4_ib_dev *ibdev)
2167{
2168 int i;
Matan Barakc66fa192015-05-31 09:30:16 +03002169 int total_eqs = ibdev->ib_dev.num_comp_vectors;
Shlomo Pongratz3aac6ff2012-05-24 16:08:07 +03002170
Matan Barakc66fa192015-05-31 09:30:16 +03002171 /* no eqs were allocated */
Shlomo Pongratz3aac6ff2012-05-24 16:08:07 +03002172 if (!ibdev->eq_table)
2173 return;
Shlomo Pongratze605b742012-04-29 17:04:27 +03002174
2175 /* Reset the advertised EQ number */
Matan Barakc66fa192015-05-31 09:30:16 +03002176 ibdev->ib_dev.num_comp_vectors = 0;
Shlomo Pongratze605b742012-04-29 17:04:27 +03002177
Matan Barakc66fa192015-05-31 09:30:16 +03002178 for (i = 0; i < total_eqs; i++)
Shlomo Pongratze605b742012-04-29 17:04:27 +03002179 mlx4_release_eq(dev, ibdev->eq_table[i]);
Shlomo Pongratze605b742012-04-29 17:04:27 +03002180
Shlomo Pongratze605b742012-04-29 17:04:27 +03002181 kfree(ibdev->eq_table);
Matan Barakc66fa192015-05-31 09:30:16 +03002182 ibdev->eq_table = NULL;
Shlomo Pongratze605b742012-04-29 17:04:27 +03002183}
2184
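/*
 * Report the immutable port attributes: table sizes taken from query_port
 * plus core capability flags chosen by link layer (IB, RoCE v1, or
 * RoCE v1 + v2 when the device supports it).
 */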
Ira Weiny77386132015-05-13 20:02:58 -04002185static int mlx4_port_immutable(struct ib_device *ibdev, u8 port_num,
2186 struct ib_port_immutable *immutable)
2187{
2188 struct ib_port_attr attr;
Matan Barak4ed088e2016-01-14 17:50:43 +02002189 struct mlx4_ib_dev *mdev = to_mdev(ibdev);
Ira Weiny77386132015-05-13 20:02:58 -04002190 int err;
2191
2192 err = mlx4_ib_query_port(ibdev, port_num, &attr);
2193 if (err)
2194 return err;
2195
2196 immutable->pkey_tbl_len = attr.pkey_tbl_len;
2197 immutable->gid_tbl_len = attr.gid_tbl_len;
2198
Matan Barak4ed088e2016-01-14 17:50:43 +02002199 if (mlx4_ib_port_link_layer(ibdev, port_num) == IB_LINK_LAYER_INFINIBAND) {
Ira Weinyf9b22e32015-05-13 20:02:59 -04002200 immutable->core_cap_flags = RDMA_CORE_PORT_IBA_IB;
Matan Barak4ed088e2016-01-14 17:50:43 +02002201 } else {
2202 if (mdev->dev->caps.flags & MLX4_DEV_CAP_FLAG_IBOE)
2203 immutable->core_cap_flags = RDMA_CORE_PORT_IBA_ROCE;
2204 if (mdev->dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_ROCE_V1_V2)
2205 immutable->core_cap_flags = RDMA_CORE_PORT_IBA_ROCE |
2206 RDMA_CORE_PORT_IBA_ROCE_UDP_ENCAP;
2207 }
Ira Weinyf9b22e32015-05-13 20:02:59 -04002208
Ira Weiny337877a2015-06-06 14:38:29 -04002209 immutable->max_mad_size = IB_MGMT_MAD_SIZE;
2210
Ira Weiny77386132015-05-13 20:02:58 -04002211 return 0;
2212}
2213
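/*
 * mlx4 core "add device" callback.  Allocates the PD/UAR resources, fills
 * the ib_device verb table and optional features (memory windows, XRC,
 * flow steering), sets up per-port counters, the UC steering QP range,
 * SR-IOV demultiplexing and the netdev notifier, and finally registers
 * the IB device.  Returns the new mlx4_ib_dev or NULL on failure.
 */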
Roland Dreier225c7b12007-05-08 18:00:38 -07002214static void *mlx4_ib_add(struct mlx4_dev *dev)
2215{
2216 struct mlx4_ib_dev *ibdev;
Roland Dreier22e7ef92009-01-09 13:22:29 -08002217 int num_ports = 0;
Jack Morgenstein035b1032012-05-10 23:28:09 +03002218 int i, j;
Eli Cohenfa417f72010-10-24 21:08:52 -07002219 int err;
2220 struct mlx4_ib_iboe *iboe;
Matan Barak41966702014-02-02 17:06:47 +02002221 int ib_num_ports = 0;
Moni Shouaa5750092015-02-03 16:48:37 +02002222 int num_req_counters;
Eran Ben Elishac3abb512015-06-15 17:59:03 +03002223 int allocated;
2224 u32 counter_index;
Eran Ben Elisha3ba8e312015-10-15 14:44:40 +03002225 struct counter_index *new_counter_index = NULL;
Roland Dreier225c7b12007-05-08 18:00:38 -07002226
Shlomo Pongratz987c8f82012-04-29 17:04:26 +03002227 pr_info_once("%s", mlx4_ib_version);
Roland Dreier68f39482008-02-04 20:20:44 -08002228
Jack Morgenstein026149c2012-08-03 08:40:55 +00002229 num_ports = 0;
Eli Cohenfa417f72010-10-24 21:08:52 -07002230 mlx4_foreach_ib_transport_port(i, dev)
Roland Dreier22e7ef92009-01-09 13:22:29 -08002231 num_ports++;
2232
2233 /* No point in registering a device with no ports... */
2234 if (num_ports == 0)
2235 return NULL;
2236
Roland Dreier225c7b12007-05-08 18:00:38 -07002237 ibdev = (struct mlx4_ib_dev *) ib_alloc_device(sizeof *ibdev);
2238 if (!ibdev) {
Yishai Hadas872bf2f2015-01-25 16:59:35 +02002239 dev_err(&dev->persist->pdev->dev,
2240 "Device struct alloc failed\n");
Roland Dreier225c7b12007-05-08 18:00:38 -07002241 return NULL;
2242 }
2243
Eli Cohenfa417f72010-10-24 21:08:52 -07002244 iboe = &ibdev->iboe;
2245
Roland Dreier225c7b12007-05-08 18:00:38 -07002246 if (mlx4_pd_alloc(dev, &ibdev->priv_pdn))
2247 goto err_dealloc;
2248
2249 if (mlx4_uar_alloc(dev, &ibdev->priv_uar))
2250 goto err_pd;
2251
Roland Dreier4979d182011-01-12 09:50:36 -08002252 ibdev->uar_map = ioremap((phys_addr_t) ibdev->priv_uar.pfn << PAGE_SHIFT,
2253 PAGE_SIZE);
Roland Dreier225c7b12007-05-08 18:00:38 -07002254 if (!ibdev->uar_map)
2255 goto err_uar;
Jack Morgenstein26c6bc72007-05-13 17:18:23 +03002256 MLX4_INIT_DOORBELL_LOCK(&ibdev->uar_lock);
Roland Dreier225c7b12007-05-08 18:00:38 -07002257
Roland Dreier225c7b12007-05-08 18:00:38 -07002258 ibdev->dev = dev;
Moni Shouac6215742015-02-03 16:48:39 +02002259 ibdev->bond_next_port = 0;
Roland Dreier225c7b12007-05-08 18:00:38 -07002260
2261 strlcpy(ibdev->ib_dev.name, "mlx4_%d", IB_DEVICE_NAME_MAX);
2262 ibdev->ib_dev.owner = THIS_MODULE;
2263 ibdev->ib_dev.node_type = RDMA_NODE_IB_CA;
Roland Dreier95d04f02008-07-23 08:12:26 -07002264 ibdev->ib_dev.local_dma_lkey = dev->caps.reserved_lkey;
Roland Dreier22e7ef92009-01-09 13:22:29 -08002265 ibdev->num_ports = num_ports;
Moni Shouaa5750092015-02-03 16:48:37 +02002266 ibdev->ib_dev.phys_port_cnt = mlx4_is_bonded(dev) ?
2267 1 : ibdev->num_ports;
Yevgeny Petrilinb8dd7862008-12-22 07:15:03 -08002268 ibdev->ib_dev.num_comp_vectors = dev->caps.num_comp_vectors;
Yishai Hadas872bf2f2015-01-25 16:59:35 +02002269 ibdev->ib_dev.dma_device = &dev->persist->pdev->dev;
Moni Shoua5070cd22015-07-30 18:33:30 +03002270 ibdev->ib_dev.get_netdev = mlx4_ib_get_netdev;
2271 ibdev->ib_dev.add_gid = mlx4_ib_add_gid;
2272 ibdev->ib_dev.del_gid = mlx4_ib_del_gid;
Roland Dreier225c7b12007-05-08 18:00:38 -07002273
Or Gerlitz08ff3232012-10-21 14:59:24 +00002274 if (dev->caps.userspace_caps)
2275 ibdev->ib_dev.uverbs_abi_ver = MLX4_IB_UVERBS_ABI_VERSION;
2276 else
2277 ibdev->ib_dev.uverbs_abi_ver = MLX4_IB_UVERBS_NO_DEV_CAPS_ABI_VERSION;
2278
Roland Dreier225c7b12007-05-08 18:00:38 -07002279 ibdev->ib_dev.uverbs_cmd_mask =
2280 (1ull << IB_USER_VERBS_CMD_GET_CONTEXT) |
2281 (1ull << IB_USER_VERBS_CMD_QUERY_DEVICE) |
2282 (1ull << IB_USER_VERBS_CMD_QUERY_PORT) |
2283 (1ull << IB_USER_VERBS_CMD_ALLOC_PD) |
2284 (1ull << IB_USER_VERBS_CMD_DEALLOC_PD) |
2285 (1ull << IB_USER_VERBS_CMD_REG_MR) |
Matan Barak93769322014-07-31 11:01:30 +03002286 (1ull << IB_USER_VERBS_CMD_REREG_MR) |
Roland Dreier225c7b12007-05-08 18:00:38 -07002287 (1ull << IB_USER_VERBS_CMD_DEREG_MR) |
2288 (1ull << IB_USER_VERBS_CMD_CREATE_COMP_CHANNEL) |
2289 (1ull << IB_USER_VERBS_CMD_CREATE_CQ) |
Vladimir Sokolovskybbf8eed12008-04-16 21:09:33 -07002290 (1ull << IB_USER_VERBS_CMD_RESIZE_CQ) |
Roland Dreier225c7b12007-05-08 18:00:38 -07002291 (1ull << IB_USER_VERBS_CMD_DESTROY_CQ) |
2292 (1ull << IB_USER_VERBS_CMD_CREATE_QP) |
2293 (1ull << IB_USER_VERBS_CMD_MODIFY_QP) |
Jack Morgenstein6a775e22007-06-21 12:27:47 +03002294 (1ull << IB_USER_VERBS_CMD_QUERY_QP) |
Roland Dreier225c7b12007-05-08 18:00:38 -07002295 (1ull << IB_USER_VERBS_CMD_DESTROY_QP) |
2296 (1ull << IB_USER_VERBS_CMD_ATTACH_MCAST) |
2297 (1ull << IB_USER_VERBS_CMD_DETACH_MCAST) |
2298 (1ull << IB_USER_VERBS_CMD_CREATE_SRQ) |
2299 (1ull << IB_USER_VERBS_CMD_MODIFY_SRQ) |
Jack Morgenstein65541cb2007-06-21 13:03:11 +03002300 (1ull << IB_USER_VERBS_CMD_QUERY_SRQ) |
Sean Hefty18abd5e2011-06-02 10:43:26 -07002301 (1ull << IB_USER_VERBS_CMD_DESTROY_SRQ) |
Sean Hefty42849b22011-08-11 13:57:43 -07002302 (1ull << IB_USER_VERBS_CMD_CREATE_XSRQ) |
2303 (1ull << IB_USER_VERBS_CMD_OPEN_QP);
Roland Dreier225c7b12007-05-08 18:00:38 -07002304
2305 ibdev->ib_dev.query_device = mlx4_ib_query_device;
2306 ibdev->ib_dev.query_port = mlx4_ib_query_port;
Eli Cohenfa417f72010-10-24 21:08:52 -07002307 ibdev->ib_dev.get_link_layer = mlx4_ib_port_link_layer;
Roland Dreier225c7b12007-05-08 18:00:38 -07002308 ibdev->ib_dev.query_gid = mlx4_ib_query_gid;
2309 ibdev->ib_dev.query_pkey = mlx4_ib_query_pkey;
2310 ibdev->ib_dev.modify_device = mlx4_ib_modify_device;
2311 ibdev->ib_dev.modify_port = mlx4_ib_modify_port;
2312 ibdev->ib_dev.alloc_ucontext = mlx4_ib_alloc_ucontext;
2313 ibdev->ib_dev.dealloc_ucontext = mlx4_ib_dealloc_ucontext;
2314 ibdev->ib_dev.mmap = mlx4_ib_mmap;
2315 ibdev->ib_dev.alloc_pd = mlx4_ib_alloc_pd;
2316 ibdev->ib_dev.dealloc_pd = mlx4_ib_dealloc_pd;
2317 ibdev->ib_dev.create_ah = mlx4_ib_create_ah;
2318 ibdev->ib_dev.query_ah = mlx4_ib_query_ah;
2319 ibdev->ib_dev.destroy_ah = mlx4_ib_destroy_ah;
2320 ibdev->ib_dev.create_srq = mlx4_ib_create_srq;
2321 ibdev->ib_dev.modify_srq = mlx4_ib_modify_srq;
Jack Morgenstein65541cb2007-06-21 13:03:11 +03002322 ibdev->ib_dev.query_srq = mlx4_ib_query_srq;
Roland Dreier225c7b12007-05-08 18:00:38 -07002323 ibdev->ib_dev.destroy_srq = mlx4_ib_destroy_srq;
2324 ibdev->ib_dev.post_srq_recv = mlx4_ib_post_srq_recv;
2325 ibdev->ib_dev.create_qp = mlx4_ib_create_qp;
2326 ibdev->ib_dev.modify_qp = mlx4_ib_modify_qp;
Jack Morgenstein6a775e22007-06-21 12:27:47 +03002327 ibdev->ib_dev.query_qp = mlx4_ib_query_qp;
Roland Dreier225c7b12007-05-08 18:00:38 -07002328 ibdev->ib_dev.destroy_qp = mlx4_ib_destroy_qp;
2329 ibdev->ib_dev.post_send = mlx4_ib_post_send;
2330 ibdev->ib_dev.post_recv = mlx4_ib_post_recv;
2331 ibdev->ib_dev.create_cq = mlx4_ib_create_cq;
Eli Cohen3fdcb972008-04-16 21:09:33 -07002332 ibdev->ib_dev.modify_cq = mlx4_ib_modify_cq;
Vladimir Sokolovskybbf8eed12008-04-16 21:09:33 -07002333 ibdev->ib_dev.resize_cq = mlx4_ib_resize_cq;
Roland Dreier225c7b12007-05-08 18:00:38 -07002334 ibdev->ib_dev.destroy_cq = mlx4_ib_destroy_cq;
2335 ibdev->ib_dev.poll_cq = mlx4_ib_poll_cq;
2336 ibdev->ib_dev.req_notify_cq = mlx4_ib_arm_cq;
2337 ibdev->ib_dev.get_dma_mr = mlx4_ib_get_dma_mr;
2338 ibdev->ib_dev.reg_user_mr = mlx4_ib_reg_user_mr;
Matan Barak93769322014-07-31 11:01:30 +03002339 ibdev->ib_dev.rereg_user_mr = mlx4_ib_rereg_user_mr;
Roland Dreier225c7b12007-05-08 18:00:38 -07002340 ibdev->ib_dev.dereg_mr = mlx4_ib_dereg_mr;
Sagi Grimberg679e34d2015-07-30 10:32:42 +03002341 ibdev->ib_dev.alloc_mr = mlx4_ib_alloc_mr;
Sagi Grimberg1b2cd0f2015-10-13 19:11:27 +03002342 ibdev->ib_dev.map_mr_sg = mlx4_ib_map_mr_sg;
Roland Dreier225c7b12007-05-08 18:00:38 -07002343 ibdev->ib_dev.attach_mcast = mlx4_ib_mcg_attach;
2344 ibdev->ib_dev.detach_mcast = mlx4_ib_mcg_detach;
2345 ibdev->ib_dev.process_mad = mlx4_ib_process_mad;
Ira Weiny77386132015-05-13 20:02:58 -04002346 ibdev->ib_dev.get_port_immutable = mlx4_port_immutable;
Yishai Hadasae184dd2015-08-13 18:32:06 +03002347 ibdev->ib_dev.disassociate_ucontext = mlx4_ib_disassociate_ucontext;
Roland Dreier225c7b12007-05-08 18:00:38 -07002348
Jack Morgenstein992e8e6e2012-08-03 08:40:54 +00002349 if (!mlx4_is_slave(ibdev->dev)) {
2350 ibdev->ib_dev.alloc_fmr = mlx4_ib_fmr_alloc;
2351 ibdev->ib_dev.map_phys_fmr = mlx4_ib_map_phys_fmr;
2352 ibdev->ib_dev.unmap_fmr = mlx4_ib_unmap_fmr;
2353 ibdev->ib_dev.dealloc_fmr = mlx4_ib_fmr_dealloc;
2354 }
Jack Morgenstein8ad11fb2007-08-01 12:29:05 +03002355
Shani Michaelib4253882013-02-06 16:19:16 +00002356 if (dev->caps.flags & MLX4_DEV_CAP_FLAG_MEM_WINDOW ||
2357 dev->caps.bmme_flags & MLX4_BMME_FLAG_TYPE_2_WIN) {
2358 ibdev->ib_dev.alloc_mw = mlx4_ib_alloc_mw;
Shani Michaelib4253882013-02-06 16:19:16 +00002359 ibdev->ib_dev.dealloc_mw = mlx4_ib_dealloc_mw;
2360
2361 ibdev->ib_dev.uverbs_cmd_mask |=
2362 (1ull << IB_USER_VERBS_CMD_ALLOC_MW) |
2363 (1ull << IB_USER_VERBS_CMD_DEALLOC_MW);
2364 }
2365
Sean Hefty012a8ff2011-06-02 09:01:33 -07002366 if (dev->caps.flags & MLX4_DEV_CAP_FLAG_XRC) {
2367 ibdev->ib_dev.alloc_xrcd = mlx4_ib_alloc_xrcd;
2368 ibdev->ib_dev.dealloc_xrcd = mlx4_ib_dealloc_xrcd;
2369 ibdev->ib_dev.uverbs_cmd_mask |=
2370 (1ull << IB_USER_VERBS_CMD_OPEN_XRCD) |
2371 (1ull << IB_USER_VERBS_CMD_CLOSE_XRCD);
2372 }
2373
Hadar Hen Zionf77c0162013-08-14 13:58:31 +03002374 if (check_flow_steering_support(dev)) {
Matan Barak0a9b7d52013-11-07 15:25:15 +02002375 ibdev->steering_support = MLX4_STEERING_MODE_DEVICE_MANAGED;
Hadar Hen Zionf77c0162013-08-14 13:58:31 +03002376 ibdev->ib_dev.create_flow = mlx4_ib_create_flow;
2377 ibdev->ib_dev.destroy_flow = mlx4_ib_destroy_flow;
2378
Yann Droneaudf21519b2013-11-06 23:21:49 +01002379 ibdev->ib_dev.uverbs_ex_cmd_mask |=
2380 (1ull << IB_USER_VERBS_EX_CMD_CREATE_FLOW) |
2381 (1ull << IB_USER_VERBS_EX_CMD_DESTROY_FLOW);
Hadar Hen Zionf77c0162013-08-14 13:58:31 +03002382 }
2383
Matan Barak4b664c42015-06-11 16:35:27 +03002384 ibdev->ib_dev.uverbs_ex_cmd_mask |=
2385 (1ull << IB_USER_VERBS_EX_CMD_QUERY_DEVICE) |
Eran Ben Elishafbfb6622015-10-15 14:44:42 +03002386 (1ull << IB_USER_VERBS_EX_CMD_CREATE_CQ) |
2387 (1ull << IB_USER_VERBS_EX_CMD_CREATE_QP);
Matan Barak4b664c42015-06-11 16:35:27 +03002388
Shlomo Pongratze605b742012-04-29 17:04:27 +03002389 mlx4_ib_alloc_eqs(dev, ibdev);
2390
Eli Cohenfa417f72010-10-24 21:08:52 -07002391 spin_lock_init(&iboe->lock);
2392
Roland Dreier225c7b12007-05-08 18:00:38 -07002393 if (init_node_data(ibdev))
2394 goto err_map;
2395
Eran Ben Elisha3ba8e312015-10-15 14:44:40 +03002396 for (i = 0; i < ibdev->num_ports; ++i) {
2397 mutex_init(&ibdev->counters_table[i].mutex);
2398 INIT_LIST_HEAD(&ibdev->counters_table[i].counters_list);
2399 }
2400
Moni Shouaa5750092015-02-03 16:48:37 +02002401 num_req_counters = mlx4_is_bonded(dev) ? 1 : ibdev->num_ports;
2402 for (i = 0; i < num_req_counters; ++i) {
Matan Barak9433c182014-05-15 15:29:28 +03002403 mutex_init(&ibdev->qp1_proxy_lock[i]);
Eran Ben Elishac3abb512015-06-15 17:59:03 +03002404 allocated = 0;
Or Gerlitzcfcde112011-06-15 14:49:57 +00002405 if (mlx4_ib_port_link_layer(&ibdev->ib_dev, i + 1) ==
2406 IB_LINK_LAYER_ETHERNET) {
Eran Ben Elishac3abb512015-06-15 17:59:03 +03002407 err = mlx4_counter_alloc(ibdev->dev, &counter_index);
 2408			/* if allocating a new counter failed, use the default */
Or Gerlitzcfcde112011-06-15 14:49:57 +00002409 if (err)
Eran Ben Elishac3abb512015-06-15 17:59:03 +03002410 counter_index =
2411 mlx4_get_default_counter_index(dev,
2412 i + 1);
2413 else
2414 allocated = 1;
2415 } else { /* IB_LINK_LAYER_INFINIBAND use the default counter */
2416 counter_index = mlx4_get_default_counter_index(dev,
2417 i + 1);
Dan Carpenter3839d8a2014-03-28 11:21:39 +03002418 }
Eran Ben Elisha3ba8e312015-10-15 14:44:40 +03002419 new_counter_index = kmalloc(sizeof(*new_counter_index),
2420 GFP_KERNEL);
2421 if (!new_counter_index) {
2422 if (allocated)
2423 mlx4_counter_free(ibdev->dev, counter_index);
2424 goto err_counter;
2425 }
2426 new_counter_index->index = counter_index;
2427 new_counter_index->allocated = allocated;
2428 list_add_tail(&new_counter_index->list,
2429 &ibdev->counters_table[i].counters_list);
2430 ibdev->counters_table[i].default_counter = counter_index;
Eran Ben Elishac3abb512015-06-15 17:59:03 +03002431 pr_info("counter index %d for port %d allocated %d\n",
2432 counter_index, i + 1, allocated);
Or Gerlitzcfcde112011-06-15 14:49:57 +00002433 }
Moni Shouaa5750092015-02-03 16:48:37 +02002434 if (mlx4_is_bonded(dev))
Eran Ben Elishac3abb512015-06-15 17:59:03 +03002435 for (i = 1; i < ibdev->num_ports ; ++i) {
Eran Ben Elisha3ba8e312015-10-15 14:44:40 +03002436 new_counter_index =
2437 kmalloc(sizeof(struct counter_index),
2438 GFP_KERNEL);
2439 if (!new_counter_index)
2440 goto err_counter;
2441 new_counter_index->index = counter_index;
2442 new_counter_index->allocated = 0;
2443 list_add_tail(&new_counter_index->list,
2444 &ibdev->counters_table[i].counters_list);
2445 ibdev->counters_table[i].default_counter =
2446 counter_index;
Eran Ben Elishac3abb512015-06-15 17:59:03 +03002447 }
Or Gerlitzcfcde112011-06-15 14:49:57 +00002448
Matan Barak41966702014-02-02 17:06:47 +02002449 mlx4_foreach_port(i, dev, MLX4_PORT_TYPE_IB)
2450 ib_num_ports++;
2451
Roland Dreier225c7b12007-05-08 18:00:38 -07002452 spin_lock_init(&ibdev->sm_lock);
2453 mutex_init(&ibdev->cap_mask_mutex);
Yishai Hadas35f05da2015-02-08 11:49:34 +02002454 INIT_LIST_HEAD(&ibdev->qp_list);
2455 spin_lock_init(&ibdev->reset_flow_resource_lock);
Roland Dreier225c7b12007-05-08 18:00:38 -07002456
Matan Barak41966702014-02-02 17:06:47 +02002457 if (ibdev->steering_support == MLX4_STEERING_MODE_DEVICE_MANAGED &&
2458 ib_num_ports) {
Matan Barakc1c98502013-11-07 15:25:17 +02002459 ibdev->steer_qpn_count = MLX4_IB_UC_MAX_NUM_QPS;
2460 err = mlx4_qp_reserve_range(dev, ibdev->steer_qpn_count,
2461 MLX4_IB_UC_STEER_QPN_ALIGN,
Eugenia Emantayevddae0342014-12-11 10:57:54 +02002462 &ibdev->steer_qpn_base, 0);
Matan Barakc1c98502013-11-07 15:25:17 +02002463 if (err)
2464 goto err_counter;
2465
2466 ibdev->ib_uc_qpns_bitmap =
2467 kmalloc(BITS_TO_LONGS(ibdev->steer_qpn_count) *
2468 sizeof(long),
2469 GFP_KERNEL);
2470 if (!ibdev->ib_uc_qpns_bitmap) {
Yishai Hadas872bf2f2015-01-25 16:59:35 +02002471 dev_err(&dev->persist->pdev->dev,
2472 "bit map alloc failed\n");
Matan Barakc1c98502013-11-07 15:25:17 +02002473 goto err_steer_qp_release;
2474 }
2475
2476 bitmap_zero(ibdev->ib_uc_qpns_bitmap, ibdev->steer_qpn_count);
2477
2478 err = mlx4_FLOW_STEERING_IB_UC_QP_RANGE(
2479 dev, ibdev->steer_qpn_base,
2480 ibdev->steer_qpn_base +
2481 ibdev->steer_qpn_count - 1);
2482 if (err)
2483 goto err_steer_free_bitmap;
2484 }
2485
Jack Morgenstein3e0629c2014-09-11 14:11:17 +03002486 for (j = 1; j <= ibdev->dev->caps.num_ports; j++)
2487 atomic64_set(&iboe->mac[j - 1], ibdev->dev->caps.def_mac[j]);
2488
Ralph Campbell9a6edb62010-05-06 17:03:25 -07002489 if (ib_register_device(&ibdev->ib_dev, NULL))
Matan Barakc1c98502013-11-07 15:25:17 +02002490 goto err_steer_free_bitmap;
Roland Dreier225c7b12007-05-08 18:00:38 -07002491
2492 if (mlx4_ib_mad_init(ibdev))
2493 goto err_reg;
2494
Jack Morgensteinfc065732012-08-03 08:40:42 +00002495 if (mlx4_ib_init_sriov(ibdev))
2496 goto err_mad;
2497
Moni Shoua71a39bb2016-01-14 17:50:40 +02002498 if (dev->caps.flags & MLX4_DEV_CAP_FLAG_IBOE ||
2499 dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_ROCE_V1_V2) {
Moni Shouad487ee72013-12-12 18:03:13 +02002500 if (!iboe->nb.notifier_call) {
2501 iboe->nb.notifier_call = mlx4_ib_netdev_event;
2502 err = register_netdevice_notifier(&iboe->nb);
2503 if (err) {
2504 iboe->nb.notifier_call = NULL;
2505 goto err_notif;
2506 }
2507 }
Moni Shoua71a39bb2016-01-14 17:50:40 +02002508 if (dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_ROCE_V1_V2) {
2509 err = mlx4_config_roce_v2_port(dev, ROCE_V2_UDP_DPORT);
2510 if (err) {
2511 goto err_notif;
2512 }
2513 }
Eli Cohenfa417f72010-10-24 21:08:52 -07002514 }
2515
Jack Morgenstein035b1032012-05-10 23:28:09 +03002516 for (j = 0; j < ARRAY_SIZE(mlx4_class_attributes); ++j) {
Tony Jonesf4e91eb2008-02-22 00:13:36 +01002517 if (device_create_file(&ibdev->ib_dev.dev,
Jack Morgenstein035b1032012-05-10 23:28:09 +03002518 mlx4_class_attributes[j]))
Eli Cohenfa417f72010-10-24 21:08:52 -07002519 goto err_notif;
Jack Morgensteincd9281d2007-09-18 09:14:18 +02002520 }
2521
Jack Morgenstein3b4a8cd2009-09-05 20:24:50 -07002522 ibdev->ib_active = true;
Jiri Pirko09d4d082016-02-26 17:32:24 +01002523 mlx4_foreach_port(i, dev, MLX4_PORT_TYPE_IB)
2524 devlink_port_type_ib_set(mlx4_get_devlink_port(dev, i),
2525 &ibdev->ib_dev);
Jack Morgenstein3b4a8cd2009-09-05 20:24:50 -07002526
Jack Morgenstein54679e12012-08-03 08:40:43 +00002527 if (mlx4_is_mfunc(ibdev->dev))
2528 init_pkeys(ibdev);
2529
Jack Morgenstein3806d082012-08-03 08:40:58 +00002530 /* create paravirt contexts for any VFs which are active */
2531 if (mlx4_is_master(ibdev->dev)) {
2532 for (j = 0; j < MLX4_MFUNC_MAX; j++) {
2533 if (j == mlx4_master_func_num(ibdev->dev))
2534 continue;
2535 if (mlx4_is_slave_active(ibdev->dev, j))
2536 do_slave_init(ibdev, j, 1);
2537 }
2538 }
Roland Dreier225c7b12007-05-08 18:00:38 -07002539 return ibdev;
2540
Eli Cohenfa417f72010-10-24 21:08:52 -07002541err_notif:
Moni Shouad487ee72013-12-12 18:03:13 +02002542 if (ibdev->iboe.nb.notifier_call) {
2543 if (unregister_netdevice_notifier(&ibdev->iboe.nb))
2544 pr_warn("failure unregistering notifier\n");
2545 ibdev->iboe.nb.notifier_call = NULL;
2546 }
Eli Cohenfa417f72010-10-24 21:08:52 -07002547 flush_workqueue(wq);
2548
Jack Morgensteinfc065732012-08-03 08:40:42 +00002549 mlx4_ib_close_sriov(ibdev);
2550
2551err_mad:
2552 mlx4_ib_mad_cleanup(ibdev);
2553
Roland Dreier225c7b12007-05-08 18:00:38 -07002554err_reg:
2555 ib_unregister_device(&ibdev->ib_dev);
2556
Matan Barakc1c98502013-11-07 15:25:17 +02002557err_steer_free_bitmap:
2558 kfree(ibdev->ib_uc_qpns_bitmap);
2559
2560err_steer_qp_release:
2561 if (ibdev->steering_support == MLX4_STEERING_MODE_DEVICE_MANAGED)
2562 mlx4_qp_release_range(dev, ibdev->steer_qpn_base,
2563 ibdev->steer_qpn_count);
Or Gerlitzcfcde112011-06-15 14:49:57 +00002564err_counter:
Eran Ben Elisha3ba8e312015-10-15 14:44:40 +03002565 for (i = 0; i < ibdev->num_ports; ++i)
2566 mlx4_ib_delete_counters_table(ibdev, &ibdev->counters_table[i]);
2567
Roland Dreier225c7b12007-05-08 18:00:38 -07002568err_map:
2569 iounmap(ibdev->uar_map);
2570
2571err_uar:
2572 mlx4_uar_free(dev, &ibdev->priv_uar);
2573
2574err_pd:
2575 mlx4_pd_free(dev, ibdev->priv_pdn);
2576
2577err_dealloc:
2578 ib_dealloc_device(&ibdev->ib_dev);
2579
2580 return NULL;
2581}
2582
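/*
 * Helpers for the dedicated UC QP number range reserved for device-managed
 * flow steering: allocate or release a naturally aligned block from the
 * bitmap set up in mlx4_ib_add, and attach or detach the empty IB L2 rule
 * that steers traffic to such a QP.
 */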
Matan Barakc1c98502013-11-07 15:25:17 +02002583int mlx4_ib_steer_qp_alloc(struct mlx4_ib_dev *dev, int count, int *qpn)
2584{
2585 int offset;
2586
2587 WARN_ON(!dev->ib_uc_qpns_bitmap);
2588
2589 offset = bitmap_find_free_region(dev->ib_uc_qpns_bitmap,
2590 dev->steer_qpn_count,
2591 get_count_order(count));
2592 if (offset < 0)
2593 return offset;
2594
2595 *qpn = dev->steer_qpn_base + offset;
2596 return 0;
2597}
2598
2599void mlx4_ib_steer_qp_free(struct mlx4_ib_dev *dev, u32 qpn, int count)
2600{
2601 if (!qpn ||
2602 dev->steering_support != MLX4_STEERING_MODE_DEVICE_MANAGED)
2603 return;
2604
2605 BUG_ON(qpn < dev->steer_qpn_base);
2606
2607 bitmap_release_region(dev->ib_uc_qpns_bitmap,
2608 qpn - dev->steer_qpn_base,
2609 get_count_order(count));
2610}
2611
2612int mlx4_ib_steer_qp_reg(struct mlx4_ib_dev *mdev, struct mlx4_ib_qp *mqp,
2613 int is_attach)
2614{
2615 int err;
2616 size_t flow_size;
2617 struct ib_flow_attr *flow = NULL;
2618 struct ib_flow_spec_ib *ib_spec;
2619
2620 if (is_attach) {
2621 flow_size = sizeof(struct ib_flow_attr) +
2622 sizeof(struct ib_flow_spec_ib);
2623 flow = kzalloc(flow_size, GFP_KERNEL);
2624 if (!flow)
2625 return -ENOMEM;
2626 flow->port = mqp->port;
2627 flow->num_of_specs = 1;
2628 flow->size = flow_size;
2629 ib_spec = (struct ib_flow_spec_ib *)(flow + 1);
2630 ib_spec->type = IB_FLOW_SPEC_IB;
2631 ib_spec->size = sizeof(struct ib_flow_spec_ib);
2632 /* Add an empty rule for IB L2 */
2633 memset(&ib_spec->mask, 0, sizeof(ib_spec->mask));
2634
2635 err = __mlx4_ib_create_flow(&mqp->ibqp, flow,
2636 IB_FLOW_DOMAIN_NIC,
2637 MLX4_FS_REGULAR,
2638 &mqp->reg_id);
2639 } else {
2640 err = __mlx4_ib_destroy_flow(mdev->dev, mqp->reg_id);
2641 }
2642 kfree(flow);
2643 return err;
2644}
2645
Roland Dreier225c7b12007-05-08 18:00:38 -07002646static void mlx4_ib_remove(struct mlx4_dev *dev, void *ibdev_ptr)
2647{
2648 struct mlx4_ib_dev *ibdev = ibdev_ptr;
2649 int p;
Jiri Pirko09d4d082016-02-26 17:32:24 +01002650 int i;
Roland Dreier225c7b12007-05-08 18:00:38 -07002651
Jiri Pirko09d4d082016-02-26 17:32:24 +01002652 mlx4_foreach_port(i, dev, MLX4_PORT_TYPE_IB)
2653 devlink_port_type_clear(mlx4_get_devlink_port(dev, i));
Moni Shoua4bf97152014-08-21 14:28:42 +03002654 ibdev->ib_active = false;
2655 flush_workqueue(wq);
2656
Jack Morgensteinfc065732012-08-03 08:40:42 +00002657 mlx4_ib_close_sriov(ibdev);
Yevgeny Petrilina6a47772009-03-18 19:49:54 -07002658 mlx4_ib_mad_cleanup(ibdev);
2659 ib_unregister_device(&ibdev->ib_dev);
Eli Cohenfa417f72010-10-24 21:08:52 -07002660 if (ibdev->iboe.nb.notifier_call) {
2661 if (unregister_netdevice_notifier(&ibdev->iboe.nb))
Shlomo Pongratz987c8f82012-04-29 17:04:26 +03002662 pr_warn("failure unregistering notifier\n");
Eli Cohenfa417f72010-10-24 21:08:52 -07002663 ibdev->iboe.nb.notifier_call = NULL;
2664 }
Matan Barakc1c98502013-11-07 15:25:17 +02002665
2666 if (ibdev->steering_support == MLX4_STEERING_MODE_DEVICE_MANAGED) {
2667 mlx4_qp_release_range(dev, ibdev->steer_qpn_base,
2668 ibdev->steer_qpn_count);
2669 kfree(ibdev->ib_uc_qpns_bitmap);
2670 }
2671
Eli Cohenfa417f72010-10-24 21:08:52 -07002672 iounmap(ibdev->uar_map);
Or Gerlitzcfcde112011-06-15 14:49:57 +00002673 for (p = 0; p < ibdev->num_ports; ++p)
Eran Ben Elisha3ba8e312015-10-15 14:44:40 +03002674 mlx4_ib_delete_counters_table(ibdev, &ibdev->counters_table[p]);
2675
Eli Cohenfa417f72010-10-24 21:08:52 -07002676 mlx4_foreach_port(p, dev, MLX4_PORT_TYPE_IB)
Roland Dreier225c7b12007-05-08 18:00:38 -07002677 mlx4_CLOSE_PORT(dev, p);
2678
Shlomo Pongratze605b742012-04-29 17:04:27 +03002679 mlx4_ib_free_eqs(dev, ibdev);
2680
Roland Dreier225c7b12007-05-08 18:00:38 -07002681 mlx4_uar_free(dev, &ibdev->priv_uar);
2682 mlx4_pd_free(dev, ibdev->priv_pdn);
2683 ib_dealloc_device(&ibdev->ib_dev);
2684}
2685
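/*
 * Queue per-port work items that set up (or tear down) the paravirtual
 * tunnel QPs of one slave.  Only the master function does this, and the
 * work is skipped when SR-IOV is already going down.
 */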
Jack Morgensteinfc065732012-08-03 08:40:42 +00002686static void do_slave_init(struct mlx4_ib_dev *ibdev, int slave, int do_init)
2687{
2688 struct mlx4_ib_demux_work **dm = NULL;
2689 struct mlx4_dev *dev = ibdev->dev;
2690 int i;
2691 unsigned long flags;
Matan Barak449fc482014-03-19 18:11:52 +02002692 struct mlx4_active_ports actv_ports;
2693 unsigned int ports;
2694 unsigned int first_port;
Jack Morgensteinfc065732012-08-03 08:40:42 +00002695
2696 if (!mlx4_is_master(dev))
2697 return;
2698
Matan Barak449fc482014-03-19 18:11:52 +02002699 actv_ports = mlx4_get_active_ports(dev, slave);
2700 ports = bitmap_weight(actv_ports.ports, dev->caps.num_ports);
2701 first_port = find_first_bit(actv_ports.ports, dev->caps.num_ports);
2702
2703 dm = kcalloc(ports, sizeof(*dm), GFP_ATOMIC);
Jack Morgensteinfc065732012-08-03 08:40:42 +00002704 if (!dm) {
2705 pr_err("failed to allocate memory for tunneling qp update\n");
Maninder Singha39a98f2015-07-08 09:43:35 +05302706 return;
Jack Morgensteinfc065732012-08-03 08:40:42 +00002707 }
2708
Matan Barak449fc482014-03-19 18:11:52 +02002709 for (i = 0; i < ports; i++) {
Jack Morgensteinfc065732012-08-03 08:40:42 +00002710 dm[i] = kmalloc(sizeof (struct mlx4_ib_demux_work), GFP_ATOMIC);
2711 if (!dm[i]) {
2712 pr_err("failed to allocate memory for tunneling qp update work struct\n");
Maninder Singha39a98f2015-07-08 09:43:35 +05302713 while (--i >= 0)
2714 kfree(dm[i]);
Jack Morgensteinfc065732012-08-03 08:40:42 +00002715 goto out;
2716 }
Jack Morgensteinfc065732012-08-03 08:40:42 +00002717 INIT_WORK(&dm[i]->work, mlx4_ib_tunnels_update_work);
Matan Barak449fc482014-03-19 18:11:52 +02002718 dm[i]->port = first_port + i + 1;
Jack Morgensteinfc065732012-08-03 08:40:42 +00002719 dm[i]->slave = slave;
2720 dm[i]->do_init = do_init;
2721 dm[i]->dev = ibdev;
Doug Ledfordd9a047a2015-07-09 10:21:08 -04002722 }
2723 /* initialize or tear down tunnel QPs for the slave */
2724 spin_lock_irqsave(&ibdev->sriov.going_down_lock, flags);
2725 if (!ibdev->sriov.is_going_down) {
2726 for (i = 0; i < ports; i++)
Jack Morgensteinfc065732012-08-03 08:40:42 +00002727 queue_work(ibdev->sriov.demux[i].ud_wq, &dm[i]->work);
2728 spin_unlock_irqrestore(&ibdev->sriov.going_down_lock, flags);
Doug Ledfordd9a047a2015-07-09 10:21:08 -04002729 } else {
2730 spin_unlock_irqrestore(&ibdev->sriov.going_down_lock, flags);
2731 for (i = 0; i < ports; i++)
2732 kfree(dm[i]);
Jack Morgensteinfc065732012-08-03 08:40:42 +00002733 }
2734out:
Syam Sidhardhanc89d1272013-02-24 23:20:05 +00002735 kfree(dm);
Jack Morgensteinfc065732012-08-03 08:40:42 +00002736 return;
2737}
2738
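/*
 * On a catastrophic device error, walk every QP on this ibdev and invoke
 * the completion handler of any CQ that still has unpolled work, letting
 * consumers observe the flushed completions during the reset flow.
 */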
Yishai Hadas35f05da2015-02-08 11:49:34 +02002739static void mlx4_ib_handle_catas_error(struct mlx4_ib_dev *ibdev)
2740{
2741 struct mlx4_ib_qp *mqp;
2742 unsigned long flags_qp;
2743 unsigned long flags_cq;
2744 struct mlx4_ib_cq *send_mcq, *recv_mcq;
2745 struct list_head cq_notify_list;
2746 struct mlx4_cq *mcq;
2747 unsigned long flags;
2748
 2749	pr_warn("mlx4_ib_handle_catas_error started\n");
2750 INIT_LIST_HEAD(&cq_notify_list);
2751
 2752	/* Go over the qp list residing on this ibdev, synchronized with qp create/destroy. */
2753 spin_lock_irqsave(&ibdev->reset_flow_resource_lock, flags);
2754
2755 list_for_each_entry(mqp, &ibdev->qp_list, qps_list) {
2756 spin_lock_irqsave(&mqp->sq.lock, flags_qp);
2757 if (mqp->sq.tail != mqp->sq.head) {
2758 send_mcq = to_mcq(mqp->ibqp.send_cq);
2759 spin_lock_irqsave(&send_mcq->lock, flags_cq);
2760 if (send_mcq->mcq.comp &&
2761 mqp->ibqp.send_cq->comp_handler) {
2762 if (!send_mcq->mcq.reset_notify_added) {
2763 send_mcq->mcq.reset_notify_added = 1;
2764 list_add_tail(&send_mcq->mcq.reset_notify,
2765 &cq_notify_list);
2766 }
2767 }
2768 spin_unlock_irqrestore(&send_mcq->lock, flags_cq);
2769 }
2770 spin_unlock_irqrestore(&mqp->sq.lock, flags_qp);
2771 /* Now, handle the QP's receive queue */
2772 spin_lock_irqsave(&mqp->rq.lock, flags_qp);
2773 /* no handling is needed for SRQ */
2774 if (!mqp->ibqp.srq) {
2775 if (mqp->rq.tail != mqp->rq.head) {
2776 recv_mcq = to_mcq(mqp->ibqp.recv_cq);
2777 spin_lock_irqsave(&recv_mcq->lock, flags_cq);
2778 if (recv_mcq->mcq.comp &&
2779 mqp->ibqp.recv_cq->comp_handler) {
2780 if (!recv_mcq->mcq.reset_notify_added) {
2781 recv_mcq->mcq.reset_notify_added = 1;
2782 list_add_tail(&recv_mcq->mcq.reset_notify,
2783 &cq_notify_list);
2784 }
2785 }
2786 spin_unlock_irqrestore(&recv_mcq->lock,
2787 flags_cq);
2788 }
2789 }
2790 spin_unlock_irqrestore(&mqp->rq.lock, flags_qp);
2791 }
2792
2793 list_for_each_entry(mcq, &cq_notify_list, reset_notify) {
2794 mcq->comp(mcq);
2795 }
2796 spin_unlock_irqrestore(&ibdev->reset_flow_resource_lock, flags);
2797 pr_warn("mlx4_ib_handle_catas_error ended\n");
2798}
2799
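/*
 * When the device is bonded, the logical IB port tracks the best state of
 * the slave netdevs: ACTIVE if any of them is running with carrier up,
 * DOWN otherwise.  The result is reported as an event on (logical) port 1.
 */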
static void handle_bonded_port_state_event(struct work_struct *work)
{
	struct ib_event_work *ew =
		container_of(work, struct ib_event_work, work);
	struct mlx4_ib_dev *ibdev = ew->ib_dev;
	enum ib_port_state bonded_port_state = IB_PORT_NOP;
	int i;
	struct ib_event ibev;

	kfree(ew);
	spin_lock_bh(&ibdev->iboe.lock);
	for (i = 0; i < MLX4_MAX_PORTS; ++i) {
		struct net_device *curr_netdev = ibdev->iboe.netdevs[i];
		enum ib_port_state curr_port_state;

		if (!curr_netdev)
			continue;

		curr_port_state =
			(netif_running(curr_netdev) &&
			 netif_carrier_ok(curr_netdev)) ?
			IB_PORT_ACTIVE : IB_PORT_DOWN;

		bonded_port_state = (bonded_port_state != IB_PORT_ACTIVE) ?
			curr_port_state : IB_PORT_ACTIVE;
	}
	spin_unlock_bh(&ibdev->iboe.lock);

	ibev.device = &ibdev->ib_dev;
	ibev.element.port_num = 1;
	ibev.event = (bonded_port_state == IB_PORT_ACTIVE) ?
		IB_EVENT_PORT_ACTIVE : IB_EVENT_PORT_ERR;

	ib_dispatch_event(&ibev);
}

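/*
 * Core event handler: translate mlx4 device events into IB events and
 * dispatch them.  'param' carries a port number, a slave id, or a pointer
 * to an EQE, depending on the event type.
 */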
static void mlx4_ib_event(struct mlx4_dev *dev, void *ibdev_ptr,
			  enum mlx4_dev_event event, unsigned long param)
{
	struct ib_event ibev;
	struct mlx4_ib_dev *ibdev = to_mdev((struct ib_device *) ibdev_ptr);
	struct mlx4_eqe *eqe = NULL;
	struct ib_event_work *ew;
	int p = 0;

	if (mlx4_is_bonded(dev) &&
	    ((event == MLX4_DEV_EVENT_PORT_UP) ||
	     (event == MLX4_DEV_EVENT_PORT_DOWN))) {
		ew = kmalloc(sizeof(*ew), GFP_ATOMIC);
		if (!ew)
			return;
		INIT_WORK(&ew->work, handle_bonded_port_state_event);
		ew->ib_dev = ibdev;
		queue_work(wq, &ew->work);
		return;
	}

	if (event == MLX4_DEV_EVENT_PORT_MGMT_CHANGE)
		eqe = (struct mlx4_eqe *)param;
	else
		p = (int) param;

	switch (event) {
	case MLX4_DEV_EVENT_PORT_UP:
		if (p > ibdev->num_ports)
			return;
		if (mlx4_is_master(dev) &&
		    rdma_port_get_link_layer(&ibdev->ib_dev, p) ==
		    IB_LINK_LAYER_INFINIBAND) {
			mlx4_ib_invalidate_all_guid_record(ibdev, p);
		}
		ibev.event = IB_EVENT_PORT_ACTIVE;
		break;

	case MLX4_DEV_EVENT_PORT_DOWN:
		if (p > ibdev->num_ports)
			return;
		ibev.event = IB_EVENT_PORT_ERR;
		break;

	case MLX4_DEV_EVENT_CATASTROPHIC_ERROR:
		ibdev->ib_active = false;
		ibev.event = IB_EVENT_DEVICE_FATAL;
		mlx4_ib_handle_catas_error(ibdev);
		break;

	case MLX4_DEV_EVENT_PORT_MGMT_CHANGE:
		ew = kmalloc(sizeof *ew, GFP_ATOMIC);
		if (!ew) {
			pr_err("failed to allocate memory for events work\n");
			break;
		}

		INIT_WORK(&ew->work, handle_port_mgmt_change_event);
		memcpy(&ew->ib_eqe, eqe, sizeof *eqe);
		ew->ib_dev = ibdev;
		/* need to queue only for port owner, which uses GEN_EQE */
		if (mlx4_is_master(dev))
			queue_work(wq, &ew->work);
		else
			handle_port_mgmt_change_event(&ew->work);
		return;

	case MLX4_DEV_EVENT_SLAVE_INIT:
		/* here, p is the slave id */
		do_slave_init(ibdev, p, 1);
		if (mlx4_is_master(dev)) {
			int i;

			for (i = 1; i <= ibdev->num_ports; i++) {
				if (rdma_port_get_link_layer(&ibdev->ib_dev, i)
				    == IB_LINK_LAYER_INFINIBAND)
					mlx4_ib_slave_alias_guid_event(ibdev,
								       p, i,
								       1);
			}
		}
		return;

	case MLX4_DEV_EVENT_SLAVE_SHUTDOWN:
		if (mlx4_is_master(dev)) {
			int i;

			for (i = 1; i <= ibdev->num_ports; i++) {
				if (rdma_port_get_link_layer(&ibdev->ib_dev, i)
				    == IB_LINK_LAYER_INFINIBAND)
					mlx4_ib_slave_alias_guid_event(ibdev,
								       p, i,
								       0);
			}
		}
		/* here, p is the slave id */
		do_slave_init(ibdev, p, 0);
		return;

	default:
		return;
	}

	ibev.device	      = ibdev_ptr;
	ibev.element.port_num = mlx4_is_bonded(ibdev->dev) ? 1 : (u8)p;

	ib_dispatch_event(&ibev);
}

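/* Callbacks registered with the mlx4 core driver */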
static struct mlx4_interface mlx4_ib_interface = {
	.add		= mlx4_ib_add,
	.remove		= mlx4_ib_remove,
	.event		= mlx4_ib_event,
	.protocol	= MLX4_PROT_IB_IPV6,
	.flags		= MLX4_INTFF_BONDING
};

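/*
 * Module load: create the driver workqueue and the multicast group (mcg)
 * machinery before registering with the mlx4 core, so both are ready when
 * the core invokes the .add/.event callbacks.
 */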
static int __init mlx4_ib_init(void)
{
	int err;

	wq = create_singlethread_workqueue("mlx4_ib");
	if (!wq)
		return -ENOMEM;

	err = mlx4_ib_mcg_init();
	if (err)
		goto clean_wq;

	err = mlx4_register_interface(&mlx4_ib_interface);
	if (err)
		goto clean_mcg;

	return 0;

clean_mcg:
	mlx4_ib_mcg_destroy();

clean_wq:
	destroy_workqueue(wq);
	return err;
}

static void __exit mlx4_ib_cleanup(void)
{
	mlx4_unregister_interface(&mlx4_ib_interface);
	mlx4_ib_mcg_destroy();
	destroy_workqueue(wq);
}

module_init(mlx4_ib_init);
module_exit(mlx4_ib_cleanup);