/*
 * Copyright (c) 2006, 2007 Cisco Systems, Inc.  All rights reserved.
 * Copyright (c) 2007, 2008 Mellanox Technologies. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/module.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/errno.h>
#include <linux/netdevice.h>
#include <linux/inetdevice.h>
#include <linux/rtnetlink.h>
#include <linux/if_vlan.h>
#include <net/ipv6.h>
#include <net/addrconf.h>

#include <rdma/ib_smi.h>
#include <rdma/ib_user_verbs.h>
#include <rdma/ib_addr.h>
#include <rdma/ib_cache.h>

#include <net/bonding.h>

#include <linux/mlx4/driver.h>
#include <linux/mlx4/cmd.h>
#include <linux/mlx4/qp.h>

#include "mlx4_ib.h"
#include "user.h"

#define DRV_NAME	MLX4_IB_DRV_NAME
#define DRV_VERSION	"2.2-1"
#define DRV_RELDATE	"Feb 2014"

#define MLX4_IB_FLOW_MAX_PRIO 0xFFF
#define MLX4_IB_FLOW_QPN_MASK 0xFFFFFF
#define MLX4_IB_CARD_REV_A0   0xA0

MODULE_AUTHOR("Roland Dreier");
MODULE_DESCRIPTION("Mellanox ConnectX HCA InfiniBand driver");
MODULE_LICENSE("Dual BSD/GPL");
MODULE_VERSION(DRV_VERSION);

int mlx4_ib_sm_guid_assign = 0;
module_param_named(sm_guid_assign, mlx4_ib_sm_guid_assign, int, 0444);
MODULE_PARM_DESC(sm_guid_assign, "Enable SM alias_GUID assignment if sm_guid_assign > 0 (Default: 0)");

static const char mlx4_ib_version[] =
	DRV_NAME ": Mellanox ConnectX InfiniBand driver v"
	DRV_VERSION " (" DRV_RELDATE ")\n";

static void do_slave_init(struct mlx4_ib_dev *ibdev, int slave, int do_init);

static struct workqueue_struct *wq;

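/* Fill in the common header fields of a subnet-management query MAD:
 * LID-routed SM class, method GET.
 */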
static void init_query_mad(struct ib_smp *mad)
{
	mad->base_version  = 1;
	mad->mgmt_class    = IB_MGMT_CLASS_SUBN_LID_ROUTED;
	mad->class_version = 1;
	mad->method	   = IB_MGMT_METHOD_GET;
}

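/* Returns nonzero when device-managed flow steering (DMFS) is usable:
 * the device must be in DMFS steering mode and every link type present
 * must have its matching capability bit set.  DMFS is also refused for
 * IB ports in a multifunction (SR-IOV) environment.
 */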
static int check_flow_steering_support(struct mlx4_dev *dev)
{
	int eth_num_ports = 0;
	int ib_num_ports = 0;

	int dmfs = dev->caps.steering_mode == MLX4_STEERING_MODE_DEVICE_MANAGED;

	if (dmfs) {
		int i;
		mlx4_foreach_port(i, dev, MLX4_PORT_TYPE_ETH)
			eth_num_ports++;
		mlx4_foreach_port(i, dev, MLX4_PORT_TYPE_IB)
			ib_num_ports++;
		dmfs &= (!ib_num_ports ||
			 (dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_DMFS_IPOIB)) &&
			(!eth_num_ports ||
			 (dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_FS_EN));
		if (ib_num_ports && mlx4_is_mfunc(dev)) {
			pr_warn("Device managed flow steering is unavailable for IB port in multifunction env.\n");
			dmfs = 0;
		}
	}
	return dmfs;
}

static int num_ib_ports(struct mlx4_dev *dev)
{
	int ib_ports = 0;
	int i;

	mlx4_foreach_port(i, dev, MLX4_PORT_TYPE_IB)
		ib_ports++;

	return ib_ports;
}

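/* Resolve the netdev behind an Ethernet (RoCE) port.  Under bonding,
 * prefer the bond's currently active slave.  The returned netdev is
 * referenced and must be released with dev_put().
 */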
static struct net_device *mlx4_ib_get_netdev(struct ib_device *device, u8 port_num)
{
	struct mlx4_ib_dev *ibdev = to_mdev(device);
	struct net_device *dev;

	rcu_read_lock();
	dev = mlx4_get_protocol_dev(ibdev->dev, MLX4_PROT_ETH, port_num);

	if (dev) {
		if (mlx4_is_bonded(ibdev->dev)) {
			struct net_device *upper = NULL;

			upper = netdev_master_upper_dev_get_rcu(dev);
			if (upper) {
				struct net_device *active;

				active = bond_option_active_slave_get_rcu(netdev_priv(upper));
				if (active)
					dev = active;
			}
		}
	}
	if (dev)
		dev_hold(dev);

	rcu_read_unlock();
	return dev;
}

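/* Program the whole per-port GID table into firmware via SET_PORT using
 * the RoCE v1 layout (one union ib_gid per entry).  When the device is
 * bonded, the same table is mirrored to port 2.
 */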
static int mlx4_ib_update_gids_v1(struct gid_entry *gids,
				  struct mlx4_ib_dev *ibdev,
				  u8 port_num)
{
	struct mlx4_cmd_mailbox *mailbox;
	int err;
	struct mlx4_dev *dev = ibdev->dev;
	int i;
	union ib_gid *gid_tbl;

	mailbox = mlx4_alloc_cmd_mailbox(dev);
	if (IS_ERR(mailbox))
		return -ENOMEM;

	gid_tbl = mailbox->buf;

	for (i = 0; i < MLX4_MAX_PORT_GIDS; ++i)
		memcpy(&gid_tbl[i], &gids[i].gid, sizeof(union ib_gid));

	err = mlx4_cmd(dev, mailbox->dma,
		       MLX4_SET_PORT_GID_TABLE << 8 | port_num,
		       1, MLX4_CMD_SET_PORT, MLX4_CMD_TIME_CLASS_B,
		       MLX4_CMD_WRAPPED);
	if (mlx4_is_bonded(dev))
		err += mlx4_cmd(dev, mailbox->dma,
				MLX4_SET_PORT_GID_TABLE << 8 | 2,
				1, MLX4_CMD_SET_PORT, MLX4_CMD_TIME_CLASS_B,
				MLX4_CMD_WRAPPED);

	mlx4_free_cmd_mailbox(dev, mailbox);
	return err;
}

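/* Same as mlx4_ib_update_gids_v1(), but using the RoCE v1/v2 SET_PORT
 * layout: RoCE v2 (UDP-encapsulated) entries get version 2, IPv6 GIDs
 * are marked type 1, and IPv4-mapped GIDs have their top 12 bytes
 * cleared so only the IPv4 address remains.
 */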
static int mlx4_ib_update_gids_v1_v2(struct gid_entry *gids,
				     struct mlx4_ib_dev *ibdev,
				     u8 port_num)
{
	struct mlx4_cmd_mailbox *mailbox;
	int err;
	struct mlx4_dev *dev = ibdev->dev;
	int i;
	struct {
		union ib_gid	gid;
		__be32		rsrvd1[2];
		__be16		rsrvd2;
		u8		type;
		u8		version;
		__be32		rsrvd3;
	} *gid_tbl;

	mailbox = mlx4_alloc_cmd_mailbox(dev);
	if (IS_ERR(mailbox))
		return -ENOMEM;

	gid_tbl = mailbox->buf;
	for (i = 0; i < MLX4_MAX_PORT_GIDS; ++i) {
		memcpy(&gid_tbl[i].gid, &gids[i].gid, sizeof(union ib_gid));
		if (gids[i].gid_type == IB_GID_TYPE_ROCE_UDP_ENCAP) {
			gid_tbl[i].version = 2;
			if (!ipv6_addr_v4mapped((struct in6_addr *)&gids[i].gid))
				gid_tbl[i].type = 1;
			else
				memset(&gid_tbl[i].gid, 0, 12);
		}
	}

	err = mlx4_cmd(dev, mailbox->dma,
		       MLX4_SET_PORT_ROCE_ADDR << 8 | port_num,
		       1, MLX4_CMD_SET_PORT, MLX4_CMD_TIME_CLASS_B,
		       MLX4_CMD_WRAPPED);
	if (mlx4_is_bonded(dev))
		err += mlx4_cmd(dev, mailbox->dma,
				MLX4_SET_PORT_ROCE_ADDR << 8 | 2,
				1, MLX4_CMD_SET_PORT, MLX4_CMD_TIME_CLASS_B,
				MLX4_CMD_WRAPPED);

	mlx4_free_cmd_mailbox(dev, mailbox);
	return err;
}

static int mlx4_ib_update_gids(struct gid_entry *gids,
			       struct mlx4_ib_dev *ibdev,
			       u8 port_num)
{
	if (ibdev->dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_ROCE_V1_V2)
		return mlx4_ib_update_gids_v1_v2(gids, ibdev, port_num);

	return mlx4_ib_update_gids_v1(gids, ibdev, port_num);
}

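/* ib_add_gid callback for the RoCE GID cache: find a matching or free
 * slot in the software shadow table under iboe->lock and take a
 * reference on it.  If a new slot was consumed, a snapshot of the table
 * is pushed to firmware after the lock is dropped.
 */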
static int mlx4_ib_add_gid(struct ib_device *device,
			   u8 port_num,
			   unsigned int index,
			   const union ib_gid *gid,
			   const struct ib_gid_attr *attr,
			   void **context)
{
	struct mlx4_ib_dev *ibdev = to_mdev(device);
	struct mlx4_ib_iboe *iboe = &ibdev->iboe;
	struct mlx4_port_gid_table *port_gid_table;
	int free = -1, found = -1;
	int ret = 0;
	int hw_update = 0;
	int i;
	struct gid_entry *gids = NULL;

	if (!rdma_cap_roce_gid_table(device, port_num))
		return -EINVAL;

	if (port_num > MLX4_MAX_PORTS)
		return -EINVAL;

	if (!context)
		return -EINVAL;

	port_gid_table = &iboe->gids[port_num - 1];
	spin_lock_bh(&iboe->lock);
	for (i = 0; i < MLX4_MAX_PORT_GIDS; ++i) {
		if (!memcmp(&port_gid_table->gids[i].gid, gid, sizeof(*gid)) &&
		    (port_gid_table->gids[i].gid_type == attr->gid_type)) {
			found = i;
			break;
		}
		if (free < 0 && !memcmp(&port_gid_table->gids[i].gid, &zgid, sizeof(*gid)))
			free = i; /* HW has space */
	}

	if (found < 0) {
		if (free < 0) {
			ret = -ENOSPC;
		} else {
			port_gid_table->gids[free].ctx = kmalloc(sizeof(*port_gid_table->gids[free].ctx), GFP_ATOMIC);
			if (!port_gid_table->gids[free].ctx) {
				ret = -ENOMEM;
			} else {
				*context = port_gid_table->gids[free].ctx;
				memcpy(&port_gid_table->gids[free].gid, gid, sizeof(*gid));
				port_gid_table->gids[free].gid_type = attr->gid_type;
				port_gid_table->gids[free].ctx->real_index = free;
				port_gid_table->gids[free].ctx->refcount = 1;
				hw_update = 1;
			}
		}
	} else {
		struct gid_cache_context *ctx = port_gid_table->gids[found].ctx;
		*context = ctx;
		ctx->refcount++;
	}
	if (!ret && hw_update) {
		gids = kmalloc(sizeof(*gids) * MLX4_MAX_PORT_GIDS, GFP_ATOMIC);
		if (!gids) {
			ret = -ENOMEM;
		} else {
			for (i = 0; i < MLX4_MAX_PORT_GIDS; i++) {
				memcpy(&gids[i].gid, &port_gid_table->gids[i].gid, sizeof(union ib_gid));
				gids[i].gid_type = port_gid_table->gids[i].gid_type;
			}
		}
	}
	spin_unlock_bh(&iboe->lock);

	if (!ret && hw_update) {
		ret = mlx4_ib_update_gids(gids, ibdev, port_num);
		kfree(gids);
	}

	return ret;
}

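/* ib_del_gid counterpart: drop the slot's refcount and, once it reaches
 * zero, clear the entry and write the updated table back to firmware.
 */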
static int mlx4_ib_del_gid(struct ib_device *device,
			   u8 port_num,
			   unsigned int index,
			   void **context)
{
	struct gid_cache_context *ctx = *context;
	struct mlx4_ib_dev *ibdev = to_mdev(device);
	struct mlx4_ib_iboe *iboe = &ibdev->iboe;
	struct mlx4_port_gid_table *port_gid_table;
	int ret = 0;
	int hw_update = 0;
	struct gid_entry *gids = NULL;

	if (!rdma_cap_roce_gid_table(device, port_num))
		return -EINVAL;

	if (port_num > MLX4_MAX_PORTS)
		return -EINVAL;

	port_gid_table = &iboe->gids[port_num - 1];
	spin_lock_bh(&iboe->lock);
	if (ctx) {
		ctx->refcount--;
		if (!ctx->refcount) {
			unsigned int real_index = ctx->real_index;

			memcpy(&port_gid_table->gids[real_index].gid, &zgid, sizeof(zgid));
			kfree(port_gid_table->gids[real_index].ctx);
			port_gid_table->gids[real_index].ctx = NULL;
			hw_update = 1;
		}
	}
	if (!ret && hw_update) {
		int i;

		gids = kmalloc(sizeof(*gids) * MLX4_MAX_PORT_GIDS, GFP_ATOMIC);
		if (!gids) {
			ret = -ENOMEM;
		} else {
			for (i = 0; i < MLX4_MAX_PORT_GIDS; i++)
				memcpy(&gids[i].gid, &port_gid_table->gids[i].gid, sizeof(union ib_gid));
		}
	}
	spin_unlock_bh(&iboe->lock);

	if (!ret && hw_update) {
		ret = mlx4_ib_update_gids(gids, ibdev, port_num);
		kfree(gids);
	}
	return ret;
}

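/* Translate an index in the rdma GID cache into the slot the hardware
 * actually uses for this port, by looking the cached GID up in the
 * shadow table.
 */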
int mlx4_ib_gid_index_to_real_index(struct mlx4_ib_dev *ibdev,
				    u8 port_num, int index)
{
	struct mlx4_ib_iboe *iboe = &ibdev->iboe;
	struct gid_cache_context *ctx = NULL;
	union ib_gid gid;
	struct mlx4_port_gid_table *port_gid_table;
	int real_index = -EINVAL;
	int i;
	int ret;
	unsigned long flags;
	struct ib_gid_attr attr;

	if (port_num > MLX4_MAX_PORTS)
		return -EINVAL;

	if (mlx4_is_bonded(ibdev->dev))
		port_num = 1;

	if (!rdma_cap_roce_gid_table(&ibdev->ib_dev, port_num))
		return index;

	ret = ib_get_cached_gid(&ibdev->ib_dev, port_num, index, &gid, &attr);
	if (ret)
		return ret;

	if (attr.ndev)
		dev_put(attr.ndev);

	if (!memcmp(&gid, &zgid, sizeof(gid)))
		return -EINVAL;

	spin_lock_irqsave(&iboe->lock, flags);
	port_gid_table = &iboe->gids[port_num - 1];

	for (i = 0; i < MLX4_MAX_PORT_GIDS; ++i)
		if (!memcmp(&port_gid_table->gids[i].gid, &gid, sizeof(gid)) &&
		    attr.gid_type == port_gid_table->gids[i].gid_type) {
			ctx = port_gid_table->gids[i].ctx;
			break;
		}
	if (ctx)
		real_index = ctx->real_index;
	spin_unlock_irqrestore(&iboe->lock, flags);
	return real_index;
}

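/* ib_query_device handler: capabilities come mostly from cached firmware
 * caps, plus a NodeInfo MAD for the vendor/system-image identifiers.
 * The extended (uhw) path can additionally return the HCA core-clock
 * offset for user-space timestamp reads.
 */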
static int mlx4_ib_query_device(struct ib_device *ibdev,
				struct ib_device_attr *props,
				struct ib_udata *uhw)
{
	struct mlx4_ib_dev *dev = to_mdev(ibdev);
	struct ib_smp *in_mad  = NULL;
	struct ib_smp *out_mad = NULL;
	int err = -ENOMEM;
	int have_ib_ports;
	struct mlx4_uverbs_ex_query_device cmd;
	struct mlx4_uverbs_ex_query_device_resp resp = {.comp_mask = 0};
	struct mlx4_clock_params clock_params;

	if (uhw->inlen) {
		if (uhw->inlen < sizeof(cmd))
			return -EINVAL;

		err = ib_copy_from_udata(&cmd, uhw, sizeof(cmd));
		if (err)
			return err;

		if (cmd.comp_mask)
			return -EINVAL;

		if (cmd.reserved)
			return -EINVAL;
	}

	resp.response_length = offsetof(typeof(resp), response_length) +
		sizeof(resp.response_length);
	in_mad  = kzalloc(sizeof *in_mad, GFP_KERNEL);
	out_mad = kmalloc(sizeof *out_mad, GFP_KERNEL);
	if (!in_mad || !out_mad)
		goto out;

	init_query_mad(in_mad);
	in_mad->attr_id = IB_SMP_ATTR_NODE_INFO;

	err = mlx4_MAD_IFC(to_mdev(ibdev), MLX4_MAD_IFC_IGNORE_KEYS,
			   1, NULL, NULL, in_mad, out_mad);
	if (err)
		goto out;

	memset(props, 0, sizeof *props);

	have_ib_ports = num_ib_ports(dev->dev);

	props->fw_ver = dev->dev->caps.fw_ver;
	props->device_cap_flags    = IB_DEVICE_CHANGE_PHY_PORT |
		IB_DEVICE_PORT_ACTIVE_EVENT		|
		IB_DEVICE_SYS_IMAGE_GUID		|
		IB_DEVICE_RC_RNR_NAK_GEN		|
		IB_DEVICE_BLOCK_MULTICAST_LOOPBACK;
	if (dev->dev->caps.flags & MLX4_DEV_CAP_FLAG_BAD_PKEY_CNTR)
		props->device_cap_flags |= IB_DEVICE_BAD_PKEY_CNTR;
	if (dev->dev->caps.flags & MLX4_DEV_CAP_FLAG_BAD_QKEY_CNTR)
		props->device_cap_flags |= IB_DEVICE_BAD_QKEY_CNTR;
	if (dev->dev->caps.flags & MLX4_DEV_CAP_FLAG_APM && have_ib_ports)
		props->device_cap_flags |= IB_DEVICE_AUTO_PATH_MIG;
	if (dev->dev->caps.flags & MLX4_DEV_CAP_FLAG_UD_AV_PORT)
		props->device_cap_flags |= IB_DEVICE_UD_AV_PORT_ENFORCE;
	if (dev->dev->caps.flags & MLX4_DEV_CAP_FLAG_IPOIB_CSUM)
		props->device_cap_flags |= IB_DEVICE_UD_IP_CSUM;
	if (dev->dev->caps.max_gso_sz &&
	    (dev->dev->rev_id != MLX4_IB_CARD_REV_A0) &&
	    (dev->dev->caps.flags & MLX4_DEV_CAP_FLAG_BLH))
		props->device_cap_flags |= IB_DEVICE_UD_TSO;
	if (dev->dev->caps.bmme_flags & MLX4_BMME_FLAG_RESERVED_LKEY)
		props->device_cap_flags |= IB_DEVICE_LOCAL_DMA_LKEY;
	if ((dev->dev->caps.bmme_flags & MLX4_BMME_FLAG_LOCAL_INV) &&
	    (dev->dev->caps.bmme_flags & MLX4_BMME_FLAG_REMOTE_INV) &&
	    (dev->dev->caps.bmme_flags & MLX4_BMME_FLAG_FAST_REG_WR))
		props->device_cap_flags |= IB_DEVICE_MEM_MGT_EXTENSIONS;
	if (dev->dev->caps.flags & MLX4_DEV_CAP_FLAG_XRC)
		props->device_cap_flags |= IB_DEVICE_XRC;
	if (dev->dev->caps.flags & MLX4_DEV_CAP_FLAG_MEM_WINDOW)
		props->device_cap_flags |= IB_DEVICE_MEM_WINDOW;
	if (dev->dev->caps.bmme_flags & MLX4_BMME_FLAG_TYPE_2_WIN) {
		if (dev->dev->caps.bmme_flags & MLX4_BMME_FLAG_WIN_TYPE_2B)
			props->device_cap_flags |= IB_DEVICE_MEM_WINDOW_TYPE_2B;
		else
			props->device_cap_flags |= IB_DEVICE_MEM_WINDOW_TYPE_2A;
		if (dev->steering_support == MLX4_STEERING_MODE_DEVICE_MANAGED)
			props->device_cap_flags |= IB_DEVICE_MANAGED_FLOW_STEERING;
	}

	props->device_cap_flags |= IB_DEVICE_RAW_IP_CSUM;

	props->vendor_id	   = be32_to_cpup((__be32 *) (out_mad->data + 36)) &
		0xffffff;
	props->vendor_part_id	   = dev->dev->persist->pdev->device;
	props->hw_ver		   = be32_to_cpup((__be32 *) (out_mad->data + 32));
	memcpy(&props->sys_image_guid, out_mad->data + 4, 8);

	props->max_mr_size	   = ~0ull;
	props->page_size_cap	   = dev->dev->caps.page_size_cap;
	props->max_qp		   = dev->dev->quotas.qp;
	props->max_qp_wr	   = dev->dev->caps.max_wqes - MLX4_IB_SQ_MAX_SPARE;
	props->max_sge		   = min(dev->dev->caps.max_sq_sg,
					 dev->dev->caps.max_rq_sg);
	props->max_sge_rd	   = MLX4_MAX_SGE_RD;
	props->max_cq		   = dev->dev->quotas.cq;
	props->max_cqe		   = dev->dev->caps.max_cqes;
	props->max_mr		   = dev->dev->quotas.mpt;
	props->max_pd		   = dev->dev->caps.num_pds - dev->dev->caps.reserved_pds;
	props->max_qp_rd_atom	   = dev->dev->caps.max_qp_dest_rdma;
	props->max_qp_init_rd_atom = dev->dev->caps.max_qp_init_rdma;
	props->max_res_rd_atom	   = props->max_qp_rd_atom * props->max_qp;
	props->max_srq		   = dev->dev->quotas.srq;
	props->max_srq_wr	   = dev->dev->caps.max_srq_wqes - 1;
	props->max_srq_sge	   = dev->dev->caps.max_srq_sge;
	props->max_fast_reg_page_list_len = MLX4_MAX_FAST_REG_PAGES;
	props->local_ca_ack_delay  = dev->dev->caps.local_ca_ack_delay;
	props->atomic_cap	   = dev->dev->caps.flags & MLX4_DEV_CAP_FLAG_ATOMIC ?
		IB_ATOMIC_HCA : IB_ATOMIC_NONE;
	props->masked_atomic_cap   = props->atomic_cap;
	props->max_pkeys	   = dev->dev->caps.pkey_table_len[1];
	props->max_mcast_grp	   = dev->dev->caps.num_mgms + dev->dev->caps.num_amgms;
	props->max_mcast_qp_attach = dev->dev->caps.num_qp_per_mgm;
	props->max_total_mcast_qp_attach = props->max_mcast_qp_attach *
					   props->max_mcast_grp;
	props->max_map_per_fmr = dev->dev->caps.max_fmr_maps;
	props->hca_core_clock = dev->dev->caps.hca_core_clock * 1000UL;
	props->timestamp_mask = 0xFFFFFFFFFFFFULL;

	if (!mlx4_is_slave(dev->dev))
		err = mlx4_get_internal_clock_params(dev->dev, &clock_params);

	if (uhw->outlen >= resp.response_length + sizeof(resp.hca_core_clock_offset)) {
		resp.response_length += sizeof(resp.hca_core_clock_offset);
		if (!err && !mlx4_is_slave(dev->dev)) {
			resp.comp_mask |= QUERY_DEVICE_RESP_MASK_TIMESTAMP;
			resp.hca_core_clock_offset = clock_params.offset % PAGE_SIZE;
		}
	}

	if (uhw->outlen) {
		err = ib_copy_to_udata(uhw, &resp, resp.response_length);
		if (err)
			goto out;
	}
out:
	kfree(in_mad);
	kfree(out_mad);

	return err;
}

static enum rdma_link_layer
mlx4_ib_port_link_layer(struct ib_device *device, u8 port_num)
{
	struct mlx4_dev *dev = to_mdev(device)->dev;

	return dev->caps.port_mask[port_num] == MLX4_PORT_TYPE_IB ?
		IB_LINK_LAYER_INFINIBAND : IB_LINK_LAYER_ETHERNET;
}

static int ib_link_query_port(struct ib_device *ibdev, u8 port,
			      struct ib_port_attr *props, int netw_view)
{
	struct ib_smp *in_mad  = NULL;
	struct ib_smp *out_mad = NULL;
	int ext_active_speed;
	int mad_ifc_flags = MLX4_MAD_IFC_IGNORE_KEYS;
	int err = -ENOMEM;

	in_mad  = kzalloc(sizeof *in_mad, GFP_KERNEL);
	out_mad = kmalloc(sizeof *out_mad, GFP_KERNEL);
	if (!in_mad || !out_mad)
		goto out;

	init_query_mad(in_mad);
	in_mad->attr_id  = IB_SMP_ATTR_PORT_INFO;
	in_mad->attr_mod = cpu_to_be32(port);

	if (mlx4_is_mfunc(to_mdev(ibdev)->dev) && netw_view)
		mad_ifc_flags |= MLX4_MAD_IFC_NET_VIEW;

	err = mlx4_MAD_IFC(to_mdev(ibdev), mad_ifc_flags, port, NULL, NULL,
			   in_mad, out_mad);
	if (err)
		goto out;

	props->lid		= be16_to_cpup((__be16 *) (out_mad->data + 16));
	props->lmc		= out_mad->data[34] & 0x7;
	props->sm_lid		= be16_to_cpup((__be16 *) (out_mad->data + 18));
	props->sm_sl		= out_mad->data[36] & 0xf;
	props->state		= out_mad->data[32] & 0xf;
	props->phys_state	= out_mad->data[33] >> 4;
	props->port_cap_flags	= be32_to_cpup((__be32 *) (out_mad->data + 20));
	if (netw_view)
		props->gid_tbl_len = out_mad->data[50];
	else
		props->gid_tbl_len = to_mdev(ibdev)->dev->caps.gid_table_len[port];
	props->max_msg_sz	= to_mdev(ibdev)->dev->caps.max_msg_sz;
	props->pkey_tbl_len	= to_mdev(ibdev)->dev->caps.pkey_table_len[port];
	props->bad_pkey_cntr	= be16_to_cpup((__be16 *) (out_mad->data + 46));
	props->qkey_viol_cntr	= be16_to_cpup((__be16 *) (out_mad->data + 48));
	props->active_width	= out_mad->data[31] & 0xf;
	props->active_speed	= out_mad->data[35] >> 4;
	props->max_mtu		= out_mad->data[41] & 0xf;
	props->active_mtu	= out_mad->data[36] >> 4;
	props->subnet_timeout	= out_mad->data[51] & 0x1f;
	props->max_vl_num	= out_mad->data[37] >> 4;
	props->init_type_reply	= out_mad->data[41] >> 4;

	/* Check if extended speeds (EDR/FDR/...) are supported */
	if (props->port_cap_flags & IB_PORT_EXTENDED_SPEEDS_SUP) {
		ext_active_speed = out_mad->data[62] >> 4;

		switch (ext_active_speed) {
		case 1:
			props->active_speed = IB_SPEED_FDR;
			break;
		case 2:
			props->active_speed = IB_SPEED_EDR;
			break;
		}
	}

	/* If the reported active speed is QDR, check if it is FDR-10 */
	if (props->active_speed == IB_SPEED_QDR) {
		init_query_mad(in_mad);
		in_mad->attr_id = MLX4_ATTR_EXTENDED_PORT_INFO;
		in_mad->attr_mod = cpu_to_be32(port);

		err = mlx4_MAD_IFC(to_mdev(ibdev), mad_ifc_flags, port,
				   NULL, NULL, in_mad, out_mad);
		if (err)
			goto out;

		/* Checking LinkSpeedActive for FDR-10 */
		if (out_mad->data[15] & 0x1)
			props->active_speed = IB_SPEED_FDR10;
	}

	/* Avoid wrong speed value returned by FW if the IB link is down. */
	if (props->state == IB_PORT_DOWN)
		props->active_speed = IB_SPEED_SDR;

out:
	kfree(in_mad);
	kfree(out_mad);
	return err;
}

static u8 state_to_phys_state(enum ib_port_state state)
{
	return state == IB_PORT_ACTIVE ? 5 : 3;
}

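/* Port attributes for Ethernet (RoCE) ports: link width/speed come from
 * QUERY_PORT, while state and MTU are derived from the backing netdev
 * (or from the bond master when the device is bonded).
 */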
static int eth_link_query_port(struct ib_device *ibdev, u8 port,
			       struct ib_port_attr *props, int netw_view)
{
	struct mlx4_ib_dev *mdev = to_mdev(ibdev);
	struct mlx4_ib_iboe *iboe = &mdev->iboe;
	struct net_device *ndev;
	enum ib_mtu tmp;
	struct mlx4_cmd_mailbox *mailbox;
	int err = 0;
	int is_bonded = mlx4_is_bonded(mdev->dev);

	mailbox = mlx4_alloc_cmd_mailbox(mdev->dev);
	if (IS_ERR(mailbox))
		return PTR_ERR(mailbox);

	err = mlx4_cmd_box(mdev->dev, 0, mailbox->dma, port, 0,
			   MLX4_CMD_QUERY_PORT, MLX4_CMD_TIME_CLASS_B,
			   MLX4_CMD_WRAPPED);
	if (err)
		goto out;

	props->active_width	= (((u8 *)mailbox->buf)[5] == 0x40) ?
				  IB_WIDTH_4X : IB_WIDTH_1X;
	props->active_speed	= IB_SPEED_QDR;
	props->port_cap_flags	= IB_PORT_CM_SUP | IB_PORT_IP_BASED_GIDS;
	props->gid_tbl_len	= mdev->dev->caps.gid_table_len[port];
	props->max_msg_sz	= mdev->dev->caps.max_msg_sz;
	props->pkey_tbl_len	= 1;
	props->max_mtu		= IB_MTU_4096;
	props->max_vl_num	= 2;
	props->state		= IB_PORT_DOWN;
	props->phys_state	= state_to_phys_state(props->state);
	props->active_mtu	= IB_MTU_256;
	spin_lock_bh(&iboe->lock);
	ndev = iboe->netdevs[port - 1];
	if (ndev && is_bonded) {
		rcu_read_lock(); /* required to get upper dev */
		ndev = netdev_master_upper_dev_get_rcu(ndev);
		rcu_read_unlock();
	}
	if (!ndev)
		goto out_unlock;

	tmp = iboe_get_mtu(ndev->mtu);
	props->active_mtu = tmp ? min(props->max_mtu, tmp) : IB_MTU_256;

	props->state		= (netif_running(ndev) && netif_carrier_ok(ndev)) ?
				  IB_PORT_ACTIVE : IB_PORT_DOWN;
	props->phys_state	= state_to_phys_state(props->state);
out_unlock:
	spin_unlock_bh(&iboe->lock);
out:
	mlx4_free_cmd_mailbox(mdev->dev, mailbox);
	return err;
}

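/* Dispatch a port query to the IB or Ethernet flavor according to the
 * port's link layer.
 */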
int __mlx4_ib_query_port(struct ib_device *ibdev, u8 port,
			 struct ib_port_attr *props, int netw_view)
{
	int err;

	memset(props, 0, sizeof *props);

	err = mlx4_ib_port_link_layer(ibdev, port) == IB_LINK_LAYER_INFINIBAND ?
		ib_link_query_port(ibdev, port, props, netw_view) :
		eth_link_query_port(ibdev, port, props, netw_view);

	return err;
}

static int mlx4_ib_query_port(struct ib_device *ibdev, u8 port,
			      struct ib_port_attr *props)
{
	/* returns host view */
	return __mlx4_ib_query_port(ibdev, port, props, 0);
}

int __mlx4_ib_query_gid(struct ib_device *ibdev, u8 port, int index,
			union ib_gid *gid, int netw_view)
{
	struct ib_smp *in_mad  = NULL;
	struct ib_smp *out_mad = NULL;
	int err = -ENOMEM;
	struct mlx4_ib_dev *dev = to_mdev(ibdev);
	int clear = 0;
	int mad_ifc_flags = MLX4_MAD_IFC_IGNORE_KEYS;

	in_mad  = kzalloc(sizeof *in_mad, GFP_KERNEL);
	out_mad = kmalloc(sizeof *out_mad, GFP_KERNEL);
	if (!in_mad || !out_mad)
		goto out;

	init_query_mad(in_mad);
	in_mad->attr_id  = IB_SMP_ATTR_PORT_INFO;
	in_mad->attr_mod = cpu_to_be32(port);

	if (mlx4_is_mfunc(dev->dev) && netw_view)
		mad_ifc_flags |= MLX4_MAD_IFC_NET_VIEW;

	err = mlx4_MAD_IFC(dev, mad_ifc_flags, port, NULL, NULL, in_mad, out_mad);
	if (err)
		goto out;

	memcpy(gid->raw, out_mad->data + 8, 8);

	if (mlx4_is_mfunc(dev->dev) && !netw_view) {
		if (index) {
			/* For any index > 0, return the null guid */
			err = 0;
			clear = 1;
			goto out;
		}
	}

	init_query_mad(in_mad);
	in_mad->attr_id  = IB_SMP_ATTR_GUID_INFO;
	in_mad->attr_mod = cpu_to_be32(index / 8);

	err = mlx4_MAD_IFC(dev, mad_ifc_flags, port,
			   NULL, NULL, in_mad, out_mad);
	if (err)
		goto out;

	memcpy(gid->raw + 8, out_mad->data + (index % 8) * 8, 8);

out:
	if (clear)
		memset(gid->raw + 8, 0, 8);
	kfree(in_mad);
	kfree(out_mad);
	return err;
}

static int mlx4_ib_query_gid(struct ib_device *ibdev, u8 port, int index,
			     union ib_gid *gid)
{
	int ret;

	if (rdma_protocol_ib(ibdev, port))
		return __mlx4_ib_query_gid(ibdev, port, index, gid, 0);

	if (!rdma_protocol_roce(ibdev, port))
		return -ENODEV;

	if (!rdma_cap_roce_gid_table(ibdev, port))
		return -ENODEV;

	ret = ib_get_cached_gid(ibdev, port, index, gid, NULL);
	if (ret == -EAGAIN) {
		memcpy(gid, &zgid, sizeof(*gid));
		return 0;
	}

	return ret;
}

int __mlx4_ib_query_pkey(struct ib_device *ibdev, u8 port, u16 index,
			 u16 *pkey, int netw_view)
{
	struct ib_smp *in_mad  = NULL;
	struct ib_smp *out_mad = NULL;
	int mad_ifc_flags = MLX4_MAD_IFC_IGNORE_KEYS;
	int err = -ENOMEM;

	in_mad  = kzalloc(sizeof *in_mad, GFP_KERNEL);
	out_mad = kmalloc(sizeof *out_mad, GFP_KERNEL);
	if (!in_mad || !out_mad)
		goto out;

	init_query_mad(in_mad);
	in_mad->attr_id  = IB_SMP_ATTR_PKEY_TABLE;
	in_mad->attr_mod = cpu_to_be32(index / 32);

	if (mlx4_is_mfunc(to_mdev(ibdev)->dev) && netw_view)
		mad_ifc_flags |= MLX4_MAD_IFC_NET_VIEW;

	err = mlx4_MAD_IFC(to_mdev(ibdev), mad_ifc_flags, port, NULL, NULL,
			   in_mad, out_mad);
	if (err)
		goto out;

	*pkey = be16_to_cpu(((__be16 *) out_mad->data)[index % 32]);

out:
	kfree(in_mad);
	kfree(out_mad);
	return err;
}

static int mlx4_ib_query_pkey(struct ib_device *ibdev, u8 port, u16 index, u16 *pkey)
{
	return __mlx4_ib_query_pkey(ibdev, port, index, pkey, 0);
}

static int mlx4_ib_modify_device(struct ib_device *ibdev, int mask,
				 struct ib_device_modify *props)
{
	struct mlx4_cmd_mailbox *mailbox;
	unsigned long flags;

	if (mask & ~IB_DEVICE_MODIFY_NODE_DESC)
		return -EOPNOTSUPP;

	if (!(mask & IB_DEVICE_MODIFY_NODE_DESC))
		return 0;

	if (mlx4_is_slave(to_mdev(ibdev)->dev))
		return -EOPNOTSUPP;

	spin_lock_irqsave(&to_mdev(ibdev)->sm_lock, flags);
	memcpy(ibdev->node_desc, props->node_desc, 64);
	spin_unlock_irqrestore(&to_mdev(ibdev)->sm_lock, flags);

	/*
	 * If possible, pass node desc to FW, so it can generate
	 * a 144 trap.  If cmd fails, just ignore.
	 */
	mailbox = mlx4_alloc_cmd_mailbox(to_mdev(ibdev)->dev);
	if (IS_ERR(mailbox))
		return 0;

	memcpy(mailbox->buf, props->node_desc, 64);
	mlx4_cmd(to_mdev(ibdev)->dev, mailbox->dma, 1, 0,
		 MLX4_CMD_SET_NODE, MLX4_CMD_TIME_CLASS_A, MLX4_CMD_NATIVE);

	mlx4_free_cmd_mailbox(to_mdev(ibdev)->dev, mailbox);

	return 0;
}

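/* Update an IB port's capability mask (and optionally reset the qkey
 * violation counter) via SET_PORT, handling both the old and new
 * port-command layouts.
 */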
static int mlx4_ib_SET_PORT(struct mlx4_ib_dev *dev, u8 port, int reset_qkey_viols,
			    u32 cap_mask)
{
	struct mlx4_cmd_mailbox *mailbox;
	int err;

	mailbox = mlx4_alloc_cmd_mailbox(dev->dev);
	if (IS_ERR(mailbox))
		return PTR_ERR(mailbox);

	if (dev->dev->flags & MLX4_FLAG_OLD_PORT_CMDS) {
		*(u8 *) mailbox->buf	     = !!reset_qkey_viols << 6;
		((__be32 *) mailbox->buf)[2] = cpu_to_be32(cap_mask);
	} else {
		((u8 *) mailbox->buf)[3]     = !!reset_qkey_viols;
		((__be32 *) mailbox->buf)[1] = cpu_to_be32(cap_mask);
	}

	err = mlx4_cmd(dev->dev, mailbox->dma, port, MLX4_SET_PORT_IB_OPCODE,
		       MLX4_CMD_SET_PORT, MLX4_CMD_TIME_CLASS_B,
		       MLX4_CMD_WRAPPED);

	mlx4_free_cmd_mailbox(dev->dev, mailbox);
	return err;
}

static int mlx4_ib_modify_port(struct ib_device *ibdev, u8 port, int mask,
			       struct ib_port_modify *props)
{
	struct mlx4_ib_dev *mdev = to_mdev(ibdev);
	u8 is_eth = mdev->dev->caps.port_type[port] == MLX4_PORT_TYPE_ETH;
	struct ib_port_attr attr;
	u32 cap_mask;
	int err;

	/* return OK if this is RoCE. CM calls ib_modify_port() regardless
	 * of whether port link layer is ETH or IB. For ETH ports, qkey
	 * violations and port capabilities are not meaningful.
	 */
	if (is_eth)
		return 0;

	mutex_lock(&mdev->cap_mask_mutex);

	err = mlx4_ib_query_port(ibdev, port, &attr);
	if (err)
		goto out;

	cap_mask = (attr.port_cap_flags | props->set_port_cap_mask) &
		~props->clr_port_cap_mask;

	err = mlx4_ib_SET_PORT(mdev, port,
			       !!(mask & IB_PORT_RESET_QKEY_CNTR),
			       cap_mask);

out:
	mutex_unlock(&to_mdev(ibdev)->cap_mask_mutex);
	return err;
}

static struct ib_ucontext *mlx4_ib_alloc_ucontext(struct ib_device *ibdev,
						  struct ib_udata *udata)
{
	struct mlx4_ib_dev *dev = to_mdev(ibdev);
	struct mlx4_ib_ucontext *context;
	struct mlx4_ib_alloc_ucontext_resp_v3 resp_v3;
	struct mlx4_ib_alloc_ucontext_resp resp;
	int err;

	if (!dev->ib_active)
		return ERR_PTR(-EAGAIN);

	if (ibdev->uverbs_abi_ver == MLX4_IB_UVERBS_NO_DEV_CAPS_ABI_VERSION) {
		resp_v3.qp_tab_size	 = dev->dev->caps.num_qps;
		resp_v3.bf_reg_size	 = dev->dev->caps.bf_reg_size;
		resp_v3.bf_regs_per_page = dev->dev->caps.bf_regs_per_page;
	} else {
		resp.dev_caps	      = dev->dev->caps.userspace_caps;
		resp.qp_tab_size      = dev->dev->caps.num_qps;
		resp.bf_reg_size      = dev->dev->caps.bf_reg_size;
		resp.bf_regs_per_page = dev->dev->caps.bf_regs_per_page;
		resp.cqe_size	      = dev->dev->caps.cqe_size;
	}

	context = kzalloc(sizeof(*context), GFP_KERNEL);
	if (!context)
		return ERR_PTR(-ENOMEM);

	err = mlx4_uar_alloc(to_mdev(ibdev)->dev, &context->uar);
	if (err) {
		kfree(context);
		return ERR_PTR(err);
	}

	INIT_LIST_HEAD(&context->db_page_list);
	mutex_init(&context->db_page_mutex);

	if (ibdev->uverbs_abi_ver == MLX4_IB_UVERBS_NO_DEV_CAPS_ABI_VERSION)
		err = ib_copy_to_udata(udata, &resp_v3, sizeof(resp_v3));
	else
		err = ib_copy_to_udata(udata, &resp, sizeof(resp));

	if (err) {
		mlx4_uar_free(to_mdev(ibdev)->dev, &context->uar);
		kfree(context);
		return ERR_PTR(-EFAULT);
	}

	return &context->ibucontext;
}

static int mlx4_ib_dealloc_ucontext(struct ib_ucontext *ibcontext)
{
	struct mlx4_ib_ucontext *context = to_mucontext(ibcontext);

	mlx4_uar_free(to_mdev(ibcontext->device)->dev, &context->uar);
	kfree(context);

	return 0;
}

static void mlx4_ib_vma_open(struct vm_area_struct *area)
{
	/* vma_open is called when a new VMA is created on top of our VMA.
	 * This is done through either mremap flow or split_vma (usually due
	 * to mlock, madvise, munmap, etc.). We do not support a clone of the
	 * vma, as this VMA is strongly hardware related. Therefore we set the
	 * vm_ops of the newly created/cloned VMA to NULL, to prevent it from
	 * calling us again and trying to do incorrect actions. We assume that
	 * the original vma size is exactly a single page, and that there will
	 * be no "splitting" operations on it.
	 */
	area->vm_ops = NULL;
}

static void mlx4_ib_vma_close(struct vm_area_struct *area)
{
	struct mlx4_ib_vma_private_data *mlx4_ib_vma_priv_data;

	/* It's guaranteed that all VMAs opened on a FD are closed before the
	 * file itself is closed, therefore no sync is needed with the regular
	 * closing flow (e.g. mlx4_ib_dealloc_ucontext). However, a sync is
	 * needed with accessing the vma as part of
	 * mlx4_ib_disassociate_ucontext.
	 * The close operation is usually called under mm->mmap_sem except when
	 * the process is exiting. The exiting case is handled explicitly as
	 * part of mlx4_ib_disassociate_ucontext.
	 */
	mlx4_ib_vma_priv_data = (struct mlx4_ib_vma_private_data *)
				area->vm_private_data;

	/* Set the vma context pointer to null in the mlx4_ib driver's private
	 * data to protect against a race condition in
	 * mlx4_ib_disassociate_ucontext().
	 */
	mlx4_ib_vma_priv_data->vma = NULL;
}

static const struct vm_operations_struct mlx4_ib_vm_ops = {
	.open = mlx4_ib_vma_open,
	.close = mlx4_ib_vma_close
};

static void mlx4_ib_disassociate_ucontext(struct ib_ucontext *ibcontext)
{
	int i;
	int ret = 0;
	struct vm_area_struct *vma;
	struct mlx4_ib_ucontext *context = to_mucontext(ibcontext);
	struct task_struct *owning_process = NULL;
	struct mm_struct *owning_mm = NULL;

	owning_process = get_pid_task(ibcontext->tgid, PIDTYPE_PID);
	if (!owning_process)
		return;

	owning_mm = get_task_mm(owning_process);
	if (!owning_mm) {
		pr_info("no mm, disassociate ucontext is pending task termination\n");
		while (1) {
			/* make sure that the task is dead before returning; it
			 * may prevent a rare case of module removal in
			 * parallel to a call to mlx4_ib_vma_close.
			 */
			put_task_struct(owning_process);
			msleep(1);
			owning_process = get_pid_task(ibcontext->tgid,
						      PIDTYPE_PID);
			if (!owning_process ||
			    owning_process->state == TASK_DEAD) {
				pr_info("disassociate ucontext done, task was terminated\n");
				/* if the task was dead, we still need to release the task struct */
				if (owning_process)
					put_task_struct(owning_process);
				return;
			}
		}
	}

	/* need to protect from a race on closing the vma as part of
	 * mlx4_ib_vma_close().
	 */
	down_read(&owning_mm->mmap_sem);
	for (i = 0; i < HW_BAR_COUNT; i++) {
		vma = context->hw_bar_info[i].vma;
		if (!vma)
			continue;

		ret = zap_vma_ptes(context->hw_bar_info[i].vma,
				   context->hw_bar_info[i].vma->vm_start,
				   PAGE_SIZE);
		if (ret) {
			pr_err("Error: zap_vma_ptes failed for index=%d, ret=%d\n", i, ret);
			BUG_ON(1);
		}

		/* context going to be destroyed, should not access ops any more */
		context->hw_bar_info[i].vma->vm_ops = NULL;
	}

	up_read(&owning_mm->mmap_sem);
	mmput(owning_mm);
	put_task_struct(owning_process);
}

static void mlx4_ib_set_vma_data(struct vm_area_struct *vma,
				 struct mlx4_ib_vma_private_data *vma_private_data)
{
	vma_private_data->vma = vma;
	vma->vm_private_data = vma_private_data;
	vma->vm_ops = &mlx4_ib_vm_ops;
}

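/* Map the doorbell page (offset 0), blue flame page (offset 1) or the
 * internal clock page (offset 3) into user space, recording each VMA so
 * mlx4_ib_disassociate_ucontext() can later zap it.
 */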
static int mlx4_ib_mmap(struct ib_ucontext *context, struct vm_area_struct *vma)
{
	struct mlx4_ib_dev *dev = to_mdev(context->device);
	struct mlx4_ib_ucontext *mucontext = to_mucontext(context);

	if (vma->vm_end - vma->vm_start != PAGE_SIZE)
		return -EINVAL;

	if (vma->vm_pgoff == 0) {
		/* We prevent double mmaping on same context */
		if (mucontext->hw_bar_info[HW_BAR_DB].vma)
			return -EINVAL;

		vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);

		if (io_remap_pfn_range(vma, vma->vm_start,
				       to_mucontext(context)->uar.pfn,
				       PAGE_SIZE, vma->vm_page_prot))
			return -EAGAIN;

		mlx4_ib_set_vma_data(vma, &mucontext->hw_bar_info[HW_BAR_DB]);

	} else if (vma->vm_pgoff == 1 && dev->dev->caps.bf_reg_size != 0) {
		/* We prevent double mmaping on same context */
		if (mucontext->hw_bar_info[HW_BAR_BF].vma)
			return -EINVAL;

		vma->vm_page_prot = pgprot_writecombine(vma->vm_page_prot);

		if (io_remap_pfn_range(vma, vma->vm_start,
				       to_mucontext(context)->uar.pfn +
				       dev->dev->caps.num_uars,
				       PAGE_SIZE, vma->vm_page_prot))
			return -EAGAIN;

		mlx4_ib_set_vma_data(vma, &mucontext->hw_bar_info[HW_BAR_BF]);

	} else if (vma->vm_pgoff == 3) {
		struct mlx4_clock_params params;
		int ret;

		/* We prevent double mmaping on same context */
		if (mucontext->hw_bar_info[HW_BAR_CLOCK].vma)
			return -EINVAL;

		ret = mlx4_get_internal_clock_params(dev->dev, &params);

		if (ret)
			return ret;

		vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
		if (io_remap_pfn_range(vma, vma->vm_start,
				       (pci_resource_start(dev->dev->persist->pdev,
							   params.bar) +
					params.offset)
				       >> PAGE_SHIFT,
				       PAGE_SIZE, vma->vm_page_prot))
			return -EAGAIN;

		mlx4_ib_set_vma_data(vma,
				     &mucontext->hw_bar_info[HW_BAR_CLOCK]);
	} else {
		return -EINVAL;
	}

	return 0;
}

static struct ib_pd *mlx4_ib_alloc_pd(struct ib_device *ibdev,
				      struct ib_ucontext *context,
				      struct ib_udata *udata)
{
	struct mlx4_ib_pd *pd;
	int err;

	pd = kmalloc(sizeof *pd, GFP_KERNEL);
	if (!pd)
		return ERR_PTR(-ENOMEM);

	err = mlx4_pd_alloc(to_mdev(ibdev)->dev, &pd->pdn);
	if (err) {
		kfree(pd);
		return ERR_PTR(err);
	}

	if (context)
		if (ib_copy_to_udata(udata, &pd->pdn, sizeof (__u32))) {
			mlx4_pd_free(to_mdev(ibdev)->dev, pd->pdn);
			kfree(pd);
			return ERR_PTR(-EFAULT);
		}

	return &pd->ibpd;
}

static int mlx4_ib_dealloc_pd(struct ib_pd *pd)
{
	mlx4_pd_free(to_mdev(pd->device)->dev, to_mpd(pd)->pdn);
	kfree(pd);

	return 0;
}

static struct ib_xrcd *mlx4_ib_alloc_xrcd(struct ib_device *ibdev,
					  struct ib_ucontext *context,
					  struct ib_udata *udata)
{
	struct mlx4_ib_xrcd *xrcd;
	struct ib_cq_init_attr cq_attr = {};
	int err;

	if (!(to_mdev(ibdev)->dev->caps.flags & MLX4_DEV_CAP_FLAG_XRC))
		return ERR_PTR(-ENOSYS);

	xrcd = kmalloc(sizeof *xrcd, GFP_KERNEL);
	if (!xrcd)
		return ERR_PTR(-ENOMEM);

	err = mlx4_xrcd_alloc(to_mdev(ibdev)->dev, &xrcd->xrcdn);
	if (err)
		goto err1;

	xrcd->pd = ib_alloc_pd(ibdev);
	if (IS_ERR(xrcd->pd)) {
		err = PTR_ERR(xrcd->pd);
		goto err2;
	}

	cq_attr.cqe = 1;
	xrcd->cq = ib_create_cq(ibdev, NULL, NULL, xrcd, &cq_attr);
	if (IS_ERR(xrcd->cq)) {
		err = PTR_ERR(xrcd->cq);
		goto err3;
	}

	return &xrcd->ibxrcd;

err3:
	ib_dealloc_pd(xrcd->pd);
err2:
	mlx4_xrcd_free(to_mdev(ibdev)->dev, xrcd->xrcdn);
err1:
	kfree(xrcd);
	return ERR_PTR(err);
}

static int mlx4_ib_dealloc_xrcd(struct ib_xrcd *xrcd)
{
	ib_destroy_cq(to_mxrcd(xrcd)->cq);
	ib_dealloc_pd(to_mxrcd(xrcd)->pd);
	mlx4_xrcd_free(to_mdev(xrcd->device)->dev, to_mxrcd(xrcd)->xrcdn);
	kfree(xrcd);

	return 0;
}

static int add_gid_entry(struct ib_qp *ibqp, union ib_gid *gid)
{
	struct mlx4_ib_qp *mqp = to_mqp(ibqp);
	struct mlx4_ib_dev *mdev = to_mdev(ibqp->device);
	struct mlx4_ib_gid_entry *ge;

	ge = kzalloc(sizeof *ge, GFP_KERNEL);
	if (!ge)
		return -ENOMEM;

	ge->gid = *gid;
	if (mlx4_ib_add_mc(mdev, mqp, gid)) {
		ge->port = mqp->port;
		ge->added = 1;
	}

	mutex_lock(&mqp->mutex);
	list_add_tail(&ge->list, &mqp->gid_list);
	mutex_unlock(&mqp->mutex);

	return 0;
}

static void mlx4_ib_delete_counters_table(struct mlx4_ib_dev *ibdev,
					  struct mlx4_ib_counters *ctr_table)
{
	struct counter_index *counter, *tmp_count;

	mutex_lock(&ctr_table->mutex);
	list_for_each_entry_safe(counter, tmp_count, &ctr_table->counters_list,
				 list) {
		if (counter->allocated)
			mlx4_counter_free(ibdev->dev, counter->index);
		list_del(&counter->list);
		kfree(counter);
	}
	mutex_unlock(&ctr_table->mutex);
}

int mlx4_ib_add_mc(struct mlx4_ib_dev *mdev, struct mlx4_ib_qp *mqp,
		   union ib_gid *gid)
{
	struct net_device *ndev;
	int ret = 0;

	if (!mqp->port)
		return 0;

	spin_lock_bh(&mdev->iboe.lock);
	ndev = mdev->iboe.netdevs[mqp->port - 1];
	if (ndev)
		dev_hold(ndev);
	spin_unlock_bh(&mdev->iboe.lock);

	if (ndev) {
		ret = 1;
		dev_put(ndev);
	}

	return ret;
}

struct mlx4_ib_steering {
	struct list_head list;
	struct mlx4_flow_reg_id reg_id;
	union ib_gid gid;
};

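/* Translate one ib_flow_spec into the firmware _rule_hw format.
 * Returns the hardware rule size on success, or -EINVAL for an
 * unsupported or malformed spec.
 */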
static int parse_flow_attr(struct mlx4_dev *dev,
			   u32 qp_num,
			   union ib_flow_spec *ib_spec,
			   struct _rule_hw *mlx4_spec)
{
	enum mlx4_net_trans_rule_id type;

	switch (ib_spec->type) {
	case IB_FLOW_SPEC_ETH:
		type = MLX4_NET_TRANS_RULE_ID_ETH;
		memcpy(mlx4_spec->eth.dst_mac, ib_spec->eth.val.dst_mac,
		       ETH_ALEN);
		memcpy(mlx4_spec->eth.dst_mac_msk, ib_spec->eth.mask.dst_mac,
		       ETH_ALEN);
		mlx4_spec->eth.vlan_tag = ib_spec->eth.val.vlan_tag;
		mlx4_spec->eth.vlan_tag_msk = ib_spec->eth.mask.vlan_tag;
		break;
	case IB_FLOW_SPEC_IB:
		type = MLX4_NET_TRANS_RULE_ID_IB;
		mlx4_spec->ib.l3_qpn =
			cpu_to_be32(qp_num);
		mlx4_spec->ib.qpn_mask =
			cpu_to_be32(MLX4_IB_FLOW_QPN_MASK);
		break;

	case IB_FLOW_SPEC_IPV4:
		type = MLX4_NET_TRANS_RULE_ID_IPV4;
		mlx4_spec->ipv4.src_ip = ib_spec->ipv4.val.src_ip;
		mlx4_spec->ipv4.src_ip_msk = ib_spec->ipv4.mask.src_ip;
		mlx4_spec->ipv4.dst_ip = ib_spec->ipv4.val.dst_ip;
		mlx4_spec->ipv4.dst_ip_msk = ib_spec->ipv4.mask.dst_ip;
		break;

	case IB_FLOW_SPEC_TCP:
	case IB_FLOW_SPEC_UDP:
		type = ib_spec->type == IB_FLOW_SPEC_TCP ?
					MLX4_NET_TRANS_RULE_ID_TCP :
					MLX4_NET_TRANS_RULE_ID_UDP;
		mlx4_spec->tcp_udp.dst_port = ib_spec->tcp_udp.val.dst_port;
		mlx4_spec->tcp_udp.dst_port_msk = ib_spec->tcp_udp.mask.dst_port;
		mlx4_spec->tcp_udp.src_port = ib_spec->tcp_udp.val.src_port;
		mlx4_spec->tcp_udp.src_port_msk = ib_spec->tcp_udp.mask.src_port;
		break;

	default:
		return -EINVAL;
	}
	if (mlx4_map_sw_to_hw_steering_id(dev, type) < 0 ||
	    mlx4_hw_rule_sz(dev, type) < 0)
		return -EINVAL;
	mlx4_spec->id = cpu_to_be16(mlx4_map_sw_to_hw_steering_id(dev, type));
	mlx4_spec->size = mlx4_hw_rule_sz(dev, type) >> 2;
	return mlx4_hw_rule_sz(dev, type);
}

struct default_rules {
	__u32 mandatory_fields[IB_FLOW_SPEC_SUPPORT_LAYERS];
	__u32 mandatory_not_fields[IB_FLOW_SPEC_SUPPORT_LAYERS];
	__u32 rules_create_list[IB_FLOW_SPEC_SUPPORT_LAYERS];
	__u8  link_layer;
};

static const struct default_rules default_table[] = {
	{
		.mandatory_fields = {IB_FLOW_SPEC_IPV4},
		.mandatory_not_fields = {IB_FLOW_SPEC_ETH},
		.rules_create_list = {IB_FLOW_SPEC_IB},
		.link_layer = IB_LINK_LAYER_INFINIBAND
	}
};

1434static int __mlx4_ib_default_rules_match(struct ib_qp *qp,
1435 struct ib_flow_attr *flow_attr)
1436{
1437 int i, j, k;
1438 void *ib_flow;
1439 const struct default_rules *pdefault_rules = default_table;
1440 u8 link_layer = rdma_port_get_link_layer(qp->device, flow_attr->port);
1441
Fabian Fredericka57f23f2014-08-12 19:20:07 -04001442 for (i = 0; i < ARRAY_SIZE(default_table); i++, pdefault_rules++) {
Matan Baraka37a1a42013-11-07 15:25:16 +02001443 __u32 field_types[IB_FLOW_SPEC_SUPPORT_LAYERS];
1444 memset(&field_types, 0, sizeof(field_types));
1445
1446 if (link_layer != pdefault_rules->link_layer)
1447 continue;
1448
1449 ib_flow = flow_attr + 1;
1450 /* we assume the specs are sorted */
1451 for (j = 0, k = 0; k < IB_FLOW_SPEC_SUPPORT_LAYERS &&
1452 j < flow_attr->num_of_specs; k++) {
1453 union ib_flow_spec *current_flow =
1454 (union ib_flow_spec *)ib_flow;
1455
1456 /* same layer but different type */
1457 if (((current_flow->type & IB_FLOW_SPEC_LAYER_MASK) ==
1458 (pdefault_rules->mandatory_fields[k] &
1459 IB_FLOW_SPEC_LAYER_MASK)) &&
1460 (current_flow->type !=
1461 pdefault_rules->mandatory_fields[k]))
1462 goto out;
1463
1464 /* same layer, try match next one */
1465 if (current_flow->type ==
1466 pdefault_rules->mandatory_fields[k]) {
1467 j++;
1468 ib_flow +=
1469 ((union ib_flow_spec *)ib_flow)->size;
1470 }
1471 }
1472
1473 ib_flow = flow_attr + 1;
1474 for (j = 0; j < flow_attr->num_of_specs;
1475 j++, ib_flow += ((union ib_flow_spec *)ib_flow)->size)
1476 for (k = 0; k < IB_FLOW_SPEC_SUPPORT_LAYERS; k++)
1477 /* same layer and same type */
1478 if (((union ib_flow_spec *)ib_flow)->type ==
1479 pdefault_rules->mandatory_not_fields[k])
1480 goto out;
1481
1482 return i;
1483 }
1484out:
1485 return -1;
1486}
1487
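/*
 * Emit the implicit specs listed in a matched default rule into the
 * command mailbox, after the control segment.  Returns the number of
 * bytes written, or -EINVAL on an unrecognized list entry.
 */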
1488static int __mlx4_ib_create_default_rules(
1489 struct mlx4_ib_dev *mdev,
1490 struct ib_qp *qp,
1491 const struct default_rules *pdefault_rules,
1492 struct _rule_hw *mlx4_spec) {
1493 int size = 0;
1494 int i;
1495
Fabian Fredericka57f23f2014-08-12 19:20:07 -04001496 for (i = 0; i < ARRAY_SIZE(pdefault_rules->rules_create_list); i++) {
Matan Baraka37a1a42013-11-07 15:25:16 +02001497 int ret;
1498 union ib_flow_spec ib_spec;
1499 switch (pdefault_rules->rules_create_list[i]) {
1500 case 0:
1501 /* no rule */
1502 continue;
1503 case IB_FLOW_SPEC_IB:
1504 ib_spec.type = IB_FLOW_SPEC_IB;
1505 ib_spec.size = sizeof(struct ib_flow_spec_ib);
1506
1507 break;
1508 default:
1509 /* invalid rule */
1510 return -EINVAL;
1511 }
1512		/* We must put an empty rule here; the qpn is ignored */
1513 ret = parse_flow_attr(mdev->dev, 0, &ib_spec,
1514 mlx4_spec);
1515 if (ret < 0) {
1516			pr_info("failed to parse default rule spec\n");
1517 return -EINVAL;
1518 }
1519
1520 mlx4_spec = (void *)mlx4_spec + ret;
1521 size += ret;
1522 }
1523 return size;
1524}
1525
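/*
 * Build and execute a MLX4_QP_FLOW_STEERING_ATTACH command: a control
 * segment (domain and priority, rule type, port, QPN) followed by any
 * matched default rules and then the caller's parsed specs.  On
 * success the firmware returns a 64-bit registration id in *reg_id,
 * later passed to __mlx4_ib_destroy_flow() to detach the rule.
 */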
Hadar Hen Zionf77c0162013-08-14 13:58:31 +03001526static int __mlx4_ib_create_flow(struct ib_qp *qp, struct ib_flow_attr *flow_attr,
1527 int domain,
1528 enum mlx4_net_trans_promisc_mode flow_type,
1529 u64 *reg_id)
1530{
1531 int ret, i;
1532 int size = 0;
1533 void *ib_flow;
1534 struct mlx4_ib_dev *mdev = to_mdev(qp->device);
1535 struct mlx4_cmd_mailbox *mailbox;
1536 struct mlx4_net_trans_rule_hw_ctrl *ctrl;
Matan Baraka37a1a42013-11-07 15:25:16 +02001537 int default_flow;
Hadar Hen Zionf77c0162013-08-14 13:58:31 +03001538
1539 static const u16 __mlx4_domain[] = {
1540 [IB_FLOW_DOMAIN_USER] = MLX4_DOMAIN_UVERBS,
1541 [IB_FLOW_DOMAIN_ETHTOOL] = MLX4_DOMAIN_ETHTOOL,
1542 [IB_FLOW_DOMAIN_RFS] = MLX4_DOMAIN_RFS,
1543 [IB_FLOW_DOMAIN_NIC] = MLX4_DOMAIN_NIC,
1544 };
1545
1546 if (flow_attr->priority > MLX4_IB_FLOW_MAX_PRIO) {
1547 pr_err("Invalid priority value %d\n", flow_attr->priority);
1548 return -EINVAL;
1549 }
1550
1551 if (domain >= IB_FLOW_DOMAIN_NUM) {
1552 pr_err("Invalid domain value %d\n", domain);
1553 return -EINVAL;
1554 }
1555
1556 if (mlx4_map_sw_to_hw_steering_mode(mdev->dev, flow_type) < 0)
1557 return -EINVAL;
1558
1559 mailbox = mlx4_alloc_cmd_mailbox(mdev->dev);
1560 if (IS_ERR(mailbox))
1561 return PTR_ERR(mailbox);
Hadar Hen Zionf77c0162013-08-14 13:58:31 +03001562 ctrl = mailbox->buf;
1563
1564 ctrl->prio = cpu_to_be16(__mlx4_domain[domain] |
1565 flow_attr->priority);
1566 ctrl->type = mlx4_map_sw_to_hw_steering_mode(mdev->dev, flow_type);
1567 ctrl->port = flow_attr->port;
1568 ctrl->qpn = cpu_to_be32(qp->qp_num);
1569
1570 ib_flow = flow_attr + 1;
1571 size += sizeof(struct mlx4_net_trans_rule_hw_ctrl);
Matan Baraka37a1a42013-11-07 15:25:16 +02001572 /* Add default flows */
1573 default_flow = __mlx4_ib_default_rules_match(qp, flow_attr);
1574 if (default_flow >= 0) {
1575 ret = __mlx4_ib_create_default_rules(
1576 mdev, qp, default_table + default_flow,
1577 mailbox->buf + size);
1578 if (ret < 0) {
1579 mlx4_free_cmd_mailbox(mdev->dev, mailbox);
1580 return -EINVAL;
1581 }
1582 size += ret;
1583 }
Hadar Hen Zionf77c0162013-08-14 13:58:31 +03001584 for (i = 0; i < flow_attr->num_of_specs; i++) {
Matan Baraka37a1a42013-11-07 15:25:16 +02001585 ret = parse_flow_attr(mdev->dev, qp->qp_num, ib_flow,
1586 mailbox->buf + size);
Hadar Hen Zionf77c0162013-08-14 13:58:31 +03001587 if (ret < 0) {
1588 mlx4_free_cmd_mailbox(mdev->dev, mailbox);
1589 return -EINVAL;
1590 }
1591 ib_flow += ((union ib_flow_spec *) ib_flow)->size;
1592 size += ret;
1593 }
1594
1595 ret = mlx4_cmd_imm(mdev->dev, mailbox->dma, reg_id, size >> 2, 0,
1596 MLX4_QP_FLOW_STEERING_ATTACH, MLX4_CMD_TIME_CLASS_A,
Matan Barak48564132015-05-31 09:30:15 +03001597 MLX4_CMD_WRAPPED);
Hadar Hen Zionf77c0162013-08-14 13:58:31 +03001598 if (ret == -ENOMEM)
1599		pr_err("mcg table is full. Failed to register network rule.\n");
1600	else if (ret == -ENXIO)
1601		pr_err("Device managed flow steering is disabled. Failed to register network rule.\n");
1602	else if (ret)
1603		pr_err("Invalid argument. Failed to register network rule.\n");
1604
1605 mlx4_free_cmd_mailbox(mdev->dev, mailbox);
1606 return ret;
1607}
1608
1609static int __mlx4_ib_destroy_flow(struct mlx4_dev *dev, u64 reg_id)
1610{
1611 int err;
1612 err = mlx4_cmd(dev, reg_id, 0, 0,
1613 MLX4_QP_FLOW_STEERING_DETACH, MLX4_CMD_TIME_CLASS_A,
Matan Barak48564132015-05-31 09:30:15 +03001614 MLX4_CMD_WRAPPED);
Hadar Hen Zionf77c0162013-08-14 13:58:31 +03001615 if (err)
1616		pr_err("Failed to detach network rule, registration id = 0x%llx\n",
1617 reg_id);
1618 return err;
1619}
1620
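/*
 * When VXLAN tunnel offload is active (and DMFS is not in A0 static
 * mode), a flow attribute consisting of exactly one ETH spec also
 * gets a tunnel steering entry keyed by destination MAC, port and
 * QPN; in every other configuration this helper is a no-op.
 */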
Or Gerlitzd2fce8a2014-08-27 16:47:49 +03001621static int mlx4_ib_tunnel_steer_add(struct ib_qp *qp, struct ib_flow_attr *flow_attr,
1622 u64 *reg_id)
1623{
1624 void *ib_flow;
1625 union ib_flow_spec *ib_spec;
1626 struct mlx4_dev *dev = to_mdev(qp->device)->dev;
1627 int err = 0;
1628
Or Gerlitz5eff6da2015-01-15 15:28:54 +02001629 if (dev->caps.tunnel_offload_mode != MLX4_TUNNEL_OFFLOAD_MODE_VXLAN ||
1630 dev->caps.dmfs_high_steer_mode == MLX4_STEERING_DMFS_A0_STATIC)
Or Gerlitzd2fce8a2014-08-27 16:47:49 +03001631 return 0; /* do nothing */
1632
1633 ib_flow = flow_attr + 1;
1634 ib_spec = (union ib_flow_spec *)ib_flow;
1635
1636 if (ib_spec->type != IB_FLOW_SPEC_ETH || flow_attr->num_of_specs != 1)
1637 return 0; /* do nothing */
1638
1639 err = mlx4_tunnel_steer_add(to_mdev(qp->device)->dev, ib_spec->eth.val.dst_mac,
1640 flow_attr->port, qp->qp_num,
1641 MLX4_DOMAIN_UVERBS | (flow_attr->priority & 0xff),
1642 reg_id);
1643 return err;
1644}
1645
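/*
 * ib_device->create_flow hook.  Maps the verbs attribute type onto
 * one or two hardware rule types (a sniffer needs both a UC and a MC
 * rule), attaches each of them, and, when the ports are bonded,
 * attaches a mirror copy of every rule on port 2 as well.
 */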
Hadar Hen Zionf77c0162013-08-14 13:58:31 +03001646static struct ib_flow *mlx4_ib_create_flow(struct ib_qp *qp,
1647 struct ib_flow_attr *flow_attr,
1648 int domain)
1649{
Moni Shoua146d6e12015-02-03 16:48:38 +02001650 int err = 0, i = 0, j = 0;
Hadar Hen Zionf77c0162013-08-14 13:58:31 +03001651 struct mlx4_ib_flow *mflow;
1652 enum mlx4_net_trans_promisc_mode type[2];
Moni Shoua146d6e12015-02-03 16:48:38 +02001653 struct mlx4_dev *dev = (to_mdev(qp->device))->dev;
1654 int is_bonded = mlx4_is_bonded(dev);
Hadar Hen Zionf77c0162013-08-14 13:58:31 +03001655
1656 memset(type, 0, sizeof(type));
1657
1658 mflow = kzalloc(sizeof(*mflow), GFP_KERNEL);
1659 if (!mflow) {
1660 err = -ENOMEM;
1661 goto err_free;
1662 }
1663
1664 switch (flow_attr->type) {
1665 case IB_FLOW_ATTR_NORMAL:
1666 type[0] = MLX4_FS_REGULAR;
1667 break;
1668
1669 case IB_FLOW_ATTR_ALL_DEFAULT:
1670 type[0] = MLX4_FS_ALL_DEFAULT;
1671 break;
1672
1673 case IB_FLOW_ATTR_MC_DEFAULT:
1674 type[0] = MLX4_FS_MC_DEFAULT;
1675 break;
1676
1677 case IB_FLOW_ATTR_SNIFFER:
1678 type[0] = MLX4_FS_UC_SNIFFER;
1679 type[1] = MLX4_FS_MC_SNIFFER;
1680 break;
1681
1682 default:
1683 err = -EINVAL;
1684 goto err_free;
1685 }
1686
1687 while (i < ARRAY_SIZE(type) && type[i]) {
1688 err = __mlx4_ib_create_flow(qp, flow_attr, domain, type[i],
Moni Shoua146d6e12015-02-03 16:48:38 +02001689 &mflow->reg_id[i].id);
Hadar Hen Zionf77c0162013-08-14 13:58:31 +03001690 if (err)
Or Gerlitz571e1b22014-10-30 15:59:28 +02001691 goto err_create_flow;
Moni Shoua146d6e12015-02-03 16:48:38 +02001692 if (is_bonded) {
Moni Shoua824c25c2015-02-08 11:49:33 +02001693 /* Application always sees one port so the mirror rule
1694 * must be on port #2
1695 */
Moni Shoua146d6e12015-02-03 16:48:38 +02001696 flow_attr->port = 2;
1697 err = __mlx4_ib_create_flow(qp, flow_attr,
1698 domain, type[j],
1699 &mflow->reg_id[j].mirror);
1700 flow_attr->port = 1;
1701 if (err)
1702 goto err_create_flow;
1703 j++;
1704 }
1705
Roland Dreier11562562015-05-29 23:11:27 -07001706 i++;
Hadar Hen Zionf77c0162013-08-14 13:58:31 +03001707 }
1708
Or Gerlitzd2fce8a2014-08-27 16:47:49 +03001709 if (i < ARRAY_SIZE(type) && flow_attr->type == IB_FLOW_ATTR_NORMAL) {
Moni Shoua146d6e12015-02-03 16:48:38 +02001710 err = mlx4_ib_tunnel_steer_add(qp, flow_attr,
1711 &mflow->reg_id[i].id);
Or Gerlitzd2fce8a2014-08-27 16:47:49 +03001712 if (err)
Or Gerlitz571e1b22014-10-30 15:59:28 +02001713 goto err_create_flow;
Roland Dreier11562562015-05-29 23:11:27 -07001714
Moni Shoua146d6e12015-02-03 16:48:38 +02001715 if (is_bonded) {
1716 flow_attr->port = 2;
1717 err = mlx4_ib_tunnel_steer_add(qp, flow_attr,
1718 &mflow->reg_id[j].mirror);
1719 flow_attr->port = 1;
1720 if (err)
1721 goto err_create_flow;
1722 j++;
1723 }
1724		/* the tunnel rule occupied slot i, move past it */
Roland Dreier11562562015-05-29 23:11:27 -07001725 i++;
Or Gerlitzd2fce8a2014-08-27 16:47:49 +03001726 }
1727
Hadar Hen Zionf77c0162013-08-14 13:58:31 +03001728 return &mflow->ibflow;
1729
Or Gerlitz571e1b22014-10-30 15:59:28 +02001730err_create_flow:
1731	while (i) {
Moni Shoua146d6e12015-02-03 16:48:38 +02001732		i--;
1733		(void)__mlx4_ib_destroy_flow(to_mdev(qp->device)->dev,
1734					     mflow->reg_id[i].id);
Or Gerlitz571e1b22014-10-30 15:59:28 +02001735	}
Moni Shoua146d6e12015-02-03 16:48:38 +02001736
1737	while (j) {
1738		j--;
1739		(void)__mlx4_ib_destroy_flow(to_mdev(qp->device)->dev,
1740					     mflow->reg_id[j].mirror);
1741	}
Hadar Hen Zionf77c0162013-08-14 13:58:31 +03001742err_free:
1743 kfree(mflow);
1744 return ERR_PTR(err);
1745}
1746
1747static int mlx4_ib_destroy_flow(struct ib_flow *flow_id)
1748{
1749 int err, ret = 0;
1750 int i = 0;
1751 struct mlx4_ib_dev *mdev = to_mdev(flow_id->qp->device);
1752 struct mlx4_ib_flow *mflow = to_mflow(flow_id);
1753
Moni Shoua146d6e12015-02-03 16:48:38 +02001754 while (i < ARRAY_SIZE(mflow->reg_id) && mflow->reg_id[i].id) {
1755 err = __mlx4_ib_destroy_flow(mdev->dev, mflow->reg_id[i].id);
Hadar Hen Zionf77c0162013-08-14 13:58:31 +03001756 if (err)
1757 ret = err;
Moni Shoua146d6e12015-02-03 16:48:38 +02001758 if (mflow->reg_id[i].mirror) {
1759 err = __mlx4_ib_destroy_flow(mdev->dev,
1760 mflow->reg_id[i].mirror);
1761 if (err)
1762 ret = err;
1763 }
Hadar Hen Zionf77c0162013-08-14 13:58:31 +03001764 i++;
1765 }
1766
1767 kfree(mflow);
1768 return ret;
1769}
1770
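/*
 * ib_device->attach_mcast hook.  Attaches the QP to the multicast
 * group in firmware (twice under bonding, once per physical port) and
 * records the gid in the QP's gid list and, under device-managed
 * steering, in a per-QP list keyed by gid so that detach can recover
 * the firmware registration id.
 */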
Roland Dreier225c7b12007-05-08 18:00:38 -07001771static int mlx4_ib_mcg_attach(struct ib_qp *ibqp, union ib_gid *gid, u16 lid)
1772{
Eli Cohenfa417f72010-10-24 21:08:52 -07001773 int err;
1774 struct mlx4_ib_dev *mdev = to_mdev(ibqp->device);
Moni Shoua146d6e12015-02-03 16:48:38 +02001775 struct mlx4_dev *dev = mdev->dev;
Eli Cohenfa417f72010-10-24 21:08:52 -07001776 struct mlx4_ib_qp *mqp = to_mqp(ibqp);
Hadar Hen Zion0ff1fb62012-07-05 04:03:46 +00001777 struct mlx4_ib_steering *ib_steering = NULL;
Or Gerlitze9a7faf2014-12-17 16:17:34 +02001778 enum mlx4_protocol prot = MLX4_PROT_IB_IPV6;
Moni Shoua146d6e12015-02-03 16:48:38 +02001779 struct mlx4_flow_reg_id reg_id;
Eli Cohenfa417f72010-10-24 21:08:52 -07001780
Hadar Hen Zion0ff1fb62012-07-05 04:03:46 +00001781 if (mdev->dev->caps.steering_mode ==
1782 MLX4_STEERING_MODE_DEVICE_MANAGED) {
1783 ib_steering = kmalloc(sizeof(*ib_steering), GFP_KERNEL);
1784 if (!ib_steering)
1785 return -ENOMEM;
1786 }
1787
1788 err = mlx4_multicast_attach(mdev->dev, &mqp->mqp, gid->raw, mqp->port,
1789 !!(mqp->flags &
1790 MLX4_IB_QP_BLOCK_MULTICAST_LOOPBACK),
Moni Shoua146d6e12015-02-03 16:48:38 +02001791 prot, &reg_id.id);
Or Gerlitze9a7faf2014-12-17 16:17:34 +02001792 if (err) {
1793 pr_err("multicast attach op failed, err %d\n", err);
Hadar Hen Zion0ff1fb62012-07-05 04:03:46 +00001794 goto err_malloc;
Or Gerlitze9a7faf2014-12-17 16:17:34 +02001795 }
Eli Cohenfa417f72010-10-24 21:08:52 -07001796
Moni Shoua146d6e12015-02-03 16:48:38 +02001797 reg_id.mirror = 0;
1798 if (mlx4_is_bonded(dev)) {
Moni Shoua824c25c2015-02-08 11:49:33 +02001799 err = mlx4_multicast_attach(mdev->dev, &mqp->mqp, gid->raw,
1800 (mqp->port == 1) ? 2 : 1,
Moni Shoua146d6e12015-02-03 16:48:38 +02001801 !!(mqp->flags &
1802 MLX4_IB_QP_BLOCK_MULTICAST_LOOPBACK),
1803 prot, &reg_id.mirror);
1804 if (err)
1805 goto err_add;
1806 }
1807
Eli Cohenfa417f72010-10-24 21:08:52 -07001808 err = add_gid_entry(ibqp, gid);
1809 if (err)
1810 goto err_add;
1811
Hadar Hen Zion0ff1fb62012-07-05 04:03:46 +00001812 if (ib_steering) {
1813 memcpy(ib_steering->gid.raw, gid->raw, 16);
1814 ib_steering->reg_id = reg_id;
1815 mutex_lock(&mqp->mutex);
1816 list_add(&ib_steering->list, &mqp->steering_rules);
1817 mutex_unlock(&mqp->mutex);
1818 }
Eli Cohenfa417f72010-10-24 21:08:52 -07001819 return 0;
1820
1821err_add:
Hadar Hen Zion0ff1fb62012-07-05 04:03:46 +00001822 mlx4_multicast_detach(mdev->dev, &mqp->mqp, gid->raw,
Moni Shoua146d6e12015-02-03 16:48:38 +02001823 prot, reg_id.id);
1824 if (reg_id.mirror)
1825 mlx4_multicast_detach(mdev->dev, &mqp->mqp, gid->raw,
1826 prot, reg_id.mirror);
Hadar Hen Zion0ff1fb62012-07-05 04:03:46 +00001827err_malloc:
1828 kfree(ib_steering);
1829
Eli Cohenfa417f72010-10-24 21:08:52 -07001830 return err;
1831}
1832
1833static struct mlx4_ib_gid_entry *find_gid_entry(struct mlx4_ib_qp *qp, u8 *raw)
1834{
1835 struct mlx4_ib_gid_entry *ge;
1836 struct mlx4_ib_gid_entry *tmp;
1837 struct mlx4_ib_gid_entry *ret = NULL;
1838
1839 list_for_each_entry_safe(ge, tmp, &qp->gid_list, list) {
1840 if (!memcmp(raw, ge->gid.raw, 16)) {
1841 ret = ge;
1842 break;
1843 }
1844 }
1845
1846 return ret;
Roland Dreier225c7b12007-05-08 18:00:38 -07001847}
1848
1849static int mlx4_ib_mcg_detach(struct ib_qp *ibqp, union ib_gid *gid, u16 lid)
1850{
Eli Cohenfa417f72010-10-24 21:08:52 -07001851 int err;
1852 struct mlx4_ib_dev *mdev = to_mdev(ibqp->device);
Moni Shoua146d6e12015-02-03 16:48:38 +02001853 struct mlx4_dev *dev = mdev->dev;
Eli Cohenfa417f72010-10-24 21:08:52 -07001854 struct mlx4_ib_qp *mqp = to_mqp(ibqp);
Eli Cohenfa417f72010-10-24 21:08:52 -07001855 struct net_device *ndev;
1856 struct mlx4_ib_gid_entry *ge;
Moni Shoua146d6e12015-02-03 16:48:38 +02001857 struct mlx4_flow_reg_id reg_id = {0, 0};
Or Gerlitze9a7faf2014-12-17 16:17:34 +02001858 enum mlx4_protocol prot = MLX4_PROT_IB_IPV6;
Eli Cohenfa417f72010-10-24 21:08:52 -07001859
Hadar Hen Zion0ff1fb62012-07-05 04:03:46 +00001860 if (mdev->dev->caps.steering_mode ==
1861 MLX4_STEERING_MODE_DEVICE_MANAGED) {
1862 struct mlx4_ib_steering *ib_steering;
1863
1864 mutex_lock(&mqp->mutex);
1865 list_for_each_entry(ib_steering, &mqp->steering_rules, list) {
1866 if (!memcmp(ib_steering->gid.raw, gid->raw, 16)) {
1867 list_del(&ib_steering->list);
1868 break;
1869 }
1870 }
1871 mutex_unlock(&mqp->mutex);
1872 if (&ib_steering->list == &mqp->steering_rules) {
1873 pr_err("Couldn't find reg_id for mgid. Steering rule is left attached\n");
1874 return -EINVAL;
1875 }
1876 reg_id = ib_steering->reg_id;
1877 kfree(ib_steering);
1878 }
1879
1880 err = mlx4_multicast_detach(mdev->dev, &mqp->mqp, gid->raw,
Moni Shoua146d6e12015-02-03 16:48:38 +02001881 prot, reg_id.id);
Eli Cohenfa417f72010-10-24 21:08:52 -07001882 if (err)
1883 return err;
1884
Moni Shoua146d6e12015-02-03 16:48:38 +02001885 if (mlx4_is_bonded(dev)) {
1886 err = mlx4_multicast_detach(mdev->dev, &mqp->mqp, gid->raw,
1887 prot, reg_id.mirror);
1888 if (err)
1889 return err;
1890 }
1891
Eli Cohenfa417f72010-10-24 21:08:52 -07001892 mutex_lock(&mqp->mutex);
1893 ge = find_gid_entry(mqp, gid->raw);
1894 if (ge) {
Jack Morgensteindba3ad22014-08-21 14:28:41 +03001895 spin_lock_bh(&mdev->iboe.lock);
Eli Cohenfa417f72010-10-24 21:08:52 -07001896 ndev = ge->added ? mdev->iboe.netdevs[ge->port - 1] : NULL;
1897 if (ndev)
1898 dev_hold(ndev);
Jack Morgensteindba3ad22014-08-21 14:28:41 +03001899 spin_unlock_bh(&mdev->iboe.lock);
Moni Shouad487ee72013-12-12 18:03:13 +02001900 if (ndev)
Eli Cohenfa417f72010-10-24 21:08:52 -07001901 dev_put(ndev);
Eli Cohenfa417f72010-10-24 21:08:52 -07001902 list_del(&ge->list);
1903 kfree(ge);
1904 } else
Shlomo Pongratz987c8f82012-04-29 17:04:26 +03001905 pr_warn("could not find mgid entry\n");
Eli Cohenfa417f72010-10-24 21:08:52 -07001906
1907 mutex_unlock(&mqp->mutex);
1908
1909 return 0;
Roland Dreier225c7b12007-05-08 18:00:38 -07001910}
1911
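/*
 * Query the SMA via MAD_IFC (network view on the SR-IOV master) for
 * the node description, hardware revision and node GUID, and cache
 * them on the ib_device.
 */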
1912static int init_node_data(struct mlx4_ib_dev *dev)
1913{
1914 struct ib_smp *in_mad = NULL;
1915 struct ib_smp *out_mad = NULL;
Jack Morgenstein0a9a0182012-08-03 08:40:45 +00001916 int mad_ifc_flags = MLX4_MAD_IFC_IGNORE_KEYS;
Roland Dreier225c7b12007-05-08 18:00:38 -07001917 int err = -ENOMEM;
1918
1919 in_mad = kzalloc(sizeof *in_mad, GFP_KERNEL);
1920 out_mad = kmalloc(sizeof *out_mad, GFP_KERNEL);
1921 if (!in_mad || !out_mad)
1922 goto out;
1923
1924 init_query_mad(in_mad);
1925 in_mad->attr_id = IB_SMP_ATTR_NODE_DESC;
Jack Morgenstein0a9a0182012-08-03 08:40:45 +00001926 if (mlx4_is_master(dev->dev))
1927 mad_ifc_flags |= MLX4_MAD_IFC_NET_VIEW;
Roland Dreier225c7b12007-05-08 18:00:38 -07001928
Jack Morgenstein0a9a0182012-08-03 08:40:45 +00001929 err = mlx4_MAD_IFC(dev, mad_ifc_flags, 1, NULL, NULL, in_mad, out_mad);
Roland Dreier225c7b12007-05-08 18:00:38 -07001930 if (err)
1931 goto out;
1932
1933 memcpy(dev->ib_dev.node_desc, out_mad->data, 64);
1934
1935 in_mad->attr_id = IB_SMP_ATTR_NODE_INFO;
1936
Jack Morgenstein0a9a0182012-08-03 08:40:45 +00001937 err = mlx4_MAD_IFC(dev, mad_ifc_flags, 1, NULL, NULL, in_mad, out_mad);
Roland Dreier225c7b12007-05-08 18:00:38 -07001938 if (err)
1939 goto out;
1940
Jack Morgenstein992e8e6e2012-08-03 08:40:54 +00001941 dev->dev->rev_id = be32_to_cpup((__be32 *) (out_mad->data + 32));
Roland Dreier225c7b12007-05-08 18:00:38 -07001942 memcpy(&dev->ib_dev.node_guid, out_mad->data + 12, 8);
1943
1944out:
1945 kfree(in_mad);
1946 kfree(out_mad);
1947 return err;
1948}
1949
Tony Jonesf4e91eb2008-02-22 00:13:36 +01001950static ssize_t show_hca(struct device *device, struct device_attribute *attr,
1951 char *buf)
Jack Morgensteincd9281d2007-09-18 09:14:18 +02001952{
Tony Jonesf4e91eb2008-02-22 00:13:36 +01001953 struct mlx4_ib_dev *dev =
1954 container_of(device, struct mlx4_ib_dev, ib_dev.dev);
Yishai Hadas872bf2f2015-01-25 16:59:35 +02001955 return sprintf(buf, "MT%d\n", dev->dev->persist->pdev->device);
Jack Morgensteincd9281d2007-09-18 09:14:18 +02001956}
1957
Tony Jonesf4e91eb2008-02-22 00:13:36 +01001958static ssize_t show_fw_ver(struct device *device, struct device_attribute *attr,
1959 char *buf)
Jack Morgensteincd9281d2007-09-18 09:14:18 +02001960{
Tony Jonesf4e91eb2008-02-22 00:13:36 +01001961 struct mlx4_ib_dev *dev =
1962 container_of(device, struct mlx4_ib_dev, ib_dev.dev);
Jack Morgensteincd9281d2007-09-18 09:14:18 +02001963 return sprintf(buf, "%d.%d.%d\n", (int) (dev->dev->caps.fw_ver >> 32),
1964 (int) (dev->dev->caps.fw_ver >> 16) & 0xffff,
1965 (int) dev->dev->caps.fw_ver & 0xffff);
1966}
1967
Tony Jonesf4e91eb2008-02-22 00:13:36 +01001968static ssize_t show_rev(struct device *device, struct device_attribute *attr,
1969 char *buf)
Jack Morgensteincd9281d2007-09-18 09:14:18 +02001970{
Tony Jonesf4e91eb2008-02-22 00:13:36 +01001971 struct mlx4_ib_dev *dev =
1972 container_of(device, struct mlx4_ib_dev, ib_dev.dev);
Jack Morgensteincd9281d2007-09-18 09:14:18 +02001973 return sprintf(buf, "%x\n", dev->dev->rev_id);
1974}
1975
Tony Jonesf4e91eb2008-02-22 00:13:36 +01001976static ssize_t show_board(struct device *device, struct device_attribute *attr,
1977 char *buf)
Jack Morgensteincd9281d2007-09-18 09:14:18 +02001978{
Tony Jonesf4e91eb2008-02-22 00:13:36 +01001979 struct mlx4_ib_dev *dev =
1980 container_of(device, struct mlx4_ib_dev, ib_dev.dev);
1981 return sprintf(buf, "%.*s\n", MLX4_BOARD_ID_LEN,
1982 dev->dev->board_id);
Jack Morgensteincd9281d2007-09-18 09:14:18 +02001983}
1984
Tony Jonesf4e91eb2008-02-22 00:13:36 +01001985static DEVICE_ATTR(hw_rev, S_IRUGO, show_rev, NULL);
1986static DEVICE_ATTR(fw_ver, S_IRUGO, show_fw_ver, NULL);
1987static DEVICE_ATTR(hca_type, S_IRUGO, show_hca, NULL);
1988static DEVICE_ATTR(board_id, S_IRUGO, show_board, NULL);
Jack Morgensteincd9281d2007-09-18 09:14:18 +02001989
Tony Jonesf4e91eb2008-02-22 00:13:36 +01001990static struct device_attribute *mlx4_class_attributes[] = {
1991 &dev_attr_hw_rev,
1992 &dev_attr_fw_ver,
1993 &dev_attr_hca_type,
1994 &dev_attr_board_id
Jack Morgensteincd9281d2007-09-18 09:14:18 +02001995};
1996
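/*
 * A netdev on this port changed its MAC address: publish the new
 * source MAC in the iboe cache and, under SR-IOV, register it and
 * repoint the proxy QP1 of that port at the new MAC index, releasing
 * the previous MAC only once the QP update has succeeded.
 */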
Matan Barak9433c182014-05-15 15:29:28 +03001997#define MLX4_IB_INVALID_MAC ((u64)-1)
1998static void mlx4_ib_update_qps(struct mlx4_ib_dev *ibdev,
1999 struct net_device *dev,
2000 int port)
2001{
2002 u64 new_smac = 0;
2003 u64 release_mac = MLX4_IB_INVALID_MAC;
2004 struct mlx4_ib_qp *qp;
2005
2006 read_lock(&dev_base_lock);
2007 new_smac = mlx4_mac_to_u64(dev->dev_addr);
2008 read_unlock(&dev_base_lock);
2009
Jack Morgenstein3e0629c2014-09-11 14:11:17 +03002010 atomic64_set(&ibdev->iboe.mac[port - 1], new_smac);
2011
Jack Morgensteind24d9f42014-09-11 14:11:18 +03002012	/* no need to update QP1 or register the mac in non-SRIOV */
2013 if (!mlx4_is_mfunc(ibdev->dev))
2014 return;
2015
Matan Barak9433c182014-05-15 15:29:28 +03002016 mutex_lock(&ibdev->qp1_proxy_lock[port - 1]);
2017 qp = ibdev->qp1_proxy[port - 1];
2018 if (qp) {
2019 int new_smac_index;
Jack Morgenstein25476b02014-09-11 14:11:20 +03002020 u64 old_smac;
Matan Barak9433c182014-05-15 15:29:28 +03002021 struct mlx4_update_qp_params update_params;
2022
Jack Morgenstein25476b02014-09-11 14:11:20 +03002023 mutex_lock(&qp->mutex);
2024 old_smac = qp->pri.smac;
Matan Barak9433c182014-05-15 15:29:28 +03002025 if (new_smac == old_smac)
2026 goto unlock;
2027
2028 new_smac_index = mlx4_register_mac(ibdev->dev, port, new_smac);
2029
2030 if (new_smac_index < 0)
2031 goto unlock;
2032
2033 update_params.smac_index = new_smac_index;
Matan Barak09e05c32014-09-10 16:41:56 +03002034 if (mlx4_update_qp(ibdev->dev, qp->mqp.qpn, MLX4_UPDATE_QP_SMAC,
Matan Barak9433c182014-05-15 15:29:28 +03002035 &update_params)) {
2036 release_mac = new_smac;
2037 goto unlock;
2038 }
Jack Morgenstein25476b02014-09-11 14:11:20 +03002039 /* if old port was zero, no mac was yet registered for this QP */
2040 if (qp->pri.smac_port)
2041 release_mac = old_smac;
Matan Barak9433c182014-05-15 15:29:28 +03002042 qp->pri.smac = new_smac;
Jack Morgenstein25476b02014-09-11 14:11:20 +03002043 qp->pri.smac_port = port;
Matan Barak9433c182014-05-15 15:29:28 +03002044 qp->pri.smac_index = new_smac_index;
Matan Barak9433c182014-05-15 15:29:28 +03002045 }
2046
2047unlock:
Matan Barak9433c182014-05-15 15:29:28 +03002048 if (release_mac != MLX4_IB_INVALID_MAC)
2049 mlx4_unregister_mac(ibdev->dev, port, release_mac);
Jack Morgenstein25476b02014-09-11 14:11:20 +03002050 if (qp)
2051 mutex_unlock(&qp->mutex);
2052 mutex_unlock(&ibdev->qp1_proxy_lock[port - 1]);
Matan Barak9433c182014-05-15 15:29:28 +03002053}
2054
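/*
 * Runs from the netdev notifier under RTNL: refresh the cached netdev
 * of every IBoE port and, if the event concerns one of our ports and
 * may carry a MAC change (register/up/change/changeaddr), trigger the
 * QP1 smac update above.
 */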
Matan Barak9433c182014-05-15 15:29:28 +03002055static void mlx4_ib_scan_netdevs(struct mlx4_ib_dev *ibdev,
2056 struct net_device *dev,
2057 unsigned long event)
2058
Moni Shouad487ee72013-12-12 18:03:13 +02002059{
2060 struct mlx4_ib_iboe *iboe;
Matan Barak9433c182014-05-15 15:29:28 +03002061 int update_qps_port = -1;
Moni Shouad487ee72013-12-12 18:03:13 +02002062 int port;
2063
Moni Shoua5070cd22015-07-30 18:33:30 +03002064 ASSERT_RTNL();
2065
Moni Shouad487ee72013-12-12 18:03:13 +02002066 iboe = &ibdev->iboe;
2067
Jack Morgensteindba3ad22014-08-21 14:28:41 +03002068 spin_lock_bh(&iboe->lock);
Moni Shouad487ee72013-12-12 18:03:13 +02002069 mlx4_foreach_ib_transport_port(port, ibdev->dev) {
Moni Shouaad4885d22014-02-05 15:13:02 +02002070
Moni Shouad487ee72013-12-12 18:03:13 +02002071 iboe->netdevs[port - 1] =
2072 mlx4_get_protocol_dev(ibdev->dev, MLX4_PROT_ETH, port);
Moni Shouad487ee72013-12-12 18:03:13 +02002073
Matan Barak9433c182014-05-15 15:29:28 +03002074 if (dev == iboe->netdevs[port - 1] &&
2075 (event == NETDEV_CHANGEADDR || event == NETDEV_REGISTER ||
2076 event == NETDEV_UP || event == NETDEV_CHANGE))
2077 update_qps_port = port;
2078
Moni Shouad487ee72013-12-12 18:03:13 +02002079 }
Jack Morgensteindba3ad22014-08-21 14:28:41 +03002080 spin_unlock_bh(&iboe->lock);
Matan Barak9433c182014-05-15 15:29:28 +03002081
2082 if (update_qps_port > 0)
2083 mlx4_ib_update_qps(ibdev, dev, update_qps_port);
Moni Shouad487ee72013-12-12 18:03:13 +02002084}
2085
2086static int mlx4_ib_netdev_event(struct notifier_block *this,
2087 unsigned long event, void *ptr)
2088{
Jiri Pirko351638e2013-05-28 01:30:21 +00002089 struct net_device *dev = netdev_notifier_info_to_dev(ptr);
Eli Cohenfa417f72010-10-24 21:08:52 -07002090 struct mlx4_ib_dev *ibdev;
Eli Cohenfa417f72010-10-24 21:08:52 -07002091
2092 if (!net_eq(dev_net(dev), &init_net))
2093 return NOTIFY_DONE;
2094
2095 ibdev = container_of(this, struct mlx4_ib_dev, iboe.nb);
Matan Barak9433c182014-05-15 15:29:28 +03002096 mlx4_ib_scan_netdevs(ibdev, dev, event);
Eli Cohenfa417f72010-10-24 21:08:52 -07002097
2098 return NOTIFY_DONE;
2099}
2100
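/*
 * On the SR-IOV master: program each slave's virt2phys pkey mapping
 * (identity for the master and for index 0, the table's last entry
 * otherwise) and seed the physical pkey cache with the
 * full-membership pkey 0xFFFF in slot 0.
 */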
Jack Morgenstein54679e12012-08-03 08:40:43 +00002101static void init_pkeys(struct mlx4_ib_dev *ibdev)
2102{
2103 int port;
2104 int slave;
2105 int i;
2106
2107 if (mlx4_is_master(ibdev->dev)) {
Yishai Hadas872bf2f2015-01-25 16:59:35 +02002108 for (slave = 0; slave <= ibdev->dev->persist->num_vfs;
2109 ++slave) {
Jack Morgenstein54679e12012-08-03 08:40:43 +00002110 for (port = 1; port <= ibdev->dev->caps.num_ports; ++port) {
2111 for (i = 0;
2112 i < ibdev->dev->phys_caps.pkey_phys_table_len[port];
2113 ++i) {
2114 ibdev->pkeys.virt2phys_pkey[slave][port - 1][i] =
2115 /* master has the identity virt2phys pkey mapping */
2116 (slave == mlx4_master_func_num(ibdev->dev) || !i) ? i :
2117 ibdev->dev->phys_caps.pkey_phys_table_len[port] - 1;
2118 mlx4_sync_pkey_table(ibdev->dev, slave, port, i,
2119 ibdev->pkeys.virt2phys_pkey[slave][port - 1][i]);
2120 }
2121 }
2122 }
2123 /* initialize pkey cache */
2124 for (port = 1; port <= ibdev->dev->caps.num_ports; ++port) {
2125 for (i = 0;
2126 i < ibdev->dev->phys_caps.pkey_phys_table_len[port];
2127 ++i)
2128 ibdev->pkeys.phys_pkey_cache[port-1][i] =
2129 (i) ? 0 : 0xFFFF;
2130 }
2131 }
2132}
2133
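/*
 * Carve out completion vectors for this device: walk each port's EQs,
 * skip vectors shared with another port, and try to assign the rest;
 * the count actually obtained becomes ib_dev.num_comp_vectors.
 */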
Shlomo Pongratze605b742012-04-29 17:04:27 +03002134static void mlx4_ib_alloc_eqs(struct mlx4_dev *dev, struct mlx4_ib_dev *ibdev)
2135{
Matan Barakc66fa192015-05-31 09:30:16 +03002136 int i, j, eq = 0, total_eqs = 0;
Shlomo Pongratze605b742012-04-29 17:04:27 +03002137
Matan Barakc66fa192015-05-31 09:30:16 +03002138 ibdev->eq_table = kcalloc(dev->caps.num_comp_vectors,
2139 sizeof(ibdev->eq_table[0]), GFP_KERNEL);
Shlomo Pongratze605b742012-04-29 17:04:27 +03002140 if (!ibdev->eq_table)
2141 return;
2142
Matan Barakc66fa192015-05-31 09:30:16 +03002143 for (i = 1; i <= dev->caps.num_ports; i++) {
2144 for (j = 0; j < mlx4_get_eqs_per_port(dev, i);
2145 j++, total_eqs++) {
2146 if (i > 1 && mlx4_is_eq_shared(dev, total_eqs))
2147 continue;
2148 ibdev->eq_table[eq] = total_eqs;
2149 if (!mlx4_assign_eq(dev, i,
2150 &ibdev->eq_table[eq]))
2151 eq++;
2152 else
2153 ibdev->eq_table[eq] = -1;
Shlomo Pongratze605b742012-04-29 17:04:27 +03002154 }
2155 }
2156
Matan Barakc66fa192015-05-31 09:30:16 +03002157 for (i = eq; i < dev->caps.num_comp_vectors;
2158 ibdev->eq_table[i++] = -1)
2159 ;
Shlomo Pongratze605b742012-04-29 17:04:27 +03002160
2161 /* Advertise the new number of EQs to clients */
Matan Barakc66fa192015-05-31 09:30:16 +03002162 ibdev->ib_dev.num_comp_vectors = eq;
Shlomo Pongratze605b742012-04-29 17:04:27 +03002163}
2164
2165static void mlx4_ib_free_eqs(struct mlx4_dev *dev, struct mlx4_ib_dev *ibdev)
2166{
2167 int i;
Matan Barakc66fa192015-05-31 09:30:16 +03002168 int total_eqs = ibdev->ib_dev.num_comp_vectors;
Shlomo Pongratz3aac6ff2012-05-24 16:08:07 +03002169
Matan Barakc66fa192015-05-31 09:30:16 +03002170 /* no eqs were allocated */
Shlomo Pongratz3aac6ff2012-05-24 16:08:07 +03002171 if (!ibdev->eq_table)
2172 return;
Shlomo Pongratze605b742012-04-29 17:04:27 +03002173
2174 /* Reset the advertised EQ number */
Matan Barakc66fa192015-05-31 09:30:16 +03002175 ibdev->ib_dev.num_comp_vectors = 0;
Shlomo Pongratze605b742012-04-29 17:04:27 +03002176
Matan Barakc66fa192015-05-31 09:30:16 +03002177 for (i = 0; i < total_eqs; i++)
Shlomo Pongratze605b742012-04-29 17:04:27 +03002178 mlx4_release_eq(dev, ibdev->eq_table[i]);
Shlomo Pongratze605b742012-04-29 17:04:27 +03002179
Shlomo Pongratze605b742012-04-29 17:04:27 +03002180 kfree(ibdev->eq_table);
Matan Barakc66fa192015-05-31 09:30:16 +03002181 ibdev->eq_table = NULL;
Shlomo Pongratze605b742012-04-29 17:04:27 +03002182}
2183
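/*
 * ib_device->get_port_immutable hook: report the pkey and gid table
 * sizes and flag the port as plain IB, RoCE v1, or RoCE v1+v2 based
 * on the device capability bits.
 */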
Ira Weiny77386132015-05-13 20:02:58 -04002184static int mlx4_port_immutable(struct ib_device *ibdev, u8 port_num,
2185 struct ib_port_immutable *immutable)
2186{
2187 struct ib_port_attr attr;
Matan Barak4ed088e2016-01-14 17:50:43 +02002188 struct mlx4_ib_dev *mdev = to_mdev(ibdev);
Ira Weiny77386132015-05-13 20:02:58 -04002189 int err;
2190
2191 err = mlx4_ib_query_port(ibdev, port_num, &attr);
2192 if (err)
2193 return err;
2194
2195 immutable->pkey_tbl_len = attr.pkey_tbl_len;
2196 immutable->gid_tbl_len = attr.gid_tbl_len;
2197
Matan Barak4ed088e2016-01-14 17:50:43 +02002198 if (mlx4_ib_port_link_layer(ibdev, port_num) == IB_LINK_LAYER_INFINIBAND) {
Ira Weinyf9b22e32015-05-13 20:02:59 -04002199 immutable->core_cap_flags = RDMA_CORE_PORT_IBA_IB;
Matan Barak4ed088e2016-01-14 17:50:43 +02002200 } else {
2201 if (mdev->dev->caps.flags & MLX4_DEV_CAP_FLAG_IBOE)
2202 immutable->core_cap_flags = RDMA_CORE_PORT_IBA_ROCE;
2203 if (mdev->dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_ROCE_V1_V2)
2204 immutable->core_cap_flags = RDMA_CORE_PORT_IBA_ROCE |
2205 RDMA_CORE_PORT_IBA_ROCE_UDP_ENCAP;
2206 }
Ira Weinyf9b22e32015-05-13 20:02:59 -04002207
Ira Weiny337877a2015-06-06 14:38:29 -04002208 immutable->max_mad_size = IB_MGMT_MAD_SIZE;
2209
Ira Weiny77386132015-05-13 20:02:58 -04002210 return 0;
2211}
2212
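/*
 * Entry point called by the mlx4 core driver for every probed device
 * with at least one IB-transport port: allocates PD/UAR resources,
 * fills in the ib_device verbs table, sets up counters, the steering
 * QPN range and SR-IOV paravirtualization, then registers with the
 * RDMA core and the netdevice notifier chain.
 */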
Roland Dreier225c7b12007-05-08 18:00:38 -07002213static void *mlx4_ib_add(struct mlx4_dev *dev)
2214{
2215 struct mlx4_ib_dev *ibdev;
Roland Dreier22e7ef92009-01-09 13:22:29 -08002216 int num_ports = 0;
Jack Morgenstein035b1032012-05-10 23:28:09 +03002217 int i, j;
Eli Cohenfa417f72010-10-24 21:08:52 -07002218 int err;
2219 struct mlx4_ib_iboe *iboe;
Matan Barak41966702014-02-02 17:06:47 +02002220 int ib_num_ports = 0;
Moni Shouaa5750092015-02-03 16:48:37 +02002221 int num_req_counters;
Eran Ben Elishac3abb512015-06-15 17:59:03 +03002222 int allocated;
2223 u32 counter_index;
Eran Ben Elisha3ba8e312015-10-15 14:44:40 +03002224 struct counter_index *new_counter_index = NULL;
Roland Dreier225c7b12007-05-08 18:00:38 -07002225
Shlomo Pongratz987c8f82012-04-29 17:04:26 +03002226 pr_info_once("%s", mlx4_ib_version);
Roland Dreier68f39482008-02-04 20:20:44 -08002227
Jack Morgenstein026149c2012-08-03 08:40:55 +00002228 num_ports = 0;
Eli Cohenfa417f72010-10-24 21:08:52 -07002229 mlx4_foreach_ib_transport_port(i, dev)
Roland Dreier22e7ef92009-01-09 13:22:29 -08002230 num_ports++;
2231
2232 /* No point in registering a device with no ports... */
2233 if (num_ports == 0)
2234 return NULL;
2235
Roland Dreier225c7b12007-05-08 18:00:38 -07002236 ibdev = (struct mlx4_ib_dev *) ib_alloc_device(sizeof *ibdev);
2237 if (!ibdev) {
Yishai Hadas872bf2f2015-01-25 16:59:35 +02002238 dev_err(&dev->persist->pdev->dev,
2239 "Device struct alloc failed\n");
Roland Dreier225c7b12007-05-08 18:00:38 -07002240 return NULL;
2241 }
2242
Eli Cohenfa417f72010-10-24 21:08:52 -07002243 iboe = &ibdev->iboe;
2244
Roland Dreier225c7b12007-05-08 18:00:38 -07002245 if (mlx4_pd_alloc(dev, &ibdev->priv_pdn))
2246 goto err_dealloc;
2247
2248 if (mlx4_uar_alloc(dev, &ibdev->priv_uar))
2249 goto err_pd;
2250
Roland Dreier4979d182011-01-12 09:50:36 -08002251 ibdev->uar_map = ioremap((phys_addr_t) ibdev->priv_uar.pfn << PAGE_SHIFT,
2252 PAGE_SIZE);
Roland Dreier225c7b12007-05-08 18:00:38 -07002253 if (!ibdev->uar_map)
2254 goto err_uar;
Jack Morgenstein26c6bc72007-05-13 17:18:23 +03002255 MLX4_INIT_DOORBELL_LOCK(&ibdev->uar_lock);
Roland Dreier225c7b12007-05-08 18:00:38 -07002256
Roland Dreier225c7b12007-05-08 18:00:38 -07002257 ibdev->dev = dev;
Moni Shouac6215742015-02-03 16:48:39 +02002258 ibdev->bond_next_port = 0;
Roland Dreier225c7b12007-05-08 18:00:38 -07002259
2260 strlcpy(ibdev->ib_dev.name, "mlx4_%d", IB_DEVICE_NAME_MAX);
2261 ibdev->ib_dev.owner = THIS_MODULE;
2262 ibdev->ib_dev.node_type = RDMA_NODE_IB_CA;
Roland Dreier95d04f02008-07-23 08:12:26 -07002263 ibdev->ib_dev.local_dma_lkey = dev->caps.reserved_lkey;
Roland Dreier22e7ef92009-01-09 13:22:29 -08002264 ibdev->num_ports = num_ports;
Moni Shouaa5750092015-02-03 16:48:37 +02002265 ibdev->ib_dev.phys_port_cnt = mlx4_is_bonded(dev) ?
2266 1 : ibdev->num_ports;
Yevgeny Petrilinb8dd7862008-12-22 07:15:03 -08002267 ibdev->ib_dev.num_comp_vectors = dev->caps.num_comp_vectors;
Yishai Hadas872bf2f2015-01-25 16:59:35 +02002268 ibdev->ib_dev.dma_device = &dev->persist->pdev->dev;
Moni Shoua5070cd22015-07-30 18:33:30 +03002269 ibdev->ib_dev.get_netdev = mlx4_ib_get_netdev;
2270 ibdev->ib_dev.add_gid = mlx4_ib_add_gid;
2271 ibdev->ib_dev.del_gid = mlx4_ib_del_gid;
Roland Dreier225c7b12007-05-08 18:00:38 -07002272
Or Gerlitz08ff3232012-10-21 14:59:24 +00002273 if (dev->caps.userspace_caps)
2274 ibdev->ib_dev.uverbs_abi_ver = MLX4_IB_UVERBS_ABI_VERSION;
2275 else
2276 ibdev->ib_dev.uverbs_abi_ver = MLX4_IB_UVERBS_NO_DEV_CAPS_ABI_VERSION;
2277
Roland Dreier225c7b12007-05-08 18:00:38 -07002278 ibdev->ib_dev.uverbs_cmd_mask =
2279 (1ull << IB_USER_VERBS_CMD_GET_CONTEXT) |
2280 (1ull << IB_USER_VERBS_CMD_QUERY_DEVICE) |
2281 (1ull << IB_USER_VERBS_CMD_QUERY_PORT) |
2282 (1ull << IB_USER_VERBS_CMD_ALLOC_PD) |
2283 (1ull << IB_USER_VERBS_CMD_DEALLOC_PD) |
2284 (1ull << IB_USER_VERBS_CMD_REG_MR) |
Matan Barak93769322014-07-31 11:01:30 +03002285 (1ull << IB_USER_VERBS_CMD_REREG_MR) |
Roland Dreier225c7b12007-05-08 18:00:38 -07002286 (1ull << IB_USER_VERBS_CMD_DEREG_MR) |
2287 (1ull << IB_USER_VERBS_CMD_CREATE_COMP_CHANNEL) |
2288 (1ull << IB_USER_VERBS_CMD_CREATE_CQ) |
Vladimir Sokolovskybbf8eed12008-04-16 21:09:33 -07002289 (1ull << IB_USER_VERBS_CMD_RESIZE_CQ) |
Roland Dreier225c7b12007-05-08 18:00:38 -07002290 (1ull << IB_USER_VERBS_CMD_DESTROY_CQ) |
2291 (1ull << IB_USER_VERBS_CMD_CREATE_QP) |
2292 (1ull << IB_USER_VERBS_CMD_MODIFY_QP) |
Jack Morgenstein6a775e22007-06-21 12:27:47 +03002293 (1ull << IB_USER_VERBS_CMD_QUERY_QP) |
Roland Dreier225c7b12007-05-08 18:00:38 -07002294 (1ull << IB_USER_VERBS_CMD_DESTROY_QP) |
2295 (1ull << IB_USER_VERBS_CMD_ATTACH_MCAST) |
2296 (1ull << IB_USER_VERBS_CMD_DETACH_MCAST) |
2297 (1ull << IB_USER_VERBS_CMD_CREATE_SRQ) |
2298 (1ull << IB_USER_VERBS_CMD_MODIFY_SRQ) |
Jack Morgenstein65541cb2007-06-21 13:03:11 +03002299 (1ull << IB_USER_VERBS_CMD_QUERY_SRQ) |
Sean Hefty18abd5e2011-06-02 10:43:26 -07002300 (1ull << IB_USER_VERBS_CMD_DESTROY_SRQ) |
Sean Hefty42849b22011-08-11 13:57:43 -07002301 (1ull << IB_USER_VERBS_CMD_CREATE_XSRQ) |
2302 (1ull << IB_USER_VERBS_CMD_OPEN_QP);
Roland Dreier225c7b12007-05-08 18:00:38 -07002303
2304 ibdev->ib_dev.query_device = mlx4_ib_query_device;
2305 ibdev->ib_dev.query_port = mlx4_ib_query_port;
Eli Cohenfa417f72010-10-24 21:08:52 -07002306 ibdev->ib_dev.get_link_layer = mlx4_ib_port_link_layer;
Roland Dreier225c7b12007-05-08 18:00:38 -07002307 ibdev->ib_dev.query_gid = mlx4_ib_query_gid;
2308 ibdev->ib_dev.query_pkey = mlx4_ib_query_pkey;
2309 ibdev->ib_dev.modify_device = mlx4_ib_modify_device;
2310 ibdev->ib_dev.modify_port = mlx4_ib_modify_port;
2311 ibdev->ib_dev.alloc_ucontext = mlx4_ib_alloc_ucontext;
2312 ibdev->ib_dev.dealloc_ucontext = mlx4_ib_dealloc_ucontext;
2313 ibdev->ib_dev.mmap = mlx4_ib_mmap;
2314 ibdev->ib_dev.alloc_pd = mlx4_ib_alloc_pd;
2315 ibdev->ib_dev.dealloc_pd = mlx4_ib_dealloc_pd;
2316 ibdev->ib_dev.create_ah = mlx4_ib_create_ah;
2317 ibdev->ib_dev.query_ah = mlx4_ib_query_ah;
2318 ibdev->ib_dev.destroy_ah = mlx4_ib_destroy_ah;
2319 ibdev->ib_dev.create_srq = mlx4_ib_create_srq;
2320 ibdev->ib_dev.modify_srq = mlx4_ib_modify_srq;
Jack Morgenstein65541cb2007-06-21 13:03:11 +03002321 ibdev->ib_dev.query_srq = mlx4_ib_query_srq;
Roland Dreier225c7b12007-05-08 18:00:38 -07002322 ibdev->ib_dev.destroy_srq = mlx4_ib_destroy_srq;
2323 ibdev->ib_dev.post_srq_recv = mlx4_ib_post_srq_recv;
2324 ibdev->ib_dev.create_qp = mlx4_ib_create_qp;
2325 ibdev->ib_dev.modify_qp = mlx4_ib_modify_qp;
Jack Morgenstein6a775e22007-06-21 12:27:47 +03002326 ibdev->ib_dev.query_qp = mlx4_ib_query_qp;
Roland Dreier225c7b12007-05-08 18:00:38 -07002327 ibdev->ib_dev.destroy_qp = mlx4_ib_destroy_qp;
2328 ibdev->ib_dev.post_send = mlx4_ib_post_send;
2329 ibdev->ib_dev.post_recv = mlx4_ib_post_recv;
2330 ibdev->ib_dev.create_cq = mlx4_ib_create_cq;
Eli Cohen3fdcb972008-04-16 21:09:33 -07002331 ibdev->ib_dev.modify_cq = mlx4_ib_modify_cq;
Vladimir Sokolovskybbf8eed12008-04-16 21:09:33 -07002332 ibdev->ib_dev.resize_cq = mlx4_ib_resize_cq;
Roland Dreier225c7b12007-05-08 18:00:38 -07002333 ibdev->ib_dev.destroy_cq = mlx4_ib_destroy_cq;
2334 ibdev->ib_dev.poll_cq = mlx4_ib_poll_cq;
2335 ibdev->ib_dev.req_notify_cq = mlx4_ib_arm_cq;
2336 ibdev->ib_dev.get_dma_mr = mlx4_ib_get_dma_mr;
2337 ibdev->ib_dev.reg_user_mr = mlx4_ib_reg_user_mr;
Matan Barak93769322014-07-31 11:01:30 +03002338 ibdev->ib_dev.rereg_user_mr = mlx4_ib_rereg_user_mr;
Roland Dreier225c7b12007-05-08 18:00:38 -07002339 ibdev->ib_dev.dereg_mr = mlx4_ib_dereg_mr;
Sagi Grimberg679e34d2015-07-30 10:32:42 +03002340 ibdev->ib_dev.alloc_mr = mlx4_ib_alloc_mr;
Sagi Grimberg1b2cd0f2015-10-13 19:11:27 +03002341 ibdev->ib_dev.map_mr_sg = mlx4_ib_map_mr_sg;
Roland Dreier225c7b12007-05-08 18:00:38 -07002342 ibdev->ib_dev.attach_mcast = mlx4_ib_mcg_attach;
2343 ibdev->ib_dev.detach_mcast = mlx4_ib_mcg_detach;
2344 ibdev->ib_dev.process_mad = mlx4_ib_process_mad;
Ira Weiny77386132015-05-13 20:02:58 -04002345 ibdev->ib_dev.get_port_immutable = mlx4_port_immutable;
Yishai Hadasae184dd2015-08-13 18:32:06 +03002346 ibdev->ib_dev.disassociate_ucontext = mlx4_ib_disassociate_ucontext;
Roland Dreier225c7b12007-05-08 18:00:38 -07002347
Jack Morgenstein992e8e6e2012-08-03 08:40:54 +00002348 if (!mlx4_is_slave(ibdev->dev)) {
2349 ibdev->ib_dev.alloc_fmr = mlx4_ib_fmr_alloc;
2350 ibdev->ib_dev.map_phys_fmr = mlx4_ib_map_phys_fmr;
2351 ibdev->ib_dev.unmap_fmr = mlx4_ib_unmap_fmr;
2352 ibdev->ib_dev.dealloc_fmr = mlx4_ib_fmr_dealloc;
2353 }
Jack Morgenstein8ad11fb2007-08-01 12:29:05 +03002354
Shani Michaelib4253882013-02-06 16:19:16 +00002355 if (dev->caps.flags & MLX4_DEV_CAP_FLAG_MEM_WINDOW ||
2356 dev->caps.bmme_flags & MLX4_BMME_FLAG_TYPE_2_WIN) {
2357 ibdev->ib_dev.alloc_mw = mlx4_ib_alloc_mw;
Shani Michaelib4253882013-02-06 16:19:16 +00002358 ibdev->ib_dev.dealloc_mw = mlx4_ib_dealloc_mw;
2359
2360 ibdev->ib_dev.uverbs_cmd_mask |=
2361 (1ull << IB_USER_VERBS_CMD_ALLOC_MW) |
2362 (1ull << IB_USER_VERBS_CMD_DEALLOC_MW);
2363 }
2364
Sean Hefty012a8ff2011-06-02 09:01:33 -07002365 if (dev->caps.flags & MLX4_DEV_CAP_FLAG_XRC) {
2366 ibdev->ib_dev.alloc_xrcd = mlx4_ib_alloc_xrcd;
2367 ibdev->ib_dev.dealloc_xrcd = mlx4_ib_dealloc_xrcd;
2368 ibdev->ib_dev.uverbs_cmd_mask |=
2369 (1ull << IB_USER_VERBS_CMD_OPEN_XRCD) |
2370 (1ull << IB_USER_VERBS_CMD_CLOSE_XRCD);
2371 }
2372
Hadar Hen Zionf77c0162013-08-14 13:58:31 +03002373 if (check_flow_steering_support(dev)) {
Matan Barak0a9b7d52013-11-07 15:25:15 +02002374 ibdev->steering_support = MLX4_STEERING_MODE_DEVICE_MANAGED;
Hadar Hen Zionf77c0162013-08-14 13:58:31 +03002375 ibdev->ib_dev.create_flow = mlx4_ib_create_flow;
2376 ibdev->ib_dev.destroy_flow = mlx4_ib_destroy_flow;
2377
Yann Droneaudf21519b2013-11-06 23:21:49 +01002378 ibdev->ib_dev.uverbs_ex_cmd_mask |=
2379 (1ull << IB_USER_VERBS_EX_CMD_CREATE_FLOW) |
2380 (1ull << IB_USER_VERBS_EX_CMD_DESTROY_FLOW);
Hadar Hen Zionf77c0162013-08-14 13:58:31 +03002381 }
2382
Matan Barak4b664c42015-06-11 16:35:27 +03002383 ibdev->ib_dev.uverbs_ex_cmd_mask |=
2384 (1ull << IB_USER_VERBS_EX_CMD_QUERY_DEVICE) |
Eran Ben Elishafbfb6622015-10-15 14:44:42 +03002385 (1ull << IB_USER_VERBS_EX_CMD_CREATE_CQ) |
2386 (1ull << IB_USER_VERBS_EX_CMD_CREATE_QP);
Matan Barak4b664c42015-06-11 16:35:27 +03002387
Shlomo Pongratze605b742012-04-29 17:04:27 +03002388 mlx4_ib_alloc_eqs(dev, ibdev);
2389
Eli Cohenfa417f72010-10-24 21:08:52 -07002390 spin_lock_init(&iboe->lock);
2391
Roland Dreier225c7b12007-05-08 18:00:38 -07002392 if (init_node_data(ibdev))
2393 goto err_map;
2394
Eran Ben Elisha3ba8e312015-10-15 14:44:40 +03002395 for (i = 0; i < ibdev->num_ports; ++i) {
2396 mutex_init(&ibdev->counters_table[i].mutex);
2397 INIT_LIST_HEAD(&ibdev->counters_table[i].counters_list);
2398 }
2399
Moni Shouaa5750092015-02-03 16:48:37 +02002400 num_req_counters = mlx4_is_bonded(dev) ? 1 : ibdev->num_ports;
2401 for (i = 0; i < num_req_counters; ++i) {
Matan Barak9433c182014-05-15 15:29:28 +03002402 mutex_init(&ibdev->qp1_proxy_lock[i]);
Eran Ben Elishac3abb512015-06-15 17:59:03 +03002403 allocated = 0;
Or Gerlitzcfcde112011-06-15 14:49:57 +00002404 if (mlx4_ib_port_link_layer(&ibdev->ib_dev, i + 1) ==
2405 IB_LINK_LAYER_ETHERNET) {
Eran Ben Elishac3abb512015-06-15 17:59:03 +03002406 err = mlx4_counter_alloc(ibdev->dev, &counter_index);
2407 /* if failed to allocate a new counter, use default */
Or Gerlitzcfcde112011-06-15 14:49:57 +00002408 if (err)
Eran Ben Elishac3abb512015-06-15 17:59:03 +03002409 counter_index =
2410 mlx4_get_default_counter_index(dev,
2411 i + 1);
2412 else
2413 allocated = 1;
2414 } else { /* IB_LINK_LAYER_INFINIBAND use the default counter */
2415 counter_index = mlx4_get_default_counter_index(dev,
2416 i + 1);
Dan Carpenter3839d8a2014-03-28 11:21:39 +03002417 }
Eran Ben Elisha3ba8e312015-10-15 14:44:40 +03002418 new_counter_index = kmalloc(sizeof(*new_counter_index),
2419 GFP_KERNEL);
2420 if (!new_counter_index) {
2421 if (allocated)
2422 mlx4_counter_free(ibdev->dev, counter_index);
2423 goto err_counter;
2424 }
2425 new_counter_index->index = counter_index;
2426 new_counter_index->allocated = allocated;
2427 list_add_tail(&new_counter_index->list,
2428 &ibdev->counters_table[i].counters_list);
2429 ibdev->counters_table[i].default_counter = counter_index;
Eran Ben Elishac3abb512015-06-15 17:59:03 +03002430 pr_info("counter index %d for port %d allocated %d\n",
2431 counter_index, i + 1, allocated);
Or Gerlitzcfcde112011-06-15 14:49:57 +00002432 }
Moni Shouaa5750092015-02-03 16:48:37 +02002433 if (mlx4_is_bonded(dev))
Eran Ben Elishac3abb512015-06-15 17:59:03 +03002434 for (i = 1; i < ibdev->num_ports ; ++i) {
Eran Ben Elisha3ba8e312015-10-15 14:44:40 +03002435 new_counter_index =
2436 kmalloc(sizeof(struct counter_index),
2437 GFP_KERNEL);
2438 if (!new_counter_index)
2439 goto err_counter;
2440 new_counter_index->index = counter_index;
2441 new_counter_index->allocated = 0;
2442 list_add_tail(&new_counter_index->list,
2443 &ibdev->counters_table[i].counters_list);
2444 ibdev->counters_table[i].default_counter =
2445 counter_index;
Eran Ben Elishac3abb512015-06-15 17:59:03 +03002446 }
Or Gerlitzcfcde112011-06-15 14:49:57 +00002447
Matan Barak41966702014-02-02 17:06:47 +02002448 mlx4_foreach_port(i, dev, MLX4_PORT_TYPE_IB)
2449 ib_num_ports++;
2450
Roland Dreier225c7b12007-05-08 18:00:38 -07002451 spin_lock_init(&ibdev->sm_lock);
2452 mutex_init(&ibdev->cap_mask_mutex);
Yishai Hadas35f05da2015-02-08 11:49:34 +02002453 INIT_LIST_HEAD(&ibdev->qp_list);
2454 spin_lock_init(&ibdev->reset_flow_resource_lock);
Roland Dreier225c7b12007-05-08 18:00:38 -07002455
Matan Barak41966702014-02-02 17:06:47 +02002456 if (ibdev->steering_support == MLX4_STEERING_MODE_DEVICE_MANAGED &&
2457 ib_num_ports) {
Matan Barakc1c98502013-11-07 15:25:17 +02002458 ibdev->steer_qpn_count = MLX4_IB_UC_MAX_NUM_QPS;
2459 err = mlx4_qp_reserve_range(dev, ibdev->steer_qpn_count,
2460 MLX4_IB_UC_STEER_QPN_ALIGN,
Eugenia Emantayevddae0342014-12-11 10:57:54 +02002461 &ibdev->steer_qpn_base, 0);
Matan Barakc1c98502013-11-07 15:25:17 +02002462 if (err)
2463 goto err_counter;
2464
2465 ibdev->ib_uc_qpns_bitmap =
2466 kmalloc(BITS_TO_LONGS(ibdev->steer_qpn_count) *
2467 sizeof(long),
2468 GFP_KERNEL);
2469 if (!ibdev->ib_uc_qpns_bitmap) {
Yishai Hadas872bf2f2015-01-25 16:59:35 +02002470 dev_err(&dev->persist->pdev->dev,
2471 "bit map alloc failed\n");
Matan Barakc1c98502013-11-07 15:25:17 +02002472 goto err_steer_qp_release;
2473 }
2474
2475 bitmap_zero(ibdev->ib_uc_qpns_bitmap, ibdev->steer_qpn_count);
2476
2477 err = mlx4_FLOW_STEERING_IB_UC_QP_RANGE(
2478 dev, ibdev->steer_qpn_base,
2479 ibdev->steer_qpn_base +
2480 ibdev->steer_qpn_count - 1);
2481 if (err)
2482 goto err_steer_free_bitmap;
2483 }
2484
Jack Morgenstein3e0629c2014-09-11 14:11:17 +03002485 for (j = 1; j <= ibdev->dev->caps.num_ports; j++)
2486 atomic64_set(&iboe->mac[j - 1], ibdev->dev->caps.def_mac[j]);
2487
Ralph Campbell9a6edb62010-05-06 17:03:25 -07002488 if (ib_register_device(&ibdev->ib_dev, NULL))
Matan Barakc1c98502013-11-07 15:25:17 +02002489 goto err_steer_free_bitmap;
Roland Dreier225c7b12007-05-08 18:00:38 -07002490
2491 if (mlx4_ib_mad_init(ibdev))
2492 goto err_reg;
2493
Jack Morgensteinfc065732012-08-03 08:40:42 +00002494 if (mlx4_ib_init_sriov(ibdev))
2495 goto err_mad;
2496
Moni Shoua71a39bb2016-01-14 17:50:40 +02002497 if (dev->caps.flags & MLX4_DEV_CAP_FLAG_IBOE ||
2498 dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_ROCE_V1_V2) {
Moni Shouad487ee72013-12-12 18:03:13 +02002499 if (!iboe->nb.notifier_call) {
2500 iboe->nb.notifier_call = mlx4_ib_netdev_event;
2501 err = register_netdevice_notifier(&iboe->nb);
2502 if (err) {
2503 iboe->nb.notifier_call = NULL;
2504 goto err_notif;
2505 }
2506 }
Moni Shoua71a39bb2016-01-14 17:50:40 +02002507 if (dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_ROCE_V1_V2) {
2508 err = mlx4_config_roce_v2_port(dev, ROCE_V2_UDP_DPORT);
2509 if (err) {
2510 goto err_notif;
2511 }
2512 }
Eli Cohenfa417f72010-10-24 21:08:52 -07002513 }
2514
Jack Morgenstein035b1032012-05-10 23:28:09 +03002515 for (j = 0; j < ARRAY_SIZE(mlx4_class_attributes); ++j) {
Tony Jonesf4e91eb2008-02-22 00:13:36 +01002516 if (device_create_file(&ibdev->ib_dev.dev,
Jack Morgenstein035b1032012-05-10 23:28:09 +03002517 mlx4_class_attributes[j]))
Eli Cohenfa417f72010-10-24 21:08:52 -07002518 goto err_notif;
Jack Morgensteincd9281d2007-09-18 09:14:18 +02002519 }
2520
Jack Morgenstein3b4a8cd2009-09-05 20:24:50 -07002521 ibdev->ib_active = true;
2522
Jack Morgenstein54679e12012-08-03 08:40:43 +00002523 if (mlx4_is_mfunc(ibdev->dev))
2524 init_pkeys(ibdev);
2525
Jack Morgenstein3806d082012-08-03 08:40:58 +00002526 /* create paravirt contexts for any VFs which are active */
2527 if (mlx4_is_master(ibdev->dev)) {
2528 for (j = 0; j < MLX4_MFUNC_MAX; j++) {
2529 if (j == mlx4_master_func_num(ibdev->dev))
2530 continue;
2531 if (mlx4_is_slave_active(ibdev->dev, j))
2532 do_slave_init(ibdev, j, 1);
2533 }
2534 }
Roland Dreier225c7b12007-05-08 18:00:38 -07002535 return ibdev;
2536
Eli Cohenfa417f72010-10-24 21:08:52 -07002537err_notif:
Moni Shouad487ee72013-12-12 18:03:13 +02002538 if (ibdev->iboe.nb.notifier_call) {
2539 if (unregister_netdevice_notifier(&ibdev->iboe.nb))
2540 pr_warn("failure unregistering notifier\n");
2541 ibdev->iboe.nb.notifier_call = NULL;
2542 }
Eli Cohenfa417f72010-10-24 21:08:52 -07002543 flush_workqueue(wq);
2544
Jack Morgensteinfc065732012-08-03 08:40:42 +00002545 mlx4_ib_close_sriov(ibdev);
2546
2547err_mad:
2548 mlx4_ib_mad_cleanup(ibdev);
2549
Roland Dreier225c7b12007-05-08 18:00:38 -07002550err_reg:
2551 ib_unregister_device(&ibdev->ib_dev);
2552
Matan Barakc1c98502013-11-07 15:25:17 +02002553err_steer_free_bitmap:
2554 kfree(ibdev->ib_uc_qpns_bitmap);
2555
2556err_steer_qp_release:
2557 if (ibdev->steering_support == MLX4_STEERING_MODE_DEVICE_MANAGED)
2558 mlx4_qp_release_range(dev, ibdev->steer_qpn_base,
2559 ibdev->steer_qpn_count);
Or Gerlitzcfcde112011-06-15 14:49:57 +00002560err_counter:
Eran Ben Elisha3ba8e312015-10-15 14:44:40 +03002561 for (i = 0; i < ibdev->num_ports; ++i)
2562 mlx4_ib_delete_counters_table(ibdev, &ibdev->counters_table[i]);
2563
Roland Dreier225c7b12007-05-08 18:00:38 -07002564err_map:
2565 iounmap(ibdev->uar_map);
2566
2567err_uar:
2568 mlx4_uar_free(dev, &ibdev->priv_uar);
2569
2570err_pd:
2571 mlx4_pd_free(dev, ibdev->priv_pdn);
2572
2573err_dealloc:
2574 ib_dealloc_device(&ibdev->ib_dev);
2575
2576 return NULL;
2577}
2578
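/*
 * mlx4_ib_steer_qp_alloc()/_free() reserve and release power-of-two
 * aligned QPNs from the range set aside at probe time for flow
 * steering rules on userspace-controlled QPs.
 */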
Matan Barakc1c98502013-11-07 15:25:17 +02002579int mlx4_ib_steer_qp_alloc(struct mlx4_ib_dev *dev, int count, int *qpn)
2580{
2581 int offset;
2582
2583 WARN_ON(!dev->ib_uc_qpns_bitmap);
2584
2585 offset = bitmap_find_free_region(dev->ib_uc_qpns_bitmap,
2586 dev->steer_qpn_count,
2587 get_count_order(count));
2588 if (offset < 0)
2589 return offset;
2590
2591 *qpn = dev->steer_qpn_base + offset;
2592 return 0;
2593}
2594
2595void mlx4_ib_steer_qp_free(struct mlx4_ib_dev *dev, u32 qpn, int count)
2596{
2597 if (!qpn ||
2598 dev->steering_support != MLX4_STEERING_MODE_DEVICE_MANAGED)
2599 return;
2600
2601 BUG_ON(qpn < dev->steer_qpn_base);
2602
2603 bitmap_release_region(dev->ib_uc_qpns_bitmap,
2604 qpn - dev->steer_qpn_base,
2605 get_count_order(count));
2606}
2607
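/*
 * Attach or detach the implicit IB L2 rule that steers all traffic of
 * a userspace-controlled QP: a one-spec flow attribute with an
 * all-zero mask, registered in the NIC domain as a regular rule.
 */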
2608int mlx4_ib_steer_qp_reg(struct mlx4_ib_dev *mdev, struct mlx4_ib_qp *mqp,
2609 int is_attach)
2610{
2611 int err;
2612 size_t flow_size;
2613 struct ib_flow_attr *flow = NULL;
2614 struct ib_flow_spec_ib *ib_spec;
2615
2616 if (is_attach) {
2617 flow_size = sizeof(struct ib_flow_attr) +
2618 sizeof(struct ib_flow_spec_ib);
2619 flow = kzalloc(flow_size, GFP_KERNEL);
2620 if (!flow)
2621 return -ENOMEM;
2622 flow->port = mqp->port;
2623 flow->num_of_specs = 1;
2624 flow->size = flow_size;
2625 ib_spec = (struct ib_flow_spec_ib *)(flow + 1);
2626 ib_spec->type = IB_FLOW_SPEC_IB;
2627 ib_spec->size = sizeof(struct ib_flow_spec_ib);
2628 /* Add an empty rule for IB L2 */
2629 memset(&ib_spec->mask, 0, sizeof(ib_spec->mask));
2630
2631 err = __mlx4_ib_create_flow(&mqp->ibqp, flow,
2632 IB_FLOW_DOMAIN_NIC,
2633 MLX4_FS_REGULAR,
2634 &mqp->reg_id);
2635 } else {
2636 err = __mlx4_ib_destroy_flow(mdev->dev, mqp->reg_id);
2637 }
2638 kfree(flow);
2639 return err;
2640}
2641
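/*
 * Teardown mirror of mlx4_ib_add(): quiesce SR-IOV and MAD handling,
 * unregister from the RDMA core and the netdevice notifier, and
 * release the steering QPN range, counters, EQs and PD/UAR resources.
 */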
Roland Dreier225c7b12007-05-08 18:00:38 -07002642static void mlx4_ib_remove(struct mlx4_dev *dev, void *ibdev_ptr)
2643{
2644 struct mlx4_ib_dev *ibdev = ibdev_ptr;
2645 int p;
2646
Moni Shoua4bf97152014-08-21 14:28:42 +03002647 ibdev->ib_active = false;
2648 flush_workqueue(wq);
2649
Jack Morgensteinfc065732012-08-03 08:40:42 +00002650 mlx4_ib_close_sriov(ibdev);
Yevgeny Petrilina6a47772009-03-18 19:49:54 -07002651 mlx4_ib_mad_cleanup(ibdev);
2652 ib_unregister_device(&ibdev->ib_dev);
Eli Cohenfa417f72010-10-24 21:08:52 -07002653 if (ibdev->iboe.nb.notifier_call) {
2654 if (unregister_netdevice_notifier(&ibdev->iboe.nb))
Shlomo Pongratz987c8f82012-04-29 17:04:26 +03002655 pr_warn("failure unregistering notifier\n");
Eli Cohenfa417f72010-10-24 21:08:52 -07002656 ibdev->iboe.nb.notifier_call = NULL;
2657 }
Matan Barakc1c98502013-11-07 15:25:17 +02002658
2659 if (ibdev->steering_support == MLX4_STEERING_MODE_DEVICE_MANAGED) {
2660 mlx4_qp_release_range(dev, ibdev->steer_qpn_base,
2661 ibdev->steer_qpn_count);
2662 kfree(ibdev->ib_uc_qpns_bitmap);
2663 }
2664
Eli Cohenfa417f72010-10-24 21:08:52 -07002665 iounmap(ibdev->uar_map);
Or Gerlitzcfcde112011-06-15 14:49:57 +00002666 for (p = 0; p < ibdev->num_ports; ++p)
Eran Ben Elisha3ba8e312015-10-15 14:44:40 +03002667 mlx4_ib_delete_counters_table(ibdev, &ibdev->counters_table[p]);
2668
Eli Cohenfa417f72010-10-24 21:08:52 -07002669 mlx4_foreach_port(p, dev, MLX4_PORT_TYPE_IB)
Roland Dreier225c7b12007-05-08 18:00:38 -07002670 mlx4_CLOSE_PORT(dev, p);
2671
Shlomo Pongratze605b742012-04-29 17:04:27 +03002672 mlx4_ib_free_eqs(dev, ibdev);
2673
Roland Dreier225c7b12007-05-08 18:00:38 -07002674 mlx4_uar_free(dev, &ibdev->priv_uar);
2675 mlx4_pd_free(dev, ibdev->priv_pdn);
2676 ib_dealloc_device(&ibdev->ib_dev);
2677}
2678
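/*
 * Schedule per-port work items that create (or tear down) the
 * paravirtualized tunnel QPs of one slave.  May run in atomic
 * context, hence the GFP_ATOMIC allocations; the work is only queued
 * while the SR-IOV layer is not going down.
 */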
Jack Morgensteinfc065732012-08-03 08:40:42 +00002679static void do_slave_init(struct mlx4_ib_dev *ibdev, int slave, int do_init)
2680{
2681 struct mlx4_ib_demux_work **dm = NULL;
2682 struct mlx4_dev *dev = ibdev->dev;
2683 int i;
2684 unsigned long flags;
Matan Barak449fc482014-03-19 18:11:52 +02002685 struct mlx4_active_ports actv_ports;
2686 unsigned int ports;
2687 unsigned int first_port;
Jack Morgensteinfc065732012-08-03 08:40:42 +00002688
2689 if (!mlx4_is_master(dev))
2690 return;
2691
Matan Barak449fc482014-03-19 18:11:52 +02002692 actv_ports = mlx4_get_active_ports(dev, slave);
2693 ports = bitmap_weight(actv_ports.ports, dev->caps.num_ports);
2694 first_port = find_first_bit(actv_ports.ports, dev->caps.num_ports);
2695
2696 dm = kcalloc(ports, sizeof(*dm), GFP_ATOMIC);
Jack Morgensteinfc065732012-08-03 08:40:42 +00002697 if (!dm) {
2698 pr_err("failed to allocate memory for tunneling qp update\n");
Maninder Singha39a98f2015-07-08 09:43:35 +05302699 return;
Jack Morgensteinfc065732012-08-03 08:40:42 +00002700 }
2701
Matan Barak449fc482014-03-19 18:11:52 +02002702 for (i = 0; i < ports; i++) {
Jack Morgensteinfc065732012-08-03 08:40:42 +00002703 dm[i] = kmalloc(sizeof (struct mlx4_ib_demux_work), GFP_ATOMIC);
2704 if (!dm[i]) {
2705 pr_err("failed to allocate memory for tunneling qp update work struct\n");
Maninder Singha39a98f2015-07-08 09:43:35 +05302706 while (--i >= 0)
2707 kfree(dm[i]);
Jack Morgensteinfc065732012-08-03 08:40:42 +00002708 goto out;
2709 }
Jack Morgensteinfc065732012-08-03 08:40:42 +00002710 INIT_WORK(&dm[i]->work, mlx4_ib_tunnels_update_work);
Matan Barak449fc482014-03-19 18:11:52 +02002711 dm[i]->port = first_port + i + 1;
Jack Morgensteinfc065732012-08-03 08:40:42 +00002712 dm[i]->slave = slave;
2713 dm[i]->do_init = do_init;
2714 dm[i]->dev = ibdev;
Doug Ledfordd9a047a2015-07-09 10:21:08 -04002715 }
2716 /* initialize or tear down tunnel QPs for the slave */
2717 spin_lock_irqsave(&ibdev->sriov.going_down_lock, flags);
2718 if (!ibdev->sriov.is_going_down) {
2719 for (i = 0; i < ports; i++)
Jack Morgensteinfc065732012-08-03 08:40:42 +00002720 queue_work(ibdev->sriov.demux[i].ud_wq, &dm[i]->work);
2721 spin_unlock_irqrestore(&ibdev->sriov.going_down_lock, flags);
Doug Ledfordd9a047a2015-07-09 10:21:08 -04002722 } else {
2723 spin_unlock_irqrestore(&ibdev->sriov.going_down_lock, flags);
2724 for (i = 0; i < ports; i++)
2725 kfree(dm[i]);
Jack Morgensteinfc065732012-08-03 08:40:42 +00002726 }
2727out:
Syam Sidhardhanc89d1272013-02-24 23:20:05 +00002728 kfree(dm);
Jack Morgensteinfc065732012-08-03 08:40:42 +00002729 return;
2730}
2731
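/*
 * Catastrophic-error handling: walk every QP on the device and, for
 * each send or receive queue with outstanding work, queue its CQ for
 * a completion callback so consumers observe the flushed state.
 */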
Yishai Hadas35f05da2015-02-08 11:49:34 +02002732static void mlx4_ib_handle_catas_error(struct mlx4_ib_dev *ibdev)
2733{
2734 struct mlx4_ib_qp *mqp;
2735 unsigned long flags_qp;
2736 unsigned long flags_cq;
2737 struct mlx4_ib_cq *send_mcq, *recv_mcq;
2738 struct list_head cq_notify_list;
2739 struct mlx4_cq *mcq;
2740 unsigned long flags;
2741
2742 pr_warn("mlx4_ib_handle_catas_error was started\n");
2743 INIT_LIST_HEAD(&cq_notify_list);
2744
2745	/* Go over the qp list residing on that ibdev, syncing with qp create/destroy. */
2746 spin_lock_irqsave(&ibdev->reset_flow_resource_lock, flags);
2747
2748 list_for_each_entry(mqp, &ibdev->qp_list, qps_list) {
2749 spin_lock_irqsave(&mqp->sq.lock, flags_qp);
2750 if (mqp->sq.tail != mqp->sq.head) {
2751 send_mcq = to_mcq(mqp->ibqp.send_cq);
2752 spin_lock_irqsave(&send_mcq->lock, flags_cq);
2753 if (send_mcq->mcq.comp &&
2754 mqp->ibqp.send_cq->comp_handler) {
2755 if (!send_mcq->mcq.reset_notify_added) {
2756 send_mcq->mcq.reset_notify_added = 1;
2757 list_add_tail(&send_mcq->mcq.reset_notify,
2758 &cq_notify_list);
2759 }
2760 }
2761 spin_unlock_irqrestore(&send_mcq->lock, flags_cq);
2762 }
2763 spin_unlock_irqrestore(&mqp->sq.lock, flags_qp);
2764 /* Now, handle the QP's receive queue */
2765 spin_lock_irqsave(&mqp->rq.lock, flags_qp);
2766 /* no handling is needed for SRQ */
2767 if (!mqp->ibqp.srq) {
2768 if (mqp->rq.tail != mqp->rq.head) {
2769 recv_mcq = to_mcq(mqp->ibqp.recv_cq);
2770 spin_lock_irqsave(&recv_mcq->lock, flags_cq);
2771 if (recv_mcq->mcq.comp &&
2772 mqp->ibqp.recv_cq->comp_handler) {
2773 if (!recv_mcq->mcq.reset_notify_added) {
2774 recv_mcq->mcq.reset_notify_added = 1;
2775 list_add_tail(&recv_mcq->mcq.reset_notify,
2776 &cq_notify_list);
2777 }
2778 }
2779 spin_unlock_irqrestore(&recv_mcq->lock,
2780 flags_cq);
2781 }
2782 }
2783 spin_unlock_irqrestore(&mqp->rq.lock, flags_qp);
2784 }
2785
2786 list_for_each_entry(mcq, &cq_notify_list, reset_notify) {
2787 mcq->comp(mcq);
2788 }
2789 spin_unlock_irqrestore(&ibdev->reset_flow_resource_lock, flags);
2790 pr_warn("mlx4_ib_handle_catas_error ended\n");
2791}
2792
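/*
 * With RoCE port bonding, ULPs see a single IB port.  Aggregate the
 * carrier state of all slave netdevs: the bonded port is ACTIVE if any
 * slave is running with link up, otherwise DOWN; the result is
 * dispatched on port 1 as IB_EVENT_PORT_ACTIVE or IB_EVENT_PORT_ERR.
 */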
static void handle_bonded_port_state_event(struct work_struct *work)
{
	struct ib_event_work *ew =
		container_of(work, struct ib_event_work, work);
	struct mlx4_ib_dev *ibdev = ew->ib_dev;
	enum ib_port_state bonded_port_state = IB_PORT_NOP;
	int i;
	struct ib_event ibev;

	kfree(ew);
	spin_lock_bh(&ibdev->iboe.lock);
	for (i = 0; i < MLX4_MAX_PORTS; ++i) {
		struct net_device *curr_netdev = ibdev->iboe.netdevs[i];
		enum ib_port_state curr_port_state;

		if (!curr_netdev)
			continue;

		curr_port_state =
			(netif_running(curr_netdev) &&
			 netif_carrier_ok(curr_netdev)) ?
			IB_PORT_ACTIVE : IB_PORT_DOWN;

		bonded_port_state = (bonded_port_state != IB_PORT_ACTIVE) ?
			curr_port_state : IB_PORT_ACTIVE;
	}
	spin_unlock_bh(&ibdev->iboe.lock);

	ibev.device = &ibdev->ib_dev;
	ibev.element.port_num = 1;
	ibev.event = (bonded_port_state == IB_PORT_ACTIVE) ?
		IB_EVENT_PORT_ACTIVE : IB_EVENT_PORT_ERR;

	ib_dispatch_event(&ibev);
}

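/*
 * Main event hook registered with the mlx4 core: translate core device
 * events into IB events for ULPs, and trigger the SR-IOV side effects
 * (tunnel QP setup/teardown, alias-GUID handling) that some of them
 * require.
 */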
static void mlx4_ib_event(struct mlx4_dev *dev, void *ibdev_ptr,
			  enum mlx4_dev_event event, unsigned long param)
{
	struct ib_event ibev;
	struct mlx4_ib_dev *ibdev = to_mdev((struct ib_device *) ibdev_ptr);
	struct mlx4_eqe *eqe = NULL;
	struct ib_event_work *ew;
	int p = 0;

	if (mlx4_is_bonded(dev) &&
	    ((event == MLX4_DEV_EVENT_PORT_UP) ||
	     (event == MLX4_DEV_EVENT_PORT_DOWN))) {
		ew = kmalloc(sizeof(*ew), GFP_ATOMIC);
		if (!ew)
			return;
		INIT_WORK(&ew->work, handle_bonded_port_state_event);
		ew->ib_dev = ibdev;
		queue_work(wq, &ew->work);
		return;
	}

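	/*
	 * MLX4_DEV_EVENT_PORT_MGMT_CHANGE passes a full EQE through
	 * param; all other events pass a port number or slave id.
	 */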
	if (event == MLX4_DEV_EVENT_PORT_MGMT_CHANGE)
		eqe = (struct mlx4_eqe *)param;
	else
		p = (int) param;

	switch (event) {
	case MLX4_DEV_EVENT_PORT_UP:
		if (p > ibdev->num_ports)
			return;
		if (mlx4_is_master(dev) &&
		    rdma_port_get_link_layer(&ibdev->ib_dev, p) ==
		    IB_LINK_LAYER_INFINIBAND) {
			mlx4_ib_invalidate_all_guid_record(ibdev, p);
		}
		ibev.event = IB_EVENT_PORT_ACTIVE;
		break;

	case MLX4_DEV_EVENT_PORT_DOWN:
		if (p > ibdev->num_ports)
			return;
		ibev.event = IB_EVENT_PORT_ERR;
		break;

	case MLX4_DEV_EVENT_CATASTROPHIC_ERROR:
		ibdev->ib_active = false;
		ibev.event = IB_EVENT_DEVICE_FATAL;
		mlx4_ib_handle_catas_error(ibdev);
		break;

	case MLX4_DEV_EVENT_PORT_MGMT_CHANGE:
		ew = kmalloc(sizeof(*ew), GFP_ATOMIC);
		if (!ew) {
			pr_err("failed to allocate memory for events work\n");
			break;
		}

		INIT_WORK(&ew->work, handle_port_mgmt_change_event);
		memcpy(&ew->ib_eqe, eqe, sizeof(*eqe));
		ew->ib_dev = ibdev;
		/* need to queue only for port owner, which uses GEN_EQE */
		if (mlx4_is_master(dev))
			queue_work(wq, &ew->work);
		else
			handle_port_mgmt_change_event(&ew->work);
		return;

	case MLX4_DEV_EVENT_SLAVE_INIT:
		/* here, p is the slave id */
		do_slave_init(ibdev, p, 1);
		if (mlx4_is_master(dev)) {
			int i;

			for (i = 1; i <= ibdev->num_ports; i++) {
				if (rdma_port_get_link_layer(&ibdev->ib_dev, i)
				    == IB_LINK_LAYER_INFINIBAND)
					mlx4_ib_slave_alias_guid_event(ibdev,
								       p, i,
								       1);
			}
		}
		return;

	case MLX4_DEV_EVENT_SLAVE_SHUTDOWN:
		if (mlx4_is_master(dev)) {
			int i;

			for (i = 1; i <= ibdev->num_ports; i++) {
				if (rdma_port_get_link_layer(&ibdev->ib_dev, i)
				    == IB_LINK_LAYER_INFINIBAND)
					mlx4_ib_slave_alias_guid_event(ibdev,
								       p, i,
								       0);
			}
		}
		/* here, p is the slave id */
		do_slave_init(ibdev, p, 0);
		return;

	default:
		return;
	}

	ibev.device = ibdev_ptr;
	ibev.element.port_num = mlx4_is_bonded(ibdev->dev) ? 1 : (u8)p;

	ib_dispatch_event(&ibev);
}

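/*
 * Hooks registered with the mlx4 core.  MLX4_INTFF_BONDING marks this
 * interface as capable of handling port bonding (RoCE LAG), where both
 * physical ports are exposed through one IB device.
 */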
static struct mlx4_interface mlx4_ib_interface = {
	.add		= mlx4_ib_add,
	.remove		= mlx4_ib_remove,
	.event		= mlx4_ib_event,
	.protocol	= MLX4_PROT_IB_IPV6,
	.flags		= MLX4_INTFF_BONDING
};

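/*
 * Bring up the event workqueue and the multicast-group (mcg) machinery
 * before registering with the mlx4 core; registration may immediately
 * call back into mlx4_ib_add() for already-probed devices.  Teardown
 * runs in the reverse order.
 */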
static int __init mlx4_ib_init(void)
{
	int err;

	wq = create_singlethread_workqueue("mlx4_ib");
	if (!wq)
		return -ENOMEM;

	err = mlx4_ib_mcg_init();
	if (err)
		goto clean_wq;

	err = mlx4_register_interface(&mlx4_ib_interface);
	if (err)
		goto clean_mcg;

	return 0;

clean_mcg:
	mlx4_ib_mcg_destroy();

clean_wq:
	destroy_workqueue(wq);
	return err;
}

static void __exit mlx4_ib_cleanup(void)
{
	mlx4_unregister_interface(&mlx4_ib_interface);
	mlx4_ib_mcg_destroy();
	destroy_workqueue(wq);
}

module_init(mlx4_ib_init);
module_exit(mlx4_ib_cleanup);