/*
 * Copyright (c) 2006, 2007 Cisco Systems, Inc. All rights reserved.
 * Copyright (c) 2007, 2008 Mellanox Technologies. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses. You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/module.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/errno.h>
#include <linux/netdevice.h>
#include <linux/inetdevice.h>
#include <linux/rtnetlink.h>
#include <linux/if_vlan.h>
#include <linux/sched/mm.h>
#include <linux/sched/task.h>

#include <net/ipv6.h>
#include <net/addrconf.h>
#include <net/devlink.h>

#include <rdma/ib_smi.h>
#include <rdma/ib_user_verbs.h>
#include <rdma/ib_addr.h>
#include <rdma/ib_cache.h>

#include <net/bonding.h>

#include <linux/mlx4/driver.h>
#include <linux/mlx4/cmd.h>
#include <linux/mlx4/qp.h>

#include "mlx4_ib.h"
#include <rdma/mlx4-abi.h>

#define DRV_NAME	MLX4_IB_DRV_NAME
#define DRV_VERSION	"4.0-0"

#define MLX4_IB_FLOW_MAX_PRIO 0xFFF
#define MLX4_IB_FLOW_QPN_MASK 0xFFFFFF
#define MLX4_IB_CARD_REV_A0   0xA0

MODULE_AUTHOR("Roland Dreier");
MODULE_DESCRIPTION("Mellanox ConnectX HCA InfiniBand driver");
MODULE_LICENSE("Dual BSD/GPL");

int mlx4_ib_sm_guid_assign = 0;
module_param_named(sm_guid_assign, mlx4_ib_sm_guid_assign, int, 0444);
MODULE_PARM_DESC(sm_guid_assign, "Enable SM alias_GUID assignment if sm_guid_assign > 0 (Default: 0)");

static const char mlx4_ib_version[] =
	DRV_NAME ": Mellanox ConnectX InfiniBand driver v"
	DRV_VERSION "\n";

static void do_slave_init(struct mlx4_ib_dev *ibdev, int slave, int do_init);
static enum rdma_link_layer mlx4_ib_port_link_layer(struct ib_device *device,
						    u8 port_num);

static struct workqueue_struct *wq;

static void init_query_mad(struct ib_smp *mad)
{
	mad->base_version = 1;
	mad->mgmt_class = IB_MGMT_CLASS_SUBN_LID_ROUTED;
	mad->class_version = 1;
	mad->method = IB_MGMT_METHOD_GET;
}

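/*
 * Device-managed flow steering (DMFS) is only reported as supported when
 * the device is in MLX4_STEERING_MODE_DEVICE_MANAGED mode and the relevant
 * capability bits are set for the port types present (DMFS_IPOIB for IB
 * ports, FS_EN for Ethernet ports).  It is also disabled for IB ports in a
 * multifunction (SR-IOV) environment.
 */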
static int check_flow_steering_support(struct mlx4_dev *dev)
{
	int eth_num_ports = 0;
	int ib_num_ports = 0;

	int dmfs = dev->caps.steering_mode == MLX4_STEERING_MODE_DEVICE_MANAGED;

	if (dmfs) {
		int i;
		mlx4_foreach_port(i, dev, MLX4_PORT_TYPE_ETH)
			eth_num_ports++;
		mlx4_foreach_port(i, dev, MLX4_PORT_TYPE_IB)
			ib_num_ports++;
		dmfs &= (!ib_num_ports ||
			 (dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_DMFS_IPOIB)) &&
			(!eth_num_ports ||
			 (dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_FS_EN));
		if (ib_num_ports && mlx4_is_mfunc(dev)) {
			pr_warn("Device managed flow steering is unavailable for IB port in multifunction env.\n");
			dmfs = 0;
		}
	}
	return dmfs;
}

static int num_ib_ports(struct mlx4_dev *dev)
{
	int ib_ports = 0;
	int i;

	mlx4_foreach_port(i, dev, MLX4_PORT_TYPE_IB)
		ib_ports++;

	return ib_ports;
}

static struct net_device *mlx4_ib_get_netdev(struct ib_device *device, u8 port_num)
{
	struct mlx4_ib_dev *ibdev = to_mdev(device);
	struct net_device *dev;

	rcu_read_lock();
	dev = mlx4_get_protocol_dev(ibdev->dev, MLX4_PROT_ETH, port_num);

	if (dev) {
		if (mlx4_is_bonded(ibdev->dev)) {
			struct net_device *upper = NULL;

			upper = netdev_master_upper_dev_get_rcu(dev);
			if (upper) {
				struct net_device *active;

				active = bond_option_active_slave_get_rcu(netdev_priv(upper));
				if (active)
					dev = active;
			}
		}
	}
	if (dev)
		dev_hold(dev);

	rcu_read_unlock();
	return dev;
}

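/*
 * The helpers below push the software GID table to firmware with the
 * SET_PORT command: the _v1 variant writes plain GIDs, while the _v1_v2
 * variant also encodes the RoCE version and address type per entry.  When
 * the two ports are bonded, the same table is programmed on port 2 as well.
 */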
static int mlx4_ib_update_gids_v1(struct gid_entry *gids,
				  struct mlx4_ib_dev *ibdev,
				  u8 port_num)
{
	struct mlx4_cmd_mailbox *mailbox;
	int err;
	struct mlx4_dev *dev = ibdev->dev;
	int i;
	union ib_gid *gid_tbl;

	mailbox = mlx4_alloc_cmd_mailbox(dev);
	if (IS_ERR(mailbox))
		return -ENOMEM;

	gid_tbl = mailbox->buf;

	for (i = 0; i < MLX4_MAX_PORT_GIDS; ++i)
		memcpy(&gid_tbl[i], &gids[i].gid, sizeof(union ib_gid));

	err = mlx4_cmd(dev, mailbox->dma,
		       MLX4_SET_PORT_GID_TABLE << 8 | port_num,
		       1, MLX4_CMD_SET_PORT, MLX4_CMD_TIME_CLASS_B,
		       MLX4_CMD_WRAPPED);
	if (mlx4_is_bonded(dev))
		err += mlx4_cmd(dev, mailbox->dma,
				MLX4_SET_PORT_GID_TABLE << 8 | 2,
				1, MLX4_CMD_SET_PORT, MLX4_CMD_TIME_CLASS_B,
				MLX4_CMD_WRAPPED);

	mlx4_free_cmd_mailbox(dev, mailbox);
	return err;
}

static int mlx4_ib_update_gids_v1_v2(struct gid_entry *gids,
				     struct mlx4_ib_dev *ibdev,
				     u8 port_num)
{
	struct mlx4_cmd_mailbox *mailbox;
	int err;
	struct mlx4_dev *dev = ibdev->dev;
	int i;
	struct {
		union ib_gid	gid;
		__be32		rsrvd1[2];
		__be16		rsrvd2;
		u8		type;
		u8		version;
		__be32		rsrvd3;
	} *gid_tbl;

	mailbox = mlx4_alloc_cmd_mailbox(dev);
	if (IS_ERR(mailbox))
		return -ENOMEM;

	gid_tbl = mailbox->buf;
	for (i = 0; i < MLX4_MAX_PORT_GIDS; ++i) {
		memcpy(&gid_tbl[i].gid, &gids[i].gid, sizeof(union ib_gid));
		if (gids[i].gid_type == IB_GID_TYPE_ROCE_UDP_ENCAP) {
			gid_tbl[i].version = 2;
			if (!ipv6_addr_v4mapped((struct in6_addr *)&gids[i].gid))
				gid_tbl[i].type = 1;
			else
				memset(&gid_tbl[i].gid, 0, 12);
		}
	}

	err = mlx4_cmd(dev, mailbox->dma,
		       MLX4_SET_PORT_ROCE_ADDR << 8 | port_num,
		       1, MLX4_CMD_SET_PORT, MLX4_CMD_TIME_CLASS_B,
		       MLX4_CMD_WRAPPED);
	if (mlx4_is_bonded(dev))
		err += mlx4_cmd(dev, mailbox->dma,
				MLX4_SET_PORT_ROCE_ADDR << 8 | 2,
				1, MLX4_CMD_SET_PORT, MLX4_CMD_TIME_CLASS_B,
				MLX4_CMD_WRAPPED);

	mlx4_free_cmd_mailbox(dev, mailbox);
	return err;
}

static int mlx4_ib_update_gids(struct gid_entry *gids,
			       struct mlx4_ib_dev *ibdev,
			       u8 port_num)
{
	if (ibdev->dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_ROCE_V1_V2)
		return mlx4_ib_update_gids_v1_v2(gids, ibdev, port_num);

	return mlx4_ib_update_gids_v1(gids, ibdev, port_num);
}

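/*
 * mlx4_ib_add_gid()/mlx4_ib_del_gid() maintain a reference-counted shadow
 * of the port GID table under iboe->lock.  The hardware table is only
 * reprogrammed, via mlx4_ib_update_gids(), when a new entry is added or
 * when the last reference to an entry goes away.
 */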
static int mlx4_ib_add_gid(struct ib_device *device,
			   u8 port_num,
			   unsigned int index,
			   const union ib_gid *gid,
			   const struct ib_gid_attr *attr,
			   void **context)
{
	struct mlx4_ib_dev *ibdev = to_mdev(device);
	struct mlx4_ib_iboe *iboe = &ibdev->iboe;
	struct mlx4_port_gid_table *port_gid_table;
	int free = -1, found = -1;
	int ret = 0;
	int hw_update = 0;
	int i;
	struct gid_entry *gids = NULL;

	if (!rdma_cap_roce_gid_table(device, port_num))
		return -EINVAL;

	if (port_num > MLX4_MAX_PORTS)
		return -EINVAL;

	if (!context)
		return -EINVAL;

	port_gid_table = &iboe->gids[port_num - 1];
	spin_lock_bh(&iboe->lock);
	for (i = 0; i < MLX4_MAX_PORT_GIDS; ++i) {
		if (!memcmp(&port_gid_table->gids[i].gid, gid, sizeof(*gid)) &&
		    (port_gid_table->gids[i].gid_type == attr->gid_type)) {
			found = i;
			break;
		}
		if (free < 0 && !memcmp(&port_gid_table->gids[i].gid, &zgid, sizeof(*gid)))
			free = i; /* HW has space */
	}

	if (found < 0) {
		if (free < 0) {
			ret = -ENOSPC;
		} else {
			port_gid_table->gids[free].ctx = kmalloc(sizeof(*port_gid_table->gids[free].ctx), GFP_ATOMIC);
			if (!port_gid_table->gids[free].ctx) {
				ret = -ENOMEM;
			} else {
				*context = port_gid_table->gids[free].ctx;
				memcpy(&port_gid_table->gids[free].gid, gid, sizeof(*gid));
				port_gid_table->gids[free].gid_type = attr->gid_type;
				port_gid_table->gids[free].ctx->real_index = free;
				port_gid_table->gids[free].ctx->refcount = 1;
				hw_update = 1;
			}
		}
	} else {
		struct gid_cache_context *ctx = port_gid_table->gids[found].ctx;
		*context = ctx;
		ctx->refcount++;
	}
	if (!ret && hw_update) {
		gids = kmalloc(sizeof(*gids) * MLX4_MAX_PORT_GIDS, GFP_ATOMIC);
		if (!gids) {
			ret = -ENOMEM;
		} else {
			for (i = 0; i < MLX4_MAX_PORT_GIDS; i++) {
				memcpy(&gids[i].gid, &port_gid_table->gids[i].gid, sizeof(union ib_gid));
				gids[i].gid_type = port_gid_table->gids[i].gid_type;
			}
		}
	}
	spin_unlock_bh(&iboe->lock);

	if (!ret && hw_update) {
		ret = mlx4_ib_update_gids(gids, ibdev, port_num);
		kfree(gids);
	}

	return ret;
}

static int mlx4_ib_del_gid(struct ib_device *device,
			   u8 port_num,
			   unsigned int index,
			   void **context)
{
	struct gid_cache_context *ctx = *context;
	struct mlx4_ib_dev *ibdev = to_mdev(device);
	struct mlx4_ib_iboe *iboe = &ibdev->iboe;
	struct mlx4_port_gid_table *port_gid_table;
	int ret = 0;
	int hw_update = 0;
	struct gid_entry *gids = NULL;

	if (!rdma_cap_roce_gid_table(device, port_num))
		return -EINVAL;

	if (port_num > MLX4_MAX_PORTS)
		return -EINVAL;

	port_gid_table = &iboe->gids[port_num - 1];
	spin_lock_bh(&iboe->lock);
	if (ctx) {
		ctx->refcount--;
		if (!ctx->refcount) {
			unsigned int real_index = ctx->real_index;

			memcpy(&port_gid_table->gids[real_index].gid, &zgid, sizeof(zgid));
			kfree(port_gid_table->gids[real_index].ctx);
			port_gid_table->gids[real_index].ctx = NULL;
			hw_update = 1;
		}
	}
	if (!ret && hw_update) {
		int i;

		gids = kmalloc(sizeof(*gids) * MLX4_MAX_PORT_GIDS, GFP_ATOMIC);
		if (!gids) {
			ret = -ENOMEM;
		} else {
			for (i = 0; i < MLX4_MAX_PORT_GIDS; i++)
				memcpy(&gids[i].gid, &port_gid_table->gids[i].gid, sizeof(union ib_gid));
		}
	}
	spin_unlock_bh(&iboe->lock);

	if (!ret && hw_update) {
		ret = mlx4_ib_update_gids(gids, ibdev, port_num);
		kfree(gids);
	}
	return ret;
}

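/*
 * Translate a GID index from the core GID cache into the index the hardware
 * actually uses.  For IB ports the index is returned unchanged; for RoCE
 * ports the shadow table is searched for an entry with a matching GID and
 * GID type.
 */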
int mlx4_ib_gid_index_to_real_index(struct mlx4_ib_dev *ibdev,
				    u8 port_num, int index)
{
	struct mlx4_ib_iboe *iboe = &ibdev->iboe;
	struct gid_cache_context *ctx = NULL;
	union ib_gid gid;
	struct mlx4_port_gid_table *port_gid_table;
	int real_index = -EINVAL;
	int i;
	int ret;
	unsigned long flags;
	struct ib_gid_attr attr;

	if (port_num > MLX4_MAX_PORTS)
		return -EINVAL;

	if (mlx4_is_bonded(ibdev->dev))
		port_num = 1;

	if (!rdma_cap_roce_gid_table(&ibdev->ib_dev, port_num))
		return index;

	ret = ib_get_cached_gid(&ibdev->ib_dev, port_num, index, &gid, &attr);
	if (ret)
		return ret;

	if (attr.ndev)
		dev_put(attr.ndev);

	if (!memcmp(&gid, &zgid, sizeof(gid)))
		return -EINVAL;

	spin_lock_irqsave(&iboe->lock, flags);
	port_gid_table = &iboe->gids[port_num - 1];

	for (i = 0; i < MLX4_MAX_PORT_GIDS; ++i)
		if (!memcmp(&port_gid_table->gids[i].gid, &gid, sizeof(gid)) &&
		    attr.gid_type == port_gid_table->gids[i].gid_type) {
			ctx = port_gid_table->gids[i].ctx;
			break;
		}
	if (ctx)
		real_index = ctx->real_index;
	spin_unlock_irqrestore(&iboe->lock, flags);
	return real_index;
}

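/*
 * mlx4_ib_query_device() builds struct ib_device_attr from the cached
 * device capabilities plus a NodeInfo MAD, and optionally fills the
 * extended uverbs response (hca_core_clock_offset, max_inl_recv_sz and
 * RSS capabilities) when the caller's udata buffer is large enough.
 */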
static int mlx4_ib_query_device(struct ib_device *ibdev,
				struct ib_device_attr *props,
				struct ib_udata *uhw)
{
	struct mlx4_ib_dev *dev = to_mdev(ibdev);
	struct ib_smp *in_mad = NULL;
	struct ib_smp *out_mad = NULL;
	int err;
	int have_ib_ports;
	struct mlx4_uverbs_ex_query_device cmd;
	struct mlx4_uverbs_ex_query_device_resp resp = {.comp_mask = 0};
	struct mlx4_clock_params clock_params;

	if (uhw->inlen) {
		if (uhw->inlen < sizeof(cmd))
			return -EINVAL;

		err = ib_copy_from_udata(&cmd, uhw, sizeof(cmd));
		if (err)
			return err;

		if (cmd.comp_mask)
			return -EINVAL;

		if (cmd.reserved)
			return -EINVAL;
	}

	resp.response_length = offsetof(typeof(resp), response_length) +
		sizeof(resp.response_length);
	in_mad = kzalloc(sizeof *in_mad, GFP_KERNEL);
	out_mad = kmalloc(sizeof *out_mad, GFP_KERNEL);
	err = -ENOMEM;
	if (!in_mad || !out_mad)
		goto out;

	init_query_mad(in_mad);
	in_mad->attr_id = IB_SMP_ATTR_NODE_INFO;

	err = mlx4_MAD_IFC(to_mdev(ibdev), MLX4_MAD_IFC_IGNORE_KEYS,
			   1, NULL, NULL, in_mad, out_mad);
	if (err)
		goto out;

	memset(props, 0, sizeof *props);

	have_ib_ports = num_ib_ports(dev->dev);

	props->fw_ver = dev->dev->caps.fw_ver;
	props->device_cap_flags = IB_DEVICE_CHANGE_PHY_PORT |
		IB_DEVICE_PORT_ACTIVE_EVENT |
		IB_DEVICE_SYS_IMAGE_GUID |
		IB_DEVICE_RC_RNR_NAK_GEN |
		IB_DEVICE_BLOCK_MULTICAST_LOOPBACK;
	if (dev->dev->caps.flags & MLX4_DEV_CAP_FLAG_BAD_PKEY_CNTR)
		props->device_cap_flags |= IB_DEVICE_BAD_PKEY_CNTR;
	if (dev->dev->caps.flags & MLX4_DEV_CAP_FLAG_BAD_QKEY_CNTR)
		props->device_cap_flags |= IB_DEVICE_BAD_QKEY_CNTR;
	if (dev->dev->caps.flags & MLX4_DEV_CAP_FLAG_APM && have_ib_ports)
		props->device_cap_flags |= IB_DEVICE_AUTO_PATH_MIG;
	if (dev->dev->caps.flags & MLX4_DEV_CAP_FLAG_UD_AV_PORT)
		props->device_cap_flags |= IB_DEVICE_UD_AV_PORT_ENFORCE;
	if (dev->dev->caps.flags & MLX4_DEV_CAP_FLAG_IPOIB_CSUM)
		props->device_cap_flags |= IB_DEVICE_UD_IP_CSUM;
	if (dev->dev->caps.max_gso_sz &&
	    (dev->dev->rev_id != MLX4_IB_CARD_REV_A0) &&
	    (dev->dev->caps.flags & MLX4_DEV_CAP_FLAG_BLH))
		props->device_cap_flags |= IB_DEVICE_UD_TSO;
	if (dev->dev->caps.bmme_flags & MLX4_BMME_FLAG_RESERVED_LKEY)
		props->device_cap_flags |= IB_DEVICE_LOCAL_DMA_LKEY;
	if ((dev->dev->caps.bmme_flags & MLX4_BMME_FLAG_LOCAL_INV) &&
	    (dev->dev->caps.bmme_flags & MLX4_BMME_FLAG_REMOTE_INV) &&
	    (dev->dev->caps.bmme_flags & MLX4_BMME_FLAG_FAST_REG_WR))
		props->device_cap_flags |= IB_DEVICE_MEM_MGT_EXTENSIONS;
	if (dev->dev->caps.flags & MLX4_DEV_CAP_FLAG_XRC)
		props->device_cap_flags |= IB_DEVICE_XRC;
	if (dev->dev->caps.flags & MLX4_DEV_CAP_FLAG_MEM_WINDOW)
		props->device_cap_flags |= IB_DEVICE_MEM_WINDOW;
	if (dev->dev->caps.bmme_flags & MLX4_BMME_FLAG_TYPE_2_WIN) {
		if (dev->dev->caps.bmme_flags & MLX4_BMME_FLAG_WIN_TYPE_2B)
			props->device_cap_flags |= IB_DEVICE_MEM_WINDOW_TYPE_2B;
		else
			props->device_cap_flags |= IB_DEVICE_MEM_WINDOW_TYPE_2A;
	}
	if (dev->steering_support == MLX4_STEERING_MODE_DEVICE_MANAGED)
		props->device_cap_flags |= IB_DEVICE_MANAGED_FLOW_STEERING;

	props->device_cap_flags |= IB_DEVICE_RAW_IP_CSUM;

	props->vendor_id = be32_to_cpup((__be32 *) (out_mad->data + 36)) &
		0xffffff;
	props->vendor_part_id = dev->dev->persist->pdev->device;
	props->hw_ver = be32_to_cpup((__be32 *) (out_mad->data + 32));
	memcpy(&props->sys_image_guid, out_mad->data + 4, 8);

	props->max_mr_size = ~0ull;
	props->page_size_cap = dev->dev->caps.page_size_cap;
	props->max_qp = dev->dev->quotas.qp;
	props->max_qp_wr = dev->dev->caps.max_wqes - MLX4_IB_SQ_MAX_SPARE;
	props->max_sge = min(dev->dev->caps.max_sq_sg,
			     dev->dev->caps.max_rq_sg);
	props->max_sge_rd = MLX4_MAX_SGE_RD;
	props->max_cq = dev->dev->quotas.cq;
	props->max_cqe = dev->dev->caps.max_cqes;
	props->max_mr = dev->dev->quotas.mpt;
	props->max_pd = dev->dev->caps.num_pds - dev->dev->caps.reserved_pds;
	props->max_qp_rd_atom = dev->dev->caps.max_qp_dest_rdma;
	props->max_qp_init_rd_atom = dev->dev->caps.max_qp_init_rdma;
	props->max_res_rd_atom = props->max_qp_rd_atom * props->max_qp;
	props->max_srq = dev->dev->quotas.srq;
	props->max_srq_wr = dev->dev->caps.max_srq_wqes - 1;
	props->max_srq_sge = dev->dev->caps.max_srq_sge;
	props->max_fast_reg_page_list_len = MLX4_MAX_FAST_REG_PAGES;
	props->local_ca_ack_delay = dev->dev->caps.local_ca_ack_delay;
	props->atomic_cap = dev->dev->caps.flags & MLX4_DEV_CAP_FLAG_ATOMIC ?
		IB_ATOMIC_HCA : IB_ATOMIC_NONE;
	props->masked_atomic_cap = props->atomic_cap;
	props->max_pkeys = dev->dev->caps.pkey_table_len[1];
	props->max_mcast_grp = dev->dev->caps.num_mgms + dev->dev->caps.num_amgms;
	props->max_mcast_qp_attach = dev->dev->caps.num_qp_per_mgm;
	props->max_total_mcast_qp_attach = props->max_mcast_qp_attach *
					   props->max_mcast_grp;
	props->max_map_per_fmr = dev->dev->caps.max_fmr_maps;
	props->hca_core_clock = dev->dev->caps.hca_core_clock * 1000UL;
	props->timestamp_mask = 0xFFFFFFFFFFFFULL;
	props->max_ah = INT_MAX;

	if ((dev->dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_RSS) &&
	    (mlx4_ib_port_link_layer(ibdev, 1) == IB_LINK_LAYER_ETHERNET ||
	     mlx4_ib_port_link_layer(ibdev, 2) == IB_LINK_LAYER_ETHERNET)) {
		props->rss_caps.max_rwq_indirection_tables = props->max_qp;
		props->rss_caps.max_rwq_indirection_table_size =
			dev->dev->caps.max_rss_tbl_sz;
		props->rss_caps.supported_qpts = 1 << IB_QPT_RAW_PACKET;
		props->max_wq_type_rq = props->max_qp;
	}

	props->cq_caps.max_cq_moderation_count = MLX4_MAX_CQ_COUNT;
	props->cq_caps.max_cq_moderation_period = MLX4_MAX_CQ_PERIOD;

	if (!mlx4_is_slave(dev->dev))
		err = mlx4_get_internal_clock_params(dev->dev, &clock_params);

	if (uhw->outlen >= resp.response_length + sizeof(resp.hca_core_clock_offset)) {
		resp.response_length += sizeof(resp.hca_core_clock_offset);
		if (!err && !mlx4_is_slave(dev->dev)) {
			resp.comp_mask |= QUERY_DEVICE_RESP_MASK_TIMESTAMP;
			resp.hca_core_clock_offset = clock_params.offset % PAGE_SIZE;
		}
	}

	if (uhw->outlen >= resp.response_length +
	    sizeof(resp.max_inl_recv_sz)) {
		resp.response_length += sizeof(resp.max_inl_recv_sz);
		resp.max_inl_recv_sz = dev->dev->caps.max_rq_sg *
			sizeof(struct mlx4_wqe_data_seg);
	}

	if (uhw->outlen >= resp.response_length + sizeof(resp.rss_caps)) {
		resp.response_length += sizeof(resp.rss_caps);
		if (props->rss_caps.supported_qpts) {
			resp.rss_caps.rx_hash_function =
				MLX4_IB_RX_HASH_FUNC_TOEPLITZ;

			resp.rss_caps.rx_hash_fields_mask =
				MLX4_IB_RX_HASH_SRC_IPV4 |
				MLX4_IB_RX_HASH_DST_IPV4 |
				MLX4_IB_RX_HASH_SRC_IPV6 |
				MLX4_IB_RX_HASH_DST_IPV6 |
				MLX4_IB_RX_HASH_SRC_PORT_TCP |
				MLX4_IB_RX_HASH_DST_PORT_TCP |
				MLX4_IB_RX_HASH_SRC_PORT_UDP |
				MLX4_IB_RX_HASH_DST_PORT_UDP;

			if (dev->dev->caps.tunnel_offload_mode ==
			    MLX4_TUNNEL_OFFLOAD_MODE_VXLAN)
				resp.rss_caps.rx_hash_fields_mask |=
					MLX4_IB_RX_HASH_INNER;
		}
	}

	if (uhw->outlen) {
		err = ib_copy_to_udata(uhw, &resp, resp.response_length);
		if (err)
			goto out;
	}
out:
	kfree(in_mad);
	kfree(out_mad);

	return err;
}

static enum rdma_link_layer
mlx4_ib_port_link_layer(struct ib_device *device, u8 port_num)
{
	struct mlx4_dev *dev = to_mdev(device)->dev;

	return dev->caps.port_mask[port_num] == MLX4_PORT_TYPE_IB ?
		IB_LINK_LAYER_INFINIBAND : IB_LINK_LAYER_ETHERNET;
}

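/*
 * IB ports are queried through PortInfo (and, for FDR-10, ExtendedPortInfo)
 * MADs; netw_view selects the network view of the tables when running on a
 * multifunction device.
 */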
static int ib_link_query_port(struct ib_device *ibdev, u8 port,
			      struct ib_port_attr *props, int netw_view)
{
	struct ib_smp *in_mad = NULL;
	struct ib_smp *out_mad = NULL;
	int ext_active_speed;
	int mad_ifc_flags = MLX4_MAD_IFC_IGNORE_KEYS;
	int err = -ENOMEM;

	in_mad = kzalloc(sizeof *in_mad, GFP_KERNEL);
	out_mad = kmalloc(sizeof *out_mad, GFP_KERNEL);
	if (!in_mad || !out_mad)
		goto out;

	init_query_mad(in_mad);
	in_mad->attr_id = IB_SMP_ATTR_PORT_INFO;
	in_mad->attr_mod = cpu_to_be32(port);

	if (mlx4_is_mfunc(to_mdev(ibdev)->dev) && netw_view)
		mad_ifc_flags |= MLX4_MAD_IFC_NET_VIEW;

	err = mlx4_MAD_IFC(to_mdev(ibdev), mad_ifc_flags, port, NULL, NULL,
			   in_mad, out_mad);
	if (err)
		goto out;

	props->lid = be16_to_cpup((__be16 *) (out_mad->data + 16));
	props->lmc = out_mad->data[34] & 0x7;
	props->sm_lid = be16_to_cpup((__be16 *) (out_mad->data + 18));
	props->sm_sl = out_mad->data[36] & 0xf;
	props->state = out_mad->data[32] & 0xf;
	props->phys_state = out_mad->data[33] >> 4;
	props->port_cap_flags = be32_to_cpup((__be32 *) (out_mad->data + 20));
	if (netw_view)
		props->gid_tbl_len = out_mad->data[50];
	else
		props->gid_tbl_len = to_mdev(ibdev)->dev->caps.gid_table_len[port];
	props->max_msg_sz = to_mdev(ibdev)->dev->caps.max_msg_sz;
	props->pkey_tbl_len = to_mdev(ibdev)->dev->caps.pkey_table_len[port];
	props->bad_pkey_cntr = be16_to_cpup((__be16 *) (out_mad->data + 46));
	props->qkey_viol_cntr = be16_to_cpup((__be16 *) (out_mad->data + 48));
	props->active_width = out_mad->data[31] & 0xf;
	props->active_speed = out_mad->data[35] >> 4;
	props->max_mtu = out_mad->data[41] & 0xf;
	props->active_mtu = out_mad->data[36] >> 4;
	props->subnet_timeout = out_mad->data[51] & 0x1f;
	props->max_vl_num = out_mad->data[37] >> 4;
	props->init_type_reply = out_mad->data[41] >> 4;

	/* Check if extended speeds (EDR/FDR/...) are supported */
	if (props->port_cap_flags & IB_PORT_EXTENDED_SPEEDS_SUP) {
		ext_active_speed = out_mad->data[62] >> 4;

		switch (ext_active_speed) {
		case 1:
			props->active_speed = IB_SPEED_FDR;
			break;
		case 2:
			props->active_speed = IB_SPEED_EDR;
			break;
		}
	}

	/* If reported active speed is QDR, check if is FDR-10 */
	if (props->active_speed == IB_SPEED_QDR) {
		init_query_mad(in_mad);
		in_mad->attr_id = MLX4_ATTR_EXTENDED_PORT_INFO;
		in_mad->attr_mod = cpu_to_be32(port);

		err = mlx4_MAD_IFC(to_mdev(ibdev), mad_ifc_flags, port,
				   NULL, NULL, in_mad, out_mad);
		if (err)
			goto out;

		/* Checking LinkSpeedActive for FDR-10 */
		if (out_mad->data[15] & 0x1)
			props->active_speed = IB_SPEED_FDR10;
	}

	/* Avoid wrong speed value returned by FW if the IB link is down. */
	if (props->state == IB_PORT_DOWN)
		props->active_speed = IB_SPEED_SDR;

out:
	kfree(in_mad);
	kfree(out_mad);
	return err;
}

static u8 state_to_phys_state(enum ib_port_state state)
{
	return state == IB_PORT_ACTIVE ? 5 : 3;
}

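/*
 * Ethernet (RoCE) ports have no PortInfo MADs; port attributes are derived
 * from the QUERY_PORT command and from the state of the underlying
 * net_device (or its bond master when the ports are bonded).
 */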
static int eth_link_query_port(struct ib_device *ibdev, u8 port,
			       struct ib_port_attr *props)
{
	struct mlx4_ib_dev *mdev = to_mdev(ibdev);
	struct mlx4_ib_iboe *iboe = &mdev->iboe;
	struct net_device *ndev;
	enum ib_mtu tmp;
	struct mlx4_cmd_mailbox *mailbox;
	int err = 0;
	int is_bonded = mlx4_is_bonded(mdev->dev);

	mailbox = mlx4_alloc_cmd_mailbox(mdev->dev);
	if (IS_ERR(mailbox))
		return PTR_ERR(mailbox);

	err = mlx4_cmd_box(mdev->dev, 0, mailbox->dma, port, 0,
			   MLX4_CMD_QUERY_PORT, MLX4_CMD_TIME_CLASS_B,
			   MLX4_CMD_WRAPPED);
	if (err)
		goto out;

	props->active_width = (((u8 *)mailbox->buf)[5] == 0x40) ||
			      (((u8 *)mailbox->buf)[5] == 0x20 /*56Gb*/) ?
			      IB_WIDTH_4X : IB_WIDTH_1X;
	props->active_speed = (((u8 *)mailbox->buf)[5] == 0x20 /*56Gb*/) ?
			      IB_SPEED_FDR : IB_SPEED_QDR;
	props->port_cap_flags = IB_PORT_CM_SUP | IB_PORT_IP_BASED_GIDS;
	props->gid_tbl_len = mdev->dev->caps.gid_table_len[port];
	props->max_msg_sz = mdev->dev->caps.max_msg_sz;
	props->pkey_tbl_len = 1;
	props->max_mtu = IB_MTU_4096;
	props->max_vl_num = 2;
	props->state = IB_PORT_DOWN;
	props->phys_state = state_to_phys_state(props->state);
	props->active_mtu = IB_MTU_256;
	spin_lock_bh(&iboe->lock);
	ndev = iboe->netdevs[port - 1];
	if (ndev && is_bonded) {
		rcu_read_lock(); /* required to get upper dev */
		ndev = netdev_master_upper_dev_get_rcu(ndev);
		rcu_read_unlock();
	}
	if (!ndev)
		goto out_unlock;

	tmp = iboe_get_mtu(ndev->mtu);
	props->active_mtu = tmp ? min(props->max_mtu, tmp) : IB_MTU_256;

	props->state = (netif_running(ndev) && netif_carrier_ok(ndev)) ?
					IB_PORT_ACTIVE : IB_PORT_DOWN;
	props->phys_state = state_to_phys_state(props->state);
out_unlock:
	spin_unlock_bh(&iboe->lock);
out:
	mlx4_free_cmd_mailbox(mdev->dev, mailbox);
	return err;
}

int __mlx4_ib_query_port(struct ib_device *ibdev, u8 port,
			 struct ib_port_attr *props, int netw_view)
{
	int err;

	/* props being zeroed by the caller, avoid zeroing it here */

	err = mlx4_ib_port_link_layer(ibdev, port) == IB_LINK_LAYER_INFINIBAND ?
		ib_link_query_port(ibdev, port, props, netw_view) :
		eth_link_query_port(ibdev, port, props);

	return err;
}

static int mlx4_ib_query_port(struct ib_device *ibdev, u8 port,
			      struct ib_port_attr *props)
{
	/* returns host view */
	return __mlx4_ib_query_port(ibdev, port, props, 0);
}

int __mlx4_ib_query_gid(struct ib_device *ibdev, u8 port, int index,
			union ib_gid *gid, int netw_view)
{
	struct ib_smp *in_mad = NULL;
	struct ib_smp *out_mad = NULL;
	int err = -ENOMEM;
	struct mlx4_ib_dev *dev = to_mdev(ibdev);
	int clear = 0;
	int mad_ifc_flags = MLX4_MAD_IFC_IGNORE_KEYS;

	in_mad = kzalloc(sizeof *in_mad, GFP_KERNEL);
	out_mad = kmalloc(sizeof *out_mad, GFP_KERNEL);
	if (!in_mad || !out_mad)
		goto out;

	init_query_mad(in_mad);
	in_mad->attr_id = IB_SMP_ATTR_PORT_INFO;
	in_mad->attr_mod = cpu_to_be32(port);

	if (mlx4_is_mfunc(dev->dev) && netw_view)
		mad_ifc_flags |= MLX4_MAD_IFC_NET_VIEW;

	err = mlx4_MAD_IFC(dev, mad_ifc_flags, port, NULL, NULL, in_mad, out_mad);
	if (err)
		goto out;

	memcpy(gid->raw, out_mad->data + 8, 8);

	if (mlx4_is_mfunc(dev->dev) && !netw_view) {
		if (index) {
			/* For any index > 0, return the null guid */
			err = 0;
			clear = 1;
			goto out;
		}
	}

	init_query_mad(in_mad);
	in_mad->attr_id = IB_SMP_ATTR_GUID_INFO;
	in_mad->attr_mod = cpu_to_be32(index / 8);

	err = mlx4_MAD_IFC(dev, mad_ifc_flags, port,
			   NULL, NULL, in_mad, out_mad);
	if (err)
		goto out;

	memcpy(gid->raw + 8, out_mad->data + (index % 8) * 8, 8);

out:
	if (clear)
		memset(gid->raw + 8, 0, 8);
	kfree(in_mad);
	kfree(out_mad);
	return err;
}

static int mlx4_ib_query_gid(struct ib_device *ibdev, u8 port, int index,
			     union ib_gid *gid)
{
	int ret;

	if (rdma_protocol_ib(ibdev, port))
		return __mlx4_ib_query_gid(ibdev, port, index, gid, 0);

	if (!rdma_protocol_roce(ibdev, port))
		return -ENODEV;

	if (!rdma_cap_roce_gid_table(ibdev, port))
		return -ENODEV;

	ret = ib_get_cached_gid(ibdev, port, index, gid, NULL);
	if (ret == -EAGAIN) {
		memcpy(gid, &zgid, sizeof(*gid));
		return 0;
	}

	return ret;
}

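/*
 * Read the SL-to-VL mapping of an IB port via the SLtoVLMappingTable
 * attribute.  Slaves cannot issue this MAD, so an all-zero table is
 * reported for them.
 */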
static int mlx4_ib_query_sl2vl(struct ib_device *ibdev, u8 port, u64 *sl2vl_tbl)
{
	union sl2vl_tbl_to_u64 sl2vl64;
	struct ib_smp *in_mad = NULL;
	struct ib_smp *out_mad = NULL;
	int mad_ifc_flags = MLX4_MAD_IFC_IGNORE_KEYS;
	int err = -ENOMEM;
	int jj;

	if (mlx4_is_slave(to_mdev(ibdev)->dev)) {
		*sl2vl_tbl = 0;
		return 0;
	}

	in_mad = kzalloc(sizeof(*in_mad), GFP_KERNEL);
	out_mad = kmalloc(sizeof(*out_mad), GFP_KERNEL);
	if (!in_mad || !out_mad)
		goto out;

	init_query_mad(in_mad);
	in_mad->attr_id = IB_SMP_ATTR_SL_TO_VL_TABLE;
	in_mad->attr_mod = 0;

	if (mlx4_is_mfunc(to_mdev(ibdev)->dev))
		mad_ifc_flags |= MLX4_MAD_IFC_NET_VIEW;

	err = mlx4_MAD_IFC(to_mdev(ibdev), mad_ifc_flags, port, NULL, NULL,
			   in_mad, out_mad);
	if (err)
		goto out;

	for (jj = 0; jj < 8; jj++)
		sl2vl64.sl8[jj] = ((struct ib_smp *)out_mad)->data[jj];
	*sl2vl_tbl = sl2vl64.sl64;

out:
	kfree(in_mad);
	kfree(out_mad);
	return err;
}

static void mlx4_init_sl2vl_tbl(struct mlx4_ib_dev *mdev)
{
	u64 sl2vl;
	int i;
	int err;

	for (i = 1; i <= mdev->dev->caps.num_ports; i++) {
		if (mdev->dev->caps.port_type[i] == MLX4_PORT_TYPE_ETH)
			continue;
		err = mlx4_ib_query_sl2vl(&mdev->ib_dev, i, &sl2vl);
		if (err) {
			pr_err("Unable to get default sl to vl mapping for port %d. Using all zeroes (%d)\n",
			       i, err);
			sl2vl = 0;
		}
		atomic64_set(&mdev->sl2vl[i - 1], sl2vl);
	}
}

int __mlx4_ib_query_pkey(struct ib_device *ibdev, u8 port, u16 index,
			 u16 *pkey, int netw_view)
{
	struct ib_smp *in_mad = NULL;
	struct ib_smp *out_mad = NULL;
	int mad_ifc_flags = MLX4_MAD_IFC_IGNORE_KEYS;
	int err = -ENOMEM;

	in_mad = kzalloc(sizeof *in_mad, GFP_KERNEL);
	out_mad = kmalloc(sizeof *out_mad, GFP_KERNEL);
	if (!in_mad || !out_mad)
		goto out;

	init_query_mad(in_mad);
	in_mad->attr_id = IB_SMP_ATTR_PKEY_TABLE;
	in_mad->attr_mod = cpu_to_be32(index / 32);

	if (mlx4_is_mfunc(to_mdev(ibdev)->dev) && netw_view)
		mad_ifc_flags |= MLX4_MAD_IFC_NET_VIEW;

	err = mlx4_MAD_IFC(to_mdev(ibdev), mad_ifc_flags, port, NULL, NULL,
			   in_mad, out_mad);
	if (err)
		goto out;

	*pkey = be16_to_cpu(((__be16 *) out_mad->data)[index % 32]);

out:
	kfree(in_mad);
	kfree(out_mad);
	return err;
}

static int mlx4_ib_query_pkey(struct ib_device *ibdev, u8 port, u16 index, u16 *pkey)
{
	return __mlx4_ib_query_pkey(ibdev, port, index, pkey, 0);
}

static int mlx4_ib_modify_device(struct ib_device *ibdev, int mask,
				 struct ib_device_modify *props)
{
	struct mlx4_cmd_mailbox *mailbox;
	unsigned long flags;

	if (mask & ~IB_DEVICE_MODIFY_NODE_DESC)
		return -EOPNOTSUPP;

	if (!(mask & IB_DEVICE_MODIFY_NODE_DESC))
		return 0;

	if (mlx4_is_slave(to_mdev(ibdev)->dev))
		return -EOPNOTSUPP;

	spin_lock_irqsave(&to_mdev(ibdev)->sm_lock, flags);
	memcpy(ibdev->node_desc, props->node_desc, IB_DEVICE_NODE_DESC_MAX);
	spin_unlock_irqrestore(&to_mdev(ibdev)->sm_lock, flags);

	/*
	 * If possible, pass node desc to FW, so it can generate
	 * a 144 trap. If cmd fails, just ignore.
	 */
	mailbox = mlx4_alloc_cmd_mailbox(to_mdev(ibdev)->dev);
	if (IS_ERR(mailbox))
		return 0;

	memcpy(mailbox->buf, props->node_desc, IB_DEVICE_NODE_DESC_MAX);
	mlx4_cmd(to_mdev(ibdev)->dev, mailbox->dma, 1, 0,
		 MLX4_CMD_SET_NODE, MLX4_CMD_TIME_CLASS_A, MLX4_CMD_NATIVE);

	mlx4_free_cmd_mailbox(to_mdev(ibdev)->dev, mailbox);

	return 0;
}

static int mlx4_ib_SET_PORT(struct mlx4_ib_dev *dev, u8 port, int reset_qkey_viols,
			    u32 cap_mask)
{
	struct mlx4_cmd_mailbox *mailbox;
	int err;

	mailbox = mlx4_alloc_cmd_mailbox(dev->dev);
	if (IS_ERR(mailbox))
		return PTR_ERR(mailbox);

	if (dev->dev->flags & MLX4_FLAG_OLD_PORT_CMDS) {
		*(u8 *) mailbox->buf = !!reset_qkey_viols << 6;
		((__be32 *) mailbox->buf)[2] = cpu_to_be32(cap_mask);
	} else {
		((u8 *) mailbox->buf)[3] = !!reset_qkey_viols;
		((__be32 *) mailbox->buf)[1] = cpu_to_be32(cap_mask);
	}

	err = mlx4_cmd(dev->dev, mailbox->dma, port, MLX4_SET_PORT_IB_OPCODE,
		       MLX4_CMD_SET_PORT, MLX4_CMD_TIME_CLASS_B,
		       MLX4_CMD_WRAPPED);

	mlx4_free_cmd_mailbox(dev->dev, mailbox);
	return err;
}

static int mlx4_ib_modify_port(struct ib_device *ibdev, u8 port, int mask,
			       struct ib_port_modify *props)
{
	struct mlx4_ib_dev *mdev = to_mdev(ibdev);
	u8 is_eth = mdev->dev->caps.port_type[port] == MLX4_PORT_TYPE_ETH;
	struct ib_port_attr attr;
	u32 cap_mask;
	int err;

	/* return OK if this is RoCE. CM calls ib_modify_port() regardless
	 * of whether port link layer is ETH or IB. For ETH ports, qkey
	 * violations and port capabilities are not meaningful.
	 */
	if (is_eth)
		return 0;

	mutex_lock(&mdev->cap_mask_mutex);

	err = ib_query_port(ibdev, port, &attr);
	if (err)
		goto out;

	cap_mask = (attr.port_cap_flags | props->set_port_cap_mask) &
		~props->clr_port_cap_mask;

	err = mlx4_ib_SET_PORT(mdev, port,
			       !!(mask & IB_PORT_RESET_QKEY_CNTR),
			       cap_mask);

out:
	mutex_unlock(&to_mdev(ibdev)->cap_mask_mutex);
	return err;
}

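/*
 * User contexts carry a UAR plus the doorbell-page and WQN-range lists.
 * The response format depends on the negotiated uverbs ABI: the old
 * MLX4_IB_UVERBS_NO_DEV_CAPS_ABI_VERSION layout omits dev_caps and
 * cqe_size.
 */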
static struct ib_ucontext *mlx4_ib_alloc_ucontext(struct ib_device *ibdev,
						  struct ib_udata *udata)
{
	struct mlx4_ib_dev *dev = to_mdev(ibdev);
	struct mlx4_ib_ucontext *context;
	struct mlx4_ib_alloc_ucontext_resp_v3 resp_v3;
	struct mlx4_ib_alloc_ucontext_resp resp;
	int err;

	if (!dev->ib_active)
		return ERR_PTR(-EAGAIN);

	if (ibdev->uverbs_abi_ver == MLX4_IB_UVERBS_NO_DEV_CAPS_ABI_VERSION) {
		resp_v3.qp_tab_size = dev->dev->caps.num_qps;
		resp_v3.bf_reg_size = dev->dev->caps.bf_reg_size;
		resp_v3.bf_regs_per_page = dev->dev->caps.bf_regs_per_page;
	} else {
		resp.dev_caps = dev->dev->caps.userspace_caps;
		resp.qp_tab_size = dev->dev->caps.num_qps;
		resp.bf_reg_size = dev->dev->caps.bf_reg_size;
		resp.bf_regs_per_page = dev->dev->caps.bf_regs_per_page;
		resp.cqe_size = dev->dev->caps.cqe_size;
	}

	context = kzalloc(sizeof(*context), GFP_KERNEL);
	if (!context)
		return ERR_PTR(-ENOMEM);

	err = mlx4_uar_alloc(to_mdev(ibdev)->dev, &context->uar);
	if (err) {
		kfree(context);
		return ERR_PTR(err);
	}

	INIT_LIST_HEAD(&context->db_page_list);
	mutex_init(&context->db_page_mutex);

	INIT_LIST_HEAD(&context->wqn_ranges_list);
	mutex_init(&context->wqn_ranges_mutex);

	if (ibdev->uverbs_abi_ver == MLX4_IB_UVERBS_NO_DEV_CAPS_ABI_VERSION)
		err = ib_copy_to_udata(udata, &resp_v3, sizeof(resp_v3));
	else
		err = ib_copy_to_udata(udata, &resp, sizeof(resp));

	if (err) {
		mlx4_uar_free(to_mdev(ibdev)->dev, &context->uar);
		kfree(context);
		return ERR_PTR(-EFAULT);
	}

	return &context->ibucontext;
}

static int mlx4_ib_dealloc_ucontext(struct ib_ucontext *ibcontext)
{
	struct mlx4_ib_ucontext *context = to_mucontext(ibcontext);

	mlx4_uar_free(to_mdev(ibcontext->device)->dev, &context->uar);
	kfree(context);

	return 0;
}

static void mlx4_ib_vma_open(struct vm_area_struct *area)
{
	/* vma_open is called when a new VMA is created on top of our VMA.
	 * This is done through either mremap flow or split_vma (usually due
	 * to mlock, madvise, munmap, etc.). We do not support a clone of the
	 * vma, as this VMA is strongly hardware related. Therefore we set the
	 * vm_ops of the newly created/cloned VMA to NULL, to prevent it from
	 * calling us again and trying to do incorrect actions. We assume that
	 * the original vma size is exactly a single page that there will be no
	 * "splitting" operations on.
	 */
	area->vm_ops = NULL;
}

static void mlx4_ib_vma_close(struct vm_area_struct *area)
{
	struct mlx4_ib_vma_private_data *mlx4_ib_vma_priv_data;

	/* It's guaranteed that all VMAs opened on a FD are closed before the
	 * file itself is closed, therefore no sync is needed with the regular
	 * closing flow. (e.g. mlx4_ib_dealloc_ucontext) However need a sync
	 * with accessing the vma as part of mlx4_ib_disassociate_ucontext.
	 * The close operation is usually called under mm->mmap_sem except when
	 * process is exiting. The exiting case is handled explicitly as part
	 * of mlx4_ib_disassociate_ucontext.
	 */
	mlx4_ib_vma_priv_data = (struct mlx4_ib_vma_private_data *)
				area->vm_private_data;

	/* set the vma context pointer to null in the mlx4_ib driver's private
	 * data to protect against a race condition in mlx4_ib_disassociate_ucontext().
	 */
	mlx4_ib_vma_priv_data->vma = NULL;
}

static const struct vm_operations_struct mlx4_ib_vm_ops = {
	.open = mlx4_ib_vma_open,
	.close = mlx4_ib_vma_close
};

static void mlx4_ib_disassociate_ucontext(struct ib_ucontext *ibcontext)
{
	int i;
	int ret = 0;
	struct vm_area_struct *vma;
	struct mlx4_ib_ucontext *context = to_mucontext(ibcontext);
	struct task_struct *owning_process = NULL;
	struct mm_struct *owning_mm = NULL;

	owning_process = get_pid_task(ibcontext->tgid, PIDTYPE_PID);
	if (!owning_process)
		return;

	owning_mm = get_task_mm(owning_process);
	if (!owning_mm) {
		pr_info("no mm, disassociate ucontext is pending task termination\n");
		while (1) {
			/* make sure that task is dead before returning, it may
			 * prevent a rare case of module down in parallel to a
			 * call to mlx4_ib_vma_close.
			 */
			put_task_struct(owning_process);
			usleep_range(1000, 2000);
			owning_process = get_pid_task(ibcontext->tgid,
						      PIDTYPE_PID);
			if (!owning_process ||
			    owning_process->state == TASK_DEAD) {
				pr_info("disassociate ucontext done, task was terminated\n");
				/* in case task was dead need to release the task struct */
				if (owning_process)
					put_task_struct(owning_process);
				return;
			}
		}
	}

	/* need to protect from a race on closing the vma as part of
	 * mlx4_ib_vma_close().
	 */
	down_write(&owning_mm->mmap_sem);
	for (i = 0; i < HW_BAR_COUNT; i++) {
		vma = context->hw_bar_info[i].vma;
		if (!vma)
			continue;

		ret = zap_vma_ptes(context->hw_bar_info[i].vma,
				   context->hw_bar_info[i].vma->vm_start,
				   PAGE_SIZE);
		if (ret) {
			pr_err("Error: zap_vma_ptes failed for index=%d, ret=%d\n", i, ret);
			BUG_ON(1);
		}

		context->hw_bar_info[i].vma->vm_flags &=
			~(VM_SHARED | VM_MAYSHARE);
		/* context going to be destroyed, should not access ops any more */
		context->hw_bar_info[i].vma->vm_ops = NULL;
	}

	up_write(&owning_mm->mmap_sem);
	mmput(owning_mm);
	put_task_struct(owning_process);
}

static void mlx4_ib_set_vma_data(struct vm_area_struct *vma,
				 struct mlx4_ib_vma_private_data *vma_private_data)
{
	vma_private_data->vma = vma;
	vma->vm_private_data = vma_private_data;
	vma->vm_ops = &mlx4_ib_vm_ops;
}

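/*
 * mmap offsets understood by the driver: page 0 maps the UAR doorbell page,
 * page 1 the blueflame register page (when bf_reg_size != 0), and page 3
 * the HCA core clock page.  Each may be mapped only once per context.
 */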
static int mlx4_ib_mmap(struct ib_ucontext *context, struct vm_area_struct *vma)
{
	struct mlx4_ib_dev *dev = to_mdev(context->device);
	struct mlx4_ib_ucontext *mucontext = to_mucontext(context);

	if (vma->vm_end - vma->vm_start != PAGE_SIZE)
		return -EINVAL;

	if (vma->vm_pgoff == 0) {
		/* We prevent double mmapping on same context */
		if (mucontext->hw_bar_info[HW_BAR_DB].vma)
			return -EINVAL;

		vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);

		if (io_remap_pfn_range(vma, vma->vm_start,
				       to_mucontext(context)->uar.pfn,
				       PAGE_SIZE, vma->vm_page_prot))
			return -EAGAIN;

		mlx4_ib_set_vma_data(vma, &mucontext->hw_bar_info[HW_BAR_DB]);

	} else if (vma->vm_pgoff == 1 && dev->dev->caps.bf_reg_size != 0) {
		/* We prevent double mmapping on same context */
		if (mucontext->hw_bar_info[HW_BAR_BF].vma)
			return -EINVAL;

		vma->vm_page_prot = pgprot_writecombine(vma->vm_page_prot);

		if (io_remap_pfn_range(vma, vma->vm_start,
				       to_mucontext(context)->uar.pfn +
				       dev->dev->caps.num_uars,
				       PAGE_SIZE, vma->vm_page_prot))
			return -EAGAIN;

		mlx4_ib_set_vma_data(vma, &mucontext->hw_bar_info[HW_BAR_BF]);

	} else if (vma->vm_pgoff == 3) {
		struct mlx4_clock_params params;
		int ret;

		/* We prevent double mmapping on same context */
		if (mucontext->hw_bar_info[HW_BAR_CLOCK].vma)
			return -EINVAL;

		ret = mlx4_get_internal_clock_params(dev->dev, &params);

		if (ret)
			return ret;

		vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
		if (io_remap_pfn_range(vma, vma->vm_start,
				       (pci_resource_start(dev->dev->persist->pdev,
							   params.bar) +
					params.offset)
				       >> PAGE_SHIFT,
				       PAGE_SIZE, vma->vm_page_prot))
			return -EAGAIN;

		mlx4_ib_set_vma_data(vma,
				     &mucontext->hw_bar_info[HW_BAR_CLOCK]);
	} else {
		return -EINVAL;
	}

	return 0;
}

1322
1323static struct ib_pd *mlx4_ib_alloc_pd(struct ib_device *ibdev,
1324 struct ib_ucontext *context,
1325 struct ib_udata *udata)
1326{
1327 struct mlx4_ib_pd *pd;
1328 int err;
1329
Steve Wise52924432018-03-01 13:58:20 -08001330 pd = kzalloc(sizeof(*pd), GFP_KERNEL);
Roland Dreier225c7b12007-05-08 18:00:38 -07001331 if (!pd)
1332 return ERR_PTR(-ENOMEM);
1333
1334 err = mlx4_pd_alloc(to_mdev(ibdev)->dev, &pd->pdn);
1335 if (err) {
1336 kfree(pd);
1337 return ERR_PTR(err);
1338 }
1339
1340 if (context)
1341 if (ib_copy_to_udata(udata, &pd->pdn, sizeof (__u32))) {
1342 mlx4_pd_free(to_mdev(ibdev)->dev, pd->pdn);
1343 kfree(pd);
1344 return ERR_PTR(-EFAULT);
1345 }
Roland Dreier225c7b12007-05-08 18:00:38 -07001346 return &pd->ibpd;
1347}
1348
1349static int mlx4_ib_dealloc_pd(struct ib_pd *pd)
1350{
1351 mlx4_pd_free(to_mdev(pd->device)->dev, to_mpd(pd)->pdn);
1352 kfree(pd);
1353
1354 return 0;
1355}
1356
Sean Hefty012a8ff2011-06-02 09:01:33 -07001357static struct ib_xrcd *mlx4_ib_alloc_xrcd(struct ib_device *ibdev,
1358 struct ib_ucontext *context,
1359 struct ib_udata *udata)
1360{
1361 struct mlx4_ib_xrcd *xrcd;
Matan Barak8e372102015-06-11 16:35:21 +03001362 struct ib_cq_init_attr cq_attr = {};
Sean Hefty012a8ff2011-06-02 09:01:33 -07001363 int err;
1364
1365 if (!(to_mdev(ibdev)->dev->caps.flags & MLX4_DEV_CAP_FLAG_XRC))
1366 return ERR_PTR(-ENOSYS);
1367
1368 xrcd = kmalloc(sizeof *xrcd, GFP_KERNEL);
1369 if (!xrcd)
1370 return ERR_PTR(-ENOMEM);
1371
1372 err = mlx4_xrcd_alloc(to_mdev(ibdev)->dev, &xrcd->xrcdn);
1373 if (err)
1374 goto err1;
1375
Christoph Hellwiged082d32016-09-05 12:56:17 +02001376 xrcd->pd = ib_alloc_pd(ibdev, 0);
Sean Hefty012a8ff2011-06-02 09:01:33 -07001377 if (IS_ERR(xrcd->pd)) {
1378 err = PTR_ERR(xrcd->pd);
1379 goto err2;
1380 }
1381
Matan Barak8e372102015-06-11 16:35:21 +03001382 cq_attr.cqe = 1;
1383 xrcd->cq = ib_create_cq(ibdev, NULL, NULL, xrcd, &cq_attr);
Sean Hefty012a8ff2011-06-02 09:01:33 -07001384 if (IS_ERR(xrcd->cq)) {
1385 err = PTR_ERR(xrcd->cq);
1386 goto err3;
1387 }
1388
1389 return &xrcd->ibxrcd;
1390
1391err3:
1392 ib_dealloc_pd(xrcd->pd);
1393err2:
1394 mlx4_xrcd_free(to_mdev(ibdev)->dev, xrcd->xrcdn);
1395err1:
1396 kfree(xrcd);
1397 return ERR_PTR(err);
1398}
1399
1400static int mlx4_ib_dealloc_xrcd(struct ib_xrcd *xrcd)
1401{
1402 ib_destroy_cq(to_mxrcd(xrcd)->cq);
1403 ib_dealloc_pd(to_mxrcd(xrcd)->pd);
1404 mlx4_xrcd_free(to_mdev(xrcd->device)->dev, to_mxrcd(xrcd)->xrcdn);
1405 kfree(xrcd);
1406
1407 return 0;
1408}
1409
Eli Cohenfa417f72010-10-24 21:08:52 -07001410static int add_gid_entry(struct ib_qp *ibqp, union ib_gid *gid)
1411{
1412 struct mlx4_ib_qp *mqp = to_mqp(ibqp);
1413 struct mlx4_ib_dev *mdev = to_mdev(ibqp->device);
1414 struct mlx4_ib_gid_entry *ge;
1415
1416 ge = kzalloc(sizeof *ge, GFP_KERNEL);
1417 if (!ge)
1418 return -ENOMEM;
1419
1420 ge->gid = *gid;
1421 if (mlx4_ib_add_mc(mdev, mqp, gid)) {
1422 ge->port = mqp->port;
1423 ge->added = 1;
1424 }
1425
1426 mutex_lock(&mqp->mutex);
1427 list_add_tail(&ge->list, &mqp->gid_list);
1428 mutex_unlock(&mqp->mutex);
1429
1430 return 0;
1431}
1432
Eran Ben Elisha3ba8e312015-10-15 14:44:40 +03001433static void mlx4_ib_delete_counters_table(struct mlx4_ib_dev *ibdev,
1434 struct mlx4_ib_counters *ctr_table)
1435{
1436 struct counter_index *counter, *tmp_count;
1437
1438 mutex_lock(&ctr_table->mutex);
1439 list_for_each_entry_safe(counter, tmp_count, &ctr_table->counters_list,
1440 list) {
1441 if (counter->allocated)
1442 mlx4_counter_free(ibdev->dev, counter->index);
1443 list_del(&counter->list);
1444 kfree(counter);
1445 }
1446 mutex_unlock(&ctr_table->mutex);
1447}
1448
Eli Cohenfa417f72010-10-24 21:08:52 -07001449int mlx4_ib_add_mc(struct mlx4_ib_dev *mdev, struct mlx4_ib_qp *mqp,
1450 union ib_gid *gid)
1451{
Eli Cohenfa417f72010-10-24 21:08:52 -07001452 struct net_device *ndev;
1453 int ret = 0;
1454
1455 if (!mqp->port)
1456 return 0;
1457
Jack Morgensteindba3ad22014-08-21 14:28:41 +03001458 spin_lock_bh(&mdev->iboe.lock);
Eli Cohenfa417f72010-10-24 21:08:52 -07001459 ndev = mdev->iboe.netdevs[mqp->port - 1];
1460 if (ndev)
1461 dev_hold(ndev);
Jack Morgensteindba3ad22014-08-21 14:28:41 +03001462 spin_unlock_bh(&mdev->iboe.lock);
Eli Cohenfa417f72010-10-24 21:08:52 -07001463
1464 if (ndev) {
Eli Cohenfa417f72010-10-24 21:08:52 -07001465 ret = 1;
Eli Cohenfa417f72010-10-24 21:08:52 -07001466 dev_put(ndev);
1467 }
1468
1469 return ret;
1470}
1471
Hadar Hen Zion0ff1fb62012-07-05 04:03:46 +00001472struct mlx4_ib_steering {
1473 struct list_head list;
Moni Shoua146d6e12015-02-03 16:48:38 +02001474 struct mlx4_flow_reg_id reg_id;
Hadar Hen Zion0ff1fb62012-07-05 04:03:46 +00001475 union ib_gid gid;
1476};
1477
Maor Gottlieb1f02a092016-08-30 16:58:30 +03001478#define LAST_ETH_FIELD vlan_tag
1479#define LAST_IB_FIELD sl
1480#define LAST_IPV4_FIELD dst_ip
1481#define LAST_TCP_UDP_FIELD src_port
1482
1483/* Field is the last supported field */
1484#define FIELDS_NOT_SUPPORTED(filter, field)\
1485 memchr_inv((void *)&filter.field +\
1486 sizeof(filter.field), 0,\
1487 sizeof(filter) -\
1488 offsetof(typeof(filter), field) -\
1489 sizeof(filter.field))
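/*
 * FIELDS_NOT_SUPPORTED() is non-zero when the mask asks to match on
 * anything beyond the last field this driver can program for the given
 * layer: it scans the tail of the mask structure for non-zero bytes.
 */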
1490
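/*
 * Translate one ib_flow_spec into the firmware's _rule_hw layout in
 * the mailbox.  Returns the hardware rule size in bytes on success, or
 * a negative errno for unsupported or malformed specs.
 */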
Hadar Hen Zionf77c0162013-08-14 13:58:31 +03001491static int parse_flow_attr(struct mlx4_dev *dev,
Matan Baraka37a1a42013-11-07 15:25:16 +02001492 u32 qp_num,
Hadar Hen Zionf77c0162013-08-14 13:58:31 +03001493 union ib_flow_spec *ib_spec,
1494 struct _rule_hw *mlx4_spec)
1495{
1496 enum mlx4_net_trans_rule_id type;
1497
1498 switch (ib_spec->type) {
1499 case IB_FLOW_SPEC_ETH:
Maor Gottlieb1f02a092016-08-30 16:58:30 +03001500 if (FIELDS_NOT_SUPPORTED(ib_spec->eth.mask, LAST_ETH_FIELD))
1501 return -ENOTSUPP;
1502
Hadar Hen Zionf77c0162013-08-14 13:58:31 +03001503 type = MLX4_NET_TRANS_RULE_ID_ETH;
1504 memcpy(mlx4_spec->eth.dst_mac, ib_spec->eth.val.dst_mac,
1505 ETH_ALEN);
1506 memcpy(mlx4_spec->eth.dst_mac_msk, ib_spec->eth.mask.dst_mac,
1507 ETH_ALEN);
1508 mlx4_spec->eth.vlan_tag = ib_spec->eth.val.vlan_tag;
1509 mlx4_spec->eth.vlan_tag_msk = ib_spec->eth.mask.vlan_tag;
1510 break;
Matan Baraka37a1a42013-11-07 15:25:16 +02001511 case IB_FLOW_SPEC_IB:
Maor Gottlieb1f02a092016-08-30 16:58:30 +03001512 if (FIELDS_NOT_SUPPORTED(ib_spec->ib.mask, LAST_IB_FIELD))
1513 return -ENOTSUPP;
1514
Matan Baraka37a1a42013-11-07 15:25:16 +02001515 type = MLX4_NET_TRANS_RULE_ID_IB;
1516 mlx4_spec->ib.l3_qpn =
1517 cpu_to_be32(qp_num);
1518 mlx4_spec->ib.qpn_mask =
1519 cpu_to_be32(MLX4_IB_FLOW_QPN_MASK);
1520 break;
1521
Hadar Hen Zionf77c0162013-08-14 13:58:31 +03001522
1523 case IB_FLOW_SPEC_IPV4:
Maor Gottlieb1f02a092016-08-30 16:58:30 +03001524 if (FIELDS_NOT_SUPPORTED(ib_spec->ipv4.mask, LAST_IPV4_FIELD))
1525 return -ENOTSUPP;
1526
Hadar Hen Zionf77c0162013-08-14 13:58:31 +03001527 type = MLX4_NET_TRANS_RULE_ID_IPV4;
1528 mlx4_spec->ipv4.src_ip = ib_spec->ipv4.val.src_ip;
1529 mlx4_spec->ipv4.src_ip_msk = ib_spec->ipv4.mask.src_ip;
1530 mlx4_spec->ipv4.dst_ip = ib_spec->ipv4.val.dst_ip;
1531 mlx4_spec->ipv4.dst_ip_msk = ib_spec->ipv4.mask.dst_ip;
1532 break;
1533
1534 case IB_FLOW_SPEC_TCP:
1535 case IB_FLOW_SPEC_UDP:
Maor Gottlieb1f02a092016-08-30 16:58:30 +03001536 if (FIELDS_NOT_SUPPORTED(ib_spec->tcp_udp.mask, LAST_TCP_UDP_FIELD))
1537 return -ENOTSUPP;
1538
Hadar Hen Zionf77c0162013-08-14 13:58:31 +03001539 type = ib_spec->type == IB_FLOW_SPEC_TCP ?
1540 MLX4_NET_TRANS_RULE_ID_TCP :
1541 MLX4_NET_TRANS_RULE_ID_UDP;
1542 mlx4_spec->tcp_udp.dst_port = ib_spec->tcp_udp.val.dst_port;
1543 mlx4_spec->tcp_udp.dst_port_msk = ib_spec->tcp_udp.mask.dst_port;
1544 mlx4_spec->tcp_udp.src_port = ib_spec->tcp_udp.val.src_port;
1545 mlx4_spec->tcp_udp.src_port_msk = ib_spec->tcp_udp.mask.src_port;
1546 break;
1547
1548 default:
1549 return -EINVAL;
1550 }
1551 if (mlx4_map_sw_to_hw_steering_id(dev, type) < 0 ||
1552 mlx4_hw_rule_sz(dev, type) < 0)
1553 return -EINVAL;
1554 mlx4_spec->id = cpu_to_be16(mlx4_map_sw_to_hw_steering_id(dev, type));
1555 mlx4_spec->size = mlx4_hw_rule_sz(dev, type) >> 2;
1556 return mlx4_hw_rule_sz(dev, type);
1557}
1558
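/*
 * Default rules: on an InfiniBand link layer, a flow that specifies
 * IPv4 but no L2 spec gets an implicit IB spec created ahead of the
 * user's specs (see __mlx4_ib_default_rules_match() and
 * __mlx4_ib_create_default_rules() below) so the rule can still be
 * programmed.
 */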
Matan Baraka37a1a42013-11-07 15:25:16 +02001559struct default_rules {
1560 __u32 mandatory_fields[IB_FLOW_SPEC_SUPPORT_LAYERS];
1561 __u32 mandatory_not_fields[IB_FLOW_SPEC_SUPPORT_LAYERS];
1562 __u32 rules_create_list[IB_FLOW_SPEC_SUPPORT_LAYERS];
1563 __u8 link_layer;
1564};
1565static const struct default_rules default_table[] = {
1566 {
1567 .mandatory_fields = {IB_FLOW_SPEC_IPV4},
1568 .mandatory_not_fields = {IB_FLOW_SPEC_ETH},
1569 .rules_create_list = {IB_FLOW_SPEC_IB},
1570 .link_layer = IB_LINK_LAYER_INFINIBAND
1571 }
1572};
1573
1574static int __mlx4_ib_default_rules_match(struct ib_qp *qp,
1575 struct ib_flow_attr *flow_attr)
1576{
1577 int i, j, k;
1578 void *ib_flow;
1579 const struct default_rules *pdefault_rules = default_table;
1580 u8 link_layer = rdma_port_get_link_layer(qp->device, flow_attr->port);
1581
Fabian Fredericka57f23f2014-08-12 19:20:07 -04001582 for (i = 0; i < ARRAY_SIZE(default_table); i++, pdefault_rules++) {
Matan Baraka37a1a42013-11-07 15:25:16 +02001583 __u32 field_types[IB_FLOW_SPEC_SUPPORT_LAYERS];
1584 memset(&field_types, 0, sizeof(field_types));
1585
1586 if (link_layer != pdefault_rules->link_layer)
1587 continue;
1588
1589 ib_flow = flow_attr + 1;
1590 /* we assume the specs are sorted */
1591 for (j = 0, k = 0; k < IB_FLOW_SPEC_SUPPORT_LAYERS &&
1592 j < flow_attr->num_of_specs; k++) {
1593 union ib_flow_spec *current_flow =
1594 (union ib_flow_spec *)ib_flow;
1595
1596 /* same layer but different type */
1597 if (((current_flow->type & IB_FLOW_SPEC_LAYER_MASK) ==
1598 (pdefault_rules->mandatory_fields[k] &
1599 IB_FLOW_SPEC_LAYER_MASK)) &&
1600 (current_flow->type !=
1601 pdefault_rules->mandatory_fields[k]))
1602 goto out;
1603
1604 /* same layer, try match next one */
1605 if (current_flow->type ==
1606 pdefault_rules->mandatory_fields[k]) {
1607 j++;
1608 ib_flow +=
1609 ((union ib_flow_spec *)ib_flow)->size;
1610 }
1611 }
1612
1613 ib_flow = flow_attr + 1;
1614 for (j = 0; j < flow_attr->num_of_specs;
1615 j++, ib_flow += ((union ib_flow_spec *)ib_flow)->size)
1616 for (k = 0; k < IB_FLOW_SPEC_SUPPORT_LAYERS; k++)
1617 /* same layer and same type */
1618 if (((union ib_flow_spec *)ib_flow)->type ==
1619 pdefault_rules->mandatory_not_fields[k])
1620 goto out;
1621
1622 return i;
1623 }
1624out:
1625 return -1;
1626}
1627
1628static int __mlx4_ib_create_default_rules(
1629 struct mlx4_ib_dev *mdev,
1630 struct ib_qp *qp,
1631 const struct default_rules *pdefault_rules,
1632 struct _rule_hw *mlx4_spec) {
1633 int size = 0;
1634 int i;
1635
Fabian Fredericka57f23f2014-08-12 19:20:07 -04001636 for (i = 0; i < ARRAY_SIZE(pdefault_rules->rules_create_list); i++) {
Matan Baraka37a1a42013-11-07 15:25:16 +02001637 int ret;
1638 union ib_flow_spec ib_spec;
1639 switch (pdefault_rules->rules_create_list[i]) {
1640 case 0:
1641 /* no rule */
1642 continue;
1643 case IB_FLOW_SPEC_IB:
1644 ib_spec.type = IB_FLOW_SPEC_IB;
1645 ib_spec.size = sizeof(struct ib_flow_spec_ib);
1646
1647 break;
1648 default:
1649 /* invalid rule */
1650 return -EINVAL;
1651 }
 1652		/* We must put an empty rule here; the qpn is ignored */
1653 ret = parse_flow_attr(mdev->dev, 0, &ib_spec,
1654 mlx4_spec);
1655 if (ret < 0) {
1656 pr_info("invalid parsing\n");
1657 return -EINVAL;
1658 }
1659
1660 mlx4_spec = (void *)mlx4_spec + ret;
1661 size += ret;
1662 }
1663 return size;
1664}
1665
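/*
 * Build and post the attach command: a mlx4_net_trans_rule_hw_ctrl
 * header (domain/priority, steering type, port and QPN) followed by
 * any default rules and the translated user specs, issued with
 * MLX4_QP_FLOW_STEERING_ATTACH.  The returned 64-bit reg_id is what
 * __mlx4_ib_destroy_flow() later uses to detach the rule.
 */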
Hadar Hen Zionf77c0162013-08-14 13:58:31 +03001666static int __mlx4_ib_create_flow(struct ib_qp *qp, struct ib_flow_attr *flow_attr,
1667 int domain,
1668 enum mlx4_net_trans_promisc_mode flow_type,
1669 u64 *reg_id)
1670{
1671 int ret, i;
1672 int size = 0;
1673 void *ib_flow;
1674 struct mlx4_ib_dev *mdev = to_mdev(qp->device);
1675 struct mlx4_cmd_mailbox *mailbox;
1676 struct mlx4_net_trans_rule_hw_ctrl *ctrl;
Matan Baraka37a1a42013-11-07 15:25:16 +02001677 int default_flow;
Hadar Hen Zionf77c0162013-08-14 13:58:31 +03001678
1679 static const u16 __mlx4_domain[] = {
1680 [IB_FLOW_DOMAIN_USER] = MLX4_DOMAIN_UVERBS,
1681 [IB_FLOW_DOMAIN_ETHTOOL] = MLX4_DOMAIN_ETHTOOL,
1682 [IB_FLOW_DOMAIN_RFS] = MLX4_DOMAIN_RFS,
1683 [IB_FLOW_DOMAIN_NIC] = MLX4_DOMAIN_NIC,
1684 };
1685
1686 if (flow_attr->priority > MLX4_IB_FLOW_MAX_PRIO) {
1687 pr_err("Invalid priority value %d\n", flow_attr->priority);
1688 return -EINVAL;
1689 }
1690
1691 if (domain >= IB_FLOW_DOMAIN_NUM) {
1692 pr_err("Invalid domain value %d\n", domain);
1693 return -EINVAL;
1694 }
1695
1696 if (mlx4_map_sw_to_hw_steering_mode(mdev->dev, flow_type) < 0)
1697 return -EINVAL;
1698
1699 mailbox = mlx4_alloc_cmd_mailbox(mdev->dev);
1700 if (IS_ERR(mailbox))
1701 return PTR_ERR(mailbox);
Hadar Hen Zionf77c0162013-08-14 13:58:31 +03001702 ctrl = mailbox->buf;
1703
1704 ctrl->prio = cpu_to_be16(__mlx4_domain[domain] |
1705 flow_attr->priority);
1706 ctrl->type = mlx4_map_sw_to_hw_steering_mode(mdev->dev, flow_type);
1707 ctrl->port = flow_attr->port;
1708 ctrl->qpn = cpu_to_be32(qp->qp_num);
1709
1710 ib_flow = flow_attr + 1;
1711 size += sizeof(struct mlx4_net_trans_rule_hw_ctrl);
Matan Baraka37a1a42013-11-07 15:25:16 +02001712 /* Add default flows */
1713 default_flow = __mlx4_ib_default_rules_match(qp, flow_attr);
1714 if (default_flow >= 0) {
1715 ret = __mlx4_ib_create_default_rules(
1716 mdev, qp, default_table + default_flow,
1717 mailbox->buf + size);
1718 if (ret < 0) {
1719 mlx4_free_cmd_mailbox(mdev->dev, mailbox);
1720 return -EINVAL;
1721 }
1722 size += ret;
1723 }
Hadar Hen Zionf77c0162013-08-14 13:58:31 +03001724 for (i = 0; i < flow_attr->num_of_specs; i++) {
Matan Baraka37a1a42013-11-07 15:25:16 +02001725 ret = parse_flow_attr(mdev->dev, qp->qp_num, ib_flow,
1726 mailbox->buf + size);
Hadar Hen Zionf77c0162013-08-14 13:58:31 +03001727 if (ret < 0) {
1728 mlx4_free_cmd_mailbox(mdev->dev, mailbox);
1729 return -EINVAL;
1730 }
1731 ib_flow += ((union ib_flow_spec *) ib_flow)->size;
1732 size += ret;
1733 }
1734
Jack Morgenstein10b1c042016-12-29 18:37:13 +02001735 if (mlx4_is_master(mdev->dev) && flow_type == MLX4_FS_REGULAR &&
1736 flow_attr->num_of_specs == 1) {
1737 struct _rule_hw *rule_header = (struct _rule_hw *)(ctrl + 1);
1738 enum ib_flow_spec_type header_spec =
1739 ((union ib_flow_spec *)(flow_attr + 1))->type;
1740
1741 if (header_spec == IB_FLOW_SPEC_ETH)
1742 mlx4_handle_eth_header_mcast_prio(ctrl, rule_header);
1743 }
1744
Hadar Hen Zionf77c0162013-08-14 13:58:31 +03001745 ret = mlx4_cmd_imm(mdev->dev, mailbox->dma, reg_id, size >> 2, 0,
1746 MLX4_QP_FLOW_STEERING_ATTACH, MLX4_CMD_TIME_CLASS_A,
Jack Morgenstein10b1c042016-12-29 18:37:13 +02001747 MLX4_CMD_NATIVE);
Hadar Hen Zionf77c0162013-08-14 13:58:31 +03001748 if (ret == -ENOMEM)
1749 pr_err("mcg table is full. Fail to register network rule.\n");
1750 else if (ret == -ENXIO)
1751 pr_err("Device managed flow steering is disabled. Fail to register network rule.\n");
1752 else if (ret)
Colin Ian King35fc7b72016-04-25 20:26:50 +01001753 pr_err("Invalid argument. Fail to register network rule.\n");
Hadar Hen Zionf77c0162013-08-14 13:58:31 +03001754
1755 mlx4_free_cmd_mailbox(mdev->dev, mailbox);
1756 return ret;
1757}
1758
1759static int __mlx4_ib_destroy_flow(struct mlx4_dev *dev, u64 reg_id)
1760{
1761 int err;
1762 err = mlx4_cmd(dev, reg_id, 0, 0,
1763 MLX4_QP_FLOW_STEERING_DETACH, MLX4_CMD_TIME_CLASS_A,
Jack Morgenstein10b1c042016-12-29 18:37:13 +02001764 MLX4_CMD_NATIVE);
Hadar Hen Zionf77c0162013-08-14 13:58:31 +03001765 if (err)
1766 pr_err("Fail to detach network rule. registration id = 0x%llx\n",
1767 reg_id);
1768 return err;
1769}
1770
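/*
 * With VXLAN tunnel offload enabled (and not in A0 static steering
 * mode), a flow with a single ETH spec also gets a tunnel steering
 * rule keyed on the destination MAC so encapsulated traffic is steered
 * to the same QP.
 */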
Or Gerlitzd2fce8a2014-08-27 16:47:49 +03001771static int mlx4_ib_tunnel_steer_add(struct ib_qp *qp, struct ib_flow_attr *flow_attr,
1772 u64 *reg_id)
1773{
1774 void *ib_flow;
1775 union ib_flow_spec *ib_spec;
1776 struct mlx4_dev *dev = to_mdev(qp->device)->dev;
1777 int err = 0;
1778
Or Gerlitz5eff6da2015-01-15 15:28:54 +02001779 if (dev->caps.tunnel_offload_mode != MLX4_TUNNEL_OFFLOAD_MODE_VXLAN ||
1780 dev->caps.dmfs_high_steer_mode == MLX4_STEERING_DMFS_A0_STATIC)
Or Gerlitzd2fce8a2014-08-27 16:47:49 +03001781 return 0; /* do nothing */
1782
1783 ib_flow = flow_attr + 1;
1784 ib_spec = (union ib_flow_spec *)ib_flow;
1785
1786 if (ib_spec->type != IB_FLOW_SPEC_ETH || flow_attr->num_of_specs != 1)
1787 return 0; /* do nothing */
1788
1789 err = mlx4_tunnel_steer_add(to_mdev(qp->device)->dev, ib_spec->eth.val.dst_mac,
1790 flow_attr->port, qp->qp_num,
1791 MLX4_DOMAIN_UVERBS | (flow_attr->priority & 0xff),
1792 reg_id);
1793 return err;
1794}
1795
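/*
 * Pick the sniffer rule type(s) for a don't-trap flow: with no spec or
 * an all-zero MAC mask both MC and UC sniffers are used; with a mask
 * covering only the multicast bit, the choice follows the MC bit of
 * the destination MAC value.
 */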
Marina Varshaver0e451e82016-02-18 18:31:06 +02001796static int mlx4_ib_add_dont_trap_rule(struct mlx4_dev *dev,
1797 struct ib_flow_attr *flow_attr,
1798 enum mlx4_net_trans_promisc_mode *type)
1799{
1800 int err = 0;
1801
1802 if (!(dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_DMFS_UC_MC_SNIFFER) ||
1803 (dev->caps.dmfs_high_steer_mode == MLX4_STEERING_DMFS_A0_STATIC) ||
1804 (flow_attr->num_of_specs > 1) || (flow_attr->priority != 0)) {
1805 return -EOPNOTSUPP;
1806 }
1807
1808 if (flow_attr->num_of_specs == 0) {
1809 type[0] = MLX4_FS_MC_SNIFFER;
1810 type[1] = MLX4_FS_UC_SNIFFER;
1811 } else {
1812 union ib_flow_spec *ib_spec;
1813
1814 ib_spec = (union ib_flow_spec *)(flow_attr + 1);
1815 if (ib_spec->type != IB_FLOW_SPEC_ETH)
1816 return -EINVAL;
1817
 1818		/* if the mask is all zeros, sniff both MC and UC */
1819 if (is_zero_ether_addr(ib_spec->eth.mask.dst_mac)) {
1820 type[0] = MLX4_FS_MC_SNIFFER;
1821 type[1] = MLX4_FS_UC_SNIFFER;
1822 } else {
1823 u8 mac[ETH_ALEN] = {ib_spec->eth.mask.dst_mac[0] ^ 0x01,
1824 ib_spec->eth.mask.dst_mac[1],
1825 ib_spec->eth.mask.dst_mac[2],
1826 ib_spec->eth.mask.dst_mac[3],
1827 ib_spec->eth.mask.dst_mac[4],
1828 ib_spec->eth.mask.dst_mac[5]};
1829
 1830			/* The xor above touched only the multicast bit; a non-empty
 1831			 * mask is valid only if that bit is set and the rest are zero.
 1832			 */
1833 if (!is_zero_ether_addr(&mac[0]))
1834 return -EINVAL;
1835
1836 if (is_multicast_ether_addr(ib_spec->eth.val.dst_mac))
1837 type[0] = MLX4_FS_MC_SNIFFER;
1838 else
1839 type[0] = MLX4_FS_UC_SNIFFER;
1840 }
1841 }
1842
1843 return err;
1844}
1845
Hadar Hen Zionf77c0162013-08-14 13:58:31 +03001846static struct ib_flow *mlx4_ib_create_flow(struct ib_qp *qp,
1847 struct ib_flow_attr *flow_attr,
1848 int domain)
1849{
Moni Shoua146d6e12015-02-03 16:48:38 +02001850 int err = 0, i = 0, j = 0;
Hadar Hen Zionf77c0162013-08-14 13:58:31 +03001851 struct mlx4_ib_flow *mflow;
1852 enum mlx4_net_trans_promisc_mode type[2];
Moni Shoua146d6e12015-02-03 16:48:38 +02001853 struct mlx4_dev *dev = (to_mdev(qp->device))->dev;
1854 int is_bonded = mlx4_is_bonded(dev);
Hadar Hen Zionf77c0162013-08-14 13:58:31 +03001855
Yishai Hadas5533c182016-06-22 17:27:30 +03001856 if (flow_attr->port < 1 || flow_attr->port > qp->device->phys_port_cnt)
1857 return ERR_PTR(-EINVAL);
1858
Marina Varshaver0e451e82016-02-18 18:31:06 +02001859 if ((flow_attr->flags & IB_FLOW_ATTR_FLAGS_DONT_TRAP) &&
1860 (flow_attr->type != IB_FLOW_ATTR_NORMAL))
Marina Varshavera3100a72016-02-18 18:31:05 +02001861 return ERR_PTR(-EOPNOTSUPP);
1862
Hadar Hen Zionf77c0162013-08-14 13:58:31 +03001863 memset(type, 0, sizeof(type));
1864
1865 mflow = kzalloc(sizeof(*mflow), GFP_KERNEL);
1866 if (!mflow) {
1867 err = -ENOMEM;
1868 goto err_free;
1869 }
1870
1871 switch (flow_attr->type) {
1872 case IB_FLOW_ATTR_NORMAL:
Marina Varshaver0e451e82016-02-18 18:31:06 +02001873		/* If the don't-trap flag (continue match) is set, then under
 1874		 * specific conditions traffic is replicated to the given qp
 1875		 * without being stolen from it
 1876		 */
1877 if (unlikely(flow_attr->flags & IB_FLOW_ATTR_FLAGS_DONT_TRAP)) {
1878 err = mlx4_ib_add_dont_trap_rule(dev,
1879 flow_attr,
1880 type);
1881 if (err)
1882 goto err_free;
1883 } else {
1884 type[0] = MLX4_FS_REGULAR;
1885 }
Hadar Hen Zionf77c0162013-08-14 13:58:31 +03001886 break;
1887
1888 case IB_FLOW_ATTR_ALL_DEFAULT:
1889 type[0] = MLX4_FS_ALL_DEFAULT;
1890 break;
1891
1892 case IB_FLOW_ATTR_MC_DEFAULT:
1893 type[0] = MLX4_FS_MC_DEFAULT;
1894 break;
1895
1896 case IB_FLOW_ATTR_SNIFFER:
Marina Varshaver0e451e82016-02-18 18:31:06 +02001897 type[0] = MLX4_FS_MIRROR_RX_PORT;
1898 type[1] = MLX4_FS_MIRROR_SX_PORT;
Hadar Hen Zionf77c0162013-08-14 13:58:31 +03001899 break;
1900
1901 default:
1902 err = -EINVAL;
1903 goto err_free;
1904 }
1905
1906 while (i < ARRAY_SIZE(type) && type[i]) {
1907 err = __mlx4_ib_create_flow(qp, flow_attr, domain, type[i],
Moni Shoua146d6e12015-02-03 16:48:38 +02001908 &mflow->reg_id[i].id);
Hadar Hen Zionf77c0162013-08-14 13:58:31 +03001909 if (err)
Or Gerlitz571e1b22014-10-30 15:59:28 +02001910 goto err_create_flow;
Moni Shoua146d6e12015-02-03 16:48:38 +02001911 if (is_bonded) {
Moni Shoua824c25c2015-02-08 11:49:33 +02001912 /* Application always sees one port so the mirror rule
1913 * must be on port #2
1914 */
Moni Shoua146d6e12015-02-03 16:48:38 +02001915 flow_attr->port = 2;
1916 err = __mlx4_ib_create_flow(qp, flow_attr,
1917 domain, type[j],
1918 &mflow->reg_id[j].mirror);
1919 flow_attr->port = 1;
1920 if (err)
1921 goto err_create_flow;
1922 j++;
1923 }
1924
Roland Dreier11562562015-05-29 23:11:27 -07001925 i++;
Hadar Hen Zionf77c0162013-08-14 13:58:31 +03001926 }
1927
Or Gerlitzd2fce8a2014-08-27 16:47:49 +03001928 if (i < ARRAY_SIZE(type) && flow_attr->type == IB_FLOW_ATTR_NORMAL) {
Moni Shoua146d6e12015-02-03 16:48:38 +02001929 err = mlx4_ib_tunnel_steer_add(qp, flow_attr,
1930 &mflow->reg_id[i].id);
Or Gerlitzd2fce8a2014-08-27 16:47:49 +03001931 if (err)
Or Gerlitz571e1b22014-10-30 15:59:28 +02001932 goto err_create_flow;
Roland Dreier11562562015-05-29 23:11:27 -07001933
Moni Shoua146d6e12015-02-03 16:48:38 +02001934 if (is_bonded) {
1935 flow_attr->port = 2;
1936 err = mlx4_ib_tunnel_steer_add(qp, flow_attr,
1937 &mflow->reg_id[j].mirror);
1938 flow_attr->port = 1;
1939 if (err)
1940 goto err_create_flow;
1941 j++;
1942 }
 1943		/* the tunnel steering rule occupies the next reg_id slot */
Roland Dreier11562562015-05-29 23:11:27 -07001944 i++;
Or Gerlitzd2fce8a2014-08-27 16:47:49 +03001945 }
1946
Hadar Hen Zionf77c0162013-08-14 13:58:31 +03001947 return &mflow->ibflow;
1948
Or Gerlitz571e1b22014-10-30 15:59:28 +02001949err_create_flow:
1950 while (i) {
Moni Shoua146d6e12015-02-03 16:48:38 +02001951 (void)__mlx4_ib_destroy_flow(to_mdev(qp->device)->dev,
1952 mflow->reg_id[i].id);
Or Gerlitz571e1b22014-10-30 15:59:28 +02001953 i--;
1954 }
Moni Shoua146d6e12015-02-03 16:48:38 +02001955
1956 while (j) {
1957 (void)__mlx4_ib_destroy_flow(to_mdev(qp->device)->dev,
1958 mflow->reg_id[j].mirror);
1959 j--;
1960 }
Hadar Hen Zionf77c0162013-08-14 13:58:31 +03001961err_free:
1962 kfree(mflow);
1963 return ERR_PTR(err);
1964}
1965
1966static int mlx4_ib_destroy_flow(struct ib_flow *flow_id)
1967{
1968 int err, ret = 0;
1969 int i = 0;
1970 struct mlx4_ib_dev *mdev = to_mdev(flow_id->qp->device);
1971 struct mlx4_ib_flow *mflow = to_mflow(flow_id);
1972
Moni Shoua146d6e12015-02-03 16:48:38 +02001973 while (i < ARRAY_SIZE(mflow->reg_id) && mflow->reg_id[i].id) {
1974 err = __mlx4_ib_destroy_flow(mdev->dev, mflow->reg_id[i].id);
Hadar Hen Zionf77c0162013-08-14 13:58:31 +03001975 if (err)
1976 ret = err;
Moni Shoua146d6e12015-02-03 16:48:38 +02001977 if (mflow->reg_id[i].mirror) {
1978 err = __mlx4_ib_destroy_flow(mdev->dev,
1979 mflow->reg_id[i].mirror);
1980 if (err)
1981 ret = err;
1982 }
Hadar Hen Zionf77c0162013-08-14 13:58:31 +03001983 i++;
1984 }
1985
1986 kfree(mflow);
1987 return ret;
1988}
1989
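/*
 * Multicast attach: register the GID with the hardware (under
 * device-managed steering the returned reg_id is kept on the QP's
 * steering_rules list so detach can find it), add a mirror
 * registration on the other port when the ports are bonded, and track
 * the GID on the QP's gid_list.
 */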
Roland Dreier225c7b12007-05-08 18:00:38 -07001990static int mlx4_ib_mcg_attach(struct ib_qp *ibqp, union ib_gid *gid, u16 lid)
1991{
Eli Cohenfa417f72010-10-24 21:08:52 -07001992 int err;
1993 struct mlx4_ib_dev *mdev = to_mdev(ibqp->device);
Moni Shoua146d6e12015-02-03 16:48:38 +02001994 struct mlx4_dev *dev = mdev->dev;
Eli Cohenfa417f72010-10-24 21:08:52 -07001995 struct mlx4_ib_qp *mqp = to_mqp(ibqp);
Hadar Hen Zion0ff1fb62012-07-05 04:03:46 +00001996 struct mlx4_ib_steering *ib_steering = NULL;
Or Gerlitze9a7faf2014-12-17 16:17:34 +02001997 enum mlx4_protocol prot = MLX4_PROT_IB_IPV6;
Moni Shoua146d6e12015-02-03 16:48:38 +02001998 struct mlx4_flow_reg_id reg_id;
Eli Cohenfa417f72010-10-24 21:08:52 -07001999
Hadar Hen Zion0ff1fb62012-07-05 04:03:46 +00002000 if (mdev->dev->caps.steering_mode ==
2001 MLX4_STEERING_MODE_DEVICE_MANAGED) {
2002 ib_steering = kmalloc(sizeof(*ib_steering), GFP_KERNEL);
2003 if (!ib_steering)
2004 return -ENOMEM;
2005 }
2006
2007 err = mlx4_multicast_attach(mdev->dev, &mqp->mqp, gid->raw, mqp->port,
2008 !!(mqp->flags &
2009 MLX4_IB_QP_BLOCK_MULTICAST_LOOPBACK),
Moni Shoua146d6e12015-02-03 16:48:38 +02002010 prot, &reg_id.id);
Or Gerlitze9a7faf2014-12-17 16:17:34 +02002011 if (err) {
2012 pr_err("multicast attach op failed, err %d\n", err);
Hadar Hen Zion0ff1fb62012-07-05 04:03:46 +00002013 goto err_malloc;
Or Gerlitze9a7faf2014-12-17 16:17:34 +02002014 }
Eli Cohenfa417f72010-10-24 21:08:52 -07002015
Moni Shoua146d6e12015-02-03 16:48:38 +02002016 reg_id.mirror = 0;
2017 if (mlx4_is_bonded(dev)) {
Moni Shoua824c25c2015-02-08 11:49:33 +02002018 err = mlx4_multicast_attach(mdev->dev, &mqp->mqp, gid->raw,
2019 (mqp->port == 1) ? 2 : 1,
Moni Shoua146d6e12015-02-03 16:48:38 +02002020 !!(mqp->flags &
2021 MLX4_IB_QP_BLOCK_MULTICAST_LOOPBACK),
2022 prot, &reg_id.mirror);
2023 if (err)
2024 goto err_add;
2025 }
2026
Eli Cohenfa417f72010-10-24 21:08:52 -07002027 err = add_gid_entry(ibqp, gid);
2028 if (err)
2029 goto err_add;
2030
Hadar Hen Zion0ff1fb62012-07-05 04:03:46 +00002031 if (ib_steering) {
2032 memcpy(ib_steering->gid.raw, gid->raw, 16);
2033 ib_steering->reg_id = reg_id;
2034 mutex_lock(&mqp->mutex);
2035 list_add(&ib_steering->list, &mqp->steering_rules);
2036 mutex_unlock(&mqp->mutex);
2037 }
Eli Cohenfa417f72010-10-24 21:08:52 -07002038 return 0;
2039
2040err_add:
Hadar Hen Zion0ff1fb62012-07-05 04:03:46 +00002041 mlx4_multicast_detach(mdev->dev, &mqp->mqp, gid->raw,
Moni Shoua146d6e12015-02-03 16:48:38 +02002042 prot, reg_id.id);
2043 if (reg_id.mirror)
2044 mlx4_multicast_detach(mdev->dev, &mqp->mqp, gid->raw,
2045 prot, reg_id.mirror);
Hadar Hen Zion0ff1fb62012-07-05 04:03:46 +00002046err_malloc:
2047 kfree(ib_steering);
2048
Eli Cohenfa417f72010-10-24 21:08:52 -07002049 return err;
2050}
2051
2052static struct mlx4_ib_gid_entry *find_gid_entry(struct mlx4_ib_qp *qp, u8 *raw)
2053{
2054 struct mlx4_ib_gid_entry *ge;
2055 struct mlx4_ib_gid_entry *tmp;
2056 struct mlx4_ib_gid_entry *ret = NULL;
2057
2058 list_for_each_entry_safe(ge, tmp, &qp->gid_list, list) {
2059 if (!memcmp(raw, ge->gid.raw, 16)) {
2060 ret = ge;
2061 break;
2062 }
2063 }
2064
2065 return ret;
Roland Dreier225c7b12007-05-08 18:00:38 -07002066}
2067
2068static int mlx4_ib_mcg_detach(struct ib_qp *ibqp, union ib_gid *gid, u16 lid)
2069{
Eli Cohenfa417f72010-10-24 21:08:52 -07002070 int err;
2071 struct mlx4_ib_dev *mdev = to_mdev(ibqp->device);
Moni Shoua146d6e12015-02-03 16:48:38 +02002072 struct mlx4_dev *dev = mdev->dev;
Eli Cohenfa417f72010-10-24 21:08:52 -07002073 struct mlx4_ib_qp *mqp = to_mqp(ibqp);
Eli Cohenfa417f72010-10-24 21:08:52 -07002074 struct net_device *ndev;
2075 struct mlx4_ib_gid_entry *ge;
Moni Shoua146d6e12015-02-03 16:48:38 +02002076 struct mlx4_flow_reg_id reg_id = {0, 0};
Or Gerlitze9a7faf2014-12-17 16:17:34 +02002077 enum mlx4_protocol prot = MLX4_PROT_IB_IPV6;
Eli Cohenfa417f72010-10-24 21:08:52 -07002078
Hadar Hen Zion0ff1fb62012-07-05 04:03:46 +00002079 if (mdev->dev->caps.steering_mode ==
2080 MLX4_STEERING_MODE_DEVICE_MANAGED) {
2081 struct mlx4_ib_steering *ib_steering;
2082
2083 mutex_lock(&mqp->mutex);
2084 list_for_each_entry(ib_steering, &mqp->steering_rules, list) {
2085 if (!memcmp(ib_steering->gid.raw, gid->raw, 16)) {
2086 list_del(&ib_steering->list);
2087 break;
2088 }
2089 }
2090 mutex_unlock(&mqp->mutex);
2091 if (&ib_steering->list == &mqp->steering_rules) {
2092 pr_err("Couldn't find reg_id for mgid. Steering rule is left attached\n");
2093 return -EINVAL;
2094 }
2095 reg_id = ib_steering->reg_id;
2096 kfree(ib_steering);
2097 }
2098
2099 err = mlx4_multicast_detach(mdev->dev, &mqp->mqp, gid->raw,
Moni Shoua146d6e12015-02-03 16:48:38 +02002100 prot, reg_id.id);
Eli Cohenfa417f72010-10-24 21:08:52 -07002101 if (err)
2102 return err;
2103
Moni Shoua146d6e12015-02-03 16:48:38 +02002104 if (mlx4_is_bonded(dev)) {
2105 err = mlx4_multicast_detach(mdev->dev, &mqp->mqp, gid->raw,
2106 prot, reg_id.mirror);
2107 if (err)
2108 return err;
2109 }
2110
Eli Cohenfa417f72010-10-24 21:08:52 -07002111 mutex_lock(&mqp->mutex);
2112 ge = find_gid_entry(mqp, gid->raw);
2113 if (ge) {
Jack Morgensteindba3ad22014-08-21 14:28:41 +03002114 spin_lock_bh(&mdev->iboe.lock);
Eli Cohenfa417f72010-10-24 21:08:52 -07002115 ndev = ge->added ? mdev->iboe.netdevs[ge->port - 1] : NULL;
2116 if (ndev)
2117 dev_hold(ndev);
Jack Morgensteindba3ad22014-08-21 14:28:41 +03002118 spin_unlock_bh(&mdev->iboe.lock);
Moni Shouad487ee72013-12-12 18:03:13 +02002119 if (ndev)
Eli Cohenfa417f72010-10-24 21:08:52 -07002120 dev_put(ndev);
Eli Cohenfa417f72010-10-24 21:08:52 -07002121 list_del(&ge->list);
2122 kfree(ge);
2123 } else
Shlomo Pongratz987c8f82012-04-29 17:04:26 +03002124 pr_warn("could not find mgid entry\n");
Eli Cohenfa417f72010-10-24 21:08:52 -07002125
2126 mutex_unlock(&mqp->mutex);
2127
2128 return 0;
Roland Dreier225c7b12007-05-08 18:00:38 -07002129}
2130
2131static int init_node_data(struct mlx4_ib_dev *dev)
2132{
2133 struct ib_smp *in_mad = NULL;
2134 struct ib_smp *out_mad = NULL;
Jack Morgenstein0a9a0182012-08-03 08:40:45 +00002135 int mad_ifc_flags = MLX4_MAD_IFC_IGNORE_KEYS;
Roland Dreier225c7b12007-05-08 18:00:38 -07002136 int err = -ENOMEM;
2137
2138 in_mad = kzalloc(sizeof *in_mad, GFP_KERNEL);
2139 out_mad = kmalloc(sizeof *out_mad, GFP_KERNEL);
2140 if (!in_mad || !out_mad)
2141 goto out;
2142
2143 init_query_mad(in_mad);
2144 in_mad->attr_id = IB_SMP_ATTR_NODE_DESC;
Jack Morgenstein0a9a0182012-08-03 08:40:45 +00002145 if (mlx4_is_master(dev->dev))
2146 mad_ifc_flags |= MLX4_MAD_IFC_NET_VIEW;
Roland Dreier225c7b12007-05-08 18:00:38 -07002147
Jack Morgenstein0a9a0182012-08-03 08:40:45 +00002148 err = mlx4_MAD_IFC(dev, mad_ifc_flags, 1, NULL, NULL, in_mad, out_mad);
Roland Dreier225c7b12007-05-08 18:00:38 -07002149 if (err)
2150 goto out;
2151
Yuval Shaiabd99fde2016-08-25 10:57:07 -07002152 memcpy(dev->ib_dev.node_desc, out_mad->data, IB_DEVICE_NODE_DESC_MAX);
Roland Dreier225c7b12007-05-08 18:00:38 -07002153
2154 in_mad->attr_id = IB_SMP_ATTR_NODE_INFO;
2155
Jack Morgenstein0a9a0182012-08-03 08:40:45 +00002156 err = mlx4_MAD_IFC(dev, mad_ifc_flags, 1, NULL, NULL, in_mad, out_mad);
Roland Dreier225c7b12007-05-08 18:00:38 -07002157 if (err)
2158 goto out;
2159
Jack Morgenstein992e8e6e2012-08-03 08:40:54 +00002160 dev->dev->rev_id = be32_to_cpup((__be32 *) (out_mad->data + 32));
Roland Dreier225c7b12007-05-08 18:00:38 -07002161 memcpy(&dev->ib_dev.node_guid, out_mad->data + 12, 8);
2162
2163out:
2164 kfree(in_mad);
2165 kfree(out_mad);
2166 return err;
2167}
2168
Tony Jonesf4e91eb2008-02-22 00:13:36 +01002169static ssize_t show_hca(struct device *device, struct device_attribute *attr,
2170 char *buf)
Jack Morgensteincd9281d2007-09-18 09:14:18 +02002171{
Tony Jonesf4e91eb2008-02-22 00:13:36 +01002172 struct mlx4_ib_dev *dev =
2173 container_of(device, struct mlx4_ib_dev, ib_dev.dev);
Yishai Hadas872bf2f2015-01-25 16:59:35 +02002174 return sprintf(buf, "MT%d\n", dev->dev->persist->pdev->device);
Jack Morgensteincd9281d2007-09-18 09:14:18 +02002175}
2176
Tony Jonesf4e91eb2008-02-22 00:13:36 +01002177static ssize_t show_rev(struct device *device, struct device_attribute *attr,
2178 char *buf)
Jack Morgensteincd9281d2007-09-18 09:14:18 +02002179{
Tony Jonesf4e91eb2008-02-22 00:13:36 +01002180 struct mlx4_ib_dev *dev =
2181 container_of(device, struct mlx4_ib_dev, ib_dev.dev);
Jack Morgensteincd9281d2007-09-18 09:14:18 +02002182 return sprintf(buf, "%x\n", dev->dev->rev_id);
2183}
2184
Tony Jonesf4e91eb2008-02-22 00:13:36 +01002185static ssize_t show_board(struct device *device, struct device_attribute *attr,
2186 char *buf)
Jack Morgensteincd9281d2007-09-18 09:14:18 +02002187{
Tony Jonesf4e91eb2008-02-22 00:13:36 +01002188 struct mlx4_ib_dev *dev =
2189 container_of(device, struct mlx4_ib_dev, ib_dev.dev);
2190 return sprintf(buf, "%.*s\n", MLX4_BOARD_ID_LEN,
2191 dev->dev->board_id);
Jack Morgensteincd9281d2007-09-18 09:14:18 +02002192}
2193
Tony Jonesf4e91eb2008-02-22 00:13:36 +01002194static DEVICE_ATTR(hw_rev, S_IRUGO, show_rev, NULL);
Tony Jonesf4e91eb2008-02-22 00:13:36 +01002195static DEVICE_ATTR(hca_type, S_IRUGO, show_hca, NULL);
2196static DEVICE_ATTR(board_id, S_IRUGO, show_board, NULL);
Jack Morgensteincd9281d2007-09-18 09:14:18 +02002197
Tony Jonesf4e91eb2008-02-22 00:13:36 +01002198static struct device_attribute *mlx4_class_attributes[] = {
2199 &dev_attr_hw_rev,
Tony Jonesf4e91eb2008-02-22 00:13:36 +01002200 &dev_attr_hca_type,
2201 &dev_attr_board_id
Jack Morgensteincd9281d2007-09-18 09:14:18 +02002202};
2203
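/*
 * Diagnostic counters exported through the rdma_hw_stats interface: a
 * basic set, an extended set when the device supports per-port diag
 * counters, and a few counters that exist only at device scope.
 */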
Mark Bloch3f85f2a2016-07-19 20:54:58 +03002204struct diag_counter {
2205 const char *name;
2206 u32 offset;
2207};
2208
2209#define DIAG_COUNTER(_name, _offset) \
2210 { .name = #_name, .offset = _offset }
2211
2212static const struct diag_counter diag_basic[] = {
2213 DIAG_COUNTER(rq_num_lle, 0x00),
2214 DIAG_COUNTER(sq_num_lle, 0x04),
2215 DIAG_COUNTER(rq_num_lqpoe, 0x08),
2216 DIAG_COUNTER(sq_num_lqpoe, 0x0C),
2217 DIAG_COUNTER(rq_num_lpe, 0x18),
2218 DIAG_COUNTER(sq_num_lpe, 0x1C),
2219 DIAG_COUNTER(rq_num_wrfe, 0x20),
2220 DIAG_COUNTER(sq_num_wrfe, 0x24),
2221 DIAG_COUNTER(sq_num_mwbe, 0x2C),
2222 DIAG_COUNTER(sq_num_bre, 0x34),
2223 DIAG_COUNTER(sq_num_rire, 0x44),
2224 DIAG_COUNTER(rq_num_rire, 0x48),
2225 DIAG_COUNTER(sq_num_rae, 0x4C),
2226 DIAG_COUNTER(rq_num_rae, 0x50),
2227 DIAG_COUNTER(sq_num_roe, 0x54),
2228 DIAG_COUNTER(sq_num_tree, 0x5C),
2229 DIAG_COUNTER(sq_num_rree, 0x64),
2230 DIAG_COUNTER(rq_num_rnr, 0x68),
2231 DIAG_COUNTER(sq_num_rnr, 0x6C),
2232 DIAG_COUNTER(rq_num_oos, 0x100),
2233 DIAG_COUNTER(sq_num_oos, 0x104),
2234};
2235
2236static const struct diag_counter diag_ext[] = {
2237 DIAG_COUNTER(rq_num_dup, 0x130),
2238 DIAG_COUNTER(sq_num_to, 0x134),
2239};
2240
2241static const struct diag_counter diag_device_only[] = {
2242 DIAG_COUNTER(num_cqovf, 0x1A0),
2243 DIAG_COUNTER(rq_num_udsdprd, 0x118),
2244};
2245
2246static struct rdma_hw_stats *mlx4_ib_alloc_hw_stats(struct ib_device *ibdev,
2247 u8 port_num)
2248{
2249 struct mlx4_ib_dev *dev = to_mdev(ibdev);
2250 struct mlx4_ib_diag_counters *diag = dev->diag_counters;
2251
2252 if (!diag[!!port_num].name)
2253 return NULL;
2254
2255 return rdma_alloc_hw_stats_struct(diag[!!port_num].name,
2256 diag[!!port_num].num_counters,
2257 RDMA_HW_STATS_DEFAULT_LIFESPAN);
2258}
2259
2260static int mlx4_ib_get_hw_stats(struct ib_device *ibdev,
2261 struct rdma_hw_stats *stats,
2262 u8 port, int index)
2263{
2264 struct mlx4_ib_dev *dev = to_mdev(ibdev);
2265 struct mlx4_ib_diag_counters *diag = dev->diag_counters;
2266 u32 hw_value[ARRAY_SIZE(diag_device_only) +
2267 ARRAY_SIZE(diag_ext) + ARRAY_SIZE(diag_basic)] = {};
2268 int ret;
2269 int i;
2270
2271 ret = mlx4_query_diag_counters(dev->dev,
2272 MLX4_OP_MOD_QUERY_TRANSPORT_CI_ERRORS,
2273 diag[!!port].offset, hw_value,
2274 diag[!!port].num_counters, port);
2275
2276 if (ret)
2277 return ret;
2278
2279 for (i = 0; i < diag[!!port].num_counters; i++)
2280 stats->value[i] = hw_value[i];
2281
2282 return diag[!!port].num_counters;
2283}
2284
2285static int __mlx4_ib_alloc_diag_counters(struct mlx4_ib_dev *ibdev,
2286 const char ***name,
2287 u32 **offset,
2288 u32 *num,
2289 bool port)
2290{
2291 u32 num_counters;
2292
2293 num_counters = ARRAY_SIZE(diag_basic);
2294
2295 if (ibdev->dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_DIAG_PER_PORT)
2296 num_counters += ARRAY_SIZE(diag_ext);
2297
2298 if (!port)
2299 num_counters += ARRAY_SIZE(diag_device_only);
2300
2301 *name = kcalloc(num_counters, sizeof(**name), GFP_KERNEL);
2302 if (!*name)
2303 return -ENOMEM;
2304
2305 *offset = kcalloc(num_counters, sizeof(**offset), GFP_KERNEL);
2306 if (!*offset)
2307 goto err_name;
2308
2309 *num = num_counters;
2310
2311 return 0;
2312
2313err_name:
2314 kfree(*name);
2315 return -ENOMEM;
2316}
2317
2318static void mlx4_ib_fill_diag_counters(struct mlx4_ib_dev *ibdev,
2319 const char **name,
2320 u32 *offset,
2321 bool port)
2322{
2323 int i;
2324 int j;
2325
2326 for (i = 0, j = 0; i < ARRAY_SIZE(diag_basic); i++, j++) {
2327 name[i] = diag_basic[i].name;
2328 offset[i] = diag_basic[i].offset;
2329 }
2330
2331 if (ibdev->dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_DIAG_PER_PORT) {
2332 for (i = 0; i < ARRAY_SIZE(diag_ext); i++, j++) {
2333 name[j] = diag_ext[i].name;
2334 offset[j] = diag_ext[i].offset;
2335 }
2336 }
2337
2338 if (!port) {
2339 for (i = 0; i < ARRAY_SIZE(diag_device_only); i++, j++) {
2340 name[j] = diag_device_only[i].name;
2341 offset[j] = diag_device_only[i].offset;
2342 }
2343 }
2344}
2345
2346static int mlx4_ib_alloc_diag_counters(struct mlx4_ib_dev *ibdev)
2347{
2348 struct mlx4_ib_diag_counters *diag = ibdev->diag_counters;
2349 int i;
2350 int ret;
2351 bool per_port = !!(ibdev->dev->caps.flags2 &
2352 MLX4_DEV_CAP_FLAG2_DIAG_PER_PORT);
2353
Kamal Heib69d269d382016-09-12 19:16:22 +03002354 if (mlx4_is_slave(ibdev->dev))
2355 return 0;
2356
Mark Bloch3f85f2a2016-07-19 20:54:58 +03002357 for (i = 0; i < MLX4_DIAG_COUNTERS_TYPES; i++) {
2358 /* i == 1 means we are building port counters */
2359 if (i && !per_port)
2360 continue;
2361
2362 ret = __mlx4_ib_alloc_diag_counters(ibdev, &diag[i].name,
2363 &diag[i].offset,
2364 &diag[i].num_counters, i);
2365 if (ret)
2366 goto err_alloc;
2367
2368 mlx4_ib_fill_diag_counters(ibdev, diag[i].name,
2369 diag[i].offset, i);
2370 }
2371
2372 ibdev->ib_dev.get_hw_stats = mlx4_ib_get_hw_stats;
2373 ibdev->ib_dev.alloc_hw_stats = mlx4_ib_alloc_hw_stats;
2374
2375 return 0;
2376
2377err_alloc:
2378 if (i) {
2379 kfree(diag[i - 1].name);
2380 kfree(diag[i - 1].offset);
2381 }
2382
2383 return ret;
2384}
2385
2386static void mlx4_ib_diag_cleanup(struct mlx4_ib_dev *ibdev)
2387{
2388 int i;
2389
2390 for (i = 0; i < MLX4_DIAG_COUNTERS_TYPES; i++) {
2391 kfree(ibdev->diag_counters[i].offset);
2392 kfree(ibdev->diag_counters[i].name);
2393 }
2394}
2395
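/*
 * RoCE source MAC tracking: when a port's net_device changes address,
 * cache the new MAC and, under SR-IOV, register it and point the proxy
 * QP1's primary path at the new index, releasing the MAC it replaced.
 */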
Matan Barak9433c182014-05-15 15:29:28 +03002396#define MLX4_IB_INVALID_MAC ((u64)-1)
2397static void mlx4_ib_update_qps(struct mlx4_ib_dev *ibdev,
2398 struct net_device *dev,
2399 int port)
2400{
2401 u64 new_smac = 0;
2402 u64 release_mac = MLX4_IB_INVALID_MAC;
2403 struct mlx4_ib_qp *qp;
2404
2405 read_lock(&dev_base_lock);
2406 new_smac = mlx4_mac_to_u64(dev->dev_addr);
2407 read_unlock(&dev_base_lock);
2408
Jack Morgenstein3e0629c2014-09-11 14:11:17 +03002409 atomic64_set(&ibdev->iboe.mac[port - 1], new_smac);
2410
Jack Morgensteind24d9f42014-09-11 14:11:18 +03002411	/* no need to update QP1 or register the mac in non-SRIOV mode */
2412 if (!mlx4_is_mfunc(ibdev->dev))
2413 return;
2414
Matan Barak9433c182014-05-15 15:29:28 +03002415 mutex_lock(&ibdev->qp1_proxy_lock[port - 1]);
2416 qp = ibdev->qp1_proxy[port - 1];
2417 if (qp) {
2418 int new_smac_index;
Jack Morgenstein25476b02014-09-11 14:11:20 +03002419 u64 old_smac;
Matan Barak9433c182014-05-15 15:29:28 +03002420 struct mlx4_update_qp_params update_params;
2421
Jack Morgenstein25476b02014-09-11 14:11:20 +03002422 mutex_lock(&qp->mutex);
2423 old_smac = qp->pri.smac;
Matan Barak9433c182014-05-15 15:29:28 +03002424 if (new_smac == old_smac)
2425 goto unlock;
2426
2427 new_smac_index = mlx4_register_mac(ibdev->dev, port, new_smac);
2428
2429 if (new_smac_index < 0)
2430 goto unlock;
2431
2432 update_params.smac_index = new_smac_index;
Matan Barak09e05c32014-09-10 16:41:56 +03002433 if (mlx4_update_qp(ibdev->dev, qp->mqp.qpn, MLX4_UPDATE_QP_SMAC,
Matan Barak9433c182014-05-15 15:29:28 +03002434 &update_params)) {
2435 release_mac = new_smac;
2436 goto unlock;
2437 }
Jack Morgenstein25476b02014-09-11 14:11:20 +03002438 /* if old port was zero, no mac was yet registered for this QP */
2439 if (qp->pri.smac_port)
2440 release_mac = old_smac;
Matan Barak9433c182014-05-15 15:29:28 +03002441 qp->pri.smac = new_smac;
Jack Morgenstein25476b02014-09-11 14:11:20 +03002442 qp->pri.smac_port = port;
Matan Barak9433c182014-05-15 15:29:28 +03002443 qp->pri.smac_index = new_smac_index;
Matan Barak9433c182014-05-15 15:29:28 +03002444 }
2445
2446unlock:
Matan Barak9433c182014-05-15 15:29:28 +03002447 if (release_mac != MLX4_IB_INVALID_MAC)
2448 mlx4_unregister_mac(ibdev->dev, port, release_mac);
Jack Morgenstein25476b02014-09-11 14:11:20 +03002449 if (qp)
2450 mutex_unlock(&qp->mutex);
2451 mutex_unlock(&ibdev->qp1_proxy_lock[port - 1]);
Matan Barak9433c182014-05-15 15:29:28 +03002452}
2453
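/*
 * Refresh the cached net_device for every RoCE-capable port and, when
 * the event touches that netdev's address or state, trigger the QP1
 * MAC update above for the affected port.
 */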
Matan Barak9433c182014-05-15 15:29:28 +03002454static void mlx4_ib_scan_netdevs(struct mlx4_ib_dev *ibdev,
2455 struct net_device *dev,
2456 unsigned long event)
2457
Moni Shouad487ee72013-12-12 18:03:13 +02002458{
2459 struct mlx4_ib_iboe *iboe;
Matan Barak9433c182014-05-15 15:29:28 +03002460 int update_qps_port = -1;
Moni Shouad487ee72013-12-12 18:03:13 +02002461 int port;
2462
Moni Shoua5070cd22015-07-30 18:33:30 +03002463 ASSERT_RTNL();
2464
Moni Shouad487ee72013-12-12 18:03:13 +02002465 iboe = &ibdev->iboe;
2466
Jack Morgensteindba3ad22014-08-21 14:28:41 +03002467 spin_lock_bh(&iboe->lock);
Moni Shouad487ee72013-12-12 18:03:13 +02002468 mlx4_foreach_ib_transport_port(port, ibdev->dev) {
Moni Shouaad4885d22014-02-05 15:13:02 +02002469
Moni Shouad487ee72013-12-12 18:03:13 +02002470 iboe->netdevs[port - 1] =
2471 mlx4_get_protocol_dev(ibdev->dev, MLX4_PROT_ETH, port);
Moni Shouad487ee72013-12-12 18:03:13 +02002472
Matan Barak9433c182014-05-15 15:29:28 +03002473 if (dev == iboe->netdevs[port - 1] &&
2474 (event == NETDEV_CHANGEADDR || event == NETDEV_REGISTER ||
2475 event == NETDEV_UP || event == NETDEV_CHANGE))
2476 update_qps_port = port;
2477
Moni Shouad487ee72013-12-12 18:03:13 +02002478 }
Jack Morgensteindba3ad22014-08-21 14:28:41 +03002479 spin_unlock_bh(&iboe->lock);
Matan Barak9433c182014-05-15 15:29:28 +03002480
2481 if (update_qps_port > 0)
2482 mlx4_ib_update_qps(ibdev, dev, update_qps_port);
Moni Shouad487ee72013-12-12 18:03:13 +02002483}
2484
2485static int mlx4_ib_netdev_event(struct notifier_block *this,
2486 unsigned long event, void *ptr)
2487{
Jiri Pirko351638e2013-05-28 01:30:21 +00002488 struct net_device *dev = netdev_notifier_info_to_dev(ptr);
Eli Cohenfa417f72010-10-24 21:08:52 -07002489 struct mlx4_ib_dev *ibdev;
Eli Cohenfa417f72010-10-24 21:08:52 -07002490
2491 if (!net_eq(dev_net(dev), &init_net))
2492 return NOTIFY_DONE;
2493
2494 ibdev = container_of(this, struct mlx4_ib_dev, iboe.nb);
Matan Barak9433c182014-05-15 15:29:28 +03002495 mlx4_ib_scan_netdevs(ibdev, dev, event);
Eli Cohenfa417f72010-10-24 21:08:52 -07002496
2497 return NOTIFY_DONE;
2498}
2499
Jack Morgenstein54679e12012-08-03 08:40:43 +00002500static void init_pkeys(struct mlx4_ib_dev *ibdev)
2501{
2502 int port;
2503 int slave;
2504 int i;
2505
2506 if (mlx4_is_master(ibdev->dev)) {
Yishai Hadas872bf2f2015-01-25 16:59:35 +02002507 for (slave = 0; slave <= ibdev->dev->persist->num_vfs;
2508 ++slave) {
Jack Morgenstein54679e12012-08-03 08:40:43 +00002509 for (port = 1; port <= ibdev->dev->caps.num_ports; ++port) {
2510 for (i = 0;
2511 i < ibdev->dev->phys_caps.pkey_phys_table_len[port];
2512 ++i) {
2513 ibdev->pkeys.virt2phys_pkey[slave][port - 1][i] =
2514 /* master has the identity virt2phys pkey mapping */
2515 (slave == mlx4_master_func_num(ibdev->dev) || !i) ? i :
2516 ibdev->dev->phys_caps.pkey_phys_table_len[port] - 1;
2517 mlx4_sync_pkey_table(ibdev->dev, slave, port, i,
2518 ibdev->pkeys.virt2phys_pkey[slave][port - 1][i]);
2519 }
2520 }
2521 }
2522 /* initialize pkey cache */
2523 for (port = 1; port <= ibdev->dev->caps.num_ports; ++port) {
2524 for (i = 0;
2525 i < ibdev->dev->phys_caps.pkey_phys_table_len[port];
2526 ++i)
2527 ibdev->pkeys.phys_pkey_cache[port-1][i] =
2528 (i) ? 0 : 0xFFFF;
2529 }
2530 }
2531}
2532
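/*
 * Try to assign dedicated completion EQs per port; entries that cannot
 * be assigned are marked -1, and only the successfully assigned count
 * is advertised in num_comp_vectors.
 */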
Shlomo Pongratze605b742012-04-29 17:04:27 +03002533static void mlx4_ib_alloc_eqs(struct mlx4_dev *dev, struct mlx4_ib_dev *ibdev)
2534{
Matan Barakc66fa192015-05-31 09:30:16 +03002535 int i, j, eq = 0, total_eqs = 0;
Shlomo Pongratze605b742012-04-29 17:04:27 +03002536
Matan Barakc66fa192015-05-31 09:30:16 +03002537 ibdev->eq_table = kcalloc(dev->caps.num_comp_vectors,
2538 sizeof(ibdev->eq_table[0]), GFP_KERNEL);
Shlomo Pongratze605b742012-04-29 17:04:27 +03002539 if (!ibdev->eq_table)
2540 return;
2541
Matan Barakc66fa192015-05-31 09:30:16 +03002542 for (i = 1; i <= dev->caps.num_ports; i++) {
2543 for (j = 0; j < mlx4_get_eqs_per_port(dev, i);
2544 j++, total_eqs++) {
2545 if (i > 1 && mlx4_is_eq_shared(dev, total_eqs))
2546 continue;
2547 ibdev->eq_table[eq] = total_eqs;
2548 if (!mlx4_assign_eq(dev, i,
2549 &ibdev->eq_table[eq]))
2550 eq++;
2551 else
2552 ibdev->eq_table[eq] = -1;
Shlomo Pongratze605b742012-04-29 17:04:27 +03002553 }
2554 }
2555
Matan Barakc66fa192015-05-31 09:30:16 +03002556 for (i = eq; i < dev->caps.num_comp_vectors;
2557 ibdev->eq_table[i++] = -1)
2558 ;
Shlomo Pongratze605b742012-04-29 17:04:27 +03002559
2560 /* Advertise the new number of EQs to clients */
Matan Barakc66fa192015-05-31 09:30:16 +03002561 ibdev->ib_dev.num_comp_vectors = eq;
Shlomo Pongratze605b742012-04-29 17:04:27 +03002562}
2563
2564static void mlx4_ib_free_eqs(struct mlx4_dev *dev, struct mlx4_ib_dev *ibdev)
2565{
2566 int i;
Matan Barakc66fa192015-05-31 09:30:16 +03002567 int total_eqs = ibdev->ib_dev.num_comp_vectors;
Shlomo Pongratz3aac6ff2012-05-24 16:08:07 +03002568
Matan Barakc66fa192015-05-31 09:30:16 +03002569 /* no eqs were allocated */
Shlomo Pongratz3aac6ff2012-05-24 16:08:07 +03002570 if (!ibdev->eq_table)
2571 return;
Shlomo Pongratze605b742012-04-29 17:04:27 +03002572
2573 /* Reset the advertised EQ number */
Matan Barakc66fa192015-05-31 09:30:16 +03002574 ibdev->ib_dev.num_comp_vectors = 0;
Shlomo Pongratze605b742012-04-29 17:04:27 +03002575
Matan Barakc66fa192015-05-31 09:30:16 +03002576 for (i = 0; i < total_eqs; i++)
Shlomo Pongratze605b742012-04-29 17:04:27 +03002577 mlx4_release_eq(dev, ibdev->eq_table[i]);
Shlomo Pongratze605b742012-04-29 17:04:27 +03002578
Shlomo Pongratze605b742012-04-29 17:04:27 +03002579 kfree(ibdev->eq_table);
Matan Barakc66fa192015-05-31 09:30:16 +03002580 ibdev->eq_table = NULL;
Shlomo Pongratze605b742012-04-29 17:04:27 +03002581}
2582
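/*
 * Per-port immutable data: IB ports advertise the plain IBA core caps;
 * Ethernet ports advertise RoCE (plus RoCE v2 when supported) and raw
 * packet support, with pkey/GID table sizes taken from ib_query_port().
 */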
Ira Weiny77386132015-05-13 20:02:58 -04002583static int mlx4_port_immutable(struct ib_device *ibdev, u8 port_num,
2584 struct ib_port_immutable *immutable)
2585{
2586 struct ib_port_attr attr;
Matan Barak4ed088e2016-01-14 17:50:43 +02002587 struct mlx4_ib_dev *mdev = to_mdev(ibdev);
Ira Weiny77386132015-05-13 20:02:58 -04002588 int err;
2589
Matan Barak4ed088e2016-01-14 17:50:43 +02002590 if (mlx4_ib_port_link_layer(ibdev, port_num) == IB_LINK_LAYER_INFINIBAND) {
Ira Weinyf9b22e32015-05-13 20:02:59 -04002591 immutable->core_cap_flags = RDMA_CORE_PORT_IBA_IB;
Or Gerlitzbc63f9d2017-01-24 13:02:37 +02002592 immutable->max_mad_size = IB_MGMT_MAD_SIZE;
Matan Barak4ed088e2016-01-14 17:50:43 +02002593 } else {
2594 if (mdev->dev->caps.flags & MLX4_DEV_CAP_FLAG_IBOE)
2595 immutable->core_cap_flags = RDMA_CORE_PORT_IBA_ROCE;
2596 if (mdev->dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_ROCE_V1_V2)
2597 immutable->core_cap_flags = RDMA_CORE_PORT_IBA_ROCE |
2598 RDMA_CORE_PORT_IBA_ROCE_UDP_ENCAP;
Or Gerlitzbc63f9d2017-01-24 13:02:37 +02002599 immutable->core_cap_flags |= RDMA_CORE_PORT_RAW_PACKET;
2600 if (immutable->core_cap_flags & (RDMA_CORE_PORT_IBA_ROCE |
2601 RDMA_CORE_PORT_IBA_ROCE_UDP_ENCAP))
2602 immutable->max_mad_size = IB_MGMT_MAD_SIZE;
Matan Barak4ed088e2016-01-14 17:50:43 +02002603 }
Ira Weinyf9b22e32015-05-13 20:02:59 -04002604
Or Gerlitzc4550c62017-01-24 13:02:39 +02002605 err = ib_query_port(ibdev, port_num, &attr);
2606 if (err)
2607 return err;
2608
2609 immutable->pkey_tbl_len = attr.pkey_tbl_len;
2610 immutable->gid_tbl_len = attr.gid_tbl_len;
2611
Ira Weiny77386132015-05-13 20:02:58 -04002612 return 0;
2613}
2614
Leon Romanovsky9abb0d12017-06-27 16:49:53 +03002615static void get_fw_ver_str(struct ib_device *device, char *str)
Ira Weinye9db59f2016-06-15 02:22:00 -04002616{
2617 struct mlx4_ib_dev *dev =
2618 container_of(device, struct mlx4_ib_dev, ib_dev);
Leon Romanovsky9abb0d12017-06-27 16:49:53 +03002619 snprintf(str, IB_FW_VERSION_NAME_MAX, "%d.%d.%d",
Ira Weinye9db59f2016-06-15 02:22:00 -04002620 (int) (dev->dev->caps.fw_ver >> 32),
2621 (int) (dev->dev->caps.fw_ver >> 16) & 0xffff,
2622 (int) dev->dev->caps.fw_ver & 0xffff);
2623}
2624
Roland Dreier225c7b12007-05-08 18:00:38 -07002625static void *mlx4_ib_add(struct mlx4_dev *dev)
2626{
2627 struct mlx4_ib_dev *ibdev;
Roland Dreier22e7ef92009-01-09 13:22:29 -08002628 int num_ports = 0;
Jack Morgenstein035b1032012-05-10 23:28:09 +03002629 int i, j;
Eli Cohenfa417f72010-10-24 21:08:52 -07002630 int err;
2631 struct mlx4_ib_iboe *iboe;
Matan Barak41966702014-02-02 17:06:47 +02002632 int ib_num_ports = 0;
Moni Shouaa5750092015-02-03 16:48:37 +02002633 int num_req_counters;
Eran Ben Elishac3abb512015-06-15 17:59:03 +03002634 int allocated;
2635 u32 counter_index;
Eran Ben Elisha3ba8e312015-10-15 14:44:40 +03002636 struct counter_index *new_counter_index = NULL;
Roland Dreier225c7b12007-05-08 18:00:38 -07002637
Shlomo Pongratz987c8f82012-04-29 17:04:26 +03002638 pr_info_once("%s", mlx4_ib_version);
Roland Dreier68f39482008-02-04 20:20:44 -08002639
Jack Morgenstein026149c2012-08-03 08:40:55 +00002640 num_ports = 0;
Eli Cohenfa417f72010-10-24 21:08:52 -07002641 mlx4_foreach_ib_transport_port(i, dev)
Roland Dreier22e7ef92009-01-09 13:22:29 -08002642 num_ports++;
2643
2644 /* No point in registering a device with no ports... */
2645 if (num_ports == 0)
2646 return NULL;
2647
Roland Dreier225c7b12007-05-08 18:00:38 -07002648 ibdev = (struct mlx4_ib_dev *) ib_alloc_device(sizeof *ibdev);
2649 if (!ibdev) {
Yishai Hadas872bf2f2015-01-25 16:59:35 +02002650 dev_err(&dev->persist->pdev->dev,
2651 "Device struct alloc failed\n");
Roland Dreier225c7b12007-05-08 18:00:38 -07002652 return NULL;
2653 }
2654
Eli Cohenfa417f72010-10-24 21:08:52 -07002655 iboe = &ibdev->iboe;
2656
Roland Dreier225c7b12007-05-08 18:00:38 -07002657 if (mlx4_pd_alloc(dev, &ibdev->priv_pdn))
2658 goto err_dealloc;
2659
2660 if (mlx4_uar_alloc(dev, &ibdev->priv_uar))
2661 goto err_pd;
2662
Roland Dreier4979d182011-01-12 09:50:36 -08002663 ibdev->uar_map = ioremap((phys_addr_t) ibdev->priv_uar.pfn << PAGE_SHIFT,
2664 PAGE_SIZE);
Roland Dreier225c7b12007-05-08 18:00:38 -07002665 if (!ibdev->uar_map)
2666 goto err_uar;
Jack Morgenstein26c6bc72007-05-13 17:18:23 +03002667 MLX4_INIT_DOORBELL_LOCK(&ibdev->uar_lock);
Roland Dreier225c7b12007-05-08 18:00:38 -07002668
Roland Dreier225c7b12007-05-08 18:00:38 -07002669 ibdev->dev = dev;
Moni Shouac6215742015-02-03 16:48:39 +02002670 ibdev->bond_next_port = 0;
Roland Dreier225c7b12007-05-08 18:00:38 -07002671
2672 strlcpy(ibdev->ib_dev.name, "mlx4_%d", IB_DEVICE_NAME_MAX);
2673 ibdev->ib_dev.owner = THIS_MODULE;
2674 ibdev->ib_dev.node_type = RDMA_NODE_IB_CA;
Roland Dreier95d04f02008-07-23 08:12:26 -07002675 ibdev->ib_dev.local_dma_lkey = dev->caps.reserved_lkey;
Roland Dreier22e7ef92009-01-09 13:22:29 -08002676 ibdev->num_ports = num_ports;
Moni Shouaa5750092015-02-03 16:48:37 +02002677 ibdev->ib_dev.phys_port_cnt = mlx4_is_bonded(dev) ?
2678 1 : ibdev->num_ports;
Yevgeny Petrilinb8dd7862008-12-22 07:15:03 -08002679 ibdev->ib_dev.num_comp_vectors = dev->caps.num_comp_vectors;
Bart Van Assched66c88a82017-01-20 13:04:20 -08002680 ibdev->ib_dev.dev.parent = &dev->persist->pdev->dev;
Moni Shoua5070cd22015-07-30 18:33:30 +03002681 ibdev->ib_dev.get_netdev = mlx4_ib_get_netdev;
2682 ibdev->ib_dev.add_gid = mlx4_ib_add_gid;
2683 ibdev->ib_dev.del_gid = mlx4_ib_del_gid;
Roland Dreier225c7b12007-05-08 18:00:38 -07002684
Or Gerlitz08ff3232012-10-21 14:59:24 +00002685 if (dev->caps.userspace_caps)
2686 ibdev->ib_dev.uverbs_abi_ver = MLX4_IB_UVERBS_ABI_VERSION;
2687 else
2688 ibdev->ib_dev.uverbs_abi_ver = MLX4_IB_UVERBS_NO_DEV_CAPS_ABI_VERSION;
2689
Roland Dreier225c7b12007-05-08 18:00:38 -07002690 ibdev->ib_dev.uverbs_cmd_mask =
2691 (1ull << IB_USER_VERBS_CMD_GET_CONTEXT) |
2692 (1ull << IB_USER_VERBS_CMD_QUERY_DEVICE) |
2693 (1ull << IB_USER_VERBS_CMD_QUERY_PORT) |
2694 (1ull << IB_USER_VERBS_CMD_ALLOC_PD) |
2695 (1ull << IB_USER_VERBS_CMD_DEALLOC_PD) |
2696 (1ull << IB_USER_VERBS_CMD_REG_MR) |
Matan Barak93769322014-07-31 11:01:30 +03002697 (1ull << IB_USER_VERBS_CMD_REREG_MR) |
Roland Dreier225c7b12007-05-08 18:00:38 -07002698 (1ull << IB_USER_VERBS_CMD_DEREG_MR) |
2699 (1ull << IB_USER_VERBS_CMD_CREATE_COMP_CHANNEL) |
2700 (1ull << IB_USER_VERBS_CMD_CREATE_CQ) |
Vladimir Sokolovskybbf8eed12008-04-16 21:09:33 -07002701 (1ull << IB_USER_VERBS_CMD_RESIZE_CQ) |
Roland Dreier225c7b12007-05-08 18:00:38 -07002702 (1ull << IB_USER_VERBS_CMD_DESTROY_CQ) |
2703 (1ull << IB_USER_VERBS_CMD_CREATE_QP) |
2704 (1ull << IB_USER_VERBS_CMD_MODIFY_QP) |
Jack Morgenstein6a775e22007-06-21 12:27:47 +03002705 (1ull << IB_USER_VERBS_CMD_QUERY_QP) |
Roland Dreier225c7b12007-05-08 18:00:38 -07002706 (1ull << IB_USER_VERBS_CMD_DESTROY_QP) |
2707 (1ull << IB_USER_VERBS_CMD_ATTACH_MCAST) |
2708 (1ull << IB_USER_VERBS_CMD_DETACH_MCAST) |
2709 (1ull << IB_USER_VERBS_CMD_CREATE_SRQ) |
2710 (1ull << IB_USER_VERBS_CMD_MODIFY_SRQ) |
Jack Morgenstein65541cb2007-06-21 13:03:11 +03002711 (1ull << IB_USER_VERBS_CMD_QUERY_SRQ) |
Sean Hefty18abd5e2011-06-02 10:43:26 -07002712 (1ull << IB_USER_VERBS_CMD_DESTROY_SRQ) |
Sean Hefty42849b22011-08-11 13:57:43 -07002713 (1ull << IB_USER_VERBS_CMD_CREATE_XSRQ) |
2714 (1ull << IB_USER_VERBS_CMD_OPEN_QP);
Roland Dreier225c7b12007-05-08 18:00:38 -07002715
2716 ibdev->ib_dev.query_device = mlx4_ib_query_device;
2717 ibdev->ib_dev.query_port = mlx4_ib_query_port;
Eli Cohenfa417f72010-10-24 21:08:52 -07002718 ibdev->ib_dev.get_link_layer = mlx4_ib_port_link_layer;
Roland Dreier225c7b12007-05-08 18:00:38 -07002719 ibdev->ib_dev.query_gid = mlx4_ib_query_gid;
2720 ibdev->ib_dev.query_pkey = mlx4_ib_query_pkey;
2721 ibdev->ib_dev.modify_device = mlx4_ib_modify_device;
2722 ibdev->ib_dev.modify_port = mlx4_ib_modify_port;
2723 ibdev->ib_dev.alloc_ucontext = mlx4_ib_alloc_ucontext;
2724 ibdev->ib_dev.dealloc_ucontext = mlx4_ib_dealloc_ucontext;
2725 ibdev->ib_dev.mmap = mlx4_ib_mmap;
2726 ibdev->ib_dev.alloc_pd = mlx4_ib_alloc_pd;
2727 ibdev->ib_dev.dealloc_pd = mlx4_ib_dealloc_pd;
2728 ibdev->ib_dev.create_ah = mlx4_ib_create_ah;
2729 ibdev->ib_dev.query_ah = mlx4_ib_query_ah;
2730 ibdev->ib_dev.destroy_ah = mlx4_ib_destroy_ah;
2731 ibdev->ib_dev.create_srq = mlx4_ib_create_srq;
2732 ibdev->ib_dev.modify_srq = mlx4_ib_modify_srq;
Jack Morgenstein65541cb2007-06-21 13:03:11 +03002733 ibdev->ib_dev.query_srq = mlx4_ib_query_srq;
Roland Dreier225c7b12007-05-08 18:00:38 -07002734 ibdev->ib_dev.destroy_srq = mlx4_ib_destroy_srq;
2735 ibdev->ib_dev.post_srq_recv = mlx4_ib_post_srq_recv;
2736 ibdev->ib_dev.create_qp = mlx4_ib_create_qp;
2737 ibdev->ib_dev.modify_qp = mlx4_ib_modify_qp;
Jack Morgenstein6a775e22007-06-21 12:27:47 +03002738 ibdev->ib_dev.query_qp = mlx4_ib_query_qp;
Roland Dreier225c7b12007-05-08 18:00:38 -07002739 ibdev->ib_dev.destroy_qp = mlx4_ib_destroy_qp;
2740 ibdev->ib_dev.post_send = mlx4_ib_post_send;
2741 ibdev->ib_dev.post_recv = mlx4_ib_post_recv;
2742 ibdev->ib_dev.create_cq = mlx4_ib_create_cq;
Eli Cohen3fdcb972008-04-16 21:09:33 -07002743 ibdev->ib_dev.modify_cq = mlx4_ib_modify_cq;
Vladimir Sokolovskybbf8eed12008-04-16 21:09:33 -07002744 ibdev->ib_dev.resize_cq = mlx4_ib_resize_cq;
Roland Dreier225c7b12007-05-08 18:00:38 -07002745 ibdev->ib_dev.destroy_cq = mlx4_ib_destroy_cq;
2746 ibdev->ib_dev.poll_cq = mlx4_ib_poll_cq;
2747 ibdev->ib_dev.req_notify_cq = mlx4_ib_arm_cq;
2748 ibdev->ib_dev.get_dma_mr = mlx4_ib_get_dma_mr;
2749 ibdev->ib_dev.reg_user_mr = mlx4_ib_reg_user_mr;
Matan Barak93769322014-07-31 11:01:30 +03002750 ibdev->ib_dev.rereg_user_mr = mlx4_ib_rereg_user_mr;
Roland Dreier225c7b12007-05-08 18:00:38 -07002751 ibdev->ib_dev.dereg_mr = mlx4_ib_dereg_mr;
Sagi Grimberg679e34d2015-07-30 10:32:42 +03002752 ibdev->ib_dev.alloc_mr = mlx4_ib_alloc_mr;
Sagi Grimberg1b2cd0f2015-10-13 19:11:27 +03002753 ibdev->ib_dev.map_mr_sg = mlx4_ib_map_mr_sg;
Roland Dreier225c7b12007-05-08 18:00:38 -07002754 ibdev->ib_dev.attach_mcast = mlx4_ib_mcg_attach;
2755 ibdev->ib_dev.detach_mcast = mlx4_ib_mcg_detach;
2756 ibdev->ib_dev.process_mad = mlx4_ib_process_mad;
Ira Weiny77386132015-05-13 20:02:58 -04002757 ibdev->ib_dev.get_port_immutable = mlx4_port_immutable;
Ira Weinye9db59f2016-06-15 02:22:00 -04002758 ibdev->ib_dev.get_dev_fw_str = get_fw_ver_str;
Yishai Hadasae184dd2015-08-13 18:32:06 +03002759 ibdev->ib_dev.disassociate_ucontext = mlx4_ib_disassociate_ucontext;
Roland Dreier225c7b12007-05-08 18:00:38 -07002760
Yonatan Cohen34d9a272017-11-13 10:51:14 +02002761 ibdev->ib_dev.uverbs_ex_cmd_mask |=
2762 (1ull << IB_USER_VERBS_EX_CMD_MODIFY_CQ);
2763
Guy Levi400b1eb2017-07-04 16:24:24 +03002764 if ((dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_RSS) &&
2765 ((mlx4_ib_port_link_layer(&ibdev->ib_dev, 1) ==
2766 IB_LINK_LAYER_ETHERNET) ||
2767 (mlx4_ib_port_link_layer(&ibdev->ib_dev, 2) ==
2768 IB_LINK_LAYER_ETHERNET))) {
2769 ibdev->ib_dev.create_wq = mlx4_ib_create_wq;
2770 ibdev->ib_dev.modify_wq = mlx4_ib_modify_wq;
2771 ibdev->ib_dev.destroy_wq = mlx4_ib_destroy_wq;
Guy Levib8d46ca2017-07-04 16:24:25 +03002772 ibdev->ib_dev.create_rwq_ind_table =
2773 mlx4_ib_create_rwq_ind_table;
2774 ibdev->ib_dev.destroy_rwq_ind_table =
2775 mlx4_ib_destroy_rwq_ind_table;
Guy Levi400b1eb2017-07-04 16:24:24 +03002776 ibdev->ib_dev.uverbs_ex_cmd_mask |=
Guy Levib8d46ca2017-07-04 16:24:25 +03002777 (1ull << IB_USER_VERBS_EX_CMD_CREATE_WQ) |
2778 (1ull << IB_USER_VERBS_EX_CMD_MODIFY_WQ) |
2779 (1ull << IB_USER_VERBS_EX_CMD_DESTROY_WQ) |
2780 (1ull << IB_USER_VERBS_EX_CMD_CREATE_RWQ_IND_TBL) |
2781 (1ull << IB_USER_VERBS_EX_CMD_DESTROY_RWQ_IND_TBL);
Guy Levi400b1eb2017-07-04 16:24:24 +03002782 }
2783
Jack Morgenstein992e8e6e2012-08-03 08:40:54 +00002784 if (!mlx4_is_slave(ibdev->dev)) {
2785 ibdev->ib_dev.alloc_fmr = mlx4_ib_fmr_alloc;
2786 ibdev->ib_dev.map_phys_fmr = mlx4_ib_map_phys_fmr;
2787 ibdev->ib_dev.unmap_fmr = mlx4_ib_unmap_fmr;
2788 ibdev->ib_dev.dealloc_fmr = mlx4_ib_fmr_dealloc;
2789 }
Jack Morgenstein8ad11fb2007-08-01 12:29:05 +03002790
Shani Michaelib4253882013-02-06 16:19:16 +00002791 if (dev->caps.flags & MLX4_DEV_CAP_FLAG_MEM_WINDOW ||
2792 dev->caps.bmme_flags & MLX4_BMME_FLAG_TYPE_2_WIN) {
2793 ibdev->ib_dev.alloc_mw = mlx4_ib_alloc_mw;
Shani Michaelib4253882013-02-06 16:19:16 +00002794 ibdev->ib_dev.dealloc_mw = mlx4_ib_dealloc_mw;
2795
2796 ibdev->ib_dev.uverbs_cmd_mask |=
2797 (1ull << IB_USER_VERBS_CMD_ALLOC_MW) |
2798 (1ull << IB_USER_VERBS_CMD_DEALLOC_MW);
2799 }
2800
Sean Hefty012a8ff2011-06-02 09:01:33 -07002801 if (dev->caps.flags & MLX4_DEV_CAP_FLAG_XRC) {
2802 ibdev->ib_dev.alloc_xrcd = mlx4_ib_alloc_xrcd;
2803 ibdev->ib_dev.dealloc_xrcd = mlx4_ib_dealloc_xrcd;
2804 ibdev->ib_dev.uverbs_cmd_mask |=
2805 (1ull << IB_USER_VERBS_CMD_OPEN_XRCD) |
2806 (1ull << IB_USER_VERBS_CMD_CLOSE_XRCD);
2807 }
2808
Hadar Hen Zionf77c0162013-08-14 13:58:31 +03002809 if (check_flow_steering_support(dev)) {
Matan Barak0a9b7d52013-11-07 15:25:15 +02002810 ibdev->steering_support = MLX4_STEERING_MODE_DEVICE_MANAGED;
Hadar Hen Zionf77c0162013-08-14 13:58:31 +03002811 ibdev->ib_dev.create_flow = mlx4_ib_create_flow;
2812 ibdev->ib_dev.destroy_flow = mlx4_ib_destroy_flow;
2813
Yann Droneaudf21519b2013-11-06 23:21:49 +01002814 ibdev->ib_dev.uverbs_ex_cmd_mask |=
2815 (1ull << IB_USER_VERBS_EX_CMD_CREATE_FLOW) |
2816 (1ull << IB_USER_VERBS_EX_CMD_DESTROY_FLOW);
Hadar Hen Zionf77c0162013-08-14 13:58:31 +03002817 }
2818
Matan Barak4b664c42015-06-11 16:35:27 +03002819 ibdev->ib_dev.uverbs_ex_cmd_mask |=
2820 (1ull << IB_USER_VERBS_EX_CMD_QUERY_DEVICE) |
Eran Ben Elishafbfb6622015-10-15 14:44:42 +03002821 (1ull << IB_USER_VERBS_EX_CMD_CREATE_CQ) |
2822 (1ull << IB_USER_VERBS_EX_CMD_CREATE_QP);
Matan Barak4b664c42015-06-11 16:35:27 +03002823
Shlomo Pongratze605b742012-04-29 17:04:27 +03002824 mlx4_ib_alloc_eqs(dev, ibdev);
2825
Eli Cohenfa417f72010-10-24 21:08:52 -07002826 spin_lock_init(&iboe->lock);
2827
Roland Dreier225c7b12007-05-08 18:00:38 -07002828 if (init_node_data(ibdev))
2829 goto err_map;
Jack Morgensteinfd10ed82016-09-12 19:16:21 +03002830 mlx4_init_sl2vl_tbl(ibdev);
Roland Dreier225c7b12007-05-08 18:00:38 -07002831
Eran Ben Elisha3ba8e312015-10-15 14:44:40 +03002832 for (i = 0; i < ibdev->num_ports; ++i) {
2833 mutex_init(&ibdev->counters_table[i].mutex);
2834 INIT_LIST_HEAD(&ibdev->counters_table[i].counters_list);
2835 }
2836
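	/* When the ports are bonded (LAG) a single counter is enough;
	 * otherwise set up one default counter per port.
	 */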
Moni Shouaa5750092015-02-03 16:48:37 +02002837 num_req_counters = mlx4_is_bonded(dev) ? 1 : ibdev->num_ports;
2838 for (i = 0; i < num_req_counters; ++i) {
Matan Barak9433c182014-05-15 15:29:28 +03002839 mutex_init(&ibdev->qp1_proxy_lock[i]);
Eran Ben Elishac3abb512015-06-15 17:59:03 +03002840 allocated = 0;
Or Gerlitzcfcde112011-06-15 14:49:57 +00002841 if (mlx4_ib_port_link_layer(&ibdev->ib_dev, i + 1) ==
2842 IB_LINK_LAYER_ETHERNET) {
Moshe Shemeshf3301872017-06-21 09:29:36 +03002843 err = mlx4_counter_alloc(ibdev->dev, &counter_index,
2844 MLX4_RES_USAGE_DRIVER);
Eran Ben Elishac3abb512015-06-15 17:59:03 +03002845 /* if allocating a new counter failed, use the default */
Or Gerlitzcfcde112011-06-15 14:49:57 +00002846 if (err)
Eran Ben Elishac3abb512015-06-15 17:59:03 +03002847 counter_index =
2848 mlx4_get_default_counter_index(dev,
2849 i + 1);
2850 else
2851 allocated = 1;
2852 } else { /* IB_LINK_LAYER_INFINIBAND: use the default counter */
2853 counter_index = mlx4_get_default_counter_index(dev,
2854 i + 1);
Dan Carpenter3839d8a2014-03-28 11:21:39 +03002855 }
Eran Ben Elisha3ba8e312015-10-15 14:44:40 +03002856 new_counter_index = kmalloc(sizeof(*new_counter_index),
2857 GFP_KERNEL);
2858 if (!new_counter_index) {
2859 if (allocated)
2860 mlx4_counter_free(ibdev->dev, counter_index);
2861 goto err_counter;
2862 }
2863 new_counter_index->index = counter_index;
2864 new_counter_index->allocated = allocated;
2865 list_add_tail(&new_counter_index->list,
2866 &ibdev->counters_table[i].counters_list);
2867 ibdev->counters_table[i].default_counter = counter_index;
Eran Ben Elishac3abb512015-06-15 17:59:03 +03002868 pr_info("counter index %d for port %d (allocated=%d)\n",
2869 counter_index, i + 1, allocated);
Or Gerlitzcfcde112011-06-15 14:49:57 +00002870 }
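	/* Under bonding, the remaining ports reuse the counter index that
	 * was just set up for port 1 instead of allocating their own.
	 */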
Moni Shouaa5750092015-02-03 16:48:37 +02002871 if (mlx4_is_bonded(dev))
Eran Ben Elishac3abb512015-06-15 17:59:03 +03002872 for (i = 1; i < ibdev->num_ports ; ++i) {
Eran Ben Elisha3ba8e312015-10-15 14:44:40 +03002873 new_counter_index =
2874 kmalloc(sizeof(*new_counter_index),
2875 GFP_KERNEL);
2876 if (!new_counter_index)
2877 goto err_counter;
2878 new_counter_index->index = counter_index;
2879 new_counter_index->allocated = 0;
2880 list_add_tail(&new_counter_index->list,
2881 &ibdev->counters_table[i].counters_list);
2882 ibdev->counters_table[i].default_counter =
2883 counter_index;
Eran Ben Elishac3abb512015-06-15 17:59:03 +03002884 }
Or Gerlitzcfcde112011-06-15 14:49:57 +00002885
Matan Barak41966702014-02-02 17:06:47 +02002886 mlx4_foreach_port(i, dev, MLX4_PORT_TYPE_IB)
2887 ib_num_ports++;
2888
Roland Dreier225c7b12007-05-08 18:00:38 -07002889 spin_lock_init(&ibdev->sm_lock);
2890 mutex_init(&ibdev->cap_mask_mutex);
Yishai Hadas35f05da2015-02-08 11:49:34 +02002891 INIT_LIST_HEAD(&ibdev->qp_list);
2892 spin_lock_init(&ibdev->reset_flow_resource_lock);
Roland Dreier225c7b12007-05-08 18:00:38 -07002893
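	/* With device-managed flow steering, reserve a range of QPNs for
	 * steerable user-space UC QPs and track allocations in a bitmap.
	 */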
Matan Barak41966702014-02-02 17:06:47 +02002894 if (ibdev->steering_support == MLX4_STEERING_MODE_DEVICE_MANAGED &&
2895 ib_num_ports) {
Matan Barakc1c98502013-11-07 15:25:17 +02002896 ibdev->steer_qpn_count = MLX4_IB_UC_MAX_NUM_QPS;
2897 err = mlx4_qp_reserve_range(dev, ibdev->steer_qpn_count,
2898 MLX4_IB_UC_STEER_QPN_ALIGN,
Moshe Shemeshf3301872017-06-21 09:29:36 +03002899 &ibdev->steer_qpn_base, 0,
2900 MLX4_RES_USAGE_DRIVER);
Matan Barakc1c98502013-11-07 15:25:17 +02002901 if (err)
2902 goto err_counter;
2903
2904 ibdev->ib_uc_qpns_bitmap =
2905 kmalloc(BITS_TO_LONGS(ibdev->steer_qpn_count) *
2906 sizeof(long),
2907 GFP_KERNEL);
Leon Romanovsky15d46262016-11-03 16:44:12 +02002908 if (!ibdev->ib_uc_qpns_bitmap)
Matan Barakc1c98502013-11-07 15:25:17 +02002909 goto err_steer_qp_release;
Matan Barakc1c98502013-11-07 15:25:17 +02002910
Eran Ben Elisha1f22e452016-11-10 11:31:00 +02002911 if (dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_DMFS_IPOIB) {
2912 bitmap_zero(ibdev->ib_uc_qpns_bitmap,
2913 ibdev->steer_qpn_count);
2914 err = mlx4_FLOW_STEERING_IB_UC_QP_RANGE(
2915 dev, ibdev->steer_qpn_base,
2916 ibdev->steer_qpn_base +
2917 ibdev->steer_qpn_count - 1);
2918 if (err)
2919 goto err_steer_free_bitmap;
2920 } else {
2921 bitmap_fill(ibdev->ib_uc_qpns_bitmap,
2922 ibdev->steer_qpn_count);
2923 }
Matan Barakc1c98502013-11-07 15:25:17 +02002924 }
2925
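	/* Cache each port's default MAC address; kept as an atomic64 so it
	 * can be updated and read without taking a lock.
	 */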
Jack Morgenstein3e0629c2014-09-11 14:11:17 +03002926 for (j = 1; j <= ibdev->dev->caps.num_ports; j++)
2927 atomic64_set(&iboe->mac[j - 1], ibdev->dev->caps.def_mac[j]);
2928
Mark Bloch3f85f2a2016-07-19 20:54:58 +03002929 if (mlx4_ib_alloc_diag_counters(ibdev))
Matan Barakc1c98502013-11-07 15:25:17 +02002930 goto err_steer_free_bitmap;
Roland Dreier225c7b12007-05-08 18:00:38 -07002931
Mark Bloch3f85f2a2016-07-19 20:54:58 +03002932 if (ib_register_device(&ibdev->ib_dev, NULL))
2933 goto err_diag_counters;
2934
Roland Dreier225c7b12007-05-08 18:00:38 -07002935 if (mlx4_ib_mad_init(ibdev))
2936 goto err_reg;
2937
Jack Morgensteinfc065732012-08-03 08:40:42 +00002938 if (mlx4_ib_init_sriov(ibdev))
2939 goto err_mad;
2940
Majd Dibbinydd77abf2017-03-19 11:01:28 +02002941 if (!iboe->nb.notifier_call) {
2942 iboe->nb.notifier_call = mlx4_ib_netdev_event;
2943 err = register_netdevice_notifier(&iboe->nb);
2944 if (err) {
2945 iboe->nb.notifier_call = NULL;
2946 goto err_notif;
Moni Shouad487ee72013-12-12 18:03:13 +02002947 }
Majd Dibbinydd77abf2017-03-19 11:01:28 +02002948 }
2949 if (dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_ROCE_V1_V2) {
2950 err = mlx4_config_roce_v2_port(dev, ROCE_V2_UDP_DPORT);
2951 if (err)
2952 goto err_notif;
Eli Cohenfa417f72010-10-24 21:08:52 -07002953 }
2954
Jack Morgenstein035b1032012-05-10 23:28:09 +03002955 for (j = 0; j < ARRAY_SIZE(mlx4_class_attributes); ++j) {
Tony Jonesf4e91eb2008-02-22 00:13:36 +01002956 if (device_create_file(&ibdev->ib_dev.dev,
Jack Morgenstein035b1032012-05-10 23:28:09 +03002957 mlx4_class_attributes[j]))
Eli Cohenfa417f72010-10-24 21:08:52 -07002958 goto err_notif;
Jack Morgensteincd9281d2007-09-18 09:14:18 +02002959 }
2960
Jack Morgenstein3b4a8cd2009-09-05 20:24:50 -07002961 ibdev->ib_active = true;
Jiri Pirko09d4d082016-02-26 17:32:24 +01002962 mlx4_foreach_port(i, dev, MLX4_PORT_TYPE_IB)
2963 devlink_port_type_ib_set(mlx4_get_devlink_port(dev, i),
2964 &ibdev->ib_dev);
Jack Morgenstein3b4a8cd2009-09-05 20:24:50 -07002965
Jack Morgenstein54679e12012-08-03 08:40:43 +00002966 if (mlx4_is_mfunc(ibdev->dev))
2967 init_pkeys(ibdev);
2968
Jack Morgenstein3806d082012-08-03 08:40:58 +00002969 /* create paravirt contexts for any VFs which are active */
2970 if (mlx4_is_master(ibdev->dev)) {
2971 for (j = 0; j < MLX4_MFUNC_MAX; j++) {
2972 if (j == mlx4_master_func_num(ibdev->dev))
2973 continue;
2974 if (mlx4_is_slave_active(ibdev->dev, j))
2975 do_slave_init(ibdev, j, 1);
2976 }
2977 }
Roland Dreier225c7b12007-05-08 18:00:38 -07002978 return ibdev;
2979
Eli Cohenfa417f72010-10-24 21:08:52 -07002980err_notif:
Moni Shouad487ee72013-12-12 18:03:13 +02002981 if (ibdev->iboe.nb.notifier_call) {
2982 if (unregister_netdevice_notifier(&ibdev->iboe.nb))
2983 pr_warn("failure unregistering notifier\n");
2984 ibdev->iboe.nb.notifier_call = NULL;
2985 }
Eli Cohenfa417f72010-10-24 21:08:52 -07002986 flush_workqueue(wq);
2987
Jack Morgensteinfc065732012-08-03 08:40:42 +00002988 mlx4_ib_close_sriov(ibdev);
2989
2990err_mad:
2991 mlx4_ib_mad_cleanup(ibdev);
2992
Roland Dreier225c7b12007-05-08 18:00:38 -07002993err_reg:
2994 ib_unregister_device(&ibdev->ib_dev);
2995
Mark Bloch3f85f2a2016-07-19 20:54:58 +03002996err_diag_counters:
2997 mlx4_ib_diag_cleanup(ibdev);
2998
Matan Barakc1c98502013-11-07 15:25:17 +02002999err_steer_free_bitmap:
3000 kfree(ibdev->ib_uc_qpns_bitmap);
3001
3002err_steer_qp_release:
Jack Morgenstein852f6922018-01-12 07:58:40 +02003003 mlx4_qp_release_range(dev, ibdev->steer_qpn_base,
3004 ibdev->steer_qpn_count);
Or Gerlitzcfcde112011-06-15 14:49:57 +00003005err_counter:
Eran Ben Elisha3ba8e312015-10-15 14:44:40 +03003006 for (i = 0; i < ibdev->num_ports; ++i)
3007 mlx4_ib_delete_counters_table(ibdev, &ibdev->counters_table[i]);
3008
Roland Dreier225c7b12007-05-08 18:00:38 -07003009err_map:
Jack Morgenstein99e68909d2017-03-21 12:57:05 +02003010 mlx4_ib_free_eqs(dev, ibdev);
Roland Dreier225c7b12007-05-08 18:00:38 -07003011 iounmap(ibdev->uar_map);
3012
3013err_uar:
3014 mlx4_uar_free(dev, &ibdev->priv_uar);
3015
3016err_pd:
3017 mlx4_pd_free(dev, ibdev->priv_pdn);
3018
3019err_dealloc:
3020 ib_dealloc_device(&ibdev->ib_dev);
3021
3022 return NULL;
3023}
3024
Matan Barakc1c98502013-11-07 15:25:17 +02003025int mlx4_ib_steer_qp_alloc(struct mlx4_ib_dev *dev, int count, int *qpn)
3026{
3027 int offset;
3028
3029 WARN_ON(!dev->ib_uc_qpns_bitmap);
3030
3031 offset = bitmap_find_free_region(dev->ib_uc_qpns_bitmap,
3032 dev->steer_qpn_count,
3033 get_count_order(count));
3034 if (offset < 0)
3035 return offset;
3036
3037 *qpn = dev->steer_qpn_base + offset;
3038 return 0;
3039}
3040
3041void mlx4_ib_steer_qp_free(struct mlx4_ib_dev *dev, u32 qpn, int count)
3042{
3043 if (!qpn ||
3044 dev->steering_support != MLX4_STEERING_MODE_DEVICE_MANAGED)
3045 return;
3046
3047 BUG_ON(qpn < dev->steer_qpn_base);
3048
3049 bitmap_release_region(dev->ib_uc_qpns_bitmap,
3050 qpn - dev->steer_qpn_base,
3051 get_count_order(count));
3052}
3053
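/* Attach (is_attach != 0) or detach the catch-all IB L2 steering rule that
 * directs traffic to a UC QP from the reserved steering QPN range.
 */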
3054int mlx4_ib_steer_qp_reg(struct mlx4_ib_dev *mdev, struct mlx4_ib_qp *mqp,
3055 int is_attach)
3056{
3057 int err;
3058 size_t flow_size;
3059 struct ib_flow_attr *flow = NULL;
3060 struct ib_flow_spec_ib *ib_spec;
3061
3062 if (is_attach) {
3063 flow_size = sizeof(struct ib_flow_attr) +
3064 sizeof(struct ib_flow_spec_ib);
3065 flow = kzalloc(flow_size, GFP_KERNEL);
3066 if (!flow)
3067 return -ENOMEM;
3068 flow->port = mqp->port;
3069 flow->num_of_specs = 1;
3070 flow->size = flow_size;
3071 ib_spec = (struct ib_flow_spec_ib *)(flow + 1);
3072 ib_spec->type = IB_FLOW_SPEC_IB;
3073 ib_spec->size = sizeof(struct ib_flow_spec_ib);
3074 /* Add an empty rule for IB L2 */
3075 memset(&ib_spec->mask, 0, sizeof(ib_spec->mask));
3076
3077 err = __mlx4_ib_create_flow(&mqp->ibqp, flow,
3078 IB_FLOW_DOMAIN_NIC,
3079 MLX4_FS_REGULAR,
3080 &mqp->reg_id);
3081 } else {
3082 err = __mlx4_ib_destroy_flow(mdev->dev, mqp->reg_id);
3083 }
3084 kfree(flow);
3085 return err;
3086}
3087
Roland Dreier225c7b12007-05-08 18:00:38 -07003088static void mlx4_ib_remove(struct mlx4_dev *dev, void *ibdev_ptr)
3089{
3090 struct mlx4_ib_dev *ibdev = ibdev_ptr;
3091 int p;
Jiri Pirko09d4d082016-02-26 17:32:24 +01003092 int i;
Roland Dreier225c7b12007-05-08 18:00:38 -07003093
Jiri Pirko09d4d082016-02-26 17:32:24 +01003094 mlx4_foreach_port(i, dev, MLX4_PORT_TYPE_IB)
3095 devlink_port_type_clear(mlx4_get_devlink_port(dev, i));
Moni Shoua4bf97152014-08-21 14:28:42 +03003096 ibdev->ib_active = false;
3097 flush_workqueue(wq);
3098
Jack Morgensteinfc065732012-08-03 08:40:42 +00003099 mlx4_ib_close_sriov(ibdev);
Yevgeny Petrilina6a47772009-03-18 19:49:54 -07003100 mlx4_ib_mad_cleanup(ibdev);
3101 ib_unregister_device(&ibdev->ib_dev);
Mark Bloch3f85f2a2016-07-19 20:54:58 +03003102 mlx4_ib_diag_cleanup(ibdev);
Eli Cohenfa417f72010-10-24 21:08:52 -07003103 if (ibdev->iboe.nb.notifier_call) {
3104 if (unregister_netdevice_notifier(&ibdev->iboe.nb))
Shlomo Pongratz987c8f82012-04-29 17:04:26 +03003105 pr_warn("failure unregistering notifier\n");
Eli Cohenfa417f72010-10-24 21:08:52 -07003106 ibdev->iboe.nb.notifier_call = NULL;
3107 }
Matan Barakc1c98502013-11-07 15:25:17 +02003108
Jack Morgenstein852f6922018-01-12 07:58:40 +02003109 mlx4_qp_release_range(dev, ibdev->steer_qpn_base,
3110 ibdev->steer_qpn_count);
3111 kfree(ibdev->ib_uc_qpns_bitmap);
Matan Barakc1c98502013-11-07 15:25:17 +02003112
Eli Cohenfa417f72010-10-24 21:08:52 -07003113 iounmap(ibdev->uar_map);
Or Gerlitzcfcde112011-06-15 14:49:57 +00003114 for (p = 0; p < ibdev->num_ports; ++p)
Eran Ben Elisha3ba8e312015-10-15 14:44:40 +03003115 mlx4_ib_delete_counters_table(ibdev, &ibdev->counters_table[p]);
3116
Eli Cohenfa417f72010-10-24 21:08:52 -07003117 mlx4_foreach_port(p, dev, MLX4_PORT_TYPE_IB)
Roland Dreier225c7b12007-05-08 18:00:38 -07003118 mlx4_CLOSE_PORT(dev, p);
3119
Shlomo Pongratze605b742012-04-29 17:04:27 +03003120 mlx4_ib_free_eqs(dev, ibdev);
3121
Roland Dreier225c7b12007-05-08 18:00:38 -07003122 mlx4_uar_free(dev, &ibdev->priv_uar);
3123 mlx4_pd_free(dev, ibdev->priv_pdn);
3124 ib_dealloc_device(&ibdev->ib_dev);
3125}
3126
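/* Queue per-port work items that set up (do_init != 0) or tear down the
 * paravirtualized tunnel QPs for one slave; only the master does this.
 */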
Jack Morgensteinfc065732012-08-03 08:40:42 +00003127static void do_slave_init(struct mlx4_ib_dev *ibdev, int slave, int do_init)
3128{
3129 struct mlx4_ib_demux_work **dm = NULL;
3130 struct mlx4_dev *dev = ibdev->dev;
3131 int i;
3132 unsigned long flags;
Matan Barak449fc482014-03-19 18:11:52 +02003133 struct mlx4_active_ports actv_ports;
3134 unsigned int ports;
3135 unsigned int first_port;
Jack Morgensteinfc065732012-08-03 08:40:42 +00003136
3137 if (!mlx4_is_master(dev))
3138 return;
3139
Matan Barak449fc482014-03-19 18:11:52 +02003140 actv_ports = mlx4_get_active_ports(dev, slave);
3141 ports = bitmap_weight(actv_ports.ports, dev->caps.num_ports);
3142 first_port = find_first_bit(actv_ports.ports, dev->caps.num_ports);
3143
3144 dm = kcalloc(ports, sizeof(*dm), GFP_ATOMIC);
Leon Romanovsky15d46262016-11-03 16:44:12 +02003145 if (!dm)
Maninder Singha39a98f2015-07-08 09:43:35 +05303146 return;
Jack Morgensteinfc065732012-08-03 08:40:42 +00003147
Matan Barak449fc482014-03-19 18:11:52 +02003148 for (i = 0; i < ports; i++) {
Jack Morgensteinfc065732012-08-03 08:40:42 +00003149 dm[i] = kmalloc(sizeof(*dm[i]), GFP_ATOMIC);
3150 if (!dm[i]) {
Maninder Singha39a98f2015-07-08 09:43:35 +05303151 while (--i >= 0)
3152 kfree(dm[i]);
Jack Morgensteinfc065732012-08-03 08:40:42 +00003153 goto out;
3154 }
Jack Morgensteinfc065732012-08-03 08:40:42 +00003155 INIT_WORK(&dm[i]->work, mlx4_ib_tunnels_update_work);
Matan Barak449fc482014-03-19 18:11:52 +02003156 dm[i]->port = first_port + i + 1;
Jack Morgensteinfc065732012-08-03 08:40:42 +00003157 dm[i]->slave = slave;
3158 dm[i]->do_init = do_init;
3159 dm[i]->dev = ibdev;
Doug Ledfordd9a047a2015-07-09 10:21:08 -04003160 }
3161 /* initialize or tear down tunnel QPs for the slave */
3162 spin_lock_irqsave(&ibdev->sriov.going_down_lock, flags);
3163 if (!ibdev->sriov.is_going_down) {
3164 for (i = 0; i < ports; i++)
Jack Morgensteinfc065732012-08-03 08:40:42 +00003165 queue_work(ibdev->sriov.demux[i].ud_wq, &dm[i]->work);
3166 spin_unlock_irqrestore(&ibdev->sriov.going_down_lock, flags);
Doug Ledfordd9a047a2015-07-09 10:21:08 -04003167 } else {
3168 spin_unlock_irqrestore(&ibdev->sriov.going_down_lock, flags);
3169 for (i = 0; i < ports; i++)
3170 kfree(dm[i]);
Jack Morgensteinfc065732012-08-03 08:40:42 +00003171 }
3172out:
Syam Sidhardhanc89d1272013-02-24 23:20:05 +00003173 kfree(dm);
Jack Morgensteinfc065732012-08-03 08:40:42 +00003174 return;
3175}
3176
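/* On a catastrophic device error, walk all QPs on this ibdev and invoke the
 * completion handler of every CQ that still has outstanding work (each CQ is
 * notified at most once via reset_notify_added).
 */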
Yishai Hadas35f05da2015-02-08 11:49:34 +02003177static void mlx4_ib_handle_catas_error(struct mlx4_ib_dev *ibdev)
3178{
3179 struct mlx4_ib_qp *mqp;
3180 unsigned long flags_qp;
3181 unsigned long flags_cq;
3182 struct mlx4_ib_cq *send_mcq, *recv_mcq;
3183 struct list_head cq_notify_list;
3184 struct mlx4_cq *mcq;
3185 unsigned long flags;
3186
3187 pr_warn("mlx4_ib_handle_catas_error started\n");
3188 INIT_LIST_HEAD(&cq_notify_list);
3189
3190 /* Go over the qp list residing on this ibdev, syncing with qp create/destroy. */
3191 spin_lock_irqsave(&ibdev->reset_flow_resource_lock, flags);
3192
3193 list_for_each_entry(mqp, &ibdev->qp_list, qps_list) {
3194 spin_lock_irqsave(&mqp->sq.lock, flags_qp);
3195 if (mqp->sq.tail != mqp->sq.head) {
3196 send_mcq = to_mcq(mqp->ibqp.send_cq);
3197 spin_lock_irqsave(&send_mcq->lock, flags_cq);
3198 if (send_mcq->mcq.comp &&
3199 mqp->ibqp.send_cq->comp_handler) {
3200 if (!send_mcq->mcq.reset_notify_added) {
3201 send_mcq->mcq.reset_notify_added = 1;
3202 list_add_tail(&send_mcq->mcq.reset_notify,
3203 &cq_notify_list);
3204 }
3205 }
3206 spin_unlock_irqrestore(&send_mcq->lock, flags_cq);
3207 }
3208 spin_unlock_irqrestore(&mqp->sq.lock, flags_qp);
3209 /* Now, handle the QP's receive queue */
3210 spin_lock_irqsave(&mqp->rq.lock, flags_qp);
3211 /* no handling is needed for SRQ */
3212 if (!mqp->ibqp.srq) {
3213 if (mqp->rq.tail != mqp->rq.head) {
3214 recv_mcq = to_mcq(mqp->ibqp.recv_cq);
3215 spin_lock_irqsave(&recv_mcq->lock, flags_cq);
3216 if (recv_mcq->mcq.comp &&
3217 mqp->ibqp.recv_cq->comp_handler) {
3218 if (!recv_mcq->mcq.reset_notify_added) {
3219 recv_mcq->mcq.reset_notify_added = 1;
3220 list_add_tail(&recv_mcq->mcq.reset_notify,
3221 &cq_notify_list);
3222 }
3223 }
3224 spin_unlock_irqrestore(&recv_mcq->lock,
3225 flags_cq);
3226 }
3227 }
3228 spin_unlock_irqrestore(&mqp->rq.lock, flags_qp);
3229 }
3230
3231 list_for_each_entry(mcq, &cq_notify_list, reset_notify) {
3232 mcq->comp(mcq);
3233 }
3234 spin_unlock_irqrestore(&ibdev->reset_flow_resource_lock, flags);
3235 pr_warn("mlx4_ib_handle_catas_error ended\n");
3236}
3237
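/* Report the logical bond as ACTIVE if any slave netdev is running with
 * carrier, otherwise DOWN, and dispatch the corresponding event on port 1.
 */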
Moni Shouaa5750092015-02-03 16:48:37 +02003238static void handle_bonded_port_state_event(struct work_struct *work)
3239{
3240 struct ib_event_work *ew =
3241 container_of(work, struct ib_event_work, work);
3242 struct mlx4_ib_dev *ibdev = ew->ib_dev;
3243 enum ib_port_state bonded_port_state = IB_PORT_NOP;
3244 int i;
3245 struct ib_event ibev;
3246
3247 kfree(ew);
3248 spin_lock_bh(&ibdev->iboe.lock);
3249 for (i = 0; i < MLX4_MAX_PORTS; ++i) {
3250 struct net_device *curr_netdev = ibdev->iboe.netdevs[i];
Moni Shoua217e8b12015-03-18 16:51:35 +02003251 enum ib_port_state curr_port_state;
Moni Shouaa5750092015-02-03 16:48:37 +02003252
Moni Shoua217e8b12015-03-18 16:51:35 +02003253 if (!curr_netdev)
3254 continue;
3255
3256 curr_port_state =
Moni Shouaa5750092015-02-03 16:48:37 +02003257 (netif_running(curr_netdev) &&
3258 netif_carrier_ok(curr_netdev)) ?
3259 IB_PORT_ACTIVE : IB_PORT_DOWN;
3260
3261 bonded_port_state = (bonded_port_state != IB_PORT_ACTIVE) ?
3262 curr_port_state : IB_PORT_ACTIVE;
3263 }
3264 spin_unlock_bh(&ibdev->iboe.lock);
3265
3266 ibev.device = &ibdev->ib_dev;
3267 ibev.element.port_num = 1;
3268 ibev.event = (bonded_port_state == IB_PORT_ACTIVE) ?
3269 IB_EVENT_PORT_ACTIVE : IB_EVENT_PORT_ERR;
3270
3271 ib_dispatch_event(&ibev);
3272}
3273
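/* Refresh the cached SL-to-VL mapping for a port; if the query fails, fall
 * back to an all-zero mapping.
 */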
Jack Morgensteinfd10ed82016-09-12 19:16:21 +03003274void mlx4_ib_sl2vl_update(struct mlx4_ib_dev *mdev, int port)
3275{
3276 u64 sl2vl;
3277 int err;
3278
3279 err = mlx4_ib_query_sl2vl(&mdev->ib_dev, port, &sl2vl);
3280 if (err) {
3281 pr_err("Unable to get current sl to vl mapping for port %d. Using all zeroes (%d)\n",
3282 port, err);
3283 sl2vl = 0;
3284 }
3285 atomic64_set(&mdev->sl2vl[port - 1], sl2vl);
3286}
3287
3288static void ib_sl2vl_update_work(struct work_struct *work)
3289{
3290 struct ib_event_work *ew = container_of(work, struct ib_event_work, work);
3291 struct mlx4_ib_dev *mdev = ew->ib_dev;
3292 int port = ew->port;
3293
3294 mlx4_ib_sl2vl_update(mdev, port);
3295
3296 kfree(ew);
3297}
3298
3299void mlx4_sched_ib_sl2vl_update_work(struct mlx4_ib_dev *ibdev,
3300 int port)
3301{
3302 struct ib_event_work *ew;
3303
3304 ew = kmalloc(sizeof(*ew), GFP_ATOMIC);
3305 if (ew) {
3306 INIT_WORK(&ew->work, ib_sl2vl_update_work);
3307 ew->port = port;
3308 ew->ib_dev = ibdev;
3309 queue_work(wq, &ew->work);
Jack Morgensteinfd10ed82016-09-12 19:16:21 +03003310 }
3311}
3312
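/* Dispatcher for mlx4_core events: translate them into IB events or defer
 * them to the driver workqueue as needed.
 */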
Roland Dreier225c7b12007-05-08 18:00:38 -07003313static void mlx4_ib_event(struct mlx4_dev *dev, void *ibdev_ptr,
Jack Morgenstein00f5ce92012-06-19 11:21:40 +03003314 enum mlx4_dev_event event, unsigned long param)
Roland Dreier225c7b12007-05-08 18:00:38 -07003315{
3316 struct ib_event ibev;
Yevgeny Petrilin7ff93f82008-10-22 15:38:42 -07003317 struct mlx4_ib_dev *ibdev = to_mdev((struct ib_device *) ibdev_ptr);
Jack Morgenstein00f5ce92012-06-19 11:21:40 +03003318 struct mlx4_eqe *eqe = NULL;
3319 struct ib_event_work *ew;
Jack Morgensteinfc065732012-08-03 08:40:42 +00003320 int p = 0;
Jack Morgenstein00f5ce92012-06-19 11:21:40 +03003321
Moni Shouaa5750092015-02-03 16:48:37 +02003322 if (mlx4_is_bonded(dev) &&
3323 ((event == MLX4_DEV_EVENT_PORT_UP) ||
3324 (event == MLX4_DEV_EVENT_PORT_DOWN))) {
3325 ew = kmalloc(sizeof(*ew), GFP_ATOMIC);
3326 if (!ew)
3327 return;
3328 INIT_WORK(&ew->work, handle_bonded_port_state_event);
3329 ew->ib_dev = ibdev;
3330 queue_work(wq, &ew->work);
3331 return;
3332 }
3333
Jack Morgenstein00f5ce92012-06-19 11:21:40 +03003334 if (event == MLX4_DEV_EVENT_PORT_MGMT_CHANGE)
3335 eqe = (struct mlx4_eqe *)param;
3336 else
Jack Morgensteinfc065732012-08-03 08:40:42 +00003337 p = (int) param;
Roland Dreier225c7b12007-05-08 18:00:38 -07003338
3339 switch (event) {
Roland Dreier37608ee2008-04-16 21:01:08 -07003340 case MLX4_DEV_EVENT_PORT_UP:
Jack Morgensteinfc065732012-08-03 08:40:42 +00003341 if (p > ibdev->num_ports)
3342 return;
Jack Morgensteinfd10ed82016-09-12 19:16:21 +03003343 if (!mlx4_is_slave(dev) &&
Jack Morgensteina0c64a12012-08-03 08:40:49 +00003344 rdma_port_get_link_layer(&ibdev->ib_dev, p) ==
3345 IB_LINK_LAYER_INFINIBAND) {
Jack Morgensteinfd10ed82016-09-12 19:16:21 +03003346 if (mlx4_is_master(dev))
3347 mlx4_ib_invalidate_all_guid_record(ibdev, p);
3348 if (ibdev->dev->flags & MLX4_FLAG_SECURE_HOST &&
3349 !(ibdev->dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_SL_TO_VL_CHANGE_EVENT))
3350 mlx4_sched_ib_sl2vl_update_work(ibdev, p);
Jack Morgensteina0c64a12012-08-03 08:40:49 +00003351 }
Roland Dreier37608ee2008-04-16 21:01:08 -07003352 ibev.event = IB_EVENT_PORT_ACTIVE;
Roland Dreier225c7b12007-05-08 18:00:38 -07003353 break;
3354
Roland Dreier37608ee2008-04-16 21:01:08 -07003355 case MLX4_DEV_EVENT_PORT_DOWN:
Jack Morgensteinfc065732012-08-03 08:40:42 +00003356 if (p > ibdev->num_ports)
3357 return;
Roland Dreier37608ee2008-04-16 21:01:08 -07003358 ibev.event = IB_EVENT_PORT_ERR;
3359 break;
3360
3361 case MLX4_DEV_EVENT_CATASTROPHIC_ERROR:
Jack Morgenstein3b4a8cd2009-09-05 20:24:50 -07003362 ibdev->ib_active = false;
Roland Dreier225c7b12007-05-08 18:00:38 -07003363 ibev.event = IB_EVENT_DEVICE_FATAL;
Yishai Hadas35f05da2015-02-08 11:49:34 +02003364 mlx4_ib_handle_catas_error(ibdev);
Roland Dreier225c7b12007-05-08 18:00:38 -07003365 break;
3366
Jack Morgenstein00f5ce92012-06-19 11:21:40 +03003367 case MLX4_DEV_EVENT_PORT_MGMT_CHANGE:
3368 ew = kmalloc(sizeof *ew, GFP_ATOMIC);
Leon Romanovsky15d46262016-11-03 16:44:12 +02003369 if (!ew)
Jack Morgenstein00f5ce92012-06-19 11:21:40 +03003370 break;
Jack Morgenstein00f5ce92012-06-19 11:21:40 +03003371
3372 INIT_WORK(&ew->work, handle_port_mgmt_change_event);
3373 memcpy(&ew->ib_eqe, eqe, sizeof *eqe);
3374 ew->ib_dev = ibdev;
Jack Morgenstein992e8e6e2012-08-03 08:40:54 +00003375 /* need to queue only for port owner, which uses GEN_EQE */
3376 if (mlx4_is_master(dev))
3377 queue_work(wq, &ew->work);
3378 else
3379 handle_port_mgmt_change_event(&ew->work);
Jack Morgenstein00f5ce92012-06-19 11:21:40 +03003380 return;
3381
Jack Morgensteinfc065732012-08-03 08:40:42 +00003382 case MLX4_DEV_EVENT_SLAVE_INIT:
3383 /* here, p is the slave id */
3384 do_slave_init(ibdev, p, 1);
Yishai Hadasee59fa02015-03-03 17:28:49 +02003385 if (mlx4_is_master(dev)) {
3386 int i;
3387
3388 for (i = 1; i <= ibdev->num_ports; i++) {
3389 if (rdma_port_get_link_layer(&ibdev->ib_dev, i)
3390 == IB_LINK_LAYER_INFINIBAND)
3391 mlx4_ib_slave_alias_guid_event(ibdev,
3392 p, i,
3393 1);
3394 }
3395 }
Jack Morgensteinfc065732012-08-03 08:40:42 +00003396 return;
3397
3398 case MLX4_DEV_EVENT_SLAVE_SHUTDOWN:
Yishai Hadasee59fa02015-03-03 17:28:49 +02003399 if (mlx4_is_master(dev)) {
3400 int i;
3401
3402 for (i = 1; i <= ibdev->num_ports; i++) {
3403 if (rdma_port_get_link_layer(&ibdev->ib_dev, i)
3404 == IB_LINK_LAYER_INFINIBAND)
3405 mlx4_ib_slave_alias_guid_event(ibdev,
3406 p, i,
3407 0);
3408 }
3409 }
Jack Morgensteinfc065732012-08-03 08:40:42 +00003410 /* here, p is the slave id */
3411 do_slave_init(ibdev, p, 0);
3412 return;
3413
Roland Dreier225c7b12007-05-08 18:00:38 -07003414 default:
3415 return;
3416 }
3417
3418 ibev.device = ibdev_ptr;
Moni Shouaa5750092015-02-03 16:48:37 +02003419 ibev.element.port_num = mlx4_is_bonded(ibdev->dev) ? 1 : (u8)p;
Roland Dreier225c7b12007-05-08 18:00:38 -07003420
3421 ib_dispatch_event(&ibev);
3422}
3423
3424static struct mlx4_interface mlx4_ib_interface = {
Eli Cohenfa417f72010-10-24 21:08:52 -07003425 .add = mlx4_ib_add,
3426 .remove = mlx4_ib_remove,
3427 .event = mlx4_ib_event,
Moni Shouaa5750092015-02-03 16:48:37 +02003428 .protocol = MLX4_PROT_IB_IPV6,
3429 .flags = MLX4_INTFF_BONDING
Roland Dreier225c7b12007-05-08 18:00:38 -07003430};
3431
3432static int __init mlx4_ib_init(void)
3433{
Eli Cohenfa417f72010-10-24 21:08:52 -07003434 int err;
3435
Bhaktipriya Shridhar41cd3942016-08-15 23:42:48 +05303436 wq = alloc_ordered_workqueue("mlx4_ib", WQ_MEM_RECLAIM);
Eli Cohenfa417f72010-10-24 21:08:52 -07003437 if (!wq)
3438 return -ENOMEM;
3439
Oren Duerb9c5d6a2012-08-03 08:40:46 +00003440 err = mlx4_ib_mcg_init();
3441 if (err)
3442 goto clean_wq;
3443
Eli Cohenfa417f72010-10-24 21:08:52 -07003444 err = mlx4_register_interface(&mlx4_ib_interface);
Oren Duerb9c5d6a2012-08-03 08:40:46 +00003445 if (err)
3446 goto clean_mcg;
Eli Cohenfa417f72010-10-24 21:08:52 -07003447
3448 return 0;
Oren Duerb9c5d6a2012-08-03 08:40:46 +00003449
3450clean_mcg:
3451 mlx4_ib_mcg_destroy();
3452
3453clean_wq:
3454 destroy_workqueue(wq);
3455 return err;
Roland Dreier225c7b12007-05-08 18:00:38 -07003456}
3457
3458static void __exit mlx4_ib_cleanup(void)
3459{
3460 mlx4_unregister_interface(&mlx4_ib_interface);
Oren Duerb9c5d6a2012-08-03 08:40:46 +00003461 mlx4_ib_mcg_destroy();
Eli Cohenfa417f72010-10-24 21:08:52 -07003462 destroy_workqueue(wq);
Roland Dreier225c7b12007-05-08 18:00:38 -07003463}
3464
3465module_init(mlx4_ib_init);
3466module_exit(mlx4_ib_cleanup);