/*
 * Copyright (c) 2006, 2007 Cisco Systems, Inc. All rights reserved.
 * Copyright (c) 2007, 2008 Mellanox Technologies. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/module.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/errno.h>
#include <linux/netdevice.h>
#include <linux/inetdevice.h>
#include <linux/rtnetlink.h>
#include <linux/if_vlan.h>
#include <linux/sched/mm.h>
#include <linux/sched/task.h>

#include <net/ipv6.h>
#include <net/addrconf.h>
#include <net/devlink.h>

#include <rdma/ib_smi.h>
#include <rdma/ib_user_verbs.h>
#include <rdma/ib_addr.h>
#include <rdma/ib_cache.h>

#include <net/bonding.h>

#include <linux/mlx4/driver.h>
#include <linux/mlx4/cmd.h>
#include <linux/mlx4/qp.h>

#include "mlx4_ib.h"
#include <rdma/mlx4-abi.h>

#define DRV_NAME	MLX4_IB_DRV_NAME
#define DRV_VERSION	"4.0-0"

#define MLX4_IB_FLOW_MAX_PRIO 0xFFF
#define MLX4_IB_FLOW_QPN_MASK 0xFFFFFF
#define MLX4_IB_CARD_REV_A0   0xA0

MODULE_AUTHOR("Roland Dreier");
MODULE_DESCRIPTION("Mellanox ConnectX HCA InfiniBand driver");
MODULE_LICENSE("Dual BSD/GPL");

int mlx4_ib_sm_guid_assign = 0;
module_param_named(sm_guid_assign, mlx4_ib_sm_guid_assign, int, 0444);
MODULE_PARM_DESC(sm_guid_assign, "Enable SM alias_GUID assignment if sm_guid_assign > 0 (Default: 0)");

static const char mlx4_ib_version[] =
	DRV_NAME ": Mellanox ConnectX InfiniBand driver v"
	DRV_VERSION "\n";

static void do_slave_init(struct mlx4_ib_dev *ibdev, int slave, int do_init);
static enum rdma_link_layer mlx4_ib_port_link_layer(struct ib_device *device,
						    u8 port_num);

static struct workqueue_struct *wq;

static void init_query_mad(struct ib_smp *mad)
{
	mad->base_version = 1;
	mad->mgmt_class = IB_MGMT_CLASS_SUBN_LID_ROUTED;
	mad->class_version = 1;
	mad->method = IB_MGMT_METHOD_GET;
}

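/*
 * Decide whether device-managed flow steering (DMFS) can be used: it
 * requires the relevant FW capability for the Ethernet/IB ports present,
 * and is not available for IB ports in a multifunction environment.
 */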
static int check_flow_steering_support(struct mlx4_dev *dev)
{
	int eth_num_ports = 0;
	int ib_num_ports = 0;

	int dmfs = dev->caps.steering_mode == MLX4_STEERING_MODE_DEVICE_MANAGED;

	if (dmfs) {
		int i;
		mlx4_foreach_port(i, dev, MLX4_PORT_TYPE_ETH)
			eth_num_ports++;
		mlx4_foreach_port(i, dev, MLX4_PORT_TYPE_IB)
			ib_num_ports++;
		dmfs &= (!ib_num_ports ||
			 (dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_DMFS_IPOIB)) &&
			(!eth_num_ports ||
			 (dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_FS_EN));
		if (ib_num_ports && mlx4_is_mfunc(dev)) {
			pr_warn("Device managed flow steering is unavailable for IB port in multifunction env.\n");
			dmfs = 0;
		}
	}
	return dmfs;
}

static int num_ib_ports(struct mlx4_dev *dev)
{
	int ib_ports = 0;
	int i;

	mlx4_foreach_port(i, dev, MLX4_PORT_TYPE_IB)
		ib_ports++;

	return ib_ports;
}

static struct net_device *mlx4_ib_get_netdev(struct ib_device *device, u8 port_num)
{
	struct mlx4_ib_dev *ibdev = to_mdev(device);
	struct net_device *dev;

	rcu_read_lock();
	dev = mlx4_get_protocol_dev(ibdev->dev, MLX4_PROT_ETH, port_num);

	if (dev) {
		if (mlx4_is_bonded(ibdev->dev)) {
			struct net_device *upper = NULL;

			upper = netdev_master_upper_dev_get_rcu(dev);
			if (upper) {
				struct net_device *active;

				active = bond_option_active_slave_get_rcu(netdev_priv(upper));
				if (active)
					dev = active;
			}
		}
	}
	if (dev)
		dev_hold(dev);

	rcu_read_unlock();
	return dev;
}

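/*
 * Push the whole per-port GID table to firmware via SET_PORT.  The v1
 * format carries only the raw GIDs; the v1_v2 variant below also encodes
 * the RoCE version and address type of each entry.  When the ports are
 * bonded, the same table is programmed on port 2 as well.
 */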
static int mlx4_ib_update_gids_v1(struct gid_entry *gids,
				  struct mlx4_ib_dev *ibdev,
				  u8 port_num)
{
	struct mlx4_cmd_mailbox *mailbox;
	int err;
	struct mlx4_dev *dev = ibdev->dev;
	int i;
	union ib_gid *gid_tbl;

	mailbox = mlx4_alloc_cmd_mailbox(dev);
	if (IS_ERR(mailbox))
		return -ENOMEM;

	gid_tbl = mailbox->buf;

	for (i = 0; i < MLX4_MAX_PORT_GIDS; ++i)
		memcpy(&gid_tbl[i], &gids[i].gid, sizeof(union ib_gid));

	err = mlx4_cmd(dev, mailbox->dma,
		       MLX4_SET_PORT_GID_TABLE << 8 | port_num,
		       1, MLX4_CMD_SET_PORT, MLX4_CMD_TIME_CLASS_B,
		       MLX4_CMD_WRAPPED);
	if (mlx4_is_bonded(dev))
		err += mlx4_cmd(dev, mailbox->dma,
				MLX4_SET_PORT_GID_TABLE << 8 | 2,
				1, MLX4_CMD_SET_PORT, MLX4_CMD_TIME_CLASS_B,
				MLX4_CMD_WRAPPED);

	mlx4_free_cmd_mailbox(dev, mailbox);
	return err;
}

static int mlx4_ib_update_gids_v1_v2(struct gid_entry *gids,
				     struct mlx4_ib_dev *ibdev,
				     u8 port_num)
{
	struct mlx4_cmd_mailbox *mailbox;
	int err;
	struct mlx4_dev *dev = ibdev->dev;
	int i;
	struct {
		union ib_gid	gid;
		__be32		rsrvd1[2];
		__be16		rsrvd2;
		u8		type;
		u8		version;
		__be32		rsrvd3;
	} *gid_tbl;

	mailbox = mlx4_alloc_cmd_mailbox(dev);
	if (IS_ERR(mailbox))
		return -ENOMEM;

	gid_tbl = mailbox->buf;
	for (i = 0; i < MLX4_MAX_PORT_GIDS; ++i) {
		memcpy(&gid_tbl[i].gid, &gids[i].gid, sizeof(union ib_gid));
		if (gids[i].gid_type == IB_GID_TYPE_ROCE_UDP_ENCAP) {
			gid_tbl[i].version = 2;
			if (!ipv6_addr_v4mapped((struct in6_addr *)&gids[i].gid))
				gid_tbl[i].type = 1;
		}
	}

	err = mlx4_cmd(dev, mailbox->dma,
		       MLX4_SET_PORT_ROCE_ADDR << 8 | port_num,
		       1, MLX4_CMD_SET_PORT, MLX4_CMD_TIME_CLASS_B,
		       MLX4_CMD_WRAPPED);
	if (mlx4_is_bonded(dev))
		err += mlx4_cmd(dev, mailbox->dma,
				MLX4_SET_PORT_ROCE_ADDR << 8 | 2,
				1, MLX4_CMD_SET_PORT, MLX4_CMD_TIME_CLASS_B,
				MLX4_CMD_WRAPPED);

	mlx4_free_cmd_mailbox(dev, mailbox);
	return err;
}

static int mlx4_ib_update_gids(struct gid_entry *gids,
			       struct mlx4_ib_dev *ibdev,
			       u8 port_num)
{
	if (ibdev->dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_ROCE_V1_V2)
		return mlx4_ib_update_gids_v1_v2(gids, ibdev, port_num);

	return mlx4_ib_update_gids_v1(gids, ibdev, port_num);
}

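/*
 * Add a GID to the per-port software cache.  An existing matching entry
 * just gets its refcount bumped; otherwise a free slot is claimed and the
 * updated table is pushed to hardware outside the spinlock.
 */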
static int mlx4_ib_add_gid(const struct ib_gid_attr *attr, void **context)
{
	struct mlx4_ib_dev *ibdev = to_mdev(attr->device);
	struct mlx4_ib_iboe *iboe = &ibdev->iboe;
	struct mlx4_port_gid_table *port_gid_table;
	int free = -1, found = -1;
	int ret = 0;
	int hw_update = 0;
	int i;
	struct gid_entry *gids = NULL;

	if (!rdma_cap_roce_gid_table(attr->device, attr->port_num))
		return -EINVAL;

	if (attr->port_num > MLX4_MAX_PORTS)
		return -EINVAL;

	if (!context)
		return -EINVAL;

	port_gid_table = &iboe->gids[attr->port_num - 1];
	spin_lock_bh(&iboe->lock);
	for (i = 0; i < MLX4_MAX_PORT_GIDS; ++i) {
		if (!memcmp(&port_gid_table->gids[i].gid,
			    &attr->gid, sizeof(attr->gid)) &&
		    port_gid_table->gids[i].gid_type == attr->gid_type) {
			found = i;
			break;
		}
		if (free < 0 && rdma_is_zero_gid(&port_gid_table->gids[i].gid))
			free = i; /* HW has space */
	}

	if (found < 0) {
		if (free < 0) {
			ret = -ENOSPC;
		} else {
			port_gid_table->gids[free].ctx = kmalloc(sizeof(*port_gid_table->gids[free].ctx), GFP_ATOMIC);
			if (!port_gid_table->gids[free].ctx) {
				ret = -ENOMEM;
			} else {
				*context = port_gid_table->gids[free].ctx;
				memcpy(&port_gid_table->gids[free].gid,
				       &attr->gid, sizeof(attr->gid));
				port_gid_table->gids[free].gid_type = attr->gid_type;
				port_gid_table->gids[free].ctx->real_index = free;
				port_gid_table->gids[free].ctx->refcount = 1;
				hw_update = 1;
			}
		}
	} else {
		struct gid_cache_context *ctx = port_gid_table->gids[found].ctx;
		*context = ctx;
		ctx->refcount++;
	}
	if (!ret && hw_update) {
		gids = kmalloc_array(MLX4_MAX_PORT_GIDS, sizeof(*gids),
				     GFP_ATOMIC);
		if (!gids) {
			ret = -ENOMEM;
		} else {
			for (i = 0; i < MLX4_MAX_PORT_GIDS; i++) {
				memcpy(&gids[i].gid, &port_gid_table->gids[i].gid, sizeof(union ib_gid));
				gids[i].gid_type = port_gid_table->gids[i].gid_type;
			}
		}
	}
	spin_unlock_bh(&iboe->lock);

	if (!ret && hw_update) {
		ret = mlx4_ib_update_gids(gids, ibdev, attr->port_num);
		kfree(gids);
	}

	return ret;
}

static int mlx4_ib_del_gid(const struct ib_gid_attr *attr, void **context)
{
	struct gid_cache_context *ctx = *context;
	struct mlx4_ib_dev *ibdev = to_mdev(attr->device);
	struct mlx4_ib_iboe *iboe = &ibdev->iboe;
	struct mlx4_port_gid_table *port_gid_table;
	int ret = 0;
	int hw_update = 0;
	struct gid_entry *gids = NULL;

	if (!rdma_cap_roce_gid_table(attr->device, attr->port_num))
		return -EINVAL;

	if (attr->port_num > MLX4_MAX_PORTS)
		return -EINVAL;

	port_gid_table = &iboe->gids[attr->port_num - 1];
	spin_lock_bh(&iboe->lock);
	if (ctx) {
		ctx->refcount--;
		if (!ctx->refcount) {
			unsigned int real_index = ctx->real_index;

			memset(&port_gid_table->gids[real_index].gid, 0,
			       sizeof(port_gid_table->gids[real_index].gid));
			kfree(port_gid_table->gids[real_index].ctx);
			port_gid_table->gids[real_index].ctx = NULL;
			hw_update = 1;
		}
	}
	if (!ret && hw_update) {
		int i;

		gids = kmalloc_array(MLX4_MAX_PORT_GIDS, sizeof(*gids),
				     GFP_ATOMIC);
		if (!gids) {
			ret = -ENOMEM;
		} else {
			for (i = 0; i < MLX4_MAX_PORT_GIDS; i++) {
				memcpy(&gids[i].gid,
				       &port_gid_table->gids[i].gid,
				       sizeof(union ib_gid));
				gids[i].gid_type =
				    port_gid_table->gids[i].gid_type;
			}
		}
	}
	spin_unlock_bh(&iboe->lock);

	if (!ret && hw_update) {
		ret = mlx4_ib_update_gids(gids, ibdev, attr->port_num);
		kfree(gids);
	}
	return ret;
}

int mlx4_ib_gid_index_to_real_index(struct mlx4_ib_dev *ibdev,
				    const struct ib_gid_attr *attr)
{
	struct mlx4_ib_iboe *iboe = &ibdev->iboe;
	struct gid_cache_context *ctx = NULL;
	struct mlx4_port_gid_table *port_gid_table;
	int real_index = -EINVAL;
	int i;
	unsigned long flags;
	u8 port_num = attr->port_num;

	if (port_num > MLX4_MAX_PORTS)
		return -EINVAL;

	if (mlx4_is_bonded(ibdev->dev))
		port_num = 1;

	if (!rdma_cap_roce_gid_table(&ibdev->ib_dev, port_num))
		return attr->index;

	spin_lock_irqsave(&iboe->lock, flags);
	port_gid_table = &iboe->gids[port_num - 1];

	for (i = 0; i < MLX4_MAX_PORT_GIDS; ++i)
		if (!memcmp(&port_gid_table->gids[i].gid,
			    &attr->gid, sizeof(attr->gid)) &&
		    attr->gid_type == port_gid_table->gids[i].gid_type) {
			ctx = port_gid_table->gids[i].ctx;
			break;
		}
	if (ctx)
		real_index = ctx->real_index;
	spin_unlock_irqrestore(&iboe->lock, flags);
	return real_index;
}

#define field_avail(type, fld, sz) (offsetof(type, fld) + \
				    sizeof(((type *)0)->fld) <= (sz))

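/*
 * Query device capabilities.  Fixed attributes come from FW caps and a
 * NODE_INFO MAD; extended attributes (hca_core_clock offset, RSS and TSO
 * caps, etc.) are reported only when the user buffer in @uhw is large
 * enough to hold them.
 */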
static int mlx4_ib_query_device(struct ib_device *ibdev,
				struct ib_device_attr *props,
				struct ib_udata *uhw)
{
	struct mlx4_ib_dev *dev = to_mdev(ibdev);
	struct ib_smp *in_mad = NULL;
	struct ib_smp *out_mad = NULL;
	int err;
	int have_ib_ports;
	struct mlx4_uverbs_ex_query_device cmd;
	struct mlx4_uverbs_ex_query_device_resp resp = {.comp_mask = 0};
	struct mlx4_clock_params clock_params;

	if (uhw->inlen) {
		if (uhw->inlen < sizeof(cmd))
			return -EINVAL;

		err = ib_copy_from_udata(&cmd, uhw, sizeof(cmd));
		if (err)
			return err;

		if (cmd.comp_mask)
			return -EINVAL;

		if (cmd.reserved)
			return -EINVAL;
	}

	resp.response_length = offsetof(typeof(resp), response_length) +
		sizeof(resp.response_length);
	in_mad = kzalloc(sizeof *in_mad, GFP_KERNEL);
	out_mad = kmalloc(sizeof *out_mad, GFP_KERNEL);
	err = -ENOMEM;
	if (!in_mad || !out_mad)
		goto out;

	init_query_mad(in_mad);
	in_mad->attr_id = IB_SMP_ATTR_NODE_INFO;

	err = mlx4_MAD_IFC(to_mdev(ibdev), MLX4_MAD_IFC_IGNORE_KEYS,
			   1, NULL, NULL, in_mad, out_mad);
	if (err)
		goto out;

	memset(props, 0, sizeof *props);

	have_ib_ports = num_ib_ports(dev->dev);

	props->fw_ver = dev->dev->caps.fw_ver;
	props->device_cap_flags = IB_DEVICE_CHANGE_PHY_PORT |
		IB_DEVICE_PORT_ACTIVE_EVENT		|
		IB_DEVICE_SYS_IMAGE_GUID		|
		IB_DEVICE_RC_RNR_NAK_GEN		|
		IB_DEVICE_BLOCK_MULTICAST_LOOPBACK;
	if (dev->dev->caps.flags & MLX4_DEV_CAP_FLAG_BAD_PKEY_CNTR)
		props->device_cap_flags |= IB_DEVICE_BAD_PKEY_CNTR;
	if (dev->dev->caps.flags & MLX4_DEV_CAP_FLAG_BAD_QKEY_CNTR)
		props->device_cap_flags |= IB_DEVICE_BAD_QKEY_CNTR;
	if (dev->dev->caps.flags & MLX4_DEV_CAP_FLAG_APM && have_ib_ports)
		props->device_cap_flags |= IB_DEVICE_AUTO_PATH_MIG;
	if (dev->dev->caps.flags & MLX4_DEV_CAP_FLAG_UD_AV_PORT)
		props->device_cap_flags |= IB_DEVICE_UD_AV_PORT_ENFORCE;
	if (dev->dev->caps.flags & MLX4_DEV_CAP_FLAG_IPOIB_CSUM)
		props->device_cap_flags |= IB_DEVICE_UD_IP_CSUM;
	if (dev->dev->caps.max_gso_sz &&
	    (dev->dev->rev_id != MLX4_IB_CARD_REV_A0) &&
	    (dev->dev->caps.flags & MLX4_DEV_CAP_FLAG_BLH))
		props->device_cap_flags |= IB_DEVICE_UD_TSO;
	if (dev->dev->caps.bmme_flags & MLX4_BMME_FLAG_RESERVED_LKEY)
		props->device_cap_flags |= IB_DEVICE_LOCAL_DMA_LKEY;
	if ((dev->dev->caps.bmme_flags & MLX4_BMME_FLAG_LOCAL_INV) &&
	    (dev->dev->caps.bmme_flags & MLX4_BMME_FLAG_REMOTE_INV) &&
	    (dev->dev->caps.bmme_flags & MLX4_BMME_FLAG_FAST_REG_WR))
		props->device_cap_flags |= IB_DEVICE_MEM_MGT_EXTENSIONS;
	if (dev->dev->caps.flags & MLX4_DEV_CAP_FLAG_XRC)
		props->device_cap_flags |= IB_DEVICE_XRC;
	if (dev->dev->caps.flags & MLX4_DEV_CAP_FLAG_MEM_WINDOW)
		props->device_cap_flags |= IB_DEVICE_MEM_WINDOW;
	if (dev->dev->caps.bmme_flags & MLX4_BMME_FLAG_TYPE_2_WIN) {
		if (dev->dev->caps.bmme_flags & MLX4_BMME_FLAG_WIN_TYPE_2B)
			props->device_cap_flags |= IB_DEVICE_MEM_WINDOW_TYPE_2B;
		else
			props->device_cap_flags |= IB_DEVICE_MEM_WINDOW_TYPE_2A;
	}
	if (dev->steering_support == MLX4_STEERING_MODE_DEVICE_MANAGED)
		props->device_cap_flags |= IB_DEVICE_MANAGED_FLOW_STEERING;

	props->device_cap_flags |= IB_DEVICE_RAW_IP_CSUM;

	props->vendor_id = be32_to_cpup((__be32 *) (out_mad->data + 36)) &
		0xffffff;
	props->vendor_part_id = dev->dev->persist->pdev->device;
	props->hw_ver = be32_to_cpup((__be32 *) (out_mad->data + 32));
	memcpy(&props->sys_image_guid, out_mad->data + 4, 8);

	props->max_mr_size = ~0ull;
	props->page_size_cap = dev->dev->caps.page_size_cap;
	props->max_qp = dev->dev->quotas.qp;
	props->max_qp_wr = dev->dev->caps.max_wqes - MLX4_IB_SQ_MAX_SPARE;
	props->max_send_sge =
		min(dev->dev->caps.max_sq_sg, dev->dev->caps.max_rq_sg);
	props->max_recv_sge =
		min(dev->dev->caps.max_sq_sg, dev->dev->caps.max_rq_sg);
	props->max_sge_rd = MLX4_MAX_SGE_RD;
	props->max_cq = dev->dev->quotas.cq;
	props->max_cqe = dev->dev->caps.max_cqes;
	props->max_mr = dev->dev->quotas.mpt;
	props->max_pd = dev->dev->caps.num_pds - dev->dev->caps.reserved_pds;
	props->max_qp_rd_atom = dev->dev->caps.max_qp_dest_rdma;
	props->max_qp_init_rd_atom = dev->dev->caps.max_qp_init_rdma;
	props->max_res_rd_atom = props->max_qp_rd_atom * props->max_qp;
	props->max_srq = dev->dev->quotas.srq;
	props->max_srq_wr = dev->dev->caps.max_srq_wqes - 1;
	props->max_srq_sge = dev->dev->caps.max_srq_sge;
	props->max_fast_reg_page_list_len = MLX4_MAX_FAST_REG_PAGES;
	props->local_ca_ack_delay = dev->dev->caps.local_ca_ack_delay;
	props->atomic_cap = dev->dev->caps.flags & MLX4_DEV_CAP_FLAG_ATOMIC ?
		IB_ATOMIC_HCA : IB_ATOMIC_NONE;
	props->masked_atomic_cap = props->atomic_cap;
	props->max_pkeys = dev->dev->caps.pkey_table_len[1];
	props->max_mcast_grp = dev->dev->caps.num_mgms + dev->dev->caps.num_amgms;
	props->max_mcast_qp_attach = dev->dev->caps.num_qp_per_mgm;
	props->max_total_mcast_qp_attach = props->max_mcast_qp_attach *
					   props->max_mcast_grp;
	props->max_map_per_fmr = dev->dev->caps.max_fmr_maps;
	props->hca_core_clock = dev->dev->caps.hca_core_clock * 1000UL;
	props->timestamp_mask = 0xFFFFFFFFFFFFULL;
	props->max_ah = INT_MAX;

	if (mlx4_ib_port_link_layer(ibdev, 1) == IB_LINK_LAYER_ETHERNET ||
	    mlx4_ib_port_link_layer(ibdev, 2) == IB_LINK_LAYER_ETHERNET) {
		if (dev->dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_RSS) {
			props->rss_caps.max_rwq_indirection_tables =
				props->max_qp;
			props->rss_caps.max_rwq_indirection_table_size =
				dev->dev->caps.max_rss_tbl_sz;
			props->rss_caps.supported_qpts = 1 << IB_QPT_RAW_PACKET;
			props->max_wq_type_rq = props->max_qp;
		}

		if (dev->dev->caps.flags & MLX4_DEV_CAP_FLAG_FCS_KEEP)
			props->raw_packet_caps |= IB_RAW_PACKET_CAP_SCATTER_FCS;
	}

	props->cq_caps.max_cq_moderation_count = MLX4_MAX_CQ_COUNT;
	props->cq_caps.max_cq_moderation_period = MLX4_MAX_CQ_PERIOD;

	if (!mlx4_is_slave(dev->dev))
		err = mlx4_get_internal_clock_params(dev->dev, &clock_params);

	if (uhw->outlen >= resp.response_length + sizeof(resp.hca_core_clock_offset)) {
		resp.response_length += sizeof(resp.hca_core_clock_offset);
		if (!err && !mlx4_is_slave(dev->dev)) {
			resp.comp_mask |= MLX4_IB_QUERY_DEV_RESP_MASK_CORE_CLOCK_OFFSET;
			resp.hca_core_clock_offset = clock_params.offset % PAGE_SIZE;
		}
	}

	if (uhw->outlen >= resp.response_length +
	    sizeof(resp.max_inl_recv_sz)) {
		resp.response_length += sizeof(resp.max_inl_recv_sz);
		resp.max_inl_recv_sz = dev->dev->caps.max_rq_sg *
			sizeof(struct mlx4_wqe_data_seg);
	}

	if (field_avail(typeof(resp), rss_caps, uhw->outlen)) {
		if (props->rss_caps.supported_qpts) {
			resp.rss_caps.rx_hash_function =
				MLX4_IB_RX_HASH_FUNC_TOEPLITZ;

			resp.rss_caps.rx_hash_fields_mask =
				MLX4_IB_RX_HASH_SRC_IPV4 |
				MLX4_IB_RX_HASH_DST_IPV4 |
				MLX4_IB_RX_HASH_SRC_IPV6 |
				MLX4_IB_RX_HASH_DST_IPV6 |
				MLX4_IB_RX_HASH_SRC_PORT_TCP |
				MLX4_IB_RX_HASH_DST_PORT_TCP |
				MLX4_IB_RX_HASH_SRC_PORT_UDP |
				MLX4_IB_RX_HASH_DST_PORT_UDP;

			if (dev->dev->caps.tunnel_offload_mode ==
			    MLX4_TUNNEL_OFFLOAD_MODE_VXLAN)
				resp.rss_caps.rx_hash_fields_mask |=
					MLX4_IB_RX_HASH_INNER;
		}
		resp.response_length = offsetof(typeof(resp), rss_caps) +
				       sizeof(resp.rss_caps);
	}

	if (field_avail(typeof(resp), tso_caps, uhw->outlen)) {
		if (dev->dev->caps.max_gso_sz &&
		    ((mlx4_ib_port_link_layer(ibdev, 1) ==
		    IB_LINK_LAYER_ETHERNET) ||
		    (mlx4_ib_port_link_layer(ibdev, 2) ==
		    IB_LINK_LAYER_ETHERNET))) {
			resp.tso_caps.max_tso = dev->dev->caps.max_gso_sz;
			resp.tso_caps.supported_qpts |=
				1 << IB_QPT_RAW_PACKET;
		}
		resp.response_length = offsetof(typeof(resp), tso_caps) +
				       sizeof(resp.tso_caps);
	}

	if (uhw->outlen) {
		err = ib_copy_to_udata(uhw, &resp, resp.response_length);
		if (err)
			goto out;
	}
out:
	kfree(in_mad);
	kfree(out_mad);

	return err;
}

static enum rdma_link_layer
mlx4_ib_port_link_layer(struct ib_device *device, u8 port_num)
{
	struct mlx4_dev *dev = to_mdev(device)->dev;

	return dev->caps.port_mask[port_num] == MLX4_PORT_TYPE_IB ?
		IB_LINK_LAYER_INFINIBAND : IB_LINK_LAYER_ETHERNET;
}

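/*
 * Query port attributes for an InfiniBand link-layer port using PORT_INFO
 * MADs, including the extended-speed (EDR/FDR) and FDR-10 checks.
 */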
static int ib_link_query_port(struct ib_device *ibdev, u8 port,
			      struct ib_port_attr *props, int netw_view)
{
	struct ib_smp *in_mad = NULL;
	struct ib_smp *out_mad = NULL;
	int ext_active_speed;
	int mad_ifc_flags = MLX4_MAD_IFC_IGNORE_KEYS;
	int err = -ENOMEM;

	in_mad = kzalloc(sizeof *in_mad, GFP_KERNEL);
	out_mad = kmalloc(sizeof *out_mad, GFP_KERNEL);
	if (!in_mad || !out_mad)
		goto out;

	init_query_mad(in_mad);
	in_mad->attr_id = IB_SMP_ATTR_PORT_INFO;
	in_mad->attr_mod = cpu_to_be32(port);

	if (mlx4_is_mfunc(to_mdev(ibdev)->dev) && netw_view)
		mad_ifc_flags |= MLX4_MAD_IFC_NET_VIEW;

	err = mlx4_MAD_IFC(to_mdev(ibdev), mad_ifc_flags, port, NULL, NULL,
			   in_mad, out_mad);
	if (err)
		goto out;


	props->lid = be16_to_cpup((__be16 *) (out_mad->data + 16));
	props->lmc = out_mad->data[34] & 0x7;
	props->sm_lid = be16_to_cpup((__be16 *) (out_mad->data + 18));
	props->sm_sl = out_mad->data[36] & 0xf;
	props->state = out_mad->data[32] & 0xf;
	props->phys_state = out_mad->data[33] >> 4;
	props->port_cap_flags = be32_to_cpup((__be32 *) (out_mad->data + 20));
	if (netw_view)
		props->gid_tbl_len = out_mad->data[50];
	else
		props->gid_tbl_len = to_mdev(ibdev)->dev->caps.gid_table_len[port];
	props->max_msg_sz = to_mdev(ibdev)->dev->caps.max_msg_sz;
	props->pkey_tbl_len = to_mdev(ibdev)->dev->caps.pkey_table_len[port];
	props->bad_pkey_cntr = be16_to_cpup((__be16 *) (out_mad->data + 46));
	props->qkey_viol_cntr = be16_to_cpup((__be16 *) (out_mad->data + 48));
	props->active_width = out_mad->data[31] & 0xf;
	props->active_speed = out_mad->data[35] >> 4;
	props->max_mtu = out_mad->data[41] & 0xf;
	props->active_mtu = out_mad->data[36] >> 4;
	props->subnet_timeout = out_mad->data[51] & 0x1f;
	props->max_vl_num = out_mad->data[37] >> 4;
	props->init_type_reply = out_mad->data[41] >> 4;

	/* Check if extended speeds (EDR/FDR/...) are supported */
	if (props->port_cap_flags & IB_PORT_EXTENDED_SPEEDS_SUP) {
		ext_active_speed = out_mad->data[62] >> 4;

		switch (ext_active_speed) {
		case 1:
			props->active_speed = IB_SPEED_FDR;
			break;
		case 2:
			props->active_speed = IB_SPEED_EDR;
			break;
		}
	}

	/* If reported active speed is QDR, check if is FDR-10 */
	if (props->active_speed == IB_SPEED_QDR) {
		init_query_mad(in_mad);
		in_mad->attr_id = MLX4_ATTR_EXTENDED_PORT_INFO;
		in_mad->attr_mod = cpu_to_be32(port);

		err = mlx4_MAD_IFC(to_mdev(ibdev), mad_ifc_flags, port,
				   NULL, NULL, in_mad, out_mad);
		if (err)
			goto out;

		/* Checking LinkSpeedActive for FDR-10 */
		if (out_mad->data[15] & 0x1)
			props->active_speed = IB_SPEED_FDR10;
	}

	/* Avoid wrong speed value returned by FW if the IB link is down. */
	if (props->state == IB_PORT_DOWN)
		props->active_speed = IB_SPEED_SDR;

out:
	kfree(in_mad);
	kfree(out_mad);
	return err;
}

static u8 state_to_phys_state(enum ib_port_state state)
{
	return state == IB_PORT_ACTIVE ? 5 : 3;
}

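/*
 * Query port attributes for an Ethernet (RoCE) port: link width/speed come
 * from the QUERY_PORT FW command, while state and MTU are derived from the
 * associated (possibly bonded) net_device.
 */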
static int eth_link_query_port(struct ib_device *ibdev, u8 port,
			       struct ib_port_attr *props)
{

	struct mlx4_ib_dev *mdev = to_mdev(ibdev);
	struct mlx4_ib_iboe *iboe = &mdev->iboe;
	struct net_device *ndev;
	enum ib_mtu tmp;
	struct mlx4_cmd_mailbox *mailbox;
	int err = 0;
	int is_bonded = mlx4_is_bonded(mdev->dev);

	mailbox = mlx4_alloc_cmd_mailbox(mdev->dev);
	if (IS_ERR(mailbox))
		return PTR_ERR(mailbox);

	err = mlx4_cmd_box(mdev->dev, 0, mailbox->dma, port, 0,
			   MLX4_CMD_QUERY_PORT, MLX4_CMD_TIME_CLASS_B,
			   MLX4_CMD_WRAPPED);
	if (err)
		goto out;

	props->active_width = (((u8 *)mailbox->buf)[5] == 0x40) ||
			      (((u8 *)mailbox->buf)[5] == 0x20 /*56Gb*/) ?
				IB_WIDTH_4X : IB_WIDTH_1X;
	props->active_speed = (((u8 *)mailbox->buf)[5] == 0x20 /*56Gb*/) ?
				IB_SPEED_FDR : IB_SPEED_QDR;
	props->port_cap_flags = IB_PORT_CM_SUP;
	props->ip_gids = true;
	props->gid_tbl_len = mdev->dev->caps.gid_table_len[port];
	props->max_msg_sz = mdev->dev->caps.max_msg_sz;
	props->pkey_tbl_len = 1;
	props->max_mtu = IB_MTU_4096;
	props->max_vl_num = 2;
	props->state = IB_PORT_DOWN;
	props->phys_state = state_to_phys_state(props->state);
	props->active_mtu = IB_MTU_256;
	spin_lock_bh(&iboe->lock);
	ndev = iboe->netdevs[port - 1];
	if (ndev && is_bonded) {
		rcu_read_lock(); /* required to get upper dev */
		ndev = netdev_master_upper_dev_get_rcu(ndev);
		rcu_read_unlock();
	}
	if (!ndev)
		goto out_unlock;

	tmp = iboe_get_mtu(ndev->mtu);
	props->active_mtu = tmp ? min(props->max_mtu, tmp) : IB_MTU_256;

	props->state = (netif_running(ndev) && netif_carrier_ok(ndev)) ?
			IB_PORT_ACTIVE : IB_PORT_DOWN;
	props->phys_state = state_to_phys_state(props->state);
out_unlock:
	spin_unlock_bh(&iboe->lock);
out:
	mlx4_free_cmd_mailbox(mdev->dev, mailbox);
	return err;
}

int __mlx4_ib_query_port(struct ib_device *ibdev, u8 port,
			 struct ib_port_attr *props, int netw_view)
{
	int err;

	/* props being zeroed by the caller, avoid zeroing it here */

	err = mlx4_ib_port_link_layer(ibdev, port) == IB_LINK_LAYER_INFINIBAND ?
		ib_link_query_port(ibdev, port, props, netw_view) :
		eth_link_query_port(ibdev, port, props);

	return err;
}

static int mlx4_ib_query_port(struct ib_device *ibdev, u8 port,
			      struct ib_port_attr *props)
{
	/* returns host view */
	return __mlx4_ib_query_port(ibdev, port, props, 0);
}

int __mlx4_ib_query_gid(struct ib_device *ibdev, u8 port, int index,
			union ib_gid *gid, int netw_view)
{
	struct ib_smp *in_mad = NULL;
	struct ib_smp *out_mad = NULL;
	int err = -ENOMEM;
	struct mlx4_ib_dev *dev = to_mdev(ibdev);
	int clear = 0;
	int mad_ifc_flags = MLX4_MAD_IFC_IGNORE_KEYS;

	in_mad = kzalloc(sizeof *in_mad, GFP_KERNEL);
	out_mad = kmalloc(sizeof *out_mad, GFP_KERNEL);
	if (!in_mad || !out_mad)
		goto out;

	init_query_mad(in_mad);
	in_mad->attr_id = IB_SMP_ATTR_PORT_INFO;
	in_mad->attr_mod = cpu_to_be32(port);

	if (mlx4_is_mfunc(dev->dev) && netw_view)
		mad_ifc_flags |= MLX4_MAD_IFC_NET_VIEW;

	err = mlx4_MAD_IFC(dev, mad_ifc_flags, port, NULL, NULL, in_mad, out_mad);
	if (err)
		goto out;

	memcpy(gid->raw, out_mad->data + 8, 8);

	if (mlx4_is_mfunc(dev->dev) && !netw_view) {
		if (index) {
			/* For any index > 0, return the null guid */
			err = 0;
			clear = 1;
			goto out;
		}
	}

	init_query_mad(in_mad);
	in_mad->attr_id = IB_SMP_ATTR_GUID_INFO;
	in_mad->attr_mod = cpu_to_be32(index / 8);

	err = mlx4_MAD_IFC(dev, mad_ifc_flags, port,
			   NULL, NULL, in_mad, out_mad);
	if (err)
		goto out;

	memcpy(gid->raw + 8, out_mad->data + (index % 8) * 8, 8);

out:
	if (clear)
		memset(gid->raw + 8, 0, 8);
	kfree(in_mad);
	kfree(out_mad);
	return err;
}

static int mlx4_ib_query_gid(struct ib_device *ibdev, u8 port, int index,
			     union ib_gid *gid)
{
	if (rdma_protocol_ib(ibdev, port))
		return __mlx4_ib_query_gid(ibdev, port, index, gid, 0);
	return 0;
}

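/* Read the SL-to-VL mapping table of a port via an SL_TO_VL_TABLE MAD. */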
static int mlx4_ib_query_sl2vl(struct ib_device *ibdev, u8 port, u64 *sl2vl_tbl)
{
	union sl2vl_tbl_to_u64 sl2vl64;
	struct ib_smp *in_mad = NULL;
	struct ib_smp *out_mad = NULL;
	int mad_ifc_flags = MLX4_MAD_IFC_IGNORE_KEYS;
	int err = -ENOMEM;
	int jj;

	if (mlx4_is_slave(to_mdev(ibdev)->dev)) {
		*sl2vl_tbl = 0;
		return 0;
	}

	in_mad = kzalloc(sizeof(*in_mad), GFP_KERNEL);
	out_mad = kmalloc(sizeof(*out_mad), GFP_KERNEL);
	if (!in_mad || !out_mad)
		goto out;

	init_query_mad(in_mad);
	in_mad->attr_id = IB_SMP_ATTR_SL_TO_VL_TABLE;
	in_mad->attr_mod = 0;

	if (mlx4_is_mfunc(to_mdev(ibdev)->dev))
		mad_ifc_flags |= MLX4_MAD_IFC_NET_VIEW;

	err = mlx4_MAD_IFC(to_mdev(ibdev), mad_ifc_flags, port, NULL, NULL,
			   in_mad, out_mad);
	if (err)
		goto out;

	for (jj = 0; jj < 8; jj++)
		sl2vl64.sl8[jj] = ((struct ib_smp *)out_mad)->data[jj];
	*sl2vl_tbl = sl2vl64.sl64;

out:
	kfree(in_mad);
	kfree(out_mad);
	return err;
}

static void mlx4_init_sl2vl_tbl(struct mlx4_ib_dev *mdev)
{
	u64 sl2vl;
	int i;
	int err;

	for (i = 1; i <= mdev->dev->caps.num_ports; i++) {
		if (mdev->dev->caps.port_type[i] == MLX4_PORT_TYPE_ETH)
			continue;
		err = mlx4_ib_query_sl2vl(&mdev->ib_dev, i, &sl2vl);
		if (err) {
			pr_err("Unable to get default sl to vl mapping for port %d.  Using all zeroes (%d)\n",
			       i, err);
			sl2vl = 0;
		}
		atomic64_set(&mdev->sl2vl[i - 1], sl2vl);
	}
}

int __mlx4_ib_query_pkey(struct ib_device *ibdev, u8 port, u16 index,
			 u16 *pkey, int netw_view)
{
	struct ib_smp *in_mad = NULL;
	struct ib_smp *out_mad = NULL;
	int mad_ifc_flags = MLX4_MAD_IFC_IGNORE_KEYS;
	int err = -ENOMEM;

	in_mad = kzalloc(sizeof *in_mad, GFP_KERNEL);
	out_mad = kmalloc(sizeof *out_mad, GFP_KERNEL);
	if (!in_mad || !out_mad)
		goto out;

	init_query_mad(in_mad);
	in_mad->attr_id = IB_SMP_ATTR_PKEY_TABLE;
	in_mad->attr_mod = cpu_to_be32(index / 32);

	if (mlx4_is_mfunc(to_mdev(ibdev)->dev) && netw_view)
		mad_ifc_flags |= MLX4_MAD_IFC_NET_VIEW;

	err = mlx4_MAD_IFC(to_mdev(ibdev), mad_ifc_flags, port, NULL, NULL,
			   in_mad, out_mad);
	if (err)
		goto out;

	*pkey = be16_to_cpu(((__be16 *) out_mad->data)[index % 32]);

out:
	kfree(in_mad);
	kfree(out_mad);
	return err;
}

static int mlx4_ib_query_pkey(struct ib_device *ibdev, u8 port, u16 index, u16 *pkey)
{
	return __mlx4_ib_query_pkey(ibdev, port, index, pkey, 0);
}

static int mlx4_ib_modify_device(struct ib_device *ibdev, int mask,
				 struct ib_device_modify *props)
{
	struct mlx4_cmd_mailbox *mailbox;
	unsigned long flags;

	if (mask & ~IB_DEVICE_MODIFY_NODE_DESC)
		return -EOPNOTSUPP;

	if (!(mask & IB_DEVICE_MODIFY_NODE_DESC))
		return 0;

	if (mlx4_is_slave(to_mdev(ibdev)->dev))
		return -EOPNOTSUPP;

	spin_lock_irqsave(&to_mdev(ibdev)->sm_lock, flags);
	memcpy(ibdev->node_desc, props->node_desc, IB_DEVICE_NODE_DESC_MAX);
	spin_unlock_irqrestore(&to_mdev(ibdev)->sm_lock, flags);

	/*
	 * If possible, pass node desc to FW, so it can generate
	 * a 144 trap.  If cmd fails, just ignore.
	 */
	mailbox = mlx4_alloc_cmd_mailbox(to_mdev(ibdev)->dev);
	if (IS_ERR(mailbox))
		return 0;

	memcpy(mailbox->buf, props->node_desc, IB_DEVICE_NODE_DESC_MAX);
	mlx4_cmd(to_mdev(ibdev)->dev, mailbox->dma, 1, 0,
		 MLX4_CMD_SET_NODE, MLX4_CMD_TIME_CLASS_A, MLX4_CMD_NATIVE);

	mlx4_free_cmd_mailbox(to_mdev(ibdev)->dev, mailbox);

	return 0;
}

static int mlx4_ib_SET_PORT(struct mlx4_ib_dev *dev, u8 port, int reset_qkey_viols,
			    u32 cap_mask)
{
	struct mlx4_cmd_mailbox *mailbox;
	int err;

	mailbox = mlx4_alloc_cmd_mailbox(dev->dev);
	if (IS_ERR(mailbox))
		return PTR_ERR(mailbox);

	if (dev->dev->flags & MLX4_FLAG_OLD_PORT_CMDS) {
		*(u8 *) mailbox->buf = !!reset_qkey_viols << 6;
		((__be32 *) mailbox->buf)[2] = cpu_to_be32(cap_mask);
	} else {
		((u8 *) mailbox->buf)[3] = !!reset_qkey_viols;
		((__be32 *) mailbox->buf)[1] = cpu_to_be32(cap_mask);
	}

	err = mlx4_cmd(dev->dev, mailbox->dma, port, MLX4_SET_PORT_IB_OPCODE,
		       MLX4_CMD_SET_PORT, MLX4_CMD_TIME_CLASS_B,
		       MLX4_CMD_WRAPPED);

	mlx4_free_cmd_mailbox(dev->dev, mailbox);
	return err;
}

static int mlx4_ib_modify_port(struct ib_device *ibdev, u8 port, int mask,
			       struct ib_port_modify *props)
{
	struct mlx4_ib_dev *mdev = to_mdev(ibdev);
	u8 is_eth = mdev->dev->caps.port_type[port] == MLX4_PORT_TYPE_ETH;
	struct ib_port_attr attr;
	u32 cap_mask;
	int err;

	/* return OK if this is RoCE. CM calls ib_modify_port() regardless
	 * of whether port link layer is ETH or IB. For ETH ports, qkey
	 * violations and port capabilities are not meaningful.
	 */
	if (is_eth)
		return 0;

	mutex_lock(&mdev->cap_mask_mutex);

	err = ib_query_port(ibdev, port, &attr);
	if (err)
		goto out;

	cap_mask = (attr.port_cap_flags | props->set_port_cap_mask) &
		~props->clr_port_cap_mask;

	err = mlx4_ib_SET_PORT(mdev, port,
			       !!(mask & IB_PORT_RESET_QKEY_CNTR),
			       cap_mask);

out:
	mutex_unlock(&to_mdev(ibdev)->cap_mask_mutex);
	return err;
}

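/*
 * Allocate a user context: reserve a UAR for the process and report the
 * device capabilities to userspace, using the legacy response layout for
 * old ABI consumers.
 */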
static int mlx4_ib_alloc_ucontext(struct ib_ucontext *uctx,
				  struct ib_udata *udata)
{
	struct ib_device *ibdev = uctx->device;
	struct mlx4_ib_dev *dev = to_mdev(ibdev);
	struct mlx4_ib_ucontext *context = to_mucontext(uctx);
	struct mlx4_ib_alloc_ucontext_resp_v3 resp_v3;
	struct mlx4_ib_alloc_ucontext_resp resp;
	int err;

	if (!dev->ib_active)
		return -EAGAIN;

	if (ibdev->uverbs_abi_ver == MLX4_IB_UVERBS_NO_DEV_CAPS_ABI_VERSION) {
		resp_v3.qp_tab_size = dev->dev->caps.num_qps;
		resp_v3.bf_reg_size = dev->dev->caps.bf_reg_size;
		resp_v3.bf_regs_per_page = dev->dev->caps.bf_regs_per_page;
	} else {
		resp.dev_caps = dev->dev->caps.userspace_caps;
		resp.qp_tab_size = dev->dev->caps.num_qps;
		resp.bf_reg_size = dev->dev->caps.bf_reg_size;
		resp.bf_regs_per_page = dev->dev->caps.bf_regs_per_page;
		resp.cqe_size = dev->dev->caps.cqe_size;
	}

	err = mlx4_uar_alloc(to_mdev(ibdev)->dev, &context->uar);
	if (err)
		return err;

	INIT_LIST_HEAD(&context->db_page_list);
	mutex_init(&context->db_page_mutex);

	INIT_LIST_HEAD(&context->wqn_ranges_list);
	mutex_init(&context->wqn_ranges_mutex);

	if (ibdev->uverbs_abi_ver == MLX4_IB_UVERBS_NO_DEV_CAPS_ABI_VERSION)
		err = ib_copy_to_udata(udata, &resp_v3, sizeof(resp_v3));
	else
		err = ib_copy_to_udata(udata, &resp, sizeof(resp));

	if (err) {
		mlx4_uar_free(to_mdev(ibdev)->dev, &context->uar);
		return -EFAULT;
	}

	return err;
}

static void mlx4_ib_dealloc_ucontext(struct ib_ucontext *ibcontext)
{
	struct mlx4_ib_ucontext *context = to_mucontext(ibcontext);

	mlx4_uar_free(to_mdev(ibcontext->device)->dev, &context->uar);
}

static void mlx4_ib_disassociate_ucontext(struct ib_ucontext *ibcontext)
{
}

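/*
 * Map device resources to userspace.  Page offset 0 is the UAR, offset 1
 * the blueflame register region (if enabled), and offset 3 the HCA core
 * clock page.
 */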
static int mlx4_ib_mmap(struct ib_ucontext *context, struct vm_area_struct *vma)
{
	struct mlx4_ib_dev *dev = to_mdev(context->device);

	switch (vma->vm_pgoff) {
	case 0:
		return rdma_user_mmap_io(context, vma,
					 to_mucontext(context)->uar.pfn,
					 PAGE_SIZE,
					 pgprot_noncached(vma->vm_page_prot));

	case 1:
		if (dev->dev->caps.bf_reg_size == 0)
			return -EINVAL;
		return rdma_user_mmap_io(
			context, vma,
			to_mucontext(context)->uar.pfn +
				dev->dev->caps.num_uars,
			PAGE_SIZE, pgprot_writecombine(vma->vm_page_prot));

	case 3: {
		struct mlx4_clock_params params;
		int ret;

		ret = mlx4_get_internal_clock_params(dev->dev, &params);
		if (ret)
			return ret;

		return rdma_user_mmap_io(
			context, vma,
			(pci_resource_start(dev->dev->persist->pdev,
					    params.bar) +
			 params.offset) >>
				PAGE_SHIFT,
			PAGE_SIZE, pgprot_noncached(vma->vm_page_prot));
	}

	default:
		return -EINVAL;
	}
}

static int mlx4_ib_alloc_pd(struct ib_pd *ibpd, struct ib_udata *udata)
{
	struct mlx4_ib_pd *pd = to_mpd(ibpd);
	struct ib_device *ibdev = ibpd->device;
	int err;

	err = mlx4_pd_alloc(to_mdev(ibdev)->dev, &pd->pdn);
	if (err)
		return err;

	if (udata && ib_copy_to_udata(udata, &pd->pdn, sizeof(__u32))) {
		mlx4_pd_free(to_mdev(ibdev)->dev, pd->pdn);
		return -EFAULT;
	}
	return 0;
}

static void mlx4_ib_dealloc_pd(struct ib_pd *pd, struct ib_udata *udata)
{
	mlx4_pd_free(to_mdev(pd->device)->dev, to_mpd(pd)->pdn);
}

static struct ib_xrcd *mlx4_ib_alloc_xrcd(struct ib_device *ibdev,
					  struct ib_udata *udata)
{
	struct mlx4_ib_xrcd *xrcd;
	struct ib_cq_init_attr cq_attr = {};
	int err;

	if (!(to_mdev(ibdev)->dev->caps.flags & MLX4_DEV_CAP_FLAG_XRC))
		return ERR_PTR(-ENOSYS);

	xrcd = kmalloc(sizeof *xrcd, GFP_KERNEL);
	if (!xrcd)
		return ERR_PTR(-ENOMEM);

	err = mlx4_xrcd_alloc(to_mdev(ibdev)->dev, &xrcd->xrcdn);
	if (err)
		goto err1;

	xrcd->pd = ib_alloc_pd(ibdev, 0);
	if (IS_ERR(xrcd->pd)) {
		err = PTR_ERR(xrcd->pd);
		goto err2;
	}

	cq_attr.cqe = 1;
	xrcd->cq = ib_create_cq(ibdev, NULL, NULL, xrcd, &cq_attr);
	if (IS_ERR(xrcd->cq)) {
		err = PTR_ERR(xrcd->cq);
		goto err3;
	}

	return &xrcd->ibxrcd;

err3:
	ib_dealloc_pd(xrcd->pd);
err2:
	mlx4_xrcd_free(to_mdev(ibdev)->dev, xrcd->xrcdn);
err1:
	kfree(xrcd);
	return ERR_PTR(err);
}

static int mlx4_ib_dealloc_xrcd(struct ib_xrcd *xrcd, struct ib_udata *udata)
{
	ib_destroy_cq(to_mxrcd(xrcd)->cq);
	ib_dealloc_pd(to_mxrcd(xrcd)->pd);
	mlx4_xrcd_free(to_mdev(xrcd->device)->dev, to_mxrcd(xrcd)->xrcdn);
	kfree(xrcd);

	return 0;
}

Eli Cohenfa417f72010-10-24 21:08:52 -07001254static int add_gid_entry(struct ib_qp *ibqp, union ib_gid *gid)
1255{
1256 struct mlx4_ib_qp *mqp = to_mqp(ibqp);
1257 struct mlx4_ib_dev *mdev = to_mdev(ibqp->device);
1258 struct mlx4_ib_gid_entry *ge;
1259
1260 ge = kzalloc(sizeof *ge, GFP_KERNEL);
1261 if (!ge)
1262 return -ENOMEM;
1263
1264 ge->gid = *gid;
1265 if (mlx4_ib_add_mc(mdev, mqp, gid)) {
1266 ge->port = mqp->port;
1267 ge->added = 1;
1268 }
1269
1270 mutex_lock(&mqp->mutex);
1271 list_add_tail(&ge->list, &mqp->gid_list);
1272 mutex_unlock(&mqp->mutex);
1273
1274 return 0;
1275}
1276
Eran Ben Elisha3ba8e312015-10-15 14:44:40 +03001277static void mlx4_ib_delete_counters_table(struct mlx4_ib_dev *ibdev,
1278 struct mlx4_ib_counters *ctr_table)
1279{
1280 struct counter_index *counter, *tmp_count;
1281
1282 mutex_lock(&ctr_table->mutex);
1283 list_for_each_entry_safe(counter, tmp_count, &ctr_table->counters_list,
1284 list) {
1285 if (counter->allocated)
1286 mlx4_counter_free(ibdev->dev, counter->index);
1287 list_del(&counter->list);
1288 kfree(counter);
1289 }
1290 mutex_unlock(&ctr_table->mutex);
1291}
1292
Eli Cohenfa417f72010-10-24 21:08:52 -07001293int mlx4_ib_add_mc(struct mlx4_ib_dev *mdev, struct mlx4_ib_qp *mqp,
1294 union ib_gid *gid)
1295{
Eli Cohenfa417f72010-10-24 21:08:52 -07001296 struct net_device *ndev;
1297 int ret = 0;
1298
1299 if (!mqp->port)
1300 return 0;
1301
Jack Morgensteindba3ad22014-08-21 14:28:41 +03001302 spin_lock_bh(&mdev->iboe.lock);
Eli Cohenfa417f72010-10-24 21:08:52 -07001303 ndev = mdev->iboe.netdevs[mqp->port - 1];
1304 if (ndev)
1305 dev_hold(ndev);
Jack Morgensteindba3ad22014-08-21 14:28:41 +03001306 spin_unlock_bh(&mdev->iboe.lock);
Eli Cohenfa417f72010-10-24 21:08:52 -07001307
1308 if (ndev) {
Eli Cohenfa417f72010-10-24 21:08:52 -07001309 ret = 1;
Eli Cohenfa417f72010-10-24 21:08:52 -07001310 dev_put(ndev);
1311 }
1312
1313 return ret;
1314}
1315
Hadar Hen Zion0ff1fb62012-07-05 04:03:46 +00001316struct mlx4_ib_steering {
1317 struct list_head list;
Moni Shoua146d6e12015-02-03 16:48:38 +02001318 struct mlx4_flow_reg_id reg_id;
Hadar Hen Zion0ff1fb62012-07-05 04:03:46 +00001319 union ib_gid gid;
1320};
1321
Maor Gottlieb1f02a092016-08-30 16:58:30 +03001322#define LAST_ETH_FIELD vlan_tag
1323#define LAST_IB_FIELD sl
1324#define LAST_IPV4_FIELD dst_ip
1325#define LAST_TCP_UDP_FIELD src_port
1326
1327/* Field is the last supported field */
1328#define FIELDS_NOT_SUPPORTED(filter, field)\
1329 memchr_inv((void *)&filter.field +\
1330 sizeof(filter.field), 0,\
1331 sizeof(filter) -\
1332 offsetof(typeof(filter), field) -\
1333 sizeof(filter.field))
1334
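/* FIELDS_NOT_SUPPORTED() reports whether the user set any mask bytes beyond
 * the last field this driver can program (e.g. past vlan_tag for an ETH
 * spec): memchr_inv() scans from the end of "field" for
 * sizeof(filter) - offsetof(filter, field) - sizeof(filter.field) bytes and
 * returns non-NULL if any of them are non-zero.
 */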
Hadar Hen Zionf77c0162013-08-14 13:58:31 +03001335static int parse_flow_attr(struct mlx4_dev *dev,
Matan Baraka37a1a42013-11-07 15:25:16 +02001336 u32 qp_num,
Hadar Hen Zionf77c0162013-08-14 13:58:31 +03001337 union ib_flow_spec *ib_spec,
1338 struct _rule_hw *mlx4_spec)
1339{
1340 enum mlx4_net_trans_rule_id type;
1341
1342 switch (ib_spec->type) {
1343 case IB_FLOW_SPEC_ETH:
Maor Gottlieb1f02a092016-08-30 16:58:30 +03001344 if (FIELDS_NOT_SUPPORTED(ib_spec->eth.mask, LAST_ETH_FIELD))
1345 return -ENOTSUPP;
1346
Hadar Hen Zionf77c0162013-08-14 13:58:31 +03001347 type = MLX4_NET_TRANS_RULE_ID_ETH;
1348 memcpy(mlx4_spec->eth.dst_mac, ib_spec->eth.val.dst_mac,
1349 ETH_ALEN);
1350 memcpy(mlx4_spec->eth.dst_mac_msk, ib_spec->eth.mask.dst_mac,
1351 ETH_ALEN);
1352 mlx4_spec->eth.vlan_tag = ib_spec->eth.val.vlan_tag;
1353 mlx4_spec->eth.vlan_tag_msk = ib_spec->eth.mask.vlan_tag;
1354 break;
Matan Baraka37a1a42013-11-07 15:25:16 +02001355 case IB_FLOW_SPEC_IB:
Maor Gottlieb1f02a092016-08-30 16:58:30 +03001356 if (FIELDS_NOT_SUPPORTED(ib_spec->ib.mask, LAST_IB_FIELD))
1357 return -ENOTSUPP;
1358
Matan Baraka37a1a42013-11-07 15:25:16 +02001359 type = MLX4_NET_TRANS_RULE_ID_IB;
1360 mlx4_spec->ib.l3_qpn =
1361 cpu_to_be32(qp_num);
1362 mlx4_spec->ib.qpn_mask =
1363 cpu_to_be32(MLX4_IB_FLOW_QPN_MASK);
1364 break;
1365
Hadar Hen Zionf77c0162013-08-14 13:58:31 +03001366
1367 case IB_FLOW_SPEC_IPV4:
Maor Gottlieb1f02a092016-08-30 16:58:30 +03001368 if (FIELDS_NOT_SUPPORTED(ib_spec->ipv4.mask, LAST_IPV4_FIELD))
1369 return -ENOTSUPP;
1370
Hadar Hen Zionf77c0162013-08-14 13:58:31 +03001371 type = MLX4_NET_TRANS_RULE_ID_IPV4;
1372 mlx4_spec->ipv4.src_ip = ib_spec->ipv4.val.src_ip;
1373 mlx4_spec->ipv4.src_ip_msk = ib_spec->ipv4.mask.src_ip;
1374 mlx4_spec->ipv4.dst_ip = ib_spec->ipv4.val.dst_ip;
1375 mlx4_spec->ipv4.dst_ip_msk = ib_spec->ipv4.mask.dst_ip;
1376 break;
1377
1378 case IB_FLOW_SPEC_TCP:
1379 case IB_FLOW_SPEC_UDP:
Maor Gottlieb1f02a092016-08-30 16:58:30 +03001380 if (FIELDS_NOT_SUPPORTED(ib_spec->tcp_udp.mask, LAST_TCP_UDP_FIELD))
1381 return -ENOTSUPP;
1382
Hadar Hen Zionf77c0162013-08-14 13:58:31 +03001383 type = ib_spec->type == IB_FLOW_SPEC_TCP ?
1384 MLX4_NET_TRANS_RULE_ID_TCP :
1385 MLX4_NET_TRANS_RULE_ID_UDP;
1386 mlx4_spec->tcp_udp.dst_port = ib_spec->tcp_udp.val.dst_port;
1387 mlx4_spec->tcp_udp.dst_port_msk = ib_spec->tcp_udp.mask.dst_port;
1388 mlx4_spec->tcp_udp.src_port = ib_spec->tcp_udp.val.src_port;
1389 mlx4_spec->tcp_udp.src_port_msk = ib_spec->tcp_udp.mask.src_port;
1390 break;
1391
1392 default:
1393 return -EINVAL;
1394 }
1395 if (mlx4_map_sw_to_hw_steering_id(dev, type) < 0 ||
1396 mlx4_hw_rule_sz(dev, type) < 0)
1397 return -EINVAL;
1398 mlx4_spec->id = cpu_to_be16(mlx4_map_sw_to_hw_steering_id(dev, type));
1399 mlx4_spec->size = mlx4_hw_rule_sz(dev, type) >> 2;
1400 return mlx4_hw_rule_sz(dev, type);
1401}
1402
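/* The default_table below only applies to flows on an InfiniBand link
 * layer: a flow that carries an IPv4 spec but no ETH spec gets an empty
 * IB spec prepended by __mlx4_ib_create_default_rules() so the hardware
 * rule still matches on the IB transport.
 */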
Matan Baraka37a1a42013-11-07 15:25:16 +02001403struct default_rules {
1404 __u32 mandatory_fields[IB_FLOW_SPEC_SUPPORT_LAYERS];
1405 __u32 mandatory_not_fields[IB_FLOW_SPEC_SUPPORT_LAYERS];
1406 __u32 rules_create_list[IB_FLOW_SPEC_SUPPORT_LAYERS];
1407 __u8 link_layer;
1408};
1409static const struct default_rules default_table[] = {
1410 {
1411 .mandatory_fields = {IB_FLOW_SPEC_IPV4},
1412 .mandatory_not_fields = {IB_FLOW_SPEC_ETH},
1413 .rules_create_list = {IB_FLOW_SPEC_IB},
1414 .link_layer = IB_LINK_LAYER_INFINIBAND
1415 }
1416};
1417
1418static int __mlx4_ib_default_rules_match(struct ib_qp *qp,
1419 struct ib_flow_attr *flow_attr)
1420{
1421 int i, j, k;
1422 void *ib_flow;
1423 const struct default_rules *pdefault_rules = default_table;
1424 u8 link_layer = rdma_port_get_link_layer(qp->device, flow_attr->port);
1425
Fabian Fredericka57f23f2014-08-12 19:20:07 -04001426 for (i = 0; i < ARRAY_SIZE(default_table); i++, pdefault_rules++) {
Matan Baraka37a1a42013-11-07 15:25:16 +02001427 __u32 field_types[IB_FLOW_SPEC_SUPPORT_LAYERS];
1428 memset(&field_types, 0, sizeof(field_types));
1429
1430 if (link_layer != pdefault_rules->link_layer)
1431 continue;
1432
1433 ib_flow = flow_attr + 1;
1434 /* we assume the specs are sorted */
1435 for (j = 0, k = 0; k < IB_FLOW_SPEC_SUPPORT_LAYERS &&
1436 j < flow_attr->num_of_specs; k++) {
1437 union ib_flow_spec *current_flow =
1438 (union ib_flow_spec *)ib_flow;
1439
1440 /* same layer but different type */
1441 if (((current_flow->type & IB_FLOW_SPEC_LAYER_MASK) ==
1442 (pdefault_rules->mandatory_fields[k] &
1443 IB_FLOW_SPEC_LAYER_MASK)) &&
1444 (current_flow->type !=
1445 pdefault_rules->mandatory_fields[k]))
1446 goto out;
1447
 1448 /* same layer, try to match the next one */
1449 if (current_flow->type ==
1450 pdefault_rules->mandatory_fields[k]) {
1451 j++;
1452 ib_flow +=
1453 ((union ib_flow_spec *)ib_flow)->size;
1454 }
1455 }
1456
1457 ib_flow = flow_attr + 1;
1458 for (j = 0; j < flow_attr->num_of_specs;
1459 j++, ib_flow += ((union ib_flow_spec *)ib_flow)->size)
1460 for (k = 0; k < IB_FLOW_SPEC_SUPPORT_LAYERS; k++)
1461 /* same layer and same type */
1462 if (((union ib_flow_spec *)ib_flow)->type ==
1463 pdefault_rules->mandatory_not_fields[k])
1464 goto out;
1465
1466 return i;
1467 }
1468out:
1469 return -1;
1470}
1471
1472static int __mlx4_ib_create_default_rules(
1473 struct mlx4_ib_dev *mdev,
1474 struct ib_qp *qp,
1475 const struct default_rules *pdefault_rules,
1476 struct _rule_hw *mlx4_spec) {
1477 int size = 0;
1478 int i;
1479
Fabian Fredericka57f23f2014-08-12 19:20:07 -04001480 for (i = 0; i < ARRAY_SIZE(pdefault_rules->rules_create_list); i++) {
Matan Baraka37a1a42013-11-07 15:25:16 +02001481 int ret;
1482 union ib_flow_spec ib_spec;
1483 switch (pdefault_rules->rules_create_list[i]) {
1484 case 0:
1485 /* no rule */
1486 continue;
1487 case IB_FLOW_SPEC_IB:
1488 ib_spec.type = IB_FLOW_SPEC_IB;
1489 ib_spec.size = sizeof(struct ib_flow_spec_ib);
1490
1491 break;
1492 default:
1493 /* invalid rule */
1494 return -EINVAL;
1495 }
 1496 /* We must put an empty rule here; the qpn is ignored */
1497 ret = parse_flow_attr(mdev->dev, 0, &ib_spec,
1498 mlx4_spec);
1499 if (ret < 0) {
 1500 pr_info("failed to parse default flow spec\n");
1501 return -EINVAL;
1502 }
1503
1504 mlx4_spec = (void *)mlx4_spec + ret;
1505 size += ret;
1506 }
1507 return size;
1508}
1509
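/* __mlx4_ib_create_flow() assembles the whole rule in a command mailbox: a
 * mlx4_net_trans_rule_hw_ctrl header (domain|priority, steering type, port,
 * qpn) followed by any default specs and the translated user specs, then
 * fires MLX4_QP_FLOW_STEERING_ATTACH with the size in dwords (size >> 2).
 * The reg_id returned by the firmware is what the destroy path detaches.
 */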
Hadar Hen Zionf77c0162013-08-14 13:58:31 +03001510static int __mlx4_ib_create_flow(struct ib_qp *qp, struct ib_flow_attr *flow_attr,
1511 int domain,
1512 enum mlx4_net_trans_promisc_mode flow_type,
1513 u64 *reg_id)
1514{
1515 int ret, i;
1516 int size = 0;
1517 void *ib_flow;
1518 struct mlx4_ib_dev *mdev = to_mdev(qp->device);
1519 struct mlx4_cmd_mailbox *mailbox;
1520 struct mlx4_net_trans_rule_hw_ctrl *ctrl;
Matan Baraka37a1a42013-11-07 15:25:16 +02001521 int default_flow;
Hadar Hen Zionf77c0162013-08-14 13:58:31 +03001522
1523 static const u16 __mlx4_domain[] = {
1524 [IB_FLOW_DOMAIN_USER] = MLX4_DOMAIN_UVERBS,
1525 [IB_FLOW_DOMAIN_ETHTOOL] = MLX4_DOMAIN_ETHTOOL,
1526 [IB_FLOW_DOMAIN_RFS] = MLX4_DOMAIN_RFS,
1527 [IB_FLOW_DOMAIN_NIC] = MLX4_DOMAIN_NIC,
1528 };
1529
1530 if (flow_attr->priority > MLX4_IB_FLOW_MAX_PRIO) {
1531 pr_err("Invalid priority value %d\n", flow_attr->priority);
1532 return -EINVAL;
1533 }
1534
1535 if (domain >= IB_FLOW_DOMAIN_NUM) {
1536 pr_err("Invalid domain value %d\n", domain);
1537 return -EINVAL;
1538 }
1539
1540 if (mlx4_map_sw_to_hw_steering_mode(mdev->dev, flow_type) < 0)
1541 return -EINVAL;
1542
1543 mailbox = mlx4_alloc_cmd_mailbox(mdev->dev);
1544 if (IS_ERR(mailbox))
1545 return PTR_ERR(mailbox);
Hadar Hen Zionf77c0162013-08-14 13:58:31 +03001546 ctrl = mailbox->buf;
1547
1548 ctrl->prio = cpu_to_be16(__mlx4_domain[domain] |
1549 flow_attr->priority);
1550 ctrl->type = mlx4_map_sw_to_hw_steering_mode(mdev->dev, flow_type);
1551 ctrl->port = flow_attr->port;
1552 ctrl->qpn = cpu_to_be32(qp->qp_num);
1553
1554 ib_flow = flow_attr + 1;
1555 size += sizeof(struct mlx4_net_trans_rule_hw_ctrl);
Matan Baraka37a1a42013-11-07 15:25:16 +02001556 /* Add default flows */
1557 default_flow = __mlx4_ib_default_rules_match(qp, flow_attr);
1558 if (default_flow >= 0) {
1559 ret = __mlx4_ib_create_default_rules(
1560 mdev, qp, default_table + default_flow,
1561 mailbox->buf + size);
1562 if (ret < 0) {
1563 mlx4_free_cmd_mailbox(mdev->dev, mailbox);
1564 return -EINVAL;
1565 }
1566 size += ret;
1567 }
Hadar Hen Zionf77c0162013-08-14 13:58:31 +03001568 for (i = 0; i < flow_attr->num_of_specs; i++) {
Matan Baraka37a1a42013-11-07 15:25:16 +02001569 ret = parse_flow_attr(mdev->dev, qp->qp_num, ib_flow,
1570 mailbox->buf + size);
Hadar Hen Zionf77c0162013-08-14 13:58:31 +03001571 if (ret < 0) {
1572 mlx4_free_cmd_mailbox(mdev->dev, mailbox);
1573 return -EINVAL;
1574 }
1575 ib_flow += ((union ib_flow_spec *) ib_flow)->size;
1576 size += ret;
1577 }
1578
Jack Morgenstein10b1c042016-12-29 18:37:13 +02001579 if (mlx4_is_master(mdev->dev) && flow_type == MLX4_FS_REGULAR &&
1580 flow_attr->num_of_specs == 1) {
1581 struct _rule_hw *rule_header = (struct _rule_hw *)(ctrl + 1);
1582 enum ib_flow_spec_type header_spec =
1583 ((union ib_flow_spec *)(flow_attr + 1))->type;
1584
1585 if (header_spec == IB_FLOW_SPEC_ETH)
1586 mlx4_handle_eth_header_mcast_prio(ctrl, rule_header);
1587 }
1588
Hadar Hen Zionf77c0162013-08-14 13:58:31 +03001589 ret = mlx4_cmd_imm(mdev->dev, mailbox->dma, reg_id, size >> 2, 0,
1590 MLX4_QP_FLOW_STEERING_ATTACH, MLX4_CMD_TIME_CLASS_A,
Jack Morgenstein10b1c042016-12-29 18:37:13 +02001591 MLX4_CMD_NATIVE);
Hadar Hen Zionf77c0162013-08-14 13:58:31 +03001592 if (ret == -ENOMEM)
 1593 pr_err("mcg table is full. Failed to register network rule.\n");
 1594 else if (ret == -ENXIO)
 1595 pr_err("Device managed flow steering is disabled. Failed to register network rule.\n");
 1596 else if (ret)
Colin Ian King35fc7b72016-04-25 20:26:50 +01001597 pr_err("Invalid argument. Failed to register network rule.\n");
Hadar Hen Zionf77c0162013-08-14 13:58:31 +03001598
1599 mlx4_free_cmd_mailbox(mdev->dev, mailbox);
1600 return ret;
1601}
1602
1603static int __mlx4_ib_destroy_flow(struct mlx4_dev *dev, u64 reg_id)
1604{
1605 int err;
1606 err = mlx4_cmd(dev, reg_id, 0, 0,
1607 MLX4_QP_FLOW_STEERING_DETACH, MLX4_CMD_TIME_CLASS_A,
Jack Morgenstein10b1c042016-12-29 18:37:13 +02001608 MLX4_CMD_NATIVE);
Hadar Hen Zionf77c0162013-08-14 13:58:31 +03001609 if (err)
 1610 pr_err("Failed to detach network rule, registration id = 0x%llx\n",
1611 reg_id);
1612 return err;
1613}
1614
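/* For a NORMAL flow with a single ETH spec, mlx4_ib_tunnel_steer_add() also
 * installs a VXLAN tunnel steering rule towards the same QP; it is a no-op
 * unless the device runs in MLX4_TUNNEL_OFFLOAD_MODE_VXLAN and is not using
 * the static A0 DMFS steering mode.
 */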
Or Gerlitzd2fce8a2014-08-27 16:47:49 +03001615static int mlx4_ib_tunnel_steer_add(struct ib_qp *qp, struct ib_flow_attr *flow_attr,
1616 u64 *reg_id)
1617{
1618 void *ib_flow;
1619 union ib_flow_spec *ib_spec;
1620 struct mlx4_dev *dev = to_mdev(qp->device)->dev;
1621 int err = 0;
1622
Or Gerlitz5eff6da2015-01-15 15:28:54 +02001623 if (dev->caps.tunnel_offload_mode != MLX4_TUNNEL_OFFLOAD_MODE_VXLAN ||
1624 dev->caps.dmfs_high_steer_mode == MLX4_STEERING_DMFS_A0_STATIC)
Or Gerlitzd2fce8a2014-08-27 16:47:49 +03001625 return 0; /* do nothing */
1626
1627 ib_flow = flow_attr + 1;
1628 ib_spec = (union ib_flow_spec *)ib_flow;
1629
1630 if (ib_spec->type != IB_FLOW_SPEC_ETH || flow_attr->num_of_specs != 1)
1631 return 0; /* do nothing */
1632
1633 err = mlx4_tunnel_steer_add(to_mdev(qp->device)->dev, ib_spec->eth.val.dst_mac,
1634 flow_attr->port, qp->qp_num,
1635 MLX4_DOMAIN_UVERBS | (flow_attr->priority & 0xff),
1636 reg_id);
1637 return err;
1638}
1639
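/* The don't-trap helper below chooses sniffer rule types from the ETH
 * dst_mac mask: no spec or an all-zero mask sniffs both multicast and
 * unicast, otherwise only the multicast bit may be set in the mask and the
 * multicast bit of the value picks MC vs UC sniffing.
 */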
Marina Varshaver0e451e82016-02-18 18:31:06 +02001640static int mlx4_ib_add_dont_trap_rule(struct mlx4_dev *dev,
1641 struct ib_flow_attr *flow_attr,
1642 enum mlx4_net_trans_promisc_mode *type)
1643{
1644 int err = 0;
1645
1646 if (!(dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_DMFS_UC_MC_SNIFFER) ||
1647 (dev->caps.dmfs_high_steer_mode == MLX4_STEERING_DMFS_A0_STATIC) ||
1648 (flow_attr->num_of_specs > 1) || (flow_attr->priority != 0)) {
1649 return -EOPNOTSUPP;
1650 }
1651
1652 if (flow_attr->num_of_specs == 0) {
1653 type[0] = MLX4_FS_MC_SNIFFER;
1654 type[1] = MLX4_FS_UC_SNIFFER;
1655 } else {
1656 union ib_flow_spec *ib_spec;
1657
1658 ib_spec = (union ib_flow_spec *)(flow_attr + 1);
1659 if (ib_spec->type != IB_FLOW_SPEC_ETH)
1660 return -EINVAL;
1661
 1662 /* if the mask is all zero, sniff both MC and UC */
1663 if (is_zero_ether_addr(ib_spec->eth.mask.dst_mac)) {
1664 type[0] = MLX4_FS_MC_SNIFFER;
1665 type[1] = MLX4_FS_UC_SNIFFER;
1666 } else {
1667 u8 mac[ETH_ALEN] = {ib_spec->eth.mask.dst_mac[0] ^ 0x01,
1668 ib_spec->eth.mask.dst_mac[1],
1669 ib_spec->eth.mask.dst_mac[2],
1670 ib_spec->eth.mask.dst_mac[3],
1671 ib_spec->eth.mask.dst_mac[4],
1672 ib_spec->eth.mask.dst_mac[5]};
1673
 1674 /* The xor above only flipped the multicast bit; a non-empty
 1675 * mask is valid only if that bit is set and the rest are zero.
 1676 */
1677 if (!is_zero_ether_addr(&mac[0]))
1678 return -EINVAL;
1679
1680 if (is_multicast_ether_addr(ib_spec->eth.val.dst_mac))
1681 type[0] = MLX4_FS_MC_SNIFFER;
1682 else
1683 type[0] = MLX4_FS_UC_SNIFFER;
1684 }
1685 }
1686
1687 return err;
1688}
1689
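/* mlx4_ib_create_flow() can install up to two rules per request (type[0]
 * and type[1], e.g. the RX and SX mirror ports for a sniffer) and, when the
 * two physical ports are bonded, a duplicate of each rule on port 2 whose
 * id is kept in mflow->reg_id[].mirror for teardown.
 */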
Hadar Hen Zionf77c0162013-08-14 13:58:31 +03001690static struct ib_flow *mlx4_ib_create_flow(struct ib_qp *qp,
1691 struct ib_flow_attr *flow_attr,
Matan Barak59082a32018-05-31 16:43:35 +03001692 int domain, struct ib_udata *udata)
Hadar Hen Zionf77c0162013-08-14 13:58:31 +03001693{
Moni Shoua146d6e12015-02-03 16:48:38 +02001694 int err = 0, i = 0, j = 0;
Hadar Hen Zionf77c0162013-08-14 13:58:31 +03001695 struct mlx4_ib_flow *mflow;
1696 enum mlx4_net_trans_promisc_mode type[2];
Moni Shoua146d6e12015-02-03 16:48:38 +02001697 struct mlx4_dev *dev = (to_mdev(qp->device))->dev;
1698 int is_bonded = mlx4_is_bonded(dev);
Hadar Hen Zionf77c0162013-08-14 13:58:31 +03001699
Yishai Hadas5533c182016-06-22 17:27:30 +03001700 if (flow_attr->port < 1 || flow_attr->port > qp->device->phys_port_cnt)
1701 return ERR_PTR(-EINVAL);
1702
Boris Pismenny85100202018-03-28 09:27:43 +03001703 if (flow_attr->flags & ~IB_FLOW_ATTR_FLAGS_DONT_TRAP)
1704 return ERR_PTR(-EOPNOTSUPP);
1705
Marina Varshaver0e451e82016-02-18 18:31:06 +02001706 if ((flow_attr->flags & IB_FLOW_ATTR_FLAGS_DONT_TRAP) &&
1707 (flow_attr->type != IB_FLOW_ATTR_NORMAL))
Marina Varshavera3100a72016-02-18 18:31:05 +02001708 return ERR_PTR(-EOPNOTSUPP);
1709
Matan Barak59082a32018-05-31 16:43:35 +03001710 if (udata &&
1711 udata->inlen && !ib_is_udata_cleared(udata, 0, udata->inlen))
1712 return ERR_PTR(-EOPNOTSUPP);
1713
Hadar Hen Zionf77c0162013-08-14 13:58:31 +03001714 memset(type, 0, sizeof(type));
1715
1716 mflow = kzalloc(sizeof(*mflow), GFP_KERNEL);
1717 if (!mflow) {
1718 err = -ENOMEM;
1719 goto err_free;
1720 }
1721
1722 switch (flow_attr->type) {
1723 case IB_FLOW_ATTR_NORMAL:
Marina Varshaver0e451e82016-02-18 18:31:06 +02001724 /* If the don't-trap flag (continue match) is set, then under
 1725 * specific conditions traffic is replicated to the given qp
 1726 * without being stolen from it
 1727 */
1728 if (unlikely(flow_attr->flags & IB_FLOW_ATTR_FLAGS_DONT_TRAP)) {
1729 err = mlx4_ib_add_dont_trap_rule(dev,
1730 flow_attr,
1731 type);
1732 if (err)
1733 goto err_free;
1734 } else {
1735 type[0] = MLX4_FS_REGULAR;
1736 }
Hadar Hen Zionf77c0162013-08-14 13:58:31 +03001737 break;
1738
1739 case IB_FLOW_ATTR_ALL_DEFAULT:
1740 type[0] = MLX4_FS_ALL_DEFAULT;
1741 break;
1742
1743 case IB_FLOW_ATTR_MC_DEFAULT:
1744 type[0] = MLX4_FS_MC_DEFAULT;
1745 break;
1746
1747 case IB_FLOW_ATTR_SNIFFER:
Marina Varshaver0e451e82016-02-18 18:31:06 +02001748 type[0] = MLX4_FS_MIRROR_RX_PORT;
1749 type[1] = MLX4_FS_MIRROR_SX_PORT;
Hadar Hen Zionf77c0162013-08-14 13:58:31 +03001750 break;
1751
1752 default:
1753 err = -EINVAL;
1754 goto err_free;
1755 }
1756
1757 while (i < ARRAY_SIZE(type) && type[i]) {
1758 err = __mlx4_ib_create_flow(qp, flow_attr, domain, type[i],
Moni Shoua146d6e12015-02-03 16:48:38 +02001759 &mflow->reg_id[i].id);
Hadar Hen Zionf77c0162013-08-14 13:58:31 +03001760 if (err)
Or Gerlitz571e1b22014-10-30 15:59:28 +02001761 goto err_create_flow;
Moni Shoua146d6e12015-02-03 16:48:38 +02001762 if (is_bonded) {
Moni Shoua824c25c2015-02-08 11:49:33 +02001763 /* Application always sees one port so the mirror rule
1764 * must be on port #2
1765 */
Moni Shoua146d6e12015-02-03 16:48:38 +02001766 flow_attr->port = 2;
1767 err = __mlx4_ib_create_flow(qp, flow_attr,
1768 domain, type[j],
1769 &mflow->reg_id[j].mirror);
1770 flow_attr->port = 1;
1771 if (err)
1772 goto err_create_flow;
1773 j++;
1774 }
1775
Roland Dreier11562562015-05-29 23:11:27 -07001776 i++;
Hadar Hen Zionf77c0162013-08-14 13:58:31 +03001777 }
1778
Or Gerlitzd2fce8a2014-08-27 16:47:49 +03001779 if (i < ARRAY_SIZE(type) && flow_attr->type == IB_FLOW_ATTR_NORMAL) {
Moni Shoua146d6e12015-02-03 16:48:38 +02001780 err = mlx4_ib_tunnel_steer_add(qp, flow_attr,
1781 &mflow->reg_id[i].id);
Or Gerlitzd2fce8a2014-08-27 16:47:49 +03001782 if (err)
Or Gerlitz571e1b22014-10-30 15:59:28 +02001783 goto err_create_flow;
Roland Dreier11562562015-05-29 23:11:27 -07001784
Moni Shoua146d6e12015-02-03 16:48:38 +02001785 if (is_bonded) {
1786 flow_attr->port = 2;
1787 err = mlx4_ib_tunnel_steer_add(qp, flow_attr,
1788 &mflow->reg_id[j].mirror);
1789 flow_attr->port = 1;
1790 if (err)
1791 goto err_create_flow;
1792 j++;
1793 }
1794 /* function to create mirror rule */
Roland Dreier11562562015-05-29 23:11:27 -07001795 i++;
Or Gerlitzd2fce8a2014-08-27 16:47:49 +03001796 }
1797
Hadar Hen Zionf77c0162013-08-14 13:58:31 +03001798 return &mflow->ibflow;
1799
Or Gerlitz571e1b22014-10-30 15:59:28 +02001800err_create_flow:
1801 while (i) {
Moni Shoua146d6e12015-02-03 16:48:38 +02001802 (void)__mlx4_ib_destroy_flow(to_mdev(qp->device)->dev,
1803 mflow->reg_id[i].id);
Or Gerlitz571e1b22014-10-30 15:59:28 +02001804 i--;
1805 }
Moni Shoua146d6e12015-02-03 16:48:38 +02001806
1807 while (j) {
1808 (void)__mlx4_ib_destroy_flow(to_mdev(qp->device)->dev,
1809 mflow->reg_id[j].mirror);
1810 j--;
1811 }
Hadar Hen Zionf77c0162013-08-14 13:58:31 +03001812err_free:
1813 kfree(mflow);
1814 return ERR_PTR(err);
1815}
1816
1817static int mlx4_ib_destroy_flow(struct ib_flow *flow_id)
1818{
1819 int err, ret = 0;
1820 int i = 0;
1821 struct mlx4_ib_dev *mdev = to_mdev(flow_id->qp->device);
1822 struct mlx4_ib_flow *mflow = to_mflow(flow_id);
1823
Moni Shoua146d6e12015-02-03 16:48:38 +02001824 while (i < ARRAY_SIZE(mflow->reg_id) && mflow->reg_id[i].id) {
1825 err = __mlx4_ib_destroy_flow(mdev->dev, mflow->reg_id[i].id);
Hadar Hen Zionf77c0162013-08-14 13:58:31 +03001826 if (err)
1827 ret = err;
Moni Shoua146d6e12015-02-03 16:48:38 +02001828 if (mflow->reg_id[i].mirror) {
1829 err = __mlx4_ib_destroy_flow(mdev->dev,
1830 mflow->reg_id[i].mirror);
1831 if (err)
1832 ret = err;
1833 }
Hadar Hen Zionf77c0162013-08-14 13:58:31 +03001834 i++;
1835 }
1836
1837 kfree(mflow);
1838 return ret;
1839}
1840
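/* On multicast attach with device-managed steering, the registration id
 * returned by mlx4_multicast_attach() is remembered in a mlx4_ib_steering
 * entry on mqp->steering_rules so that detach can find it again by GID;
 * bonded devices attach on the other physical port too (reg_id.mirror).
 */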
Roland Dreier225c7b12007-05-08 18:00:38 -07001841static int mlx4_ib_mcg_attach(struct ib_qp *ibqp, union ib_gid *gid, u16 lid)
1842{
Eli Cohenfa417f72010-10-24 21:08:52 -07001843 int err;
1844 struct mlx4_ib_dev *mdev = to_mdev(ibqp->device);
Moni Shoua146d6e12015-02-03 16:48:38 +02001845 struct mlx4_dev *dev = mdev->dev;
Eli Cohenfa417f72010-10-24 21:08:52 -07001846 struct mlx4_ib_qp *mqp = to_mqp(ibqp);
Hadar Hen Zion0ff1fb62012-07-05 04:03:46 +00001847 struct mlx4_ib_steering *ib_steering = NULL;
Or Gerlitze9a7faf2014-12-17 16:17:34 +02001848 enum mlx4_protocol prot = MLX4_PROT_IB_IPV6;
Moni Shoua146d6e12015-02-03 16:48:38 +02001849 struct mlx4_flow_reg_id reg_id;
Eli Cohenfa417f72010-10-24 21:08:52 -07001850
Hadar Hen Zion0ff1fb62012-07-05 04:03:46 +00001851 if (mdev->dev->caps.steering_mode ==
1852 MLX4_STEERING_MODE_DEVICE_MANAGED) {
1853 ib_steering = kmalloc(sizeof(*ib_steering), GFP_KERNEL);
1854 if (!ib_steering)
1855 return -ENOMEM;
1856 }
1857
1858 err = mlx4_multicast_attach(mdev->dev, &mqp->mqp, gid->raw, mqp->port,
1859 !!(mqp->flags &
1860 MLX4_IB_QP_BLOCK_MULTICAST_LOOPBACK),
Moni Shoua146d6e12015-02-03 16:48:38 +02001861 prot, &reg_id.id);
Or Gerlitze9a7faf2014-12-17 16:17:34 +02001862 if (err) {
1863 pr_err("multicast attach op failed, err %d\n", err);
Hadar Hen Zion0ff1fb62012-07-05 04:03:46 +00001864 goto err_malloc;
Or Gerlitze9a7faf2014-12-17 16:17:34 +02001865 }
Eli Cohenfa417f72010-10-24 21:08:52 -07001866
Moni Shoua146d6e12015-02-03 16:48:38 +02001867 reg_id.mirror = 0;
1868 if (mlx4_is_bonded(dev)) {
Moni Shoua824c25c2015-02-08 11:49:33 +02001869 err = mlx4_multicast_attach(mdev->dev, &mqp->mqp, gid->raw,
1870 (mqp->port == 1) ? 2 : 1,
Moni Shoua146d6e12015-02-03 16:48:38 +02001871 !!(mqp->flags &
1872 MLX4_IB_QP_BLOCK_MULTICAST_LOOPBACK),
1873 prot, &reg_id.mirror);
1874 if (err)
1875 goto err_add;
1876 }
1877
Eli Cohenfa417f72010-10-24 21:08:52 -07001878 err = add_gid_entry(ibqp, gid);
1879 if (err)
1880 goto err_add;
1881
Hadar Hen Zion0ff1fb62012-07-05 04:03:46 +00001882 if (ib_steering) {
1883 memcpy(ib_steering->gid.raw, gid->raw, 16);
1884 ib_steering->reg_id = reg_id;
1885 mutex_lock(&mqp->mutex);
1886 list_add(&ib_steering->list, &mqp->steering_rules);
1887 mutex_unlock(&mqp->mutex);
1888 }
Eli Cohenfa417f72010-10-24 21:08:52 -07001889 return 0;
1890
1891err_add:
Hadar Hen Zion0ff1fb62012-07-05 04:03:46 +00001892 mlx4_multicast_detach(mdev->dev, &mqp->mqp, gid->raw,
Moni Shoua146d6e12015-02-03 16:48:38 +02001893 prot, reg_id.id);
1894 if (reg_id.mirror)
1895 mlx4_multicast_detach(mdev->dev, &mqp->mqp, gid->raw,
1896 prot, reg_id.mirror);
Hadar Hen Zion0ff1fb62012-07-05 04:03:46 +00001897err_malloc:
1898 kfree(ib_steering);
1899
Eli Cohenfa417f72010-10-24 21:08:52 -07001900 return err;
1901}
1902
1903static struct mlx4_ib_gid_entry *find_gid_entry(struct mlx4_ib_qp *qp, u8 *raw)
1904{
1905 struct mlx4_ib_gid_entry *ge;
1906 struct mlx4_ib_gid_entry *tmp;
1907 struct mlx4_ib_gid_entry *ret = NULL;
1908
1909 list_for_each_entry_safe(ge, tmp, &qp->gid_list, list) {
1910 if (!memcmp(raw, ge->gid.raw, 16)) {
1911 ret = ge;
1912 break;
1913 }
1914 }
1915
1916 return ret;
Roland Dreier225c7b12007-05-08 18:00:38 -07001917}
1918
1919static int mlx4_ib_mcg_detach(struct ib_qp *ibqp, union ib_gid *gid, u16 lid)
1920{
Eli Cohenfa417f72010-10-24 21:08:52 -07001921 int err;
1922 struct mlx4_ib_dev *mdev = to_mdev(ibqp->device);
Moni Shoua146d6e12015-02-03 16:48:38 +02001923 struct mlx4_dev *dev = mdev->dev;
Eli Cohenfa417f72010-10-24 21:08:52 -07001924 struct mlx4_ib_qp *mqp = to_mqp(ibqp);
Eli Cohenfa417f72010-10-24 21:08:52 -07001925 struct net_device *ndev;
1926 struct mlx4_ib_gid_entry *ge;
Moni Shoua146d6e12015-02-03 16:48:38 +02001927 struct mlx4_flow_reg_id reg_id = {0, 0};
Or Gerlitze9a7faf2014-12-17 16:17:34 +02001928 enum mlx4_protocol prot = MLX4_PROT_IB_IPV6;
Eli Cohenfa417f72010-10-24 21:08:52 -07001929
Hadar Hen Zion0ff1fb62012-07-05 04:03:46 +00001930 if (mdev->dev->caps.steering_mode ==
1931 MLX4_STEERING_MODE_DEVICE_MANAGED) {
1932 struct mlx4_ib_steering *ib_steering;
1933
1934 mutex_lock(&mqp->mutex);
1935 list_for_each_entry(ib_steering, &mqp->steering_rules, list) {
1936 if (!memcmp(ib_steering->gid.raw, gid->raw, 16)) {
1937 list_del(&ib_steering->list);
1938 break;
1939 }
1940 }
1941 mutex_unlock(&mqp->mutex);
1942 if (&ib_steering->list == &mqp->steering_rules) {
1943 pr_err("Couldn't find reg_id for mgid. Steering rule is left attached\n");
1944 return -EINVAL;
1945 }
1946 reg_id = ib_steering->reg_id;
1947 kfree(ib_steering);
1948 }
1949
1950 err = mlx4_multicast_detach(mdev->dev, &mqp->mqp, gid->raw,
Moni Shoua146d6e12015-02-03 16:48:38 +02001951 prot, reg_id.id);
Eli Cohenfa417f72010-10-24 21:08:52 -07001952 if (err)
1953 return err;
1954
Moni Shoua146d6e12015-02-03 16:48:38 +02001955 if (mlx4_is_bonded(dev)) {
1956 err = mlx4_multicast_detach(mdev->dev, &mqp->mqp, gid->raw,
1957 prot, reg_id.mirror);
1958 if (err)
1959 return err;
1960 }
1961
Eli Cohenfa417f72010-10-24 21:08:52 -07001962 mutex_lock(&mqp->mutex);
1963 ge = find_gid_entry(mqp, gid->raw);
1964 if (ge) {
Jack Morgensteindba3ad22014-08-21 14:28:41 +03001965 spin_lock_bh(&mdev->iboe.lock);
Eli Cohenfa417f72010-10-24 21:08:52 -07001966 ndev = ge->added ? mdev->iboe.netdevs[ge->port - 1] : NULL;
1967 if (ndev)
1968 dev_hold(ndev);
Jack Morgensteindba3ad22014-08-21 14:28:41 +03001969 spin_unlock_bh(&mdev->iboe.lock);
Moni Shouad487ee72013-12-12 18:03:13 +02001970 if (ndev)
Eli Cohenfa417f72010-10-24 21:08:52 -07001971 dev_put(ndev);
Eli Cohenfa417f72010-10-24 21:08:52 -07001972 list_del(&ge->list);
1973 kfree(ge);
1974 } else
Shlomo Pongratz987c8f82012-04-29 17:04:26 +03001975 pr_warn("could not find mgid entry\n");
Eli Cohenfa417f72010-10-24 21:08:52 -07001976
1977 mutex_unlock(&mqp->mutex);
1978
1979 return 0;
Roland Dreier225c7b12007-05-08 18:00:38 -07001980}
1981
1982static int init_node_data(struct mlx4_ib_dev *dev)
1983{
1984 struct ib_smp *in_mad = NULL;
1985 struct ib_smp *out_mad = NULL;
Jack Morgenstein0a9a0182012-08-03 08:40:45 +00001986 int mad_ifc_flags = MLX4_MAD_IFC_IGNORE_KEYS;
Roland Dreier225c7b12007-05-08 18:00:38 -07001987 int err = -ENOMEM;
1988
1989 in_mad = kzalloc(sizeof *in_mad, GFP_KERNEL);
1990 out_mad = kmalloc(sizeof *out_mad, GFP_KERNEL);
1991 if (!in_mad || !out_mad)
1992 goto out;
1993
1994 init_query_mad(in_mad);
1995 in_mad->attr_id = IB_SMP_ATTR_NODE_DESC;
Jack Morgenstein0a9a0182012-08-03 08:40:45 +00001996 if (mlx4_is_master(dev->dev))
1997 mad_ifc_flags |= MLX4_MAD_IFC_NET_VIEW;
Roland Dreier225c7b12007-05-08 18:00:38 -07001998
Jack Morgenstein0a9a0182012-08-03 08:40:45 +00001999 err = mlx4_MAD_IFC(dev, mad_ifc_flags, 1, NULL, NULL, in_mad, out_mad);
Roland Dreier225c7b12007-05-08 18:00:38 -07002000 if (err)
2001 goto out;
2002
Yuval Shaiabd99fde2016-08-25 10:57:07 -07002003 memcpy(dev->ib_dev.node_desc, out_mad->data, IB_DEVICE_NODE_DESC_MAX);
Roland Dreier225c7b12007-05-08 18:00:38 -07002004
2005 in_mad->attr_id = IB_SMP_ATTR_NODE_INFO;
2006
Jack Morgenstein0a9a0182012-08-03 08:40:45 +00002007 err = mlx4_MAD_IFC(dev, mad_ifc_flags, 1, NULL, NULL, in_mad, out_mad);
Roland Dreier225c7b12007-05-08 18:00:38 -07002008 if (err)
2009 goto out;
2010
Jack Morgenstein992e8e6e2012-08-03 08:40:54 +00002011 dev->dev->rev_id = be32_to_cpup((__be32 *) (out_mad->data + 32));
Roland Dreier225c7b12007-05-08 18:00:38 -07002012 memcpy(&dev->ib_dev.node_guid, out_mad->data + 12, 8);
2013
2014out:
2015 kfree(in_mad);
2016 kfree(out_mad);
2017 return err;
2018}
2019
Parav Pandit508a5232018-10-11 22:31:54 +03002020static ssize_t hca_type_show(struct device *device,
2021 struct device_attribute *attr, char *buf)
Jack Morgensteincd9281d2007-09-18 09:14:18 +02002022{
Tony Jonesf4e91eb2008-02-22 00:13:36 +01002023 struct mlx4_ib_dev *dev =
Parav Pandit54747232018-12-18 14:15:56 +02002024 rdma_device_to_drv_device(device, struct mlx4_ib_dev, ib_dev);
Yishai Hadas872bf2f2015-01-25 16:59:35 +02002025 return sprintf(buf, "MT%d\n", dev->dev->persist->pdev->device);
Jack Morgensteincd9281d2007-09-18 09:14:18 +02002026}
Parav Pandit508a5232018-10-11 22:31:54 +03002027static DEVICE_ATTR_RO(hca_type);
Jack Morgensteincd9281d2007-09-18 09:14:18 +02002028
Parav Pandit508a5232018-10-11 22:31:54 +03002029static ssize_t hw_rev_show(struct device *device,
2030 struct device_attribute *attr, char *buf)
Jack Morgensteincd9281d2007-09-18 09:14:18 +02002031{
Tony Jonesf4e91eb2008-02-22 00:13:36 +01002032 struct mlx4_ib_dev *dev =
Parav Pandit54747232018-12-18 14:15:56 +02002033 rdma_device_to_drv_device(device, struct mlx4_ib_dev, ib_dev);
Jack Morgensteincd9281d2007-09-18 09:14:18 +02002034 return sprintf(buf, "%x\n", dev->dev->rev_id);
2035}
Parav Pandit508a5232018-10-11 22:31:54 +03002036static DEVICE_ATTR_RO(hw_rev);
Jack Morgensteincd9281d2007-09-18 09:14:18 +02002037
Parav Pandit508a5232018-10-11 22:31:54 +03002038static ssize_t board_id_show(struct device *device,
2039 struct device_attribute *attr, char *buf)
Jack Morgensteincd9281d2007-09-18 09:14:18 +02002040{
Tony Jonesf4e91eb2008-02-22 00:13:36 +01002041 struct mlx4_ib_dev *dev =
Parav Pandit54747232018-12-18 14:15:56 +02002042 rdma_device_to_drv_device(device, struct mlx4_ib_dev, ib_dev);
2043
Tony Jonesf4e91eb2008-02-22 00:13:36 +01002044 return sprintf(buf, "%.*s\n", MLX4_BOARD_ID_LEN,
2045 dev->dev->board_id);
Jack Morgensteincd9281d2007-09-18 09:14:18 +02002046}
Parav Pandit508a5232018-10-11 22:31:54 +03002047static DEVICE_ATTR_RO(board_id);
Jack Morgensteincd9281d2007-09-18 09:14:18 +02002048
Parav Pandit508a5232018-10-11 22:31:54 +03002049static struct attribute *mlx4_class_attributes[] = {
2050 &dev_attr_hw_rev.attr,
2051 &dev_attr_hca_type.attr,
2052 &dev_attr_board_id.attr,
2053 NULL
2054};
Jack Morgensteincd9281d2007-09-18 09:14:18 +02002055
Parav Pandit508a5232018-10-11 22:31:54 +03002056static const struct attribute_group mlx4_attr_group = {
2057 .attrs = mlx4_class_attributes,
Jack Morgensteincd9281d2007-09-18 09:14:18 +02002058};
2059
Mark Bloch3f85f2a2016-07-19 20:54:58 +03002060struct diag_counter {
2061 const char *name;
2062 u32 offset;
2063};
2064
2065#define DIAG_COUNTER(_name, _offset) \
2066 { .name = #_name, .offset = _offset }
2067
2068static const struct diag_counter diag_basic[] = {
2069 DIAG_COUNTER(rq_num_lle, 0x00),
2070 DIAG_COUNTER(sq_num_lle, 0x04),
2071 DIAG_COUNTER(rq_num_lqpoe, 0x08),
2072 DIAG_COUNTER(sq_num_lqpoe, 0x0C),
2073 DIAG_COUNTER(rq_num_lpe, 0x18),
2074 DIAG_COUNTER(sq_num_lpe, 0x1C),
2075 DIAG_COUNTER(rq_num_wrfe, 0x20),
2076 DIAG_COUNTER(sq_num_wrfe, 0x24),
2077 DIAG_COUNTER(sq_num_mwbe, 0x2C),
2078 DIAG_COUNTER(sq_num_bre, 0x34),
2079 DIAG_COUNTER(sq_num_rire, 0x44),
2080 DIAG_COUNTER(rq_num_rire, 0x48),
2081 DIAG_COUNTER(sq_num_rae, 0x4C),
2082 DIAG_COUNTER(rq_num_rae, 0x50),
2083 DIAG_COUNTER(sq_num_roe, 0x54),
2084 DIAG_COUNTER(sq_num_tree, 0x5C),
2085 DIAG_COUNTER(sq_num_rree, 0x64),
2086 DIAG_COUNTER(rq_num_rnr, 0x68),
2087 DIAG_COUNTER(sq_num_rnr, 0x6C),
2088 DIAG_COUNTER(rq_num_oos, 0x100),
2089 DIAG_COUNTER(sq_num_oos, 0x104),
2090};
2091
2092static const struct diag_counter diag_ext[] = {
2093 DIAG_COUNTER(rq_num_dup, 0x130),
2094 DIAG_COUNTER(sq_num_to, 0x134),
2095};
2096
2097static const struct diag_counter diag_device_only[] = {
2098 DIAG_COUNTER(num_cqovf, 0x1A0),
2099 DIAG_COUNTER(rq_num_udsdprd, 0x118),
2100};
2101
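/* The diag_counters table has two descriptor sets: index 0 for device-wide
 * counters and index 1 for per-port counters, hence the !!port indexing in
 * the hw_stats callbacks below. The "ext" group is added only when the
 * device reports MLX4_DEV_CAP_FLAG2_DIAG_PER_PORT, and the device-only
 * group only to the device-wide set.
 */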
2102static struct rdma_hw_stats *mlx4_ib_alloc_hw_stats(struct ib_device *ibdev,
2103 u8 port_num)
2104{
2105 struct mlx4_ib_dev *dev = to_mdev(ibdev);
2106 struct mlx4_ib_diag_counters *diag = dev->diag_counters;
2107
2108 if (!diag[!!port_num].name)
2109 return NULL;
2110
2111 return rdma_alloc_hw_stats_struct(diag[!!port_num].name,
2112 diag[!!port_num].num_counters,
2113 RDMA_HW_STATS_DEFAULT_LIFESPAN);
2114}
2115
2116static int mlx4_ib_get_hw_stats(struct ib_device *ibdev,
2117 struct rdma_hw_stats *stats,
2118 u8 port, int index)
2119{
2120 struct mlx4_ib_dev *dev = to_mdev(ibdev);
2121 struct mlx4_ib_diag_counters *diag = dev->diag_counters;
2122 u32 hw_value[ARRAY_SIZE(diag_device_only) +
2123 ARRAY_SIZE(diag_ext) + ARRAY_SIZE(diag_basic)] = {};
2124 int ret;
2125 int i;
2126
2127 ret = mlx4_query_diag_counters(dev->dev,
2128 MLX4_OP_MOD_QUERY_TRANSPORT_CI_ERRORS,
2129 diag[!!port].offset, hw_value,
2130 diag[!!port].num_counters, port);
2131
2132 if (ret)
2133 return ret;
2134
2135 for (i = 0; i < diag[!!port].num_counters; i++)
2136 stats->value[i] = hw_value[i];
2137
2138 return diag[!!port].num_counters;
2139}
2140
2141static int __mlx4_ib_alloc_diag_counters(struct mlx4_ib_dev *ibdev,
2142 const char ***name,
2143 u32 **offset,
2144 u32 *num,
2145 bool port)
2146{
2147 u32 num_counters;
2148
2149 num_counters = ARRAY_SIZE(diag_basic);
2150
2151 if (ibdev->dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_DIAG_PER_PORT)
2152 num_counters += ARRAY_SIZE(diag_ext);
2153
2154 if (!port)
2155 num_counters += ARRAY_SIZE(diag_device_only);
2156
2157 *name = kcalloc(num_counters, sizeof(**name), GFP_KERNEL);
2158 if (!*name)
2159 return -ENOMEM;
2160
2161 *offset = kcalloc(num_counters, sizeof(**offset), GFP_KERNEL);
2162 if (!*offset)
2163 goto err_name;
2164
2165 *num = num_counters;
2166
2167 return 0;
2168
2169err_name:
2170 kfree(*name);
2171 return -ENOMEM;
2172}
2173
2174static void mlx4_ib_fill_diag_counters(struct mlx4_ib_dev *ibdev,
2175 const char **name,
2176 u32 *offset,
2177 bool port)
2178{
2179 int i;
2180 int j;
2181
2182 for (i = 0, j = 0; i < ARRAY_SIZE(diag_basic); i++, j++) {
2183 name[i] = diag_basic[i].name;
2184 offset[i] = diag_basic[i].offset;
2185 }
2186
2187 if (ibdev->dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_DIAG_PER_PORT) {
2188 for (i = 0; i < ARRAY_SIZE(diag_ext); i++, j++) {
2189 name[j] = diag_ext[i].name;
2190 offset[j] = diag_ext[i].offset;
2191 }
2192 }
2193
2194 if (!port) {
2195 for (i = 0; i < ARRAY_SIZE(diag_device_only); i++, j++) {
2196 name[j] = diag_device_only[i].name;
2197 offset[j] = diag_device_only[i].offset;
2198 }
2199 }
2200}
2201
Kamal Heib4725c4b2018-12-10 21:09:37 +02002202static const struct ib_device_ops mlx4_ib_hw_stats_ops = {
2203 .alloc_hw_stats = mlx4_ib_alloc_hw_stats,
2204 .get_hw_stats = mlx4_ib_get_hw_stats,
2205};
2206
Mark Bloch3f85f2a2016-07-19 20:54:58 +03002207static int mlx4_ib_alloc_diag_counters(struct mlx4_ib_dev *ibdev)
2208{
2209 struct mlx4_ib_diag_counters *diag = ibdev->diag_counters;
2210 int i;
2211 int ret;
2212 bool per_port = !!(ibdev->dev->caps.flags2 &
2213 MLX4_DEV_CAP_FLAG2_DIAG_PER_PORT);
2214
Kamal Heib69d269d382016-09-12 19:16:22 +03002215 if (mlx4_is_slave(ibdev->dev))
2216 return 0;
2217
Mark Bloch3f85f2a2016-07-19 20:54:58 +03002218 for (i = 0; i < MLX4_DIAG_COUNTERS_TYPES; i++) {
2219 /* i == 1 means we are building port counters */
2220 if (i && !per_port)
2221 continue;
2222
2223 ret = __mlx4_ib_alloc_diag_counters(ibdev, &diag[i].name,
2224 &diag[i].offset,
2225 &diag[i].num_counters, i);
2226 if (ret)
2227 goto err_alloc;
2228
2229 mlx4_ib_fill_diag_counters(ibdev, diag[i].name,
2230 diag[i].offset, i);
2231 }
2232
Kamal Heib4725c4b2018-12-10 21:09:37 +02002233 ib_set_device_ops(&ibdev->ib_dev, &mlx4_ib_hw_stats_ops);
Mark Bloch3f85f2a2016-07-19 20:54:58 +03002234
2235 return 0;
2236
2237err_alloc:
2238 if (i) {
2239 kfree(diag[i - 1].name);
2240 kfree(diag[i - 1].offset);
2241 }
2242
2243 return ret;
2244}
2245
2246static void mlx4_ib_diag_cleanup(struct mlx4_ib_dev *ibdev)
2247{
2248 int i;
2249
2250 for (i = 0; i < MLX4_DIAG_COUNTERS_TYPES; i++) {
2251 kfree(ibdev->diag_counters[i].offset);
2252 kfree(ibdev->diag_counters[i].name);
2253 }
2254}
2255
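/* mlx4_ib_update_qps() reacts to a MAC change on the RoCE netdev: it
 * registers the new source MAC, repoints the proxy QP1's primary path at
 * the new index with mlx4_update_qp(MLX4_UPDATE_QP_SMAC) and only then
 * releases the MAC that is no longer needed; it only matters for SR-IOV
 * (mlx4_is_mfunc) devices.
 */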
Matan Barak9433c182014-05-15 15:29:28 +03002256#define MLX4_IB_INVALID_MAC ((u64)-1)
2257static void mlx4_ib_update_qps(struct mlx4_ib_dev *ibdev,
2258 struct net_device *dev,
2259 int port)
2260{
2261 u64 new_smac = 0;
2262 u64 release_mac = MLX4_IB_INVALID_MAC;
2263 struct mlx4_ib_qp *qp;
2264
2265 read_lock(&dev_base_lock);
2266 new_smac = mlx4_mac_to_u64(dev->dev_addr);
2267 read_unlock(&dev_base_lock);
2268
Jack Morgenstein3e0629c2014-09-11 14:11:17 +03002269 atomic64_set(&ibdev->iboe.mac[port - 1], new_smac);
2270
Jack Morgensteind24d9f42014-09-11 14:11:18 +03002271 /* no need to update QP1 or register the mac in non-SRIOV */
2272 if (!mlx4_is_mfunc(ibdev->dev))
2273 return;
2274
Matan Barak9433c182014-05-15 15:29:28 +03002275 mutex_lock(&ibdev->qp1_proxy_lock[port - 1]);
2276 qp = ibdev->qp1_proxy[port - 1];
2277 if (qp) {
2278 int new_smac_index;
Jack Morgenstein25476b02014-09-11 14:11:20 +03002279 u64 old_smac;
Matan Barak9433c182014-05-15 15:29:28 +03002280 struct mlx4_update_qp_params update_params;
2281
Jack Morgenstein25476b02014-09-11 14:11:20 +03002282 mutex_lock(&qp->mutex);
2283 old_smac = qp->pri.smac;
Matan Barak9433c182014-05-15 15:29:28 +03002284 if (new_smac == old_smac)
2285 goto unlock;
2286
2287 new_smac_index = mlx4_register_mac(ibdev->dev, port, new_smac);
2288
2289 if (new_smac_index < 0)
2290 goto unlock;
2291
2292 update_params.smac_index = new_smac_index;
Matan Barak09e05c32014-09-10 16:41:56 +03002293 if (mlx4_update_qp(ibdev->dev, qp->mqp.qpn, MLX4_UPDATE_QP_SMAC,
Matan Barak9433c182014-05-15 15:29:28 +03002294 &update_params)) {
2295 release_mac = new_smac;
2296 goto unlock;
2297 }
Jack Morgenstein25476b02014-09-11 14:11:20 +03002298 /* if old port was zero, no mac was yet registered for this QP */
2299 if (qp->pri.smac_port)
2300 release_mac = old_smac;
Matan Barak9433c182014-05-15 15:29:28 +03002301 qp->pri.smac = new_smac;
Jack Morgenstein25476b02014-09-11 14:11:20 +03002302 qp->pri.smac_port = port;
Matan Barak9433c182014-05-15 15:29:28 +03002303 qp->pri.smac_index = new_smac_index;
Matan Barak9433c182014-05-15 15:29:28 +03002304 }
2305
2306unlock:
Matan Barak9433c182014-05-15 15:29:28 +03002307 if (release_mac != MLX4_IB_INVALID_MAC)
2308 mlx4_unregister_mac(ibdev->dev, port, release_mac);
Jack Morgenstein25476b02014-09-11 14:11:20 +03002309 if (qp)
2310 mutex_unlock(&qp->mutex);
2311 mutex_unlock(&ibdev->qp1_proxy_lock[port - 1]);
Matan Barak9433c182014-05-15 15:29:28 +03002312}
2313
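/* mlx4_ib_scan_netdevs() runs under RTNL from the netdev notifier: it
 * refreshes iboe->netdevs[], turns NETDEV_UP/NETDEV_DOWN into
 * IB_EVENT_PORT_ACTIVE/IB_EVENT_PORT_ERR when the cached port state really
 * changed, and triggers the QP1 SMAC update after dropping the iboe lock.
 */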
Matan Barak9433c182014-05-15 15:29:28 +03002314static void mlx4_ib_scan_netdevs(struct mlx4_ib_dev *ibdev,
2315 struct net_device *dev,
2316 unsigned long event)
2317
Moni Shouad487ee72013-12-12 18:03:13 +02002318{
2319 struct mlx4_ib_iboe *iboe;
Matan Barak9433c182014-05-15 15:29:28 +03002320 int update_qps_port = -1;
Moni Shouad487ee72013-12-12 18:03:13 +02002321 int port;
2322
Moni Shoua5070cd22015-07-30 18:33:30 +03002323 ASSERT_RTNL();
2324
Moni Shouad487ee72013-12-12 18:03:13 +02002325 iboe = &ibdev->iboe;
2326
Jack Morgensteindba3ad22014-08-21 14:28:41 +03002327 spin_lock_bh(&iboe->lock);
Moni Shouad487ee72013-12-12 18:03:13 +02002328 mlx4_foreach_ib_transport_port(port, ibdev->dev) {
Moni Shouaad4885d22014-02-05 15:13:02 +02002329
Moni Shouad487ee72013-12-12 18:03:13 +02002330 iboe->netdevs[port - 1] =
2331 mlx4_get_protocol_dev(ibdev->dev, MLX4_PROT_ETH, port);
Moni Shouad487ee72013-12-12 18:03:13 +02002332
Matan Barak9433c182014-05-15 15:29:28 +03002333 if (dev == iboe->netdevs[port - 1] &&
2334 (event == NETDEV_CHANGEADDR || event == NETDEV_REGISTER ||
2335 event == NETDEV_UP || event == NETDEV_CHANGE))
2336 update_qps_port = port;
2337
Ursula Braunfc6526f2018-11-12 12:41:55 +01002338 if (dev == iboe->netdevs[port - 1] &&
2339 (event == NETDEV_UP || event == NETDEV_DOWN)) {
2340 enum ib_port_state port_state;
2341 struct ib_event ibev = { };
2342
2343 if (ib_get_cached_port_state(&ibdev->ib_dev, port,
2344 &port_state))
2345 continue;
2346
2347 if (event == NETDEV_UP &&
2348 (port_state != IB_PORT_ACTIVE ||
2349 iboe->last_port_state[port - 1] != IB_PORT_DOWN))
2350 continue;
2351 if (event == NETDEV_DOWN &&
2352 (port_state != IB_PORT_DOWN ||
2353 iboe->last_port_state[port - 1] != IB_PORT_ACTIVE))
2354 continue;
2355 iboe->last_port_state[port - 1] = port_state;
2356
2357 ibev.device = &ibdev->ib_dev;
2358 ibev.element.port_num = port;
2359 ibev.event = event == NETDEV_UP ? IB_EVENT_PORT_ACTIVE :
2360 IB_EVENT_PORT_ERR;
2361 ib_dispatch_event(&ibev);
2362 }
2363
Moni Shouad487ee72013-12-12 18:03:13 +02002364 }
Jack Morgensteindba3ad22014-08-21 14:28:41 +03002365 spin_unlock_bh(&iboe->lock);
Matan Barak9433c182014-05-15 15:29:28 +03002366
2367 if (update_qps_port > 0)
2368 mlx4_ib_update_qps(ibdev, dev, update_qps_port);
Moni Shouad487ee72013-12-12 18:03:13 +02002369}
2370
2371static int mlx4_ib_netdev_event(struct notifier_block *this,
2372 unsigned long event, void *ptr)
2373{
Jiri Pirko351638e2013-05-28 01:30:21 +00002374 struct net_device *dev = netdev_notifier_info_to_dev(ptr);
Eli Cohenfa417f72010-10-24 21:08:52 -07002375 struct mlx4_ib_dev *ibdev;
Eli Cohenfa417f72010-10-24 21:08:52 -07002376
2377 if (!net_eq(dev_net(dev), &init_net))
2378 return NOTIFY_DONE;
2379
2380 ibdev = container_of(this, struct mlx4_ib_dev, iboe.nb);
Matan Barak9433c182014-05-15 15:29:28 +03002381 mlx4_ib_scan_netdevs(ibdev, dev, event);
Eli Cohenfa417f72010-10-24 21:08:52 -07002382
2383 return NOTIFY_DONE;
2384}
2385
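/* On an SR-IOV master, init_pkeys() seeds the virt2phys pkey map: the PF
 * (and index 0 of every function) gets the identity mapping while the other
 * VF entries point at the last slot of the physical pkey table, and the
 * physical pkey cache is primed with 0xFFFF at index 0.
 */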
Jack Morgenstein54679e12012-08-03 08:40:43 +00002386static void init_pkeys(struct mlx4_ib_dev *ibdev)
2387{
2388 int port;
2389 int slave;
2390 int i;
2391
2392 if (mlx4_is_master(ibdev->dev)) {
Yishai Hadas872bf2f2015-01-25 16:59:35 +02002393 for (slave = 0; slave <= ibdev->dev->persist->num_vfs;
2394 ++slave) {
Jack Morgenstein54679e12012-08-03 08:40:43 +00002395 for (port = 1; port <= ibdev->dev->caps.num_ports; ++port) {
2396 for (i = 0;
2397 i < ibdev->dev->phys_caps.pkey_phys_table_len[port];
2398 ++i) {
2399 ibdev->pkeys.virt2phys_pkey[slave][port - 1][i] =
2400 /* master has the identity virt2phys pkey mapping */
2401 (slave == mlx4_master_func_num(ibdev->dev) || !i) ? i :
2402 ibdev->dev->phys_caps.pkey_phys_table_len[port] - 1;
2403 mlx4_sync_pkey_table(ibdev->dev, slave, port, i,
2404 ibdev->pkeys.virt2phys_pkey[slave][port - 1][i]);
2405 }
2406 }
2407 }
2408 /* initialize pkey cache */
2409 for (port = 1; port <= ibdev->dev->caps.num_ports; ++port) {
2410 for (i = 0;
2411 i < ibdev->dev->phys_caps.pkey_phys_table_len[port];
2412 ++i)
2413 ibdev->pkeys.phys_pkey_cache[port-1][i] =
2414 (i) ? 0 : 0xFFFF;
2415 }
2416 }
2417}
2418
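/* mlx4_ib_alloc_eqs() tries to assign a dedicated completion vector per
 * port (skipping vectors that are shared with another port), stores the
 * result in ibdev->eq_table with -1 for unassigned slots, and advertises
 * only the successfully assigned count via ib_dev.num_comp_vectors.
 */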
Shlomo Pongratze605b742012-04-29 17:04:27 +03002419static void mlx4_ib_alloc_eqs(struct mlx4_dev *dev, struct mlx4_ib_dev *ibdev)
2420{
Matan Barakc66fa192015-05-31 09:30:16 +03002421 int i, j, eq = 0, total_eqs = 0;
Shlomo Pongratze605b742012-04-29 17:04:27 +03002422
Matan Barakc66fa192015-05-31 09:30:16 +03002423 ibdev->eq_table = kcalloc(dev->caps.num_comp_vectors,
2424 sizeof(ibdev->eq_table[0]), GFP_KERNEL);
Shlomo Pongratze605b742012-04-29 17:04:27 +03002425 if (!ibdev->eq_table)
2426 return;
2427
Matan Barakc66fa192015-05-31 09:30:16 +03002428 for (i = 1; i <= dev->caps.num_ports; i++) {
2429 for (j = 0; j < mlx4_get_eqs_per_port(dev, i);
2430 j++, total_eqs++) {
2431 if (i > 1 && mlx4_is_eq_shared(dev, total_eqs))
2432 continue;
2433 ibdev->eq_table[eq] = total_eqs;
2434 if (!mlx4_assign_eq(dev, i,
2435 &ibdev->eq_table[eq]))
2436 eq++;
2437 else
2438 ibdev->eq_table[eq] = -1;
Shlomo Pongratze605b742012-04-29 17:04:27 +03002439 }
2440 }
2441
Matan Barakc66fa192015-05-31 09:30:16 +03002442 for (i = eq; i < dev->caps.num_comp_vectors;
2443 ibdev->eq_table[i++] = -1)
2444 ;
Shlomo Pongratze605b742012-04-29 17:04:27 +03002445
2446 /* Advertise the new number of EQs to clients */
Matan Barakc66fa192015-05-31 09:30:16 +03002447 ibdev->ib_dev.num_comp_vectors = eq;
Shlomo Pongratze605b742012-04-29 17:04:27 +03002448}
2449
2450static void mlx4_ib_free_eqs(struct mlx4_dev *dev, struct mlx4_ib_dev *ibdev)
2451{
2452 int i;
Matan Barakc66fa192015-05-31 09:30:16 +03002453 int total_eqs = ibdev->ib_dev.num_comp_vectors;
Shlomo Pongratz3aac6ff2012-05-24 16:08:07 +03002454
Matan Barakc66fa192015-05-31 09:30:16 +03002455 /* no eqs were allocated */
Shlomo Pongratz3aac6ff2012-05-24 16:08:07 +03002456 if (!ibdev->eq_table)
2457 return;
Shlomo Pongratze605b742012-04-29 17:04:27 +03002458
2459 /* Reset the advertised EQ number */
Matan Barakc66fa192015-05-31 09:30:16 +03002460 ibdev->ib_dev.num_comp_vectors = 0;
Shlomo Pongratze605b742012-04-29 17:04:27 +03002461
Matan Barakc66fa192015-05-31 09:30:16 +03002462 for (i = 0; i < total_eqs; i++)
Shlomo Pongratze605b742012-04-29 17:04:27 +03002463 mlx4_release_eq(dev, ibdev->eq_table[i]);
Shlomo Pongratze605b742012-04-29 17:04:27 +03002464
Shlomo Pongratze605b742012-04-29 17:04:27 +03002465 kfree(ibdev->eq_table);
Matan Barakc66fa192015-05-31 09:30:16 +03002466 ibdev->eq_table = NULL;
Shlomo Pongratze605b742012-04-29 17:04:27 +03002467}
2468
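/* The immutable port attributes advertise plain IBA for InfiniBand ports
 * and RoCE v1 (plus v2 when MLX4_DEV_CAP_FLAG2_ROCE_V1_V2 is set) together
 * with raw packet support for Ethernet ports; pkey and gid table sizes are
 * copied from ib_query_port().
 */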
Ira Weiny77386132015-05-13 20:02:58 -04002469static int mlx4_port_immutable(struct ib_device *ibdev, u8 port_num,
2470 struct ib_port_immutable *immutable)
2471{
2472 struct ib_port_attr attr;
Matan Barak4ed088e2016-01-14 17:50:43 +02002473 struct mlx4_ib_dev *mdev = to_mdev(ibdev);
Ira Weiny77386132015-05-13 20:02:58 -04002474 int err;
2475
Matan Barak4ed088e2016-01-14 17:50:43 +02002476 if (mlx4_ib_port_link_layer(ibdev, port_num) == IB_LINK_LAYER_INFINIBAND) {
Ira Weinyf9b22e32015-05-13 20:02:59 -04002477 immutable->core_cap_flags = RDMA_CORE_PORT_IBA_IB;
Or Gerlitzbc63f9d2017-01-24 13:02:37 +02002478 immutable->max_mad_size = IB_MGMT_MAD_SIZE;
Matan Barak4ed088e2016-01-14 17:50:43 +02002479 } else {
2480 if (mdev->dev->caps.flags & MLX4_DEV_CAP_FLAG_IBOE)
2481 immutable->core_cap_flags = RDMA_CORE_PORT_IBA_ROCE;
2482 if (mdev->dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_ROCE_V1_V2)
2483 immutable->core_cap_flags = RDMA_CORE_PORT_IBA_ROCE |
2484 RDMA_CORE_PORT_IBA_ROCE_UDP_ENCAP;
Or Gerlitzbc63f9d2017-01-24 13:02:37 +02002485 immutable->core_cap_flags |= RDMA_CORE_PORT_RAW_PACKET;
2486 if (immutable->core_cap_flags & (RDMA_CORE_PORT_IBA_ROCE |
2487 RDMA_CORE_PORT_IBA_ROCE_UDP_ENCAP))
2488 immutable->max_mad_size = IB_MGMT_MAD_SIZE;
Matan Barak4ed088e2016-01-14 17:50:43 +02002489 }
Ira Weinyf9b22e32015-05-13 20:02:59 -04002490
Or Gerlitzc4550c62017-01-24 13:02:39 +02002491 err = ib_query_port(ibdev, port_num, &attr);
2492 if (err)
2493 return err;
2494
2495 immutable->pkey_tbl_len = attr.pkey_tbl_len;
2496 immutable->gid_tbl_len = attr.gid_tbl_len;
2497
Ira Weiny77386132015-05-13 20:02:58 -04002498 return 0;
2499}
2500
Leon Romanovsky9abb0d12017-06-27 16:49:53 +03002501static void get_fw_ver_str(struct ib_device *device, char *str)
Ira Weinye9db59f2016-06-15 02:22:00 -04002502{
2503 struct mlx4_ib_dev *dev =
2504 container_of(device, struct mlx4_ib_dev, ib_dev);
Leon Romanovsky9abb0d12017-06-27 16:49:53 +03002505 snprintf(str, IB_FW_VERSION_NAME_MAX, "%d.%d.%d",
Ira Weinye9db59f2016-06-15 02:22:00 -04002506 (int) (dev->dev->caps.fw_ver >> 32),
2507 (int) (dev->dev->caps.fw_ver >> 16) & 0xffff,
2508 (int) dev->dev->caps.fw_ver & 0xffff);
2509}
2510
Kamal Heib4725c4b2018-12-10 21:09:37 +02002511static const struct ib_device_ops mlx4_ib_dev_ops = {
2512 .add_gid = mlx4_ib_add_gid,
2513 .alloc_mr = mlx4_ib_alloc_mr,
2514 .alloc_pd = mlx4_ib_alloc_pd,
2515 .alloc_ucontext = mlx4_ib_alloc_ucontext,
2516 .attach_mcast = mlx4_ib_mcg_attach,
2517 .create_ah = mlx4_ib_create_ah,
2518 .create_cq = mlx4_ib_create_cq,
2519 .create_qp = mlx4_ib_create_qp,
2520 .create_srq = mlx4_ib_create_srq,
2521 .dealloc_pd = mlx4_ib_dealloc_pd,
2522 .dealloc_ucontext = mlx4_ib_dealloc_ucontext,
2523 .del_gid = mlx4_ib_del_gid,
2524 .dereg_mr = mlx4_ib_dereg_mr,
2525 .destroy_ah = mlx4_ib_destroy_ah,
2526 .destroy_cq = mlx4_ib_destroy_cq,
2527 .destroy_qp = mlx4_ib_destroy_qp,
2528 .destroy_srq = mlx4_ib_destroy_srq,
2529 .detach_mcast = mlx4_ib_mcg_detach,
2530 .disassociate_ucontext = mlx4_ib_disassociate_ucontext,
2531 .drain_rq = mlx4_ib_drain_rq,
2532 .drain_sq = mlx4_ib_drain_sq,
2533 .get_dev_fw_str = get_fw_ver_str,
2534 .get_dma_mr = mlx4_ib_get_dma_mr,
2535 .get_link_layer = mlx4_ib_port_link_layer,
2536 .get_netdev = mlx4_ib_get_netdev,
2537 .get_port_immutable = mlx4_port_immutable,
2538 .map_mr_sg = mlx4_ib_map_mr_sg,
2539 .mmap = mlx4_ib_mmap,
2540 .modify_cq = mlx4_ib_modify_cq,
2541 .modify_device = mlx4_ib_modify_device,
2542 .modify_port = mlx4_ib_modify_port,
2543 .modify_qp = mlx4_ib_modify_qp,
2544 .modify_srq = mlx4_ib_modify_srq,
2545 .poll_cq = mlx4_ib_poll_cq,
2546 .post_recv = mlx4_ib_post_recv,
2547 .post_send = mlx4_ib_post_send,
2548 .post_srq_recv = mlx4_ib_post_srq_recv,
2549 .process_mad = mlx4_ib_process_mad,
2550 .query_ah = mlx4_ib_query_ah,
2551 .query_device = mlx4_ib_query_device,
2552 .query_gid = mlx4_ib_query_gid,
2553 .query_pkey = mlx4_ib_query_pkey,
2554 .query_port = mlx4_ib_query_port,
2555 .query_qp = mlx4_ib_query_qp,
2556 .query_srq = mlx4_ib_query_srq,
2557 .reg_user_mr = mlx4_ib_reg_user_mr,
2558 .req_notify_cq = mlx4_ib_arm_cq,
2559 .rereg_user_mr = mlx4_ib_rereg_user_mr,
2560 .resize_cq = mlx4_ib_resize_cq,
Leon Romanovskyd3456912019-04-03 16:42:42 +03002561
2562 INIT_RDMA_OBJ_SIZE(ib_ah, mlx4_ib_ah, ibah),
Leon Romanovsky21a428a2019-02-03 14:55:51 +02002563 INIT_RDMA_OBJ_SIZE(ib_pd, mlx4_ib_pd, ibpd),
Leon Romanovsky68e326d2019-04-03 16:42:43 +03002564 INIT_RDMA_OBJ_SIZE(ib_srq, mlx4_ib_srq, ibsrq),
Leon Romanovskya2a074e2019-02-12 20:39:16 +02002565 INIT_RDMA_OBJ_SIZE(ib_ucontext, mlx4_ib_ucontext, ibucontext),
Kamal Heib4725c4b2018-12-10 21:09:37 +02002566};
2567
2568static const struct ib_device_ops mlx4_ib_dev_wq_ops = {
2569 .create_rwq_ind_table = mlx4_ib_create_rwq_ind_table,
2570 .create_wq = mlx4_ib_create_wq,
2571 .destroy_rwq_ind_table = mlx4_ib_destroy_rwq_ind_table,
2572 .destroy_wq = mlx4_ib_destroy_wq,
2573 .modify_wq = mlx4_ib_modify_wq,
2574};
2575
2576static const struct ib_device_ops mlx4_ib_dev_fmr_ops = {
2577 .alloc_fmr = mlx4_ib_fmr_alloc,
2578 .dealloc_fmr = mlx4_ib_fmr_dealloc,
2579 .map_phys_fmr = mlx4_ib_map_phys_fmr,
2580 .unmap_fmr = mlx4_ib_unmap_fmr,
2581};
2582
2583static const struct ib_device_ops mlx4_ib_dev_mw_ops = {
2584 .alloc_mw = mlx4_ib_alloc_mw,
2585 .dealloc_mw = mlx4_ib_dealloc_mw,
2586};
2587
2588static const struct ib_device_ops mlx4_ib_dev_xrc_ops = {
2589 .alloc_xrcd = mlx4_ib_alloc_xrcd,
2590 .dealloc_xrcd = mlx4_ib_dealloc_xrcd,
2591};
2592
2593static const struct ib_device_ops mlx4_ib_dev_fs_ops = {
2594 .create_flow = mlx4_ib_create_flow,
2595 .destroy_flow = mlx4_ib_destroy_flow,
2596};
2597
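/* mlx4_ib_add() performs the per-HCA setup: it allocates the ib_device,
 * wires in the base mlx4_ib_dev_ops, and layers the WQ/RSS, FMR,
 * memory-window, XRC and flow-steering op tables on top only when the
 * corresponding device capabilities are present, before going on to set up
 * UARs, counters and EQs.
 */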
Roland Dreier225c7b12007-05-08 18:00:38 -07002598static void *mlx4_ib_add(struct mlx4_dev *dev)
2599{
2600 struct mlx4_ib_dev *ibdev;
Roland Dreier22e7ef92009-01-09 13:22:29 -08002601 int num_ports = 0;
Jack Morgenstein035b1032012-05-10 23:28:09 +03002602 int i, j;
Eli Cohenfa417f72010-10-24 21:08:52 -07002603 int err;
2604 struct mlx4_ib_iboe *iboe;
Matan Barak41966702014-02-02 17:06:47 +02002605 int ib_num_ports = 0;
Moni Shouaa5750092015-02-03 16:48:37 +02002606 int num_req_counters;
Eran Ben Elishac3abb512015-06-15 17:59:03 +03002607 int allocated;
2608 u32 counter_index;
Eran Ben Elisha3ba8e312015-10-15 14:44:40 +03002609 struct counter_index *new_counter_index = NULL;
Roland Dreier225c7b12007-05-08 18:00:38 -07002610
Shlomo Pongratz987c8f82012-04-29 17:04:26 +03002611 pr_info_once("%s", mlx4_ib_version);
Roland Dreier68f39482008-02-04 20:20:44 -08002612
Jack Morgenstein026149c2012-08-03 08:40:55 +00002613 num_ports = 0;
Eli Cohenfa417f72010-10-24 21:08:52 -07002614 mlx4_foreach_ib_transport_port(i, dev)
Roland Dreier22e7ef92009-01-09 13:22:29 -08002615 num_ports++;
2616
2617 /* No point in registering a device with no ports... */
2618 if (num_ports == 0)
2619 return NULL;
2620
Leon Romanovsky459cc692019-01-30 12:49:11 +02002621 ibdev = ib_alloc_device(mlx4_ib_dev, ib_dev);
Roland Dreier225c7b12007-05-08 18:00:38 -07002622 if (!ibdev) {
Yishai Hadas872bf2f2015-01-25 16:59:35 +02002623 dev_err(&dev->persist->pdev->dev,
2624 "Device struct alloc failed\n");
Roland Dreier225c7b12007-05-08 18:00:38 -07002625 return NULL;
2626 }
2627
Eli Cohenfa417f72010-10-24 21:08:52 -07002628 iboe = &ibdev->iboe;
2629
Roland Dreier225c7b12007-05-08 18:00:38 -07002630 if (mlx4_pd_alloc(dev, &ibdev->priv_pdn))
2631 goto err_dealloc;
2632
2633 if (mlx4_uar_alloc(dev, &ibdev->priv_uar))
2634 goto err_pd;
2635
Roland Dreier4979d182011-01-12 09:50:36 -08002636 ibdev->uar_map = ioremap((phys_addr_t) ibdev->priv_uar.pfn << PAGE_SHIFT,
2637 PAGE_SIZE);
Roland Dreier225c7b12007-05-08 18:00:38 -07002638 if (!ibdev->uar_map)
2639 goto err_uar;
Jack Morgenstein26c6bc72007-05-13 17:18:23 +03002640 MLX4_INIT_DOORBELL_LOCK(&ibdev->uar_lock);
Roland Dreier225c7b12007-05-08 18:00:38 -07002641
Roland Dreier225c7b12007-05-08 18:00:38 -07002642 ibdev->dev = dev;
Moni Shouac6215742015-02-03 16:48:39 +02002643 ibdev->bond_next_port = 0;
Roland Dreier225c7b12007-05-08 18:00:38 -07002644
Roland Dreier225c7b12007-05-08 18:00:38 -07002645 ibdev->ib_dev.owner = THIS_MODULE;
2646 ibdev->ib_dev.node_type = RDMA_NODE_IB_CA;
Roland Dreier95d04f02008-07-23 08:12:26 -07002647 ibdev->ib_dev.local_dma_lkey = dev->caps.reserved_lkey;
Roland Dreier22e7ef92009-01-09 13:22:29 -08002648 ibdev->num_ports = num_ports;
Moni Shouaa5750092015-02-03 16:48:37 +02002649 ibdev->ib_dev.phys_port_cnt = mlx4_is_bonded(dev) ?
2650 1 : ibdev->num_ports;
Yevgeny Petrilinb8dd7862008-12-22 07:15:03 -08002651 ibdev->ib_dev.num_comp_vectors = dev->caps.num_comp_vectors;
Bart Van Assched66c88a82017-01-20 13:04:20 -08002652 ibdev->ib_dev.dev.parent = &dev->persist->pdev->dev;
Roland Dreier225c7b12007-05-08 18:00:38 -07002653
Or Gerlitz08ff3232012-10-21 14:59:24 +00002654 if (dev->caps.userspace_caps)
2655 ibdev->ib_dev.uverbs_abi_ver = MLX4_IB_UVERBS_ABI_VERSION;
2656 else
2657 ibdev->ib_dev.uverbs_abi_ver = MLX4_IB_UVERBS_NO_DEV_CAPS_ABI_VERSION;
2658
Roland Dreier225c7b12007-05-08 18:00:38 -07002659 ibdev->ib_dev.uverbs_cmd_mask =
2660 (1ull << IB_USER_VERBS_CMD_GET_CONTEXT) |
2661 (1ull << IB_USER_VERBS_CMD_QUERY_DEVICE) |
2662 (1ull << IB_USER_VERBS_CMD_QUERY_PORT) |
2663 (1ull << IB_USER_VERBS_CMD_ALLOC_PD) |
2664 (1ull << IB_USER_VERBS_CMD_DEALLOC_PD) |
2665 (1ull << IB_USER_VERBS_CMD_REG_MR) |
Matan Barak93769322014-07-31 11:01:30 +03002666 (1ull << IB_USER_VERBS_CMD_REREG_MR) |
Roland Dreier225c7b12007-05-08 18:00:38 -07002667 (1ull << IB_USER_VERBS_CMD_DEREG_MR) |
2668 (1ull << IB_USER_VERBS_CMD_CREATE_COMP_CHANNEL) |
2669 (1ull << IB_USER_VERBS_CMD_CREATE_CQ) |
Vladimir Sokolovskybbf8eed12008-04-16 21:09:33 -07002670 (1ull << IB_USER_VERBS_CMD_RESIZE_CQ) |
Roland Dreier225c7b12007-05-08 18:00:38 -07002671 (1ull << IB_USER_VERBS_CMD_DESTROY_CQ) |
2672 (1ull << IB_USER_VERBS_CMD_CREATE_QP) |
2673 (1ull << IB_USER_VERBS_CMD_MODIFY_QP) |
Jack Morgenstein6a775e22007-06-21 12:27:47 +03002674 (1ull << IB_USER_VERBS_CMD_QUERY_QP) |
Roland Dreier225c7b12007-05-08 18:00:38 -07002675 (1ull << IB_USER_VERBS_CMD_DESTROY_QP) |
2676 (1ull << IB_USER_VERBS_CMD_ATTACH_MCAST) |
2677 (1ull << IB_USER_VERBS_CMD_DETACH_MCAST) |
2678 (1ull << IB_USER_VERBS_CMD_CREATE_SRQ) |
2679 (1ull << IB_USER_VERBS_CMD_MODIFY_SRQ) |
Jack Morgenstein65541cb2007-06-21 13:03:11 +03002680 (1ull << IB_USER_VERBS_CMD_QUERY_SRQ) |
Sean Hefty18abd5e2011-06-02 10:43:26 -07002681 (1ull << IB_USER_VERBS_CMD_DESTROY_SRQ) |
Sean Hefty42849b22011-08-11 13:57:43 -07002682 (1ull << IB_USER_VERBS_CMD_CREATE_XSRQ) |
2683 (1ull << IB_USER_VERBS_CMD_OPEN_QP);
Roland Dreier225c7b12007-05-08 18:00:38 -07002684
Kamal Heib4725c4b2018-12-10 21:09:37 +02002685 ib_set_device_ops(&ibdev->ib_dev, &mlx4_ib_dev_ops);
Yonatan Cohen34d9a272017-11-13 10:51:14 +02002686 ibdev->ib_dev.uverbs_ex_cmd_mask |=
Kamal Heib4725c4b2018-12-10 21:09:37 +02002687 (1ull << IB_USER_VERBS_EX_CMD_MODIFY_CQ) |
2688 (1ull << IB_USER_VERBS_EX_CMD_QUERY_DEVICE) |
2689 (1ull << IB_USER_VERBS_EX_CMD_CREATE_CQ) |
2690 (1ull << IB_USER_VERBS_EX_CMD_CREATE_QP);
Yonatan Cohen34d9a272017-11-13 10:51:14 +02002691
Guy Levi400b1eb2017-07-04 16:24:24 +03002692 if ((dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_RSS) &&
2693 ((mlx4_ib_port_link_layer(&ibdev->ib_dev, 1) ==
2694 IB_LINK_LAYER_ETHERNET) ||
2695 (mlx4_ib_port_link_layer(&ibdev->ib_dev, 2) ==
2696 IB_LINK_LAYER_ETHERNET))) {
Guy Levi400b1eb2017-07-04 16:24:24 +03002697 ibdev->ib_dev.uverbs_ex_cmd_mask |=
Guy Levib8d46ca2017-07-04 16:24:25 +03002698 (1ull << IB_USER_VERBS_EX_CMD_CREATE_WQ) |
2699 (1ull << IB_USER_VERBS_EX_CMD_MODIFY_WQ) |
2700 (1ull << IB_USER_VERBS_EX_CMD_DESTROY_WQ) |
2701 (1ull << IB_USER_VERBS_EX_CMD_CREATE_RWQ_IND_TBL) |
2702 (1ull << IB_USER_VERBS_EX_CMD_DESTROY_RWQ_IND_TBL);
Kamal Heib4725c4b2018-12-10 21:09:37 +02002703 ib_set_device_ops(&ibdev->ib_dev, &mlx4_ib_dev_wq_ops);
Guy Levi400b1eb2017-07-04 16:24:24 +03002704 }
2705
Kamal Heib4725c4b2018-12-10 21:09:37 +02002706 if (!mlx4_is_slave(ibdev->dev))
2707 ib_set_device_ops(&ibdev->ib_dev, &mlx4_ib_dev_fmr_ops);
Jack Morgenstein8ad11fb2007-08-01 12:29:05 +03002708
Shani Michaelib4253882013-02-06 16:19:16 +00002709 if (dev->caps.flags & MLX4_DEV_CAP_FLAG_MEM_WINDOW ||
2710 dev->caps.bmme_flags & MLX4_BMME_FLAG_TYPE_2_WIN) {
Shani Michaelib4253882013-02-06 16:19:16 +00002711 ibdev->ib_dev.uverbs_cmd_mask |=
2712 (1ull << IB_USER_VERBS_CMD_ALLOC_MW) |
2713 (1ull << IB_USER_VERBS_CMD_DEALLOC_MW);
Kamal Heib4725c4b2018-12-10 21:09:37 +02002714 ib_set_device_ops(&ibdev->ib_dev, &mlx4_ib_dev_mw_ops);
Shani Michaelib4253882013-02-06 16:19:16 +00002715 }
2716
Sean Hefty012a8ff2011-06-02 09:01:33 -07002717 if (dev->caps.flags & MLX4_DEV_CAP_FLAG_XRC) {
Sean Hefty012a8ff2011-06-02 09:01:33 -07002718 ibdev->ib_dev.uverbs_cmd_mask |=
2719 (1ull << IB_USER_VERBS_CMD_OPEN_XRCD) |
2720 (1ull << IB_USER_VERBS_CMD_CLOSE_XRCD);
Kamal Heib4725c4b2018-12-10 21:09:37 +02002721 ib_set_device_ops(&ibdev->ib_dev, &mlx4_ib_dev_xrc_ops);
Sean Hefty012a8ff2011-06-02 09:01:33 -07002722 }
2723
Hadar Hen Zionf77c0162013-08-14 13:58:31 +03002724 if (check_flow_steering_support(dev)) {
Matan Barak0a9b7d52013-11-07 15:25:15 +02002725 ibdev->steering_support = MLX4_STEERING_MODE_DEVICE_MANAGED;
Yann Droneaudf21519b2013-11-06 23:21:49 +01002726 ibdev->ib_dev.uverbs_ex_cmd_mask |=
2727 (1ull << IB_USER_VERBS_EX_CMD_CREATE_FLOW) |
2728 (1ull << IB_USER_VERBS_EX_CMD_DESTROY_FLOW);
Kamal Heib4725c4b2018-12-10 21:09:37 +02002729 ib_set_device_ops(&ibdev->ib_dev, &mlx4_ib_dev_fs_ops);
Hadar Hen Zionf77c0162013-08-14 13:58:31 +03002730 }
2731
Shlomo Pongratze605b742012-04-29 17:04:27 +03002732 mlx4_ib_alloc_eqs(dev, ibdev);
2733
Eli Cohenfa417f72010-10-24 21:08:52 -07002734 spin_lock_init(&iboe->lock);
2735
Roland Dreier225c7b12007-05-08 18:00:38 -07002736 if (init_node_data(ibdev))
2737 goto err_map;
Jack Morgensteinfd10ed82016-09-12 19:16:21 +03002738 mlx4_init_sl2vl_tbl(ibdev);
Roland Dreier225c7b12007-05-08 18:00:38 -07002739
Eran Ben Elisha3ba8e312015-10-15 14:44:40 +03002740 for (i = 0; i < ibdev->num_ports; ++i) {
2741 mutex_init(&ibdev->counters_table[i].mutex);
2742 INIT_LIST_HEAD(&ibdev->counters_table[i].counters_list);
Ursula Braunfc6526f2018-11-12 12:41:55 +01002743 iboe->last_port_state[i] = IB_PORT_DOWN;
Eran Ben Elisha3ba8e312015-10-15 14:44:40 +03002744 }
2745
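	/*
	 * Per-port counter setup: RoCE (Ethernet) ports try to allocate a
	 * dedicated counter and fall back to the port's default counter if
	 * allocation fails; IB ports always use the default counter.
	 */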
Moni Shouaa5750092015-02-03 16:48:37 +02002746 num_req_counters = mlx4_is_bonded(dev) ? 1 : ibdev->num_ports;
2747 for (i = 0; i < num_req_counters; ++i) {
Matan Barak9433c182014-05-15 15:29:28 +03002748 mutex_init(&ibdev->qp1_proxy_lock[i]);
Eran Ben Elishac3abb512015-06-15 17:59:03 +03002749 allocated = 0;
Or Gerlitzcfcde112011-06-15 14:49:57 +00002750 if (mlx4_ib_port_link_layer(&ibdev->ib_dev, i + 1) ==
2751 IB_LINK_LAYER_ETHERNET) {
Moshe Shemeshf3301872017-06-21 09:29:36 +03002752 err = mlx4_counter_alloc(ibdev->dev, &counter_index,
2753 MLX4_RES_USAGE_DRIVER);
Eran Ben Elishac3abb512015-06-15 17:59:03 +03002754			/* if we failed to allocate a new counter, use the default */
Or Gerlitzcfcde112011-06-15 14:49:57 +00002755 if (err)
Eran Ben Elishac3abb512015-06-15 17:59:03 +03002756 counter_index =
2757 mlx4_get_default_counter_index(dev,
2758 i + 1);
2759 else
2760 allocated = 1;
2761 		} else { /* IB_LINK_LAYER_INFINIBAND uses the default counter */
2762 counter_index = mlx4_get_default_counter_index(dev,
2763 i + 1);
Dan Carpenter3839d8a2014-03-28 11:21:39 +03002764 }
Eran Ben Elisha3ba8e312015-10-15 14:44:40 +03002765 new_counter_index = kmalloc(sizeof(*new_counter_index),
2766 GFP_KERNEL);
2767 if (!new_counter_index) {
2768 if (allocated)
2769 mlx4_counter_free(ibdev->dev, counter_index);
2770 goto err_counter;
2771 }
2772 new_counter_index->index = counter_index;
2773 new_counter_index->allocated = allocated;
2774 list_add_tail(&new_counter_index->list,
2775 &ibdev->counters_table[i].counters_list);
2776 ibdev->counters_table[i].default_counter = counter_index;
Eran Ben Elishac3abb512015-06-15 17:59:03 +03002777 pr_info("counter index %d for port %d allocated %d\n",
2778 counter_index, i + 1, allocated);
Or Gerlitzcfcde112011-06-15 14:49:57 +00002779 }
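	/*
	 * In bonded mode only one counter was set up above; the remaining
	 * ports reuse that same counter index (never freed by them, since
	 * allocated is left at 0).
	 */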
Moni Shouaa5750092015-02-03 16:48:37 +02002780 if (mlx4_is_bonded(dev))
Eran Ben Elishac3abb512015-06-15 17:59:03 +03002781 for (i = 1; i < ibdev->num_ports ; ++i) {
Eran Ben Elisha3ba8e312015-10-15 14:44:40 +03002782 new_counter_index =
2783 kmalloc(sizeof(struct counter_index),
2784 GFP_KERNEL);
2785 if (!new_counter_index)
2786 goto err_counter;
2787 new_counter_index->index = counter_index;
2788 new_counter_index->allocated = 0;
2789 list_add_tail(&new_counter_index->list,
2790 &ibdev->counters_table[i].counters_list);
2791 ibdev->counters_table[i].default_counter =
2792 counter_index;
Eran Ben Elishac3abb512015-06-15 17:59:03 +03002793 }
Or Gerlitzcfcde112011-06-15 14:49:57 +00002794
Matan Barak41966702014-02-02 17:06:47 +02002795 mlx4_foreach_port(i, dev, MLX4_PORT_TYPE_IB)
2796 ib_num_ports++;
2797
Roland Dreier225c7b12007-05-08 18:00:38 -07002798 spin_lock_init(&ibdev->sm_lock);
2799 mutex_init(&ibdev->cap_mask_mutex);
Yishai Hadas35f05da2015-02-08 11:49:34 +02002800 INIT_LIST_HEAD(&ibdev->qp_list);
2801 spin_lock_init(&ibdev->reset_flow_resource_lock);
Roland Dreier225c7b12007-05-08 18:00:38 -07002802
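	/*
	 * With device-managed flow steering and at least one IB port,
	 * reserve a dedicated range of QP numbers plus a tracking bitmap;
	 * mlx4_ib_steer_qp_alloc() and mlx4_ib_steer_qp_free() further down
	 * carve aligned regions out of this range.
	 */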
Matan Barak41966702014-02-02 17:06:47 +02002803 if (ibdev->steering_support == MLX4_STEERING_MODE_DEVICE_MANAGED &&
2804 ib_num_ports) {
Matan Barakc1c98502013-11-07 15:25:17 +02002805 ibdev->steer_qpn_count = MLX4_IB_UC_MAX_NUM_QPS;
2806 err = mlx4_qp_reserve_range(dev, ibdev->steer_qpn_count,
2807 MLX4_IB_UC_STEER_QPN_ALIGN,
Moshe Shemeshf3301872017-06-21 09:29:36 +03002808 &ibdev->steer_qpn_base, 0,
2809 MLX4_RES_USAGE_DRIVER);
Matan Barakc1c98502013-11-07 15:25:17 +02002810 if (err)
2811 goto err_counter;
2812
2813 ibdev->ib_uc_qpns_bitmap =
Kees Cook6da2ec52018-06-12 13:55:00 -07002814 kmalloc_array(BITS_TO_LONGS(ibdev->steer_qpn_count),
2815 sizeof(long),
2816 GFP_KERNEL);
Leon Romanovsky15d46262016-11-03 16:44:12 +02002817 if (!ibdev->ib_uc_qpns_bitmap)
Matan Barakc1c98502013-11-07 15:25:17 +02002818 goto err_steer_qp_release;
Matan Barakc1c98502013-11-07 15:25:17 +02002819
Eran Ben Elisha1f22e452016-11-10 11:31:00 +02002820 if (dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_DMFS_IPOIB) {
2821 bitmap_zero(ibdev->ib_uc_qpns_bitmap,
2822 ibdev->steer_qpn_count);
2823 err = mlx4_FLOW_STEERING_IB_UC_QP_RANGE(
2824 dev, ibdev->steer_qpn_base,
2825 ibdev->steer_qpn_base +
2826 ibdev->steer_qpn_count - 1);
2827 if (err)
2828 goto err_steer_free_bitmap;
2829 } else {
2830 bitmap_fill(ibdev->ib_uc_qpns_bitmap,
2831 ibdev->steer_qpn_count);
2832 }
Matan Barakc1c98502013-11-07 15:25:17 +02002833 }
2834
Jack Morgenstein3e0629c2014-09-11 14:11:17 +03002835 for (j = 1; j <= ibdev->dev->caps.num_ports; j++)
2836 atomic64_set(&iboe->mac[j - 1], ibdev->dev->caps.def_mac[j]);
2837
Mark Bloch3f85f2a2016-07-19 20:54:58 +03002838 if (mlx4_ib_alloc_diag_counters(ibdev))
Matan Barakc1c98502013-11-07 15:25:17 +02002839 goto err_steer_free_bitmap;
Roland Dreier225c7b12007-05-08 18:00:38 -07002840
Parav Pandit508a5232018-10-11 22:31:54 +03002841 rdma_set_device_sysfs_group(&ibdev->ib_dev, &mlx4_attr_group);
Matan Barak0ede73b2018-03-19 15:02:34 +02002842 ibdev->ib_dev.driver_id = RDMA_DRIVER_MLX4;
Parav Panditea4baf72018-12-18 14:28:30 +02002843 if (ib_register_device(&ibdev->ib_dev, "mlx4_%d"))
Mark Bloch3f85f2a2016-07-19 20:54:58 +03002844 goto err_diag_counters;
2845
Roland Dreier225c7b12007-05-08 18:00:38 -07002846 if (mlx4_ib_mad_init(ibdev))
2847 goto err_reg;
2848
Jack Morgensteinfc065732012-08-03 08:40:42 +00002849 if (mlx4_ib_init_sriov(ibdev))
2850 goto err_mad;
2851
Majd Dibbinydd77abf2017-03-19 11:01:28 +02002852 if (!iboe->nb.notifier_call) {
2853 iboe->nb.notifier_call = mlx4_ib_netdev_event;
2854 err = register_netdevice_notifier(&iboe->nb);
2855 if (err) {
2856 iboe->nb.notifier_call = NULL;
2857 goto err_notif;
Moni Shouad487ee72013-12-12 18:03:13 +02002858 }
Majd Dibbinydd77abf2017-03-19 11:01:28 +02002859 }
2860 if (dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_ROCE_V1_V2) {
2861 err = mlx4_config_roce_v2_port(dev, ROCE_V2_UDP_DPORT);
2862 if (err)
2863 goto err_notif;
Eli Cohenfa417f72010-10-24 21:08:52 -07002864 }
2865
Jack Morgenstein3b4a8cd2009-09-05 20:24:50 -07002866 ibdev->ib_active = true;
Jiri Pirko09d4d082016-02-26 17:32:24 +01002867 mlx4_foreach_port(i, dev, MLX4_PORT_TYPE_IB)
2868 devlink_port_type_ib_set(mlx4_get_devlink_port(dev, i),
2869 &ibdev->ib_dev);
Jack Morgenstein3b4a8cd2009-09-05 20:24:50 -07002870
Jack Morgenstein54679e12012-08-03 08:40:43 +00002871 if (mlx4_is_mfunc(ibdev->dev))
2872 init_pkeys(ibdev);
2873
Jack Morgenstein3806d082012-08-03 08:40:58 +00002874 /* create paravirt contexts for any VFs which are active */
2875 if (mlx4_is_master(ibdev->dev)) {
2876 for (j = 0; j < MLX4_MFUNC_MAX; j++) {
2877 if (j == mlx4_master_func_num(ibdev->dev))
2878 continue;
2879 if (mlx4_is_slave_active(ibdev->dev, j))
2880 do_slave_init(ibdev, j, 1);
2881 }
2882 }
Roland Dreier225c7b12007-05-08 18:00:38 -07002883 return ibdev;
2884
Eli Cohenfa417f72010-10-24 21:08:52 -07002885err_notif:
Moni Shouad487ee72013-12-12 18:03:13 +02002886 if (ibdev->iboe.nb.notifier_call) {
2887 if (unregister_netdevice_notifier(&ibdev->iboe.nb))
2888 pr_warn("failure unregistering notifier\n");
2889 ibdev->iboe.nb.notifier_call = NULL;
2890 }
Eli Cohenfa417f72010-10-24 21:08:52 -07002891 flush_workqueue(wq);
2892
Jack Morgensteinfc065732012-08-03 08:40:42 +00002893 mlx4_ib_close_sriov(ibdev);
2894
2895err_mad:
2896 mlx4_ib_mad_cleanup(ibdev);
2897
Roland Dreier225c7b12007-05-08 18:00:38 -07002898err_reg:
2899 ib_unregister_device(&ibdev->ib_dev);
2900
Mark Bloch3f85f2a2016-07-19 20:54:58 +03002901err_diag_counters:
2902 mlx4_ib_diag_cleanup(ibdev);
2903
Matan Barakc1c98502013-11-07 15:25:17 +02002904err_steer_free_bitmap:
2905 kfree(ibdev->ib_uc_qpns_bitmap);
2906
2907err_steer_qp_release:
Jack Morgenstein852f6922018-01-12 07:58:40 +02002908 mlx4_qp_release_range(dev, ibdev->steer_qpn_base,
2909 ibdev->steer_qpn_count);
Or Gerlitzcfcde112011-06-15 14:49:57 +00002910err_counter:
Eran Ben Elisha3ba8e312015-10-15 14:44:40 +03002911 for (i = 0; i < ibdev->num_ports; ++i)
2912 mlx4_ib_delete_counters_table(ibdev, &ibdev->counters_table[i]);
2913
Roland Dreier225c7b12007-05-08 18:00:38 -07002914err_map:
Jack Morgenstein99e68909d2017-03-21 12:57:05 +02002915 mlx4_ib_free_eqs(dev, ibdev);
Roland Dreier225c7b12007-05-08 18:00:38 -07002916 iounmap(ibdev->uar_map);
2917
2918err_uar:
2919 mlx4_uar_free(dev, &ibdev->priv_uar);
2920
2921err_pd:
2922 mlx4_pd_free(dev, ibdev->priv_pdn);
2923
2924err_dealloc:
2925 ib_dealloc_device(&ibdev->ib_dev);
2926
2927 return NULL;
2928}
2929
Matan Barakc1c98502013-11-07 15:25:17 +02002930int mlx4_ib_steer_qp_alloc(struct mlx4_ib_dev *dev, int count, int *qpn)
2931{
2932 int offset;
2933
2934 WARN_ON(!dev->ib_uc_qpns_bitmap);
2935
2936 offset = bitmap_find_free_region(dev->ib_uc_qpns_bitmap,
2937 dev->steer_qpn_count,
2938 get_count_order(count));
2939 if (offset < 0)
2940 return offset;
2941
2942 *qpn = dev->steer_qpn_base + offset;
2943 return 0;
2944}
2945
2946void mlx4_ib_steer_qp_free(struct mlx4_ib_dev *dev, u32 qpn, int count)
2947{
2948 if (!qpn ||
2949 dev->steering_support != MLX4_STEERING_MODE_DEVICE_MANAGED)
2950 return;
2951
Leon Romanovskyf77f3032018-05-29 14:56:13 +03002952 if (WARN(qpn < dev->steer_qpn_base, "qpn = %u, steer_qpn_base = %u\n",
2953 qpn, dev->steer_qpn_base))
2954 /* not supposed to be here */
2955 return;
Matan Barakc1c98502013-11-07 15:25:17 +02002956
2957 bitmap_release_region(dev->ib_uc_qpns_bitmap,
2958 qpn - dev->steer_qpn_base,
2959 get_count_order(count));
2960}
2961
2962int mlx4_ib_steer_qp_reg(struct mlx4_ib_dev *mdev, struct mlx4_ib_qp *mqp,
2963 int is_attach)
2964{
2965 int err;
2966 size_t flow_size;
2967 struct ib_flow_attr *flow = NULL;
2968 struct ib_flow_spec_ib *ib_spec;
2969
2970 if (is_attach) {
2971 flow_size = sizeof(struct ib_flow_attr) +
2972 sizeof(struct ib_flow_spec_ib);
2973 flow = kzalloc(flow_size, GFP_KERNEL);
2974 if (!flow)
2975 return -ENOMEM;
2976 flow->port = mqp->port;
2977 flow->num_of_specs = 1;
2978 flow->size = flow_size;
2979 ib_spec = (struct ib_flow_spec_ib *)(flow + 1);
2980 ib_spec->type = IB_FLOW_SPEC_IB;
2981 ib_spec->size = sizeof(struct ib_flow_spec_ib);
2982 /* Add an empty rule for IB L2 */
2983 memset(&ib_spec->mask, 0, sizeof(ib_spec->mask));
2984
2985 err = __mlx4_ib_create_flow(&mqp->ibqp, flow,
2986 IB_FLOW_DOMAIN_NIC,
2987 MLX4_FS_REGULAR,
2988 &mqp->reg_id);
2989 } else {
2990 err = __mlx4_ib_destroy_flow(mdev->dev, mqp->reg_id);
2991 }
2992 kfree(flow);
2993 return err;
2994}
2995
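/*
 * Illustrative sketch only -- not part of the driver.  It shows how the
 * steering-QPN helpers above pair up: reserve an aligned block from the
 * range set aside in mlx4_ib_add(), attach the empty IB L2 rule, and undo
 * both on the way out.  The function name and error flow are invented for
 * illustration; the real call sites live elsewhere in the mlx4_ib code.
 */
static int __maybe_unused example_steer_qpn_cycle(struct mlx4_ib_dev *dev,
						  struct mlx4_ib_qp *mqp,
						  int count)
{
	int qpn, err;

	err = mlx4_ib_steer_qp_alloc(dev, count, &qpn);	/* reserve a block */
	if (err)
		return err;

	err = mlx4_ib_steer_qp_reg(dev, mqp, 1);	/* attach empty IB L2 rule */
	if (err)
		goto out_free;

	/* ... the QP can now receive steered traffic ... */

	err = mlx4_ib_steer_qp_reg(dev, mqp, 0);	/* detach the rule */
out_free:
	mlx4_ib_steer_qp_free(dev, qpn, count);
	return err;
}
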
Roland Dreier225c7b12007-05-08 18:00:38 -07002996static void mlx4_ib_remove(struct mlx4_dev *dev, void *ibdev_ptr)
2997{
2998 struct mlx4_ib_dev *ibdev = ibdev_ptr;
2999 int p;
Jiri Pirko09d4d082016-02-26 17:32:24 +01003000 int i;
Roland Dreier225c7b12007-05-08 18:00:38 -07003001
Jiri Pirko09d4d082016-02-26 17:32:24 +01003002 mlx4_foreach_port(i, dev, MLX4_PORT_TYPE_IB)
3003 devlink_port_type_clear(mlx4_get_devlink_port(dev, i));
Moni Shoua4bf97152014-08-21 14:28:42 +03003004 ibdev->ib_active = false;
3005 flush_workqueue(wq);
3006
Jack Morgensteinfc065732012-08-03 08:40:42 +00003007 mlx4_ib_close_sriov(ibdev);
Yevgeny Petrilina6a47772009-03-18 19:49:54 -07003008 mlx4_ib_mad_cleanup(ibdev);
3009 ib_unregister_device(&ibdev->ib_dev);
Mark Bloch3f85f2a2016-07-19 20:54:58 +03003010 mlx4_ib_diag_cleanup(ibdev);
Eli Cohenfa417f72010-10-24 21:08:52 -07003011 if (ibdev->iboe.nb.notifier_call) {
3012 if (unregister_netdevice_notifier(&ibdev->iboe.nb))
Shlomo Pongratz987c8f82012-04-29 17:04:26 +03003013 pr_warn("failure unregistering notifier\n");
Eli Cohenfa417f72010-10-24 21:08:52 -07003014 ibdev->iboe.nb.notifier_call = NULL;
3015 }
Matan Barakc1c98502013-11-07 15:25:17 +02003016
Jack Morgenstein852f6922018-01-12 07:58:40 +02003017 mlx4_qp_release_range(dev, ibdev->steer_qpn_base,
3018 ibdev->steer_qpn_count);
3019 kfree(ibdev->ib_uc_qpns_bitmap);
Matan Barakc1c98502013-11-07 15:25:17 +02003020
Eli Cohenfa417f72010-10-24 21:08:52 -07003021 iounmap(ibdev->uar_map);
Or Gerlitzcfcde112011-06-15 14:49:57 +00003022 for (p = 0; p < ibdev->num_ports; ++p)
Eran Ben Elisha3ba8e312015-10-15 14:44:40 +03003023 mlx4_ib_delete_counters_table(ibdev, &ibdev->counters_table[p]);
3024
Eli Cohenfa417f72010-10-24 21:08:52 -07003025 mlx4_foreach_port(p, dev, MLX4_PORT_TYPE_IB)
Roland Dreier225c7b12007-05-08 18:00:38 -07003026 mlx4_CLOSE_PORT(dev, p);
3027
Shlomo Pongratze605b742012-04-29 17:04:27 +03003028 mlx4_ib_free_eqs(dev, ibdev);
3029
Roland Dreier225c7b12007-05-08 18:00:38 -07003030 mlx4_uar_free(dev, &ibdev->priv_uar);
3031 mlx4_pd_free(dev, ibdev->priv_pdn);
3032 ib_dealloc_device(&ibdev->ib_dev);
3033}
3034
Jack Morgensteinfc065732012-08-03 08:40:42 +00003035static void do_slave_init(struct mlx4_ib_dev *ibdev, int slave, int do_init)
3036{
3037 struct mlx4_ib_demux_work **dm = NULL;
3038 struct mlx4_dev *dev = ibdev->dev;
3039 int i;
3040 unsigned long flags;
Matan Barak449fc482014-03-19 18:11:52 +02003041 struct mlx4_active_ports actv_ports;
3042 unsigned int ports;
3043 unsigned int first_port;
Jack Morgensteinfc065732012-08-03 08:40:42 +00003044
3045 if (!mlx4_is_master(dev))
3046 return;
3047
Matan Barak449fc482014-03-19 18:11:52 +02003048 actv_ports = mlx4_get_active_ports(dev, slave);
3049 ports = bitmap_weight(actv_ports.ports, dev->caps.num_ports);
3050 first_port = find_first_bit(actv_ports.ports, dev->caps.num_ports);
3051
3052 dm = kcalloc(ports, sizeof(*dm), GFP_ATOMIC);
Leon Romanovsky15d46262016-11-03 16:44:12 +02003053 if (!dm)
Maninder Singha39a98f2015-07-08 09:43:35 +05303054 return;
Jack Morgensteinfc065732012-08-03 08:40:42 +00003055
Matan Barak449fc482014-03-19 18:11:52 +02003056 for (i = 0; i < ports; i++) {
Jack Morgensteinfc065732012-08-03 08:40:42 +00003057		dm[i] = kmalloc(sizeof(struct mlx4_ib_demux_work), GFP_ATOMIC);
3058 if (!dm[i]) {
Maninder Singha39a98f2015-07-08 09:43:35 +05303059 while (--i >= 0)
3060 kfree(dm[i]);
Jack Morgensteinfc065732012-08-03 08:40:42 +00003061 goto out;
3062 }
Jack Morgensteinfc065732012-08-03 08:40:42 +00003063 INIT_WORK(&dm[i]->work, mlx4_ib_tunnels_update_work);
Matan Barak449fc482014-03-19 18:11:52 +02003064 dm[i]->port = first_port + i + 1;
Jack Morgensteinfc065732012-08-03 08:40:42 +00003065 dm[i]->slave = slave;
3066 dm[i]->do_init = do_init;
3067 dm[i]->dev = ibdev;
Doug Ledfordd9a047a2015-07-09 10:21:08 -04003068 }
3069 /* initialize or tear down tunnel QPs for the slave */
3070 spin_lock_irqsave(&ibdev->sriov.going_down_lock, flags);
3071 if (!ibdev->sriov.is_going_down) {
3072 for (i = 0; i < ports; i++)
Jack Morgensteinfc065732012-08-03 08:40:42 +00003073 queue_work(ibdev->sriov.demux[i].ud_wq, &dm[i]->work);
3074 spin_unlock_irqrestore(&ibdev->sriov.going_down_lock, flags);
Doug Ledfordd9a047a2015-07-09 10:21:08 -04003075 } else {
3076 spin_unlock_irqrestore(&ibdev->sriov.going_down_lock, flags);
3077 for (i = 0; i < ports; i++)
3078 kfree(dm[i]);
Jack Morgensteinfc065732012-08-03 08:40:42 +00003079 }
3080out:
Syam Sidhardhanc89d1272013-02-24 23:20:05 +00003081 kfree(dm);
Jack Morgensteinfc065732012-08-03 08:40:42 +00003082 return;
3083}
3084
Yishai Hadas35f05da2015-02-08 11:49:34 +02003085static void mlx4_ib_handle_catas_error(struct mlx4_ib_dev *ibdev)
3086{
3087 struct mlx4_ib_qp *mqp;
3088 unsigned long flags_qp;
3089 unsigned long flags_cq;
3090 struct mlx4_ib_cq *send_mcq, *recv_mcq;
3091 struct list_head cq_notify_list;
3092 struct mlx4_cq *mcq;
3093 unsigned long flags;
3094
3095 pr_warn("mlx4_ib_handle_catas_error was started\n");
3096 INIT_LIST_HEAD(&cq_notify_list);
3097
3098 	/* Go over the qp list residing on this ibdev, syncing with qp create/destroy. */
3099 spin_lock_irqsave(&ibdev->reset_flow_resource_lock, flags);
3100
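	/*
	 * For every QP with outstanding work on its send or receive queue,
	 * queue its CQ (once) on cq_notify_list so the completion handlers
	 * can be invoked below and consumers get a chance to drain.
	 */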
3101 list_for_each_entry(mqp, &ibdev->qp_list, qps_list) {
3102 spin_lock_irqsave(&mqp->sq.lock, flags_qp);
3103 if (mqp->sq.tail != mqp->sq.head) {
3104 send_mcq = to_mcq(mqp->ibqp.send_cq);
3105 spin_lock_irqsave(&send_mcq->lock, flags_cq);
3106 if (send_mcq->mcq.comp &&
3107 mqp->ibqp.send_cq->comp_handler) {
3108 if (!send_mcq->mcq.reset_notify_added) {
3109 send_mcq->mcq.reset_notify_added = 1;
3110 list_add_tail(&send_mcq->mcq.reset_notify,
3111 &cq_notify_list);
3112 }
3113 }
3114 spin_unlock_irqrestore(&send_mcq->lock, flags_cq);
3115 }
3116 spin_unlock_irqrestore(&mqp->sq.lock, flags_qp);
3117 /* Now, handle the QP's receive queue */
3118 spin_lock_irqsave(&mqp->rq.lock, flags_qp);
3119 /* no handling is needed for SRQ */
3120 if (!mqp->ibqp.srq) {
3121 if (mqp->rq.tail != mqp->rq.head) {
3122 recv_mcq = to_mcq(mqp->ibqp.recv_cq);
3123 spin_lock_irqsave(&recv_mcq->lock, flags_cq);
3124 if (recv_mcq->mcq.comp &&
3125 mqp->ibqp.recv_cq->comp_handler) {
3126 if (!recv_mcq->mcq.reset_notify_added) {
3127 recv_mcq->mcq.reset_notify_added = 1;
3128 list_add_tail(&recv_mcq->mcq.reset_notify,
3129 &cq_notify_list);
3130 }
3131 }
3132 spin_unlock_irqrestore(&recv_mcq->lock,
3133 flags_cq);
3134 }
3135 }
3136 spin_unlock_irqrestore(&mqp->rq.lock, flags_qp);
3137 }
3138
3139 list_for_each_entry(mcq, &cq_notify_list, reset_notify) {
3140 mcq->comp(mcq);
3141 }
3142 spin_unlock_irqrestore(&ibdev->reset_flow_resource_lock, flags);
3143 pr_warn("mlx4_ib_handle_catas_error ended\n");
3144}
3145
Moni Shouaa5750092015-02-03 16:48:37 +02003146static void handle_bonded_port_state_event(struct work_struct *work)
3147{
3148 struct ib_event_work *ew =
3149 container_of(work, struct ib_event_work, work);
3150 struct mlx4_ib_dev *ibdev = ew->ib_dev;
3151 enum ib_port_state bonded_port_state = IB_PORT_NOP;
3152 int i;
3153 struct ib_event ibev;
3154
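	/*
	 * A bonded device exposes a single logical port (port 1): report it
	 * ACTIVE if any slave netdev is running with carrier, otherwise DOWN.
	 */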
3155 kfree(ew);
3156 spin_lock_bh(&ibdev->iboe.lock);
3157 for (i = 0; i < MLX4_MAX_PORTS; ++i) {
3158 struct net_device *curr_netdev = ibdev->iboe.netdevs[i];
Moni Shoua217e8b12015-03-18 16:51:35 +02003159 enum ib_port_state curr_port_state;
Moni Shouaa5750092015-02-03 16:48:37 +02003160
Moni Shoua217e8b12015-03-18 16:51:35 +02003161 if (!curr_netdev)
3162 continue;
3163
3164 curr_port_state =
Moni Shouaa5750092015-02-03 16:48:37 +02003165 (netif_running(curr_netdev) &&
3166 netif_carrier_ok(curr_netdev)) ?
3167 IB_PORT_ACTIVE : IB_PORT_DOWN;
3168
3169 bonded_port_state = (bonded_port_state != IB_PORT_ACTIVE) ?
3170 curr_port_state : IB_PORT_ACTIVE;
3171 }
3172 spin_unlock_bh(&ibdev->iboe.lock);
3173
3174 ibev.device = &ibdev->ib_dev;
3175 ibev.element.port_num = 1;
3176 ibev.event = (bonded_port_state == IB_PORT_ACTIVE) ?
3177 IB_EVENT_PORT_ACTIVE : IB_EVENT_PORT_ERR;
3178
3179 ib_dispatch_event(&ibev);
3180}
3181
Jack Morgensteinfd10ed82016-09-12 19:16:21 +03003182void mlx4_ib_sl2vl_update(struct mlx4_ib_dev *mdev, int port)
3183{
3184 u64 sl2vl;
3185 int err;
3186
3187 err = mlx4_ib_query_sl2vl(&mdev->ib_dev, port, &sl2vl);
3188 if (err) {
3189 pr_err("Unable to get current sl to vl mapping for port %d. Using all zeroes (%d)\n",
3190 port, err);
3191 sl2vl = 0;
3192 }
3193 atomic64_set(&mdev->sl2vl[port - 1], sl2vl);
3194}
3195
3196static void ib_sl2vl_update_work(struct work_struct *work)
3197{
3198 struct ib_event_work *ew = container_of(work, struct ib_event_work, work);
3199 struct mlx4_ib_dev *mdev = ew->ib_dev;
3200 int port = ew->port;
3201
3202 mlx4_ib_sl2vl_update(mdev, port);
3203
3204 kfree(ew);
3205}
3206
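/*
 * Defer the sl2vl refresh to the driver workqueue; callers may be in
 * atomic (event-handler) context, hence the GFP_ATOMIC allocation below.
 */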
3207void mlx4_sched_ib_sl2vl_update_work(struct mlx4_ib_dev *ibdev,
3208 int port)
3209{
3210 struct ib_event_work *ew;
3211
3212 ew = kmalloc(sizeof(*ew), GFP_ATOMIC);
3213 if (ew) {
3214 INIT_WORK(&ew->work, ib_sl2vl_update_work);
3215 ew->port = port;
3216 ew->ib_dev = ibdev;
3217 queue_work(wq, &ew->work);
Jack Morgensteinfd10ed82016-09-12 19:16:21 +03003218 }
3219}
3220
Roland Dreier225c7b12007-05-08 18:00:38 -07003221static void mlx4_ib_event(struct mlx4_dev *dev, void *ibdev_ptr,
Jack Morgenstein00f5ce92012-06-19 11:21:40 +03003222 enum mlx4_dev_event event, unsigned long param)
Roland Dreier225c7b12007-05-08 18:00:38 -07003223{
3224 struct ib_event ibev;
Yevgeny Petrilin7ff93f82008-10-22 15:38:42 -07003225 struct mlx4_ib_dev *ibdev = to_mdev((struct ib_device *) ibdev_ptr);
Jack Morgenstein00f5ce92012-06-19 11:21:40 +03003226 struct mlx4_eqe *eqe = NULL;
3227 struct ib_event_work *ew;
Jack Morgensteinfc065732012-08-03 08:40:42 +00003228 int p = 0;
Jack Morgenstein00f5ce92012-06-19 11:21:40 +03003229
Moni Shouaa5750092015-02-03 16:48:37 +02003230 if (mlx4_is_bonded(dev) &&
3231 ((event == MLX4_DEV_EVENT_PORT_UP) ||
3232 (event == MLX4_DEV_EVENT_PORT_DOWN))) {
3233 ew = kmalloc(sizeof(*ew), GFP_ATOMIC);
3234 if (!ew)
3235 return;
3236 INIT_WORK(&ew->work, handle_bonded_port_state_event);
3237 ew->ib_dev = ibdev;
3238 queue_work(wq, &ew->work);
3239 return;
3240 }
3241
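	/*
	 * PORT_MGMT_CHANGE events carry a pointer to the EQE in 'param';
	 * all other events pass a port number (or, for the slave init and
	 * shutdown events, a slave id) instead.
	 */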
Jack Morgenstein00f5ce92012-06-19 11:21:40 +03003242 if (event == MLX4_DEV_EVENT_PORT_MGMT_CHANGE)
3243 eqe = (struct mlx4_eqe *)param;
3244 else
Jack Morgensteinfc065732012-08-03 08:40:42 +00003245 p = (int) param;
Roland Dreier225c7b12007-05-08 18:00:38 -07003246
3247 switch (event) {
Roland Dreier37608ee2008-04-16 21:01:08 -07003248 case MLX4_DEV_EVENT_PORT_UP:
Jack Morgensteinfc065732012-08-03 08:40:42 +00003249 if (p > ibdev->num_ports)
3250 return;
Jack Morgensteinfd10ed82016-09-12 19:16:21 +03003251 if (!mlx4_is_slave(dev) &&
Jack Morgensteina0c64a12012-08-03 08:40:49 +00003252 rdma_port_get_link_layer(&ibdev->ib_dev, p) ==
3253 IB_LINK_LAYER_INFINIBAND) {
Jack Morgensteinfd10ed82016-09-12 19:16:21 +03003254 if (mlx4_is_master(dev))
3255 mlx4_ib_invalidate_all_guid_record(ibdev, p);
3256 if (ibdev->dev->flags & MLX4_FLAG_SECURE_HOST &&
3257 !(ibdev->dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_SL_TO_VL_CHANGE_EVENT))
3258 mlx4_sched_ib_sl2vl_update_work(ibdev, p);
Jack Morgensteina0c64a12012-08-03 08:40:49 +00003259 }
Roland Dreier37608ee2008-04-16 21:01:08 -07003260 ibev.event = IB_EVENT_PORT_ACTIVE;
Roland Dreier225c7b12007-05-08 18:00:38 -07003261 break;
3262
Roland Dreier37608ee2008-04-16 21:01:08 -07003263 case MLX4_DEV_EVENT_PORT_DOWN:
Jack Morgensteinfc065732012-08-03 08:40:42 +00003264 if (p > ibdev->num_ports)
3265 return;
Roland Dreier37608ee2008-04-16 21:01:08 -07003266 ibev.event = IB_EVENT_PORT_ERR;
3267 break;
3268
3269 case MLX4_DEV_EVENT_CATASTROPHIC_ERROR:
Jack Morgenstein3b4a8cd2009-09-05 20:24:50 -07003270 ibdev->ib_active = false;
Roland Dreier225c7b12007-05-08 18:00:38 -07003271 ibev.event = IB_EVENT_DEVICE_FATAL;
Yishai Hadas35f05da2015-02-08 11:49:34 +02003272 mlx4_ib_handle_catas_error(ibdev);
Roland Dreier225c7b12007-05-08 18:00:38 -07003273 break;
3274
Jack Morgenstein00f5ce92012-06-19 11:21:40 +03003275 case MLX4_DEV_EVENT_PORT_MGMT_CHANGE:
3276 ew = kmalloc(sizeof *ew, GFP_ATOMIC);
Leon Romanovsky15d46262016-11-03 16:44:12 +02003277 if (!ew)
Jack Morgenstein00f5ce92012-06-19 11:21:40 +03003278 break;
Jack Morgenstein00f5ce92012-06-19 11:21:40 +03003279
3280 INIT_WORK(&ew->work, handle_port_mgmt_change_event);
3281 memcpy(&ew->ib_eqe, eqe, sizeof *eqe);
3282 ew->ib_dev = ibdev;
Jack Morgenstein992e8e6e2012-08-03 08:40:54 +00003283 /* need to queue only for port owner, which uses GEN_EQE */
3284 if (mlx4_is_master(dev))
3285 queue_work(wq, &ew->work);
3286 else
3287 handle_port_mgmt_change_event(&ew->work);
Jack Morgenstein00f5ce92012-06-19 11:21:40 +03003288 return;
3289
Jack Morgensteinfc065732012-08-03 08:40:42 +00003290 case MLX4_DEV_EVENT_SLAVE_INIT:
3291 /* here, p is the slave id */
3292 do_slave_init(ibdev, p, 1);
Yishai Hadasee59fa02015-03-03 17:28:49 +02003293 if (mlx4_is_master(dev)) {
3294 int i;
3295
3296 for (i = 1; i <= ibdev->num_ports; i++) {
3297 if (rdma_port_get_link_layer(&ibdev->ib_dev, i)
3298 == IB_LINK_LAYER_INFINIBAND)
3299 mlx4_ib_slave_alias_guid_event(ibdev,
3300 p, i,
3301 1);
3302 }
3303 }
Jack Morgensteinfc065732012-08-03 08:40:42 +00003304 return;
3305
3306 case MLX4_DEV_EVENT_SLAVE_SHUTDOWN:
Yishai Hadasee59fa02015-03-03 17:28:49 +02003307 if (mlx4_is_master(dev)) {
3308 int i;
3309
3310 for (i = 1; i <= ibdev->num_ports; i++) {
3311 if (rdma_port_get_link_layer(&ibdev->ib_dev, i)
3312 == IB_LINK_LAYER_INFINIBAND)
3313 mlx4_ib_slave_alias_guid_event(ibdev,
3314 p, i,
3315 0);
3316 }
3317 }
Jack Morgensteinfc065732012-08-03 08:40:42 +00003318 /* here, p is the slave id */
3319 do_slave_init(ibdev, p, 0);
3320 return;
3321
Roland Dreier225c7b12007-05-08 18:00:38 -07003322 default:
3323 return;
3324 }
3325
3326 ibev.device = ibdev_ptr;
Moni Shouaa5750092015-02-03 16:48:37 +02003327 ibev.element.port_num = mlx4_is_bonded(ibdev->dev) ? 1 : (u8)p;
Roland Dreier225c7b12007-05-08 18:00:38 -07003328
3329 ib_dispatch_event(&ibev);
3330}
3331
3332static struct mlx4_interface mlx4_ib_interface = {
Eli Cohenfa417f72010-10-24 21:08:52 -07003333 .add = mlx4_ib_add,
3334 .remove = mlx4_ib_remove,
3335 .event = mlx4_ib_event,
Moni Shouaa5750092015-02-03 16:48:37 +02003336 .protocol = MLX4_PROT_IB_IPV6,
3337 .flags = MLX4_INTFF_BONDING
Roland Dreier225c7b12007-05-08 18:00:38 -07003338};
3339
3340static int __init mlx4_ib_init(void)
3341{
Eli Cohenfa417f72010-10-24 21:08:52 -07003342 int err;
3343
Bhaktipriya Shridhar41cd3942016-08-15 23:42:48 +05303344 wq = alloc_ordered_workqueue("mlx4_ib", WQ_MEM_RECLAIM);
Eli Cohenfa417f72010-10-24 21:08:52 -07003345 if (!wq)
3346 return -ENOMEM;
3347
Oren Duerb9c5d6a2012-08-03 08:40:46 +00003348 err = mlx4_ib_mcg_init();
3349 if (err)
3350 goto clean_wq;
3351
Eli Cohenfa417f72010-10-24 21:08:52 -07003352 err = mlx4_register_interface(&mlx4_ib_interface);
Oren Duerb9c5d6a2012-08-03 08:40:46 +00003353 if (err)
3354 goto clean_mcg;
Eli Cohenfa417f72010-10-24 21:08:52 -07003355
3356 return 0;
Oren Duerb9c5d6a2012-08-03 08:40:46 +00003357
3358clean_mcg:
3359 mlx4_ib_mcg_destroy();
3360
3361clean_wq:
3362 destroy_workqueue(wq);
3363 return err;
Roland Dreier225c7b12007-05-08 18:00:38 -07003364}
3365
3366static void __exit mlx4_ib_cleanup(void)
3367{
3368 mlx4_unregister_interface(&mlx4_ib_interface);
Oren Duerb9c5d6a2012-08-03 08:40:46 +00003369 mlx4_ib_mcg_destroy();
Eli Cohenfa417f72010-10-24 21:08:52 -07003370 destroy_workqueue(wq);
Roland Dreier225c7b12007-05-08 18:00:38 -07003371}
3372
3373module_init(mlx4_ib_init);
3374module_exit(mlx4_ib_cleanup);