/*
 * Copyright (c) 2013-2015, Mellanox Technologies. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/mlx5/cmd.h>
#include <rdma/ib_mad.h>
#include <rdma/ib_smi.h>
#include "mlx5_ib.h"

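/* Vendor-specific MAD management classes that are passed on to firmware */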
enum {
	MLX5_IB_VENDOR_CLASS1 = 0x9,
	MLX5_IB_VENDOR_CLASS2 = 0xa
};

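/*
 * Forward a MAD to device firmware via the MAD_IFC command.
 * op_modifier bit 0 requests that the M_Key check be skipped and bit 1
 * that the B_Key check be skipped; both are forced when there is no
 * in_wc, since a key-violation trap could not identify its sender.
 */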
int mlx5_MAD_IFC(struct mlx5_ib_dev *dev, int ignore_mkey, int ignore_bkey,
		 u8 port, struct ib_wc *in_wc, struct ib_grh *in_grh,
		 void *in_mad, void *response_mad)
{
	u8 op_modifier = 0;

	/* Key check traps can't be generated unless we have in_wc to
	 * tell us where to send the trap.
	 */
	if (ignore_mkey || !in_wc)
		op_modifier |= 0x1;
	if (ignore_bkey || !in_wc)
		op_modifier |= 0x2;

	return mlx5_core_mad_ifc(dev->mdev, in_mad, response_mad, op_modifier, port);
}

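/*
 * ib_device process_mad handler.  Traps with a zero source LID are
 * silently consumed.  Only GET/SET (plus TRAP_REPRESS for SMPs) of the
 * subnet, performance, congestion and Mellanox vendor classes are
 * forwarded to firmware; directed-route responses get bit 15 (the
 * direction bit) set in their status before being returned.
 */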
int mlx5_ib_process_mad(struct ib_device *ibdev, int mad_flags, u8 port_num,
			struct ib_wc *in_wc, struct ib_grh *in_grh,
			struct ib_mad *in_mad, struct ib_mad *out_mad)
{
	u16 slid;
	int err;

	slid = in_wc ? in_wc->slid : be16_to_cpu(IB_LID_PERMISSIVE);

	if (in_mad->mad_hdr.method == IB_MGMT_METHOD_TRAP && slid == 0)
		return IB_MAD_RESULT_SUCCESS | IB_MAD_RESULT_CONSUMED;

	if (in_mad->mad_hdr.mgmt_class == IB_MGMT_CLASS_SUBN_LID_ROUTED ||
	    in_mad->mad_hdr.mgmt_class == IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE) {
		if (in_mad->mad_hdr.method != IB_MGMT_METHOD_GET &&
		    in_mad->mad_hdr.method != IB_MGMT_METHOD_SET &&
		    in_mad->mad_hdr.method != IB_MGMT_METHOD_TRAP_REPRESS)
			return IB_MAD_RESULT_SUCCESS;

		/* Don't process SMInfo queries -- the SMA can't handle them.
		 */
		if (in_mad->mad_hdr.attr_id == IB_SMP_ATTR_SM_INFO)
			return IB_MAD_RESULT_SUCCESS;
	} else if (in_mad->mad_hdr.mgmt_class == IB_MGMT_CLASS_PERF_MGMT ||
		   in_mad->mad_hdr.mgmt_class == MLX5_IB_VENDOR_CLASS1 ||
		   in_mad->mad_hdr.mgmt_class == MLX5_IB_VENDOR_CLASS2 ||
		   in_mad->mad_hdr.mgmt_class == IB_MGMT_CLASS_CONG_MGMT) {
		if (in_mad->mad_hdr.method != IB_MGMT_METHOD_GET &&
		    in_mad->mad_hdr.method != IB_MGMT_METHOD_SET)
			return IB_MAD_RESULT_SUCCESS;
	} else {
		return IB_MAD_RESULT_SUCCESS;
	}

	err = mlx5_MAD_IFC(to_mdev(ibdev),
			   mad_flags & IB_MAD_IGNORE_MKEY,
			   mad_flags & IB_MAD_IGNORE_BKEY,
			   port_num, in_wc, in_grh, in_mad, out_mad);
	if (err)
		return IB_MAD_RESULT_FAILURE;

	/* set return bit in status of directed route responses */
	if (in_mad->mad_hdr.mgmt_class == IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE)
		out_mad->mad_hdr.status |= cpu_to_be16(1 << 15);

	if (in_mad->mad_hdr.method == IB_MGMT_METHOD_TRAP_REPRESS)
		/* no response for trap repress */
		return IB_MAD_RESULT_SUCCESS | IB_MAD_RESULT_CONSUMED;

	return IB_MAD_RESULT_SUCCESS | IB_MAD_RESULT_REPLY;
}

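/*
 * Probe the vendor ExtendedPortInfo attribute.  If the query succeeds
 * with a clean MAD status, the capability is cached in port_caps[] and
 * used later for FDR-10 speed detection.
 */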
int mlx5_query_ext_port_caps(struct mlx5_ib_dev *dev, u8 port)
{
	struct ib_smp *in_mad = NULL;
	struct ib_smp *out_mad = NULL;
	int err = -ENOMEM;
	u16 packet_error;

	in_mad = kzalloc(sizeof(*in_mad), GFP_KERNEL);
	out_mad = kmalloc(sizeof(*out_mad), GFP_KERNEL);
	if (!in_mad || !out_mad)
		goto out;

	init_query_mad(in_mad);
	in_mad->attr_id = MLX5_ATTR_EXTENDED_PORT_INFO;
	in_mad->attr_mod = cpu_to_be32(port);

	err = mlx5_MAD_IFC(dev, 1, 1, 1, NULL, NULL, in_mad, out_mad);

	packet_error = be16_to_cpu(out_mad->status);

	dev->mdev->port_caps[port - 1].ext_port_cap = (!err && !packet_error) ?
		MLX_EXT_PORT_CAP_FLAG_EXTENDED_PORT_INFO : 0;

out:
	kfree(in_mad);
	kfree(out_mad);
	return err;
}

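/*
 * Issue a NodeInfo SMP query; the callers below pick individual fields
 * out of out_mad->data at their NodeInfo byte offsets.
 */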
int mlx5_query_mad_ifc_smp_attr_node_info(struct ib_device *ibdev,
					  struct ib_smp *out_mad)
{
	struct ib_smp *in_mad = NULL;
	int err = -ENOMEM;

	in_mad = kzalloc(sizeof(*in_mad), GFP_KERNEL);
	if (!in_mad)
		return -ENOMEM;

	init_query_mad(in_mad);
	in_mad->attr_id = IB_SMP_ATTR_NODE_INFO;

	err = mlx5_MAD_IFC(to_mdev(ibdev), 1, 1, 1, NULL, NULL, in_mad,
			   out_mad);

	kfree(in_mad);
	return err;
}

int mlx5_query_mad_ifc_system_image_guid(struct ib_device *ibdev,
					 __be64 *sys_image_guid)
{
	struct ib_smp *out_mad = NULL;
	int err = -ENOMEM;

	out_mad = kmalloc(sizeof(*out_mad), GFP_KERNEL);
	if (!out_mad)
		return -ENOMEM;

	err = mlx5_query_mad_ifc_smp_attr_node_info(ibdev, out_mad);
	if (err)
		goto out;

	memcpy(sys_image_guid, out_mad->data + 4, 8);

out:
	kfree(out_mad);

	return err;
}

int mlx5_query_mad_ifc_max_pkeys(struct ib_device *ibdev,
				 u16 *max_pkeys)
{
	struct ib_smp *out_mad = NULL;
	int err = -ENOMEM;

	out_mad = kmalloc(sizeof(*out_mad), GFP_KERNEL);
	if (!out_mad)
		return -ENOMEM;

	err = mlx5_query_mad_ifc_smp_attr_node_info(ibdev, out_mad);
	if (err)
		goto out;

	*max_pkeys = be16_to_cpup((__be16 *)(out_mad->data + 28));

out:
	kfree(out_mad);

	return err;
}

int mlx5_query_mad_ifc_vendor_id(struct ib_device *ibdev,
				 u32 *vendor_id)
{
	struct ib_smp *out_mad = NULL;
	int err = -ENOMEM;

	out_mad = kmalloc(sizeof(*out_mad), GFP_KERNEL);
	if (!out_mad)
		return -ENOMEM;

	err = mlx5_query_mad_ifc_smp_attr_node_info(ibdev, out_mad);
	if (err)
		goto out;

	*vendor_id = be32_to_cpup((__be32 *)(out_mad->data + 36)) & 0xffff;

out:
	kfree(out_mad);

	return err;
}

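/* NodeDescription is a 64-byte string returned in the SMP data payload */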
int mlx5_query_mad_ifc_node_desc(struct mlx5_ib_dev *dev, char *node_desc)
{
	struct ib_smp *in_mad = NULL;
	struct ib_smp *out_mad = NULL;
	int err = -ENOMEM;

	in_mad = kzalloc(sizeof(*in_mad), GFP_KERNEL);
	out_mad = kmalloc(sizeof(*out_mad), GFP_KERNEL);
	if (!in_mad || !out_mad)
		goto out;

	init_query_mad(in_mad);
	in_mad->attr_id = IB_SMP_ATTR_NODE_DESC;

	err = mlx5_MAD_IFC(dev, 1, 1, 1, NULL, NULL, in_mad, out_mad);
	if (err)
		goto out;

	memcpy(node_desc, out_mad->data, 64);
out:
	kfree(in_mad);
	kfree(out_mad);
	return err;
}

int mlx5_query_mad_ifc_node_guid(struct mlx5_ib_dev *dev, __be64 *node_guid)
{
	struct ib_smp *in_mad = NULL;
	struct ib_smp *out_mad = NULL;
	int err = -ENOMEM;

	in_mad = kzalloc(sizeof(*in_mad), GFP_KERNEL);
	out_mad = kmalloc(sizeof(*out_mad), GFP_KERNEL);
	if (!in_mad || !out_mad)
		goto out;

	init_query_mad(in_mad);
	in_mad->attr_id = IB_SMP_ATTR_NODE_INFO;

	err = mlx5_MAD_IFC(dev, 1, 1, 1, NULL, NULL, in_mad, out_mad);
	if (err)
		goto out;

	memcpy(node_guid, out_mad->data + 12, 8);
out:
	kfree(in_mad);
	kfree(out_mad);
	return err;
}

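/*
 * The P_KeyTable attribute returns 32 P_Keys per block: attr_mod selects
 * block index / 32 and the requested entry is slot index % 32 within it.
 */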
int mlx5_query_mad_ifc_pkey(struct ib_device *ibdev, u8 port, u16 index,
			    u16 *pkey)
{
	struct ib_smp *in_mad = NULL;
	struct ib_smp *out_mad = NULL;
	int err = -ENOMEM;

	in_mad = kzalloc(sizeof(*in_mad), GFP_KERNEL);
	out_mad = kmalloc(sizeof(*out_mad), GFP_KERNEL);
	if (!in_mad || !out_mad)
		goto out;

	init_query_mad(in_mad);
	in_mad->attr_id = IB_SMP_ATTR_PKEY_TABLE;
	in_mad->attr_mod = cpu_to_be32(index / 32);

	err = mlx5_MAD_IFC(to_mdev(ibdev), 1, 1, port, NULL, NULL, in_mad,
			   out_mad);
	if (err)
		goto out;

	*pkey = be16_to_cpu(((__be16 *)out_mad->data)[index % 32]);

out:
	kfree(in_mad);
	kfree(out_mad);
	return err;
}

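/*
 * A GID is assembled from two queries: the upper 8 bytes are the subnet
 * prefix from PortInfo, and the lower 8 bytes come from the GUIDInfo
 * block (8 GUIDs per block, so attr_mod is index / 8 and the GUID is
 * slot index % 8 within the block).
 */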
int mlx5_query_mad_ifc_gids(struct ib_device *ibdev, u8 port, int index,
			    union ib_gid *gid)
{
	struct ib_smp *in_mad = NULL;
	struct ib_smp *out_mad = NULL;
	int err = -ENOMEM;

	in_mad = kzalloc(sizeof(*in_mad), GFP_KERNEL);
	out_mad = kmalloc(sizeof(*out_mad), GFP_KERNEL);
	if (!in_mad || !out_mad)
		goto out;

	init_query_mad(in_mad);
	in_mad->attr_id = IB_SMP_ATTR_PORT_INFO;
	in_mad->attr_mod = cpu_to_be32(port);

	err = mlx5_MAD_IFC(to_mdev(ibdev), 1, 1, port, NULL, NULL, in_mad,
			   out_mad);
	if (err)
		goto out;

	memcpy(gid->raw, out_mad->data + 8, 8);

	init_query_mad(in_mad);
	in_mad->attr_id = IB_SMP_ATTR_GUID_INFO;
	in_mad->attr_mod = cpu_to_be32(index / 8);

	err = mlx5_MAD_IFC(to_mdev(ibdev), 1, 1, port, NULL, NULL, in_mad,
			   out_mad);
	if (err)
		goto out;

	memcpy(gid->raw + 8, out_mad->data + (index % 8) * 8, 8);

out:
	kfree(in_mad);
	kfree(out_mad);
	return err;
}

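/*
 * Fill struct ib_port_attr from a PortInfo query.  The byte offsets into
 * out_mad->data follow the IBA PortInfo attribute layout.  The active
 * speed is then adjusted for extended speeds (FDR/EDR) and, when only
 * QDR is reported, for FDR-10 via the vendor ExtendedPortInfo attribute.
 */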
int mlx5_query_mad_ifc_port(struct ib_device *ibdev, u8 port,
			    struct ib_port_attr *props)
{
	struct mlx5_ib_dev *dev = to_mdev(ibdev);
	struct mlx5_core_dev *mdev = dev->mdev;
	struct ib_smp *in_mad = NULL;
	struct ib_smp *out_mad = NULL;
	int ext_active_speed;
	int err = -ENOMEM;

	if (port < 1 || port > MLX5_CAP_GEN(mdev, num_ports)) {
		mlx5_ib_warn(dev, "invalid port number %d\n", port);
		return -EINVAL;
	}

	in_mad = kzalloc(sizeof(*in_mad), GFP_KERNEL);
	out_mad = kmalloc(sizeof(*out_mad), GFP_KERNEL);
	if (!in_mad || !out_mad)
		goto out;

	memset(props, 0, sizeof(*props));

	init_query_mad(in_mad);
	in_mad->attr_id = IB_SMP_ATTR_PORT_INFO;
	in_mad->attr_mod = cpu_to_be32(port);

	err = mlx5_MAD_IFC(dev, 1, 1, port, NULL, NULL, in_mad, out_mad);
	if (err) {
		mlx5_ib_warn(dev, "err %d\n", err);
		goto out;
	}

	props->lid = be16_to_cpup((__be16 *)(out_mad->data + 16));
	props->lmc = out_mad->data[34] & 0x7;
	props->sm_lid = be16_to_cpup((__be16 *)(out_mad->data + 18));
	props->sm_sl = out_mad->data[36] & 0xf;
	props->state = out_mad->data[32] & 0xf;
	props->phys_state = out_mad->data[33] >> 4;
	props->port_cap_flags = be32_to_cpup((__be32 *)(out_mad->data + 20));
	props->gid_tbl_len = out_mad->data[50];
	props->max_msg_sz = 1 << MLX5_CAP_GEN(mdev, log_max_msg);
	props->pkey_tbl_len = mdev->port_caps[port - 1].pkey_table_len;
	props->bad_pkey_cntr = be16_to_cpup((__be16 *)(out_mad->data + 46));
	props->qkey_viol_cntr = be16_to_cpup((__be16 *)(out_mad->data + 48));
	props->active_width = out_mad->data[31] & 0xf;
	props->active_speed = out_mad->data[35] >> 4;
	props->max_mtu = out_mad->data[41] & 0xf;
	props->active_mtu = out_mad->data[36] >> 4;
	props->subnet_timeout = out_mad->data[51] & 0x1f;
	props->max_vl_num = out_mad->data[37] >> 4;
	props->init_type_reply = out_mad->data[41] >> 4;

	/* Check if extended speeds (EDR/FDR/...) are supported */
	if (props->port_cap_flags & IB_PORT_EXTENDED_SPEEDS_SUP) {
		ext_active_speed = out_mad->data[62] >> 4;

		switch (ext_active_speed) {
		case 1:
			props->active_speed = 16; /* FDR */
			break;
		case 2:
			props->active_speed = 32; /* EDR */
			break;
		}
	}

	/* If the reported active speed is QDR, check whether it is FDR-10 */
	if (props->active_speed == 4) {
		if (mdev->port_caps[port - 1].ext_port_cap &
		    MLX_EXT_PORT_CAP_FLAG_EXTENDED_PORT_INFO) {
			init_query_mad(in_mad);
			in_mad->attr_id = MLX5_ATTR_EXTENDED_PORT_INFO;
			in_mad->attr_mod = cpu_to_be32(port);

			err = mlx5_MAD_IFC(dev, 1, 1, port,
					   NULL, NULL, in_mad, out_mad);
			if (err)
				goto out;

			/* Checking LinkSpeedActive for FDR-10 */
			if (out_mad->data[15] & 0x1)
				props->active_speed = 8;
		}
	}

out:
	kfree(in_mad);
	kfree(out_mad);

	return err;
}