blob: 77ea04d4edbef52cbe272c500448924e5d1785d9 [file] [log] [blame]
Kang Luwei322ddeb2018-06-30 08:53:21 +08001// SPDX-License-Identifier: GPL-2.0
2/*
3 * Driver for FPGA Management Engine (FME)
4 *
5 * Copyright (C) 2017-2018 Intel Corporation, Inc.
6 *
7 * Authors:
8 * Kang Luwei <luwei.kang@intel.com>
9 * Xiao Guangrong <guangrong.xiao@linux.intel.com>
10 * Joseph Grecco <joe.grecco@intel.com>
11 * Enno Luebbers <enno.luebbers@intel.com>
12 * Tim Whisonant <tim.whisonant@intel.com>
13 * Ananda Ravuri <ananda.ravuri@intel.com>
14 * Henry Mitchel <henry.mitchel@intel.com>
15 */
16
Wu Hao4284c652019-10-14 13:42:02 +080017#include <linux/hwmon.h>
18#include <linux/hwmon-sysfs.h>
Kang Luwei322ddeb2018-06-30 08:53:21 +080019#include <linux/kernel.h>
20#include <linux/module.h>
Wu Hao69bb18d2019-08-04 18:20:11 +080021#include <linux/uaccess.h>
Wu Hao620e19022018-06-30 08:53:23 +080022#include <linux/fpga-dfl.h>
Kang Luwei322ddeb2018-06-30 08:53:21 +080023
24#include "dfl.h"
Kang Luwei29de7622018-06-30 08:53:24 +080025#include "dfl-fme.h"
Kang Luwei322ddeb2018-06-30 08:53:21 +080026
Kang Luwei0a27ff22018-06-30 08:53:22 +080027static ssize_t ports_num_show(struct device *dev,
28 struct device_attribute *attr, char *buf)
29{
30 void __iomem *base;
31 u64 v;
32
33 base = dfl_get_feature_ioaddr_by_id(dev, FME_FEATURE_ID_HEADER);
34
35 v = readq(base + FME_HDR_CAP);
36
37 return scnprintf(buf, PAGE_SIZE, "%u\n",
38 (unsigned int)FIELD_GET(FME_CAP_NUM_PORTS, v));
39}
40static DEVICE_ATTR_RO(ports_num);
41
42/*
43 * Bitstream (static FPGA region) identifier number. It contains the
44 * detailed version and other information of this static FPGA region.
45 */
46static ssize_t bitstream_id_show(struct device *dev,
47 struct device_attribute *attr, char *buf)
48{
49 void __iomem *base;
50 u64 v;
51
52 base = dfl_get_feature_ioaddr_by_id(dev, FME_FEATURE_ID_HEADER);
53
54 v = readq(base + FME_HDR_BITSTREAM_ID);
55
56 return scnprintf(buf, PAGE_SIZE, "0x%llx\n", (unsigned long long)v);
57}
58static DEVICE_ATTR_RO(bitstream_id);
59
60/*
61 * Bitstream (static FPGA region) meta data. It contains the synthesis
62 * date, seed and other information of this static FPGA region.
63 */
64static ssize_t bitstream_metadata_show(struct device *dev,
65 struct device_attribute *attr, char *buf)
66{
67 void __iomem *base;
68 u64 v;
69
70 base = dfl_get_feature_ioaddr_by_id(dev, FME_FEATURE_ID_HEADER);
71
72 v = readq(base + FME_HDR_BITSTREAM_MD);
73
74 return scnprintf(buf, PAGE_SIZE, "0x%llx\n", (unsigned long long)v);
75}
76static DEVICE_ATTR_RO(bitstream_metadata);
77
Wu Hao52eb6d32019-08-04 18:20:20 +080078static ssize_t cache_size_show(struct device *dev,
79 struct device_attribute *attr, char *buf)
80{
81 void __iomem *base;
82 u64 v;
83
84 base = dfl_get_feature_ioaddr_by_id(dev, FME_FEATURE_ID_HEADER);
85
86 v = readq(base + FME_HDR_CAP);
87
88 return sprintf(buf, "%u\n",
89 (unsigned int)FIELD_GET(FME_CAP_CACHE_SIZE, v));
90}
91static DEVICE_ATTR_RO(cache_size);
92
93static ssize_t fabric_version_show(struct device *dev,
94 struct device_attribute *attr, char *buf)
95{
96 void __iomem *base;
97 u64 v;
98
99 base = dfl_get_feature_ioaddr_by_id(dev, FME_FEATURE_ID_HEADER);
100
101 v = readq(base + FME_HDR_CAP);
102
103 return sprintf(buf, "%u\n",
104 (unsigned int)FIELD_GET(FME_CAP_FABRIC_VERID, v));
105}
106static DEVICE_ATTR_RO(fabric_version);
107
108static ssize_t socket_id_show(struct device *dev,
109 struct device_attribute *attr, char *buf)
110{
111 void __iomem *base;
112 u64 v;
113
114 base = dfl_get_feature_ioaddr_by_id(dev, FME_FEATURE_ID_HEADER);
115
116 v = readq(base + FME_HDR_CAP);
117
118 return sprintf(buf, "%u\n",
119 (unsigned int)FIELD_GET(FME_CAP_SOCKET_ID, v));
120}
121static DEVICE_ATTR_RO(socket_id);
122
/* sysfs attributes exposed by the FME header feature (device-level group). */
static struct attribute *fme_hdr_attrs[] = {
	&dev_attr_ports_num.attr,
	&dev_attr_bitstream_id.attr,
	&dev_attr_bitstream_metadata.attr,
	&dev_attr_cache_size.attr,
	&dev_attr_fabric_version.attr,
	&dev_attr_socket_id.attr,
	NULL,
};

static const struct attribute_group fme_hdr_group = {
	.attrs = fme_hdr_attrs,
};
Kang Luwei0a27ff22018-06-30 08:53:22 +0800136
Wu Hao69bb18d2019-08-04 18:20:11 +0800137static long fme_hdr_ioctl_release_port(struct dfl_feature_platform_data *pdata,
138 unsigned long arg)
139{
140 struct dfl_fpga_cdev *cdev = pdata->dfl_cdev;
141 int port_id;
142
143 if (get_user(port_id, (int __user *)arg))
144 return -EFAULT;
145
146 return dfl_fpga_cdev_release_port(cdev, port_id);
147}
148
149static long fme_hdr_ioctl_assign_port(struct dfl_feature_platform_data *pdata,
150 unsigned long arg)
151{
152 struct dfl_fpga_cdev *cdev = pdata->dfl_cdev;
153 int port_id;
154
155 if (get_user(port_id, (int __user *)arg))
156 return -EFAULT;
157
158 return dfl_fpga_cdev_assign_port(cdev, port_id);
159}
160
161static long fme_hdr_ioctl(struct platform_device *pdev,
162 struct dfl_feature *feature,
163 unsigned int cmd, unsigned long arg)
164{
165 struct dfl_feature_platform_data *pdata = dev_get_platdata(&pdev->dev);
166
167 switch (cmd) {
168 case DFL_FPGA_FME_PORT_RELEASE:
169 return fme_hdr_ioctl_release_port(pdata, arg);
170 case DFL_FPGA_FME_PORT_ASSIGN:
171 return fme_hdr_ioctl_assign_port(pdata, arg);
172 }
173
174 return -ENODEV;
175}
176
/* Feature id table and ops binding the handler above to the FME header. */
static const struct dfl_feature_id fme_hdr_id_table[] = {
	{.id = FME_FEATURE_ID_HEADER,},
	{0,}
};

static const struct dfl_feature_ops fme_hdr_ops = {
	.ioctl = fme_hdr_ioctl,
};
185
/* Thermal management feature register layout and bitfields. */
#define FME_THERM_THRESHOLD	0x8
#define TEMP_THRESHOLD1		GENMASK_ULL(6, 0)
#define TEMP_THRESHOLD1_EN	BIT_ULL(7)
#define TEMP_THRESHOLD2		GENMASK_ULL(14, 8)
#define TEMP_THRESHOLD2_EN	BIT_ULL(15)
#define TRIP_THRESHOLD		GENMASK_ULL(30, 24)
#define TEMP_THRESHOLD1_STATUS	BIT_ULL(32)	/* threshold1 reached */
#define TEMP_THRESHOLD2_STATUS	BIT_ULL(33)	/* threshold2 reached */
/* threshold1 policy: 0 - AP2 (90% throttle) / 1 - AP1 (50% throttle) */
#define TEMP_THRESHOLD1_POLICY	BIT_ULL(44)

#define FME_THERM_RDSENSOR_FMT1	0x10
#define FPGA_TEMPERATURE	GENMASK_ULL(6, 0)

#define FME_THERM_CAP		0x20
#define THERM_NO_THROTTLE	BIT_ULL(0)
204
205static bool fme_thermal_throttle_support(void __iomem *base)
206{
207 u64 v = readq(base + FME_THERM_CAP);
208
209 return FIELD_GET(THERM_NO_THROTTLE, v) ? false : true;
210}
211
212static umode_t thermal_hwmon_attrs_visible(const void *drvdata,
213 enum hwmon_sensor_types type,
214 u32 attr, int channel)
215{
216 const struct dfl_feature *feature = drvdata;
217
218 /* temperature is always supported, and check hardware cap for others */
219 if (attr == hwmon_temp_input)
220 return 0444;
221
222 return fme_thermal_throttle_support(feature->ioaddr) ? 0444 : 0;
223}
224
/*
 * hwmon read callback for the thermal sensor.
 *
 * Temperatures are reported in millidegrees Celsius (raw field * 1000);
 * the *_alarm attributes report the raw status bits (0/1).
 */
static int thermal_hwmon_read(struct device *dev, enum hwmon_sensor_types type,
			      u32 attr, int channel, long *val)
{
	struct dfl_feature *feature = dev_get_drvdata(dev);
	u64 v;

	switch (attr) {
	case hwmon_temp_input:
		v = readq(feature->ioaddr + FME_THERM_RDSENSOR_FMT1);
		*val = (long)(FIELD_GET(FPGA_TEMPERATURE, v) * 1000);
		break;
	case hwmon_temp_max:
		/* hardware threshold1 (50% or 90% throttling) */
		v = readq(feature->ioaddr + FME_THERM_THRESHOLD);
		*val = (long)(FIELD_GET(TEMP_THRESHOLD1, v) * 1000);
		break;
	case hwmon_temp_crit:
		/* hardware threshold2 (100% throttling) */
		v = readq(feature->ioaddr + FME_THERM_THRESHOLD);
		*val = (long)(FIELD_GET(TEMP_THRESHOLD2, v) * 1000);
		break;
	case hwmon_temp_emergency:
		/* trip threshold: FPGA shuts down when reached */
		v = readq(feature->ioaddr + FME_THERM_THRESHOLD);
		*val = (long)(FIELD_GET(TRIP_THRESHOLD, v) * 1000);
		break;
	case hwmon_temp_max_alarm:
		v = readq(feature->ioaddr + FME_THERM_THRESHOLD);
		*val = (long)FIELD_GET(TEMP_THRESHOLD1_STATUS, v);
		break;
	case hwmon_temp_crit_alarm:
		v = readq(feature->ioaddr + FME_THERM_THRESHOLD);
		*val = (long)FIELD_GET(TEMP_THRESHOLD2_STATUS, v);
		break;
	default:
		return -EOPNOTSUPP;
	}

	return 0;
}
262
/* hwmon chip description for the thermal sensor: one temp channel. */
static const struct hwmon_ops thermal_hwmon_ops = {
	.is_visible = thermal_hwmon_attrs_visible,
	.read = thermal_hwmon_read,
};

static const struct hwmon_channel_info *thermal_hwmon_info[] = {
	HWMON_CHANNEL_INFO(temp, HWMON_T_INPUT | HWMON_T_EMERGENCY |
				 HWMON_T_MAX   | HWMON_T_MAX_ALARM |
				 HWMON_T_CRIT  | HWMON_T_CRIT_ALARM),
	NULL
};

static const struct hwmon_chip_info thermal_hwmon_chip_info = {
	.ops = &thermal_hwmon_ops,
	.info = thermal_hwmon_info,
};
279
280static ssize_t temp1_max_policy_show(struct device *dev,
281 struct device_attribute *attr, char *buf)
282{
283 struct dfl_feature *feature = dev_get_drvdata(dev);
284 u64 v;
285
286 v = readq(feature->ioaddr + FME_THERM_THRESHOLD);
287
288 return sprintf(buf, "%u\n",
289 (unsigned int)FIELD_GET(TEMP_THRESHOLD1_POLICY, v));
290}
291
292static DEVICE_ATTR_RO(temp1_max_policy);
293
/* Device-specific attrs attached to the thermal hwmon device. */
static struct attribute *thermal_extra_attrs[] = {
	&dev_attr_temp1_max_policy.attr,
	NULL,
};
298
299static umode_t thermal_extra_attrs_visible(struct kobject *kobj,
300 struct attribute *attr, int index)
301{
302 struct device *dev = kobj_to_dev(kobj);
303 struct dfl_feature *feature = dev_get_drvdata(dev);
304
305 return fme_thermal_throttle_support(feature->ioaddr) ? attr->mode : 0;
306}
307
/* Group + NULL-terminated group list (thermal_extra_groups) for hwmon core. */
static const struct attribute_group thermal_extra_group = {
	.attrs		= thermal_extra_attrs,
	.is_visible	= thermal_extra_attrs_visible,
};
__ATTRIBUTE_GROUPS(thermal_extra);
313
/*
 * Init callback for the thermal management sub-feature: registers a
 * devm-managed hwmon device (no explicit teardown needed).
 */
static int fme_thermal_mgmt_init(struct platform_device *pdev,
				 struct dfl_feature *feature)
{
	struct device *hwmon;

	/*
	 * create hwmon to allow userspace monitoring temperature and other
	 * threshold information.
	 *
	 * temp1_input      -> FPGA device temperature
	 * temp1_max        -> hardware threshold 1 -> 50% or 90% throttling
	 * temp1_crit       -> hardware threshold 2 -> 100% throttling
	 * temp1_emergency  -> hardware trip_threshold to shutdown FPGA
	 * temp1_max_alarm  -> hardware threshold 1 alarm
	 * temp1_crit_alarm -> hardware threshold 2 alarm
	 *
	 * create device specific sysfs interfaces, e.g. read temp1_max_policy
	 * to understand the actual hardware throttling action (50% vs 90%).
	 *
	 * If hardware doesn't support automatic throttling per thresholds,
	 * then all above sysfs interfaces are not visible except temp1_input
	 * for temperature.
	 */
	hwmon = devm_hwmon_device_register_with_info(&pdev->dev,
						     "dfl_fme_thermal", feature,
						     &thermal_hwmon_chip_info,
						     thermal_extra_groups);
	if (IS_ERR(hwmon)) {
		dev_err(&pdev->dev, "Fail to register thermal hwmon\n");
		return PTR_ERR(hwmon);
	}

	return 0;
}
348
/* Feature id table and ops for the thermal management sub-feature. */
static const struct dfl_feature_id fme_thermal_mgmt_id_table[] = {
	{.id = FME_FEATURE_ID_THERMAL_MGMT,},
	{0,}
};

static const struct dfl_feature_ops fme_thermal_mgmt_ops = {
	.init = fme_thermal_mgmt_init,
};
357
/* Power management feature register layout and bitfields. */
#define FME_PWR_STATUS		0x8
#define FME_LATENCY_TOLERANCE	BIT_ULL(18)
#define PWR_CONSUMED		GENMASK_ULL(17, 0)

#define FME_PWR_THRESHOLD	0x10
#define PWR_THRESHOLD1		GENMASK_ULL(6, 0)	/* in Watts */
#define PWR_THRESHOLD2		GENMASK_ULL(14, 8)	/* in Watts */
#define PWR_THRESHOLD_MAX	0x7f			/* in Watts */
#define PWR_THRESHOLD1_STATUS	BIT_ULL(16)
#define PWR_THRESHOLD2_STATUS	BIT_ULL(17)

#define FME_PWR_XEON_LIMIT	0x18
#define XEON_PWR_LIMIT		GENMASK_ULL(14, 0)	/* in 0.1 Watts */
#define XEON_PWR_EN		BIT_ULL(15)
#define FME_PWR_FPGA_LIMIT	0x20
#define FPGA_PWR_LIMIT		GENMASK_ULL(14, 0)	/* in 0.1 Watts */
#define FPGA_PWR_EN		BIT_ULL(15)
375
/*
 * hwmon read callback for power attributes.
 *
 * Hardware reports Watts; hwmon expects microWatts, hence the * 1000000.
 * Alarm attributes report raw status bits (0/1).
 */
static int power_hwmon_read(struct device *dev, enum hwmon_sensor_types type,
			    u32 attr, int channel, long *val)
{
	struct dfl_feature *feature = dev_get_drvdata(dev);
	u64 v;

	switch (attr) {
	case hwmon_power_input:
		v = readq(feature->ioaddr + FME_PWR_STATUS);
		*val = (long)(FIELD_GET(PWR_CONSUMED, v) * 1000000);
		break;
	case hwmon_power_max:
		v = readq(feature->ioaddr + FME_PWR_THRESHOLD);
		*val = (long)(FIELD_GET(PWR_THRESHOLD1, v) * 1000000);
		break;
	case hwmon_power_crit:
		v = readq(feature->ioaddr + FME_PWR_THRESHOLD);
		*val = (long)(FIELD_GET(PWR_THRESHOLD2, v) * 1000000);
		break;
	case hwmon_power_max_alarm:
		v = readq(feature->ioaddr + FME_PWR_THRESHOLD);
		*val = (long)FIELD_GET(PWR_THRESHOLD1_STATUS, v);
		break;
	case hwmon_power_crit_alarm:
		v = readq(feature->ioaddr + FME_PWR_THRESHOLD);
		*val = (long)FIELD_GET(PWR_THRESHOLD2_STATUS, v);
		break;
	default:
		return -EOPNOTSUPP;
	}

	return 0;
}
409
/*
 * hwmon write callback: programs power thresholds 1/2.
 *
 * Input is in microWatts; it is converted to Watts and clamped to the
 * 7-bit hardware maximum. pdata->lock serializes the read-modify-write
 * of the shared threshold register (pdata comes from the parent feature
 * device, as dev here is the hwmon child).
 */
static int power_hwmon_write(struct device *dev, enum hwmon_sensor_types type,
			     u32 attr, int channel, long val)
{
	struct dfl_feature_platform_data *pdata = dev_get_platdata(dev->parent);
	struct dfl_feature *feature = dev_get_drvdata(dev);
	int ret = 0;
	u64 v;

	val = clamp_val(val / 1000000, 0, PWR_THRESHOLD_MAX);

	mutex_lock(&pdata->lock);

	switch (attr) {
	case hwmon_power_max:
		v = readq(feature->ioaddr + FME_PWR_THRESHOLD);
		v &= ~PWR_THRESHOLD1;
		v |= FIELD_PREP(PWR_THRESHOLD1, val);
		writeq(v, feature->ioaddr + FME_PWR_THRESHOLD);
		break;
	case hwmon_power_crit:
		v = readq(feature->ioaddr + FME_PWR_THRESHOLD);
		v &= ~PWR_THRESHOLD2;
		v |= FIELD_PREP(PWR_THRESHOLD2, val);
		writeq(v, feature->ioaddr + FME_PWR_THRESHOLD);
		break;
	default:
		ret = -EOPNOTSUPP;
		break;
	}

	mutex_unlock(&pdata->lock);

	return ret;
}
444
445static umode_t power_hwmon_attrs_visible(const void *drvdata,
446 enum hwmon_sensor_types type,
447 u32 attr, int channel)
448{
449 switch (attr) {
450 case hwmon_power_input:
451 case hwmon_power_max_alarm:
452 case hwmon_power_crit_alarm:
453 return 0444;
454 case hwmon_power_max:
455 case hwmon_power_crit:
456 return 0644;
457 }
458
459 return 0;
460}
461
/* hwmon chip description for the power sensor: one power channel. */
static const struct hwmon_ops power_hwmon_ops = {
	.is_visible = power_hwmon_attrs_visible,
	.read = power_hwmon_read,
	.write = power_hwmon_write,
};

static const struct hwmon_channel_info *power_hwmon_info[] = {
	HWMON_CHANNEL_INFO(power, HWMON_P_INPUT |
				  HWMON_P_MAX  | HWMON_P_MAX_ALARM |
				  HWMON_P_CRIT | HWMON_P_CRIT_ALARM),
	NULL
};

static const struct hwmon_chip_info power_hwmon_chip_info = {
	.ops = &power_hwmon_ops,
	.info = power_hwmon_info,
};
479
480static ssize_t power1_xeon_limit_show(struct device *dev,
481 struct device_attribute *attr, char *buf)
482{
483 struct dfl_feature *feature = dev_get_drvdata(dev);
484 u16 xeon_limit = 0;
485 u64 v;
486
487 v = readq(feature->ioaddr + FME_PWR_XEON_LIMIT);
488
489 if (FIELD_GET(XEON_PWR_EN, v))
490 xeon_limit = FIELD_GET(XEON_PWR_LIMIT, v);
491
492 return sprintf(buf, "%u\n", xeon_limit * 100000);
493}
494
495static ssize_t power1_fpga_limit_show(struct device *dev,
496 struct device_attribute *attr, char *buf)
497{
498 struct dfl_feature *feature = dev_get_drvdata(dev);
499 u16 fpga_limit = 0;
500 u64 v;
501
502 v = readq(feature->ioaddr + FME_PWR_FPGA_LIMIT);
503
504 if (FIELD_GET(FPGA_PWR_EN, v))
505 fpga_limit = FIELD_GET(FPGA_PWR_LIMIT, v);
506
507 return sprintf(buf, "%u\n", fpga_limit * 100000);
508}
509
510static ssize_t power1_ltr_show(struct device *dev,
511 struct device_attribute *attr, char *buf)
512{
513 struct dfl_feature *feature = dev_get_drvdata(dev);
514 u64 v;
515
516 v = readq(feature->ioaddr + FME_PWR_STATUS);
517
518 return sprintf(buf, "%u\n",
519 (unsigned int)FIELD_GET(FME_LATENCY_TOLERANCE, v));
520}
521
522static DEVICE_ATTR_RO(power1_xeon_limit);
523static DEVICE_ATTR_RO(power1_fpga_limit);
524static DEVICE_ATTR_RO(power1_ltr);
525
/* Device-specific attrs attached to the power hwmon device. */
static struct attribute *power_extra_attrs[] = {
	&dev_attr_power1_xeon_limit.attr,
	&dev_attr_power1_fpga_limit.attr,
	&dev_attr_power1_ltr.attr,
	NULL
};

ATTRIBUTE_GROUPS(power_extra);
534
535static int fme_power_mgmt_init(struct platform_device *pdev,
536 struct dfl_feature *feature)
537{
538 struct device *hwmon;
539
540 hwmon = devm_hwmon_device_register_with_info(&pdev->dev,
541 "dfl_fme_power", feature,
542 &power_hwmon_chip_info,
543 power_extra_groups);
544 if (IS_ERR(hwmon)) {
545 dev_err(&pdev->dev, "Fail to register power hwmon\n");
546 return PTR_ERR(hwmon);
547 }
548
549 return 0;
550}
551
/* Feature id table and ops for the power management sub-feature. */
static const struct dfl_feature_id fme_power_mgmt_id_table[] = {
	{.id = FME_FEATURE_ID_POWER_MGMT,},
	{0,}
};

static const struct dfl_feature_ops fme_power_mgmt_ops = {
	.init = fme_power_mgmt_init,
};
560
/*
 * All FME sub-feature drivers; the dfl core matches each against the
 * features enumerated from the DFL. Terminated by a NULL .ops entry.
 * (pr_mgmt, global_err and perf tables/ops are declared in dfl-fme.h.)
 */
static struct dfl_feature_driver fme_feature_drvs[] = {
	{
		.id_table = fme_hdr_id_table,
		.ops = &fme_hdr_ops,
	},
	{
		.id_table = fme_pr_mgmt_id_table,
		.ops = &fme_pr_mgmt_ops,
	},
	{
		.id_table = fme_global_err_id_table,
		.ops = &fme_global_err_ops,
	},
	{
		.id_table = fme_thermal_mgmt_id_table,
		.ops = &fme_thermal_mgmt_ops,
	},
	{
		.id_table = fme_power_mgmt_id_table,
		.ops = &fme_power_mgmt_ops,
	},
	{
		.id_table = fme_perf_id_table,
		.ops = &fme_perf_ops,
	},
	{
		.ops = NULL,
	},
};
590
/* DFL_FPGA_CHECK_EXTENSION handler: 0 means no extensions supported yet. */
static long fme_ioctl_check_extension(struct dfl_feature_platform_data *pdata,
				      unsigned long arg)
{
	/* No extension support for now */
	return 0;
}
597
/*
 * Char device open: resolves the feature device from the inode and takes
 * a usage reference. O_EXCL requests exclusive access; pdata->lock
 * serializes the use count against other open/release calls.
 */
static int fme_open(struct inode *inode, struct file *filp)
{
	struct platform_device *fdev = dfl_fpga_inode_to_feature_dev(inode);
	struct dfl_feature_platform_data *pdata = dev_get_platdata(&fdev->dev);
	int ret;

	if (WARN_ON(!pdata))
		return -ENODEV;

	mutex_lock(&pdata->lock);
	ret = dfl_feature_dev_use_begin(pdata, filp->f_flags & O_EXCL);
	if (!ret) {
		dev_dbg(&fdev->dev, "Device File Opened %d Times\n",
			dfl_feature_dev_use_count(pdata));
		filp->private_data = pdata;
	}
	mutex_unlock(&pdata->lock);

	return ret;
}
618
/*
 * Char device release: drops the usage reference and, on the last close,
 * disarms all sub-feature interrupt triggers so no stale eventfds remain.
 */
static int fme_release(struct inode *inode, struct file *filp)
{
	struct dfl_feature_platform_data *pdata = filp->private_data;
	struct platform_device *pdev = pdata->dev;
	struct dfl_feature *feature;

	dev_dbg(&pdev->dev, "Device File Release\n");

	mutex_lock(&pdata->lock);
	dfl_feature_dev_use_end(pdata);

	if (!dfl_feature_dev_use_count(pdata))
		dfl_fpga_dev_for_each_feature(pdata, feature)
			dfl_fpga_set_irq_triggers(feature, 0,
						  feature->nr_irqs, NULL);
	mutex_unlock(&pdata->lock);

	return 0;
}
638
/*
 * Top-level FME ioctl: handles the generic DFL commands itself, then
 * gives every sub-feature a chance to claim the command.
 */
static long fme_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
{
	struct dfl_feature_platform_data *pdata = filp->private_data;
	struct platform_device *pdev = pdata->dev;
	struct dfl_feature *f;
	long ret;

	dev_dbg(&pdev->dev, "%s cmd 0x%x\n", __func__, cmd);

	switch (cmd) {
	case DFL_FPGA_GET_API_VERSION:
		return DFL_FPGA_API_VERSION;
	case DFL_FPGA_CHECK_EXTENSION:
		return fme_ioctl_check_extension(pdata, arg);
	default:
		/*
		 * Let sub-feature's ioctl function to handle the cmd.
		 * Sub-feature's ioctl returns -ENODEV when cmd is not
		 * handled in this sub feature, and returns 0 or other
		 * error code if cmd is handled.
		 */
		dfl_fpga_dev_for_each_feature(pdata, f) {
			if (f->ops && f->ops->ioctl) {
				ret = f->ops->ioctl(pdev, f, cmd, arg);
				if (ret != -ENODEV)
					return ret;
			}
		}
	}

	/* no handler claimed the command */
	return -EINVAL;
}
671
Kang Luwei29de7622018-06-30 08:53:24 +0800672static int fme_dev_init(struct platform_device *pdev)
673{
674 struct dfl_feature_platform_data *pdata = dev_get_platdata(&pdev->dev);
675 struct dfl_fme *fme;
676
677 fme = devm_kzalloc(&pdev->dev, sizeof(*fme), GFP_KERNEL);
678 if (!fme)
679 return -ENOMEM;
680
681 fme->pdata = pdata;
682
683 mutex_lock(&pdata->lock);
684 dfl_fpga_pdata_set_private(pdata, fme);
685 mutex_unlock(&pdata->lock);
686
687 return 0;
688}
689
/*
 * Unpublish the per-device state; the dfl_fme allocation itself is
 * devm-managed and released with the device.
 */
static void fme_dev_destroy(struct platform_device *pdev)
{
	struct dfl_feature_platform_data *pdata = dev_get_platdata(&pdev->dev);

	mutex_lock(&pdata->lock);
	dfl_fpga_pdata_set_private(pdata, NULL);
	mutex_unlock(&pdata->lock);
}
698
/* Char device file operations for the FME device node. */
static const struct file_operations fme_fops = {
	.owner		= THIS_MODULE,
	.open		= fme_open,
	.release	= fme_release,
	.unlocked_ioctl	= fme_ioctl,
};
705
/*
 * Platform driver probe: device state -> sub-feature init -> char device
 * registration, unwound in reverse order on failure (goto cleanup chain).
 */
static int fme_probe(struct platform_device *pdev)
{
	int ret;

	ret = fme_dev_init(pdev);
	if (ret)
		goto exit;

	ret = dfl_fpga_dev_feature_init(pdev, fme_feature_drvs);
	if (ret)
		goto dev_destroy;

	ret = dfl_fpga_dev_ops_register(pdev, &fme_fops, THIS_MODULE);
	if (ret)
		goto feature_uinit;

	return 0;

feature_uinit:
	dfl_fpga_dev_feature_uinit(pdev);
dev_destroy:
	fme_dev_destroy(pdev);
exit:
	return ret;
}
731
/* Platform driver remove: exact reverse of the probe sequence. */
static int fme_remove(struct platform_device *pdev)
{
	dfl_fpga_dev_ops_unregister(pdev);
	dfl_fpga_dev_feature_uinit(pdev);
	fme_dev_destroy(pdev);

	return 0;
}
740
/* sysfs groups installed by the driver core on every bound FME device. */
static const struct attribute_group *fme_dev_groups[] = {
	&fme_hdr_group,
	&fme_global_err_group,
	NULL
};
746
/* Platform driver matched by name against DFL-enumerated FME devices. */
static struct platform_driver fme_driver = {
	.driver	= {
		.name       = DFL_FPGA_FEATURE_DEV_FME,
		.dev_groups = fme_dev_groups,
	},
	.probe   = fme_probe,
	.remove  = fme_remove,
};

module_platform_driver(fme_driver);

MODULE_DESCRIPTION("FPGA Management Engine driver");
MODULE_AUTHOR("Intel Corporation");
MODULE_LICENSE("GPL v2");
MODULE_ALIAS("platform:dfl-fme");