blob: c0512afc4ed71025d64f7178068cdd7c767d0021 [file] [log] [blame]
Wu Hao543be3d2018-06-30 08:53:13 +08001// SPDX-License-Identifier: GPL-2.0
2/*
3 * Driver for FPGA Device Feature List (DFL) Support
4 *
5 * Copyright (C) 2017-2018 Intel Corporation, Inc.
6 *
7 * Authors:
8 * Kang Luwei <luwei.kang@intel.com>
9 * Zhang Yi <yi.z.zhang@intel.com>
10 * Wu Hao <hao.wu@intel.com>
11 * Xiao Guangrong <guangrong.xiao@linux.intel.com>
12 */
13#include <linux/module.h>
14
15#include "dfl.h"
16
/* protects the per-type idr allocators in dfl_devs[] below */
static DEFINE_MUTEX(dfl_id_mutex);

/*
 * when adding a new feature dev support in DFL framework, it's required to
 * add a new item in enum dfl_id_type and provide related information in below
 * dfl_devs table which is indexed by dfl_id_type, e.g. name string used for
 * platform device creation (define name strings in dfl.h, as they could be
 * reused by platform device drivers).
 *
 * if the new feature dev needs chardev support, then it's required to add
 * a new item in dfl_chardevs table and configure dfl_devs[i].devt_type as
 * index to dfl_chardevs table. If no chardev support just set devt_type
 * as one invalid index (DFL_FPGA_DEVT_MAX).
 */
enum dfl_id_type {
	FME_ID,		/* fme id allocation and mapping */
	PORT_ID,	/* port id allocation and mapping */
	DFL_ID_MAX,
};

enum dfl_fpga_devt_type {
	DFL_FPGA_DEVT_FME,
	DFL_FPGA_DEVT_PORT,
	DFL_FPGA_DEVT_MAX,
};

/*
 * per-feature-dev-type lockdep classes for pdata->lock: FME and PORT pdata
 * mutexes can nest, so each type needs its own class to avoid false-positive
 * lockdep reports.
 */
static struct lock_class_key dfl_pdata_keys[DFL_ID_MAX];

static const char *dfl_pdata_key_strings[DFL_ID_MAX] = {
	"dfl-fme-pdata",
	"dfl-port-pdata",
};
49
/**
 * struct dfl_dev_info - dfl feature device information.
 * @name: name string of the feature platform device.
 * @dfh_id: id value in Device Feature Header (DFH) register by DFL spec.
 * @id: idr id of the feature dev.
 * @devt_type: index to dfl_chrdevs[].
 */
struct dfl_dev_info {
	const char *name;
	u32 dfh_id;
	struct idr id;
	enum dfl_fpga_devt_type devt_type;
};

/* it is indexed by dfl_id_type */
static struct dfl_dev_info dfl_devs[] = {
	{.name = DFL_FPGA_FEATURE_DEV_FME, .dfh_id = DFH_ID_FIU_FME,
	 .devt_type = DFL_FPGA_DEVT_FME},
	{.name = DFL_FPGA_FEATURE_DEV_PORT, .dfh_id = DFH_ID_FIU_PORT,
	 .devt_type = DFL_FPGA_DEVT_PORT},
};

/**
 * struct dfl_chardev_info - chardev information of dfl feature device
 * @name: name string of the char device.
 * @devt: devt of the char device.
 */
struct dfl_chardev_info {
	const char *name;
	dev_t devt;
};

/* indexed by enum dfl_fpga_devt_type */
static struct dfl_chardev_info dfl_chrdevs[] = {
	{.name = DFL_FPGA_FEATURE_DEV_FME},
	{.name = DFL_FPGA_FEATURE_DEV_PORT},
};
87
88static void dfl_ids_init(void)
89{
90 int i;
91
92 for (i = 0; i < ARRAY_SIZE(dfl_devs); i++)
93 idr_init(&dfl_devs[i].id);
94}
95
96static void dfl_ids_destroy(void)
97{
98 int i;
99
100 for (i = 0; i < ARRAY_SIZE(dfl_devs); i++)
101 idr_destroy(&dfl_devs[i].id);
102}
103
104static int dfl_id_alloc(enum dfl_id_type type, struct device *dev)
105{
106 int id;
107
108 WARN_ON(type >= DFL_ID_MAX);
109 mutex_lock(&dfl_id_mutex);
110 id = idr_alloc(&dfl_devs[type].id, dev, 0, 0, GFP_KERNEL);
111 mutex_unlock(&dfl_id_mutex);
112
113 return id;
114}
115
116static void dfl_id_free(enum dfl_id_type type, int id)
117{
118 WARN_ON(type >= DFL_ID_MAX);
119 mutex_lock(&dfl_id_mutex);
120 idr_remove(&dfl_devs[type].id, id);
121 mutex_unlock(&dfl_id_mutex);
122}
123
124static enum dfl_id_type feature_dev_id_type(struct platform_device *pdev)
125{
126 int i;
127
128 for (i = 0; i < ARRAY_SIZE(dfl_devs); i++)
129 if (!strcmp(dfl_devs[i].name, pdev->name))
130 return i;
131
132 return DFL_ID_MAX;
133}
134
135static enum dfl_id_type dfh_id_to_type(u32 id)
136{
137 int i;
138
139 for (i = 0; i < ARRAY_SIZE(dfl_devs); i++)
140 if (dfl_devs[i].dfh_id == id)
141 return i;
142
143 return DFL_ID_MAX;
144}
145
/*
 * introduce a global port_ops list, it allows port drivers to register ops
 * in such list, then other feature devices (e.g. FME), could use the port
 * functions even related port platform device is hidden. Below is one example,
 * in virtualization case of PCIe-based FPGA DFL device, when SRIOV is
 * enabled, port (and its AFU) is turned into VF and port platform device
 * is hidden from system but it's still required to access port to finish FPGA
 * reconfiguration function in FME.
 */

/* protects dfl_port_ops_list below */
static DEFINE_MUTEX(dfl_port_ops_mutex);
static LIST_HEAD(dfl_port_ops_list);
158
159/**
160 * dfl_fpga_port_ops_get - get matched port ops from the global list
161 * @pdev: platform device to match with associated port ops.
162 * Return: matched port ops on success, NULL otherwise.
163 *
164 * Please note that must dfl_fpga_port_ops_put after use the port_ops.
165 */
166struct dfl_fpga_port_ops *dfl_fpga_port_ops_get(struct platform_device *pdev)
167{
168 struct dfl_fpga_port_ops *ops = NULL;
169
170 mutex_lock(&dfl_port_ops_mutex);
171 if (list_empty(&dfl_port_ops_list))
172 goto done;
173
174 list_for_each_entry(ops, &dfl_port_ops_list, node) {
175 /* match port_ops using the name of platform device */
176 if (!strcmp(pdev->name, ops->name)) {
177 if (!try_module_get(ops->owner))
178 ops = NULL;
179 goto done;
180 }
181 }
182
183 ops = NULL;
184done:
185 mutex_unlock(&dfl_port_ops_mutex);
186 return ops;
187}
188EXPORT_SYMBOL_GPL(dfl_fpga_port_ops_get);
189
190/**
191 * dfl_fpga_port_ops_put - put port ops
192 * @ops: port ops.
193 */
194void dfl_fpga_port_ops_put(struct dfl_fpga_port_ops *ops)
195{
196 if (ops && ops->owner)
197 module_put(ops->owner);
198}
199EXPORT_SYMBOL_GPL(dfl_fpga_port_ops_put);
200
/**
 * dfl_fpga_port_ops_add - add port_ops to global list
 * @ops: port ops to add.
 *
 * Called by port platform drivers at probe/init time; the entry stays on the
 * list until dfl_fpga_port_ops_del() removes it.
 */
void dfl_fpga_port_ops_add(struct dfl_fpga_port_ops *ops)
{
	mutex_lock(&dfl_port_ops_mutex);
	list_add_tail(&ops->node, &dfl_port_ops_list);
	mutex_unlock(&dfl_port_ops_mutex);
}
EXPORT_SYMBOL_GPL(dfl_fpga_port_ops_add);
212
/**
 * dfl_fpga_port_ops_del - remove port_ops from global list
 * @ops: port ops to del.
 *
 * Counterpart of dfl_fpga_port_ops_add(); must be called before the owning
 * module goes away.
 */
void dfl_fpga_port_ops_del(struct dfl_fpga_port_ops *ops)
{
	mutex_lock(&dfl_port_ops_mutex);
	list_del(&ops->node);
	mutex_unlock(&dfl_port_ops_mutex);
}
EXPORT_SYMBOL_GPL(dfl_fpga_port_ops_del);
224
/**
 * dfl_fpga_check_port_id - check the port id
 * @pdev: port platform device.
 * @pport_id: port id to compare.
 *
 * Return: 1 if port device matches with given port id, otherwise 0.
 */
int dfl_fpga_check_port_id(struct platform_device *pdev, void *pport_id)
{
	struct dfl_feature_platform_data *pdata = dev_get_platdata(&pdev->dev);
	struct dfl_fpga_port_ops *port_ops;

	/* fast path: use the cached id if it was already read from hardware */
	if (pdata->id != FEATURE_DEV_ID_UNUSED)
		return pdata->id == *(int *)pport_id;

	/* slow path: query the id via the registered port ops and cache it */
	port_ops = dfl_fpga_port_ops_get(pdev);
	if (!port_ops || !port_ops->get_id)
		return 0;

	pdata->id = port_ops->get_id(pdev);
	dfl_fpga_port_ops_put(port_ops);

	return pdata->id == *(int *)pport_id;
}
EXPORT_SYMBOL_GPL(dfl_fpga_check_port_id);
250
/**
 * dfl_fpga_dev_feature_uinit - uinit for sub features of dfl feature device
 * @pdev: feature device.
 *
 * Calls each initialized sub feature's uinit hook (if any) and clears
 * feature->ops so a feature is never torn down twice.
 */
void dfl_fpga_dev_feature_uinit(struct platform_device *pdev)
{
	struct dfl_feature_platform_data *pdata = dev_get_platdata(&pdev->dev);
	struct dfl_feature *feature;

	dfl_fpga_dev_for_each_feature(pdata, feature)
		if (feature->ops) {
			if (feature->ops->uinit)
				feature->ops->uinit(pdev, feature);
			feature->ops = NULL;
		}
}
EXPORT_SYMBOL_GPL(dfl_fpga_dev_feature_uinit);
268
269static int dfl_feature_instance_init(struct platform_device *pdev,
270 struct dfl_feature_platform_data *pdata,
271 struct dfl_feature *feature,
272 struct dfl_feature_driver *drv)
273{
274 int ret;
275
276 ret = drv->ops->init(pdev, feature);
277 if (ret)
278 return ret;
279
280 feature->ops = drv->ops;
281
282 return ret;
283}
284
Wu Hao15bbb302019-08-04 18:20:15 +0800285static bool dfl_feature_drv_match(struct dfl_feature *feature,
286 struct dfl_feature_driver *driver)
287{
288 const struct dfl_feature_id *ids = driver->id_table;
289
290 if (ids) {
291 while (ids->id) {
292 if (ids->id == feature->id)
293 return true;
294 ids++;
295 }
296 }
297 return false;
298}
299
/**
 * dfl_fpga_dev_feature_init - init for sub features of dfl feature device
 * @pdev: feature device.
 * @feature_drvs: drvs for sub features.
 *
 * This function will match sub features with given feature drvs list and
 * use matched drv to init related sub feature.
 *
 * On any sub feature init failure, all already-initialized sub features are
 * torn down via dfl_fpga_dev_feature_uinit() before returning.
 *
 * Return: 0 on success, negative error code otherwise.
 */
int dfl_fpga_dev_feature_init(struct platform_device *pdev,
			      struct dfl_feature_driver *feature_drvs)
{
	struct dfl_feature_platform_data *pdata = dev_get_platdata(&pdev->dev);
	struct dfl_feature_driver *drv = feature_drvs;
	struct dfl_feature *feature;
	int ret;

	/* drv table is terminated by an entry with NULL ops */
	while (drv->ops) {
		dfl_fpga_dev_for_each_feature(pdata, feature) {
			if (dfl_feature_drv_match(feature, drv)) {
				ret = dfl_feature_instance_init(pdev, pdata,
								feature, drv);
				if (ret)
					goto exit;
			}
		}
		drv++;
	}

	return 0;
exit:
	dfl_fpga_dev_feature_uinit(pdev);
	return ret;
}
EXPORT_SYMBOL_GPL(dfl_fpga_dev_feature_init);
336
Wu Haob16c5142018-06-30 08:53:14 +0800337static void dfl_chardev_uinit(void)
338{
339 int i;
340
341 for (i = 0; i < DFL_FPGA_DEVT_MAX; i++)
342 if (MAJOR(dfl_chrdevs[i].devt)) {
343 unregister_chrdev_region(dfl_chrdevs[i].devt,
Chengguang Xude9a7f62019-05-09 16:08:29 -0500344 MINORMASK + 1);
Wu Haob16c5142018-06-30 08:53:14 +0800345 dfl_chrdevs[i].devt = MKDEV(0, 0);
346 }
347}
348
349static int dfl_chardev_init(void)
350{
351 int i, ret;
352
353 for (i = 0; i < DFL_FPGA_DEVT_MAX; i++) {
Chengguang Xude9a7f62019-05-09 16:08:29 -0500354 ret = alloc_chrdev_region(&dfl_chrdevs[i].devt, 0,
355 MINORMASK + 1, dfl_chrdevs[i].name);
Wu Haob16c5142018-06-30 08:53:14 +0800356 if (ret)
357 goto exit;
358 }
359
360 return 0;
361
362exit:
363 dfl_chardev_uinit();
364 return ret;
365}
366
367static dev_t dfl_get_devt(enum dfl_fpga_devt_type type, int id)
368{
369 if (type >= DFL_FPGA_DEVT_MAX)
370 return 0;
371
372 return MKDEV(MAJOR(dfl_chrdevs[type].devt), id);
373}
374
/**
 * dfl_fpga_dev_ops_register - register cdev ops for feature dev
 *
 * @pdev: feature dev.
 * @fops: file operations for feature dev's cdev.
 * @owner: owning module/driver.
 *
 * Return: 0 on success, negative error code otherwise.
 */
int dfl_fpga_dev_ops_register(struct platform_device *pdev,
			      const struct file_operations *fops,
			      struct module *owner)
{
	struct dfl_feature_platform_data *pdata = dev_get_platdata(&pdev->dev);

	cdev_init(&pdata->cdev, fops);
	pdata->cdev.owner = owner;

	/*
	 * set parent to the feature device so that its refcount is
	 * decreased after the last refcount of cdev is gone, that
	 * makes sure the feature device is valid during device
	 * file's life-cycle.
	 */
	pdata->cdev.kobj.parent = &pdev->dev.kobj;

	/* single minor per feature dev; devt was assigned at dev creation */
	return cdev_add(&pdata->cdev, pdev->dev.devt, 1);
}
EXPORT_SYMBOL_GPL(dfl_fpga_dev_ops_register);
404
405/**
406 * dfl_fpga_dev_ops_unregister - unregister cdev ops for feature dev
407 * @pdev: feature dev.
408 */
409void dfl_fpga_dev_ops_unregister(struct platform_device *pdev)
410{
411 struct dfl_feature_platform_data *pdata = dev_get_platdata(&pdev->dev);
412
413 cdev_del(&pdata->cdev);
414}
415EXPORT_SYMBOL_GPL(dfl_fpga_dev_ops_unregister);
416
/**
 * struct build_feature_devs_info - info collected during feature dev build.
 *
 * @dev: device to enumerate.
 * @cdev: the container device for all feature devices.
 * @feature_dev: current feature device.
 * @ioaddr: header register region address of feature device in enumeration.
 * @sub_features: a sub features linked list for feature device in enumeration.
 * @feature_num: number of sub features for feature device in enumeration.
 */
struct build_feature_devs_info {
	struct device *dev;
	struct dfl_fpga_cdev *cdev;
	struct platform_device *feature_dev;
	void __iomem *ioaddr;
	struct list_head sub_features;
	int feature_num;
};

/**
 * struct dfl_feature_info - sub feature info collected during feature dev build
 *
 * @fid: id of this sub feature.
 * @mmio_res: mmio resource of this sub feature.
 * @ioaddr: mapped base address of mmio resource.
 * @node: node in sub_features linked list.
 */
struct dfl_feature_info {
	u64 fid;
	struct resource mmio_res;
	void __iomem *ioaddr;
	struct list_head node;
};
450
/*
 * track a registered port device on the container's port list; takes an
 * extra device reference that is dropped in dfl_fpga_feature_devs_remove().
 */
static void dfl_fpga_cdev_add_port_dev(struct dfl_fpga_cdev *cdev,
				       struct platform_device *port)
{
	struct dfl_feature_platform_data *pdata = dev_get_platdata(&port->dev);

	mutex_lock(&cdev->lock);
	list_add(&pdata->node, &cdev->port_dev_list);
	get_device(&pdata->dev->dev);
	mutex_unlock(&cdev->lock);
}
461
/*
 * register current feature device, it is called when we need to switch to
 * another feature parsing or we have parsed all features on given device
 * feature list.
 */
static int build_info_commit_dev(struct build_feature_devs_info *binfo)
{
	struct platform_device *fdev = binfo->feature_dev;
	struct dfl_feature_platform_data *pdata;
	struct dfl_feature_info *finfo, *p;
	enum dfl_id_type type;
	int ret, index = 0;

	/* nothing to commit if no feature device is being built */
	if (!fdev)
		return 0;

	type = feature_dev_id_type(fdev);
	if (WARN_ON_ONCE(type >= DFL_ID_MAX))
		return -EINVAL;

	/*
	 * we do not need to care for the memory which is associated with
	 * the platform device. After calling platform_device_unregister(),
	 * it will be automatically freed by device's release() callback,
	 * platform_device_release().
	 */
	pdata = kzalloc(dfl_feature_platform_data_size(binfo->feature_num),
			GFP_KERNEL);
	if (!pdata)
		return -ENOMEM;

	pdata->dev = fdev;
	pdata->num = binfo->feature_num;
	pdata->dfl_cdev = binfo->cdev;
	/* real port id is filled in lazily by dfl_fpga_check_port_id() */
	pdata->id = FEATURE_DEV_ID_UNUSED;
	mutex_init(&pdata->lock);
	/* separate lockdep class per feature dev type (FME vs PORT) */
	lockdep_set_class_and_name(&pdata->lock, &dfl_pdata_keys[type],
				   dfl_pdata_key_strings[type]);

	/*
	 * the count should be initialized to 0 to make sure
	 *__fpga_port_enable() following __fpga_port_disable()
	 * works properly for port device.
	 * and it should always be 0 for fme device.
	 */
	WARN_ON(pdata->disable_count);

	fdev->dev.platform_data = pdata;

	/* each sub feature has one MMIO resource */
	fdev->num_resources = binfo->feature_num;
	fdev->resource = kcalloc(binfo->feature_num, sizeof(*fdev->resource),
				 GFP_KERNEL);
	if (!fdev->resource)
		return -ENOMEM;

	/* fill features and resource information for feature dev */
	list_for_each_entry_safe(finfo, p, &binfo->sub_features, node) {
		struct dfl_feature *feature = &pdata->features[index];

		/* save resource information for each feature */
		feature->id = finfo->fid;
		feature->resource_index = index;
		feature->ioaddr = finfo->ioaddr;
		fdev->resource[index++] = finfo->mmio_res;

		list_del(&finfo->node);
		kfree(finfo);
	}

	ret = platform_device_add(binfo->feature_dev);
	if (!ret) {
		if (type == PORT_ID)
			dfl_fpga_cdev_add_port_dev(binfo->cdev,
						   binfo->feature_dev);
		else
			binfo->cdev->fme_dev =
					get_device(&binfo->feature_dev->dev);
		/*
		 * reset it to avoid build_info_free() freeing their resource.
		 *
		 * The resource of successfully registered feature devices
		 * will be freed by platform_device_unregister(). See the
		 * comments in build_info_create_dev().
		 */
		binfo->feature_dev = NULL;
	}

	return ret;
}
552
/*
 * start building a new feature platform device of @type whose header regs
 * live at @ioaddr; any device currently under construction is committed
 * first, so at most one feature dev is in-flight per binfo.
 */
static int
build_info_create_dev(struct build_feature_devs_info *binfo,
		      enum dfl_id_type type, void __iomem *ioaddr)
{
	struct platform_device *fdev;
	int ret;

	if (type >= DFL_ID_MAX)
		return -EINVAL;

	/* we will create a new device, commit current device first */
	ret = build_info_commit_dev(binfo);
	if (ret)
		return ret;

	/*
	 * we use -ENODEV as the initialization indicator which indicates
	 * whether the id need to be reclaimed
	 */
	fdev = platform_device_alloc(dfl_devs[type].name, -ENODEV);
	if (!fdev)
		return -ENOMEM;

	binfo->feature_dev = fdev;
	binfo->feature_num = 0;
	binfo->ioaddr = ioaddr;
	INIT_LIST_HEAD(&binfo->sub_features);

	/* on failure fdev->id stays negative; build_info_free() checks this */
	fdev->id = dfl_id_alloc(type, &fdev->dev);
	if (fdev->id < 0)
		return fdev->id;

	fdev->dev.parent = &binfo->cdev->region->dev;
	fdev->dev.devt = dfl_get_devt(dfl_devs[type].devt_type, fdev->id);

	return 0;
}
590
/*
 * release everything still owned by the build info: an uncommitted feature
 * device (plus its id and collected sub feature infos) and binfo itself.
 */
static void build_info_free(struct build_feature_devs_info *binfo)
{
	struct dfl_feature_info *finfo, *p;

	/*
	 * it is a valid id, free it. See comments in
	 * build_info_create_dev()
	 */
	if (binfo->feature_dev && binfo->feature_dev->id >= 0) {
		dfl_id_free(feature_dev_id_type(binfo->feature_dev),
			    binfo->feature_dev->id);

		list_for_each_entry_safe(finfo, p, &binfo->sub_features, node) {
			list_del(&finfo->node);
			kfree(finfo);
		}
	}

	/* no-op when the device was committed (feature_dev set to NULL) */
	platform_device_put(binfo->feature_dev);

	devm_kfree(binfo->dev, binfo);
}
613
614static inline u32 feature_size(void __iomem *start)
615{
616 u64 v = readq(start + DFH);
617 u32 ofst = FIELD_GET(DFH_NEXT_HDR_OFST, v);
618 /* workaround for private features with invalid size, use 4K instead */
619 return ofst ? ofst : 4096;
620}
621
622static u64 feature_id(void __iomem *start)
623{
624 u64 v = readq(start + DFH);
625 u16 id = FIELD_GET(DFH_ID, v);
626 u8 type = FIELD_GET(DFH_TYPE, v);
627
628 if (type == DFH_TYPE_FIU)
629 return FEATURE_ID_FIU_HEADER;
630 else if (type == DFH_TYPE_PRIVATE)
631 return id;
632 else if (type == DFH_TYPE_AFU)
633 return FEATURE_ID_AFU;
634
635 WARN_ON(1);
636 return 0;
637}
638
/*
 * when create sub feature instances, for private features, it doesn't need
 * to provide resource size and feature id as they could be read from DFH
 * register. For afu sub feature, its register region only contains user
 * defined registers, so never trust any information from it, just use the
 * resource size information provided by its parent FIU.
 */
static int
create_feature_instance(struct build_feature_devs_info *binfo,
			struct dfl_fpga_enum_dfl *dfl, resource_size_t ofst,
			resource_size_t size, u64 fid)
{
	struct dfl_feature_info *finfo;

	/* read feature size and id if inputs are invalid */
	size = size ? size : feature_size(dfl->ioaddr + ofst);
	fid = fid ? fid : feature_id(dfl->ioaddr + ofst);

	/* reject features that would extend past the end of this DFL */
	if (dfl->len - ofst < size)
		return -EINVAL;

	finfo = kzalloc(sizeof(*finfo), GFP_KERNEL);
	if (!finfo)
		return -ENOMEM;

	finfo->fid = fid;
	finfo->mmio_res.start = dfl->start + ofst;
	finfo->mmio_res.end = finfo->mmio_res.start + size - 1;
	finfo->mmio_res.flags = IORESOURCE_MEM;
	finfo->ioaddr = dfl->ioaddr + ofst;

	/* queued on binfo; consumed and freed by build_info_commit_dev() */
	list_add_tail(&finfo->node, &binfo->sub_features);
	binfo->feature_num++;

	return 0;
}
675
/*
 * parse the AFU attached to a Port FIU; the AFU MMIO size comes from the
 * port's capability register, never from the (untrusted) AFU region itself.
 */
static int parse_feature_port_afu(struct build_feature_devs_info *binfo,
				  struct dfl_fpga_enum_dfl *dfl,
				  resource_size_t ofst)
{
	/* PORT_CAP_MMIO_SIZE is in KB units, hence the << 10 */
	u64 v = readq(binfo->ioaddr + PORT_HDR_CAP);
	u32 size = FIELD_GET(PORT_CAP_MMIO_SIZE, v) << 10;

	WARN_ON(!size);

	return create_feature_instance(binfo, dfl, ofst, size, FEATURE_ID_AFU);
}
687
/*
 * dispatch AFU parsing based on the FIU type of the feature device under
 * construction; currently only Port-attached AFUs are supported.
 */
static int parse_feature_afu(struct build_feature_devs_info *binfo,
			     struct dfl_fpga_enum_dfl *dfl,
			     resource_size_t ofst)
{
	if (!binfo->feature_dev) {
		dev_err(binfo->dev, "this AFU does not belong to any FIU.\n");
		return -EINVAL;
	}

	switch (feature_dev_id_type(binfo->feature_dev)) {
	case PORT_ID:
		return parse_feature_port_afu(binfo, dfl, ofst);
	default:
		dev_info(binfo->dev, "AFU belonging to FIU %s is not supported yet.\n",
			 binfo->feature_dev->name);
	}

	return 0;
}
707
/*
 * parse a FIU header: start a new feature platform device for it, record
 * the FIU header itself as the first sub feature, then chase the FIU's
 * NEXT_AFU pointer to its child AFU (ports only).
 */
static int parse_feature_fiu(struct build_feature_devs_info *binfo,
			     struct dfl_fpga_enum_dfl *dfl,
			     resource_size_t ofst)
{
	u32 id, offset;
	u64 v;
	int ret = 0;

	v = readq(dfl->ioaddr + ofst + DFH);
	id = FIELD_GET(DFH_ID, v);

	/* create platform device for dfl feature dev */
	ret = build_info_create_dev(binfo, dfh_id_to_type(id),
				    dfl->ioaddr + ofst);
	if (ret)
		return ret;

	/* size 0 / fid 0 => read both from the DFH register */
	ret = create_feature_instance(binfo, dfl, ofst, 0, 0);
	if (ret)
		return ret;
	/*
	 * find and parse FIU's child AFU via its NEXT_AFU register.
	 * please note that only Port has valid NEXT_AFU pointer per spec.
	 */
	v = readq(dfl->ioaddr + ofst + NEXT_AFU);

	offset = FIELD_GET(NEXT_AFU_NEXT_DFH_OFST, v);
	if (offset)
		return parse_feature_afu(binfo, dfl, ofst + offset);

	dev_dbg(binfo->dev, "No AFUs detected on FIU %d\n", id);

	return ret;
}
742
/*
 * parse a private feature; it is attached to the feature device currently
 * under construction, so one must exist.
 */
static int parse_feature_private(struct build_feature_devs_info *binfo,
				 struct dfl_fpga_enum_dfl *dfl,
				 resource_size_t ofst)
{
	if (!binfo->feature_dev) {
		dev_err(binfo->dev, "the private feature %llx does not belong to any AFU.\n",
			(unsigned long long)feature_id(dfl->ioaddr + ofst));
		return -EINVAL;
	}

	/* size and id are read from the private feature's own DFH */
	return create_feature_instance(binfo, dfl, ofst, 0, 0);
}
755
/**
 * parse_feature - parse a feature on given device feature list
 *
 * @binfo: build feature devices information.
 * @dfl: device feature list to parse
 * @ofst: offset to feature header on this device feature list
 *
 * Dispatches on the DFH type field; unknown types are logged and skipped
 * rather than treated as errors.
 */
static int parse_feature(struct build_feature_devs_info *binfo,
			 struct dfl_fpga_enum_dfl *dfl, resource_size_t ofst)
{
	u64 v;
	u32 type;

	v = readq(dfl->ioaddr + ofst + DFH);
	type = FIELD_GET(DFH_TYPE, v);

	switch (type) {
	case DFH_TYPE_AFU:
		return parse_feature_afu(binfo, dfl, ofst);
	case DFH_TYPE_PRIVATE:
		return parse_feature_private(binfo, dfl, ofst);
	case DFH_TYPE_FIU:
		return parse_feature_fiu(binfo, dfl, ofst);
	default:
		dev_info(binfo->dev,
			 "Feature Type %x is not supported.\n", type);
	}

	return 0;
}
786
/*
 * walk one device feature list from start to end, parsing each feature
 * header and following the next-DFH offsets until EOL or a zero offset;
 * the last feature device under construction is committed at the end.
 */
static int parse_feature_list(struct build_feature_devs_info *binfo,
			      struct dfl_fpga_enum_dfl *dfl)
{
	void __iomem *start = dfl->ioaddr;
	void __iomem *end = dfl->ioaddr + dfl->len;
	int ret = 0;
	u32 ofst = 0;
	u64 v;

	/* walk through the device feature list via DFH's next DFH pointer. */
	for (; start < end; start += ofst) {
		if (end - start < DFH_SIZE) {
			dev_err(binfo->dev, "The region is too small to contain a feature.\n");
			return -EINVAL;
		}

		ret = parse_feature(binfo, dfl, start - dfl->ioaddr);
		if (ret)
			return ret;

		v = readq(start + DFH);
		ofst = FIELD_GET(DFH_NEXT_HDR_OFST, v);

		/* stop parsing if EOL(End of List) is set or offset is 0 */
		if ((v & DFH_EOL) || !ofst)
			break;
	}

	/* commit current feature device when reach the end of list */
	return build_info_commit_dev(binfo);
}
818
/*
 * allocate an enumeration info structure tied to @dev's devres; takes a
 * device reference that dfl_fpga_enum_info_free() drops.
 */
struct dfl_fpga_enum_info *dfl_fpga_enum_info_alloc(struct device *dev)
{
	struct dfl_fpga_enum_info *info;

	get_device(dev);

	info = devm_kzalloc(dev, sizeof(*info), GFP_KERNEL);
	if (!info) {
		/* undo the reference taken above on allocation failure */
		put_device(dev);
		return NULL;
	}

	info->dev = dev;
	INIT_LIST_HEAD(&info->dfls);

	return info;
}
EXPORT_SYMBOL_GPL(dfl_fpga_enum_info_alloc);
837
/*
 * free an enumeration info structure, all DFL entries added to it, and
 * drop the device reference taken by dfl_fpga_enum_info_alloc(); a NULL
 * @info is a no-op.
 */
void dfl_fpga_enum_info_free(struct dfl_fpga_enum_info *info)
{
	struct dfl_fpga_enum_dfl *tmp, *dfl;
	struct device *dev;

	if (!info)
		return;

	dev = info->dev;

	/* remove all device feature lists in the list. */
	list_for_each_entry_safe(dfl, tmp, &info->dfls, node) {
		list_del(&dfl->node);
		devm_kfree(dev, dfl);
	}

	devm_kfree(dev, info);
	put_device(dev);
}
EXPORT_SYMBOL_GPL(dfl_fpga_enum_info_free);
858
/**
 * dfl_fpga_enum_info_add_dfl - add info of a device feature list to enum info
 *
 * @info: ptr to dfl_fpga_enum_info
 * @start: mmio resource address of the device feature list.
 * @len: mmio resource length of the device feature list.
 * @ioaddr: mapped mmio resource address of the device feature list.
 *
 * One FPGA device may have one or more Device Feature Lists (DFLs), use this
 * function to add information of each DFL to common data structure for next
 * step enumeration.
 *
 * Return: 0 on success, negative error code otherwise.
 */
int dfl_fpga_enum_info_add_dfl(struct dfl_fpga_enum_info *info,
			       resource_size_t start, resource_size_t len,
			       void __iomem *ioaddr)
{
	struct dfl_fpga_enum_dfl *dfl;

	dfl = devm_kzalloc(info->dev, sizeof(*dfl), GFP_KERNEL);
	if (!dfl)
		return -ENOMEM;

	dfl->start = start;
	dfl->len = len;
	dfl->ioaddr = ioaddr;

	/* entry is owned by info; freed in dfl_fpga_enum_info_free() */
	list_add_tail(&dfl->node, &info->dfls);

	return 0;
}
EXPORT_SYMBOL_GPL(dfl_fpga_enum_info_add_dfl);
892
/* device_for_each_child() callback: unregister one feature dev and its id */
static int remove_feature_dev(struct device *dev, void *data)
{
	struct platform_device *pdev = to_platform_device(dev);
	/* cache type/id now; pdev may be freed by unregister below */
	enum dfl_id_type type = feature_dev_id_type(pdev);
	int id = pdev->id;

	platform_device_unregister(pdev);

	dfl_id_free(type, id);

	return 0;
}
905
/* unregister every feature dev created under the container's FPGA region */
static void remove_feature_devs(struct dfl_fpga_cdev *cdev)
{
	device_for_each_child(&cdev->region->dev, NULL, remove_feature_dev);
}
910
/**
 * dfl_fpga_feature_devs_enumerate - enumerate feature devices
 * @info: information for enumeration.
 *
 * This function creates a container device (base FPGA region), enumerates
 * feature devices based on the enumeration info and creates platform devices
 * under the container device.
 *
 * Return: dfl_fpga_cdev struct on success, -errno on failure
 */
struct dfl_fpga_cdev *
dfl_fpga_feature_devs_enumerate(struct dfl_fpga_enum_info *info)
{
	struct build_feature_devs_info *binfo;
	struct dfl_fpga_enum_dfl *dfl;
	struct dfl_fpga_cdev *cdev;
	int ret = 0;

	if (!info->dev)
		return ERR_PTR(-ENODEV);

	cdev = devm_kzalloc(info->dev, sizeof(*cdev), GFP_KERNEL);
	if (!cdev)
		return ERR_PTR(-ENOMEM);

	/* region is devres-managed, so error paths need not free it */
	cdev->region = devm_fpga_region_create(info->dev, NULL, NULL);
	if (!cdev->region) {
		ret = -ENOMEM;
		goto free_cdev_exit;
	}

	cdev->parent = info->dev;
	mutex_init(&cdev->lock);
	INIT_LIST_HEAD(&cdev->port_dev_list);

	ret = fpga_region_register(cdev->region);
	if (ret)
		goto free_cdev_exit;

	/* create and init build info for enumeration */
	binfo = devm_kzalloc(info->dev, sizeof(*binfo), GFP_KERNEL);
	if (!binfo) {
		ret = -ENOMEM;
		goto unregister_region_exit;
	}

	binfo->dev = info->dev;
	binfo->cdev = cdev;

	/*
	 * start enumeration for all feature devices based on Device Feature
	 * Lists.
	 */
	list_for_each_entry(dfl, &info->dfls, node) {
		ret = parse_feature_list(binfo, dfl);
		if (ret) {
			/* roll back devices created by earlier lists too */
			remove_feature_devs(cdev);
			build_info_free(binfo);
			goto unregister_region_exit;
		}
	}

	build_info_free(binfo);

	return cdev;

unregister_region_exit:
	fpga_region_unregister(cdev->region);
free_cdev_exit:
	devm_kfree(info->dev, cdev);
	return ERR_PTR(ret);
}
EXPORT_SYMBOL_GPL(dfl_fpga_feature_devs_enumerate);
984
/**
 * dfl_fpga_feature_devs_remove - remove all feature devices
 * @cdev: fpga container device.
 *
 * Remove the container device and all feature devices under given container
 * devices.
 */
void dfl_fpga_feature_devs_remove(struct dfl_fpga_cdev *cdev)
{
	struct dfl_feature_platform_data *pdata, *ptmp;

	mutex_lock(&cdev->lock);
	/* drop the reference taken in build_info_commit_dev() for the FME */
	if (cdev->fme_dev)
		put_device(cdev->fme_dev);

	list_for_each_entry_safe(pdata, ptmp, &cdev->port_dev_list, node) {
		struct platform_device *port_dev = pdata->dev;

		/*
		 * remove released ports: ports detached by
		 * dfl_fpga_cdev_release_port() are no longer registered, so
		 * remove_feature_devs() below won't see them — free their id
		 * and device reference here instead.
		 */
		if (!device_is_registered(&port_dev->dev)) {
			dfl_id_free(feature_dev_id_type(port_dev),
				    port_dev->id);
			platform_device_put(port_dev);
		}

		list_del(&pdata->node);
		/* drop the reference taken in dfl_fpga_cdev_add_port_dev() */
		put_device(&port_dev->dev);
	}
	mutex_unlock(&cdev->lock);

	remove_feature_devs(cdev);

	fpga_region_unregister(cdev->region);
	devm_kfree(cdev->parent, cdev);
}
EXPORT_SYMBOL_GPL(dfl_fpga_feature_devs_remove);
1021
Wu Hao5d56e112018-06-30 08:53:15 +08001022/**
1023 * __dfl_fpga_cdev_find_port - find a port under given container device
1024 *
1025 * @cdev: container device
1026 * @data: data passed to match function
1027 * @match: match function used to find specific port from the port device list
1028 *
1029 * Find a port device under container device. This function needs to be
1030 * invoked with lock held.
1031 *
1032 * Return: pointer to port's platform device if successful, NULL otherwise.
1033 *
1034 * NOTE: you will need to drop the device reference with put_device() after use.
1035 */
1036struct platform_device *
1037__dfl_fpga_cdev_find_port(struct dfl_fpga_cdev *cdev, void *data,
1038 int (*match)(struct platform_device *, void *))
1039{
1040 struct dfl_feature_platform_data *pdata;
1041 struct platform_device *port_dev;
1042
1043 list_for_each_entry(pdata, &cdev->port_dev_list, node) {
1044 port_dev = pdata->dev;
1045
1046 if (match(port_dev, data) && get_device(&port_dev->dev))
1047 return port_dev;
1048 }
1049
1050 return NULL;
1051}
1052EXPORT_SYMBOL_GPL(__dfl_fpga_cdev_find_port);
1053
Wu Hao543be3d2018-06-30 08:53:13 +08001054static int __init dfl_fpga_init(void)
1055{
Wu Haob16c5142018-06-30 08:53:14 +08001056 int ret;
1057
Wu Hao543be3d2018-06-30 08:53:13 +08001058 dfl_ids_init();
1059
Wu Haob16c5142018-06-30 08:53:14 +08001060 ret = dfl_chardev_init();
1061 if (ret)
1062 dfl_ids_destroy();
1063
1064 return ret;
Wu Hao543be3d2018-06-30 08:53:13 +08001065}
1066
Wu Hao69bb18d2019-08-04 18:20:11 +08001067/**
1068 * dfl_fpga_cdev_release_port - release a port platform device
1069 *
1070 * @cdev: parent container device.
1071 * @port_id: id of the port platform device.
1072 *
1073 * This function allows user to release a port platform device. This is a
1074 * mandatory step before turn a port from PF into VF for SRIOV support.
1075 *
1076 * Return: 0 on success, negative error code otherwise.
1077 */
1078int dfl_fpga_cdev_release_port(struct dfl_fpga_cdev *cdev, int port_id)
1079{
1080 struct platform_device *port_pdev;
1081 int ret = -ENODEV;
1082
1083 mutex_lock(&cdev->lock);
1084 port_pdev = __dfl_fpga_cdev_find_port(cdev, &port_id,
1085 dfl_fpga_check_port_id);
1086 if (!port_pdev)
1087 goto unlock_exit;
1088
1089 if (!device_is_registered(&port_pdev->dev)) {
1090 ret = -EBUSY;
1091 goto put_dev_exit;
1092 }
1093
1094 ret = dfl_feature_dev_use_begin(dev_get_platdata(&port_pdev->dev));
1095 if (ret)
1096 goto put_dev_exit;
1097
1098 platform_device_del(port_pdev);
1099 cdev->released_port_num++;
1100put_dev_exit:
1101 put_device(&port_pdev->dev);
1102unlock_exit:
1103 mutex_unlock(&cdev->lock);
1104 return ret;
1105}
1106EXPORT_SYMBOL_GPL(dfl_fpga_cdev_release_port);
1107
1108/**
1109 * dfl_fpga_cdev_assign_port - assign a port platform device back
1110 *
1111 * @cdev: parent container device.
1112 * @port_id: id of the port platform device.
1113 *
1114 * This function allows user to assign a port platform device back. This is
1115 * a mandatory step after disable SRIOV support.
1116 *
1117 * Return: 0 on success, negative error code otherwise.
1118 */
1119int dfl_fpga_cdev_assign_port(struct dfl_fpga_cdev *cdev, int port_id)
1120{
1121 struct platform_device *port_pdev;
1122 int ret = -ENODEV;
1123
1124 mutex_lock(&cdev->lock);
1125 port_pdev = __dfl_fpga_cdev_find_port(cdev, &port_id,
1126 dfl_fpga_check_port_id);
1127 if (!port_pdev)
1128 goto unlock_exit;
1129
1130 if (device_is_registered(&port_pdev->dev)) {
1131 ret = -EBUSY;
1132 goto put_dev_exit;
1133 }
1134
1135 ret = platform_device_add(port_pdev);
1136 if (ret)
1137 goto put_dev_exit;
1138
1139 dfl_feature_dev_use_end(dev_get_platdata(&port_pdev->dev));
1140 cdev->released_port_num--;
1141put_dev_exit:
1142 put_device(&port_pdev->dev);
1143unlock_exit:
1144 mutex_unlock(&cdev->lock);
1145 return ret;
1146}
1147EXPORT_SYMBOL_GPL(dfl_fpga_cdev_assign_port);
1148
Wu Haobdd4f302019-08-04 18:20:12 +08001149static void config_port_access_mode(struct device *fme_dev, int port_id,
1150 bool is_vf)
1151{
1152 void __iomem *base;
1153 u64 v;
1154
1155 base = dfl_get_feature_ioaddr_by_id(fme_dev, FME_FEATURE_ID_HEADER);
1156
1157 v = readq(base + FME_HDR_PORT_OFST(port_id));
1158
1159 v &= ~FME_PORT_OFST_ACC_CTRL;
1160 v |= FIELD_PREP(FME_PORT_OFST_ACC_CTRL,
1161 is_vf ? FME_PORT_OFST_ACC_VF : FME_PORT_OFST_ACC_PF);
1162
1163 writeq(v, base + FME_HDR_PORT_OFST(port_id));
1164}
1165
1166#define config_port_vf_mode(dev, id) config_port_access_mode(dev, id, true)
1167#define config_port_pf_mode(dev, id) config_port_access_mode(dev, id, false)
1168
1169/**
1170 * dfl_fpga_cdev_config_ports_pf - configure ports to PF access mode
1171 *
1172 * @cdev: parent container device.
1173 *
1174 * This function is needed in sriov configuration routine. It could be used to
1175 * configure the all released ports from VF access mode to PF.
1176 */
1177void dfl_fpga_cdev_config_ports_pf(struct dfl_fpga_cdev *cdev)
1178{
1179 struct dfl_feature_platform_data *pdata;
1180
1181 mutex_lock(&cdev->lock);
1182 list_for_each_entry(pdata, &cdev->port_dev_list, node) {
1183 if (device_is_registered(&pdata->dev->dev))
1184 continue;
1185
1186 config_port_pf_mode(cdev->fme_dev, pdata->id);
1187 }
1188 mutex_unlock(&cdev->lock);
1189}
1190EXPORT_SYMBOL_GPL(dfl_fpga_cdev_config_ports_pf);
1191
1192/**
1193 * dfl_fpga_cdev_config_ports_vf - configure ports to VF access mode
1194 *
1195 * @cdev: parent container device.
1196 * @num_vfs: VF device number.
1197 *
1198 * This function is needed in sriov configuration routine. It could be used to
1199 * configure the released ports from PF access mode to VF.
1200 *
1201 * Return: 0 on success, negative error code otherwise.
1202 */
1203int dfl_fpga_cdev_config_ports_vf(struct dfl_fpga_cdev *cdev, int num_vfs)
1204{
1205 struct dfl_feature_platform_data *pdata;
1206 int ret = 0;
1207
1208 mutex_lock(&cdev->lock);
1209 /*
1210 * can't turn multiple ports into 1 VF device, only 1 port for 1 VF
1211 * device, so if released port number doesn't match VF device number,
1212 * then reject the request with -EINVAL error code.
1213 */
1214 if (cdev->released_port_num != num_vfs) {
1215 ret = -EINVAL;
1216 goto done;
1217 }
1218
1219 list_for_each_entry(pdata, &cdev->port_dev_list, node) {
1220 if (device_is_registered(&pdata->dev->dev))
1221 continue;
1222
1223 config_port_vf_mode(cdev->fme_dev, pdata->id);
1224 }
1225done:
1226 mutex_unlock(&cdev->lock);
1227 return ret;
1228}
1229EXPORT_SYMBOL_GPL(dfl_fpga_cdev_config_ports_vf);
1230
/* module teardown: undo dfl_fpga_init() in reverse order */
static void __exit dfl_fpga_exit(void)
{
	/* chardevs must go away before the id allocators they index into */
	dfl_chardev_uinit();
	dfl_ids_destroy();
}
1236
/* standard kernel module registration boilerplate */
module_init(dfl_fpga_init);
module_exit(dfl_fpga_exit);

MODULE_DESCRIPTION("FPGA Device Feature List (DFL) Support");
MODULE_AUTHOR("Intel Corporation");
MODULE_LICENSE("GPL v2");