// SPDX-License-Identifier: GPL-2.0
/*
 * Interconnect framework core driver
 *
 * Copyright (c) 2017-2019, Linaro Ltd.
 * Author: Georgi Djakov <georgi.djakov@linaro.org>
 */

#include <linux/debugfs.h>
#include <linux/device.h>
#include <linux/idr.h>
#include <linux/init.h>
#include <linux/interconnect.h>
#include <linux/interconnect-provider.h>
#include <linux/list.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/slab.h>
#include <linux/of.h>
#include <linux/overflow.h>

static DEFINE_IDR(icc_idr);
static LIST_HEAD(icc_providers);
static DEFINE_MUTEX(icc_lock);
static struct dentry *icc_debugfs_dir;

/**
 * struct icc_req - constraints that are attached to each node
 * @req_node: entry in list of requests for the particular @node
 * @node: the interconnect node to which this constraint applies
 * @dev: reference to the device that sets the constraints
 * @avg_bw: an integer describing the average bandwidth in kBps
 * @peak_bw: an integer describing the peak bandwidth in kBps
 */
struct icc_req {
        struct hlist_node req_node;
        struct icc_node *node;
        struct device *dev;
        u32 avg_bw;
        u32 peak_bw;
};

/**
 * struct icc_path - interconnect path structure
 * @num_nodes: number of hops (nodes)
 * @reqs: array of the requests applicable to this path of nodes
 */
struct icc_path {
        size_t num_nodes;
        struct icc_req reqs[];
};

static void icc_summary_show_one(struct seq_file *s, struct icc_node *n)
{
        if (!n)
                return;

        seq_printf(s, "%-30s %12u %12u\n",
                   n->name, n->avg_bw, n->peak_bw);
}

static int icc_summary_show(struct seq_file *s, void *data)
{
        struct icc_provider *provider;

        seq_puts(s, " node                                   avg         peak\n");
        seq_puts(s, "--------------------------------------------------------\n");

        mutex_lock(&icc_lock);

        list_for_each_entry(provider, &icc_providers, provider_list) {
                struct icc_node *n;

                list_for_each_entry(n, &provider->nodes, node_list) {
                        struct icc_req *r;

                        icc_summary_show_one(s, n);
                        hlist_for_each_entry(r, &n->req_list, req_node) {
                                if (!r->dev)
                                        continue;

                                seq_printf(s, "  %-26s %12u %12u\n",
                                           dev_name(r->dev), r->avg_bw,
                                           r->peak_bw);
                        }
                }
        }

        mutex_unlock(&icc_lock);

        return 0;
}

static int icc_summary_open(struct inode *inode, struct file *file)
{
        return single_open(file, icc_summary_show, inode->i_private);
}

static const struct file_operations icc_summary_fops = {
        .open = icc_summary_open,
        .read = seq_read,
        .llseek = seq_lseek,
        .release = single_release,
};

static struct icc_node *node_find(const int id)
{
        return idr_find(&icc_idr, id);
}

static struct icc_path *path_init(struct device *dev, struct icc_node *dst,
                                  ssize_t num_nodes)
{
        struct icc_node *node = dst;
        struct icc_path *path;
        int i;

        path = kzalloc(struct_size(path, reqs, num_nodes), GFP_KERNEL);
        if (!path)
                return ERR_PTR(-ENOMEM);

        path->num_nodes = num_nodes;

        for (i = num_nodes - 1; i >= 0; i--) {
                node->provider->users++;
                hlist_add_head(&path->reqs[i].req_node, &node->req_list);
                path->reqs[i].node = node;
                path->reqs[i].dev = dev;
                /* reference to previous node was saved during path traversal */
                node = node->reverse;
        }

        return path;
}

static struct icc_path *path_find(struct device *dev, struct icc_node *src,
                                  struct icc_node *dst)
{
        struct icc_path *path = ERR_PTR(-EPROBE_DEFER);
        struct icc_node *n, *node = NULL;
        struct list_head traverse_list;
        struct list_head edge_list;
        struct list_head visited_list;
        size_t i, depth = 1;
        bool found = false;

        INIT_LIST_HEAD(&traverse_list);
        INIT_LIST_HEAD(&edge_list);
        INIT_LIST_HEAD(&visited_list);

        list_add(&src->search_list, &traverse_list);
        src->reverse = NULL;

        do {
                list_for_each_entry_safe(node, n, &traverse_list, search_list) {
                        if (node == dst) {
                                found = true;
                                list_splice_init(&edge_list, &visited_list);
                                list_splice_init(&traverse_list, &visited_list);
                                break;
                        }
                        for (i = 0; i < node->num_links; i++) {
                                struct icc_node *tmp = node->links[i];

                                if (!tmp) {
                                        path = ERR_PTR(-ENOENT);
                                        goto out;
                                }

                                if (tmp->is_traversed)
                                        continue;

                                tmp->is_traversed = true;
                                tmp->reverse = node;
                                list_add_tail(&tmp->search_list, &edge_list);
                        }
                }

                if (found)
                        break;

                list_splice_init(&traverse_list, &visited_list);
                list_splice_init(&edge_list, &traverse_list);

                /* count the hops including the source */
                depth++;

        } while (!list_empty(&traverse_list));

out:

        /* reset the traversed state */
        list_for_each_entry_reverse(n, &visited_list, search_list)
                n->is_traversed = false;

        if (found)
                path = path_init(dev, dst, depth);

        return path;
}

/*
 * We want the path to honor all bandwidth requests, so the average and peak
 * bandwidth requirements from each consumer are aggregated at each node.
 * The aggregation is platform specific, so each platform can customize it by
 * implementing its own aggregate() function.
 */

static int aggregate_requests(struct icc_node *node)
{
        struct icc_provider *p = node->provider;
        struct icc_req *r;

        node->avg_bw = 0;
        node->peak_bw = 0;

        hlist_for_each_entry(r, &node->req_list, req_node)
                p->aggregate(node, r->avg_bw, r->peak_bw,
                             &node->avg_bw, &node->peak_bw);

        return 0;
}
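
/*
 * Illustrative sketch (added for documentation, not used by the framework):
 * the provider aggregate() callback invoked above commonly sums the average
 * bandwidth requests and keeps the highest peak request. The function name
 * below is hypothetical.
 */
static int __maybe_unused icc_example_aggregate(struct icc_node *node,
                                                u32 avg_bw, u32 peak_bw,
                                                u32 *agg_avg, u32 *agg_peak)
{
        /* accumulate the average bandwidth of every request on this node */
        *agg_avg += avg_bw;
        /* the node has to satisfy the largest peak request */
        *agg_peak = max(*agg_peak, peak_bw);

        return 0;
}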

static int apply_constraints(struct icc_path *path)
{
        struct icc_node *next, *prev = NULL;
        int ret = -EINVAL;
        int i;

        for (i = 0; i < path->num_nodes; i++) {
                next = path->reqs[i].node;

                /*
                 * Both endpoints should be valid master-slave pairs of the
                 * same interconnect provider that will be configured.
                 */
                if (!prev || next->provider != prev->provider) {
                        prev = next;
                        continue;
                }

                /* set the constraints */
                ret = next->provider->set(prev, next);
                if (ret)
                        goto out;

                prev = next;
        }
out:
        return ret;
}

/**
 * of_icc_xlate_onecell() - Translate function using a single index.
 * @spec: OF phandle args to map into an interconnect node.
 * @data: private data (pointer to struct icc_onecell_data)
 *
 * This is a generic translate function that can be used to model simple
 * interconnect providers that have one device tree node and provide
 * multiple interconnect nodes. A single cell is used as an index into
 * an array of icc nodes specified in the icc_onecell_data struct when
 * registering the provider.
 */
struct icc_node *of_icc_xlate_onecell(struct of_phandle_args *spec,
                                      void *data)
{
        struct icc_onecell_data *icc_data = data;
        unsigned int idx = spec->args[0];

        if (idx >= icc_data->num_nodes) {
                pr_err("%s: invalid index %u\n", __func__, idx);
                return ERR_PTR(-EINVAL);
        }

        return icc_data->nodes[idx];
}
EXPORT_SYMBOL_GPL(of_icc_xlate_onecell);
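
/*
 * Illustrative sketch (added for documentation, not used by the framework):
 * a hypothetical provider driver with a two-node topology could wire
 * of_icc_xlate_onecell() up to its provider structure roughly like this.
 * All names and the node count here are made up for the example.
 */
static int __maybe_unused icc_example_setup_xlate(struct device *dev,
                                                  struct icc_provider *provider,
                                                  struct icc_node *node0,
                                                  struct icc_node *node1)
{
        struct icc_onecell_data *data;
        const unsigned int num_nodes = 2;

        /* the flexible nodes[] array holds one entry per interconnect node */
        data = devm_kzalloc(dev, struct_size(data, nodes, num_nodes),
                            GFP_KERNEL);
        if (!data)
                return -ENOMEM;

        data->num_nodes = num_nodes;
        data->nodes[0] = node0;
        data->nodes[1] = node1;

        /* a single cell in "interconnects" then indexes directly into nodes[] */
        provider->xlate = of_icc_xlate_onecell;
        provider->data = data;

        return 0;
}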

/**
 * of_icc_get_from_provider() - Look-up interconnect node
 * @spec: OF phandle args to use for look-up
 *
 * Looks for interconnect provider under the node specified by @spec and if
 * found, uses xlate function of the provider to map phandle args to node.
 *
 * Returns a valid pointer to struct icc_node on success or ERR_PTR()
 * on failure.
 */
static struct icc_node *of_icc_get_from_provider(struct of_phandle_args *spec)
{
        struct icc_node *node = ERR_PTR(-EPROBE_DEFER);
        struct icc_provider *provider;

        if (!spec || spec->args_count != 1)
                return ERR_PTR(-EINVAL);

        mutex_lock(&icc_lock);
        list_for_each_entry(provider, &icc_providers, provider_list) {
                if (provider->dev->of_node == spec->np)
                        node = provider->xlate(spec, provider->data);
                if (!IS_ERR(node))
                        break;
        }
        mutex_unlock(&icc_lock);

        return node;
}

/**
 * of_icc_get() - get a path handle from a DT node based on name
 * @dev: device pointer for the consumer device
 * @name: interconnect path name
 *
 * This function will search for a path between two endpoints and return an
 * icc_path handle on success. Use icc_put() to release constraints when they
 * are not needed anymore.
 * If the interconnect API is disabled, NULL is returned and the consumer
 * drivers will still build. Drivers are free to handle this specifically,
 * but they don't have to.
 *
 * Return: icc_path pointer on success or ERR_PTR() on error. NULL is returned
 * when the API is disabled or the "interconnects" DT property is missing.
 */
struct icc_path *of_icc_get(struct device *dev, const char *name)
{
        struct icc_path *path = ERR_PTR(-EPROBE_DEFER);
        struct icc_node *src_node, *dst_node;
        struct device_node *np = NULL;
        struct of_phandle_args src_args, dst_args;
        int idx = 0;
        int ret;

        if (!dev || !dev->of_node)
                return ERR_PTR(-ENODEV);

        np = dev->of_node;

        /*
         * When the consumer DT node does not have an "interconnects" property,
         * return a NULL path to skip setting constraints.
         */
        if (!of_find_property(np, "interconnects", NULL))
                return NULL;

        /*
         * We use a combination of phandle and specifier for endpoint. For now
         * let's support only global ids and extend this in the future if
         * needed without breaking DT compatibility.
         */
        if (name) {
                idx = of_property_match_string(np, "interconnect-names", name);
                if (idx < 0)
                        return ERR_PTR(idx);
        }

        ret = of_parse_phandle_with_args(np, "interconnects",
                                         "#interconnect-cells", idx * 2,
                                         &src_args);
        if (ret)
                return ERR_PTR(ret);

        of_node_put(src_args.np);

        ret = of_parse_phandle_with_args(np, "interconnects",
                                         "#interconnect-cells", idx * 2 + 1,
                                         &dst_args);
        if (ret)
                return ERR_PTR(ret);

        of_node_put(dst_args.np);

        src_node = of_icc_get_from_provider(&src_args);

        if (IS_ERR(src_node)) {
                if (PTR_ERR(src_node) != -EPROBE_DEFER)
                        dev_err(dev, "error finding src node: %ld\n",
                                PTR_ERR(src_node));
                return ERR_CAST(src_node);
        }

        dst_node = of_icc_get_from_provider(&dst_args);

        if (IS_ERR(dst_node)) {
                if (PTR_ERR(dst_node) != -EPROBE_DEFER)
                        dev_err(dev, "error finding dst node: %ld\n",
                                PTR_ERR(dst_node));
                return ERR_CAST(dst_node);
        }

        mutex_lock(&icc_lock);
        path = path_find(dev, src_node, dst_node);
        if (IS_ERR(path))
                dev_err(dev, "%s: invalid path=%ld\n", __func__, PTR_ERR(path));
        mutex_unlock(&icc_lock);

        return path;
}
EXPORT_SYMBOL_GPL(of_icc_get);
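
/*
 * Illustrative sketch (added for documentation, not used by the framework):
 * a consumer driver would typically look up one of the paths listed in its
 * "interconnects"/"interconnect-names" DT properties by name. The "memory"
 * path name below is hypothetical.
 */
static __maybe_unused struct icc_path *icc_example_request_path(struct device *dev)
{
        struct icc_path *path;

        path = of_icc_get(dev, "memory");
        if (IS_ERR(path))
                dev_warn(dev, "no interconnect path: %ld\n", PTR_ERR(path));

        /* a NULL path is valid: the API is disabled or the property is missing */
        return path;
}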

/**
 * icc_set_bw() - set bandwidth constraints on an interconnect path
 * @path: reference to the path returned by icc_get()
 * @avg_bw: average bandwidth in kilobytes per second
 * @peak_bw: peak bandwidth in kilobytes per second
 *
 * This function is used by an interconnect consumer to express its own needs
 * in terms of bandwidth for a previously requested path between two endpoints.
 * The requests are aggregated and each node is updated accordingly. The entire
 * path is locked by a mutex to ensure that the set() is completed.
 * The @path can be NULL when the "interconnects" DT property is missing,
 * which means that no constraints will be set.
 *
 * Returns 0 on success, or an appropriate error code otherwise.
 */
int icc_set_bw(struct icc_path *path, u32 avg_bw, u32 peak_bw)
{
        struct icc_node *node;
        size_t i;
        int ret;

        if (!path)
                return 0;

        mutex_lock(&icc_lock);

        for (i = 0; i < path->num_nodes; i++) {
                node = path->reqs[i].node;

                /* update the consumer request for this path */
                path->reqs[i].avg_bw = avg_bw;
                path->reqs[i].peak_bw = peak_bw;

                /* aggregate requests for this node */
                aggregate_requests(node);
        }

        ret = apply_constraints(path);
        if (ret)
                pr_debug("interconnect: error applying constraints (%d)\n",
                         ret);

        mutex_unlock(&icc_lock);

        return ret;
}
EXPORT_SYMBOL_GPL(icc_set_bw);
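
/*
 * Illustrative sketch (added for documentation, not used by the framework):
 * once a consumer holds a path it can update its bandwidth needs at runtime
 * and must drop the request with icc_put() when done. The bandwidth values
 * below (in kBps) are arbitrary example numbers.
 */
static void __maybe_unused icc_example_scale_and_release(struct icc_path *path)
{
        /* request 1 GB/s average and 2 GB/s peak while doing heavy traffic */
        if (icc_set_bw(path, 1000000, 2000000))
                pr_debug("interconnect: example bandwidth request failed\n");

        /* ... perform the bandwidth-hungry work here ... */

        /* drop the constraints and release the path when finished */
        icc_put(path);
}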

/**
 * icc_get() - return a handle for path between two endpoints
 * @dev: the device requesting the path
 * @src_id: source device port id
 * @dst_id: destination device port id
 *
 * This function will search for a path between two endpoints and return an
 * icc_path handle on success. Use icc_put() to release
 * constraints when they are not needed anymore.
 * If the interconnect API is disabled, NULL is returned and the consumer
 * drivers will still build. Drivers are free to handle this specifically,
 * but they don't have to.
 *
 * Return: icc_path pointer on success, ERR_PTR() on error or NULL if the
 * interconnect API is disabled.
 */
struct icc_path *icc_get(struct device *dev, const int src_id, const int dst_id)
{
        struct icc_node *src, *dst;
        struct icc_path *path = ERR_PTR(-EPROBE_DEFER);

        mutex_lock(&icc_lock);

        src = node_find(src_id);
        if (!src)
                goto out;

        dst = node_find(dst_id);
        if (!dst)
                goto out;

        path = path_find(dev, src, dst);
        if (IS_ERR(path))
                dev_err(dev, "%s: invalid path=%ld\n", __func__, PTR_ERR(path));

out:
        mutex_unlock(&icc_lock);
        return path;
}
EXPORT_SYMBOL_GPL(icc_get);

/**
 * icc_put() - release the reference to the icc_path
 * @path: interconnect path
 *
 * Use this function to release the constraints on a path when the path is
 * no longer needed. The constraints will be re-aggregated.
 */
void icc_put(struct icc_path *path)
{
        struct icc_node *node;
        size_t i;
        int ret;

        if (!path || WARN_ON(IS_ERR(path)))
                return;

        ret = icc_set_bw(path, 0, 0);
        if (ret)
                pr_err("%s: error (%d)\n", __func__, ret);

        mutex_lock(&icc_lock);
        for (i = 0; i < path->num_nodes; i++) {
                node = path->reqs[i].node;
                hlist_del(&path->reqs[i].req_node);
                if (!WARN_ON(!node->provider->users))
                        node->provider->users--;
        }
        mutex_unlock(&icc_lock);

        kfree(path);
}
EXPORT_SYMBOL_GPL(icc_put);

static struct icc_node *icc_node_create_nolock(int id)
{
        struct icc_node *node;

        /* check if node already exists */
        node = node_find(id);
        if (node)
                return node;

        node = kzalloc(sizeof(*node), GFP_KERNEL);
        if (!node)
                return ERR_PTR(-ENOMEM);

        id = idr_alloc(&icc_idr, node, id, id + 1, GFP_KERNEL);
        if (id < 0) {
                WARN(1, "%s: couldn't get idr\n", __func__);
                kfree(node);
                return ERR_PTR(id);
        }

        node->id = id;

        return node;
}

/**
 * icc_node_create() - create a node
 * @id: node id
 *
 * Return: icc_node pointer on success, or ERR_PTR() on error
 */
struct icc_node *icc_node_create(int id)
{
        struct icc_node *node;

        mutex_lock(&icc_lock);

        node = icc_node_create_nolock(id);

        mutex_unlock(&icc_lock);

        return node;
}
EXPORT_SYMBOL_GPL(icc_node_create);

/**
 * icc_node_destroy() - destroy a node
 * @id: node id
 */
void icc_node_destroy(int id)
{
        struct icc_node *node;

        mutex_lock(&icc_lock);

        node = node_find(id);
        if (node) {
                idr_remove(&icc_idr, node->id);
                WARN_ON(!hlist_empty(&node->req_list));
        }

        mutex_unlock(&icc_lock);

        kfree(node);
}
EXPORT_SYMBOL_GPL(icc_node_destroy);

/**
 * icc_link_create() - create a link between two nodes
 * @node: source node id
 * @dst_id: destination node id
 *
 * Create a link between two nodes. The nodes might belong to different
 * interconnect providers and the @dst_id node might not exist (if the
 * provider driver has not probed yet). So just create the @dst_id node
 * and when the actual provider driver is probed, the rest of the node
 * data is filled.
 *
 * Return: 0 on success, or an error code otherwise
 */
int icc_link_create(struct icc_node *node, const int dst_id)
{
        struct icc_node *dst;
        struct icc_node **new;
        int ret = 0;

        if (!node->provider)
                return -EINVAL;

        mutex_lock(&icc_lock);

        dst = node_find(dst_id);
        if (!dst) {
                dst = icc_node_create_nolock(dst_id);

                if (IS_ERR(dst)) {
                        ret = PTR_ERR(dst);
                        goto out;
                }
        }

        new = krealloc(node->links,
                       (node->num_links + 1) * sizeof(*node->links),
                       GFP_KERNEL);
        if (!new) {
                ret = -ENOMEM;
                goto out;
        }

        node->links = new;
        node->links[node->num_links++] = dst;

out:
        mutex_unlock(&icc_lock);

        return ret;
}
EXPORT_SYMBOL_GPL(icc_link_create);

/**
 * icc_link_destroy() - destroy a link between two nodes
 * @src: pointer to source node
 * @dst: pointer to destination node
 *
 * Return: 0 on success, or an error code otherwise
 */
int icc_link_destroy(struct icc_node *src, struct icc_node *dst)
{
        struct icc_node **new;
        size_t slot;
        int ret = 0;

        if (IS_ERR_OR_NULL(src))
                return -EINVAL;

        if (IS_ERR_OR_NULL(dst))
                return -EINVAL;

        mutex_lock(&icc_lock);

        for (slot = 0; slot < src->num_links; slot++)
                if (src->links[slot] == dst)
                        break;

        if (WARN_ON(slot == src->num_links)) {
                ret = -ENXIO;
                goto out;
        }

        src->links[slot] = src->links[--src->num_links];

        new = krealloc(src->links, src->num_links * sizeof(*src->links),
                       GFP_KERNEL);
        if (new)
                src->links = new;

out:
        mutex_unlock(&icc_lock);

        return ret;
}
EXPORT_SYMBOL_GPL(icc_link_destroy);

/**
 * icc_node_add() - add interconnect node to interconnect provider
 * @node: pointer to the interconnect node
 * @provider: pointer to the interconnect provider
 */
void icc_node_add(struct icc_node *node, struct icc_provider *provider)
{
        mutex_lock(&icc_lock);

        node->provider = provider;
        list_add_tail(&node->node_list, &provider->nodes);

        mutex_unlock(&icc_lock);
}
EXPORT_SYMBOL_GPL(icc_node_add);

/**
 * icc_node_del() - delete interconnect node from interconnect provider
 * @node: pointer to the interconnect node
 */
void icc_node_del(struct icc_node *node)
{
        mutex_lock(&icc_lock);

        list_del(&node->node_list);

        mutex_unlock(&icc_lock);
}
EXPORT_SYMBOL_GPL(icc_node_del);

/**
 * icc_provider_add() - add a new interconnect provider
 * @provider: the interconnect provider that will be added into topology
 *
 * Return: 0 on success, or an error code otherwise
 */
int icc_provider_add(struct icc_provider *provider)
{
        if (WARN_ON(!provider->set))
                return -EINVAL;
        if (WARN_ON(!provider->xlate))
                return -EINVAL;

        mutex_lock(&icc_lock);

        INIT_LIST_HEAD(&provider->nodes);
        list_add_tail(&provider->provider_list, &icc_providers);

        mutex_unlock(&icc_lock);

        dev_dbg(provider->dev, "interconnect provider added to topology\n");

        return 0;
}
EXPORT_SYMBOL_GPL(icc_provider_add);
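
/*
 * Illustrative sketch (added for documentation, not used by the framework):
 * a provider driver's probe() typically registers the provider first, then
 * creates its nodes, attaches them and links them together. The node ids
 * (1 and 2) are hypothetical and the error unwinding is omitted for brevity.
 */
static int __maybe_unused icc_example_register_topology(struct icc_provider *provider)
{
        struct icc_node *master, *slave;
        int ret;

        /* provider->set, ->aggregate, ->xlate and ->dev are assumed to be set */
        ret = icc_provider_add(provider);
        if (ret)
                return ret;

        master = icc_node_create(1);
        if (IS_ERR(master))
                return PTR_ERR(master);
        icc_node_add(master, provider);

        slave = icc_node_create(2);
        if (IS_ERR(slave))
                return PTR_ERR(slave);
        icc_node_add(slave, provider);

        /* describe the topology: traffic can flow from master to slave */
        return icc_link_create(master, 2);
}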

/**
 * icc_provider_del() - delete previously added interconnect provider
 * @provider: the interconnect provider that will be removed from topology
 *
 * Return: 0 on success, or an error code otherwise
 */
int icc_provider_del(struct icc_provider *provider)
{
        mutex_lock(&icc_lock);
        if (provider->users) {
                pr_warn("interconnect provider still has %d users\n",
                        provider->users);
                mutex_unlock(&icc_lock);
                return -EBUSY;
        }

        if (!list_empty(&provider->nodes)) {
                pr_warn("interconnect provider still has nodes\n");
                mutex_unlock(&icc_lock);
                return -EBUSY;
        }

        list_del(&provider->provider_list);
        mutex_unlock(&icc_lock);

        return 0;
}
EXPORT_SYMBOL_GPL(icc_provider_del);

static int __init icc_init(void)
{
        icc_debugfs_dir = debugfs_create_dir("interconnect", NULL);
        debugfs_create_file("interconnect_summary", 0444,
                            icc_debugfs_dir, NULL, &icc_summary_fops);
        return 0;
}

static void __exit icc_exit(void)
{
        debugfs_remove_recursive(icc_debugfs_dir);
}
module_init(icc_init);
module_exit(icc_exit);

MODULE_AUTHOR("Georgi Djakov <georgi.djakov@linaro.org>");
MODULE_DESCRIPTION("Interconnect Driver Core");
MODULE_LICENSE("GPL v2");