// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * PCI address cache; allows the lookup of PCI devices based on I/O address
 *
 * Copyright IBM Corporation 2004
 * Copyright Linas Vepstas <linas@austin.ibm.com> 2004
 */

#include <linux/list.h>
#include <linux/pci.h>
#include <linux/rbtree.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/atomic.h>
#include <linux/debugfs.h>
#include <asm/pci-bridge.h>
#include <asm/ppc-pci.h>


/**
 * DOC: Overview
 *
 * The pci address cache subsystem. This subsystem places
 * PCI device address resources into a red-black tree, sorted
 * according to the address range, so that given only an i/o
 * address, the corresponding PCI device can be **quickly**
 * found. It is safe to perform an address lookup in an interrupt
 * context; this ability is an important feature.
 *
 * Currently, the only customer of this code is the EEH subsystem;
 * thus, this code has been somewhat tailored to suit EEH better.
 * In particular, the cache does *not* hold the addresses of devices
 * for which EEH is not enabled.
 *
 * (Implementation Note: The RB tree seems to be better/faster
 * than any hash algo I could think of for this problem, even
 * with the penalty of slow pointer chases for d-cache misses).
 */

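/*
 * Editor's illustration (not from the original file; the addresses and
 * device name are made up): each node caches one non-overlapping
 * [addr_lo, addr_hi] range for one device, so a lookup is a plain binary
 * search on the range bounds:
 *
 *	cached:  [0x10000000, 0x1000ffff] -> eeh_dev of 0001:01:00.0
 *	lookup:  0x10002000 -> falls inside that range, so its edev is
 *	                       returned by eeh_addr_cache_get_dev()
 *	lookup:  0x20000000 -> no containing range, NULL is returned
 */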
struct pci_io_addr_range {
	struct rb_node rb_node;
	resource_size_t addr_lo;
	resource_size_t addr_hi;
	struct eeh_dev *edev;
	struct pci_dev *pcidev;
	unsigned long flags;
};

static struct pci_io_addr_cache {
	struct rb_root rb_root;
	spinlock_t piar_lock;
} pci_io_addr_cache_root;

/*
 * Walk the rb tree looking for a cached range that contains @addr.
 * Caller must hold piar_lock.  Returns the matching eeh_dev, or NULL.
 */
static inline struct eeh_dev *__eeh_addr_cache_get_device(unsigned long addr)
{
	struct rb_node *n = pci_io_addr_cache_root.rb_root.rb_node;

	while (n) {
		struct pci_io_addr_range *piar;
		piar = rb_entry(n, struct pci_io_addr_range, rb_node);

		if (addr < piar->addr_lo)
			n = n->rb_left;
		else if (addr > piar->addr_hi)
			n = n->rb_right;
		else
			return piar->edev;
	}

	return NULL;
}

/**
 * eeh_addr_cache_get_dev - Get device, given only address
 * @addr: mmio (PIO) phys address or i/o port number
 *
 * Given an mmio phys address, or a port number, find a pci device
 * that implements this address. I/O port numbers are assumed to be offset
 * from zero (that is, they do *not* have pci_io_addr added in).
 * It is safe to call this function within an interrupt.
 */
struct eeh_dev *eeh_addr_cache_get_dev(unsigned long addr)
{
	struct eeh_dev *edev;
	unsigned long flags;

	spin_lock_irqsave(&pci_io_addr_cache_root.piar_lock, flags);
	edev = __eeh_addr_cache_get_device(addr);
	spin_unlock_irqrestore(&pci_io_addr_cache_root.piar_lock, flags);
	return edev;
}
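/*
 * Editor's sketch of a typical caller (the caller is not defined in this
 * file; this is only what the lookup is modelled on): the EEH core
 * resolves a suspect MMIO address to a device and then checks that
 * device for a frozen PE, roughly:
 *
 *	edev = eeh_addr_cache_get_dev(addr);
 *	if (edev)
 *		eeh_dev_check_failure(edev);
 *
 * See eeh_check_failure() in eeh.c for the real call site this sketch
 * is based on.
 */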

#ifdef DEBUG
/*
 * Handy-dandy debug print routine, does nothing more
 * than print out the contents of our addr cache.
 */
static void eeh_addr_cache_print(struct pci_io_addr_cache *cache)
{
	struct rb_node *n;
	int cnt = 0;

	n = rb_first(&cache->rb_root);
	while (n) {
		struct pci_io_addr_range *piar;
		piar = rb_entry(n, struct pci_io_addr_range, rb_node);
		pr_info("PCI: %s addr range %d [%pap-%pap]: %s\n",
		       (piar->flags & IORESOURCE_IO) ? "i/o" : "mem", cnt,
		       &piar->addr_lo, &piar->addr_hi, pci_name(piar->pcidev));
		cnt++;
		n = rb_next(n);
	}
}
#endif

/* Insert address range into the rb tree. */
static struct pci_io_addr_range *
eeh_addr_cache_insert(struct pci_dev *dev, resource_size_t alo,
		      resource_size_t ahi, unsigned long flags)
{
	struct rb_node **p = &pci_io_addr_cache_root.rb_root.rb_node;
	struct rb_node *parent = NULL;
	struct pci_io_addr_range *piar;

	/* Walk tree, find a place to insert into tree */
	while (*p) {
		parent = *p;
		piar = rb_entry(parent, struct pci_io_addr_range, rb_node);
		if (ahi < piar->addr_lo) {
			p = &parent->rb_left;
		} else if (alo > piar->addr_hi) {
			p = &parent->rb_right;
		} else {
			if (dev != piar->pcidev ||
			    alo != piar->addr_lo || ahi != piar->addr_hi) {
				pr_warn("PIAR: overlapping address range\n");
			}
			return piar;
		}
	}
	piar = kzalloc(sizeof(struct pci_io_addr_range), GFP_ATOMIC);
	if (!piar)
		return NULL;

	piar->addr_lo = alo;
	piar->addr_hi = ahi;
	piar->edev = pci_dev_to_eeh_dev(dev);
	piar->pcidev = dev;
	piar->flags = flags;

	eeh_edev_dbg(piar->edev, "PIAR: insert range=[%pap:%pap]\n",
		     &alo, &ahi);

	rb_link_node(&piar->rb_node, parent, p);
	rb_insert_color(&piar->rb_node, &pci_io_addr_cache_root.rb_root);

	return piar;
}
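/*
 * Editor's worked example of the overlap handling above (values are made
 * up): inserting [0x1000, 0x1fff] and then [0x1800, 0x27ff] for another
 * device does not add a second node; the second call warns
 * "PIAR: overlapping address range" and returns the already-cached entry.
 * Re-inserting an identical range for the same device is silent and also
 * just returns the existing entry.
 */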

/* Walk @dev's BARs and cache each usable range; caller holds piar_lock. */
static void __eeh_addr_cache_insert_dev(struct pci_dev *dev)
{
	struct eeh_dev *edev;
	int i;

	edev = pci_dev_to_eeh_dev(dev);
	if (!edev) {
		pr_warn("PCI: no EEH dev found for %s\n",
			pci_name(dev));
		return;
	}

	/* Skip any devices for which EEH is not enabled. */
	if (!edev->pe) {
		dev_dbg(&dev->dev, "EEH: Skip building address cache\n");
		return;
	}

	/*
	 * Walk resources on this device, poke the first 7 (6 normal BARs
	 * and 1 ROM BAR) into the tree.
	 */
	for (i = 0; i <= PCI_ROM_RESOURCE; i++) {
		resource_size_t start = pci_resource_start(dev, i);
		resource_size_t end = pci_resource_end(dev, i);
		unsigned long flags = pci_resource_flags(dev, i);

		/* We are interested only in bus addresses, not DMA or other stuff */
		if (0 == (flags & (IORESOURCE_IO | IORESOURCE_MEM)))
			continue;
		if (start == 0 || ~start == 0 || end == 0 || ~end == 0)
			continue;
		eeh_addr_cache_insert(dev, start, end, flags);
	}
}

/**
 * eeh_addr_cache_insert_dev - Add a device to the address cache
 * @dev: PCI device whose I/O addresses we are interested in.
 *
 * In order to support the fast lookup of devices based on addresses,
 * we maintain a cache of devices that can be quickly searched.
 * This routine adds a device to that cache.
 */
void eeh_addr_cache_insert_dev(struct pci_dev *dev)
{
	unsigned long flags;

	spin_lock_irqsave(&pci_io_addr_cache_root.piar_lock, flags);
	__eeh_addr_cache_insert_dev(dev);
	spin_unlock_irqrestore(&pci_io_addr_cache_root.piar_lock, flags);
}
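/*
 * Editor's note (usage assumption, not taken from this file): callers are
 * expected to pair the two entry points, e.g.
 *
 *	eeh_addr_cache_insert_dev(pdev);	// when the device is added
 *	...
 *	eeh_addr_cache_rmv_dev(pdev);		// before the device goes away
 *
 * so that the cache never holds a stale pci_dev pointer.
 */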
212
Gavin Shan3ab96a02012-09-07 22:44:23 +0000213static inline void __eeh_addr_cache_rmv_dev(struct pci_dev *dev)
Linas Vepstas5d5a0932005-11-03 18:53:07 -0600214{
215 struct rb_node *n;
Linas Vepstas5d5a0932005-11-03 18:53:07 -0600216
217restart:
218 n = rb_first(&pci_io_addr_cache_root.rb_root);
219 while (n) {
220 struct pci_io_addr_range *piar;
221 piar = rb_entry(n, struct pci_io_addr_range, rb_node);
222
223 if (piar->pcidev == dev) {
Sam Bobroff1ff8f362019-08-16 14:48:13 +1000224 eeh_edev_dbg(piar->edev, "PIAR: remove range=[%pap:%pap]\n",
225 &piar->addr_lo, &piar->addr_hi);
Linas Vepstas5d5a0932005-11-03 18:53:07 -0600226 rb_erase(n, &pci_io_addr_cache_root.rb_root);
Linas Vepstas5d5a0932005-11-03 18:53:07 -0600227 kfree(piar);
228 goto restart;
229 }
230 n = rb_next(n);
231 }
Linas Vepstas5d5a0932005-11-03 18:53:07 -0600232}
233
234/**
Gavin Shan3ab96a02012-09-07 22:44:23 +0000235 * eeh_addr_cache_rmv_dev - remove pci device from addr cache
Linas Vepstas5d5a0932005-11-03 18:53:07 -0600236 * @dev: device to remove
237 *
238 * Remove a device from the addr-cache tree.
239 * This is potentially expensive, since it will walk
240 * the tree multiple times (once per resource).
241 * But so what; device removal doesn't need to be that fast.
242 */
Gavin Shan3ab96a02012-09-07 22:44:23 +0000243void eeh_addr_cache_rmv_dev(struct pci_dev *dev)
Linas Vepstas5d5a0932005-11-03 18:53:07 -0600244{
245 unsigned long flags;
246
247 spin_lock_irqsave(&pci_io_addr_cache_root.piar_lock, flags);
Gavin Shan3ab96a02012-09-07 22:44:23 +0000248 __eeh_addr_cache_rmv_dev(dev);
Linas Vepstas5d5a0932005-11-03 18:53:07 -0600249 spin_unlock_irqrestore(&pci_io_addr_cache_root.piar_lock, flags);
250}
251
252/**
Sam Bobroff685a0bc2019-08-16 14:48:08 +1000253 * eeh_addr_cache_init - Initialize a cache of I/O addresses
254 *
255 * Initialize a cache of pci i/o addresses. This cache will be used to
256 * find the pci device that corresponds to a given address.
257 */
258void eeh_addr_cache_init(void)
259{
260 spin_lock_init(&pci_io_addr_cache_root.piar_lock);
261}

/* Dump the cache contents, one range per line (used by debugfs). */
static int eeh_addr_cache_show(struct seq_file *s, void *v)
{
	struct pci_io_addr_range *piar;
	struct rb_node *n;
	unsigned long flags;

	spin_lock_irqsave(&pci_io_addr_cache_root.piar_lock, flags);
	for (n = rb_first(&pci_io_addr_cache_root.rb_root); n; n = rb_next(n)) {
		piar = rb_entry(n, struct pci_io_addr_range, rb_node);

		seq_printf(s, "%s addr range [%pap-%pap]: %s\n",
			   (piar->flags & IORESOURCE_IO) ? "i/o" : "mem",
			   &piar->addr_lo, &piar->addr_hi, pci_name(piar->pcidev));
	}
	spin_unlock_irqrestore(&pci_io_addr_cache_root.piar_lock, flags);

	return 0;
}
DEFINE_SHOW_ATTRIBUTE(eeh_addr_cache);

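/*
 * Register the "eeh_address_cache" debugfs file.  Editor's usage sketch
 * (assuming debugfs is mounted at /sys/kernel/debug and arch_debugfs_dir
 * is the powerpc directory there):
 *
 *	# cat /sys/kernel/debug/powerpc/eeh_address_cache
 *	mem addr range [0x3fe080000000-0x3fe08007ffff]: 0001:01:00.0
 *
 * The sample line is illustrative only; the format comes from
 * eeh_addr_cache_show() above.
 */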
void __init eeh_cache_debugfs_init(void)
{
	debugfs_create_file_unsafe("eeh_address_cache", 0400,
			arch_debugfs_dir, NULL,
			&eeh_addr_cache_fops);
}