// SPDX-License-Identifier: GPL-2.0
#include <linux/err.h>
#include <linux/pci.h>
#include <linux/io.h>
#include <linux/gfp.h>
#include <linux/export.h>
#include <linux/of_address.h>

enum devm_ioremap_type {
	DEVM_IOREMAP = 0,
	DEVM_IOREMAP_UC,
	DEVM_IOREMAP_WC,
};

void devm_ioremap_release(struct device *dev, void *res)
{
	iounmap(*(void __iomem **)res);
}

static int devm_ioremap_match(struct device *dev, void *res, void *match_data)
{
	return *(void **)res == match_data;
}

static void __iomem *__devm_ioremap(struct device *dev, resource_size_t offset,
				    resource_size_t size,
				    enum devm_ioremap_type type)
{
	void __iomem **ptr, *addr = NULL;

	ptr = devres_alloc(devm_ioremap_release, sizeof(*ptr), GFP_KERNEL);
	if (!ptr)
		return NULL;

	switch (type) {
	case DEVM_IOREMAP:
		addr = ioremap(offset, size);
		break;
	case DEVM_IOREMAP_UC:
		addr = ioremap_uc(offset, size);
		break;
	case DEVM_IOREMAP_WC:
		addr = ioremap_wc(offset, size);
		break;
	}

	if (addr) {
		*ptr = addr;
		devres_add(dev, ptr);
	} else
		devres_free(ptr);

	return addr;
}

/**
 * devm_ioremap - Managed ioremap()
 * @dev: Generic device to remap IO address for
 * @offset: Resource address to map
 * @size: Size of map
 *
 * Managed ioremap(). Map is automatically unmapped on driver detach.
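 *
 * Usage sketch (illustrative only; assumes a platform device pdev and a
 * memory resource res already obtained via platform_get_resource()):
 *
 *	base = devm_ioremap(&pdev->dev, res->start, resource_size(res));
 *	if (!base)
 *		return -ENOMEM;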
 */
void __iomem *devm_ioremap(struct device *dev, resource_size_t offset,
			   resource_size_t size)
{
	return __devm_ioremap(dev, offset, size, DEVM_IOREMAP);
}
EXPORT_SYMBOL(devm_ioremap);

/**
 * devm_ioremap_uc - Managed ioremap_uc()
 * @dev: Generic device to remap IO address for
 * @offset: Resource address to map
 * @size: Size of map
 *
 * Managed ioremap_uc(). Map is automatically unmapped on driver detach.
 */
void __iomem *devm_ioremap_uc(struct device *dev, resource_size_t offset,
			      resource_size_t size)
{
	return __devm_ioremap(dev, offset, size, DEVM_IOREMAP_UC);
}
EXPORT_SYMBOL_GPL(devm_ioremap_uc);

/**
 * devm_ioremap_wc - Managed ioremap_wc()
 * @dev: Generic device to remap IO address for
 * @offset: Resource address to map
 * @size: Size of map
 *
 * Managed ioremap_wc(). Map is automatically unmapped on driver detach.
 */
void __iomem *devm_ioremap_wc(struct device *dev, resource_size_t offset,
			      resource_size_t size)
{
	return __devm_ioremap(dev, offset, size, DEVM_IOREMAP_WC);
}
EXPORT_SYMBOL(devm_ioremap_wc);

/**
 * devm_iounmap - Managed iounmap()
 * @dev: Generic device to unmap for
 * @addr: Address to unmap
 *
 * Managed iounmap(). @addr must have been mapped using devm_ioremap*().
 */
void devm_iounmap(struct device *dev, void __iomem *addr)
{
	WARN_ON(devres_destroy(dev, devm_ioremap_release, devm_ioremap_match,
			       (__force void *)addr));
	iounmap(addr);
}
EXPORT_SYMBOL(devm_iounmap);

static void __iomem *
__devm_ioremap_resource(struct device *dev, const struct resource *res,
			enum devm_ioremap_type type)
{
	resource_size_t size;
	void __iomem *dest_ptr;
	char *pretty_name;

	BUG_ON(!dev);

	if (!res || resource_type(res) != IORESOURCE_MEM) {
		dev_err(dev, "invalid resource\n");
		return IOMEM_ERR_PTR(-EINVAL);
	}

	size = resource_size(res);

	if (res->name)
		pretty_name = devm_kasprintf(dev, GFP_KERNEL, "%s %s",
					     dev_name(dev), res->name);
	else
		pretty_name = devm_kstrdup(dev, dev_name(dev), GFP_KERNEL);
	if (!pretty_name)
		return IOMEM_ERR_PTR(-ENOMEM);

	if (!devm_request_mem_region(dev, res->start, size, pretty_name)) {
		dev_err(dev, "can't request region for resource %pR\n", res);
		return IOMEM_ERR_PTR(-EBUSY);
	}

	dest_ptr = __devm_ioremap(dev, res->start, size, type);
	if (!dest_ptr) {
		dev_err(dev, "ioremap failed for resource %pR\n", res);
		devm_release_mem_region(dev, res->start, size);
		dest_ptr = IOMEM_ERR_PTR(-ENOMEM);
	}

	return dest_ptr;
}

/**
 * devm_ioremap_resource() - check, request region, and ioremap resource
 * @dev: generic device to handle the resource for
 * @res: resource to be handled
 *
 * Checks that a resource is a valid memory region, requests the memory
 * region and ioremaps it. All operations are managed and will be undone
 * on driver detach.
 *
 * Usage example:
 *
 *	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
 *	base = devm_ioremap_resource(&pdev->dev, res);
 *	if (IS_ERR(base))
 *		return PTR_ERR(base);
 *
 * Return: a pointer to the remapped memory or an ERR_PTR() encoded error code
 * on failure.
 */
void __iomem *devm_ioremap_resource(struct device *dev,
				    const struct resource *res)
{
	return __devm_ioremap_resource(dev, res, DEVM_IOREMAP);
}
EXPORT_SYMBOL(devm_ioremap_resource);

/**
 * devm_ioremap_resource_wc() - write-combined variant of
 * devm_ioremap_resource()
 * @dev: generic device to handle the resource for
 * @res: resource to be handled
 *
 * Return: a pointer to the remapped memory or an ERR_PTR() encoded error code
 * on failure.
 */
void __iomem *devm_ioremap_resource_wc(struct device *dev,
				       const struct resource *res)
{
	return __devm_ioremap_resource(dev, res, DEVM_IOREMAP_WC);
}

/*
 * devm_of_iomap - Requests a resource and maps the memory mapped IO
 *		   for a given device_node managed by a given device
 *
 * Checks that a resource is a valid memory region, requests the memory
 * region and ioremaps it. All operations are managed and will be undone
 * on driver detach of the device.
 *
 * This is to be used when a device requests/maps resources described
 * by other device tree nodes (children or otherwise).
 *
 * @dev:	The device "managing" the resource
 * @node:	The device-tree node where the resource resides
 * @index:	index of the MMIO range in the "reg" property
 * @size:	Returns the size of the resource (pass NULL if not needed)
 *
 * Usage example:
 *
 *	base = devm_of_iomap(&pdev->dev, node, 0, NULL);
 *	if (IS_ERR(base))
 *		return PTR_ERR(base);
 *
 * Please Note: This is not a one-to-one replacement for of_iomap() because the
 * of_iomap() function does not track whether the region is already mapped. If
 * two drivers try to map the same memory, the of_iomap() function will succeed
 * but the devm_of_iomap() function will return -EBUSY.
 *
 * Return: a pointer to the requested and mapped memory or an ERR_PTR() encoded
 * error code on failure.
 */
void __iomem *devm_of_iomap(struct device *dev, struct device_node *node, int index,
			    resource_size_t *size)
{
	struct resource res;

	if (of_address_to_resource(node, index, &res))
		return IOMEM_ERR_PTR(-EINVAL);
	if (size)
		*size = resource_size(&res);
	return devm_ioremap_resource(dev, &res);
}
EXPORT_SYMBOL(devm_of_iomap);

#ifdef CONFIG_HAS_IOPORT_MAP
/*
 * Generic iomap devres
 */
static void devm_ioport_map_release(struct device *dev, void *res)
{
	ioport_unmap(*(void __iomem **)res);
}

static int devm_ioport_map_match(struct device *dev, void *res,
				 void *match_data)
{
	return *(void **)res == match_data;
}

/**
 * devm_ioport_map - Managed ioport_map()
 * @dev: Generic device to map ioport for
 * @port: Port to map
 * @nr: Number of ports to map
 *
 * Managed ioport_map(). Map is automatically unmapped on driver
 * detach.
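 *
 * Usage sketch (illustrative only; the port base 0x3f8 and count of 8 are
 * hypothetical, legacy-UART-style values):
 *
 *	base = devm_ioport_map(dev, 0x3f8, 8);
 *	if (!base)
 *		return -ENOMEM;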
 *
 * Return: a pointer to the remapped memory or NULL on failure.
 */
void __iomem *devm_ioport_map(struct device *dev, unsigned long port,
			      unsigned int nr)
{
	void __iomem **ptr, *addr;

	ptr = devres_alloc(devm_ioport_map_release, sizeof(*ptr), GFP_KERNEL);
	if (!ptr)
		return NULL;

	addr = ioport_map(port, nr);
	if (addr) {
		*ptr = addr;
		devres_add(dev, ptr);
	} else
		devres_free(ptr);

	return addr;
}
EXPORT_SYMBOL(devm_ioport_map);

/**
 * devm_ioport_unmap - Managed ioport_unmap()
 * @dev: Generic device to unmap for
 * @addr: Address to unmap
 *
 * Managed ioport_unmap(). @addr must have been mapped using
 * devm_ioport_map().
 */
void devm_ioport_unmap(struct device *dev, void __iomem *addr)
{
	ioport_unmap(addr);
	WARN_ON(devres_destroy(dev, devm_ioport_map_release,
			       devm_ioport_map_match, (__force void *)addr));
}
EXPORT_SYMBOL(devm_ioport_unmap);
#endif /* CONFIG_HAS_IOPORT_MAP */

#ifdef CONFIG_PCI
/*
 * PCI iomap devres
 */
#define PCIM_IOMAP_MAX	PCI_STD_NUM_BARS

struct pcim_iomap_devres {
	void __iomem *table[PCIM_IOMAP_MAX];
};

static void pcim_iomap_release(struct device *gendev, void *res)
{
	struct pci_dev *dev = to_pci_dev(gendev);
	struct pcim_iomap_devres *this = res;
	int i;

	for (i = 0; i < PCIM_IOMAP_MAX; i++)
		if (this->table[i])
			pci_iounmap(dev, this->table[i]);
}

/**
 * pcim_iomap_table - access iomap allocation table
 * @pdev: PCI device to access iomap table for
 *
 * Access the iomap allocation table for @pdev. If the iomap table doesn't
 * exist and @pdev is managed, it will be allocated. All iomaps
 * recorded in the iomap table are automatically unmapped on driver
 * detach.
 *
 * This function might sleep when the table is first allocated but can
 * be safely called without context and is guaranteed to succeed once
 * allocated.
 */
void __iomem * const *pcim_iomap_table(struct pci_dev *pdev)
{
	struct pcim_iomap_devres *dr, *new_dr;

	dr = devres_find(&pdev->dev, pcim_iomap_release, NULL, NULL);
	if (dr)
		return dr->table;

	new_dr = devres_alloc(pcim_iomap_release, sizeof(*new_dr), GFP_KERNEL);
	if (!new_dr)
		return NULL;
	dr = devres_get(&pdev->dev, new_dr, NULL, NULL);
	return dr->table;
}
EXPORT_SYMBOL(pcim_iomap_table);

/**
 * pcim_iomap - Managed pci_iomap()
 * @pdev: PCI device to iomap for
 * @bar: BAR to iomap
 * @maxlen: Maximum length of iomap
 *
 * Managed pci_iomap(). Map is automatically unmapped on driver
 * detach.
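 *
 * Usage sketch (illustrative only; assumes the driver wants all of BAR 0,
 * hence a @maxlen of 0):
 *
 *	regs = pcim_iomap(pdev, 0, 0);
 *	if (!regs)
 *		return -ENOMEM;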
 */
void __iomem *pcim_iomap(struct pci_dev *pdev, int bar, unsigned long maxlen)
{
	void __iomem **tbl;

	BUG_ON(bar >= PCIM_IOMAP_MAX);

	tbl = (void __iomem **)pcim_iomap_table(pdev);
	if (!tbl || tbl[bar])	/* duplicate mappings not allowed */
		return NULL;

	tbl[bar] = pci_iomap(pdev, bar, maxlen);
	return tbl[bar];
}
EXPORT_SYMBOL(pcim_iomap);

/**
 * pcim_iounmap - Managed pci_iounmap()
 * @pdev: PCI device to iounmap for
 * @addr: Address to unmap
 *
 * Managed pci_iounmap(). @addr must have been mapped using pcim_iomap().
 */
void pcim_iounmap(struct pci_dev *pdev, void __iomem *addr)
{
	void __iomem **tbl;
	int i;

	pci_iounmap(pdev, addr);

	tbl = (void __iomem **)pcim_iomap_table(pdev);
	BUG_ON(!tbl);

	for (i = 0; i < PCIM_IOMAP_MAX; i++)
		if (tbl[i] == addr) {
			tbl[i] = NULL;
			return;
		}
	WARN_ON(1);
}
EXPORT_SYMBOL(pcim_iounmap);

/**
 * pcim_iomap_regions - Request and iomap PCI BARs
 * @pdev: PCI device to map IO resources for
 * @mask: Mask of BARs to request and iomap
 * @name: Name used when requesting regions
 *
 * Request and iomap regions specified by @mask.
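 *
 * Usage sketch (illustrative only; DRV_NAME and the chosen BAR numbers are
 * hypothetical and driver specific):
 *
 *	rc = pcim_iomap_regions(pdev, (1 << 0) | (1 << 2), DRV_NAME);
 *	if (rc)
 *		return rc;
 *	regs = pcim_iomap_table(pdev)[0];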
 */
int pcim_iomap_regions(struct pci_dev *pdev, int mask, const char *name)
{
	void __iomem * const *iomap;
	int i, rc;

	iomap = pcim_iomap_table(pdev);
	if (!iomap)
		return -ENOMEM;

	for (i = 0; i < DEVICE_COUNT_RESOURCE; i++) {
		unsigned long len;

		if (!(mask & (1 << i)))
			continue;

		rc = -EINVAL;
		len = pci_resource_len(pdev, i);
		if (!len)
			goto err_inval;

		rc = pci_request_region(pdev, i, name);
		if (rc)
			goto err_inval;

		rc = -ENOMEM;
		if (!pcim_iomap(pdev, i, 0))
			goto err_region;
	}

	return 0;

 err_region:
	pci_release_region(pdev, i);
 err_inval:
	while (--i >= 0) {
		if (!(mask & (1 << i)))
			continue;
		pcim_iounmap(pdev, iomap[i]);
		pci_release_region(pdev, i);
	}

	return rc;
}
EXPORT_SYMBOL(pcim_iomap_regions);

/**
 * pcim_iomap_regions_request_all - Request all BARs and iomap specified ones
 * @pdev: PCI device to map IO resources for
 * @mask: Mask of BARs to iomap
 * @name: Name used when requesting regions
 *
 * Request all PCI BARs and iomap regions specified by @mask.
 */
int pcim_iomap_regions_request_all(struct pci_dev *pdev, int mask,
				   const char *name)
{
	int request_mask = ((1 << 6) - 1) & ~mask;
	int rc;

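	/*
	 * Request the standard BARs that are not in @mask here; the BARs in
	 * @mask are requested and mapped by pcim_iomap_regions() below.
	 */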
	rc = pci_request_selected_regions(pdev, request_mask, name);
	if (rc)
		return rc;

	rc = pcim_iomap_regions(pdev, mask, name);
	if (rc)
		pci_release_selected_regions(pdev, request_mask);
	return rc;
}
EXPORT_SYMBOL(pcim_iomap_regions_request_all);

/**
 * pcim_iounmap_regions - Unmap and release PCI BARs
 * @pdev: PCI device to unmap IO resources for
 * @mask: Mask of BARs to unmap and release
 *
 * Unmap and release regions specified by @mask.
 */
void pcim_iounmap_regions(struct pci_dev *pdev, int mask)
{
	void __iomem * const *iomap;
	int i;

	iomap = pcim_iomap_table(pdev);
	if (!iomap)
		return;

	for (i = 0; i < PCIM_IOMAP_MAX; i++) {
		if (!(mask & (1 << i)))
			continue;

		pcim_iounmap(pdev, iomap[i]);
		pci_release_region(pdev, i);
	}
}
EXPORT_SYMBOL(pcim_iounmap_regions);
#endif /* CONFIG_PCI */