/*
 * Copyright(c) 2015 Intel Corporation. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of version 2 of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 */
#include <linux/radix-tree.h>
#include <linux/device.h>
#include <linux/types.h>
#include <linux/pfn_t.h>
#include <linux/io.h>
#include <linux/mm.h>
#include <linux/memory_hotplug.h>
#include <linux/swap.h>
#include <linux/swapops.h>

#ifndef ioremap_cache
/* temporary while we convert existing ioremap_cache users to memremap */
__weak void __iomem *ioremap_cache(resource_size_t offset, unsigned long size)
{
	return ioremap(offset, size);
}
#endif

#ifndef arch_memremap_wb
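/* default: a cacheable (write-back) mapping via the ioremap family */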
static void *arch_memremap_wb(resource_size_t offset, unsigned long size)
{
	return (__force void *)ioremap_cache(offset, size);
}
#endif

#ifndef arch_memremap_can_ram_remap
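/* default: architectures impose no extra restrictions on remapping RAM */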
static bool arch_memremap_can_ram_remap(resource_size_t offset, size_t size,
					unsigned long flags)
{
	return true;
}
#endif

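/*
 * Return the kernel's existing linear (direct map) address for @offset
 * when the target range is mapped, non-highmem System RAM and the
 * architecture permits reusing it; otherwise return NULL.
 */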
static void *try_ram_remap(resource_size_t offset, size_t size,
			   unsigned long flags)
{
	unsigned long pfn = PHYS_PFN(offset);

	/* In the simple case just return the existing linear address */
	if (pfn_valid(pfn) && !PageHighMem(pfn_to_page(pfn)) &&
	    arch_memremap_can_ram_remap(offset, size, flags))
		return __va(offset);

	return NULL; /* fallback to arch_memremap_wb */
}

/**
 * memremap() - remap an iomem_resource as cacheable memory
 * @offset: iomem resource start address
 * @size: size of remap
 * @flags: any of MEMREMAP_WB, MEMREMAP_WT, MEMREMAP_WC,
 *		  MEMREMAP_ENC, MEMREMAP_DEC
 *
 * memremap() is "ioremap" for cases where it is known that the resource
 * being mapped does not have i/o side effects and the __iomem
 * annotation is not applicable. In the case of multiple flags, the different
 * mapping types will be attempted in the order listed below until one of
 * them succeeds.
 *
 * MEMREMAP_WB - matches the default mapping for System RAM on
 * the architecture. This is usually a read-allocate write-back cache.
 * Moreover, if MEMREMAP_WB is specified and the requested remap region is
 * RAM, memremap() will bypass establishing a new mapping and instead return
 * a pointer into the direct map.
 *
 * MEMREMAP_WT - establish a mapping whereby writes either bypass the
 * cache or are written through to memory and never exist in a
 * cache-dirty state with respect to program visibility. Attempts to
 * map System RAM with this mapping type will fail.
 *
 * MEMREMAP_WC - establish a writecombine mapping, whereby writes may
 * be coalesced together (e.g. in the CPU's write buffers), but is otherwise
 * uncached. Attempts to map System RAM with this mapping type will fail.
 */
void *memremap(resource_size_t offset, size_t size, unsigned long flags)
{
	int is_ram = region_intersects(offset, size,
				       IORESOURCE_SYSTEM_RAM, IORES_DESC_NONE);
	void *addr = NULL;

	if (!flags)
		return NULL;

	if (is_ram == REGION_MIXED) {
		WARN_ONCE(1, "memremap attempted on mixed range %pa size: %#lx\n",
				&offset, (unsigned long) size);
		return NULL;
	}

	/* Try all mapping types requested until one returns non-NULL */
	if (flags & MEMREMAP_WB) {
		/*
		 * MEMREMAP_WB is special in that it can be satisfied
		 * from the direct map. Some archs depend on the
		 * capability of memremap() to autodetect cases where
		 * the requested range is potentially in System RAM.
		 */
		if (is_ram == REGION_INTERSECTS)
			addr = try_ram_remap(offset, size, flags);
		if (!addr)
			addr = arch_memremap_wb(offset, size);
	}

	/*
	 * If we don't have a mapping yet and other request flags are
	 * present then we will be attempting to establish a new virtual
	 * address mapping. Enforce that this mapping is not aliasing
	 * System RAM.
	 */
	if (!addr && is_ram == REGION_INTERSECTS && flags != MEMREMAP_WB) {
		WARN_ONCE(1, "memremap attempted on ram %pa size: %#lx\n",
				&offset, (unsigned long) size);
		return NULL;
	}

	if (!addr && (flags & MEMREMAP_WT))
		addr = ioremap_wt(offset, size);

	if (!addr && (flags & MEMREMAP_WC))
		addr = ioremap_wc(offset, size);

	return addr;
}
EXPORT_SYMBOL(memremap);

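/**
 * memunmap - unmap a memremap() mapping
 * @addr: address returned by memremap()
 *
 * Addresses handed out from the direct map by try_ram_remap() need no
 * teardown, so only vmalloc-range addresses are unmapped.
 */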
void memunmap(void *addr)
{
	if (is_vmalloc_addr(addr))
		iounmap((void __iomem *) addr);
}
EXPORT_SYMBOL(memunmap);

static void devm_memremap_release(struct device *dev, void *res)
{
	memunmap(*(void **)res);
}

static int devm_memremap_match(struct device *dev, void *res, void *match_data)
{
	return *(void **)res == match_data;
}

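/**
 * devm_memremap - managed memremap()
 * @dev: device that owns the mapping
 * @offset: iomem resource start address
 * @size: size of remap
 * @flags: any of the MEMREMAP_* flags accepted by memremap()
 *
 * Returns the remapped address, or an ERR_PTR() on failure. The mapping
 * is released when @dev is unbound, or earlier via devm_memunmap().
 */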
void *devm_memremap(struct device *dev, resource_size_t offset,
		    size_t size, unsigned long flags)
{
	void **ptr, *addr;

	ptr = devres_alloc_node(devm_memremap_release, sizeof(*ptr), GFP_KERNEL,
				dev_to_node(dev));
	if (!ptr)
		return ERR_PTR(-ENOMEM);

	addr = memremap(offset, size, flags);
	if (addr) {
		*ptr = addr;
		devres_add(dev, ptr);
	} else {
		devres_free(ptr);
		return ERR_PTR(-ENXIO);
	}

	return addr;
}
EXPORT_SYMBOL(devm_memremap);

void devm_memunmap(struct device *dev, void *addr)
{
	WARN_ON(devres_release(dev, devm_memremap_release,
				devm_memremap_match, addr));
}
EXPORT_SYMBOL(devm_memunmap);

#ifdef CONFIG_ZONE_DEVICE
static DEFINE_MUTEX(pgmap_lock);
static RADIX_TREE(pgmap_radix, GFP_KERNEL);
#define SECTION_MASK ~((1UL << PA_SECTION_SHIFT) - 1)
#define SECTION_SIZE (1UL << PA_SECTION_SHIFT)

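/* bookkeeping for one devm_memremap_pages() instance */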
struct page_map {
	struct resource res;
	struct percpu_ref *ref;
	struct dev_pagemap pgmap;
	struct vmem_altmap altmap;
};

static unsigned long order_at(struct resource *res, unsigned long pgoff)
{
	unsigned long phys_pgoff = PHYS_PFN(res->start) + pgoff;
	unsigned long nr_pages, mask;

	nr_pages = PHYS_PFN(resource_size(res));
	if (nr_pages == pgoff)
		return ULONG_MAX;

	/*
	 * What is the largest aligned power-of-2 range available from
	 * this resource pgoff to the end of the resource range,
	 * considering the alignment of the current pgoff?
	 */
	mask = phys_pgoff | rounddown_pow_of_two(nr_pages - pgoff);
	if (!mask)
		return ULONG_MAX;

	return find_first_bit(&mask, BITS_PER_LONG);
}

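/*
 * Walk the resource as a series of maximally sized, naturally aligned
 * power-of-two chunks, matching the multi-order radix tree entries
 * inserted for the range.
 */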
#define foreach_order_pgoff(res, order, pgoff) \
	for (pgoff = 0, order = order_at((res), pgoff); order < ULONG_MAX; \
			pgoff += 1UL << order, order = order_at((res), pgoff))

#if IS_ENABLED(CONFIG_DEVICE_PRIVATE)
int device_private_entry_fault(struct vm_area_struct *vma,
		       unsigned long addr,
		       swp_entry_t entry,
		       unsigned int flags,
		       pmd_t *pmdp)
{
	struct page *page = device_private_entry_to_page(entry);

	/*
	 * The page_fault() callback must migrate page back to system memory
	 * so that CPU can access it. This might fail for various reasons
	 * (device issue, device was unsafely unplugged, ...). When such
	 * error conditions happen, the callback must return VM_FAULT_SIGBUS.
	 *
	 * Note that because memory cgroup charges are accounted to the device
	 * memory, this should never fail because of memory restrictions (but
	 * allocation of regular system page might still fail because we are
	 * out of memory).
	 *
	 * There is a more in-depth description of what that callback can and
	 * cannot do, in include/linux/memremap.h
	 */
	return page->pgmap->page_fault(vma, addr, page, flags, pmdp);
}
EXPORT_SYMBOL(device_private_entry_fault);
#endif /* CONFIG_DEVICE_PRIVATE */

static void pgmap_radix_release(struct resource *res)
{
	unsigned long pgoff, order;

	mutex_lock(&pgmap_lock);
	foreach_order_pgoff(res, order, pgoff)
		radix_tree_delete(&pgmap_radix, PHYS_PFN(res->start) + pgoff);
	mutex_unlock(&pgmap_lock);

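	/* wait for concurrent find_dev_pagemap() lookups to finish */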
	synchronize_rcu();
}

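/* first device pfn for which pfn_to_page() is valid, skipping any altmap reservation */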
static unsigned long pfn_first(struct page_map *page_map)
{
	struct dev_pagemap *pgmap = &page_map->pgmap;
	const struct resource *res = &page_map->res;
	struct vmem_altmap *altmap = pgmap->altmap;
	unsigned long pfn;

	pfn = res->start >> PAGE_SHIFT;
	if (altmap)
		pfn += vmem_altmap_offset(altmap);
	return pfn;
}

static unsigned long pfn_end(struct page_map *page_map)
{
	const struct resource *res = &page_map->res;

	return (res->start + resource_size(res)) >> PAGE_SHIFT;
}

#define for_each_device_pfn(pfn, map) \
	for (pfn = pfn_first(map); pfn < pfn_end(map); pfn++)

static void devm_memremap_pages_release(struct device *dev, void *data)
{
	struct page_map *page_map = data;
	struct resource *res = &page_map->res;
	resource_size_t align_start, align_size;
	struct dev_pagemap *pgmap = &page_map->pgmap;
	unsigned long pfn;

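	/* put the initial reference held by each page in the range */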
	for_each_device_pfn(pfn, page_map)
		put_page(pfn_to_page(pfn));

	if (percpu_ref_tryget_live(pgmap->ref)) {
		dev_WARN(dev, "%s: page mapping is still live!\n", __func__);
		percpu_ref_put(pgmap->ref);
	}

	/* pages are dead and unused, undo the arch mapping */
	align_start = res->start & ~(SECTION_SIZE - 1);
	align_size = ALIGN(resource_size(res), SECTION_SIZE);

	mem_hotplug_begin();
	arch_remove_memory(align_start, align_size);
	mem_hotplug_done();

	untrack_pfn(NULL, PHYS_PFN(align_start), align_size);
	pgmap_radix_release(res);
	dev_WARN_ONCE(dev, pgmap->altmap && pgmap->altmap->alloc,
			"%s: failed to free all reserved pages\n", __func__);
}

/* assumes rcu_read_lock() held at entry */
struct dev_pagemap *find_dev_pagemap(resource_size_t phys)
{
	struct page_map *page_map;

	WARN_ON_ONCE(!rcu_read_lock_held());

	page_map = radix_tree_lookup(&pgmap_radix, PHYS_PFN(phys));
	return page_map ? &page_map->pgmap : NULL;
}

/**
 * devm_memremap_pages - remap and provide memmap backing for the given resource
 * @dev: hosting device for @res
 * @res: "host memory" address range
 * @ref: a live per-cpu reference count
 * @altmap: optional descriptor for allocating the memmap from @res
 *
 * Notes:
 * 1/ @ref must be 'live' on entry and 'dead' before devm_memunmap_pages() time
 *    (or devm release event). The expected order of events is that @ref has
 *    been through percpu_ref_kill() before devm_memremap_pages_release(). The
 *    wait for the completion of all references being dropped and
 *    percpu_ref_exit() must occur after devm_memremap_pages_release().
 *
 * 2/ @res is expected to be a host memory range that could feasibly be
 *    treated as a "System RAM" range, i.e. not a device mmio range, but
 *    this is not enforced.
 */
void *devm_memremap_pages(struct device *dev, struct resource *res,
		struct percpu_ref *ref, struct vmem_altmap *altmap)
{
	resource_size_t align_start, align_size, align_end;
	unsigned long pfn, pgoff, order;
	pgprot_t pgprot = PAGE_KERNEL;
	struct dev_pagemap *pgmap;
	struct page_map *page_map;
	int error, nid, is_ram;

	align_start = res->start & ~(SECTION_SIZE - 1);
	align_size = ALIGN(res->start + resource_size(res), SECTION_SIZE)
		- align_start;
	is_ram = region_intersects(align_start, align_size,
		IORESOURCE_SYSTEM_RAM, IORES_DESC_NONE);

	if (is_ram == REGION_MIXED) {
		WARN_ONCE(1, "%s attempted on mixed region %pr\n",
				__func__, res);
		return ERR_PTR(-ENXIO);
	}

	if (is_ram == REGION_INTERSECTS)
		return __va(res->start);

	if (!ref)
		return ERR_PTR(-EINVAL);

	page_map = devres_alloc_node(devm_memremap_pages_release,
			sizeof(*page_map), GFP_KERNEL, dev_to_node(dev));
	if (!page_map)
		return ERR_PTR(-ENOMEM);
	pgmap = &page_map->pgmap;

	memcpy(&page_map->res, res, sizeof(*res));

	pgmap->dev = dev;
	if (altmap) {
		memcpy(&page_map->altmap, altmap, sizeof(*altmap));
		pgmap->altmap = &page_map->altmap;
	}
	pgmap->ref = ref;
	pgmap->res = &page_map->res;
	pgmap->type = MEMORY_DEVICE_HOST;
	pgmap->page_fault = NULL;
	pgmap->page_free = NULL;
	pgmap->data = NULL;

	mutex_lock(&pgmap_lock);
	error = 0;
	align_end = align_start + align_size - 1;

	foreach_order_pgoff(res, order, pgoff) {
		struct dev_pagemap *dup;

		rcu_read_lock();
		dup = find_dev_pagemap(res->start + PFN_PHYS(pgoff));
		rcu_read_unlock();
		if (dup) {
			dev_err(dev, "%s: %pr collides with mapping for %s\n",
					__func__, res, dev_name(dup->dev));
			error = -EBUSY;
			break;
		}
		error = __radix_tree_insert(&pgmap_radix,
				PHYS_PFN(res->start) + pgoff, order, page_map);
		if (error) {
			dev_err(dev, "%s: failed: %d\n", __func__, error);
			break;
		}
	}
	mutex_unlock(&pgmap_lock);
	if (error)
		goto err_radix;

	nid = dev_to_node(dev);
	if (nid < 0)
		nid = numa_mem_id();

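	/* let the arch (e.g. x86 PAT) validate and track the new pfn range */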
	error = track_pfn_remap(NULL, &pgprot, PHYS_PFN(align_start), 0,
			align_size);
	if (error)
		goto err_pfn_remap;

	mem_hotplug_begin();
	error = arch_add_memory(nid, align_start, align_size, false);
	if (!error)
		move_pfn_range_to_zone(&NODE_DATA(nid)->node_zones[ZONE_DEVICE],
					align_start >> PAGE_SHIFT,
					align_size >> PAGE_SHIFT);
	mem_hotplug_done();
	if (error)
		goto err_add_memory;

	for_each_device_pfn(pfn, page_map) {
		struct page *page = pfn_to_page(pfn);

		/*
		 * ZONE_DEVICE pages union ->lru with a ->pgmap back
		 * pointer. It is a bug if a ZONE_DEVICE page is ever
		 * freed or placed on a driver-private list. Seed the
		 * storage with LIST_POISON* values.
		 */
		list_del(&page->lru);
		page->pgmap = pgmap;
		percpu_ref_get(ref);
	}
	devres_add(dev, page_map);
	return __va(res->start);

err_add_memory:
	untrack_pfn(NULL, PHYS_PFN(align_start), align_size);
err_pfn_remap:
err_radix:
	pgmap_radix_release(res);
	devres_free(page_map);
	return ERR_PTR(error);
}
EXPORT_SYMBOL(devm_memremap_pages);

unsigned long vmem_altmap_offset(struct vmem_altmap *altmap)
{
	/* number of pfns from base where pfn_to_page() is valid */
	return altmap->reserve + altmap->free;
}

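/* return pfns previously allocated from the altmap reservation */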
void vmem_altmap_free(struct vmem_altmap *altmap, unsigned long nr_pfns)
{
	altmap->alloc -= nr_pfns;
}

struct vmem_altmap *to_vmem_altmap(unsigned long memmap_start)
{
	/*
	 * 'memmap_start' is the virtual address for the first "struct
	 * page" in this range of the vmemmap array. In the case of
	 * CONFIG_SPARSEMEM_VMEMMAP a page_to_pfn conversion is simple
	 * pointer arithmetic, so we can perform this to_vmem_altmap()
	 * conversion without concern for the initialization state of
	 * the struct page fields.
	 */
	struct page *page = (struct page *) memmap_start;
	struct dev_pagemap *pgmap;

	/*
	 * Unconditionally retrieve a dev_pagemap associated with the
	 * given physical address, this is only for use in the
	 * arch_{add|remove}_memory() for setting up and tearing down
	 * the memmap.
	 */
	rcu_read_lock();
	pgmap = find_dev_pagemap(__pfn_to_phys(page_to_pfn(page)));
	rcu_read_unlock();

	return pgmap ? pgmap->altmap : NULL;
}
#endif /* CONFIG_ZONE_DEVICE */

#if IS_ENABLED(CONFIG_DEVICE_PRIVATE) || IS_ENABLED(CONFIG_DEVICE_PUBLIC)
void put_zone_device_private_or_public_page(struct page *page)
{
	int count = page_ref_dec_return(page);

	/*
	 * If the refcount is 1 the page is now free, and the count is
	 * stable because nobody else holds a reference on the page.
	 */
	if (count == 1) {
		/* Clear Active bit in case of parallel mark_page_accessed */
		__ClearPageActive(page);
		__ClearPageWaiters(page);

		page->mapping = NULL;
		mem_cgroup_uncharge(page);

		page->pgmap->page_free(page, page->pgmap->data);
	} else if (!count)
		__put_page(page);
}
EXPORT_SYMBOL(put_zone_device_private_or_public_page);
#endif /* CONFIG_DEVICE_PRIVATE || CONFIG_DEVICE_PUBLIC */