/*
 * Copyright(c) 2015 Intel Corporation. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of version 2 of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 */
#include <linux/radix-tree.h>
#include <linux/memremap.h>
#include <linux/device.h>
#include <linux/types.h>
#include <linux/pfn_t.h>
#include <linux/io.h>
#include <linux/mm.h>
#include <linux/memory_hotplug.h>

#ifndef ioremap_cache
/* temporary while we convert existing ioremap_cache users to memremap */
__weak void __iomem *ioremap_cache(resource_size_t offset, unsigned long size)
{
	return ioremap(offset, size);
}
#endif

static void *try_ram_remap(resource_size_t offset, size_t size)
{
	struct page *page = pfn_to_page(offset >> PAGE_SHIFT);

	/* In the simple case just return the existing linear address */
	if (!PageHighMem(page))
		return __va(offset);
	return NULL; /* fallback to ioremap_cache */
}

/**
 * memremap() - remap an iomem_resource as cacheable memory
 * @offset: iomem resource start address
 * @size: size of remap
 * @flags: either MEMREMAP_WB or MEMREMAP_WT
 *
 * memremap() is "ioremap" for cases where it is known that the resource
 * being mapped does not have i/o side effects and the __iomem
 * annotation is not applicable.
 *
 * MEMREMAP_WB - matches the default mapping for System RAM on
 * the architecture. This is usually a read-allocate write-back cache.
 * Moreover, if MEMREMAP_WB is specified and the requested remap region is RAM
 * memremap() will bypass establishing a new mapping and instead return
 * a pointer into the direct map.
 *
 * MEMREMAP_WT - establish a mapping whereby writes either bypass the
 * cache or are written through to memory and never exist in a
 * cache-dirty state with respect to program visibility. Attempts to
 * map System RAM with this mapping type will fail.
 */
void *memremap(resource_size_t offset, size_t size, unsigned long flags)
{
	int is_ram = region_intersects(offset, size,
			IORESOURCE_SYSTEM_RAM, IORES_DESC_NONE);
	void *addr = NULL;

	if (is_ram == REGION_MIXED) {
		WARN_ONCE(1, "memremap attempted on mixed range %pa size: %#lx\n",
				&offset, (unsigned long) size);
		return NULL;
	}

	/* Try all mapping types requested until one returns non-NULL */
	if (flags & MEMREMAP_WB) {
		flags &= ~MEMREMAP_WB;
		/*
		 * MEMREMAP_WB is special in that it can be satisfied
		 * from the direct map. Some archs depend on the
		 * capability of memremap() to autodetect cases where
		 * the requested range is potentially in System RAM.
		 */
		if (is_ram == REGION_INTERSECTS)
			addr = try_ram_remap(offset, size);
		if (!addr)
			addr = ioremap_cache(offset, size);
	}

	/*
	 * If we don't have a mapping yet and more request flags are
	 * pending then we will be attempting to establish a new virtual
	 * address mapping. Enforce that this mapping is not aliasing
	 * System RAM.
	 */
	if (!addr && is_ram == REGION_INTERSECTS && flags) {
		WARN_ONCE(1, "memremap attempted on ram %pa size: %#lx\n",
				&offset, (unsigned long) size);
		return NULL;
	}

	if (!addr && (flags & MEMREMAP_WT)) {
		flags &= ~MEMREMAP_WT;
		addr = ioremap_wt(offset, size);
	}

	return addr;
}
EXPORT_SYMBOL(memremap);

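/*
 * Only addresses obtained via ioremap_cache()/ioremap_wt() live in the
 * vmalloc/ioremap area and need to be unmapped; direct-map addresses
 * handed back for System RAM are left untouched.
 */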
void memunmap(void *addr)
{
	if (is_vmalloc_addr(addr))
		iounmap((void __iomem *) addr);
}
EXPORT_SYMBOL(memunmap);

static void devm_memremap_release(struct device *dev, void *res)
{
	memunmap(*(void **)res);
}

static int devm_memremap_match(struct device *dev, void *res, void *match_data)
{
	return *(void **)res == match_data;
}

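/*
 * Device-managed memremap(): the mapping is tracked as a devres entry
 * and released via devm_memremap_release() when @dev is unbound.
 */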
void *devm_memremap(struct device *dev, resource_size_t offset,
		size_t size, unsigned long flags)
{
	void **ptr, *addr;

	ptr = devres_alloc_node(devm_memremap_release, sizeof(*ptr), GFP_KERNEL,
			dev_to_node(dev));
	if (!ptr)
		return ERR_PTR(-ENOMEM);

	addr = memremap(offset, size, flags);
	if (addr) {
		*ptr = addr;
		devres_add(dev, ptr);
	} else {
		devres_free(ptr);
		return ERR_PTR(-ENXIO);
	}

	return addr;
}
EXPORT_SYMBOL(devm_memremap);

void devm_memunmap(struct device *dev, void *addr)
{
	WARN_ON(devres_release(dev, devm_memremap_release,
				devm_memremap_match, addr));
}
EXPORT_SYMBOL(devm_memunmap);

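/* package a physical address as a pfn_t carrying the given PFN_* flags */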
pfn_t phys_to_pfn_t(dma_addr_t addr, unsigned long flags)
{
	return __pfn_to_pfn_t(addr >> PAGE_SHIFT, flags);
}
EXPORT_SYMBOL(phys_to_pfn_t);

#ifdef CONFIG_ZONE_DEVICE
static DEFINE_MUTEX(pgmap_lock);
static RADIX_TREE(pgmap_radix, GFP_KERNEL);
#define SECTION_MASK ~((1UL << PA_SECTION_SHIFT) - 1)
#define SECTION_SIZE (1UL << PA_SECTION_SHIFT)

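/* devres payload tracking a single devm_memremap_pages() registration */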
struct page_map {
	struct resource res;
	struct percpu_ref *ref;
	struct dev_pagemap pgmap;
	struct vmem_altmap altmap;
};

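/*
 * Elevate / drop the percpu_ref that keeps the hosting device mapping
 * alive while any of its ZONE_DEVICE pages are in use.
 */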
void get_zone_device_page(struct page *page)
{
	percpu_ref_get(page->pgmap->ref);
}
EXPORT_SYMBOL(get_zone_device_page);

void put_zone_device_page(struct page *page)
{
	put_dev_pagemap(page->pgmap);
}
EXPORT_SYMBOL(put_zone_device_page);

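/* drop every per-section radix slot registered for @res */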
static void pgmap_radix_release(struct resource *res)
{
	resource_size_t key;

	mutex_lock(&pgmap_lock);
	for (key = res->start; key <= res->end; key += SECTION_SIZE)
		radix_tree_delete(&pgmap_radix, key >> PA_SECTION_SHIFT);
	mutex_unlock(&pgmap_lock);
}

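/*
 * Range of pfns backed by struct pages. When an altmap is in use the
 * leading pfns of @res are reserved for the memmap itself and skipped.
 */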
static unsigned long pfn_first(struct page_map *page_map)
{
	struct dev_pagemap *pgmap = &page_map->pgmap;
	const struct resource *res = &page_map->res;
	struct vmem_altmap *altmap = pgmap->altmap;
	unsigned long pfn;

	pfn = res->start >> PAGE_SHIFT;
	if (altmap)
		pfn += vmem_altmap_offset(altmap);
	return pfn;
}

static unsigned long pfn_end(struct page_map *page_map)
{
	const struct resource *res = &page_map->res;

	return (res->start + resource_size(res)) >> PAGE_SHIFT;
}

#define for_each_device_pfn(pfn, map) \
	for (pfn = pfn_first(map); pfn < pfn_end(map); pfn++)

static void devm_memremap_pages_release(struct device *dev, void *data)
{
	struct page_map *page_map = data;
	struct resource *res = &page_map->res;
	resource_size_t align_start, align_size;
	struct dev_pagemap *pgmap = &page_map->pgmap;

	if (percpu_ref_tryget_live(pgmap->ref)) {
		dev_WARN(dev, "%s: page mapping is still live!\n", __func__);
		percpu_ref_put(pgmap->ref);
	}

	pgmap_radix_release(res);

	/* pages are dead and unused, undo the arch mapping */
	align_start = res->start & ~(SECTION_SIZE - 1);
	align_size = ALIGN(resource_size(res), SECTION_SIZE);
	arch_remove_memory(align_start, align_size);
	dev_WARN_ONCE(dev, pgmap->altmap && pgmap->altmap->alloc,
			"%s: failed to free all reserved pages\n", __func__);
}

/* assumes rcu_read_lock() held at entry */
struct dev_pagemap *find_dev_pagemap(resource_size_t phys)
{
	struct page_map *page_map;

	WARN_ON_ONCE(!rcu_read_lock_held());

	page_map = radix_tree_lookup(&pgmap_radix, phys >> PA_SECTION_SHIFT);
	return page_map ? &page_map->pgmap : NULL;
}

/**
 * devm_memremap_pages - remap and provide memmap backing for the given resource
 * @dev: hosting device for @res
 * @res: "host memory" address range
 * @ref: a live per-cpu reference count
 * @altmap: optional descriptor for allocating the memmap from @res
 *
 * Notes:
 * 1/ @ref must be 'live' on entry and 'dead' before devm_memunmap_pages() time
 *    (or devm release event).
 *
 * 2/ @res is expected to be a host memory range that could feasibly be
 *    treated as a "System RAM" range, i.e. not a device mmio range, but
 *    this is not enforced.
 */
void *devm_memremap_pages(struct device *dev, struct resource *res,
		struct percpu_ref *ref, struct vmem_altmap *altmap)
{
	int is_ram = region_intersects(res->start, resource_size(res),
			IORESOURCE_SYSTEM_RAM, IORES_DESC_NONE);
	resource_size_t key, align_start, align_size;
	struct dev_pagemap *pgmap;
	struct page_map *page_map;
	unsigned long pfn;
	int error, nid;

	if (is_ram == REGION_MIXED) {
		WARN_ONCE(1, "%s attempted on mixed region %pr\n",
				__func__, res);
		return ERR_PTR(-ENXIO);
	}

	if (is_ram == REGION_INTERSECTS)
		return __va(res->start);

	if (altmap && !IS_ENABLED(CONFIG_SPARSEMEM_VMEMMAP)) {
		dev_err(dev, "%s: altmap requires CONFIG_SPARSEMEM_VMEMMAP=y\n",
				__func__);
		return ERR_PTR(-ENXIO);
	}

	if (!ref)
		return ERR_PTR(-EINVAL);

	page_map = devres_alloc_node(devm_memremap_pages_release,
			sizeof(*page_map), GFP_KERNEL, dev_to_node(dev));
	if (!page_map)
		return ERR_PTR(-ENOMEM);
	pgmap = &page_map->pgmap;

	memcpy(&page_map->res, res, sizeof(*res));

	pgmap->dev = dev;
	if (altmap) {
		memcpy(&page_map->altmap, altmap, sizeof(*altmap));
		pgmap->altmap = &page_map->altmap;
	}
	pgmap->ref = ref;
	pgmap->res = &page_map->res;

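	/*
	 * Register one radix slot per section spanned by @res so that
	 * find_dev_pagemap() lookups resolve and collisions with existing
	 * mappings are detected.
	 */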
	mutex_lock(&pgmap_lock);
	error = 0;
	for (key = res->start; key <= res->end; key += SECTION_SIZE) {
		struct dev_pagemap *dup;

		rcu_read_lock();
		dup = find_dev_pagemap(key);
		rcu_read_unlock();
		if (dup) {
			dev_err(dev, "%s: %pr collides with mapping for %s\n",
					__func__, res, dev_name(dup->dev));
			error = -EBUSY;
			break;
		}
		error = radix_tree_insert(&pgmap_radix, key >> PA_SECTION_SHIFT,
				page_map);
		if (error) {
			dev_err(dev, "%s: failed: %d\n", __func__, error);
			break;
		}
	}
	mutex_unlock(&pgmap_lock);
	if (error)
		goto err_radix;

	nid = dev_to_node(dev);
	if (nid < 0)
		nid = numa_mem_id();

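	/*
	 * Hot-add the section-aligned range so the arch code builds a
	 * struct page memmap for it ('true' marks it as device memory).
	 */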
	align_start = res->start & ~(SECTION_SIZE - 1);
	align_size = ALIGN(resource_size(res), SECTION_SIZE);
	error = arch_add_memory(nid, align_start, align_size, true);
	if (error)
		goto err_add_memory;

	for_each_device_pfn(pfn, page_map) {
		struct page *page = pfn_to_page(pfn);

		/* ZONE_DEVICE pages must never appear on a slab lru */
		list_force_poison(&page->lru);
		page->pgmap = pgmap;
	}
	devres_add(dev, page_map);
	return __va(res->start);

 err_add_memory:
 err_radix:
	pgmap_radix_release(res);
	devres_free(page_map);
	return ERR_PTR(error);
}
EXPORT_SYMBOL(devm_memremap_pages);

unsigned long vmem_altmap_offset(struct vmem_altmap *altmap)
{
	/* number of pfns from base where pfn_to_page() is valid */
	return altmap->reserve + altmap->free;
}

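/* release previously allocated memmap pfns back to the altmap */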
void vmem_altmap_free(struct vmem_altmap *altmap, unsigned long nr_pfns)
{
	altmap->alloc -= nr_pfns;
}

#ifdef CONFIG_SPARSEMEM_VMEMMAP
struct vmem_altmap *to_vmem_altmap(unsigned long memmap_start)
{
	/*
	 * 'memmap_start' is the virtual address for the first "struct
	 * page" in this range of the vmemmap array. In the case of
	 * CONFIG_SPARSEMEM_VMEMMAP a page_to_pfn conversion is simple
	 * pointer arithmetic, so we can perform this to_vmem_altmap()
	 * conversion without concern for the initialization state of
	 * the struct page fields.
	 */
	struct page *page = (struct page *) memmap_start;
	struct dev_pagemap *pgmap;

	/*
	 * Unconditionally retrieve a dev_pagemap associated with the
	 * given physical address; this is only for use by
	 * arch_{add|remove}_memory() when setting up and tearing down
	 * the memmap.
	 */
	rcu_read_lock();
	pgmap = find_dev_pagemap(__pfn_to_phys(page_to_pfn(page)));
	rcu_read_unlock();

	return pgmap ? pgmap->altmap : NULL;
}
#endif /* CONFIG_SPARSEMEM_VMEMMAP */
#endif /* CONFIG_ZONE_DEVICE */