/*
 * Copyright(c) 2015 Intel Corporation. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of version 2 of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 */
#include <linux/radix-tree.h>
#include <linux/memremap.h>
#include <linux/device.h>
#include <linux/types.h>
#include <linux/pfn_t.h>
#include <linux/io.h>
#include <linux/mm.h>
#include <linux/memory_hotplug.h>

#ifndef ioremap_cache
/* temporary while we convert existing ioremap_cache users to memremap */
__weak void __iomem *ioremap_cache(resource_size_t offset, unsigned long size)
{
	return ioremap(offset, size);
}
#endif

#ifndef arch_memremap_wb
static void *arch_memremap_wb(resource_size_t offset, unsigned long size)
{
	return (__force void *)ioremap_cache(offset, size);
}
#endif

static void *try_ram_remap(resource_size_t offset, size_t size)
{
	unsigned long pfn = PHYS_PFN(offset);

	/* In the simple case just return the existing linear address */
	if (pfn_valid(pfn) && !PageHighMem(pfn_to_page(pfn)))
		return __va(offset);
	return NULL; /* fallback to arch_memremap_wb */
}

/**
 * memremap() - remap an iomem_resource as cacheable memory
 * @offset: iomem resource start address
 * @size: size of remap
 * @flags: any of MEMREMAP_WB, MEMREMAP_WT and MEMREMAP_WC
 *
 * memremap() is "ioremap" for cases where it is known that the resource
 * being mapped does not have i/o side effects and the __iomem
 * annotation is not applicable. In the case of multiple flags, the different
 * mapping types will be attempted in the order listed below until one of
 * them succeeds.
 *
 * MEMREMAP_WB - matches the default mapping for System RAM on
 * the architecture. This is usually a read-allocate write-back cache.
 * Moreover, if MEMREMAP_WB is specified and the requested remap region is RAM,
 * memremap() will bypass establishing a new mapping and instead return
 * a pointer into the direct map.
 *
 * MEMREMAP_WT - establish a mapping whereby writes either bypass the
 * cache or are written through to memory and never exist in a
 * cache-dirty state with respect to program visibility. Attempts to
 * map System RAM with this mapping type will fail.
 *
 * MEMREMAP_WC - establish a writecombine mapping, whereby writes may
 * be coalesced together (e.g. in the CPU's write buffers), but the mapping
 * is otherwise uncached. Attempts to map System RAM with this mapping type
 * will fail.
 */
void *memremap(resource_size_t offset, size_t size, unsigned long flags)
{
	int is_ram = region_intersects(offset, size,
					IORESOURCE_SYSTEM_RAM, IORES_DESC_NONE);
	void *addr = NULL;

	if (!flags)
		return NULL;

	if (is_ram == REGION_MIXED) {
		WARN_ONCE(1, "memremap attempted on mixed range %pa size: %#lx\n",
				&offset, (unsigned long) size);
		return NULL;
	}

	/* Try all mapping types requested until one returns non-NULL */
	if (flags & MEMREMAP_WB) {
		/*
		 * MEMREMAP_WB is special in that it can be satisfied
		 * from the direct map. Some archs depend on the
		 * capability of memremap() to autodetect cases where
		 * the requested range is potentially in System RAM.
		 */
		if (is_ram == REGION_INTERSECTS)
			addr = try_ram_remap(offset, size);
		if (!addr)
			addr = arch_memremap_wb(offset, size);
	}

	/*
	 * If we don't have a mapping yet and other request flags are
	 * present then we will be attempting to establish a new virtual
	 * address mapping. Enforce that this mapping is not aliasing
	 * System RAM.
	 */
	if (!addr && is_ram == REGION_INTERSECTS && flags != MEMREMAP_WB) {
		WARN_ONCE(1, "memremap attempted on ram %pa size: %#lx\n",
				&offset, (unsigned long) size);
		return NULL;
	}

	if (!addr && (flags & MEMREMAP_WT))
		addr = ioremap_wt(offset, size);

	if (!addr && (flags & MEMREMAP_WC))
		addr = ioremap_wc(offset, size);

	return addr;
}
EXPORT_SYMBOL(memremap);

void memunmap(void *addr)
{
	if (is_vmalloc_addr(addr))
		iounmap((void __iomem *) addr);
}
EXPORT_SYMBOL(memunmap);
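
/*
 * Illustrative sketch of a typical memremap()/memunmap() caller: map a
 * hypothetical non-System-RAM range (e.g. a firmware-described table)
 * write-back, falling back to write-through if WB cannot be established.
 * The example_* name and the 'phys'/'len' values are assumptions for
 * illustration only; the block is kept out of the build with #if 0.
 */
#if 0
static int example_read_table(resource_size_t phys, size_t len)
{
	void *table;

	table = memremap(phys, len, MEMREMAP_WB | MEMREMAP_WT);
	if (!table)
		return -ENOMEM;

	/* 'table' is a regular (non-__iomem) kernel virtual address */
	if (memcmp(table, "EXMP", 4) == 0)
		pr_info("example: found table at %pa\n", &phys);

	memunmap(table);
	return 0;
}
#endif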

static void devm_memremap_release(struct device *dev, void *res)
{
	memunmap(*(void **)res);
}

static int devm_memremap_match(struct device *dev, void *res, void *match_data)
{
	return *(void **)res == match_data;
}

void *devm_memremap(struct device *dev, resource_size_t offset,
		size_t size, unsigned long flags)
{
	void **ptr, *addr;

	ptr = devres_alloc_node(devm_memremap_release, sizeof(*ptr), GFP_KERNEL,
			dev_to_node(dev));
	if (!ptr)
		return ERR_PTR(-ENOMEM);

	addr = memremap(offset, size, flags);
	if (addr) {
		*ptr = addr;
		devres_add(dev, ptr);
	} else {
		devres_free(ptr);
		return ERR_PTR(-ENXIO);
	}

	return addr;
}
EXPORT_SYMBOL(devm_memremap);

void devm_memunmap(struct device *dev, void *addr)
{
	WARN_ON(devres_release(dev, devm_memremap_release,
				devm_memremap_match, addr));
}
EXPORT_SYMBOL(devm_memunmap);

#ifdef CONFIG_ZONE_DEVICE
static DEFINE_MUTEX(pgmap_lock);
static RADIX_TREE(pgmap_radix, GFP_KERNEL);
#define SECTION_MASK ~((1UL << PA_SECTION_SHIFT) - 1)
#define SECTION_SIZE (1UL << PA_SECTION_SHIFT)

struct page_map {
	struct resource res;
	struct percpu_ref *ref;
	struct dev_pagemap pgmap;
	struct vmem_altmap altmap;
};

static void pgmap_radix_release(struct resource *res)
{
	resource_size_t key, align_start, align_size, align_end;

	align_start = res->start & ~(SECTION_SIZE - 1);
	align_size = ALIGN(resource_size(res), SECTION_SIZE);
	align_end = align_start + align_size - 1;

	mutex_lock(&pgmap_lock);
	for (key = res->start; key <= res->end; key += SECTION_SIZE)
		radix_tree_delete(&pgmap_radix, key >> PA_SECTION_SHIFT);
	mutex_unlock(&pgmap_lock);
}

static unsigned long pfn_first(struct page_map *page_map)
{
	struct dev_pagemap *pgmap = &page_map->pgmap;
	const struct resource *res = &page_map->res;
	struct vmem_altmap *altmap = pgmap->altmap;
	unsigned long pfn;

	pfn = res->start >> PAGE_SHIFT;
	if (altmap)
		pfn += vmem_altmap_offset(altmap);
	return pfn;
}

static unsigned long pfn_end(struct page_map *page_map)
{
	const struct resource *res = &page_map->res;

	return (res->start + resource_size(res)) >> PAGE_SHIFT;
}

#define for_each_device_pfn(pfn, map) \
	for (pfn = pfn_first(map); pfn < pfn_end(map); pfn++)

static void devm_memremap_pages_release(struct device *dev, void *data)
{
	struct page_map *page_map = data;
	struct resource *res = &page_map->res;
	resource_size_t align_start, align_size;
	struct dev_pagemap *pgmap = &page_map->pgmap;
	unsigned long pfn;

	for_each_device_pfn(pfn, page_map)
		put_page(pfn_to_page(pfn));

	if (percpu_ref_tryget_live(pgmap->ref)) {
		dev_WARN(dev, "%s: page mapping is still live!\n", __func__);
		percpu_ref_put(pgmap->ref);
	}

	/* pages are dead and unused, undo the arch mapping */
	align_start = res->start & ~(SECTION_SIZE - 1);
	align_size = ALIGN(resource_size(res), SECTION_SIZE);

	mem_hotplug_begin();
	arch_remove_memory(align_start, align_size);
	mem_hotplug_done();

	untrack_pfn(NULL, PHYS_PFN(align_start), align_size);
	pgmap_radix_release(res);
	dev_WARN_ONCE(dev, pgmap->altmap && pgmap->altmap->alloc,
			"%s: failed to free all reserved pages\n", __func__);
}

/* assumes rcu_read_lock() held at entry */
struct dev_pagemap *find_dev_pagemap(resource_size_t phys)
{
	struct page_map *page_map;

	WARN_ON_ONCE(!rcu_read_lock_held());

	page_map = radix_tree_lookup(&pgmap_radix, phys >> PA_SECTION_SHIFT);
	return page_map ? &page_map->pgmap : NULL;
}

/**
 * devm_memremap_pages - remap and provide memmap backing for the given resource
 * @dev: hosting device for @res
 * @res: "host memory" address range
 * @ref: a live per-cpu reference count
 * @altmap: optional descriptor for allocating the memmap from @res
 *
 * Notes:
 * 1/ @ref must be 'live' on entry and 'dead' before devm_memunmap_pages() time
 *    (or devm release event). The expected order of events is that @ref has
 *    been through percpu_ref_kill() before devm_memremap_pages_release().
 *    Waiting for all references to be dropped, and the final
 *    percpu_ref_exit(), must occur after devm_memremap_pages_release().
 *
 * 2/ @res is expected to be a host memory range that could feasibly be
 *    treated as a "System RAM" range, i.e. not a device mmio range, but
 *    this is not enforced.
 */
void *devm_memremap_pages(struct device *dev, struct resource *res,
		struct percpu_ref *ref, struct vmem_altmap *altmap)
{
	resource_size_t key, align_start, align_size, align_end;
	pgprot_t pgprot = PAGE_KERNEL;
	struct dev_pagemap *pgmap;
	struct page_map *page_map;
	int error, nid, is_ram;
	unsigned long pfn;

	align_start = res->start & ~(SECTION_SIZE - 1);
	align_size = ALIGN(res->start + resource_size(res), SECTION_SIZE)
		- align_start;
	is_ram = region_intersects(align_start, align_size,
		IORESOURCE_SYSTEM_RAM, IORES_DESC_NONE);

	if (is_ram == REGION_MIXED) {
		WARN_ONCE(1, "%s attempted on mixed region %pr\n",
				__func__, res);
		return ERR_PTR(-ENXIO);
	}

	if (is_ram == REGION_INTERSECTS)
		return __va(res->start);

	if (!ref)
		return ERR_PTR(-EINVAL);

	page_map = devres_alloc_node(devm_memremap_pages_release,
			sizeof(*page_map), GFP_KERNEL, dev_to_node(dev));
	if (!page_map)
		return ERR_PTR(-ENOMEM);
	pgmap = &page_map->pgmap;

	memcpy(&page_map->res, res, sizeof(*res));

	pgmap->dev = dev;
	if (altmap) {
		memcpy(&page_map->altmap, altmap, sizeof(*altmap));
		pgmap->altmap = &page_map->altmap;
	}
	pgmap->ref = ref;
	pgmap->res = &page_map->res;

	mutex_lock(&pgmap_lock);
	error = 0;
	align_end = align_start + align_size - 1;
	for (key = align_start; key <= align_end; key += SECTION_SIZE) {
		struct dev_pagemap *dup;

		rcu_read_lock();
		dup = find_dev_pagemap(key);
		rcu_read_unlock();
		if (dup) {
			dev_err(dev, "%s: %pr collides with mapping for %s\n",
					__func__, res, dev_name(dup->dev));
			error = -EBUSY;
			break;
		}
		error = radix_tree_insert(&pgmap_radix, key >> PA_SECTION_SHIFT,
				page_map);
		if (error) {
			dev_err(dev, "%s: failed: %d\n", __func__, error);
			break;
		}
	}
	mutex_unlock(&pgmap_lock);
	if (error)
		goto err_radix;

	nid = dev_to_node(dev);
	if (nid < 0)
		nid = numa_mem_id();

	error = track_pfn_remap(NULL, &pgprot, PHYS_PFN(align_start), 0,
			align_size);
	if (error)
		goto err_pfn_remap;

	mem_hotplug_begin();
	error = arch_add_memory(nid, align_start, align_size, true);
	mem_hotplug_done();
	if (error)
		goto err_add_memory;

	for_each_device_pfn(pfn, page_map) {
		struct page *page = pfn_to_page(pfn);

		/*
		 * ZONE_DEVICE pages union ->lru with a ->pgmap back
		 * pointer. It is a bug if a ZONE_DEVICE page is ever
		 * freed or placed on a driver-private list. Seed the
		 * storage with LIST_POISON* values.
		 */
		list_del(&page->lru);
		page->pgmap = pgmap;
		percpu_ref_get(ref);
	}
	devres_add(dev, page_map);
	return __va(res->start);

 err_add_memory:
	untrack_pfn(NULL, PHYS_PFN(align_start), align_size);
 err_pfn_remap:
 err_radix:
	pgmap_radix_release(res);
	devres_free(page_map);
	return ERR_PTR(error);
}
EXPORT_SYMBOL(devm_memremap_pages);
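
/*
 * Illustrative sketch of the expected devm_memremap_pages() call pattern,
 * loosely modelled on a pmem-style driver: the caller owns a live
 * percpu_ref whose release handler signals a completion, and must still
 * arrange for percpu_ref_kill(), the wait on that completion, and
 * percpu_ref_exit() at teardown, as described in the kernel-doc above.
 * The example_* types and names are assumptions for illustration only;
 * the block is kept out of the build with #if 0.
 */
#if 0
struct example_dev {
	struct completion cmp;
	struct percpu_ref ref;
};

static void example_ref_release(struct percpu_ref *ref)
{
	struct example_dev *edev = container_of(ref, struct example_dev, ref);

	complete(&edev->cmp);
}

static void *example_map_pages(struct device *dev, struct example_dev *edev,
		struct resource *res)
{
	int rc;

	init_completion(&edev->cmp);
	rc = percpu_ref_init(&edev->ref, example_ref_release, 0, GFP_KERNEL);
	if (rc)
		return ERR_PTR(rc);

	/* @ref is live here; each ZONE_DEVICE page takes a reference on it */
	return devm_memremap_pages(dev, res, &edev->ref, NULL);
}
#endif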

unsigned long vmem_altmap_offset(struct vmem_altmap *altmap)
{
	/* number of pfns from base where pfn_to_page() is valid */
	return altmap->reserve + altmap->free;
}

void vmem_altmap_free(struct vmem_altmap *altmap, unsigned long nr_pfns)
{
	altmap->alloc -= nr_pfns;
}

struct vmem_altmap *to_vmem_altmap(unsigned long memmap_start)
{
	/*
	 * 'memmap_start' is the virtual address for the first "struct
	 * page" in this range of the vmemmap array. In the case of
	 * CONFIG_SPARSEMEM_VMEMMAP a page_to_pfn conversion is simple
	 * pointer arithmetic, so we can perform this to_vmem_altmap()
	 * conversion without concern for the initialization state of
	 * the struct page fields.
	 */
	struct page *page = (struct page *) memmap_start;
	struct dev_pagemap *pgmap;

	/*
	 * Unconditionally retrieve a dev_pagemap associated with the
	 * given physical address; this is only for use in the
	 * arch_{add|remove}_memory() paths for setting up and tearing down
	 * the memmap.
	 */
	rcu_read_lock();
	pgmap = find_dev_pagemap(__pfn_to_phys(page_to_pfn(page)));
	rcu_read_unlock();

	return pgmap ? pgmap->altmap : NULL;
}
#endif /* CONFIG_ZONE_DEVICE */