/*
 * Copyright(c) 2015 Intel Corporation. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of version 2 of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 */
#include <linux/radix-tree.h>
#include <linux/device.h>
#include <linux/types.h>
#include <linux/pfn_t.h>
#include <linux/io.h>
#include <linux/mm.h>
#include <linux/memory_hotplug.h>
#include <linux/swap.h>
#include <linux/swapops.h>

#ifndef ioremap_cache
/* temporary while we convert existing ioremap_cache users to memremap */
__weak void __iomem *ioremap_cache(resource_size_t offset, unsigned long size)
{
	return ioremap(offset, size);
}
#endif

#ifndef arch_memremap_wb
static void *arch_memremap_wb(resource_size_t offset, unsigned long size)
{
	return (__force void *)ioremap_cache(offset, size);
}
#endif

#ifndef arch_memremap_can_ram_remap
static bool arch_memremap_can_ram_remap(resource_size_t offset, size_t size,
					unsigned long flags)
{
	return true;
}
#endif

static void *try_ram_remap(resource_size_t offset, size_t size,
			   unsigned long flags)
{
	unsigned long pfn = PHYS_PFN(offset);

	/* In the simple case just return the existing linear address */
	if (pfn_valid(pfn) && !PageHighMem(pfn_to_page(pfn)) &&
	    arch_memremap_can_ram_remap(offset, size, flags))
		return __va(offset);

	return NULL; /* fallback to arch_memremap_wb */
}

/**
 * memremap() - remap an iomem_resource as cacheable memory
 * @offset: iomem resource start address
 * @size: size of remap
 * @flags: any of MEMREMAP_WB, MEMREMAP_WT, MEMREMAP_WC,
 *	   MEMREMAP_ENC, MEMREMAP_DEC
 *
 * memremap() is "ioremap" for cases where it is known that the resource
 * being mapped does not have i/o side effects and the __iomem
 * annotation is not applicable. In the case of multiple flags, the different
 * mapping types will be attempted in the order listed below until one of
 * them succeeds.
 *
 * MEMREMAP_WB - matches the default mapping for System RAM on
 * the architecture. This is usually a read-allocate write-back cache.
 * Moreover, if MEMREMAP_WB is specified and the requested remap region is
 * RAM, memremap() will bypass establishing a new mapping and instead return
 * a pointer into the direct map.
 *
 * MEMREMAP_WT - establish a mapping whereby writes either bypass the
 * cache or are written through to memory and never exist in a
 * cache-dirty state with respect to program visibility. Attempts to
 * map System RAM with this mapping type will fail.
 *
 * MEMREMAP_WC - establish a writecombine mapping, whereby writes may
 * be coalesced together (e.g. in the CPU's write buffers), but is otherwise
 * uncached. Attempts to map System RAM with this mapping type will fail.
 */
void *memremap(resource_size_t offset, size_t size, unsigned long flags)
{
	int is_ram = region_intersects(offset, size,
				       IORESOURCE_SYSTEM_RAM, IORES_DESC_NONE);
	void *addr = NULL;

	if (!flags)
		return NULL;

	if (is_ram == REGION_MIXED) {
		WARN_ONCE(1, "memremap attempted on mixed range %pa size: %#lx\n",
				&offset, (unsigned long) size);
		return NULL;
	}

	/* Try all mapping types requested until one returns non-NULL */
	if (flags & MEMREMAP_WB) {
		/*
		 * MEMREMAP_WB is special in that it can be satisfied
		 * from the direct map. Some archs depend on the
		 * capability of memremap() to autodetect cases where
		 * the requested range is potentially in System RAM.
		 */
		if (is_ram == REGION_INTERSECTS)
			addr = try_ram_remap(offset, size, flags);
		if (!addr)
			addr = arch_memremap_wb(offset, size);
	}

	/*
	 * If we don't have a mapping yet and other request flags are
	 * present then we will be attempting to establish a new virtual
	 * address mapping. Enforce that this mapping is not aliasing
	 * System RAM.
	 */
	if (!addr && is_ram == REGION_INTERSECTS && flags != MEMREMAP_WB) {
		WARN_ONCE(1, "memremap attempted on ram %pa size: %#lx\n",
				&offset, (unsigned long) size);
		return NULL;
	}

	if (!addr && (flags & MEMREMAP_WT))
		addr = ioremap_wt(offset, size);

	if (!addr && (flags & MEMREMAP_WC))
		addr = ioremap_wc(offset, size);

	return addr;
}
EXPORT_SYMBOL(memremap);
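
/*
 * Usage sketch (illustrative only, not part of the original file):
 * request a write-back mapping with write-through as a fallback.
 * "phys_addr" is a placeholder for a resource start that is known to
 * have no i/o side effects.
 *
 *	void *addr = memremap(phys_addr, SZ_4K, MEMREMAP_WB | MEMREMAP_WT);
 *
 *	if (!addr)
 *		return -ENOMEM;
 *	...plain loads/stores, no readl()/writel() required...
 *	memunmap(addr);
 */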

void memunmap(void *addr)
{
	if (is_vmalloc_addr(addr))
		iounmap((void __iomem *) addr);
}
EXPORT_SYMBOL(memunmap);

static void devm_memremap_release(struct device *dev, void *res)
{
	memunmap(*(void **)res);
}

static int devm_memremap_match(struct device *dev, void *res, void *match_data)
{
	return *(void **)res == match_data;
}

void *devm_memremap(struct device *dev, resource_size_t offset,
		size_t size, unsigned long flags)
{
	void **ptr, *addr;

	ptr = devres_alloc_node(devm_memremap_release, sizeof(*ptr), GFP_KERNEL,
			dev_to_node(dev));
	if (!ptr)
		return ERR_PTR(-ENOMEM);

	addr = memremap(offset, size, flags);
	if (addr) {
		*ptr = addr;
		devres_add(dev, ptr);
	} else {
		devres_free(ptr);
		return ERR_PTR(-ENXIO);
	}

	return addr;
}
EXPORT_SYMBOL(devm_memremap);

void devm_memunmap(struct device *dev, void *addr)
{
	WARN_ON(devres_release(dev, devm_memremap_release,
				devm_memremap_match, addr));
}
EXPORT_SYMBOL(devm_memunmap);
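
/*
 * Usage sketch (illustrative only, not part of the original file): in a
 * probe() path the devres variant unmaps automatically on driver detach,
 * so no explicit memunmap() is needed on error paths. "res" is a
 * placeholder for the device's iomem resource.
 *
 *	addr = devm_memremap(dev, res->start, resource_size(res),
 *			MEMREMAP_WB);
 *	if (IS_ERR(addr))
 *		return PTR_ERR(addr);
 */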

#ifdef CONFIG_ZONE_DEVICE
static DEFINE_MUTEX(pgmap_lock);
static RADIX_TREE(pgmap_radix, GFP_KERNEL);
#define SECTION_MASK ~((1UL << PA_SECTION_SHIFT) - 1)
#define SECTION_SIZE (1UL << PA_SECTION_SHIFT)

static unsigned long order_at(struct resource *res, unsigned long pgoff)
{
	unsigned long phys_pgoff = PHYS_PFN(res->start) + pgoff;
	unsigned long nr_pages, mask;

	nr_pages = PHYS_PFN(resource_size(res));
	if (nr_pages == pgoff)
		return ULONG_MAX;

	/*
	 * What is the largest aligned power-of-2 range available from
	 * this resource pgoff to the end of the resource range,
	 * considering the alignment of the current pgoff?
	 */
	mask = phys_pgoff | rounddown_pow_of_two(nr_pages - pgoff);
	if (!mask)
		return ULONG_MAX;

	return find_first_bit(&mask, BITS_PER_LONG);
}

#define foreach_order_pgoff(res, order, pgoff) \
	for (pgoff = 0, order = order_at((res), pgoff); order < ULONG_MAX; \
			pgoff += 1UL << order, order = order_at((res), pgoff))

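/*
 * Worked example (added for illustration; the values are made up): for
 * a resource starting at pfn 0x208 (aligned to 8 pages) and spanning
 * 0x20 pages, foreach_order_pgoff() visits three aligned power-of-2
 * chunks:
 *
 *	pgoff 0x00: mask = 0x208 | 0x20 = 0x228 -> order 3 (8 pages)
 *	pgoff 0x08: mask = 0x210 | 0x10 = 0x210 -> order 4 (16 pages)
 *	pgoff 0x18: mask = 0x220 | 0x08 = 0x228 -> order 3 (8 pages)
 *
 * At pgoff 0x20 == nr_pages, order_at() returns ULONG_MAX and the loop
 * terminates. Each (pgoff, order) pair becomes one multi-order radix
 * tree entry below.
 */
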
#if IS_ENABLED(CONFIG_DEVICE_PRIVATE)
int device_private_entry_fault(struct vm_area_struct *vma,
		       unsigned long addr,
		       swp_entry_t entry,
		       unsigned int flags,
		       pmd_t *pmdp)
{
	struct page *page = device_private_entry_to_page(entry);

	/*
	 * The page_fault() callback must migrate the page back to system
	 * memory so that the CPU can access it. This might fail for various
	 * reasons (device issue, device was unsafely unplugged, ...). When
	 * such error conditions happen, the callback must return
	 * VM_FAULT_SIGBUS.
	 *
	 * Note that because memory cgroup charges are accounted to the device
	 * memory, this should never fail because of memory restrictions (but
	 * allocation of a regular system page might still fail because we are
	 * out of memory).
	 *
	 * There is a more in-depth description of what that callback can and
	 * cannot do in include/linux/memremap.h.
	 */
	return page->pgmap->page_fault(vma, addr, page, flags, pmdp);
}
EXPORT_SYMBOL(device_private_entry_fault);
#endif /* CONFIG_DEVICE_PRIVATE */
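
/*
 * Hypothetical driver-side sketch (an assumption for illustration, not
 * part of this file): per the contract above, a page_fault() callback
 * migrates the page back to system memory and reports VM_FAULT_SIGBUS
 * when that migration fails. All "my_dev_*" names are placeholders.
 *
 *	static int my_dev_page_fault(struct vm_area_struct *vma,
 *				     unsigned long addr, struct page *page,
 *				     unsigned int flags, pmd_t *pmdp)
 *	{
 *		if (my_dev_migrate_to_ram(vma, addr, page, pmdp))
 *			return VM_FAULT_SIGBUS;
 *		return 0;
 *	}
 */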

static void pgmap_radix_release(struct resource *res, unsigned long end_pgoff)
{
	unsigned long pgoff, order;

	mutex_lock(&pgmap_lock);
	foreach_order_pgoff(res, order, pgoff) {
		if (pgoff >= end_pgoff)
			break;
		radix_tree_delete(&pgmap_radix, PHYS_PFN(res->start) + pgoff);
	}
	mutex_unlock(&pgmap_lock);

	synchronize_rcu();
}

static unsigned long pfn_first(struct dev_pagemap *pgmap)
{
	const struct resource *res = &pgmap->res;
	struct vmem_altmap *altmap = &pgmap->altmap;
	unsigned long pfn;

	pfn = res->start >> PAGE_SHIFT;
	if (pgmap->altmap_valid)
		pfn += vmem_altmap_offset(altmap);
	return pfn;
}

static unsigned long pfn_end(struct dev_pagemap *pgmap)
{
	const struct resource *res = &pgmap->res;

	return (res->start + resource_size(res)) >> PAGE_SHIFT;
}

#define for_each_device_pfn(pfn, map) \
	for (pfn = pfn_first(map); pfn < pfn_end(map); pfn++)

static void devm_memremap_pages_release(void *data)
{
	struct dev_pagemap *pgmap = data;
	struct device *dev = pgmap->dev;
	struct resource *res = &pgmap->res;
	resource_size_t align_start, align_size;
	unsigned long pfn;

	for_each_device_pfn(pfn, pgmap)
		put_page(pfn_to_page(pfn));

	if (percpu_ref_tryget_live(pgmap->ref)) {
		dev_WARN(dev, "%s: page mapping is still live!\n", __func__);
		percpu_ref_put(pgmap->ref);
	}

	/* pages are dead and unused, undo the arch mapping */
	align_start = res->start & ~(SECTION_SIZE - 1);
	align_size = ALIGN(res->start + resource_size(res), SECTION_SIZE)
		- align_start;

	mem_hotplug_begin();
	arch_remove_memory(align_start, align_size, pgmap->altmap_valid ?
			&pgmap->altmap : NULL);
	mem_hotplug_done();

	untrack_pfn(NULL, PHYS_PFN(align_start), align_size);
	pgmap_radix_release(res, -1);
	dev_WARN_ONCE(dev, pgmap->altmap.alloc,
		      "%s: failed to free all reserved pages\n", __func__);
}

/**
 * devm_memremap_pages - remap and provide memmap backing for the given resource
 * @dev: hosting device for @res
 * @pgmap: pointer to a struct dev_pagemap
 *
 * Notes:
 * 1/ At a minimum the res, ref and type members of @pgmap must be initialized
 *    by the caller before passing it to this function
 *
 * 2/ The altmap field may optionally be initialized, in which case altmap_valid
 *    must be set to true
 *
 * 3/ pgmap.ref must be 'live' on entry and 'dead' before devm_memunmap_pages()
 *    time (or devm release event). The expected order of events is that ref has
 *    been through percpu_ref_kill() before devm_memremap_pages_release(). The
 *    wait for the completion of all references being dropped and
 *    percpu_ref_exit() must occur after devm_memremap_pages_release().
 *
 * 4/ res is expected to be a host memory range that could feasibly be
 *    treated as a "System RAM" range, i.e. not a device mmio range, but
 *    this is not enforced.
 */
void *devm_memremap_pages(struct device *dev, struct dev_pagemap *pgmap)
{
	resource_size_t align_start, align_size, align_end;
	struct vmem_altmap *altmap = pgmap->altmap_valid ?
			&pgmap->altmap : NULL;
	unsigned long pfn, pgoff, order;
	pgprot_t pgprot = PAGE_KERNEL;
	int error, nid, is_ram, i = 0;
	struct resource *res = &pgmap->res;

	align_start = res->start & ~(SECTION_SIZE - 1);
	align_size = ALIGN(res->start + resource_size(res), SECTION_SIZE)
		- align_start;
	is_ram = region_intersects(align_start, align_size,
		IORESOURCE_SYSTEM_RAM, IORES_DESC_NONE);

	if (is_ram == REGION_MIXED) {
		WARN_ONCE(1, "%s attempted on mixed region %pr\n",
				__func__, res);
		return ERR_PTR(-ENXIO);
	}

	if (is_ram == REGION_INTERSECTS)
		return __va(res->start);

	if (!pgmap->ref)
		return ERR_PTR(-EINVAL);

	pgmap->dev = dev;

	mutex_lock(&pgmap_lock);
	error = 0;
	align_end = align_start + align_size - 1;

	foreach_order_pgoff(res, order, pgoff) {
		error = __radix_tree_insert(&pgmap_radix,
				PHYS_PFN(res->start) + pgoff, order, pgmap);
		if (error) {
			dev_err(dev, "%s: failed: %d\n", __func__, error);
			break;
		}
	}
	mutex_unlock(&pgmap_lock);
	if (error)
		goto err_radix;

	nid = dev_to_node(dev);
	if (nid < 0)
		nid = numa_mem_id();

	error = track_pfn_remap(NULL, &pgprot, PHYS_PFN(align_start), 0,
			align_size);
	if (error)
		goto err_pfn_remap;

	mem_hotplug_begin();
	error = arch_add_memory(nid, align_start, align_size, altmap, false);
	if (!error)
		move_pfn_range_to_zone(&NODE_DATA(nid)->node_zones[ZONE_DEVICE],
					align_start >> PAGE_SHIFT,
					align_size >> PAGE_SHIFT, altmap);
	mem_hotplug_done();
	if (error)
		goto err_add_memory;

	for_each_device_pfn(pfn, pgmap) {
		struct page *page = pfn_to_page(pfn);

		/*
		 * ZONE_DEVICE pages union ->lru with a ->pgmap back
		 * pointer. It is a bug if a ZONE_DEVICE page is ever
		 * freed or placed on a driver-private list. Seed the
		 * storage with LIST_POISON* values.
		 */
		list_del(&page->lru);
		page->pgmap = pgmap;
		percpu_ref_get(pgmap->ref);
		if (!(++i % 1024))
			cond_resched();
	}

	devm_add_action(dev, devm_memremap_pages_release, pgmap);

	return __va(res->start);

 err_add_memory:
	untrack_pfn(NULL, PHYS_PFN(align_start), align_size);
 err_pfn_remap:
 err_radix:
	pgmap_radix_release(res, pgoff);
	devres_free(pgmap);
	return ERR_PTR(error);
}
EXPORT_SYMBOL(devm_memremap_pages);
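
/*
 * Illustrative sketch (an assumption, not part of the original file) of
 * the caller contract from the notes above: the driver owns both the
 * dev_pagemap and the percpu_ref. "drv" and its fields are placeholder
 * names.
 *
 *	drv->pgmap.res = *res;			(host memory range)
 *	drv->pgmap.ref = &drv->ref;		(already percpu_ref_init()'d)
 *	drv->pgmap.altmap_valid = false;
 *	addr = devm_memremap_pages(dev, &drv->pgmap);
 *	if (IS_ERR(addr))
 *		return PTR_ERR(addr);
 */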

unsigned long vmem_altmap_offset(struct vmem_altmap *altmap)
{
	/* number of pfns from base where pfn_to_page() is valid */
	return altmap->reserve + altmap->free;
}

void vmem_altmap_free(struct vmem_altmap *altmap, unsigned long nr_pfns)
{
	altmap->alloc -= nr_pfns;
}

/**
 * get_dev_pagemap() - take a new live reference on the dev_pagemap for @pfn
 * @pfn: page frame number for which to look up the dev_pagemap
 * @pgmap: optional known pgmap that already has a reference
 *
 * If @pgmap is non-NULL and covers @pfn it will be returned as-is. If @pgmap
 * is non-NULL but does not cover @pfn the reference to it will be released.
 */
struct dev_pagemap *get_dev_pagemap(unsigned long pfn,
		struct dev_pagemap *pgmap)
{
	resource_size_t phys = PFN_PHYS(pfn);

	/*
	 * In the cached case we're already holding a live reference.
	 */
	if (pgmap) {
		if (phys >= pgmap->res.start && phys <= pgmap->res.end)
			return pgmap;
		put_dev_pagemap(pgmap);
	}

	/* fall back to slow path lookup */
	rcu_read_lock();
	pgmap = radix_tree_lookup(&pgmap_radix, PHYS_PFN(phys));
	if (pgmap && !percpu_ref_tryget_live(pgmap->ref))
		pgmap = NULL;
	rcu_read_unlock();

	return pgmap;
}
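
/*
 * Illustrative caller pattern (an assumption, not from this file): when
 * walking a pfn range, the previous pgmap can be passed back in so the
 * radix tree lookup only happens on a pgmap miss.
 *
 *	struct dev_pagemap *pgmap = NULL;
 *
 *	for (pfn = start; pfn < end; pfn++) {
 *		pgmap = get_dev_pagemap(pfn, pgmap);
 *		if (!pgmap)
 *			break;
 *		...
 *	}
 *	if (pgmap)
 *		put_dev_pagemap(pgmap);
 */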
#endif /* CONFIG_ZONE_DEVICE */

#if IS_ENABLED(CONFIG_DEVICE_PRIVATE) || IS_ENABLED(CONFIG_DEVICE_PUBLIC)
void put_zone_device_private_or_public_page(struct page *page)
{
	int count = page_ref_dec_return(page);

	/*
	 * If the refcount is 1 then the page is free and the refcount is
	 * stable, as nobody holds a reference on the page.
	 */
	if (count == 1) {
		/* Clear Active bit in case of parallel mark_page_accessed */
		__ClearPageActive(page);
		__ClearPageWaiters(page);

		page->mapping = NULL;
		mem_cgroup_uncharge(page);

		page->pgmap->page_free(page, page->pgmap->data);
	} else if (!count)
		__put_page(page);
}
EXPORT_SYMBOL(put_zone_device_private_or_public_page);
#endif /* CONFIG_DEVICE_PRIVATE || CONFIG_DEVICE_PUBLIC */