/* SPDX-License-Identifier: GPL-2.0 */
/* Copyright(c) 2015 Intel Corporation. All rights reserved. */
#include <linux/radix-tree.h>
#include <linux/device.h>
#include <linux/types.h>
#include <linux/pfn_t.h>
#include <linux/io.h>
#include <linux/mm.h>
#include <linux/memory_hotplug.h>
#include <linux/swap.h>
#include <linux/swapops.h>
#include <linux/wait_bit.h>

static DEFINE_MUTEX(pgmap_lock);
static RADIX_TREE(pgmap_radix, GFP_KERNEL);
#define SECTION_MASK ~((1UL << PA_SECTION_SHIFT) - 1)
#define SECTION_SIZE (1UL << PA_SECTION_SHIFT)

static unsigned long order_at(struct resource *res, unsigned long pgoff)
{
	unsigned long phys_pgoff = PHYS_PFN(res->start) + pgoff;
	unsigned long nr_pages, mask;

	nr_pages = PHYS_PFN(resource_size(res));
	if (nr_pages == pgoff)
		return ULONG_MAX;

	/*
	 * What is the largest aligned power-of-2 range available from
	 * this resource pgoff to the end of the resource range,
	 * considering the alignment of the current pgoff?
	 */
	mask = phys_pgoff | rounddown_pow_of_two(nr_pages - pgoff);
	if (!mask)
		return ULONG_MAX;

	return find_first_bit(&mask, BITS_PER_LONG);
}

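/*
 * Walk a resource in the largest naturally aligned power-of-2 chunks that
 * order_at() allows, so each chunk can be covered by a single multi-order
 * entry in pgmap_radix. Illustrative example (hypothetical numbers): a range
 * starting at pfn 0x2000 with 0x1800 pages yields order 12 at pgoff 0
 * (a 0x1000-page chunk), then order 11 at pgoff 0x1000 (0x800 pages), and
 * the walk stops when order_at() returns ULONG_MAX at the end of the range.
 */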
#define foreach_order_pgoff(res, order, pgoff) \
	for (pgoff = 0, order = order_at((res), pgoff); order < ULONG_MAX; \
		pgoff += 1UL << order, order = order_at((res), pgoff))

#if IS_ENABLED(CONFIG_DEVICE_PRIVATE)
int device_private_entry_fault(struct vm_area_struct *vma,
		       unsigned long addr,
		       swp_entry_t entry,
		       unsigned int flags,
		       pmd_t *pmdp)
{
	struct page *page = device_private_entry_to_page(entry);

	/*
	 * The page_fault() callback must migrate the page back to system
	 * memory so that the CPU can access it. This might fail for various
	 * reasons (device issue, device was unsafely unplugged, ...). When
	 * such error conditions happen, the callback must return
	 * VM_FAULT_SIGBUS.
	 *
	 * Note that because memory cgroup charges are accounted to the device
	 * memory, this should never fail because of memory restrictions (but
	 * allocation of a regular system page might still fail because we are
	 * out of memory).
	 *
	 * There is a more in-depth description of what that callback can and
	 * cannot do in include/linux/memremap.h.
	 */
	return page->pgmap->page_fault(vma, addr, page, flags, pmdp);
}
EXPORT_SYMBOL(device_private_entry_fault);
#endif /* CONFIG_DEVICE_PRIVATE */

static void pgmap_radix_release(struct resource *res, unsigned long end_pgoff)
{
	unsigned long pgoff, order;

	mutex_lock(&pgmap_lock);
	foreach_order_pgoff(res, order, pgoff) {
		if (pgoff >= end_pgoff)
			break;
		radix_tree_delete(&pgmap_radix, PHYS_PFN(res->start) + pgoff);
	}
	mutex_unlock(&pgmap_lock);

	synchronize_rcu();
}

static unsigned long pfn_first(struct dev_pagemap *pgmap)
{
	const struct resource *res = &pgmap->res;
	struct vmem_altmap *altmap = &pgmap->altmap;
	unsigned long pfn;

	pfn = res->start >> PAGE_SHIFT;
	if (pgmap->altmap_valid)
		pfn += vmem_altmap_offset(altmap);
	return pfn;
}

static unsigned long pfn_end(struct dev_pagemap *pgmap)
{
	const struct resource *res = &pgmap->res;

	return (res->start + resource_size(res)) >> PAGE_SHIFT;
}

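/*
 * Device pfn ranges can span many gigabytes, so give the scheduler a chance
 * to run periodically while for_each_device_pfn() walks every page.
 */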
static unsigned long pfn_next(unsigned long pfn)
{
	if (pfn % 1024 == 0)
		cond_resched();
	return pfn + 1;
}

#define for_each_device_pfn(pfn, map) \
	for (pfn = pfn_first(map); pfn < pfn_end(map); pfn = pfn_next(pfn))

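/*
 * Teardown: release the reference held on each device page, complain if the
 * pagemap's percpu_ref is still live, then undo the arch mapping and remove
 * the radix tree entries for the whole range.
 */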
static void devm_memremap_pages_release(void *data)
{
	struct dev_pagemap *pgmap = data;
	struct device *dev = pgmap->dev;
	struct resource *res = &pgmap->res;
	resource_size_t align_start, align_size;
	unsigned long pfn;

	for_each_device_pfn(pfn, pgmap)
		put_page(pfn_to_page(pfn));

	if (percpu_ref_tryget_live(pgmap->ref)) {
		dev_WARN(dev, "%s: page mapping is still live!\n", __func__);
		percpu_ref_put(pgmap->ref);
	}

	/* pages are dead and unused, undo the arch mapping */
	align_start = res->start & ~(SECTION_SIZE - 1);
	align_size = ALIGN(res->start + resource_size(res), SECTION_SIZE)
		- align_start;

	mem_hotplug_begin();
	arch_remove_memory(align_start, align_size, pgmap->altmap_valid ?
			&pgmap->altmap : NULL);
	mem_hotplug_done();

	untrack_pfn(NULL, PHYS_PFN(align_start), align_size);
	pgmap_radix_release(res, -1);
	dev_WARN_ONCE(dev, pgmap->altmap.alloc,
		      "%s: failed to free all reserved pages\n", __func__);
}

/**
 * devm_memremap_pages - remap and provide memmap backing for the given resource
 * @dev: hosting device for @res
 * @pgmap: pointer to a struct dev_pagemap
 *
 * Notes:
 * 1/ At a minimum the res, ref and type members of @pgmap must be initialized
 *    by the caller before passing it to this function
 *
 * 2/ The altmap field may optionally be initialized, in which case altmap_valid
 *    must be set to true
 *
 * 3/ pgmap.ref must be 'live' on entry and 'dead' before devm_memunmap_pages()
 *    time (or devm release event). The expected order of events is that ref has
 *    been through percpu_ref_kill() before devm_memremap_pages_release(). The
 *    wait for the completion of all references being dropped and
 *    percpu_ref_exit() must occur after devm_memremap_pages_release().
 *
 * 4/ res is expected to be a host memory range that could feasibly be
 *    treated as a "System RAM" range, i.e. not a device mmio range, but
 *    this is not enforced.
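 *
 * A minimal caller, as a sketch only ("drv->ref" stands for a live percpu_ref
 * owned by the caller, and the MEMORY_DEVICE_* type is whichever one the
 * caller needs; neither is defined in this file):
 *
 *	pgmap->res = *res;
 *	pgmap->ref = &drv->ref;
 *	pgmap->type = MEMORY_DEVICE_PRIVATE;
 *	addr = devm_memremap_pages(dev, pgmap);
 *	if (IS_ERR(addr))
 *		return PTR_ERR(addr);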
Dan Williams4b94ffd2016-01-15 16:56:22 -0800169 */
void *devm_memremap_pages(struct device *dev, struct dev_pagemap *pgmap)
{
	resource_size_t align_start, align_size, align_end;
	struct vmem_altmap *altmap = pgmap->altmap_valid ?
			&pgmap->altmap : NULL;
	struct resource *res = &pgmap->res;
	unsigned long pfn, pgoff, order;
	pgprot_t pgprot = PAGE_KERNEL;
	int error, nid, is_ram;
	struct dev_pagemap *conflict_pgmap;

	align_start = res->start & ~(SECTION_SIZE - 1);
	align_size = ALIGN(res->start + resource_size(res), SECTION_SIZE)
		- align_start;
	align_end = align_start + align_size - 1;

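	/*
	 * Because the range is expanded to section alignment, refuse it if
	 * either end of the aligned range already belongs to another
	 * dev_pagemap.
	 */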
	conflict_pgmap = get_dev_pagemap(PHYS_PFN(align_start), NULL);
	if (conflict_pgmap) {
		dev_WARN(dev, "Conflicting mapping in same section\n");
		put_dev_pagemap(conflict_pgmap);
		return ERR_PTR(-ENOMEM);
	}

	conflict_pgmap = get_dev_pagemap(PHYS_PFN(align_end), NULL);
	if (conflict_pgmap) {
		dev_WARN(dev, "Conflicting mapping in same section\n");
		put_dev_pagemap(conflict_pgmap);
		return ERR_PTR(-ENOMEM);
	}

	is_ram = region_intersects(align_start, align_size,
		IORESOURCE_SYSTEM_RAM, IORES_DESC_NONE);

	if (is_ram == REGION_MIXED) {
		WARN_ONCE(1, "%s attempted on mixed region %pr\n",
				__func__, res);
		return ERR_PTR(-ENXIO);
	}

	if (is_ram == REGION_INTERSECTS)
		return __va(res->start);

	if (!pgmap->ref)
		return ERR_PTR(-EINVAL);

	pgmap->dev = dev;

	mutex_lock(&pgmap_lock);
	error = 0;

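	/*
	 * Register the range in pgmap_radix. Each aligned power-of-2 chunk is
	 * stored as a single multi-order entry, so a later radix_tree_lookup()
	 * on any pfn inside the chunk resolves directly to this pgmap.
	 */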
	foreach_order_pgoff(res, order, pgoff) {
		error = __radix_tree_insert(&pgmap_radix,
				PHYS_PFN(res->start) + pgoff, order, pgmap);
		if (error) {
			dev_err(dev, "%s: failed: %d\n", __func__, error);
			break;
		}
	}
	mutex_unlock(&pgmap_lock);
	if (error)
		goto err_radix;

	nid = dev_to_node(dev);
	if (nid < 0)
		nid = numa_mem_id();

	error = track_pfn_remap(NULL, &pgprot, PHYS_PFN(align_start), 0,
			align_size);
	if (error)
		goto err_pfn_remap;

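	/*
	 * Hot-add the range so it gains a memmap and lands in ZONE_DEVICE;
	 * the pages are never onlined, so they stay out of the page allocator.
	 */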
	mem_hotplug_begin();
	error = arch_add_memory(nid, align_start, align_size, altmap, false);
	if (!error)
		move_pfn_range_to_zone(&NODE_DATA(nid)->node_zones[ZONE_DEVICE],
					align_start >> PAGE_SHIFT,
					align_size >> PAGE_SHIFT, altmap);
	mem_hotplug_done();
	if (error)
		goto err_add_memory;

	for_each_device_pfn(pfn, pgmap) {
		struct page *page = pfn_to_page(pfn);

		/*
		 * ZONE_DEVICE pages union ->lru with a ->pgmap back
		 * pointer. It is a bug if a ZONE_DEVICE page is ever
		 * freed or placed on a driver-private list. Seed the
		 * storage with LIST_POISON* values.
		 */
		list_del(&page->lru);
		page->pgmap = pgmap;
		percpu_ref_get(pgmap->ref);
	}

	devm_add_action(dev, devm_memremap_pages_release, pgmap);

	return __va(res->start);

 err_add_memory:
	untrack_pfn(NULL, PHYS_PFN(align_start), align_size);
 err_pfn_remap:
 err_radix:
	pgmap_radix_release(res, pgoff);
	return ERR_PTR(error);
}
EXPORT_SYMBOL(devm_memremap_pages);

unsigned long vmem_altmap_offset(struct vmem_altmap *altmap)
{
	/* number of pfns from base where pfn_to_page() is valid */
	return altmap->reserve + altmap->free;
}

void vmem_altmap_free(struct vmem_altmap *altmap, unsigned long nr_pfns)
{
	altmap->alloc -= nr_pfns;
}

/**
 * get_dev_pagemap() - take a new live reference on the dev_pagemap for @pfn
 * @pfn: page frame number to look up a pgmap for
 * @pgmap: optional known pgmap that already has a reference
 *
 * If @pgmap is non-NULL and covers @pfn it will be returned as-is.  If @pgmap
 * is non-NULL but does not cover @pfn the reference to it will be released.
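 *
 * Typical pattern for walking a pfn range (a sketch only; the loop bounds
 * belong to the caller): pass the previous return value back in so repeated
 * lookups within the same pagemap skip the radix tree, and drop the final
 * reference when done:
 *
 *	struct dev_pagemap *pgmap = NULL;
 *
 *	for (pfn = start; pfn < end; pfn++)
 *		pgmap = get_dev_pagemap(pfn, pgmap);
 *	if (pgmap)
 *		put_dev_pagemap(pgmap);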
 */
struct dev_pagemap *get_dev_pagemap(unsigned long pfn,
		struct dev_pagemap *pgmap)
{
	resource_size_t phys = PFN_PHYS(pfn);

	/*
	 * In the cached case we're already holding a live reference.
	 */
	if (pgmap) {
		if (phys >= pgmap->res.start && phys <= pgmap->res.end)
			return pgmap;
		put_dev_pagemap(pgmap);
	}

	/* fall back to slow path lookup */
	rcu_read_lock();
	pgmap = radix_tree_lookup(&pgmap_radix, PHYS_PFN(phys));
	if (pgmap && !percpu_ref_tryget_live(pgmap->ref))
		pgmap = NULL;
	rcu_read_unlock();

	return pgmap;
}
EXPORT_SYMBOL_GPL(get_dev_pagemap);

#ifdef CONFIG_DEV_PAGEMAP_OPS
DEFINE_STATIC_KEY_FALSE(devmap_managed_key);
EXPORT_SYMBOL(devmap_managed_key);
static atomic_t devmap_enable;

/*
 * Toggle the static key for ->page_free() callbacks when dev_pagemap
 * pages go idle.
 */
void dev_pagemap_get_ops(void)
{
	if (atomic_inc_return(&devmap_enable) == 1)
		static_branch_enable(&devmap_managed_key);
}
EXPORT_SYMBOL_GPL(dev_pagemap_get_ops);

void dev_pagemap_put_ops(void)
{
	if (atomic_dec_and_test(&devmap_enable))
		static_branch_disable(&devmap_managed_key);
}
EXPORT_SYMBOL_GPL(dev_pagemap_put_ops);

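/*
 * A dev_pagemap page is considered idle when its refcount falls back to 1:
 * the ->page_free() callback runs on the 2->1 transition, while the final
 * 1->0 put normally comes from devm_memremap_pages_release(), which
 * put_page()s every pfn in the range during teardown.
 */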
void __put_devmap_managed_page(struct page *page)
{
	int count = page_ref_dec_return(page);

	/*
	 * If the refcount is 1 then the page is free and the refcount is
	 * stable, as nobody holds a reference on the page.
	 */
	if (count == 1) {
		/* Clear Active bit in case of parallel mark_page_accessed */
		__ClearPageActive(page);
		__ClearPageWaiters(page);

		page->mapping = NULL;
		mem_cgroup_uncharge(page);

		page->pgmap->page_free(page, page->pgmap->data);
	} else if (!count)
		__put_page(page);
}
EXPORT_SYMBOL(__put_devmap_managed_page);
#endif /* CONFIG_DEV_PAGEMAP_OPS */