/* SPDX-License-Identifier: GPL-2.0 */
/* Copyright(c) 2015 Intel Corporation. All rights reserved. */
#include <linux/device.h>
#include <linux/io.h>
#include <linux/kasan.h>
#include <linux/memory_hotplug.h>
#include <linux/mm.h>
#include <linux/pfn_t.h>
#include <linux/swap.h>
#include <linux/swapops.h>
#include <linux/types.h>
#include <linux/wait_bit.h>
#include <linux/xarray.h>
#include <linux/hmm.h>

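/*
 * Every dev_pagemap registered by devm_memremap_pages() is tracked in this
 * XArray, keyed by the pfn range it covers, so that get_dev_pagemap() can
 * resolve a pfn back to its hosting pgmap.
 */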
static DEFINE_XARRAY(pgmap_array);
#define SECTION_MASK ~((1UL << PA_SECTION_SHIFT) - 1)
#define SECTION_SIZE (1UL << PA_SECTION_SHIFT)

#if IS_ENABLED(CONFIG_DEVICE_PRIVATE)
vm_fault_t device_private_entry_fault(struct vm_area_struct *vma,
		       unsigned long addr,
		       swp_entry_t entry,
		       unsigned int flags,
		       pmd_t *pmdp)
{
	struct page *page = device_private_entry_to_page(entry);
	struct hmm_devmem *devmem;

	devmem = container_of(page->pgmap, typeof(*devmem), pagemap);

	/*
	 * The page_fault() callback must migrate the page back to system
	 * memory so that the CPU can access it. This might fail for various
	 * reasons (device issue, device was unsafely unplugged, ...). When
	 * such error conditions happen, the callback must return
	 * VM_FAULT_SIGBUS.
	 *
	 * Note that because memory cgroup charges are accounted to the device
	 * memory, this should never fail because of memory restrictions (but
	 * allocation of a regular system page might still fail because we are
	 * out of memory).
	 *
	 * There is a more in-depth description of what that callback can and
	 * cannot do in include/linux/memremap.h.
	 */
	return devmem->page_fault(vma, addr, page, flags, pmdp);
}
#endif /* CONFIG_DEVICE_PRIVATE */

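/*
 * Drop the pfn range for @res from the pgmap XArray and wait for any
 * in-flight RCU-protected lookups (see get_dev_pagemap()) to finish.
 */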
static void pgmap_array_delete(struct resource *res)
{
	xa_store_range(&pgmap_array, PHYS_PFN(res->start), PHYS_PFN(res->end),
			NULL, GFP_KERNEL);
	synchronize_rcu();
}

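/*
 * First device pfn of the mapping, skipping any pfns the vmem_altmap has
 * reserved for storing the memmap itself.
 */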
static unsigned long pfn_first(struct dev_pagemap *pgmap)
{
	const struct resource *res = &pgmap->res;
	struct vmem_altmap *altmap = &pgmap->altmap;
	unsigned long pfn;

	pfn = res->start >> PAGE_SHIFT;
	if (pgmap->altmap_valid)
		pfn += vmem_altmap_offset(altmap);
	return pfn;
}

static unsigned long pfn_end(struct dev_pagemap *pgmap)
{
	const struct resource *res = &pgmap->res;

	return (res->start + resource_size(res)) >> PAGE_SHIFT;
}

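/* Yield the CPU periodically while walking a potentially huge pfn range. */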
static unsigned long pfn_next(unsigned long pfn)
{
	if (pfn % 1024 == 0)
		cond_resched();
	return pfn + 1;
}

#define for_each_device_pfn(pfn, map) \
	for (pfn = pfn_first(map); pfn < pfn_end(map); pfn = pfn_next(pfn))

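/*
 * Devres teardown path: kill the percpu_ref, drop the reference held on
 * each device page, then undo the arch mapping and remove the range from
 * the pgmap XArray.
 */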
static void devm_memremap_pages_release(void *data)
{
	struct dev_pagemap *pgmap = data;
	struct device *dev = pgmap->dev;
	struct resource *res = &pgmap->res;
	resource_size_t align_start, align_size;
	unsigned long pfn;
	int nid;

	pgmap->kill(pgmap->ref);
	for_each_device_pfn(pfn, pgmap)
		put_page(pfn_to_page(pfn));

	/* pages are dead and unused, undo the arch mapping */
	align_start = res->start & ~(SECTION_SIZE - 1);
	align_size = ALIGN(res->start + resource_size(res), SECTION_SIZE)
		- align_start;

	nid = page_to_nid(pfn_to_page(align_start >> PAGE_SHIFT));

	mem_hotplug_begin();
	if (pgmap->type == MEMORY_DEVICE_PRIVATE) {
		pfn = align_start >> PAGE_SHIFT;
		__remove_pages(page_zone(pfn_to_page(pfn)), pfn,
				align_size >> PAGE_SHIFT, NULL);
	} else {
		arch_remove_memory(nid, align_start, align_size,
				pgmap->altmap_valid ? &pgmap->altmap : NULL);
		kasan_remove_zero_shadow(__va(align_start), align_size);
	}
	mem_hotplug_done();

	untrack_pfn(NULL, PHYS_PFN(align_start), align_size);
	pgmap_array_delete(res);
	dev_WARN_ONCE(dev, pgmap->altmap.alloc,
		      "%s: failed to free all reserved pages\n", __func__);
}

/**
 * devm_memremap_pages - remap and provide memmap backing for the given resource
 * @dev: hosting device for @res
 * @pgmap: pointer to a struct dev_pagemap
 *
 * Notes:
 * 1/ At a minimum the res, ref and type members of @pgmap must be initialized
 *    by the caller before passing it to this function.
 *
 * 2/ The altmap field may optionally be initialized, in which case
 *    altmap_valid must be set to true.
 *
 * 3/ pgmap->ref must be 'live' on entry and will be killed at
 *    devm_memremap_pages_release() time, or if this routine fails.
 *
 * 4/ res is expected to be a host memory range that could feasibly be
 *    treated as a "System RAM" range, i.e. not a device mmio range, but
 *    this is not enforced.
 */
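/*
 * Illustrative call sequence (hypothetical driver code, not part of this
 * file; the drv_* names and the chosen memory type are placeholders):
 *
 *	pgmap->res = drv->res;
 *	pgmap->ref = &drv->ref;
 *	pgmap->kill = drv_kill_ref;
 *	pgmap->type = MEMORY_DEVICE_FS_DAX;
 *	addr = devm_memremap_pages(dev, pgmap);
 *	if (IS_ERR(addr))
 *		return PTR_ERR(addr);
 */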
void *devm_memremap_pages(struct device *dev, struct dev_pagemap *pgmap)
{
	resource_size_t align_start, align_size, align_end;
	struct vmem_altmap *altmap = pgmap->altmap_valid ?
			&pgmap->altmap : NULL;
	struct resource *res = &pgmap->res;
	struct dev_pagemap *conflict_pgmap;
	struct mhp_restrictions restrictions = {
		/*
		 * We do not want any optional features, only our own memmap.
		 */
		.altmap = altmap,
	};
	pgprot_t pgprot = PAGE_KERNEL;
	int error, nid, is_ram;

	if (!pgmap->ref || !pgmap->kill)
		return ERR_PTR(-EINVAL);

	align_start = res->start & ~(SECTION_SIZE - 1);
	align_size = ALIGN(res->start + resource_size(res), SECTION_SIZE)
		- align_start;
	align_end = align_start + align_size - 1;

	conflict_pgmap = get_dev_pagemap(PHYS_PFN(align_start), NULL);
	if (conflict_pgmap) {
		dev_WARN(dev, "Conflicting mapping in same section\n");
		put_dev_pagemap(conflict_pgmap);
		return ERR_PTR(-ENOMEM);
	}

	conflict_pgmap = get_dev_pagemap(PHYS_PFN(align_end), NULL);
	if (conflict_pgmap) {
		dev_WARN(dev, "Conflicting mapping in same section\n");
		put_dev_pagemap(conflict_pgmap);
		return ERR_PTR(-ENOMEM);
	}

	is_ram = region_intersects(align_start, align_size,
		IORESOURCE_SYSTEM_RAM, IORES_DESC_NONE);

	if (is_ram != REGION_DISJOINT) {
		WARN_ONCE(1, "%s attempted on %s region %pr\n", __func__,
				is_ram == REGION_MIXED ? "mixed" : "ram", res);
		error = -ENXIO;
		goto err_array;
	}

	pgmap->dev = dev;

	error = xa_err(xa_store_range(&pgmap_array, PHYS_PFN(res->start),
				PHYS_PFN(res->end), pgmap, GFP_KERNEL));
	if (error)
		goto err_array;

	nid = dev_to_node(dev);
	if (nid < 0)
		nid = numa_mem_id();

	error = track_pfn_remap(NULL, &pgprot, PHYS_PFN(align_start), 0,
			align_size);
	if (error)
		goto err_pfn_remap;

	mem_hotplug_begin();

	/*
	 * For device private memory we call add_pages() as we only need to
	 * allocate and initialize struct page for the device memory.
	 * Moreover, the device memory is inaccessible, so we do not want to
	 * create a linear mapping for it the way arch_add_memory() would.
	 *
	 * For all other device memory types, which are accessible by
	 * the CPU, we do want the linear mapping and thus use
	 * arch_add_memory().
	 */
	if (pgmap->type == MEMORY_DEVICE_PRIVATE) {
		error = add_pages(nid, align_start >> PAGE_SHIFT,
				align_size >> PAGE_SHIFT, &restrictions);
	} else {
		error = kasan_add_zero_shadow(__va(align_start), align_size);
		if (error) {
			mem_hotplug_done();
			goto err_kasan;
		}

		error = arch_add_memory(nid, align_start, align_size,
				&restrictions);
	}

	if (!error) {
		struct zone *zone;

		zone = &NODE_DATA(nid)->node_zones[ZONE_DEVICE];
		move_pfn_range_to_zone(zone, align_start >> PAGE_SHIFT,
				align_size >> PAGE_SHIFT, altmap);
	}

	mem_hotplug_done();
	if (error)
		goto err_add_memory;

	/*
	 * Initialization of the pages has been deferred until now in order
	 * to allow us to do the work while not holding the hotplug lock.
	 */
	memmap_init_zone_device(&NODE_DATA(nid)->node_zones[ZONE_DEVICE],
				align_start >> PAGE_SHIFT,
				align_size >> PAGE_SHIFT, pgmap);
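	/*
	 * Pin pgmap->ref once for each device pfn now that the struct pages
	 * backing them are initialized.
	 */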
	percpu_ref_get_many(pgmap->ref, pfn_end(pgmap) - pfn_first(pgmap));

	error = devm_add_action_or_reset(dev, devm_memremap_pages_release,
			pgmap);
	if (error)
		return ERR_PTR(error);

	return __va(res->start);

 err_add_memory:
	kasan_remove_zero_shadow(__va(align_start), align_size);
 err_kasan:
	untrack_pfn(NULL, PHYS_PFN(align_start), align_size);
 err_pfn_remap:
	pgmap_array_delete(res);
 err_array:
	pgmap->kill(pgmap->ref);
	return ERR_PTR(error);
}
EXPORT_SYMBOL_GPL(devm_memremap_pages);

unsigned long vmem_altmap_offset(struct vmem_altmap *altmap)
{
	/* number of pfns from base where pfn_to_page() is valid */
	return altmap->reserve + altmap->free;
}

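/* Release nr_pfns previously allocated from the altmap back to it. */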
void vmem_altmap_free(struct vmem_altmap *altmap, unsigned long nr_pfns)
{
	altmap->alloc -= nr_pfns;
}

/**
 * get_dev_pagemap() - take a new live reference on the dev_pagemap for @pfn
 * @pfn: page frame number to look up
 * @pgmap: optional known pgmap that already has a reference
 *
 * If @pgmap is non-NULL and covers @pfn it will be returned as-is.  If @pgmap
 * is non-NULL but does not cover @pfn the reference to it will be released.
 */
struct dev_pagemap *get_dev_pagemap(unsigned long pfn,
		struct dev_pagemap *pgmap)
{
	resource_size_t phys = PFN_PHYS(pfn);

	/*
	 * In the cached case we're already holding a live reference.
	 */
	if (pgmap) {
		if (phys >= pgmap->res.start && phys <= pgmap->res.end)
			return pgmap;
		put_dev_pagemap(pgmap);
	}

	/* fall back to slow path lookup */
	rcu_read_lock();
	pgmap = xa_load(&pgmap_array, PHYS_PFN(phys));
	if (pgmap && !percpu_ref_tryget_live(pgmap->ref))
		pgmap = NULL;
	rcu_read_unlock();

	return pgmap;
}
EXPORT_SYMBOL_GPL(get_dev_pagemap);

#ifdef CONFIG_DEV_PAGEMAP_OPS
DEFINE_STATIC_KEY_FALSE(devmap_managed_key);
EXPORT_SYMBOL(devmap_managed_key);
static atomic_t devmap_enable;

/*
 * Toggle the static key for ->page_free() callbacks when dev_pagemap
 * pages go idle.
 */
void dev_pagemap_get_ops(void)
{
	if (atomic_inc_return(&devmap_enable) == 1)
		static_branch_enable(&devmap_managed_key);
}
EXPORT_SYMBOL_GPL(dev_pagemap_get_ops);

void dev_pagemap_put_ops(void)
{
	if (atomic_dec_and_test(&devmap_enable))
		static_branch_disable(&devmap_managed_key);
}
EXPORT_SYMBOL_GPL(dev_pagemap_put_ops);

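/*
 * Final put path for devmap managed pages: when the refcount drops back to
 * one the page is idle and is handed to the driver's ->page_free() callback
 * rather than being returned to the page allocator.
 */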
void __put_devmap_managed_page(struct page *page)
{
	int count = page_ref_dec_return(page);

	/*
	 * If the refcount is 1 then the page is free and the refcount is
	 * stable, as nobody else holds a reference on the page.
	 */
	if (count == 1) {
		/* Clear Active bit in case of parallel mark_page_accessed */
		__ClearPageActive(page);
		__ClearPageWaiters(page);

		mem_cgroup_uncharge(page);

		page->pgmap->page_free(page, page->pgmap->data);
	} else if (!count)
		__put_page(page);
}
EXPORT_SYMBOL(__put_devmap_managed_page);
#endif /* CONFIG_DEV_PAGEMAP_OPS */