blob: f0628660d54100ffd890b7878c863bc698569a41 [file] [log] [blame]
Greg Kroah-Hartmanb2441312017-11-01 15:07:57 +01001/* SPDX-License-Identifier: GPL-2.0 */
Dan Williams9476df72016-01-15 16:56:19 -08002#ifndef _LINUX_MEMREMAP_H_
3#define _LINUX_MEMREMAP_H_
Dan Williams5c2c2582016-01-15 16:56:49 -08004#include <linux/ioport.h>
5#include <linux/percpu-refcount.h>
Dan Williams9476df72016-01-15 16:56:19 -08006
7struct resource;
8struct device;
Dan Williams4b94ffd2016-01-15 16:56:22 -08009
/**
 * struct vmem_altmap - pre-allocated storage for vmemmap_populate
 * @base_pfn: base of the entire dev_pagemap mapping
 * @reserve: pages mapped, but reserved for driver use (relative to @base)
 * @free: free pages set aside in the mapping for memmap storage
 * @align: pages reserved to meet allocation alignments
 * @alloc: track pages consumed, private to vmemmap_populate()
 */
struct vmem_altmap {
	/* const: fixed for the lifetime of the mapping once instantiated */
	const unsigned long base_pfn;
	const unsigned long reserve;
	unsigned long free;
	unsigned long align;
	unsigned long alloc;
};
25
Jérôme Glisse5042db42017-09-08 16:11:43 -070026/*
27 * Specialize ZONE_DEVICE memory into multiple types each having differents
28 * usage.
29 *
Jérôme Glisse5042db42017-09-08 16:11:43 -070030 * MEMORY_DEVICE_PRIVATE:
31 * Device memory that is not directly addressable by the CPU: CPU can neither
32 * read nor write private memory. In this case, we do still have struct pages
33 * backing the device memory. Doing so simplifies the implementation, but it is
34 * important to remember that there are certain points at which the struct page
35 * must be treated as an opaque object, rather than a "normal" struct page.
36 *
37 * A more complete discussion of unaddressable memory may be found in
Mike Rapoportad56b732018-03-21 21:22:47 +020038 * include/linux/hmm.h and Documentation/vm/hmm.rst.
Jérôme Glissedf6ad692017-09-08 16:12:24 -070039 *
40 * MEMORY_DEVICE_PUBLIC:
41 * Device memory that is cache coherent from device and CPU point of view. This
42 * is use on platform that have an advance system bus (like CAPI or CCIX). A
43 * driver can hotplug the device memory using ZONE_DEVICE and with that memory
44 * type. Any page of a process can be migrated to such memory. However no one
45 * should be allow to pin such memory so that it can always be evicted.
Dan Williamse76384882018-05-16 11:46:08 -070046 *
47 * MEMORY_DEVICE_FS_DAX:
48 * Host memory that has similar access semantics as System RAM i.e. DMA
49 * coherent and supports page pinning. In support of coordinating page
50 * pinning vs other operations MEMORY_DEVICE_FS_DAX arranges for a
51 * wakeup event whenever a page is unpinned and becomes idle. This
52 * wakeup is used to coordinate physical address space management (ex:
53 * fs truncate/hole punch) vs pinned pages (ex: device dma).
Logan Gunthorpe52916982018-10-04 15:27:35 -060054 *
55 * MEMORY_DEVICE_PCI_P2PDMA:
56 * Device memory residing in a PCI BAR intended for use with Peer-to-Peer
57 * transactions.
Jérôme Glisse5042db42017-09-08 16:11:43 -070058 */
59enum memory_type {
Dan Williamse76384882018-05-16 11:46:08 -070060 MEMORY_DEVICE_PRIVATE = 1,
Jérôme Glissedf6ad692017-09-08 16:12:24 -070061 MEMORY_DEVICE_PUBLIC,
Dan Williamse76384882018-05-16 11:46:08 -070062 MEMORY_DEVICE_FS_DAX,
Logan Gunthorpe52916982018-10-04 15:27:35 -060063 MEMORY_DEVICE_PCI_P2PDMA,
Jérôme Glisse5042db42017-09-08 16:11:43 -070064};
65
/*
 * Additional notes about MEMORY_DEVICE_PRIVATE may be found in
 * include/linux/hmm.h and Documentation/vm/hmm.rst. There is also a brief
 * explanation in include/linux/memory_hotplug.h.
 *
 * The page_free() callback is called once the page refcount reaches 1
 * (ZONE_DEVICE pages never reach 0 refcount unless there is a refcount bug.
 * This allows the device driver to implement its own memory management.)
 *
 * @page: the idle ZONE_DEVICE page being released back to the driver
 * @data: the dev_pagemap's private @data pointer, passed through verbatim
 */
typedef void (*dev_page_free_t)(struct page *page, void *data);
76
Dan Williams9476df72016-01-15 16:56:19 -080077/**
78 * struct dev_pagemap - metadata for ZONE_DEVICE mappings
Jérôme Glisse5042db42017-09-08 16:11:43 -070079 * @page_free: free page callback when page refcount reaches 1
Dan Williams4b94ffd2016-01-15 16:56:22 -080080 * @altmap: pre-allocated/reserved memory for vmemmap allocations
Dan Williams5c2c2582016-01-15 16:56:49 -080081 * @res: physical address range covered by @ref
82 * @ref: reference count that pins the devm_memremap_pages() mapping
Dan Williamsa95c90f2018-12-28 00:34:57 -080083 * @kill: callback to transition @ref to the dead state
Dan Williams9476df72016-01-15 16:56:19 -080084 * @dev: host device of the mapping for debug
Jérôme Glisse5042db42017-09-08 16:11:43 -070085 * @data: private data pointer for page_free()
86 * @type: memory type: see MEMORY_* in memory_hotplug.h
Dan Williams9476df72016-01-15 16:56:19 -080087 */
88struct dev_pagemap {
Jérôme Glisse5042db42017-09-08 16:11:43 -070089 dev_page_free_t page_free;
Logan Gunthorpee7744aa2017-12-29 08:54:04 +010090 struct vmem_altmap altmap;
91 bool altmap_valid;
92 struct resource res;
Dan Williams5c2c2582016-01-15 16:56:49 -080093 struct percpu_ref *ref;
Dan Williamsa95c90f2018-12-28 00:34:57 -080094 void (*kill)(struct percpu_ref *ref);
Dan Williams9476df72016-01-15 16:56:19 -080095 struct device *dev;
Jérôme Glisse5042db42017-09-08 16:11:43 -070096 void *data;
97 enum memory_type type;
Logan Gunthorpe977196b2018-10-04 15:27:37 -060098 u64 pci_p2pdma_bus_offset;
Dan Williams9476df72016-01-15 16:56:19 -080099};
100
#ifdef CONFIG_ZONE_DEVICE
void *devm_memremap_pages(struct device *dev, struct dev_pagemap *pgmap);
struct dev_pagemap *get_dev_pagemap(unsigned long pfn,
		struct dev_pagemap *pgmap);

unsigned long vmem_altmap_offset(struct vmem_altmap *altmap);
void vmem_altmap_free(struct vmem_altmap *altmap, unsigned long nr_pfns);
#else
/* Inline no-op/failure stubs so callers compile without ZONE_DEVICE. */
static inline void *devm_memremap_pages(struct device *dev,
		struct dev_pagemap *pgmap)
{
	/*
	 * Fail attempts to call devm_memremap_pages() without
	 * ZONE_DEVICE support enabled, this requires callers to fall
	 * back to plain devm_memremap() based on config
	 */
	WARN_ON_ONCE(1);
	return ERR_PTR(-ENXIO);
}

static inline struct dev_pagemap *get_dev_pagemap(unsigned long pfn,
		struct dev_pagemap *pgmap)
{
	/* Without ZONE_DEVICE no pfn can belong to a dev_pagemap. */
	return NULL;
}

static inline unsigned long vmem_altmap_offset(struct vmem_altmap *altmap)
{
	/* No altmap can exist, so there is no reserved-pages offset. */
	return 0;
}

static inline void vmem_altmap_free(struct vmem_altmap *altmap,
		unsigned long nr_pfns)
{
	/* Nothing to return to a nonexistent altmap. */
}
#endif /* CONFIG_ZONE_DEVICE */
Jérôme Glisse7b2d55d22017-09-08 16:11:46 -0700137
Dan Williams5c2c2582016-01-15 16:56:49 -0800138static inline void put_dev_pagemap(struct dev_pagemap *pgmap)
139{
140 if (pgmap)
141 percpu_ref_put(pgmap->ref);
142}
Dan Williams9476df72016-01-15 16:56:19 -0800143#endif /* _LINUX_MEMREMAP_H_ */