/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LINUX_MIGRATE_H
#define _LINUX_MIGRATE_H

#include <linux/mm.h>
#include <linux/mempolicy.h>
#include <linux/migrate_mode.h>
#include <linux/hugetlb.h>

typedef struct page *new_page_t(struct page *page, unsigned long private);
typedef void free_page_t(struct page *page, unsigned long private);

/*
 * Return values from address_space_operations.migratepage():
 * - negative errno on page migration failure;
 * - zero on page migration success;
 */
#define MIGRATEPAGE_SUCCESS		0

enum migrate_reason {
	MR_COMPACTION,
	MR_MEMORY_FAILURE,
	MR_MEMORY_HOTPLUG,
	MR_SYSCALL,		/* also applies to cpusets */
	MR_MEMPOLICY_MBIND,
	MR_NUMA_MISPLACED,
	MR_CONTIG_RANGE,
	MR_TYPES
};

/* In mm/debug.c; also keep in sync with include/trace/events/migrate.h */
extern const char *migrate_reason_names[MR_TYPES];

static inline struct page *new_page_nodemask(struct page *page,
				int preferred_nid, nodemask_t *nodemask)
{
	gfp_t gfp_mask = GFP_USER | __GFP_MOVABLE | __GFP_RETRY_MAYFAIL;
	unsigned int order = 0;
	struct page *new_page = NULL;

	if (PageHuge(page))
		return alloc_huge_page_nodemask(page_hstate(compound_head(page)),
				preferred_nid, nodemask);

	if (PageTransHuge(page)) {
		gfp_mask |= GFP_TRANSHUGE;
		order = HPAGE_PMD_ORDER;
	}

	if (PageHighMem(page) || (zone_idx(page_zone(page)) == ZONE_MOVABLE))
		gfp_mask |= __GFP_HIGHMEM;

	new_page = __alloc_pages_nodemask(gfp_mask, order,
				preferred_nid, nodemask);

	if (new_page && PageTransHuge(new_page))
		prep_transhuge_page(new_page);

	return new_page;
}
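
/*
 * Usage sketch (an illustration, not part of the API): callers typically
 * wrap new_page_nodemask() in a new_page_t allocator so it can be handed to
 * migrate_pages(). The helper name below is hypothetical:
 *
 *	static struct page *example_new_page(struct page *page,
 *					     unsigned long private)
 *	{
 *		int nid = page_to_nid(page);
 *
 *		return new_page_nodemask(page, nid, &node_states[N_MEMORY]);
 *	}
 */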

#ifdef CONFIG_MIGRATION

extern void putback_movable_pages(struct list_head *l);
extern int migrate_page(struct address_space *mapping,
			struct page *newpage, struct page *page,
			enum migrate_mode mode);
extern int migrate_pages(struct list_head *l, new_page_t new, free_page_t free,
		unsigned long private, enum migrate_mode mode, int reason);
extern int isolate_movable_page(struct page *page, isolate_mode_t mode);
extern void putback_movable_page(struct page *page);

extern int migrate_prep(void);
extern int migrate_prep_local(void);
extern void migrate_page_states(struct page *newpage, struct page *page);
extern void migrate_page_copy(struct page *newpage, struct page *page);
extern int migrate_huge_page_move_mapping(struct address_space *mapping,
				  struct page *newpage, struct page *page);
extern int migrate_page_move_mapping(struct address_space *mapping,
		struct page *newpage, struct page *page, enum migrate_mode mode,
		int extra_count);
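
/*
 * Sketch of a minimal address_space_operations.migratepage() implementation
 * for mappings with no private page state; this is essentially what the
 * generic migrate_page() helper above does. example_migratepage is a
 * hypothetical name:
 *
 *	static int example_migratepage(struct address_space *mapping,
 *			struct page *newpage, struct page *page,
 *			enum migrate_mode mode)
 *	{
 *		int rc;
 *
 *		rc = migrate_page_move_mapping(mapping, newpage, page, mode, 0);
 *		if (rc != MIGRATEPAGE_SUCCESS)
 *			return rc;
 *		migrate_page_copy(newpage, page);
 *		return MIGRATEPAGE_SUCCESS;
 *	}
 */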
#else

static inline void putback_movable_pages(struct list_head *l) {}
static inline int migrate_pages(struct list_head *l, new_page_t new,
		free_page_t free, unsigned long private, enum migrate_mode mode,
		int reason)
	{ return -ENOSYS; }
static inline int isolate_movable_page(struct page *page, isolate_mode_t mode)
	{ return -EBUSY; }

static inline int migrate_prep(void) { return -ENOSYS; }
static inline int migrate_prep_local(void) { return -ENOSYS; }

static inline void migrate_page_states(struct page *newpage, struct page *page)
{
}

static inline void migrate_page_copy(struct page *newpage,
				     struct page *page) {}

static inline int migrate_huge_page_move_mapping(struct address_space *mapping,
				  struct page *newpage, struct page *page)
{
	return -ENOSYS;
}

#endif /* CONFIG_MIGRATION */

#ifdef CONFIG_COMPACTION
extern int PageMovable(struct page *page);
extern void __SetPageMovable(struct page *page, struct address_space *mapping);
extern void __ClearPageMovable(struct page *page);
#else
static inline int PageMovable(struct page *page) { return 0; }
static inline void __SetPageMovable(struct page *page,
				struct address_space *mapping)
{
}
static inline void __ClearPageMovable(struct page *page)
{
}
#endif

#ifdef CONFIG_NUMA_BALANCING
extern bool pmd_trans_migrating(pmd_t pmd);
extern int migrate_misplaced_page(struct page *page,
				  struct vm_area_struct *vma, int node);
#else
static inline bool pmd_trans_migrating(pmd_t pmd)
{
	return false;
}
static inline int migrate_misplaced_page(struct page *page,
					 struct vm_area_struct *vma, int node)
{
	return -EAGAIN; /* can't migrate now */
}
#endif /* CONFIG_NUMA_BALANCING */

#if defined(CONFIG_NUMA_BALANCING) && defined(CONFIG_TRANSPARENT_HUGEPAGE)
extern int migrate_misplaced_transhuge_page(struct mm_struct *mm,
			struct vm_area_struct *vma,
			pmd_t *pmd, pmd_t entry,
			unsigned long address,
			struct page *page, int node);
#else
static inline int migrate_misplaced_transhuge_page(struct mm_struct *mm,
			struct vm_area_struct *vma,
			pmd_t *pmd, pmd_t entry,
			unsigned long address,
			struct page *page, int node)
{
	return -EAGAIN;
}
#endif /* CONFIG_NUMA_BALANCING && CONFIG_TRANSPARENT_HUGEPAGE */


#ifdef CONFIG_MIGRATION

/*
 * Watch out for PAE architectures, where a physical address can be wider
 * than an unsigned long: there might not be enough bits to store both the
 * pfn and all of the flags below. So far we have enough room for all our
 * flags.
 */
#define MIGRATE_PFN_VALID	(1UL << 0)
#define MIGRATE_PFN_MIGRATE	(1UL << 1)
#define MIGRATE_PFN_LOCKED	(1UL << 2)
#define MIGRATE_PFN_WRITE	(1UL << 3)
#define MIGRATE_PFN_DEVICE	(1UL << 4)
#define MIGRATE_PFN_ERROR	(1UL << 5)
#define MIGRATE_PFN_SHIFT	6

static inline struct page *migrate_pfn_to_page(unsigned long mpfn)
{
	if (!(mpfn & MIGRATE_PFN_VALID))
		return NULL;
	return pfn_to_page(mpfn >> MIGRATE_PFN_SHIFT);
}

static inline unsigned long migrate_pfn(unsigned long pfn)
{
	return (pfn << MIGRATE_PFN_SHIFT) | MIGRATE_PFN_VALID;
}
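
/*
 * Encoding sketch (illustration only): an entry stores the pfn in the high
 * bits and the MIGRATE_PFN_* flags in the low MIGRATE_PFN_SHIFT bits:
 *
 *	unsigned long mpfn = migrate_pfn(page_to_pfn(page)) |
 *			     MIGRATE_PFN_MIGRATE;
 *
 * migrate_pfn_to_page(mpfn) then returns the original page.
 */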

/*
 * struct migrate_vma_ops - migrate operation callback
 *
 * @alloc_and_copy: alloc destination memory and copy source memory to it
 * @finalize_and_map: allow caller to map the successfully migrated pages
 *
 *
 * The alloc_and_copy() callback happens once all source pages have been
 * locked, unmapped and checked (for whether they are pinned). All pages that
 * can be migrated will have an entry in the src array set with the pfn value
 * of the page and with the MIGRATE_PFN_VALID and MIGRATE_PFN_MIGRATE flags
 * set (other flags might be set but should be ignored by the callback).
 *
 * The alloc_and_copy() callback can then allocate destination memory and copy
 * source memory to it for all those entries (i.e. with the MIGRATE_PFN_VALID
 * and MIGRATE_PFN_MIGRATE flags set). Once these are allocated and copied, the
 * callback must update each corresponding entry in the dst array with the pfn
 * value of the destination page and with the MIGRATE_PFN_VALID and
 * MIGRATE_PFN_LOCKED flags set (destination pages must have their struct pages
 * locked, via lock_page()).
 *
 * At this point the alloc_and_copy() callback is done and returns.
 *
 * Note that the callback does not have to migrate all the pages that are
 * marked with the MIGRATE_PFN_MIGRATE flag in the src array unless this is a
 * migration from device memory to system memory (i.e. the MIGRATE_PFN_DEVICE
 * flag is also set in the src array entry). If the device driver cannot
 * migrate a device page back to system memory, then it must set the
 * corresponding dst array entry to MIGRATE_PFN_ERROR. This will trigger a
 * SIGBUS if the CPU tries to access any of the virtual addresses originally
 * backed by this page. Because a SIGBUS is such a severe result for the
 * userspace process, the device driver should avoid setting MIGRATE_PFN_ERROR
 * unless it is really in an unrecoverable state.
 *
 * For empty entries in the CPU page table (i.e. pte_none() or pmd_none() is
 * true) we do set the MIGRATE_PFN_MIGRATE flag in the corresponding source
 * array entry, which allows the device driver to allocate device memory for
 * those unbacked virtual addresses. For this the device driver simply has to
 * allocate device memory and properly set the destination entry, just as for
 * a regular migration. Note that this can still fail, so the device driver
 * must check in the finalize_and_map() callback whether the migration
 * succeeded for those entries, just like for a regular migration.
 *
 * THE alloc_and_copy() CALLBACK MUST NOT CHANGE ANY OF THE SRC ARRAY ENTRIES
 * OR BAD THINGS WILL HAPPEN !
 *
 *
 * The finalize_and_map() callback happens after struct page migration from
 * source to destination (destination struct pages are the struct pages for
 * the memory allocated by the alloc_and_copy() callback). Migration can fail,
 * and thus finalize_and_map() allows the driver to inspect which pages were
 * successfully migrated, and which were not. Successfully migrated pages will
 * have the MIGRATE_PFN_MIGRATE flag set for their src array entry.
 *
 * It is safe to update the device page table from within the
 * finalize_and_map() callback because both the destination and source pages
 * are still locked, and the mmap_sem is held in read mode (hence no one can
 * unmap the range being migrated).
 *
 * Once the callback is done cleaning things up and updating its page table
 * (if it chose to do so; this is not an obligation), it returns. At this
 * point, the HMM core will finish up the final steps, and the migration is
 * complete.
 *
 * THE finalize_and_map() CALLBACK MUST NOT CHANGE ANY OF THE SRC OR DST ARRAY
 * ENTRIES OR BAD THINGS WILL HAPPEN !
 */
struct migrate_vma_ops {
	void (*alloc_and_copy)(struct vm_area_struct *vma,
			       const unsigned long *src,
			       unsigned long *dst,
			       unsigned long start,
			       unsigned long end,
			       void *private);
	void (*finalize_and_map)(struct vm_area_struct *vma,
				 const unsigned long *src,
				 const unsigned long *dst,
				 unsigned long start,
				 unsigned long end,
				 void *private);
};
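
/*
 * Driver-side sketch (not a real driver; all names below are hypothetical):
 * an alloc_and_copy() implementation walks the src entries, allocates and
 * fills a destination page for each migratable entry, and publishes it
 * through the dst array, following the protocol documented above:
 *
 *	static void example_alloc_and_copy(struct vm_area_struct *vma,
 *			const unsigned long *src, unsigned long *dst,
 *			unsigned long start, unsigned long end, void *private)
 *	{
 *		unsigned long addr, i;
 *
 *		for (addr = start, i = 0; addr < end; addr += PAGE_SIZE, i++) {
 *			struct page *spage = migrate_pfn_to_page(src[i]);
 *			struct page *dpage;
 *
 *			if (!(src[i] & MIGRATE_PFN_MIGRATE))
 *				continue;
 *			dpage = alloc_page_vma(GFP_HIGHUSER, vma, addr);
 *			if (!dpage)
 *				continue;
 *			lock_page(dpage);
 *			if (spage)
 *				copy_highpage(dpage, spage);
 *			dst[i] = migrate_pfn(page_to_pfn(dpage)) |
 *				 MIGRATE_PFN_LOCKED;
 *		}
 *	}
 *
 * finalize_and_map() would then check MIGRATE_PFN_MIGRATE in src[i] to see
 * which entries actually migrated before updating any driver state.
 */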

#if defined(CONFIG_MIGRATE_VMA_HELPER)
int migrate_vma(const struct migrate_vma_ops *ops,
		struct vm_area_struct *vma,
		unsigned long start,
		unsigned long end,
		unsigned long *src,
		unsigned long *dst,
		void *private);
#else
static inline int migrate_vma(const struct migrate_vma_ops *ops,
			      struct vm_area_struct *vma,
			      unsigned long start,
			      unsigned long end,
			      unsigned long *src,
			      unsigned long *dst,
			      void *private)
{
	return -EINVAL;
}
#endif /* CONFIG_MIGRATE_VMA_HELPER */

#endif /* CONFIG_MIGRATION */

#endif /* _LINUX_MIGRATE_H */