/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LINUX_SWAPOPS_H
#define _LINUX_SWAPOPS_H

#include <linux/radix-tree.h>
#include <linux/bug.h>

/*
 * swapcache pages are stored in the swapper_space radix tree.  We want to
 * get good packing density in that tree, so the index should be dense in
 * the low-order bits.
 *
 * We arrange the `type' and `offset' fields so that `type' is at the seven
 * high-order bits of the swp_entry_t and `offset' is right-aligned in the
 * remaining bits.  Although `type' itself needs only five bits, we allow for
 * shmem/tmpfs to shift it all up a further two bits: see swp_to_radix_entry().
 *
 * swp_entry_t's are *never* stored anywhere in their arch-dependent format.
 */
#define SWP_TYPE_SHIFT(e)	((sizeof(e.val) * 8) - \
			(MAX_SWAPFILES_SHIFT + RADIX_TREE_EXCEPTIONAL_SHIFT))
#define SWP_OFFSET_MASK(e)	((1UL << SWP_TYPE_SHIFT(e)) - 1)
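
/*
 * Worked example (a sketch, assuming a 64-bit swp_entry_t with
 * MAX_SWAPFILES_SHIFT == 5 and RADIX_TREE_EXCEPTIONAL_SHIFT == 2):
 * SWP_TYPE_SHIFT is 64 - (5 + 2) = 57, so swp_entry(2, 0x1234).val is
 * (2UL << 57) | 0x1234; swp_type() then recovers 2 and swp_offset()
 * recovers 0x1234.
 */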

/*
 * Store a type+offset into a swp_entry_t in an arch-independent format
 */
static inline swp_entry_t swp_entry(unsigned long type, pgoff_t offset)
{
	swp_entry_t ret;

	ret.val = (type << SWP_TYPE_SHIFT(ret)) |
			(offset & SWP_OFFSET_MASK(ret));
	return ret;
}

/*
 * Extract the `type' field from a swp_entry_t. The swp_entry_t is in
 * arch-independent format
 */
static inline unsigned swp_type(swp_entry_t entry)
{
	return (entry.val >> SWP_TYPE_SHIFT(entry));
}

/*
 * Extract the `offset' field from a swp_entry_t. The swp_entry_t is in
 * arch-independent format
 */
static inline pgoff_t swp_offset(swp_entry_t entry)
{
	return entry.val & SWP_OFFSET_MASK(entry);
}
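
/*
 * The encode/decode pair round-trips: for any type below MAX_SWAPFILES
 * and any offset that fits in SWP_OFFSET_MASK(),
 * swp_type(swp_entry(type, offset)) == type and
 * swp_offset(swp_entry(type, offset)) == offset.
 */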

#ifdef CONFIG_MMU
/* check whether a pte points to a swap entry */
static inline int is_swap_pte(pte_t pte)
{
	return !pte_none(pte) && !pte_present(pte);
}
#endif
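
/*
 * A pte that is neither none nor present holds a swp_entry_t in its
 * arch-dependent format: either a genuine swap entry, or one of the
 * special non-swap entries below (migration, device-private, hwpoison).
 */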

/*
 * Convert the arch-dependent pte representation of a swp_entry_t into an
 * arch-independent swp_entry_t.
 */
static inline swp_entry_t pte_to_swp_entry(pte_t pte)
{
	swp_entry_t arch_entry;

	if (pte_swp_soft_dirty(pte))
		pte = pte_swp_clear_soft_dirty(pte);
	arch_entry = __pte_to_swp_entry(pte);
	return swp_entry(__swp_type(arch_entry), __swp_offset(arch_entry));
}

/*
 * Convert the arch-independent representation of a swp_entry_t into the
 * arch-dependent pte representation.
 */
static inline pte_t swp_entry_to_pte(swp_entry_t entry)
{
	swp_entry_t arch_entry;

	arch_entry = __swp_entry(swp_type(entry), swp_offset(entry));
	return __swp_entry_to_pte(arch_entry);
}
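
/*
 * Typical fault-path usage (roughly what do_swap_page() in mm/memory.c
 * does): decode the non-present pte, then dispatch on the entry type.
 *
 *	swp_entry_t entry = pte_to_swp_entry(vmf->orig_pte);
 *	if (unlikely(non_swap_entry(entry))) {
 *		if (is_migration_entry(entry))
 *			migration_entry_wait(vma->vm_mm, vmf->pmd,
 *					     vmf->address);
 *		...
 *	}
 */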

static inline swp_entry_t radix_to_swp_entry(void *arg)
{
	swp_entry_t entry;

	entry.val = (unsigned long)arg >> RADIX_TREE_EXCEPTIONAL_SHIFT;
	return entry;
}

static inline void *swp_to_radix_entry(swp_entry_t entry)
{
	unsigned long value;

	value = entry.val << RADIX_TREE_EXCEPTIONAL_SHIFT;
	return (void *)(value | RADIX_TREE_EXCEPTIONAL_ENTRY);
}
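
/*
 * Example (assuming RADIX_TREE_EXCEPTIONAL_SHIFT == 2 and
 * RADIX_TREE_EXCEPTIONAL_ENTRY == 2, as in <linux/radix-tree.h>):
 * an entry with val 5 is stored by shmem as the pointer (void *)0x16,
 * i.e. (5 << 2) | 2, which the radix tree treats as an exceptional entry
 * rather than a struct page pointer; radix_to_swp_entry() undoes the
 * shift to recover the original entry.
 */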

#if IS_ENABLED(CONFIG_DEVICE_PRIVATE)
static inline swp_entry_t make_device_private_entry(struct page *page, bool write)
{
	return swp_entry(write ? SWP_DEVICE_WRITE : SWP_DEVICE_READ,
			 page_to_pfn(page));
}

static inline bool is_device_private_entry(swp_entry_t entry)
{
	int type = swp_type(entry);
	return type == SWP_DEVICE_READ || type == SWP_DEVICE_WRITE;
}

static inline void make_device_private_entry_read(swp_entry_t *entry)
{
	*entry = swp_entry(SWP_DEVICE_READ, swp_offset(*entry));
}

static inline bool is_write_device_private_entry(swp_entry_t entry)
{
	return unlikely(swp_type(entry) == SWP_DEVICE_WRITE);
}

static inline struct page *device_private_entry_to_page(swp_entry_t entry)
{
	return pfn_to_page(swp_offset(entry));
}
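
/*
 * Device-private entries map CPU-inaccessible ZONE_DEVICE memory (e.g.
 * pages migrated to a device by HMM).  When the CPU touches such a pte,
 * the fault path decodes it and calls device_private_entry_fault() so
 * the driver can migrate the page back to system memory.
 */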

int device_private_entry_fault(struct vm_area_struct *vma,
		       unsigned long addr,
		       swp_entry_t entry,
		       unsigned int flags,
		       pmd_t *pmdp);
#else /* CONFIG_DEVICE_PRIVATE */
static inline swp_entry_t make_device_private_entry(struct page *page, bool write)
{
	return swp_entry(0, 0);
}

static inline void make_device_private_entry_read(swp_entry_t *entry)
{
}

static inline bool is_device_private_entry(swp_entry_t entry)
{
	return false;
}

static inline bool is_write_device_private_entry(swp_entry_t entry)
{
	return false;
}

static inline struct page *device_private_entry_to_page(swp_entry_t entry)
{
	return NULL;
}

static inline int device_private_entry_fault(struct vm_area_struct *vma,
		       unsigned long addr,
		       swp_entry_t entry,
		       unsigned int flags,
		       pmd_t *pmdp)
{
	return VM_FAULT_SIGBUS;
}
#endif /* CONFIG_DEVICE_PRIVATE */

#ifdef CONFIG_MIGRATION
static inline swp_entry_t make_migration_entry(struct page *page, int write)
{
	BUG_ON(!PageLocked(compound_head(page)));

	return swp_entry(write ? SWP_MIGRATION_WRITE : SWP_MIGRATION_READ,
			page_to_pfn(page));
}

static inline int is_migration_entry(swp_entry_t entry)
{
	return unlikely(swp_type(entry) == SWP_MIGRATION_READ ||
			swp_type(entry) == SWP_MIGRATION_WRITE);
}

static inline int is_write_migration_entry(swp_entry_t entry)
{
	return unlikely(swp_type(entry) == SWP_MIGRATION_WRITE);
}

static inline struct page *migration_entry_to_page(swp_entry_t entry)
{
	struct page *p = pfn_to_page(swp_offset(entry));
	/*
	 * Any use of migration entries may only occur while the
	 * corresponding page is locked
	 */
	BUG_ON(!PageLocked(compound_head(p)));
	return p;
}

static inline void make_migration_entry_read(swp_entry_t *entry)
{
	*entry = swp_entry(SWP_MIGRATION_READ, swp_offset(*entry));
}
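
/*
 * Lifecycle sketch: page migration locks the page, replaces each mapping
 * pte with a migration entry (installed via swp_entry_to_pte()), then
 * copies the data.  A thread faulting on such a pte calls
 * migration_entry_wait(), which sleeps until the page is unlocked and
 * retries the fault; the migration entries themselves are torn down by
 * remove_migration_ptes() once migration completes.
 */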

extern void __migration_entry_wait(struct mm_struct *mm, pte_t *ptep,
					spinlock_t *ptl);
extern void migration_entry_wait(struct mm_struct *mm, pmd_t *pmd,
					unsigned long address);
extern void migration_entry_wait_huge(struct vm_area_struct *vma,
		struct mm_struct *mm, pte_t *pte);
#else

#define make_migration_entry(page, write) swp_entry(0, 0)
static inline int is_migration_entry(swp_entry_t swp)
{
	return 0;
}

static inline struct page *migration_entry_to_page(swp_entry_t entry)
{
	return NULL;
}

static inline void make_migration_entry_read(swp_entry_t *entryp) { }
static inline void __migration_entry_wait(struct mm_struct *mm, pte_t *ptep,
					spinlock_t *ptl) { }
static inline void migration_entry_wait(struct mm_struct *mm, pmd_t *pmd,
					unsigned long address) { }
static inline void migration_entry_wait_huge(struct vm_area_struct *vma,
		struct mm_struct *mm, pte_t *pte) { }
static inline int is_write_migration_entry(swp_entry_t entry)
{
	return 0;
}

#endif

struct page_vma_mapped_walk;

#ifdef CONFIG_ARCH_ENABLE_THP_MIGRATION
extern void set_pmd_migration_entry(struct page_vma_mapped_walk *pvmw,
		struct page *page);

extern void remove_migration_pmd(struct page_vma_mapped_walk *pvmw,
		struct page *new);

extern void pmd_migration_entry_wait(struct mm_struct *mm, pmd_t *pmd);

static inline swp_entry_t pmd_to_swp_entry(pmd_t pmd)
{
	swp_entry_t arch_entry;

	if (pmd_swp_soft_dirty(pmd))
		pmd = pmd_swp_clear_soft_dirty(pmd);
	arch_entry = __pmd_to_swp_entry(pmd);
	return swp_entry(__swp_type(arch_entry), __swp_offset(arch_entry));
}

static inline pmd_t swp_entry_to_pmd(swp_entry_t entry)
{
	swp_entry_t arch_entry;

	arch_entry = __swp_entry(swp_type(entry), swp_offset(entry));
	return __swp_entry_to_pmd(arch_entry);
}

static inline int is_pmd_migration_entry(pmd_t pmd)
{
	return !pmd_present(pmd) && is_migration_entry(pmd_to_swp_entry(pmd));
}
#else
static inline void set_pmd_migration_entry(struct page_vma_mapped_walk *pvmw,
		struct page *page)
{
	BUILD_BUG();
}

static inline void remove_migration_pmd(struct page_vma_mapped_walk *pvmw,
		struct page *new)
{
	BUILD_BUG();
}

static inline void pmd_migration_entry_wait(struct mm_struct *m, pmd_t *p) { }

static inline swp_entry_t pmd_to_swp_entry(pmd_t pmd)
{
	return swp_entry(0, 0);
}

static inline pmd_t swp_entry_to_pmd(swp_entry_t entry)
{
	return __pmd(0);
}

static inline int is_pmd_migration_entry(pmd_t pmd)
{
	return 0;
}
#endif
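
/*
 * Same idea at PMD granularity: with CONFIG_ARCH_ENABLE_THP_MIGRATION a
 * transparent huge page can be migrated without being split, its pmd
 * being replaced by a pmd-level migration entry via
 * set_pmd_migration_entry() and restored afterwards by
 * remove_migration_pmd().
 */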

#ifdef CONFIG_MEMORY_FAILURE

extern atomic_long_t num_poisoned_pages __read_mostly;

/*
 * Support for hardware poisoned pages
 */
static inline swp_entry_t make_hwpoison_entry(struct page *page)
{
	BUG_ON(!PageLocked(page));
	return swp_entry(SWP_HWPOISON, page_to_pfn(page));
}

static inline int is_hwpoison_entry(swp_entry_t entry)
{
	return swp_type(entry) == SWP_HWPOISON;
}
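
/*
 * When memory_failure() unmaps a poisoned page, its ptes are replaced by
 * hwpoison entries so that any later access faults and can be answered
 * with SIGBUS instead of handing out corrupted data.
 */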

static inline bool test_set_page_hwpoison(struct page *page)
{
	return TestSetPageHWPoison(page);
}

static inline void num_poisoned_pages_inc(void)
{
	atomic_long_inc(&num_poisoned_pages);
}

static inline void num_poisoned_pages_dec(void)
{
	atomic_long_dec(&num_poisoned_pages);
}

#else

static inline swp_entry_t make_hwpoison_entry(struct page *page)
{
	return swp_entry(0, 0);
}

static inline int is_hwpoison_entry(swp_entry_t swp)
{
	return 0;
}

static inline bool test_set_page_hwpoison(struct page *page)
{
	return false;
}

static inline void num_poisoned_pages_inc(void)
{
}
#endif

#if defined(CONFIG_MEMORY_FAILURE) || defined(CONFIG_MIGRATION)
static inline int non_swap_entry(swp_entry_t entry)
{
	return swp_type(entry) >= MAX_SWAPFILES;
}
#else
static inline int non_swap_entry(swp_entry_t entry)
{
	return 0;
}
#endif
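
/*
 * Note: the special entry types above (migration, device-private and
 * hwpoison) are carved out of the type space at and above MAX_SWAPFILES,
 * so this single comparison rejects everything that does not name a real
 * swap device.
 */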

#endif /* _LINUX_SWAPOPS_H */