/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LINUX_SWAPOPS_H
#define _LINUX_SWAPOPS_H

#include <linux/radix-tree.h>
#include <linux/bug.h>

/*
 * swapcache pages are stored in the swapper_space radix tree. We want to
 * get good packing density in that tree, so the index should be dense in
 * the low-order bits.
 *
 * We arrange the `type' and `offset' fields so that `type' is at the seven
 * high-order bits of the swp_entry_t and `offset' is right-aligned in the
 * remaining bits. Although `type' itself needs only five bits, we allow for
 * shmem/tmpfs to shift it all up a further two bits: see swp_to_radix_entry().
 *
 * swp_entry_t's are *never* stored anywhere in their arch-dependent format.
 */
#define SWP_TYPE_SHIFT(e)	((sizeof(e.val) * 8) - \
			(MAX_SWAPFILES_SHIFT + RADIX_TREE_EXCEPTIONAL_SHIFT))
#define SWP_OFFSET_MASK(e)	((1UL << SWP_TYPE_SHIFT(e)) - 1)

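/*
 * For example, on 64-bit, assuming the usual MAX_SWAPFILES_SHIFT of 5
 * and RADIX_TREE_EXCEPTIONAL_SHIFT of 2, SWP_TYPE_SHIFT(e) works out to
 * 64 - (5 + 2) = 57: `offset' occupies bits 0-56 and `type' the bits
 * from 57 up.
 */
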
/*
 * Store a type+offset into a swp_entry_t in an arch-independent format
 */
static inline swp_entry_t swp_entry(unsigned long type, pgoff_t offset)
{
	swp_entry_t ret;

	ret.val = (type << SWP_TYPE_SHIFT(ret)) |
			(offset & SWP_OFFSET_MASK(ret));
	return ret;
}

/*
 * Extract the `type' field from a swp_entry_t. The swp_entry_t is in
 * arch-independent format
 */
static inline unsigned swp_type(swp_entry_t entry)
{
	return (entry.val >> SWP_TYPE_SHIFT(entry));
}

/*
 * Extract the `offset' field from a swp_entry_t. The swp_entry_t is in
 * arch-independent format
 */
static inline pgoff_t swp_offset(swp_entry_t entry)
{
	return entry.val & SWP_OFFSET_MASK(entry);
}

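/*
 * Note that swp_type() and swp_offset() are exact inverses of
 * swp_entry() for any in-range type and offset.
 */
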
#ifdef CONFIG_MMU
/* check whether a pte points to a swap entry */
static inline int is_swap_pte(pte_t pte)
{
	return !pte_none(pte) && !pte_present(pte);
}
#endif

/*
 * Convert the arch-dependent pte representation of a swp_entry_t into an
 * arch-independent swp_entry_t.
 */
static inline swp_entry_t pte_to_swp_entry(pte_t pte)
{
	swp_entry_t arch_entry;

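	/* Clear the pte-level soft-dirty bit before decoding the entry. */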
	if (pte_swp_soft_dirty(pte))
		pte = pte_swp_clear_soft_dirty(pte);
	arch_entry = __pte_to_swp_entry(pte);
	return swp_entry(__swp_type(arch_entry), __swp_offset(arch_entry));
}

/*
 * Convert the arch-independent representation of a swp_entry_t into the
 * arch-dependent pte representation.
 */
static inline pte_t swp_entry_to_pte(swp_entry_t entry)
{
	swp_entry_t arch_entry;

	arch_entry = __swp_entry(swp_type(entry), swp_offset(entry));
	return __swp_entry_to_pte(arch_entry);
}

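/*
 * Convert between a swp_entry_t and the "exceptional" radix tree entry
 * format used for shmem/tmpfs swap entries: the value is shifted up by
 * RADIX_TREE_EXCEPTIONAL_SHIFT and tagged with
 * RADIX_TREE_EXCEPTIONAL_ENTRY so that it cannot be mistaken for a
 * struct page pointer.
 */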
static inline swp_entry_t radix_to_swp_entry(void *arg)
{
	swp_entry_t entry;

	entry.val = (unsigned long)arg >> RADIX_TREE_EXCEPTIONAL_SHIFT;
	return entry;
}

static inline void *swp_to_radix_entry(swp_entry_t entry)
{
	unsigned long value;

	value = entry.val << RADIX_TREE_EXCEPTIONAL_SHIFT;
	return (void *)(value | RADIX_TREE_EXCEPTIONAL_ENTRY);
}

#if IS_ENABLED(CONFIG_DEVICE_PRIVATE)
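/*
 * Device private pages (ZONE_DEVICE memory that is not directly
 * addressable by the CPU) are mapped with special swap entries of type
 * SWP_DEVICE_READ or SWP_DEVICE_WRITE; a CPU fault on such an entry is
 * handled by device_private_entry_fault().
 */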
static inline swp_entry_t make_device_private_entry(struct page *page, bool write)
{
	return swp_entry(write ? SWP_DEVICE_WRITE : SWP_DEVICE_READ,
			 page_to_pfn(page));
}

static inline bool is_device_private_entry(swp_entry_t entry)
{
	int type = swp_type(entry);
	return type == SWP_DEVICE_READ || type == SWP_DEVICE_WRITE;
}

static inline void make_device_private_entry_read(swp_entry_t *entry)
{
	*entry = swp_entry(SWP_DEVICE_READ, swp_offset(*entry));
}

static inline bool is_write_device_private_entry(swp_entry_t entry)
{
	return unlikely(swp_type(entry) == SWP_DEVICE_WRITE);
}

static inline struct page *device_private_entry_to_page(swp_entry_t entry)
{
	return pfn_to_page(swp_offset(entry));
}

int device_private_entry_fault(struct vm_area_struct *vma,
			unsigned long addr,
			swp_entry_t entry,
			unsigned int flags,
			pmd_t *pmdp);
#else /* CONFIG_DEVICE_PRIVATE */
static inline swp_entry_t make_device_private_entry(struct page *page, bool write)
{
	return swp_entry(0, 0);
}

static inline void make_device_private_entry_read(swp_entry_t *entry)
{
}

static inline bool is_device_private_entry(swp_entry_t entry)
{
	return false;
}

static inline bool is_write_device_private_entry(swp_entry_t entry)
{
	return false;
}

static inline struct page *device_private_entry_to_page(swp_entry_t entry)
{
	return NULL;
}

static inline int device_private_entry_fault(struct vm_area_struct *vma,
					unsigned long addr,
					swp_entry_t entry,
					unsigned int flags,
					pmd_t *pmdp)
{
	return VM_FAULT_SIGBUS;
}
#endif /* CONFIG_DEVICE_PRIVATE */

#ifdef CONFIG_MIGRATION
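/*
 * Migration entries stand in for the page table entries of a page that
 * is being migrated: a fault on one waits for the migration to finish
 * rather than touching the page. SWP_MIGRATION_WRITE records that the
 * original pte was writable.
 */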
static inline swp_entry_t make_migration_entry(struct page *page, int write)
{
	BUG_ON(!PageLocked(compound_head(page)));

	return swp_entry(write ? SWP_MIGRATION_WRITE : SWP_MIGRATION_READ,
			page_to_pfn(page));
}

static inline int is_migration_entry(swp_entry_t entry)
{
	return unlikely(swp_type(entry) == SWP_MIGRATION_READ ||
			swp_type(entry) == SWP_MIGRATION_WRITE);
}

static inline int is_write_migration_entry(swp_entry_t entry)
{
	return unlikely(swp_type(entry) == SWP_MIGRATION_WRITE);
}

static inline struct page *migration_entry_to_page(swp_entry_t entry)
{
	struct page *p = pfn_to_page(swp_offset(entry));
	/*
	 * Any use of migration entries may only occur while the
	 * corresponding page is locked
	 */
	BUG_ON(!PageLocked(compound_head(p)));
	return p;
}

static inline void make_migration_entry_read(swp_entry_t *entry)
{
	*entry = swp_entry(SWP_MIGRATION_READ, swp_offset(*entry));
}

extern void __migration_entry_wait(struct mm_struct *mm, pte_t *ptep,
					spinlock_t *ptl);
extern void migration_entry_wait(struct mm_struct *mm, pmd_t *pmd,
					unsigned long address);
extern void migration_entry_wait_huge(struct vm_area_struct *vma,
		struct mm_struct *mm, pte_t *pte);
#else

#define make_migration_entry(page, write) swp_entry(0, 0)
static inline int is_migration_entry(swp_entry_t swp)
{
	return 0;
}
static inline struct page *migration_entry_to_page(swp_entry_t entry)
{
	return NULL;
}

static inline void make_migration_entry_read(swp_entry_t *entryp) { }
static inline void __migration_entry_wait(struct mm_struct *mm, pte_t *ptep,
					spinlock_t *ptl) { }
static inline void migration_entry_wait(struct mm_struct *mm, pmd_t *pmd,
					 unsigned long address) { }
static inline void migration_entry_wait_huge(struct vm_area_struct *vma,
		struct mm_struct *mm, pte_t *pte) { }
static inline int is_write_migration_entry(swp_entry_t entry)
{
	return 0;
}

#endif

struct page_vma_mapped_walk;

#ifdef CONFIG_ARCH_ENABLE_THP_MIGRATION
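/*
 * PMD-level migration entries, used when migrating a transparent huge
 * page as a whole rather than splitting it first.
 */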
extern void set_pmd_migration_entry(struct page_vma_mapped_walk *pvmw,
		struct page *page);

extern void remove_migration_pmd(struct page_vma_mapped_walk *pvmw,
		struct page *new);

extern void pmd_migration_entry_wait(struct mm_struct *mm, pmd_t *pmd);

static inline swp_entry_t pmd_to_swp_entry(pmd_t pmd)
{
	swp_entry_t arch_entry;

	if (pmd_swp_soft_dirty(pmd))
		pmd = pmd_swp_clear_soft_dirty(pmd);
	arch_entry = __pmd_to_swp_entry(pmd);
	return swp_entry(__swp_type(arch_entry), __swp_offset(arch_entry));
}

static inline pmd_t swp_entry_to_pmd(swp_entry_t entry)
{
	swp_entry_t arch_entry;

	arch_entry = __swp_entry(swp_type(entry), swp_offset(entry));
	return __swp_entry_to_pmd(arch_entry);
}

static inline int is_pmd_migration_entry(pmd_t pmd)
{
	return !pmd_present(pmd) && is_migration_entry(pmd_to_swp_entry(pmd));
}
#else
static inline void set_pmd_migration_entry(struct page_vma_mapped_walk *pvmw,
		struct page *page)
{
	BUILD_BUG();
}

static inline void remove_migration_pmd(struct page_vma_mapped_walk *pvmw,
		struct page *new)
{
	BUILD_BUG();
}

static inline void pmd_migration_entry_wait(struct mm_struct *m, pmd_t *p) { }

static inline swp_entry_t pmd_to_swp_entry(pmd_t pmd)
{
	return swp_entry(0, 0);
}

static inline pmd_t swp_entry_to_pmd(swp_entry_t entry)
{
	return __pmd(0);
}

static inline int is_pmd_migration_entry(pmd_t pmd)
{
	return 0;
}
#endif

#ifdef CONFIG_MEMORY_FAILURE

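/* Number of pages currently marked hardware-poisoned, system-wide. */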
extern atomic_long_t num_poisoned_pages __read_mostly;

/*
 * Support for hardware poisoned pages
 */
static inline swp_entry_t make_hwpoison_entry(struct page *page)
{
	BUG_ON(!PageLocked(page));
	return swp_entry(SWP_HWPOISON, page_to_pfn(page));
}

static inline int is_hwpoison_entry(swp_entry_t entry)
{
	return swp_type(entry) == SWP_HWPOISON;
}

static inline bool test_set_page_hwpoison(struct page *page)
{
	return TestSetPageHWPoison(page);
}

static inline void num_poisoned_pages_inc(void)
{
	atomic_long_inc(&num_poisoned_pages);
}

static inline void num_poisoned_pages_dec(void)
{
	atomic_long_dec(&num_poisoned_pages);
}

#else

static inline swp_entry_t make_hwpoison_entry(struct page *page)
{
	return swp_entry(0, 0);
}

static inline int is_hwpoison_entry(swp_entry_t swp)
{
	return 0;
}

static inline bool test_set_page_hwpoison(struct page *page)
{
	return false;
}

static inline void num_poisoned_pages_inc(void)
{
}
#endif

#if defined(CONFIG_MEMORY_FAILURE) || defined(CONFIG_MIGRATION)
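/*
 * A "non swap" entry is one whose type lies beyond MAX_SWAPFILES, i.e.
 * one of the special entry kinds above (migration, device private,
 * hwpoison) rather than a reference into a real swap device.
 */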
static inline int non_swap_entry(swp_entry_t entry)
{
	return swp_type(entry) >= MAX_SWAPFILES;
}
#else
static inline int non_swap_entry(swp_entry_t entry)
{
	return 0;
}
#endif

#endif /* _LINUX_SWAPOPS_H */