/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LINUX_SWAPOPS_H
#define _LINUX_SWAPOPS_H

#include <linux/radix-tree.h>
#include <linux/bug.h>
#include <linux/mm_types.h>

#ifdef CONFIG_MMU

/*
 * swapcache pages are stored in the swapper_space radix tree.  We want to
 * get good packing density in that tree, so the index should be dense in
 * the low-order bits.
 *
 * We arrange the `type' and `offset' fields so that `type' is at the seven
 * high-order bits of the swp_entry_t and `offset' is right-aligned in the
 * remaining bits.  Although `type' itself needs only five bits, we allow for
 * shmem/tmpfs to shift it all up a further two bits: see swp_to_radix_entry().
 *
 * swp_entry_t's are *never* stored anywhere in their arch-dependent format.
 */
#define SWP_TYPE_SHIFT	(BITS_PER_XA_VALUE - MAX_SWAPFILES_SHIFT)
#define SWP_OFFSET_MASK	((1UL << SWP_TYPE_SHIFT) - 1)

/*
 * Store a type+offset into a swp_entry_t in an arch-independent format
 */
static inline swp_entry_t swp_entry(unsigned long type, pgoff_t offset)
{
	swp_entry_t ret;

	ret.val = (type << SWP_TYPE_SHIFT) | (offset & SWP_OFFSET_MASK);
	return ret;
}

/*
 * Extract the `type' field from a swp_entry_t. The swp_entry_t is in
 * arch-independent format
 */
static inline unsigned swp_type(swp_entry_t entry)
{
	return (entry.val >> SWP_TYPE_SHIFT);
}

/*
 * Extract the `offset' field from a swp_entry_t. The swp_entry_t is in
 * arch-independent format
 */
static inline pgoff_t swp_offset(swp_entry_t entry)
{
	return entry.val & SWP_OFFSET_MASK;
}
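
/*
 * Worked example (illustrative): on a 64-bit kernel BITS_PER_XA_VALUE is 63
 * and MAX_SWAPFILES_SHIFT is 5, so SWP_TYPE_SHIFT is 58: `type' lives in
 * bits 58..62 and `offset' in bits 0..57.  Thus after
 *
 *	swp_entry_t e = swp_entry(2, 0x1234);
 *
 * swp_type(e) == 2 and swp_offset(e) == 0x1234.
 */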

/* check whether a pte points to a swap entry */
static inline int is_swap_pte(pte_t pte)
{
	return !pte_none(pte) && !pte_present(pte);
}
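
/*
 * Note that is_swap_pte() is true for any pte that is neither none nor
 * present, so it also matches the special non-swap entries defined below
 * (device private, migration, hwpoison); use non_swap_entry() to tell
 * those apart from real swap entries.
 */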

/*
 * Convert the arch-dependent pte representation of a swp_entry_t into an
 * arch-independent swp_entry_t.
 */
static inline swp_entry_t pte_to_swp_entry(pte_t pte)
{
	swp_entry_t arch_entry;

	if (pte_swp_soft_dirty(pte))
		pte = pte_swp_clear_soft_dirty(pte);
	if (pte_swp_uffd_wp(pte))
		pte = pte_swp_clear_uffd_wp(pte);
	arch_entry = __pte_to_swp_entry(pte);
	return swp_entry(__swp_type(arch_entry), __swp_offset(arch_entry));
}

/*
 * Convert the arch-independent representation of a swp_entry_t into the
 * arch-dependent pte representation.
 */
static inline pte_t swp_entry_to_pte(swp_entry_t entry)
{
	swp_entry_t arch_entry;

	arch_entry = __swp_entry(swp_type(entry), swp_offset(entry));
	return __swp_entry_to_pte(arch_entry);
}

static inline swp_entry_t radix_to_swp_entry(void *arg)
{
	swp_entry_t entry;

	entry.val = xa_to_value(arg);
	return entry;
}

static inline void *swp_to_radix_entry(swp_entry_t entry)
{
	return xa_mk_value(entry.val);
}
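
/*
 * xa_mk_value() tags the value (by setting its low bit) so that a swap
 * entry stored in an address_space XArray, such as the swap cache or a
 * shmem mapping, is never confused with a struct page pointer; the layout
 * comment at the top of this file reserves the spare high bits this needs.
 */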

#if IS_ENABLED(CONFIG_DEVICE_PRIVATE)
static inline swp_entry_t make_device_private_entry(struct page *page, bool write)
{
	return swp_entry(write ? SWP_DEVICE_WRITE : SWP_DEVICE_READ,
			 page_to_pfn(page));
}

static inline bool is_device_private_entry(swp_entry_t entry)
{
	int type = swp_type(entry);
	return type == SWP_DEVICE_READ || type == SWP_DEVICE_WRITE;
}

static inline void make_device_private_entry_read(swp_entry_t *entry)
{
	*entry = swp_entry(SWP_DEVICE_READ, swp_offset(*entry));
}

static inline bool is_write_device_private_entry(swp_entry_t entry)
{
	return unlikely(swp_type(entry) == SWP_DEVICE_WRITE);
}

static inline unsigned long device_private_entry_to_pfn(swp_entry_t entry)
{
	return swp_offset(entry);
}

static inline struct page *device_private_entry_to_page(swp_entry_t entry)
{
	return pfn_to_page(swp_offset(entry));
}
#else /* CONFIG_DEVICE_PRIVATE */
static inline swp_entry_t make_device_private_entry(struct page *page, bool write)
{
	return swp_entry(0, 0);
}

static inline void make_device_private_entry_read(swp_entry_t *entry)
{
}

static inline bool is_device_private_entry(swp_entry_t entry)
{
	return false;
}

static inline bool is_write_device_private_entry(swp_entry_t entry)
{
	return false;
}

static inline unsigned long device_private_entry_to_pfn(swp_entry_t entry)
{
	return 0;
}

static inline struct page *device_private_entry_to_page(swp_entry_t entry)
{
	return NULL;
}
#endif /* CONFIG_DEVICE_PRIVATE */
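
/*
 * Usage sketch (illustrative only): when a page is migrated to device
 * private memory its ptes are replaced with device private entries, and a
 * CPU fault on such a pte recovers the page roughly like this:
 *
 *	swp_entry_t entry = pte_to_swp_entry(pte);
 *
 *	if (is_device_private_entry(entry)) {
 *		struct page *page = device_private_entry_to_page(entry);
 *		... hand the fault to the owning driver's migrate-back path ...
 *	}
 */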

#ifdef CONFIG_MIGRATION
static inline swp_entry_t make_migration_entry(struct page *page, int write)
{
	BUG_ON(!PageLocked(compound_head(page)));

	return swp_entry(write ? SWP_MIGRATION_WRITE : SWP_MIGRATION_READ,
			 page_to_pfn(page));
}

static inline int is_migration_entry(swp_entry_t entry)
{
	return unlikely(swp_type(entry) == SWP_MIGRATION_READ ||
			swp_type(entry) == SWP_MIGRATION_WRITE);
}

static inline int is_write_migration_entry(swp_entry_t entry)
{
	return unlikely(swp_type(entry) == SWP_MIGRATION_WRITE);
}

static inline unsigned long migration_entry_to_pfn(swp_entry_t entry)
{
	return swp_offset(entry);
}

static inline struct page *migration_entry_to_page(swp_entry_t entry)
{
	struct page *p = pfn_to_page(swp_offset(entry));
	/*
	 * Any use of migration entries may only occur while the
	 * corresponding page is locked
	 */
	BUG_ON(!PageLocked(compound_head(p)));
	return p;
}

static inline void make_migration_entry_read(swp_entry_t *entry)
{
	*entry = swp_entry(SWP_MIGRATION_READ, swp_offset(*entry));
}

extern void __migration_entry_wait(struct mm_struct *mm, pte_t *ptep,
					spinlock_t *ptl);
extern void migration_entry_wait(struct mm_struct *mm, pmd_t *pmd,
					unsigned long address);
extern void migration_entry_wait_huge(struct vm_area_struct *vma,
		struct mm_struct *mm, pte_t *pte);
#else

#define make_migration_entry(page, write) swp_entry(0, 0)
static inline int is_migration_entry(swp_entry_t swp)
{
	return 0;
}

static inline unsigned long migration_entry_to_pfn(swp_entry_t entry)
{
	return 0;
}

static inline struct page *migration_entry_to_page(swp_entry_t entry)
{
	return NULL;
}

static inline void make_migration_entry_read(swp_entry_t *entryp) { }
static inline void __migration_entry_wait(struct mm_struct *mm, pte_t *ptep,
					spinlock_t *ptl) { }
static inline void migration_entry_wait(struct mm_struct *mm, pmd_t *pmd,
					unsigned long address) { }
static inline void migration_entry_wait_huge(struct vm_area_struct *vma,
		struct mm_struct *mm, pte_t *pte) { }
static inline int is_write_migration_entry(swp_entry_t entry)
{
	return 0;
}

#endif
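
/*
 * Usage sketch (illustrative, roughly mirroring do_swap_page() in
 * mm/memory.c): a fault on a migration entry must wait for the migration
 * to complete and then retry the fault:
 *
 *	swp_entry_t entry = pte_to_swp_entry(vmf->orig_pte);
 *
 *	if (unlikely(non_swap_entry(entry)) && is_migration_entry(entry))
 *		migration_entry_wait(vmf->vma->vm_mm, vmf->pmd, vmf->address);
 *
 * after which the fault handler returns and the access is retried.
 */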

struct page_vma_mapped_walk;

#ifdef CONFIG_ARCH_ENABLE_THP_MIGRATION
extern void set_pmd_migration_entry(struct page_vma_mapped_walk *pvmw,
		struct page *page);

extern void remove_migration_pmd(struct page_vma_mapped_walk *pvmw,
		struct page *new);

extern void pmd_migration_entry_wait(struct mm_struct *mm, pmd_t *pmd);

static inline swp_entry_t pmd_to_swp_entry(pmd_t pmd)
{
	swp_entry_t arch_entry;

	if (pmd_swp_soft_dirty(pmd))
		pmd = pmd_swp_clear_soft_dirty(pmd);
	arch_entry = __pmd_to_swp_entry(pmd);
	return swp_entry(__swp_type(arch_entry), __swp_offset(arch_entry));
}

static inline pmd_t swp_entry_to_pmd(swp_entry_t entry)
{
	swp_entry_t arch_entry;

	arch_entry = __swp_entry(swp_type(entry), swp_offset(entry));
	return __swp_entry_to_pmd(arch_entry);
}

static inline int is_pmd_migration_entry(pmd_t pmd)
{
	return !pmd_present(pmd) && is_migration_entry(pmd_to_swp_entry(pmd));
}
#else
static inline void set_pmd_migration_entry(struct page_vma_mapped_walk *pvmw,
		struct page *page)
{
	BUILD_BUG();
}

static inline void remove_migration_pmd(struct page_vma_mapped_walk *pvmw,
		struct page *new)
{
	BUILD_BUG();
}

static inline void pmd_migration_entry_wait(struct mm_struct *m, pmd_t *p) { }

static inline swp_entry_t pmd_to_swp_entry(pmd_t pmd)
{
	return swp_entry(0, 0);
}

static inline pmd_t swp_entry_to_pmd(swp_entry_t entry)
{
	return __pmd(0);
}

static inline int is_pmd_migration_entry(pmd_t pmd)
{
	return 0;
}
#endif
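
/*
 * A pmd migration entry is a non-present pmd whose swp fields encode the
 * pfn of the huge page under migration.  Note that, unlike the pte-level
 * stubs above, set_pmd_migration_entry() and remove_migration_pmd() use
 * BUILD_BUG() when CONFIG_ARCH_ENABLE_THP_MIGRATION is off, so no caller
 * may be compiled in without that support.
 */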

#ifdef CONFIG_MEMORY_FAILURE

extern atomic_long_t num_poisoned_pages __read_mostly;

/*
 * Support for hardware poisoned pages
 */
static inline swp_entry_t make_hwpoison_entry(struct page *page)
{
	BUG_ON(!PageLocked(page));
	return swp_entry(SWP_HWPOISON, page_to_pfn(page));
}

static inline int is_hwpoison_entry(swp_entry_t entry)
{
	return swp_type(entry) == SWP_HWPOISON;
}

static inline void num_poisoned_pages_inc(void)
{
	atomic_long_inc(&num_poisoned_pages);
}

static inline void num_poisoned_pages_dec(void)
{
	atomic_long_dec(&num_poisoned_pages);
}

#else

static inline swp_entry_t make_hwpoison_entry(struct page *page)
{
	return swp_entry(0, 0);
}

static inline int is_hwpoison_entry(swp_entry_t swp)
{
	return 0;
}

static inline void num_poisoned_pages_inc(void)
{
}
#endif

#if defined(CONFIG_MEMORY_FAILURE) || defined(CONFIG_MIGRATION) || \
    defined(CONFIG_DEVICE_PRIVATE)
static inline int non_swap_entry(swp_entry_t entry)
{
	return swp_type(entry) >= MAX_SWAPFILES;
}
#else
static inline int non_swap_entry(swp_entry_t entry)
{
	return 0;
}
#endif
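
/*
 * This works because MAX_SWAPFILES (see linux/swap.h) is defined with the
 * device private, migration and hwpoison type values carved out of the top
 * of the 1 << MAX_SWAPFILES_SHIFT type space, so any entry whose type is
 * >= MAX_SWAPFILES cannot refer to a real swap file.
 */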

#endif /* CONFIG_MMU */
#endif /* _LINUX_SWAPOPS_H */