// SPDX-License-Identifier: GPL-2.0
/*
 *  linux/mm/swap_state.c
 *
 *  Copyright (C) 1991, 1992, 1993, 1994  Linus Torvalds
 *  Swap reorganised 29.12.95, Stephen Tweedie
 *
 *  Rewritten to use page cache, (C) 1998 Stephen Tweedie
 */
#include <linux/mm.h>
#include <linux/gfp.h>
#include <linux/kernel_stat.h>
#include <linux/swap.h>
#include <linux/swapops.h>
#include <linux/init.h>
#include <linux/pagemap.h>
#include <linux/backing-dev.h>
#include <linux/blkdev.h>
#include <linux/pagevec.h>
#include <linux/migrate.h>
#include <linux/vmalloc.h>
#include <linux/swap_slots.h>
#include <linux/huge_mm.h>


/*
 * swapper_space is a fiction, retained to simplify the path through
 * vmscan's shrink_page_list.
 */
static const struct address_space_operations swap_aops = {
	.writepage	= swap_writepage,
	.set_page_dirty	= swap_set_page_dirty,
#ifdef CONFIG_MIGRATION
	.migratepage	= migrate_page,
#endif
};

struct address_space *swapper_spaces[MAX_SWAPFILES] __read_mostly;
static unsigned int nr_swapper_spaces[MAX_SWAPFILES] __read_mostly;
static bool enable_vma_readahead __read_mostly = true;

#define SWAP_RA_WIN_SHIFT	(PAGE_SHIFT / 2)
#define SWAP_RA_HITS_MASK	((1UL << SWAP_RA_WIN_SHIFT) - 1)
#define SWAP_RA_HITS_MAX	SWAP_RA_HITS_MASK
#define SWAP_RA_WIN_MASK	(~PAGE_MASK & ~SWAP_RA_HITS_MASK)

#define SWAP_RA_HITS(v)		((v) & SWAP_RA_HITS_MASK)
#define SWAP_RA_WIN(v)		(((v) & SWAP_RA_WIN_MASK) >> SWAP_RA_WIN_SHIFT)
#define SWAP_RA_ADDR(v)		((v) & PAGE_MASK)

#define SWAP_RA_VAL(addr, win, hits)				\
	(((addr) & PAGE_MASK) |					\
	 (((win) << SWAP_RA_WIN_SHIFT) & SWAP_RA_WIN_MASK) |	\
	 ((hits) & SWAP_RA_HITS_MASK))

/* Initial readahead hits is 4 to start up with a small window */
#define GET_SWAP_RA_VAL(vma)					\
	(atomic_long_read(&(vma)->swap_readahead_info) ? : 4)
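/*
 * vma->swap_readahead_info packs three fields into one long: the recent
 * readahead hit count in the low SWAP_RA_WIN_SHIFT bits, the current
 * readahead window size in the remaining bits below PAGE_SHIFT, and the
 * page-aligned address of the last swapin fault in the bits above.  With
 * 4K pages, for example, bits 0-5 hold the hits, bits 6-11 the window,
 * and the rest the address.
 */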

#define INC_CACHE_INFO(x)	do { swap_cache_info.x++; } while (0)
#define ADD_CACHE_INFO(x, nr)	do { swap_cache_info.x += (nr); } while (0)

static struct {
	unsigned long add_total;
	unsigned long del_total;
	unsigned long find_success;
	unsigned long find_total;
} swap_cache_info;

unsigned long total_swapcache_pages(void)
{
	unsigned int i, j, nr;
	unsigned long ret = 0;
	struct address_space *spaces;
	struct swap_info_struct *si;

	for (i = 0; i < MAX_SWAPFILES; i++) {
		swp_entry_t entry = swp_entry(i, 1);

		/* Avoid a get_swap_device() warning for a bad swap entry */
		if (!swp_swap_info(entry))
			continue;
		/* Prevent swapoff from freeing swapper_spaces */
		si = get_swap_device(entry);
		if (!si)
			continue;
		nr = nr_swapper_spaces[i];
		spaces = swapper_spaces[i];
		for (j = 0; j < nr; j++)
			ret += spaces[j].nrpages;
		put_swap_device(si);
	}
	return ret;
}

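/* Readahead hit counter for the cluster-based (non-vma) readahead path */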
static atomic_t swapin_readahead_hits = ATOMIC_INIT(4);

void show_swap_cache_info(void)
{
	printk("%lu pages in swap cache\n", total_swapcache_pages());
	printk("Swap cache stats: add %lu, delete %lu, find %lu/%lu\n",
		swap_cache_info.add_total, swap_cache_info.del_total,
		swap_cache_info.find_success, swap_cache_info.find_total);
	printk("Free swap  = %ldkB\n",
		get_nr_swap_pages() << (PAGE_SHIFT - 10));
	printk("Total swap = %lukB\n", total_swap_pages << (PAGE_SHIFT - 10));
}

/*
 * add_to_swap_cache resembles add_to_page_cache_locked on swapper_space,
 * but sets SwapCache flag and private instead of mapping and index.
 */
int add_to_swap_cache(struct page *page, swp_entry_t entry, gfp_t gfp)
{
	struct address_space *address_space = swap_address_space(entry);
	pgoff_t idx = swp_offset(entry);
	XA_STATE_ORDER(xas, &address_space->i_pages, idx, compound_order(page));
	unsigned long i, nr = hpage_nr_pages(page);

	VM_BUG_ON_PAGE(!PageLocked(page), page);
	VM_BUG_ON_PAGE(PageSwapCache(page), page);
	VM_BUG_ON_PAGE(!PageSwapBacked(page), page);

	page_ref_add(page, nr);
	SetPageSwapCache(page);

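	/*
	 * Store one swap cache slot per subpage.  If an XArray node
	 * allocation fails, xas_nomem() allocates memory with @gfp and
	 * the whole transaction is retried.
	 */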
	do {
		xas_lock_irq(&xas);
		xas_create_range(&xas);
		if (xas_error(&xas))
			goto unlock;
		for (i = 0; i < nr; i++) {
			VM_BUG_ON_PAGE(xas.xa_index != idx + i, page);
			set_page_private(page + i, entry.val + i);
			xas_store(&xas, page);
			xas_next(&xas);
		}
		address_space->nrpages += nr;
		__mod_node_page_state(page_pgdat(page), NR_FILE_PAGES, nr);
		ADD_CACHE_INFO(add_total, nr);
unlock:
		xas_unlock_irq(&xas);
	} while (xas_nomem(&xas, gfp));

	if (!xas_error(&xas))
		return 0;

	ClearPageSwapCache(page);
	page_ref_sub(page, nr);
	return xas_error(&xas);
}

/*
 * This must be called only on pages that have
 * been verified to be in the swap cache.
 */
void __delete_from_swap_cache(struct page *page, swp_entry_t entry)
{
	struct address_space *address_space = swap_address_space(entry);
	int i, nr = hpage_nr_pages(page);
	pgoff_t idx = swp_offset(entry);
	XA_STATE(xas, &address_space->i_pages, idx);

	VM_BUG_ON_PAGE(!PageLocked(page), page);
	VM_BUG_ON_PAGE(!PageSwapCache(page), page);
	VM_BUG_ON_PAGE(PageWriteback(page), page);

	for (i = 0; i < nr; i++) {
		void *entry = xas_store(&xas, NULL);
		VM_BUG_ON_PAGE(entry != page, entry);
		set_page_private(page + i, 0);
		xas_next(&xas);
	}
	ClearPageSwapCache(page);
	address_space->nrpages -= nr;
	__mod_node_page_state(page_pgdat(page), NR_FILE_PAGES, -nr);
	ADD_CACHE_INFO(del_total, nr);
}

/**
 * add_to_swap - allocate swap space for a page
 * @page: page we want to move to swap
 *
 * Allocate swap space for the page and add the page to the
 * swap cache.  Caller needs to hold the page lock.
 *
 * Return: 1 on success, 0 if no swap space could be allocated or the
 * page could not be added to the swap cache.
 */
int add_to_swap(struct page *page)
{
	swp_entry_t entry;
	int err;

	VM_BUG_ON_PAGE(!PageLocked(page), page);
	VM_BUG_ON_PAGE(!PageUptodate(page), page);

	entry = get_swap_page(page);
	if (!entry.val)
		return 0;

	/*
	 * XArray node allocations from PF_MEMALLOC contexts could
	 * completely exhaust the page allocator. __GFP_NOMEMALLOC
	 * stops emergency reserves from being allocated.
	 *
	 * TODO: this could cause a theoretical memory reclaim
	 * deadlock in the swap out path.
	 */
	/*
	 * Add it to the swap cache.
	 */
	err = add_to_swap_cache(page, entry,
			__GFP_HIGH|__GFP_NOMEMALLOC|__GFP_NOWARN);
	if (err)
		/*
		 * add_to_swap_cache() doesn't return -EEXIST, so we can safely
		 * clear SWAP_HAS_CACHE flag.
		 */
		goto fail;
	/*
	 * Normally the page will be dirtied in unmap because its pte should
	 * be dirty. A special case is an MADV_FREE page: its pte may have the
	 * dirty bit cleared while the page's SwapBacked bit is still set,
	 * because clearing the dirty bit and the SwapBacked bit is not done
	 * under a lock. For such a page, unmap will not set the dirty bit,
	 * so page reclaim will not write the page out, which can corrupt the
	 * data when the page is swapped in later. Always setting the dirty
	 * bit for the page solves the problem.
	 */
	set_page_dirty(page);

	return 1;

fail:
	put_swap_page(page, entry);
	return 0;
}

/*
 * This must be called only on pages that have
 * been verified to be in the swap cache and locked.
 * It will never put the page into the free list;
 * the caller has a reference on the page.
 */
void delete_from_swap_cache(struct page *page)
{
	swp_entry_t entry = { .val = page_private(page) };
	struct address_space *address_space = swap_address_space(entry);

	xa_lock_irq(&address_space->i_pages);
	__delete_from_swap_cache(page, entry);
	xa_unlock_irq(&address_space->i_pages);

	put_swap_page(page, entry);
	page_ref_sub(page, hpage_nr_pages(page));
}

/*
 * If we are the only user, then try to free up the swap cache.
 *
 * It's ok to check for PageSwapCache without the page lock
 * here because we are going to recheck again inside
 * try_to_free_swap() _with_ the lock.
 *					- Marcelo
 */
static inline void free_swap_cache(struct page *page)
{
	if (PageSwapCache(page) && !page_mapped(page) && trylock_page(page)) {
		try_to_free_swap(page);
		unlock_page(page);
	}
}

/*
 * Perform a free_page(), also freeing any swap cache associated with
 * this page if it is the last user of the page.
 */
void free_page_and_swap_cache(struct page *page)
{
	free_swap_cache(page);
	if (!is_huge_zero_page(page))
		put_page(page);
}

/*
 * Passed an array of pages, drop them all from swapcache and then release
 * them.  They are removed from the LRU and freed if this is their last use.
 */
void free_pages_and_swap_cache(struct page **pages, int nr)
{
	struct page **pagep = pages;
	int i;

	lru_add_drain();
	for (i = 0; i < nr; i++)
		free_swap_cache(pagep[i]);
	release_pages(pagep, nr);
}

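/*
 * VMA-based readahead is used only when it is enabled via sysfs and no
 * rotational swap device is in use (nr_rotate_swap is zero).
 */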
static inline bool swap_use_vma_readahead(void)
{
	return READ_ONCE(enable_vma_readahead) && !atomic_read(&nr_rotate_swap);
}

/*
 * Lookup a swap entry in the swap cache. A found page will be returned
 * unlocked and with its refcount incremented - we rely on the kernel
 * lock getting page table operations atomic even if we drop the page
 * lock before returning.
 */
struct page *lookup_swap_cache(swp_entry_t entry, struct vm_area_struct *vma,
			       unsigned long addr)
{
	struct page *page;
	struct swap_info_struct *si;

	si = get_swap_device(entry);
	if (!si)
		return NULL;
	page = find_get_page(swap_address_space(entry), swp_offset(entry));
	put_swap_device(si);

	INC_CACHE_INFO(find_total);
	if (page) {
		bool vma_ra = swap_use_vma_readahead();
		bool readahead;

		INC_CACHE_INFO(find_success);
		/*
		 * At the moment, we don't support PG_readahead for anon THP
		 * so let's bail out rather than confusing the readahead stat.
		 */
		if (unlikely(PageTransCompound(page)))
			return page;

		readahead = TestClearPageReadahead(page);
		if (vma && vma_ra) {
			unsigned long ra_val;
			int win, hits;

			ra_val = GET_SWAP_RA_VAL(vma);
			win = SWAP_RA_WIN(ra_val);
			hits = SWAP_RA_HITS(ra_val);
			if (readahead)
				hits = min_t(int, hits + 1, SWAP_RA_HITS_MAX);
			atomic_long_set(&vma->swap_readahead_info,
					SWAP_RA_VAL(addr, win, hits));
		}

		if (readahead) {
			count_vm_event(SWAP_RA_HIT);
			if (!vma || !vma_ra)
				atomic_inc(&swapin_readahead_hits);
		}
	}

	return page;
}

struct page *__read_swap_cache_async(swp_entry_t entry, gfp_t gfp_mask,
			struct vm_area_struct *vma, unsigned long addr,
			bool *new_page_allocated)
{
	struct swap_info_struct *si;
	struct page *page;

	*new_page_allocated = false;

	for (;;) {
		int err;
		/*
		 * First check the swap cache.  Since this is normally
		 * called after lookup_swap_cache() failed, re-calling
		 * that would confuse statistics.
		 */
		si = get_swap_device(entry);
		if (!si)
			return NULL;
		page = find_get_page(swap_address_space(entry),
				     swp_offset(entry));
		put_swap_device(si);
		if (page)
			return page;

		/*
		 * Just skip readahead for an unused swap slot.
		 * During swapoff, when swap_slot_cache is disabled,
		 * we have to handle the race between putting a
		 * swap entry in the swap cache and marking the swap slot
		 * as SWAP_HAS_CACHE.  That's handled later in this code,
		 * or else swapoff would be aborted if we returned NULL here.
		 */
		if (!__swp_swapcount(entry) && swap_slot_cache_enabled)
			return NULL;

		/*
		 * Get a new page to read into from swap.  Allocate it now,
		 * before marking swap_map SWAP_HAS_CACHE, when -EEXIST will
		 * cause any racers to loop around until we add it to cache.
		 */
		page = alloc_page_vma(gfp_mask, vma, addr);
		if (!page)
			return NULL;

		/*
		 * Swap entry may have been freed since our caller observed it.
		 */
		err = swapcache_prepare(entry);
		if (!err)
			break;

		put_page(page);
		if (err != -EEXIST)
			return NULL;

		/*
		 * We might race against __delete_from_swap_cache(), and
		 * stumble across a swap_map entry whose SWAP_HAS_CACHE
		 * has not yet been cleared.  Or race against another
		 * __read_swap_cache_async(), which has set SWAP_HAS_CACHE
		 * in swap_map, but not yet added its page to swap cache.
		 */
		cond_resched();
	}

	/*
	 * The swap entry is ours to swap in. Prepare the new page.
	 */

	__SetPageLocked(page);
	__SetPageSwapBacked(page);

	/* May fail (-ENOMEM) if XArray node allocation failed. */
	if (add_to_swap_cache(page, entry, gfp_mask & GFP_KERNEL)) {
		put_swap_page(page, entry);
		goto fail_unlock;
	}

	if (mem_cgroup_charge(page, NULL, gfp_mask)) {
		delete_from_swap_cache(page);
		goto fail_unlock;
	}

	/* XXX: Move to lru_cache_add() when it supports new vs putback */
	spin_lock_irq(&page_pgdat(page)->lru_lock);
	lru_note_cost_page(page);
	spin_unlock_irq(&page_pgdat(page)->lru_lock);

	/* Caller will initiate read into locked page */
	SetPageWorkingset(page);
	lru_cache_add(page);
	*new_page_allocated = true;
	return page;

fail_unlock:
	unlock_page(page);
	put_page(page);
	return NULL;
}

/*
 * Locate a page of swap in physical memory, reserving swap cache space
 * and reading the disk if it is not already cached.
 * A failure return means that either the page allocation failed or that
 * the swap entry is no longer in use.
 */
struct page *read_swap_cache_async(swp_entry_t entry, gfp_t gfp_mask,
		struct vm_area_struct *vma, unsigned long addr, bool do_poll)
{
	bool page_was_allocated;
	struct page *retpage = __read_swap_cache_async(entry, gfp_mask,
			vma, addr, &page_was_allocated);

	if (page_was_allocated)
		swap_readpage(retpage, do_poll);

	return retpage;
}

static unsigned int __swapin_nr_pages(unsigned long prev_offset,
				      unsigned long offset,
				      int hits,
				      int max_pages,
				      int prev_win)
{
	unsigned int pages, last_ra;

	/*
	 * This heuristic has been found to work well on both sequential and
	 * random loads, swapping to hard disk or to SSD: please don't ask
	 * what the "+ 2" means, it just happens to work well, that's all.
	 */
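	/*
	 * For example, 5 recent hits give pages = 5 + 2 = 7, rounded up to
	 * the next power of two (8), then clamped to max_pages and to at
	 * least half of the previous window.
	 */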
	pages = hits + 2;
	if (pages == 2) {
		/*
		 * We can have no readahead hits to judge by: but must not get
		 * stuck here forever, so check for an adjacent offset instead
		 * (and don't even bother to check whether swap type is same).
		 */
		if (offset != prev_offset + 1 && offset != prev_offset - 1)
			pages = 1;
	} else {
		unsigned int roundup = 4;
		while (roundup < pages)
			roundup <<= 1;
		pages = roundup;
	}

	if (pages > max_pages)
		pages = max_pages;

	/* Don't shrink readahead too fast */
	last_ra = prev_win / 2;
	if (pages < last_ra)
		pages = last_ra;

	return pages;
}

static unsigned long swapin_nr_pages(unsigned long offset)
{
	static unsigned long prev_offset;
	unsigned int hits, pages, max_pages;
	static atomic_t last_readahead_pages;

	max_pages = 1 << READ_ONCE(page_cluster);
	if (max_pages <= 1)
		return 1;

	hits = atomic_xchg(&swapin_readahead_hits, 0);
	pages = __swapin_nr_pages(READ_ONCE(prev_offset), offset, hits,
				  max_pages,
				  atomic_read(&last_readahead_pages));
	if (!hits)
		WRITE_ONCE(prev_offset, offset);
	atomic_set(&last_readahead_pages, pages);

	return pages;
}

/**
 * swap_cluster_readahead - swap in pages in hope we need them soon
 * @entry: swap entry of this memory
 * @gfp_mask: memory allocation flags
 * @vmf: fault information
 *
 * Returns the struct page for entry and addr, after queueing swapin.
 *
 * Primitive swap readahead code. We simply read an aligned block of
 * (1 << page_cluster) entries in the swap area. This method is chosen
 * because it doesn't cost us any seek time.  We also make sure to queue
 * the 'original' request together with the readahead ones...
 *
 * This has been extended to use the NUMA policies from the mm triggering
 * the readahead.
 *
 * Caller must hold read mmap_lock if vmf->vma is not NULL.
 */
struct page *swap_cluster_readahead(swp_entry_t entry, gfp_t gfp_mask,
				struct vm_fault *vmf)
{
	struct page *page;
	unsigned long entry_offset = swp_offset(entry);
	unsigned long offset = entry_offset;
	unsigned long start_offset, end_offset;
	unsigned long mask;
	struct swap_info_struct *si = swp_swap_info(entry);
	struct blk_plug plug;
	bool do_poll = true, page_allocated;
	struct vm_area_struct *vma = vmf->vma;
	unsigned long addr = vmf->address;

	mask = swapin_nr_pages(offset) - 1;
	if (!mask)
		goto skip;

	/* Test swap type to make sure the dereference is safe */
	if (likely(si->flags & (SWP_BLKDEV | SWP_FS))) {
		struct inode *inode = si->swap_file->f_mapping->host;
		if (inode_read_congested(inode))
			goto skip;
	}

	do_poll = false;
	/* Read a page_cluster sized and aligned cluster around offset. */
	start_offset = offset & ~mask;
	end_offset = offset | mask;
	if (!start_offset)	/* First page is swap header. */
		start_offset++;
	if (end_offset >= si->max)
		end_offset = si->max - 1;

	blk_start_plug(&plug);
	for (offset = start_offset; offset <= end_offset ; offset++) {
		/* Ok, do the async read-ahead now */
		page = __read_swap_cache_async(
			swp_entry(swp_type(entry), offset),
			gfp_mask, vma, addr, &page_allocated);
		if (!page)
			continue;
		if (page_allocated) {
			swap_readpage(page, false);
			if (offset != entry_offset) {
				SetPageReadahead(page);
				count_vm_event(SWAP_RA);
			}
		}
		put_page(page);
	}
	blk_finish_plug(&plug);

	lru_add_drain();	/* Push any new pages onto the LRU now */
skip:
	return read_swap_cache_async(entry, gfp_mask, vma, addr, do_poll);
}

int init_swap_address_space(unsigned int type, unsigned long nr_pages)
{
	struct address_space *spaces, *space;
	unsigned int i, nr;

	nr = DIV_ROUND_UP(nr_pages, SWAP_ADDRESS_SPACE_PAGES);
	spaces = kvcalloc(nr, sizeof(struct address_space), GFP_KERNEL);
	if (!spaces)
		return -ENOMEM;
	for (i = 0; i < nr; i++) {
		space = spaces + i;
		xa_init_flags(&space->i_pages, XA_FLAGS_LOCK_IRQ);
		atomic_set(&space->i_mmap_writable, 0);
		space->a_ops = &swap_aops;
		/* swap cache doesn't use writeback related tags */
		mapping_set_no_writeback_tags(space);
	}
	nr_swapper_spaces[type] = nr;
	swapper_spaces[type] = spaces;

	return 0;
}

void exit_swap_address_space(unsigned int type)
{
	kvfree(swapper_spaces[type]);
	nr_swapper_spaces[type] = 0;
	swapper_spaces[type] = NULL;
}

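/*
 * Clamp the readahead window [lpfn, rpfn) of virtual page numbers so that
 * it stays inside the vma and inside the PMD that maps faddr, since
 * swap_ra_info() only reads PTEs from a single page table.
 */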
static inline void swap_ra_clamp_pfn(struct vm_area_struct *vma,
				     unsigned long faddr,
				     unsigned long lpfn,
				     unsigned long rpfn,
				     unsigned long *start,
				     unsigned long *end)
{
	*start = max3(lpfn, PFN_DOWN(vma->vm_start),
		      PFN_DOWN(faddr & PMD_MASK));
	*end = min3(rpfn, PFN_DOWN(vma->vm_end),
		    PFN_DOWN((faddr & PMD_MASK) + PMD_SIZE));
}

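/*
 * Fill @ra_info with the vma readahead window around the faulting address:
 * ->win is the window size in pages, ->offset is the index of the faulting
 * page within that window, and ->ptes/->nr_pte describe the copied PTEs
 * covering the window.
 */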
static void swap_ra_info(struct vm_fault *vmf,
			struct vma_swap_readahead *ra_info)
{
	struct vm_area_struct *vma = vmf->vma;
	unsigned long ra_val;
	swp_entry_t entry;
	unsigned long faddr, pfn, fpfn;
	unsigned long start, end;
	pte_t *pte, *orig_pte;
	unsigned int max_win, hits, prev_win, win, left;
#ifndef CONFIG_64BIT
	pte_t *tpte;
#endif

	max_win = 1 << min_t(unsigned int, READ_ONCE(page_cluster),
			     SWAP_RA_ORDER_CEILING);
	if (max_win == 1) {
		ra_info->win = 1;
		return;
	}

	faddr = vmf->address;
	orig_pte = pte = pte_offset_map(vmf->pmd, faddr);
	entry = pte_to_swp_entry(*pte);
	if ((unlikely(non_swap_entry(entry)))) {
		pte_unmap(orig_pte);
		return;
	}

	fpfn = PFN_DOWN(faddr);
	ra_val = GET_SWAP_RA_VAL(vma);
	pfn = PFN_DOWN(SWAP_RA_ADDR(ra_val));
	prev_win = SWAP_RA_WIN(ra_val);
	hits = SWAP_RA_HITS(ra_val);
	ra_info->win = win = __swapin_nr_pages(pfn, fpfn, hits,
					       max_win, prev_win);
	atomic_long_set(&vma->swap_readahead_info,
			SWAP_RA_VAL(faddr, win, 0));

	if (win == 1) {
		pte_unmap(orig_pte);
		return;
	}

	/* Copy the PTEs because the page table may be unmapped */
	if (fpfn == pfn + 1)
		swap_ra_clamp_pfn(vma, faddr, fpfn, fpfn + win, &start, &end);
	else if (pfn == fpfn + 1)
		swap_ra_clamp_pfn(vma, faddr, fpfn - win + 1, fpfn + 1,
				  &start, &end);
	else {
		left = (win - 1) / 2;
		swap_ra_clamp_pfn(vma, faddr, fpfn - left, fpfn + win - left,
				  &start, &end);
	}
	ra_info->nr_pte = end - start;
	ra_info->offset = fpfn - start;
	pte -= ra_info->offset;
#ifdef CONFIG_64BIT
	ra_info->ptes = pte;
#else
	tpte = ra_info->ptes;
	for (pfn = start; pfn != end; pfn++)
		*tpte++ = *pte++;
#endif
	pte_unmap(orig_pte);
}

/**
 * swap_vma_readahead - swap in pages in hope we need them soon
 * @fentry: swap entry of this memory
 * @gfp_mask: memory allocation flags
 * @vmf: fault information
 *
 * Returns the struct page for entry and addr, after queueing swapin.
 *
 * Primitive swap readahead code. We simply read in a few pages whose
 * virtual addresses are around the fault address in the same vma.
 *
 * Caller must hold read mmap_lock if vmf->vma is not NULL.
 *
 */
static struct page *swap_vma_readahead(swp_entry_t fentry, gfp_t gfp_mask,
				       struct vm_fault *vmf)
{
	struct blk_plug plug;
	struct vm_area_struct *vma = vmf->vma;
	struct page *page;
	pte_t *pte, pentry;
	swp_entry_t entry;
	unsigned int i;
	bool page_allocated;
	struct vma_swap_readahead ra_info = {0,};

	swap_ra_info(vmf, &ra_info);
	if (ra_info.win == 1)
		goto skip;

	blk_start_plug(&plug);
	for (i = 0, pte = ra_info.ptes; i < ra_info.nr_pte;
	     i++, pte++) {
		pentry = *pte;
		if (pte_none(pentry))
			continue;
		if (pte_present(pentry))
			continue;
		entry = pte_to_swp_entry(pentry);
		if (unlikely(non_swap_entry(entry)))
			continue;
		page = __read_swap_cache_async(entry, gfp_mask, vma,
					       vmf->address, &page_allocated);
		if (!page)
			continue;
		if (page_allocated) {
			swap_readpage(page, false);
			if (i != ra_info.offset) {
				SetPageReadahead(page);
				count_vm_event(SWAP_RA);
			}
		}
		put_page(page);
	}
	blk_finish_plug(&plug);
	lru_add_drain();
skip:
	return read_swap_cache_async(fentry, gfp_mask, vma, vmf->address,
				     ra_info.win == 1);
}

/**
 * swapin_readahead - swap in pages in hope we need them soon
 * @entry: swap entry of this memory
 * @gfp_mask: memory allocation flags
 * @vmf: fault information
 *
 * Returns the struct page for entry and addr, after queueing swapin.
 *
 * It's the main entry function for swap readahead. Depending on the
 * configuration, it will read ahead blocks using either cluster-based
 * (ie, physical disk based) or vma-based (ie, virtual addresses around
 * the faulting address) readahead.
 */
struct page *swapin_readahead(swp_entry_t entry, gfp_t gfp_mask,
				struct vm_fault *vmf)
{
	return swap_use_vma_readahead() ?
			swap_vma_readahead(entry, gfp_mask, vmf) :
			swap_cluster_readahead(entry, gfp_mask, vmf);
}

#ifdef CONFIG_SYSFS
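/*
 * vma_ra_enabled is exposed as /sys/kernel/mm/swap/vma_ra_enabled and
 * switches between vma-based and cluster-based swap readahead at runtime.
 */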
static ssize_t vma_ra_enabled_show(struct kobject *kobj,
				     struct kobj_attribute *attr, char *buf)
{
	return sprintf(buf, "%s\n", enable_vma_readahead ? "true" : "false");
}
static ssize_t vma_ra_enabled_store(struct kobject *kobj,
				      struct kobj_attribute *attr,
				      const char *buf, size_t count)
{
	if (!strncmp(buf, "true", 4) || !strncmp(buf, "1", 1))
		enable_vma_readahead = true;
	else if (!strncmp(buf, "false", 5) || !strncmp(buf, "0", 1))
		enable_vma_readahead = false;
	else
		return -EINVAL;

	return count;
}
static struct kobj_attribute vma_ra_enabled_attr =
	__ATTR(vma_ra_enabled, 0644, vma_ra_enabled_show,
	       vma_ra_enabled_store);

static struct attribute *swap_attrs[] = {
	&vma_ra_enabled_attr.attr,
	NULL,
};

static struct attribute_group swap_attr_group = {
	.attrs = swap_attrs,
};

static int __init swap_init_sysfs(void)
{
	int err;
	struct kobject *swap_kobj;

	swap_kobj = kobject_create_and_add("swap", mm_kobj);
	if (!swap_kobj) {
		pr_err("failed to create swap kobject\n");
		return -ENOMEM;
	}
	err = sysfs_create_group(swap_kobj, &swap_attr_group);
	if (err) {
		pr_err("failed to register swap group\n");
		goto delete_obj;
	}
	return 0;

delete_obj:
	kobject_put(swap_kobj);
	return err;
}
subsys_initcall(swap_init_sysfs);
#endif