// SPDX-License-Identifier: GPL-2.0
/*
 *  linux/mm/page_io.c
 *
 *  Copyright (C) 1991, 1992, 1993, 1994  Linus Torvalds
 *
 *  Swap reorganised 29.12.95,
 *  Asynchronous swapping added 30.12.95. Stephen Tweedie
 *  Removed race in async swapping. 14.4.1996. Bruno Haible
 *  Add swap of shared pages through the page cache. 20.2.1998. Stephen Tweedie
 *  Always use brw_page, life becomes simpler. 12 May 1998 Eric Biederman
 */

#include <linux/mm.h>
#include <linux/kernel_stat.h>
#include <linux/gfp.h>
#include <linux/pagemap.h>
#include <linux/swap.h>
#include <linux/bio.h>
#include <linux/swapops.h>
#include <linux/buffer_head.h>
#include <linux/writeback.h>
#include <linux/frontswap.h>
#include <linux/blkdev.h>
#include <linux/psi.h>
#include <linux/uio.h>
#include <linux/sched/task.h>
#include <linux/delayacct.h>

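/*
 * Completion handler for bios that write a page to swap.  On I/O error the
 * page is re-dirtied so its contents are not lost, and PG_reclaim is cleared
 * so folio_rotate_reclaimable() is not attempted on it.
 */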
void end_swap_bio_write(struct bio *bio)
{
	struct page *page = bio_first_page_all(bio);

	if (bio->bi_status) {
		SetPageError(page);
		/*
		 * We failed to write the page out to swap-space.
		 * Re-dirty the page in order to avoid it being reclaimed.
		 * Also print a dire warning that things will go BAD (tm)
		 * very quickly.
		 *
		 * Also clear PG_reclaim to avoid folio_rotate_reclaimable()
		 */
		set_page_dirty(page);
		pr_alert_ratelimited("Write-error on swap-device (%u:%u:%llu)\n",
				     MAJOR(bio_dev(bio)), MINOR(bio_dev(bio)),
				     (unsigned long long)bio->bi_iter.bi_sector);
		ClearPageReclaim(page);
	}
	end_page_writeback(page);
	bio_put(bio);
}

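/*
 * Tell a block driver that owns the swap device (e.g. zram) that the slot
 * backing this page can be freed, and re-dirty the page so it gets written
 * out again if it is reclaimed later.  Only applies to SWP_BLKDEV devices
 * whose driver implements ->swap_slot_free_notify().
 */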
static void swap_slot_free_notify(struct page *page)
{
	struct swap_info_struct *sis;
	struct gendisk *disk;
	swp_entry_t entry;

	/*
	 * There is no guarantee that the page is in swap cache - the software
	 * suspend code (at least) uses end_swap_bio_read() against a non-
	 * swapcache page.  So we must check PG_swapcache before proceeding with
	 * this optimization.
	 */
	if (unlikely(!PageSwapCache(page)))
		return;

	sis = page_swap_info(page);
	if (data_race(!(sis->flags & SWP_BLKDEV)))
		return;

	/*
	 * The swap subsystem performs lazy swap slot freeing,
	 * expecting that the page will be swapped out again.
	 * So we can avoid an unnecessary write if the page
	 * isn't redirtied.
	 * This is good for real swap storage because we can
	 * reduce unnecessary I/O and enhance wear-leveling
	 * if an SSD is used as the swap device.
	 * But if an in-memory swap device (eg zram) is used,
	 * this causes a duplicated copy between uncompressed
	 * data in VM-owned memory and compressed data in
	 * zram-owned memory.  So let's free zram-owned memory
	 * and make the VM-owned decompressed page *dirty*,
	 * so the page should be swapped out somewhere again if
	 * we again wish to reclaim it.
	 */
	disk = sis->bdev->bd_disk;
	entry.val = page_private(page);
	if (disk->fops->swap_slot_free_notify && __swap_count(entry) == 1) {
		unsigned long offset;

		offset = swp_offset(entry);

		SetPageDirty(page);
		disk->fops->swap_slot_free_notify(sis->bdev,
				offset);
	}
}

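/*
 * Completion handler for bios that read a page from swap: mark the page up
 * to date (or set PageError on failure), unlock it, and wake up a task that
 * is waiting synchronously on the bio, if any.
 */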
static void end_swap_bio_read(struct bio *bio)
{
	struct page *page = bio_first_page_all(bio);
	struct task_struct *waiter = bio->bi_private;

	if (bio->bi_status) {
		SetPageError(page);
		ClearPageUptodate(page);
		pr_alert_ratelimited("Read-error on swap-device (%u:%u:%llu)\n",
				     MAJOR(bio_dev(bio)), MINOR(bio_dev(bio)),
				     (unsigned long long)bio->bi_iter.bi_sector);
		goto out;
	}

	SetPageUptodate(page);
	swap_slot_free_notify(page);
out:
	unlock_page(page);
	WRITE_ONCE(bio->bi_private, NULL);
	bio_put(bio);
	if (waiter) {
		blk_wake_io_task(waiter);
		put_task_struct(waiter);
	}
}

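/*
 * swapon support for regular swapfiles on filesystems that do not provide
 * their own ->swap_activate(): walk the file with bmap() and record every
 * PAGE_SIZE-aligned, physically contiguous run of blocks as a swap extent,
 * so that later swap I/O can go straight to the block device.
 */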
int generic_swapfile_activate(struct swap_info_struct *sis,
				struct file *swap_file,
				sector_t *span)
{
	struct address_space *mapping = swap_file->f_mapping;
	struct inode *inode = mapping->host;
	unsigned blocks_per_page;
	unsigned long page_no;
	unsigned blkbits;
	sector_t probe_block;
	sector_t last_block;
	sector_t lowest_block = -1;
	sector_t highest_block = 0;
	int nr_extents = 0;
	int ret;

	blkbits = inode->i_blkbits;
	blocks_per_page = PAGE_SIZE >> blkbits;

	/*
	 * Map all the blocks into the extent tree.  This code doesn't try
	 * to be very smart.
	 */
	probe_block = 0;
	page_no = 0;
	last_block = i_size_read(inode) >> blkbits;
	while ((probe_block + blocks_per_page) <= last_block &&
			page_no < sis->max) {
		unsigned block_in_page;
		sector_t first_block;

		cond_resched();

		first_block = probe_block;
		ret = bmap(inode, &first_block);
		if (ret || !first_block)
			goto bad_bmap;

		/*
		 * It must be PAGE_SIZE aligned on-disk
		 */
		if (first_block & (blocks_per_page - 1)) {
			probe_block++;
			goto reprobe;
		}

		for (block_in_page = 1; block_in_page < blocks_per_page;
					block_in_page++) {
			sector_t block;

			block = probe_block + block_in_page;
			ret = bmap(inode, &block);
			if (ret || !block)
				goto bad_bmap;

			if (block != first_block + block_in_page) {
				/* Discontiguity */
				probe_block++;
				goto reprobe;
			}
		}

		first_block >>= (PAGE_SHIFT - blkbits);
		if (page_no) {	/* exclude the header page */
			if (first_block < lowest_block)
				lowest_block = first_block;
			if (first_block > highest_block)
				highest_block = first_block;
		}

		/*
		 * We found a PAGE_SIZE-length, PAGE_SIZE-aligned run of blocks
		 */
		ret = add_swap_extent(sis, page_no, 1, first_block);
		if (ret < 0)
			goto out;
		nr_extents += ret;
		page_no++;
		probe_block += blocks_per_page;
reprobe:
		continue;
	}
	ret = nr_extents;
	*span = 1 + highest_block - lowest_block;
	if (page_no == 0)
		page_no = 1;	/* force Empty message */
	sis->max = page_no;
	sis->pages = page_no - 1;
	sis->highest_bit = page_no - 1;
out:
	return ret;
bad_bmap:
	pr_err("swapon: swapfile has holes\n");
	ret = -EINVAL;
	goto out;
}

/*
 * We may have stale swap cache pages in memory: notice
 * them here and get rid of the unnecessary final write.
 */
int swap_writepage(struct page *page, struct writeback_control *wbc)
{
	int ret = 0;

	if (try_to_free_swap(page)) {
		unlock_page(page);
		goto out;
	}
	/*
	 * Arch code may have to preserve more data than just the page
	 * contents, e.g. memory tags.
	 */
	ret = arch_prepare_to_swap(page);
	if (ret) {
		set_page_dirty(page);
		unlock_page(page);
		goto out;
	}
	if (frontswap_store(page) == 0) {
		set_page_writeback(page);
		unlock_page(page);
		end_page_writeback(page);
		goto out;
	}
	ret = __swap_writepage(page, wbc, end_swap_bio_write);
out:
	return ret;
}

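/*
 * Account a swap-out in vmstat; a THP swap-out is additionally counted
 * under THP_SWPOUT.
 */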
static inline void count_swpout_vm_event(struct page *page)
{
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
	if (unlikely(PageTransHuge(page)))
		count_vm_event(THP_SWPOUT);
#endif
	count_vm_events(PSWPOUT, thp_nr_pages(page));
}

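/*
 * Attribute the swap-out bio to the block cgroup of the memory cgroup that
 * owns the page, so the I/O is throttled against the right cgroup.
 */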
#if defined(CONFIG_MEMCG) && defined(CONFIG_BLK_CGROUP)
static void bio_associate_blkg_from_page(struct bio *bio, struct page *page)
{
	struct cgroup_subsys_state *css;
	struct mem_cgroup *memcg;

	memcg = page_memcg(page);
	if (!memcg)
		return;

	rcu_read_lock();
	css = cgroup_e_css(memcg->css.cgroup, &io_cgrp_subsys);
	bio_associate_blkg_from_css(bio, css);
	rcu_read_unlock();
}
#else
#define bio_associate_blkg_from_page(bio, page)	do { } while (0)
#endif /* CONFIG_MEMCG && CONFIG_BLK_CGROUP */

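/*
 * Start write-out of a single swap-cache page.  SWP_FS_OPS swapfiles (e.g.
 * swap over NFS) are written through the filesystem's ->direct_IO();
 * otherwise bdev_write_page() is tried first and, failing that, a bio is
 * allocated and submitted, to be completed by end_write_func (normally
 * end_swap_bio_write()).
 */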
int __swap_writepage(struct page *page, struct writeback_control *wbc,
		bio_end_io_t end_write_func)
{
	struct bio *bio;
	int ret;
	struct swap_info_struct *sis = page_swap_info(page);

	VM_BUG_ON_PAGE(!PageSwapCache(page), page);
	if (data_race(sis->flags & SWP_FS_OPS)) {
		struct kiocb kiocb;
		struct file *swap_file = sis->swap_file;
		struct address_space *mapping = swap_file->f_mapping;
		struct bio_vec bv = {
			.bv_page = page,
			.bv_len = PAGE_SIZE,
			.bv_offset = 0
		};
		struct iov_iter from;

		iov_iter_bvec(&from, WRITE, &bv, 1, PAGE_SIZE);
		init_sync_kiocb(&kiocb, swap_file);
		kiocb.ki_pos = page_file_offset(page);

		set_page_writeback(page);
		unlock_page(page);
		ret = mapping->a_ops->direct_IO(&kiocb, &from);
		if (ret == PAGE_SIZE) {
			count_vm_event(PSWPOUT);
			ret = 0;
		} else {
			/*
			 * In the case of swap-over-nfs, this can be a
			 * temporary failure if the system has limited
			 * memory for allocating transmit buffers.
			 * Mark the page dirty and avoid
			 * folio_rotate_reclaimable(), but rate-limit the
			 * messages.  Do not flag PageError like the
			 * normal direct-to-bio case, as the failure
			 * could be temporary.
			 */
			set_page_dirty(page);
			ClearPageReclaim(page);
			pr_err_ratelimited("Write error on dio swapfile (%llu)\n",
					   page_file_offset(page));
		}
		end_page_writeback(page);
		return ret;
	}

	ret = bdev_write_page(sis->bdev, swap_page_sector(page), page, wbc);
	if (!ret) {
		count_swpout_vm_event(page);
		return 0;
	}

	bio = bio_alloc(GFP_NOIO, 1);
	bio_set_dev(bio, sis->bdev);
	bio->bi_iter.bi_sector = swap_page_sector(page);
	bio->bi_opf = REQ_OP_WRITE | REQ_SWAP | wbc_to_write_flags(wbc);
	bio->bi_end_io = end_write_func;
	bio_add_page(bio, page, thp_size(page), 0);

	bio_associate_blkg_from_page(bio, page);
	count_swpout_vm_event(page);
	set_page_writeback(page);
	unlock_page(page);
	submit_bio(bio);

	return 0;
}

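/*
 * Read one page back in from swap.  Frontswap and SWP_FS_OPS swapfiles are
 * handled first; SWP_SYNCHRONOUS_IO devices try bdev_read_page(); everything
 * else submits a bio.  When @synchronous is true the caller waits for the
 * bio to complete (polling the queue where supported) instead of returning
 * before the read finishes.
 */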
int swap_readpage(struct page *page, bool synchronous)
{
	struct bio *bio;
	int ret = 0;
	struct swap_info_struct *sis = page_swap_info(page);
	unsigned long pflags;

	VM_BUG_ON_PAGE(!PageSwapCache(page) && !synchronous, page);
	VM_BUG_ON_PAGE(!PageLocked(page), page);
	VM_BUG_ON_PAGE(PageUptodate(page), page);

	/*
	 * Count submission time as memory stall. When the device is congested,
	 * or the submitting cgroup IO-throttled, submission can be a
	 * significant part of overall IO time.
	 */
	psi_memstall_enter(&pflags);
	delayacct_swapin_start();

	if (frontswap_load(page) == 0) {
		SetPageUptodate(page);
		unlock_page(page);
		goto out;
	}

	if (data_race(sis->flags & SWP_FS_OPS)) {
		struct file *swap_file = sis->swap_file;
		struct address_space *mapping = swap_file->f_mapping;

		ret = mapping->a_ops->readpage(swap_file, page);
		if (!ret)
			count_vm_event(PSWPIN);
		goto out;
	}

	if (sis->flags & SWP_SYNCHRONOUS_IO) {
		ret = bdev_read_page(sis->bdev, swap_page_sector(page), page);
		if (!ret) {
			if (trylock_page(page)) {
				swap_slot_free_notify(page);
				unlock_page(page);
			}

			count_vm_event(PSWPIN);
			goto out;
		}
	}

	ret = 0;
	bio = bio_alloc(GFP_KERNEL, 1);
	bio_set_dev(bio, sis->bdev);
	bio->bi_opf = REQ_OP_READ;
	bio->bi_iter.bi_sector = swap_page_sector(page);
	bio->bi_end_io = end_swap_bio_read;
	bio_add_page(bio, page, thp_size(page), 0);
	/*
	 * Keep this task valid during swap readpage because the oom killer may
	 * attempt to access it in the page fault retry time check.
	 */
	if (synchronous) {
		bio->bi_opf |= REQ_POLLED;
		get_task_struct(current);
		bio->bi_private = current;
	}
	count_vm_event(PSWPIN);
	bio_get(bio);
	submit_bio(bio);
	while (synchronous) {
		set_current_state(TASK_UNINTERRUPTIBLE);
		if (!READ_ONCE(bio->bi_private))
			break;

		if (!bio_poll(bio, NULL, 0))
			blk_io_schedule();
	}
	__set_current_state(TASK_RUNNING);
	bio_put(bio);

out:
	psi_memstall_leave(&pflags);
	delayacct_swapin_end();
	return ret;
}

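/*
 * set_page_dirty() for pages in the swap cache: SWP_FS_OPS swapfiles delegate
 * to the backing filesystem's ->set_page_dirty(), everything else just sets
 * the dirty flag without tagging the page for writeback.
 */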
int swap_set_page_dirty(struct page *page)
{
	struct swap_info_struct *sis = page_swap_info(page);

	if (data_race(sis->flags & SWP_FS_OPS)) {
		struct address_space *mapping = sis->swap_file->f_mapping;

		VM_BUG_ON_PAGE(!PageSwapCache(page), page);
		return mapping->a_ops->set_page_dirty(page);
	} else {
		return __set_page_dirty_no_writeback(page);
	}
}