// SPDX-License-Identifier: GPL-2.0
/*
 *  linux/mm/page_io.c
 *
 *  Copyright (C) 1991, 1992, 1993, 1994  Linus Torvalds
 *
 *  Swap reorganised 29.12.95,
 *  Asynchronous swapping added 30.12.95. Stephen Tweedie
 *  Removed race in async swapping. 14.4.1996. Bruno Haible
 *  Add swap of shared pages through the page cache. 20.2.1998. Stephen Tweedie
 *  Always use brw_page, life becomes simpler. 12 May 1998 Eric Biederman
 */

#include <linux/mm.h>
#include <linux/kernel_stat.h>
#include <linux/gfp.h>
#include <linux/pagemap.h>
#include <linux/swap.h>
#include <linux/bio.h>
#include <linux/swapops.h>
#include <linux/buffer_head.h>
#include <linux/writeback.h>
#include <linux/frontswap.h>
#include <linux/blkdev.h>
#include <linux/psi.h>
#include <linux/uio.h>
#include <linux/sched/task.h>
#include <asm/pgtable.h>

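/*
 * Allocate a single-segment bio for swap I/O on @page. map_swap_page()
 * returns the page's location in the swap area in page-size units; it is
 * converted to 512-byte sectors below. Returns NULL if the bio allocation
 * fails.
 */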
static struct bio *get_swap_bio(gfp_t gfp_flags,
				struct page *page, bio_end_io_t end_io)
{
	struct bio *bio;

	bio = bio_alloc(gfp_flags, 1);
	if (bio) {
		struct block_device *bdev;

		bio->bi_iter.bi_sector = map_swap_page(page, &bdev);
		bio_set_dev(bio, bdev);
		bio->bi_iter.bi_sector <<= PAGE_SHIFT - 9;
		bio->bi_end_io = end_io;

		bio_add_page(bio, page, PAGE_SIZE * hpage_nr_pages(page), 0);
	}
	return bio;
}

void end_swap_bio_write(struct bio *bio)
{
	struct page *page = bio_first_page_all(bio);

	if (bio->bi_status) {
		SetPageError(page);
		/*
		 * We failed to write the page out to swap-space.
		 * Re-dirty the page in order to avoid it being reclaimed.
		 * Also print a dire warning that things will go BAD (tm)
		 * very quickly.
		 *
		 * Also clear PG_reclaim to avoid rotate_reclaimable_page()
		 */
		set_page_dirty(page);
		pr_alert("Write-error on swap-device (%u:%u:%llu)\n",
			 MAJOR(bio_dev(bio)), MINOR(bio_dev(bio)),
			 (unsigned long long)bio->bi_iter.bi_sector);
		ClearPageReclaim(page);
	}
	end_page_writeback(page);
	bio_put(bio);
}

static void swap_slot_free_notify(struct page *page)
{
	struct swap_info_struct *sis;
	struct gendisk *disk;
	swp_entry_t entry;

	/*
	 * There is no guarantee that the page is in swap cache - the software
	 * suspend code (at least) uses end_swap_bio_read() against a non-
	 * swapcache page.  So we must check PG_swapcache before proceeding with
	 * this optimization.
	 */
	if (unlikely(!PageSwapCache(page)))
		return;

	sis = page_swap_info(page);
	if (!(sis->flags & SWP_BLKDEV))
		return;

	/*
	 * The swap subsystem performs lazy swap slot freeing,
	 * expecting that the page will be swapped out again.
	 * So we can avoid an unnecessary write if the page
	 * isn't redirtied.
	 * This is good for real swap storage because we can
	 * reduce unnecessary I/O and enhance wear-leveling
	 * if an SSD is used as the swap device.
	 * But if an in-memory swap device (e.g. zram) is used,
	 * this causes a duplicated copy between uncompressed
	 * data in VM-owned memory and compressed data in
	 * zram-owned memory.  So let's free the zram-owned memory
	 * and make the VM-owned decompressed page *dirty*,
	 * so the page will be swapped out again if we
	 * later want to reclaim it.
	 */
	disk = sis->bdev->bd_disk;
	entry.val = page_private(page);
	if (disk->fops->swap_slot_free_notify && __swap_count(entry) == 1) {
		unsigned long offset;

		offset = swp_offset(entry);

		SetPageDirty(page);
		disk->fops->swap_slot_free_notify(sis->bdev,
						  offset);
	}
}

static void end_swap_bio_read(struct bio *bio)
{
	struct page *page = bio_first_page_all(bio);
	struct task_struct *waiter = bio->bi_private;

	if (bio->bi_status) {
		SetPageError(page);
		ClearPageUptodate(page);
		pr_alert("Read-error on swap-device (%u:%u:%llu)\n",
			 MAJOR(bio_dev(bio)), MINOR(bio_dev(bio)),
			 (unsigned long long)bio->bi_iter.bi_sector);
		goto out;
	}

	SetPageUptodate(page);
	swap_slot_free_notify(page);
out:
	unlock_page(page);
	WRITE_ONCE(bio->bi_private, NULL);
	bio_put(bio);
	if (waiter) {
		blk_wake_io_task(waiter);
		put_task_struct(waiter);
	}
}

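/*
 * Walk the swap file with bmap() and register every PAGE_SIZE-sized,
 * PAGE_SIZE-aligned, physically contiguous run of blocks as a swap extent,
 * so that later swap I/O can go straight to the block device. In this tree
 * it is reached from setup_swap_extents() when the backing filesystem does
 * not supply its own ->swap_activate() method.
 */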
int generic_swapfile_activate(struct swap_info_struct *sis,
				struct file *swap_file,
				sector_t *span)
{
	struct address_space *mapping = swap_file->f_mapping;
	struct inode *inode = mapping->host;
	unsigned blocks_per_page;
	unsigned long page_no;
	unsigned blkbits;
	sector_t probe_block;
	sector_t last_block;
	sector_t lowest_block = -1;
	sector_t highest_block = 0;
	int nr_extents = 0;
	int ret;

	blkbits = inode->i_blkbits;
	blocks_per_page = PAGE_SIZE >> blkbits;

	/*
	 * Map all the blocks into the extent tree.  This code doesn't try
	 * to be very smart.
	 */
	probe_block = 0;
	page_no = 0;
	last_block = i_size_read(inode) >> blkbits;
	while ((probe_block + blocks_per_page) <= last_block &&
			page_no < sis->max) {
		unsigned block_in_page;
		sector_t first_block;

		cond_resched();

		first_block = probe_block;
		ret = bmap(inode, &first_block);
		if (ret || !first_block)
			goto bad_bmap;

		/*
		 * It must be PAGE_SIZE aligned on-disk
		 */
		if (first_block & (blocks_per_page - 1)) {
			probe_block++;
			goto reprobe;
		}
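		/*
		 * Worked example (assuming 4 KiB pages and a 1 KiB
		 * filesystem block size): blocks_per_page is 4 and the
		 * mask above is 3, so a run starting at on-disk block 6
		 * is rejected as unaligned, while one starting at block 8
		 * is accepted.
		 */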

		for (block_in_page = 1; block_in_page < blocks_per_page;
					block_in_page++) {
			sector_t block;

			block = probe_block + block_in_page;
			ret = bmap(inode, &block);
			if (ret || !block)
				goto bad_bmap;

			if (block != first_block + block_in_page) {
				/* Discontiguity */
				probe_block++;
				goto reprobe;
			}
		}

		first_block >>= (PAGE_SHIFT - blkbits);
		if (page_no) {	/* exclude the header page */
			if (first_block < lowest_block)
				lowest_block = first_block;
			if (first_block > highest_block)
				highest_block = first_block;
		}

		/*
		 * We found a PAGE_SIZE-length, PAGE_SIZE-aligned run of blocks
		 */
		ret = add_swap_extent(sis, page_no, 1, first_block);
		if (ret < 0)
			goto out;
		nr_extents += ret;
		page_no++;
		probe_block += blocks_per_page;
reprobe:
		continue;
	}
	ret = nr_extents;
	*span = 1 + highest_block - lowest_block;
	if (page_no == 0)
		page_no = 1;	/* force Empty message */
	sis->max = page_no;
	sis->pages = page_no - 1;
	sis->highest_bit = page_no - 1;
out:
	return ret;
bad_bmap:
	pr_err("swapon: swapfile has holes\n");
	ret = -EINVAL;
	goto out;
}

/*
 * We may have stale swap cache pages in memory: notice
 * them here and get rid of the unnecessary final write.
 */
int swap_writepage(struct page *page, struct writeback_control *wbc)
{
	int ret = 0;

	if (try_to_free_swap(page)) {
		unlock_page(page);
		goto out;
	}
	if (frontswap_store(page) == 0) {
		set_page_writeback(page);
		unlock_page(page);
		end_page_writeback(page);
		goto out;
	}
	ret = __swap_writepage(page, wbc, end_swap_bio_write);
out:
	return ret;
}

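/*
 * Convert the page's offset within the swap area (in page-size units)
 * to a 512-byte sector number. For example, with 4 KiB pages PAGE_SHIFT
 * is 12, so the offset is shifted left by 3: each page covers eight
 * sectors, and page offset 5 maps to sector 40.
 */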
static sector_t swap_page_sector(struct page *page)
{
	return (sector_t)__page_file_index(page) << (PAGE_SHIFT - 9);
}

static inline void count_swpout_vm_event(struct page *page)
{
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
	if (unlikely(PageTransHuge(page)))
		count_vm_event(THP_SWPOUT);
#endif
	count_vm_events(PSWPOUT, hpage_nr_pages(page));
}

int __swap_writepage(struct page *page, struct writeback_control *wbc,
		bio_end_io_t end_write_func)
{
	struct bio *bio;
	int ret;
	struct swap_info_struct *sis = page_swap_info(page);

	VM_BUG_ON_PAGE(!PageSwapCache(page), page);
	if (sis->flags & SWP_FS) {
		struct kiocb kiocb;
		struct file *swap_file = sis->swap_file;
		struct address_space *mapping = swap_file->f_mapping;
		struct bio_vec bv = {
			.bv_page = page,
			.bv_len = PAGE_SIZE,
			.bv_offset = 0
		};
		struct iov_iter from;

		iov_iter_bvec(&from, WRITE, &bv, 1, PAGE_SIZE);
		init_sync_kiocb(&kiocb, swap_file);
		kiocb.ki_pos = page_file_offset(page);

		set_page_writeback(page);
		unlock_page(page);
		ret = mapping->a_ops->direct_IO(&kiocb, &from);
		if (ret == PAGE_SIZE) {
			count_vm_event(PSWPOUT);
			ret = 0;
		} else {
			/*
			 * In the case of swap-over-nfs, this can be a
			 * temporary failure if the system has limited
			 * memory for allocating transmit buffers.
			 * Mark the page dirty to avoid
			 * rotate_reclaimable_page(), and rate-limit the
			 * messages, but do not flag PageError like the
			 * normal direct-to-bio case, as the failure may
			 * be temporary.
			 */
			set_page_dirty(page);
			ClearPageReclaim(page);
			pr_err_ratelimited("Write error on dio swapfile (%llu)\n",
					   page_file_offset(page));
		}
		end_page_writeback(page);
		return ret;
	}

	ret = bdev_write_page(sis->bdev, swap_page_sector(page), page, wbc);
	if (!ret) {
		count_swpout_vm_event(page);
		return 0;
	}

	ret = 0;
	bio = get_swap_bio(GFP_NOIO, page, end_write_func);
	if (bio == NULL) {
		set_page_dirty(page);
		unlock_page(page);
		ret = -ENOMEM;
		goto out;
	}
	bio->bi_opf = REQ_OP_WRITE | REQ_SWAP | wbc_to_write_flags(wbc);
	bio_associate_blkg_from_page(bio, page);
	count_swpout_vm_event(page);
	set_page_writeback(page);
	unlock_page(page);
	submit_bio(bio);
out:
	return ret;
}

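/*
 * Read a page back in from swap. When @synchronous is true (callers pass
 * true for devices flagged SWP_SYNCHRONOUS_IO, e.g. zram-style in-memory
 * devices), busy-poll for completion below instead of returning before
 * the I/O is done.
 */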
int swap_readpage(struct page *page, bool synchronous)
{
	struct bio *bio;
	int ret = 0;
	struct swap_info_struct *sis = page_swap_info(page);
	blk_qc_t qc;
	struct gendisk *disk;
	unsigned long pflags;

	VM_BUG_ON_PAGE(!PageSwapCache(page) && !synchronous, page);
	VM_BUG_ON_PAGE(!PageLocked(page), page);
	VM_BUG_ON_PAGE(PageUptodate(page), page);

	/*
	 * Count submission time as memory stall. When the device is congested,
	 * or the submitting cgroup is IO-throttled, submission can be a
	 * significant part of overall IO time.
	 */
	psi_memstall_enter(&pflags);

	if (frontswap_load(page) == 0) {
		SetPageUptodate(page);
		unlock_page(page);
		goto out;
	}

	if (sis->flags & SWP_FS) {
		struct file *swap_file = sis->swap_file;
		struct address_space *mapping = swap_file->f_mapping;

		ret = mapping->a_ops->readpage(swap_file, page);
		if (!ret)
			count_vm_event(PSWPIN);
		goto out;
	}

	ret = bdev_read_page(sis->bdev, swap_page_sector(page), page);
	if (!ret) {
		if (trylock_page(page)) {
			swap_slot_free_notify(page);
			unlock_page(page);
		}

		count_vm_event(PSWPIN);
		goto out;
	}

	ret = 0;
	bio = get_swap_bio(GFP_KERNEL, page, end_swap_bio_read);
	if (bio == NULL) {
		unlock_page(page);
		ret = -ENOMEM;
		goto out;
	}
	disk = bio->bi_disk;
	/*
	 * Keep this task valid during swap readpage because the oom killer may
	 * attempt to access it in the page fault retry time check.
	 */
	bio_set_op_attrs(bio, REQ_OP_READ, 0);
	if (synchronous) {
		bio->bi_opf |= REQ_HIPRI;
		get_task_struct(current);
		bio->bi_private = current;
	}
	count_vm_event(PSWPIN);
	bio_get(bio);
	qc = submit_bio(bio);
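	/*
	 * Synchronous path: spin on blk_poll() until end_swap_bio_read()
	 * clears bio->bi_private, sleeping only when a poll round finds
	 * nothing. This trades CPU for latency on fast swap devices.
	 */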
	while (synchronous) {
		set_current_state(TASK_UNINTERRUPTIBLE);
		if (!READ_ONCE(bio->bi_private))
			break;

		if (!blk_poll(disk->queue, qc, true))
			io_schedule();
	}
	__set_current_state(TASK_RUNNING);
	bio_put(bio);

out:
	psi_memstall_leave(&pflags);
	return ret;
}

int swap_set_page_dirty(struct page *page)
{
	struct swap_info_struct *sis = page_swap_info(page);

	if (sis->flags & SWP_FS) {
		struct address_space *mapping = sis->swap_file->f_mapping;

		VM_BUG_ON_PAGE(!PageSwapCache(page), page);
		return mapping->a_ops->set_page_dirty(page);
	} else {
		return __set_page_dirty_no_writeback(page);
	}
}