// SPDX-License-Identifier: GPL-2.0
/*
 * linux/mm/page_io.c
 *
 * Copyright (C) 1991, 1992, 1993, 1994 Linus Torvalds
 *
 * Swap reorganised 29.12.95,
 * Asynchronous swapping added 30.12.95. Stephen Tweedie
 * Removed race in async swapping. 14.4.1996. Bruno Haible
 * Add swap of shared pages through the page cache. 20.2.1998. Stephen Tweedie
 * Always use brw_page, life becomes simpler. 12 May 1998 Eric Biederman
 */

#include <linux/mm.h>
#include <linux/kernel_stat.h>
#include <linux/gfp.h>
#include <linux/pagemap.h>
#include <linux/swap.h>
#include <linux/bio.h>
#include <linux/swapops.h>
#include <linux/buffer_head.h>
#include <linux/writeback.h>
#include <linux/frontswap.h>
#include <linux/blkdev.h>
#include <linux/psi.h>
#include <linux/uio.h>
#include <linux/sched/task.h>

void end_swap_bio_write(struct bio *bio)
{
	struct page *page = bio_first_page_all(bio);

	if (bio->bi_status) {
		SetPageError(page);
		/*
		 * We failed to write the page out to swap-space.
		 * Re-dirty the page in order to avoid it being reclaimed.
		 * Also print a dire warning that things will go BAD (tm)
		 * very quickly.
		 *
		 * Also clear PG_reclaim so that rotate_reclaimable_page()
		 * does not move the page to the tail of the inactive list.
		 */
		set_page_dirty(page);
		pr_alert_ratelimited("Write-error on swap-device (%u:%u:%llu)\n",
				     MAJOR(bio_dev(bio)), MINOR(bio_dev(bio)),
				     (unsigned long long)bio->bi_iter.bi_sector);
		ClearPageReclaim(page);
	}
	end_page_writeback(page);
	bio_put(bio);
}

static void swap_slot_free_notify(struct page *page)
{
	struct swap_info_struct *sis;
	struct gendisk *disk;
	swp_entry_t entry;

	/*
	 * There is no guarantee that the page is in swap cache - the software
	 * suspend code (at least) uses end_swap_bio_read() against a non-
	 * swapcache page. So we must check PG_swapcache before proceeding with
	 * this optimization.
	 */
	if (unlikely(!PageSwapCache(page)))
		return;

	sis = page_swap_info(page);
	if (data_race(!(sis->flags & SWP_BLKDEV)))
		return;

	/*
	 * The swap subsystem performs lazy swap slot freeing,
	 * expecting that the page will be swapped out again.
	 * So we can avoid an unnecessary write if the page
	 * isn't redirtied.
	 * This is good for real swap storage because we can
	 * reduce unnecessary I/O and enhance wear-leveling
	 * if an SSD is used as the swap device.
	 * But if an in-memory swap device (e.g. zram) is used,
	 * this causes a duplicated copy between uncompressed
	 * data in VM-owned memory and compressed data in
	 * zram-owned memory. So let's free the zram-owned memory
	 * and make the VM-owned decompressed page *dirty*,
	 * so the page should be swapped out somewhere again if
	 * we again wish to reclaim it.
	 */
	disk = sis->bdev->bd_disk;
	entry.val = page_private(page);
	if (disk->fops->swap_slot_free_notify && __swap_count(entry) == 1) {
		unsigned long offset;

		offset = swp_offset(entry);

		SetPageDirty(page);
		disk->fops->swap_slot_free_notify(sis->bdev, offset);
	}
}

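/*
 * Illustrative sketch (not part of this file): for the notification
 * above to reach a driver, the driver supplies the optional
 * swap_slot_free_notify hook in its block_device_operations. An
 * in-memory device like zram does this so it can drop its compressed
 * copy as soon as the slot's sole owner has read the data back.
 * Roughly, with all "mydrv" names hypothetical:
 *
 *	static void mydrv_swap_slot_free_notify(struct block_device *bdev,
 *						unsigned long offset)
 *	{
 *		struct mydrv *drv = bdev->bd_disk->private_data;
 *
 *		mydrv_free_slot(drv, offset);	// hypothetical helper
 *	}
 *
 *	static const struct block_device_operations mydrv_fops = {
 *		.owner			= THIS_MODULE,
 *		.swap_slot_free_notify	= mydrv_swap_slot_free_notify,
 *	};
 */
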
static void end_swap_bio_read(struct bio *bio)
{
	struct page *page = bio_first_page_all(bio);
	struct task_struct *waiter = bio->bi_private;

	if (bio->bi_status) {
		SetPageError(page);
		ClearPageUptodate(page);
		pr_alert_ratelimited("Read-error on swap-device (%u:%u:%llu)\n",
				     MAJOR(bio_dev(bio)), MINOR(bio_dev(bio)),
				     (unsigned long long)bio->bi_iter.bi_sector);
		goto out;
	}

	SetPageUptodate(page);
	swap_slot_free_notify(page);
out:
	unlock_page(page);
	WRITE_ONCE(bio->bi_private, NULL);
	bio_put(bio);
	if (waiter) {
		blk_wake_io_task(waiter);
		put_task_struct(waiter);
	}
}

int generic_swapfile_activate(struct swap_info_struct *sis,
				struct file *swap_file,
				sector_t *span)
{
	struct address_space *mapping = swap_file->f_mapping;
	struct inode *inode = mapping->host;
	unsigned blocks_per_page;
	unsigned long page_no;
	unsigned blkbits;
	sector_t probe_block;
	sector_t last_block;
	sector_t lowest_block = -1;
	sector_t highest_block = 0;
	int nr_extents = 0;
	int ret;

	blkbits = inode->i_blkbits;
	blocks_per_page = PAGE_SIZE >> blkbits;

	/*
	 * Map all the blocks into the extent tree. This code doesn't try
	 * to be very smart.
	 */
	probe_block = 0;
	page_no = 0;
	last_block = i_size_read(inode) >> blkbits;
	while ((probe_block + blocks_per_page) <= last_block &&
			page_no < sis->max) {
		unsigned block_in_page;
		sector_t first_block;

		cond_resched();

		first_block = probe_block;
		ret = bmap(inode, &first_block);
		if (ret || !first_block)
			goto bad_bmap;

		/*
		 * It must be PAGE_SIZE aligned on-disk
		 */
		if (first_block & (blocks_per_page - 1)) {
			probe_block++;
			goto reprobe;
		}

		for (block_in_page = 1; block_in_page < blocks_per_page;
					block_in_page++) {
			sector_t block;

			block = probe_block + block_in_page;
			ret = bmap(inode, &block);
			if (ret || !block)
				goto bad_bmap;

			if (block != first_block + block_in_page) {
				/* Discontiguity */
				probe_block++;
				goto reprobe;
			}
		}

		first_block >>= (PAGE_SHIFT - blkbits);
		if (page_no) {	/* exclude the header page */
			if (first_block < lowest_block)
				lowest_block = first_block;
			if (first_block > highest_block)
				highest_block = first_block;
		}

		/*
		 * We found a PAGE_SIZE-length, PAGE_SIZE-aligned run of blocks
		 */
		ret = add_swap_extent(sis, page_no, 1, first_block);
		if (ret < 0)
			goto out;
		nr_extents += ret;
		page_no++;
		probe_block += blocks_per_page;
reprobe:
		continue;
	}
	ret = nr_extents;
	*span = 1 + highest_block - lowest_block;
	if (page_no == 0)
		page_no = 1;	/* force Empty message */
	sis->max = page_no;
	sis->pages = page_no - 1;
	sis->highest_bit = page_no - 1;
out:
	return ret;
bad_bmap:
	pr_err("swapon: swapfile has holes\n");
	ret = -EINVAL;
	goto out;
}

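/*
 * Illustrative sketch (not part of this file): swapon() falls back to
 * the helper above when the swapfile's filesystem provides no
 * ->swap_activate address_space operation. A filesystem whose file
 * blocks map 1:1 onto the backing device could also delegate to it
 * from its own hook, e.g. (the "myfs" name is hypothetical):
 *
 *	static int myfs_swap_activate(struct swap_info_struct *sis,
 *				      struct file *swap_file, sector_t *span)
 *	{
 *		return generic_swapfile_activate(sis, swap_file, span);
 *	}
 *
 * Filesystems that cannot use bmap() instead walk their own mappings
 * and build the extent list directly with add_swap_extent().
 */
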
/*
 * We may have stale swap cache pages in memory: notice
 * them here and get rid of the unnecessary final write.
 */
int swap_writepage(struct page *page, struct writeback_control *wbc)
{
	int ret = 0;

	if (try_to_free_swap(page)) {
		unlock_page(page);
		goto out;
	}
	/*
	 * Arch code may have to preserve more data than just the page
	 * contents, e.g. memory tags (see the note after this function).
	 */
	ret = arch_prepare_to_swap(page);
	if (ret) {
		set_page_dirty(page);
		unlock_page(page);
		goto out;
	}
	if (frontswap_store(page) == 0) {
		set_page_writeback(page);
		unlock_page(page);
		end_page_writeback(page);
		goto out;
	}
	ret = __swap_writepage(page, wbc, end_swap_bio_write);
out:
	return ret;
}

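/*
 * Illustrative note on arch_prepare_to_swap(), called from
 * swap_writepage() above: it is an arch hook with a generic no-op
 * fallback, roughly (from <linux/pgtable.h>):
 *
 *	#ifndef __HAVE_ARCH_PREPARE_TO_SWAP
 *	static inline int arch_prepare_to_swap(struct page *page)
 *	{
 *		return 0;
 *	}
 *	#endif
 *
 * arm64 overrides it to save MTE memory tags before the page contents
 * go out to swap, so they can be restored on swap-in.
 */
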
static inline void count_swpout_vm_event(struct page *page)
{
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
	if (unlikely(PageTransHuge(page)))
		count_vm_event(THP_SWPOUT);
#endif
	count_vm_events(PSWPOUT, thp_nr_pages(page));
}

#if defined(CONFIG_MEMCG) && defined(CONFIG_BLK_CGROUP)
static void bio_associate_blkg_from_page(struct bio *bio, struct page *page)
{
	struct cgroup_subsys_state *css;
	struct mem_cgroup *memcg;

	memcg = page_memcg(page);
	if (!memcg)
		return;

	rcu_read_lock();
	css = cgroup_e_css(memcg->css.cgroup, &io_cgrp_subsys);
	bio_associate_blkg_from_css(bio, css);
	rcu_read_unlock();
}
#else
#define bio_associate_blkg_from_page(bio, page)	do { } while (0)
#endif /* CONFIG_MEMCG && CONFIG_BLK_CGROUP */

int __swap_writepage(struct page *page, struct writeback_control *wbc,
		bio_end_io_t end_write_func)
{
	struct bio *bio;
	int ret;
	struct swap_info_struct *sis = page_swap_info(page);

	VM_BUG_ON_PAGE(!PageSwapCache(page), page);
	if (data_race(sis->flags & SWP_FS_OPS)) {
		struct kiocb kiocb;
		struct file *swap_file = sis->swap_file;
		struct address_space *mapping = swap_file->f_mapping;
		struct bio_vec bv = {
			.bv_page = page,
			.bv_len = PAGE_SIZE,
			.bv_offset = 0
		};
		struct iov_iter from;

		iov_iter_bvec(&from, WRITE, &bv, 1, PAGE_SIZE);
		init_sync_kiocb(&kiocb, swap_file);
		kiocb.ki_pos = page_file_offset(page);

		set_page_writeback(page);
		unlock_page(page);
		ret = mapping->a_ops->direct_IO(&kiocb, &from);
		if (ret == PAGE_SIZE) {
			count_vm_event(PSWPOUT);
			ret = 0;
		} else {
			/*
			 * In the case of swap-over-nfs, this can be a
			 * temporary failure if the system has limited
			 * memory for allocating transmit buffers.
			 * Mark the page dirty and avoid
			 * rotate_reclaimable_page(), and rate-limit the
			 * messages, but do not flag PageError like the
			 * normal direct-to-bio case, as the failure could
			 * be temporary.
			 */
			set_page_dirty(page);
			ClearPageReclaim(page);
			pr_err_ratelimited("Write error on dio swapfile (%llu)\n",
					   page_file_offset(page));
		}
		end_page_writeback(page);
		return ret;
	}

	ret = bdev_write_page(sis->bdev, swap_page_sector(page), page, wbc);
	if (!ret) {
		count_swpout_vm_event(page);
		return 0;
	}

	bio = bio_alloc(GFP_NOIO, 1);
	bio_set_dev(bio, sis->bdev);
	bio->bi_iter.bi_sector = swap_page_sector(page);
	bio->bi_opf = REQ_OP_WRITE | REQ_SWAP | wbc_to_write_flags(wbc);
	bio->bi_end_io = end_write_func;
	bio_add_page(bio, page, thp_size(page), 0);

	bio_associate_blkg_from_page(bio, page);
	count_swpout_vm_event(page);
	set_page_writeback(page);
	unlock_page(page);
	submit_bio(bio);

	return 0;
}

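/*
 * Illustrative note: the end_write_func argument lets callers other
 * than swap_writepage() reuse this path with their own completion
 * handler. zswap's writeback, for example, submits a decompressed
 * page roughly as:
 *
 *	struct writeback_control wbc = {
 *		.sync_mode = WB_SYNC_NONE,
 *	};
 *
 *	__swap_writepage(page, &wbc, end_swap_bio_write);
 */
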
int swap_readpage(struct page *page, bool synchronous)
{
	struct bio *bio;
	int ret = 0;
	struct swap_info_struct *sis = page_swap_info(page);
	blk_qc_t qc;
	struct gendisk *disk;
	unsigned long pflags;

	VM_BUG_ON_PAGE(!PageSwapCache(page) && !synchronous, page);
	VM_BUG_ON_PAGE(!PageLocked(page), page);
	VM_BUG_ON_PAGE(PageUptodate(page), page);

	/*
	 * Count submission time as memory stall. When the device is congested,
	 * or the submitting cgroup is IO-throttled, submission can be a
	 * significant part of overall IO time.
	 */
	psi_memstall_enter(&pflags);

	if (frontswap_load(page) == 0) {
		SetPageUptodate(page);
		unlock_page(page);
		goto out;
	}

	if (data_race(sis->flags & SWP_FS_OPS)) {
		struct file *swap_file = sis->swap_file;
		struct address_space *mapping = swap_file->f_mapping;

		ret = mapping->a_ops->readpage(swap_file, page);
		if (!ret)
			count_vm_event(PSWPIN);
		goto out;
	}

	if (sis->flags & SWP_SYNCHRONOUS_IO) {
		ret = bdev_read_page(sis->bdev, swap_page_sector(page), page);
		if (!ret) {
			if (trylock_page(page)) {
				swap_slot_free_notify(page);
				unlock_page(page);
			}

			count_vm_event(PSWPIN);
			goto out;
		}
	}

	ret = 0;
	bio = bio_alloc(GFP_KERNEL, 1);
	bio_set_dev(bio, sis->bdev);
	bio->bi_opf = REQ_OP_READ;
	bio->bi_iter.bi_sector = swap_page_sector(page);
	bio->bi_end_io = end_swap_bio_read;
	bio_add_page(bio, page, thp_size(page), 0);

	disk = bio->bi_bdev->bd_disk;
	/*
	 * Keep this task valid during swap readpage because the oom killer may
	 * attempt to access it in the page fault retry time check.
	 */
	if (synchronous) {
		bio->bi_opf |= REQ_HIPRI;
		get_task_struct(current);
		bio->bi_private = current;
	}
	count_vm_event(PSWPIN);
	bio_get(bio);
	qc = submit_bio(bio);
	while (synchronous) {
		set_current_state(TASK_UNINTERRUPTIBLE);
		if (!READ_ONCE(bio->bi_private))
			break;

		if (!blk_poll(disk->queue, qc, true))
			blk_io_schedule();
	}
	__set_current_state(TASK_RUNNING);
	bio_put(bio);

out:
	psi_memstall_leave(&pflags);
	return ret;
}

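/*
 * Illustrative note: the synchronous path above serves very fast
 * block devices, where polling beats sleeping on the completion.
 * Roughly, the page fault code takes it like this (a sketch, not the
 * exact do_swap_page() logic):
 *
 *	if (data_race(si->flags & SWP_SYNCHRONOUS_IO) &&
 *	    __swap_count(entry) == 1) {
 *		// page: freshly allocated and locked for this entry
 *		swap_readpage(page, true);
 *	}
 */
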
int swap_set_page_dirty(struct page *page)
{
	struct swap_info_struct *sis = page_swap_info(page);

	if (data_race(sis->flags & SWP_FS_OPS)) {
		struct address_space *mapping = sis->swap_file->f_mapping;

		VM_BUG_ON_PAGE(!PageSwapCache(page), page);
		return mapping->a_ops->set_page_dirty(page);
	} else {
		return __set_page_dirty_no_writeback(page);
	}
}