/*
 * Compatibility functions which, if inlined, would bloat their callers
 * too much.  All callers of these functions should eventually be
 * converted to use folios directly.
 */

#include <linux/migrate.h>
#include <linux/pagemap.h>
#include <linux/swap.h>

struct address_space *page_mapping(struct page *page)
{
	return folio_mapping(page_folio(page));
}
EXPORT_SYMBOL(page_mapping);
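
/*
 * Every wrapper below follows the pattern shown by page_mapping():
 * convert the struct page to its containing folio with page_folio() and
 * forward to the folio equivalent.  Once a caller has been converted to
 * folios it can skip this hop entirely, e.g. (illustrative sketch, not
 * part of this file):
 *
 *	struct folio *folio = page_folio(page);
 *	struct address_space *mapping = folio_mapping(folio);
 */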

void unlock_page(struct page *page)
{
	folio_unlock(page_folio(page));
}
EXPORT_SYMBOL(unlock_page);

void end_page_writeback(struct page *page)
{
	folio_end_writeback(page_folio(page));
}
EXPORT_SYMBOL(end_page_writeback);

void wait_on_page_writeback(struct page *page)
{
	folio_wait_writeback(page_folio(page));
}
EXPORT_SYMBOL_GPL(wait_on_page_writeback);

void wait_for_stable_page(struct page *page)
{
	folio_wait_stable(page_folio(page));
}
EXPORT_SYMBOL_GPL(wait_for_stable_page);

bool page_mapped(struct page *page)
{
	return folio_mapped(page_folio(page));
}
EXPORT_SYMBOL(page_mapped);

void mark_page_accessed(struct page *page)
{
	folio_mark_accessed(page_folio(page));
}
EXPORT_SYMBOL(mark_page_accessed);

#ifdef CONFIG_MIGRATION
int migrate_page_move_mapping(struct address_space *mapping,
		struct page *newpage, struct page *page, int extra_count)
{
	return folio_migrate_mapping(mapping, page_folio(newpage),
			page_folio(page), extra_count);
}
EXPORT_SYMBOL(migrate_page_move_mapping);

void migrate_page_states(struct page *newpage, struct page *page)
{
	folio_migrate_flags(page_folio(newpage), page_folio(page));
}
EXPORT_SYMBOL(migrate_page_states);

void migrate_page_copy(struct page *newpage, struct page *page)
{
	folio_migrate_copy(page_folio(newpage), page_folio(page));
}
EXPORT_SYMBOL(migrate_page_copy);
#endif
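
/*
 * Taken together, the migration wrappers above let an unconverted
 * address_space_operations::migratepage implementation keep working.
 * A minimal sketch under that assumption (my_migratepage is
 * hypothetical, not part of this file):
 *
 *	static int my_migratepage(struct address_space *mapping,
 *			struct page *newpage, struct page *page,
 *			enum migrate_mode mode)
 *	{
 *		int rc;
 *
 *		rc = migrate_page_move_mapping(mapping, newpage, page, 0);
 *		if (rc != MIGRATEPAGE_SUCCESS)
 *			return rc;
 *		migrate_page_copy(newpage, page);
 *		return MIGRATEPAGE_SUCCESS;
 *	}
 */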

bool set_page_writeback(struct page *page)
{
	return folio_start_writeback(page_folio(page));
}
EXPORT_SYMBOL(set_page_writeback);

bool set_page_dirty(struct page *page)
{
	return folio_mark_dirty(page_folio(page));
}
EXPORT_SYMBOL(set_page_dirty);

int __set_page_dirty_nobuffers(struct page *page)
{
	return filemap_dirty_folio(page_mapping(page), page_folio(page));
}
EXPORT_SYMBOL(__set_page_dirty_nobuffers);
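
/*
 * __set_page_dirty_nobuffers() is typically installed directly as an
 * address_space_operations method by filesystems that do not use
 * buffer heads (my_aops is a hypothetical example):
 *
 *	static const struct address_space_operations my_aops = {
 *		.set_page_dirty	= __set_page_dirty_nobuffers,
 *		...
 *	};
 */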

bool clear_page_dirty_for_io(struct page *page)
{
	return folio_clear_dirty_for_io(page_folio(page));
}
EXPORT_SYMBOL(clear_page_dirty_for_io);
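
/*
 * The dirty/writeback helpers above cover the classic write-out
 * sequence.  A hedged sketch of an unconverted write-back path (not
 * from this file; assumes the usual page locking rules):
 *
 *	lock_page(page);
 *	if (clear_page_dirty_for_io(page)) {
 *		set_page_writeback(page);
 *		unlock_page(page);
 *		... submit the I/O; its completion handler calls
 *		end_page_writeback(page);
 *	} else {
 *		unlock_page(page);
 *	}
 */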

bool redirty_page_for_writepage(struct writeback_control *wbc,
		struct page *page)
{
	return folio_redirty_for_writepage(wbc, page_folio(page));
}
EXPORT_SYMBOL(redirty_page_for_writepage);
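
/*
 * Typical use (sketch): a ->writepage implementation that cannot make
 * progress hands the page back to the flusher instead of writing it:
 *
 *	if (cannot_write_now) {		// hypothetical condition
 *		redirty_page_for_writepage(wbc, page);
 *		unlock_page(page);
 *		return 0;
 *	}
 */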

void lru_cache_add(struct page *page)
{
	folio_add_lru(page_folio(page));
}
EXPORT_SYMBOL(lru_cache_add);

int add_to_page_cache_lru(struct page *page, struct address_space *mapping,
		pgoff_t index, gfp_t gfp)
{
	return filemap_add_folio(mapping, page_folio(page), index, gfp);
}
EXPORT_SYMBOL(add_to_page_cache_lru);
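
/*
 * A common caller-side pattern (sketch, not from this file): allocate
 * a page, add it to the page cache and the LRU in one step, then read
 * it:
 *
 *	struct page *page = page_cache_alloc(mapping);
 *
 *	if (!page)
 *		return -ENOMEM;
 *	err = add_to_page_cache_lru(page, mapping, index,
 *			mapping_gfp_constraint(mapping, GFP_KERNEL));
 *	if (err) {
 *		put_page(page);
 *		return err;
 *	}
 */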

noinline
struct page *pagecache_get_page(struct address_space *mapping, pgoff_t index,
		int fgp_flags, gfp_t gfp)
{
	struct folio *folio;

	folio = __filemap_get_folio(mapping, index, fgp_flags, gfp);
	/*
	 * Return the head page when the caller asked for it (FGP_HEAD),
	 * or when there is no precise page to return: no folio found, or
	 * a shadow/swap entry encoded as an xarray value.  Note that
	 * &folio->page is NULL when folio is NULL, as page is the first
	 * member.
	 */
	if ((fgp_flags & FGP_HEAD) || !folio || xa_is_value(folio))
		return &folio->page;
	/* Otherwise return the subpage of the folio covering @index. */
	return folio_file_page(folio, index);
}
EXPORT_SYMBOL(pagecache_get_page);
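
/*
 * Illustrative lookup (sketch): find the page at @index, or create a
 * locked one if it is not present:
 *
 *	struct page *page;
 *
 *	page = pagecache_get_page(mapping, index, FGP_LOCK | FGP_CREAT,
 *			mapping_gfp_mask(mapping));
 *	if (!page)
 *		return -ENOMEM;
 */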

struct page *grab_cache_page_write_begin(struct address_space *mapping,
		pgoff_t index, unsigned flags)
{
	unsigned fgp_flags = FGP_LOCK | FGP_WRITE | FGP_CREAT | FGP_STABLE;

	/* Translate the legacy AOP flag into its FGP equivalent. */
	if (flags & AOP_FLAG_NOFS)
		fgp_flags |= FGP_NOFS;
	return pagecache_get_page(mapping, index, fgp_flags,
			mapping_gfp_mask(mapping));
}
EXPORT_SYMBOL(grab_cache_page_write_begin);
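
/*
 * Sketch of an unconverted ->write_begin implementation built on this
 * helper (my_write_begin is hypothetical; the signature assumes the
 * pre-folio aops prototype that still takes a flags argument):
 *
 *	static int my_write_begin(struct file *file,
 *			struct address_space *mapping, loff_t pos,
 *			unsigned len, unsigned flags,
 *			struct page **pagep, void **fsdata)
 *	{
 *		struct page *page;
 *
 *		page = grab_cache_page_write_begin(mapping,
 *				pos >> PAGE_SHIFT, flags);
 *		if (!page)
 *			return -ENOMEM;
 *		*pagep = page;
 *		return 0;
 *	}
 */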