blob: 6250ca6a1f851d4be8d12f39e86f0d03508f6627 [file] [log] [blame]
// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2018 Oracle.  All Rights Reserved.
 * Author: Darrick J. Wong <darrick.wong@oracle.com>
 */
6#include <linux/module.h>
7#include <linux/compiler.h>
8#include <linux/fs.h>
9#include <linux/iomap.h>
10#include <linux/swap.h>
11
/* Swapfile activation */

/*
 * State accumulated while walking a swap file's extent mappings.  Physically
 * contiguous iomaps are merged into @iomap until a discontiguity forces the
 * accumulated run to be handed to the swap code.
 */
struct iomap_swapfile_info {
	struct iomap iomap;		/* accumulated iomap */
	struct swap_info_struct *sis;	/* swap device being set up */
	uint64_t lowest_ppage;		/* lowest physical addr seen (pages) */
	uint64_t highest_ppage;		/* highest physical addr seen (pages) */
	unsigned long nr_pages;		/* number of pages collected */
	int nr_extents;			/* extent count */
	struct file *file;		/* the swap file itself, for error reports */
};
24/*
25 * Collect physical extents for this swap file. Physical extents reported to
26 * the swap code must be trimmed to align to a page boundary. The logical
27 * offset within the file is irrelevant since the swapfile code maps logical
28 * page numbers of the swap device to the physical page-aligned extents.
29 */
30static int iomap_swapfile_add_extent(struct iomap_swapfile_info *isi)
31{
32 struct iomap *iomap = &isi->iomap;
33 unsigned long nr_pages;
34 uint64_t first_ppage;
35 uint64_t first_ppage_reported;
36 uint64_t next_ppage;
37 int error;
38
39 /*
40 * Round the start up and the end down so that the physical
41 * extent aligns to a page boundary.
42 */
43 first_ppage = ALIGN(iomap->addr, PAGE_SIZE) >> PAGE_SHIFT;
44 next_ppage = ALIGN_DOWN(iomap->addr + iomap->length, PAGE_SIZE) >>
45 PAGE_SHIFT;
46
47 /* Skip too-short physical extents. */
48 if (first_ppage >= next_ppage)
49 return 0;
50 nr_pages = next_ppage - first_ppage;
51
52 /*
53 * Calculate how much swap space we're adding; the first page contains
54 * the swap header and doesn't count. The mm still wants that first
55 * page fed to add_swap_extent, however.
56 */
57 first_ppage_reported = first_ppage;
58 if (iomap->offset == 0)
59 first_ppage_reported++;
60 if (isi->lowest_ppage > first_ppage_reported)
61 isi->lowest_ppage = first_ppage_reported;
62 if (isi->highest_ppage < (next_ppage - 1))
63 isi->highest_ppage = next_ppage - 1;
64
65 /* Add extent, set up for the next call. */
66 error = add_swap_extent(isi->sis, isi->nr_pages, nr_pages, first_ppage);
67 if (error < 0)
68 return error;
69 isi->nr_extents += error;
70 isi->nr_pages += nr_pages;
71 return 0;
72}
Christoph Hellwigad89b662021-03-26 10:55:40 -070074static int iomap_swapfile_fail(struct iomap_swapfile_info *isi, const char *str)
75{
76 char *buf, *p = ERR_PTR(-ENOMEM);
77
78 buf = kmalloc(PATH_MAX, GFP_KERNEL);
79 if (buf)
80 p = file_path(isi->file, buf, PATH_MAX);
81 pr_err("swapon: file %s %s\n", IS_ERR(p) ? "<unknown>" : p, str);
82 kfree(buf);
83 return -EINVAL;
84}
Darrick J. Wonga45c0ecc2019-07-15 08:50:57 -070086/*
87 * Accumulate iomaps for this swap file. We have to accumulate iomaps because
88 * swap only cares about contiguous page-aligned physical extents and makes no
89 * distinction between written and unwritten extents.
90 */
91static loff_t iomap_swapfile_activate_actor(struct inode *inode, loff_t pos,
Goldwyn Rodriguesc039b992019-10-18 16:44:10 -070092 loff_t count, void *data, struct iomap *iomap,
93 struct iomap *srcmap)
Darrick J. Wonga45c0ecc2019-07-15 08:50:57 -070094{
95 struct iomap_swapfile_info *isi = data;
96 int error;
97
98 switch (iomap->type) {
99 case IOMAP_MAPPED:
100 case IOMAP_UNWRITTEN:
101 /* Only real or unwritten extents. */
102 break;
103 case IOMAP_INLINE:
104 /* No inline data. */
Christoph Hellwigad89b662021-03-26 10:55:40 -0700105 return iomap_swapfile_fail(isi, "is inline");
Darrick J. Wonga45c0ecc2019-07-15 08:50:57 -0700106 default:
Christoph Hellwigad89b662021-03-26 10:55:40 -0700107 return iomap_swapfile_fail(isi, "has unallocated extents");
Darrick J. Wonga45c0ecc2019-07-15 08:50:57 -0700108 }
109
110 /* No uncommitted metadata or shared blocks. */
Christoph Hellwigad89b662021-03-26 10:55:40 -0700111 if (iomap->flags & IOMAP_F_DIRTY)
112 return iomap_swapfile_fail(isi, "is not committed");
113 if (iomap->flags & IOMAP_F_SHARED)
114 return iomap_swapfile_fail(isi, "has shared extents");
Darrick J. Wonga45c0ecc2019-07-15 08:50:57 -0700115
116 /* Only one bdev per swap file. */
Christoph Hellwigad89b662021-03-26 10:55:40 -0700117 if (iomap->bdev != isi->sis->bdev)
118 return iomap_swapfile_fail(isi, "outside the main device");
Darrick J. Wonga45c0ecc2019-07-15 08:50:57 -0700119
120 if (isi->iomap.length == 0) {
121 /* No accumulated extent, so just store it. */
122 memcpy(&isi->iomap, iomap, sizeof(isi->iomap));
123 } else if (isi->iomap.addr + isi->iomap.length == iomap->addr) {
124 /* Append this to the accumulated extent. */
125 isi->iomap.length += iomap->length;
126 } else {
127 /* Otherwise, add the retained iomap and store this one. */
128 error = iomap_swapfile_add_extent(isi);
129 if (error)
130 return error;
131 memcpy(&isi->iomap, iomap, sizeof(isi->iomap));
132 }
133 return count;
134}
136/*
137 * Iterate a swap file's iomaps to construct physical extents that can be
138 * passed to the swapfile subsystem.
139 */
140int iomap_swapfile_activate(struct swap_info_struct *sis,
141 struct file *swap_file, sector_t *pagespan,
142 const struct iomap_ops *ops)
143{
144 struct iomap_swapfile_info isi = {
145 .sis = sis,
146 .lowest_ppage = (sector_t)-1ULL,
Christoph Hellwigad89b662021-03-26 10:55:40 -0700147 .file = swap_file,
Darrick J. Wonga45c0ecc2019-07-15 08:50:57 -0700148 };
149 struct address_space *mapping = swap_file->f_mapping;
150 struct inode *inode = mapping->host;
151 loff_t pos = 0;
152 loff_t len = ALIGN_DOWN(i_size_read(inode), PAGE_SIZE);
153 loff_t ret;
154
155 /*
156 * Persist all file mapping metadata so that we won't have any
157 * IOMAP_F_DIRTY iomaps.
158 */
159 ret = vfs_fsync(swap_file, 1);
160 if (ret)
161 return ret;
162
163 while (len > 0) {
164 ret = iomap_apply(inode, pos, len, IOMAP_REPORT,
165 ops, &isi, iomap_swapfile_activate_actor);
166 if (ret <= 0)
167 return ret;
168
169 pos += ret;
170 len -= ret;
171 }
172
173 if (isi.iomap.length) {
174 ret = iomap_swapfile_add_extent(&isi);
175 if (ret)
176 return ret;
177 }
178
Ritesh Harjani5808fec2021-03-09 09:29:11 -0800179 /*
180 * If this swapfile doesn't contain even a single page-aligned
181 * contiguous range of blocks, reject this useless swapfile to
182 * prevent confusion later on.
183 */
184 if (isi.nr_pages == 0) {
185 pr_warn("swapon: Cannot find a single usable page in file.\n");
186 return -EINVAL;
187 }
188
Darrick J. Wonga45c0ecc2019-07-15 08:50:57 -0700189 *pagespan = 1 + isi.highest_ppage - isi.lowest_ppage;
190 sis->max = isi.nr_pages;
191 sis->pages = isi.nr_pages - 1;
192 sis->highest_bit = isi.nr_pages - 1;
193 return isi.nr_extents;
194}
195EXPORT_SYMBOL_GPL(iomap_swapfile_activate);