/*
 * linux/kernel/power/swap.c
 *
 * This file provides functions for reading the suspend image from
 * and writing it to a swap partition.
 *
 * Copyright (C) 1998,2001-2005 Pavel Machek <pavel@ucw.cz>
 * Copyright (C) 2006 Rafael J. Wysocki <rjw@sisk.pl>
 * Copyright (C) 2010-2012 Bojan Smojver <bojan@rexursive.com>
 *
 * This file is released under the GPLv2.
 *
 */

#define pr_fmt(fmt) "PM: " fmt

#include <linux/module.h>
#include <linux/file.h>
#include <linux/delay.h>
#include <linux/bitops.h>
#include <linux/genhd.h>
#include <linux/device.h>
#include <linux/bio.h>
#include <linux/blkdev.h>
#include <linux/swap.h>
#include <linux/swapops.h>
#include <linux/pm.h>
#include <linux/slab.h>
#include <linux/lzo.h>
#include <linux/vmalloc.h>
#include <linux/cpumask.h>
#include <linux/atomic.h>
#include <linux/kthread.h>
#include <linux/crc32.h>
#include <linux/ktime.h>

#include "power.h"

#define HIBERNATE_SIG	"S1SUSPEND"

/*
 * When reading an {un,}compressed image, we may restore pages in place,
 * in which case some architectures need these pages cleaning before they
 * can be executed. We don't know which pages these may be, so clean the lot.
 */
static bool clean_pages_on_read;
static bool clean_pages_on_decompress;

/*
 * The swap map is a data structure used for keeping track of each page
 * written to a swap partition.  It consists of many swap_map_page
 * structures, each of which contains an array of MAP_PAGE_ENTRIES swap
 * entries.  These structures are stored in swap and linked together with
 * the help of the .next_swap member.
 *
 * The swap map is created during suspend.  The swap map pages are
 * allocated and populated one at a time, so we only need one memory
 * page to set up the entire structure.
 *
 * During resume we pick up all swap_map_page structures into a list.
 */
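
/*
 * A sketch of the resulting on-disk chain (illustrative only):
 *
 *   swsusp_header.image
 *        |
 *        v
 *   swap_map_page #0 --.next_swap--> swap_map_page #1 --.next_swap--> ...
 *   entries[0..MAP_PAGE_ENTRIES-1]   entries[0..MAP_PAGE_ENTRIES-1]
 *   (sectors of image data pages)    (sectors of image data pages)
 */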

#define MAP_PAGE_ENTRIES	(PAGE_SIZE / sizeof(sector_t) - 1)

/*
 * Number of free pages that are not high.
 */
static inline unsigned long low_free_pages(void)
{
	return nr_free_pages() - nr_free_highpages();
}

/*
 * Number of pages required to be kept free while writing the image. Always
 * half of all available low pages before the writing starts.
 */
static inline unsigned long reqd_free_pages(void)
{
	return low_free_pages() / 2;
}

struct swap_map_page {
	sector_t entries[MAP_PAGE_ENTRIES];
	sector_t next_swap;
};

struct swap_map_page_list {
	struct swap_map_page *map;
	struct swap_map_page_list *next;
};

/**
 * The swap_map_handle structure is used for handling swap in
 * a file-like way.
 */

struct swap_map_handle {
	struct swap_map_page *cur;
	struct swap_map_page_list *maps;
	sector_t cur_swap;
	sector_t first_sector;
	unsigned int k;
	unsigned long reqd_free_pages;
	u32 crc32;
};

struct swsusp_header {
	char reserved[PAGE_SIZE - 20 - sizeof(sector_t) - sizeof(int) -
		      sizeof(u32)];
	u32	crc32;
	sector_t image;
	unsigned int flags;	/* Flags to pass to the "boot" kernel */
	char	orig_sig[10];
	char	sig[10];
} __packed;

static struct swsusp_header *swsusp_header;

/**
 * The following functions are used for tracking the allocated
 * swap pages, so that they can be freed in case of an error.
 */

struct swsusp_extent {
	struct rb_node node;
	unsigned long start;
	unsigned long end;
};

static struct rb_root swsusp_extents = RB_ROOT;

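/*
 * Record one allocated swap offset in the rb-tree of extents.  An offset
 * adjacent to an existing extent simply grows that extent; otherwise a
 * new single-offset extent is inserted.  Returns -EINVAL if the offset
 * is already covered.
 */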
static int swsusp_extents_insert(unsigned long swap_offset)
{
	struct rb_node **new = &(swsusp_extents.rb_node);
	struct rb_node *parent = NULL;
	struct swsusp_extent *ext;

	/* Figure out where to put the new node */
	while (*new) {
		ext = rb_entry(*new, struct swsusp_extent, node);
		parent = *new;
		if (swap_offset < ext->start) {
			/* Try to merge */
			if (swap_offset == ext->start - 1) {
				ext->start--;
				return 0;
			}
			new = &((*new)->rb_left);
		} else if (swap_offset > ext->end) {
			/* Try to merge */
			if (swap_offset == ext->end + 1) {
				ext->end++;
				return 0;
			}
			new = &((*new)->rb_right);
		} else {
			/* It already is in the tree */
			return -EINVAL;
		}
	}
	/* Add the new node and rebalance the tree. */
	ext = kzalloc(sizeof(struct swsusp_extent), GFP_KERNEL);
	if (!ext)
		return -ENOMEM;

	ext->start = swap_offset;
	ext->end = swap_offset;
	rb_link_node(&ext->node, parent, new);
	rb_insert_color(&ext->node, &swsusp_extents);
	return 0;
}

/**
 * alloc_swapdev_block - allocate a swap page and register that it has
 * been allocated, so that it can be freed in case of an error.
 */

sector_t alloc_swapdev_block(int swap)
{
	unsigned long offset;

	offset = swp_offset(get_swap_page_of_type(swap));
	if (offset) {
		if (swsusp_extents_insert(offset))
			swap_free(swp_entry(swap, offset));
		else
			return swapdev_block(swap, offset);
	}
	return 0;
}

/**
 * free_all_swap_pages - free swap pages allocated for saving image data.
 * It also frees the extents used to register which swap entries have been
 * allocated.
 */

void free_all_swap_pages(int swap)
{
	struct rb_node *node;

	while ((node = swsusp_extents.rb_node)) {
		struct swsusp_extent *ext;
		unsigned long offset;

		ext = rb_entry(node, struct swsusp_extent, node);
		rb_erase(node, &swsusp_extents);
		for (offset = ext->start; offset <= ext->end; offset++)
			swap_free(swp_entry(swap, offset));

		kfree(ext);
	}
}

int swsusp_swap_in_use(void)
{
	return (swsusp_extents.rb_node != NULL);
}

/*
 * General things
 */

static unsigned short root_swap = 0xffff;
static struct block_device *hib_resume_bdev;

struct hib_bio_batch {
	atomic_t		count;
	wait_queue_head_t	wait;
	blk_status_t		error;
};

static void hib_init_batch(struct hib_bio_batch *hb)
{
	atomic_set(&hb->count, 0);
	init_waitqueue_head(&hb->wait);
	hb->error = BLK_STS_OK;
}

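/*
 * Per-bio completion handler for a batch: log I/O errors, drop the
 * reference to the bounce page after a write, clean the instruction
 * cache after a read when the architecture requires it, record the
 * first error seen, and wake the waiter once the last bio in flight
 * has completed.
 */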
static void hib_end_io(struct bio *bio)
{
	struct hib_bio_batch *hb = bio->bi_private;
	struct page *page = bio_first_page_all(bio);

	if (bio->bi_status) {
		pr_alert("Read-error on swap-device (%u:%u:%Lu)\n",
			 MAJOR(bio_dev(bio)), MINOR(bio_dev(bio)),
			 (unsigned long long)bio->bi_iter.bi_sector);
	}

	if (bio_data_dir(bio) == WRITE)
		put_page(page);
	else if (clean_pages_on_read)
		flush_icache_range((unsigned long)page_address(page),
				   (unsigned long)page_address(page) + PAGE_SIZE);

	if (bio->bi_status && !hb->error)
		hb->error = bio->bi_status;
	if (atomic_dec_and_test(&hb->count))
		wake_up(&hb->wait);

	bio_put(bio);
}

static int hib_submit_io(int op, int op_flags, pgoff_t page_off, void *addr,
			 struct hib_bio_batch *hb)
{
	struct page *page = virt_to_page(addr);
	struct bio *bio;
	int error = 0;

	bio = bio_alloc(GFP_NOIO | __GFP_HIGH, 1);
	bio->bi_iter.bi_sector = page_off * (PAGE_SIZE >> 9);
	bio_set_dev(bio, hib_resume_bdev);
	bio_set_op_attrs(bio, op, op_flags);

	if (bio_add_page(bio, page, PAGE_SIZE, 0) < PAGE_SIZE) {
		pr_err("Adding page to bio failed at %llu\n",
		       (unsigned long long)bio->bi_iter.bi_sector);
		bio_put(bio);
		return -EFAULT;
	}

	if (hb) {
		bio->bi_end_io = hib_end_io;
		bio->bi_private = hb;
		atomic_inc(&hb->count);
		submit_bio(bio);
	} else {
		error = submit_bio_wait(bio);
		bio_put(bio);
	}

	return error;
}

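/*
 * Wait for all bios in the batch to complete and return the first error
 * recorded for the batch, converted to an errno value.
 */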
static blk_status_t hib_wait_io(struct hib_bio_batch *hb)
{
	wait_event(hb->wait, atomic_read(&hb->count) == 0);
	return blk_status_to_errno(hb->error);
}

/*
 * Saving part
 */

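/*
 * Write the hibernate signature into the swap header.  The original swap
 * signature is saved in orig_sig so that it can be restored on resume,
 * the header is pointed at the first swap map sector, and the image
 * flags (plus the CRC32 in SF_CRC32_MODE) are recorded for the "boot"
 * kernel.  Fails with -ENODEV if no valid swap signature is found.
 */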
static int mark_swapfiles(struct swap_map_handle *handle, unsigned int flags)
{
	int error;

	hib_submit_io(REQ_OP_READ, 0, swsusp_resume_block,
		      swsusp_header, NULL);
	if (!memcmp("SWAP-SPACE", swsusp_header->sig, 10) ||
	    !memcmp("SWAPSPACE2", swsusp_header->sig, 10)) {
		memcpy(swsusp_header->orig_sig, swsusp_header->sig, 10);
		memcpy(swsusp_header->sig, HIBERNATE_SIG, 10);
		swsusp_header->image = handle->first_sector;
		swsusp_header->flags = flags;
		if (flags & SF_CRC32_MODE)
			swsusp_header->crc32 = handle->crc32;
		error = hib_submit_io(REQ_OP_WRITE, REQ_SYNC,
				      swsusp_resume_block, swsusp_header, NULL);
	} else {
		pr_err("Swap header not found!\n");
		error = -ENODEV;
	}
	return error;
}

/**
 * swsusp_swap_check - check if the resume device is a swap device
 * and get its index (if so)
 *
 * This is called before saving the image.
 */
static int swsusp_swap_check(void)
{
	int res;

	res = swap_type_of(swsusp_resume_device, swsusp_resume_block,
			   &hib_resume_bdev);
	if (res < 0)
		return res;

	root_swap = res;
	res = blkdev_get(hib_resume_bdev, FMODE_WRITE, NULL);
	if (res)
		return res;

	res = set_blocksize(hib_resume_bdev, PAGE_SIZE);
	if (res < 0)
		blkdev_put(hib_resume_bdev, FMODE_WRITE);

	/*
	 * Update the resume device to the one actually used,
	 * so the test_resume mode can use it in case it is
	 * invoked from hibernate() to test the snapshot.
	 */
	swsusp_resume_device = hib_resume_bdev->bd_dev;
	return res;
}

/**
 * write_page - Write one page to given swap location.
 * @buf: Address we're writing.
 * @offset: Offset of the swap page we're writing to.
 * @hb: bio completion batch
 */

static int write_page(void *buf, sector_t offset, struct hib_bio_batch *hb)
{
	void *src;
	int ret;

	if (!offset)
		return -ENOSPC;

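	/*
	 * Try to copy the page into a freshly allocated bounce page so the
	 * write can proceed asynchronously.  If that allocation fails, drain
	 * the in-flight batch to free some pages and retry once; as a last
	 * resort fall back to a synchronous write directly from @buf.
	 */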
	if (hb) {
		src = (void *)__get_free_page(GFP_NOIO | __GFP_NOWARN |
					      __GFP_NORETRY);
		if (src) {
			copy_page(src, buf);
		} else {
			ret = hib_wait_io(hb); /* Free pages */
			if (ret)
				return ret;
			src = (void *)__get_free_page(GFP_NOIO |
						      __GFP_NOWARN |
						      __GFP_NORETRY);
			if (src) {
				copy_page(src, buf);
			} else {
				WARN_ON_ONCE(1);
				hb = NULL;	/* Go synchronous */
				src = buf;
			}
		}
	} else {
		src = buf;
	}
	return hib_submit_io(REQ_OP_WRITE, REQ_SYNC, offset, src, hb);
}

static void release_swap_writer(struct swap_map_handle *handle)
{
	if (handle->cur)
		free_page((unsigned long)handle->cur);
	handle->cur = NULL;
}

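/*
 * Open the resume swap device for writing, allocate the first swap map
 * page and reserve a swap slot for it.  The slot of this first map page
 * (first_sector) is later stored in the image header by mark_swapfiles().
 */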
static int get_swap_writer(struct swap_map_handle *handle)
{
	int ret;

	ret = swsusp_swap_check();
	if (ret) {
		if (ret != -ENOSPC)
			pr_err("Cannot find swap device, try swapon -a\n");
		return ret;
	}
	handle->cur = (struct swap_map_page *)get_zeroed_page(GFP_KERNEL);
	if (!handle->cur) {
		ret = -ENOMEM;
		goto err_close;
	}
	handle->cur_swap = alloc_swapdev_block(root_swap);
	if (!handle->cur_swap) {
		ret = -ENOSPC;
		goto err_rel;
	}
	handle->k = 0;
	handle->reqd_free_pages = reqd_free_pages();
	handle->first_sector = handle->cur_swap;
	return 0;
err_rel:
	release_swap_writer(handle);
err_close:
	swsusp_close(FMODE_WRITE);
	return ret;
}

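/*
 * Write one data page to swap and record its sector in the current swap
 * map page.  When the map page fills up, allocate a slot for the next one,
 * link it via next_swap, flush the full map page and start over.  When
 * writing asynchronously, throttle once low memory drops below
 * handle->reqd_free_pages by draining the in-flight batch.
 */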
static int swap_write_page(struct swap_map_handle *handle, void *buf,
			   struct hib_bio_batch *hb)
{
	int error = 0;
	sector_t offset;

	if (!handle->cur)
		return -EINVAL;
	offset = alloc_swapdev_block(root_swap);
	error = write_page(buf, offset, hb);
	if (error)
		return error;
	handle->cur->entries[handle->k++] = offset;
	if (handle->k >= MAP_PAGE_ENTRIES) {
		offset = alloc_swapdev_block(root_swap);
		if (!offset)
			return -ENOSPC;
		handle->cur->next_swap = offset;
		error = write_page(handle->cur, handle->cur_swap, hb);
		if (error)
			goto out;
		clear_page(handle->cur);
		handle->cur_swap = offset;
		handle->k = 0;

		if (hb && low_free_pages() <= handle->reqd_free_pages) {
			error = hib_wait_io(hb);
			if (error)
				goto out;
			/*
			 * Recalculate the number of required free pages, to
			 * make sure we never take more than half.
			 */
			handle->reqd_free_pages = reqd_free_pages();
		}
	}
 out:
	return error;
}

static int flush_swap_writer(struct swap_map_handle *handle)
{
	if (handle->cur && handle->cur_swap)
		return write_page(handle->cur, handle->cur_swap, NULL);
	else
		return -EINVAL;
}

static int swap_writer_finish(struct swap_map_handle *handle,
			      unsigned int flags, int error)
{
	if (!error) {
		flush_swap_writer(handle);
		pr_info("S");
		error = mark_swapfiles(handle, flags);
		pr_cont("|\n");
	}

	if (error)
		free_all_swap_pages(root_swap);
	release_swap_writer(handle);
	swsusp_close(FMODE_WRITE);

	return error;
}

/* We need to remember how much compressed data we need to read. */
#define LZO_HEADER	sizeof(size_t)

/* Number of pages/bytes we'll compress at one time. */
#define LZO_UNC_PAGES	32
#define LZO_UNC_SIZE	(LZO_UNC_PAGES * PAGE_SIZE)

/* Number of pages/bytes we need for compressed data (worst case). */
#define LZO_CMP_PAGES	DIV_ROUND_UP(lzo1x_worst_compress(LZO_UNC_SIZE) + \
				     LZO_HEADER, PAGE_SIZE)
#define LZO_CMP_SIZE	(LZO_CMP_PAGES * PAGE_SIZE)

/* Maximum number of threads for compression/decompression. */
#define LZO_THREADS	3

/* Minimum/maximum number of pages for read buffering. */
#define LZO_MIN_RD_PAGES	1024
#define LZO_MAX_RD_PAGES	8192

/**
 * save_image - save the suspend image data
 */

static int save_image(struct swap_map_handle *handle,
		      struct snapshot_handle *snapshot,
		      unsigned int nr_to_write)
{
	unsigned int m;
	int ret;
	int nr_pages;
	int err2;
	struct hib_bio_batch hb;
	ktime_t start;
	ktime_t stop;

	hib_init_batch(&hb);

	pr_info("Saving image data pages (%u pages)...\n",
		nr_to_write);
	m = nr_to_write / 10;
	if (!m)
		m = 1;
	nr_pages = 0;
	start = ktime_get();
	while (1) {
		ret = snapshot_read_next(snapshot);
		if (ret <= 0)
			break;
		ret = swap_write_page(handle, data_of(*snapshot), &hb);
		if (ret)
			break;
		if (!(nr_pages % m))
			pr_info("Image saving progress: %3d%%\n",
				nr_pages / m * 10);
		nr_pages++;
	}
	err2 = hib_wait_io(&hb);
	stop = ktime_get();
	if (!ret)
		ret = err2;
	if (!ret)
		pr_info("Image saving done\n");
	swsusp_show_speed(start, stop, nr_to_write, "Wrote");
	return ret;
}

/**
 * Structure used for CRC32.
 */
struct crc_data {
	struct task_struct *thr;		/* thread */
	atomic_t ready;				/* ready to start flag */
	atomic_t stop;				/* ready to stop flag */
	unsigned run_threads;			/* nr current threads */
	wait_queue_head_t go;			/* start crc update */
	wait_queue_head_t done;			/* crc update done */
	u32 *crc32;				/* points to handle's crc32 */
	size_t *unc_len[LZO_THREADS];		/* uncompressed lengths */
	unsigned char *unc[LZO_THREADS];	/* uncompressed data */
};

/**
 * CRC32 update function that runs in its own thread.
 */
static int crc32_threadfn(void *data)
{
	struct crc_data *d = data;
	unsigned i;

	while (1) {
		wait_event(d->go, atomic_read(&d->ready) ||
				  kthread_should_stop());
		if (kthread_should_stop()) {
			d->thr = NULL;
			atomic_set(&d->stop, 1);
			wake_up(&d->done);
			break;
		}
		atomic_set(&d->ready, 0);

		for (i = 0; i < d->run_threads; i++)
			*d->crc32 = crc32_le(*d->crc32,
					     d->unc[i], *d->unc_len[i]);
		atomic_set(&d->stop, 1);
		wake_up(&d->done);
	}
	return 0;
}

/**
 * Structure used for LZO data compression.
 */
struct cmp_data {
	struct task_struct *thr;		/* thread */
	atomic_t ready;				/* ready to start flag */
	atomic_t stop;				/* ready to stop flag */
	int ret;				/* return code */
	wait_queue_head_t go;			/* start compression */
	wait_queue_head_t done;			/* compression done */
	size_t unc_len;				/* uncompressed length */
	size_t cmp_len;				/* compressed length */
	unsigned char unc[LZO_UNC_SIZE];	/* uncompressed buffer */
	unsigned char cmp[LZO_CMP_SIZE];	/* compressed buffer */
	unsigned char wrk[LZO1X_1_MEM_COMPRESS];	/* compression workspace */
};

/**
 * Compression function that runs in its own thread.
 */
static int lzo_compress_threadfn(void *data)
{
	struct cmp_data *d = data;

	while (1) {
		wait_event(d->go, atomic_read(&d->ready) ||
				  kthread_should_stop());
		if (kthread_should_stop()) {
			d->thr = NULL;
			d->ret = -1;
			atomic_set(&d->stop, 1);
			wake_up(&d->done);
			break;
		}
		atomic_set(&d->ready, 0);

		d->ret = lzo1x_1_compress(d->unc, d->unc_len,
					  d->cmp + LZO_HEADER, &d->cmp_len,
					  d->wrk);
		atomic_set(&d->stop, 1);
		wake_up(&d->done);
	}
	return 0;
}

/**
 * save_image_lzo - Save the suspend image data compressed with LZO.
 * @handle: Swap map handle to use for saving the image.
 * @snapshot: Image to read data from.
 * @nr_to_write: Number of pages to save.
 */
static int save_image_lzo(struct swap_map_handle *handle,
			  struct snapshot_handle *snapshot,
			  unsigned int nr_to_write)
{
	unsigned int m;
	int ret = 0;
	int nr_pages;
	int err2;
	struct hib_bio_batch hb;
	ktime_t start;
	ktime_t stop;
	size_t off;
	unsigned thr, run_threads, nr_threads;
	unsigned char *page = NULL;
	struct cmp_data *data = NULL;
	struct crc_data *crc = NULL;

	hib_init_batch(&hb);

	/*
	 * We'll limit the number of threads for compression to limit memory
	 * footprint.
	 */
	nr_threads = num_online_cpus() - 1;
	nr_threads = clamp_val(nr_threads, 1, LZO_THREADS);

	page = (void *)__get_free_page(GFP_NOIO | __GFP_HIGH);
	if (!page) {
		pr_err("Failed to allocate LZO page\n");
		ret = -ENOMEM;
		goto out_clean;
	}

	data = vmalloc(sizeof(*data) * nr_threads);
	if (!data) {
		pr_err("Failed to allocate LZO data\n");
		ret = -ENOMEM;
		goto out_clean;
	}
	for (thr = 0; thr < nr_threads; thr++)
		memset(&data[thr], 0, offsetof(struct cmp_data, go));

	crc = kmalloc(sizeof(*crc), GFP_KERNEL);
	if (!crc) {
		pr_err("Failed to allocate crc\n");
		ret = -ENOMEM;
		goto out_clean;
	}
	memset(crc, 0, offsetof(struct crc_data, go));

	/*
	 * Start the compression threads.
	 */
	for (thr = 0; thr < nr_threads; thr++) {
		init_waitqueue_head(&data[thr].go);
		init_waitqueue_head(&data[thr].done);

		data[thr].thr = kthread_run(lzo_compress_threadfn,
					    &data[thr],
					    "image_compress/%u", thr);
		if (IS_ERR(data[thr].thr)) {
			data[thr].thr = NULL;
			pr_err("Cannot start compression threads\n");
			ret = -ENOMEM;
			goto out_clean;
		}
	}

	/*
	 * Start the CRC32 thread.
	 */
	init_waitqueue_head(&crc->go);
	init_waitqueue_head(&crc->done);

	handle->crc32 = 0;
	crc->crc32 = &handle->crc32;
	for (thr = 0; thr < nr_threads; thr++) {
		crc->unc[thr] = data[thr].unc;
		crc->unc_len[thr] = &data[thr].unc_len;
	}

	crc->thr = kthread_run(crc32_threadfn, crc, "image_crc32");
	if (IS_ERR(crc->thr)) {
		crc->thr = NULL;
		pr_err("Cannot start CRC32 thread\n");
		ret = -ENOMEM;
		goto out_clean;
	}

	/*
	 * Adjust the number of required free pages after all allocations have
	 * been done. We don't want to run out of pages when writing.
	 */
	handle->reqd_free_pages = reqd_free_pages();

	pr_info("Using %u thread(s) for compression\n", nr_threads);
	pr_info("Compressing and saving image data (%u pages)...\n",
		nr_to_write);
	m = nr_to_write / 10;
	if (!m)
		m = 1;
	nr_pages = 0;
	start = ktime_get();
	for (;;) {
		for (thr = 0; thr < nr_threads; thr++) {
			for (off = 0; off < LZO_UNC_SIZE; off += PAGE_SIZE) {
				ret = snapshot_read_next(snapshot);
				if (ret < 0)
					goto out_finish;

				if (!ret)
					break;

				memcpy(data[thr].unc + off,
				       data_of(*snapshot), PAGE_SIZE);

				if (!(nr_pages % m))
					pr_info("Image saving progress: %3d%%\n",
						nr_pages / m * 10);
				nr_pages++;
			}
			if (!off)
				break;

			data[thr].unc_len = off;

			atomic_set(&data[thr].ready, 1);
			wake_up(&data[thr].go);
		}

		if (!thr)
			break;

		crc->run_threads = thr;
		atomic_set(&crc->ready, 1);
		wake_up(&crc->go);

		for (run_threads = thr, thr = 0; thr < run_threads; thr++) {
			wait_event(data[thr].done,
				   atomic_read(&data[thr].stop));
			atomic_set(&data[thr].stop, 0);

			ret = data[thr].ret;

			if (ret < 0) {
				pr_err("LZO compression failed\n");
				goto out_finish;
			}

			if (unlikely(!data[thr].cmp_len ||
				     data[thr].cmp_len >
				     lzo1x_worst_compress(data[thr].unc_len))) {
				pr_err("Invalid LZO compressed length\n");
				ret = -1;
				goto out_finish;
			}

			*(size_t *)data[thr].cmp = data[thr].cmp_len;

			/*
			 * Given we are writing one page at a time to disk, we
			 * copy that much from the buffer, although the last
			 * bit will likely be smaller than full page. This is
			 * OK - we saved the length of the compressed data, so
			 * any garbage at the end will be discarded when we
			 * read it.
			 */
			for (off = 0;
			     off < LZO_HEADER + data[thr].cmp_len;
			     off += PAGE_SIZE) {
				memcpy(page, data[thr].cmp + off, PAGE_SIZE);

				ret = swap_write_page(handle, page, &hb);
				if (ret)
					goto out_finish;
			}
		}

		wait_event(crc->done, atomic_read(&crc->stop));
		atomic_set(&crc->stop, 0);
	}

out_finish:
	err2 = hib_wait_io(&hb);
	stop = ktime_get();
	if (!ret)
		ret = err2;
	if (!ret)
		pr_info("Image saving done\n");
	swsusp_show_speed(start, stop, nr_to_write, "Wrote");
out_clean:
	if (crc) {
		if (crc->thr)
			kthread_stop(crc->thr);
		kfree(crc);
	}
	if (data) {
		for (thr = 0; thr < nr_threads; thr++)
			if (data[thr].thr)
				kthread_stop(data[thr].thr);
		vfree(data);
	}
	if (page)
		free_page((unsigned long)page);

	return ret;
}

/**
 * enough_swap - Make sure we have enough swap to save the image.
 *
 * Returns TRUE or FALSE after checking the total amount of swap
 * space available from the resume partition.
 */

static int enough_swap(unsigned int nr_pages)
{
	unsigned int free_swap = count_swap_pages(root_swap, 1);
	unsigned int required;

	pr_debug("Free swap pages: %u\n", free_swap);

	required = PAGES_FOR_IO + nr_pages;
	return free_swap > required;
}

/**
 * swsusp_write - Write entire image and metadata.
 * @flags: flags to pass to the "boot" kernel in the image header
 *
 * It is important _NOT_ to umount filesystems at this point. We want
 * them synced (in case something goes wrong) but we DO NOT want to mark
 * filesystem clean: it is not. (And it does not matter, if we resume
 * correctly, we'll mark system clean, anyway.)
 */

int swsusp_write(unsigned int flags)
{
	struct swap_map_handle handle;
	struct snapshot_handle snapshot;
	struct swsusp_info *header;
	unsigned long pages;
	int error;

	pages = snapshot_get_image_size();
	error = get_swap_writer(&handle);
	if (error) {
		pr_err("Cannot get swap writer\n");
		return error;
	}
	if (flags & SF_NOCOMPRESS_MODE) {
		if (!enough_swap(pages)) {
			pr_err("Not enough free swap\n");
			error = -ENOSPC;
			goto out_finish;
		}
	}
	memset(&snapshot, 0, sizeof(struct snapshot_handle));
	error = snapshot_read_next(&snapshot);
	if (error < PAGE_SIZE) {
		if (error >= 0)
			error = -EFAULT;

		goto out_finish;
	}
	header = (struct swsusp_info *)data_of(snapshot);
	error = swap_write_page(&handle, header, NULL);
	if (!error) {
		error = (flags & SF_NOCOMPRESS_MODE) ?
			save_image(&handle, &snapshot, pages - 1) :
			save_image_lzo(&handle, &snapshot, pages - 1);
	}
out_finish:
	error = swap_writer_finish(&handle, flags, error);
	return error;
}

/**
 * The following functions allow us to read data using a swap map
 * in a file-like way.
 */

static void release_swap_reader(struct swap_map_handle *handle)
{
	struct swap_map_page_list *tmp;

	while (handle->maps) {
		if (handle->maps->map)
			free_page((unsigned long)handle->maps->map);
		tmp = handle->maps;
		handle->maps = handle->maps->next;
		kfree(tmp);
	}
	handle->cur = NULL;
}

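/*
 * Read the complete chain of swap map pages into a list, following the
 * next_swap links, and leave the handle positioned at the first entry.
 * The image flags saved in the swap header are returned via @flags_p.
 */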
static int get_swap_reader(struct swap_map_handle *handle,
		unsigned int *flags_p)
{
	int error;
	struct swap_map_page_list *tmp, *last;
	sector_t offset;

	*flags_p = swsusp_header->flags;

	if (!swsusp_header->image) /* how can this happen? */
		return -EINVAL;

	handle->cur = NULL;
	last = handle->maps = NULL;
	offset = swsusp_header->image;
	while (offset) {
		tmp = kmalloc(sizeof(*handle->maps), GFP_KERNEL);
		if (!tmp) {
			release_swap_reader(handle);
			return -ENOMEM;
		}
		memset(tmp, 0, sizeof(*tmp));
		if (!handle->maps)
			handle->maps = tmp;
		if (last)
			last->next = tmp;
		last = tmp;

		tmp->map = (struct swap_map_page *)
			   __get_free_page(GFP_NOIO | __GFP_HIGH);
		if (!tmp->map) {
			release_swap_reader(handle);
			return -ENOMEM;
		}

		error = hib_submit_io(REQ_OP_READ, 0, offset, tmp->map, NULL);
		if (error) {
			release_swap_reader(handle);
			return error;
		}
		offset = tmp->map->next_swap;
	}
	handle->k = 0;
	handle->cur = handle->maps->map;
	return 0;
}

static int swap_read_page(struct swap_map_handle *handle, void *buf,
			  struct hib_bio_batch *hb)
{
	sector_t offset;
	int error;
	struct swap_map_page_list *tmp;

	if (!handle->cur)
		return -EINVAL;
	offset = handle->cur->entries[handle->k];
	if (!offset)
		return -EFAULT;
	error = hib_submit_io(REQ_OP_READ, 0, offset, buf, hb);
	if (error)
		return error;
	if (++handle->k >= MAP_PAGE_ENTRIES) {
		handle->k = 0;
		free_page((unsigned long)handle->maps->map);
		tmp = handle->maps;
		handle->maps = handle->maps->next;
		kfree(tmp);
		if (!handle->maps)
			release_swap_reader(handle);
		else
			handle->cur = handle->maps->map;
	}
	return error;
}

static int swap_reader_finish(struct swap_map_handle *handle)
{
	release_swap_reader(handle);

	return 0;
}

/**
 * load_image - load the image using the swap map handle
 * @handle and the snapshot handle @snapshot
 * (assume there are @nr_to_read pages to load)
 */

static int load_image(struct swap_map_handle *handle,
		      struct snapshot_handle *snapshot,
		      unsigned int nr_to_read)
{
	unsigned int m;
	int ret = 0;
	ktime_t start;
	ktime_t stop;
	struct hib_bio_batch hb;
	int err2;
	unsigned nr_pages;

	hib_init_batch(&hb);

	clean_pages_on_read = true;
	pr_info("Loading image data pages (%u pages)...\n", nr_to_read);
	m = nr_to_read / 10;
	if (!m)
		m = 1;
	nr_pages = 0;
	start = ktime_get();
	for ( ; ; ) {
		ret = snapshot_write_next(snapshot);
		if (ret <= 0)
			break;
		ret = swap_read_page(handle, data_of(*snapshot), &hb);
		if (ret)
			break;
		if (snapshot->sync_read)
			ret = hib_wait_io(&hb);
		if (ret)
			break;
		if (!(nr_pages % m))
			pr_info("Image loading progress: %3d%%\n",
				nr_pages / m * 10);
		nr_pages++;
	}
	err2 = hib_wait_io(&hb);
	stop = ktime_get();
	if (!ret)
		ret = err2;
	if (!ret) {
		pr_info("Image loading done\n");
		snapshot_write_finalize(snapshot);
		if (!snapshot_image_loaded(snapshot))
			ret = -ENODATA;
	}
	swsusp_show_speed(start, stop, nr_to_read, "Read");
	return ret;
}

/**
 * Structure used for LZO data decompression.
 */
struct dec_data {
	struct task_struct *thr;		/* thread */
	atomic_t ready;				/* ready to start flag */
	atomic_t stop;				/* ready to stop flag */
	int ret;				/* return code */
	wait_queue_head_t go;			/* start decompression */
	wait_queue_head_t done;			/* decompression done */
	size_t unc_len;				/* uncompressed length */
	size_t cmp_len;				/* compressed length */
	unsigned char unc[LZO_UNC_SIZE];	/* uncompressed buffer */
	unsigned char cmp[LZO_CMP_SIZE];	/* compressed buffer */
};

/**
 * Decompression function that runs in its own thread.
 */
static int lzo_decompress_threadfn(void *data)
{
	struct dec_data *d = data;

	while (1) {
		wait_event(d->go, atomic_read(&d->ready) ||
				  kthread_should_stop());
		if (kthread_should_stop()) {
			d->thr = NULL;
			d->ret = -1;
			atomic_set(&d->stop, 1);
			wake_up(&d->done);
			break;
		}
		atomic_set(&d->ready, 0);

		d->unc_len = LZO_UNC_SIZE;
		d->ret = lzo1x_decompress_safe(d->cmp + LZO_HEADER, d->cmp_len,
					       d->unc, &d->unc_len);
		if (clean_pages_on_decompress)
			flush_icache_range((unsigned long)d->unc,
					   (unsigned long)d->unc + d->unc_len);

		atomic_set(&d->stop, 1);
		wake_up(&d->done);
	}
	return 0;
}

/**
 * load_image_lzo - Load compressed image data and decompress it with LZO.
 * @handle: Swap map handle to use for loading data.
 * @snapshot: Image to copy uncompressed data into.
 * @nr_to_read: Number of pages to load.
 */
static int load_image_lzo(struct swap_map_handle *handle,
			  struct snapshot_handle *snapshot,
			  unsigned int nr_to_read)
{
	unsigned int m;
	int ret = 0;
	int eof = 0;
	struct hib_bio_batch hb;
	ktime_t start;
	ktime_t stop;
	unsigned nr_pages;
	size_t off;
	unsigned i, thr, run_threads, nr_threads;
	unsigned ring = 0, pg = 0, ring_size = 0,
		 have = 0, want, need, asked = 0;
	unsigned long read_pages = 0;
	unsigned char **page = NULL;
	struct dec_data *data = NULL;
	struct crc_data *crc = NULL;

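	/*
	 * Ring buffer bookkeeping used below: page[] holds the read buffer,
	 * ring indexes the next slot to submit a read into and pg the next
	 * slot to consume; have counts completed pages ready to use, asked
	 * counts reads submitted but not yet waited for, and want counts
	 * free slots that can be refilled.
	 */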
	hib_init_batch(&hb);

	/*
	 * We'll limit the number of threads for decompression to limit memory
	 * footprint.
	 */
	nr_threads = num_online_cpus() - 1;
	nr_threads = clamp_val(nr_threads, 1, LZO_THREADS);

	page = vmalloc(sizeof(*page) * LZO_MAX_RD_PAGES);
	if (!page) {
		pr_err("Failed to allocate LZO page\n");
		ret = -ENOMEM;
		goto out_clean;
	}

	data = vmalloc(sizeof(*data) * nr_threads);
	if (!data) {
		pr_err("Failed to allocate LZO data\n");
		ret = -ENOMEM;
		goto out_clean;
	}
	for (thr = 0; thr < nr_threads; thr++)
		memset(&data[thr], 0, offsetof(struct dec_data, go));

	crc = kmalloc(sizeof(*crc), GFP_KERNEL);
	if (!crc) {
		pr_err("Failed to allocate crc\n");
		ret = -ENOMEM;
		goto out_clean;
	}
	memset(crc, 0, offsetof(struct crc_data, go));

	clean_pages_on_decompress = true;

	/*
	 * Start the decompression threads.
	 */
	for (thr = 0; thr < nr_threads; thr++) {
		init_waitqueue_head(&data[thr].go);
		init_waitqueue_head(&data[thr].done);

		data[thr].thr = kthread_run(lzo_decompress_threadfn,
					    &data[thr],
					    "image_decompress/%u", thr);
		if (IS_ERR(data[thr].thr)) {
			data[thr].thr = NULL;
			pr_err("Cannot start decompression threads\n");
			ret = -ENOMEM;
			goto out_clean;
		}
	}

	/*
	 * Start the CRC32 thread.
	 */
	init_waitqueue_head(&crc->go);
	init_waitqueue_head(&crc->done);

	handle->crc32 = 0;
	crc->crc32 = &handle->crc32;
	for (thr = 0; thr < nr_threads; thr++) {
		crc->unc[thr] = data[thr].unc;
		crc->unc_len[thr] = &data[thr].unc_len;
	}

	crc->thr = kthread_run(crc32_threadfn, crc, "image_crc32");
	if (IS_ERR(crc->thr)) {
		crc->thr = NULL;
		pr_err("Cannot start CRC32 thread\n");
		ret = -ENOMEM;
		goto out_clean;
	}

	/*
	 * Set the number of pages for read buffering.
	 * This is complete guesswork, because we'll only know the real
	 * picture once prepare_image() is called, which is much later on
	 * during the image load phase. We'll assume the worst case and
	 * say that none of the image pages are from high memory.
	 */
	if (low_free_pages() > snapshot_get_image_size())
		read_pages = (low_free_pages() - snapshot_get_image_size()) / 2;
	read_pages = clamp_val(read_pages, LZO_MIN_RD_PAGES, LZO_MAX_RD_PAGES);

	for (i = 0; i < read_pages; i++) {
		page[i] = (void *)__get_free_page(i < LZO_CMP_PAGES ?
						  GFP_NOIO | __GFP_HIGH :
						  GFP_NOIO | __GFP_NOWARN |
						  __GFP_NORETRY);

		if (!page[i]) {
			if (i < LZO_CMP_PAGES) {
				ring_size = i;
				pr_err("Failed to allocate LZO pages\n");
				ret = -ENOMEM;
				goto out_clean;
			} else {
				break;
			}
		}
	}
	want = ring_size = i;

	pr_info("Using %u thread(s) for decompression\n", nr_threads);
	pr_info("Loading and decompressing image data (%u pages)...\n",
		nr_to_read);
	m = nr_to_read / 10;
	if (!m)
		m = 1;
	nr_pages = 0;
	start = ktime_get();

Bojan Smojver081a9d02011-10-13 23:58:07 +02001290 ret = snapshot_write_next(snapshot);
1291 if (ret <= 0)
Bojan Smojverf996fc92010-09-09 23:06:23 +02001292 goto out_finish;
1293
Bojan Smojver081a9d02011-10-13 23:58:07 +02001294 for(;;) {
1295 for (i = 0; !eof && i < want; i++) {
Christoph Hellwig343df3c2015-05-19 09:23:23 +02001296 ret = swap_read_page(handle, page[ring], &hb);
Bojan Smojver081a9d02011-10-13 23:58:07 +02001297 if (ret) {
1298 /*
1299 * On real read error, finish. On end of data,
1300 * set EOF flag and just exit the read loop.
1301 */
1302 if (handle->cur &&
1303 handle->cur->entries[handle->k]) {
1304 goto out_finish;
1305 } else {
1306 eof = 1;
1307 break;
1308 }
1309 }
1310 if (++ring >= ring_size)
1311 ring = 0;
Bojan Smojverf996fc92010-09-09 23:06:23 +02001312 }
Bojan Smojver081a9d02011-10-13 23:58:07 +02001313 asked += i;
1314 want -= i;
Bojan Smojverf996fc92010-09-09 23:06:23 +02001315
Bojan Smojver081a9d02011-10-13 23:58:07 +02001316 /*
1317 * We are out of data, wait for some more.
1318 */
1319 if (!have) {
1320 if (!asked)
1321 break;
1322
Christoph Hellwig343df3c2015-05-19 09:23:23 +02001323 ret = hib_wait_io(&hb);
Bojan Smojver081a9d02011-10-13 23:58:07 +02001324 if (ret)
Bojan Smojverf996fc92010-09-09 23:06:23 +02001325 goto out_finish;
Bojan Smojver081a9d02011-10-13 23:58:07 +02001326 have += asked;
1327 asked = 0;
1328 if (eof)
1329 eof = 2;
Bojan Smojver9f339ca2010-11-25 23:41:39 +01001330 }
Bojan Smojverf996fc92010-09-09 23:06:23 +02001331
Bojan Smojver081a9d02011-10-13 23:58:07 +02001332 if (crc->run_threads) {
1333 wait_event(crc->done, atomic_read(&crc->stop));
1334 atomic_set(&crc->stop, 0);
1335 crc->run_threads = 0;
Bojan Smojverf996fc92010-09-09 23:06:23 +02001336 }
1337
		for (thr = 0; have && thr < nr_threads; thr++) {
			data[thr].cmp_len = *(size_t *)page[pg];
			if (unlikely(!data[thr].cmp_len ||
				     data[thr].cmp_len >
				     lzo1x_worst_compress(LZO_UNC_SIZE))) {
				pr_err("Invalid LZO compressed length\n");
				ret = -1;
				goto out_finish;
			}

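			/*
			 * A block may span several ring pages. If fewer
			 * pages are available than the block needs and no
			 * more data is coming (eof > 1), the image is
			 * truncated; otherwise wait for the next batch.
			 */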
			need = DIV_ROUND_UP(data[thr].cmp_len + LZO_HEADER,
					    PAGE_SIZE);
			if (need > have) {
				if (eof > 1) {
					ret = -1;
					goto out_finish;
				}
				break;
			}

			for (off = 0;
			     off < LZO_HEADER + data[thr].cmp_len;
			     off += PAGE_SIZE) {
				memcpy(data[thr].cmp + off,
				       page[pg], PAGE_SIZE);
				have--;
				want++;
				if (++pg >= ring_size)
					pg = 0;
			}

			atomic_set(&data[thr].ready, 1);
			wake_up(&data[thr].go);
		}

		/*
		 * Wait for more data while we are decompressing.
		 */
		if (have < LZO_CMP_PAGES && asked) {
			ret = hib_wait_io(&hb);
			if (ret)
				goto out_finish;
			have += asked;
			asked = 0;
			if (eof)
				eof = 2;
		}

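		/*
		 * Collect the results from the workers started above: each
		 * decompressed block is fed page by page into the snapshot
		 * via snapshot_write_next().
		 */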
		for (run_threads = thr, thr = 0; thr < run_threads; thr++) {
			wait_event(data[thr].done,
				   atomic_read(&data[thr].stop));
			atomic_set(&data[thr].stop, 0);

			ret = data[thr].ret;

			if (ret < 0) {
				pr_err("LZO decompression failed\n");
				goto out_finish;
			}

			if (unlikely(!data[thr].unc_len ||
				     data[thr].unc_len > LZO_UNC_SIZE ||
				     data[thr].unc_len & (PAGE_SIZE - 1))) {
				pr_err("Invalid LZO uncompressed length\n");
				ret = -1;
				goto out_finish;
			}

			for (off = 0;
			     off < data[thr].unc_len; off += PAGE_SIZE) {
				memcpy(data_of(*snapshot),
				       data[thr].unc + off, PAGE_SIZE);

				if (!(nr_pages % m))
					pr_info("Image loading progress: %3d%%\n",
						nr_pages / m * 10);
				nr_pages++;

				ret = snapshot_write_next(snapshot);
				if (ret <= 0) {
					crc->run_threads = thr + 1;
					atomic_set(&crc->ready, 1);
					wake_up(&crc->go);
					goto out_finish;
				}
			}
		}

		crc->run_threads = thr;
		atomic_set(&crc->ready, 1);
		wake_up(&crc->go);
	}

out_finish:
	if (crc->run_threads) {
		wait_event(crc->done, atomic_read(&crc->stop));
		atomic_set(&crc->stop, 0);
	}
	stop = ktime_get();
	if (!ret) {
		pr_info("Image loading done\n");
		snapshot_write_finalize(snapshot);
		if (!snapshot_image_loaded(snapshot))
			ret = -ENODATA;
		if (!ret) {
			if (swsusp_header->flags & SF_CRC32_MODE) {
				if (handle->crc32 != swsusp_header->crc32) {
					pr_err("Invalid image CRC32!\n");
					ret = -ENODATA;
				}
			}
		}
	}
	swsusp_show_speed(start, stop, nr_to_read, "Read");
out_clean:
	for (i = 0; i < ring_size; i++)
		free_page((unsigned long)page[i]);
	if (crc) {
		if (crc->thr)
			kthread_stop(crc->thr);
		kfree(crc);
	}
	if (data) {
		for (thr = 0; thr < nr_threads; thr++)
			if (data[thr].thr)
				kthread_stop(data[thr].thr);
		vfree(data);
	}
	vfree(page);

	return ret;
}

/**
 * swsusp_read - read the hibernation image.
 * @flags_p: flags passed by the "frozen" kernel in the image header are
 *	     returned through this memory location
 */

int swsusp_read(unsigned int *flags_p)
{
	int error;
	struct swap_map_handle handle;
	struct snapshot_handle snapshot;
	struct swsusp_info *header;

	memset(&snapshot, 0, sizeof(struct snapshot_handle));
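	/*
	 * snapshot_write_next() returns the size of the buffer it expects to
	 * be filled next; the first call sets up the page for the image
	 * header, so anything smaller than PAGE_SIZE here is an error.
	 */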
	error = snapshot_write_next(&snapshot);
	if (error < PAGE_SIZE)
		return error < 0 ? error : -EFAULT;
	header = (struct swsusp_info *)data_of(snapshot);
	error = get_swap_reader(&handle, flags_p);
	if (error)
		goto end;
	error = swap_read_page(&handle, header, NULL);
	if (!error) {
		error = (*flags_p & SF_NOCOMPRESS_MODE) ?
			load_image(&handle, &snapshot, header->pages - 1) :
			load_image_lzo(&handle, &snapshot, header->pages - 1);
	}
	swap_reader_finish(&handle);
end:
	if (!error)
		pr_debug("Image successfully loaded\n");
	else
		pr_debug("Error %d resuming\n", error);
	return error;
}

/**
 * swsusp_check - Check for swsusp signature in the resume device
 */

int swsusp_check(void)
{
	int error;

	hib_resume_bdev = blkdev_get_by_dev(swsusp_resume_device,
					    FMODE_READ, NULL);
	if (!IS_ERR(hib_resume_bdev)) {
		set_blocksize(hib_resume_bdev, PAGE_SIZE);
		clear_page(swsusp_header);
		error = hib_submit_io(REQ_OP_READ, 0,
					swsusp_resume_block,
					swsusp_header, NULL);
		if (error)
			goto put;

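		/*
		 * If the swsusp signature is present, restore the original
		 * swap signature right away, so the image cannot be picked
		 * up a second time.
		 */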
		if (!memcmp(HIBERNATE_SIG, swsusp_header->sig, 10)) {
			memcpy(swsusp_header->sig, swsusp_header->orig_sig, 10);
			/* Reset swap signature now */
			error = hib_submit_io(REQ_OP_WRITE, REQ_SYNC,
						swsusp_resume_block,
						swsusp_header, NULL);
		} else {
			error = -EINVAL;
		}

put:
		if (error)
			blkdev_put(hib_resume_bdev, FMODE_READ);
		else
			pr_debug("Image signature found, resuming\n");
	} else {
		error = PTR_ERR(hib_resume_bdev);
	}

	if (error)
		pr_debug("Image not found (code %d)\n", error);

	return error;
}

/**
 * swsusp_close - close swap device.
 */

void swsusp_close(fmode_t mode)
{
	if (IS_ERR(hib_resume_bdev)) {
		pr_debug("Image device not initialised\n");
		return;
	}

	blkdev_put(hib_resume_bdev, mode);
}

/**
 * swsusp_unmark - Unmark swsusp signature in the resume device
 */

#ifdef CONFIG_SUSPEND
int swsusp_unmark(void)
{
	int error;

	hib_submit_io(REQ_OP_READ, 0, swsusp_resume_block,
		      swsusp_header, NULL);
	if (!memcmp(HIBERNATE_SIG, swsusp_header->sig, 10)) {
		memcpy(swsusp_header->sig, swsusp_header->orig_sig, 10);
		error = hib_submit_io(REQ_OP_WRITE, REQ_SYNC,
					swsusp_resume_block,
					swsusp_header, NULL);
	} else {
		pr_err("Cannot find swsusp signature!\n");
		error = -ENODEV;
	}

	/*
	 * We just returned from suspend, we don't need the image any more.
	 */
	free_all_swap_pages(root_swap);

	return error;
}
#endif

static int swsusp_header_init(void)
{
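	/*
	 * This runs as a core_initcall(); failing to allocate a single page
	 * that early in boot is unrecoverable, hence the panic().
	 */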
	swsusp_header = (struct swsusp_header *)__get_free_page(GFP_KERNEL);
	if (!swsusp_header)
		panic("Could not allocate memory for swsusp_header\n");
	return 0;
}

core_initcall(swsusp_header_init);