/*
 * linux/kernel/power/snapshot.c
 *
 * This file provides system snapshot/restore functionality for swsusp.
 *
 * Copyright (C) 1998-2005 Pavel Machek <pavel@ucw.cz>
 * Copyright (C) 2006 Rafael J. Wysocki <rjw@sisk.pl>
 *
 * This file is released under the GPLv2.
 *
 */

#include <linux/version.h>
#include <linux/module.h>
#include <linux/mm.h>
#include <linux/suspend.h>
#include <linux/delay.h>
#include <linux/bitops.h>
#include <linux/spinlock.h>
#include <linux/kernel.h>
#include <linux/pm.h>
#include <linux/device.h>
#include <linux/init.h>
#include <linux/bootmem.h>
#include <linux/syscalls.h>
#include <linux/console.h>
#include <linux/highmem.h>
#include <linux/list.h>
#include <linux/slab.h>
#include <linux/compiler.h>
#include <linux/ktime.h>

#include <asm/uaccess.h>
#include <asm/mmu_context.h>
#include <asm/pgtable.h>
#include <asm/tlbflush.h>
#include <asm/io.h>

#include "power.h"

static int swsusp_page_is_free(struct page *);
static void swsusp_set_page_forbidden(struct page *);
static void swsusp_unset_page_forbidden(struct page *);

/*
 * Number of bytes to reserve for memory allocations made by device drivers
 * from their ->freeze() and ->freeze_noirq() callbacks so that they don't
 * cause image creation to fail (tunable via /sys/power/reserved_size).
 */
unsigned long reserved_size;

void __init hibernate_reserved_size_init(void)
{
	reserved_size = SPARE_PAGES * PAGE_SIZE;
}

/*
 * Preferred image size in bytes (tunable via /sys/power/image_size).
 * When it is set to N, swsusp will do its best to ensure the image
 * size will not exceed N bytes, but if that is impossible, it will
 * try to create the smallest image possible.
 */
unsigned long image_size;

void __init hibernate_image_size_init(void)
{
	image_size = ((totalram_pages * 2) / 5) * PAGE_SIZE;
}

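/*
 * Worked example (assuming 4 KiB pages): with 8 GiB of RAM,
 * totalram_pages is 2^21, so the default computed above is
 * (2^21 * 2 / 5) * 4096 bytes, i.e. roughly 3.2 GiB.  User space may
 * override this default by writing a byte count to
 * /sys/power/image_size, as the comment above notes.
 */
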
/* List of PBEs needed for restoring the pages that were allocated before
 * the suspend and included in the suspend image, but have also been
 * allocated by the "resume" kernel, so their contents cannot be written
 * directly to their "original" page frames.
 */
struct pbe *restore_pblist;

/* struct linked_page is used to build chains of pages */

#define LINKED_PAGE_DATA_SIZE	(PAGE_SIZE - sizeof(void *))

struct linked_page {
	struct linked_page *next;
	char data[LINKED_PAGE_DATA_SIZE];
} __packed;

/*
 * List of "safe" pages (ie. pages that were not used by the image kernel
 * before hibernation) that may be used as temporary storage for image kernel
 * memory contents.
 */
static struct linked_page *safe_pages_list;

/* Pointer to an auxiliary buffer (1 page) */
static void *buffer;

/**
 * @safe_needed - on resume, for storing the PBE list and the image,
 * we can only use memory pages that do not conflict with the pages
 * used before suspend.  The unsafe pages have PageNosaveFree set
 * and we count them using allocated_unsafe_pages.
 *
 * Each allocated image page is marked as PageNosave and PageNosaveFree
 * so that swsusp_free() can release it.
 */

#define PG_ANY		0
#define PG_SAFE		1
#define PG_UNSAFE_CLEAR	1
#define PG_UNSAFE_KEEP	0

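/*
 * PG_ANY and PG_SAFE are the values passed as the safe_needed argument
 * of get_image_page() and memory_bm_create() below; PG_UNSAFE_CLEAR and
 * PG_UNSAFE_KEEP are the values passed as the clear_nosave_free argument
 * of free_image_page() and memory_bm_free().
 */
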
static unsigned int allocated_unsafe_pages;

static void *get_image_page(gfp_t gfp_mask, int safe_needed)
{
	void *res;

	res = (void *)get_zeroed_page(gfp_mask);
	if (safe_needed)
		while (res && swsusp_page_is_free(virt_to_page(res))) {
			/* The page is unsafe, mark it for swsusp_free() */
			swsusp_set_page_forbidden(virt_to_page(res));
			allocated_unsafe_pages++;
			res = (void *)get_zeroed_page(gfp_mask);
		}
	if (res) {
		swsusp_set_page_forbidden(virt_to_page(res));
		swsusp_set_page_free(virt_to_page(res));
	}
	return res;
}

static void *__get_safe_page(gfp_t gfp_mask)
{
	if (safe_pages_list) {
		void *ret = safe_pages_list;

		safe_pages_list = safe_pages_list->next;
		memset(ret, 0, PAGE_SIZE);
		return ret;
	}
	return get_image_page(gfp_mask, PG_SAFE);
}

unsigned long get_safe_page(gfp_t gfp_mask)
{
	return (unsigned long)__get_safe_page(gfp_mask);
}

static struct page *alloc_image_page(gfp_t gfp_mask)
{
	struct page *page;

	page = alloc_page(gfp_mask);
	if (page) {
		swsusp_set_page_forbidden(page);
		swsusp_set_page_free(page);
	}
	return page;
}

/**
 * free_image_page - free the page at @addr, which was allocated with
 * get_image_page() (the page flags set by it are cleared here)
 */

static inline void free_image_page(void *addr, int clear_nosave_free)
{
	struct page *page;

	BUG_ON(!virt_addr_valid(addr));

	page = virt_to_page(addr);

	swsusp_unset_page_forbidden(page);
	if (clear_nosave_free)
		swsusp_unset_page_free(page);

	__free_page(page);
}

static inline void
free_list_of_pages(struct linked_page *list, int clear_page_nosave)
{
	while (list) {
		struct linked_page *lp = list->next;

		free_image_page(list, clear_page_nosave);
		list = lp;
	}
}

/**
 * struct chain_allocator is used for allocating small objects out of
 * a linked list of pages called 'the chain'.
 *
 * The chain grows each time there is no room for a new object in
 * the current page.  The allocated objects cannot be freed individually.
 * It is only possible to free them all at once, by freeing the entire
 * chain.
 *
 * NOTE: The chain allocator may be inefficient if the allocated objects
 * are not much smaller than PAGE_SIZE.
 */

struct chain_allocator {
	struct linked_page *chain;	/* the chain */
	unsigned int used_space;	/* total size of objects allocated out
					 * of the current page
					 */
	gfp_t gfp_mask;		/* mask for allocating pages */
	int safe_needed;	/* if set, only "safe" pages are allocated */
};

static void
chain_init(struct chain_allocator *ca, gfp_t gfp_mask, int safe_needed)
{
	ca->chain = NULL;
	ca->used_space = LINKED_PAGE_DATA_SIZE;
	ca->gfp_mask = gfp_mask;
	ca->safe_needed = safe_needed;
}

static void *chain_alloc(struct chain_allocator *ca, unsigned int size)
{
	void *ret;

	if (LINKED_PAGE_DATA_SIZE - ca->used_space < size) {
		struct linked_page *lp;

		lp = ca->safe_needed ? __get_safe_page(ca->gfp_mask) :
					get_image_page(ca->gfp_mask, PG_ANY);
		if (!lp)
			return NULL;

		lp->next = ca->chain;
		ca->chain = lp;
		ca->used_space = 0;
	}
	ret = ca->chain->data + ca->used_space;
	ca->used_space += size;
	return ret;
}

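/*
 * Illustrative sketch (not part of the original file): a typical chain
 * allocator round trip, with "struct foo" standing in for a hypothetical
 * object type much smaller than PAGE_SIZE, as the NOTE above recommends:
 *
 *	struct chain_allocator ca;
 *	struct foo *obj;
 *
 *	chain_init(&ca, GFP_KERNEL, 0);
 *	obj = chain_alloc(&ca, sizeof(*obj));	(may return NULL)
 *	...
 *	free_list_of_pages(ca.chain, PG_UNSAFE_CLEAR);
 */
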
/**
 * Data types related to memory bitmaps.
 *
 * Memory bitmap is a structure consisting of many linked lists of
 * objects.  The main list's elements are of type struct zone_bitmap
 * and each of them corresponds to one zone.  For each zone bitmap
 * object there is a list of objects of type struct bm_block that
 * represent each block of the bitmap in which information is stored.
 *
 * struct memory_bitmap contains a pointer to the main list of zone
 * bitmap objects, a struct bm_position used for browsing the bitmap,
 * and a pointer to the list of pages used for allocating all of the
 * zone bitmap objects and bitmap block objects.
 *
 * NOTE: It has to be possible to lay out the bitmap in memory
 * using only allocations of order 0.  Additionally, the bitmap is
 * designed to work with an arbitrary number of zones (this is over the
 * top for now, but let's avoid making unnecessary assumptions ;-).
 *
 * struct zone_bitmap contains a pointer to a list of bitmap block
 * objects and a pointer to the bitmap block object that has been
 * most recently used for setting bits.  Additionally, it contains the
 * pfns that correspond to the start and end of the represented zone.
 *
 * struct bm_block contains a pointer to the memory page in which
 * information is stored (in the form of a block of bitmap).
 * It also contains the pfns that correspond to the start and end of
 * the represented memory area.
 *
 * The memory bitmap is organized as a radix tree to guarantee fast random
 * access to the bits.  There is one radix tree for each zone (as returned
 * from create_mem_extents).
 *
 * One radix tree is represented by one struct mem_zone_bm_rtree.  There are
 * two linked lists for the nodes of the tree, one for the inner nodes and
 * one for the leaf nodes.  The linked leaf nodes are used for fast linear
 * access of the memory bitmap.
 *
 * The struct rtree_node represents one node of the radix tree.
 */

#define BM_END_OF_MAP	(~0UL)

#define BM_BITS_PER_BLOCK	(PAGE_SIZE * BITS_PER_BYTE)
#define BM_BLOCK_SHIFT		(PAGE_SHIFT + 3)
#define BM_BLOCK_MASK		((1UL << BM_BLOCK_SHIFT) - 1)

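/*
 * Worked example (assuming 4 KiB pages, i.e. PAGE_SHIFT == 12):
 * BM_BITS_PER_BLOCK is 4096 * 8 == 32768, so a single bitmap block
 * tracks 32768 page frames, i.e. 128 MiB of physical memory, and the
 * block number of a pfn within its zone is (pfn - start_pfn) >> 15.
 */
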
/*
 * struct rtree_node is a wrapper struct to link the nodes
 * of the rtree together for easy linear iteration over
 * bits and easy freeing
 */
struct rtree_node {
	struct list_head list;
	unsigned long *data;
};

/*
 * struct mem_zone_bm_rtree represents a bitmap used for one
 * populated memory zone.
 */
struct mem_zone_bm_rtree {
	struct list_head list;		/* Link Zones together         */
	struct list_head nodes;		/* Radix Tree inner nodes      */
	struct list_head leaves;	/* Radix Tree leaves           */
	unsigned long start_pfn;	/* Zone start page frame       */
	unsigned long end_pfn;		/* Zone end page frame + 1     */
	struct rtree_node *rtree;	/* Radix Tree Root             */
	int levels;			/* Number of Radix Tree Levels */
	unsigned int blocks;		/* Number of Bitmap Blocks     */
};

/* struct bm_position is used for browsing memory bitmaps */

struct bm_position {
	struct mem_zone_bm_rtree *zone;
	struct rtree_node *node;
	unsigned long node_pfn;
	int node_bit;
};

struct memory_bitmap {
	struct list_head zones;
	struct linked_page *p_list;	/* list of pages used to store zone
					 * bitmap objects and bitmap block
					 * objects
					 */
	struct bm_position cur;	/* most recently used bit position */
};

/* Functions that operate on memory bitmaps */

#define BM_ENTRIES_PER_LEVEL	(PAGE_SIZE / sizeof(unsigned long))
#if BITS_PER_LONG == 32
#define BM_RTREE_LEVEL_SHIFT	(PAGE_SHIFT - 2)
#else
#define BM_RTREE_LEVEL_SHIFT	(PAGE_SHIFT - 3)
#endif
#define BM_RTREE_LEVEL_MASK	((1UL << BM_RTREE_LEVEL_SHIFT) - 1)

/*
 * alloc_rtree_node - Allocate a new node and add it to the radix tree.
 *
 * This function is used to allocate inner nodes as well as the
 * leaf nodes of the radix tree.  It also adds the node to the
 * corresponding linked list passed in by the *list parameter.
 */
static struct rtree_node *alloc_rtree_node(gfp_t gfp_mask, int safe_needed,
					   struct chain_allocator *ca,
					   struct list_head *list)
{
	struct rtree_node *node;

	node = chain_alloc(ca, sizeof(struct rtree_node));
	if (!node)
		return NULL;

	node->data = get_image_page(gfp_mask, safe_needed);
	if (!node->data)
		return NULL;

	list_add_tail(&node->list, list);

	return node;
}

/*
 * add_rtree_block - Add a new leaf node to the radix tree
 *
 * The leaf nodes need to be allocated in order to keep the leaves
 * linked list in order.  This is guaranteed by the zone->blocks
 * counter.
 */
static int add_rtree_block(struct mem_zone_bm_rtree *zone, gfp_t gfp_mask,
			   int safe_needed, struct chain_allocator *ca)
{
	struct rtree_node *node, *block, **dst;
	unsigned int levels_needed, block_nr;
	int i;

	block_nr = zone->blocks;
	levels_needed = 0;

	/* How many levels do we need for this block nr? */
	while (block_nr) {
		levels_needed += 1;
		block_nr >>= BM_RTREE_LEVEL_SHIFT;
	}

	/* Make sure the rtree has enough levels */
	for (i = zone->levels; i < levels_needed; i++) {
		node = alloc_rtree_node(gfp_mask, safe_needed, ca,
					&zone->nodes);
		if (!node)
			return -ENOMEM;

		node->data[0] = (unsigned long)zone->rtree;
		zone->rtree = node;
		zone->levels += 1;
	}

	/* Allocate new block */
	block = alloc_rtree_node(gfp_mask, safe_needed, ca, &zone->leaves);
	if (!block)
		return -ENOMEM;

	/* Now walk the rtree to insert the block */
	node = zone->rtree;
	dst = &zone->rtree;
	block_nr = zone->blocks;
	for (i = zone->levels; i > 0; i--) {
		int index;

		if (!node) {
			node = alloc_rtree_node(gfp_mask, safe_needed, ca,
						&zone->nodes);
			if (!node)
				return -ENOMEM;
			*dst = node;
		}

		index = block_nr >> ((i - 1) * BM_RTREE_LEVEL_SHIFT);
		index &= BM_RTREE_LEVEL_MASK;
		dst = (struct rtree_node **)&((*dst)->data[index]);
		node = *dst;
	}

	zone->blocks += 1;
	*dst = block;

	return 0;
}

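/*
 * Worked example of the index arithmetic above (assuming a 64-bit
 * kernel with 4 KiB pages): BM_RTREE_LEVEL_SHIFT is 12 - 3 == 9, so
 * each rtree node holds 512 slots.  Inserting block number 600 needs
 * two levels (600 >> 9 == 1), and the walk selects index 1 at the top
 * level and 600 & 511 == 88 at the leaf level, since 600 == 512 + 88.
 */
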
static void free_zone_bm_rtree(struct mem_zone_bm_rtree *zone,
			       int clear_nosave_free);

/*
 * create_zone_bm_rtree - create a radix tree for one zone
 *
 * Allocates the mem_zone_bm_rtree structure and initializes it.
 * This function also allocates and builds the radix tree for the
 * zone.
 */
static struct mem_zone_bm_rtree *
create_zone_bm_rtree(gfp_t gfp_mask, int safe_needed,
		     struct chain_allocator *ca,
		     unsigned long start, unsigned long end)
{
	struct mem_zone_bm_rtree *zone;
	unsigned int i, nr_blocks;
	unsigned long pages;

	pages = end - start;
	zone = chain_alloc(ca, sizeof(struct mem_zone_bm_rtree));
	if (!zone)
		return NULL;

	INIT_LIST_HEAD(&zone->nodes);
	INIT_LIST_HEAD(&zone->leaves);
	zone->start_pfn = start;
	zone->end_pfn = end;
	nr_blocks = DIV_ROUND_UP(pages, BM_BITS_PER_BLOCK);

	for (i = 0; i < nr_blocks; i++) {
		if (add_rtree_block(zone, gfp_mask, safe_needed, ca)) {
			free_zone_bm_rtree(zone, PG_UNSAFE_CLEAR);
			return NULL;
		}
	}

	return zone;
}

/*
 * free_zone_bm_rtree - Free the memory of the radix tree
 *
 * Free all node pages of the radix tree.  The mem_zone_bm_rtree
 * structure itself is not freed here nor are the rtree_node
 * structs.
 */
static void free_zone_bm_rtree(struct mem_zone_bm_rtree *zone,
			       int clear_nosave_free)
{
	struct rtree_node *node;

	list_for_each_entry(node, &zone->nodes, list)
		free_image_page(node->data, clear_nosave_free);

	list_for_each_entry(node, &zone->leaves, list)
		free_image_page(node->data, clear_nosave_free);
}

static void memory_bm_position_reset(struct memory_bitmap *bm)
{
	bm->cur.zone = list_entry(bm->zones.next, struct mem_zone_bm_rtree,
				  list);
	bm->cur.node = list_entry(bm->cur.zone->leaves.next,
				  struct rtree_node, list);
	bm->cur.node_pfn = 0;
	bm->cur.node_bit = 0;
}

static void memory_bm_free(struct memory_bitmap *bm, int clear_nosave_free);

struct mem_extent {
	struct list_head hook;
	unsigned long start;
	unsigned long end;
};

/**
 * free_mem_extents - free a list of memory extents
 * @list - list of extents to empty
 */
static void free_mem_extents(struct list_head *list)
{
	struct mem_extent *ext, *aux;

	list_for_each_entry_safe(ext, aux, list, hook) {
		list_del(&ext->hook);
		kfree(ext);
	}
}

/**
 * create_mem_extents - create a list of memory extents representing
 *                      contiguous ranges of PFNs
 * @list - list to put the extents into
 * @gfp_mask - mask to use for memory allocations
 */
static int create_mem_extents(struct list_head *list, gfp_t gfp_mask)
{
	struct zone *zone;

	INIT_LIST_HEAD(list);

	for_each_populated_zone(zone) {
		unsigned long zone_start, zone_end;
		struct mem_extent *ext, *cur, *aux;

		zone_start = zone->zone_start_pfn;
		zone_end = zone_end_pfn(zone);

		list_for_each_entry(ext, list, hook)
			if (zone_start <= ext->end)
				break;

		if (&ext->hook == list || zone_end < ext->start) {
			/* New extent is necessary */
			struct mem_extent *new_ext;

			new_ext = kzalloc(sizeof(struct mem_extent), gfp_mask);
			if (!new_ext) {
				free_mem_extents(list);
				return -ENOMEM;
			}
			new_ext->start = zone_start;
			new_ext->end = zone_end;
			list_add_tail(&new_ext->hook, &ext->hook);
			continue;
		}

		/* Merge this zone's range of PFNs with the existing one */
		if (zone_start < ext->start)
			ext->start = zone_start;
		if (zone_end > ext->end)
			ext->end = zone_end;

		/* More merging may be possible */
		cur = ext;
		list_for_each_entry_safe_continue(cur, aux, list, hook) {
			if (zone_end < cur->start)
				break;
			if (zone_end < cur->end)
				ext->end = cur->end;
			list_del(&cur->hook);
			kfree(cur);
		}
	}

	return 0;
}

/**
 * memory_bm_create - allocate memory for a memory bitmap
 */
static int
memory_bm_create(struct memory_bitmap *bm, gfp_t gfp_mask, int safe_needed)
{
	struct chain_allocator ca;
	struct list_head mem_extents;
	struct mem_extent *ext;
	int error;

	chain_init(&ca, gfp_mask, safe_needed);
	INIT_LIST_HEAD(&bm->zones);

	error = create_mem_extents(&mem_extents, gfp_mask);
	if (error)
		return error;

	list_for_each_entry(ext, &mem_extents, hook) {
		struct mem_zone_bm_rtree *zone;

		zone = create_zone_bm_rtree(gfp_mask, safe_needed, &ca,
					    ext->start, ext->end);
		if (!zone) {
			error = -ENOMEM;
			goto Error;
		}
		list_add_tail(&zone->list, &bm->zones);
	}

	bm->p_list = ca.chain;
	memory_bm_position_reset(bm);
 Exit:
	free_mem_extents(&mem_extents);
	return error;

 Error:
	bm->p_list = ca.chain;
	memory_bm_free(bm, PG_UNSAFE_CLEAR);
	goto Exit;
}

/**
 * memory_bm_free - free memory occupied by the memory bitmap @bm
 */
static void memory_bm_free(struct memory_bitmap *bm, int clear_nosave_free)
{
	struct mem_zone_bm_rtree *zone;

	list_for_each_entry(zone, &bm->zones, list)
		free_zone_bm_rtree(zone, clear_nosave_free);

	free_list_of_pages(bm->p_list, clear_nosave_free);

	INIT_LIST_HEAD(&bm->zones);
}

/**
 * memory_bm_find_bit - Find the bit for pfn in the memory
 *                      bitmap
 *
 * Find the bit in the bitmap @bm that corresponds to the given pfn.
 * The cur.zone, cur.node and cur.node_pfn members of @bm are updated.
 * It walks the radix tree to find the page which contains the bit for
 * pfn and returns the page address in *addr and the bit number in
 * *bit_nr.
 */
static int memory_bm_find_bit(struct memory_bitmap *bm, unsigned long pfn,
			      void **addr, unsigned int *bit_nr)
{
	struct mem_zone_bm_rtree *curr, *zone;
	struct rtree_node *node;
	int i, block_nr;

	zone = bm->cur.zone;

	if (pfn >= zone->start_pfn && pfn < zone->end_pfn)
		goto zone_found;

	zone = NULL;

	/* Find the right zone */
	list_for_each_entry(curr, &bm->zones, list) {
		if (pfn >= curr->start_pfn && pfn < curr->end_pfn) {
			zone = curr;
			break;
		}
	}

	if (!zone)
		return -EFAULT;

zone_found:
	/*
	 * We have a zone.  Now walk the radix tree to find the leaf
	 * node for our pfn.
	 */

	node = bm->cur.node;
	if (((pfn - zone->start_pfn) & ~BM_BLOCK_MASK) == bm->cur.node_pfn)
		goto node_found;

	node = zone->rtree;
	block_nr = (pfn - zone->start_pfn) >> BM_BLOCK_SHIFT;

	for (i = zone->levels; i > 0; i--) {
		int index;

		index = block_nr >> ((i - 1) * BM_RTREE_LEVEL_SHIFT);
		index &= BM_RTREE_LEVEL_MASK;
		BUG_ON(node->data[index] == 0);
		node = (struct rtree_node *)node->data[index];
	}

node_found:
	/* Update last position */
	bm->cur.zone = zone;
	bm->cur.node = node;
	bm->cur.node_pfn = (pfn - zone->start_pfn) & ~BM_BLOCK_MASK;

	/* Set return values */
	*addr = node->data;
	*bit_nr = (pfn - zone->start_pfn) & BM_BLOCK_MASK;

	return 0;
}

static void memory_bm_set_bit(struct memory_bitmap *bm, unsigned long pfn)
{
	void *addr;
	unsigned int bit;
	int error;

	error = memory_bm_find_bit(bm, pfn, &addr, &bit);
	BUG_ON(error);
	set_bit(bit, addr);
}

static int mem_bm_set_bit_check(struct memory_bitmap *bm, unsigned long pfn)
{
	void *addr;
	unsigned int bit;
	int error;

	error = memory_bm_find_bit(bm, pfn, &addr, &bit);
	if (!error)
		set_bit(bit, addr);

	return error;
}

static void memory_bm_clear_bit(struct memory_bitmap *bm, unsigned long pfn)
{
	void *addr;
	unsigned int bit;
	int error;

	error = memory_bm_find_bit(bm, pfn, &addr, &bit);
	BUG_ON(error);
	clear_bit(bit, addr);
}

static void memory_bm_clear_current(struct memory_bitmap *bm)
{
	int bit;

	bit = max(bm->cur.node_bit - 1, 0);
	clear_bit(bit, bm->cur.node->data);
}

static int memory_bm_test_bit(struct memory_bitmap *bm, unsigned long pfn)
{
	void *addr;
	unsigned int bit;
	int error;

	error = memory_bm_find_bit(bm, pfn, &addr, &bit);
	BUG_ON(error);
	return test_bit(bit, addr);
}

static bool memory_bm_pfn_present(struct memory_bitmap *bm, unsigned long pfn)
{
	void *addr;
	unsigned int bit;

	return !memory_bm_find_bit(bm, pfn, &addr, &bit);
}

/*
 * rtree_next_node - Jumps to the next leaf node
 *
 * Sets the position to the beginning of the next node in the
 * memory bitmap.  This is either the next node in the current
 * zone's radix tree or the first node in the radix tree of the
 * next zone.
 *
 * Returns true if there is a next node, false otherwise.
 */
static bool rtree_next_node(struct memory_bitmap *bm)
{
	bm->cur.node = list_entry(bm->cur.node->list.next,
				  struct rtree_node, list);
	if (&bm->cur.node->list != &bm->cur.zone->leaves) {
		bm->cur.node_pfn += BM_BITS_PER_BLOCK;
		bm->cur.node_bit = 0;
		touch_softlockup_watchdog();
		return true;
	}

	/* No more nodes, go to the next zone */
	bm->cur.zone = list_entry(bm->cur.zone->list.next,
				  struct mem_zone_bm_rtree, list);
	if (&bm->cur.zone->list != &bm->zones) {
		bm->cur.node = list_entry(bm->cur.zone->leaves.next,
					  struct rtree_node, list);
		bm->cur.node_pfn = 0;
		bm->cur.node_bit = 0;
		return true;
	}

	/* No more zones */
	return false;
}

/**
 * memory_bm_next_pfn - Find the next set bit in the bitmap @bm
 *
 * Starting from the last returned position this function searches
 * for the next set bit in the memory bitmap and returns its
 * number.  If no more bits are set, BM_END_OF_MAP is returned.
 *
 * It is required to run memory_bm_position_reset() before the
 * first call to this function.
 */
static unsigned long memory_bm_next_pfn(struct memory_bitmap *bm)
{
	unsigned long bits, pfn, pages;
	int bit;

	do {
		pages = bm->cur.zone->end_pfn - bm->cur.zone->start_pfn;
		bits = min(pages - bm->cur.node_pfn, BM_BITS_PER_BLOCK);
		bit = find_next_bit(bm->cur.node->data, bits,
				    bm->cur.node_bit);
		if (bit < bits) {
			pfn = bm->cur.zone->start_pfn + bm->cur.node_pfn + bit;
			bm->cur.node_bit = bit + 1;
			return pfn;
		}
	} while (rtree_next_node(bm));

	return BM_END_OF_MAP;
}

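/*
 * Illustrative sketch (not part of the original file): the canonical
 * iteration pattern over all set bits, as used by swsusp_free() below:
 *
 *	unsigned long pfn;
 *
 *	memory_bm_position_reset(bm);
 *	while ((pfn = memory_bm_next_pfn(bm)) != BM_END_OF_MAP)
 *		handle_pfn(pfn);	(hypothetical consumer)
 */
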
/**
 * This structure represents a range of page frames the contents of which
 * should not be saved during the suspend.
 */

struct nosave_region {
	struct list_head list;
	unsigned long start_pfn;
	unsigned long end_pfn;
};

static LIST_HEAD(nosave_regions);

/**
 * register_nosave_region - register a range of page frames the contents
 * of which should not be saved during the suspend (to be used in the early
 * initialization code)
 */

void __init
__register_nosave_region(unsigned long start_pfn, unsigned long end_pfn,
			 int use_kmalloc)
{
	struct nosave_region *region;

	if (start_pfn >= end_pfn)
		return;

	if (!list_empty(&nosave_regions)) {
		/* Try to extend the previous region (they should be sorted) */
		region = list_entry(nosave_regions.prev,
				    struct nosave_region, list);
		if (region->end_pfn == start_pfn) {
			region->end_pfn = end_pfn;
			goto Report;
		}
	}
	if (use_kmalloc) {
		/* During init, this shouldn't fail */
		region = kmalloc(sizeof(struct nosave_region), GFP_KERNEL);
		BUG_ON(!region);
	} else
		/* This allocation cannot fail */
		region = memblock_virt_alloc(sizeof(struct nosave_region), 0);
	region->start_pfn = start_pfn;
	region->end_pfn = end_pfn;
	list_add_tail(&region->list, &nosave_regions);
 Report:
	printk(KERN_INFO "PM: Registered nosave memory: [mem %#010llx-%#010llx]\n",
		(unsigned long long) start_pfn << PAGE_SHIFT,
		((unsigned long long) end_pfn << PAGE_SHIFT) - 1);
}

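/*
 * Illustrative sketch (not part of the original file): early platform
 * code would typically mark a firmware-owned physical range as
 * not-to-be-saved like this (the addresses are made up):
 *
 *	register_nosave_region(PFN_DOWN(0x000a0000),
 *			       PFN_UP(0x00100000));
 */
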
/*
 * Set bits in this map correspond to the page frames the contents of which
 * should not be saved during the suspend.
 */
static struct memory_bitmap *forbidden_pages_map;

/* Set bits in this map correspond to free page frames. */
static struct memory_bitmap *free_pages_map;

/*
 * Each page frame allocated for creating the image is marked by setting the
 * corresponding bits in forbidden_pages_map and free_pages_map simultaneously.
 */

void swsusp_set_page_free(struct page *page)
{
	if (free_pages_map)
		memory_bm_set_bit(free_pages_map, page_to_pfn(page));
}

static int swsusp_page_is_free(struct page *page)
{
	return free_pages_map ?
		memory_bm_test_bit(free_pages_map, page_to_pfn(page)) : 0;
}

void swsusp_unset_page_free(struct page *page)
{
	if (free_pages_map)
		memory_bm_clear_bit(free_pages_map, page_to_pfn(page));
}

static void swsusp_set_page_forbidden(struct page *page)
{
	if (forbidden_pages_map)
		memory_bm_set_bit(forbidden_pages_map, page_to_pfn(page));
}

int swsusp_page_is_forbidden(struct page *page)
{
	return forbidden_pages_map ?
		memory_bm_test_bit(forbidden_pages_map, page_to_pfn(page)) : 0;
}

static void swsusp_unset_page_forbidden(struct page *page)
{
	if (forbidden_pages_map)
		memory_bm_clear_bit(forbidden_pages_map, page_to_pfn(page));
}

/**
 * mark_nosave_pages - set bits corresponding to the page frames the
 * contents of which should not be saved in a given bitmap.
 */

static void mark_nosave_pages(struct memory_bitmap *bm)
{
	struct nosave_region *region;

	if (list_empty(&nosave_regions))
		return;

	list_for_each_entry(region, &nosave_regions, list) {
		unsigned long pfn;

		pr_debug("PM: Marking nosave pages: [mem %#010llx-%#010llx]\n",
			 (unsigned long long) region->start_pfn << PAGE_SHIFT,
			 ((unsigned long long) region->end_pfn << PAGE_SHIFT)
				- 1);

		for (pfn = region->start_pfn; pfn < region->end_pfn; pfn++)
			if (pfn_valid(pfn)) {
				/*
				 * It is safe to ignore the result of
				 * mem_bm_set_bit_check() here, since we won't
				 * touch the PFNs for which the error is
				 * returned anyway.
				 */
				mem_bm_set_bit_check(bm, pfn);
			}
	}
}

/**
 * create_basic_memory_bitmaps - create bitmaps needed for marking page
 * frames that should not be saved and free page frames.  The pointers
 * forbidden_pages_map and free_pages_map are only modified if everything
 * goes well, because we don't want the bits to be used before both bitmaps
 * are set up.
 */

int create_basic_memory_bitmaps(void)
{
	struct memory_bitmap *bm1, *bm2;
	int error = 0;

	if (forbidden_pages_map && free_pages_map)
		return 0;
	else
		BUG_ON(forbidden_pages_map || free_pages_map);

	bm1 = kzalloc(sizeof(struct memory_bitmap), GFP_KERNEL);
	if (!bm1)
		return -ENOMEM;

	error = memory_bm_create(bm1, GFP_KERNEL, PG_ANY);
	if (error)
		goto Free_first_object;

	bm2 = kzalloc(sizeof(struct memory_bitmap), GFP_KERNEL);
	if (!bm2)
		goto Free_first_bitmap;

	error = memory_bm_create(bm2, GFP_KERNEL, PG_ANY);
	if (error)
		goto Free_second_object;

	forbidden_pages_map = bm1;
	free_pages_map = bm2;
	mark_nosave_pages(forbidden_pages_map);

	pr_debug("PM: Basic memory bitmaps created\n");

	return 0;

 Free_second_object:
	kfree(bm2);
 Free_first_bitmap:
	memory_bm_free(bm1, PG_UNSAFE_CLEAR);
 Free_first_object:
	kfree(bm1);
	return -ENOMEM;
}

/**
 * free_basic_memory_bitmaps - free memory bitmaps allocated by
 * create_basic_memory_bitmaps().  The auxiliary pointers are necessary
 * so that the bitmaps themselves are not referred to while they are being
 * freed.
 */

void free_basic_memory_bitmaps(void)
{
	struct memory_bitmap *bm1, *bm2;

	if (WARN_ON(!(forbidden_pages_map && free_pages_map)))
		return;

	bm1 = forbidden_pages_map;
	bm2 = free_pages_map;
	forbidden_pages_map = NULL;
	free_pages_map = NULL;
	memory_bm_free(bm1, PG_UNSAFE_CLEAR);
	kfree(bm1);
	memory_bm_free(bm2, PG_UNSAFE_CLEAR);
	kfree(bm2);

	pr_debug("PM: Basic memory bitmaps freed\n");
}

/**
 * snapshot_additional_pages - estimate the number of additional pages
 * that will be needed for setting up the suspend image data structures
 * for given zone (usually the returned value is greater than the exact
 * number)
 */

unsigned int snapshot_additional_pages(struct zone *zone)
{
	unsigned int rtree, nodes;

	rtree = nodes = DIV_ROUND_UP(zone->spanned_pages, BM_BITS_PER_BLOCK);
	rtree += DIV_ROUND_UP(rtree * sizeof(struct rtree_node),
			      LINKED_PAGE_DATA_SIZE);
	while (nodes > 1) {
		nodes = DIV_ROUND_UP(nodes, BM_ENTRIES_PER_LEVEL);
		rtree += nodes;
	}

	return 2 * rtree;
}

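/*
 * Worked example (assuming a 64-bit kernel with 4 KiB pages): for a
 * zone spanning 4 GiB, i.e. 2^20 page frames, the estimate above is
 * 2^20 / 32768 == 32 leaf pages, plus one chain page holding the 32
 * rtree_node structs, plus one inner-node page from the while loop:
 * 34 pages per bitmap, so 2 * 34 == 68 pages are returned (the factor
 * of two covers the two bitmaps used while creating the image).
 */
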
#ifdef CONFIG_HIGHMEM
/**
 * count_free_highmem_pages - compute the total number of free highmem
 * pages, system-wide.
 */

static unsigned int count_free_highmem_pages(void)
{
	struct zone *zone;
	unsigned int cnt = 0;

	for_each_populated_zone(zone)
		if (is_highmem(zone))
			cnt += zone_page_state(zone, NR_FREE_PAGES);

	return cnt;
}

/**
 * saveable_highmem_page - Determine whether a highmem page should be
 * included in the suspend image.
 *
 * We should save the page if it isn't Nosave or NosaveFree, isn't
 * Reserved, and isn't part of a free chunk of pages.
 */
static struct page *saveable_highmem_page(struct zone *zone, unsigned long pfn)
{
	struct page *page;

	if (!pfn_valid(pfn))
		return NULL;

	page = pfn_to_page(pfn);
	if (page_zone(page) != zone)
		return NULL;

	BUG_ON(!PageHighMem(page));

	if (swsusp_page_is_forbidden(page) || swsusp_page_is_free(page) ||
	    PageReserved(page))
		return NULL;

	if (page_is_guard(page))
		return NULL;

	return page;
}

/**
 * count_highmem_pages - compute the total number of saveable highmem
 * pages.
 */

static unsigned int count_highmem_pages(void)
{
	struct zone *zone;
	unsigned int n = 0;

	for_each_populated_zone(zone) {
		unsigned long pfn, max_zone_pfn;

		if (!is_highmem(zone))
			continue;

		mark_free_pages(zone);
		max_zone_pfn = zone_end_pfn(zone);
		for (pfn = zone->zone_start_pfn; pfn < max_zone_pfn; pfn++)
			if (saveable_highmem_page(zone, pfn))
				n++;
	}
	return n;
}
#else
static inline void *saveable_highmem_page(struct zone *z, unsigned long p)
{
	return NULL;
}
#endif /* CONFIG_HIGHMEM */

/**
 * saveable_page - Determine whether a non-highmem page should be included
 * in the suspend image.
 *
 * We should save the page if it isn't Nosave, isn't in the range of
 * pages statically defined as 'unsaveable', and isn't part of a free
 * chunk of pages.
 */
static struct page *saveable_page(struct zone *zone, unsigned long pfn)
{
	struct page *page;

	if (!pfn_valid(pfn))
		return NULL;

	page = pfn_to_page(pfn);
	if (page_zone(page) != zone)
		return NULL;

	BUG_ON(PageHighMem(page));

	if (swsusp_page_is_forbidden(page) || swsusp_page_is_free(page))
		return NULL;

	if (PageReserved(page)
	    && (!kernel_page_present(page) || pfn_is_nosave(pfn)))
		return NULL;

	if (page_is_guard(page))
		return NULL;

	return page;
}

/**
 * count_data_pages - compute the total number of saveable non-highmem
 * pages.
 */

static unsigned int count_data_pages(void)
{
	struct zone *zone;
	unsigned long pfn, max_zone_pfn;
	unsigned int n = 0;

	for_each_populated_zone(zone) {
		if (is_highmem(zone))
			continue;

		mark_free_pages(zone);
		max_zone_pfn = zone_end_pfn(zone);
		for (pfn = zone->zone_start_pfn; pfn < max_zone_pfn; pfn++)
			if (saveable_page(zone, pfn))
				n++;
	}
	return n;
}

/* This is needed, because copy_page and memcpy are not usable for copying
 * task structs.
 */
static inline void do_copy_page(long *dst, long *src)
{
	int n;

	for (n = PAGE_SIZE / sizeof(long); n; n--)
		*dst++ = *src++;
}


/**
 * safe_copy_page - copy the page, checking first whether it is marked as
 * present in the kernel page tables and mapping it temporarily if it is
 * not (the page is always present if CONFIG_DEBUG_PAGEALLOC is not set,
 * in which case kernel_page_present() always returns 'true').
 */
static void safe_copy_page(void *dst, struct page *s_page)
{
	if (kernel_page_present(s_page)) {
		do_copy_page(dst, page_address(s_page));
	} else {
		kernel_map_pages(s_page, 1, 1);
		do_copy_page(dst, page_address(s_page));
		kernel_map_pages(s_page, 1, 0);
	}
}


#ifdef CONFIG_HIGHMEM
static inline struct page *
page_is_saveable(struct zone *zone, unsigned long pfn)
{
	return is_highmem(zone) ?
		saveable_highmem_page(zone, pfn) : saveable_page(zone, pfn);
}

static void copy_data_page(unsigned long dst_pfn, unsigned long src_pfn)
{
	struct page *s_page, *d_page;
	void *src, *dst;

	s_page = pfn_to_page(src_pfn);
	d_page = pfn_to_page(dst_pfn);
	if (PageHighMem(s_page)) {
		src = kmap_atomic(s_page);
		dst = kmap_atomic(d_page);
		do_copy_page(dst, src);
		kunmap_atomic(dst);
		kunmap_atomic(src);
	} else {
		if (PageHighMem(d_page)) {
			/* Page pointed to by src may contain some kernel
			 * data modified by kmap_atomic()
			 */
			safe_copy_page(buffer, s_page);
			dst = kmap_atomic(d_page);
			copy_page(dst, buffer);
			kunmap_atomic(dst);
		} else {
			safe_copy_page(page_address(d_page), s_page);
		}
	}
}
#else
#define page_is_saveable(zone, pfn)	saveable_page(zone, pfn)

static inline void copy_data_page(unsigned long dst_pfn, unsigned long src_pfn)
{
	safe_copy_page(page_address(pfn_to_page(dst_pfn)),
		       pfn_to_page(src_pfn));
}
#endif /* CONFIG_HIGHMEM */

static void
copy_data_pages(struct memory_bitmap *copy_bm, struct memory_bitmap *orig_bm)
{
	struct zone *zone;
	unsigned long pfn;

	for_each_populated_zone(zone) {
		unsigned long max_zone_pfn;

		mark_free_pages(zone);
		max_zone_pfn = zone_end_pfn(zone);
		for (pfn = zone->zone_start_pfn; pfn < max_zone_pfn; pfn++)
			if (page_is_saveable(zone, pfn))
				memory_bm_set_bit(orig_bm, pfn);
	}
	memory_bm_position_reset(orig_bm);
	memory_bm_position_reset(copy_bm);
	for (;;) {
		pfn = memory_bm_next_pfn(orig_bm);
		if (unlikely(pfn == BM_END_OF_MAP))
			break;
		copy_data_page(memory_bm_next_pfn(copy_bm), pfn);
	}
}

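/*
 * Note: the final loop above advances orig_bm and copy_bm in lock step,
 * which assumes copy_bm has at least as many bits set as orig_bm; the
 * n-th page frame marked in copy_bm receives a copy of the page at the
 * n-th pfn marked in orig_bm.
 */
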
/* Total number of image pages */
static unsigned int nr_copy_pages;
/* Number of pages needed for saving the original pfns of the image pages */
static unsigned int nr_meta_pages;
/*
 * Numbers of normal and highmem page frames allocated for hibernation image
 * before suspending devices.
 */
unsigned int alloc_normal, alloc_highmem;
/*
 * Memory bitmap used for marking saveable pages (during hibernation) or
 * hibernation image pages (during restore)
 */
static struct memory_bitmap orig_bm;
/*
 * Memory bitmap used during hibernation for marking allocated page frames that
 * will contain copies of saveable pages.  During restore it is initially used
 * for marking hibernation image pages, but then the set bits from it are
 * duplicated in @orig_bm and it is released.  On highmem systems it is next
 * used for marking "safe" highmem pages, but it has to be reinitialized for
 * this purpose.
 */
static struct memory_bitmap copy_bm;

/**
 * swsusp_free - free pages allocated for the suspend.
 *
 * Suspend pages are allocated before the atomic copy is made, so we
 * need to release them after the resume.
 */

void swsusp_free(void)
{
	unsigned long fb_pfn, fr_pfn;

	if (!forbidden_pages_map || !free_pages_map)
		goto out;

	memory_bm_position_reset(forbidden_pages_map);
	memory_bm_position_reset(free_pages_map);

loop:
	fr_pfn = memory_bm_next_pfn(free_pages_map);
	fb_pfn = memory_bm_next_pfn(forbidden_pages_map);

	/*
	 * Find the next bit set in both bitmaps.  This is guaranteed to
	 * terminate when fb_pfn == fr_pfn == BM_END_OF_MAP.
	 */
	do {
		if (fb_pfn < fr_pfn)
			fb_pfn = memory_bm_next_pfn(forbidden_pages_map);
		if (fr_pfn < fb_pfn)
			fr_pfn = memory_bm_next_pfn(free_pages_map);
	} while (fb_pfn != fr_pfn);

	if (fr_pfn != BM_END_OF_MAP && pfn_valid(fr_pfn)) {
		struct page *page = pfn_to_page(fr_pfn);

		memory_bm_clear_current(forbidden_pages_map);
		memory_bm_clear_current(free_pages_map);
		__free_page(page);
		goto loop;
	}

out:
	nr_copy_pages = 0;
	nr_meta_pages = 0;
	restore_pblist = NULL;
	buffer = NULL;
	alloc_normal = 0;
	alloc_highmem = 0;
}

/* Helper functions used for the shrinking of memory. */

#define GFP_IMAGE	(GFP_KERNEL | __GFP_NOWARN)

/**
 * preallocate_image_pages - Allocate a number of pages for hibernation image
 * @nr_pages: Number of page frames to allocate.
 * @mask: GFP flags to use for the allocation.
 *
 * Return value: Number of page frames actually allocated
 */
static unsigned long preallocate_image_pages(unsigned long nr_pages, gfp_t mask)
{
	unsigned long nr_alloc = 0;

	while (nr_pages > 0) {
		struct page *page;

		page = alloc_image_page(mask);
		if (!page)
			break;
		memory_bm_set_bit(&copy_bm, page_to_pfn(page));
		if (PageHighMem(page))
			alloc_highmem++;
		else
			alloc_normal++;
		nr_pages--;
		nr_alloc++;
	}

	return nr_alloc;
}

Rafael J. Wysocki67150452010-09-11 20:58:27 +02001420static unsigned long preallocate_image_memory(unsigned long nr_pages,
1421 unsigned long avail_normal)
Rafael J. Wysocki4bb33432009-07-08 13:23:51 +02001422{
Rafael J. Wysocki67150452010-09-11 20:58:27 +02001423 unsigned long alloc;
1424
1425 if (avail_normal <= alloc_normal)
1426 return 0;
1427
1428 alloc = avail_normal - alloc_normal;
1429 if (nr_pages < alloc)
1430 alloc = nr_pages;
1431
1432 return preallocate_image_pages(alloc, GFP_IMAGE);
Rafael J. Wysocki4bb33432009-07-08 13:23:51 +02001433}
1434
1435#ifdef CONFIG_HIGHMEM
1436static unsigned long preallocate_image_highmem(unsigned long nr_pages)
1437{
1438 return preallocate_image_pages(nr_pages, GFP_IMAGE | __GFP_HIGHMEM);
1439}
1440
1441/**
1442 * __fraction - Compute (an approximation of) x * (multiplier / base)
1443 */
1444static unsigned long __fraction(u64 x, u64 multiplier, u64 base)
1445{
1446 x *= multiplier;
1447 do_div(x, base);
1448 return (unsigned long)x;
1449}
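/*
 * Example with made-up numbers: __fraction(1000, 300, 1200) computes
 * 1000 * 300 / 1200 = 250, so preallocate_highmem_fraction() below asks for
 * a quarter of the requested pages from highmem when highmem makes up a
 * quarter of the total.
 */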
1450
1451static unsigned long preallocate_highmem_fraction(unsigned long nr_pages,
1452 unsigned long highmem,
1453 unsigned long total)
1454{
1455 unsigned long alloc = __fraction(nr_pages, highmem, total);
1456
1457 return preallocate_image_pages(alloc, GFP_IMAGE | __GFP_HIGHMEM);
1458}
1459#else /* CONFIG_HIGHMEM */
1460static inline unsigned long preallocate_image_highmem(unsigned long nr_pages)
1461{
1462 return 0;
1463}
1464
1465static inline unsigned long preallocate_highmem_fraction(unsigned long nr_pages,
1466 unsigned long highmem,
1467 unsigned long total)
1468{
1469 return 0;
1470}
1471#endif /* CONFIG_HIGHMEM */
1472
1473/**
Rafael J. Wysocki64a473c2009-07-08 13:24:05 +02001474 * free_unnecessary_pages - Release preallocated pages not needed for the image
1475 */
Wonhong Kwona64fc822015-02-03 17:22:00 +09001476static unsigned long free_unnecessary_pages(void)
Rafael J. Wysocki64a473c2009-07-08 13:24:05 +02001477{
Wonhong Kwona64fc822015-02-03 17:22:00 +09001478 unsigned long save, to_free_normal, to_free_highmem, free;
Rafael J. Wysocki64a473c2009-07-08 13:24:05 +02001479
Rafael J. Wysocki67150452010-09-11 20:58:27 +02001480 save = count_data_pages();
1481 if (alloc_normal >= save) {
1482 to_free_normal = alloc_normal - save;
1483 save = 0;
1484 } else {
1485 to_free_normal = 0;
1486 save -= alloc_normal;
1487 }
1488 save += count_highmem_pages();
1489 if (alloc_highmem >= save) {
1490 to_free_highmem = alloc_highmem - save;
Rafael J. Wysocki64a473c2009-07-08 13:24:05 +02001491 } else {
1492 to_free_highmem = 0;
Rafael J. Wysocki4d4cf232011-07-06 20:15:23 +02001493 save -= alloc_highmem;
1494 if (to_free_normal > save)
1495 to_free_normal -= save;
1496 else
1497 to_free_normal = 0;
Rafael J. Wysocki64a473c2009-07-08 13:24:05 +02001498 }
Wonhong Kwona64fc822015-02-03 17:22:00 +09001499 free = to_free_normal + to_free_highmem;
Rafael J. Wysocki64a473c2009-07-08 13:24:05 +02001500
1501 memory_bm_position_reset(&copy_bm);
1502
Rafael J. Wysockia9c9b442010-02-25 22:32:37 +01001503 while (to_free_normal > 0 || to_free_highmem > 0) {
Rafael J. Wysocki64a473c2009-07-08 13:24:05 +02001504 unsigned long pfn = memory_bm_next_pfn(&copy_bm);
1505 struct page *page = pfn_to_page(pfn);
1506
1507 if (PageHighMem(page)) {
1508 if (!to_free_highmem)
1509 continue;
1510 to_free_highmem--;
1511 alloc_highmem--;
1512 } else {
1513 if (!to_free_normal)
1514 continue;
1515 to_free_normal--;
1516 alloc_normal--;
1517 }
1518 memory_bm_clear_bit(&copy_bm, pfn);
1519 swsusp_unset_page_forbidden(page);
1520 swsusp_unset_page_free(page);
1521 __free_page(page);
1522 }
Wonhong Kwona64fc822015-02-03 17:22:00 +09001523
1524 return free;
Rafael J. Wysocki64a473c2009-07-08 13:24:05 +02001525}
1526
1527/**
Rafael J. Wysockief4aede2009-07-08 13:24:12 +02001528 * minimum_image_size - Estimate the minimum acceptable size of an image
1529 * @saveable: Number of saveable pages in the system.
1530 *
1531 * We want to avoid attempting to free too much memory too hard, so estimate the
1532 * minimum acceptable size of a hibernation image to use as the lower limit for
1533 * preallocating memory.
1534 *
1535 * We assume that the minimum image size should be proportional to
1536 *
1537 * [number of saveable pages] - [number of pages that can be freed in theory]
1538 *
1539 * where the second term is the sum of (1) reclaimable slab pages, (2) active
Geert Uytterhoeven4d434822014-03-11 11:23:41 +01001540 * and (3) inactive anonymous pages, (4) active and (5) inactive file pages,
Rafael J. Wysockief4aede2009-07-08 13:24:12 +02001541 * minus mapped file pages.
1542 */
1543static unsigned long minimum_image_size(unsigned long saveable)
1544{
1545 unsigned long size;
1546
1547 size = global_page_state(NR_SLAB_RECLAIMABLE)
1548 + global_page_state(NR_ACTIVE_ANON)
1549 + global_page_state(NR_INACTIVE_ANON)
1550 + global_page_state(NR_ACTIVE_FILE)
1551 + global_page_state(NR_INACTIVE_FILE)
1552 - global_page_state(NR_FILE_MAPPED);
1553
1554 return saveable <= size ? 0 : saveable - size;
1555}
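/*
 * Worked example (illustrative counters only): with 100000 saveable pages,
 * 20000 reclaimable slab pages, 30000 anonymous pages (active + inactive),
 * 25000 file pages (active + inactive) and 5000 mapped file pages,
 * size = 20000 + 30000 + 25000 - 5000 = 70000, so the estimated minimum
 * image size is 100000 - 70000 = 30000 pages.  If the counters summed to
 * at least the number of saveable pages, 0 would be returned instead.
 */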
1556
1557/**
Rafael J. Wysocki64a473c2009-07-08 13:24:05 +02001558 * hibernate_preallocate_memory - Preallocate memory for hibernation image
Rafael J. Wysocki4bb33432009-07-08 13:23:51 +02001559 *
1560 * To create a hibernation image it is necessary to make a copy of every page
1561 * frame in use. We also need a number of page frames to be free during
1562 * hibernation for allocations made while saving the image and for device
1563 * drivers, in case they need to allocate memory from their hibernation
Rafael J. Wysockiddeb6482011-05-15 11:38:48 +02001564 * callbacks (these two numbers are given by PAGES_FOR_IO, which is a rough
1565 * estimate, and reserved_size divided by PAGE_SIZE, which is tunable via
1566 * /sys/power/reserved_size, respectively). To make this happen, we compute the
1567 * total number of available page frames and allocate at least
Rafael J. Wysocki4bb33432009-07-08 13:23:51 +02001568 *
Rafael J. Wysockiddeb6482011-05-15 11:38:48 +02001569 * ([page frames total] + PAGES_FOR_IO + [metadata pages]) / 2
1570 * + 2 * DIV_ROUND_UP(reserved_size, PAGE_SIZE)
Rafael J. Wysocki4bb33432009-07-08 13:23:51 +02001571 *
1572 * of them, which corresponds to the maximum size of a hibernation image.
1573 *
1574 * If image_size is set below the number following from the above formula,
1575 * the preallocation of memory is continued until the total number of saveable
Rafael J. Wysockief4aede2009-07-08 13:24:12 +02001576 * pages in the system is below the requested image size or the minimum
1577 * acceptable image size returned by minimum_image_size(), whichever is greater.
Rafael J. Wysocki4bb33432009-07-08 13:23:51 +02001578 */
Rafael J. Wysocki64a473c2009-07-08 13:24:05 +02001579int hibernate_preallocate_memory(void)
Rafael J. Wysockife419532009-06-11 23:11:17 +02001580{
Rafael J. Wysockife419532009-06-11 23:11:17 +02001581 struct zone *zone;
Rafael J. Wysocki4bb33432009-07-08 13:23:51 +02001582 unsigned long saveable, size, max_size, count, highmem, pages = 0;
Rafael J. Wysocki67150452010-09-11 20:58:27 +02001583 unsigned long alloc, save_highmem, pages_highmem, avail_normal;
Tina Ruchandanidb597602014-10-30 11:04:53 -07001584 ktime_t start, stop;
Rafael J. Wysocki64a473c2009-07-08 13:24:05 +02001585 int error;
Rafael J. Wysockife419532009-06-11 23:11:17 +02001586
Rafael J. Wysocki64a473c2009-07-08 13:24:05 +02001587 printk(KERN_INFO "PM: Preallocating image memory... ");
Tina Ruchandanidb597602014-10-30 11:04:53 -07001588 start = ktime_get();
Rafael J. Wysockife419532009-06-11 23:11:17 +02001589
Rafael J. Wysocki64a473c2009-07-08 13:24:05 +02001590 error = memory_bm_create(&orig_bm, GFP_IMAGE, PG_ANY);
1591 if (error)
1592 goto err_out;
1593
1594 error = memory_bm_create(&copy_bm, GFP_IMAGE, PG_ANY);
1595 if (error)
1596 goto err_out;
1597
1598 alloc_normal = 0;
1599 alloc_highmem = 0;
1600
Rafael J. Wysocki4bb33432009-07-08 13:23:51 +02001601 /* Count the number of saveable data pages. */
Rafael J. Wysocki64a473c2009-07-08 13:24:05 +02001602 save_highmem = count_highmem_pages();
Rafael J. Wysocki4bb33432009-07-08 13:23:51 +02001603 saveable = count_data_pages();
Rafael J. Wysockife419532009-06-11 23:11:17 +02001604
Rafael J. Wysocki4bb33432009-07-08 13:23:51 +02001605 /*
1606 * Compute the total number of page frames we can use (count) and the
1607 * number of pages needed for image metadata (size).
1608 */
1609 count = saveable;
Rafael J. Wysocki64a473c2009-07-08 13:24:05 +02001610 saveable += save_highmem;
1611 highmem = save_highmem;
Rafael J. Wysocki4bb33432009-07-08 13:23:51 +02001612 size = 0;
1613 for_each_populated_zone(zone) {
1614 size += snapshot_additional_pages(zone);
1615 if (is_highmem(zone))
1616 highmem += zone_page_state(zone, NR_FREE_PAGES);
1617 else
1618 count += zone_page_state(zone, NR_FREE_PAGES);
1619 }
Rafael J. Wysocki67150452010-09-11 20:58:27 +02001620 avail_normal = count;
Rafael J. Wysocki4bb33432009-07-08 13:23:51 +02001621 count += highmem;
1622 count -= totalreserve_pages;
Rafael J. Wysockife419532009-06-11 23:11:17 +02001623
Martin Schwidefsky85055dd2011-08-17 20:42:24 +02001624 /* Add number of pages required for page keys (s390 only). */
1625 size += page_key_additional_pages(saveable);
1626
Rafael J. Wysocki4bb33432009-07-08 13:23:51 +02001627 /* Compute the maximum number of saveable pages to leave in memory. */
Rafael J. Wysockiddeb6482011-05-15 11:38:48 +02001628 max_size = (count - (size + PAGES_FOR_IO)) / 2
1629 - 2 * DIV_ROUND_UP(reserved_size, PAGE_SIZE);
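	/*
	 * Illustrative figures (assuming PAGES_FOR_IO is 1024 pages on this
	 * configuration): with count = 200000 usable page frames, size = 1000
	 * metadata pages and reserved_size covering 256 pages, max_size is
	 * (200000 - 2024) / 2 - 512 = 98476, so at least
	 * 200000 - 98476 = 101524 page frames will end up preallocated.
	 */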
Rafael J. Wysocki266f1a22010-09-20 19:44:38 +02001630 /* Compute the desired number of image pages specified by image_size. */
Rafael J. Wysocki4bb33432009-07-08 13:23:51 +02001631 size = DIV_ROUND_UP(image_size, PAGE_SIZE);
1632 if (size > max_size)
1633 size = max_size;
1634 /*
Rafael J. Wysocki266f1a22010-09-20 19:44:38 +02001635 * If the desired number of image pages is at least as large as the
1636 * current number of saveable pages in memory, allocate page frames for
1637 * the image and we're done.
Rafael J. Wysocki4bb33432009-07-08 13:23:51 +02001638 */
Rafael J. Wysocki64a473c2009-07-08 13:24:05 +02001639 if (size >= saveable) {
1640 pages = preallocate_image_highmem(save_highmem);
Rafael J. Wysocki67150452010-09-11 20:58:27 +02001641 pages += preallocate_image_memory(saveable - pages, avail_normal);
Rafael J. Wysocki4bb33432009-07-08 13:23:51 +02001642 goto out;
Rafael J. Wysocki64a473c2009-07-08 13:24:05 +02001643 }
Rafael J. Wysocki4bb33432009-07-08 13:23:51 +02001644
Rafael J. Wysockief4aede2009-07-08 13:24:12 +02001645 /* Estimate the minimum size of the image. */
1646 pages = minimum_image_size(saveable);
Rafael J. Wysocki67150452010-09-11 20:58:27 +02001647 /*
1648 * To avoid excessive pressure on the normal zone, leave room in it to
1649 * accommodate an image of the minimum size (unless it's already too
1650 * small, in which case don't preallocate pages from it at all).
1651 */
1652 if (avail_normal > pages)
1653 avail_normal -= pages;
1654 else
1655 avail_normal = 0;
Rafael J. Wysockief4aede2009-07-08 13:24:12 +02001656 if (size < pages)
1657 size = min_t(unsigned long, pages, max_size);
1658
Rafael J. Wysocki4bb33432009-07-08 13:23:51 +02001659 /*
1660 * Let the memory management subsystem know that we're going to need a
1661 * large number of page frames to allocate and make it free some memory.
1662 * NOTE: If this is not done, performance will be hurt badly in some
1663 * test cases.
1664 */
1665 shrink_all_memory(saveable - size);
1666
1667 /*
1668 * The number of saveable pages in memory was too high, so apply some
1669 * pressure to decrease it. First, make room for the largest possible
1670 * image and fail if that doesn't work. Next, try to decrease the size
Rafael J. Wysockief4aede2009-07-08 13:24:12 +02001671 * of the image as much as indicated by 'size' using allocations from
1672 * highmem and non-highmem zones separately.
Rafael J. Wysocki4bb33432009-07-08 13:23:51 +02001673 */
1674 pages_highmem = preallocate_image_highmem(highmem / 2);
Aaron Lufd432b92013-11-06 08:41:31 +08001675 alloc = count - max_size;
1676 if (alloc > pages_highmem)
1677 alloc -= pages_highmem;
1678 else
1679 alloc = 0;
Rafael J. Wysocki67150452010-09-11 20:58:27 +02001680 pages = preallocate_image_memory(alloc, avail_normal);
1681 if (pages < alloc) {
1682 /* We have exhausted non-highmem pages, try highmem. */
1683 alloc -= pages;
1684 pages += pages_highmem;
1685 pages_highmem = preallocate_image_highmem(alloc);
1686 if (pages_highmem < alloc)
1687 goto err_out;
1688 pages += pages_highmem;
1689 /*
1690 * size is the desired number of saveable pages to leave in
1691 * memory, so try to preallocate (all memory - size) pages.
1692 */
1693 alloc = (count - pages) - size;
1694 pages += preallocate_image_highmem(alloc);
1695 } else {
1696 /*
1697 * There are approximately max_size saveable pages at this point
1698 * and we want to reduce this number down to size.
1699 */
1700 alloc = max_size - size;
1701 size = preallocate_highmem_fraction(alloc, highmem, count);
1702 pages_highmem += size;
1703 alloc -= size;
1704 size = preallocate_image_memory(alloc, avail_normal);
1705 pages_highmem += preallocate_image_highmem(alloc - size);
1706 pages += pages_highmem + size;
1707 }
Rafael J. Wysocki4bb33432009-07-08 13:23:51 +02001708
Rafael J. Wysocki64a473c2009-07-08 13:24:05 +02001709 /*
1710 * We only need as many page frames for the image as there are saveable
1711 * pages in memory, but we have allocated more. Release the excessive
1712 * ones now.
1713 */
Wonhong Kwona64fc822015-02-03 17:22:00 +09001714 pages -= free_unnecessary_pages();
Rafael J. Wysocki4bb33432009-07-08 13:23:51 +02001715
1716 out:
Tina Ruchandanidb597602014-10-30 11:04:53 -07001717 stop = ktime_get();
Rafael J. Wysocki64a473c2009-07-08 13:24:05 +02001718 printk(KERN_CONT "done (allocated %lu pages)\n", pages);
Tina Ruchandanidb597602014-10-30 11:04:53 -07001719 swsusp_show_speed(start, stop, pages, "Allocated");
Rafael J. Wysockife419532009-06-11 23:11:17 +02001720
1721 return 0;
Rafael J. Wysocki64a473c2009-07-08 13:24:05 +02001722
1723 err_out:
1724 printk(KERN_CONT "\n");
1725 swsusp_free();
1726 return -ENOMEM;
Rafael J. Wysockife419532009-06-11 23:11:17 +02001727}
1728
Rafael J. Wysocki83573762006-12-06 20:34:18 -08001729#ifdef CONFIG_HIGHMEM
1730/**
1731 * count_pages_for_highmem - compute the number of non-highmem pages
1732 * that will be necessary for creating copies of highmem pages.
1733 */
1734
1735static unsigned int count_pages_for_highmem(unsigned int nr_highmem)
1736{
Rafael J. Wysocki64a473c2009-07-08 13:24:05 +02001737 unsigned int free_highmem = count_free_highmem_pages() + alloc_highmem;
Rafael J. Wysocki83573762006-12-06 20:34:18 -08001738
1739 if (free_highmem >= nr_highmem)
1740 nr_highmem = 0;
1741 else
1742 nr_highmem -= free_highmem;
1743
1744 return nr_highmem;
1745}
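/*
 * E.g. (illustrative): with nr_highmem = 500 saveable highmem pages but only
 * free_highmem = 300 free highmem frames (including the preallocated ones),
 * 200 of the copies must land in normal memory, so 200 is returned; if
 * free_highmem covered them all, the result would be 0.
 */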
1746#else
1747static unsigned int
1748count_pages_for_highmem(unsigned int nr_highmem) { return 0; }
1749#endif /* CONFIG_HIGHMEM */
Rafael J. Wysocki25761b62005-10-30 14:59:56 -08001750
1751/**
Rafael J. Wysocki83573762006-12-06 20:34:18 -08001752 * enough_free_mem - Make sure we have enough free memory for the
1753 * snapshot image.
Rafael J. Wysocki25761b62005-10-30 14:59:56 -08001754 */
1755
Rafael J. Wysocki83573762006-12-06 20:34:18 -08001756static int enough_free_mem(unsigned int nr_pages, unsigned int nr_highmem)
Rafael J. Wysocki25761b62005-10-30 14:59:56 -08001757{
Rafael J. Wysockie5e2fa72006-01-06 00:14:20 -08001758 struct zone *zone;
Rafael J. Wysocki64a473c2009-07-08 13:24:05 +02001759 unsigned int free = alloc_normal;
Rafael J. Wysockie5e2fa72006-01-06 00:14:20 -08001760
Gerald Schaefer98e73dc2009-07-22 00:36:56 +02001761 for_each_populated_zone(zone)
Rafael J. Wysocki83573762006-12-06 20:34:18 -08001762 if (!is_highmem(zone))
Christoph Lameterd23ad422007-02-10 01:43:02 -08001763 free += zone_page_state(zone, NR_FREE_PAGES);
Rafael J. Wysocki940864d2006-09-25 23:32:55 -07001764
Rafael J. Wysocki83573762006-12-06 20:34:18 -08001765 nr_pages += count_pages_for_highmem(nr_highmem);
Rafael J. Wysocki64a473c2009-07-08 13:24:05 +02001766 pr_debug("PM: Normal pages needed: %u + %u, available pages: %u\n",
1767 nr_pages, PAGES_FOR_IO, free);
Rafael J. Wysocki940864d2006-09-25 23:32:55 -07001768
Rafael J. Wysocki64a473c2009-07-08 13:24:05 +02001769 return free > nr_pages + PAGES_FOR_IO;
Rafael J. Wysocki25761b62005-10-30 14:59:56 -08001770}
1771
Rafael J. Wysocki83573762006-12-06 20:34:18 -08001772#ifdef CONFIG_HIGHMEM
1773/**
1774 * get_highmem_buffer - if there are some highmem pages in the suspend
1775 * image, we may need the buffer to copy them and/or load their data.
1776 */
1777
1778static inline int get_highmem_buffer(int safe_needed)
1779{
1780 buffer = get_image_page(GFP_ATOMIC | __GFP_COLD, safe_needed);
1781 return buffer ? 0 : -ENOMEM;
1782}
1783
1784/**
1785 * alloc_highmem_pages - allocate some highmem pages for the image.
1786 * Try to allocate as many pages as needed, but if the number of free
1787 * highmem pages is less than that, allocate them all.
1788 */
1789
1790static inline unsigned int
Rafael J. Wysocki64a473c2009-07-08 13:24:05 +02001791alloc_highmem_pages(struct memory_bitmap *bm, unsigned int nr_highmem)
Rafael J. Wysocki83573762006-12-06 20:34:18 -08001792{
1793 unsigned int to_alloc = count_free_highmem_pages();
1794
1795 if (to_alloc > nr_highmem)
1796 to_alloc = nr_highmem;
1797
1798 nr_highmem -= to_alloc;
1799 while (to_alloc-- > 0) {
1800 struct page *page;
1801
Mel Gormand0164ad2015-11-06 16:28:21 -08001802 page = alloc_image_page(__GFP_HIGHMEM|__GFP_KSWAPD_RECLAIM);
Rafael J. Wysocki83573762006-12-06 20:34:18 -08001803 memory_bm_set_bit(bm, page_to_pfn(page));
1804 }
1805 return nr_highmem;
1806}
1807#else
1808static inline int get_highmem_buffer(int safe_needed) { return 0; }
1809
1810static inline unsigned int
Rafael J. Wysocki64a473c2009-07-08 13:24:05 +02001811alloc_highmem_pages(struct memory_bitmap *bm, unsigned int n) { return 0; }
Rafael J. Wysocki83573762006-12-06 20:34:18 -08001812#endif /* CONFIG_HIGHMEM */
1813
1814/**
1815 * swsusp_alloc - allocate memory for the suspend image
1816 *
1817 * We first try to allocate as many highmem pages as there are
1818 * saveable highmem pages in the system. If that fails, we allocate
1819 * non-highmem pages for the copies of the remaining highmem ones.
1820 *
1821 * In this approach it is likely that the copies of highmem pages will
1822 * also be located in the high memory, because of the way in which
1823 * copy_data_pages() works.
1824 */
1825
Rafael J. Wysockib788db72006-09-25 23:32:54 -07001826static int
1827swsusp_alloc(struct memory_bitmap *orig_bm, struct memory_bitmap *copy_bm,
Rafael J. Wysocki83573762006-12-06 20:34:18 -08001828 unsigned int nr_pages, unsigned int nr_highmem)
Rafael J. Wysocki054bd4c2005-11-08 21:34:39 -08001829{
Rafael J. Wysocki83573762006-12-06 20:34:18 -08001830 if (nr_highmem > 0) {
Stanislaw Gruszka2e725a02011-02-12 21:06:51 +01001831 if (get_highmem_buffer(PG_ANY))
Rafael J. Wysocki64a473c2009-07-08 13:24:05 +02001832 goto err_out;
1833 if (nr_highmem > alloc_highmem) {
1834 nr_highmem -= alloc_highmem;
1835 nr_pages += alloc_highmem_pages(copy_bm, nr_highmem);
1836 }
Rafael J. Wysocki83573762006-12-06 20:34:18 -08001837 }
Rafael J. Wysocki64a473c2009-07-08 13:24:05 +02001838 if (nr_pages > alloc_normal) {
1839 nr_pages -= alloc_normal;
1840 while (nr_pages-- > 0) {
1841 struct page *page;
Rafael J. Wysocki83573762006-12-06 20:34:18 -08001842
Rafael J. Wysocki64a473c2009-07-08 13:24:05 +02001843 page = alloc_image_page(GFP_ATOMIC | __GFP_COLD);
1844 if (!page)
1845 goto err_out;
1846 memory_bm_set_bit(copy_bm, page_to_pfn(page));
1847 }
Rafael J. Wysocki054bd4c2005-11-08 21:34:39 -08001848 }
Rafael J. Wysocki64a473c2009-07-08 13:24:05 +02001849
Rafael J. Wysocki054bd4c2005-11-08 21:34:39 -08001850 return 0;
Rafael J. Wysockib788db72006-09-25 23:32:54 -07001851
Rafael J. Wysocki64a473c2009-07-08 13:24:05 +02001852 err_out:
Rafael J. Wysockib788db72006-09-25 23:32:54 -07001853 swsusp_free();
Stanislaw Gruszka2e725a02011-02-12 21:06:51 +01001854 return -ENOMEM;
Rafael J. Wysocki054bd4c2005-11-08 21:34:39 -08001855}
Rafael J. Wysocki25761b62005-10-30 14:59:56 -08001856
Andi Kleen722a9f92014-05-02 00:44:38 +02001857asmlinkage __visible int swsusp_save(void)
Rafael J. Wysocki25761b62005-10-30 14:59:56 -08001858{
Rafael J. Wysocki83573762006-12-06 20:34:18 -08001859 unsigned int nr_pages, nr_highmem;
Rafael J. Wysocki25761b62005-10-30 14:59:56 -08001860
Frans Pop07c3bb52010-02-11 23:09:08 +01001861 printk(KERN_INFO "PM: Creating hibernation image:\n");
Rafael J. Wysocki25761b62005-10-30 14:59:56 -08001862
Christoph Lameter9f8f2172008-02-04 22:29:11 -08001863 drain_local_pages(NULL);
Rafael J. Wysockia0f49652005-10-30 14:59:57 -08001864 nr_pages = count_data_pages();
Rafael J. Wysocki83573762006-12-06 20:34:18 -08001865 nr_highmem = count_highmem_pages();
Rafael J. Wysocki23976722007-12-08 02:09:43 +01001866 printk(KERN_INFO "PM: Need to copy %u pages\n", nr_pages + nr_highmem);
Rafael J. Wysocki25761b62005-10-30 14:59:56 -08001867
Rafael J. Wysocki83573762006-12-06 20:34:18 -08001868 if (!enough_free_mem(nr_pages, nr_highmem)) {
Rafael J. Wysocki23976722007-12-08 02:09:43 +01001869 printk(KERN_ERR "PM: Not enough free memory\n");
Rafael J. Wysocki25761b62005-10-30 14:59:56 -08001870 return -ENOMEM;
1871 }
1872
Rafael J. Wysocki83573762006-12-06 20:34:18 -08001873 if (swsusp_alloc(&orig_bm, &copy_bm, nr_pages, nr_highmem)) {
Rafael J. Wysocki23976722007-12-08 02:09:43 +01001874 printk(KERN_ERR "PM: Memory allocation failed\n");
Rafael J. Wysockia0f49652005-10-30 14:59:57 -08001875 return -ENOMEM;
Rafael J. Wysocki83573762006-12-06 20:34:18 -08001876 }
Rafael J. Wysocki25761b62005-10-30 14:59:56 -08001877
1878 /* During allocation of the suspend pagedir, new cold pages may appear.
1879 * Free them.
1880 */
Christoph Lameter9f8f2172008-02-04 22:29:11 -08001881 drain_local_pages(NULL);
Rafael J. Wysockib788db72006-09-25 23:32:54 -07001882 copy_data_pages(&copy_bm, &orig_bm);
Rafael J. Wysocki25761b62005-10-30 14:59:56 -08001883
1884 /*
1885 * End of critical section. From now on, we can write to memory,
1886 * but we should not touch disk. This especially means we must _not_
1887 * touch swap space! Except we must write out our image of course.
1888 */
1889
Rafael J. Wysocki83573762006-12-06 20:34:18 -08001890 nr_pages += nr_highmem;
Rafael J. Wysockia0f49652005-10-30 14:59:57 -08001891 nr_copy_pages = nr_pages;
Rafael J. Wysocki83573762006-12-06 20:34:18 -08001892 nr_meta_pages = DIV_ROUND_UP(nr_pages * sizeof(long), PAGE_SIZE);
Rafael J. Wysockia0f49652005-10-30 14:59:57 -08001893
Rafael J. Wysocki23976722007-12-08 02:09:43 +01001894 printk(KERN_INFO "PM: Hibernation image created (%d pages copied)\n",
1895 nr_pages);
Rafael J. Wysocki83573762006-12-06 20:34:18 -08001896
Rafael J. Wysocki25761b62005-10-30 14:59:56 -08001897 return 0;
1898}
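/*
 * Sizing note on nr_meta_pages above (illustrative arithmetic): every copied
 * page contributes one unsigned long PFN to the metadata, so with 4 KB pages
 * and 8-byte longs one metadata page indexes 512 image pages, and 100000
 * copied pages need DIV_ROUND_UP(100000 * 8, 4096) = 196 metadata pages.
 */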
Rafael J. Wysockif577eb32006-03-23 02:59:59 -08001899
Rafael J. Wysockid307c4a2007-10-18 03:04:52 -07001900#ifndef CONFIG_ARCH_HIBERNATION_HEADER
1901static int init_header_complete(struct swsusp_info *info)
1902{
1903 memcpy(&info->uts, init_utsname(), sizeof(struct new_utsname));
1904 info->version_code = LINUX_VERSION_CODE;
1905 return 0;
1906}
1907
1908static char *check_image_kernel(struct swsusp_info *info)
1909{
1910 if (info->version_code != LINUX_VERSION_CODE)
1911 return "kernel version";
1912 if (strcmp(info->uts.sysname,init_utsname()->sysname))
1913 return "system type";
1914 if (strcmp(info->uts.release,init_utsname()->release))
1915 return "kernel release";
1916 if (strcmp(info->uts.version,init_utsname()->version))
1917 return "version";
1918 if (strcmp(info->uts.machine,init_utsname()->machine))
1919 return "machine";
1920 return NULL;
1921}
1922#endif /* CONFIG_ARCH_HIBERNATION_HEADER */
1923
Rafael J. Wysockiaf508b32007-10-26 00:59:31 +02001924unsigned long snapshot_get_image_size(void)
1925{
1926 return nr_copy_pages + nr_meta_pages + 1;
1927}
1928
Rafael J. Wysockid307c4a2007-10-18 03:04:52 -07001929static int init_header(struct swsusp_info *info)
Rafael J. Wysockif577eb32006-03-23 02:59:59 -08001930{
1931 memset(info, 0, sizeof(struct swsusp_info));
Jiang Liu0ed5fd12013-07-03 15:03:43 -07001932 info->num_physpages = get_num_physpages();
Rafael J. Wysockif577eb32006-03-23 02:59:59 -08001933 info->image_pages = nr_copy_pages;
Rafael J. Wysockiaf508b32007-10-26 00:59:31 +02001934 info->pages = snapshot_get_image_size();
Rafael J. Wysocki6e1819d2006-03-23 03:00:03 -08001935 info->size = info->pages;
1936 info->size <<= PAGE_SHIFT;
Rafael J. Wysockid307c4a2007-10-18 03:04:52 -07001937 return init_header_complete(info);
Rafael J. Wysockif577eb32006-03-23 02:59:59 -08001938}
1939
1940/**
Rafael J. Wysocki940864d2006-09-25 23:32:55 -07001941 * pack_pfns - store the pfns corresponding to the set bits found in the
1942 * bitmap @bm in the array @buf[] (one page worth at a time)
Rafael J. Wysockif577eb32006-03-23 02:59:59 -08001943 */
1944
Rafael J. Wysockib788db72006-09-25 23:32:54 -07001945static inline void
Rafael J. Wysocki940864d2006-09-25 23:32:55 -07001946pack_pfns(unsigned long *buf, struct memory_bitmap *bm)
Rafael J. Wysockif577eb32006-03-23 02:59:59 -08001947{
1948 int j;
1949
Rafael J. Wysockib788db72006-09-25 23:32:54 -07001950 for (j = 0; j < PAGE_SIZE / sizeof(long); j++) {
Rafael J. Wysocki940864d2006-09-25 23:32:55 -07001951 buf[j] = memory_bm_next_pfn(bm);
1952 if (unlikely(buf[j] == BM_END_OF_MAP))
Rafael J. Wysockib788db72006-09-25 23:32:54 -07001953 break;
Martin Schwidefsky85055dd2011-08-17 20:42:24 +02001954 /* Save page key for data page (s390 only). */
1955 page_key_read(buf + j);
Rafael J. Wysockif577eb32006-03-23 02:59:59 -08001956 }
Rafael J. Wysockif577eb32006-03-23 02:59:59 -08001957}
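/*
 * Resulting image stream layout (as produced by snapshot_read_next() below):
 * page 0 is the swsusp_info header, the next nr_meta_pages pages hold the
 * PFN arrays packed here, and the remaining nr_copy_pages pages carry the
 * data pages themselves, in bitmap order.
 */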
1958
1959/**
1960 * snapshot_read_next - used for reading the system memory snapshot.
1961 *
1962 * On the first call to it, @handle should point to a zeroed
1963 * snapshot_handle structure. The structure gets updated and a pointer
1964 * to it should be passed to this function on each subsequent call.
1965 *
Rafael J. Wysockif577eb32006-03-23 02:59:59 -08001966 * On success the function returns a positive number. Then, the caller
1967 * is allowed to read up to the returned number of bytes from the memory
Jiri Slabyd3c1b242010-05-01 23:52:02 +02001968 * location computed by the data_of() macro.
Rafael J. Wysockif577eb32006-03-23 02:59:59 -08001969 *
1970 * The function returns 0 to indicate the end of data stream condition,
1971 * and a negative number is returned on error. In such cases the
1972 * structure pointed to by @handle is not updated and should not be used
1973 * any more.
1974 */
1975
Jiri Slabyd3c1b242010-05-01 23:52:02 +02001976int snapshot_read_next(struct snapshot_handle *handle)
Rafael J. Wysockif577eb32006-03-23 02:59:59 -08001977{
Rafael J. Wysockifb13a282006-09-25 23:32:46 -07001978 if (handle->cur > nr_meta_pages + nr_copy_pages)
Rafael J. Wysockif577eb32006-03-23 02:59:59 -08001979 return 0;
Rafael J. Wysockib788db72006-09-25 23:32:54 -07001980
Rafael J. Wysockif577eb32006-03-23 02:59:59 -08001981 if (!buffer) {
1982 /* This makes the buffer be freed by swsusp_free() */
Rafael J. Wysocki83573762006-12-06 20:34:18 -08001983 buffer = get_image_page(GFP_ATOMIC, PG_ANY);
Rafael J. Wysockif577eb32006-03-23 02:59:59 -08001984 if (!buffer)
1985 return -ENOMEM;
1986 }
Jiri Slabyd3c1b242010-05-01 23:52:02 +02001987 if (!handle->cur) {
Rafael J. Wysockid307c4a2007-10-18 03:04:52 -07001988 int error;
1989
1990 error = init_header((struct swsusp_info *)buffer);
1991 if (error)
1992 return error;
Rafael J. Wysockif577eb32006-03-23 02:59:59 -08001993 handle->buffer = buffer;
Rafael J. Wysockib788db72006-09-25 23:32:54 -07001994 memory_bm_position_reset(&orig_bm);
1995 memory_bm_position_reset(&copy_bm);
Jiri Slabyd3c1b242010-05-01 23:52:02 +02001996 } else if (handle->cur <= nr_meta_pages) {
Jan Beulich3ecb01d2010-10-26 14:22:27 -07001997 clear_page(buffer);
Jiri Slabyd3c1b242010-05-01 23:52:02 +02001998 pack_pfns(buffer, &orig_bm);
Rafael J. Wysockif577eb32006-03-23 02:59:59 -08001999 } else {
Jiri Slabyd3c1b242010-05-01 23:52:02 +02002000 struct page *page;
2001
2002 page = pfn_to_page(memory_bm_next_pfn(&copy_bm));
2003 if (PageHighMem(page)) {
2004 /* Highmem pages are copied to the buffer,
2005 * because we can't return with a kmapped
2006 * highmem page (we may not be called again).
2007 */
2008 void *kaddr;
2009
Cong Wang0de9a1e2011-11-25 23:14:38 +08002010 kaddr = kmap_atomic(page);
Jan Beulich3ecb01d2010-10-26 14:22:27 -07002011 copy_page(buffer, kaddr);
Cong Wang0de9a1e2011-11-25 23:14:38 +08002012 kunmap_atomic(kaddr);
Jiri Slabyd3c1b242010-05-01 23:52:02 +02002013 handle->buffer = buffer;
2014 } else {
2015 handle->buffer = page_address(page);
2016 }
Rafael J. Wysockif577eb32006-03-23 02:59:59 -08002017 }
Jiri Slabyd3c1b242010-05-01 23:52:02 +02002018 handle->cur++;
2019 return PAGE_SIZE;
Rafael J. Wysockif577eb32006-03-23 02:59:59 -08002020}
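/*
 * Hypothetical consumer of the interface above (a sketch only; the real
 * users live in kernel/power/swap.c and kernel/power/user.c, and write_out()
 * below is made up).  Error handling elided:
 *
 *	struct snapshot_handle handle;
 *	int n;
 *
 *	memset(&handle, 0, sizeof(handle));
 *	while ((n = snapshot_read_next(&handle)) > 0)
 *		write_out(data_of(handle), n);
 *	return n;	/- 0 on end of data stream, negative on error -/
 */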
2021
2022/**
2023 * mark_unsafe_pages - mark the pages that cannot be used for storing
2024 * the image during resume, because they conflict with the pages that
2025 * were in use before the suspend
2026 */
2027
Rafael J. Wysocki940864d2006-09-25 23:32:55 -07002028static int mark_unsafe_pages(struct memory_bitmap *bm)
Rafael J. Wysockif577eb32006-03-23 02:59:59 -08002029{
2030 struct zone *zone;
Rafael J. Wysockiae83c5ee2006-09-25 23:32:45 -07002031 unsigned long pfn, max_zone_pfn;
Rafael J. Wysockif577eb32006-03-23 02:59:59 -08002032
2033 /* Clear page flags */
Gerald Schaefer98e73dc2009-07-22 00:36:56 +02002034 for_each_populated_zone(zone) {
Xishi Qiuc33bc312013-09-11 14:21:44 -07002035 max_zone_pfn = zone_end_pfn(zone);
Rafael J. Wysockiae83c5ee2006-09-25 23:32:45 -07002036 for (pfn = zone->zone_start_pfn; pfn < max_zone_pfn; pfn++)
2037 if (pfn_valid(pfn))
Rafael J. Wysocki7be98232007-05-06 14:50:42 -07002038 swsusp_unset_page_free(pfn_to_page(pfn));
Rafael J. Wysockif577eb32006-03-23 02:59:59 -08002039 }
2040
Rafael J. Wysocki940864d2006-09-25 23:32:55 -07002041 /* Mark pages that correspond to the "original" pfns as "unsafe" */
2042 memory_bm_position_reset(bm);
2043 do {
2044 pfn = memory_bm_next_pfn(bm);
2045 if (likely(pfn != BM_END_OF_MAP)) {
Rafael J. Wysockif82daee2015-04-07 01:07:39 +02002046 if (likely(pfn_valid(pfn)))
Rafael J. Wysocki7be98232007-05-06 14:50:42 -07002047 swsusp_set_page_free(pfn_to_page(pfn));
Rafael J. Wysocki940864d2006-09-25 23:32:55 -07002048 else
2049 return -EFAULT;
2050 }
2051 } while (pfn != BM_END_OF_MAP);
Rafael J. Wysockif577eb32006-03-23 02:59:59 -08002052
Rafael J. Wysocki940864d2006-09-25 23:32:55 -07002053 allocated_unsafe_pages = 0;
Rafael J. Wysocki968808b82006-06-23 02:04:48 -07002054
Rafael J. Wysockif577eb32006-03-23 02:59:59 -08002055 return 0;
2056}
2057
Rafael J. Wysocki940864d2006-09-25 23:32:55 -07002058static void
2059duplicate_memory_bitmap(struct memory_bitmap *dst, struct memory_bitmap *src)
Rafael J. Wysockif577eb32006-03-23 02:59:59 -08002060{
Rafael J. Wysocki940864d2006-09-25 23:32:55 -07002061 unsigned long pfn;
2062
2063 memory_bm_position_reset(src);
2064 pfn = memory_bm_next_pfn(src);
2065 while (pfn != BM_END_OF_MAP) {
2066 memory_bm_set_bit(dst, pfn);
2067 pfn = memory_bm_next_pfn(src);
Rafael J. Wysockif577eb32006-03-23 02:59:59 -08002068 }
2069}
2070
Rafael J. Wysockid307c4a2007-10-18 03:04:52 -07002071static int check_header(struct swsusp_info *info)
Rafael J. Wysockif577eb32006-03-23 02:59:59 -08002072{
Rafael J. Wysockid307c4a2007-10-18 03:04:52 -07002073 char *reason;
Rafael J. Wysockif577eb32006-03-23 02:59:59 -08002074
Rafael J. Wysockid307c4a2007-10-18 03:04:52 -07002075 reason = check_image_kernel(info);
Jiang Liu0ed5fd12013-07-03 15:03:43 -07002076 if (!reason && info->num_physpages != get_num_physpages())
Rafael J. Wysockif577eb32006-03-23 02:59:59 -08002077 reason = "memory size";
Rafael J. Wysockif577eb32006-03-23 02:59:59 -08002078 if (reason) {
Rafael J. Wysocki23976722007-12-08 02:09:43 +01002079 printk(KERN_ERR "PM: Image mismatch: %s\n", reason);
Rafael J. Wysockif577eb32006-03-23 02:59:59 -08002080 return -EPERM;
2081 }
2082 return 0;
2083}
2084
2085/**
2086 * load_header - check the image header and copy data from it
2087 */
2088
Rafael J. Wysocki940864d2006-09-25 23:32:55 -07002089static int
2090load_header(struct swsusp_info *info)
Rafael J. Wysockif577eb32006-03-23 02:59:59 -08002091{
2092 int error;
Rafael J. Wysockif577eb32006-03-23 02:59:59 -08002093
Rafael J. Wysocki940864d2006-09-25 23:32:55 -07002094 restore_pblist = NULL;
Rafael J. Wysockif577eb32006-03-23 02:59:59 -08002095 error = check_header(info);
2096 if (!error) {
Rafael J. Wysockif577eb32006-03-23 02:59:59 -08002097 nr_copy_pages = info->image_pages;
2098 nr_meta_pages = info->pages - info->image_pages - 1;
2099 }
2100 return error;
2101}
2102
2103/**
Rafael J. Wysocki940864d2006-09-25 23:32:55 -07002104 * unpack_orig_pfns - for each element of @buf[] (1 page at a time) set
2105 * the corresponding bit in the memory bitmap @bm
Rafael J. Wysockif577eb32006-03-23 02:59:59 -08002106 */
Rafael J. Wysocki69643272008-11-11 21:32:44 +01002107static int unpack_orig_pfns(unsigned long *buf, struct memory_bitmap *bm)
Rafael J. Wysockif577eb32006-03-23 02:59:59 -08002108{
2109 int j;
2110
Rafael J. Wysocki940864d2006-09-25 23:32:55 -07002111 for (j = 0; j < PAGE_SIZE / sizeof(long); j++) {
2112 if (unlikely(buf[j] == BM_END_OF_MAP))
2113 break;
2114
Martin Schwidefsky85055dd2011-08-17 20:42:24 +02002115 /* Extract and buffer page key for data page (s390 only). */
2116 page_key_memorize(buf + j);
2117
Rafael J. Wysocki69643272008-11-11 21:32:44 +01002118 if (memory_bm_pfn_present(bm, buf[j]))
2119 memory_bm_set_bit(bm, buf[j]);
2120 else
2121 return -EFAULT;
Rafael J. Wysockif577eb32006-03-23 02:59:59 -08002122 }
Rafael J. Wysocki69643272008-11-11 21:32:44 +01002123
2124 return 0;
Rafael J. Wysockif577eb32006-03-23 02:59:59 -08002125}
2126
Rafael J. Wysocki83573762006-12-06 20:34:18 -08002127#ifdef CONFIG_HIGHMEM
2128/* struct highmem_pbe is used for creating the list of highmem pages that
2129 * should be restored atomically during the resume from disk, because the page
2130 * frames they have occupied before the suspend are in use.
2131 */
2132struct highmem_pbe {
2133 struct page *copy_page; /* data is here now */
2134 struct page *orig_page; /* data was here before the suspend */
2135 struct highmem_pbe *next;
2136};
2137
2138/* List of highmem PBEs needed for restoring the highmem pages that were
2139 * allocated before the suspend and included in the suspend image, but have
2140 * also been allocated by the "resume" kernel, so their contents cannot be
2141 * written directly to their "original" page frames.
2142 */
2143static struct highmem_pbe *highmem_pblist;
2144
2145/**
2146 * count_highmem_image_pages - compute the number of highmem pages in the
2147 * suspend image. The bits in the memory bitmap @bm that correspond to the
2148 * image pages are assumed to be set.
2149 */
2150
2151static unsigned int count_highmem_image_pages(struct memory_bitmap *bm)
2152{
2153 unsigned long pfn;
2154 unsigned int cnt = 0;
2155
2156 memory_bm_position_reset(bm);
2157 pfn = memory_bm_next_pfn(bm);
2158 while (pfn != BM_END_OF_MAP) {
2159 if (PageHighMem(pfn_to_page(pfn)))
2160 cnt++;
2161
2162 pfn = memory_bm_next_pfn(bm);
2163 }
2164 return cnt;
2165}
2166
2167/**
2168 * prepare_highmem_image - try to allocate as many highmem pages as
2169 * there are highmem image pages (@nr_highmem_p points to the variable
2170 * containing the number of highmem image pages). The pages that are
2171 * "safe" (ie. will not be overwritten when the suspend image is
2172 * restored) have the corresponding bits set in @bm (it must be
2173 * uninitialized).
2174 *
2175 * NOTE: This function should not be called if there are no highmem
2176 * image pages.
2177 */
2178
2179static unsigned int safe_highmem_pages;
2180
2181static struct memory_bitmap *safe_highmem_bm;
2182
2183static int
2184prepare_highmem_image(struct memory_bitmap *bm, unsigned int *nr_highmem_p)
2185{
2186 unsigned int to_alloc;
2187
2188 if (memory_bm_create(bm, GFP_ATOMIC, PG_SAFE))
2189 return -ENOMEM;
2190
2191 if (get_highmem_buffer(PG_SAFE))
2192 return -ENOMEM;
2193
2194 to_alloc = count_free_highmem_pages();
2195 if (to_alloc > *nr_highmem_p)
2196 to_alloc = *nr_highmem_p;
2197 else
2198 *nr_highmem_p = to_alloc;
2199
2200 safe_highmem_pages = 0;
2201 while (to_alloc-- > 0) {
2202 struct page *page;
2203
2204 page = alloc_page(__GFP_HIGHMEM);
Rafael J. Wysocki7be98232007-05-06 14:50:42 -07002205 if (!swsusp_page_is_free(page)) {
Rafael J. Wysocki83573762006-12-06 20:34:18 -08002206 /* The page is "safe", set its bit in the bitmap */
2207 memory_bm_set_bit(bm, page_to_pfn(page));
2208 safe_highmem_pages++;
2209 }
2210 /* Mark the page as allocated */
Rafael J. Wysocki7be98232007-05-06 14:50:42 -07002211 swsusp_set_page_forbidden(page);
2212 swsusp_set_page_free(page);
Rafael J. Wysocki83573762006-12-06 20:34:18 -08002213 }
2214 memory_bm_position_reset(bm);
2215 safe_highmem_bm = bm;
2216 return 0;
2217}
2218
2219/**
2220 * get_highmem_page_buffer - for a given highmem image page find the buffer
2221 * that snapshot_write_next() should set for its caller to write to.
2222 *
2223 * If the page is to be saved to its "original" page frame or a copy of
2224 * the page is to be made in the highmem, @buffer is returned. Otherwise,
2225 * the copy of the page is to be made in normal memory, so the address of
2226 * the copy is returned.
2227 *
2228 * If @buffer is returned, the caller of snapshot_write_next() will write
2229 * the page's contents to @buffer, so they will have to be copied to the
2230 * right location on the next call to snapshot_write_next() and it is done
2231 * with the help of copy_last_highmem_page(). For this purpose, if
2232 * @buffer is returned, @last_highmem page is set to the page to which
2233 * the data will have to be copied from @buffer.
2234 */
2235
2236static struct page *last_highmem_page;
2237
2238static void *
2239get_highmem_page_buffer(struct page *page, struct chain_allocator *ca)
2240{
2241 struct highmem_pbe *pbe;
2242 void *kaddr;
2243
Rafael J. Wysocki7be98232007-05-06 14:50:42 -07002244 if (swsusp_page_is_forbidden(page) && swsusp_page_is_free(page)) {
Rafael J. Wysocki83573762006-12-06 20:34:18 -08002245 /* We have allocated the "original" page frame and we can
2246 * use it directly to store the loaded page.
2247 */
2248 last_highmem_page = page;
2249 return buffer;
2250 }
2251 /* The "original" page frame has not been allocated and we have to
2252 * use a "safe" page frame to store the loaded page.
2253 */
2254 pbe = chain_alloc(ca, sizeof(struct highmem_pbe));
2255 if (!pbe) {
2256 swsusp_free();
Rafael J. Wysocki69643272008-11-11 21:32:44 +01002257 return ERR_PTR(-ENOMEM);
Rafael J. Wysocki83573762006-12-06 20:34:18 -08002258 }
2259 pbe->orig_page = page;
2260 if (safe_highmem_pages > 0) {
2261 struct page *tmp;
2262
2263 /* Copy of the page will be stored in high memory */
2264 kaddr = buffer;
2265 tmp = pfn_to_page(memory_bm_next_pfn(safe_highmem_bm));
2266 safe_highmem_pages--;
2267 last_highmem_page = tmp;
2268 pbe->copy_page = tmp;
2269 } else {
2270 /* Copy of the page will be stored in normal memory */
2271 kaddr = safe_pages_list;
2272 safe_pages_list = safe_pages_list->next;
2273 pbe->copy_page = virt_to_page(kaddr);
2274 }
2275 pbe->next = highmem_pblist;
2276 highmem_pblist = pbe;
2277 return kaddr;
2278}
2279
2280/**
2281 * copy_last_highmem_page - copy the contents of a highmem image from
2282 * @buffer, where the caller of snapshot_write_next() has placed them,
2283 * to the right location represented by @last_highmem_page.
2284 */
2285
2286static void copy_last_highmem_page(void)
2287{
2288 if (last_highmem_page) {
2289 void *dst;
2290
Cong Wang0de9a1e2011-11-25 23:14:38 +08002291 dst = kmap_atomic(last_highmem_page);
Jan Beulich3ecb01d2010-10-26 14:22:27 -07002292 copy_page(dst, buffer);
Cong Wang0de9a1e2011-11-25 23:14:38 +08002293 kunmap_atomic(dst);
Rafael J. Wysocki83573762006-12-06 20:34:18 -08002294 last_highmem_page = NULL;
2295 }
2296}
2297
2298static inline int last_highmem_page_copied(void)
2299{
2300 return !last_highmem_page;
2301}
2302
2303static inline void free_highmem_data(void)
2304{
2305 if (safe_highmem_bm)
2306 memory_bm_free(safe_highmem_bm, PG_UNSAFE_CLEAR);
2307
2308 if (buffer)
2309 free_image_page(buffer, PG_UNSAFE_CLEAR);
2310}
2311#else
Rafael J. Wysocki83573762006-12-06 20:34:18 -08002312static unsigned int
2313count_highmem_image_pages(struct memory_bitmap *bm) { return 0; }
2314
2315static inline int
2316prepare_highmem_image(struct memory_bitmap *bm, unsigned int *nr_highmem_p)
2317{
2318 return 0;
2319}
2320
2321static inline void *
2322get_highmem_page_buffer(struct page *page, struct chain_allocator *ca)
2323{
Rafael J. Wysocki69643272008-11-11 21:32:44 +01002324 return ERR_PTR(-EINVAL);
Rafael J. Wysocki83573762006-12-06 20:34:18 -08002325}
2326
2327static inline void copy_last_highmem_page(void) {}
2328static inline int last_highmem_page_copied(void) { return 1; }
2329static inline void free_highmem_data(void) {}
2330#endif /* CONFIG_HIGHMEM */
2331
Rafael J. Wysockif577eb32006-03-23 02:59:59 -08002332/**
Rafael J. Wysocki940864d2006-09-25 23:32:55 -07002333 * prepare_image - use the memory bitmap @bm to mark the pages that will
2334 * be overwritten in the process of restoring the system memory state
2335 * from the suspend image ("unsafe" pages) and allocate memory for the
2336 * image.
Rafael J. Wysocki968808b82006-06-23 02:04:48 -07002337 *
Rafael J. Wysocki940864d2006-09-25 23:32:55 -07002338 * The idea is to allocate a new memory bitmap first and then allocate
2339 * as many pages as needed for the image data, but not to assign these
2340 * pages to specific tasks initially. Instead, we just mark them as
Rafael J. Wysocki83573762006-12-06 20:34:18 -08002341 * allocated and create a list of "safe" pages that will be used
2342 * later. On systems with high memory a list of "safe" highmem pages is
2343 * also created.
Rafael J. Wysockif577eb32006-03-23 02:59:59 -08002344 */
2345
Rafael J. Wysocki940864d2006-09-25 23:32:55 -07002346#define PBES_PER_LINKED_PAGE (LINKED_PAGE_DATA_SIZE / sizeof(struct pbe))
Rafael J. Wysocki968808b82006-06-23 02:04:48 -07002347
Rafael J. Wysocki940864d2006-09-25 23:32:55 -07002348static int
2349prepare_image(struct memory_bitmap *new_bm, struct memory_bitmap *bm)
Rafael J. Wysockif577eb32006-03-23 02:59:59 -08002350{
Rafael J. Wysocki83573762006-12-06 20:34:18 -08002351 unsigned int nr_pages, nr_highmem;
Rafael J. Wysocki9c744482016-06-29 03:00:51 +02002352 struct linked_page *lp;
Rafael J. Wysocki940864d2006-09-25 23:32:55 -07002353 int error;
Rafael J. Wysockif577eb32006-03-23 02:59:59 -08002354
Rafael J. Wysocki83573762006-12-06 20:34:18 -08002355 /* If there is no highmem, the buffer will not be necessary */
2356 free_image_page(buffer, PG_UNSAFE_CLEAR);
2357 buffer = NULL;
2358
2359 nr_highmem = count_highmem_image_pages(bm);
Rafael J. Wysocki940864d2006-09-25 23:32:55 -07002360 error = mark_unsafe_pages(bm);
2361 if (error)
2362 goto Free;
2363
2364 error = memory_bm_create(new_bm, GFP_ATOMIC, PG_SAFE);
2365 if (error)
2366 goto Free;
2367
2368 duplicate_memory_bitmap(new_bm, bm);
2369 memory_bm_free(bm, PG_UNSAFE_KEEP);
Rafael J. Wysocki83573762006-12-06 20:34:18 -08002370 if (nr_highmem > 0) {
2371 error = prepare_highmem_image(bm, &nr_highmem);
2372 if (error)
2373 goto Free;
2374 }
Rafael J. Wysocki940864d2006-09-25 23:32:55 -07002375 /* Reserve some safe pages for potential later use.
2376 *
2377 * NOTE: This way we make sure there will be enough safe pages for the
2378 * chain_alloc() in get_buffer(). It is a bit wasteful, but
2379 * nr_copy_pages cannot be greater than 50% of the memory anyway.
Rafael J. Wysocki9c744482016-06-29 03:00:51 +02002380 *
2381 * nr_copy_pages cannot be less than allocated_unsafe_pages too.
Rafael J. Wysocki940864d2006-09-25 23:32:55 -07002382 */
Rafael J. Wysocki83573762006-12-06 20:34:18 -08002383 nr_pages = nr_copy_pages - nr_highmem - allocated_unsafe_pages;
Rafael J. Wysocki940864d2006-09-25 23:32:55 -07002384 nr_pages = DIV_ROUND_UP(nr_pages, PBES_PER_LINKED_PAGE);
2385 while (nr_pages > 0) {
Rafael J. Wysocki83573762006-12-06 20:34:18 -08002386 lp = get_image_page(GFP_ATOMIC, PG_SAFE);
Rafael J. Wysocki940864d2006-09-25 23:32:55 -07002387 if (!lp) {
Rafael J. Wysockif577eb32006-03-23 02:59:59 -08002388 error = -ENOMEM;
Rafael J. Wysocki940864d2006-09-25 23:32:55 -07002389 goto Free;
Rafael J. Wysocki968808b82006-06-23 02:04:48 -07002390 }
Rafael J. Wysocki9c744482016-06-29 03:00:51 +02002391 lp->next = safe_pages_list;
2392 safe_pages_list = lp;
Rafael J. Wysocki940864d2006-09-25 23:32:55 -07002393 nr_pages--;
Rafael J. Wysocki968808b82006-06-23 02:04:48 -07002394 }
Rafael J. Wysocki940864d2006-09-25 23:32:55 -07002395 /* Preallocate memory for the image */
Rafael J. Wysocki83573762006-12-06 20:34:18 -08002396 nr_pages = nr_copy_pages - nr_highmem - allocated_unsafe_pages;
Rafael J. Wysocki940864d2006-09-25 23:32:55 -07002397 while (nr_pages > 0) {
2398 lp = (struct linked_page *)get_zeroed_page(GFP_ATOMIC);
2399 if (!lp) {
2400 error = -ENOMEM;
2401 goto Free;
2402 }
Rafael J. Wysocki7be98232007-05-06 14:50:42 -07002403 if (!swsusp_page_is_free(virt_to_page(lp))) {
Rafael J. Wysocki940864d2006-09-25 23:32:55 -07002404 /* The page is "safe", add it to the list */
2405 lp->next = safe_pages_list;
2406 safe_pages_list = lp;
2407 }
2408 /* Mark the page as allocated */
Rafael J. Wysocki7be98232007-05-06 14:50:42 -07002409 swsusp_set_page_forbidden(virt_to_page(lp));
2410 swsusp_set_page_free(virt_to_page(lp));
Rafael J. Wysocki940864d2006-09-25 23:32:55 -07002411 nr_pages--;
Rafael J. Wysockif577eb32006-03-23 02:59:59 -08002412 }
Rafael J. Wysocki940864d2006-09-25 23:32:55 -07002413 return 0;
2414
Rafael J. Wysocki59a493352006-12-06 20:34:44 -08002415 Free:
Rafael J. Wysocki940864d2006-09-25 23:32:55 -07002416 swsusp_free();
Rafael J. Wysockif577eb32006-03-23 02:59:59 -08002417 return error;
2418}
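/*
 * Accounting sketch for the two loops above (made-up numbers): for an image
 * of nr_copy_pages = 1000 with nr_highmem = 100 and
 * allocated_unsafe_pages = 50, nr_pages = 850, so
 * DIV_ROUND_UP(850, PBES_PER_LINKED_PAGE) linked pages are reserved for
 * chain_alloc() first, and then 850 page frames are preallocated for the
 * image data proper.
 */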
2419
Rafael J. Wysocki940864d2006-09-25 23:32:55 -07002420/**
2421 * get_buffer - compute the address that snapshot_write_next() should
2422 * set for its caller to write to.
2423 */
Rafael J. Wysocki968808b82006-06-23 02:04:48 -07002424
Rafael J. Wysocki940864d2006-09-25 23:32:55 -07002425static void *get_buffer(struct memory_bitmap *bm, struct chain_allocator *ca)
2426{
2427 struct pbe *pbe;
Rafael J. Wysocki69643272008-11-11 21:32:44 +01002428 struct page *page;
2429 unsigned long pfn = memory_bm_next_pfn(bm);
Rafael J. Wysocki940864d2006-09-25 23:32:55 -07002430
Rafael J. Wysocki69643272008-11-11 21:32:44 +01002431 if (pfn == BM_END_OF_MAP)
2432 return ERR_PTR(-EFAULT);
2433
2434 page = pfn_to_page(pfn);
Rafael J. Wysocki83573762006-12-06 20:34:18 -08002435 if (PageHighMem(page))
2436 return get_highmem_page_buffer(page, ca);
2437
Rafael J. Wysocki7be98232007-05-06 14:50:42 -07002438 if (swsusp_page_is_forbidden(page) && swsusp_page_is_free(page))
Rafael J. Wysocki940864d2006-09-25 23:32:55 -07002439 /* We have allocated the "original" page frame and we can
2440 * use it directly to store the loaded page.
Rafael J. Wysocki968808b82006-06-23 02:04:48 -07002441 */
Rafael J. Wysocki940864d2006-09-25 23:32:55 -07002442 return page_address(page);
2443
2444 /* The "original" page frame has not been allocated and we have to
2445 * use a "safe" page frame to store the loaded page.
Rafael J. Wysocki968808b82006-06-23 02:04:48 -07002446 */
Rafael J. Wysocki940864d2006-09-25 23:32:55 -07002447 pbe = chain_alloc(ca, sizeof(struct pbe));
2448 if (!pbe) {
2449 swsusp_free();
Rafael J. Wysocki69643272008-11-11 21:32:44 +01002450 return ERR_PTR(-ENOMEM);
Rafael J. Wysocki940864d2006-09-25 23:32:55 -07002451 }
Rafael J. Wysocki83573762006-12-06 20:34:18 -08002452 pbe->orig_address = page_address(page);
2453 pbe->address = safe_pages_list;
Rafael J. Wysocki940864d2006-09-25 23:32:55 -07002454 safe_pages_list = safe_pages_list->next;
2455 pbe->next = restore_pblist;
2456 restore_pblist = pbe;
Rafael J. Wysocki83573762006-12-06 20:34:18 -08002457 return pbe->address;
Rafael J. Wysocki968808b82006-06-23 02:04:48 -07002458}
2459
Rafael J. Wysockif577eb32006-03-23 02:59:59 -08002460/**
2461 * snapshot_write_next - used for writing the system memory snapshot.
2462 *
2463 * On the first call to it, @handle should point to a zeroed
2464 * snapshot_handle structure. The structure gets updated and a pointer
2465 * to it should be passed to this function on each subsequent call.
2466 *
Rafael J. Wysockif577eb32006-03-23 02:59:59 -08002467 * On success the function returns a positive number. Then, the caller
2468 * is allowed to write up to the returned number of bytes to the memory
Jiri Slabyd3c1b242010-05-01 23:52:02 +02002469 * location computed by the data_of() macro.
Rafael J. Wysockif577eb32006-03-23 02:59:59 -08002470 *
2471 * The function returns 0 to indicate the "end of file" condition,
2472 * and a negative number is returned on error. In such cases the
2473 * structure pointed to by @handle is not updated and should not be used
2474 * any more.
2475 */
2476
Jiri Slabyd3c1b242010-05-01 23:52:02 +02002477int snapshot_write_next(struct snapshot_handle *handle)
Rafael J. Wysockif577eb32006-03-23 02:59:59 -08002478{
Rafael J. Wysocki940864d2006-09-25 23:32:55 -07002479 static struct chain_allocator ca;
Rafael J. Wysockif577eb32006-03-23 02:59:59 -08002480 int error = 0;
2481
Rafael J. Wysocki940864d2006-09-25 23:32:55 -07002482 /* Check if we have already loaded the entire image */
Jiri Slabyd3c1b242010-05-01 23:52:02 +02002483 if (handle->cur > 1 && handle->cur > nr_meta_pages + nr_copy_pages)
Rafael J. Wysockif577eb32006-03-23 02:59:59 -08002484 return 0;
Rafael J. Wysocki940864d2006-09-25 23:32:55 -07002485
Jiri Slabyd3c1b242010-05-01 23:52:02 +02002486 handle->sync_read = 1;
2487
2488 if (!handle->cur) {
Rafael J. Wysocki83573762006-12-06 20:34:18 -08002489 if (!buffer)
2490 /* This makes the buffer be freed by swsusp_free() */
2491 buffer = get_image_page(GFP_ATOMIC, PG_ANY);
2492
Rafael J. Wysockif577eb32006-03-23 02:59:59 -08002493 if (!buffer)
2494 return -ENOMEM;
Rafael J. Wysocki83573762006-12-06 20:34:18 -08002495
Rafael J. Wysockif577eb32006-03-23 02:59:59 -08002496 handle->buffer = buffer;
Jiri Slabyd3c1b242010-05-01 23:52:02 +02002497 } else if (handle->cur == 1) {
2498 error = load_header(buffer);
2499 if (error)
2500 return error;
2501
Rafael J. Wysocki9c744482016-06-29 03:00:51 +02002502 safe_pages_list = NULL;
2503
Jiri Slabyd3c1b242010-05-01 23:52:02 +02002504 error = memory_bm_create(&copy_bm, GFP_ATOMIC, PG_ANY);
2505 if (error)
2506 return error;
2507
Martin Schwidefsky85055dd2011-08-17 20:42:24 +02002508 /* Allocate buffer for page keys. */
2509 error = page_key_alloc(nr_copy_pages);
2510 if (error)
2511 return error;
2512
Jiri Slabyd3c1b242010-05-01 23:52:02 +02002513 } else if (handle->cur <= nr_meta_pages + 1) {
2514 error = unpack_orig_pfns(buffer, &copy_bm);
2515 if (error)
2516 return error;
2517
2518 if (handle->cur == nr_meta_pages + 1) {
2519 error = prepare_image(&orig_bm, &copy_bm);
Rafael J. Wysockif577eb32006-03-23 02:59:59 -08002520 if (error)
2521 return error;
Rafael J. Wysocki940864d2006-09-25 23:32:55 -07002522
Jiri Slabyd3c1b242010-05-01 23:52:02 +02002523 chain_init(&ca, GFP_ATOMIC, PG_SAFE);
2524 memory_bm_position_reset(&orig_bm);
2525 restore_pblist = NULL;
Rafael J. Wysocki940864d2006-09-25 23:32:55 -07002526 handle->buffer = get_buffer(&orig_bm, &ca);
Jiri Slabyd3c1b242010-05-01 23:52:02 +02002527 handle->sync_read = 0;
Rafael J. Wysocki69643272008-11-11 21:32:44 +01002528 if (IS_ERR(handle->buffer))
2529 return PTR_ERR(handle->buffer);
Rafael J. Wysockif577eb32006-03-23 02:59:59 -08002530 }
Rafael J. Wysockif577eb32006-03-23 02:59:59 -08002531 } else {
Jiri Slabyd3c1b242010-05-01 23:52:02 +02002532 copy_last_highmem_page();
Martin Schwidefsky85055dd2011-08-17 20:42:24 +02002533 /* Restore page key for data page (s390 only). */
2534 page_key_write(handle->buffer);
Jiri Slabyd3c1b242010-05-01 23:52:02 +02002535 handle->buffer = get_buffer(&orig_bm, &ca);
2536 if (IS_ERR(handle->buffer))
2537 return PTR_ERR(handle->buffer);
2538 if (handle->buffer != buffer)
2539 handle->sync_read = 0;
Rafael J. Wysockif577eb32006-03-23 02:59:59 -08002540 }
Jiri Slabyd3c1b242010-05-01 23:52:02 +02002541 handle->cur++;
2542 return PAGE_SIZE;
Rafael J. Wysockif577eb32006-03-23 02:59:59 -08002543}
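/*
 * Hypothetical feeder for the interface above (a sketch only; the real
 * users live in kernel/power/swap.c and kernel/power/user.c, and read_in()
 * below is made up).  Error handling elided:
 *
 *	struct snapshot_handle handle;
 *	int n;
 *
 *	memset(&handle, 0, sizeof(handle));
 *	while ((n = snapshot_write_next(&handle)) > 0) {
 *		if (read_in(data_of(handle), n) < n)
 *			break;
 *	}
 *	snapshot_write_finalize(&handle);
 *	if (!snapshot_image_loaded(&handle))
 *		return -ENODATA;
 */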
2544
Rafael J. Wysocki83573762006-12-06 20:34:18 -08002545/**
2546 * snapshot_write_finalize - must be called after the last call to
2547 * snapshot_write_next() in case the last page in the image happens
2548 * to be a highmem page and its contents should be stored in
2549 * highmem. Additionally, it releases the memory that will not be
2550 * used any more.
2551 */
2552
2553void snapshot_write_finalize(struct snapshot_handle *handle)
2554{
2555 copy_last_highmem_page();
Martin Schwidefsky85055dd2011-08-17 20:42:24 +02002556 /* Restore page key for data page (s390 only). */
2557 page_key_write(handle->buffer);
2558 page_key_free();
Rafael J. Wysocki83573762006-12-06 20:34:18 -08002559 /* Free only if we have loaded the image entirely */
Jiri Slabyd3c1b242010-05-01 23:52:02 +02002560 if (handle->cur > 1 && handle->cur > nr_meta_pages + nr_copy_pages) {
Rafael J. Wysocki83573762006-12-06 20:34:18 -08002561 memory_bm_free(&orig_bm, PG_UNSAFE_CLEAR);
2562 free_highmem_data();
2563 }
2564}
2565
Rafael J. Wysockif577eb32006-03-23 02:59:59 -08002566int snapshot_image_loaded(struct snapshot_handle *handle)
2567{
Rafael J. Wysocki83573762006-12-06 20:34:18 -08002568 return !(!nr_copy_pages || !last_highmem_page_copied() ||
Rafael J. Wysocki940864d2006-09-25 23:32:55 -07002569 handle->cur <= nr_meta_pages + nr_copy_pages);
2570}
2571
Rafael J. Wysocki83573762006-12-06 20:34:18 -08002572#ifdef CONFIG_HIGHMEM
2573/* Assumes that @buf is ready and points to a "safe" page */
2574static inline void
2575swap_two_pages_data(struct page *p1, struct page *p2, void *buf)
Rafael J. Wysocki940864d2006-09-25 23:32:55 -07002576{
Rafael J. Wysocki83573762006-12-06 20:34:18 -08002577 void *kaddr1, *kaddr2;
2578
Cong Wang0de9a1e2011-11-25 23:14:38 +08002579 kaddr1 = kmap_atomic(p1);
2580 kaddr2 = kmap_atomic(p2);
Jan Beulich3ecb01d2010-10-26 14:22:27 -07002581 copy_page(buf, kaddr1);
2582 copy_page(kaddr1, kaddr2);
2583 copy_page(kaddr2, buf);
Cong Wang0de9a1e2011-11-25 23:14:38 +08002584 kunmap_atomic(kaddr2);
2585 kunmap_atomic(kaddr1);
Rafael J. Wysockif577eb32006-03-23 02:59:59 -08002586}
Rafael J. Wysocki83573762006-12-06 20:34:18 -08002587
2588/**
2589 * restore_highmem - for each highmem page that was allocated before
2590 * the suspend and included in the suspend image, and also has been
2591 * allocated by the "resume" kernel, swap its current (ie. "before
2592 * resume") contents with the previous (ie. "before suspend") one.
2593 *
2594 * If the resume eventually fails, we can call this function once
2595 * again and restore the "before resume" highmem state.
2596 */
2597
2598int restore_highmem(void)
2599{
2600 struct highmem_pbe *pbe = highmem_pblist;
2601 void *buf;
2602
2603 if (!pbe)
2604 return 0;
2605
2606 buf = get_image_page(GFP_ATOMIC, PG_SAFE);
2607 if (!buf)
2608 return -ENOMEM;
2609
2610 while (pbe) {
2611 swap_two_pages_data(pbe->copy_page, pbe->orig_page, buf);
2612 pbe = pbe->next;
2613 }
2614 free_image_page(buf, PG_UNSAFE_CLEAR);
2615 return 0;
2616}
2617#endif /* CONFIG_HIGHMEM */