/*
 * Copyright IBM Corp. 2008
 *
 * Guest page hinting for unused pages.
 *
 * Author(s): Martin Schwidefsky <schwidefsky@de.ibm.com>
 */

#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/types.h>
#include <linux/mm.h>
#include <linux/gfp.h>
#include <linux/init.h>

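/*
 * Function codes for the ESSA instruction ("Extract and Set Storage
 * Attributes", opcode 0xb9ab). ESSA tells the hypervisor what usage
 * state a guest page is in; a page marked "unused" may simply be
 * discarded by the hypervisor instead of being preserved or paged out.
 */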
#define ESSA_SET_STABLE		1
#define ESSA_SET_UNUSED		2
static int cmma_flag = 1;

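/*
 * Parse the "cmma=" kernel parameter. As checked below, "yes" and "on"
 * enable collaborative memory management, "no" and "off" disable it;
 * for example, booting with "cmma=off" turns the hinting off entirely.
 */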
static int __init cmma(char *str)
{
	char *parm;

	parm = strstrip(str);
	if (strcmp(parm, "yes") == 0 || strcmp(parm, "on") == 0) {
		cmma_flag = 1;
		return 1;
	}
	cmma_flag = 0;
	if (strcmp(parm, "no") == 0 || strcmp(parm, "off") == 0)
		return 1;
	return 0;
}
__setup("cmma=", cmma);

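/*
 * Probe for the ESSA instruction: execute it once on a dummy operand.
 * On success the "la" below sets rc to 0; on machines without the
 * facility the program check is handled via the exception table entry,
 * rc keeps its initial -EOPNOTSUPP and the feature is switched off.
 */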
void __init cmma_init(void)
{
	register unsigned long tmp asm("0") = 0;
	register int rc asm("1") = -EOPNOTSUPP;

	if (!cmma_flag)
		return;
	asm volatile(
		"	.insn rrf,0xb9ab0000,%1,%1,0,0\n"
		"0:	la	%0,0\n"
		"1:\n"
		EX_TABLE(0b,1b)
		: "+&d" (rc), "+&d" (tmp));
	if (rc)
		cmma_flag = 0;
}

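/*
 * Mark each 4K page of a 2^order buddy block as "unused". ESSA returns
 * the previous page state in rc, which is not needed here.
 */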
static inline void set_page_unstable(struct page *page, int order)
{
	int i, rc;

	for (i = 0; i < (1 << order); i++)
		asm volatile(".insn rrf,0xb9ab0000,%0,%1,%2,0"
			     : "=&d" (rc)
			     : "a" (page_to_phys(page + i)),
			       "i" (ESSA_SET_UNUSED));
}

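/* Page allocator hook, called whenever pages are freed. */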
void arch_free_page(struct page *page, int order)
{
	if (!cmma_flag)
		return;
	set_page_unstable(page, order);
}

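/*
 * Mark each 4K page of a 2^order block as "stable": the hypervisor
 * must preserve its content because the guest is about to use it.
 */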
static inline void set_page_stable(struct page *page, int order)
{
	int i, rc;

	for (i = 0; i < (1 << order); i++)
		asm volatile(".insn rrf,0xb9ab0000,%0,%1,%2,0"
			     : "=&d" (rc)
			     : "a" (page_to_phys(page + i)),
			       "i" (ESSA_SET_STABLE));
}

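/* Page allocator hook, called whenever pages are allocated. */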
void arch_alloc_page(struct page *page, int order)
{
	if (!cmma_flag)
		return;
	set_page_stable(page, order);
}

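/*
 * Walk the free lists of all populated zones and set every free page
 * to the requested state (presumably for transitions such as
 * hibernation, where all of free memory must be in a known state).
 * For make_stable the local per-cpu pages are drained first so that
 * they sit on the free lists and are covered by the walk as well.
 */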
void arch_set_page_states(int make_stable)
{
	unsigned long flags, order, t;
	struct list_head *l;
	struct page *page;
	struct zone *zone;

	if (!cmma_flag)
		return;
	if (make_stable)
		drain_local_pages(NULL);
	for_each_populated_zone(zone) {
		spin_lock_irqsave(&zone->lock, flags);
		for_each_migratetype_order(order, t) {
			list_for_each(l, &zone->free_area[order].free_list[t]) {
				page = list_entry(l, struct page, lru);
				if (make_stable)
					set_page_stable(page, order);
				else
					set_page_unstable(page, order);
			}
		}
		spin_unlock_irqrestore(&zone->lock, flags);
	}
}