/*
 * pSeries_lpar.c
 * Copyright (C) 2001 Todd Inglett, IBM Corporation
 *
 * pSeries LPAR support.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
 */

/* Enables debugging of low-level hash table routines - careful! */
#undef DEBUG

#include <linux/kernel.h>
#include <linux/dma-mapping.h>
#include <linux/console.h>
#include <linux/export.h>
#include <linux/jump_label.h>
#include <asm/processor.h>
#include <asm/mmu.h>
#include <asm/page.h>
#include <asm/pgtable.h>
#include <asm/machdep.h>
#include <asm/mmu_context.h>
#include <asm/iommu.h>
#include <asm/tlbflush.h>
#include <asm/tlb.h>
#include <asm/prom.h>
#include <asm/cputable.h>
#include <asm/udbg.h>
#include <asm/smp.h>
#include <asm/trace.h>
#include <asm/firmware.h>
#include <asm/plpar_wrappers.h>
#include <asm/kexec.h>
#include <asm/fadump.h>

#include "pseries.h"

/* Flag bits for H_BULK_REMOVE */
#define HBR_REQUEST	0x4000000000000000UL
#define HBR_RESPONSE	0x8000000000000000UL
#define HBR_END		0xc000000000000000UL
#define HBR_AVPN	0x0200000000000000UL
#define HBR_ANDCOND	0x0100000000000000UL


/* in hvCall.S */
EXPORT_SYMBOL(plpar_hcall);
EXPORT_SYMBOL(plpar_hcall9);
EXPORT_SYMBOL(plpar_hcall_norets);

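/*
 * Register this cpu's Virtual Processor Area (lppaca), SLB shadow
 * buffer and dispatch trace log with the hypervisor. Must be called
 * on the cpu being registered (see the WARN_ON below).
 */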
void vpa_init(int cpu)
{
	int hwcpu = get_hard_smp_processor_id(cpu);
	unsigned long addr;
	long ret;
	struct paca_struct *pp;
	struct dtl_entry *dtl;

	/*
	 * The spec says it "may be problematic" if CPU x registers the VPA of
	 * CPU y. We should never do that, but wail if we ever do.
	 */
	WARN_ON(cpu != smp_processor_id());

	if (cpu_has_feature(CPU_FTR_ALTIVEC))
		lppaca_of(cpu).vmxregs_in_use = 1;

	if (cpu_has_feature(CPU_FTR_ARCH_207S))
		lppaca_of(cpu).ebb_regs_in_use = 1;

	addr = __pa(&lppaca_of(cpu));
	ret = register_vpa(hwcpu, addr);

	if (ret) {
		pr_err("WARNING: VPA registration for cpu %d (hw %d) of area "
		       "%lx failed with %ld\n", cpu, hwcpu, addr, ret);
		return;
	}
	/*
	 * PAPR defines this as an "SLB Shadow Buffer" firmware feature,
	 * but firmware never reports it. All SPLPARs support the SLB
	 * shadow buffer, so register it whenever we are an SPLPAR.
	 */
	addr = __pa(paca[cpu].slb_shadow_ptr);
	if (firmware_has_feature(FW_FEATURE_SPLPAR)) {
		ret = register_slb_shadow(hwcpu, addr);
		if (ret)
			pr_err("WARNING: SLB shadow buffer registration for "
			       "cpu %d (hw %d) of area %lx failed with %ld\n",
			       cpu, hwcpu, addr, ret);
	}

	/*
	 * Register dispatch trace log, if one has been allocated.
	 */
	pp = &paca[cpu];
	dtl = pp->dispatch_log;
	if (dtl) {
		pp->dtl_ridx = 0;
		pp->dtl_curr = dtl;
		lppaca_of(cpu).dtl_idx = 0;

		/* hypervisor reads buffer length from this field */
		dtl->enqueue_to_dispatch_time = cpu_to_be32(DISPATCH_LOG_BYTES);
		ret = register_dtl(hwcpu, __pa(dtl));
		if (ret)
			pr_err("WARNING: DTL registration of cpu %d (hw %d) "
			       "failed with %ld\n", smp_processor_id(),
			       hwcpu, ret);
		lppaca_of(cpu).dtl_enable_mask = 2;
	}
}

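/*
 * Insert an entry into the hash page table via the H_ENTER hcall.
 * Returns the slot number within the group (with the secondary-hash
 * bit folded in), -1 if the group is full, or -2 on any other
 * hypervisor error.
 */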
static long pSeries_lpar_hpte_insert(unsigned long hpte_group,
				     unsigned long vpn, unsigned long pa,
				     unsigned long rflags, unsigned long vflags,
				     int psize, int apsize, int ssize)
{
	unsigned long lpar_rc;
	unsigned long flags;
	unsigned long slot;
	unsigned long hpte_v, hpte_r;

	if (!(vflags & HPTE_V_BOLTED))
		pr_devel("hpte_insert(group=%lx, vpn=%016lx, "
			 "pa=%016lx, rflags=%lx, vflags=%lx, psize=%d)\n",
			 hpte_group, vpn, pa, rflags, vflags, psize);

	hpte_v = hpte_encode_v(vpn, psize, apsize, ssize) | vflags | HPTE_V_VALID;
	hpte_r = hpte_encode_r(pa, psize, apsize) | rflags;

	if (!(vflags & HPTE_V_BOLTED))
		pr_devel(" hpte_v=%016lx, hpte_r=%016lx\n", hpte_v, hpte_r);

	/* Now fill in the actual HPTE */
	/* Set CEC cookie to 0 */
	/* Zero page = 0 */
	/* I-cache Invalidate = 0 */
	/* I-cache synchronize = 0 */
	/* Exact = 0 */
	flags = 0;

	/* Make pHyp happy */
	if ((rflags & _PAGE_NO_CACHE) && !(rflags & _PAGE_WRITETHRU))
		hpte_r &= ~HPTE_R_M;

	if (firmware_has_feature(FW_FEATURE_XCMO) && !(hpte_r & HPTE_R_N))
		flags |= H_COALESCE_CAND;

	lpar_rc = plpar_pte_enter(flags, hpte_group, hpte_v, hpte_r, &slot);
	if (unlikely(lpar_rc == H_PTEG_FULL)) {
		if (!(vflags & HPTE_V_BOLTED))
			pr_devel(" full\n");
		return -1;
	}

	/*
	 * Since we try and ioremap PHBs we don't own, the pte insert
	 * will fail. However we must catch the failure in hash_page
	 * or we will loop forever, so return -2 in this case.
	 */
	if (unlikely(lpar_rc != H_SUCCESS)) {
		if (!(vflags & HPTE_V_BOLTED))
			pr_devel(" lpar err %ld\n", lpar_rc);
		return -2;
	}
	if (!(vflags & HPTE_V_BOLTED))
		pr_devel(" -> slot: %lu\n", slot & 7);

	/* Because of iSeries, we have to pass down the secondary
	 * bucket bit here as well
	 */
	return (slot & 7) | (!!(vflags & HPTE_V_SECONDARY) << 3);
}

static DEFINE_SPINLOCK(pSeries_lpar_tlbie_lock);

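/*
 * Evict one non-bolted entry from the given HPTE group, starting at a
 * pseudo-random slot, to make room for a new insertion. Returns a
 * non-negative value on success, or -1 if every entry in the group is
 * bolted.
 */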
static long pSeries_lpar_hpte_remove(unsigned long hpte_group)
{
	unsigned long slot_offset;
	unsigned long lpar_rc;
	int i;
	unsigned long dummy1, dummy2;

	/* pick a random slot to start at */
	slot_offset = mftb() & 0x7;

	for (i = 0; i < HPTES_PER_GROUP; i++) {

		/* don't remove a bolted entry */
		lpar_rc = plpar_pte_remove(H_ANDCOND, hpte_group + slot_offset,
					   (0x1UL << 4), &dummy1, &dummy2);
		if (lpar_rc == H_SUCCESS)
			return i;

		/*
		 * The test for adjunct partition is performed before the
		 * ANDCOND test. H_RESOURCE may be returned, so we need to
		 * check for that as well.
		 */
		BUG_ON(lpar_rc != H_NOT_FOUND && lpar_rc != H_RESOURCE);

		slot_offset++;
		slot_offset &= 0x7;
	}

	return -1;
}

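/*
 * Clear the hash page table, e.g. before kexec. Entries are read back
 * four at a time and every valid entry outside the VRMA is removed.
 */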
static void pSeries_lpar_hptab_clear(void)
{
	unsigned long size_bytes = 1UL << ppc64_pft_size;
	unsigned long hpte_count = size_bytes >> 4;
	struct {
		unsigned long pteh;
		unsigned long ptel;
	} ptes[4];
	long lpar_rc;
	unsigned long i, j;

	/*
	 * Read in batches of 4, invalidate only valid entries not in the
	 * VRMA. hpte_count will be a multiple of 4.
	 */
	for (i = 0; i < hpte_count; i += 4) {
		lpar_rc = plpar_pte_read_4_raw(0, i, (void *)ptes);
		if (lpar_rc != H_SUCCESS)
			continue;
		for (j = 0; j < 4; j++) {
			if ((ptes[j].pteh & HPTE_V_VRMA_MASK) ==
			    HPTE_V_VRMA_MASK)
				continue;
			if (ptes[j].pteh & HPTE_V_VALID)
				plpar_pte_remove_raw(0, i + j, 0,
					&(ptes[j].pteh), &(ptes[j].ptel));
		}
	}

#ifdef __LITTLE_ENDIAN__
	/*
	 * Reset exceptions to big endian.
	 *
	 * FIXME this is a hack for kexec, we need to reset the exception
	 * endian before starting the new kernel and this is a convenient place
	 * to do it.
	 *
	 * This is also called on boot when a fadump happens. In that case we
	 * must not change the exception endian mode.
	 */
	if (firmware_has_feature(FW_FEATURE_SET_MODE) && !is_fadump_active()) {
		long rc;

		rc = pseries_big_endian_exceptions();
		/*
		 * At this point it is unlikely panic() will get anything
		 * out to the user, but at least this will stop us from
		 * continuing on further and creating an even more
		 * difficult to debug situation.
		 *
		 * There is a known problem when kdump'ing, if cpus are offline
		 * the above call will fail. Rather than panicking again, keep
		 * going and hope the kdump kernel is also little endian, which
		 * it usually is.
		 */
		if (rc && !kdump_in_progress())
			panic("Could not enable big endian exceptions");
	}
#endif
}

/*
 * NOTE: for updatepp ops we are fortunate that the linux "newpp" bits and
 * the low 3 bits of flags happen to line up.  So no transform is needed.
 * We can probably optimize here and assume the high bits of newpp are
 * already zero.  For now I am paranoid.
 */
static long pSeries_lpar_hpte_updatepp(unsigned long slot,
				       unsigned long newpp,
				       unsigned long vpn,
				       int psize, int apsize,
				       int ssize, unsigned long inv_flags)
{
	unsigned long lpar_rc;
	unsigned long flags = (newpp & 7) | H_AVPN;
	unsigned long want_v;

	want_v = hpte_encode_avpn(vpn, psize, ssize);

	pr_devel("    update: avpnv=%016lx, hash=%016lx, f=%lx, psize: %d ...",
		 want_v, slot, flags, psize);

	lpar_rc = plpar_pte_protect(flags, slot, want_v);

	if (lpar_rc == H_NOT_FOUND) {
		pr_devel("not found !\n");
		return -1;
	}

	pr_devel("ok\n");

	BUG_ON(lpar_rc != H_SUCCESS);

	return 0;
}

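/*
 * Search one HPTE group (reading back four entries per hcall) for a
 * valid entry matching want_v. Returns the slot index within the
 * group, or -1 if no match is found.
 */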
static long __pSeries_lpar_hpte_find(unsigned long want_v, unsigned long hpte_group)
{
	long lpar_rc;
	unsigned long i, j;
	struct {
		unsigned long pteh;
		unsigned long ptel;
	} ptes[4];

	for (i = 0; i < HPTES_PER_GROUP; i += 4, hpte_group += 4) {

		lpar_rc = plpar_pte_read_4(0, hpte_group, (void *)ptes);
		if (lpar_rc != H_SUCCESS)
			continue;

		for (j = 0; j < 4; j++) {
			if (HPTE_V_COMPARE(ptes[j].pteh, want_v) &&
			    (ptes[j].pteh & HPTE_V_VALID))
				return i + j;
		}
	}

	return -1;
}

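/*
 * Find the hash table slot backing a bolted kernel mapping of @vpn,
 * or -1 if no matching entry exists.
 */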
static long pSeries_lpar_hpte_find(unsigned long vpn, int psize, int ssize)
{
	long slot;
	unsigned long hash;
	unsigned long want_v;
	unsigned long hpte_group;

	hash = hpt_hash(vpn, mmu_psize_defs[psize].shift, ssize);
	want_v = hpte_encode_avpn(vpn, psize, ssize);

	/* Bolted entries are always in the primary group */
	hpte_group = (hash & htab_hash_mask) * HPTES_PER_GROUP;
	slot = __pSeries_lpar_hpte_find(want_v, hpte_group);
	if (slot < 0)
		return -1;
	return hpte_group + slot;
}

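/*
 * Update the protection bits of a bolted kernel mapping; the entry
 * must exist, otherwise we BUG().
 */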
static void pSeries_lpar_hpte_updateboltedpp(unsigned long newpp,
					     unsigned long ea,
					     int psize, int ssize)
{
	unsigned long vpn;
	unsigned long lpar_rc, slot, vsid, flags;

	vsid = get_kernel_vsid(ea, ssize);
	vpn = hpt_vpn(ea, vsid, ssize);

	slot = pSeries_lpar_hpte_find(vpn, psize, ssize);
	BUG_ON(slot == -1);

	flags = newpp & 7;
	lpar_rc = plpar_pte_protect(flags, slot, 0);

	BUG_ON(lpar_rc != H_SUCCESS);
}

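/*
 * Invalidate the HPTE at @slot whose AVPN matches @vpn. Returns
 * silently if the entry is already gone (H_NOT_FOUND).
 */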
static void pSeries_lpar_hpte_invalidate(unsigned long slot, unsigned long vpn,
					 int psize, int apsize,
					 int ssize, int local)
{
	unsigned long want_v;
	unsigned long lpar_rc;
	unsigned long dummy1, dummy2;

	pr_devel("    inval : slot=%lx, vpn=%016lx, psize: %d, local: %d\n",
		 slot, vpn, psize, local);

	want_v = hpte_encode_avpn(vpn, psize, ssize);
	lpar_rc = plpar_pte_remove(H_AVPN, slot, want_v, &dummy1, &dummy2);
	if (lpar_rc == H_NOT_FOUND)
		return;

	BUG_ON(lpar_rc != H_SUCCESS);
}

#ifdef CONFIG_TRANSPARENT_HUGEPAGE
/*
 * Limit iterations holding pSeries_lpar_tlbie_lock to 3. We also need
 * to make sure that we avoid bouncing the hypervisor tlbie lock.
 */
#define PPC64_HUGE_HPTE_BATCH 12

static void __pSeries_lpar_hugepage_invalidate(unsigned long *slot,
					       unsigned long *vpn, int count,
					       int psize, int ssize)
{
	unsigned long param[8];
	int i = 0, pix = 0, rc;
	unsigned long flags = 0;
	int lock_tlbie = !mmu_has_feature(MMU_FTR_LOCKLESS_TLBIE);

	if (lock_tlbie)
		spin_lock_irqsave(&pSeries_lpar_tlbie_lock, flags);

	for (i = 0; i < count; i++) {

		if (!firmware_has_feature(FW_FEATURE_BULK_REMOVE)) {
			pSeries_lpar_hpte_invalidate(slot[i], vpn[i], psize, 0,
						     ssize, 0);
		} else {
			param[pix] = HBR_REQUEST | HBR_AVPN | slot[i];
			param[pix+1] = hpte_encode_avpn(vpn[i], psize, ssize);
			pix += 2;
			if (pix == 8) {
				rc = plpar_hcall9(H_BULK_REMOVE, param,
					param[0], param[1], param[2],
					param[3], param[4], param[5],
					param[6], param[7]);
				BUG_ON(rc != H_SUCCESS);
				pix = 0;
			}
		}
	}
	if (pix) {
		param[pix] = HBR_END;
		rc = plpar_hcall9(H_BULK_REMOVE, param, param[0], param[1],
				  param[2], param[3], param[4], param[5],
				  param[6], param[7]);
		BUG_ON(rc != H_SUCCESS);
	}

	if (lock_tlbie)
		spin_unlock_irqrestore(&pSeries_lpar_tlbie_lock, flags);
}

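/*
 * Invalidate all HPTEs backing a hugepage. The per-subpage hash slot
 * information lives in hpte_slot_array; walk it, recompute each vpn
 * and slot, and flush in batches of PPC64_HUGE_HPTE_BATCH.
 */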
static void pSeries_lpar_hugepage_invalidate(unsigned long vsid,
					     unsigned long addr,
					     unsigned char *hpte_slot_array,
					     int psize, int ssize, int local)
{
	int i, index = 0;
	unsigned long s_addr = addr;
	unsigned int max_hpte_count, valid;
	unsigned long vpn_array[PPC64_HUGE_HPTE_BATCH];
	unsigned long slot_array[PPC64_HUGE_HPTE_BATCH];
	unsigned long shift, hidx, vpn = 0, hash, slot;

	shift = mmu_psize_defs[psize].shift;
	max_hpte_count = 1U << (PMD_SHIFT - shift);

	for (i = 0; i < max_hpte_count; i++) {
		valid = hpte_valid(hpte_slot_array, i);
		if (!valid)
			continue;
		hidx = hpte_hash_index(hpte_slot_array, i);

		/* get the vpn */
		addr = s_addr + (i * (1ul << shift));
		vpn = hpt_vpn(addr, vsid, ssize);
		hash = hpt_hash(vpn, shift, ssize);
		if (hidx & _PTEIDX_SECONDARY)
			hash = ~hash;

		slot = (hash & htab_hash_mask) * HPTES_PER_GROUP;
		slot += hidx & _PTEIDX_GROUP_IX;

		slot_array[index] = slot;
		vpn_array[index] = vpn;
		if (index == PPC64_HUGE_HPTE_BATCH - 1) {
			/*
			 * Now do a bulk invalidate
			 */
			__pSeries_lpar_hugepage_invalidate(slot_array,
							   vpn_array,
							   PPC64_HUGE_HPTE_BATCH,
							   psize, ssize);
			index = 0;
		} else
			index++;
	}
	if (index)
		__pSeries_lpar_hugepage_invalidate(slot_array, vpn_array,
						   index, psize, ssize);
}
#else
static void pSeries_lpar_hugepage_invalidate(unsigned long vsid,
					     unsigned long addr,
					     unsigned char *hpte_slot_array,
					     int psize, int ssize, int local)
{
	WARN(1, "%s called without THP support\n", __func__);
}
#endif

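/*
 * Remove a bolted kernel mapping. Returns 0 on success, or -ENOENT if
 * no HPTE is found for the given effective address.
 */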
static int pSeries_lpar_hpte_removebolted(unsigned long ea,
					  int psize, int ssize)
{
	unsigned long vpn;
	unsigned long slot, vsid;

	vsid = get_kernel_vsid(ea, ssize);
	vpn = hpt_vpn(ea, vsid, ssize);

	slot = pSeries_lpar_hpte_find(vpn, psize, ssize);
	if (slot == -1)
		return -ENOENT;

	/*
	 * lpar doesn't use the passed actual page size
	 */
	pSeries_lpar_hpte_invalidate(slot, vpn, psize, 0, ssize, 0);
	return 0;
}

/*
 * Take a spinlock around flushes to avoid bouncing the hypervisor tlbie
 * lock.
 */
static void pSeries_lpar_flush_hash_range(unsigned long number, int local)
{
	unsigned long vpn;
	unsigned long i, pix, rc;
	unsigned long flags = 0;
	struct ppc64_tlb_batch *batch = this_cpu_ptr(&ppc64_tlb_batch);
	int lock_tlbie = !mmu_has_feature(MMU_FTR_LOCKLESS_TLBIE);
	unsigned long param[9];
	unsigned long hash, index, shift, hidx, slot;
	real_pte_t pte;
	int psize, ssize;

	if (lock_tlbie)
		spin_lock_irqsave(&pSeries_lpar_tlbie_lock, flags);

	psize = batch->psize;
	ssize = batch->ssize;
	pix = 0;
	for (i = 0; i < number; i++) {
		vpn = batch->vpn[i];
		pte = batch->pte[i];
		pte_iterate_hashed_subpages(pte, psize, vpn, index, shift) {
			hash = hpt_hash(vpn, shift, ssize);
			hidx = __rpte_to_hidx(pte, index);
			if (hidx & _PTEIDX_SECONDARY)
				hash = ~hash;
			slot = (hash & htab_hash_mask) * HPTES_PER_GROUP;
			slot += hidx & _PTEIDX_GROUP_IX;
			if (!firmware_has_feature(FW_FEATURE_BULK_REMOVE)) {
				/*
				 * lpar doesn't use the passed actual page size
				 */
				pSeries_lpar_hpte_invalidate(slot, vpn, psize,
							     0, ssize, local);
			} else {
				param[pix] = HBR_REQUEST | HBR_AVPN | slot;
				param[pix+1] = hpte_encode_avpn(vpn, psize,
								ssize);
				pix += 2;
				if (pix == 8) {
					rc = plpar_hcall9(H_BULK_REMOVE, param,
						param[0], param[1], param[2],
						param[3], param[4], param[5],
						param[6], param[7]);
					BUG_ON(rc != H_SUCCESS);
					pix = 0;
				}
			}
		} pte_iterate_hashed_end();
	}
	if (pix) {
		param[pix] = HBR_END;
		rc = plpar_hcall9(H_BULK_REMOVE, param, param[0], param[1],
				  param[2], param[3], param[4], param[5],
				  param[6], param[7]);
		BUG_ON(rc != H_SUCCESS);
	}

	if (lock_tlbie)
		spin_unlock_irqrestore(&pSeries_lpar_tlbie_lock, flags);
}

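/* "bulk_remove=off" on the command line disables use of H_BULK_REMOVE. */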
static int __init disable_bulk_remove(char *str)
{
	if (strcmp(str, "off") == 0 &&
	    firmware_has_feature(FW_FEATURE_BULK_REMOVE)) {
		printk(KERN_INFO "Disabling BULK_REMOVE firmware feature");
		powerpc_firmware_features &= ~FW_FEATURE_BULK_REMOVE;
	}
	return 1;
}

__setup("bulk_remove=", disable_bulk_remove);

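/*
 * Hook the hcall-based (LPAR) implementations of the hash MMU
 * operations into ppc_md.
 */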
void __init hpte_init_lpar(void)
{
	ppc_md.hpte_invalidate = pSeries_lpar_hpte_invalidate;
	ppc_md.hpte_updatepp = pSeries_lpar_hpte_updatepp;
	ppc_md.hpte_updateboltedpp = pSeries_lpar_hpte_updateboltedpp;
	ppc_md.hpte_insert = pSeries_lpar_hpte_insert;
	ppc_md.hpte_remove = pSeries_lpar_hpte_remove;
	ppc_md.hpte_removebolted = pSeries_lpar_hpte_removebolted;
	ppc_md.flush_hash_range = pSeries_lpar_flush_hash_range;
	ppc_md.hpte_clear_all = pSeries_lpar_hptab_clear;
	ppc_md.hugepage_invalidate = pSeries_lpar_hugepage_invalidate;
}

#ifdef CONFIG_PPC_SMLPAR
#define CMO_FREE_HINT_DEFAULT 1
static int cmo_free_hint_flag = CMO_FREE_HINT_DEFAULT;

static int __init cmo_free_hint(char *str)
{
	char *parm;
	parm = strstrip(str);

	if (strcasecmp(parm, "no") == 0 || strcasecmp(parm, "off") == 0) {
		printk(KERN_INFO "cmo_free_hint: CMO free page hinting is not active.\n");
		cmo_free_hint_flag = 0;
		return 1;
	}

	cmo_free_hint_flag = 1;
	printk(KERN_INFO "cmo_free_hint: CMO free page hinting is active.\n");

	if (strcasecmp(parm, "yes") == 0 || strcasecmp(parm, "on") == 0)
		return 1;

	return 0;
}

__setup("cmo_free_hint=", cmo_free_hint);

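/*
 * Hint the hypervisor about the used/unused state of the CMO pages
 * backing @page via H_PAGE_INIT, one cmo_page_sz chunk at a time.
 */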
static void pSeries_set_page_state(struct page *page, int order,
				   unsigned long state)
{
	int i, j;
	unsigned long cmo_page_sz, addr;

	cmo_page_sz = cmo_get_page_size();
	addr = __pa((unsigned long)page_address(page));

	for (i = 0; i < (1 << order); i++, addr += PAGE_SIZE) {
		for (j = 0; j < PAGE_SIZE; j += cmo_page_sz)
			plpar_hcall_norets(H_PAGE_INIT, state, addr + j, 0);
	}
}

void arch_free_page(struct page *page, int order)
{
	if (!cmo_free_hint_flag || !firmware_has_feature(FW_FEATURE_CMO))
		return;

	pSeries_set_page_state(page, order, H_PAGE_SET_UNUSED);
}
EXPORT_SYMBOL(arch_free_page);

#endif

#ifdef CONFIG_TRACEPOINTS
#ifdef HAVE_JUMP_LABEL
struct static_key hcall_tracepoint_key = STATIC_KEY_INIT;

void hcall_tracepoint_regfunc(void)
{
	static_key_slow_inc(&hcall_tracepoint_key);
}

void hcall_tracepoint_unregfunc(void)
{
	static_key_slow_dec(&hcall_tracepoint_key);
}
#else
/*
 * We optimise our hcall path by placing hcall_tracepoint_refcount
 * directly in the TOC so we can check if the hcall tracepoints are
 * enabled via a single load.
 */

/* NB: reg/unreg are called while guarded with the tracepoints_mutex */
extern long hcall_tracepoint_refcount;

void hcall_tracepoint_regfunc(void)
{
	hcall_tracepoint_refcount++;
}

void hcall_tracepoint_unregfunc(void)
{
	hcall_tracepoint_refcount--;
}
#endif

/*
 * Since the tracing code might execute hcalls we need to guard against
 * recursion. One example of this are spinlocks calling H_YIELD on
 * shared processor partitions.
 */
static DEFINE_PER_CPU(unsigned int, hcall_trace_depth);


void __trace_hcall_entry(unsigned long opcode, unsigned long *args)
{
	unsigned long flags;
	unsigned int *depth;

	/*
	 * We cannot call tracepoints inside RCU idle regions which
	 * means we must not trace H_CEDE.
	 */
	if (opcode == H_CEDE)
		return;

	local_irq_save(flags);

	depth = this_cpu_ptr(&hcall_trace_depth);

	if (*depth)
		goto out;

	(*depth)++;
	preempt_disable();
	trace_hcall_entry(opcode, args);
	(*depth)--;

out:
	local_irq_restore(flags);
}

void __trace_hcall_exit(long opcode, unsigned long retval,
			unsigned long *retbuf)
{
	unsigned long flags;
	unsigned int *depth;

	if (opcode == H_CEDE)
		return;

	local_irq_save(flags);

	depth = this_cpu_ptr(&hcall_trace_depth);

	if (*depth)
		goto out;

	(*depth)++;
	trace_hcall_exit(opcode, retval, retbuf);
	preempt_enable();
	(*depth)--;

out:
	local_irq_restore(flags);
}
#endif

/**
 * h_get_mpp
 * H_GET_MPP hcall returns info in 7 parms
 */
int h_get_mpp(struct hvcall_mpp_data *mpp_data)
{
	int rc;
	unsigned long retbuf[PLPAR_HCALL9_BUFSIZE];

	rc = plpar_hcall9(H_GET_MPP, retbuf);

	mpp_data->entitled_mem = retbuf[0];
	mpp_data->mapped_mem = retbuf[1];

	mpp_data->group_num = (retbuf[2] >> 2 * 8) & 0xffff;
	mpp_data->pool_num = retbuf[2] & 0xffff;

	mpp_data->mem_weight = (retbuf[3] >> 7 * 8) & 0xff;
	mpp_data->unallocated_mem_weight = (retbuf[3] >> 6 * 8) & 0xff;
	mpp_data->unallocated_entitlement = retbuf[3] & 0xffffffffffffUL;

	mpp_data->pool_size = retbuf[4];
	mpp_data->loan_request = retbuf[5];
	mpp_data->backing_mem = retbuf[6];

	return rc;
}
EXPORT_SYMBOL(h_get_mpp);

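/**
 * h_get_mpp_x
 * H_GET_MPP_X hcall returns extended MPP info (coalesced bytes and
 * pool PURR/SPURR cycles).
 */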
int h_get_mpp_x(struct hvcall_mpp_x_data *mpp_x_data)
{
	int rc;
	unsigned long retbuf[PLPAR_HCALL9_BUFSIZE] = { 0 };

	rc = plpar_hcall9(H_GET_MPP_X, retbuf);

	mpp_x_data->coalesced_bytes = retbuf[0];
	mpp_x_data->pool_coalesced_bytes = retbuf[1];
	mpp_x_data->pool_purr_cycles = retbuf[2];
	mpp_x_data->pool_spurr_cycles = retbuf[3];

	return rc;
}