/*
 * pSeries_lpar.c
 * Copyright (C) 2001 Todd Inglett, IBM Corporation
 *
 * pSeries LPAR support.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
 */

/* Enables debugging of low-level hash table routines - careful! */
#undef DEBUG

#include <linux/kernel.h>
#include <linux/dma-mapping.h>
#include <linux/console.h>
#include <linux/export.h>
#include <linux/jump_label.h>
#include <asm/processor.h>
#include <asm/mmu.h>
#include <asm/page.h>
#include <asm/pgtable.h>
#include <asm/machdep.h>
#include <asm/mmu_context.h>
#include <asm/iommu.h>
#include <asm/tlbflush.h>
#include <asm/tlb.h>
#include <asm/prom.h>
#include <asm/cputable.h>
#include <asm/udbg.h>
#include <asm/smp.h>
#include <asm/trace.h>
#include <asm/firmware.h>
#include <asm/plpar_wrappers.h>
#include <asm/kexec.h>
#include <asm/fadump.h>
#include <asm/asm-prototypes.h>

#include "pseries.h"

/* Flag bits for H_BULK_REMOVE */
#define HBR_REQUEST	0x4000000000000000UL
#define HBR_RESPONSE	0x8000000000000000UL
#define HBR_END		0xc000000000000000UL
#define HBR_AVPN	0x0200000000000000UL
#define HBR_ANDCOND	0x0100000000000000UL
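
/*
 * Each H_BULK_REMOVE translation specifier takes two hcall parameters: a
 * control word (HBR_REQUEST | HBR_AVPN | slot) followed by the AVPN to
 * match.  plpar_hcall9() passes eight parameters, so up to four
 * translations fit in one call; HBR_END terminates a partial batch.
 */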

/* in hvCall.S */
EXPORT_SYMBOL(plpar_hcall);
EXPORT_SYMBOL(plpar_hcall9);
EXPORT_SYMBOL(plpar_hcall_norets);

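/*
 * Register this CPU's Virtual Processor Area, and optionally its SLB
 * shadow buffer and dispatch trace log, with the hypervisor.
 */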
void vpa_init(int cpu)
{
	int hwcpu = get_hard_smp_processor_id(cpu);
	unsigned long addr;
	long ret;
	struct paca_struct *pp;
	struct dtl_entry *dtl;

	/*
	 * The spec says it "may be problematic" if CPU x registers the VPA of
	 * CPU y. We should never do that, but wail if we ever do.
	 */
	WARN_ON(cpu != smp_processor_id());

	if (cpu_has_feature(CPU_FTR_ALTIVEC))
		lppaca_of(cpu).vmxregs_in_use = 1;

	if (cpu_has_feature(CPU_FTR_ARCH_207S))
		lppaca_of(cpu).ebb_regs_in_use = 1;

	addr = __pa(&lppaca_of(cpu));
	ret = register_vpa(hwcpu, addr);

	if (ret) {
		pr_err("WARNING: VPA registration for cpu %d (hw %d) of area "
		       "%lx failed with %ld\n", cpu, hwcpu, addr, ret);
		return;
	}

#ifdef CONFIG_PPC_STD_MMU_64
	/*
	 * PAPR says this feature is "SLB-Buffer" but firmware never reports
	 * that; all SPLPARs support the SLB shadow buffer.
	 */
	if (!radix_enabled() && firmware_has_feature(FW_FEATURE_SPLPAR)) {
		addr = __pa(paca[cpu].slb_shadow_ptr);
		ret = register_slb_shadow(hwcpu, addr);
		if (ret)
			pr_err("WARNING: SLB shadow buffer registration for "
			       "cpu %d (hw %d) of area %lx failed with %ld\n",
			       cpu, hwcpu, addr, ret);
	}
#endif /* CONFIG_PPC_STD_MMU_64 */

	/*
	 * Register dispatch trace log, if one has been allocated.
	 */
	pp = &paca[cpu];
	dtl = pp->dispatch_log;
	if (dtl) {
		pp->dtl_ridx = 0;
		pp->dtl_curr = dtl;
		lppaca_of(cpu).dtl_idx = 0;

		/* hypervisor reads buffer length from this field */
		dtl->enqueue_to_dispatch_time = cpu_to_be32(DISPATCH_LOG_BYTES);
		ret = register_dtl(hwcpu, __pa(dtl));
		if (ret)
			pr_err("WARNING: DTL registration of cpu %d (hw %d) "
			       "failed with %ld\n", smp_processor_id(),
			       hwcpu, ret);
		lppaca_of(cpu).dtl_enable_mask = 2;
	}
}

#ifdef CONFIG_PPC_STD_MMU_64

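/*
 * Insert an HPTE via the H_ENTER hcall.  Returns the slot within the
 * group (with bit 3 set if the entry landed in the secondary bucket),
 * -1 if the group is full, or -2 on any other hypervisor error.
 */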
static long pSeries_lpar_hpte_insert(unsigned long hpte_group,
				     unsigned long vpn, unsigned long pa,
				     unsigned long rflags, unsigned long vflags,
				     int psize, int apsize, int ssize)
{
	unsigned long lpar_rc;
	unsigned long flags;
	unsigned long slot;
	unsigned long hpte_v, hpte_r;

	if (!(vflags & HPTE_V_BOLTED))
		pr_devel("hpte_insert(group=%lx, vpn=%016lx, "
			 "pa=%016lx, rflags=%lx, vflags=%lx, psize=%d)\n",
			 hpte_group, vpn, pa, rflags, vflags, psize);

	hpte_v = hpte_encode_v(vpn, psize, apsize, ssize) | vflags | HPTE_V_VALID;
	hpte_r = hpte_encode_r(pa, psize, apsize, ssize) | rflags;

	if (!(vflags & HPTE_V_BOLTED))
		pr_devel(" hpte_v=%016lx, hpte_r=%016lx\n", hpte_v, hpte_r);

	/* Now fill in the actual HPTE */
	/* Set CEC cookie to 0 */
	/* Zero page = 0 */
	/* I-cache Invalidate = 0 */
	/* I-cache synchronize = 0 */
	/* Exact = 0 */
	flags = 0;

	if (firmware_has_feature(FW_FEATURE_XCMO) && !(hpte_r & HPTE_R_N))
		flags |= H_COALESCE_CAND;

	lpar_rc = plpar_pte_enter(flags, hpte_group, hpte_v, hpte_r, &slot);
	if (unlikely(lpar_rc == H_PTEG_FULL)) {
		if (!(vflags & HPTE_V_BOLTED))
			pr_devel(" full\n");
		return -1;
	}

	/*
	 * Since we try and ioremap PHBs we don't own, the pte insert
	 * will fail. However we must catch the failure in hash_page
	 * or we will loop forever, so return -2 in this case.
	 */
	if (unlikely(lpar_rc != H_SUCCESS)) {
		if (!(vflags & HPTE_V_BOLTED))
			pr_devel(" lpar err %ld\n", lpar_rc);
		return -2;
	}
	if (!(vflags & HPTE_V_BOLTED))
		pr_devel(" -> slot: %lu\n", slot & 7);

	/*
	 * Because of iSeries, we have to pass down the secondary
	 * bucket bit here as well.
	 */
	return (slot & 7) | (!!(vflags & HPTE_V_SECONDARY) << 3);
}

static DEFINE_SPINLOCK(pSeries_lpar_tlbie_lock);

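/*
 * Evict an entry from a full HPTE group, starting at a pseudo-random
 * slot.  H_ANDCOND with the bolted bit as the condition ensures bolted
 * entries are never removed.  Returns non-negative on success, or -1 if
 * every slot in the group holds a bolted entry.
 */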
static long pSeries_lpar_hpte_remove(unsigned long hpte_group)
{
	unsigned long slot_offset;
	unsigned long lpar_rc;
	int i;
	unsigned long dummy1, dummy2;

	/* pick a random slot to start at */
	slot_offset = mftb() & 0x7;

	for (i = 0; i < HPTES_PER_GROUP; i++) {

		/* don't remove a bolted entry */
		lpar_rc = plpar_pte_remove(H_ANDCOND, hpte_group + slot_offset,
					   (0x1UL << 4), &dummy1, &dummy2);
		if (lpar_rc == H_SUCCESS)
			return i;

		/*
		 * The test for adjunct partition is performed before the
		 * ANDCOND test. H_RESOURCE may be returned, so we need to
		 * check for that as well.
		 */
		BUG_ON(lpar_rc != H_NOT_FOUND && lpar_rc != H_RESOURCE);

		slot_offset++;
		slot_offset &= 0x7;
	}

	return -1;
}

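/*
 * Clear the whole hash page table: read HPTEs four at a time and
 * invalidate every valid entry that does not map the VRMA.
 */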
static void pSeries_lpar_hptab_clear(void)
{
	unsigned long size_bytes = 1UL << ppc64_pft_size;
	unsigned long hpte_count = size_bytes >> 4;
	struct {
		unsigned long pteh;
		unsigned long ptel;
	} ptes[4];
	long lpar_rc;
	unsigned long i, j;

	/*
	 * Read in batches of 4, invalidate only valid entries not in the
	 * VRMA; hpte_count will be a multiple of 4.
	 */
	for (i = 0; i < hpte_count; i += 4) {
		lpar_rc = plpar_pte_read_4_raw(0, i, (void *)ptes);
		if (lpar_rc != H_SUCCESS)
			continue;
		for (j = 0; j < 4; j++) {
			if ((ptes[j].pteh & HPTE_V_VRMA_MASK) ==
				HPTE_V_VRMA_MASK)
				continue;
			if (ptes[j].pteh & HPTE_V_VALID)
				plpar_pte_remove_raw(0, i + j, 0,
					&(ptes[j].pteh), &(ptes[j].ptel));
		}
	}

#ifdef __LITTLE_ENDIAN__
	/*
	 * Reset exceptions to big endian.
	 *
	 * FIXME this is a hack for kexec, we need to reset the exception
	 * endian before starting the new kernel and this is a convenient place
	 * to do it.
	 *
	 * This is also called on boot when a fadump happens. In that case we
	 * must not change the exception endian mode.
	 */
	if (firmware_has_feature(FW_FEATURE_SET_MODE) && !is_fadump_active()) {
		long rc;

		rc = pseries_big_endian_exceptions();
		/*
		 * At this point it is unlikely panic() will get anything
		 * out to the user, but at least this will stop us from
		 * continuing on further and creating an even more
		 * difficult to debug situation.
		 *
		 * There is a known problem when kdump'ing, if cpus are offline
		 * the above call will fail. Rather than panicking again, keep
		 * going and hope the kdump kernel is also little endian, which
		 * it usually is.
		 */
		if (rc && !kdump_in_progress())
			panic("Could not enable big endian exceptions");
	}
#endif
}

/*
 * NOTE: for updatepp ops we are fortunate that the linux "newpp" bits and
 * the low 3 bits of flags happen to line up.  So no transform is needed.
 * We can probably optimize here and assume the high bits of newpp are
 * already zero.  For now I am paranoid.
 */
static long pSeries_lpar_hpte_updatepp(unsigned long slot,
				       unsigned long newpp,
				       unsigned long vpn,
				       int psize, int apsize,
				       int ssize, unsigned long inv_flags)
{
	unsigned long lpar_rc;
	unsigned long flags = (newpp & 7) | H_AVPN;
	unsigned long want_v;

	want_v = hpte_encode_avpn(vpn, psize, ssize);

	pr_devel("    update: avpnv=%016lx, hash=%016lx, f=%lx, psize: %d ...",
		 want_v, slot, flags, psize);

	lpar_rc = plpar_pte_protect(flags, slot, want_v);

	if (lpar_rc == H_NOT_FOUND) {
		pr_devel("not found !\n");
		return -1;
	}

	pr_devel("ok\n");

	BUG_ON(lpar_rc != H_SUCCESS);

	return 0;
}

static long __pSeries_lpar_hpte_find(unsigned long want_v, unsigned long hpte_group)
{
	long lpar_rc;
	unsigned long i, j;
	struct {
		unsigned long pteh;
		unsigned long ptel;
	} ptes[4];

	for (i = 0; i < HPTES_PER_GROUP; i += 4, hpte_group += 4) {

		lpar_rc = plpar_pte_read_4(0, hpte_group, (void *)ptes);
		if (lpar_rc != H_SUCCESS)
			continue;

		for (j = 0; j < 4; j++) {
			if (HPTE_V_COMPARE(ptes[j].pteh, want_v) &&
			    (ptes[j].pteh & HPTE_V_VALID))
				return i + j;
		}
	}

	return -1;
}

static long pSeries_lpar_hpte_find(unsigned long vpn, int psize, int ssize)
{
	long slot;
	unsigned long hash;
	unsigned long want_v;
	unsigned long hpte_group;

	hash = hpt_hash(vpn, mmu_psize_defs[psize].shift, ssize);
	want_v = hpte_encode_avpn(vpn, psize, ssize);

	/* Bolted entries are always in the primary group */
	hpte_group = (hash & htab_hash_mask) * HPTES_PER_GROUP;
	slot = __pSeries_lpar_hpte_find(want_v, hpte_group);
	if (slot < 0)
		return -1;
	return hpte_group + slot;
}

static void pSeries_lpar_hpte_updateboltedpp(unsigned long newpp,
					     unsigned long ea,
					     int psize, int ssize)
{
	unsigned long vpn;
	unsigned long lpar_rc, slot, vsid, flags;

	vsid = get_kernel_vsid(ea, ssize);
	vpn = hpt_vpn(ea, vsid, ssize);

	slot = pSeries_lpar_hpte_find(vpn, psize, ssize);
	BUG_ON(slot == -1);

	flags = newpp & 7;
	lpar_rc = plpar_pte_protect(flags, slot, 0);

	BUG_ON(lpar_rc != H_SUCCESS);
}

static void pSeries_lpar_hpte_invalidate(unsigned long slot, unsigned long vpn,
					 int psize, int apsize,
					 int ssize, int local)
{
	unsigned long want_v;
	unsigned long lpar_rc;
	unsigned long dummy1, dummy2;

	pr_devel("    inval : slot=%lx, vpn=%016lx, psize: %d, local: %d\n",
		 slot, vpn, psize, local);

	want_v = hpte_encode_avpn(vpn, psize, ssize);
	lpar_rc = plpar_pte_remove(H_AVPN, slot, want_v, &dummy1, &dummy2);
	if (lpar_rc == H_NOT_FOUND)
		return;

	BUG_ON(lpar_rc != H_SUCCESS);
}

#ifdef CONFIG_TRANSPARENT_HUGEPAGE
/*
 * Limit iterations holding pSeries_lpar_tlbie_lock to 3. We also need
 * to make sure that we avoid bouncing the hypervisor tlbie lock.
 */
#define PPC64_HUGE_HPTE_BATCH 12

static void __pSeries_lpar_hugepage_invalidate(unsigned long *slot,
					       unsigned long *vpn, int count,
					       int psize, int ssize)
{
	unsigned long param[8];
	int i = 0, pix = 0, rc;
	unsigned long flags = 0;
	int lock_tlbie = !mmu_has_feature(MMU_FTR_LOCKLESS_TLBIE);

	if (lock_tlbie)
		spin_lock_irqsave(&pSeries_lpar_tlbie_lock, flags);

	for (i = 0; i < count; i++) {

		if (!firmware_has_feature(FW_FEATURE_BULK_REMOVE)) {
			pSeries_lpar_hpte_invalidate(slot[i], vpn[i], psize, 0,
						     ssize, 0);
		} else {
			param[pix] = HBR_REQUEST | HBR_AVPN | slot[i];
			param[pix+1] = hpte_encode_avpn(vpn[i], psize, ssize);
			pix += 2;
			if (pix == 8) {
				rc = plpar_hcall9(H_BULK_REMOVE, param,
						  param[0], param[1], param[2],
						  param[3], param[4], param[5],
						  param[6], param[7]);
				BUG_ON(rc != H_SUCCESS);
				pix = 0;
			}
		}
	}
	if (pix) {
		param[pix] = HBR_END;
		rc = plpar_hcall9(H_BULK_REMOVE, param, param[0], param[1],
				  param[2], param[3], param[4], param[5],
				  param[6], param[7]);
		BUG_ON(rc != H_SUCCESS);
	}

	if (lock_tlbie)
		spin_unlock_irqrestore(&pSeries_lpar_tlbie_lock, flags);
}

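/*
 * Invalidate all HPTEs backing a huge page: walk the per-PMD slot array,
 * recompute each valid subpage's VPN and hash slot, and feed them to
 * __pSeries_lpar_hugepage_invalidate() in PPC64_HUGE_HPTE_BATCH chunks.
 */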
static void pSeries_lpar_hugepage_invalidate(unsigned long vsid,
					     unsigned long addr,
					     unsigned char *hpte_slot_array,
					     int psize, int ssize, int local)
{
	int i, index = 0;
	unsigned long s_addr = addr;
	unsigned int max_hpte_count, valid;
	unsigned long vpn_array[PPC64_HUGE_HPTE_BATCH];
	unsigned long slot_array[PPC64_HUGE_HPTE_BATCH];
	unsigned long shift, hidx, vpn = 0, hash, slot;

	shift = mmu_psize_defs[psize].shift;
	max_hpte_count = 1U << (PMD_SHIFT - shift);

	for (i = 0; i < max_hpte_count; i++) {
		valid = hpte_valid(hpte_slot_array, i);
		if (!valid)
			continue;
		hidx = hpte_hash_index(hpte_slot_array, i);

		/* get the vpn */
		addr = s_addr + (i * (1ul << shift));
		vpn = hpt_vpn(addr, vsid, ssize);
		hash = hpt_hash(vpn, shift, ssize);
		if (hidx & _PTEIDX_SECONDARY)
			hash = ~hash;

		slot = (hash & htab_hash_mask) * HPTES_PER_GROUP;
		slot += hidx & _PTEIDX_GROUP_IX;

		slot_array[index] = slot;
		vpn_array[index] = vpn;
		if (index == PPC64_HUGE_HPTE_BATCH - 1) {
			/*
			 * Now do a bulk invalidate
			 */
			__pSeries_lpar_hugepage_invalidate(slot_array,
							   vpn_array,
							   PPC64_HUGE_HPTE_BATCH,
							   psize, ssize);
			index = 0;
		} else
			index++;
	}
	if (index)
		__pSeries_lpar_hugepage_invalidate(slot_array, vpn_array,
						   index, psize, ssize);
}
#else
static void pSeries_lpar_hugepage_invalidate(unsigned long vsid,
					     unsigned long addr,
					     unsigned char *hpte_slot_array,
					     int psize, int ssize, int local)
{
	WARN(1, "%s called without THP support\n", __func__);
}
#endif

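/*
 * Remove a bolted kernel HPTE; returns -ENOENT if no entry maps @ea.
 */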
static int pSeries_lpar_hpte_removebolted(unsigned long ea,
					  int psize, int ssize)
{
	unsigned long vpn;
	unsigned long slot, vsid;

	vsid = get_kernel_vsid(ea, ssize);
	vpn = hpt_vpn(ea, vsid, ssize);

	slot = pSeries_lpar_hpte_find(vpn, psize, ssize);
	if (slot == -1)
		return -ENOENT;

	/*
	 * lpar doesn't use the passed actual page size
	 */
	pSeries_lpar_hpte_invalidate(slot, vpn, psize, 0, ssize, 0);
	return 0;
}

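/*
 * Each translation costs two H_BULK_REMOVE parameters, so the hcall is
 * issued every fourth entry and once more at the end for any remainder.
 */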
/*
 * Take a spinlock around flushes to avoid bouncing the hypervisor tlbie
 * lock.
 */
static void pSeries_lpar_flush_hash_range(unsigned long number, int local)
{
	unsigned long vpn;
	unsigned long i, pix, rc;
	unsigned long flags = 0;
	struct ppc64_tlb_batch *batch = this_cpu_ptr(&ppc64_tlb_batch);
	int lock_tlbie = !mmu_has_feature(MMU_FTR_LOCKLESS_TLBIE);
	unsigned long param[9];
	unsigned long hash, index, shift, hidx, slot;
	real_pte_t pte;
	int psize, ssize;

	if (lock_tlbie)
		spin_lock_irqsave(&pSeries_lpar_tlbie_lock, flags);

	psize = batch->psize;
	ssize = batch->ssize;
	pix = 0;
	for (i = 0; i < number; i++) {
		vpn = batch->vpn[i];
		pte = batch->pte[i];
		pte_iterate_hashed_subpages(pte, psize, vpn, index, shift) {
			hash = hpt_hash(vpn, shift, ssize);
			hidx = __rpte_to_hidx(pte, index);
			if (hidx & _PTEIDX_SECONDARY)
				hash = ~hash;
			slot = (hash & htab_hash_mask) * HPTES_PER_GROUP;
			slot += hidx & _PTEIDX_GROUP_IX;
			if (!firmware_has_feature(FW_FEATURE_BULK_REMOVE)) {
				/*
				 * lpar doesn't use the passed actual page size
				 */
				pSeries_lpar_hpte_invalidate(slot, vpn, psize,
							     0, ssize, local);
			} else {
				param[pix] = HBR_REQUEST | HBR_AVPN | slot;
				param[pix+1] = hpte_encode_avpn(vpn, psize,
								ssize);
				pix += 2;
				if (pix == 8) {
					rc = plpar_hcall9(H_BULK_REMOVE, param,
						param[0], param[1], param[2],
						param[3], param[4], param[5],
						param[6], param[7]);
					BUG_ON(rc != H_SUCCESS);
					pix = 0;
				}
			}
		} pte_iterate_hashed_end();
	}
	if (pix) {
		param[pix] = HBR_END;
		rc = plpar_hcall9(H_BULK_REMOVE, param, param[0], param[1],
				  param[2], param[3], param[4], param[5],
				  param[6], param[7]);
		BUG_ON(rc != H_SUCCESS);
	}

	if (lock_tlbie)
		spin_unlock_irqrestore(&pSeries_lpar_tlbie_lock, flags);
}

static int __init disable_bulk_remove(char *str)
{
	if (strcmp(str, "off") == 0 &&
	    firmware_has_feature(FW_FEATURE_BULK_REMOVE)) {
		printk(KERN_INFO "Disabling BULK_REMOVE firmware feature\n");
		powerpc_firmware_features &= ~FW_FEATURE_BULK_REMOVE;
	}
	return 1;
}

__setup("bulk_remove=", disable_bulk_remove);

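/*
 * Hook the pSeries LPAR hash table operations into the machine
 * descriptor.
 */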
void __init hpte_init_lpar(void)
{
	ppc_md.hpte_invalidate	= pSeries_lpar_hpte_invalidate;
	ppc_md.hpte_updatepp	= pSeries_lpar_hpte_updatepp;
	ppc_md.hpte_updateboltedpp = pSeries_lpar_hpte_updateboltedpp;
	ppc_md.hpte_insert	= pSeries_lpar_hpte_insert;
	ppc_md.hpte_remove	= pSeries_lpar_hpte_remove;
	ppc_md.hpte_removebolted = pSeries_lpar_hpte_removebolted;
	ppc_md.flush_hash_range	= pSeries_lpar_flush_hash_range;
	ppc_md.hpte_clear_all	= pSeries_lpar_hptab_clear;
	ppc_md.hugepage_invalidate = pSeries_lpar_hugepage_invalidate;
}

#ifdef CONFIG_PPC_SMLPAR
#define CMO_FREE_HINT_DEFAULT 1
static int cmo_free_hint_flag = CMO_FREE_HINT_DEFAULT;

static int __init cmo_free_hint(char *str)
{
	char *parm;
	parm = strstrip(str);

	if (strcasecmp(parm, "no") == 0 || strcasecmp(parm, "off") == 0) {
		printk(KERN_INFO "cmo_free_hint: CMO free page hinting is not active.\n");
		cmo_free_hint_flag = 0;
		return 1;
	}

	cmo_free_hint_flag = 1;
	printk(KERN_INFO "cmo_free_hint: CMO free page hinting is active.\n");

	if (strcasecmp(parm, "yes") == 0 || strcasecmp(parm, "on") == 0)
		return 1;

	return 0;
}

__setup("cmo_free_hint=", cmo_free_hint);

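/*
 * Hint each firmware-sized subpage of the page as used/unused via
 * H_PAGE_INIT; the CMO page size may be smaller than PAGE_SIZE.
 */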
static void pSeries_set_page_state(struct page *page, int order,
				   unsigned long state)
{
	int i, j;
	unsigned long cmo_page_sz, addr;

	cmo_page_sz = cmo_get_page_size();
	addr = __pa((unsigned long)page_address(page));

	for (i = 0; i < (1 << order); i++, addr += PAGE_SIZE) {
		for (j = 0; j < PAGE_SIZE; j += cmo_page_sz)
			plpar_hcall_norets(H_PAGE_INIT, state, addr + j, 0);
	}
}

void arch_free_page(struct page *page, int order)
{
	if (radix_enabled())
		return;
	if (!cmo_free_hint_flag || !firmware_has_feature(FW_FEATURE_CMO))
		return;

	pSeries_set_page_state(page, order, H_PAGE_SET_UNUSED);
}
EXPORT_SYMBOL(arch_free_page);

#endif /* CONFIG_PPC_SMLPAR */
#endif /* CONFIG_PPC_STD_MMU_64 */

#ifdef CONFIG_TRACEPOINTS
#ifdef HAVE_JUMP_LABEL
struct static_key hcall_tracepoint_key = STATIC_KEY_INIT;

void hcall_tracepoint_regfunc(void)
{
	static_key_slow_inc(&hcall_tracepoint_key);
}

void hcall_tracepoint_unregfunc(void)
{
	static_key_slow_dec(&hcall_tracepoint_key);
}
#else
/*
 * We optimise our hcall path by placing hcall_tracepoint_refcount
 * directly in the TOC so we can check if the hcall tracepoints are
 * enabled via a single load.
 */

/* NB: reg/unreg are called while guarded with the tracepoints_mutex */
extern long hcall_tracepoint_refcount;

void hcall_tracepoint_regfunc(void)
{
	hcall_tracepoint_refcount++;
}

void hcall_tracepoint_unregfunc(void)
{
	hcall_tracepoint_refcount--;
}
#endif

/*
 * Since the tracing code might execute hcalls we need to guard against
 * recursion. One example of this is spinlocks calling H_YIELD on
 * shared processor partitions.
 */
static DEFINE_PER_CPU(unsigned int, hcall_trace_depth);

void __trace_hcall_entry(unsigned long opcode, unsigned long *args)
{
	unsigned long flags;
	unsigned int *depth;

	/*
	 * We cannot call tracepoints inside RCU idle regions which
	 * means we must not trace H_CEDE.
	 */
	if (opcode == H_CEDE)
		return;

	local_irq_save(flags);

	depth = this_cpu_ptr(&hcall_trace_depth);

	if (*depth)
		goto out;

	(*depth)++;
	preempt_disable();
	trace_hcall_entry(opcode, args);
	(*depth)--;

out:
	local_irq_restore(flags);
}

void __trace_hcall_exit(long opcode, unsigned long retval,
			unsigned long *retbuf)
{
	unsigned long flags;
	unsigned int *depth;

	if (opcode == H_CEDE)
		return;

	local_irq_save(flags);

	depth = this_cpu_ptr(&hcall_trace_depth);

	if (*depth)
		goto out;

	(*depth)++;
	trace_hcall_exit(opcode, retval, retbuf);
	preempt_enable();
	(*depth)--;

out:
	local_irq_restore(flags);
}
#endif

/**
 * h_get_mpp
 * H_GET_MPP hcall returns info in 7 parms
 */
int h_get_mpp(struct hvcall_mpp_data *mpp_data)
{
	int rc;
	unsigned long retbuf[PLPAR_HCALL9_BUFSIZE];

	rc = plpar_hcall9(H_GET_MPP, retbuf);

	mpp_data->entitled_mem = retbuf[0];
	mpp_data->mapped_mem = retbuf[1];

	mpp_data->group_num = (retbuf[2] >> 2 * 8) & 0xffff;
	mpp_data->pool_num = retbuf[2] & 0xffff;

	mpp_data->mem_weight = (retbuf[3] >> 7 * 8) & 0xff;
	mpp_data->unallocated_mem_weight = (retbuf[3] >> 6 * 8) & 0xff;
	mpp_data->unallocated_entitlement = retbuf[3] & 0xffffffffffffUL;

	mpp_data->pool_size = retbuf[4];
	mpp_data->loan_request = retbuf[5];
	mpp_data->backing_mem = retbuf[6];

	return rc;
}
EXPORT_SYMBOL(h_get_mpp);

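/**
 * h_get_mpp_x
 * H_GET_MPP_X hcall returns info in 4 parms
 */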
int h_get_mpp_x(struct hvcall_mpp_x_data *mpp_x_data)
{
	int rc;
	unsigned long retbuf[PLPAR_HCALL9_BUFSIZE] = { 0 };

	rc = plpar_hcall9(H_GET_MPP_X, retbuf);

	mpp_x_data->coalesced_bytes = retbuf[0];
	mpp_x_data->pool_coalesced_bytes = retbuf[1];
	mpp_x_data->pool_purr_cycles = retbuf[2];
	mpp_x_data->pool_spurr_cycles = retbuf[3];

	return rc;
}