/*
 * PowerPC64 SLB support.
 *
 * Copyright (C) 2004 David Gibson <dwg@au.ibm.com>, IBM
 * Based on earlier code written by:
 * Dave Engebretsen and Mike Corrigan {engebret|mikejc}@us.ibm.com
 * Copyright (c) 2001 Dave Engebretsen
 * Copyright (C) 2002 Anton Blanchard <anton@au.ibm.com>, IBM
 *
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */

#include <asm/pgtable.h>
#include <asm/mmu.h>
#include <asm/mmu_context.h>
#include <asm/paca.h>
#include <asm/cputable.h>
#include <asm/cacheflush.h>
#include <asm/smp.h>
#include <linux/compiler.h>
#include <linux/context_tracking.h>
#include <linux/mm_types.h>

#include <asm/udbg.h>
#include <asm/code-patching.h>

enum slb_index {
	LINEAR_INDEX	= 0, /* Kernel linear map (0xc000000000000000) */
	VMALLOC_INDEX	= 1, /* Kernel virtual map (0xd000000000000000) */
	KSTACK_INDEX	= 2, /* Kernel stack map */
};

extern void slb_allocate(unsigned long ea);

#define slb_esid_mask(ssize)	\
	(((ssize) == MMU_SEGSIZE_256M) ? ESID_MASK : ESID_MASK_1T)

static inline unsigned long mk_esid_data(unsigned long ea, int ssize,
					 enum slb_index index)
{
	return (ea & slb_esid_mask(ssize)) | SLB_ESID_V | index;
}

static inline unsigned long mk_vsid_data(unsigned long ea, int ssize,
					 unsigned long flags)
{
	return (get_kernel_vsid(ea, ssize) << slb_vsid_shift(ssize)) | flags |
		((unsigned long) ssize << SLB_VSID_SSIZE_SHIFT);
}

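/*
 * Worked example (illustrative sketch only; it assumes the usual book3s-64
 * definitions, where ESID_MASK keeps the bits above the 28-bit segment
 * offset and SLB_ESID_V is the valid bit).  For the bolted linear-mapping
 * entry:
 *
 *	mk_esid_data(PAGE_OFFSET, MMU_SEGSIZE_256M, LINEAR_INDEX)
 *		== (0xc000000000000000UL & ESID_MASK) | SLB_ESID_V | 0;
 *
 * i.e. the ESID of the EA, the valid bit, and SLB slot 0 in the low bits.
 * mk_vsid_data() builds the matching VSID dword: the VSID returned by
 * get_kernel_vsid() lands at slb_vsid_shift(ssize), the protection and
 * page-size bits come in through "flags", and the segment size (256M vs
 * 1T) goes into the top bits via SLB_VSID_SSIZE_SHIFT.
 */
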
static inline void slb_shadow_update(unsigned long ea, int ssize,
				     unsigned long flags,
				     enum slb_index index)
{
	struct slb_shadow *p = get_slb_shadow();

	/*
	 * Clear the ESID first so the entry is not valid while we are
	 * updating it.  No write barriers are needed here, provided
	 * we only update the current CPU's SLB shadow buffer.
	 */
	WRITE_ONCE(p->save_area[index].esid, 0);
	WRITE_ONCE(p->save_area[index].vsid, cpu_to_be64(mk_vsid_data(ea, ssize, flags)));
	WRITE_ONCE(p->save_area[index].esid, cpu_to_be64(mk_esid_data(ea, ssize, index)));
}

static inline void slb_shadow_clear(enum slb_index index)
{
	WRITE_ONCE(get_slb_shadow()->save_area[index].esid, cpu_to_be64(index));
}

static inline void create_shadowed_slbe(unsigned long ea, int ssize,
					unsigned long flags,
					enum slb_index index)
{
	/*
	 * Updating the shadow buffer before writing the SLB ensures
	 * we don't get a stale entry here if we get preempted by PHYP
	 * between these two statements.
	 */
	slb_shadow_update(ea, ssize, flags, index);

	asm volatile("slbmte %0,%1" :
		     : "r" (mk_vsid_data(ea, ssize, flags)),
		       "r" (mk_esid_data(ea, ssize, index))
		     : "memory" );
}

/*
 * Insert bolted entries into SLB (which may not be empty, so don't clear
 * slb_cache_ptr).
 */
void __slb_restore_bolted_realmode(void)
{
	struct slb_shadow *p = get_slb_shadow();
	enum slb_index index;

	/* No isync needed because realmode. */
	for (index = 0; index < SLB_NUM_BOLTED; index++) {
		asm volatile("slbmte %0,%1" :
			     : "r" (be64_to_cpu(p->save_area[index].vsid)),
			       "r" (be64_to_cpu(p->save_area[index].esid)));
	}
}

/*
 * Insert the bolted entries into an empty SLB.
 * This is not the same as rebolt because the bolted segments are not
 * changed, just loaded from the shadow area.
 */
void slb_restore_bolted_realmode(void)
{
	__slb_restore_bolted_realmode();
	get_paca()->slb_cache_ptr = 0;
}

/*
 * This flushes all SLB entries including 0, so it must be realmode.
 */
void slb_flush_all_realmode(void)
{
	asm volatile("slbmte %0,%0; slbia" : : "r" (0));
}

static void __slb_flush_and_rebolt(void)
{
	/* If you change this make sure you change SLB_NUM_BOLTED
	 * and PR KVM appropriately too. */
	unsigned long linear_llp, vmalloc_llp, lflags, vflags;
	unsigned long ksp_esid_data, ksp_vsid_data;

	linear_llp = mmu_psize_defs[mmu_linear_psize].sllp;
	vmalloc_llp = mmu_psize_defs[mmu_vmalloc_psize].sllp;
	lflags = SLB_VSID_KERNEL | linear_llp;
	vflags = SLB_VSID_KERNEL | vmalloc_llp;

	ksp_esid_data = mk_esid_data(get_paca()->kstack, mmu_kernel_ssize, KSTACK_INDEX);
	if ((ksp_esid_data & ~0xfffffffUL) <= PAGE_OFFSET) {
		ksp_esid_data &= ~SLB_ESID_V;
		ksp_vsid_data = 0;
		slb_shadow_clear(KSTACK_INDEX);
	} else {
		/* Update stack entry; others don't change */
		slb_shadow_update(get_paca()->kstack, mmu_kernel_ssize, lflags, KSTACK_INDEX);
		ksp_vsid_data =
			be64_to_cpu(get_slb_shadow()->save_area[KSTACK_INDEX].vsid);
	}

	/* We need to do this all in asm, so we're sure we don't touch
	 * the stack between the slbia and rebolting it. */
	asm volatile("isync\n"
		     "slbia\n"
		     /* Slot 1 - first VMALLOC segment */
		     "slbmte	%0,%1\n"
		     /* Slot 2 - kernel stack */
		     "slbmte	%2,%3\n"
		     "isync"
		     :: "r"(mk_vsid_data(VMALLOC_START, mmu_kernel_ssize, vflags)),
		        "r"(mk_esid_data(VMALLOC_START, mmu_kernel_ssize, VMALLOC_INDEX)),
		        "r"(ksp_vsid_data),
		        "r"(ksp_esid_data)
		     : "memory");
}

void slb_flush_and_rebolt(void)
{
	WARN_ON(!irqs_disabled());

	/*
	 * We can't take a PMU exception in the following code, so hard
	 * disable interrupts.
	 */
	hard_irq_disable();

	__slb_flush_and_rebolt();
	get_paca()->slb_cache_ptr = 0;
}

void slb_save_contents(struct slb_entry *slb_ptr)
{
	int i;
	unsigned long e, v;

	/* Save slb_cache_ptr value. */
	get_paca()->slb_save_cache_ptr = get_paca()->slb_cache_ptr;

	if (!slb_ptr)
		return;

	for (i = 0; i < mmu_slb_size; i++) {
		asm volatile("slbmfee %0,%1" : "=r" (e) : "r" (i));
		asm volatile("slbmfev %0,%1" : "=r" (v) : "r" (i));
		slb_ptr->esid = e;
		slb_ptr->vsid = v;
		slb_ptr++;
	}
}

void slb_dump_contents(struct slb_entry *slb_ptr)
{
	int i, n;
	unsigned long e, v;
	unsigned long llp;

	if (!slb_ptr)
		return;

	pr_err("SLB contents of cpu 0x%x\n", smp_processor_id());
	pr_err("Last SLB entry inserted at slot %lld\n", get_paca()->stab_rr);

	for (i = 0; i < mmu_slb_size; i++) {
		e = slb_ptr->esid;
		v = slb_ptr->vsid;
		slb_ptr++;

		if (!e && !v)
			continue;

		pr_err("%02d %016lx %016lx\n", i, e, v);

		if (!(e & SLB_ESID_V)) {
			pr_err("\n");
			continue;
		}
		llp = v & SLB_VSID_LLP;
		if (v & SLB_VSID_B_1T) {
			pr_err("  1T  ESID=%9lx  VSID=%13lx LLP:%3lx\n",
			       GET_ESID_1T(e),
			       (v & ~SLB_VSID_B) >> SLB_VSID_SHIFT_1T, llp);
		} else {
			pr_err(" 256M ESID=%9lx  VSID=%13lx LLP:%3lx\n",
			       GET_ESID(e),
			       (v & ~SLB_VSID_B) >> SLB_VSID_SHIFT, llp);
		}
	}
	pr_err("----------------------------------\n");

	/* Dump SLB cache entries as well. */
	pr_err("SLB cache ptr value = %d\n", get_paca()->slb_save_cache_ptr);
	pr_err("Valid SLB cache entries:\n");
	n = min_t(int, get_paca()->slb_save_cache_ptr, SLB_CACHE_ENTRIES);
	for (i = 0; i < n; i++)
		pr_err("%02d EA[0-35]=%9x\n", i, get_paca()->slb_cache[i]);
	pr_err("Rest of SLB cache entries:\n");
	for (i = n; i < SLB_CACHE_ENTRIES; i++)
		pr_err("%02d EA[0-35]=%9x\n", i, get_paca()->slb_cache[i]);
}

void slb_vmalloc_update(void)
{
	unsigned long vflags;

	vflags = SLB_VSID_KERNEL | mmu_psize_defs[mmu_vmalloc_psize].sllp;
	slb_shadow_update(VMALLOC_START, mmu_kernel_ssize, vflags, VMALLOC_INDEX);
	slb_flush_and_rebolt();
}

/* Helper function to compare esids.  There are four cases to handle.
 * 1. The system is not 1T segment size capable.  Use the GET_ESID compare.
 * 2. The system is 1T capable, both addresses are < 1T, use the GET_ESID compare.
 * 3. The system is 1T capable, only one of the two addresses is > 1T.  This is not a match.
 * 4. The system is 1T capable, both addresses are > 1T, use the GET_ESID_1T macro to compare.
 */
static inline int esids_match(unsigned long addr1, unsigned long addr2)
{
	int esid_1t_count;

	/* System is not 1T segment size capable. */
	if (!mmu_has_feature(MMU_FTR_1T_SEGMENT))
		return (GET_ESID(addr1) == GET_ESID(addr2));

	esid_1t_count = (((addr1 >> SID_SHIFT_1T) != 0) +
				((addr2 >> SID_SHIFT_1T) != 0));

	/* both addresses are < 1T */
	if (esid_1t_count == 0)
		return (GET_ESID(addr1) == GET_ESID(addr2));

	/* One address < 1T, the other > 1T.  Not a match */
	if (esid_1t_count == 1)
		return 0;

	/* Both addresses are > 1T. */
	return (GET_ESID_1T(addr1) == GET_ESID_1T(addr2));
}

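/*
 * Illustrative example (assuming the usual SID_SHIFT == 28 and
 * SID_SHIFT_1T == 40): 0x10000000 and 0x1ffffff0 lie in the same 256M
 * segment, so esids_match() returns true; 0x10000000 and 0x20000000 do
 * not.  Two addresses at or above 1T match only when their 1T ESIDs
 * (EA >> 40) are equal.
 */
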
/* Flush all user entries from the segment table of the current processor. */
void switch_slb(struct task_struct *tsk, struct mm_struct *mm)
{
	unsigned long offset;
	unsigned long slbie_data = 0;
	unsigned long pc = KSTK_EIP(tsk);
	unsigned long stack = KSTK_ESP(tsk);
	unsigned long exec_base;

	/*
	 * We need interrupts hard-disabled here, not just soft-disabled,
	 * so that a PMU interrupt can't occur, which might try to access
	 * user memory (to get a stack trace) and possibly cause an SLB miss,
	 * which would update the slb_cache/slb_cache_ptr fields in the PACA.
	 */
	hard_irq_disable();
	offset = get_paca()->slb_cache_ptr;
	if (!mmu_has_feature(MMU_FTR_NO_SLBIE_B) &&
	    offset <= SLB_CACHE_ENTRIES) {
		int i;

		asm volatile("isync" : : : "memory");
		for (i = 0; i < offset; i++) {
			slbie_data = (unsigned long)get_paca()->slb_cache[i]
				<< SID_SHIFT; /* EA */
			slbie_data |= user_segment_size(slbie_data)
				<< SLBIE_SSIZE_SHIFT;
			slbie_data |= SLBIE_C; /* C set for user addresses */
			asm volatile("slbie %0" : : "r" (slbie_data));
		}
		asm volatile("isync" : : : "memory");
	} else {
		__slb_flush_and_rebolt();
	}

	if (!cpu_has_feature(CPU_FTR_ARCH_207S)) {
		/* Workaround POWER5 < DD2.1 issue */
		if (offset == 1 || offset > SLB_CACHE_ENTRIES)
			asm volatile("slbie %0" : : "r" (slbie_data));
	}

	get_paca()->slb_cache_ptr = 0;
	copy_mm_to_paca(mm);

	/*
	 * Preload some userspace segments into the SLB.
	 * Almost all 32 and 64bit PowerPC executables are linked at
	 * 0x10000000 so it makes sense to preload this segment.
	 */
	exec_base = 0x10000000;

	if (is_kernel_addr(pc) || is_kernel_addr(stack) ||
	    is_kernel_addr(exec_base))
		return;

	slb_allocate(pc);

	if (!esids_match(pc, stack))
		slb_allocate(stack);

	if (!esids_match(pc, exec_base) &&
	    !esids_match(stack, exec_base))
		slb_allocate(exec_base);
}

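/*
 * Sketch of the slbie operand built in the invalidation loop above
 * (illustrative only; it assumes SID_SHIFT == 28 and that slb_cache[]
 * holds ESID bits 0-35 of each user EA, as stored by the SLB miss
 * handlers):
 *
 *	slb_cache[i] == 0x1 for a user segment at EA 0x10000000, so
 *	slbie_data == (0x1UL << 28)
 *		      | (user_segment_size(0x10000000) << SLBIE_SSIZE_SHIFT)
 *		      | SLBIE_C;
 *
 * i.e. the segment's EA, its segment-size (B) field, and the class bit
 * that marks it as a user entry.
 */
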
static inline void patch_slb_encoding(unsigned int *insn_addr,
				      unsigned int immed)
{
	/*
	 * This function patches either an li or a cmpldi instruction with
	 * a new immediate value. This relies on the fact that both li
	 * (which is actually addi) and cmpldi take a 16-bit immediate
	 * value, and that it sits in the same location in the instruction,
	 * ie. bits 16-31 (big endian bit order), or the lower 16 bits.
	 * The signedness of the immediate operand differs between the two
	 * instructions, but this code only ever patches a small value,
	 * much less than 1 << 15, so we can get away with it.
	 * To patch the value we read the existing instruction, clear the
	 * immediate field, OR in our new value, and write the instruction
	 * back.
	 */
	unsigned int insn = (*insn_addr & 0xffff0000) | immed;
	patch_instruction(insn_addr, insn);
}

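/*
 * Worked example (illustrative; r11 is just an example register, not
 * necessarily what the patched sites use): "li r11,0" encodes as
 * 0x39600000, so patching it with an immediate of 32 stores
 * 0x39600000 | 32 == 0x39600020, i.e. "li r11,32".  Only the low 16
 * bits change; the opcode and register fields in the upper halfword
 * are preserved.
 */
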
extern u32 slb_miss_kernel_load_linear[];
extern u32 slb_miss_kernel_load_io[];
extern u32 slb_compare_rr_to_size[];
extern u32 slb_miss_kernel_load_vmemmap[];

void slb_set_size(u16 size)
{
	if (mmu_slb_size == size)
		return;

	mmu_slb_size = size;
	patch_slb_encoding(slb_compare_rr_to_size, mmu_slb_size);
}

void slb_initialize(void)
{
	unsigned long linear_llp, vmalloc_llp, io_llp;
	unsigned long lflags, vflags;
	static int slb_encoding_inited;
#ifdef CONFIG_SPARSEMEM_VMEMMAP
	unsigned long vmemmap_llp;
#endif

	/* Prepare our SLB miss handler based on our page size */
	linear_llp = mmu_psize_defs[mmu_linear_psize].sllp;
	io_llp = mmu_psize_defs[mmu_io_psize].sllp;
	vmalloc_llp = mmu_psize_defs[mmu_vmalloc_psize].sllp;
	get_paca()->vmalloc_sllp = SLB_VSID_KERNEL | vmalloc_llp;
#ifdef CONFIG_SPARSEMEM_VMEMMAP
	vmemmap_llp = mmu_psize_defs[mmu_vmemmap_psize].sllp;
#endif
	if (!slb_encoding_inited) {
		slb_encoding_inited = 1;
		patch_slb_encoding(slb_miss_kernel_load_linear,
				   SLB_VSID_KERNEL | linear_llp);
		patch_slb_encoding(slb_miss_kernel_load_io,
				   SLB_VSID_KERNEL | io_llp);
		patch_slb_encoding(slb_compare_rr_to_size,
				   mmu_slb_size);

		pr_devel("SLB: linear  LLP = %04lx\n", linear_llp);
		pr_devel("SLB: io      LLP = %04lx\n", io_llp);

#ifdef CONFIG_SPARSEMEM_VMEMMAP
		patch_slb_encoding(slb_miss_kernel_load_vmemmap,
				   SLB_VSID_KERNEL | vmemmap_llp);
		pr_devel("SLB: vmemmap LLP = %04lx\n", vmemmap_llp);
#endif
	}

	get_paca()->stab_rr = SLB_NUM_BOLTED - 1;

	lflags = SLB_VSID_KERNEL | linear_llp;
	vflags = SLB_VSID_KERNEL | vmalloc_llp;

	/* Invalidate the entire SLB (even entry 0) & all the ERATS */
	asm volatile("isync":::"memory");
	asm volatile("slbmte %0,%0"::"r" (0) : "memory");
	asm volatile("isync; slbia; isync":::"memory");
	create_shadowed_slbe(PAGE_OFFSET, mmu_kernel_ssize, lflags, LINEAR_INDEX);
	create_shadowed_slbe(VMALLOC_START, mmu_kernel_ssize, vflags, VMALLOC_INDEX);

	/* For the boot cpu, we're running on the stack in init_thread_union,
	 * which is in the first segment of the linear mapping, and also
	 * get_paca()->kstack hasn't been initialized yet.
	 * For secondary cpus, we need to bolt the kernel stack entry now.
	 */
	slb_shadow_clear(KSTACK_INDEX);
	if (raw_smp_processor_id() != boot_cpuid &&
	    (get_paca()->kstack & slb_esid_mask(mmu_kernel_ssize)) > PAGE_OFFSET)
		create_shadowed_slbe(get_paca()->kstack,
				     mmu_kernel_ssize, lflags, KSTACK_INDEX);

	asm volatile("isync":::"memory");
}

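/*
 * Rough sketch of the post-slb_initialize() state on a secondary CPU
 * (illustrative only; slot numbers follow enum slb_index above):
 *
 *	slot 0 (LINEAR_INDEX):  PAGE_OFFSET,   SLB_VSID_KERNEL | linear_llp
 *	slot 1 (VMALLOC_INDEX): VMALLOC_START, SLB_VSID_KERNEL | vmalloc_llp
 *	slot 2 (KSTACK_INDEX):  the kernel stack segment
 *
 * stab_rr is primed to SLB_NUM_BOLTED - 1, so the first round-robin
 * insert in insert_slb_entry() below lands in slot SLB_NUM_BOLTED.
 */
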
static void insert_slb_entry(unsigned long vsid, unsigned long ea,
			     int bpsize, int ssize)
{
	unsigned long flags, vsid_data, esid_data;
	enum slb_index index;
	int slb_cache_index;

	/*
	 * Interrupts are disabled, so it is safe to access the PACA.
	 */
	VM_WARN_ON(!irqs_disabled());

	/*
	 * We can't take a PMU exception in the following code, so hard
	 * disable interrupts.
	 */
	hard_irq_disable();

	index = get_paca()->stab_rr;

	/*
	 * Simple round-robin replacement of SLB entries, starting at
	 * SLB_NUM_BOLTED.
	 */
	if (index < (mmu_slb_size - 1))
		index++;
	else
		index = SLB_NUM_BOLTED;

	get_paca()->stab_rr = index;

	flags = SLB_VSID_USER | mmu_psize_defs[bpsize].sllp;
	vsid_data = (vsid << slb_vsid_shift(ssize)) | flags |
		    ((unsigned long) ssize << SLB_VSID_SSIZE_SHIFT);
	esid_data = mk_esid_data(ea, ssize, index);

	/*
	 * No need for an isync before or after this slbmte. The exception
	 * we enter with and the rfid we exit with are context synchronizing.
	 * Also we only handle user segments here.
	 */
	asm volatile("slbmte %0, %1" : : "r" (vsid_data), "r" (esid_data)
		     : "memory");

	/*
	 * Now update the SLB cache entries.
	 */
	slb_cache_index = get_paca()->slb_cache_ptr;
	if (slb_cache_index < SLB_CACHE_ENTRIES) {
		/*
		 * We have space in the slb cache for an optimized
		 * switch_slb().  Top 36 bits of esid_data, as per the ISA.
		 */
		get_paca()->slb_cache[slb_cache_index++] = esid_data >> 28;
		get_paca()->slb_cache_ptr++;
	} else {
		/*
		 * Our cache is full and the current cache content no longer
		 * reflects the active SLB contents.  Bump the ptr so that
		 * switch_slb() will ignore the cache.
		 */
		get_paca()->slb_cache_ptr = SLB_CACHE_ENTRIES + 1;
	}
}

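/*
 * Round-robin sketch (illustrative, assuming SLB_NUM_BOLTED == 3 and a
 * 32-entry SLB): stab_rr walks 3, 4, ..., 31 and then wraps back to 3,
 * so the bolted slots 0-2 are never overwritten by user entries.  Each
 * insert also appends ESID bits 0-35 (esid_data >> 28) to slb_cache[]
 * until the cache is full, at which point slb_cache_ptr is bumped past
 * SLB_CACHE_ENTRIES and switch_slb() falls back to a full flush and
 * rebolt.
 */
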
static void handle_multi_context_slb_miss(int context_id, unsigned long ea)
{
	struct mm_struct *mm = current->mm;
	unsigned long vsid;
	int bpsize;

	/*
	 * We are always above 1TB, hence use high user segment size.
	 */
	vsid = get_vsid(context_id, ea, mmu_highuser_ssize);
	bpsize = get_slice_psize(mm, ea);
	insert_slb_entry(vsid, ea, bpsize, mmu_highuser_ssize);
}

void slb_miss_large_addr(struct pt_regs *regs)
{
	enum ctx_state prev_state = exception_enter();
	unsigned long ea = regs->dar;
	int context;

	if (REGION_ID(ea) != USER_REGION_ID)
		goto slb_bad_addr;

	/*
	 * Are we beyond what the page table layout supports?
	 */
	if ((ea & ~REGION_MASK) >= H_PGTABLE_RANGE)
		goto slb_bad_addr;

	/* Lower addresses should have been handled by asm code. */
	if (ea < (1UL << MAX_EA_BITS_PER_CONTEXT))
		goto slb_bad_addr;

	/*
	 * Consider this a bad access if we take an SLB miss on an
	 * address above the addr limit.
	 */
	if (ea >= current->mm->context.slb_addr_limit)
		goto slb_bad_addr;

	context = get_ea_context(&current->mm->context, ea);
	if (!context)
		goto slb_bad_addr;

	handle_multi_context_slb_miss(context, ea);
	exception_exit(prev_state);
	return;

slb_bad_addr:
	if (user_mode(regs))
		_exception(SIGSEGV, regs, SEGV_BNDERR, ea);
	else
		bad_page_fault(regs, ea, SIGSEGV);
	exception_exit(prev_state);
}