/*
 * native hashtable management.
 *
 * SMP scalability work:
 *    Copyright (C) 2001 Anton Blanchard <anton@au.ibm.com>, IBM
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */
#include <linux/spinlock.h>
#include <linux/bitops.h>
#include <linux/threads.h>
#include <linux/smp.h>

#include <asm/abs_addr.h>
#include <asm/machdep.h>
#include <asm/mmu.h>
#include <asm/mmu_context.h>
#include <asm/pgtable.h>
#include <asm/tlbflush.h>
#include <asm/tlb.h>
#include <asm/cputable.h>

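/*
 * Bit 3 of an HPTE's first doubleword is, as far as the architected
 * HPTE layout is concerned, left to software, so we use it as a
 * per-entry spinlock: test_and_set_bit() takes it, and clearing it
 * (or rewriting the whole doubleword) releases it.
 */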
#define HPTE_LOCK_BIT 3

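/*
 * Some hardware deadlocks if two cpus tlbie at once (see the comment
 * in native_hpte_clear() below), so unless CPU_FTR_LOCKLESS_TLBIE is
 * set we serialize every tlbie behind this global lock.
 */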
static DEFINE_SPINLOCK(native_tlbie_lock);

static inline void native_lock_hpte(HPTE *hptep)
{
	unsigned long *word = &hptep->dw0.dword0;

	while (1) {
		if (!test_and_set_bit(HPTE_LOCK_BIT, word))
			break;
		while (test_bit(HPTE_LOCK_BIT, word))
			cpu_relax();
	}
}

static inline void native_unlock_hpte(HPTE *hptep)
{
	unsigned long *word = &hptep->dw0.dword0;

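	/*
	 * lwsync gives clear_bit() release semantics: all prior stores
	 * to the PTE are ordered before the lock bit goes clear.
	 */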
	asm volatile("lwsync":::"memory");
	clear_bit(HPTE_LOCK_BIT, word);
}

long native_hpte_insert(unsigned long hpte_group, unsigned long va,
			unsigned long prpn, int secondary,
			unsigned long hpteflags, int bolted, int large)
{
	unsigned long arpn = physRpn_to_absRpn(prpn);
	HPTE *hptep = htab_address + hpte_group;
	Hpte_dword0 dw0;
	HPTE lhpte;
	int i;

	for (i = 0; i < HPTES_PER_GROUP; i++) {
		dw0 = hptep->dw0.dw0;

		if (!dw0.v) {
			/* retry with lock held */
			native_lock_hpte(hptep);
			dw0 = hptep->dw0.dw0;
			if (!dw0.v)
				break;
			native_unlock_hpte(hptep);
		}

		hptep++;
	}

	if (i == HPTES_PER_GROUP)
		return -1;

	lhpte.dw1.dword1 = 0;
	lhpte.dw1.dw1.rpn = arpn;
	lhpte.dw1.flags.flags = hpteflags;

	lhpte.dw0.dword0 = 0;
	lhpte.dw0.dw0.avpn = va >> 23;
	lhpte.dw0.dw0.h = secondary;
	lhpte.dw0.dw0.bolted = bolted;
	lhpte.dw0.dw0.v = 1;

	if (large) {
		lhpte.dw0.dw0.l = 1;
		lhpte.dw0.dw0.avpn &= ~0x1UL;
	}

	hptep->dw1.dword1 = lhpte.dw1.dword1;

	/* Guarantee the second dword is visible before the valid bit */
	__asm__ __volatile__ ("eieio" : : : "memory");

	/*
	 * Now set the first dword including the valid bit
	 * NOTE: this also unlocks the hpte
	 */
	hptep->dw0.dword0 = lhpte.dw0.dword0;

	__asm__ __volatile__ ("ptesync" : : : "memory");

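	/*
	 * The return value encodes where the entry went: the low three
	 * bits are the slot within the group, and bit 3 says whether
	 * the group was reached via the secondary hash.
	 */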
	return i | (secondary << 3);
}

static long native_hpte_remove(unsigned long hpte_group)
{
	HPTE *hptep;
	Hpte_dword0 dw0;
	int i;
	int slot_offset;

	/* pick a random entry to start at */
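	/* (the timebase low bits are a cheap source of pseudo-randomness) */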
	slot_offset = mftb() & 0x7;

	for (i = 0; i < HPTES_PER_GROUP; i++) {
		hptep = htab_address + hpte_group + slot_offset;
		dw0 = hptep->dw0.dw0;

		if (dw0.v && !dw0.bolted) {
			/* retry with lock held */
			native_lock_hpte(hptep);
			dw0 = hptep->dw0.dw0;
			if (dw0.v && !dw0.bolted)
				break;
			native_unlock_hpte(hptep);
		}

		slot_offset++;
		slot_offset &= 0x7;
	}

	if (i == HPTES_PER_GROUP)
		return -1;

	/* Invalidate the hpte. NOTE: this also unlocks it */
	hptep->dw0.dword0 = 0;

	return i;
}

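/*
 * Atomically rewrite the pp bits in the second doubleword of an HPTE:
 * ldarx/stdcx. retries until the update is clean, and rldimi inserts
 * the low bits of pp over the protection field without touching the
 * rest of the doubleword.
 */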
static inline void set_pp_bit(unsigned long pp, HPTE *addr)
{
	unsigned long old;
	unsigned long *p = &addr->dw1.dword1;

	__asm__ __volatile__(
	"1:	ldarx	%0,0,%3\n\
		rldimi	%0,%2,0,61\n\
		stdcx.	%0,0,%3\n\
		bne	1b"
	: "=&r" (old), "=m" (*p)
	: "r" (pp), "r" (p), "m" (*p)
	: "cc");
}

/*
 * Only works on small pages. Yes, it's ugly to have to check each slot
 * in the group, but we only use this during bootup. Returns the slot
 * index, negated when the entry was found via the secondary hash, or
 * -1 if the vpn is not mapped at all.
 */
static long native_hpte_find(unsigned long vpn)
{
	HPTE *hptep;
	unsigned long hash;
	unsigned long i, j;
	long slot;
	Hpte_dword0 dw0;

	hash = hpt_hash(vpn, 0);

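	/* pass 0 probes the primary hash bucket, pass 1 the secondary (~hash) */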
	for (j = 0; j < 2; j++) {
		slot = (hash & htab_hash_mask) * HPTES_PER_GROUP;
		for (i = 0; i < HPTES_PER_GROUP; i++) {
			hptep = htab_address + slot;
			dw0 = hptep->dw0.dw0;

			if ((dw0.avpn == (vpn >> 11)) && dw0.v &&
			    (dw0.h == j)) {
				/* HPTE matches */
				if (j)
					slot = -slot;
				return slot;
			}
			++slot;
		}
		hash = ~hash;
	}

	return -1;
}

static long native_hpte_updatepp(unsigned long slot, unsigned long newpp,
				 unsigned long va, int large, int local)
{
	HPTE *hptep = htab_address + slot;
	Hpte_dword0 dw0;
	unsigned long avpn = va >> 23;
	int ret = 0;

	if (large)
		avpn &= ~0x1UL;

	native_lock_hpte(hptep);

	dw0 = hptep->dw0.dw0;

	/* Even if we miss, we need to invalidate the TLB */
	if ((dw0.avpn != avpn) || !dw0.v) {
		native_unlock_hpte(hptep);
		ret = -1;
	} else {
		set_pp_bit(newpp, hptep);
		native_unlock_hpte(hptep);
	}

	/* Ensure it is out of the tlb too */
	if (cpu_has_feature(CPU_FTR_TLBIEL) && !large && local) {
		tlbiel(va);
	} else {
		int lock_tlbie = !cpu_has_feature(CPU_FTR_LOCKLESS_TLBIE);

		if (lock_tlbie)
			spin_lock(&native_tlbie_lock);
		tlbie(va, large);
		if (lock_tlbie)
			spin_unlock(&native_tlbie_lock);
	}

	return ret;
}

/*
 * Update the page protection bits. Intended to be used to create
 * guard pages for kernel data structures on pages which are bolted
 * in the HPT. Assumes pages being operated on will not be stolen.
 * Does not work on large pages.
 *
 * No need to lock here because we should be the only user.
 */
static void native_hpte_updateboltedpp(unsigned long newpp, unsigned long ea)
{
	unsigned long vsid, va, vpn, flags = 0;
	long slot;
	HPTE *hptep;
	int lock_tlbie = !cpu_has_feature(CPU_FTR_LOCKLESS_TLBIE);

	vsid = get_kernel_vsid(ea);
	va = (vsid << 28) | (ea & 0x0fffffff);
	vpn = va >> PAGE_SHIFT;

	slot = native_hpte_find(vpn);
	if (slot == -1)
		panic("could not find page to bolt\n");
	hptep = htab_address + slot;

	set_pp_bit(newpp, hptep);

	/* Ensure it is out of the tlb too */
	if (lock_tlbie)
		spin_lock_irqsave(&native_tlbie_lock, flags);
	tlbie(va, 0);
	if (lock_tlbie)
		spin_unlock_irqrestore(&native_tlbie_lock, flags);
}

static void native_hpte_invalidate(unsigned long slot, unsigned long va,
				   int large, int local)
{
	HPTE *hptep = htab_address + slot;
	Hpte_dword0 dw0;
	unsigned long avpn = va >> 23;
	unsigned long flags;
	int lock_tlbie = !cpu_has_feature(CPU_FTR_LOCKLESS_TLBIE);

	if (large)
		avpn &= ~0x1UL;

	local_irq_save(flags);
	native_lock_hpte(hptep);

	dw0 = hptep->dw0.dw0;

	/* Even if we miss, we need to invalidate the TLB */
	if ((dw0.avpn != avpn) || !dw0.v) {
		native_unlock_hpte(hptep);
	} else {
		/* Invalidate the hpte. NOTE: this also unlocks it */
		hptep->dw0.dword0 = 0;
	}

	/* Invalidate the tlb */
	if (cpu_has_feature(CPU_FTR_TLBIEL) && !large && local) {
		tlbiel(va);
	} else {
		if (lock_tlbie)
			spin_lock(&native_tlbie_lock);
		tlbie(va, large);
		if (lock_tlbie)
			spin_unlock(&native_tlbie_lock);
	}
	local_irq_restore(flags);
}

/*
 * Clear all mappings on kexec. All cpus are in real mode (or they will
 * be when they isi), and we are the only one left. We rely on our kernel
 * mapping being 0xC0's and the hardware ignoring those two real bits.
 *
 * TODO: add batching support when enabled. Remember, no dynamic memory
 * here, although there is the control page available...
 */
static void native_hpte_clear(void)
{
	unsigned long slot, slots, flags;
	HPTE *hptep = htab_address;
	Hpte_dword0 dw0;
	unsigned long pteg_count;

	pteg_count = htab_hash_mask + 1;

	local_irq_save(flags);

	/*
	 * We take the tlbie lock and hold it.  Some hardware will
	 * deadlock if we try to tlbie from two processors at once.
	 */
	spin_lock(&native_tlbie_lock);

	slots = pteg_count * HPTES_PER_GROUP;

	for (slot = 0; slot < slots; slot++, hptep++) {
		/*
		 * We could lock the pte here, but we are the only cpu
		 * running, right?  And for crash dump, we probably
		 * don't want to wait for a maybe bad cpu.
		 */
		dw0 = hptep->dw0.dw0;

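		/*
		 * slot2va() reconstructs the virtual address from the
		 * AVPN and the slot number, so each valid entry can be
		 * flushed from the TLB as it is zapped.
		 */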
		if (dw0.v) {
			hptep->dw0.dword0 = 0;
			tlbie(slot2va(dw0.avpn, dw0.l, dw0.h, slot), dw0.l);
		}
	}

	spin_unlock(&native_tlbie_lock);
	local_irq_restore(flags);
}

static void native_flush_hash_range(unsigned long context,
				    unsigned long number, int local)
{
	unsigned long vsid, vpn, va, hash, secondary, slot, flags, avpn;
	int i, j;
	HPTE *hptep;
	Hpte_dword0 dw0;
	struct ppc64_tlb_batch *batch = &__get_cpu_var(ppc64_tlb_batch);

	/* XXX fix for large ptes */
	unsigned long large = 0;

	local_irq_save(flags);

	j = 0;
	for (i = 0; i < number; i++) {
		if (batch->addr[i] < KERNELBASE)
			vsid = get_vsid(context, batch->addr[i]);
		else
			vsid = get_kernel_vsid(batch->addr[i]);

		va = (vsid << 28) | (batch->addr[i] & 0x0fffffff);
		batch->vaddr[j] = va;
		if (large)
			vpn = va >> HPAGE_SHIFT;
		else
			vpn = va >> PAGE_SHIFT;
		hash = hpt_hash(vpn, large);
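		/*
		 * The Linux PTE remembers where its HPTE went:
		 * _PAGE_SECONDARY records which hash was used and
		 * _PAGE_GROUP_IX the slot within the group, so we can
		 * go straight to the entry instead of searching.
		 */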
		secondary = (pte_val(batch->pte[i]) & _PAGE_SECONDARY) >> 15;
		if (secondary)
			hash = ~hash;
		slot = (hash & htab_hash_mask) * HPTES_PER_GROUP;
		slot += (pte_val(batch->pte[i]) & _PAGE_GROUP_IX) >> 12;

		hptep = htab_address + slot;

		avpn = va >> 23;
		if (large)
			avpn &= ~0x1UL;

		native_lock_hpte(hptep);

		dw0 = hptep->dw0.dw0;

		/* Even if we miss, we need to invalidate the TLB */
		if ((dw0.avpn != avpn) || !dw0.v) {
			native_unlock_hpte(hptep);
		} else {
			/* Invalidate the hpte. NOTE: this also unlocks it */
			hptep->dw0.dword0 = 0;
		}

		j++;
	}

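	/*
	 * Now flush the batched addresses from the TLB: tlbiel bracketed
	 * by ptesync when everything was cpu-local, otherwise global
	 * tlbies (under the lock where required) finished off with
	 * eieio; tlbsync; ptesync to wait until every cpu has dropped
	 * the stale translations.
	 */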
	if (cpu_has_feature(CPU_FTR_TLBIEL) && !large && local) {
		asm volatile("ptesync":::"memory");

		for (i = 0; i < j; i++)
			__tlbiel(batch->vaddr[i]);

		asm volatile("ptesync":::"memory");
	} else {
		int lock_tlbie = !cpu_has_feature(CPU_FTR_LOCKLESS_TLBIE);

		if (lock_tlbie)
			spin_lock(&native_tlbie_lock);

		asm volatile("ptesync":::"memory");

		for (i = 0; i < j; i++)
			__tlbie(batch->vaddr[i], 0);

		asm volatile("eieio; tlbsync; ptesync":::"memory");

		if (lock_tlbie)
			spin_unlock(&native_tlbie_lock);
	}

	local_irq_restore(flags);
}

#ifdef CONFIG_PPC_PSERIES
/* Disable TLB batching on nighthawk */
static inline int tlb_batching_enabled(void)
{
	struct device_node *root = of_find_node_by_path("/");
	int enabled = 1;

	if (root) {
		const char *model = get_property(root, "model", NULL);
		if (model && !strcmp(model, "IBM,9076-N81"))
			enabled = 0;
		of_node_put(root);
	}

	return enabled;
}
#else
static inline int tlb_batching_enabled(void)
{
	return 1;
}
#endif

void hpte_init_native(void)
{
	ppc_md.hpte_invalidate = native_hpte_invalidate;
	ppc_md.hpte_updatepp = native_hpte_updatepp;
	ppc_md.hpte_updateboltedpp = native_hpte_updateboltedpp;
	ppc_md.hpte_insert = native_hpte_insert;
	ppc_md.hpte_remove = native_hpte_remove;
	ppc_md.hpte_clear_all = native_hpte_clear;
	if (tlb_batching_enabled())
		ppc_md.flush_hash_range = native_flush_hash_range;
	htab_finish_init();
}