#ifndef _SPARC64_TSB_H
#define _SPARC64_TSB_H

/* The sparc64 TSB is similar to the powerpc hashtables.  It's a
 * power-of-2 sized table of TAG/PTE pairs.  The cpu precomputes
 * pointers into this table for 8K and 64K page sizes, and also a
 * comparison TAG based upon the virtual address and context which
 * faults.
 *
 * TLB miss trap handler software does the actual lookup via something
 * of the form:
 *
 *	ldxa		[%g0] ASI_{D,I}MMU_TSB_8KB_PTR, %g1
 *	ldxa		[%g0] ASI_{D,I}MMU, %g6
 *	ldda		[%g1] ASI_NUCLEUS_QUAD_LDD, %g4
 *	cmp		%g4, %g6
 *	bne,pn		%xcc, tsb_miss_{d,i}tlb
 *	 mov		FAULT_CODE_{D,I}TLB, %g3
 *	stxa		%g5, [%g0] ASI_{D,I}TLB_DATA_IN
 *	retry
 *
 * Each 16-byte slot of the TSB is the 8-byte tag and then the 8-byte
 * PTE.  The TAG is of the same layout as the TLB TAG TARGET mmu
 * register which is:
 *
 *	-------------------------------------------------
 *	| -  |  CONTEXT  |  -  |   VADDR bits 63:22    |
 *	-------------------------------------------------
 *	 63 61 60       48 47 42 41                   0
 *
 * Like the powerpc hashtables we need to use locking in order to
 * synchronize while we update the entries.  PTE updates need locking
 * as well.
 *
 * We need to carefully choose a lock bit for the TSB entry.  We
 * choose to use bit 47 in the tag.  Also, since we never map anything
 * at page zero in context zero, we use zero as an invalid tag entry.
 * When the lock bit is set, this forces a tag comparison failure.
 */
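
/* For example, for a fault at virtual address VA in context CTX, the
 * comparison tag is formed roughly as follows (illustrative C, read
 * off the layout diagram above):
 *
 *	tag = ((unsigned long) CTX << 48) | (VA >> 22);
 *
 * which is what the TLB TAG TARGET register delivers in %g6 in the
 * handler sketch above.
 */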

#define TSB_TAG_LOCK_BIT	47
#define TSB_TAG_LOCK_HIGH	(1 << (TSB_TAG_LOCK_BIT - 32))
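
/* Note: since TSB_TAG_LOCK_BIT is 47 and the 32-bit accessors below
 * operate on the upper word of the tag, TSB_TAG_LOCK_HIGH works out
 * to (1 << 15), i.e. 0x8000 within that high word.
 */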

#define TSB_MEMBAR	membar	#StoreStore

/* Some cpus support physical address quad loads.  We want to use
 * those if possible so we don't need to hard-lock the TSB mapping
 * into the TLB.  We encode some instruction patching in order to
 * support this.
 *
 * The kernel TSB is locked into the TLB by virtue of being in the
 * kernel image, so we don't play these games for swapper_tsb access.
 */
#ifndef __ASSEMBLY__
struct tsb_ldquad_phys_patch_entry {
	unsigned int	addr;
	unsigned int	sun4u_insn;
	unsigned int	sun4v_insn;
};
extern struct tsb_ldquad_phys_patch_entry __tsb_ldquad_phys_patch,
	__tsb_ldquad_phys_patch_end;

struct tsb_phys_patch_entry {
	unsigned int	addr;
	unsigned int	insn;
};
extern struct tsb_phys_patch_entry __tsb_phys_patch, __tsb_phys_patch_end;
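
/* Boot-time setup code walks these tables and rewrites each recorded
 * instruction in place, picking the sun4u or sun4v variant as
 * appropriate.  A minimal sketch of how such a pass might look (an
 * assumption for illustration; "is_sun4v" stands in for however the
 * cpu type is actually determined):
 *
 *	struct tsb_ldquad_phys_patch_entry *p = &__tsb_ldquad_phys_patch;
 *
 *	while (p < &__tsb_ldquad_phys_patch_end) {
 *		unsigned long addr = p->addr;
 *
 *		*(unsigned int *) addr = (is_sun4v ?
 *					  p->sun4v_insn : p->sun4u_insn);
 *		wmb();
 *		__asm__ __volatile__("flush	%0" : : "r" (addr));
 *		p++;
 *	}
 */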
#endif

#define TSB_LOAD_QUAD(TSB, REG)	\
661:	ldda		[TSB] ASI_NUCLEUS_QUAD_LDD, REG; \
	.section	.tsb_ldquad_phys_patch, "ax"; \
	.word		661b; \
	ldda		[TSB] ASI_QUAD_LDD_PHYS, REG; \
	ldda		[TSB] ASI_QUAD_LDD_PHYS_4V, REG; \
	.previous

#define TSB_LOAD_TAG_HIGH(TSB, REG) \
661:	lduwa		[TSB] ASI_N, REG; \
	.section	.tsb_phys_patch, "ax"; \
	.word		661b; \
	lduwa		[TSB] ASI_PHYS_USE_EC, REG; \
	.previous

#define TSB_LOAD_TAG(TSB, REG) \
661:	ldxa		[TSB] ASI_N, REG; \
	.section	.tsb_phys_patch, "ax"; \
	.word		661b; \
	ldxa		[TSB] ASI_PHYS_USE_EC, REG; \
	.previous

#define TSB_CAS_TAG_HIGH(TSB, REG1, REG2) \
661:	casa		[TSB] ASI_N, REG1, REG2; \
	.section	.tsb_phys_patch, "ax"; \
	.word		661b; \
	casa		[TSB] ASI_PHYS_USE_EC, REG1, REG2; \
	.previous

#define TSB_CAS_TAG(TSB, REG1, REG2) \
661:	casxa		[TSB] ASI_N, REG1, REG2; \
	.section	.tsb_phys_patch, "ax"; \
	.word		661b; \
	casxa		[TSB] ASI_PHYS_USE_EC, REG1, REG2; \
	.previous

#define TSB_STORE(ADDR, VAL) \
661:	stxa		VAL, [ADDR] ASI_N; \
	.section	.tsb_phys_patch, "ax"; \
	.word		661b; \
	stxa		VAL, [ADDR] ASI_PHYS_USE_EC; \
	.previous

#define TSB_LOCK_TAG(TSB, REG1, REG2)	\
99:	TSB_LOAD_TAG_HIGH(TSB, REG1);	\
	sethi	%hi(TSB_TAG_LOCK_HIGH), REG2;\
	andcc	REG1, REG2, %g0;	\
	bne,pn	%icc, 99b;		\
	 nop;				\
	TSB_CAS_TAG_HIGH(TSB, REG1, REG2);	\
	cmp	REG1, REG2;		\
	bne,pn	%icc, 99b;		\
	 nop;				\
	TSB_MEMBAR
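
/* In rough C terms, TSB_LOCK_TAG spins as follows (sketch only; the
 * casa swaps the lock pattern into the tag's high word and returns
 * the old value):
 *
 *	for (;;) {
 *		u32 old = tag_high;
 *		if (old & TSB_TAG_LOCK_HIGH)
 *			continue;	-- someone else holds the entry
 *		if (cas(&tag_high, old, TSB_TAG_LOCK_HIGH) == old)
 *			break;		-- lock acquired
 *	}
 */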

#define TSB_WRITE(TSB, TTE, TAG) \
	add	TSB, 0x8, TSB; \
	TSB_STORE(TSB, TTE); \
	sub	TSB, 0x8, TSB; \
	TSB_MEMBAR; \
	TSB_STORE(TSB, TAG);
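
/* Note the ordering in TSB_WRITE: the PTE half is stored first and a
 * store-store barrier is issued before the tag store, because writing
 * a valid (non-locked) tag is what makes the whole entry visible to
 * the TSB lookup in the TLB miss handler.
 */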

#define KTSB_LOAD_QUAD(TSB, REG) \
	ldda		[TSB] ASI_NUCLEUS_QUAD_LDD, REG;

#define KTSB_STORE(ADDR, VAL) \
	stxa		VAL, [ADDR] ASI_N;

#define KTSB_LOCK_TAG(TSB, REG1, REG2)	\
99:	lduwa	[TSB] ASI_N, REG1;	\
	sethi	%hi(TSB_TAG_LOCK_HIGH), REG2;\
	andcc	REG1, REG2, %g0;	\
	bne,pn	%icc, 99b;		\
	 nop;				\
	casa	[TSB] ASI_N, REG1, REG2;\
	cmp	REG1, REG2;		\
	bne,pn	%icc, 99b;		\
	 nop;				\
	TSB_MEMBAR

#define KTSB_WRITE(TSB, TTE, TAG) \
	add	TSB, 0x8, TSB; \
	stxa	TTE, [TSB] ASI_N; \
	sub	TSB, 0x8, TSB; \
	TSB_MEMBAR; \
	stxa	TAG, [TSB] ASI_N;

	/* Do a kernel page table walk.  Leaves physical PTE pointer in
	 * REG1.  Jumps to FAIL_LABEL on early page table walk termination.
	 * VADDR will not be clobbered, but REG2 will.
	 */
#define KERN_PGTABLE_WALK(VADDR, REG1, REG2, FAIL_LABEL)	\
	sethi		%hi(swapper_pg_dir), REG1; \
	or		REG1, %lo(swapper_pg_dir), REG1; \
	sllx		VADDR, 64 - (PGDIR_SHIFT + PGDIR_BITS), REG2; \
	srlx		REG2, 64 - PAGE_SHIFT, REG2; \
	andn		REG2, 0x3, REG2; \
	lduw		[REG1 + REG2], REG1; \
	brz,pn		REG1, FAIL_LABEL; \
	 sllx		VADDR, 64 - (PMD_SHIFT + PMD_BITS), REG2; \
	srlx		REG2, 64 - PAGE_SHIFT, REG2; \
	sllx		REG1, 11, REG1; \
	andn		REG2, 0x3, REG2; \
	lduwa		[REG1 + REG2] ASI_PHYS_USE_EC, REG1; \
	brz,pn		REG1, FAIL_LABEL; \
	 sllx		VADDR, 64 - PMD_SHIFT, REG2; \
	srlx		REG2, 64 - PAGE_SHIFT, REG2; \
	sllx		REG1, 11, REG1; \
	andn		REG2, 0x7, REG2; \
	add		REG1, REG2, REG1;

	/* Do a user page table walk in MMU globals.  Leaves physical PTE
	 * pointer in REG1.  Jumps to FAIL_LABEL on early page table walk
	 * termination.  Physical base of page tables is in PHYS_PGD which
	 * will not be modified.
	 *
	 * VADDR will not be clobbered, but REG1 and REG2 will.
	 */
#define USER_PGTABLE_WALK_TL1(VADDR, PHYS_PGD, REG1, REG2, FAIL_LABEL)	\
	sllx		VADDR, 64 - (PGDIR_SHIFT + PGDIR_BITS), REG2; \
	srlx		REG2, 64 - PAGE_SHIFT, REG2; \
	andn		REG2, 0x3, REG2; \
	lduwa		[PHYS_PGD + REG2] ASI_PHYS_USE_EC, REG1; \
	brz,pn		REG1, FAIL_LABEL; \
	 sllx		VADDR, 64 - (PMD_SHIFT + PMD_BITS), REG2; \
	srlx		REG2, 64 - PAGE_SHIFT, REG2; \
	sllx		REG1, 11, REG1; \
	andn		REG2, 0x3, REG2; \
	lduwa		[REG1 + REG2] ASI_PHYS_USE_EC, REG1; \
	brz,pn		REG1, FAIL_LABEL; \
	 sllx		VADDR, 64 - PMD_SHIFT, REG2; \
	srlx		REG2, 64 - PAGE_SHIFT, REG2; \
	sllx		REG1, 11, REG1; \
	andn		REG2, 0x7, REG2; \
	add		REG1, REG2, REG1;
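
/* In rough C, both walks above do the following (sketch; pgd and pmd
 * entries are 32-bit words whose value, shifted left by 11 bits,
 * yields the physical address of the next-level table; the index
 * helpers are illustrative names for the sllx/srlx/andn arithmetic):
 *
 *	u32 pgd = pgd_table[pgd_index(vaddr)];
 *	if (!pgd)
 *		goto fail;
 *	u32 pmd = pmd_table(pgd)[pmd_index(vaddr)];
 *	if (!pmd)
 *		goto fail;
 *	pte_paddr = ((u64) pmd << 11) + pte_offset(vaddr);
 */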

/* Look up an OBP mapping on VADDR in the prom_trans[] table at TL>0.
 * If no entry is found, FAIL_LABEL will be branched to.  On success
 * the resulting PTE value will be left in REG1.  VADDR is preserved
 * by this routine.
 */
#define OBP_TRANS_LOOKUP(VADDR, REG1, REG2, REG3, FAIL_LABEL) \
	sethi		%hi(prom_trans), REG1; \
	or		REG1, %lo(prom_trans), REG1; \
97:	ldx		[REG1 + 0x00], REG2; \
	brz,pn		REG2, FAIL_LABEL; \
	 nop; \
	ldx		[REG1 + 0x08], REG3; \
	add		REG2, REG3, REG3; \
	cmp		REG2, VADDR; \
	bgu,pt		%xcc, 98f; \
	 cmp		VADDR, REG3; \
	bgeu,pt		%xcc, 98f; \
	 ldx		[REG1 + 0x10], REG3; \
	sub		VADDR, REG2, REG2; \
	ba,pt		%xcc, 99f; \
	 add		REG3, REG2, REG1; \
98:	ba,pt		%xcc, 97b; \
	 add		REG1, (3 * 8), REG1; \
99:
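
/* Equivalent C for the lookup (sketch; assumes each prom_trans[]
 * entry is a { virt, size, data } triple of 64-bit words, matching
 * the 0x00/0x08/0x10 offsets and the 3 * 8 byte stride above):
 *
 *	for (p = prom_trans; p->virt; p++)
 *		if (vaddr >= p->virt && vaddr < p->virt + p->size)
 *			return p->data + (vaddr - p->virt);
 *	goto fail;
 */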

	/* We use a 32K TSB for the whole kernel, which allows us to
	 * handle about 16MB of modules and vmalloc mappings without
	 * incurring many hash conflicts.
	 */
#define KERNEL_TSB_SIZE_BYTES	(32 * 1024)
#define KERNEL_TSB_NENTRIES	\
	(KERNEL_TSB_SIZE_BYTES / 16)
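
/* That works out to (32 * 1024) / 16 == 2048 entries; with one 8K
 * base page per 16-byte TAG/PTE pair, a fully populated direct-mapped
 * TSB covers 2048 * 8K == 16MB, hence the figure above.
 */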

	/* Do a kernel TSB lookup at tl>0 on VADDR+TAG, branch to OK_LABEL
	 * on TSB hit.  REG1, REG2, REG3, and REG4 are used as temporaries
	 * and the found TTE will be left in REG1.  REG3 and REG4 must
	 * be an even/odd pair of registers.
	 *
	 * VADDR and TAG will be preserved and not clobbered by this macro.
	 */
#define KERN_TSB_LOOKUP_TL1(VADDR, TAG, REG1, REG2, REG3, REG4, OK_LABEL) \
	sethi		%hi(swapper_tsb), REG1; \
	or		REG1, %lo(swapper_tsb), REG1; \
	srlx		VADDR, PAGE_SHIFT, REG2; \
	and		REG2, (KERNEL_TSB_NENTRIES - 1), REG2; \
	sllx		REG2, 4, REG2; \
	add		REG1, REG2, REG2; \
	KTSB_LOAD_QUAD(REG2, REG3); \
	cmp		REG3, TAG; \
	be,a,pt		%xcc, OK_LABEL; \
	 mov		REG4, REG1;
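
/* The index computation above amounts to (sketch):
 *
 *	entry   = (vaddr >> PAGE_SHIFT) & (KERNEL_TSB_NENTRIES - 1);
 *	tsb_ptr = swapper_tsb + entry * 16;
 *
 * i.e. a direct-mapped hash on the virtual page number, 16 bytes per
 * TAG/PTE pair, with the quad load fetching tag and TTE in one go.
 */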

#endif /* !(_SPARC64_TSB_H) */