/*
 * arch/ppc64/mm/slb_low.S
 *
 * Low-level SLB routines
 *
 * Copyright (C) 2004 David Gibson <dwg@au.ibm.com>, IBM
 *
 * Based on earlier C version:
 * Dave Engebretsen and Mike Corrigan {engebret|mikejc}@us.ibm.com
 * Copyright (c) 2001 Dave Engebretsen
 * Copyright (C) 2002 Anton Blanchard <anton@au.ibm.com>, IBM
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */

#include <linux/config.h>
#include <asm/processor.h>
#include <asm/ppc_asm.h>
#include <asm/asm-offsets.h>
#include <asm/cputable.h>
#include <asm/page.h>
#include <asm/mmu.h>
#include <asm/pgtable.h>

/* void slb_allocate_realmode(unsigned long ea);
 *
 * Create an SLB entry for the given EA (user or kernel).
 * r3 = faulting address, r13 = PACA
 * r9, r10, r11 are clobbered by this function
 * No other registers are examined or changed.
 */
_GLOBAL(slb_allocate_realmode)
	/* r3 = faulting address */

	srdi	r9,r3,60		/* get region */
	srdi	r10,r3,28		/* get esid */
	cmpldi	cr7,r9,0xc		/* cmp KERNELBASE for later use */

	/* r3 = address, r10 = esid, cr7 = <>KERNELBASE */
	blt	cr7,0f			/* user or kernel? */
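
	/* The region in r9 - the top four bits of the EA - selects the
	 * address space: values below 0xc are user addresses, 0xc is the
	 * kernel linear mapping (KERNELBASE = 0xC000000000000000), and the
	 * regions above it hold the vmalloc/ioremap space, which the bne
	 * below splits off from the linear case.
	 */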

	/* kernel address: proto-VSID = ESID */
	/* WARNING - MAGIC: we don't use the VSID 0xfffffffff, but
	 * this code will generate the proto-VSID 0xfffffffff for the
	 * top segment.  That's ok, the scramble below will translate
	 * it to VSID 0, which is reserved as a bad VSID - one which
	 * will never have any pages in it.  */

	/* Check if we are hitting the linear mapping or the
	 * vmalloc/ioremap kernel space
	 */
	bne	cr7,1f

	/* Linear mapping encoding bits, the "li" instruction below will
	 * be patched by the kernel at boot
	 */
_GLOBAL(slb_miss_kernel_load_linear)
	li	r11,0
	b	slb_finish_load

1:	/* vmalloc/ioremap mapping encoding bits, the "li" instruction below
	 * will be patched by the kernel at boot
	 */
_GLOBAL(slb_miss_kernel_load_virtual)
	li	r11,0
	b	slb_finish_load

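	/* Both li instructions above deliberately load 0: at boot the
	 * kernel rewrites their immediate fields with the real VSID flags
	 * (SLB_VSID_KERNEL plus the page size encoding chosen for the
	 * linear and vmalloc regions respectively), so this hot path never
	 * has to look the encodings up - see the SLB setup code in slb.c.
	 */
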
0:	/* user address: proto-VSID = context << 15 | ESID. First check
	 * if the address is within the boundaries of the user region
	 */
	srdi.	r9,r10,USER_ESID_BITS
	bne-	8f			/* invalid ea bits set */

	/* Figure out if the segment contains huge pages */
#ifdef CONFIG_HUGETLB_PAGE
BEGIN_FTR_SECTION
	b	1f
END_FTR_SECTION_IFCLR(CPU_FTR_16M_PAGE)
	lhz	r9,PACAHIGHHTLBAREAS(r13)
	srdi	r11,r10,(HTLB_AREA_SHIFT-SID_SHIFT)
	srd	r9,r9,r11
	lhz	r11,PACALOWHTLBAREAS(r13)
	srd	r11,r11,r10
	or.	r9,r9,r11
	beq	1f
_GLOBAL(slb_miss_user_load_huge)
	li	r11,0
	b	2f
1:
#endif /* CONFIG_HUGETLB_PAGE */
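
	/* PACALOWHTLBAREAS and PACAHIGHHTLBAREAS above are bitmaps of the
	 * ranges reserved for huge pages: each bit of the low bitmap covers
	 * one 1 << SID_SHIFT (256MB) segment at the bottom of the user
	 * address space, each bit of the high bitmap one larger
	 * 1 << HTLB_AREA_SHIFT area. If the faulting segment lies in one of
	 * these areas we go through slb_miss_user_load_huge, whose li is
	 * likewise patched at boot with the huge page encoding.
	 */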

_GLOBAL(slb_miss_user_load_normal)
	li	r11,0

2:
	ld	r9,PACACONTEXTID(r13)
	rldimi	r10,r9,USER_ESID_BITS,0
	b	slb_finish_load
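
	/* The rldimi above keeps the low USER_ESID_BITS of r10 (the ESID)
	 * and inserts the context id above them, forming the proto-VSID:
	 *	r10 = (context << USER_ESID_BITS) | esid
	 */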

8:	/* invalid EA */
	li	r10,0			/* BAD_VSID */
	li	r11,SLB_VSID_USER	/* flags don't much matter */
	b	slb_finish_load

#ifdef __DISABLED__

/* void slb_allocate_user(unsigned long ea);
 *
 * Create an SLB entry for the given EA (user or kernel).
 * r3 = faulting address, r13 = PACA
 * r9, r10, r11 are clobbered by this function
 * No other registers are examined or changed.
 *
 * It is called with translation enabled in order to be able to walk the
 * page tables. This is not currently used.
 */
_GLOBAL(slb_allocate_user)
	/* r3 = faulting address */
	srdi	r10,r3,28		/* get esid */

	crset	4*cr7+lt		/* set "user" flag for later */

	/* check if we fit in the range covered by the pagetables */
	srdi.	r9,r3,PGTABLE_EADDR_SIZE
	crnot	4*cr0+eq,4*cr0+eq
	beqlr
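
	/* Register conventions shared with slb_finish_load: cr7.lt set
	 * above flags this as a user address, and cr0.eq carries the return
	 * status. The srdi./crnot pair leaves cr0.eq set when the EA lies
	 * beyond the range the page tables cover, so the beqlr above
	 * returns "failure", while the success paths clear cr0.eq (see the
	 * crclr in slb_finish_load).
	 */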

	/* now we need to get to the page tables in order to get the page
	 * size encoding from the PMD. In the future, we'll be able to deal
	 * with 1T segments too by getting the encoding from the PGD instead
	 */
	ld	r9,PACAPGDIR(r13)
	cmpldi	cr0,r9,0
	beqlr
	rlwinm	r11,r10,8,25,28
	ldx	r9,r9,r11		/* get pgd_t */
	cmpldi	cr0,r9,0
	beqlr
	rlwinm	r11,r10,3,17,28
	ldx	r9,r9,r11		/* get pmd_t */
	cmpldi	cr0,r9,0
	beqlr

	/* build vsid flags */
	andi.	r11,r9,SLB_VSID_LLP
	ori	r11,r11,SLB_VSID_USER

	/* get context to calculate proto-VSID */
	ld	r9,PACACONTEXTID(r13)
	rldimi	r10,r9,USER_ESID_BITS,0

	/* fall through to slb_finish_load */

#endif /* __DISABLED__ */


/*
 * Finish loading of an SLB entry and return
 *
 * r3 = EA, r10 = proto-VSID, r11 = flags, clobbers r9, cr7 = <>KERNELBASE
 */
slb_finish_load:
	ASM_VSID_SCRAMBLE(r10,r9)
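	/* The scramble is a multiplicative hash, roughly:
	 *	vsid = (proto_vsid * VSID_MULTIPLIER) % VSID_MODULUS
	 * (see ASM_VSID_SCRAMBLE in asm/mmu.h). It spreads consecutive
	 * ESIDs throughout the VSID space, and sends the reserved
	 * proto-VSID 0xfffffffff (== VSID_MODULUS) to the bad VSID 0
	 * mentioned above.
	 */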
	rldimi	r11,r10,SLB_VSID_SHIFT,16	/* combine VSID and flags */

	/* r3 = EA, r11 = VSID data */
	/*
	 * Find a slot, round robin. Previously we tried to find a
	 * free slot first but that took too long. Unfortunately we
	 * don't have any LRU information to help us choose a slot.
	 */
#ifdef CONFIG_PPC_ISERIES
	/*
	 * On iSeries, the "bolted" stack segment can be cast out on
	 * shared processor switch so we need to check for a miss on
	 * it and restore it to the right slot.
	 */
	ld	r9,PACAKSAVE(r13)
	clrrdi	r9,r9,28
	clrrdi	r3,r3,28
	li	r10,SLB_NUM_BOLTED-1	/* Stack goes in last bolted slot */
	cmpld	r9,r3
	beq	3f
#endif /* CONFIG_PPC_ISERIES */

	ld	r10,PACASTABRR(r13)
	addi	r10,r10,1
	/* use a cpu feature mask if we ever change our slb size */
	cmpldi	r10,SLB_NUM_ENTRIES

	blt+	4f
	li	r10,SLB_NUM_BOLTED

4:
	std	r10,PACASTABRR(r13)
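
	/* PACASTABRR is the round-robin victim pointer: it advances by one
	 * on each miss and, on reaching SLB_NUM_ENTRIES, wraps back to
	 * SLB_NUM_BOLTED rather than 0, so the bolted entries - the
	 * segments that must always be present - are never cast out.
	 */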

3:
	rldimi	r3,r10,0,36		/* r3 = EA[0:35] | entry */
	oris	r10,r3,SLB_ESID_V@h	/* r10 = r3 | SLB_ESID_V */

	/* r10 = ESID data, r11 = VSID data */

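	/* slbmte expects the VSID half of the entry (VSID plus protection
	 * and page size flags) in RS and the ESID half (ESID, valid bit and
	 * slot index) in RB - hence slbmte r11,r10 below.
	 */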
	/*
	 * No need for an isync before or after this slbmte. The exception
	 * we enter with and the rfid we exit with are context synchronizing.
	 */
	slbmte	r11,r10

	/* we're done for kernel addresses */
	crclr	4*cr0+eq		/* set result to "success" */
	bgelr	cr7

	/* Update the slb cache */
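	/* The cache records which user ESIDs were inserted so the context
	 * switch code can slbie exactly those entries; once more than
	 * SLB_CACHE_ENTRIES have been touched, the out-of-range pointer
	 * stored below makes it flush the whole SLB instead (see
	 * switch_slb() in slb.c).
	 */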
	lhz	r3,PACASLBCACHEPTR(r13)	/* offset = paca->slb_cache_ptr */
	cmpldi	r3,SLB_CACHE_ENTRIES
	bge	1f

	/* still room in the slb cache */
	sldi	r11,r3,1		/* r11 = offset * sizeof(u16) */
	rldicl	r10,r10,36,28		/* get low 16 bits of the ESID */
	add	r11,r11,r13		/* r11 = (u16 *)paca + offset */
	sth	r10,PACASLBCACHE(r11)	/* paca->slb_cache[offset] = esid */
	addi	r3,r3,1			/* offset++ */
	b	2f
1:	/* offset >= SLB_CACHE_ENTRIES */
	li	r3,SLB_CACHE_ENTRIES+1
2:
	sth	r3,PACASLBCACHEPTR(r13)	/* paca->slb_cache_ptr = offset */
	crclr	4*cr0+eq		/* set result to "success" */
	blr