blob: 846b63d6f5e8b39aaa971d94ce9ec51b5dca709b [file] [log] [blame]
Linus Torvalds1da177e2005-04-16 15:20:36 -07001/*
2 * arch/sh/mm/cache-sh4.c
3 *
4 * Copyright (C) 1999, 2000, 2002 Niibe Yutaka
Paul Mundta2527102006-09-27 11:29:55 +09005 * Copyright (C) 2001, 2002, 2003, 2004, 2005 Paul Mundt
Linus Torvalds1da177e2005-04-16 15:20:36 -07006 * Copyright (C) 2003 Richard Curnow
7 *
8 * This file is subject to the terms and conditions of the GNU General Public
9 * License. See the file "COPYING" in the main directory of this archive
10 * for more details.
11 */
12
Linus Torvalds1da177e2005-04-16 15:20:36 -070013#include <linux/init.h>
14#include <linux/mman.h>
15#include <linux/mm.h>
16#include <linux/threads.h>
17#include <asm/addrspace.h>
18#include <asm/page.h>
19#include <asm/pgtable.h>
20#include <asm/processor.h>
21#include <asm/cache.h>
22#include <asm/io.h>
23#include <asm/uaccess.h>
24#include <asm/pgalloc.h>
25#include <asm/mmu_context.h>
26#include <asm/cacheflush.h>
27
/* Assembly cache-flush helpers (implemented outside this file) and their
 * multi-way C counterparts defined below. */
extern void __flush_cache_4096(unsigned long addr, unsigned long phys,
			       unsigned long exec_offset);
extern void __flush_cache_4096_all(unsigned long start);
static void __flush_cache_4096_all_ex(unsigned long start);
extern void __flush_dcache_all(void);
static void __flush_dcache_all_ex(void);

/*
 * SH-4 has virtually indexed and physically tagged cache.
 */

/* One semaphore per P3-segment mapping page set up in p3_cache_init();
 * presumably serialises users of each colour mapping — confirm against
 * the callers that take these. */
struct semaphore p3map_sem[4];
40
41void __init p3_cache_init(void)
42{
43 if (remap_area_pages(P3SEG, 0, PAGE_SIZE*4, _PAGE_CACHABLE))
44 panic("%s failed.", __FUNCTION__);
45
46 sema_init (&p3map_sem[0], 1);
47 sema_init (&p3map_sem[1], 1);
48 sema_init (&p3map_sem[2], 1);
49 sema_init (&p3map_sem[3], 1);
50}
51
52/*
53 * Write back the dirty D-caches, but not invalidate them.
54 *
55 * START: Virtual Address (U0, P1, or P3)
56 * SIZE: Size of the region.
57 */
58void __flush_wback_region(void *start, int size)
59{
60 unsigned long v;
61 unsigned long begin, end;
62
63 begin = (unsigned long)start & ~(L1_CACHE_BYTES-1);
64 end = ((unsigned long)start + size + L1_CACHE_BYTES-1)
65 & ~(L1_CACHE_BYTES-1);
66 for (v = begin; v < end; v+=L1_CACHE_BYTES) {
67 asm volatile("ocbwb %0"
68 : /* no output */
69 : "m" (__m(v)));
70 }
71}
72
73/*
74 * Write back the dirty D-caches and invalidate them.
75 *
76 * START: Virtual Address (U0, P1, or P3)
77 * SIZE: Size of the region.
78 */
79void __flush_purge_region(void *start, int size)
80{
81 unsigned long v;
82 unsigned long begin, end;
83
84 begin = (unsigned long)start & ~(L1_CACHE_BYTES-1);
85 end = ((unsigned long)start + size + L1_CACHE_BYTES-1)
86 & ~(L1_CACHE_BYTES-1);
87 for (v = begin; v < end; v+=L1_CACHE_BYTES) {
88 asm volatile("ocbp %0"
89 : /* no output */
90 : "m" (__m(v)));
91 }
92}
93
94
95/*
96 * No write back please
97 */
98void __flush_invalidate_region(void *start, int size)
99{
100 unsigned long v;
101 unsigned long begin, end;
102
103 begin = (unsigned long)start & ~(L1_CACHE_BYTES-1);
104 end = ((unsigned long)start + size + L1_CACHE_BYTES-1)
105 & ~(L1_CACHE_BYTES-1);
106 for (v = begin; v < end; v+=L1_CACHE_BYTES) {
107 asm volatile("ocbi %0"
108 : /* no output */
109 : "m" (__m(v)));
110 }
111}
112
113static void __flush_dcache_all_ex(void)
114{
115 unsigned long addr, end_addr, entry_offset;
116
Paul Mundta2527102006-09-27 11:29:55 +0900117 end_addr = CACHE_OC_ADDRESS_ARRAY +
118 (cpu_data->dcache.sets << cpu_data->dcache.entry_shift) *
119 cpu_data->dcache.ways;
120
Linus Torvalds1da177e2005-04-16 15:20:36 -0700121 entry_offset = 1 << cpu_data->dcache.entry_shift;
Paul Mundta2527102006-09-27 11:29:55 +0900122 for (addr = CACHE_OC_ADDRESS_ARRAY;
123 addr < end_addr;
124 addr += entry_offset) {
Linus Torvalds1da177e2005-04-16 15:20:36 -0700125 ctrl_outl(0, addr);
126 }
127}
128
129static void __flush_cache_4096_all_ex(unsigned long start)
130{
131 unsigned long addr, entry_offset;
132 int i;
133
134 entry_offset = 1 << cpu_data->dcache.entry_shift;
Paul Mundta2527102006-09-27 11:29:55 +0900135 for (i = 0; i < cpu_data->dcache.ways;
136 i++, start += cpu_data->dcache.way_incr) {
Linus Torvalds1da177e2005-04-16 15:20:36 -0700137 for (addr = CACHE_OC_ADDRESS_ARRAY + start;
138 addr < CACHE_OC_ADDRESS_ARRAY + 4096 + start;
139 addr += entry_offset) {
140 ctrl_outl(0, addr);
141 }
142 }
143}
144
145void flush_cache_4096_all(unsigned long start)
146{
147 if (cpu_data->dcache.ways == 1)
148 __flush_cache_4096_all(start);
149 else
150 __flush_cache_4096_all_ex(start);
151}
152
/*
 * Write back the range of D-cache, and purge the I-cache.
 *
 * Called from kernel/module.c:sys_init_module and routine for a.out format.
 *
 * NOTE(review): the range arguments are ignored and the whole cache is
 * flushed -- correct but heavy-handed for small ranges.
 */
void flush_icache_range(unsigned long start, unsigned long end)
{
	flush_cache_all();
}
162
/*
 * Write back the D-cache and purge the I-cache for signal trampoline.
 * .. which happens to be the same behavior as flush_icache_range().
 * So, we simply flush out a line.
 */
void flush_cache_sigtramp(unsigned long addr)
{
	unsigned long v, index;
	unsigned long flags;
	int i;

	/* Write back the D-cache line holding the trampoline. */
	v = addr & ~(L1_CACHE_BYTES-1);
	asm volatile("ocbwb %0"
		     : /* no output */
		     : "m" (__m(v)));

	/* Corresponding entry in the I-cache address array. */
	index = CACHE_IC_ADDRESS_ARRAY | (v & cpu_data->icache.entry_mask);

	/* The I-cache address array is written from P2 with interrupts
	 * off (see jump_to_P2()/back_to_P1() bracket below). */
	local_irq_save(flags);
	jump_to_P2();
	/* Knock out the line in every way, since we don't know which
	 * way (if any) currently holds it. */
	for (i = 0; i < cpu_data->icache.ways;
	     i++, index += cpu_data->icache.way_incr)
		ctrl_outl(0, index);	/* Clear out Valid-bit */
	back_to_P1();
	/* Ensure the array writes have completed before resuming. */
	wmb();
	local_irq_restore(flags);
}
190
/*
 * Flush one 4K window of a cache address array (START selects the
 * array and colour offset) for physical page PHYS, via the assembly
 * helper __flush_cache_4096().
 */
static inline void flush_cache_4096(unsigned long start,
				    unsigned long phys)
{
	unsigned long flags;

	/*
	 * SH7751, SH7751R, and ST40 have no restriction to handle cache.
	 * (While SH7750 must do that at P2 area.)
	 */
	if ((cpu_data->flags & CPU_HAS_P2_FLUSH_BUG)
	    || start < CACHE_OC_ADDRESS_ARRAY) {
		/* NOTE(review): 0x20000000 is passed as exec_offset,
		 * presumably rebasing the array access into the P2
		 * (uncached) segment -- confirm against the asm helper. */
		local_irq_save(flags);
		__flush_cache_4096(start | SH_CACHE_ASSOC,
				   P1SEGADDR(phys), 0x20000000);
		local_irq_restore(flags);
	} else {
		__flush_cache_4096(start | SH_CACHE_ASSOC,
				   P1SEGADDR(phys), 0);
	}
}
211
212/*
213 * Write back & invalidate the D-cache of the page.
214 * (To avoid "alias" issues)
215 */
216void flush_dcache_page(struct page *page)
217{
218 if (test_bit(PG_mapped, &page->flags)) {
219 unsigned long phys = PHYSADDR(page_address(page));
220
221 /* Loop all the D-cache */
222 flush_cache_4096(CACHE_OC_ADDRESS_ARRAY, phys);
223 flush_cache_4096(CACHE_OC_ADDRESS_ARRAY | 0x1000, phys);
224 flush_cache_4096(CACHE_OC_ADDRESS_ARRAY | 0x2000, phys);
225 flush_cache_4096(CACHE_OC_ADDRESS_ARRAY | 0x3000, phys);
226 }
Paul Mundtfdfc74f2006-09-27 14:05:52 +0900227
228 wmb();
Linus Torvalds1da177e2005-04-16 15:20:36 -0700229}
230
/*
 * Invalidate the entire I-cache by setting the ICI bit in the cache
 * control register (CCR).  The read-modify-write of CCR is done from
 * P2 (via jump_to_P2()) with interrupts disabled.
 */
static inline void flush_icache_all(void)
{
	unsigned long flags, ccr;

	local_irq_save(flags);
	jump_to_P2();

	/* Flush I-cache */
	ccr = ctrl_inl(CCR);
	ccr |= CCR_CACHE_ICI;
	ctrl_outl(ccr, CCR);

	back_to_P1();
	local_irq_restore(flags);
}
246
Paul Mundta2527102006-09-27 11:29:55 +0900247void flush_dcache_all(void)
Linus Torvalds1da177e2005-04-16 15:20:36 -0700248{
249 if (cpu_data->dcache.ways == 1)
250 __flush_dcache_all();
251 else
252 __flush_dcache_all_ex();
Paul Mundtfdfc74f2006-09-27 14:05:52 +0900253 wmb();
Paul Mundta2527102006-09-27 11:29:55 +0900254}
255
/*
 * Flush the whole D-cache (write back + invalidate) and then the
 * whole I-cache.
 */
void flush_cache_all(void)
{
	flush_dcache_all();
	flush_icache_all();
}
261
/*
 * Flush caches for an address space.  No per-mm selectivity is
 * attempted: the mm argument is ignored and everything is flushed.
 */
void flush_cache_mm(struct mm_struct *mm)
{
	flush_cache_all();
}
266
267/*
268 * Write back and invalidate I/D-caches for the page.
269 *
270 * ADDR: Virtual Address (U0 address)
271 * PFN: Physical page number
272 */
273void flush_cache_page(struct vm_area_struct *vma, unsigned long address, unsigned long pfn)
274{
275 unsigned long phys = pfn << PAGE_SHIFT;
276
277 /* We only need to flush D-cache when we have alias */
278 if ((address^phys) & CACHE_ALIAS) {
279 /* Loop 4K of the D-cache */
280 flush_cache_4096(
281 CACHE_OC_ADDRESS_ARRAY | (address & CACHE_ALIAS),
282 phys);
283 /* Loop another 4K of the D-cache */
284 flush_cache_4096(
285 CACHE_OC_ADDRESS_ARRAY | (phys & CACHE_ALIAS),
286 phys);
287 }
288
289 if (vma->vm_flags & VM_EXEC)
290 /* Loop 4K (half) of the I-cache */
291 flush_cache_4096(
292 CACHE_IC_ADDRESS_ARRAY | (address & 0x1000),
293 phys);
294}
295
/*
 * Write back and invalidate D-caches.
 *
 * START, END: Virtual Address (U0 address)
 *
 * NOTE: We need to flush the _physical_ page entry.
 * Flushing the cache lines for U0 only isn't enough.
 * We need to flush for P1 too, which may contain aliases.
 */
void flush_cache_range(struct vm_area_struct *vma, unsigned long start,
		       unsigned long end)
{
	unsigned long p = start & PAGE_MASK;
	pgd_t *dir;
	pmd_t *pmd;
	pud_t *pud;
	pte_t *pte;
	pte_t entry;
	unsigned long phys;
	unsigned long d = 0;	/* bitmask of 4K cache colours to flush */

	/*
	 * Don't bother with the lookup and alias check if we have a
	 * wide range to cover, just blow away the dcache in its
	 * entirety instead. -- PFM.
	 */
	if (((end - start) >> PAGE_SHIFT) >= 64) {
		flush_dcache_all();

		if (vma->vm_flags & VM_EXEC)
			flush_icache_all();

		return;
	}

	/* Walk the page tables for [start, end) and record, for every
	 * present page whose virtual and physical colours differ, both
	 * colours in 'd'. */
	dir = pgd_offset(vma->vm_mm, p);
	pud = pud_offset(dir, p);
	pmd = pmd_offset(pud, p);
	end = PAGE_ALIGN(end);

	do {
		if (pmd_none(*pmd) || pmd_bad(*pmd)) {
			/* Skip to the start of the next PMD-sized span. */
			p &= ~((1 << PMD_SHIFT) -1);
			p += (1 << PMD_SHIFT);
			pmd++;
			continue;
		}
		pte = pte_offset_kernel(pmd, p);
		do {
			entry = *pte;
			if ((pte_val(entry) & _PAGE_PRESENT)) {
				phys = pte_val(entry)&PTE_PHYS_MASK;
				if ((p^phys) & CACHE_ALIAS) {
					d |= 1 << ((p & CACHE_ALIAS)>>12);
					d |= 1 << ((phys & CACHE_ALIAS)>>12);
					/* All four colours marked: no point
					 * walking further. */
					if (d == 0x0f)
						goto loop_exit;
				}
			}
			pte++;
			p += PAGE_SIZE;
		/* Inner loop ends when the PTE pointer wraps to the next
		 * page-table page (i.e. the PMD entry is exhausted). */
		} while (p < end && ((unsigned long)pte & ~PAGE_MASK));
		pmd++;
	} while (p < end);
 loop_exit:
	/* Flush each colour slice that was marked above. */
	if (d & 1)
		flush_cache_4096_all(0);
	if (d & 2)
		flush_cache_4096_all(0x1000);
	if (d & 4)
		flush_cache_4096_all(0x2000);
	if (d & 8)
		flush_cache_4096_all(0x3000);
	if (vma->vm_flags & VM_EXEC)
		flush_icache_all();
}
372
/*
 * flush_icache_user_range
 * @vma: VMA of the process
 * @page: page
 * @addr: U0 address
 * @len: length of the range (< page size)
 *
 * The len argument is unused: the whole page containing addr is
 * flushed via flush_cache_page(), followed by a full memory barrier.
 */
void flush_icache_user_range(struct vm_area_struct *vma,
			     struct page *page, unsigned long addr, int len)
{
	flush_cache_page(vma, addr, page_to_pfn(page));
	mb();
}
386