/*
 * arch/sh/mm/cache-sh4.c
 *
 * Copyright (C) 1999, 2000, 2002 Niibe Yutaka
 * Copyright (C) 2001 - 2007 Paul Mundt
 * Copyright (C) 2003 Richard Curnow
 * Copyright (c) 2007 STMicroelectronics (R&D) Ltd.
 *
 * This file is subject to the terms and conditions of the GNU General Public
 * License. See the file "COPYING" in the main directory of this archive
 * for more details.
 */
#include <linux/init.h>
#include <linux/mm.h>
#include <linux/io.h>
#include <linux/mutex.h>
#include <linux/fs.h>
#include <asm/mmu_context.h>
#include <asm/cacheflush.h>

/*
 * The maximum number of pages we support up to when doing ranged dcache
 * flushing. Anything exceeding this will simply flush the dcache in its
 * entirety.
 */
#define MAX_DCACHE_PAGES	64	/* XXX: Tune for ways */
#define MAX_ICACHE_PAGES	32

static void __flush_dcache_segment_1way(unsigned long start,
					unsigned long extent);
static void __flush_dcache_segment_2way(unsigned long start,
					unsigned long extent);
static void __flush_dcache_segment_4way(unsigned long start,
					unsigned long extent);

static void __flush_cache_4096(unsigned long addr, unsigned long phys,
			       unsigned long exec_offset);

/*
 * This is initialised here to ensure that it is not placed in the BSS. If
 * that were to happen, note that cache_init gets called before the BSS is
 * cleared, so this would get nulled out which would be hopeless.
 */
static void (*__flush_dcache_segment_fn)(unsigned long, unsigned long) =
	(void (*)(unsigned long, unsigned long))0xdeadbeef;

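/*
 * A D-cache way on SH-4 can span several page-sized "colours": the index
 * bits above PAGE_SHIFT come from the virtual address, so two mappings of
 * the same physical page may land in different cache lines. alias_mask
 * selects those bits and n_aliases counts the colours per way (0 when a
 * way fits inside a single page and aliasing cannot occur).
 */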
static void compute_alias(struct cache_info *c)
{
	c->alias_mask = ((c->sets - 1) << c->entry_shift) & ~(PAGE_SIZE - 1);
	c->n_aliases = c->alias_mask ? (c->alias_mask >> PAGE_SHIFT) + 1 : 0;
}

static void __init emit_cache_params(void)
{
	printk("PVR=%08x CVR=%08x PRR=%08x\n",
		ctrl_inl(CCN_PVR),
		ctrl_inl(CCN_CVR),
		ctrl_inl(CCN_PRR));
	printk("I-cache : n_ways=%d n_sets=%d way_incr=%d\n",
		boot_cpu_data.icache.ways,
		boot_cpu_data.icache.sets,
		boot_cpu_data.icache.way_incr);
	printk("I-cache : entry_mask=0x%08x alias_mask=0x%08x n_aliases=%d\n",
		boot_cpu_data.icache.entry_mask,
		boot_cpu_data.icache.alias_mask,
		boot_cpu_data.icache.n_aliases);
	printk("D-cache : n_ways=%d n_sets=%d way_incr=%d\n",
		boot_cpu_data.dcache.ways,
		boot_cpu_data.dcache.sets,
		boot_cpu_data.dcache.way_incr);
	printk("D-cache : entry_mask=0x%08x alias_mask=0x%08x n_aliases=%d\n",
		boot_cpu_data.dcache.entry_mask,
		boot_cpu_data.dcache.alias_mask,
		boot_cpu_data.dcache.n_aliases);

	/*
	 * Emit Secondary Cache parameters if the CPU has a probed L2.
	 */
	if (boot_cpu_data.flags & CPU_HAS_L2_CACHE) {
		printk("S-cache : n_ways=%d n_sets=%d way_incr=%d\n",
			boot_cpu_data.scache.ways,
			boot_cpu_data.scache.sets,
			boot_cpu_data.scache.way_incr);
		printk("S-cache : entry_mask=0x%08x alias_mask=0x%08x n_aliases=%d\n",
			boot_cpu_data.scache.entry_mask,
			boot_cpu_data.scache.alias_mask,
			boot_cpu_data.scache.n_aliases);
	}

	if (!__flush_dcache_segment_fn)
		panic("unknown number of cache ways\n");
}

/*
 * SH-4 has virtually indexed and physically tagged cache.
 */
void __init p3_cache_init(void)
{
	compute_alias(&boot_cpu_data.icache);
	compute_alias(&boot_cpu_data.dcache);
	compute_alias(&boot_cpu_data.scache);

	switch (boot_cpu_data.dcache.ways) {
	case 1:
		__flush_dcache_segment_fn = __flush_dcache_segment_1way;
		break;
	case 2:
		__flush_dcache_segment_fn = __flush_dcache_segment_2way;
		break;
	case 4:
		__flush_dcache_segment_fn = __flush_dcache_segment_4way;
		break;
	default:
		__flush_dcache_segment_fn = NULL;
		break;
	}

	emit_cache_params();
}

/*
 * Write back the range of D-cache, and purge the I-cache.
 *
 * Called from kernel/module.c:sys_init_module, the a.out loader, signal
 * handler setup and the kprobes code.
 */
void flush_icache_range(unsigned long start, unsigned long end)
{
	unsigned long icacheaddr;
	unsigned long flags, v;
	int i;

	/* If there are too many pages then just blow the caches */
	if (((end - start) >> PAGE_SHIFT) >= MAX_ICACHE_PAGES) {
		flush_cache_all();
	} else {
		/* selectively flush d-cache then invalidate the i-cache */
		/* this is inefficient, so only use for small ranges */
		start &= ~(L1_CACHE_BYTES-1);
		end += L1_CACHE_BYTES-1;
		end &= ~(L1_CACHE_BYTES-1);

		local_irq_save(flags);
		jump_to_uncached();

		for (v = start; v < end; v += L1_CACHE_BYTES) {
			asm volatile("ocbwb %0"
				     : /* no output */
				     : "m" (__m(v)));

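			/*
			 * The I-cache address array is memory mapped: writing
			 * 0 to the entry for this index in every way clears
			 * the valid bit without needing to know which way, if
			 * any, actually holds the line.
			 */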
			icacheaddr = CACHE_IC_ADDRESS_ARRAY | (
					v & cpu_data->icache.entry_mask);

			for (i = 0; i < cpu_data->icache.ways;
				i++, icacheaddr += cpu_data->icache.way_incr)
					/* Clear i-cache line valid-bit */
					ctrl_outl(0, icacheaddr);
		}

		back_to_cached();
		local_irq_restore(flags);
	}
}

static inline void flush_cache_4096(unsigned long start,
				    unsigned long phys)
{
	unsigned long flags, exec_offset = 0;

	/*
	 * All types of SH-4 require PC to be in P2 to operate on the I-cache.
	 * Some types of SH-4 require PC to be in P2 to operate on the D-cache.
	 */
	if ((boot_cpu_data.flags & CPU_HAS_P2_FLUSH_BUG) ||
	    (start < CACHE_OC_ADDRESS_ARRAY))
		exec_offset = 0x20000000;

	local_irq_save(flags);
	__flush_cache_4096(start | SH_CACHE_ASSOC,
			   P1SEGADDR(phys), exec_offset);
	local_irq_restore(flags);
}

/*
 * Write back & invalidate the D-cache of the page.
 * (To avoid "alias" issues)
 */
void flush_dcache_page(struct page *page)
{
	struct address_space *mapping = page_mapping(page);

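	/*
	 * If the page currently has no user mappings, defer the expensive
	 * flush: just record that the D-cache is dirty for this page via
	 * PG_dcache_dirty and let it be dealt with when a user mapping is
	 * actually established.
	 */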
#ifndef CONFIG_SMP
	if (mapping && !mapping_mapped(mapping))
		set_bit(PG_dcache_dirty, &page->flags);
	else
#endif
	{
		unsigned long phys = PHYSADDR(page_address(page));
		unsigned long addr = CACHE_OC_ADDRESS_ARRAY;
		int i, n;

		/* Flush each D-cache colour that could hold this page */
		n = boot_cpu_data.dcache.n_aliases;
		for (i = 0; i < n; i++, addr += 4096)
			flush_cache_4096(addr, phys);
	}

	wmb();
}

/* TODO: Selective icache invalidation through IC address array.. */
static void __uses_jump_to_uncached flush_icache_all(void)
{
	unsigned long flags, ccr;

	local_irq_save(flags);
	jump_to_uncached();

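	/*
	 * Setting the ICI bit in CCR invalidates the entire I-cache in one
	 * operation; CCR may only be written while executing from the
	 * uncached P2 region, hence the jump_to_uncached() above.
	 */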
	/* Flush I-cache */
	ccr = ctrl_inl(CCR);
	ccr |= CCR_CACHE_ICI;
	ctrl_outl(ccr, CCR);

	/*
	 * back_to_cached() will take care of the barrier for us, don't add
	 * another one!
	 */

	back_to_cached();
	local_irq_restore(flags);
}

void flush_dcache_all(void)
{
	(*__flush_dcache_segment_fn)(0UL, boot_cpu_data.dcache.way_size);
	wmb();
}

void flush_cache_all(void)
{
	flush_dcache_all();
	flush_icache_all();
}

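/*
 * Walk the page tables for [start, end) and record in the bitmask 'd'
 * which D-cache colours could hold stale aliases (one bit for the colour
 * of the virtual address and one for the colour of the physical address
 * whenever the two disagree). Each colour that was hit is then flushed a
 * page-sized segment at a time; once every colour is marked we can stop
 * walking early.
 */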
static void __flush_cache_mm(struct mm_struct *mm, unsigned long start,
			     unsigned long end)
{
	unsigned long d = 0, p = start & PAGE_MASK;
	unsigned long alias_mask = boot_cpu_data.dcache.alias_mask;
	unsigned long n_aliases = boot_cpu_data.dcache.n_aliases;
	unsigned long select_bit;
	unsigned long all_aliases_mask;
	unsigned long addr_offset;
	pgd_t *dir;
	pmd_t *pmd;
	pud_t *pud;
	pte_t *pte;
	int i;

	dir = pgd_offset(mm, p);
	pud = pud_offset(dir, p);
	pmd = pmd_offset(pud, p);
	end = PAGE_ALIGN(end);

	all_aliases_mask = (1 << n_aliases) - 1;

	do {
		if (pmd_none(*pmd) || unlikely(pmd_bad(*pmd))) {
			p &= PMD_MASK;
			p += PMD_SIZE;
			pmd++;

			continue;
		}

		pte = pte_offset_kernel(pmd, p);

		do {
			unsigned long phys;
			pte_t entry = *pte;

			if (!(pte_val(entry) & _PAGE_PRESENT)) {
				pte++;
				p += PAGE_SIZE;
				continue;
			}

			phys = pte_val(entry) & PTE_PHYS_MASK;

			if ((p ^ phys) & alias_mask) {
				d |= 1 << ((p & alias_mask) >> PAGE_SHIFT);
				d |= 1 << ((phys & alias_mask) >> PAGE_SHIFT);

				if (d == all_aliases_mask)
					goto loop_exit;
			}

			pte++;
			p += PAGE_SIZE;
		} while (p < end && ((unsigned long)pte & ~PAGE_MASK));
		pmd++;
	} while (p < end);

loop_exit:
	addr_offset = 0;
	select_bit = 1;

	for (i = 0; i < n_aliases; i++) {
		if (d & select_bit) {
			(*__flush_dcache_segment_fn)(addr_offset, PAGE_SIZE);
			wmb();
		}

		select_bit <<= 1;
		addr_offset += PAGE_SIZE;
	}
}

/*
 * Note: (RPC) since the caches are physically tagged, the only point
 * of flush_cache_mm for SH-4 is to get rid of aliases from the
 * D-cache. The assumption elsewhere, e.g. flush_cache_range, is that
 * lines can stay resident so long as the virtual address they were
 * accessed with (hence cache set) is in accord with the physical
 * address (i.e. tag). It's no different here. So I reckon we don't
 * need to flush the I-cache, since aliases don't matter for that. We
 * should try that.
 *
 * Caller takes mm->mmap_sem.
 */
void flush_cache_mm(struct mm_struct *mm)
{
	/*
	 * If cache is only 4k-per-way, there are never any 'aliases'. Since
	 * the cache is physically tagged, the data can just be left in there.
	 */
	if (boot_cpu_data.dcache.n_aliases == 0)
		return;

	/*
	 * Don't bother groveling around the dcache for the VMA ranges
	 * if there are too many PTEs to make it worthwhile.
	 */
	if (mm->nr_ptes >= MAX_DCACHE_PAGES)
		flush_dcache_all();
	else {
		struct vm_area_struct *vma;

		/*
		 * In this case there are reasonably sized ranges to flush,
		 * iterate through the VMA list and take care of any aliases.
		 */
		for (vma = mm->mmap; vma; vma = vma->vm_next)
			__flush_cache_mm(mm, vma->vm_start, vma->vm_end);
	}

	/* Only touch the icache if one of the VMAs has VM_EXEC set. */
	if (mm->exec_vm)
		flush_icache_all();
}

/*
 * Write back and invalidate I/D-caches for the page.
 *
 * ADDR: Virtual Address (U0 address)
 * PFN: Physical page number
 */
void flush_cache_page(struct vm_area_struct *vma, unsigned long address,
		      unsigned long pfn)
{
	unsigned long phys = pfn << PAGE_SHIFT;
	unsigned int alias_mask;

	alias_mask = boot_cpu_data.dcache.alias_mask;

	/* We only need to flush the D-cache when we have aliases */
	if ((address ^ phys) & alias_mask) {
		/* Loop 4K of the D-cache */
		flush_cache_4096(
			CACHE_OC_ADDRESS_ARRAY | (address & alias_mask),
			phys);
		/* Loop another 4K of the D-cache */
		flush_cache_4096(
			CACHE_OC_ADDRESS_ARRAY | (phys & alias_mask),
			phys);
	}

	alias_mask = boot_cpu_data.icache.alias_mask;
	if (vma->vm_flags & VM_EXEC) {
		/*
		 * Evict entries from the portion of the cache from which code
		 * may have been executed at this address (virtual). There's
		 * no need to evict from the portion corresponding to the
		 * physical address as for the D-cache, because we know the
		 * kernel has never executed the code through its identity
		 * translation.
		 */
		flush_cache_4096(
			CACHE_IC_ADDRESS_ARRAY | (address & alias_mask),
			phys);
	}
}

/*
 * Write back and invalidate D-caches.
 *
 * START, END: Virtual Address (U0 address)
 *
 * NOTE: We need to flush the _physical_ page entry.
 * Flushing the cache lines for U0 only isn't enough.
 * We need to flush for P1 too, which may contain aliases.
 */
void flush_cache_range(struct vm_area_struct *vma, unsigned long start,
		       unsigned long end)
{
	/*
	 * If cache is only 4k-per-way, there are never any 'aliases'. Since
	 * the cache is physically tagged, the data can just be left in there.
	 */
	if (boot_cpu_data.dcache.n_aliases == 0)
		return;

	/*
	 * Don't bother with the lookup and alias check if we have a
	 * wide range to cover, just blow away the dcache in its
	 * entirety instead. -- PFM.
	 */
	if (((end - start) >> PAGE_SHIFT) >= MAX_DCACHE_PAGES)
		flush_dcache_all();
	else
		__flush_cache_mm(vma->vm_mm, start, end);

	if (vma->vm_flags & VM_EXEC) {
		/*
		 * TODO: Is this required??? Need to look at how I-cache
		 * coherency is assured when new programs are loaded to see if
		 * this matters.
		 */
		flush_icache_all();
	}
}

/*
 * flush_icache_user_range
 * @vma: VMA of the process
 * @page: page
 * @addr: U0 address
 * @len: length of the range (< page size)
 */
void flush_icache_user_range(struct vm_area_struct *vma,
			     struct page *page, unsigned long addr, int len)
{
	flush_cache_page(vma, addr, page_to_pfn(page));
	mb();
}

/**
 * __flush_cache_4096
 *
 * @addr: address in memory mapped cache array
 * @phys: P1 address to flush (has to match tags if addr has 'A' bit
 *        set i.e. associative write)
 * @exec_offset: set to 0x20000000 if flush has to be executed from P2
 *               region else 0x0
 *
 * The offset into the cache array implied by 'addr' selects the
 * 'colour' of the virtual address range that will be flushed. The
 * operation (purge/write-back) is selected by the lower 2 bits of
 * 'phys'.
 */
static void __flush_cache_4096(unsigned long addr, unsigned long phys,
			       unsigned long exec_offset)
{
	int way_count;
	unsigned long base_addr = addr;
	struct cache_info *dcache;
	unsigned long way_incr;
	unsigned long a, ea, p;
	unsigned long temp_pc;

	dcache = &boot_cpu_data.dcache;
	/* Write this way for better assembly. */
	way_count = dcache->ways;
	way_incr = dcache->way_incr;

	/*
	 * Apply exec_offset (i.e. branch to P2 if required.).
	 *
	 * FIXME:
	 *
	 * If I write "=r" for the (temp_pc), it puts this in r6 hence
	 * trashing exec_offset before it's been added on - why? Hence
	 * "=&r" as a 'workaround'
	 */
	asm volatile("mov.l 1f, %0\n\t"
		     "add   %1, %0\n\t"
		     "jmp   @%0\n\t"
		     "nop\n\t"
		     ".balign 4\n\t"
		     "1:  .long 2f\n\t"
		     "2:\n" : "=&r" (temp_pc) : "r" (exec_offset));

	/*
	 * We know there will be >=1 iteration, so write as do-while to avoid
	 * pointless head-of-loop check for 0 iterations.
	 */
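	/*
	 * Each store below goes to the memory-mapped OC address array in
	 * associative mode (the caller set SH_CACHE_ASSOC in 'addr'), so a
	 * line is only written back and/or invalidated when its tag matches
	 * 'phys'; the low bits of 'phys' select the operation, as noted in
	 * the comment above the function.
	 */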
	do {
		ea = base_addr + PAGE_SIZE;
		a = base_addr;
		p = phys;

		do {
			*(volatile unsigned long *)a = p;
			/*
			 * Next line: intentionally not p+32, saves an add, p
			 * will do since only the cache tag bits need to
			 * match.
			 */
			*(volatile unsigned long *)(a+32) = p;
			a += 64;
			p += 64;
		} while (a < ea);

		base_addr += way_incr;
	} while (--way_count != 0);
}

/*
 * Break the 1, 2 and 4 way variants of this out into separate functions to
 * avoid nearly all the overhead of having the conditional stuff in the function
 * bodies (+ the 1 and 2 way cases avoid saving any registers too).
 */
static void __flush_dcache_segment_1way(unsigned long start,
					unsigned long extent_per_way)
{
	unsigned long orig_sr, sr_with_bl;
	unsigned long base_addr;
	unsigned long way_incr, linesz, way_size;
	struct cache_info *dcache;
	register unsigned long a0, a0e;

	asm volatile("stc sr, %0" : "=r" (orig_sr));
	sr_with_bl = orig_sr | (1<<28);
	base_addr = ((unsigned long)&empty_zero_page[0]);

	/*
	 * The previous code aligned base_addr to 16k, i.e. the way_size of all
	 * existing SH-4 D-caches. Whilst I don't see a need to have this
	 * aligned to any better than the cache line size (which it will be
	 * anyway by construction), let's align it to at least the way_size of
	 * any existing or conceivable SH-4 D-cache. -- RPC
	 */
	base_addr = ((base_addr >> 16) << 16);
	base_addr |= start;

	dcache = &boot_cpu_data.dcache;
	linesz = dcache->linesz;
	way_incr = dcache->way_incr;
	way_size = dcache->way_size;

	a0 = base_addr;
	a0e = base_addr + extent_per_way;
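	/*
	 * The movca.l allocates a D-cache line for the target address without
	 * fetching it from memory, forcing write-back of whatever dirty line
	 * previously occupied that slot; the ocbi then invalidates the freshly
	 * allocated line so the bogus r0 data never reaches memory. The BL
	 * bit is set in SR around each group so that no interrupt can touch
	 * the cache between the allocate and the invalidate.
	 */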
	do {
		asm volatile("ldc %0, sr" : : "r" (sr_with_bl));
		asm volatile("movca.l r0, @%0\n\t"
			     "ocbi @%0" : : "r" (a0));
		a0 += linesz;
		asm volatile("movca.l r0, @%0\n\t"
			     "ocbi @%0" : : "r" (a0));
		a0 += linesz;
		asm volatile("movca.l r0, @%0\n\t"
			     "ocbi @%0" : : "r" (a0));
		a0 += linesz;
		asm volatile("movca.l r0, @%0\n\t"
			     "ocbi @%0" : : "r" (a0));
		asm volatile("ldc %0, sr" : : "r" (orig_sr));
		a0 += linesz;
	} while (a0 < a0e);
}

static void __flush_dcache_segment_2way(unsigned long start,
					unsigned long extent_per_way)
{
	unsigned long orig_sr, sr_with_bl;
	unsigned long base_addr;
	unsigned long way_incr, linesz, way_size;
	struct cache_info *dcache;
	register unsigned long a0, a1, a0e;

	asm volatile("stc sr, %0" : "=r" (orig_sr));
	sr_with_bl = orig_sr | (1<<28);
	base_addr = ((unsigned long)&empty_zero_page[0]);

	/* See comment under 1-way above */
	base_addr = ((base_addr >> 16) << 16);
	base_addr |= start;

	dcache = &boot_cpu_data.dcache;
	linesz = dcache->linesz;
	way_incr = dcache->way_incr;
	way_size = dcache->way_size;

	a0 = base_addr;
	a1 = a0 + way_incr;
	a0e = base_addr + extent_per_way;
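	/* Same movca.l/ocbi sequence as the 1-way variant, two ways per pass. */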
	do {
		asm volatile("ldc %0, sr" : : "r" (sr_with_bl));
		asm volatile("movca.l r0, @%0\n\t"
			     "movca.l r0, @%1\n\t"
			     "ocbi @%0\n\t"
			     "ocbi @%1" : :
			     "r" (a0), "r" (a1));
		a0 += linesz;
		a1 += linesz;
		asm volatile("movca.l r0, @%0\n\t"
			     "movca.l r0, @%1\n\t"
			     "ocbi @%0\n\t"
			     "ocbi @%1" : :
			     "r" (a0), "r" (a1));
		a0 += linesz;
		a1 += linesz;
		asm volatile("movca.l r0, @%0\n\t"
			     "movca.l r0, @%1\n\t"
			     "ocbi @%0\n\t"
			     "ocbi @%1" : :
			     "r" (a0), "r" (a1));
		a0 += linesz;
		a1 += linesz;
		asm volatile("movca.l r0, @%0\n\t"
			     "movca.l r0, @%1\n\t"
			     "ocbi @%0\n\t"
			     "ocbi @%1" : :
			     "r" (a0), "r" (a1));
		asm volatile("ldc %0, sr" : : "r" (orig_sr));
		a0 += linesz;
		a1 += linesz;
	} while (a0 < a0e);
}

static void __flush_dcache_segment_4way(unsigned long start,
					unsigned long extent_per_way)
{
	unsigned long orig_sr, sr_with_bl;
	unsigned long base_addr;
	unsigned long way_incr, linesz, way_size;
	struct cache_info *dcache;
	register unsigned long a0, a1, a2, a3, a0e;

	asm volatile("stc sr, %0" : "=r" (orig_sr));
	sr_with_bl = orig_sr | (1<<28);
	base_addr = ((unsigned long)&empty_zero_page[0]);

	/* See comment under 1-way above */
	base_addr = ((base_addr >> 16) << 16);
	base_addr |= start;

	dcache = &boot_cpu_data.dcache;
	linesz = dcache->linesz;
	way_incr = dcache->way_incr;
	way_size = dcache->way_size;

	a0 = base_addr;
	a1 = a0 + way_incr;
	a2 = a1 + way_incr;
	a3 = a2 + way_incr;
	a0e = base_addr + extent_per_way;
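	/* Same movca.l/ocbi sequence as the 1-way variant, four ways per pass. */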
	do {
		asm volatile("ldc %0, sr" : : "r" (sr_with_bl));
		asm volatile("movca.l r0, @%0\n\t"
			     "movca.l r0, @%1\n\t"
			     "movca.l r0, @%2\n\t"
			     "movca.l r0, @%3\n\t"
			     "ocbi @%0\n\t"
			     "ocbi @%1\n\t"
			     "ocbi @%2\n\t"
			     "ocbi @%3\n\t" : :
			     "r" (a0), "r" (a1), "r" (a2), "r" (a3));
		a0 += linesz;
		a1 += linesz;
		a2 += linesz;
		a3 += linesz;
		asm volatile("movca.l r0, @%0\n\t"
			     "movca.l r0, @%1\n\t"
			     "movca.l r0, @%2\n\t"
			     "movca.l r0, @%3\n\t"
			     "ocbi @%0\n\t"
			     "ocbi @%1\n\t"
			     "ocbi @%2\n\t"
			     "ocbi @%3\n\t" : :
			     "r" (a0), "r" (a1), "r" (a2), "r" (a3));
		a0 += linesz;
		a1 += linesz;
		a2 += linesz;
		a3 += linesz;
		asm volatile("movca.l r0, @%0\n\t"
			     "movca.l r0, @%1\n\t"
			     "movca.l r0, @%2\n\t"
			     "movca.l r0, @%3\n\t"
			     "ocbi @%0\n\t"
			     "ocbi @%1\n\t"
			     "ocbi @%2\n\t"
			     "ocbi @%3\n\t" : :
			     "r" (a0), "r" (a1), "r" (a2), "r" (a3));
		a0 += linesz;
		a1 += linesz;
		a2 += linesz;
		a3 += linesz;
		asm volatile("movca.l r0, @%0\n\t"
			     "movca.l r0, @%1\n\t"
			     "movca.l r0, @%2\n\t"
			     "movca.l r0, @%3\n\t"
			     "ocbi @%0\n\t"
			     "ocbi @%1\n\t"
			     "ocbi @%2\n\t"
			     "ocbi @%3\n\t" : :
			     "r" (a0), "r" (a1), "r" (a2), "r" (a3));
		asm volatile("ldc %0, sr" : : "r" (orig_sr));
		a0 += linesz;
		a1 += linesz;
		a2 += linesz;
		a3 += linesz;
	} while (a0 < a0e);
}