blob: fa37bff306b963d526df7922519df30c662983d4 [file] [log] [blame]
Linus Torvalds1da177e2005-04-16 15:20:36 -07001/*
2 * arch/sh/mm/cache-sh7705.c
3 *
4 * Copyright (C) 1999, 2000 Niibe Yutaka
5 * Copyright (C) 2004 Alex Song
6 *
7 * This file is subject to the terms and conditions of the GNU General Public
8 * License. See the file "COPYING" in the main directory of this archive
9 * for more details.
Paul Mundt39e688a2007-03-05 19:46:47 +090010 *
Linus Torvalds1da177e2005-04-16 15:20:36 -070011 */
Linus Torvalds1da177e2005-04-16 15:20:36 -070012#include <linux/init.h>
13#include <linux/mman.h>
14#include <linux/mm.h>
Paul Mundt2277ab42009-07-22 19:20:49 +090015#include <linux/fs.h>
Linus Torvalds1da177e2005-04-16 15:20:36 -070016#include <linux/threads.h>
17#include <asm/addrspace.h>
18#include <asm/page.h>
19#include <asm/pgtable.h>
20#include <asm/processor.h>
21#include <asm/cache.h>
22#include <asm/io.h>
23#include <asm/uaccess.h>
24#include <asm/pgalloc.h>
25#include <asm/mmu_context.h>
26#include <asm/cacheflush.h>
27
Paul Mundt0f08f332006-09-27 17:03:56 +090028/*
29 * The 32KB cache on the SH7705 suffers from the same synonym problem
30 * as SH4 CPUs
31 */
/*
 * Write back every dirty line in the operand cache by walking the
 * cache address array directly.
 *
 * For each way, every line-sized slot in that way's address-array
 * window is read; an entry with both the Valid and Updated bits set
 * is flushed by writing the tag back with those bits cleared (so the
 * line is also invalidated).
 *
 * NOTE(review): callers are expected to run this uncached with IRQs
 * disabled (see flush_cache_all()) — confirm before adding new callers.
 */
static inline void cache_wback_all(void)
{
	unsigned long ways, waysize, addrstart;

	/* Geometry of the operand cache: number of ways and bytes per way. */
	ways = current_cpu_data.dcache.ways;
	waysize = current_cpu_data.dcache.sets;
	waysize <<= current_cpu_data.dcache.entry_shift;

	addrstart = CACHE_OC_ADDRESS_ARRAY;

	do {
		unsigned long addr;

		/* Step through this way one cache line at a time. */
		for (addr = addrstart;
		     addr < addrstart + waysize;
		     addr += current_cpu_data.dcache.linesz) {
			unsigned long data;
			int v = SH_CACHE_UPDATED | SH_CACHE_VALID;

			data = ctrl_inl(addr);

			/* Valid AND dirty: write back by clearing V and U. */
			if ((data & v) == v)
				ctrl_outl(data & ~v, addr);

		}

		/* Advance to the next way's address-array window. */
		addrstart += current_cpu_data.dcache.way_incr;
	} while (--ways);
}
61
/*
 * Write back the D-cache over [start, end) and purge any stale
 * instructions from the I-cache, so subsequent instruction fetches
 * see freshly written code (e.g. after module text is loaded).
 */
void flush_icache_range(unsigned long start, unsigned long end)
{
	unsigned long len = end - start;

	__flush_wback_region((void *)start, len);
}
71
/*
 * Writeback&Invalidate the D-cache of the page
 *
 * @phys: physical address of the page to flush; the SH_CACHE_VALID
 *        bit is OR'd in below so tags can be compared in one step.
 *
 * Runs uncached (the address-array accesses must not themselves be
 * cached) with interrupts disabled.
 */
static void __uses_jump_to_uncached __flush_dcache_page(unsigned long phys)
{
	unsigned long ways, waysize, addrstart;
	unsigned long flags;

	phys |= SH_CACHE_VALID;

	/*
	 * Here, phys is the physical address of the page. We check all the
	 * tags in the cache for those with the same page number as this page
	 * (by masking off the lowest 2 bits of the 19-bit tag; these bits are
	 * derived from the offset within in the 4k page). Matching valid
	 * entries are invalidated.
	 *
	 * Since 2 bits of the cache index are derived from the virtual page
	 * number, knowing this would reduce the number of cache entries to be
	 * searched by a factor of 4. However this function exists to deal with
	 * potential cache aliasing, therefore the optimisation is probably not
	 * possible.
	 */
	local_irq_save(flags);
	jump_to_uncached();

	/* Cache geometry, as in cache_wback_all(). */
	ways = current_cpu_data.dcache.ways;
	waysize = current_cpu_data.dcache.sets;
	waysize <<= current_cpu_data.dcache.entry_shift;

	addrstart = CACHE_OC_ADDRESS_ARRAY;

	do {
		unsigned long addr;

		for (addr = addrstart;
		     addr < addrstart + waysize;
		     addr += current_cpu_data.dcache.linesz) {
			unsigned long data;

			/*
			 * Mask the tag down to the page-number bits plus the
			 * Valid bit so it can be compared directly to phys.
			 */
			data = ctrl_inl(addr) & (0x1ffffC00 | SH_CACHE_VALID);
			if (data == phys) {
				/* Invalidate: clear both Valid and Updated. */
				data &= ~(SH_CACHE_VALID | SH_CACHE_UPDATED);
				ctrl_outl(data, addr);
			}
		}

		addrstart += current_cpu_data.dcache.way_incr;
	} while (--ways);

	back_to_cached();
	local_irq_restore(flags);
}
125
Linus Torvalds1da177e2005-04-16 15:20:36 -0700126/*
127 * Write back & invalidate the D-cache of the page.
128 * (To avoid "alias" issues)
129 */
130void flush_dcache_page(struct page *page)
131{
Paul Mundt2277ab42009-07-22 19:20:49 +0900132 struct address_space *mapping = page_mapping(page);
133
134 if (mapping && !mapping_mapped(mapping))
135 set_bit(PG_dcache_dirty, &page->flags);
136 else
Linus Torvalds1da177e2005-04-16 15:20:36 -0700137 __flush_dcache_page(PHYSADDR(page_address(page)));
138}
139
/*
 * Write back and invalidate the entire D-cache.
 *
 * The walk must run from an uncached mapping with interrupts off:
 * jump_to_uncached()/back_to_cached() bracket the address-array
 * accesses performed by cache_wback_all().
 */
void __uses_jump_to_uncached flush_cache_all(void)
{
	unsigned long flags;

	local_irq_save(flags);
	jump_to_uncached();

	cache_wback_all();
	back_to_cached();
	local_irq_restore(flags);
}
151
/*
 * Flush all cache entries for an address space.
 *
 * There is no way to flush by ASID/mm on this part, so the whole
 * cache is written back and invalidated instead.
 */
void flush_cache_mm(struct mm_struct *mm)
{
	/* Is there any good way? */
	/* XXX: possibly call flush_cache_range for each vm area */
	flush_cache_all();
}
158
/*
 * Write back and invalidate D-caches.
 *
 * START, END: Virtual Address (U0 address)
 *
 * NOTE: We need to flush the _physical_ page entry.
 * Flushing the cache lines for U0 only isn't enough.
 * We need to flush for P1 too, which may contain aliases.
 */
void flush_cache_range(struct vm_area_struct *vma, unsigned long start,
		       unsigned long end)
{

	/*
	 * We could call flush_cache_page for the pages of these range,
	 * but it's not efficient (scan the caches all the time...).
	 *
	 * We can't use A-bit magic, as there's the case we don't have
	 * valid entry on TLB.
	 */
	flush_cache_all();
}
181
182/*
183 * Write back and invalidate I/D-caches for the page.
184 *
185 * ADDRESS: Virtual Address (U0 address)
186 */
Paul Mundt0f08f332006-09-27 17:03:56 +0900187void flush_cache_page(struct vm_area_struct *vma, unsigned long address,
188 unsigned long pfn)
Linus Torvalds1da177e2005-04-16 15:20:36 -0700189{
190 __flush_dcache_page(pfn << PAGE_SHIFT);
191}
192
193/*
194 * This is called when a page-cache page is about to be mapped into a
195 * user process' address space. It offers an opportunity for a
196 * port to ensure d-cache/i-cache coherency if necessary.
197 *
198 * Not entirely sure why this is necessary on SH3 with 32K cache but
199 * without it we get occasional "Memory fault" when loading a program.
200 */
201void flush_icache_page(struct vm_area_struct *vma, struct page *page)
202{
203 __flush_purge_region(page_address(page), PAGE_SIZE);
204}