/*
 *  linux/arch/arm/mm/fault-armv.c
 *
 *  Copyright (C) 1995  Linus Torvalds
 *  Modifications for ARM processor (c) 1995-2002 Russell King
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#include <linux/module.h>
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/bitops.h>
#include <linux/vmalloc.h>
#include <linux/init.h>
#include <linux/pagemap.h>

#include <asm/bugs.h>
#include <asm/cacheflush.h>
#include <asm/cachetype.h>
#include <asm/pgtable.h>
#include <asm/tlbflush.h>

#include "mm.h"

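/*
 * Memory type (L_PTE_MT_*) applied to a PTE that must be kept coherent
 * with other mappings of the same page.  Bufferable by default;
 * downgraded to uncached by check_writebuffer_bugs() below if the
 * write buffer turns out to have physical address aliasing issues.
 */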
static unsigned long shared_pte_mask = L_PTE_MT_BUFFERABLE;

/*
 * We take the easy way out of this problem - we make the
 * PTE uncacheable.  However, we leave the write buffer on.
 *
 * Note that the pte lock held when calling update_mmu_cache must also
 * guard the pte (somewhere else in the same mm) that we modify here.
 * Therefore those configurations which might call adjust_pte (those
 * without CONFIG_CPU_CACHE_VIPT) cannot support split page_table_lock.
 */
static int do_adjust_pte(struct vm_area_struct *vma, unsigned long address,
	pte_t *ptep)
{
	pte_t entry = *ptep;
	int ret;

	/*
	 * If this page is present, it's actually being shared.
	 */
	ret = pte_present(entry);

	/*
	 * If this page isn't present, or is already set up to
	 * fault (i.e., is old), we can safely ignore any issues.
	 */
	if (ret && (pte_val(entry) & L_PTE_MT_MASK) != shared_pte_mask) {
		unsigned long pfn = pte_pfn(entry);
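		/*
		 * Flush any stale cache lines for the current mapping,
		 * including the outer cache, before switching the
		 * memory type and making the new PTE visible.
		 */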
		flush_cache_page(vma, address, pfn);
		outer_flush_range((pfn << PAGE_SHIFT),
				  (pfn << PAGE_SHIFT) + PAGE_SIZE);
		pte_val(entry) &= ~L_PTE_MT_MASK;
		pte_val(entry) |= shared_pte_mask;
		set_pte_at(vma->vm_mm, address, ptep, entry);
		flush_tlb_page(vma, address);
	}

	return ret;
}

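/*
 * Walk the page tables for "address" in "vma" and, if a pte mapping
 * exists, fix it up via do_adjust_pte().  Returns non-zero if the
 * pte was present.
 */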
static int adjust_pte(struct vm_area_struct *vma, unsigned long address)
{
	pgd_t *pgd;
	pmd_t *pmd;
	pte_t *pte;
	int ret;

	pgd = pgd_offset(vma->vm_mm, address);
	if (pgd_none(*pgd))
		goto no_pgd;
	if (pgd_bad(*pgd))
		goto bad_pgd;

	pmd = pmd_offset(pgd, address);
	if (pmd_none(*pmd))
		goto no_pmd;
	if (pmd_bad(*pmd))
		goto bad_pmd;

	pte = pte_offset_map(pmd, address);

	ret = do_adjust_pte(vma, address, pte);

	pte_unmap(pte);

	return ret;

bad_pgd:
	pgd_ERROR(*pgd);
	pgd_clear(pgd);
no_pgd:
	return 0;

bad_pmd:
	pmd_ERROR(*pmd);
	pmd_clear(pmd);
no_pmd:
	return 0;
}

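/*
 * Called for VIVT caches when a new pte is installed: look for other
 * shared mappings of the same page within this mm and, if cacheable
 * aliases exist, make all of the ptes involved uncacheable.
 */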
static void
make_coherent(struct address_space *mapping, struct vm_area_struct *vma, unsigned long addr, unsigned long pfn)
{
	struct mm_struct *mm = vma->vm_mm;
	struct vm_area_struct *mpnt;
	struct prio_tree_iter iter;
	unsigned long offset;
	pgoff_t pgoff;
	int aliases = 0;

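	/*
	 * Convert the faulting address to a page offset within the
	 * mapped object, so we can find other mappings of the same
	 * page via the address_space.
	 */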
	pgoff = vma->vm_pgoff + ((addr - vma->vm_start) >> PAGE_SHIFT);

	/*
	 * If we have any shared mappings that are in the same mm
	 * space, then we need to handle them specially to maintain
	 * cache coherency.
	 */
	flush_dcache_mmap_lock(mapping);
	vma_prio_tree_foreach(mpnt, &iter, &mapping->i_mmap, pgoff, pgoff) {
		/*
		 * If this VMA is not in our MM, we can ignore it.
		 * Note that we intentionally mask out the VMA
		 * that we are fixing up.
		 */
		if (mpnt->vm_mm != mm || mpnt == vma)
			continue;
		if (!(mpnt->vm_flags & VM_MAYSHARE))
			continue;
		offset = (pgoff - mpnt->vm_pgoff) << PAGE_SHIFT;
		aliases += adjust_pte(mpnt, mpnt->vm_start + offset);
	}
	flush_dcache_mmap_unlock(mapping);
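	/*
	 * If other present mappings were found, the new pte must be
	 * made uncacheable as well so that all views of the page stay
	 * coherent; otherwise flushing the cache for this page is
	 * sufficient.
	 */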
	if (aliases)
		adjust_pte(vma, addr);
	else
		flush_cache_page(vma, addr, pfn);
}

/*
 * Take care of architecture-specific things when placing a new PTE into
 * a page table, or changing an existing PTE.  Basically, there are two
 * things that we need to take care of:
 *
 *  1. If PG_dcache_dirty is set for the page, we need to ensure
 *     that any cache entries for the kernel's virtual memory
 *     range are written back to the page.
 *  2. If we have multiple shared mappings of the same space in
 *     an object, we need to deal with the cache aliasing issues.
 *
 * Note that the pte lock will be held.
 */
void update_mmu_cache(struct vm_area_struct *vma, unsigned long addr, pte_t pte)
{
	unsigned long pfn = pte_pfn(pte);
	struct address_space *mapping;
	struct page *page;

	if (!pfn_valid(pfn))
		return;

	/*
	 * The zero page is never written to, so never has any dirty
	 * cache lines, and therefore never needs to be flushed.
	 */
	page = pfn_to_page(pfn);
	if (page == ZERO_PAGE(0))
		return;

	mapping = page_mapping(page);
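	/*
	 * If the page was dirtied via its kernel mapping,
	 * flush_dcache_page() may have deferred the flush by setting
	 * PG_dcache_dirty; complete that deferred flush here.  This
	 * lazy scheme is not used on SMP.
	 */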
#ifndef CONFIG_SMP
	if (test_and_clear_bit(PG_dcache_dirty, &page->flags))
		__flush_dcache_page(mapping, page);
#endif
	if (mapping) {
		if (cache_is_vivt())
			make_coherent(mapping, vma, addr, pfn);
		else if (vma->vm_flags & VM_EXEC)
			__flush_icache_all();
	}
}

/*
 * Check whether the write buffer has physical address aliasing
 * issues.  If it has, we need to avoid them for the case where
 * we have several shared mappings of the same object in user
 * space.
 */
static int __init check_writebuffer(unsigned long *p1, unsigned long *p2)
{
	register unsigned long zero = 0, one = 1, val;

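	/*
	 * p1 and p2 are two virtual aliases of the same physical word.
	 * Write 1 through p1, then 0 through p2; with a coherent write
	 * buffer the read back through p1 must observe the later write
	 * and return 0.
	 */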
	local_irq_disable();
	mb();
	*p1 = one;
	mb();
	*p2 = zero;
	mb();
	val = *p1;
	mb();
	local_irq_enable();
	return val != zero;
}

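/*
 * Map the same page twice with a bufferable memory type and run
 * check_writebuffer() on the two aliases.  If the test fails (or
 * cannot be run), switch shared_pte_mask to fully uncached.
 */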
void __init check_writebuffer_bugs(void)
{
	struct page *page;
	const char *reason;
	unsigned long v = 1;

	printk(KERN_INFO "CPU: Testing write buffer coherency: ");

	page = alloc_page(GFP_KERNEL);
	if (page) {
		unsigned long *p1, *p2;
		pgprot_t prot = __pgprot_modify(PAGE_KERNEL,
					L_PTE_MT_MASK, L_PTE_MT_BUFFERABLE);

		p1 = vmap(&page, 1, VM_IOREMAP, prot);
		p2 = vmap(&page, 1, VM_IOREMAP, prot);

		if (p1 && p2) {
			v = check_writebuffer(p1, p2);
			reason = "enabling work-around";
		} else {
			reason = "unable to map memory";
		}

		vunmap(p1);
		vunmap(p2);
		put_page(page);
	} else {
		reason = "unable to grab page";
	}

	if (v) {
		printk("failed, %s\n", reason);
		shared_pte_mask = L_PTE_MT_UNCACHED;
	} else {
		printk("ok\n");
	}
}