// SPDX-License-Identifier: GPL-2.0-only
/*
 *  linux/arch/arm/mm/fault-armv.c
 *
 *  Copyright (C) 1995  Linus Torvalds
 *  Modifications for ARM processor (c) 1995-2002 Russell King
 */
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/bitops.h>
#include <linux/vmalloc.h>
#include <linux/init.h>
#include <linux/pagemap.h>
#include <linux/gfp.h>

#include <asm/bugs.h>
#include <asm/cacheflush.h>
#include <asm/cachetype.h>
#include <asm/tlbflush.h>

#include "mm.h"

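/*
 * Memory-type bits applied to PTEs that alias the same physical page.
 * The default is "bufferable" (uncached, write buffer on); it is
 * downgraded to fully uncached by check_writebuffer_bugs() if the
 * write buffer turns out to mishandle physical-address aliases.
 */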
static pteval_t shared_pte_mask = L_PTE_MT_BUFFERABLE;

#if __LINUX_ARM_ARCH__ < 6
/*
 * We take the easy way out of this problem - we make the
 * PTE uncacheable.  However, we leave the write buffer on.
 *
 * Note that the pte lock held when calling update_mmu_cache must also
 * guard the pte (somewhere else in the same mm) that we modify here.
 * Therefore those configurations which might call adjust_pte (those
 * without CONFIG_CPU_CACHE_VIPT) cannot support split page_table_lock.
 */
static int do_adjust_pte(struct vm_area_struct *vma, unsigned long address,
	unsigned long pfn, pte_t *ptep)
{
	pte_t entry = *ptep;
	int ret;

	/*
	 * If this page is present, it's actually being shared.
	 */
	ret = pte_present(entry);

	/*
	 * If this page isn't present, or is already set up to
	 * fault (i.e., is old), we can safely ignore any issues.
	 */
	if (ret && (pte_val(entry) & L_PTE_MT_MASK) != shared_pte_mask) {
		flush_cache_page(vma, address, pfn);
		outer_flush_range((pfn << PAGE_SHIFT),
				  (pfn << PAGE_SHIFT) + PAGE_SIZE);
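		/*
		 * Rewrite the memory type of this mapping to the shared
		 * (uncacheable) type, and flush the stale TLB entry so
		 * the new attributes take effect immediately.
		 */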
		pte_val(entry) &= ~L_PTE_MT_MASK;
		pte_val(entry) |= shared_pte_mask;
		set_pte_at(vma->vm_mm, address, ptep, entry);
		flush_tlb_page(vma, address);
	}

	return ret;
}

#if USE_SPLIT_PTE_PTLOCKS
/*
 * If we are using split PTE locks, then we need to take the page
 * lock here.  Otherwise we are using shared mm->page_table_lock
 * which is already locked, thus cannot take it.
 */
static inline void do_pte_lock(spinlock_t *ptl)
{
	/*
	 * Use nested version here to indicate that we are already
	 * holding one similar spinlock.
	 */
	spin_lock_nested(ptl, SINGLE_DEPTH_NESTING);
}

static inline void do_pte_unlock(spinlock_t *ptl)
{
	spin_unlock(ptl);
}
#else /* !USE_SPLIT_PTE_PTLOCKS */
static inline void do_pte_lock(spinlock_t *ptl) {}
static inline void do_pte_unlock(spinlock_t *ptl) {}
#endif /* USE_SPLIT_PTE_PTLOCKS */

static int adjust_pte(struct vm_area_struct *vma, unsigned long address,
	unsigned long pfn)
{
	spinlock_t *ptl;
	pgd_t *pgd;
	p4d_t *p4d;
	pud_t *pud;
	pmd_t *pmd;
	pte_t *pte;
	int ret;

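	/*
	 * Walk the page-table hierarchy; a missing or bad entry at any
	 * level means there is no PTE here, hence no alias to adjust.
	 */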
	pgd = pgd_offset(vma->vm_mm, address);
	if (pgd_none_or_clear_bad(pgd))
		return 0;

	p4d = p4d_offset(pgd, address);
	if (p4d_none_or_clear_bad(p4d))
		return 0;

	pud = pud_offset(p4d, address);
	if (pud_none_or_clear_bad(pud))
		return 0;

	pmd = pmd_offset(pud, address);
	if (pmd_none_or_clear_bad(pmd))
		return 0;

	/*
	 * This is called while another page table is mapped, so we
	 * must use the nested version.  This also means we need to
	 * open-code the spin-locking.
	 */
	ptl = pte_lockptr(vma->vm_mm, pmd);
	pte = pte_offset_map(pmd, address);
	do_pte_lock(ptl);

	ret = do_adjust_pte(vma, address, pfn, pte);

	do_pte_unlock(ptl);
	pte_unmap(pte);

	return ret;
}

static void
make_coherent(struct address_space *mapping, struct vm_area_struct *vma,
	unsigned long addr, pte_t *ptep, unsigned long pfn)
{
	struct mm_struct *mm = vma->vm_mm;
	struct vm_area_struct *mpnt;
	unsigned long offset;
	pgoff_t pgoff;
	int aliases = 0;

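	/*
	 * Convert the faulting address into a page offset within the
	 * mapped object, so that other VMAs mapping the same page can
	 * be found in the mapping's interval tree below.
	 */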
	pgoff = vma->vm_pgoff + ((addr - vma->vm_start) >> PAGE_SHIFT);

	/*
	 * If we have any shared mappings that are in the same mm
	 * space, then we need to handle them specially to maintain
	 * cache coherency.
	 */
	flush_dcache_mmap_lock(mapping);
	vma_interval_tree_foreach(mpnt, &mapping->i_mmap, pgoff, pgoff) {
		/*
		 * If this VMA is not in our MM, we can ignore it.
		 * Note that we intentionally mask out the VMA
		 * that we are fixing up.
		 */
		if (mpnt->vm_mm != mm || mpnt == vma)
			continue;
		if (!(mpnt->vm_flags & VM_MAYSHARE))
			continue;
		offset = (pgoff - mpnt->vm_pgoff) << PAGE_SHIFT;
		aliases += adjust_pte(mpnt, mpnt->vm_start + offset, pfn);
	}
	flush_dcache_mmap_unlock(mapping);
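	/*
	 * If any other mappings were made uncacheable, the faulting
	 * mapping must be made uncacheable too, so that every alias
	 * agrees on the memory type.
	 */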
	if (aliases)
		do_adjust_pte(vma, addr, pfn, ptep);
}

/*
 * Take care of architecture specific things when placing a new PTE into
 * a page table, or changing an existing PTE.  Basically, there are two
 * things that we need to take care of:
 *
 *  1. If PG_dcache_clean is not set for the page, we need to ensure
 *     that any cache entries for the kernel's virtual memory
 *     range are written back to the page.
 *  2. If we have multiple shared mappings of the same space in
 *     an object, we need to deal with the cache aliasing issues.
 *
 * Note that the pte lock will be held.
 */
void update_mmu_cache(struct vm_area_struct *vma, unsigned long addr,
	pte_t *ptep)
{
	unsigned long pfn = pte_pfn(*ptep);
	struct address_space *mapping;
	struct page *page;

	if (!pfn_valid(pfn))
		return;

	/*
	 * The zero page is never written to, so never has any dirty
	 * cache lines, and therefore never needs to be flushed.
	 */
	page = pfn_to_page(pfn);
	if (page == ZERO_PAGE(0))
		return;

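	/*
	 * A clear PG_dcache_clean bit means the kernel may hold dirty
	 * cache lines for this page: write them back before userspace
	 * can read stale data through an aliasing mapping.
	 */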
	mapping = page_mapping_file(page);
	if (!test_and_set_bit(PG_dcache_clean, &page->flags))
		__flush_dcache_page(mapping, page);
	if (mapping) {
		if (cache_is_vivt())
			make_coherent(mapping, vma, addr, ptep, pfn);
		else if (vma->vm_flags & VM_EXEC)
			__flush_icache_all();
	}
}
#endif	/* __LINUX_ARM_ARCH__ < 6 */

/*
 * Check whether the write buffer has physical address aliasing
 * issues.  If it has, we need to avoid them for the case where
 * we have several shared mappings of the same object in user
 * space.
 */
static int __init check_writebuffer(unsigned long *p1, unsigned long *p2)
{
	register unsigned long zero = 0, one = 1, val;

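	/*
	 * p1 and p2 are two virtual aliases of the same physical word.
	 * Write 1 through one alias and 0 through the other; if the
	 * final read through the first alias still sees 1, the write
	 * buffer mishandles writes to the same physical address made
	 * through different virtual addresses.
	 */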
	local_irq_disable();
	mb();
	*p1 = one;
	mb();
	*p2 = zero;
	mb();
	val = *p1;
	mb();
	local_irq_enable();
	return val != zero;
}

void __init check_writebuffer_bugs(void)
{
	struct page *page;
	const char *reason;
	unsigned long v = 1;

	pr_info("CPU: Testing write buffer coherency: ");

	page = alloc_page(GFP_KERNEL);
	if (page) {
		unsigned long *p1, *p2;
		pgprot_t prot = __pgprot_modify(PAGE_KERNEL,
					L_PTE_MT_MASK, L_PTE_MT_BUFFERABLE);

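		/*
		 * Map the same page at two different virtual addresses
		 * with the bufferable memory type under test, so that
		 * p1 and p2 alias the same physical memory.
		 */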
		p1 = vmap(&page, 1, VM_IOREMAP, prot);
		p2 = vmap(&page, 1, VM_IOREMAP, prot);

		if (p1 && p2) {
			v = check_writebuffer(p1, p2);
			reason = "enabling work-around";
		} else {
			reason = "unable to map memory";
		}

		vunmap(p1);
		vunmap(p2);
		put_page(page);
	} else {
		reason = "unable to grab page";
	}

	if (v) {
		pr_cont("failed, %s\n", reason);
		shared_pte_mask = L_PTE_MT_UNCACHED;
	} else {
		pr_cont("ok\n");
	}
}