/*
 * arch/arm/include/asm/pgtable.h
 *
 * Copyright (C) 1995-2002 Russell King
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#ifndef _ASMARM_PGTABLE_H
#define _ASMARM_PGTABLE_H

#include <linux/const.h>
#include <asm/proc-fns.h>

#ifndef CONFIG_MMU

#include <asm-generic/4level-fixup.h>
#include <asm/pgtable-nommu.h>

#else

#define __ARCH_USE_5LEVEL_HACK
#include <asm-generic/pgtable-nopud.h>
#include <asm/memory.h>
#include <asm/pgtable-hwdef.h>

#include <asm/tlbflush.h>

#ifdef CONFIG_ARM_LPAE
#include <asm/pgtable-3level.h>
#else
#include <asm/pgtable-2level.h>
#endif

/*
 * Just any arbitrary offset to the start of the vmalloc VM area: the
 * current 8MB value just means that there will be an 8MB "hole" after the
 * physical memory until the kernel virtual memory starts.  That means that
 * any out-of-bounds memory accesses will hopefully be caught.
 * The vmalloc() routines leave a hole of 4kB between each vmalloced
 * area for the same reason. ;)
 */
#define VMALLOC_OFFSET		(8*1024*1024)
#define VMALLOC_START		(((unsigned long)high_memory + VMALLOC_OFFSET) & ~(VMALLOC_OFFSET-1))
#define VMALLOC_END		0xff800000UL
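
/*
 * Worked example (illustrative only, the high_memory value is an
 * assumption): with high_memory at 0xc7800000, VMALLOC_START becomes
 *
 *	(0xc7800000 + 0x00800000) & ~0x007fffff == 0xc8000000
 *
 * i.e. the first VMALLOC_OFFSET-aligned address strictly above the
 * direct-mapped RAM, leaving the intended "hole" below the vmalloc area.
 */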

#define LIBRARY_TEXT_START	0x0c000000

#ifndef __ASSEMBLY__
extern void __pte_error(const char *file, int line, pte_t);
extern void __pmd_error(const char *file, int line, pmd_t);
extern void __pgd_error(const char *file, int line, pgd_t);

#define pte_ERROR(pte)		__pte_error(__FILE__, __LINE__, pte)
#define pmd_ERROR(pmd)		__pmd_error(__FILE__, __LINE__, pmd)
#define pgd_ERROR(pgd)		__pgd_error(__FILE__, __LINE__, pgd)

/*
 * This is the lowest virtual address we can permit any user space
 * mapping to be mapped at.  This is particularly important for
 * non-high vector CPUs.
 */
#define FIRST_USER_ADDRESS	(PAGE_SIZE * 2)

/*
 * Use TASK_SIZE as the ceiling argument for free_pgtables() and
 * free_pgd_range() to avoid freeing the modules pmd when LPAE is enabled (pmd
 * page shared between user and kernel).
 */
#ifdef CONFIG_ARM_LPAE
#define USER_PGTABLES_CEILING	TASK_SIZE
#endif

/*
 * The pgprot_* and protection_map entries will be fixed up at runtime
 * to include the cacheable and bufferable bits based on memory policy,
 * as well as any architecture-dependent bits like global/ASID and SMP
 * shared mapping bits.
 */
#define _L_PTE_DEFAULT	L_PTE_PRESENT | L_PTE_YOUNG

extern pgprot_t		pgprot_user;
extern pgprot_t		pgprot_kernel;
extern pgprot_t		pgprot_hyp_device;
extern pgprot_t		pgprot_s2;
extern pgprot_t		pgprot_s2_device;

#define _MOD_PROT(p, b)	__pgprot(pgprot_val(p) | (b))

#define PAGE_NONE		_MOD_PROT(pgprot_user, L_PTE_XN | L_PTE_RDONLY | L_PTE_NONE)
#define PAGE_SHARED		_MOD_PROT(pgprot_user, L_PTE_USER | L_PTE_XN)
#define PAGE_SHARED_EXEC	_MOD_PROT(pgprot_user, L_PTE_USER)
#define PAGE_COPY		_MOD_PROT(pgprot_user, L_PTE_USER | L_PTE_RDONLY | L_PTE_XN)
#define PAGE_COPY_EXEC		_MOD_PROT(pgprot_user, L_PTE_USER | L_PTE_RDONLY)
#define PAGE_READONLY		_MOD_PROT(pgprot_user, L_PTE_USER | L_PTE_RDONLY | L_PTE_XN)
#define PAGE_READONLY_EXEC	_MOD_PROT(pgprot_user, L_PTE_USER | L_PTE_RDONLY)
#define PAGE_KERNEL		_MOD_PROT(pgprot_kernel, L_PTE_XN)
#define PAGE_KERNEL_EXEC	pgprot_kernel
#define PAGE_HYP		_MOD_PROT(pgprot_kernel, L_PTE_HYP | L_PTE_XN)
#define PAGE_HYP_EXEC		_MOD_PROT(pgprot_kernel, L_PTE_HYP | L_PTE_RDONLY)
#define PAGE_HYP_RO		_MOD_PROT(pgprot_kernel, L_PTE_HYP | L_PTE_RDONLY | L_PTE_XN)
#define PAGE_HYP_DEVICE		_MOD_PROT(pgprot_hyp_device, L_PTE_HYP)
#define PAGE_S2			_MOD_PROT(pgprot_s2, L_PTE_S2_RDONLY)
#define PAGE_S2_DEVICE		_MOD_PROT(pgprot_s2_device, L_PTE_S2_RDONLY)
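
/*
 * Illustrative expansion (not additional API): _MOD_PROT() simply ORs extra
 * Linux pte bits into a base pgprot_t, so, for example,
 *
 *	PAGE_KERNEL
 * expands to
 *	__pgprot(pgprot_val(pgprot_kernel) | (L_PTE_XN))
 *
 * i.e. the kernel mapping attributes with execute permission removed.  The
 * memory-type bits themselves live in the pgprot_* variables above, which
 * are fixed up at boot according to the selected memory policy.
 */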

#define __PAGE_NONE		__pgprot(_L_PTE_DEFAULT | L_PTE_RDONLY | L_PTE_XN | L_PTE_NONE)
#define __PAGE_SHARED		__pgprot(_L_PTE_DEFAULT | L_PTE_USER | L_PTE_XN)
#define __PAGE_SHARED_EXEC	__pgprot(_L_PTE_DEFAULT | L_PTE_USER)
#define __PAGE_COPY		__pgprot(_L_PTE_DEFAULT | L_PTE_USER | L_PTE_RDONLY | L_PTE_XN)
#define __PAGE_COPY_EXEC	__pgprot(_L_PTE_DEFAULT | L_PTE_USER | L_PTE_RDONLY)
#define __PAGE_READONLY		__pgprot(_L_PTE_DEFAULT | L_PTE_USER | L_PTE_RDONLY | L_PTE_XN)
#define __PAGE_READONLY_EXEC	__pgprot(_L_PTE_DEFAULT | L_PTE_USER | L_PTE_RDONLY)

#define __pgprot_modify(prot,mask,bits)		\
	__pgprot((pgprot_val(prot) & ~(mask)) | (bits))

#define pgprot_noncached(prot) \
	__pgprot_modify(prot, L_PTE_MT_MASK, L_PTE_MT_UNCACHED)

#define pgprot_writecombine(prot) \
	__pgprot_modify(prot, L_PTE_MT_MASK, L_PTE_MT_BUFFERABLE)

#define pgprot_stronglyordered(prot) \
	__pgprot_modify(prot, L_PTE_MT_MASK, L_PTE_MT_UNCACHED)

#ifdef CONFIG_ARM_DMA_MEM_BUFFERABLE
#define pgprot_dmacoherent(prot) \
	__pgprot_modify(prot, L_PTE_MT_MASK, L_PTE_MT_BUFFERABLE | L_PTE_XN)
#define __HAVE_PHYS_MEM_ACCESS_PROT
struct file;
extern pgprot_t phys_mem_access_prot(struct file *file, unsigned long pfn,
				     unsigned long size, pgprot_t vma_prot);
#else
#define pgprot_dmacoherent(prot) \
	__pgprot_modify(prot, L_PTE_MT_MASK, L_PTE_MT_UNCACHED | L_PTE_XN)
#endif
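
/*
 * Typical use, sketched from a hypothetical driver's mmap handler (the
 * function and symbol names below are assumptions, not part of this header):
 *
 *	static int foo_mmap(struct file *file, struct vm_area_struct *vma)
 *	{
 *		vma->vm_page_prot = pgprot_writecombine(vma->vm_page_prot);
 *		return remap_pfn_range(vma, vma->vm_start,
 *				       foo_phys >> PAGE_SHIFT,
 *				       vma->vm_end - vma->vm_start,
 *				       vma->vm_page_prot);
 *	}
 *
 * __pgprot_modify() replaces only the L_PTE_MT_* memory-type field, so the
 * access-permission bits of the original protection are preserved.
 */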

#endif /* __ASSEMBLY__ */

/*
 * The table below defines the page protection levels that we insert into our
 * Linux page table version.  These get translated into the best that the
 * architecture can perform.  Note that on most ARM hardware:
 *  1) We cannot do execute protection
 *  2) If we could do execute protection, then read is implied
 *  3) write implies read permissions
 */
#define __P000  __PAGE_NONE
#define __P001  __PAGE_READONLY
#define __P010  __PAGE_COPY
#define __P011  __PAGE_COPY
#define __P100  __PAGE_READONLY_EXEC
#define __P101  __PAGE_READONLY_EXEC
#define __P110  __PAGE_COPY_EXEC
#define __P111  __PAGE_COPY_EXEC

#define __S000  __PAGE_NONE
#define __S001  __PAGE_READONLY
#define __S010  __PAGE_SHARED
#define __S011  __PAGE_SHARED
#define __S100  __PAGE_READONLY_EXEC
#define __S101  __PAGE_READONLY_EXEC
#define __S110  __PAGE_SHARED_EXEC
#define __S111  __PAGE_SHARED_EXEC
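
/*
 * How these are consumed (a sketch of generic mm behaviour, not something
 * defined in this header): the core mm builds protection_map[] from the
 * __P/__S entries and indexes it with the read/write/exec bits of a mapping
 * request, __P for MAP_PRIVATE and __S for MAP_SHARED.  For example,
 *
 *	mmap(NULL, len, PROT_READ | PROT_WRITE, MAP_PRIVATE, fd, 0)
 *
 * selects __P011 == __PAGE_COPY, i.e. a read-only mapping that only becomes
 * writable through copy-on-write.
 */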

#ifndef __ASSEMBLY__
/*
 * ZERO_PAGE is a global shared page that is always zero: used
 * for zero-mapped memory areas etc.
 */
extern struct page *empty_zero_page;
#define ZERO_PAGE(vaddr)	(empty_zero_page)

extern pgd_t swapper_pg_dir[PTRS_PER_PGD];

/* to find an entry in a page-table-directory */
#define pgd_index(addr)		((addr) >> PGDIR_SHIFT)

#define pgd_offset(mm, addr)	((mm)->pgd + pgd_index(addr))

/* to find an entry in a kernel page-table-directory */
#define pgd_offset_k(addr)	pgd_offset(&init_mm, addr)

#define pmd_none(pmd)		(!pmd_val(pmd))

static inline pte_t *pmd_page_vaddr(pmd_t pmd)
{
	return __va(pmd_val(pmd) & PHYS_MASK & (s32)PAGE_MASK);
}

#define pmd_page(pmd)		pfn_to_page(__phys_to_pfn(pmd_val(pmd) & PHYS_MASK))

#ifndef CONFIG_HIGHPTE
#define __pte_map(pmd)		pmd_page_vaddr(*(pmd))
#define __pte_unmap(pte)	do { } while (0)
#else
#define __pte_map(pmd)		(pte_t *)kmap_atomic(pmd_page(*(pmd)))
#define __pte_unmap(pte)	kunmap_atomic(pte)
#endif

#define pte_index(addr)		(((addr) >> PAGE_SHIFT) & (PTRS_PER_PTE - 1))

#define pte_offset_kernel(pmd,addr)	(pmd_page_vaddr(*(pmd)) + pte_index(addr))

#define pte_offset_map(pmd,addr)	(__pte_map(pmd) + pte_index(addr))
#define pte_unmap(pte)			__pte_unmap(pte)
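
/*
 * A minimal page-table walk using the helpers above (an illustrative sketch;
 * the pud step is the folded no-op from the nopud/5level headers included
 * earlier):
 *
 *	pgd_t *pgd = pgd_offset(mm, addr);
 *	pud_t *pud = pud_offset(pgd, addr);
 *	pmd_t *pmd = pmd_offset(pud, addr);
 *
 *	if (!pmd_none(*pmd)) {
 *		pte_t *pte = pte_offset_map(pmd, addr);
 *		pte_t entry = *pte;
 *		pte_unmap(pte);
 *	}
 *
 * pte_unmap() must pair with pte_offset_map() so that the CONFIG_HIGHPTE
 * kmap_atomic() mapping is dropped again; without HIGHPTE it is a no-op.
 */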

#define pte_pfn(pte)		((pte_val(pte) & PHYS_MASK) >> PAGE_SHIFT)
#define pfn_pte(pfn,prot)	__pte(__pfn_to_phys(pfn) | pgprot_val(prot))

#define pte_page(pte)		pfn_to_page(pte_pfn(pte))
#define mk_pte(page,prot)	pfn_pte(page_to_pfn(page), prot)

#define pte_clear(mm,addr,ptep)	set_pte_ext(ptep, __pte(0), 0)

#define pte_isset(pte, val)	((u32)(val) == (val) ? pte_val(pte) & (val) \
						: !!(pte_val(pte) & (val)))
#define pte_isclear(pte, val)	(!(pte_val(pte) & (val)))
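
/*
 * Why pte_isset() is not just "pte_val(pte) & (val)": with LPAE the pte
 * value is 64 bits wide while these predicates are often used in 32-bit
 * (int) contexts, so a flag above bit 31 would be silently truncated to
 * zero.  When the flag fits in a u32 the raw masked value is returned
 * (cheap); otherwise it is collapsed to 0/1 with !! first.  This reading
 * follows the macro itself rather than any separate documentation.
 */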

#define pte_none(pte)		(!pte_val(pte))
#define pte_present(pte)	(pte_isset((pte), L_PTE_PRESENT))
#define pte_valid(pte)		(pte_isset((pte), L_PTE_VALID))
#define pte_accessible(mm, pte)	(mm_tlb_flush_pending(mm) ? pte_present(pte) : pte_valid(pte))
#define pte_write(pte)		(pte_isclear((pte), L_PTE_RDONLY))
#define pte_dirty(pte)		(pte_isset((pte), L_PTE_DIRTY))
#define pte_young(pte)		(pte_isset((pte), L_PTE_YOUNG))
#define pte_exec(pte)		(pte_isclear((pte), L_PTE_XN))

#define pte_valid_user(pte)	\
	(pte_valid(pte) && pte_isset((pte), L_PTE_USER) && pte_young(pte))

static inline bool pte_access_permitted(pte_t pte, bool write)
{
	pteval_t mask = L_PTE_PRESENT | L_PTE_USER;
	pteval_t needed = mask;

	if (write)
		mask |= L_PTE_RDONLY;

	return (pte_val(pte) & mask) == needed;
}
#define pte_access_permitted pte_access_permitted
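
/*
 * Note on the logic above: ARM encodes write permission as the absence of
 * L_PTE_RDONLY, so for a write check the bit is added to the mask but not
 * to "needed"; the comparison then only succeeds when RDONLY is clear.
 * For instance (values hypothetical), a present, user, read-only pte gives
 * (pte_val & mask) == L_PTE_PRESENT | L_PTE_USER | L_PTE_RDONLY, which is
 * != needed, so pte_access_permitted(pte, true) correctly returns false.
 */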

#if __LINUX_ARM_ARCH__ < 6
static inline void __sync_icache_dcache(pte_t pteval)
{
}
#else
extern void __sync_icache_dcache(pte_t pteval);
#endif

static inline void set_pte_at(struct mm_struct *mm, unsigned long addr,
			      pte_t *ptep, pte_t pteval)
{
	unsigned long ext = 0;

	if (addr < TASK_SIZE && pte_valid_user(pteval)) {
		if (!pte_special(pteval))
			__sync_icache_dcache(pteval);
		ext |= PTE_EXT_NG;
	}

	set_pte_ext(ptep, pteval, ext);
}
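
/*
 * In other words (a summary, not new behaviour): user-space ptes below
 * TASK_SIZE are installed with the hardware "not global" bit so the TLB
 * entry is tagged with the current ASID, and for ordinary (non-special)
 * pages the I/D caches are synchronised first so newly mapped code is
 * visible to instruction fetches.  Kernel ptes take neither action.
 */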

static inline pte_t clear_pte_bit(pte_t pte, pgprot_t prot)
{
	pte_val(pte) &= ~pgprot_val(prot);
	return pte;
}

static inline pte_t set_pte_bit(pte_t pte, pgprot_t prot)
{
	pte_val(pte) |= pgprot_val(prot);
	return pte;
}

static inline pte_t pte_wrprotect(pte_t pte)
{
	return set_pte_bit(pte, __pgprot(L_PTE_RDONLY));
}

static inline pte_t pte_mkwrite(pte_t pte)
{
	return clear_pte_bit(pte, __pgprot(L_PTE_RDONLY));
}

static inline pte_t pte_mkclean(pte_t pte)
{
	return clear_pte_bit(pte, __pgprot(L_PTE_DIRTY));
}

static inline pte_t pte_mkdirty(pte_t pte)
{
	return set_pte_bit(pte, __pgprot(L_PTE_DIRTY));
}

static inline pte_t pte_mkold(pte_t pte)
{
	return clear_pte_bit(pte, __pgprot(L_PTE_YOUNG));
}

static inline pte_t pte_mkyoung(pte_t pte)
{
	return set_pte_bit(pte, __pgprot(L_PTE_YOUNG));
}

static inline pte_t pte_mkexec(pte_t pte)
{
	return clear_pte_bit(pte, __pgprot(L_PTE_XN));
}

static inline pte_t pte_mknexec(pte_t pte)
{
	return set_pte_bit(pte, __pgprot(L_PTE_XN));
}
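
/*
 * These helpers are pure value transformations and compose freely; a fault
 * handler style sketch (illustrative, not code from this file) might do:
 *
 *	pte_t entry = *ptep;
 *
 *	entry = pte_mkyoung(pte_mkdirty(entry));
 *	set_pte_at(vma->vm_mm, addr, ptep, entry);
 *
 * Note the inverted sense of some bits: pte_mkwrite() clears L_PTE_RDONLY
 * and pte_mkexec() clears L_PTE_XN, since the Linux pte encodes "read only"
 * and "no execute" as set bits rather than "writable"/"executable".
 */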

static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
{
	const pteval_t mask = L_PTE_XN | L_PTE_RDONLY | L_PTE_USER |
		L_PTE_NONE | L_PTE_VALID;
	pte_val(pte) = (pte_val(pte) & ~mask) | (pgprot_val(newprot) & mask);
	return pte;
}
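
/*
 * pte_modify() is what protection-changing paths such as mprotect() use to
 * swap the permission bits of an existing pte while keeping its page frame
 * number, memory type and dirty/young state; only the bits in "mask" above
 * are taken from the new protection.  A hedged usage sketch:
 *
 *	pte_t entry = *ptep;
 *
 *	entry = pte_modify(entry, PAGE_READONLY);
 *	set_pte_at(mm, addr, ptep, entry);
 */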

/*
 * Encode and decode a swap entry.  Swap entries are stored in the Linux
 * page tables as follows:
 *
 *   3 3 2 2 2 2 2 2 2 2 2 2 1 1 1 1 1 1 1 1 1 1
 *   1 0 9 8 7 6 5 4 3 2 1 0 9 8 7 6 5 4 3 2 1 0 9 8 7 6 5 4 3 2 1 0
 *   <--------------- offset ------------------------> < type -> 0 0
 *
 * This gives us up to 31 swap files and 128GB per swap file.  Note that
 * the offset field is always non-zero.
 */
#define __SWP_TYPE_SHIFT	2
#define __SWP_TYPE_BITS		5
#define __SWP_TYPE_MASK		((1 << __SWP_TYPE_BITS) - 1)
#define __SWP_OFFSET_SHIFT	(__SWP_TYPE_BITS + __SWP_TYPE_SHIFT)

#define __swp_type(x)		(((x).val >> __SWP_TYPE_SHIFT) & __SWP_TYPE_MASK)
#define __swp_offset(x)		((x).val >> __SWP_OFFSET_SHIFT)
#define __swp_entry(type,offset) ((swp_entry_t) { ((type) << __SWP_TYPE_SHIFT) | ((offset) << __SWP_OFFSET_SHIFT) })
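
/*
 * Checking the arithmetic above (illustrative): the type field occupies
 * bits 6..2 and the offset starts at bit 7, leaving 25 offset bits in a
 * 32-bit pte; with 4kB pages that is 2^25 * 4kB = 128GB of addressable
 * swap per file, matching the comment.  For example, __swp_entry(2, 5)
 * yields (2 << 2) | (5 << 7) == 0x288, from which __swp_type() and
 * __swp_offset() recover 2 and 5 respectively.
 */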

#define __pte_to_swp_entry(pte)	((swp_entry_t) { pte_val(pte) })
#define __swp_entry_to_pte(swp)	((pte_t) { (swp).val })

/*
 * It is an error for the kernel to have more swap files than we can
 * encode in the PTEs.  This ensures that we know when MAX_SWAPFILES
 * is increased beyond what we presently support.
 */
#define MAX_SWAPFILES_CHECK() BUILD_BUG_ON(MAX_SWAPFILES_SHIFT > __SWP_TYPE_BITS)

/* Needs to be defined here and not in linux/mm.h, as it is arch dependent */
/* FIXME: this is not correct */
#define kern_addr_valid(addr)	(1)

#include <asm-generic/pgtable.h>

/*
 * We provide our own arch_get_unmapped_area to cope with VIPT caches.
 */
#define HAVE_ARCH_UNMAPPED_AREA
#define HAVE_ARCH_UNMAPPED_AREA_TOPDOWN

#define pgtable_cache_init() do { } while (0)

#endif /* !__ASSEMBLY__ */

#endif /* CONFIG_MMU */

#endif /* _ASMARM_PGTABLE_H */