#ifndef _SPARC_PGTABLE_H
#define _SPARC_PGTABLE_H

/* asm/pgtable.h:  Defines and functions used to work
 * with Sparc page tables.
 *
 * Copyright (C) 1995 David S. Miller (davem@caip.rutgers.edu)
 * Copyright (C) 1998 Jakub Jelinek (jj@sunsite.mff.cuni.cz)
 */

#include <linux/const.h>

#ifndef __ASSEMBLY__
#include <asm-generic/4level-fixup.h>

#include <linux/spinlock.h>
#include <linux/swap.h>
#include <asm/types.h>
#include <asm/pgtsrmmu.h>
#include <asm/vaddrs.h>
#include <asm/oplib.h>
#include <asm/cpu_type.h>


struct vm_area_struct;
struct page;

void load_mmu(void);
unsigned long calc_highpages(void);
unsigned long __init bootmem_init(unsigned long *pages_avail);

#define pte_ERROR(e)	__builtin_trap()
#define pmd_ERROR(e)	__builtin_trap()
#define pgd_ERROR(e)	__builtin_trap()

#define PMD_SHIFT		22
#define PMD_SIZE		(1UL << PMD_SHIFT)
#define PMD_MASK		(~(PMD_SIZE-1))
#define PMD_ALIGN(__addr)	(((__addr) + ~PMD_MASK) & PMD_MASK)
#define PGDIR_SHIFT		SRMMU_PGDIR_SHIFT
#define PGDIR_SIZE		SRMMU_PGDIR_SIZE
#define PGDIR_MASK		SRMMU_PGDIR_MASK
#define PTRS_PER_PTE		1024
#define PTRS_PER_PMD		SRMMU_PTRS_PER_PMD
#define PTRS_PER_PGD		SRMMU_PTRS_PER_PGD
#define USER_PTRS_PER_PGD	(PAGE_OFFSET / SRMMU_PGDIR_SIZE)
#define FIRST_USER_ADDRESS	0
#define PTE_SIZE		(PTRS_PER_PTE*4)

#define PAGE_NONE	SRMMU_PAGE_NONE
#define PAGE_SHARED	SRMMU_PAGE_SHARED
#define PAGE_COPY	SRMMU_PAGE_COPY
#define PAGE_READONLY	SRMMU_PAGE_RDONLY
#define PAGE_KERNEL	SRMMU_PAGE_KERNEL

/* Top-level page directory - dummy used by init-mm.
 * srmmu.c will assign the real one (which is dynamically sized) */
#define swapper_pg_dir NULL

void paging_init(void);

extern unsigned long ptr_in_current_pgd;

/*         xwr */
#define __P000	PAGE_NONE
#define __P001	PAGE_READONLY
#define __P010	PAGE_COPY
#define __P011	PAGE_COPY
#define __P100	PAGE_READONLY
#define __P101	PAGE_READONLY
#define __P110	PAGE_COPY
#define __P111	PAGE_COPY

#define __S000	PAGE_NONE
#define __S001	PAGE_READONLY
#define __S010	PAGE_SHARED
#define __S011	PAGE_SHARED
#define __S100	PAGE_READONLY
#define __S101	PAGE_READONLY
#define __S110	PAGE_SHARED
#define __S111	PAGE_SHARED
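
/* Reading the maps above (explanatory note, added): the index is the xwr
 * (execute/write/read) permission triple of a vma.  Writable private
 * mappings (__P010/__P011) get PAGE_COPY so the first write faults and is
 * satisfied by copy-on-write; the shared equivalents (__S010/__S011) map
 * straight to PAGE_SHARED.  Execute-only requests fall back to
 * PAGE_READONLY.
 */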

/* First physical page can be anywhere; the following is needed so that
 * va-->pa and vice versa conversions work properly without a performance
 * hit for all __pa()/__va() operations.
 */
extern unsigned long phys_base;
extern unsigned long pfn_base;

/*
 * ZERO_PAGE is a global shared page that is always zero: used
 * for zero-mapped memory areas etc.
 */
extern unsigned long empty_zero_page;

#define ZERO_PAGE(vaddr) (virt_to_page(&empty_zero_page))

/*
 * In general all page table modifications should use the V8 atomic
 * swap instruction.  This ensures the mmu and the cpu are in sync
 * with respect to ref/mod bits in the page tables.
 */
static inline unsigned long srmmu_swap(unsigned long *addr, unsigned long value)
{
	__asm__ __volatile__("swap [%2], %0" :
			"=&r" (value) : "0" (value), "r" (addr) : "memory");
	return value;
}
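
/* Usage sketch (illustrative, not part of the original source):
 * srmmu_swap() both installs a new value and returns the old word, so a
 * caller can update a pte and still observe ref/mod bits the MMU may have
 * set concurrently:
 *
 *	unsigned long old = srmmu_swap((unsigned long *)ptep,
 *				       pte_val(pteval));
 *	if (old & SRMMU_DIRTY)
 *		;	// the old mapping had been written to
 */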

/* Certain architectures need to do special things when pte's
 * within a page table are directly modified.  Thus, the following
 * hook is made available.
 */

static inline void set_pte(pte_t *ptep, pte_t pteval)
{
	srmmu_swap((unsigned long *)ptep, pte_val(pteval));
}

#define set_pte_at(mm,addr,ptep,pteval) set_pte(ptep,pteval)

static inline int srmmu_device_memory(unsigned long x)
{
	return ((x & 0xF0000000) != 0);
}
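
/* Explanatory note (added): SRMMU entries hold the physical address
 * shifted right by 4, so the top nibble of an entry corresponds to
 * physical address bits 32-35.  A non-zero value there means the entry
 * refers to an I/O space (see mk_pte_io() below), not to RAM.
 */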

static inline struct page *pmd_page(pmd_t pmd)
{
	if (srmmu_device_memory(pmd_val(pmd)))
		BUG();
	return pfn_to_page((pmd_val(pmd) & SRMMU_PTD_PMASK) >> (PAGE_SHIFT-4));
}

static inline unsigned long pgd_page_vaddr(pgd_t pgd)
{
	if (srmmu_device_memory(pgd_val(pgd))) {
		return ~0;
	} else {
		unsigned long v = pgd_val(pgd) & SRMMU_PTD_PMASK;
		return (unsigned long)__nocache_va(v << 4);
	}
}

static inline int pte_present(pte_t pte)
{
	return ((pte_val(pte) & SRMMU_ET_MASK) == SRMMU_ET_PTE);
}

static inline int pte_none(pte_t pte)
{
	return !pte_val(pte);
}

static inline void __pte_clear(pte_t *ptep)
{
	set_pte(ptep, __pte(0));
}

static inline void pte_clear(struct mm_struct *mm, unsigned long addr, pte_t *ptep)
{
	__pte_clear(ptep);
}

static inline int pmd_bad(pmd_t pmd)
{
	return (pmd_val(pmd) & SRMMU_ET_MASK) != SRMMU_ET_PTD;
}

static inline int pmd_present(pmd_t pmd)
{
	return ((pmd_val(pmd) & SRMMU_ET_MASK) == SRMMU_ET_PTD);
}

static inline int pmd_none(pmd_t pmd)
{
	return !pmd_val(pmd);
}

static inline void pmd_clear(pmd_t *pmdp)
{
	int i;
	for (i = 0; i < PTRS_PER_PTE/SRMMU_REAL_PTRS_PER_PTE; i++)
		set_pte((pte_t *)&pmdp->pmdv[i], __pte(0));
}
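
/* Note (added): a Linux pmd here bundles several hardware ptds
 * (PTRS_PER_PTE / SRMMU_REAL_PTRS_PER_PTE of them), which is why
 * pmd_clear() above walks the pmdv[] array and clears each word with an
 * atomic set_pte() rather than doing a single store.
 */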

static inline int pgd_none(pgd_t pgd)
{
	return !(pgd_val(pgd) & 0xFFFFFFF);
}

static inline int pgd_bad(pgd_t pgd)
{
	return (pgd_val(pgd) & SRMMU_ET_MASK) != SRMMU_ET_PTD;
}

static inline int pgd_present(pgd_t pgd)
{
	return ((pgd_val(pgd) & SRMMU_ET_MASK) == SRMMU_ET_PTD);
}

static inline void pgd_clear(pgd_t *pgdp)
{
	set_pte((pte_t *)pgdp, __pte(0));
}

/*
 * The following only work if pte_present() is true.
 * Undefined behaviour if not..
 */
static inline int pte_write(pte_t pte)
{
	return pte_val(pte) & SRMMU_WRITE;
}

static inline int pte_dirty(pte_t pte)
{
	return pte_val(pte) & SRMMU_DIRTY;
}

static inline int pte_young(pte_t pte)
{
	return pte_val(pte) & SRMMU_REF;
}

static inline int pte_special(pte_t pte)
{
	return 0;
}

static inline pte_t pte_wrprotect(pte_t pte)
{
	return __pte(pte_val(pte) & ~SRMMU_WRITE);
}

static inline pte_t pte_mkclean(pte_t pte)
{
	return __pte(pte_val(pte) & ~SRMMU_DIRTY);
}

static inline pte_t pte_mkold(pte_t pte)
{
	return __pte(pte_val(pte) & ~SRMMU_REF);
}

static inline pte_t pte_mkwrite(pte_t pte)
{
	return __pte(pte_val(pte) | SRMMU_WRITE);
}

static inline pte_t pte_mkdirty(pte_t pte)
{
	return __pte(pte_val(pte) | SRMMU_DIRTY);
}

static inline pte_t pte_mkyoung(pte_t pte)
{
	return __pte(pte_val(pte) | SRMMU_REF);
}

#define pte_mkspecial(pte)	(pte)

#define pfn_pte(pfn, prot)	mk_pte(pfn_to_page(pfn), prot)

static inline unsigned long pte_pfn(pte_t pte)
{
	if (srmmu_device_memory(pte_val(pte))) {
		/* Just return something that will cause
		 * pfn_valid() to return false.  This makes
		 * copy_one_pte() just copy the PTE over
		 * directly.
		 */
		return ~0UL;
	}
	return (pte_val(pte) & SRMMU_PTE_PMASK) >> (PAGE_SHIFT-4);
}

#define pte_page(pte)	pfn_to_page(pte_pfn(pte))

/*
 * Conversion functions: convert a page and protection to a page entry,
 * and a page entry and page directory to the page they refer to.
 */
static inline pte_t mk_pte(struct page *page, pgprot_t pgprot)
{
	return __pte((page_to_pfn(page) << (PAGE_SHIFT-4)) | pgprot_val(pgprot));
}

static inline pte_t mk_pte_phys(unsigned long page, pgprot_t pgprot)
{
	return __pte(((page) >> 4) | pgprot_val(pgprot));
}

static inline pte_t mk_pte_io(unsigned long page, pgprot_t pgprot, int space)
{
	return __pte(((page) >> 4) | (space << 28) | pgprot_val(pgprot));
}
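
/* Worked example (illustrative values, added): a page at physical address
 * 0x12345000 mapped with kernel protections:
 *
 *	pte_t pte = mk_pte_phys(0x12345000UL, PAGE_KERNEL);
 *	// pte_val(pte) == 0x01234500 | pgprot_val(PAGE_KERNEL)
 *
 * mk_pte_io() additionally folds the 4-bit iospace into bits 28-31 of
 * the entry.
 */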

#define pgprot_noncached pgprot_noncached
static inline pgprot_t pgprot_noncached(pgprot_t prot)
{
	prot &= ~__pgprot(SRMMU_CACHE);
	return prot;
}

static pte_t pte_modify(pte_t pte, pgprot_t newprot) __attribute_const__;
static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
{
	return __pte((pte_val(pte) & SRMMU_CHG_MASK) |
		pgprot_val(newprot));
}
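
/* Usage sketch (illustrative, added): pte_modify() keeps the page number
 * and the ref/mod state (everything in SRMMU_CHG_MASK) and replaces only
 * the protection bits, e.g. to write-protect an existing mapping:
 *
 *	pte = pte_modify(pte, PAGE_READONLY);
 */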

#define pgd_index(address) ((address) >> PGDIR_SHIFT)

/* to find an entry in a page-table-directory */
#define pgd_offset(mm, address) ((mm)->pgd + pgd_index(address))

/* to find an entry in a kernel page-table-directory */
#define pgd_offset_k(address) pgd_offset(&init_mm, address)

/* Find an entry in the second-level page table.. */
static inline pmd_t *pmd_offset(pgd_t *dir, unsigned long address)
{
	return (pmd_t *) pgd_page_vaddr(*dir) +
		((address >> PMD_SHIFT) & (PTRS_PER_PMD - 1));
}

/* Find an entry in the third-level page table.. */
pte_t *pte_offset_kernel(pmd_t *dir, unsigned long address);

/*
 * This shortcut works on sun4m (and sun4d) because the nocache area is static.
 */
#define pte_offset_map(d, a)	pte_offset_kernel(d,a)
#define pte_unmap(pte)		do{}while(0)
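
/* Putting the lookup helpers together (sketch with hypothetical mm/addr
 * variables, added; real callers must check pgd/pmd validity before
 * descending):
 *
 *	pgd_t *pgd = pgd_offset(mm, addr);
 *	pmd_t *pmd = pmd_offset(pgd, addr);
 *	pte_t *pte = pte_offset_map(pmd, addr);
 *	if (pte_present(*pte))
 *		;	// use *pte
 *	pte_unmap(pte);
 */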

struct seq_file;
void mmu_info(struct seq_file *m);

/* Fault handler stuff... */
#define FAULT_CODE_PROT     0x1
#define FAULT_CODE_WRITE    0x2
#define FAULT_CODE_USER     0x4

#define update_mmu_cache(vma, address, ptep) do { } while (0)

void srmmu_mapiorange(unsigned int bus, unsigned long xpa,
                      unsigned long xva, unsigned int len);
void srmmu_unmapiorange(unsigned long virt_addr, unsigned int len);

/* Encode and de-code a swap entry */
static inline unsigned long __swp_type(swp_entry_t entry)
{
	return (entry.val >> SRMMU_SWP_TYPE_SHIFT) & SRMMU_SWP_TYPE_MASK;
}

static inline unsigned long __swp_offset(swp_entry_t entry)
{
	return (entry.val >> SRMMU_SWP_OFF_SHIFT) & SRMMU_SWP_OFF_MASK;
}

static inline swp_entry_t __swp_entry(unsigned long type, unsigned long offset)
{
	return (swp_entry_t) {
		(type & SRMMU_SWP_TYPE_MASK) << SRMMU_SWP_TYPE_SHIFT
		| (offset & SRMMU_SWP_OFF_MASK) << SRMMU_SWP_OFF_SHIFT };
}

#define __pte_to_swp_entry(pte)		((swp_entry_t) { pte_val(pte) })
#define __swp_entry_to_pte(x)		((pte_t) { (x).val })
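
/* Round-trip sketch (illustrative, added): a swapped-out page is
 * represented by a non-present pte whose bits encode a (type, offset)
 * pair:
 *
 *	swp_entry_t ent = __swp_entry(type, offset);
 *	set_pte_at(mm, addr, ptep, __swp_entry_to_pte(ent));
 *	// ... later, in the fault path:
 *	ent = __pte_to_swp_entry(*ptep);
 *	type = __swp_type(ent);
 *	offset = __swp_offset(ent);
 */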
370
Sam Ravnborga439fe52008-07-27 23:00:59 +0200371static inline unsigned long
372__get_phys (unsigned long addr)
373{
374 switch (sparc_cpu_model){
Sam Ravnborga439fe52008-07-27 23:00:59 +0200375 case sun4m:
376 case sun4d:
377 return ((srmmu_get_pte (addr) & 0xffffff00) << 4);
378 default:
379 return 0;
380 }
381}
382
383static inline int
384__get_iospace (unsigned long addr)
385{
386 switch (sparc_cpu_model){
Sam Ravnborga439fe52008-07-27 23:00:59 +0200387 case sun4m:
388 case sun4d:
389 return (srmmu_get_pte (addr) >> 28);
390 default:
391 return -1;
392 }
393}
394
395extern unsigned long *sparc_valid_addr_bitmap;
396
397/* Needs to be defined here and not in linux/mm.h, as it is arch dependent */
398#define kern_addr_valid(addr) \
399 (test_bit(__pa((unsigned long)(addr))>>20, sparc_valid_addr_bitmap))
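
/* Note (added): the >>20 means each bit in sparc_valid_addr_bitmap
 * covers one megabyte of physical address space.
 */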

/*
 * For sparc32&64, the pfn in io_remap_pfn_range() carries <iospace> in
 * its high 4 bits.  These macros/functions put it there or get it from there.
 */
#define MK_IOSPACE_PFN(space, pfn)	(pfn | (space << (BITS_PER_LONG - 4)))
#define GET_IOSPACE(pfn)		(pfn >> (BITS_PER_LONG - 4))
#define GET_PFN(pfn)			(pfn & 0x0fffffffUL)

int remap_pfn_range(struct vm_area_struct *, unsigned long, unsigned long,
		    unsigned long, pgprot_t);

static inline int io_remap_pfn_range(struct vm_area_struct *vma,
				     unsigned long from, unsigned long pfn,
				     unsigned long size, pgprot_t prot)
{
	unsigned long long offset, space, phys_base;

	offset = ((unsigned long long) GET_PFN(pfn)) << PAGE_SHIFT;
	space = GET_IOSPACE(pfn);
	phys_base = offset | (space << 32ULL);

	return remap_pfn_range(vma, from, phys_base >> PAGE_SHIFT, size, prot);
}
#define io_remap_pfn_range io_remap_pfn_range
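
/* Driver-side sketch (hypothetical values, added): map a device page
 * from iospace 0x8 at bus offset 0x00100000 into userspace from an mmap
 * handler:
 *
 *	unsigned long pfn = MK_IOSPACE_PFN(0x8UL,
 *					   0x00100000UL >> PAGE_SHIFT);
 *
 *	return io_remap_pfn_range(vma, vma->vm_start, pfn,
 *				  vma->vm_end - vma->vm_start,
 *				  pgprot_noncached(vma->vm_page_prot));
 */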

#define __HAVE_ARCH_PTEP_SET_ACCESS_FLAGS
#define ptep_set_access_flags(__vma, __address, __ptep, __entry, __dirty) \
({									  \
	int __changed = !pte_same(*(__ptep), __entry);			  \
	if (__changed) {						  \
		set_pte_at((__vma)->vm_mm, (__address), __ptep, __entry); \
		flush_tlb_page(__vma, __address);			  \
	}								  \
	__changed;							  \
})
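
/* Note (added): the macro evaluates to whether the pte actually changed;
 * the TLB entry is flushed only in that case, so unchanged faults cost
 * nothing extra.
 */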

#include <asm-generic/pgtable.h>

#endif /* !(__ASSEMBLY__) */

#define VMALLOC_START           _AC(0xfe600000,UL)
#define VMALLOC_END             _AC(0xffc00000,UL)

/* We provide our own get_unmapped_area to cope with VA holes for userland */
#define HAVE_ARCH_UNMAPPED_AREA

/*
 * No page table caches to initialise
 */
#define pgtable_cache_init()	do { } while (0)

#endif /* !(_SPARC_PGTABLE_H) */