#ifndef _SPARC_PGTABLE_H
#define _SPARC_PGTABLE_H

/* asm/pgtable.h: Defines and functions used to work
 * with Sparc page tables.
 *
 * Copyright (C) 1995 David S. Miller (davem@caip.rutgers.edu)
 * Copyright (C) 1998 Jakub Jelinek (jj@sunsite.mff.cuni.cz)
 */

#include <linux/const.h>

#ifndef __ASSEMBLY__
#include <asm-generic/4level-fixup.h>

#include <linux/spinlock.h>
#include <linux/swap.h>
#include <asm/types.h>
#include <asm/pgtsrmmu.h>
#include <asm/vaddrs.h>
#include <asm/oplib.h>
#include <asm/cpu_type.h>


struct vm_area_struct;
struct page;

extern void load_mmu(void);
extern unsigned long calc_highpages(void);

#define pte_ERROR(e)	__builtin_trap()
#define pmd_ERROR(e)	__builtin_trap()
#define pgd_ERROR(e)	__builtin_trap()

#define PMD_SHIFT		22
#define PMD_SIZE		(1UL << PMD_SHIFT)
#define PMD_MASK		(~(PMD_SIZE-1))
#define PMD_ALIGN(__addr)	(((__addr) + ~PMD_MASK) & PMD_MASK)
#define PGDIR_SHIFT		SRMMU_PGDIR_SHIFT
#define PGDIR_SIZE		SRMMU_PGDIR_SIZE
#define PGDIR_MASK		SRMMU_PGDIR_MASK
#define PTRS_PER_PTE		1024
#define PTRS_PER_PMD		SRMMU_PTRS_PER_PMD
#define PTRS_PER_PGD		SRMMU_PTRS_PER_PGD
#define USER_PTRS_PER_PGD	PAGE_OFFSET / SRMMU_PGDIR_SIZE
#define FIRST_USER_ADDRESS	0
#define PTE_SIZE		(PTRS_PER_PTE*4)

#define PAGE_NONE	SRMMU_PAGE_NONE
#define PAGE_SHARED	SRMMU_PAGE_SHARED
#define PAGE_COPY	SRMMU_PAGE_COPY
#define PAGE_READONLY	SRMMU_PAGE_RDONLY
#define PAGE_KERNEL	SRMMU_PAGE_KERNEL

/* Top-level page directory - dummy used by init-mm.
 * srmmu.c will assign the real one (which is dynamically sized) */
#define swapper_pg_dir NULL

extern void paging_init(void);

extern unsigned long ptr_in_current_pgd;

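/* Protection maps for mmap(): indexed by the x/w/r permission bits of a
 * vma; __PXXX are used for private (copy-on-write) mappings, __SXXX for
 * shared ones.  SRMMU cannot express execute-only or write-only, so those
 * combinations fall back to read-only or copy-on-write pages.
 */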
/* xwr */
#define __P000	PAGE_NONE
#define __P001	PAGE_READONLY
#define __P010	PAGE_COPY
#define __P011	PAGE_COPY
#define __P100	PAGE_READONLY
#define __P101	PAGE_READONLY
#define __P110	PAGE_COPY
#define __P111	PAGE_COPY

#define __S000	PAGE_NONE
#define __S001	PAGE_READONLY
#define __S010	PAGE_SHARED
#define __S011	PAGE_SHARED
#define __S100	PAGE_READONLY
#define __S101	PAGE_READONLY
#define __S110	PAGE_SHARED
#define __S111	PAGE_SHARED

/* First physical page can be anywhere, the following is needed so that
 * va-->pa and vice versa conversions work properly without a performance
 * hit for all __pa()/__va() operations.
 */
extern unsigned long phys_base;
extern unsigned long pfn_base;

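/* With these, __pa()/__va() in asm/page.h reduce to simple offset
 * arithmetic (essentially addr - PAGE_OFFSET + phys_base and back)
 * no matter where the first physical page actually sits.
 */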
/*
 * ZERO_PAGE is a global shared page that is always zero: used
 * for zero-mapped memory areas etc..
 */
extern unsigned long empty_zero_page;

#define ZERO_PAGE(vaddr) (virt_to_page(&empty_zero_page))

/*
 * In general all page table modifications should use the V8 atomic
 * swap instruction.  This ensures the mmu and the cpu are in sync
 * with respect to ref/mod bits in the page tables.
 */
static inline unsigned long srmmu_swap(unsigned long *addr, unsigned long value)
{
	/* "swap" atomically exchanges the register with the word at addr;
	 * the "0" constraint ties the input to output register %0, so the
	 * old memory contents come back in value.
	 */
	__asm__ __volatile__("swap [%2], %0" : "=&r" (value) : "0" (value), "r" (addr));
	return value;
}

/* Certain architectures need to do special things when pte's
 * within a page table are directly modified.  Thus, the following
 * hook is made available.
 */

static inline void set_pte(pte_t *ptep, pte_t pteval)
{
	srmmu_swap((unsigned long *)ptep, pte_val(pteval));
}

#define set_pte_at(mm,addr,ptep,pteval) set_pte(ptep,pteval)

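/* A pte/ptd stores the physical address shifted right by 4 bits, so its
 * top nibble holds physical address bits 35..32; a nonzero value there
 * means the entry refers to device (I/O space) memory, not DRAM.
 */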
static inline int srmmu_device_memory(unsigned long x)
{
	return ((x & 0xF0000000) != 0);
}

static inline struct page *pmd_page(pmd_t pmd)
{
	if (srmmu_device_memory(pmd_val(pmd)))
		BUG();
	return pfn_to_page((pmd_val(pmd) & SRMMU_PTD_PMASK) >> (PAGE_SHIFT-4));
}

static inline unsigned long pgd_page_vaddr(pgd_t pgd)
{
	if (srmmu_device_memory(pgd_val(pgd))) {
		return ~0;
	} else {
		unsigned long v = pgd_val(pgd) & SRMMU_PTD_PMASK;
		return (unsigned long)__nocache_va(v << 4);
	}
}

static inline int pte_present(pte_t pte)
{
	return ((pte_val(pte) & SRMMU_ET_MASK) == SRMMU_ET_PTE);
}

static inline int pte_none(pte_t pte)
{
	return !pte_val(pte);
}

static inline void __pte_clear(pte_t *ptep)
{
	set_pte(ptep, __pte(0));
}

static inline void pte_clear(struct mm_struct *mm, unsigned long addr, pte_t *ptep)
{
	__pte_clear(ptep);
}

static inline int pmd_bad(pmd_t pmd)
{
	return (pmd_val(pmd) & SRMMU_ET_MASK) != SRMMU_ET_PTD;
}

static inline int pmd_present(pmd_t pmd)
{
	return ((pmd_val(pmd) & SRMMU_ET_MASK) == SRMMU_ET_PTD);
}

static inline int pmd_none(pmd_t pmd)
{
	return !pmd_val(pmd);
}

static inline void pmd_clear(pmd_t *pmdp)
{
	int i;

	/* One Linux pmd entry is backed by several hardware page table
	 * descriptors; clear every one of them.
	 */
	for (i = 0; i < PTRS_PER_PTE/SRMMU_REAL_PTRS_PER_PTE; i++)
		set_pte((pte_t *)&pmdp->pmdv[i], __pte(0));
}

static inline int pgd_none(pgd_t pgd)
{
	return !(pgd_val(pgd) & 0xFFFFFFF);
}

static inline int pgd_bad(pgd_t pgd)
{
	return (pgd_val(pgd) & SRMMU_ET_MASK) != SRMMU_ET_PTD;
}

static inline int pgd_present(pgd_t pgd)
{
	return ((pgd_val(pgd) & SRMMU_ET_MASK) == SRMMU_ET_PTD);
}

static inline void pgd_clear(pgd_t *pgdp)
{
	set_pte((pte_t *)pgdp, __pte(0));
}

/*
 * The following only work if pte_present() is true.
 * Undefined behaviour if not..
 */
static inline int pte_write(pte_t pte)
{
	return pte_val(pte) & SRMMU_WRITE;
}

static inline int pte_dirty(pte_t pte)
{
	return pte_val(pte) & SRMMU_DIRTY;
}

static inline int pte_young(pte_t pte)
{
	return pte_val(pte) & SRMMU_REF;
}

/*
 * The following only work if pte_present() is not true.
 */
static inline int pte_file(pte_t pte)
{
	return pte_val(pte) & SRMMU_FILE;
}

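/* sparc32 has no "special" pte bit, so pte_special() is constantly false
 * and pte_mkspecial() below is a no-op.
 */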
static inline int pte_special(pte_t pte)
{
	return 0;
}

static inline pte_t pte_wrprotect(pte_t pte)
{
	return __pte(pte_val(pte) & ~SRMMU_WRITE);
}

static inline pte_t pte_mkclean(pte_t pte)
{
	return __pte(pte_val(pte) & ~SRMMU_DIRTY);
}

static inline pte_t pte_mkold(pte_t pte)
{
	return __pte(pte_val(pte) & ~SRMMU_REF);
}

static inline pte_t pte_mkwrite(pte_t pte)
{
	return __pte(pte_val(pte) | SRMMU_WRITE);
}

static inline pte_t pte_mkdirty(pte_t pte)
{
	return __pte(pte_val(pte) | SRMMU_DIRTY);
}

static inline pte_t pte_mkyoung(pte_t pte)
{
	return __pte(pte_val(pte) | SRMMU_REF);
}

#define pte_mkspecial(pte)	(pte)

#define pfn_pte(pfn, prot)	mk_pte(pfn_to_page(pfn), prot)

static inline unsigned long pte_pfn(pte_t pte)
{
	if (srmmu_device_memory(pte_val(pte))) {
		/* Just return something that will cause
		 * pfn_valid() to return false.  This makes
		 * copy_one_pte() just copy the PTE over
		 * directly.
		 */
		return ~0UL;
	}
	return (pte_val(pte) & SRMMU_PTE_PMASK) >> (PAGE_SHIFT-4);
}

#define pte_page(pte)	pfn_to_page(pte_pfn(pte))

/*
 * Conversion functions: convert a page and protection to a page entry,
 * and a page entry and page directory to the page they refer to.
 */
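/* An SRMMU entry holds the 36-bit physical address shifted right by 4,
 * leaving the low bits for the access/type flags; hence the ">> 4" and
 * "<< (PAGE_SHIFT-4)" conversions in the helpers below.
 */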
static inline pte_t mk_pte(struct page *page, pgprot_t pgprot)
{
	return __pte((page_to_pfn(page) << (PAGE_SHIFT-4)) | pgprot_val(pgprot));
}

static inline pte_t mk_pte_phys(unsigned long page, pgprot_t pgprot)
{
	return __pte(((page) >> 4) | pgprot_val(pgprot));
}

static inline pte_t mk_pte_io(unsigned long page, pgprot_t pgprot, int space)
{
	return __pte(((page) >> 4) | (space << 28) | pgprot_val(pgprot));
}

#define pgprot_noncached pgprot_noncached
static inline pgprot_t pgprot_noncached(pgprot_t prot)
{
	prot &= ~__pgprot(SRMMU_CACHE);
	return prot;
}

static pte_t pte_modify(pte_t pte, pgprot_t newprot) __attribute_const__;
static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
{
	return __pte((pte_val(pte) & SRMMU_CHG_MASK) |
		pgprot_val(newprot));
}

#define pgd_index(address) ((address) >> PGDIR_SHIFT)

/* to find an entry in a page-table-directory */
#define pgd_offset(mm, address) ((mm)->pgd + pgd_index(address))

/* to find an entry in a kernel page-table-directory */
#define pgd_offset_k(address) pgd_offset(&init_mm, address)

/* Find an entry in the second-level page table.. */
static inline pmd_t *pmd_offset(pgd_t * dir, unsigned long address)
{
	return (pmd_t *) pgd_page_vaddr(*dir) +
		((address >> PMD_SHIFT) & (PTRS_PER_PMD - 1));
}

/* Find an entry in the third-level page table.. */
pte_t *pte_offset_kernel(pmd_t * dir, unsigned long address);
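/* A kernel-address lookup chains the three levels, roughly:
 *
 *	pgd_t *pgd = pgd_offset_k(addr);
 *	pmd_t *pmd = pmd_offset(pgd, addr);
 *	pte_t *pte = pte_offset_kernel(pmd, addr);
 *
 * with the usual caveat that each level must be checked with
 * p?d_none()/p?d_bad() before descending.
 */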

/*
 * This shortcut works on sun4m (and sun4d) because the nocache area is static.
 */
#define pte_offset_map(d, a)	pte_offset_kernel(d,a)
#define pte_unmap(pte)		do{}while(0)

struct seq_file;
void mmu_info(struct seq_file *m);

/* Fault handler stuff... */
#define FAULT_CODE_PROT		0x1
#define FAULT_CODE_WRITE	0x2
#define FAULT_CODE_USER		0x4

#define update_mmu_cache(vma, address, ptep) do { } while (0)

void srmmu_mapiorange(unsigned int bus, unsigned long xpa,
		      unsigned long xva, unsigned int len);
void srmmu_unmapiorange(unsigned long virt_addr, unsigned int len);

/* Encode and de-code a swap entry.  The swap type and page offset are
 * packed into a non-present pte using the SRMMU_SWP_* shifts and masks,
 * so a swapped-out entry never looks valid to pte_present().
 */
static inline unsigned long __swp_type(swp_entry_t entry)
{
	return (entry.val >> SRMMU_SWP_TYPE_SHIFT) & SRMMU_SWP_TYPE_MASK;
}

static inline unsigned long __swp_offset(swp_entry_t entry)
{
	return (entry.val >> SRMMU_SWP_OFF_SHIFT) & SRMMU_SWP_OFF_MASK;
}

static inline swp_entry_t __swp_entry(unsigned long type, unsigned long offset)
{
	return (swp_entry_t) {
		(type & SRMMU_SWP_TYPE_MASK) << SRMMU_SWP_TYPE_SHIFT
		| (offset & SRMMU_SWP_OFF_MASK) << SRMMU_SWP_OFF_SHIFT };
}
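/* Round trip: for type/offset values that fit the SRMMU_SWP_*_MASK
 * fields, __swp_type(__swp_entry(type, off)) == type and
 * __swp_offset(__swp_entry(type, off)) == off.
 */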

#define __pte_to_swp_entry(pte)	((swp_entry_t) { pte_val(pte) })
#define __swp_entry_to_pte(x)	((pte_t) { (x).val })

/* file-offset-in-pte helpers */
static inline unsigned long pte_to_pgoff(pte_t pte)
{
	return pte_val(pte) >> SRMMU_PTE_FILE_SHIFT;
}

static inline pte_t pgoff_to_pte(unsigned long pgoff)
{
	return __pte((pgoff << SRMMU_PTE_FILE_SHIFT) | SRMMU_FILE);
}

/*
 * This is made a constant because mm/fremap.c required a constant.
 */
#define PTE_FILE_MAX_BITS 24

static inline unsigned long
__get_phys (unsigned long addr)
{
	switch (sparc_cpu_model){
	case sun4m:
	case sun4d:
		return ((srmmu_get_pte (addr) & 0xffffff00) << 4);
	default:
		return 0;
	}
}

static inline int
__get_iospace (unsigned long addr)
{
	switch (sparc_cpu_model){
	case sun4m:
	case sun4d:
		return (srmmu_get_pte (addr) >> 28);
	default:
		return -1;
	}
}

extern unsigned long *sparc_valid_addr_bitmap;

/* Needs to be defined here and not in linux/mm.h, as it is arch dependent.
 * One bit in the bitmap covers 1MB (1 << 20) of physical address space.
 */
#define kern_addr_valid(addr) \
	(test_bit(__pa((unsigned long)(addr))>>20, sparc_valid_addr_bitmap))

/*
 * For sparc32&64, the pfn in io_remap_pfn_range() carries <iospace> in
 * its high 4 bits.  These macros/functions put it there or get it from there.
 */
#define MK_IOSPACE_PFN(space, pfn)	(pfn | (space << (BITS_PER_LONG - 4)))
#define GET_IOSPACE(pfn)		(pfn >> (BITS_PER_LONG - 4))
#define GET_PFN(pfn)			(pfn & 0x0fffffffUL)

extern int remap_pfn_range(struct vm_area_struct *, unsigned long, unsigned long,
			   unsigned long, pgprot_t);

static inline int io_remap_pfn_range(struct vm_area_struct *vma,
				     unsigned long from, unsigned long pfn,
				     unsigned long size, pgprot_t prot)
{
	unsigned long long offset, space, phys_base;

	offset = ((unsigned long long) GET_PFN(pfn)) << PAGE_SHIFT;
	space = GET_IOSPACE(pfn);
	phys_base = offset | (space << 32ULL);

	return remap_pfn_range(vma, from, phys_base >> PAGE_SHIFT, size, prot);
}

#define __HAVE_ARCH_PTEP_SET_ACCESS_FLAGS
#define ptep_set_access_flags(__vma, __address, __ptep, __entry, __dirty) \
({									  \
	int __changed = !pte_same(*(__ptep), __entry);			  \
	if (__changed) {						  \
		set_pte_at((__vma)->vm_mm, (__address), __ptep, __entry); \
		flush_tlb_page(__vma, __address);			  \
	}								  \
	__changed;							  \
})

#include <asm-generic/pgtable.h>

#endif /* !(__ASSEMBLY__) */

#define VMALLOC_START	_AC(0xfe600000,UL)
#define VMALLOC_END	_AC(0xffc00000,UL)

/* We provide our own get_unmapped_area to cope with VA holes for userland */
#define HAVE_ARCH_UNMAPPED_AREA

/*
 * No page table caches to initialise
 */
#define pgtable_cache_init()	do { } while (0)

#endif /* !(_SPARC_PGTABLE_H) */