#ifndef _ASM_POWERPC_HUGETLB_H
#define _ASM_POWERPC_HUGETLB_H

#include <asm/page.h>

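/*
 * Nonzero if the given range lies in an address region that may only
 * contain hugepages (on 64-bit server CPUs this is derived from the
 * MMU slice map).
 */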
int is_hugepage_only_range(struct mm_struct *mm, unsigned long addr,
			   unsigned long len);

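/*
 * Free the page-table pages backing a hugepage region; the hugetlb
 * counterpart of free_pgd_range(), bounded by floor and ceiling.
 */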
void hugetlb_free_pgd_range(struct mmu_gather *tlb, unsigned long addr,
			    unsigned long end, unsigned long floor,
			    unsigned long ceiling);

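/* Install a huge PTE for the hugepage mapped at @addr. */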
void set_huge_pte_at(struct mm_struct *mm, unsigned long addr,
		     pte_t *ptep, pte_t pte);

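/* Atomically clear a huge PTE and return the value it held. */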
pte_t huge_ptep_get_and_clear(struct mm_struct *mm, unsigned long addr,
			      pte_t *ptep);

/*
 * The version of vma_mmu_pagesize() in arch/powerpc/mm/hugetlbpage.c needs
 * to override the version in mm/hugetlb.c.
 */
#define vma_mmu_pagesize vma_mmu_pagesize

/*
 * If the arch doesn't supply something else, assume that hugepage
 * size aligned regions are ok without further preparation.
 */
static inline int prepare_hugepage_range(struct file *file,
			unsigned long addr, unsigned long len)
{
	struct hstate *h = hstate_file(file);

	if (len & ~huge_page_mask(h))
		return -EINVAL;
	if (addr & ~huge_page_mask(h))
		return -EINVAL;
	return 0;
}

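/*
 * Called before the generic code prefaults hugepages; powerpc needs no
 * extra setup here.
 */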
static inline void hugetlb_prefault_arch_hook(struct mm_struct *mm)
{
}

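/*
 * Empty on powerpc, presumably because stale hash-table entries are
 * invalidated when the huge PTE itself is cleared or updated, so no
 * separate flush is required here.
 */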
static inline void huge_ptep_clear_flush(struct vm_area_struct *vma,
					 unsigned long addr, pte_t *ptep)
{
}

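/*
 * Huge PTEs share the normal pte_t format on powerpc, so the generic
 * PTE helpers can be reused directly.
 */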
static inline int huge_pte_none(pte_t pte)
{
	return pte_none(pte);
}

static inline pte_t huge_pte_wrprotect(pte_t pte)
{
	return pte_wrprotect(pte);
}

static inline int huge_ptep_set_access_flags(struct vm_area_struct *vma,
					     unsigned long addr, pte_t *ptep,
					     pte_t pte, int dirty)
{
	return ptep_set_access_flags(vma, addr, ptep, pte, dirty);
}

static inline pte_t huge_ptep_get(pte_t *ptep)
{
	return *ptep;
}

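/*
 * No per-page work is needed when hugepages are allocated or freed;
 * these hooks exist for architectures (s390, for example) where
 * arch_prepare_hugepage() can fail.
 */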
static inline int arch_prepare_hugepage(struct page *page)
{
	return 0;
}

static inline void arch_release_hugepage(struct page *page)
{
}

#endif /* _ASM_POWERPC_HUGETLB_H */