/*
 * IA-64 Huge TLB Page Support for Kernel.
 *
 * Copyright (C) 2002-2004 Rohit Seth <rohit.seth@intel.com>
 * Copyright (C) 2003-2004 Ken Chen <kenneth.w.chen@intel.com>
 *
 * Sep, 2003: add numa support
 * Feb, 2004: dynamic hugetlb page size via boot parameter
 */

#include <linux/init.h>
#include <linux/fs.h>
#include <linux/mm.h>
#include <linux/hugetlb.h>
#include <linux/pagemap.h>
#include <linux/slab.h>
#include <linux/sysctl.h>
#include <linux/log2.h>
#include <asm/mman.h>
#include <asm/pgalloc.h>
#include <asm/tlb.h>
#include <asm/tlbflush.h>

unsigned int hpage_shift = HPAGE_SHIFT_DEFAULT;

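/*
 * Walk and, where needed, allocate the pud/pmd/pte levels mapping the
 * huge page at @addr.  The address is first rescaled by
 * htlbpage_to_page() so that the sparse huge page region collapses onto
 * a compact range covered by ordinary page tables.
 */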
pte_t *
huge_pte_alloc(struct mm_struct *mm, unsigned long addr, unsigned long sz)
{
	unsigned long taddr = htlbpage_to_page(addr);
	pgd_t *pgd;
	pud_t *pud;
	pmd_t *pmd;
	pte_t *pte = NULL;

	pgd = pgd_offset(mm, taddr);
	pud = pud_alloc(mm, pgd, taddr);
	if (pud) {
		pmd = pmd_alloc(mm, pud, taddr);
		if (pmd)
			pte = pte_alloc_map(mm, pmd, taddr);
	}
	return pte;
}

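/*
 * Look up the pte for the huge page at @addr without allocating;
 * returns NULL if any level of the (rescaled) walk is not present.
 */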
pte_t *
huge_pte_offset(struct mm_struct *mm, unsigned long addr)
{
	unsigned long taddr = htlbpage_to_page(addr);
	pgd_t *pgd;
	pud_t *pud;
	pmd_t *pmd;
	pte_t *pte = NULL;

	pgd = pgd_offset(mm, taddr);
	if (pgd_present(*pgd)) {
		pud = pud_offset(pgd, taddr);
		if (pud_present(*pud)) {
			pmd = pmd_offset(pud, taddr);
			if (pmd_present(*pmd))
				pte = pte_offset_map(pmd, taddr);
		}
	}

	return pte;
}

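/*
 * ia64 does not share hugetlb page table pages, so there is never
 * anything to unshare.
 */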
int huge_pmd_unshare(struct mm_struct *mm, unsigned long *addr, pte_t *ptep)
{
	return 0;
}

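/*
 * On ia64 a huge pte is otherwise a normal-format pte, so marking it
 * present is all that is needed here.
 */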
#define mk_pte_huge(entry) { pte_val(entry) |= _PAGE_P; }

/*
 * No actual preparation is needed; just check that the length and
 * address are huge page aligned and that the address lies in the
 * huge page region.
 */
int prepare_hugepage_range(struct file *file,
			unsigned long addr, unsigned long len)
{
	if (len & ~HPAGE_MASK)
		return -EINVAL;
	if (addr & ~HPAGE_MASK)
		return -EINVAL;
	if (REGION_NUMBER(addr) != RGN_HPAGE)
		return -EINVAL;

	return 0;
}

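/*
 * Resolve an address in the huge page region to its struct page.  The
 * result is offset to the constituent PAGE_SIZE page within the huge
 * page, since callers operate on base page units.
 */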
struct page *follow_huge_addr(struct mm_struct *mm, unsigned long addr, int write)
{
	struct page *page;
	pte_t *ptep;

	if (REGION_NUMBER(addr) != RGN_HPAGE)
		return ERR_PTR(-EINVAL);

	ptep = huge_pte_offset(mm, addr);
	if (!ptep || pte_none(*ptep))
		return NULL;
	page = pte_page(*ptep);
	page += ((addr & ~HPAGE_MASK) >> PAGE_SHIFT);
	return page;
}
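
/*
 * Huge pages on ia64 live in their own region and are mapped there by
 * ordinary-format page tables, so the generic page table walker never
 * encounters huge entries at the pmd or pud level.
 */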
int pmd_huge(pmd_t pmd)
{
	return 0;
}

int pud_huge(pud_t pud)
{
	return 0;
}

struct page *
follow_huge_pmd(struct mm_struct *mm, unsigned long address, pmd_t *pmd, int write)
{
	return NULL;
}

void hugetlb_free_pgd_range(struct mmu_gather *tlb,
			unsigned long addr, unsigned long end,
			unsigned long floor, unsigned long ceiling)
{
	/*
	 * This is called to free hugetlb page tables.
	 *
	 * The offset of these addresses from the base of the hugetlb
	 * region must be scaled down by HPAGE_SIZE/PAGE_SIZE so that
	 * the standard free_pgd_range() will free the right page tables.
	 *
	 * If floor and ceiling are also in the hugetlb region, they
	 * must likewise be scaled down; but if outside, left unchanged.
	 */
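	/*
	 * For example, with 16KB base pages (PAGE_SHIFT == 14) and the
	 * default 256MB huge pages (hpage_shift == 28) -- other
	 * configurations differ -- addresses are scaled down by
	 * HPAGE_SIZE/PAGE_SIZE == 1 << (28 - 14) == 16384.
	 */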

	addr = htlbpage_to_page(addr);
	end = htlbpage_to_page(end);
	if (REGION_NUMBER(floor) == RGN_HPAGE)
		floor = htlbpage_to_page(floor);
	if (REGION_NUMBER(ceiling) == RGN_HPAGE)
		ceiling = htlbpage_to_page(ceiling);

	free_pgd_range(tlb, addr, end, floor, ceiling);
}

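/*
 * Find a free, huge page aligned range in the huge page region by a
 * simple first-fit walk of the VMA list.
 */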
unsigned long hugetlb_get_unmapped_area(struct file *file, unsigned long addr, unsigned long len,
		unsigned long pgoff, unsigned long flags)
{
	struct vm_area_struct *vmm;

	if (len > RGN_MAP_LIMIT)
		return -ENOMEM;
	if (len & ~HPAGE_MASK)
		return -EINVAL;

	/* Handle MAP_FIXED */
	if (flags & MAP_FIXED) {
		if (prepare_hugepage_range(file, addr, len))
			return -EINVAL;
		return addr;
	}

	/* This code assumes that RGN_HPAGE != 0. */
	if ((REGION_NUMBER(addr) != RGN_HPAGE) || (addr & (HPAGE_SIZE - 1)))
		addr = HPAGE_REGION_BASE;
	else
		addr = ALIGN(addr, HPAGE_SIZE);
	for (vmm = find_vma(current->mm, addr); ; vmm = vmm->vm_next) {
		/* At this point: (!vmm || addr < vmm->vm_end). */
		if (REGION_OFFSET(addr) + len > RGN_MAP_LIMIT)
			return -ENOMEM;
		if (!vmm || (addr + len) <= vmm->vm_start)
			return addr;
		addr = ALIGN(vmm->vm_end, HPAGE_SIZE);
	}
}

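/*
 * Parse the "hugepagesz=" boot parameter.  The requested size must be a
 * power of two, larger than a base page, small enough for the buddy
 * allocator, and an insertable TLB page size as reported by PAL.
 */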
static int __init hugetlb_setup_sz(char *str)
{
	u64 tr_pages;
	unsigned long long size;

	if (ia64_pal_vm_page_size(&tr_pages, NULL) != 0)
		/*
		 * shouldn't happen, but just in case.
		 */
		tr_pages = 0x15557000UL;

	size = memparse(str, &str);
	if (*str || !is_power_of_2(size) || !(tr_pages & size) ||
		size <= PAGE_SIZE ||
		size >= (1UL << PAGE_SHIFT << MAX_ORDER)) {
		printk(KERN_WARNING "Invalid huge page size specified\n");
		return 1;
	}

	hpage_shift = __ffs(size);
	/*
	 * The boot CPU already executed ia64_mmu_init() and set the huge
	 * page region up with HPAGE_SHIFT_DEFAULT; override that here with
	 * the new page shift.
	 */
	ia64_set_rr(HPAGE_REGION_BASE, hpage_shift << 2);
	return 0;
}
early_param("hugepagesz", hugetlb_setup_sz);
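
/*
 * Example (illustrative; actual support depends on the CPU): booting
 * with "hugepagesz=64M" selects 64MB huge pages, provided PAL reports
 * 64MB as an insertable TLB page size.
 */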