// SPDX-License-Identifier: GPL-2.0
/*
 * Re-map IO memory to kernel address space so that we can access it.
 * This is needed for high PCI addresses that aren't mapped in the
 * 640k-1MB IO memory area on PCs.
 *
 * (C) Copyright 1995 1996 Linus Torvalds
 */
#include <linux/vmalloc.h>
#include <linux/mm.h>
#include <linux/sched.h>
#include <linux/io.h>
#include <linux/export.h>
#include <asm/cacheflush.h>

#ifdef CONFIG_HAVE_ARCH_HUGE_VMAP
static int __read_mostly ioremap_p4d_capable;
static int __read_mostly ioremap_pud_capable;
static int __read_mostly ioremap_pmd_capable;
static int __read_mostly ioremap_huge_disabled;

static int __init set_nohugeiomap(char *str)
{
	ioremap_huge_disabled = 1;
	return 0;
}
early_param("nohugeiomap", set_nohugeiomap);

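/*
 * Example: booting with "nohugeiomap" on the kernel command line sets
 * ioremap_huge_disabled, so ioremap_huge_init() below leaves every
 * ioremap_*_capable flag at 0 and all ioremap mappings fall back to
 * base-page (PTE) granularity.
 */
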
void __init ioremap_huge_init(void)
{
	if (!ioremap_huge_disabled) {
		if (arch_ioremap_p4d_supported())
			ioremap_p4d_capable = 1;
		if (arch_ioremap_pud_supported())
			ioremap_pud_capable = 1;
		if (arch_ioremap_pmd_supported())
			ioremap_pmd_capable = 1;
	}
}

static inline int ioremap_p4d_enabled(void)
{
	return ioremap_p4d_capable;
}

static inline int ioremap_pud_enabled(void)
{
	return ioremap_pud_capable;
}

static inline int ioremap_pmd_enabled(void)
{
	return ioremap_pmd_capable;
}

#else	/* !CONFIG_HAVE_ARCH_HUGE_VMAP */
static inline int ioremap_p4d_enabled(void) { return 0; }
static inline int ioremap_pud_enabled(void) { return 0; }
static inline int ioremap_pmd_enabled(void) { return 0; }
#endif	/* CONFIG_HAVE_ARCH_HUGE_VMAP */

static int ioremap_pte_range(pmd_t *pmd, unsigned long addr,
		unsigned long end, phys_addr_t phys_addr, pgprot_t prot,
		pgtbl_mod_mask *mask)
{
	pte_t *pte;
	u64 pfn;

	pfn = phys_addr >> PAGE_SHIFT;
	pte = pte_alloc_kernel_track(pmd, addr, mask);
	if (!pte)
		return -ENOMEM;
	do {
		BUG_ON(!pte_none(*pte));
		set_pte_at(&init_mm, addr, pte, pfn_pte(pfn, prot));
		pfn++;
	} while (pte++, addr += PAGE_SIZE, addr != end);
	*mask |= PGTBL_PTE_MODIFIED;
	return 0;
}

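/*
 * Worked example for ioremap_pte_range() (assuming 4 KiB pages, i.e.
 * PAGE_SHIFT == 12): mapping phys_addr 0x40000000 over a 0x3000-byte
 * range starts at pfn 0x40000 and installs three PTEs, for pfns
 * 0x40000, 0x40001 and 0x40002, one per page.
 */
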
static int ioremap_try_huge_pmd(pmd_t *pmd, unsigned long addr,
				unsigned long end, phys_addr_t phys_addr,
				pgprot_t prot)
{
	if (!ioremap_pmd_enabled())
		return 0;

	if ((end - addr) != PMD_SIZE)
		return 0;

	if (!IS_ALIGNED(addr, PMD_SIZE))
		return 0;

	if (!IS_ALIGNED(phys_addr, PMD_SIZE))
		return 0;

	if (pmd_present(*pmd) && !pmd_free_pte_page(pmd, addr))
		return 0;

	return pmd_set_huge(pmd, phys_addr, prot);
}

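/*
 * Worked example for ioremap_try_huge_pmd() (assuming x86-64, where
 * PMD_SIZE is 2 MiB): a request to map a 2 MiB-aligned phys_addr such
 * as 0x40000000 at a 2 MiB-aligned addr with end - addr == 0x200000
 * passes every check above and is installed as a single huge PMD via
 * pmd_set_huge().  Any other size or alignment makes the helper return
 * 0, and the caller falls back to ioremap_pte_range().  The PUD and
 * P4D variants below apply the same checks at their own granularity
 * (1 GiB for a PUD on x86-64).
 */
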
static inline int ioremap_pmd_range(pud_t *pud, unsigned long addr,
		unsigned long end, phys_addr_t phys_addr, pgprot_t prot,
		pgtbl_mod_mask *mask)
{
	pmd_t *pmd;
	unsigned long next;

	pmd = pmd_alloc_track(&init_mm, pud, addr, mask);
	if (!pmd)
		return -ENOMEM;
	do {
		next = pmd_addr_end(addr, end);

		if (ioremap_try_huge_pmd(pmd, addr, next, phys_addr, prot)) {
			*mask |= PGTBL_PMD_MODIFIED;
			continue;
		}

		if (ioremap_pte_range(pmd, addr, next, phys_addr, prot, mask))
			return -ENOMEM;
	} while (pmd++, phys_addr += (next - addr), addr = next, addr != end);
	return 0;
}

static int ioremap_try_huge_pud(pud_t *pud, unsigned long addr,
				unsigned long end, phys_addr_t phys_addr,
				pgprot_t prot)
{
	if (!ioremap_pud_enabled())
		return 0;

	if ((end - addr) != PUD_SIZE)
		return 0;

	if (!IS_ALIGNED(addr, PUD_SIZE))
		return 0;

	if (!IS_ALIGNED(phys_addr, PUD_SIZE))
		return 0;

	if (pud_present(*pud) && !pud_free_pmd_page(pud, addr))
		return 0;

	return pud_set_huge(pud, phys_addr, prot);
}

static inline int ioremap_pud_range(p4d_t *p4d, unsigned long addr,
		unsigned long end, phys_addr_t phys_addr, pgprot_t prot,
		pgtbl_mod_mask *mask)
{
	pud_t *pud;
	unsigned long next;

	pud = pud_alloc_track(&init_mm, p4d, addr, mask);
	if (!pud)
		return -ENOMEM;
	do {
		next = pud_addr_end(addr, end);

		if (ioremap_try_huge_pud(pud, addr, next, phys_addr, prot)) {
			*mask |= PGTBL_PUD_MODIFIED;
			continue;
		}

		if (ioremap_pmd_range(pud, addr, next, phys_addr, prot, mask))
			return -ENOMEM;
	} while (pud++, phys_addr += (next - addr), addr = next, addr != end);
	return 0;
}

static int ioremap_try_huge_p4d(p4d_t *p4d, unsigned long addr,
				unsigned long end, phys_addr_t phys_addr,
				pgprot_t prot)
{
	if (!ioremap_p4d_enabled())
		return 0;

	if ((end - addr) != P4D_SIZE)
		return 0;

	if (!IS_ALIGNED(addr, P4D_SIZE))
		return 0;

	if (!IS_ALIGNED(phys_addr, P4D_SIZE))
		return 0;

	if (p4d_present(*p4d) && !p4d_free_pud_page(p4d, addr))
		return 0;

	return p4d_set_huge(p4d, phys_addr, prot);
}

static inline int ioremap_p4d_range(pgd_t *pgd, unsigned long addr,
		unsigned long end, phys_addr_t phys_addr, pgprot_t prot,
		pgtbl_mod_mask *mask)
{
	p4d_t *p4d;
	unsigned long next;

	p4d = p4d_alloc_track(&init_mm, pgd, addr, mask);
	if (!p4d)
		return -ENOMEM;
	do {
		next = p4d_addr_end(addr, end);

		if (ioremap_try_huge_p4d(p4d, addr, next, phys_addr, prot)) {
			*mask |= PGTBL_P4D_MODIFIED;
			continue;
		}

		if (ioremap_pud_range(p4d, addr, next, phys_addr, prot, mask))
			return -ENOMEM;
	} while (p4d++, phys_addr += (next - addr), addr = next, addr != end);
	return 0;
}

int ioremap_page_range(unsigned long addr,
		       unsigned long end, phys_addr_t phys_addr, pgprot_t prot)
{
	pgd_t *pgd;
	unsigned long start;
	unsigned long next;
	int err;
	pgtbl_mod_mask mask = 0;

	might_sleep();
	BUG_ON(addr >= end);

	start = addr;
	pgd = pgd_offset_k(addr);
	do {
		next = pgd_addr_end(addr, end);
		err = ioremap_p4d_range(pgd, addr, next, phys_addr, prot,
					&mask);
		if (err)
			break;
	} while (pgd++, phys_addr += (next - addr), addr = next, addr != end);

	flush_cache_vmap(start, end);

	if (mask & ARCH_PAGE_TABLE_SYNC_MASK)
		arch_sync_kernel_mappings(start, end);

	return err;
}

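/*
 * Sketch of the common case (assuming x86-64 with 4-level paging,
 * where one PGD entry spans a 512 GiB PGDIR_SIZE region): a typical
 * ioremap of a few pages never crosses a PGD boundary, so the loop in
 * ioremap_page_range() runs a single iteration; any page-table levels
 * it had to allocate are recorded in 'mask' so that
 * arch_sync_kernel_mappings() can propagate them on architectures
 * that need it.
 */
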
#ifdef CONFIG_GENERIC_IOREMAP
void __iomem *ioremap_prot(phys_addr_t addr, size_t size, unsigned long prot)
{
	unsigned long offset, vaddr;
	phys_addr_t last_addr;
	struct vm_struct *area;

	/* Disallow wrap-around or zero size */
	last_addr = addr + size - 1;
	if (!size || last_addr < addr)
		return NULL;

	/* Page-align mappings */
	offset = addr & (~PAGE_MASK);
	addr -= offset;
	size = PAGE_ALIGN(size + offset);

	area = get_vm_area_caller(size, VM_IOREMAP,
			__builtin_return_address(0));
	if (!area)
		return NULL;
	vaddr = (unsigned long)area->addr;

	if (ioremap_page_range(vaddr, vaddr + size, addr, __pgprot(prot))) {
		free_vm_area(area);
		return NULL;
	}

	return (void __iomem *)(vaddr + offset);
}
EXPORT_SYMBOL(ioremap_prot);

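/*
 * Usage sketch (assuming the asm-generic <asm-generic/io.h> wrappers
 * that come with CONFIG_GENERIC_IOREMAP): plain ioremap() is a thin
 * wrapper that supplies the default I/O page protection, roughly:
 *
 *	static inline void __iomem *ioremap(phys_addr_t addr, size_t size)
 *	{
 *		return ioremap_prot(addr, size, _PAGE_IOREMAP);
 *	}
 */
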
void iounmap(volatile void __iomem *addr)
{
	vunmap((void *)((unsigned long)addr & PAGE_MASK));
}
EXPORT_SYMBOL(iounmap);
#endif /* CONFIG_GENERIC_IOREMAP */
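
/*
 * Usage sketch (illustrative values only): a driver maps a device's
 * MMIO window with ioremap() and tears the mapping down with
 * iounmap().  The physical base, size and register offset below are
 * made-up for the example:
 *
 *	void __iomem *regs = ioremap(0xfed00000, 0x1000);
 *
 *	if (!regs)
 *		return -ENOMEM;
 *	writel(0x1, regs + 0x10);
 *	iounmap(regs);
 */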