// SPDX-License-Identifier: GPL-2.0
/*
 * Re-map IO memory to kernel address space so that we can access it.
 * This is needed for high PCI addresses that aren't mapped in the
 * 640k-1MB IO memory area on PCs.
 *
 * (C) Copyright 1995, 1996 Linus Torvalds
 */
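/*
 * A typical caller maps a device's register window and accesses it
 * only through the returned __iomem cookie, along these lines (the
 * names phys_base, window_size and REG_STATUS are illustrative):
 *
 *	void __iomem *regs = ioremap(phys_base, window_size);
 *	if (!regs)
 *		return -ENOMEM;
 *	status = readl(regs + REG_STATUS);
 *	iounmap(regs);
 */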
#include <linux/vmalloc.h>
#include <linux/mm.h>
#include <linux/sched.h>
#include <linux/io.h>
#include <linux/export.h>
#include <asm/cacheflush.h>
#include <asm/pgtable.h>

#ifdef CONFIG_HAVE_ARCH_HUGE_VMAP
static int __read_mostly ioremap_p4d_capable;
static int __read_mostly ioremap_pud_capable;
static int __read_mostly ioremap_pmd_capable;
static int __read_mostly ioremap_huge_disabled;

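/*
 * "nohugeiomap" on the kernel command line forces ioremap() to use
 * base page mappings only.
 */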
static int __init set_nohugeiomap(char *str)
{
	ioremap_huge_disabled = 1;
	return 0;
}
early_param("nohugeiomap", set_nohugeiomap);

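/*
 * Record, once at boot, which page table levels the architecture can
 * map with huge entries for ioremap(), unless "nohugeiomap" was given.
 */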
void __init ioremap_huge_init(void)
{
	if (!ioremap_huge_disabled) {
		if (arch_ioremap_p4d_supported())
			ioremap_p4d_capable = 1;
		if (arch_ioremap_pud_supported())
			ioremap_pud_capable = 1;
		if (arch_ioremap_pmd_supported())
			ioremap_pmd_capable = 1;
	}
}

static inline int ioremap_p4d_enabled(void)
{
	return ioremap_p4d_capable;
}

static inline int ioremap_pud_enabled(void)
{
	return ioremap_pud_capable;
}

static inline int ioremap_pmd_enabled(void)
{
	return ioremap_pmd_capable;
}

#else	/* !CONFIG_HAVE_ARCH_HUGE_VMAP */
static inline int ioremap_p4d_enabled(void) { return 0; }
static inline int ioremap_pud_enabled(void) { return 0; }
static inline int ioremap_pmd_enabled(void) { return 0; }
#endif	/* CONFIG_HAVE_ARCH_HUGE_VMAP */

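/*
 * Map [addr, end) to phys_addr with base page PTEs.  Every PTE in the
 * range must be unused, i.e. the caller hands in freshly reserved
 * vmalloc address space.
 */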
static int ioremap_pte_range(pmd_t *pmd, unsigned long addr,
		unsigned long end, phys_addr_t phys_addr, pgprot_t prot,
		pgtbl_mod_mask *mask)
{
	pte_t *pte;
	u64 pfn;

	pfn = phys_addr >> PAGE_SHIFT;
	pte = pte_alloc_kernel_track(pmd, addr, mask);
	if (!pte)
		return -ENOMEM;
	do {
		BUG_ON(!pte_none(*pte));
		set_pte_at(&init_mm, addr, pte, pfn_pte(pfn, prot));
		pfn++;
	} while (pte++, addr += PAGE_SIZE, addr != end);
	*mask |= PGTBL_PTE_MODIFIED;
	return 0;
}

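/*
 * Try to map [addr, end) with a single huge PMD entry.  A zero return
 * forces the fallback to PTE mappings: huge PMDs are disabled, the
 * range is not exactly one PMD_SIZE block with matching virtual and
 * physical alignment, or a stale PTE page table cannot be freed.
 * Otherwise the result of pmd_set_huge() is returned.
 */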
static int ioremap_try_huge_pmd(pmd_t *pmd, unsigned long addr,
				unsigned long end, phys_addr_t phys_addr,
				pgprot_t prot)
{
	if (!ioremap_pmd_enabled())
		return 0;

	if ((end - addr) != PMD_SIZE)
		return 0;

	if (!IS_ALIGNED(addr, PMD_SIZE))
		return 0;

	if (!IS_ALIGNED(phys_addr, PMD_SIZE))
		return 0;

	if (pmd_present(*pmd) && !pmd_free_pte_page(pmd, addr))
		return 0;

	return pmd_set_huge(pmd, phys_addr, prot);
}

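/*
 * Allocate and walk the PMD level for [addr, end), installing a huge
 * PMD where ioremap_try_huge_pmd() succeeds and recursing into
 * ioremap_pte_range() otherwise.
 */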
static inline int ioremap_pmd_range(pud_t *pud, unsigned long addr,
		unsigned long end, phys_addr_t phys_addr, pgprot_t prot,
		pgtbl_mod_mask *mask)
{
	pmd_t *pmd;
	unsigned long next;

	pmd = pmd_alloc_track(&init_mm, pud, addr, mask);
	if (!pmd)
		return -ENOMEM;
	do {
		next = pmd_addr_end(addr, end);

		if (ioremap_try_huge_pmd(pmd, addr, next, phys_addr, prot)) {
			*mask |= PGTBL_PMD_MODIFIED;
			continue;
		}

		if (ioremap_pte_range(pmd, addr, next, phys_addr, prot, mask))
			return -ENOMEM;
	} while (pmd++, phys_addr += (next - addr), addr = next, addr != end);
	return 0;
}

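/*
 * Like ioremap_try_huge_pmd(), but for a PUD-sized huge entry.
 */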
static int ioremap_try_huge_pud(pud_t *pud, unsigned long addr,
				unsigned long end, phys_addr_t phys_addr,
				pgprot_t prot)
{
	if (!ioremap_pud_enabled())
		return 0;

	if ((end - addr) != PUD_SIZE)
		return 0;

	if (!IS_ALIGNED(addr, PUD_SIZE))
		return 0;

	if (!IS_ALIGNED(phys_addr, PUD_SIZE))
		return 0;

	if (pud_present(*pud) && !pud_free_pmd_page(pud, addr))
		return 0;

	return pud_set_huge(pud, phys_addr, prot);
}

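/*
 * PUD-level counterpart of ioremap_pmd_range().
 */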
static inline int ioremap_pud_range(p4d_t *p4d, unsigned long addr,
		unsigned long end, phys_addr_t phys_addr, pgprot_t prot,
		pgtbl_mod_mask *mask)
{
	pud_t *pud;
	unsigned long next;

	pud = pud_alloc_track(&init_mm, p4d, addr, mask);
	if (!pud)
		return -ENOMEM;
	do {
		next = pud_addr_end(addr, end);

		if (ioremap_try_huge_pud(pud, addr, next, phys_addr, prot)) {
			*mask |= PGTBL_PUD_MODIFIED;
			continue;
		}

		if (ioremap_pmd_range(pud, addr, next, phys_addr, prot, mask))
			return -ENOMEM;
	} while (pud++, phys_addr += (next - addr), addr = next, addr != end);
	return 0;
}

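/*
 * Like ioremap_try_huge_pmd(), but for a P4D-sized huge entry.
 */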
static int ioremap_try_huge_p4d(p4d_t *p4d, unsigned long addr,
				unsigned long end, phys_addr_t phys_addr,
				pgprot_t prot)
{
	if (!ioremap_p4d_enabled())
		return 0;

	if ((end - addr) != P4D_SIZE)
		return 0;

	if (!IS_ALIGNED(addr, P4D_SIZE))
		return 0;

	if (!IS_ALIGNED(phys_addr, P4D_SIZE))
		return 0;

	if (p4d_present(*p4d) && !p4d_free_pud_page(p4d, addr))
		return 0;

	return p4d_set_huge(p4d, phys_addr, prot);
}

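/*
 * P4D-level counterpart of ioremap_pmd_range(), entered from
 * ioremap_page_range().
 */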
static inline int ioremap_p4d_range(pgd_t *pgd, unsigned long addr,
		unsigned long end, phys_addr_t phys_addr, pgprot_t prot,
		pgtbl_mod_mask *mask)
{
	p4d_t *p4d;
	unsigned long next;

	p4d = p4d_alloc_track(&init_mm, pgd, addr, mask);
	if (!p4d)
		return -ENOMEM;
	do {
		next = p4d_addr_end(addr, end);

		if (ioremap_try_huge_p4d(p4d, addr, next, phys_addr, prot)) {
			*mask |= PGTBL_P4D_MODIFIED;
			continue;
		}

		if (ioremap_pud_range(p4d, addr, next, phys_addr, prot, mask))
			return -ENOMEM;
	} while (p4d++, phys_addr += (next - addr), addr = next, addr != end);
	return 0;
}

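/**
 * ioremap_page_range - map a physical range into the kernel page tables
 * @addr: start of the virtual address range
 * @end: end of the virtual address range
 * @phys_addr: physical address to map
 * @prot: page protection for the mapping
 *
 * Walks the kernel page tables from the PGD down, using huge mappings
 * where the architecture and alignment allow it and base pages
 * otherwise, then notifies the architecture of the modified page table
 * levels where it requires synchronization.
 */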
int ioremap_page_range(unsigned long addr,
		       unsigned long end, phys_addr_t phys_addr, pgprot_t prot)
{
	pgd_t *pgd;
	unsigned long start;
	unsigned long next;
	int err;
	pgtbl_mod_mask mask = 0;

	might_sleep();
	BUG_ON(addr >= end);

	start = addr;
	pgd = pgd_offset_k(addr);
	do {
		next = pgd_addr_end(addr, end);
		err = ioremap_p4d_range(pgd, addr, next, phys_addr, prot,
					&mask);
		if (err)
			break;
	} while (pgd++, phys_addr += (next - addr), addr = next, addr != end);

	flush_cache_vmap(start, end);

	if (mask & ARCH_PAGE_TABLE_SYNC_MASK)
		arch_sync_kernel_mappings(start, end);

	return err;
}

#ifdef CONFIG_GENERIC_IOREMAP
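/*
 * Generic ioremap_prot(): reserve a VM_IOREMAP area in vmalloc space
 * and map the page-aligned physical range into it.  The sub-page
 * offset of @addr is carried over into the returned cookie.
 */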
void __iomem *ioremap_prot(phys_addr_t addr, size_t size, unsigned long prot)
{
	unsigned long offset, vaddr;
	phys_addr_t last_addr;
	struct vm_struct *area;

	/* Disallow wrap-around or zero size */
	last_addr = addr + size - 1;
	if (!size || last_addr < addr)
		return NULL;

	/* Page-align mappings */
	offset = addr & (~PAGE_MASK);
	addr -= offset;
	size = PAGE_ALIGN(size + offset);

	area = get_vm_area_caller(size, VM_IOREMAP,
			__builtin_return_address(0));
	if (!area)
		return NULL;
	vaddr = (unsigned long)area->addr;

	if (ioremap_page_range(vaddr, vaddr + size, addr, __pgprot(prot))) {
		free_vm_area(area);
		return NULL;
	}

	return (void __iomem *)(vaddr + offset);
}
EXPORT_SYMBOL(ioremap_prot);

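/*
 * Generic iounmap(): vunmap() wants the page-aligned base address, so
 * mask off the sub-page offset that ioremap_prot() folded into the
 * cookie.
 */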
void iounmap(volatile void __iomem *addr)
{
	vunmap((void *)((unsigned long)addr & PAGE_MASK));
}
EXPORT_SYMBOL(iounmap);
#endif	/* CONFIG_GENERIC_IOREMAP */