// SPDX-License-Identifier: GPL-2.0
/*
 * Re-map IO memory to kernel address space so that we can access it.
 * This is needed for high PCI addresses that aren't mapped in the
 * 640k-1MB IO memory area on PC's
 *
 * (C) Copyright 1995 1996 Linus Torvalds
 */
#include <linux/vmalloc.h>
#include <linux/mm.h>
#include <linux/sched.h>
#include <linux/io.h>
#include <linux/export.h>
#include <asm/cacheflush.h>

#include "pgalloc-track.h"

#ifdef CONFIG_HAVE_ARCH_HUGE_VMAP
static int __read_mostly ioremap_p4d_capable;
static int __read_mostly ioremap_pud_capable;
static int __read_mostly ioremap_pmd_capable;
static int __read_mostly ioremap_huge_disabled;

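/*
 * Boot-time "nohugeiomap" parameter: force all I/O remappings down to
 * base-page (PTE) granularity even if the architecture could use huge
 * leaf entries.
 */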
static int __init set_nohugeiomap(char *str)
{
	ioremap_huge_disabled = 1;
	return 0;
}
early_param("nohugeiomap", set_nohugeiomap);

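/*
 * Cache, once at boot, which page-table levels this architecture can
 * map with a single huge leaf entry.  Skipped entirely when the user
 * passed "nohugeiomap".
 */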
void __init ioremap_huge_init(void)
{
	if (!ioremap_huge_disabled) {
		if (arch_ioremap_p4d_supported())
			ioremap_p4d_capable = 1;
		if (arch_ioremap_pud_supported())
			ioremap_pud_capable = 1;
		if (arch_ioremap_pmd_supported())
			ioremap_pmd_capable = 1;
	}
}

static inline int ioremap_p4d_enabled(void)
{
	return ioremap_p4d_capable;
}

static inline int ioremap_pud_enabled(void)
{
	return ioremap_pud_capable;
}

static inline int ioremap_pmd_enabled(void)
{
	return ioremap_pmd_capable;
}

#else	/* !CONFIG_HAVE_ARCH_HUGE_VMAP */
static inline int ioremap_p4d_enabled(void) { return 0; }
static inline int ioremap_pud_enabled(void) { return 0; }
static inline int ioremap_pmd_enabled(void) { return 0; }
#endif	/* CONFIG_HAVE_ARCH_HUGE_VMAP */

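/*
 * Populate base-page PTEs for [addr, end), one page frame at a time.
 * The entries must be unused; finding a live PTE here is a bug.
 */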
static int ioremap_pte_range(pmd_t *pmd, unsigned long addr,
		unsigned long end, phys_addr_t phys_addr, pgprot_t prot,
		pgtbl_mod_mask *mask)
{
	pte_t *pte;
	u64 pfn;

	pfn = phys_addr >> PAGE_SHIFT;
	pte = pte_alloc_kernel_track(pmd, addr, mask);
	if (!pte)
		return -ENOMEM;
	do {
		BUG_ON(!pte_none(*pte));
		set_pte_at(&init_mm, addr, pte, pfn_pte(pfn, prot));
		pfn++;
	} while (pte++, addr += PAGE_SIZE, addr != end);
	*mask |= PGTBL_PTE_MODIFIED;
	return 0;
}

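/*
 * Try to map [addr, end) with a single huge PMD entry.  This only
 * works when huge PMD mappings are enabled, the range spans exactly
 * one PMD, both the virtual and physical addresses are PMD-aligned,
 * and any page table previously hanging off this PMD can be freed.
 * Returns nonzero on success; 0 means "fall back to PTEs".
 */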
static int ioremap_try_huge_pmd(pmd_t *pmd, unsigned long addr,
				unsigned long end, phys_addr_t phys_addr,
				pgprot_t prot)
{
	if (!ioremap_pmd_enabled())
		return 0;

	if ((end - addr) != PMD_SIZE)
		return 0;

	if (!IS_ALIGNED(addr, PMD_SIZE))
		return 0;

	if (!IS_ALIGNED(phys_addr, PMD_SIZE))
		return 0;

	if (pmd_present(*pmd) && !pmd_free_pte_page(pmd, addr))
		return 0;

	return pmd_set_huge(pmd, phys_addr, prot);
}

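/*
 * Map [addr, end) at the PMD level, using huge PMDs where the checks
 * above allow it and recursing to ioremap_pte_range() otherwise.
 */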
static inline int ioremap_pmd_range(pud_t *pud, unsigned long addr,
		unsigned long end, phys_addr_t phys_addr, pgprot_t prot,
		pgtbl_mod_mask *mask)
{
	pmd_t *pmd;
	unsigned long next;

	pmd = pmd_alloc_track(&init_mm, pud, addr, mask);
	if (!pmd)
		return -ENOMEM;
	do {
		next = pmd_addr_end(addr, end);

		if (ioremap_try_huge_pmd(pmd, addr, next, phys_addr, prot)) {
			*mask |= PGTBL_PMD_MODIFIED;
			continue;
		}

		if (ioremap_pte_range(pmd, addr, next, phys_addr, prot, mask))
			return -ENOMEM;
	} while (pmd++, phys_addr += (next - addr), addr = next, addr != end);
	return 0;
}

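/* PUD-level counterpart of ioremap_try_huge_pmd(). */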
static int ioremap_try_huge_pud(pud_t *pud, unsigned long addr,
				unsigned long end, phys_addr_t phys_addr,
				pgprot_t prot)
{
	if (!ioremap_pud_enabled())
		return 0;

	if ((end - addr) != PUD_SIZE)
		return 0;

	if (!IS_ALIGNED(addr, PUD_SIZE))
		return 0;

	if (!IS_ALIGNED(phys_addr, PUD_SIZE))
		return 0;

	if (pud_present(*pud) && !pud_free_pmd_page(pud, addr))
		return 0;

	return pud_set_huge(pud, phys_addr, prot);
}

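/* PUD-level counterpart of ioremap_pmd_range(). */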
static inline int ioremap_pud_range(p4d_t *p4d, unsigned long addr,
		unsigned long end, phys_addr_t phys_addr, pgprot_t prot,
		pgtbl_mod_mask *mask)
{
	pud_t *pud;
	unsigned long next;

	pud = pud_alloc_track(&init_mm, p4d, addr, mask);
	if (!pud)
		return -ENOMEM;
	do {
		next = pud_addr_end(addr, end);

		if (ioremap_try_huge_pud(pud, addr, next, phys_addr, prot)) {
			*mask |= PGTBL_PUD_MODIFIED;
			continue;
		}

		if (ioremap_pmd_range(pud, addr, next, phys_addr, prot, mask))
			return -ENOMEM;
	} while (pud++, phys_addr += (next - addr), addr = next, addr != end);
	return 0;
}

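/* P4D-level counterpart of ioremap_try_huge_pmd(). */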
static int ioremap_try_huge_p4d(p4d_t *p4d, unsigned long addr,
				unsigned long end, phys_addr_t phys_addr,
				pgprot_t prot)
{
	if (!ioremap_p4d_enabled())
		return 0;

	if ((end - addr) != P4D_SIZE)
		return 0;

	if (!IS_ALIGNED(addr, P4D_SIZE))
		return 0;

	if (!IS_ALIGNED(phys_addr, P4D_SIZE))
		return 0;

	if (p4d_present(*p4d) && !p4d_free_pud_page(p4d, addr))
		return 0;

	return p4d_set_huge(p4d, phys_addr, prot);
}

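/* P4D-level counterpart of ioremap_pmd_range(). */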
static inline int ioremap_p4d_range(pgd_t *pgd, unsigned long addr,
		unsigned long end, phys_addr_t phys_addr, pgprot_t prot,
		pgtbl_mod_mask *mask)
{
	p4d_t *p4d;
	unsigned long next;

	p4d = p4d_alloc_track(&init_mm, pgd, addr, mask);
	if (!p4d)
		return -ENOMEM;
	do {
		next = p4d_addr_end(addr, end);

		if (ioremap_try_huge_p4d(p4d, addr, next, phys_addr, prot)) {
			*mask |= PGTBL_P4D_MODIFIED;
			continue;
		}

		if (ioremap_pud_range(p4d, addr, next, phys_addr, prot, mask))
			return -ENOMEM;
	} while (p4d++, phys_addr += (next - addr), addr = next, addr != end);
	return 0;
}

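/**
 * ioremap_page_range - map physical memory into the kernel's page tables
 * @addr: virtual start address, page aligned
 * @end: virtual end address (exclusive)
 * @phys_addr: physical address to map
 * @prot: page protection for the mapping
 *
 * Walks the kernel page tables from the PGD down, installing leaf
 * entries (huge ones where the architecture permits), then flushes the
 * cache over the new mapping and synchronizes any modified upper
 * page-table levels where the architecture requires it.
 */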
int ioremap_page_range(unsigned long addr,
		       unsigned long end, phys_addr_t phys_addr, pgprot_t prot)
{
	pgd_t *pgd;
	unsigned long start;
	unsigned long next;
	int err;
	pgtbl_mod_mask mask = 0;

	might_sleep();
	BUG_ON(addr >= end);

	start = addr;
	pgd = pgd_offset_k(addr);
	do {
		next = pgd_addr_end(addr, end);
		err = ioremap_p4d_range(pgd, addr, next, phys_addr, prot,
					&mask);
		if (err)
			break;
	} while (pgd++, phys_addr += (next - addr), addr = next, addr != end);

	flush_cache_vmap(start, end);

	if (mask & ARCH_PAGE_TABLE_SYNC_MASK)
		arch_sync_kernel_mappings(start, end);

	return err;
}

#ifdef CONFIG_GENERIC_IOREMAP
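/*
 * Generic ioremap_prot(): carve a VM_IOREMAP area out of vmalloc space
 * and map @size bytes starting at physical @addr into it.  Sub-page
 * offsets are preserved, so @addr need not be page aligned.
 */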
void __iomem *ioremap_prot(phys_addr_t addr, size_t size, unsigned long prot)
{
	unsigned long offset, vaddr;
	phys_addr_t last_addr;
	struct vm_struct *area;

	/* Disallow wrap-around or zero size */
	last_addr = addr + size - 1;
	if (!size || last_addr < addr)
		return NULL;

	/* Page-align mappings */
	offset = addr & (~PAGE_MASK);
	addr -= offset;
	size = PAGE_ALIGN(size + offset);

	area = get_vm_area_caller(size, VM_IOREMAP,
			__builtin_return_address(0));
	if (!area)
		return NULL;
	vaddr = (unsigned long)area->addr;

	if (ioremap_page_range(vaddr, vaddr + size, addr, __pgprot(prot))) {
		free_vm_area(area);
		return NULL;
	}

	return (void __iomem *)(vaddr + offset);
}
EXPORT_SYMBOL(ioremap_prot);

void iounmap(volatile void __iomem *addr)
{
	vunmap((void *)((unsigned long)addr & PAGE_MASK));
}
EXPORT_SYMBOL(iounmap);
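
/*
 * Usage sketch (illustrative only, not part of this file): with
 * CONFIG_GENERIC_IOREMAP the routines above back the usual driver
 * pattern of ioremap() plus the MMIO accessors.  The base address,
 * size and the REG_CTRL/REG_STATUS register offsets below are
 * hypothetical.
 *
 *	void __iomem *regs = ioremap(0xfed00000, 0x1000);
 *
 *	if (!regs)
 *		return -ENOMEM;
 *	writel(1, regs + REG_CTRL);
 *	status = readl(regs + REG_STATUS);
 *	iounmap(regs);
 */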
#endif	/* CONFIG_GENERIC_IOREMAP */