// SPDX-License-Identifier: GPL-2.0
/*
 * Re-map IO memory to kernel address space so that we can access it.
 * This is needed for high PCI addresses that aren't mapped in the
 * 640k-1MB IO memory area on PC's
 *
 * (C) Copyright 1995 1996 Linus Torvalds
 */
#include <linux/vmalloc.h>
#include <linux/mm.h>
#include <linux/sched.h>
#include <linux/io.h>
#include <linux/export.h>
#include <asm/cacheflush.h>
#include <asm/pgtable.h>

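/*
 * Huge I/O mapping support: the flags below record which page table
 * levels the architecture can map with a single huge entry.  Booting
 * with "nohugeiomap" on the kernel command line keeps them all clear,
 * so ioremap_page_range() falls back to base-page PTE mappings.
 */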
#ifdef CONFIG_HAVE_ARCH_HUGE_VMAP
static int __read_mostly ioremap_p4d_capable;
static int __read_mostly ioremap_pud_capable;
static int __read_mostly ioremap_pmd_capable;
static int __read_mostly ioremap_huge_disabled;

static int __init set_nohugeiomap(char *str)
{
        ioremap_huge_disabled = 1;
        return 0;
}
early_param("nohugeiomap", set_nohugeiomap);

void __init ioremap_huge_init(void)
{
        if (!ioremap_huge_disabled) {
                if (arch_ioremap_pud_supported())
                        ioremap_pud_capable = 1;
                if (arch_ioremap_pmd_supported())
                        ioremap_pmd_capable = 1;
        }
}

static inline int ioremap_p4d_enabled(void)
{
        return ioremap_p4d_capable;
}

static inline int ioremap_pud_enabled(void)
{
        return ioremap_pud_capable;
}

static inline int ioremap_pmd_enabled(void)
{
        return ioremap_pmd_capable;
}

#else	/* !CONFIG_HAVE_ARCH_HUGE_VMAP */
static inline int ioremap_p4d_enabled(void) { return 0; }
static inline int ioremap_pud_enabled(void) { return 0; }
static inline int ioremap_pmd_enabled(void) { return 0; }
#endif	/* CONFIG_HAVE_ARCH_HUGE_VMAP */

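/*
 * Leaf level: install one PTE per page for a physically contiguous
 * range starting at phys_addr.  The virtual range must not already be
 * mapped (BUG_ON otherwise).
 */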
static int ioremap_pte_range(pmd_t *pmd, unsigned long addr,
                unsigned long end, phys_addr_t phys_addr, pgprot_t prot)
{
        pte_t *pte;
        u64 pfn;

        pfn = phys_addr >> PAGE_SHIFT;
        pte = pte_alloc_kernel(pmd, addr);
        if (!pte)
                return -ENOMEM;
        do {
                BUG_ON(!pte_none(*pte));
                set_pte_at(&init_mm, addr, pte, pfn_pte(pfn, prot));
                pfn++;
        } while (pte++, addr += PAGE_SIZE, addr != end);
        return 0;
}

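/*
 * Try to cover an entire PMD-sized region with a single huge mapping.
 * Returns nonzero on success; zero means the caller should fall back
 * to PTE mappings.  A huge entry is only attempted when the region is
 * exactly PMD_SIZE, both the virtual and physical addresses are
 * PMD-aligned, and any page table left over from a previous mapping
 * of this PMD can be freed.
 */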
static int ioremap_try_huge_pmd(pmd_t *pmd, unsigned long addr,
                                unsigned long end, phys_addr_t phys_addr,
                                pgprot_t prot)
{
        if (!ioremap_pmd_enabled())
                return 0;

        if ((end - addr) != PMD_SIZE)
                return 0;

        if (!IS_ALIGNED(addr, PMD_SIZE))
                return 0;

        if (!IS_ALIGNED(phys_addr, PMD_SIZE))
                return 0;

        if (pmd_present(*pmd) && !pmd_free_pte_page(pmd, addr))
                return 0;

        return pmd_set_huge(pmd, phys_addr, prot);
}

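/*
 * Walk the range one PMD at a time, taking the huge-mapping fast path
 * where possible and filling in PTEs otherwise.  The PUD- and
 * P4D-level walkers below follow the same pattern one level up.
 */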
static inline int ioremap_pmd_range(pud_t *pud, unsigned long addr,
                unsigned long end, phys_addr_t phys_addr, pgprot_t prot)
{
        pmd_t *pmd;
        unsigned long next;

        pmd = pmd_alloc(&init_mm, pud, addr);
        if (!pmd)
                return -ENOMEM;
        do {
                next = pmd_addr_end(addr, end);

                if (ioremap_try_huge_pmd(pmd, addr, next, phys_addr, prot))
                        continue;

                if (ioremap_pte_range(pmd, addr, next, phys_addr, prot))
                        return -ENOMEM;
        } while (pmd++, phys_addr += (next - addr), addr = next, addr != end);
        return 0;
}

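/* Same checks as ioremap_try_huge_pmd(), at PUD granularity. */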
static int ioremap_try_huge_pud(pud_t *pud, unsigned long addr,
                                unsigned long end, phys_addr_t phys_addr,
                                pgprot_t prot)
{
        if (!ioremap_pud_enabled())
                return 0;

        if ((end - addr) != PUD_SIZE)
                return 0;

        if (!IS_ALIGNED(addr, PUD_SIZE))
                return 0;

        if (!IS_ALIGNED(phys_addr, PUD_SIZE))
                return 0;

        if (pud_present(*pud) && !pud_free_pmd_page(pud, addr))
                return 0;

        return pud_set_huge(pud, phys_addr, prot);
}

static inline int ioremap_pud_range(p4d_t *p4d, unsigned long addr,
                unsigned long end, phys_addr_t phys_addr, pgprot_t prot)
{
        pud_t *pud;
        unsigned long next;

        pud = pud_alloc(&init_mm, p4d, addr);
        if (!pud)
                return -ENOMEM;
        do {
                next = pud_addr_end(addr, end);

                if (ioremap_try_huge_pud(pud, addr, next, phys_addr, prot))
                        continue;

                if (ioremap_pmd_range(pud, addr, next, phys_addr, prot))
                        return -ENOMEM;
        } while (pud++, phys_addr += (next - addr), addr = next, addr != end);
        return 0;
}

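/* Same checks as ioremap_try_huge_pmd(), at P4D granularity. */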
static int ioremap_try_huge_p4d(p4d_t *p4d, unsigned long addr,
                                unsigned long end, phys_addr_t phys_addr,
                                pgprot_t prot)
{
        if (!ioremap_p4d_enabled())
                return 0;

        if ((end - addr) != P4D_SIZE)
                return 0;

        if (!IS_ALIGNED(addr, P4D_SIZE))
                return 0;

        if (!IS_ALIGNED(phys_addr, P4D_SIZE))
                return 0;

        if (p4d_present(*p4d) && !p4d_free_pud_page(p4d, addr))
                return 0;

        return p4d_set_huge(p4d, phys_addr, prot);
}

static inline int ioremap_p4d_range(pgd_t *pgd, unsigned long addr,
                unsigned long end, phys_addr_t phys_addr, pgprot_t prot)
{
        p4d_t *p4d;
        unsigned long next;

        p4d = p4d_alloc(&init_mm, pgd, addr);
        if (!p4d)
                return -ENOMEM;
        do {
                next = p4d_addr_end(addr, end);

                if (ioremap_try_huge_p4d(p4d, addr, next, phys_addr, prot))
                        continue;

                if (ioremap_pud_range(p4d, addr, next, phys_addr, prot))
                        return -ENOMEM;
        } while (p4d++, phys_addr += (next - addr), addr = next, addr != end);
        return 0;
}

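/**
 * ioremap_page_range - map a physically contiguous range into kernel space
 * @addr: start of the virtual address range
 * @end: end of the virtual address range
 * @phys_addr: physical address to map from
 * @prot: page protection flags for the mapping
 *
 * Walks the kernel page tables from the PGD down, installing huge
 * mappings where the architecture allows them and individual PTEs
 * otherwise, then flushes the cache over the new mapping.  May sleep.
 * Returns 0 on success or a negative errno (typically -ENOMEM).
 */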
int ioremap_page_range(unsigned long addr,
                       unsigned long end, phys_addr_t phys_addr, pgprot_t prot)
{
        pgd_t *pgd;
        unsigned long start;
        unsigned long next;
        int err;

        might_sleep();
        BUG_ON(addr >= end);

        start = addr;
        pgd = pgd_offset_k(addr);
        do {
                next = pgd_addr_end(addr, end);
                err = ioremap_p4d_range(pgd, addr, next, phys_addr, prot);
                if (err)
                        break;
        } while (pgd++, phys_addr += (next - addr), addr = next, addr != end);

        flush_cache_vmap(start, end);

        return err;
}