/*
 * arch/sh/mm/pmb.c
 *
 * Privileged Space Mapping Buffer (PMB) Support.
 *
 * Copyright (C) 2005 - 2010 Paul Mundt
 * Copyright (C) 2010 Matt Fleming
 *
 * This file is subject to the terms and conditions of the GNU General Public
 * License. See the file "COPYING" in the main directory of this archive
 * for more details.
 */
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/sysdev.h>
#include <linux/cpu.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/bitops.h>
#include <linux/debugfs.h>
#include <linux/fs.h>
#include <linux/seq_file.h>
#include <linux/err.h>
#include <linux/io.h>
#include <linux/spinlock.h>
#include <linux/vmalloc.h>
#include <asm/cacheflush.h>
#include <asm/sizes.h>
#include <asm/system.h>
#include <asm/uaccess.h>
#include <asm/pgtable.h>
#include <asm/page.h>
#include <asm/mmu.h>
#include <asm/mmu_context.h>

struct pmb_entry;

struct pmb_entry {
	unsigned long vpn;
	unsigned long ppn;
	unsigned long flags;
	unsigned long size;

	spinlock_t lock;

	/*
	 * 0 .. NR_PMB_ENTRIES for specific entry selection, or
	 * PMB_NO_ENTRY to search for a free one
	 */
	int entry;

	/* Adjacent entry link for contiguous multi-entry mappings */
	struct pmb_entry *link;
};

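/*
 * Available PMB page sizes, largest first. pmb_bolt_mapping() walks
 * this table greedily, so keeping it sorted in descending order is
 * what lets a large request be carved into the fewest entries.
 */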
static struct {
	unsigned long size;
	int flag;
} pmb_sizes[] = {
	{ .size = SZ_512M, .flag = PMB_SZ_512M, },
	{ .size = SZ_128M, .flag = PMB_SZ_128M, },
	{ .size = SZ_64M,  .flag = PMB_SZ_64M,  },
	{ .size = SZ_16M,  .flag = PMB_SZ_16M,  },
};

static void pmb_unmap_entry(struct pmb_entry *, int depth);

static DEFINE_RWLOCK(pmb_rwlock);
static struct pmb_entry pmb_entry_list[NR_PMB_ENTRIES];
static DECLARE_BITMAP(pmb_map, NR_PMB_ENTRIES);

static unsigned int pmb_iomapping_enabled;

static __always_inline unsigned long mk_pmb_entry(unsigned int entry)
{
	return (entry & PMB_E_MASK) << PMB_E_SHIFT;
}

static __always_inline unsigned long mk_pmb_addr(unsigned int entry)
{
	return mk_pmb_entry(entry) | PMB_ADDR;
}

static __always_inline unsigned long mk_pmb_data(unsigned int entry)
{
	return mk_pmb_entry(entry) | PMB_DATA;
}

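/*
 * Illustration: each PMB entry is a pair of memory-mapped slots, one in
 * the address array (PMB_ADDR) and one in the data array (PMB_DATA),
 * spaced (1 << PMB_E_SHIFT) bytes apart. For an in-range entry number,
 * e.g. entry 5:
 *
 *	mk_pmb_addr(5) == PMB_ADDR | (5 << PMB_E_SHIFT)
 *	mk_pmb_data(5) == PMB_DATA | (5 << PMB_E_SHIFT)
 */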
static __always_inline unsigned int pmb_ppn_in_range(unsigned long ppn)
{
	return ppn >= __pa(memory_start) && ppn < __pa(memory_end);
}

/*
 * Ensure that the PMB entries match our cache configuration.
 *
 * When we are in 32-bit address extended mode, CCR.CB becomes
 * invalid, so care must be taken to manually adjust cacheable
 * translations.
 */
static __always_inline unsigned long pmb_cache_flags(void)
{
	unsigned long flags = 0;

#if defined(CONFIG_CACHE_OFF)
	flags |= PMB_WT | PMB_UB;
#elif defined(CONFIG_CACHE_WRITETHROUGH)
	flags |= PMB_C | PMB_WT | PMB_UB;
#elif defined(CONFIG_CACHE_WRITEBACK)
	flags |= PMB_C;
#endif

	return flags;
}

/*
 * Convert typical pgprot value to the PMB equivalent
 */
static inline unsigned long pgprot_to_pmb_flags(pgprot_t prot)
{
	unsigned long pmb_flags = 0;
	u64 flags = pgprot_val(prot);

	if (flags & _PAGE_CACHABLE)
		pmb_flags |= PMB_C;
	if (flags & _PAGE_WT)
		pmb_flags |= PMB_WT | PMB_UB;

	return pmb_flags;
}

static inline bool pmb_can_merge(struct pmb_entry *a, struct pmb_entry *b)
{
	return (b->vpn == (a->vpn + a->size)) &&
	       (b->ppn == (a->ppn + a->size)) &&
	       (b->flags == a->flags);
}

static bool pmb_mapping_exists(unsigned long vaddr, phys_addr_t phys,
			       unsigned long size)
{
	int i;

	read_lock(&pmb_rwlock);

	for (i = 0; i < ARRAY_SIZE(pmb_entry_list); i++) {
		struct pmb_entry *pmbe, *iter;
		unsigned long span;

		if (!test_bit(i, pmb_map))
			continue;

		pmbe = &pmb_entry_list[i];

		/*
		 * See if VPN and PPN are bounded by an existing mapping.
		 */
		if ((vaddr < pmbe->vpn) || (vaddr >= (pmbe->vpn + pmbe->size)))
			continue;
		if ((phys < pmbe->ppn) || (phys >= (pmbe->ppn + pmbe->size)))
			continue;

		/*
		 * Now see if we're in range of a simple mapping.
		 */
		if (size <= pmbe->size) {
			read_unlock(&pmb_rwlock);
			return true;
		}

		span = pmbe->size;

		/*
		 * Finally for sizes that involve compound mappings, walk
		 * the chain.
		 */
		for (iter = pmbe->link; iter; iter = iter->link)
			span += iter->size;

		/*
		 * Nothing else to do if the range requirements are met.
		 */
		if (size <= span) {
			read_unlock(&pmb_rwlock);
			return true;
		}
	}

	read_unlock(&pmb_rwlock);
	return false;
}

static bool pmb_size_valid(unsigned long size)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(pmb_sizes); i++)
		if (pmb_sizes[i].size == size)
			return true;

	return false;
}

static inline bool pmb_addr_valid(unsigned long addr, unsigned long size)
{
	return (addr >= P1SEG && (addr + size - 1) < P3SEG);
}

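/*
 * PMB mappings are kernel-only (it is a *privileged* space mapping
 * buffer, after all), so reject any protection value with _PAGE_USER
 * set.
 */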
static inline bool pmb_prot_valid(pgprot_t prot)
{
	return (pgprot_val(prot) & _PAGE_USER) == 0;
}

static int pmb_size_to_flags(unsigned long size)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(pmb_sizes); i++)
		if (pmb_sizes[i].size == size)
			return pmb_sizes[i].flag;

	return 0;
}

static int pmb_alloc_entry(void)
{
	int pos;

	pos = find_first_zero_bit(pmb_map, NR_PMB_ENTRIES);
	if (pos >= 0 && pos < NR_PMB_ENTRIES)
		__set_bit(pos, pmb_map);
	else
		pos = -ENOSPC;

	return pos;
}

static struct pmb_entry *pmb_alloc(unsigned long vpn, unsigned long ppn,
				   unsigned long flags, int entry)
{
	struct pmb_entry *pmbe;
	unsigned long irqflags;
	void *ret = NULL;
	int pos;

	write_lock_irqsave(&pmb_rwlock, irqflags);

	if (entry == PMB_NO_ENTRY) {
		pos = pmb_alloc_entry();
		if (unlikely(pos < 0)) {
			ret = ERR_PTR(pos);
			goto out;
		}
	} else {
		if (__test_and_set_bit(entry, pmb_map)) {
			ret = ERR_PTR(-ENOSPC);
			goto out;
		}

		pos = entry;
	}

	write_unlock_irqrestore(&pmb_rwlock, irqflags);

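	/*
	 * From this point the slot is reserved: the bit set in pmb_map
	 * gives us exclusive ownership of pmb_entry_list[pos], so the
	 * global rwlock can be dropped before initializing the entry.
	 */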
	pmbe = &pmb_entry_list[pos];

	memset(pmbe, 0, sizeof(struct pmb_entry));

	spin_lock_init(&pmbe->lock);

	pmbe->vpn   = vpn;
	pmbe->ppn   = ppn;
	pmbe->flags = flags;
	pmbe->entry = pos;

	return pmbe;

out:
	write_unlock_irqrestore(&pmb_rwlock, irqflags);
	return ret;
}

static void pmb_free(struct pmb_entry *pmbe)
{
	__clear_bit(pmbe->entry, pmb_map);

	pmbe->entry = PMB_NO_ENTRY;
	pmbe->link = NULL;
}

/*
 * Must be run uncached.
 */
static void __set_pmb_entry(struct pmb_entry *pmbe)
{
	unsigned long addr, data;

	addr = mk_pmb_addr(pmbe->entry);
	data = mk_pmb_data(pmbe->entry);

	jump_to_uncached();

	/* Set V-bit */
	__raw_writel(pmbe->vpn | PMB_V, addr);
	__raw_writel(pmbe->ppn | pmbe->flags | PMB_V, data);

	back_to_cached();
}

static void __clear_pmb_entry(struct pmb_entry *pmbe)
{
	unsigned long addr, data;
	unsigned long addr_val, data_val;

	addr = mk_pmb_addr(pmbe->entry);
	data = mk_pmb_data(pmbe->entry);

	addr_val = __raw_readl(addr);
	data_val = __raw_readl(data);

	/* Clear V-bit */
	writel_uncached(addr_val & ~PMB_V, addr);
	writel_uncached(data_val & ~PMB_V, data);
}

static void set_pmb_entry(struct pmb_entry *pmbe)
{
	unsigned long flags;

	spin_lock_irqsave(&pmbe->lock, flags);
	__set_pmb_entry(pmbe);
	spin_unlock_irqrestore(&pmbe->lock, flags);
}

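/*
 * Illustrative only: a caller bolting a 64MB device window at physical
 * 0x10000000 into the P2 area might do something like
 *
 *	ret = pmb_bolt_mapping(0xa8000000, 0x10000000, SZ_64M,
 *			       PAGE_KERNEL_NOCACHE);
 *
 * (addresses and pgprot here are hypothetical). The request is carved
 * into the largest possible PMB pages, and the resulting entries are
 * linked together for later teardown.
 */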
int pmb_bolt_mapping(unsigned long vaddr, phys_addr_t phys,
		     unsigned long size, pgprot_t prot)
{
	struct pmb_entry *pmbp, *pmbe;
	unsigned long orig_addr, orig_size;
	unsigned long flags, pmb_flags;
	int i, mapped;

	if (!pmb_addr_valid(vaddr, size))
		return -EFAULT;
	if (pmb_mapping_exists(vaddr, phys, size))
		return 0;

	orig_addr = vaddr;
	orig_size = size;

	flush_tlb_kernel_range(vaddr, vaddr + size);

	pmb_flags = pgprot_to_pmb_flags(prot);
	pmbp = NULL;

	do {
		for (i = mapped = 0; i < ARRAY_SIZE(pmb_sizes); i++) {
			if (size < pmb_sizes[i].size)
				continue;

			pmbe = pmb_alloc(vaddr, phys, pmb_flags |
					 pmb_sizes[i].flag, PMB_NO_ENTRY);
			if (IS_ERR(pmbe)) {
				pmb_unmap_entry(pmbp, mapped);
				return PTR_ERR(pmbe);
			}

			spin_lock_irqsave(&pmbe->lock, flags);

			pmbe->size = pmb_sizes[i].size;

			__set_pmb_entry(pmbe);

			phys  += pmbe->size;
			vaddr += pmbe->size;
			size  -= pmbe->size;

			/*
			 * Link adjacent entries that span multiple PMB
			 * entries for easier tear-down.
			 */
			if (likely(pmbp)) {
				spin_lock(&pmbp->lock);
				pmbp->link = pmbe;
				spin_unlock(&pmbp->lock);
			}

			pmbp = pmbe;

			/*
			 * Instead of trying smaller sizes on every
			 * iteration (even if we succeed in allocating
			 * space), try using pmb_sizes[i].size again.
			 */
			i--;
			mapped++;

			spin_unlock_irqrestore(&pmbe->lock, flags);
		}
	} while (size >= SZ_16M);

	flush_cache_vmap(orig_addr, orig_addr + orig_size);

	return 0;
}

void __iomem *pmb_remap_caller(phys_addr_t phys, unsigned long size,
			       pgprot_t prot, void *caller)
{
	unsigned long vaddr;
	phys_addr_t offset, last_addr;
	phys_addr_t align_mask;
	unsigned long aligned;
	struct vm_struct *area;
	int i, ret;

	if (!pmb_iomapping_enabled)
		return NULL;

	/*
	 * Small mappings need to go through the TLB.
	 */
	if (size < SZ_16M)
		return ERR_PTR(-EINVAL);
	if (!pmb_prot_valid(prot))
		return ERR_PTR(-EINVAL);

	for (i = 0; i < ARRAY_SIZE(pmb_sizes); i++)
		if (size >= pmb_sizes[i].size)
			break;

	last_addr = phys + size;
	align_mask = ~(pmb_sizes[i].size - 1);
	offset = phys & ~align_mask;
	phys &= align_mask;
	aligned = ALIGN(last_addr, pmb_sizes[i].size) - phys;

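	/*
	 * Worked example (hypothetical numbers): remapping 16MB at
	 * phys 0x10300000 selects the 16MB page size, so align_mask is
	 * ~0xffffff. That yields offset = 0x300000, phys rounded down
	 * to 0x10000000, and aligned = ALIGN(0x11300000, 16M) -
	 * 0x10000000 = 32MB of virtual space to cover the straddle.
	 */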
	/*
	 * XXX: This should really start from uncached_end, but this
	 * causes the MMU to reset, so for now we restrict it to the
	 * 0xb000...0xc000 range.
	 */
	area = __get_vm_area_caller(aligned, VM_IOREMAP, 0xb0000000,
				    P3SEG, caller);
	if (!area)
		return NULL;

	area->phys_addr = phys;
	vaddr = (unsigned long)area->addr;

	ret = pmb_bolt_mapping(vaddr, phys, size, prot);
	if (unlikely(ret != 0))
		return ERR_PTR(ret);

	return (void __iomem *)(offset + (char *)vaddr);
}

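/*
 * Tear down a mapping previously established via pmb_remap_caller():
 * look the entry up by its virtual address, then unwind the entire
 * linked chain of PMB entries behind it.
 */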
int pmb_unmap(void __iomem *addr)
{
	struct pmb_entry *pmbe = NULL;
	unsigned long vaddr = (unsigned long __force)addr;
	int i, found = 0;

	read_lock(&pmb_rwlock);

	for (i = 0; i < ARRAY_SIZE(pmb_entry_list); i++) {
		if (test_bit(i, pmb_map)) {
			pmbe = &pmb_entry_list[i];
			if (pmbe->vpn == vaddr) {
				found = 1;
				break;
			}
		}
	}

	read_unlock(&pmb_rwlock);

	if (found) {
		pmb_unmap_entry(pmbe, NR_PMB_ENTRIES);
		return 0;
	}

	return -EINVAL;
}

static void __pmb_unmap_entry(struct pmb_entry *pmbe, int depth)
{
	do {
		struct pmb_entry *pmblink = pmbe;

		/*
		 * We may be called before this pmb_entry has been
		 * entered into the PMB table via set_pmb_entry(), but
		 * that's OK because we've allocated a unique slot for
		 * this entry in pmb_alloc() (even if we haven't filled
		 * it yet).
		 *
		 * Therefore, calling __clear_pmb_entry() is safe as no
		 * other mapping can be using that slot.
		 */
		__clear_pmb_entry(pmbe);

		flush_cache_vunmap(pmbe->vpn, pmbe->vpn + pmbe->size);

		pmbe = pmblink->link;

		pmb_free(pmblink);
	} while (pmbe && --depth);
}

static void pmb_unmap_entry(struct pmb_entry *pmbe, int depth)
{
	unsigned long flags;

	if (unlikely(!pmbe))
		return;

	write_lock_irqsave(&pmb_rwlock, flags);
	__pmb_unmap_entry(pmbe, depth);
	write_unlock_irqrestore(&pmb_rwlock, flags);
}

static void __init pmb_notify(void)
{
	int i;

	pr_info("PMB: boot mappings:\n");

	read_lock(&pmb_rwlock);

	for (i = 0; i < ARRAY_SIZE(pmb_entry_list); i++) {
		struct pmb_entry *pmbe;

		if (!test_bit(i, pmb_map))
			continue;

		pmbe = &pmb_entry_list[i];

		pr_info("       0x%08lx -> 0x%08lx [ %4ldMB %2scached ]\n",
			pmbe->vpn >> PAGE_SHIFT, pmbe->ppn >> PAGE_SHIFT,
			pmbe->size >> 20, (pmbe->flags & PMB_C) ? "" : "un");
	}

	read_unlock(&pmb_rwlock);
}

/*
 * Sync our software copy of the PMB mappings with those in hardware. The
 * mappings in the hardware PMB were either set up by the bootloader or
 * very early on by the kernel.
 */
static void __init pmb_synchronize(void)
{
	struct pmb_entry *pmbp = NULL;
	int i, j;

	/*
	 * Run through the initial boot mappings, log the established
	 * ones, and blow away anything that falls outside of the valid
	 * PPN range. Specifically, we only care about existing mappings
	 * that impact the cached/uncached sections.
	 *
	 * Note that touching these can be a bit of a minefield; the boot
	 * loader can establish multi-page mappings with the same caching
	 * attributes, so we need to ensure that we aren't modifying a
	 * mapping that we're presently executing from, or may execute
	 * from in the case of straddling page boundaries.
	 *
	 * In the future we will have to tidy up after the boot loader by
	 * jumping between the cached and uncached mappings and tearing
	 * down alternating mappings while executing from the other.
	 */
	for (i = 0; i < NR_PMB_ENTRIES; i++) {
		unsigned long addr, data;
		unsigned long addr_val, data_val;
		unsigned long ppn, vpn, flags;
		unsigned long irqflags;
		unsigned int size;
		struct pmb_entry *pmbe;

		addr = mk_pmb_addr(i);
		data = mk_pmb_data(i);

		addr_val = __raw_readl(addr);
		data_val = __raw_readl(data);

		/*
		 * Skip over any bogus entries
		 */
		if (!(data_val & PMB_V) || !(addr_val & PMB_V))
			continue;

		ppn = data_val & PMB_PFN_MASK;
		vpn = addr_val & PMB_PFN_MASK;

		/*
		 * Only preserve in-range mappings.
		 */
		if (!pmb_ppn_in_range(ppn)) {
			/*
			 * Invalidate anything out of bounds.
			 */
			writel_uncached(addr_val & ~PMB_V, addr);
			writel_uncached(data_val & ~PMB_V, data);
			continue;
		}

		/*
		 * Update the caching attributes if necessary
		 */
		if (data_val & PMB_C) {
			data_val &= ~PMB_CACHE_MASK;
			data_val |= pmb_cache_flags();

			writel_uncached(data_val, data);
		}

		size = data_val & PMB_SZ_MASK;
		flags = size | (data_val & PMB_CACHE_MASK);

		pmbe = pmb_alloc(vpn, ppn, flags, i);
		if (IS_ERR(pmbe)) {
			WARN_ON_ONCE(1);
			continue;
		}

		spin_lock_irqsave(&pmbe->lock, irqflags);

		for (j = 0; j < ARRAY_SIZE(pmb_sizes); j++)
			if (pmb_sizes[j].flag == size)
				pmbe->size = pmb_sizes[j].size;

		if (pmbp) {
			spin_lock(&pmbp->lock);

			/*
			 * Compare the previous entry against the current one to
			 * see if the entries span a contiguous mapping. If so,
			 * setup the entry links accordingly. Compound mappings
			 * are later coalesced.
			 */
			if (pmb_can_merge(pmbp, pmbe))
				pmbp->link = pmbe;

			spin_unlock(&pmbp->lock);
		}

		pmbp = pmbe;

		spin_unlock_irqrestore(&pmbe->lock, irqflags);
	}
}

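/*
 * Example of what pmb_merge() does (hypothetical layout): given a
 * compound mapping of four linked 16MB entries, the running span is
 * 32MB, then 48MB (neither a valid PMB size), then 64MB, which is
 * valid. The head entry is rewritten as a single 64MB entry and the
 * three linked entries behind it (depth == 3) are torn down.
 */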
static void __init pmb_merge(struct pmb_entry *head)
{
	unsigned long span, newsize;
	struct pmb_entry *tail;
	int i = 1, depth = 0;

	span = newsize = head->size;

	tail = head->link;
	while (tail) {
		span += tail->size;

		if (pmb_size_valid(span)) {
			newsize = span;
			depth = i;
		}

		/* This is the end of the line.. */
		if (!tail->link)
			break;

		tail = tail->link;
		i++;
	}

	/*
	 * The merged page size must be valid.
	 */
	if (!pmb_size_valid(newsize))
		return;

	head->flags &= ~PMB_SZ_MASK;
	head->flags |= pmb_size_to_flags(newsize);

	head->size = newsize;

	__pmb_unmap_entry(head->link, depth);
	__set_pmb_entry(head);
}

static void __init pmb_coalesce(void)
{
	unsigned long flags;
	int i;

	write_lock_irqsave(&pmb_rwlock, flags);

	for (i = 0; i < ARRAY_SIZE(pmb_entry_list); i++) {
		struct pmb_entry *pmbe;

		if (!test_bit(i, pmb_map))
			continue;

		pmbe = &pmb_entry_list[i];

		/*
		 * We're only interested in compound mappings
		 */
		if (!pmbe->link)
			continue;

		/*
		 * Nothing to do if it already uses the largest possible
		 * page size.
		 */
		if (pmbe->size == SZ_512M)
			continue;

		pmb_merge(pmbe);
	}

	write_unlock_irqrestore(&pmb_rwlock, flags);
}

#ifdef CONFIG_UNCACHED_MAPPING
static void __init pmb_resize(void)
{
	int i;

	/*
	 * If the uncached mapping was constructed by the kernel, it will
	 * already be a reasonable size.
	 */
	if (uncached_size == SZ_16M)
		return;

	read_lock(&pmb_rwlock);

	for (i = 0; i < ARRAY_SIZE(pmb_entry_list); i++) {
		struct pmb_entry *pmbe;
		unsigned long flags;

		if (!test_bit(i, pmb_map))
			continue;

		pmbe = &pmb_entry_list[i];

		if (pmbe->vpn != uncached_start)
			continue;

		/*
		 * Found it, now resize it.
		 */
		spin_lock_irqsave(&pmbe->lock, flags);

		pmbe->size   = SZ_16M;
		pmbe->flags &= ~PMB_SZ_MASK;
		pmbe->flags |= pmb_size_to_flags(pmbe->size);

		uncached_resize(pmbe->size);

		__set_pmb_entry(pmbe);

		spin_unlock_irqrestore(&pmbe->lock, flags);
	}

	read_unlock(&pmb_rwlock);
}
#endif

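/*
 * "pmb=iomap" on the kernel command line opts in to routing large
 * ioremap() requests through the PMB (see pmb_remap_caller() above).
 */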
static int __init early_pmb(char *p)
{
	if (!p)
		return 0;

	if (strstr(p, "iomap"))
		pmb_iomapping_enabled = 1;

	return 0;
}
early_param("pmb", early_pmb);

void __init pmb_init(void)
{
	/* Synchronize software state */
	pmb_synchronize();

	/* Attempt to combine compound mappings */
	pmb_coalesce();

#ifdef CONFIG_UNCACHED_MAPPING
	/* Resize initial mappings, if necessary */
	pmb_resize();
#endif

	/* Log them */
	pmb_notify();

	writel_uncached(0, PMB_IRMCR);

	/* Flush out the TLB */
	local_flush_tlb_all();
	ctrl_barrier();
}

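/*
 * The SE bit in PASCR selects 32-bit address extended (PMB) mode;
 * if it reads back clear, the CPU is running in legacy 29-bit
 * physical address mode.
 */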
bool __in_29bit_mode(void)
{
	return (__raw_readl(PMB_PASCR) & PASCR_SE) == 0;
}

static int pmb_seq_show(struct seq_file *file, void *iter)
{
	int i;

	seq_printf(file, "V: Valid, C: Cacheable, WT: Write-Through\n"
			 "CB: Copy-Back, B: Buffered, UB: Unbuffered\n");
	seq_printf(file, "ety   vpn  ppn  size   flags\n");

	for (i = 0; i < NR_PMB_ENTRIES; i++) {
		unsigned long addr, data;
		unsigned int size;
		char *sz_str = NULL;

		addr = __raw_readl(mk_pmb_addr(i));
		data = __raw_readl(mk_pmb_data(i));

		size = data & PMB_SZ_MASK;
		sz_str = (size == PMB_SZ_16M)  ? " 16MB":
			 (size == PMB_SZ_64M)  ? " 64MB":
			 (size == PMB_SZ_128M) ? "128MB":
						 "512MB";

		/* 02: V 0x88 0x08 128MB C CB B */
		seq_printf(file, "%02d: %c 0x%02lx 0x%02lx %s %c %s %s\n",
			   i, ((addr & PMB_V) && (data & PMB_V)) ? 'V' : ' ',
			   (addr >> 24) & 0xff, (data >> 24) & 0xff,
			   sz_str, (data & PMB_C) ? 'C' : ' ',
			   (data & PMB_WT) ? "WT" : "CB",
			   (data & PMB_UB) ? "UB" : " B");
	}

	return 0;
}

static int pmb_debugfs_open(struct inode *inode, struct file *file)
{
	return single_open(file, pmb_seq_show, NULL);
}

static const struct file_operations pmb_debugfs_fops = {
	.owner		= THIS_MODULE,
	.open		= pmb_debugfs_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};

static int __init pmb_debugfs_init(void)
{
	struct dentry *dentry;

	dentry = debugfs_create_file("pmb", S_IFREG | S_IRUGO,
				     sh_debugfs_root, NULL, &pmb_debugfs_fops);
	if (!dentry)
		return -ENOMEM;
	if (IS_ERR(dentry))
		return PTR_ERR(dentry);

	return 0;
}
subsys_initcall(pmb_debugfs_init);

#ifdef CONFIG_PM
static int pmb_sysdev_suspend(struct sys_device *dev, pm_message_t state)
{
	static pm_message_t prev_state;
	int i;

	/* Restore the PMB after a resume from hibernation */
	if (state.event == PM_EVENT_ON &&
	    prev_state.event == PM_EVENT_FREEZE) {
		struct pmb_entry *pmbe;

		read_lock(&pmb_rwlock);

		for (i = 0; i < ARRAY_SIZE(pmb_entry_list); i++) {
			if (test_bit(i, pmb_map)) {
				pmbe = &pmb_entry_list[i];
				set_pmb_entry(pmbe);
			}
		}

		read_unlock(&pmb_rwlock);
	}

	prev_state = state;

	return 0;
}

static int pmb_sysdev_resume(struct sys_device *dev)
{
	return pmb_sysdev_suspend(dev, PMSG_ON);
}

static struct sysdev_driver pmb_sysdev_driver = {
	.suspend	= pmb_sysdev_suspend,
	.resume		= pmb_sysdev_resume,
};

static int __init pmb_sysdev_init(void)
{
	return sysdev_driver_register(&cpu_sysdev_class, &pmb_sysdev_driver);
}
subsys_initcall(pmb_sysdev_init);
#endif
919#endif