/*
 * arch/sh/mm/pmb.c
 *
 * Privileged Space Mapping Buffer (PMB) Support.
 *
 * Copyright (C) 2005 - 2010 Paul Mundt
 * Copyright (C) 2010 Matt Fleming
 *
 * This file is subject to the terms and conditions of the GNU General Public
 * License. See the file "COPYING" in the main directory of this archive
 * for more details.
 */
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/sysdev.h>
#include <linux/cpu.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/bitops.h>
#include <linux/debugfs.h>
#include <linux/fs.h>
#include <linux/seq_file.h>
#include <linux/err.h>
#include <linux/io.h>
#include <linux/spinlock.h>
#include <linux/vmalloc.h>
#include <asm/sizes.h>
#include <asm/system.h>
#include <asm/uaccess.h>
#include <asm/pgtable.h>
#include <asm/page.h>
#include <asm/mmu.h>
#include <asm/mmu_context.h>

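/*
 * Software shadow of the hardware PMB: each of the NR_PMB_ENTRIES slots
 * in pmb_entry_list below mirrors one entry of the PMB address/data
 * arrays.
 */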
struct pmb_entry;

struct pmb_entry {
	unsigned long vpn;
	unsigned long ppn;
	unsigned long flags;
	unsigned long size;

	spinlock_t lock;

	/*
	 * 0 .. NR_PMB_ENTRIES for specific entry selection, or
	 * PMB_NO_ENTRY to search for a free one
	 */
	int entry;

	/* Adjacent entry link for contiguous multi-entry mappings */
	struct pmb_entry *link;
};

static struct {
	unsigned long size;
	int flag;
} pmb_sizes[] = {
	{ .size = SZ_512M, .flag = PMB_SZ_512M, },
	{ .size = SZ_128M, .flag = PMB_SZ_128M, },
	{ .size = SZ_64M,  .flag = PMB_SZ_64M,  },
	{ .size = SZ_16M,  .flag = PMB_SZ_16M,  },
};

static void pmb_unmap_entry(struct pmb_entry *, int depth);

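/*
 * pmb_rwlock guards pmb_map and pmb_entry_list as a whole; the
 * per-entry spinlock guards an individual entry and its hardware slot.
 */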
static DEFINE_RWLOCK(pmb_rwlock);
static struct pmb_entry pmb_entry_list[NR_PMB_ENTRIES];
static DECLARE_BITMAP(pmb_map, NR_PMB_ENTRIES);

static unsigned int pmb_iomapping_enabled;

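/*
 * Helpers for computing the control register addresses of a given PMB
 * slot: the masked entry index is shifted into position and folded into
 * the PMB address array (mk_pmb_addr) or data array (mk_pmb_data) base.
 */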
static __always_inline unsigned long mk_pmb_entry(unsigned int entry)
{
	return (entry & PMB_E_MASK) << PMB_E_SHIFT;
}

static __always_inline unsigned long mk_pmb_addr(unsigned int entry)
{
	return mk_pmb_entry(entry) | PMB_ADDR;
}

static __always_inline unsigned long mk_pmb_data(unsigned int entry)
{
	return mk_pmb_entry(entry) | PMB_DATA;
}

static __always_inline unsigned int pmb_ppn_in_range(unsigned long ppn)
{
	return ppn >= __pa(memory_start) && ppn < __pa(memory_end);
}

/*
 * Ensure that the PMB entries match our cache configuration.
 *
 * When we are in 32-bit address extended mode, CCR.CB becomes
 * invalid, so care must be taken to manually adjust cacheable
 * translations.
 */
static __always_inline unsigned long pmb_cache_flags(void)
{
	unsigned long flags = 0;

#if defined(CONFIG_CACHE_OFF)
	flags |= PMB_WT | PMB_UB;
#elif defined(CONFIG_CACHE_WRITETHROUGH)
	flags |= PMB_C | PMB_WT | PMB_UB;
#elif defined(CONFIG_CACHE_WRITEBACK)
	flags |= PMB_C;
#endif

	return flags;
}

/*
 * Convert typical pgprot value to the PMB equivalent
 */
static inline unsigned long pgprot_to_pmb_flags(pgprot_t prot)
{
	unsigned long pmb_flags = 0;
	u64 flags = pgprot_val(prot);

	if (flags & _PAGE_CACHABLE)
		pmb_flags |= PMB_C;
	if (flags & _PAGE_WT)
		pmb_flags |= PMB_WT | PMB_UB;

	return pmb_flags;
}

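/*
 * Two entries can merge only when the second directly follows the
 * first both virtually and physically, with identical flags.
 */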
static inline bool pmb_can_merge(struct pmb_entry *a, struct pmb_entry *b)
{
	return (b->vpn == (a->vpn + a->size)) &&
	       (b->ppn == (a->ppn + a->size)) &&
	       (b->flags == a->flags);
}

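/*
 * Returns true if the requested [vaddr, vaddr + size) and
 * [phys, phys + size) ranges are already covered by an existing
 * mapping, walking any compound (linked) entries as needed.
 */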
static bool pmb_mapping_exists(unsigned long vaddr, phys_addr_t phys,
			       unsigned long size)
{
	int i;

	read_lock(&pmb_rwlock);

	for (i = 0; i < ARRAY_SIZE(pmb_entry_list); i++) {
		struct pmb_entry *pmbe, *iter;
		unsigned long span;

		if (!test_bit(i, pmb_map))
			continue;

		pmbe = &pmb_entry_list[i];

		/*
		 * See if VPN and PPN are bounded by an existing mapping.
		 */
		if ((vaddr < pmbe->vpn) || (vaddr >= (pmbe->vpn + pmbe->size)))
			continue;
		if ((phys < pmbe->ppn) || (phys >= (pmbe->ppn + pmbe->size)))
			continue;

		/*
		 * Now see if we're in range of a simple mapping.
		 */
		if (size <= pmbe->size) {
			read_unlock(&pmb_rwlock);
			return true;
		}

		span = pmbe->size;

		/*
		 * Finally, for sizes that involve compound mappings,
		 * walk the chain.
		 */
		for (iter = pmbe->link; iter; iter = iter->link)
			span += iter->size;

		/*
		 * Nothing else to do if the range requirements are met.
		 */
		if (size <= span) {
			read_unlock(&pmb_rwlock);
			return true;
		}
	}

	read_unlock(&pmb_rwlock);
	return false;
}

static bool pmb_size_valid(unsigned long size)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(pmb_sizes); i++)
		if (pmb_sizes[i].size == size)
			return true;

	return false;
}

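/*
 * PMB mappings must live entirely in the P1/P2 fixed translation
 * window, i.e. in [P1SEG, P3SEG).
 */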
static inline bool pmb_addr_valid(unsigned long addr, unsigned long size)
{
	return (addr >= P1SEG && (addr + size - 1) < P3SEG);
}

static inline bool pmb_prot_valid(pgprot_t prot)
{
	return (pgprot_val(prot) & _PAGE_USER) == 0;
}

static int pmb_size_to_flags(unsigned long size)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(pmb_sizes); i++)
		if (pmb_sizes[i].size == size)
			return pmb_sizes[i].flag;

	return 0;
}

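/*
 * Claim a free slot in the hardware PMB. Returns the slot index, or
 * -ENOSPC when all entries are in use. The caller must hold pmb_rwlock
 * for writing, as the bitmap is updated with a non-atomic __set_bit().
 */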
static int pmb_alloc_entry(void)
{
	int pos;

	pos = find_first_zero_bit(pmb_map, NR_PMB_ENTRIES);
	if (pos >= 0 && pos < NR_PMB_ENTRIES)
		__set_bit(pos, pmb_map);
	else
		pos = -ENOSPC;

	return pos;
}

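/*
 * Allocate and initialize the software state for a PMB entry. A
 * specific hardware slot can be requested via 'entry', or the first
 * free slot is claimed when entry == PMB_NO_ENTRY. Returns an
 * ERR_PTR() on failure.
 */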
static struct pmb_entry *pmb_alloc(unsigned long vpn, unsigned long ppn,
				   unsigned long flags, int entry)
{
	struct pmb_entry *pmbe;
	unsigned long irqflags;
	void *ret = NULL;
	int pos;

	write_lock_irqsave(&pmb_rwlock, irqflags);

	if (entry == PMB_NO_ENTRY) {
		pos = pmb_alloc_entry();
		if (unlikely(pos < 0)) {
			ret = ERR_PTR(pos);
			goto out;
		}
	} else {
		if (__test_and_set_bit(entry, pmb_map)) {
			ret = ERR_PTR(-ENOSPC);
			goto out;
		}

		pos = entry;
	}

	write_unlock_irqrestore(&pmb_rwlock, irqflags);

	pmbe = &pmb_entry_list[pos];

	memset(pmbe, 0, sizeof(struct pmb_entry));

	spin_lock_init(&pmbe->lock);

	pmbe->vpn = vpn;
	pmbe->ppn = ppn;
	pmbe->flags = flags;
	pmbe->entry = pos;

	return pmbe;

out:
	write_unlock_irqrestore(&pmb_rwlock, irqflags);
	return ret;
}

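/*
 * Release a slot back to the allocator. The caller must hold
 * pmb_rwlock for writing.
 */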
static void pmb_free(struct pmb_entry *pmbe)
{
	__clear_bit(pmbe->entry, pmb_map);

	pmbe->entry = PMB_NO_ENTRY;
	pmbe->link = NULL;
}

/*
 * Must be run uncached.
 */
static void __set_pmb_entry(struct pmb_entry *pmbe)
{
	/* Set V-bit */
	__raw_writel(pmbe->ppn | pmbe->flags | PMB_V, mk_pmb_data(pmbe->entry));
	__raw_writel(pmbe->vpn | PMB_V, mk_pmb_addr(pmbe->entry));
}

static void __clear_pmb_entry(struct pmb_entry *pmbe)
{
	unsigned long addr, data;
	unsigned long addr_val, data_val;

	addr = mk_pmb_addr(pmbe->entry);
	data = mk_pmb_data(pmbe->entry);

	addr_val = __raw_readl(addr);
	data_val = __raw_readl(data);

	/* Clear V-bit */
	writel_uncached(addr_val & ~PMB_V, addr);
	writel_uncached(data_val & ~PMB_V, data);
}

static void set_pmb_entry(struct pmb_entry *pmbe)
{
	unsigned long flags;

	spin_lock_irqsave(&pmbe->lock, flags);
	__set_pmb_entry(pmbe);
	spin_unlock_irqrestore(&pmbe->lock, flags);
}

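/*
 * Establish a bolted (permanent) PMB mapping of 'size' bytes from
 * 'phys' to 'vaddr'. The range is carved up greedily, retrying the
 * largest page size that still fits on each iteration, and the
 * resulting entries are linked together for later tear-down.
 */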
int pmb_bolt_mapping(unsigned long vaddr, phys_addr_t phys,
		     unsigned long size, pgprot_t prot)
{
	struct pmb_entry *pmbp, *pmbe;
	unsigned long flags, pmb_flags;
	int i, mapped;

	if (!pmb_addr_valid(vaddr, size))
		return -EFAULT;
	if (pmb_mapping_exists(vaddr, phys, size))
		return 0;

	pmb_flags = pgprot_to_pmb_flags(prot);
	pmbp = NULL;

	do {
		for (i = mapped = 0; i < ARRAY_SIZE(pmb_sizes); i++) {
			if (size < pmb_sizes[i].size)
				continue;

			pmbe = pmb_alloc(vaddr, phys, pmb_flags |
					 pmb_sizes[i].flag, PMB_NO_ENTRY);
			if (IS_ERR(pmbe)) {
				pmb_unmap_entry(pmbp, mapped);
				return PTR_ERR(pmbe);
			}

			spin_lock_irqsave(&pmbe->lock, flags);

			pmbe->size = pmb_sizes[i].size;

			__set_pmb_entry(pmbe);

			phys	+= pmbe->size;
			vaddr	+= pmbe->size;
			size	-= pmbe->size;

			/*
			 * Link adjacent entries that span multiple PMB
			 * entries for easier tear-down.
			 */
			if (likely(pmbp)) {
				spin_lock(&pmbp->lock);
				pmbp->link = pmbe;
				spin_unlock(&pmbp->lock);
			}

			pmbp = pmbe;

			/*
			 * Instead of trying smaller sizes on every
			 * iteration (even if we succeed in allocating
			 * space), try using pmb_sizes[i].size again.
			 */
			i--;
			mapped++;

			spin_unlock_irqrestore(&pmbe->lock, flags);
		}
	} while (size >= SZ_16M);

	return 0;
}

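/*
 * Usage, as a minimal sketch (not part of this file; the virtual and
 * physical addresses, size and pgprot below are purely illustrative
 * assumptions):
 *
 *	if (pmb_bolt_mapping(0xa4000000, 0x44000000, SZ_64M,
 *			     PAGE_KERNEL) != 0)
 *		pr_err("PMB: bolting fixed mapping failed\n");
 */

/*
 * PMB-backed ioremap() helper: grabs a suitably aligned virtual window
 * between the end of the uncached section and P3SEG and bolts 'phys'
 * into it. Mappings smaller than 16MB are left to the TLB.
 */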
void __iomem *pmb_remap_caller(phys_addr_t phys, unsigned long size,
			       pgprot_t prot, void *caller)
{
	unsigned long orig_addr, vaddr;
	phys_addr_t offset, last_addr;
	phys_addr_t align_mask;
	unsigned long aligned;
	struct vm_struct *area;
	int i, ret;

	if (!pmb_iomapping_enabled)
		return NULL;

	/*
	 * Small mappings need to go through the TLB.
	 */
	if (size < SZ_16M)
		return ERR_PTR(-EINVAL);
	if (!pmb_prot_valid(prot))
		return ERR_PTR(-EINVAL);

	for (i = 0; i < ARRAY_SIZE(pmb_sizes); i++)
		if (size >= pmb_sizes[i].size)
			break;

	last_addr = phys + size;
	align_mask = ~(pmb_sizes[i].size - 1);
	offset = phys & ~align_mask;
	phys &= align_mask;
	aligned = ALIGN(last_addr, pmb_sizes[i].size) - phys;

	area = __get_vm_area_caller(aligned, VM_IOREMAP, uncached_end,
				    P3SEG, caller);
	if (!area)
		return NULL;

	area->phys_addr = phys;
	orig_addr = vaddr = (unsigned long)area->addr;

	ret = pmb_bolt_mapping(vaddr, phys, size, prot);
	if (unlikely(ret != 0))
		return ERR_PTR(ret);

	return (void __iomem *)(offset + (char *)orig_addr);
}

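/*
 * Tear down the compound mapping that starts at 'addr'. Returns
 * -EINVAL when no bolted mapping begins at that address.
 */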
int pmb_unmap(void __iomem *addr)
{
	struct pmb_entry *pmbe = NULL;
	unsigned long vaddr = (unsigned long __force)addr;
	int i, found = 0;

	read_lock(&pmb_rwlock);

	for (i = 0; i < ARRAY_SIZE(pmb_entry_list); i++) {
		if (test_bit(i, pmb_map)) {
			pmbe = &pmb_entry_list[i];
			if (pmbe->vpn == vaddr) {
				found = 1;
				break;
			}
		}
	}

	read_unlock(&pmb_rwlock);

	if (found) {
		pmb_unmap_entry(pmbe, NR_PMB_ENTRIES);
		return 0;
	}

	return -EINVAL;
}

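/*
 * Invalidate and free up to 'depth' entries along the link chain,
 * starting at 'pmbe'. The caller must hold pmb_rwlock for writing.
 */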
static void __pmb_unmap_entry(struct pmb_entry *pmbe, int depth)
{
	do {
		struct pmb_entry *pmblink = pmbe;

		/*
		 * We may be called before this pmb_entry has been
		 * entered into the PMB table via set_pmb_entry(), but
		 * that's OK because we've allocated a unique slot for
		 * this entry in pmb_alloc() (even if we haven't filled
		 * it yet).
		 *
		 * Therefore, calling __clear_pmb_entry() is safe as no
		 * other mapping can be using that slot.
		 */
		__clear_pmb_entry(pmbe);

		pmbe = pmblink->link;

		pmb_free(pmblink);
	} while (pmbe && --depth);
}

static void pmb_unmap_entry(struct pmb_entry *pmbe, int depth)
{
	unsigned long flags;

	if (unlikely(!pmbe))
		return;

	write_lock_irqsave(&pmb_rwlock, flags);
	__pmb_unmap_entry(pmbe, depth);
	write_unlock_irqrestore(&pmb_rwlock, flags);
}

static void __init pmb_notify(void)
{
	int i;

	pr_info("PMB: boot mappings:\n");

	read_lock(&pmb_rwlock);

	for (i = 0; i < ARRAY_SIZE(pmb_entry_list); i++) {
		struct pmb_entry *pmbe;

		if (!test_bit(i, pmb_map))
			continue;

		pmbe = &pmb_entry_list[i];

		pr_info("       0x%08lx -> 0x%08lx [ %4ldMB %2scached ]\n",
			pmbe->vpn >> PAGE_SHIFT, pmbe->ppn >> PAGE_SHIFT,
			pmbe->size >> 20, (pmbe->flags & PMB_C) ? "" : "un");
	}

	read_unlock(&pmb_rwlock);
}

/*
 * Sync our software copy of the PMB mappings with those in hardware. The
 * mappings in the hardware PMB were either set up by the bootloader or
 * very early on by the kernel.
 */
static void __init pmb_synchronize(void)
{
	struct pmb_entry *pmbp = NULL;
	int i, j;

	/*
	 * Run through the initial boot mappings, log the established
	 * ones, and blow away anything that falls outside of the valid
	 * PPN range. Specifically, we only care about existing mappings
	 * that impact the cached/uncached sections.
	 *
	 * Note that touching these can be a bit of a minefield; the boot
	 * loader can establish multi-page mappings with the same caching
	 * attributes, so we need to ensure that we aren't modifying a
	 * mapping that we're presently executing from, or may execute
	 * from in the case of straddling page boundaries.
	 *
	 * In the future we will have to tidy up after the boot loader by
	 * jumping between the cached and uncached mappings and tearing
	 * down alternating mappings while executing from the other.
	 */
	for (i = 0; i < NR_PMB_ENTRIES; i++) {
		unsigned long addr, data;
		unsigned long addr_val, data_val;
		unsigned long ppn, vpn, flags;
		unsigned long irqflags;
		unsigned int size;
		struct pmb_entry *pmbe;

		addr = mk_pmb_addr(i);
		data = mk_pmb_data(i);

		addr_val = __raw_readl(addr);
		data_val = __raw_readl(data);

		/*
		 * Skip over any bogus entries
		 */
		if (!(data_val & PMB_V) || !(addr_val & PMB_V))
			continue;

		ppn = data_val & PMB_PFN_MASK;
		vpn = addr_val & PMB_PFN_MASK;

		/*
		 * Only preserve in-range mappings.
		 */
		if (!pmb_ppn_in_range(ppn)) {
			/*
			 * Invalidate anything out of bounds.
			 */
			writel_uncached(addr_val & ~PMB_V, addr);
			writel_uncached(data_val & ~PMB_V, data);
			continue;
		}

		/*
		 * Update the caching attributes if necessary
		 */
		if (data_val & PMB_C) {
			data_val &= ~PMB_CACHE_MASK;
			data_val |= pmb_cache_flags();

			writel_uncached(data_val, data);
		}

		size = data_val & PMB_SZ_MASK;
		flags = size | (data_val & PMB_CACHE_MASK);

		pmbe = pmb_alloc(vpn, ppn, flags, i);
		if (IS_ERR(pmbe)) {
			WARN_ON_ONCE(1);
			continue;
		}

		spin_lock_irqsave(&pmbe->lock, irqflags);

		for (j = 0; j < ARRAY_SIZE(pmb_sizes); j++)
			if (pmb_sizes[j].flag == size)
				pmbe->size = pmb_sizes[j].size;

		if (pmbp) {
			spin_lock(&pmbp->lock);

			/*
			 * Compare the previous entry against the current one to
			 * see if the entries span a contiguous mapping. If so,
			 * set up the entry links accordingly. Compound mappings
			 * are later coalesced.
			 */
			if (pmb_can_merge(pmbp, pmbe))
				pmbp->link = pmbe;

			spin_unlock(&pmbp->lock);
		}

		pmbp = pmbe;

		spin_unlock_irqrestore(&pmbe->lock, irqflags);
	}
}

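/*
 * Walk the link chain from 'head' and find the longest prefix whose
 * combined span is a valid PMB size; fold that prefix into 'head' and
 * release the now-redundant entries.
 */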
static void __init pmb_merge(struct pmb_entry *head)
{
	unsigned long span, newsize;
	struct pmb_entry *tail;
	int i = 1, depth = 0;

	span = newsize = head->size;

	tail = head->link;
	while (tail) {
		span += tail->size;

		if (pmb_size_valid(span)) {
			newsize = span;
			depth = i;
		}

		/* This is the end of the line.. */
		if (!tail->link)
			break;

		tail = tail->link;
		i++;
	}

	/*
	 * The merged page size must be valid.
	 */
	if (!pmb_size_valid(newsize))
		return;

	head->flags &= ~PMB_SZ_MASK;
	head->flags |= pmb_size_to_flags(newsize);

	head->size = newsize;

	__pmb_unmap_entry(head->link, depth);
	__set_pmb_entry(head);
}

static void __init pmb_coalesce(void)
{
	unsigned long flags;
	int i;

	write_lock_irqsave(&pmb_rwlock, flags);

	for (i = 0; i < ARRAY_SIZE(pmb_entry_list); i++) {
		struct pmb_entry *pmbe;

		if (!test_bit(i, pmb_map))
			continue;

		pmbe = &pmb_entry_list[i];

		/*
		 * We're only interested in compound mappings
		 */
		if (!pmbe->link)
			continue;

		/*
		 * Nothing to do if it already uses the largest possible
		 * page size.
		 */
		if (pmbe->size == SZ_512M)
			continue;

		pmb_merge(pmbe);
	}

	write_unlock_irqrestore(&pmb_rwlock, flags);
}

#ifdef CONFIG_UNCACHED_MAPPING
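/*
 * The uncached mapping handed to us (presumably by the boot loader)
 * may be oversized; shrink it back down to the kernel's standard
 * 16MB window.
 */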
static void __init pmb_resize(void)
{
	int i;

	/*
	 * If the uncached mapping was constructed by the kernel, it will
	 * already be a reasonable size.
	 */
	if (uncached_size == SZ_16M)
		return;

	read_lock(&pmb_rwlock);

	for (i = 0; i < ARRAY_SIZE(pmb_entry_list); i++) {
		struct pmb_entry *pmbe;
		unsigned long flags;

		if (!test_bit(i, pmb_map))
			continue;

		pmbe = &pmb_entry_list[i];

		if (pmbe->vpn != uncached_start)
			continue;

		/*
		 * Found it, now resize it.
		 */
		spin_lock_irqsave(&pmbe->lock, flags);

		pmbe->size	= SZ_16M;
		pmbe->flags	&= ~PMB_SZ_MASK;
		pmbe->flags	|= pmb_size_to_flags(pmbe->size);

		uncached_resize(pmbe->size);

		__set_pmb_entry(pmbe);

		spin_unlock_irqrestore(&pmbe->lock, flags);
	}

	read_unlock(&pmb_rwlock);
}
#endif

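/*
 * "pmb=iomap" on the kernel command line routes ioremap() of large
 * (>= 16MB) ranges through PMB-backed mappings.
 */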
static int __init early_pmb(char *p)
{
	if (!p)
		return 0;

	if (strstr(p, "iomap"))
		pmb_iomapping_enabled = 1;

	return 0;
}
early_param("pmb", early_pmb);

void __init pmb_init(void)
{
	/* Synchronize software state */
	pmb_synchronize();

	/* Attempt to combine compound mappings */
	pmb_coalesce();

#ifdef CONFIG_UNCACHED_MAPPING
	/* Resize initial mappings, if necessary */
	pmb_resize();
#endif

	/* Log them */
	pmb_notify();

	writel_uncached(0, PMB_IRMCR);

	/* Flush out the TLB */
	__raw_writel(__raw_readl(MMUCR) | MMUCR_TI, MMUCR);
	ctrl_barrier();
}

bool __in_29bit_mode(void)
{
	return (__raw_readl(PMB_PASCR) & PASCR_SE) == 0;
}

static int pmb_seq_show(struct seq_file *file, void *iter)
{
	int i;

	seq_printf(file, "V: Valid, C: Cacheable, WT: Write-Through\n"
			 "CB: Copy-Back, B: Buffered, UB: Unbuffered\n");
	seq_printf(file, "ety   vpn  ppn  size   flags\n");

	for (i = 0; i < NR_PMB_ENTRIES; i++) {
		unsigned long addr, data;
		unsigned int size;
		char *sz_str = NULL;

		addr = __raw_readl(mk_pmb_addr(i));
		data = __raw_readl(mk_pmb_data(i));

		size = data & PMB_SZ_MASK;
		sz_str = (size == PMB_SZ_16M)  ? " 16MB":
			 (size == PMB_SZ_64M)  ? " 64MB":
			 (size == PMB_SZ_128M) ? "128MB":
						 "512MB";

		/* 02: V 0x88 0x08 128MB C CB  B */
		seq_printf(file, "%02d: %c 0x%02lx 0x%02lx %s %c %s %s\n",
			   i, ((addr & PMB_V) && (data & PMB_V)) ? 'V' : ' ',
			   (addr >> 24) & 0xff, (data >> 24) & 0xff,
			   sz_str, (data & PMB_C) ? 'C' : ' ',
			   (data & PMB_WT) ? "WT" : "CB",
			   (data & PMB_UB) ? "UB" : " B");
	}

	return 0;
}

static int pmb_debugfs_open(struct inode *inode, struct file *file)
{
	return single_open(file, pmb_seq_show, NULL);
}

static const struct file_operations pmb_debugfs_fops = {
	.owner		= THIS_MODULE,
	.open		= pmb_debugfs_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};

static int __init pmb_debugfs_init(void)
{
	struct dentry *dentry;

	dentry = debugfs_create_file("pmb", S_IFREG | S_IRUGO,
				     sh_debugfs_root, NULL, &pmb_debugfs_fops);
	if (!dentry)
		return -ENOMEM;
	if (IS_ERR(dentry))
		return PTR_ERR(dentry);

	return 0;
}
postcore_initcall(pmb_debugfs_init);

#ifdef CONFIG_PM
static int pmb_sysdev_suspend(struct sys_device *dev, pm_message_t state)
{
	static pm_message_t prev_state;
	int i;

	/* Restore the PMB after a resume from hibernation */
	if (state.event == PM_EVENT_ON &&
	    prev_state.event == PM_EVENT_FREEZE) {
		struct pmb_entry *pmbe;

		read_lock(&pmb_rwlock);

		for (i = 0; i < ARRAY_SIZE(pmb_entry_list); i++) {
			if (test_bit(i, pmb_map)) {
				pmbe = &pmb_entry_list[i];
				set_pmb_entry(pmbe);
			}
		}

		read_unlock(&pmb_rwlock);
	}

	prev_state = state;

	return 0;
}

static int pmb_sysdev_resume(struct sys_device *dev)
{
	return pmb_sysdev_suspend(dev, PMSG_ON);
}

static struct sysdev_driver pmb_sysdev_driver = {
	.suspend = pmb_sysdev_suspend,
	.resume = pmb_sysdev_resume,
};

static int __init pmb_sysdev_init(void)
{
	return sysdev_driver_register(&cpu_sysdev_class, &pmb_sysdev_driver);
}
subsys_initcall(pmb_sysdev_init);
#endif