/*
 * arch/sh/mm/pmb.c
 *
 * Privileged Space Mapping Buffer (PMB) Support.
 *
 * Copyright (C) 2005 - 2010  Paul Mundt
 * Copyright (C) 2010  Matt Fleming
 *
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 */
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/sysdev.h>
#include <linux/cpu.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/bitops.h>
#include <linux/debugfs.h>
#include <linux/fs.h>
#include <linux/seq_file.h>
#include <linux/err.h>
#include <linux/io.h>
#include <linux/spinlock.h>
#include <linux/vmalloc.h>
#include <asm/sizes.h>
#include <asm/system.h>
#include <asm/uaccess.h>
#include <asm/pgtable.h>
#include <asm/page.h>
#include <asm/mmu.h>
#include <asm/mmu_context.h>

struct pmb_entry;

struct pmb_entry {
	unsigned long vpn;
	unsigned long ppn;
	unsigned long flags;
	unsigned long size;

	spinlock_t lock;

	/*
	 * 0 .. NR_PMB_ENTRIES for specific entry selection, or
	 * PMB_NO_ENTRY to search for a free one
	 */
	int entry;

	/* Adjacent entry link for contiguous multi-entry mappings */
	struct pmb_entry *link;
};

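/*
 * Section sizes the PMB can translate, largest first; mapping setup
 * walks this table to pick the biggest size that still fits the
 * remaining request.
 */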
static struct {
	unsigned long size;
	int flag;
} pmb_sizes[] = {
	{ .size = SZ_512M, .flag = PMB_SZ_512M, },
	{ .size = SZ_128M, .flag = PMB_SZ_128M, },
	{ .size = SZ_64M,  .flag = PMB_SZ_64M,  },
	{ .size = SZ_16M,  .flag = PMB_SZ_16M,  },
};

static void pmb_unmap_entry(struct pmb_entry *, int depth);

static DEFINE_RWLOCK(pmb_rwlock);
static struct pmb_entry pmb_entry_list[NR_PMB_ENTRIES];
static DECLARE_BITMAP(pmb_map, NR_PMB_ENTRIES);

static unsigned int pmb_iomapping_enabled;

static __always_inline unsigned long mk_pmb_entry(unsigned int entry)
{
	return (entry & PMB_E_MASK) << PMB_E_SHIFT;
}

static __always_inline unsigned long mk_pmb_addr(unsigned int entry)
{
	return mk_pmb_entry(entry) | PMB_ADDR;
}

static __always_inline unsigned long mk_pmb_data(unsigned int entry)
{
	return mk_pmb_entry(entry) | PMB_DATA;
}

static __always_inline unsigned int pmb_ppn_in_range(unsigned long ppn)
{
	return ppn >= __pa(memory_start) && ppn < __pa(memory_end);
}

/*
 * Ensure that the PMB entries match our cache configuration.
 *
 * When we are in 32-bit address extended mode, CCR.CB becomes
 * invalid, so care must be taken to manually adjust cacheable
 * translations.
 */
static __always_inline unsigned long pmb_cache_flags(void)
{
	unsigned long flags = 0;

#if defined(CONFIG_CACHE_OFF)
	flags |= PMB_WT | PMB_UB;
#elif defined(CONFIG_CACHE_WRITETHROUGH)
	flags |= PMB_C | PMB_WT | PMB_UB;
#elif defined(CONFIG_CACHE_WRITEBACK)
	flags |= PMB_C;
#endif

	return flags;
}

/*
 * Convert typical pgprot value to the PMB equivalent
 */
static inline unsigned long pgprot_to_pmb_flags(pgprot_t prot)
{
	unsigned long pmb_flags = 0;
	u64 flags = pgprot_val(prot);

	if (flags & _PAGE_CACHABLE)
		pmb_flags |= PMB_C;
	if (flags & _PAGE_WT)
		pmb_flags |= PMB_WT | PMB_UB;

	return pmb_flags;
}

static bool pmb_can_merge(struct pmb_entry *a, struct pmb_entry *b)
{
	return (b->vpn == (a->vpn + a->size)) &&
	       (b->ppn == (a->ppn + a->size)) &&
	       (b->flags == a->flags);
}

static bool pmb_size_valid(unsigned long size)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(pmb_sizes); i++)
		if (pmb_sizes[i].size == size)
			return true;

	return false;
}

static inline bool pmb_addr_valid(unsigned long addr, unsigned long size)
{
	return (addr >= P1SEG && (addr + size - 1) < P3SEG);
}

static inline bool pmb_prot_valid(pgprot_t prot)
{
	return (pgprot_val(prot) & _PAGE_USER) == 0;
}

static int pmb_size_to_flags(unsigned long size)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(pmb_sizes); i++)
		if (pmb_sizes[i].size == size)
			return pmb_sizes[i].flag;

	return 0;
}

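/*
 * Claim the first free slot in the PMB allocation bitmap, returning
 * its index or -ENOSPC when the table is full. The caller holds
 * pmb_rwlock for writing.
 */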
static int pmb_alloc_entry(void)
{
	int pos;

	pos = find_first_zero_bit(pmb_map, NR_PMB_ENTRIES);
	if (pos >= 0 && pos < NR_PMB_ENTRIES)
		__set_bit(pos, pmb_map);
	else
		pos = -ENOSPC;

	return pos;
}

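/*
 * Allocate the software state for a single PMB entry, either at the
 * specific hardware slot given by 'entry' or at the first free slot
 * when PMB_NO_ENTRY is passed. Returns an ERR_PTR() if the requested
 * or scanned slot is unavailable.
 */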
static struct pmb_entry *pmb_alloc(unsigned long vpn, unsigned long ppn,
				   unsigned long flags, int entry)
{
	struct pmb_entry *pmbe;
	unsigned long irqflags;
	void *ret = NULL;
	int pos;

	write_lock_irqsave(&pmb_rwlock, irqflags);

	if (entry == PMB_NO_ENTRY) {
		pos = pmb_alloc_entry();
		if (unlikely(pos < 0)) {
			ret = ERR_PTR(pos);
			goto out;
		}
	} else {
		if (__test_and_set_bit(entry, pmb_map)) {
			ret = ERR_PTR(-ENOSPC);
			goto out;
		}

		pos = entry;
	}

	write_unlock_irqrestore(&pmb_rwlock, irqflags);

	pmbe = &pmb_entry_list[pos];

	memset(pmbe, 0, sizeof(struct pmb_entry));

	spin_lock_init(&pmbe->lock);

	pmbe->vpn   = vpn;
	pmbe->ppn   = ppn;
	pmbe->flags = flags;
	pmbe->entry = pos;

	return pmbe;

out:
	write_unlock_irqrestore(&pmb_rwlock, irqflags);
	return ret;
}

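/*
 * Release an entry's slot back to the allocation bitmap and detach it
 * from any compound mapping chain. Callers hold pmb_rwlock for writing.
 */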
static void pmb_free(struct pmb_entry *pmbe)
{
	__clear_bit(pmbe->entry, pmb_map);

	pmbe->entry = PMB_NO_ENTRY;
	pmbe->link  = NULL;
}

/*
 * Must be run uncached.
 */
static void __set_pmb_entry(struct pmb_entry *pmbe)
{
	/* Set V-bit */
	__raw_writel(pmbe->ppn | pmbe->flags | PMB_V, mk_pmb_data(pmbe->entry));
	__raw_writel(pmbe->vpn | PMB_V, mk_pmb_addr(pmbe->entry));
}

static void __clear_pmb_entry(struct pmb_entry *pmbe)
{
	unsigned long addr, data;
	unsigned long addr_val, data_val;

	addr = mk_pmb_addr(pmbe->entry);
	data = mk_pmb_data(pmbe->entry);

	addr_val = __raw_readl(addr);
	data_val = __raw_readl(data);

	/* Clear V-bit */
	writel_uncached(addr_val & ~PMB_V, addr);
	writel_uncached(data_val & ~PMB_V, data);
}

static void set_pmb_entry(struct pmb_entry *pmbe)
{
	unsigned long flags;

	spin_lock_irqsave(&pmbe->lock, flags);
	__set_pmb_entry(pmbe);
	spin_unlock_irqrestore(&pmbe->lock, flags);
}

int pmb_bolt_mapping(unsigned long vaddr, phys_addr_t phys,
		     unsigned long size, pgprot_t prot)
{
	return 0;
}

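/*
 * Establish a PMB-backed ioremap() translation: the physical range is
 * covered with the largest section sizes that fit, and the resulting
 * entries are linked together so the whole mapping can be torn down in
 * one pass. Returns NULL when PMB iomapping is disabled or no virtual
 * area is available, and an ERR_PTR() for invalid requests.
 */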
void __iomem *pmb_remap_caller(phys_addr_t phys, unsigned long size,
			       pgprot_t prot, void *caller)
{
	struct pmb_entry *pmbp, *pmbe;
	unsigned long pmb_flags;
	int i, mapped;
	unsigned long orig_addr, vaddr;
	phys_addr_t offset, last_addr;
	phys_addr_t align_mask;
	unsigned long aligned;
	struct vm_struct *area;

	if (!pmb_iomapping_enabled)
		return NULL;

	/*
	 * Small mappings need to go through the TLB.
	 */
	if (size < SZ_16M)
		return ERR_PTR(-EINVAL);
	if (!pmb_prot_valid(prot))
		return ERR_PTR(-EINVAL);

	pmbp = NULL;
	pmb_flags = pgprot_to_pmb_flags(prot);
	mapped = 0;

	for (i = 0; i < ARRAY_SIZE(pmb_sizes); i++)
		if (size >= pmb_sizes[i].size)
			break;

	last_addr = phys + size;
	align_mask = ~(pmb_sizes[i].size - 1);
	offset = phys & ~align_mask;
	phys &= align_mask;
	aligned = ALIGN(last_addr, pmb_sizes[i].size) - phys;

	area = __get_vm_area_caller(aligned, VM_IOREMAP, uncached_end,
				    P3SEG, caller);
	if (!area)
		return NULL;

	area->phys_addr = phys;
	orig_addr = vaddr = (unsigned long)area->addr;

	if (!pmb_addr_valid(vaddr, aligned))
		return ERR_PTR(-EFAULT);

again:
	for (i = 0; i < ARRAY_SIZE(pmb_sizes); i++) {
		unsigned long flags;

		if (size < pmb_sizes[i].size)
			continue;

		pmbe = pmb_alloc(vaddr, phys, pmb_flags | pmb_sizes[i].flag,
				 PMB_NO_ENTRY);
		if (IS_ERR(pmbe)) {
			pmb_unmap_entry(pmbp, mapped);
			return pmbe;
		}

		spin_lock_irqsave(&pmbe->lock, flags);

		pmbe->size = pmb_sizes[i].size;

		__set_pmb_entry(pmbe);

		phys  += pmbe->size;
		vaddr += pmbe->size;
		size  -= pmbe->size;

		/*
		 * Link adjacent entries that span multiple PMB entries
		 * for easier tear-down.
		 */
		if (likely(pmbp)) {
			spin_lock(&pmbp->lock);
			pmbp->link = pmbe;
			spin_unlock(&pmbp->lock);
		}

		pmbp = pmbe;

		/*
		 * Instead of trying smaller sizes on every iteration
		 * (even if we succeed in allocating space), try using
		 * pmb_sizes[i].size again.
		 */
		i--;
		mapped++;

		spin_unlock_irqrestore(&pmbe->lock, flags);
	}

	if (size >= SZ_16M)
		goto again;

	return (void __iomem *)(offset + (char *)orig_addr);
}

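/*
 * Tear down a mapping previously established by pmb_remap_caller(),
 * looked up by the virtual address it was mapped at.
 */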
int pmb_unmap(void __iomem *addr)
{
	struct pmb_entry *pmbe = NULL;
	unsigned long vaddr = (unsigned long __force)addr;
	int i, found = 0;

	read_lock(&pmb_rwlock);

	for (i = 0; i < ARRAY_SIZE(pmb_entry_list); i++) {
		if (test_bit(i, pmb_map)) {
			pmbe = &pmb_entry_list[i];
			if (pmbe->vpn == vaddr) {
				found = 1;
				break;
			}
		}
	}

	read_unlock(&pmb_rwlock);

	if (found) {
		pmb_unmap_entry(pmbe, NR_PMB_ENTRIES);
		return 0;
	}

	return -EINVAL;
}

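/*
 * Invalidate and free up to 'depth' entries in a linked chain,
 * starting at 'pmbe'. Called with pmb_rwlock held for writing.
 */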
static void __pmb_unmap_entry(struct pmb_entry *pmbe, int depth)
{
	do {
		struct pmb_entry *pmblink = pmbe;

		/*
		 * We may be called before this pmb_entry has been
		 * entered into the PMB table via set_pmb_entry(), but
		 * that's OK because we've allocated a unique slot for
		 * this entry in pmb_alloc() (even if we haven't filled
		 * it yet).
		 *
		 * Therefore, calling __clear_pmb_entry() is safe as no
		 * other mapping can be using that slot.
		 */
		__clear_pmb_entry(pmbe);

		pmbe = pmblink->link;

		pmb_free(pmblink);
	} while (pmbe && --depth);
}

static void pmb_unmap_entry(struct pmb_entry *pmbe, int depth)
{
	unsigned long flags;

	if (unlikely(!pmbe))
		return;

	write_lock_irqsave(&pmb_rwlock, flags);
	__pmb_unmap_entry(pmbe, depth);
	write_unlock_irqrestore(&pmb_rwlock, flags);
}

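/*
 * Log the boot mappings that survived synchronization.
 */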
static void __init pmb_notify(void)
{
	int i;

	pr_info("PMB: boot mappings:\n");

	read_lock(&pmb_rwlock);

	for (i = 0; i < ARRAY_SIZE(pmb_entry_list); i++) {
		struct pmb_entry *pmbe;

		if (!test_bit(i, pmb_map))
			continue;

		pmbe = &pmb_entry_list[i];

		pr_info("       0x%08lx -> 0x%08lx [ %4ldMB %2scached ]\n",
			pmbe->vpn >> PAGE_SHIFT, pmbe->ppn >> PAGE_SHIFT,
			pmbe->size >> 20, (pmbe->flags & PMB_C) ? "" : "un");
	}

	read_unlock(&pmb_rwlock);
}

/*
 * Sync our software copy of the PMB mappings with those in hardware. The
 * mappings in the hardware PMB were either set up by the bootloader or
 * very early on by the kernel.
 */
static void __init pmb_synchronize(void)
{
	struct pmb_entry *pmbp = NULL;
	int i, j;

	/*
	 * Run through the initial boot mappings, log the established
	 * ones, and blow away anything that falls outside of the valid
	 * PPN range. Specifically, we only care about existing mappings
	 * that impact the cached/uncached sections.
	 *
	 * Note that touching these can be a bit of a minefield; the boot
	 * loader can establish multi-page mappings with the same caching
	 * attributes, so we need to ensure that we aren't modifying a
	 * mapping that we're presently executing from, or may execute
	 * from in the case of straddling page boundaries.
	 *
	 * In the future we will have to tidy up after the boot loader by
	 * jumping between the cached and uncached mappings and tearing
	 * down alternating mappings while executing from the other.
	 */
	for (i = 0; i < NR_PMB_ENTRIES; i++) {
		unsigned long addr, data;
		unsigned long addr_val, data_val;
		unsigned long ppn, vpn, flags;
		unsigned long irqflags;
		unsigned int size;
		struct pmb_entry *pmbe;

		addr = mk_pmb_addr(i);
		data = mk_pmb_data(i);

		addr_val = __raw_readl(addr);
		data_val = __raw_readl(data);

		/*
		 * Skip over any bogus entries
		 */
		if (!(data_val & PMB_V) || !(addr_val & PMB_V))
			continue;

		ppn = data_val & PMB_PFN_MASK;
		vpn = addr_val & PMB_PFN_MASK;

		/*
		 * Only preserve in-range mappings.
		 */
		if (!pmb_ppn_in_range(ppn)) {
			/*
			 * Invalidate anything out of bounds.
			 */
			writel_uncached(addr_val & ~PMB_V, addr);
			writel_uncached(data_val & ~PMB_V, data);
			continue;
		}

		/*
		 * Update the caching attributes if necessary
		 */
		if (data_val & PMB_C) {
			data_val &= ~PMB_CACHE_MASK;
			data_val |= pmb_cache_flags();

			writel_uncached(data_val, data);
		}

		size = data_val & PMB_SZ_MASK;
		flags = size | (data_val & PMB_CACHE_MASK);

		pmbe = pmb_alloc(vpn, ppn, flags, i);
		if (IS_ERR(pmbe)) {
			WARN_ON_ONCE(1);
			continue;
		}

		spin_lock_irqsave(&pmbe->lock, irqflags);

		for (j = 0; j < ARRAY_SIZE(pmb_sizes); j++)
			if (pmb_sizes[j].flag == size)
				pmbe->size = pmb_sizes[j].size;

		if (pmbp) {
			spin_lock(&pmbp->lock);

			/*
			 * Compare the previous entry against the current one to
			 * see if the entries span a contiguous mapping. If so,
			 * setup the entry links accordingly. Compound mappings
			 * are later coalesced.
			 */
			if (pmb_can_merge(pmbp, pmbe))
				pmbp->link = pmbe;

			spin_unlock(&pmbp->lock);
		}

		pmbp = pmbe;

		spin_unlock_irqrestore(&pmbe->lock, irqflags);
	}
}

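/*
 * Walk the link chain starting at 'head' and, where the combined span
 * matches one of the supported section sizes, collapse the chain into
 * a single larger entry.
 */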
static void __init pmb_merge(struct pmb_entry *head)
{
	unsigned long span, newsize;
	struct pmb_entry *tail;
	int i = 1, depth = 0;

	span = newsize = head->size;

	tail = head->link;
	while (tail) {
		span += tail->size;

		if (pmb_size_valid(span)) {
			newsize = span;
			depth = i;
		}

		/* This is the end of the line.. */
		if (!tail->link)
			break;

		tail = tail->link;
		i++;
	}

	/*
	 * The merged page size must be valid.
	 */
	if (!pmb_size_valid(newsize))
		return;

	head->flags &= ~PMB_SZ_MASK;
	head->flags |= pmb_size_to_flags(newsize);

	head->size = newsize;

	__pmb_unmap_entry(head->link, depth);
	__set_pmb_entry(head);
}

static void __init pmb_coalesce(void)
{
	unsigned long flags;
	int i;

	write_lock_irqsave(&pmb_rwlock, flags);

	for (i = 0; i < ARRAY_SIZE(pmb_entry_list); i++) {
		struct pmb_entry *pmbe;

		if (!test_bit(i, pmb_map))
			continue;

		pmbe = &pmb_entry_list[i];

		/*
		 * We're only interested in compound mappings
		 */
		if (!pmbe->link)
			continue;

		/*
		 * Nothing to do if it already uses the largest possible
		 * page size.
		 */
		if (pmbe->size == SZ_512M)
			continue;

		pmb_merge(pmbe);
	}

	write_unlock_irqrestore(&pmb_rwlock, flags);
}

#ifdef CONFIG_UNCACHED_MAPPING
static void __init pmb_resize(void)
{
	int i;

	/*
	 * If the uncached mapping was constructed by the kernel, it will
	 * already be a reasonable size.
	 */
	if (uncached_size == SZ_16M)
		return;

	read_lock(&pmb_rwlock);

	for (i = 0; i < ARRAY_SIZE(pmb_entry_list); i++) {
		struct pmb_entry *pmbe;
		unsigned long flags;

		if (!test_bit(i, pmb_map))
			continue;

		pmbe = &pmb_entry_list[i];

		if (pmbe->vpn != uncached_start)
			continue;

		/*
		 * Found it, now resize it.
		 */
		spin_lock_irqsave(&pmbe->lock, flags);

		pmbe->size   = SZ_16M;
		pmbe->flags &= ~PMB_SZ_MASK;
		pmbe->flags |= pmb_size_to_flags(pmbe->size);

		uncached_resize(pmbe->size);

		__set_pmb_entry(pmbe);

		spin_unlock_irqrestore(&pmbe->lock, flags);
	}

	read_unlock(&pmb_rwlock);
}
#endif

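/*
 * Command line handling: "pmb=iomap" opts in to routing sufficiently
 * large ioremap() requests through the PMB.
 */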
static int __init early_pmb(char *p)
{
	if (!p)
		return 0;

	if (strstr(p, "iomap"))
		pmb_iomapping_enabled = 1;

	return 0;
}
early_param("pmb", early_pmb);

void __init pmb_init(void)
{
	/* Synchronize software state */
	pmb_synchronize();

	/* Attempt to combine compound mappings */
	pmb_coalesce();

#ifdef CONFIG_UNCACHED_MAPPING
	/* Resize initial mappings, if necessary */
	pmb_resize();
#endif

	/* Log them */
	pmb_notify();

	writel_uncached(0, PMB_IRMCR);

	/* Flush out the TLB */
	__raw_writel(__raw_readl(MMUCR) | MMUCR_TI, MMUCR);
	ctrl_barrier();
}

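/*
 * Legacy 29-bit physical addressing is in effect when the SE bit in
 * PASCR is clear.
 */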
bool __in_29bit_mode(void)
{
	return (__raw_readl(PMB_PASCR) & PASCR_SE) == 0;
}

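/*
 * debugfs dump of the raw hardware PMB entries.
 */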
static int pmb_seq_show(struct seq_file *file, void *iter)
{
	int i;

	seq_printf(file, "V: Valid, C: Cacheable, WT: Write-Through\n"
			 "CB: Copy-Back, B: Buffered, UB: Unbuffered\n");
	seq_printf(file, "ety   vpn  ppn  size   flags\n");

	for (i = 0; i < NR_PMB_ENTRIES; i++) {
		unsigned long addr, data;
		unsigned int size;
		char *sz_str = NULL;

		addr = __raw_readl(mk_pmb_addr(i));
		data = __raw_readl(mk_pmb_data(i));

		size = data & PMB_SZ_MASK;
		sz_str = (size == PMB_SZ_16M)  ? " 16MB":
			 (size == PMB_SZ_64M)  ? " 64MB":
			 (size == PMB_SZ_128M) ? "128MB":
						 "512MB";

		/* 02: V 0x88 0x08 128MB C CB B */
		seq_printf(file, "%02d: %c 0x%02lx 0x%02lx %s %c %s %s\n",
			   i, ((addr & PMB_V) && (data & PMB_V)) ? 'V' : ' ',
			   (addr >> 24) & 0xff, (data >> 24) & 0xff,
			   sz_str, (data & PMB_C) ? 'C' : ' ',
			   (data & PMB_WT) ? "WT" : "CB",
			   (data & PMB_UB) ? "UB" : " B");
	}

	return 0;
}

static int pmb_debugfs_open(struct inode *inode, struct file *file)
{
	return single_open(file, pmb_seq_show, NULL);
}

static const struct file_operations pmb_debugfs_fops = {
	.owner		= THIS_MODULE,
	.open		= pmb_debugfs_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};

static int __init pmb_debugfs_init(void)
{
	struct dentry *dentry;

	dentry = debugfs_create_file("pmb", S_IFREG | S_IRUGO,
				     sh_debugfs_root, NULL, &pmb_debugfs_fops);
	if (!dentry)
		return -ENOMEM;
	if (IS_ERR(dentry))
		return PTR_ERR(dentry);

	return 0;
}
postcore_initcall(pmb_debugfs_init);

#ifdef CONFIG_PM
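/*
 * Reprogram the hardware PMB from the software copy when coming back
 * up from hibernation (PM_EVENT_FREEZE -> PM_EVENT_ON).
 */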
static int pmb_sysdev_suspend(struct sys_device *dev, pm_message_t state)
{
	static pm_message_t prev_state;
	int i;

	/* Restore the PMB after a resume from hibernation */
	if (state.event == PM_EVENT_ON &&
	    prev_state.event == PM_EVENT_FREEZE) {
		struct pmb_entry *pmbe;

		read_lock(&pmb_rwlock);

		for (i = 0; i < ARRAY_SIZE(pmb_entry_list); i++) {
			if (test_bit(i, pmb_map)) {
				pmbe = &pmb_entry_list[i];
				set_pmb_entry(pmbe);
			}
		}

		read_unlock(&pmb_rwlock);
	}

	prev_state = state;

	return 0;
}

static int pmb_sysdev_resume(struct sys_device *dev)
{
	return pmb_sysdev_suspend(dev, PMSG_ON);
}

static struct sysdev_driver pmb_sysdev_driver = {
	.suspend = pmb_sysdev_suspend,
	.resume = pmb_sysdev_resume,
};

static int __init pmb_sysdev_init(void)
{
	return sysdev_driver_register(&cpu_sysdev_class, &pmb_sysdev_driver);
}
subsys_initcall(pmb_sysdev_init);
#endif