/*
 * arch/sh/mm/pmb.c
 *
 * Privileged Space Mapping Buffer (PMB) Support.
 *
 * Copyright (C) 2005 - 2010 Paul Mundt
 * Copyright (C) 2010 Matt Fleming
 *
 * This file is subject to the terms and conditions of the GNU General Public
 * License. See the file "COPYING" in the main directory of this archive
 * for more details.
 */
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/sysdev.h>
#include <linux/cpu.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/bitops.h>
#include <linux/debugfs.h>
#include <linux/fs.h>
#include <linux/seq_file.h>
#include <linux/err.h>
#include <asm/system.h>
#include <asm/uaccess.h>
#include <asm/pgtable.h>
#include <asm/mmu.h>
#include <asm/io.h>
#include <asm/mmu_context.h>

#define NR_PMB_ENTRIES	16

static void __pmb_unmap(struct pmb_entry *);

static struct pmb_entry pmb_entry_list[NR_PMB_ENTRIES];
static unsigned long pmb_map;

static inline unsigned long mk_pmb_entry(unsigned int entry)
{
	return (entry & PMB_E_MASK) << PMB_E_SHIFT;
}

static inline unsigned long mk_pmb_addr(unsigned int entry)
{
	return mk_pmb_entry(entry) | PMB_ADDR;
}

static inline unsigned long mk_pmb_data(unsigned int entry)
{
	return mk_pmb_entry(entry) | PMB_DATA;
}
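
/*
 * Worked example (illustrative only; the real constants live in
 * <asm/mmu.h>, and the numbers below assume the usual SH-4A values
 * PMB_ADDR = 0xf6100000, PMB_DATA = 0xf7100000, PMB_E_SHIFT = 8):
 *
 *	mk_pmb_addr(3) == PMB_ADDR | (3 << PMB_E_SHIFT) == 0xf6100300
 *	mk_pmb_data(3) == PMB_DATA | (3 << PMB_E_SHIFT) == 0xf7100300
 *
 * i.e. each of the 16 entries owns one slot in the memory-mapped
 * address and data arrays.
 */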

static int pmb_alloc_entry(void)
{
	unsigned int pos;

repeat:
	pos = find_first_zero_bit(&pmb_map, NR_PMB_ENTRIES);

	if (unlikely(pos >= NR_PMB_ENTRIES))
		return -ENOSPC;

	if (test_and_set_bit(pos, &pmb_map))
		goto repeat;

	return pos;
}

static struct pmb_entry *pmb_alloc(unsigned long vpn, unsigned long ppn,
				   unsigned long flags, int entry)
{
	struct pmb_entry *pmbe;
	int pos;

	if (entry == PMB_NO_ENTRY) {
		pos = pmb_alloc_entry();
		if (pos < 0)
			return ERR_PTR(pos);
	} else {
		/* Claim the requested slot in the allocation map too. */
		if (test_and_set_bit(entry, &pmb_map))
			return ERR_PTR(-ENOSPC);
		pos = entry;
	}

	pmbe = &pmb_entry_list[pos];
	if (!pmbe)
		return ERR_PTR(-ENOMEM);

	pmbe->vpn = vpn;
	pmbe->ppn = ppn;
	pmbe->flags = flags;
	pmbe->entry = pos;

	return pmbe;
}
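
/*
 * Usage sketch (illustrative only):
 *
 *	pmbe = pmb_alloc(vpn, ppn, flags, PMB_NO_ENTRY);
 *	pmbe = pmb_alloc(vpn, ppn, flags, 9);
 *
 * The first form grabs any free slot and is what backs pmb_remap();
 * the second claims slot 9 explicitly (returning -ENOSPC if it is
 * already taken) and is how pmb_init() resynchronizes the software
 * copy with entries the hardware already holds.
 */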

static void pmb_free(struct pmb_entry *pmbe)
{
	int pos = pmbe->entry;

	pmbe->vpn = 0;
	pmbe->ppn = 0;
	pmbe->flags = 0;
	pmbe->entry = 0;

	clear_bit(pos, &pmb_map);
}

/*
 * Must be in P2 for __set_pmb_entry()
 */
static void __set_pmb_entry(unsigned long vpn, unsigned long ppn,
			    unsigned long flags, int pos)
{
	ctrl_outl(vpn | PMB_V, mk_pmb_addr(pos));

#ifdef CONFIG_CACHE_WRITETHROUGH
	/*
	 * When we are in 32-bit address extended mode, CCR.CB becomes
	 * invalid, so care must be taken to manually adjust cacheable
	 * translations.
	 */
	if (likely(flags & PMB_C))
		flags |= PMB_WT;
#endif

	ctrl_outl(ppn | flags | PMB_V, mk_pmb_data(pos));
}
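
/*
 * For instance (a sketch keyed to the legacy bootloader table further
 * down in this file), wiring the 128MB cached kernel mapping in slot 9
 * comes down to two register writes:
 *
 *	__set_pmb_entry(0x88000000, 0x48000000,
 *			PMB_C | PMB_UB | PMB_SZ_128M, 9);
 *
 * stores 0x88000000 | PMB_V into the entry 9 slot of the address
 * array and 0x48000000 | PMB_C | PMB_UB | PMB_SZ_128M | PMB_V into
 * the matching slot of the data array.
 */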

static void __uses_jump_to_uncached set_pmb_entry(struct pmb_entry *pmbe)
{
	jump_to_uncached();
	__set_pmb_entry(pmbe->vpn, pmbe->ppn, pmbe->flags, pmbe->entry);
	back_to_cached();
}

static void __uses_jump_to_uncached clear_pmb_entry(struct pmb_entry *pmbe)
{
	unsigned int entry = pmbe->entry;
	unsigned long addr;

	if (unlikely(entry >= NR_PMB_ENTRIES))
		return;

	jump_to_uncached();

	/* Clear V-bit */
	addr = mk_pmb_addr(entry);
	ctrl_outl(ctrl_inl(addr) & ~PMB_V, addr);

	addr = mk_pmb_data(entry);
	ctrl_outl(ctrl_inl(addr) & ~PMB_V, addr);

	back_to_cached();
}

static struct {
	unsigned long size;
	int flag;
} pmb_sizes[] = {
	{ .size = 0x20000000, .flag = PMB_SZ_512M, },
	{ .size = 0x08000000, .flag = PMB_SZ_128M, },
	{ .size = 0x04000000, .flag = PMB_SZ_64M,  },
	{ .size = 0x01000000, .flag = PMB_SZ_16M,  },
};
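
/*
 * pmb_remap() below walks this table greedily, largest size first,
 * and retries the same size while it still fits. Illustration: a
 * 160MB request comes out as 128MB + 16MB + 16MB, i.e. three linked
 * PMB entries rather than ten 16MB ones.
 */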

long pmb_remap(unsigned long vaddr, unsigned long phys,
	       unsigned long size, unsigned long flags)
{
	struct pmb_entry *pmbp, *pmbe;
	unsigned long wanted;
	int pmb_flags, i;
	long err;

	/* Convert typical pgprot value to the PMB equivalent */
	if (flags & _PAGE_CACHABLE) {
		if (flags & _PAGE_WT)
			pmb_flags = PMB_WT;
		else
			pmb_flags = PMB_C;
	} else
		pmb_flags = PMB_WT | PMB_UB;

	pmbp = NULL;
	wanted = size;

again:
	for (i = 0; i < ARRAY_SIZE(pmb_sizes); i++) {
		if (size < pmb_sizes[i].size)
			continue;

		pmbe = pmb_alloc(vaddr, phys, pmb_flags | pmb_sizes[i].flag,
				 PMB_NO_ENTRY);
		if (IS_ERR(pmbe)) {
			err = PTR_ERR(pmbe);
			goto out;
		}

		set_pmb_entry(pmbe);

		phys += pmb_sizes[i].size;
		vaddr += pmb_sizes[i].size;
		size -= pmb_sizes[i].size;

		/*
		 * Link adjacent entries that span multiple PMB entries
		 * for easier tear-down.
		 */
		if (likely(pmbp))
			pmbp->link = pmbe;

		pmbp = pmbe;

		/*
		 * Instead of trying smaller sizes on every iteration
		 * (even if we succeed in allocating space), try using
		 * pmb_sizes[i].size again.
		 */
		i--;
	}

	if (size >= 0x1000000)
		goto again;

	return wanted - size;

out:
	if (pmbp)
		__pmb_unmap(pmbp);

	return err;
}
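
/*
 * Example call (illustrative addresses; nothing in this file issues
 * exactly this request):
 *
 *	long mapped = pmb_remap(0xb0000000, 0x40000000, 0x02000000,
 *				_PAGE_CACHABLE);
 *
 * With the sizes above, the 32MB request is satisfied by two linked
 * 16MB entries and mapped comes back as 0x02000000. Passing the same
 * virtual address to pmb_unmap() later tears the whole chain down.
 */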

void pmb_unmap(unsigned long addr)
{
	struct pmb_entry *pmbe = NULL;
	int i;

	for (i = 0; i < ARRAY_SIZE(pmb_entry_list); i++) {
		if (test_bit(i, &pmb_map)) {
			pmbe = &pmb_entry_list[i];
			if (pmbe->vpn == addr)
				break;
			/*
			 * No match; reset pmbe so that a failed search
			 * can't tear down an unrelated entry below.
			 */
			pmbe = NULL;
		}
	}

	if (unlikely(!pmbe))
		return;

	__pmb_unmap(pmbe);
}

static void __pmb_unmap(struct pmb_entry *pmbe)
{
	BUG_ON(!test_bit(pmbe->entry, &pmb_map));

	do {
		struct pmb_entry *pmblink = pmbe;

		/*
		 * We may be called before this pmb_entry has been
		 * entered into the PMB table via set_pmb_entry(), but
		 * that's OK because we've allocated a unique slot for
		 * this entry in pmb_alloc() (even if we haven't filled
		 * it yet).
		 *
		 * Therefore, calling clear_pmb_entry() is safe as no
		 * other mapping can be using that slot.
		 */
		clear_pmb_entry(pmbe);

		pmbe = pmblink->link;

		pmb_free(pmblink);
	} while (pmbe);
}

#ifdef CONFIG_PMB_LEGACY
static inline unsigned int pmb_ppn_in_range(unsigned long ppn)
{
	return ppn >= __MEMORY_START && ppn < __MEMORY_START + __MEMORY_SIZE;
}

static int pmb_apply_legacy_mappings(void)
{
	unsigned int applied = 0;
	int i;

	pr_info("PMB: Preserving legacy mappings:\n");

	/*
	 * The following entries are set up by the bootloader.
	 *
	 * Entry  VPN         PPN         V  SZ     C  UB
	 * ------------------------------------------------
	 *   0    0xA0000000  0x00000000  1  64MB   0  0
	 *   1    0xA4000000  0x04000000  1  16MB   0  0
	 *   2    0xA6000000  0x08000000  1  16MB   0  0
	 *   9    0x88000000  0x48000000  1  128MB  1  1
	 *  10    0x90000000  0x50000000  1  128MB  1  1
	 *  11    0x98000000  0x58000000  1  128MB  1  1
	 *  13    0xA8000000  0x48000000  1  128MB  0  0
	 *  14    0xB0000000  0x50000000  1  128MB  0  0
	 *  15    0xB8000000  0x58000000  1  128MB  0  0
	 *
	 * The only entries we need are the ones that map the kernel
	 * at the cached and uncached addresses.
	 */
	for (i = 0; i < PMB_ENTRY_MAX; i++) {
		unsigned long addr, data;
		unsigned long addr_val, data_val;
		unsigned long ppn, vpn;

		addr = mk_pmb_addr(i);
		data = mk_pmb_data(i);

		addr_val = __raw_readl(addr);
		data_val = __raw_readl(data);

		/*
		 * Skip over any bogus entries
		 */
		if (!(data_val & PMB_V) || !(addr_val & PMB_V))
			continue;

		ppn = data_val & PMB_PFN_MASK;
		vpn = addr_val & PMB_PFN_MASK;

		/*
		 * Only preserve in-range mappings.
		 */
		if (pmb_ppn_in_range(ppn)) {
			unsigned int size;
			char *sz_str = NULL;

			size = data_val & PMB_SZ_MASK;

			sz_str = (size == PMB_SZ_16M)  ? " 16MB" :
				 (size == PMB_SZ_64M)  ? " 64MB" :
				 (size == PMB_SZ_128M) ? "128MB" :
							 "512MB";

			pr_info("\t0x%08lx -> 0x%08lx [ %s %scached ]\n",
				vpn >> PAGE_SHIFT, ppn >> PAGE_SHIFT, sz_str,
				(data_val & PMB_C) ? "" : "un");

			applied++;
		} else {
			/*
			 * Invalidate anything out of bounds.
			 */
			__raw_writel(addr_val & ~PMB_V, addr);
			__raw_writel(data_val & ~PMB_V, data);
		}
	}

	return (applied == 0);
}
#else
static inline int pmb_apply_legacy_mappings(void)
{
	return 1;
}
#endif

int __uses_jump_to_uncached pmb_init(void)
{
	int i;
	unsigned long addr, data;
	unsigned long ret;

	jump_to_uncached();

	/*
	 * Attempt to apply the legacy boot mappings if configured. If
	 * this is successful then we simply carry on with those and
	 * don't bother establishing additional memory mappings. Dynamic
	 * device mappings through pmb_remap() can still be bolted on
	 * after this.
	 */
	ret = pmb_apply_legacy_mappings();
	if (ret == 0) {
		back_to_cached();
		return 0;
	}

	/*
	 * Sync our software copy of the PMB mappings with those in
	 * hardware. The mappings in the hardware PMB were either set up
	 * by the bootloader or very early on by the kernel.
	 */
	for (i = 0; i < PMB_ENTRY_MAX; i++) {
		struct pmb_entry *pmbe;
		unsigned long vpn, ppn, flags;

		addr = PMB_DATA + (i << PMB_E_SHIFT);
		data = ctrl_inl(addr);
		if (!(data & PMB_V))
			continue;

		if (data & PMB_C) {
#if defined(CONFIG_CACHE_WRITETHROUGH)
			data |= PMB_WT;
#elif defined(CONFIG_CACHE_WRITEBACK)
			data &= ~PMB_WT;
#else
			data &= ~(PMB_C | PMB_WT);
#endif
		}
		ctrl_outl(data, addr);

		ppn = data & PMB_PFN_MASK;

		flags = data & (PMB_C | PMB_WT | PMB_UB);
		flags |= data & PMB_SZ_MASK;

		addr = PMB_ADDR + (i << PMB_E_SHIFT);
		data = ctrl_inl(addr);

		vpn = data & PMB_PFN_MASK;

		pmbe = pmb_alloc(vpn, ppn, flags, i);
		WARN_ON(IS_ERR(pmbe));
	}

	ctrl_outl(0, PMB_IRMCR);

	/* Flush out the TLB */
	i = ctrl_inl(MMUCR);
	i |= MMUCR_TI;
	ctrl_outl(i, MMUCR);

	back_to_cached();

	return 0;
}

static int pmb_seq_show(struct seq_file *file, void *iter)
{
	int i;

	seq_printf(file, "V: Valid, C: Cacheable, WT: Write-Through\n"
			 "CB: Copy-Back, B: Buffered, UB: Unbuffered\n");
	seq_printf(file, "ety   vpn  ppn  size   flags\n");

	for (i = 0; i < NR_PMB_ENTRIES; i++) {
		unsigned long addr, data;
		unsigned int size;
		char *sz_str = NULL;

		addr = ctrl_inl(mk_pmb_addr(i));
		data = ctrl_inl(mk_pmb_data(i));

		size = data & PMB_SZ_MASK;
		sz_str = (size == PMB_SZ_16M)  ? " 16MB" :
			 (size == PMB_SZ_64M)  ? " 64MB" :
			 (size == PMB_SZ_128M) ? "128MB" :
						 "512MB";

		/* 02: V 0x88 0x08 128MB C CB B */
		seq_printf(file, "%02d: %c 0x%02lx 0x%02lx %s %c %s %s\n",
			   i, ((addr & PMB_V) && (data & PMB_V)) ? 'V' : ' ',
			   (addr >> 24) & 0xff, (data >> 24) & 0xff,
			   sz_str, (data & PMB_C) ? 'C' : ' ',
			   (data & PMB_WT) ? "WT" : "CB",
			   (data & PMB_UB) ? "UB" : " B");
	}

	return 0;
}
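
/*
 * Reading the resulting debugfs file produces something along these
 * lines (sample output sketched from the format string above and the
 * legacy bootloader table; actual contents depend on the board):
 *
 *	V: Valid, C: Cacheable, WT: Write-Through
 *	CB: Copy-Back, B: Buffered, UB: Unbuffered
 *	ety   vpn  ppn  size   flags
 *	09: V 0x88 0x48 128MB C CB UB
 */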

static int pmb_debugfs_open(struct inode *inode, struct file *file)
{
	return single_open(file, pmb_seq_show, NULL);
}

static const struct file_operations pmb_debugfs_fops = {
	.owner		= THIS_MODULE,
	.open		= pmb_debugfs_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};

static int __init pmb_debugfs_init(void)
{
	struct dentry *dentry;

	dentry = debugfs_create_file("pmb", S_IFREG | S_IRUGO,
				     sh_debugfs_root, NULL, &pmb_debugfs_fops);
	if (!dentry)
		return -ENOMEM;
	if (IS_ERR(dentry))
		return PTR_ERR(dentry);

	return 0;
}
postcore_initcall(pmb_debugfs_init);

#ifdef CONFIG_PM
static int pmb_sysdev_suspend(struct sys_device *dev, pm_message_t state)
{
	static pm_message_t prev_state;
	int i;

	/* Restore the PMB after a resume from hibernation */
	if (state.event == PM_EVENT_ON &&
	    prev_state.event == PM_EVENT_FREEZE) {
		struct pmb_entry *pmbe;
		for (i = 0; i < ARRAY_SIZE(pmb_entry_list); i++) {
			if (test_bit(i, &pmb_map)) {
				pmbe = &pmb_entry_list[i];
				set_pmb_entry(pmbe);
			}
		}
	}
	prev_state = state;
	return 0;
}

static int pmb_sysdev_resume(struct sys_device *dev)
{
	return pmb_sysdev_suspend(dev, PMSG_ON);
}

static struct sysdev_driver pmb_sysdev_driver = {
	.suspend = pmb_sysdev_suspend,
	.resume = pmb_sysdev_resume,
};

static int __init pmb_sysdev_init(void)
{
	return sysdev_driver_register(&cpu_sysdev_class, &pmb_sysdev_driver);
}
subsys_initcall(pmb_sysdev_init);
#endif