/*
 * arch/sh/mm/pmb.c
 *
 * Privileged Space Mapping Buffer (PMB) Support.
 *
 * Copyright (C) 2005, 2006, 2007 Paul Mundt
 *
 * P1/P2 Section mapping definitions from map32.h, which was:
 *
 * Copyright 2003 (c) Lineo Solutions,Inc.
 *
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 */
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/sysdev.h>
#include <linux/cpu.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/bitops.h>
#include <linux/debugfs.h>
#include <linux/fs.h>
#include <linux/seq_file.h>
#include <linux/err.h>
#include <asm/system.h>
#include <asm/uaccess.h>
#include <asm/pgtable.h>
#include <asm/mmu.h>
#include <asm/io.h>
#include <asm/mmu_context.h>

#define NR_PMB_ENTRIES  16

static void __pmb_unmap(struct pmb_entry *);

static struct pmb_entry pmb_entry_list[NR_PMB_ENTRIES];
static unsigned long pmb_map;

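/*
 * Boot-time P1 (cacheable) and P2 (uncached) section mappings,
 * carried over from the old map32.h definitions.
 */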
static struct pmb_entry pmb_init_map[] = {
        /* vpn         ppn         flags (ub/sz/c/wt) */

        /* P1 Section Mappings */
        { 0x80000000, 0x00000000, PMB_SZ_64M  | PMB_C, },
        { 0x84000000, 0x04000000, PMB_SZ_64M  | PMB_C, },
        { 0x88000000, 0x08000000, PMB_SZ_128M | PMB_C, },
        { 0x90000000, 0x10000000, PMB_SZ_64M  | PMB_C, },
        { 0x94000000, 0x14000000, PMB_SZ_64M  | PMB_C, },
        { 0x98000000, 0x18000000, PMB_SZ_64M  | PMB_C, },

        /* P2 Section Mappings */
        { 0xa0000000, 0x00000000, PMB_UB | PMB_SZ_64M  | PMB_WT, },
        { 0xa4000000, 0x04000000, PMB_UB | PMB_SZ_64M  | PMB_WT, },
        { 0xa8000000, 0x08000000, PMB_UB | PMB_SZ_128M | PMB_WT, },
        { 0xb0000000, 0x10000000, PMB_UB | PMB_SZ_64M  | PMB_WT, },
        { 0xb4000000, 0x14000000, PMB_UB | PMB_SZ_64M  | PMB_WT, },
        { 0xb8000000, 0x18000000, PMB_UB | PMB_SZ_64M  | PMB_WT, },
};

static inline unsigned long mk_pmb_entry(unsigned int entry)
{
        return (entry & PMB_E_MASK) << PMB_E_SHIFT;
}

static inline unsigned long mk_pmb_addr(unsigned int entry)
{
        return mk_pmb_entry(entry) | PMB_ADDR;
}

static inline unsigned long mk_pmb_data(unsigned int entry)
{
        return mk_pmb_entry(entry) | PMB_DATA;
}

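/* Claim a free slot in the PMB allocation bitmap, or -ENOSPC if all are taken. */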
static int pmb_alloc_entry(void)
{
        unsigned int pos;

repeat:
        pos = find_first_zero_bit(&pmb_map, NR_PMB_ENTRIES);

        /* find_first_zero_bit() returns NR_PMB_ENTRIES when the map is full */
        if (unlikely(pos >= NR_PMB_ENTRIES))
                return -ENOSPC;

        if (test_and_set_bit(pos, &pmb_map))
                goto repeat;

        return pos;
}

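/*
 * Reserve a PMB slot and record vpn/ppn/flags for it in pmb_entry_list;
 * the hardware entry itself isn't written until set_pmb_entry().
 */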
static struct pmb_entry *pmb_alloc(unsigned long vpn, unsigned long ppn,
                                   unsigned long flags)
{
        struct pmb_entry *pmbe;
        int pos;

        pos = pmb_alloc_entry();
        if (pos < 0)
                return ERR_PTR(pos);

        pmbe = &pmb_entry_list[pos];
        if (!pmbe)
                return ERR_PTR(-ENOMEM);

        pmbe->vpn   = vpn;
        pmbe->ppn   = ppn;
        pmbe->flags = flags;
        pmbe->entry = pos;

        return pmbe;
}

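/* Return a slot allocated by pmb_alloc() to the free pool. */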
static void pmb_free(struct pmb_entry *pmbe)
{
        int pos = pmbe->entry;

        pmbe->vpn   = 0;
        pmbe->ppn   = 0;
        pmbe->flags = 0;
        pmbe->entry = 0;

        clear_bit(pos, &pmb_map);
}

/*
 * Must be in P2 for __set_pmb_entry()
 */
static void __set_pmb_entry(unsigned long vpn, unsigned long ppn,
                            unsigned long flags, int pos)
{
        ctrl_outl(vpn | PMB_V, mk_pmb_addr(pos));

#ifdef CONFIG_CACHE_WRITETHROUGH
        /*
         * When we are in 32-bit address extended mode, CCR.CB becomes
         * invalid, so care must be taken to manually adjust cacheable
         * translations.
         */
        if (likely(flags & PMB_C))
                flags |= PMB_WT;
#endif

        ctrl_outl(ppn | flags | PMB_V, mk_pmb_data(pos));
}

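/* Program one PMB entry, jumping to uncached (P2) space for the update. */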
static void __uses_jump_to_uncached set_pmb_entry(struct pmb_entry *pmbe)
{
        jump_to_uncached();
        __set_pmb_entry(pmbe->vpn, pmbe->ppn, pmbe->flags, pmbe->entry);
        back_to_cached();
}

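/*
 * Invalidate a dynamically allocated PMB entry; the wired boot-time
 * entries from pmb_init_map are deliberately left alone.
 */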
static void __uses_jump_to_uncached clear_pmb_entry(struct pmb_entry *pmbe)
{
        unsigned int entry = pmbe->entry;
        unsigned long addr;

        /*
         * Don't allow clearing of wired init entries; P1 or P2 access
         * without a corresponding mapping in the PMB will lead to reset
         * by the TLB.
         */
        if (unlikely(entry < ARRAY_SIZE(pmb_init_map) ||
                     entry >= NR_PMB_ENTRIES))
                return;

        jump_to_uncached();

        /* Clear V-bit */
        addr = mk_pmb_addr(entry);
        ctrl_outl(ctrl_inl(addr) & ~PMB_V, addr);

        addr = mk_pmb_data(entry);
        ctrl_outl(ctrl_inl(addr) & ~PMB_V, addr);

        back_to_cached();
}

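/* Section sizes the PMB supports, largest first so pmb_remap() prefers the biggest fit. */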
static struct {
        unsigned long size;
        int flag;
} pmb_sizes[] = {
        { .size = 0x20000000, .flag = PMB_SZ_512M, },
        { .size = 0x08000000, .flag = PMB_SZ_128M, },
        { .size = 0x04000000, .flag = PMB_SZ_64M,  },
        { .size = 0x01000000, .flag = PMB_SZ_16M,  },
};

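/*
 * Map [phys, phys + size) at vaddr with the largest PMB sections that
 * fit.  Returns the number of bytes actually mapped (anything below the
 * minimum 16MB section is left unmapped) or a negative error code.
 */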
long pmb_remap(unsigned long vaddr, unsigned long phys,
               unsigned long size, unsigned long flags)
{
        struct pmb_entry *pmbp, *pmbe;
        unsigned long wanted;
        int pmb_flags, i;
        long err;

        /* Convert typical pgprot value to the PMB equivalent */
        if (flags & _PAGE_CACHABLE) {
                if (flags & _PAGE_WT)
                        pmb_flags = PMB_WT;
                else
                        pmb_flags = PMB_C;
        } else
                pmb_flags = PMB_WT | PMB_UB;

        pmbp = NULL;
        wanted = size;

again:
        for (i = 0; i < ARRAY_SIZE(pmb_sizes); i++) {
                if (size < pmb_sizes[i].size)
                        continue;

                pmbe = pmb_alloc(vaddr, phys, pmb_flags | pmb_sizes[i].flag);
                if (IS_ERR(pmbe)) {
                        err = PTR_ERR(pmbe);
                        goto out;
                }

                set_pmb_entry(pmbe);

                phys  += pmb_sizes[i].size;
                vaddr += pmb_sizes[i].size;
                size  -= pmb_sizes[i].size;

                /*
                 * Link adjacent entries that span multiple PMB entries
                 * for easier tear-down.
                 */
                if (likely(pmbp))
                        pmbp->link = pmbe;

                pmbp = pmbe;

                /*
                 * Instead of trying smaller sizes on every iteration
                 * (even if we succeed in allocating space), try using
                 * pmb_sizes[i].size again.
                 */
                i--;
        }

        if (size >= 0x1000000)
                goto again;

        return wanted - size;

out:
        if (pmbp)
                __pmb_unmap(pmbp);

        return err;
}

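/* Tear down the PMB mapping previously set up at 'addr' by pmb_remap(). */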
void pmb_unmap(unsigned long addr)
{
        struct pmb_entry *pmbe = NULL;
        int i;

        for (i = 0; i < ARRAY_SIZE(pmb_entry_list); i++) {
                if (test_bit(i, &pmb_map)) {
                        pmbe = &pmb_entry_list[i];
                        if (pmbe->vpn == addr)
                                break;
                        /* not a match, don't leave a stale pointer behind */
                        pmbe = NULL;
                }
        }

        if (unlikely(!pmbe))
                return;

        __pmb_unmap(pmbe);
}

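/* Release a chain of linked PMB entries built up by pmb_remap(). */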
static void __pmb_unmap(struct pmb_entry *pmbe)
{
        BUG_ON(!test_bit(pmbe->entry, &pmb_map));

        do {
                struct pmb_entry *pmblink = pmbe;

                /*
                 * We may be called before this pmb_entry has been
                 * entered into the PMB table via set_pmb_entry(), but
                 * that's OK because we've allocated a unique slot for
                 * this entry in pmb_alloc() (even if we haven't filled
                 * it yet).
                 *
                 * Therefore, calling clear_pmb_entry() is safe as no
                 * other mapping can be using that slot.
                 */
                clear_pmb_entry(pmbe);

                pmbe = pmblink->link;

                pmb_free(pmblink);
        } while (pmbe);
}

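/*
 * Wire up the fixed P1/P2 mappings, enable 32-bit address extended
 * mode (PMB.SE) and flush the TLB.  Runs uncached while the PMB is
 * being reprogrammed underneath us.
 */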
int __uses_jump_to_uncached pmb_init(void)
{
        unsigned int nr_entries = ARRAY_SIZE(pmb_init_map);
        unsigned int entry, i;

        BUG_ON(unlikely(nr_entries >= NR_PMB_ENTRIES));

        jump_to_uncached();

        /*
         * Ordering is important: P2 must be mapped in the PMB before we
         * can set PMB.SE, and P1 must be mapped before we jump back to
         * P1 space.
         */
        for (entry = 0; entry < nr_entries; entry++) {
                struct pmb_entry *pmbe = pmb_init_map + entry;

                __set_pmb_entry(pmbe->vpn, pmbe->ppn, pmbe->flags, entry);
        }

        ctrl_outl(0, PMB_IRMCR);

        /* PMB.SE and UB[7] */
        ctrl_outl((1 << 31) | (1 << 7), PMB_PASCR);

        /* Flush out the TLB */
        i  = ctrl_inl(MMUCR);
        i |= MMUCR_TI;
        ctrl_outl(i, MMUCR);

        back_to_cached();

        return 0;
}

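/* Dump the live hardware PMB state through the "pmb" debugfs entry. */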
static int pmb_seq_show(struct seq_file *file, void *iter)
{
        int i;

        seq_printf(file, "V: Valid, C: Cacheable, WT: Write-Through\n"
                         "CB: Copy-Back, B: Buffered, UB: Unbuffered\n");
        seq_printf(file, "ety   vpn  ppn  size   flags\n");

        for (i = 0; i < NR_PMB_ENTRIES; i++) {
                unsigned long addr, data;
                unsigned int size;
                char *sz_str = NULL;

                addr = ctrl_inl(mk_pmb_addr(i));
                data = ctrl_inl(mk_pmb_data(i));

                size = data & PMB_SZ_MASK;
                sz_str = (size == PMB_SZ_16M)  ? " 16MB" :
                         (size == PMB_SZ_64M)  ? " 64MB" :
                         (size == PMB_SZ_128M) ? "128MB" :
                                                 "512MB";

                /* 02: V 0x88 0x08 128MB C CB B */
                seq_printf(file, "%02d: %c 0x%02lx 0x%02lx %s %c %s %s\n",
                           i, ((addr & PMB_V) && (data & PMB_V)) ? 'V' : ' ',
                           (addr >> 24) & 0xff, (data >> 24) & 0xff,
                           sz_str, (data & PMB_C) ? 'C' : ' ',
                           (data & PMB_WT) ? "WT" : "CB",
                           (data & PMB_UB) ? "UB" : " B");
        }

        return 0;
}

static int pmb_debugfs_open(struct inode *inode, struct file *file)
{
        return single_open(file, pmb_seq_show, NULL);
}

static const struct file_operations pmb_debugfs_fops = {
        .owner          = THIS_MODULE,
        .open           = pmb_debugfs_open,
        .read           = seq_read,
        .llseek         = seq_lseek,
        .release        = single_release,
};

static int __init pmb_debugfs_init(void)
{
        struct dentry *dentry;

        dentry = debugfs_create_file("pmb", S_IFREG | S_IRUGO,
                                     sh_debugfs_root, NULL, &pmb_debugfs_fops);
        if (!dentry)
                return -ENOMEM;
        if (IS_ERR(dentry))
                return PTR_ERR(dentry);

        return 0;
}
postcore_initcall(pmb_debugfs_init);

#ifdef CONFIG_PM
static int pmb_sysdev_suspend(struct sys_device *dev, pm_message_t state)
{
        static pm_message_t prev_state;
        int i;

        /* Restore the PMB after a resume from hibernation */
        if (state.event == PM_EVENT_ON &&
            prev_state.event == PM_EVENT_FREEZE) {
                struct pmb_entry *pmbe;
                for (i = 0; i < ARRAY_SIZE(pmb_entry_list); i++) {
                        if (test_bit(i, &pmb_map)) {
                                pmbe = &pmb_entry_list[i];
                                set_pmb_entry(pmbe);
                        }
                }
        }
        prev_state = state;
        return 0;
}

static int pmb_sysdev_resume(struct sys_device *dev)
{
        return pmb_sysdev_suspend(dev, PMSG_ON);
}

static struct sysdev_driver pmb_sysdev_driver = {
        .suspend = pmb_sysdev_suspend,
        .resume = pmb_sysdev_resume,
};

static int __init pmb_sysdev_init(void)
{
        return sysdev_driver_register(&cpu_sysdev_class, &pmb_sysdev_driver);
}

subsys_initcall(pmb_sysdev_init);
#endif