/*
 * High memory support for Xtensa architecture
 *
 * This file is subject to the terms and conditions of the GNU General
 * Public License.  See the file "COPYING" in the main directory of
 * this archive for more details.
 *
 * Copyright (C) 2014 Cadence Design Systems Inc.
 */

#include <linux/export.h>
#include <linux/highmem.h>
#include <asm/tlbflush.h>

static pte_t *kmap_pte;

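/*
 * Compute the fixmap slot for an atomic kmap.  The Xtensa data cache may
 * be virtually indexed, so each (type, cpu) pair owns DCACHE_N_COLORS
 * consecutive slots; the slot whose virtual address has the same cache
 * color as the page is chosen to keep the alias coherent.
 */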
static inline enum fixed_addresses kmap_idx(int type, unsigned long color)
{
	return (type + KM_TYPE_NR * smp_processor_id()) * DCACHE_N_COLORS +
		color;
}

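/*
 * Map a highmem page for short-lived, non-sleeping access.  Lowmem pages
 * are permanently mapped and returned as-is; highmem pages get a per-cpu
 * fixmap slot matching the page's cache color.
 */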
void *kmap_atomic(struct page *page)
{
	enum fixed_addresses idx;
	unsigned long vaddr;

	pagefault_disable();
	if (!PageHighMem(page))
		return page_address(page);

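	/*
	 * Push a per-cpu kmap type and pick the fixmap slot that matches
	 * the page's cache color; DEBUG_HIGHMEM checks the slot is vacant.
	 */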
	idx = kmap_idx(kmap_atomic_idx_push(),
		       DCACHE_ALIAS(page_to_phys(page)));
	vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx);
#ifdef CONFIG_DEBUG_HIGHMEM
	BUG_ON(!pte_none(*(kmap_pte + idx)));
#endif
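	/*
	 * No TLB flush is needed here: the slot's previous translation was
	 * flushed when it was last torn down in __kunmap_atomic().
	 */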
	set_pte(kmap_pte + idx, mk_pte(page, PAGE_KERNEL_EXEC));

	return (void *)vaddr;
}
EXPORT_SYMBOL(kmap_atomic);

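/*
 * Undo a kmap_atomic().  Only addresses inside the fixmap window have a
 * pte to tear down; anything else came from page_address() on a lowmem
 * page and needs no cleanup beyond re-enabling pagefaults.
 */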
void __kunmap_atomic(void *kvaddr)
{
	if (kvaddr >= (void *)FIXADDR_START &&
	    kvaddr < (void *)FIXADDR_TOP) {
		int idx = kmap_idx(kmap_atomic_idx(),
				   DCACHE_ALIAS((unsigned long)kvaddr));

		/*
		 * Force other mappings to Oops if they try to access this
		 * pte without first remapping it.  Keeping stale mappings
		 * around is also a bad idea, in case the page changes
		 * cacheability attributes or becomes a protected page in a
		 * hypervisor.
		 */
		pte_clear(&init_mm, kvaddr, kmap_pte + idx);
		local_flush_tlb_kernel_range((unsigned long)kvaddr,
					     (unsigned long)kvaddr + PAGE_SIZE);

		kmap_atomic_idx_pop();
	}

	pagefault_enable();
}
EXPORT_SYMBOL(__kunmap_atomic);

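/*
 * Called once during early boot, before any kmap_atomic() user: walk the
 * init page tables once and remember the pte backing the first fixmap
 * slot, so kmap_atomic() can index the whole window from it directly.
 */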
void __init kmap_init(void)
{
	unsigned long kmap_vstart;

	/* cache the first kmap pte */
	kmap_vstart = __fix_to_virt(FIX_KMAP_BEGIN);
	kmap_pte = kmap_get_fixmap_pte(kmap_vstart);
}