/*
 * High memory support for Xtensa architecture
 *
 * This file is subject to the terms and conditions of the GNU General
 * Public License. See the file "COPYING" in the main directory of
 * this archive for more details.
 *
 * Copyright (C) 2014 Cadence Design Systems Inc.
 */

#include <linux/export.h>
#include <linux/highmem.h>
#include <asm/tlbflush.h>

/* pte for the first atomic-kmap fixmap slot, cached by kmap_init() */
static pte_t *kmap_pte;

Max Filippov | 32544d9 | 2014-07-15 02:51:49 +0400 | [diff] [blame^] | 17 | static inline enum fixed_addresses kmap_idx(int type, unsigned long color) |
| 18 | { |
| 19 | return (type + KM_TYPE_NR * smp_processor_id()) * DCACHE_N_COLORS + |
| 20 | color; |
| 21 | } |
| 22 | |
/*
 * Temporarily map a page into kernel virtual address space.
 *
 * Lowmem pages are permanently mapped, so their linear address is
 * returned directly.  For highmem pages a per-CPU fixmap slot whose
 * cache color matches the page's physical address is claimed (see
 * kmap_idx()), its pte is installed, and the fixmap virtual address
 * is returned.
 *
 * Page faults stay disabled until the matching __kunmap_atomic().
 */
void *kmap_atomic(struct page *page)
{
	enum fixed_addresses idx;
	unsigned long vaddr;

	pagefault_disable();
	if (!PageHighMem(page))
		return page_address(page);

	/* choose a slot with the same cache color as the physical page */
	idx = kmap_idx(kmap_atomic_idx_push(),
		       DCACHE_ALIAS(page_to_phys(page)));
	vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx);
#ifdef CONFIG_DEBUG_HIGHMEM
	/* slot must be empty; a live pte here means a missed kunmap */
	BUG_ON(!pte_none(*(kmap_pte + idx)));
#endif
	set_pte(kmap_pte + idx, mk_pte(page, PAGE_KERNEL_EXEC));

	return (void *)vaddr;
}
EXPORT_SYMBOL(kmap_atomic);

/*
 * Tear down a mapping established by kmap_atomic() and re-enable page
 * faults.  kvaddr may be a plain lowmem address (the lowmem fast path
 * in kmap_atomic()), in which case there is no pte to clear.
 */
void __kunmap_atomic(void *kvaddr)
{
	if (kvaddr >= (void *)FIXADDR_START &&
	    kvaddr < (void *)FIXADDR_TOP) {
		/* recover the slot: the vaddr's cache color equals the page's */
		int idx = kmap_idx(kmap_atomic_idx(),
				   DCACHE_ALIAS((unsigned long)kvaddr));

		/*
		 * Force other mappings to Oops if they'll try to access this
		 * pte without first remap it. Keeping stale mappings around
		 * is a bad idea also, in case the page changes cacheability
		 * attributes or becomes a protected page in a hypervisor.
		 */
		pte_clear(&init_mm, kvaddr, kmap_pte + idx);
		/* drop the stale TLB entry before the slot can be reused */
		local_flush_tlb_kernel_range((unsigned long)kvaddr,
					     (unsigned long)kvaddr + PAGE_SIZE);

		kmap_atomic_idx_pop();
	}

	pagefault_enable();
}
EXPORT_SYMBOL(__kunmap_atomic);

| 68 | void __init kmap_init(void) |
| 69 | { |
| 70 | unsigned long kmap_vstart; |
| 71 | |
| 72 | /* cache the first kmap pte */ |
| 73 | kmap_vstart = __fix_to_virt(FIX_KMAP_BEGIN); |
| 74 | kmap_pte = kmap_get_fixmap_pte(kmap_vstart); |
| 75 | } |