// SPDX-License-Identifier: GPL-2.0-only
/*
 * arch/arm/mm/highmem.c -- ARM highmem support
 *
 * Author:	Nicolas Pitre
 * Created:	september 8, 2008
 * Copyright:	Marvell Semiconductors Inc.
 */

#include <linux/module.h>
#include <linux/highmem.h>
#include <linux/interrupt.h>
#include <asm/fixmap.h>
#include <asm/cacheflush.h>
#include <asm/tlbflush.h>
#include "mm.h"

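/*
 * Helpers to install and read back a pte in the fixmap area used for
 * atomic kmaps.  set_fixmap_pte() also flushes the TLB entry for the
 * affected kernel page on the local CPU.
 */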
static inline void set_fixmap_pte(int idx, pte_t pte)
{
	unsigned long vaddr = __fix_to_virt(idx);
	pte_t *ptep = pte_offset_kernel(pmd_off_k(vaddr), vaddr);

	set_pte_ext(ptep, pte, 0);
	local_flush_tlb_kernel_page(vaddr);
}

static inline pte_t get_fixmap_pte(unsigned long vaddr)
{
	pte_t *ptep = pte_offset_kernel(pmd_off_k(vaddr), vaddr);

	return *ptep;
}

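/*
 * kmap() may sleep, so it must not be called from atomic context.
 * Lowmem pages are returned directly via page_address().
 */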
void *kmap(struct page *page)
{
	might_sleep();
	if (!PageHighMem(page))
		return page_address(page);
	return kmap_high(page);
}
EXPORT_SYMBOL(kmap);

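/*
 * kunmap() must not be called from interrupt context; lowmem pages
 * need no unmapping.
 */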
void kunmap(struct page *page)
{
	BUG_ON(in_interrupt());
	if (!PageHighMem(page))
		return;
	kunmap_high(page);
}
EXPORT_SYMBOL(kunmap);

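/*
 * Atomically map a highmem page into one of this CPU's fixmap slots.
 * Preemption and page faults are disabled until the matching
 * kunmap_atomic().  If the page already has a permanent kmap mapping,
 * kmap_high_get() reuses (and pins) it instead of creating a new alias.
 */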
void *kmap_atomic(struct page *page)
{
	unsigned int idx;
	unsigned long vaddr;
	void *kmap;
	int type;

	preempt_disable();
	pagefault_disable();
	if (!PageHighMem(page))
		return page_address(page);

#ifdef CONFIG_DEBUG_HIGHMEM
	/*
	 * There is no cache coherency issue when non VIVT, so force the
	 * dedicated kmap usage for better debugging purposes in that case.
	 */
	if (!cache_is_vivt())
		kmap = NULL;
	else
#endif
	kmap = kmap_high_get(page);
	if (kmap)
		return kmap;

	type = kmap_atomic_idx_push();

	idx = FIX_KMAP_BEGIN + type + KM_TYPE_NR * smp_processor_id();
	vaddr = __fix_to_virt(idx);
#ifdef CONFIG_DEBUG_HIGHMEM
	/*
	 * With debugging enabled, kunmap_atomic forces that entry to 0.
	 * Make sure it was indeed properly unmapped.
	 */
	BUG_ON(!pte_none(get_fixmap_pte(vaddr)));
#endif
	/*
	 * When debugging is off, kunmap_atomic leaves the previous mapping
	 * in place, so the contained TLB flush ensures the TLB is updated
	 * with the new mapping.
	 */
	set_fixmap_pte(idx, mk_pte(page, kmap_prot));

	return (void *)vaddr;
}
EXPORT_SYMBOL(kmap_atomic);

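/*
 * Tear down a mapping set up by kmap_atomic() or kmap_atomic_pfn().
 * Fixmap mappings are flushed (on VIVT caches) and, with
 * CONFIG_DEBUG_HIGHMEM, cleared; addresses in the pkmap area were
 * obtained through kmap_high_get() and are released with kunmap_high().
 */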
void __kunmap_atomic(void *kvaddr)
{
	unsigned long vaddr = (unsigned long) kvaddr & PAGE_MASK;
	int idx, type;

	if (kvaddr >= (void *)FIXADDR_START) {
		type = kmap_atomic_idx();
		idx = FIX_KMAP_BEGIN + type + KM_TYPE_NR * smp_processor_id();

		if (cache_is_vivt())
			__cpuc_flush_dcache_area((void *)vaddr, PAGE_SIZE);
#ifdef CONFIG_DEBUG_HIGHMEM
		BUG_ON(vaddr != __fix_to_virt(idx));
		set_fixmap_pte(idx, __pte(0));
#else
		(void) idx;  /* to kill a warning */
#endif
		kmap_atomic_idx_pop();
	} else if (vaddr >= PKMAP_ADDR(0) && vaddr < PKMAP_ADDR(LAST_PKMAP)) {
		/* this address was obtained through kmap_high_get() */
		kunmap_high(pte_page(pkmap_page_table[PKMAP_NR(vaddr)]));
	}
	pagefault_enable();
	preempt_enable();
}
EXPORT_SYMBOL(__kunmap_atomic);

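/*
 * Same as kmap_atomic(), but takes a page frame number.  Note that the
 * permanent-kmap reuse done by kmap_atomic() is not attempted here.
 */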
void *kmap_atomic_pfn(unsigned long pfn)
{
	unsigned long vaddr;
	int idx, type;
	struct page *page = pfn_to_page(pfn);

	preempt_disable();
	pagefault_disable();
	if (!PageHighMem(page))
		return page_address(page);

	type = kmap_atomic_idx_push();
	idx = FIX_KMAP_BEGIN + type + KM_TYPE_NR * smp_processor_id();
	vaddr = __fix_to_virt(idx);
#ifdef CONFIG_DEBUG_HIGHMEM
	BUG_ON(!pte_none(get_fixmap_pte(vaddr)));
#endif
	set_fixmap_pte(idx, pfn_pte(pfn, kmap_prot));

	return (void *)vaddr;
}