// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2015 Synopsys, Inc. (www.synopsys.com)
 */

#include <linux/memblock.h>
#include <linux/export.h>
#include <linux/highmem.h>
#include <linux/pgtable.h>
#include <asm/processor.h>
#include <asm/pgalloc.h>
#include <asm/tlbflush.h>

/*
 * HIGHMEM API:
 *
 * kmap() provides sleep semantics, hence is referred to as "permanent maps".
 * It allows mapping LAST_PKMAP pages, using @last_pkmap_nr as the cursor
 * for book-keeping.
 *
 * kmap_atomic() can't sleep (calls pagefault_disable()), thus it provides
 * short-lived aka "temporary" mappings, which historically were implemented
 * as fixmaps (compile time addr etc). Their book-keeping is done per cpu.
 *
 * Both these facts combined (preemption disabled and per-cpu allocation)
 * mean the total number of concurrent fixmaps is limited to the max number
 * of such allocations in a single control path. Thus KM_TYPE_NR (another
 * historic relic) is a small-ish number which caps the max percpu fixmaps.
 *
 * ARC HIGHMEM Details
 *
 * - the kernel vaddr space from 0x7z to 0x8z (currently used by vmalloc/module)
 *   is now shared between vmalloc and kmap (non overlapping though)
 *
 * - Both fixmap and pkmap use a dedicated page table each, hooked up to the
 *   swapper PGD. This means each only has 1 PGDIR_SIZE worth of kvaddr
 *   mappings, i.e. 2M of kvaddr space for a typical config (8K page and
 *   11:8:13 traversal split)
 *
 * - fixmap anyhow needs only a limited number of mappings, so 2M of kvaddr
 *   == 256 PTE slots across NR_CPUS is more than sufficient (generic code
 *   defines KM_TYPE_NR as 20)
 *
 * - pkmap, being preemptible, in theory could do with more than 256
 *   concurrent mappings. However, the generic pkmap code, map_new_virtual(),
 *   doesn't traverse the PGD and only works with a single page table,
 *   @pkmap_page_table, hence that sets the limit
 */

extern pte_t *pkmap_page_table;
static pte_t *fixmap_page_table;

void *kmap_atomic_high_prot(struct page *page, pgprot_t prot)
{
	int idx, cpu_idx;
	unsigned long vaddr;

	cpu_idx = kmap_atomic_idx_push();
	idx = cpu_idx + KM_TYPE_NR * smp_processor_id();
	vaddr = FIXMAP_ADDR(idx);

	set_pte_at(&init_mm, vaddr, fixmap_page_table + idx,
		   mk_pte(page, prot));

	return (void *)vaddr;
}
EXPORT_SYMBOL(kmap_atomic_high_prot);
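
/*
 * Worked example of the index math above, assuming the generic KM_TYPE_NR
 * of 20: the second nested kmap_atomic() on CPU 1 pushes cpu_idx 1, so
 * idx = 1 + 20 * 1 = 21 and the page is mapped at FIXMAP_ADDR(21). Each
 * CPU thus owns a disjoint, stack-allocated window of KM_TYPE_NR slots.
 */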

void kunmap_atomic_high(void *kv)
{
	unsigned long kvaddr = (unsigned long)kv;

	if (kvaddr >= FIXMAP_BASE && kvaddr < (FIXMAP_BASE + FIXMAP_SIZE)) {

		/*
		 * Because preemption is disabled, this vaddr can be associated
		 * with the currently allocated index.
		 * But with multiple live kmap_atomic() mappings, it still
		 * relies on callers unmapping in the right (reverse) order.
		 */
		int cpu_idx = kmap_atomic_idx();
		int idx = cpu_idx + KM_TYPE_NR * smp_processor_id();

		WARN_ON(kvaddr != FIXMAP_ADDR(idx));

		pte_clear(&init_mm, kvaddr, fixmap_page_table + idx);
		local_flush_tlb_kernel_range(kvaddr, kvaddr + PAGE_SIZE);

		kmap_atomic_idx_pop();
	}
}
EXPORT_SYMBOL(kunmap_atomic_high);
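
/*
 * Since kmap_atomic_idx_push()/_pop() treat the per-cpu slots as a stack,
 * nested atomic maps must be torn down in reverse order. A minimal sketch
 * (hypothetical src_page/dst_page):
 *
 *	void *src = kmap_atomic(src_page);
 *	void *dst = kmap_atomic(dst_page);
 *	memcpy(dst, src, PAGE_SIZE);
 *	kunmap_atomic(dst);		// last mapped, first unmapped
 *	kunmap_atomic(src);
 */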

static noinline pte_t * __init alloc_kmap_pgtable(unsigned long kvaddr)
{
	pgd_t *pgd_k;
	p4d_t *p4d_k;
	pud_t *pud_k;
	pmd_t *pmd_k;
	pte_t *pte_k;

	pgd_k = pgd_offset_k(kvaddr);
	p4d_k = p4d_offset(pgd_k, kvaddr);
	pud_k = pud_offset(p4d_k, kvaddr);
	pmd_k = pmd_offset(pud_k, kvaddr);

	pte_k = (pte_t *)memblock_alloc_low(PAGE_SIZE, PAGE_SIZE);
	if (!pte_k)
		panic("%s: Failed to allocate %lu bytes align=0x%lx\n",
		      __func__, PAGE_SIZE, PAGE_SIZE);

	pmd_populate_kernel(&init_mm, pmd_k, pte_k);
	return pte_k;
}
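
/*
 * A sketch of how the returned PTE page gets used (hypothetical page/prot
 * values; on ARC's 2-level tables the p4d/pud/pmd walk above folds down to
 * a single swapper PGD slot covering @kvaddr):
 *
 *	pte_t *pt = alloc_kmap_pgtable(FIXMAP_BASE);
 *	set_pte_at(&init_mm, FIXMAP_BASE, pt + 0, mk_pte(page, PAGE_KERNEL));
 */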

void __init kmap_init(void)
{
	/* Due to recursive include hell, we can't do this in processor.h */
	BUILD_BUG_ON(PAGE_OFFSET < (VMALLOC_END + FIXMAP_SIZE + PKMAP_SIZE));

	BUILD_BUG_ON(LAST_PKMAP > PTRS_PER_PTE);
	pkmap_page_table = alloc_kmap_pgtable(PKMAP_BASE);

	BUILD_BUG_ON(KM_TYPE_NR > PTRS_PER_PTE);
	fixmap_page_table = alloc_kmap_pgtable(FIXMAP_BASE);
}
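
/*
 * Worked sizing for the BUILD_BUG_ON()s above, using the typical config
 * quoted at the top of this file (8K page, 11:8:13 split): one PGDIR_SIZE
 * spans 2^(8+13) bytes = 2M of kvaddr, i.e. 2M / 8K = 256 PTE slots
 * (PTRS_PER_PTE). Both LAST_PKMAP and KM_TYPE_NR (20) must therefore fit
 * within a single 256-entry page table, which these assertions enforce at
 * build time.
 */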
125}