// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * MMU context allocation for 64-bit kernels.
 *
 * Copyright (C) 2004 Anton Blanchard, IBM Corp. <anton@samba.org>
 */

#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/string.h>
#include <linux/types.h>
#include <linux/mm.h>
#include <linux/pkeys.h>
#include <linux/spinlock.h>
#include <linux/idr.h>
#include <linux/export.h>
#include <linux/gfp.h>
#include <linux/slab.h>

#include <asm/mmu_context.h>
#include <asm/pgalloc.h>

static DEFINE_IDA(mmu_context_ida);

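/*
 * Allocate a context id in the inclusive range [min_id, max_id] from the
 * shared IDA. Returns the allocated id, or a negative errno from
 * ida_alloc_range() (e.g. -ENOSPC when the range is exhausted).
 */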
static int alloc_context_id(int min_id, int max_id)
{
	return ida_alloc_range(&mmu_context_ida, min_id, max_id, GFP_KERNEL);
}

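/*
 * Reserve a specific context id. ida_alloc_range() with min == max == id
 * either returns exactly @id or a negative errno, so any other result
 * means the reservation failed (most likely the id is already in use).
 */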
void hash__reserve_context_id(int id)
{
	int result = ida_alloc_range(&mmu_context_ida, id, id, GFP_KERNEL);

	WARN(result != id, "mmu: Failed to reserve context id %d (rc %d)\n", id, result);
}

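/*
 * Allocate a user context id for the hash MMU. The upper bound depends on
 * whether the CPU supports 68-bit virtual addresses; without
 * MMU_FTR_68_BIT_VA the usable context range shrinks to the 65-bit one.
 */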
int hash__alloc_context_id(void)
{
	unsigned long max;

	if (mmu_has_feature(MMU_FTR_68_BIT_VA))
		max = MAX_USER_CONTEXT;
	else
		max = MAX_USER_CONTEXT_65BIT_VA;

	return alloc_context_id(MIN_USER_CONTEXT, max);
}
EXPORT_SYMBOL_GPL(hash__alloc_context_id);

void slb_setup_new_exec(void);

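/*
 * (Re)allocate the context ids for every VA range of @ctx. Slot 0
 * (ctx->id) always gets a fresh id; the other slots are reallocated only
 * if they already held one. On failure, every id allocated here is
 * released and the errno from hash__alloc_context_id() is returned.
 */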
static int realloc_context_ids(mm_context_t *ctx)
{
	int i, id;

	/*
	 * id 0 (aka. ctx->id) is special: we always allocate a new one, even
	 * if there wasn't one allocated previously (which happens in the exec
	 * case where ctx is newly allocated).
	 *
	 * We have to be a bit careful here. We must keep the existing ids in
	 * the array, so that we can test if they're non-zero to decide if we
	 * need to allocate a new one. However, in case of error we must free
	 * the ids we've allocated but *not* any of the existing ones (or risk
	 * a UAF). That's why we decrement i at the start of the error handling
	 * loop, to skip the id that we just tested but couldn't reallocate.
	 */
	for (i = 0; i < ARRAY_SIZE(ctx->extended_id); i++) {
		if (i == 0 || ctx->extended_id[i]) {
			id = hash__alloc_context_id();
			if (id < 0)
				goto error;

			ctx->extended_id[i] = id;
		}
	}

	/* The caller expects us to return id */
	return ctx->id;

error:
	for (i--; i >= 0; i--) {
		if (ctx->extended_id[i])
			ida_free(&mmu_context_ida, ctx->extended_id[i]);
	}

	return id;
}

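/*
 * Set up the hash MMU context for a new mm. A brand-new mm (id == 0) gets
 * freshly initialized slice state; a forked mm (id != 0) copies the hash
 * context from the parent, including a subpage protection table if the
 * parent has one. Returns the new context id, or a negative errno.
 */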
static int hash__init_new_context(struct mm_struct *mm)
{
	int index;

	mm->context.hash_context = kmalloc(sizeof(struct hash_mm_context),
					   GFP_KERNEL);
	if (!mm->context.hash_context)
		return -ENOMEM;

	/*
	 * The old code would re-promote on fork. We don't do that when using
	 * slices, as it could cause problems promoting slices that have been
	 * forced down to 4K.
	 *
	 * For book3s we have MMU_NO_CONTEXT set to be ~0. Hence check
	 * explicitly against context.id == 0. This ensures that we properly
	 * initialize context slice details for newly allocated mm's (which will
	 * have id == 0) and don't alter context slice inherited via fork (which
	 * will have id != 0).
	 *
	 * We should not be calling init_new_context() on init_mm. Hence a
	 * check against 0 is OK.
	 */
	if (mm->context.id == 0) {
		memset(mm->context.hash_context, 0, sizeof(struct hash_mm_context));
		slice_init_new_context_exec(mm);
	} else {
		/* This is fork. Copy hash_context details from current->mm */
		memcpy(mm->context.hash_context, current->mm->context.hash_context, sizeof(struct hash_mm_context));
#ifdef CONFIG_PPC_SUBPAGE_PROT
		/* Inherit subpage protection details if the parent has them. */
		if (current->mm->context.hash_context->spt) {
			mm->context.hash_context->spt = kmalloc(sizeof(struct subpage_prot_table),
								GFP_KERNEL);
			if (!mm->context.hash_context->spt) {
				kfree(mm->context.hash_context);
				return -ENOMEM;
			}
		}
#endif
	}

	index = realloc_context_ids(&mm->context);
	if (index < 0) {
#ifdef CONFIG_PPC_SUBPAGE_PROT
		kfree(mm->context.hash_context->spt);
#endif
		kfree(mm->context.hash_context);
		return index;
	}

	pkey_mm_init(mm);
	return index;
}

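/*
 * exec-time fixups for the hash MMU: reset the slice state and the SLB
 * state that should not be carried over from the old process image.
 */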
void hash__setup_new_exec(void)
{
	slice_setup_new_exec();

	slb_setup_new_exec();
}

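/*
 * Allocate a PID for the new mm and install its process table entry.
 * PIDs below mmu_base_pid are reserved, and the top of the range is
 * bounded by the hardware PID width (mmu_pid_bits).
 */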
static int radix__init_new_context(struct mm_struct *mm)
{
	unsigned long rts_field;
	int index, max_id;

	max_id = (1 << mmu_pid_bits) - 1;
	index = alloc_context_id(mmu_base_pid, max_id);
	if (index < 0)
		return index;

	/* Set the process table entry. */
	rts_field = radix__get_tree_size();
	process_tb[index].prtb0 = cpu_to_be64(rts_field | __pa(mm->pgd) | RADIX_PGD_INDEX_SIZE);

	/*
	 * Order the above store with subsequent update of the PID
	 * register (at which point HW can start loading/caching
	 * the entry) and the corresponding load by the MMU from
	 * the L2 cache.
	 */
	asm volatile("ptesync;isync" : : : "memory");

	mm->context.hash_context = NULL;

	return index;
}

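/*
 * Arch hook called from the core mm code when a new mm is created (fork
 * and exec). Picks the radix or hash initialization path and sets up the
 * fields common to both: the page table fragment caches and the
 * active_cpus/copros counters (plus IOMMU state where configured).
 */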
int init_new_context(struct task_struct *tsk, struct mm_struct *mm)
{
	int index;

	if (radix_enabled())
		index = radix__init_new_context(mm);
	else
		index = hash__init_new_context(mm);

	if (index < 0)
		return index;

	mm->context.id = index;

	mm->context.pte_frag = NULL;
	mm->context.pmd_frag = NULL;
#ifdef CONFIG_SPAPR_TCE_IOMMU
	mm_iommu_init(mm);
#endif
	atomic_set(&mm->context.active_cpus, 0);
	atomic_set(&mm->context.copros, 0);

	return 0;
}

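/* Release a single context id back to the IDA. */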
void __destroy_context(int context_id)
{
	ida_free(&mmu_context_ida, context_id);
}
EXPORT_SYMBOL_GPL(__destroy_context);

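/*
 * Free every context id held by @ctx (the primary id lives in
 * extended_id[0], which is unioned with ctx->id) along with the hash
 * context itself.
 */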
static void destroy_contexts(mm_context_t *ctx)
{
	int index, context_id;

	for (index = 0; index < ARRAY_SIZE(ctx->extended_id); index++) {
		context_id = ctx->extended_id[index];
		if (context_id)
			ida_free(&mmu_context_ida, context_id);
	}
	kfree(ctx->hash_context);
}

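/*
 * mm->context.pmd_frag points at the next free fragment in a PMD page, so
 * its offset within the page tells us how many fragments were handed out.
 * Drop the references still held for the unused fragments, and free the
 * page once the last fragment user is gone.
 */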
static void pmd_frag_destroy(void *pmd_frag)
{
	int count;
	struct page *page;

	page = virt_to_page(pmd_frag);
	/* drop all the pending references */
	count = ((unsigned long)pmd_frag & ~PAGE_MASK) >> PMD_FRAG_SIZE_SHIFT;
	/* We allow PMD_FRAG_NR fragments from a PMD page */
	if (atomic_sub_and_test(PMD_FRAG_NR - count, &page->pt_frag_refcount)) {
		pgtable_pmd_page_dtor(page);
		__free_page(page);
	}
}

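/* Release the mm's cached PTE and PMD fragment pages, if any. */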
static void destroy_pagetable_cache(struct mm_struct *mm)
{
	void *frag;

	frag = mm->context.pte_frag;
	if (frag)
		pte_frag_destroy(frag);

	frag = mm->context.pmd_frag;
	if (frag)
		pmd_frag_destroy(frag);
}

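/*
 * Final teardown of mm->context when the mm is freed. See the comment
 * below for why the radix process table entry may still need clearing
 * here rather than in arch_exit_mmap().
 */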
void destroy_context(struct mm_struct *mm)
{
#ifdef CONFIG_SPAPR_TCE_IOMMU
	WARN_ON_ONCE(!list_empty(&mm->context.iommu_group_mem_list));
#endif
	/*
	 * For tasks which were successfully initialized we end up calling
	 * arch_exit_mmap(), which clears the process table entry. And
	 * arch_exit_mmap() is called before the required fullmm TLB flush,
	 * which does a RIC=2 flush. Hence for an initialized task, both the
	 * process table entry and any cached copies of it are already gone.
	 *
	 * The clearing below handles the error case during task init: we set
	 * the process table entry early, so if task initialization fails we
	 * must ensure the entry is zeroed. We need not worry about cached
	 * process table entries because the task never ran with this PID.
	 */
	if (radix_enabled())
		process_tb[mm->context.id].prtb0 = 0;
	else
		subpage_prot_free(mm);
	destroy_contexts(&mm->context);
	mm->context.id = MMU_NO_CONTEXT;
}

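/*
 * Called when the address space is torn down, before the final fullmm TLB
 * flush. Frees the page table fragment caches and, on radix, clears the
 * process table entry (see the comment below on why no barrier is needed).
 */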
void arch_exit_mmap(struct mm_struct *mm)
{
	destroy_pagetable_cache(mm);

	if (radix_enabled()) {
		/*
		 * Radix doesn't have a valid bit in the process table
		 * entries. However we know that at least the P9 implementation
		 * will avoid caching an entry with an invalid RTS field,
		 * and 0 is invalid. So this will do.
		 *
		 * This runs before the "fullmm" tlb flush in exit_mmap,
		 * which does a RIC=2 tlbie to clear the process table
		 * entry. See the "fullmm" comments in tlb-radix.c.
		 *
		 * No barrier required here after the store because
		 * this process will do the invalidate, which starts with
		 * ptesync.
		 */
		process_tb[mm->context.id].prtb0 = 0;
	}
}

#ifdef CONFIG_PPC_RADIX_MMU
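/*
 * Switch the hardware PID to the next mm. The isync ensures the mtspr has
 * taken effect before any subsequent instructions run translations under
 * the new context.
 */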
void radix__switch_mmu_context(struct mm_struct *prev, struct mm_struct *next)
{
	mtspr(SPRN_PID, next->context.id);
	isync();
}
#endif