/*
 * Based on arch/arm/include/asm/processor.h
 *
 * Copyright (C) 1995-1999 Russell King
 * Copyright (C) 2012 ARM Ltd.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program. If not, see <http://www.gnu.org/licenses/>.
 */
#ifndef __ASM_PROCESSOR_H
#define __ASM_PROCESSOR_H

#define TASK_SIZE_64		(UL(1) << VA_BITS)

#define KERNEL_DS		UL(-1)
#define USER_DS			(TASK_SIZE_64 - 1)
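/*
 * Note (editorial, not from the original header): in this kernel generation
 * these two constants are the values used for the thread's user-access limit
 * via get_fs()/set_fs(). USER_DS caps the uaccess routines at the top of the
 * user address range, while KERNEL_DS (all ones) lifts that restriction for
 * kernel-internal accesses.
 */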

/*
 * On arm64 systems, unaligned accesses by the CPU are cheap, and so there is
 * no point in shifting all network buffers by 2 bytes just to make some IP
 * header fields appear aligned in memory, potentially sacrificing some DMA
 * performance on some platforms.
 */
#define NET_IP_ALIGN	0
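/*
 * Illustration only (not part of this header): network drivers typically
 * reserve NET_IP_ALIGN bytes of headroom when allocating a receive buffer,
 * e.g.
 *
 *	skb_reserve(skb, NET_IP_ALIGN);
 *
 * With NET_IP_ALIGN == 0 this is a no-op, so arm64 receive buffers keep the
 * alignment that is best for DMA.
 */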

#ifndef __ASSEMBLY__
#ifdef __KERNEL__

#include <linux/build_bug.h>
#include <linux/cache.h>
#include <linux/init.h>
#include <linux/stddef.h>
#include <linux/string.h>

#include <asm/alternative.h>
#include <asm/cpufeature.h>
#include <asm/hw_breakpoint.h>
#include <asm/lse.h>
#include <asm/pgtable-hwdef.h>
#include <asm/ptrace.h>
#include <asm/types.h>

/*
 * TASK_SIZE - the maximum size of a user space task.
 * TASK_UNMAPPED_BASE - the lower boundary of the mmap VM area.
 */
#ifdef CONFIG_COMPAT
#define TASK_SIZE_32		UL(0x100000000)
#define TASK_SIZE		(test_thread_flag(TIF_32BIT) ? \
				TASK_SIZE_32 : TASK_SIZE_64)
#define TASK_SIZE_OF(tsk)	(test_tsk_thread_flag(tsk, TIF_32BIT) ? \
				TASK_SIZE_32 : TASK_SIZE_64)
#else
#define TASK_SIZE		TASK_SIZE_64
#endif /* CONFIG_COMPAT */

#define TASK_UNMAPPED_BASE	(PAGE_ALIGN(TASK_SIZE / 4))
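/*
 * Worked example, assuming a CONFIG_ARM64_VA_BITS=48 kernel and a native
 * 64-bit task:
 *
 *	TASK_SIZE_64       = 1UL << 48       = 0x0001000000000000
 *	TASK_UNMAPPED_BASE = TASK_SIZE / 4   = 0x0000400000000000
 *
 * (already page aligned, so PAGE_ALIGN() leaves it unchanged).
 */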

#define STACK_TOP_MAX		TASK_SIZE_64
#ifdef CONFIG_COMPAT
#define AARCH32_VECTORS_BASE	0xffff0000
#define STACK_TOP		(test_thread_flag(TIF_32BIT) ? \
				AARCH32_VECTORS_BASE : STACK_TOP_MAX)
#else
#define STACK_TOP		STACK_TOP_MAX
#endif /* CONFIG_COMPAT */

extern phys_addr_t arm64_dma_phys_limit;
#define ARCH_LOW_ADDRESS_LIMIT	(arm64_dma_phys_limit - 1)

struct debug_info {
#ifdef CONFIG_HAVE_HW_BREAKPOINT
	/* Have we suspended stepping by a debugger? */
	int			suspended_step;
	/* Allow breakpoints and watchpoints to be disabled for this thread. */
	int			bps_disabled;
	int			wps_disabled;
	/* Hardware breakpoints pinned to this task. */
	struct perf_event	*hbp_break[ARM_MAX_BRP];
	struct perf_event	*hbp_watch[ARM_MAX_WRP];
#endif
};

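/*
 * Callee-saved register state preserved across a context switch; saved and
 * restored by cpu_switch_to() (declared under "Thread switching" below).
 */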
struct cpu_context {
	unsigned long x19;
	unsigned long x20;
	unsigned long x21;
	unsigned long x22;
	unsigned long x23;
	unsigned long x24;
	unsigned long x25;
	unsigned long x26;
	unsigned long x27;
	unsigned long x28;
	unsigned long fp;
	unsigned long sp;
	unsigned long pc;
};

struct thread_struct {
	struct cpu_context	cpu_context;	/* cpu context */

	/*
	 * Whitelisted fields for hardened usercopy:
	 * Maintainers must ensure manually that this contains no
	 * implicit padding.
	 */
	struct {
		unsigned long	tp_value;	/* TLS register */
		unsigned long	tp2_value;
		struct user_fpsimd_state fpsimd_state;
	} uw;

	unsigned int		fpsimd_cpu;
	void			*sve_state;	/* SVE registers, if any */
	unsigned int		sve_vl;		/* SVE vector length */
	unsigned int		sve_vl_onexec;	/* SVE vl after next exec */
	unsigned long		fault_address;	/* fault info */
	unsigned long		fault_code;	/* ESR_EL1 value */
	struct debug_info	debug;		/* debugging */
};

static inline void arch_thread_struct_whitelist(unsigned long *offset,
						unsigned long *size)
{
	/* Verify that there is no padding among the whitelisted fields: */
	BUILD_BUG_ON(sizeof_field(struct thread_struct, uw) !=
		     sizeof_field(struct thread_struct, uw.tp_value) +
		     sizeof_field(struct thread_struct, uw.tp2_value) +
		     sizeof_field(struct thread_struct, uw.fpsimd_state));

	*offset = offsetof(struct thread_struct, uw);
	*size = sizeof_field(struct thread_struct, uw);
}
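/*
 * Illustration only, simplified from the core code (kernel/fork.c): the
 * whitelist reported above is fed into the task_struct slab cache, so that
 * hardened usercopy only permits copy_{to,from}_user() on the thread.uw
 * region of a task_struct. Roughly:
 *
 *	unsigned long useroffset, usersize;
 *
 *	arch_thread_struct_whitelist(&useroffset, &usersize);
 *	useroffset += offsetof(struct task_struct, thread);
 *	task_struct_cachep = kmem_cache_create_usercopy("task_struct",
 *			arch_task_struct_size, align,
 *			SLAB_PANIC | SLAB_ACCOUNT,
 *			useroffset, usersize, NULL);
 */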

#ifdef CONFIG_COMPAT
#define task_user_tls(t)					\
({								\
	unsigned long *__tls;					\
	if (is_compat_thread(task_thread_info(t)))		\
		__tls = &(t)->thread.uw.tp2_value;		\
	else							\
		__tls = &(t)->thread.uw.tp_value;		\
	__tls;							\
 })
#else
#define task_user_tls(t)	(&(t)->thread.uw.tp_value)
#endif

/* Sync TPIDR_EL0 back to thread_struct for current */
void tls_preserve_current_state(void);
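/*
 * A minimal sketch of what the implementation in arch/arm64/kernel/process.c
 * does (shown here for illustration only):
 *
 *	void tls_preserve_current_state(void)
 *	{
 *		*task_user_tls(current) = read_sysreg(tpidr_el0);
 *	}
 */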

#define INIT_THREAD {				\
	.fpsimd_cpu = NR_CPUS,			\
}

static inline void start_thread_common(struct pt_regs *regs, unsigned long pc)
{
	memset(regs, 0, sizeof(*regs));
	forget_syscall(regs);
	regs->pc = pc;
}

static inline void start_thread(struct pt_regs *regs, unsigned long pc,
				unsigned long sp)
{
	start_thread_common(regs, pc);
	regs->pstate = PSR_MODE_EL0t;

	if (arm64_get_ssbd_state() != ARM64_SSBD_FORCE_ENABLE)
		regs->pstate |= PSR_SSBS_BIT;

	regs->sp = sp;
}
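/*
 * Illustration only: exec()'s binary loaders use this to point the saved
 * user register state at the new image; fs/binfmt_elf.c does roughly
 *
 *	start_thread(current_pt_regs(), elf_entry, bprm->p);
 *
 * where bprm->p is the initial user stack pointer.
 */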

#ifdef CONFIG_COMPAT
static inline void compat_start_thread(struct pt_regs *regs, unsigned long pc,
				       unsigned long sp)
{
	start_thread_common(regs, pc);
	regs->pstate = PSR_AA32_MODE_USR;
	if (pc & 1)
		regs->pstate |= PSR_AA32_T_BIT;

#ifdef __AARCH64EB__
	regs->pstate |= PSR_AA32_E_BIT;
#endif

	if (arm64_get_ssbd_state() != ARM64_SSBD_FORCE_ENABLE)
		regs->pstate |= PSR_AA32_SSBS_BIT;

	regs->compat_sp = sp;
}
#endif

/* Forward declaration; the full definition lives in <linux/sched.h>. */
struct task_struct;

/* Free all resources held by a thread. */
extern void release_thread(struct task_struct *);

unsigned long get_wchan(struct task_struct *p);

/* Tell the CPU we are busy-waiting ("yield" is a hint, e.g. to an SMT sibling). */
static inline void cpu_relax(void)
{
	asm volatile("yield" ::: "memory");
}

/* Thread switching */
extern struct task_struct *cpu_switch_to(struct task_struct *prev,
					 struct task_struct *next);

#define task_pt_regs(p) \
	((struct pt_regs *)(THREAD_SIZE + task_stack_page(p)) - 1)
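/*
 * The saved user register state sits at the very top of the task's kernel
 * stack. For example, with a 16K THREAD_SIZE:
 *
 *	task_pt_regs(p) == task_stack_page(p) + 16384 - sizeof(struct pt_regs)
 */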

#define KSTK_EIP(tsk)	((unsigned long)task_pt_regs(tsk)->pc)
#define KSTK_ESP(tsk)	user_stack_pointer(task_pt_regs(tsk))

/*
 * Prefetching support
 */
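/*
 * The PRFM hints below are <type><target><policy>: "pld"/"pst" = prefetch
 * for load/store, "l1" = into the L1 cache, "keep" = retain (temporal),
 * "strm" = streaming (use once).
 */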
#define ARCH_HAS_PREFETCH
static inline void prefetch(const void *ptr)
{
	asm volatile("prfm pldl1keep, %a0\n" : : "p" (ptr));
}

#define ARCH_HAS_PREFETCHW
static inline void prefetchw(const void *ptr)
{
	asm volatile("prfm pstl1keep, %a0\n" : : "p" (ptr));
}

#define ARCH_HAS_SPINLOCK_PREFETCH
static inline void spin_lock_prefetch(const void *ptr)
{
	asm volatile(ARM64_LSE_ATOMIC_INSN(
		     "prfm pstl1strm, %a0",
		     "nop") : : "p" (ptr));
}

#define HAVE_ARCH_PICK_MMAP_LAYOUT

#endif /* __KERNEL__ */

extern unsigned long __ro_after_init signal_minsigstksz; /* sigframe size */
extern void __init minsigstksz_setup(void);

/*
 * Not at the top of the file due to a direct #include cycle between
 * <asm/fpsimd.h> and <asm/processor.h>. Deferring this #include
 * ensures that contents of processor.h are visible to fpsimd.h even if
 * processor.h is included first.
 *
 * These prctl helpers are the only things in this file that require
 * fpsimd.h. The core code expects them to be in this header.
 */
#include <asm/fpsimd.h>

/* Userspace interface for PR_SVE_{SET,GET}_VL prctl()s: */
#define SVE_SET_VL(arg)	sve_set_current_vl(arg)
#define SVE_GET_VL()	sve_get_current_vl()
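/*
 * Illustrative userspace usage (not part of this header): the vector length
 * argument is in bytes, so a task could request a 256-bit SVE vector length
 * that also persists across exec() with
 *
 *	prctl(PR_SVE_SET_VL, 32 | PR_SVE_VL_INHERIT);
 *
 * and read the current setting back with prctl(PR_SVE_GET_VL); see the SVE
 * documentation under Documentation/arm64/ for the full semantics.
 */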

/*
 * For CONFIG_GCC_PLUGIN_STACKLEAK
 *
 * These need to be macros because otherwise we get stuck in a nightmare
 * of header definitions for the use of task_stack_page.
 */

#define current_top_of_stack()							\
({										\
	struct stack_info _info;						\
	BUG_ON(!on_accessible_stack(current, current_stack_pointer, &_info));	\
	_info.high;								\
})
#define on_thread_stack()	(on_task_stack(current, current_stack_pointer, NULL))

#endif /* __ASSEMBLY__ */
#endif /* __ASM_PROCESSOR_H */