/*
 * FP/SIMD context switching and fault handling
 *
 * Copyright (C) 2012 ARM Ltd.
 * Author: Catalin Marinas <catalin.marinas@arm.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program.  If not, see <http://www.gnu.org/licenses/>.
 */

#include <linux/bitmap.h>
#include <linux/bitops.h>
#include <linux/bottom_half.h>
#include <linux/bug.h>
#include <linux/cache.h>
#include <linux/compat.h>
#include <linux/cpu.h>
#include <linux/cpu_pm.h>
#include <linux/kernel.h>
#include <linux/linkage.h>
#include <linux/irqflags.h>
#include <linux/init.h>
#include <linux/percpu.h>
#include <linux/prctl.h>
#include <linux/preempt.h>
#include <linux/ptrace.h>
#include <linux/sched/signal.h>
#include <linux/sched/task_stack.h>
#include <linux/signal.h>
#include <linux/slab.h>
#include <linux/stddef.h>
#include <linux/sysctl.h>

#include <asm/esr.h>
#include <asm/fpsimd.h>
#include <asm/cpufeature.h>
#include <asm/cputype.h>
#include <asm/processor.h>
#include <asm/simd.h>
#include <asm/sigcontext.h>
#include <asm/sysreg.h>
#include <asm/traps.h>
#include <asm/virt.h>

#define FPEXC_IOF	(1 << 0)
#define FPEXC_DZF	(1 << 1)
#define FPEXC_OFF	(1 << 2)
#define FPEXC_UFF	(1 << 3)
#define FPEXC_IXF	(1 << 4)
#define FPEXC_IDF	(1 << 7)

/*
 * (Note: in this discussion, statements about FPSIMD apply equally to SVE.)
 *
 * In order to reduce the number of times the FPSIMD state is needlessly saved
 * and restored, we need to keep track of two things:
 * (a) for each task, we need to remember which CPU was the last one to have
 *     the task's FPSIMD state loaded into its FPSIMD registers;
 * (b) for each CPU, we need to remember which task's userland FPSIMD state has
 *     been loaded into its FPSIMD registers most recently, or whether it has
 *     been used to perform kernel mode NEON in the meantime.
 *
 * For (a), we add a fpsimd_cpu field to thread_struct, which gets updated to
 * the id of the current CPU every time the state is loaded onto a CPU. For (b),
 * we add the per-cpu variable 'fpsimd_last_state' (below), which contains the
 * address of the userland FPSIMD state of the task that was loaded onto the CPU
 * the most recently, or NULL if kernel mode NEON has been performed after that.
 *
 * With this in place, we no longer have to restore the next FPSIMD state right
 * when switching between tasks. Instead, we can defer this check to userland
 * resume, at which time we verify whether the CPU's fpsimd_last_state and the
 * task's fpsimd_cpu are still mutually in sync. If this is the case, we
 * can omit the FPSIMD restore.
 *
 * As an optimization, we use the thread_info flag TIF_FOREIGN_FPSTATE to
 * indicate whether or not the userland FPSIMD state of the current task is
 * present in the registers. The flag is set unless the FPSIMD registers of this
 * CPU currently contain the most recent userland FPSIMD state of the current
 * task.
 *
 * In order to allow softirq handlers to use FPSIMD, kernel_neon_begin() may
 * save the task's FPSIMD context back to task_struct from softirq context.
 * To prevent this from racing with the manipulation of the task's FPSIMD state
 * from task context and thereby corrupting the state, it is necessary to
 * protect any manipulation of a task's fpsimd_state or TIF_FOREIGN_FPSTATE
 * flag with local_bh_disable() unless softirqs are already masked.
 *
 * For a certain task, the sequence may look something like this:
 * - the task gets scheduled in; if both the task's fpsimd_cpu field
 *   contains the id of the current CPU, and the CPU's fpsimd_last_state per-cpu
 *   variable points to the task's fpsimd_state, the TIF_FOREIGN_FPSTATE flag is
 *   cleared, otherwise it is set;
 *
 * - the task returns to userland; if TIF_FOREIGN_FPSTATE is set, the task's
 *   userland FPSIMD state is copied from memory to the registers, the task's
 *   fpsimd_cpu field is set to the id of the current CPU, the current
 *   CPU's fpsimd_last_state pointer is set to this task's fpsimd_state and the
 *   TIF_FOREIGN_FPSTATE flag is cleared;
 *
 * - the task executes an ordinary syscall; upon return to userland, the
 *   TIF_FOREIGN_FPSTATE flag will still be cleared, so no FPSIMD state is
 *   restored;
 *
 * - the task executes a syscall which executes some NEON instructions; this is
 *   preceded by a call to kernel_neon_begin(), which copies the task's FPSIMD
 *   register contents to memory, clears the fpsimd_last_state per-cpu variable
 *   and sets the TIF_FOREIGN_FPSTATE flag;
 *
 * - the task gets preempted after kernel_neon_end() is called; as we have not
 *   returned from the 2nd syscall yet, TIF_FOREIGN_FPSTATE is still set so
 *   whatever is in the FPSIMD registers is not saved to memory, but discarded.
 */
struct fpsimd_last_state_struct {
	struct user_fpsimd_state *st;
};

static DEFINE_PER_CPU(struct fpsimd_last_state_struct, fpsimd_last_state);
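
/*
 * Illustrative sketch only (not called anywhere): the convention described
 * above means that task-context code touching current's FPSIMD/SVE state or
 * TIF_FOREIGN_FPSTATE brackets the access with local_bh_disable(), e.g.:
 *
 *	local_bh_disable();
 *	fpsimd_save();		 // write back any live register state
 *	... update current->thread.uw.fpsimd_state ...
 *	local_bh_enable();
 *
 * fpsimd_flush_thread() and fpsimd_update_current_state() below follow
 * exactly this pattern.
 */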

/* Default VL for tasks that don't set it explicitly: */
static int sve_default_vl = -1;

#ifdef CONFIG_ARM64_SVE

/* Maximum supported vector length across all CPUs (initially poisoned) */
int __ro_after_init sve_max_vl = SVE_VL_MIN;
int __ro_after_init sve_max_virtualisable_vl = SVE_VL_MIN;
/* Set of available vector lengths, as vq_to_bit(vq): */
static __ro_after_init DECLARE_BITMAP(sve_vq_map, SVE_VQ_MAX);
/* Set of vector lengths present on at least one cpu: */
static __ro_after_init DECLARE_BITMAP(sve_vq_partial_map, SVE_VQ_MAX);
static void __percpu *efi_sve_state;

#else /* ! CONFIG_ARM64_SVE */

/* Dummy declaration for code that will be optimised out: */
extern __ro_after_init DECLARE_BITMAP(sve_vq_map, SVE_VQ_MAX);
extern __ro_after_init DECLARE_BITMAP(sve_vq_partial_map, SVE_VQ_MAX);
extern void __percpu *efi_sve_state;

#endif /* ! CONFIG_ARM64_SVE */

/*
 * Call __sve_free() directly only if you know task can't be scheduled
 * or preempted.
 */
static void __sve_free(struct task_struct *task)
{
	kfree(task->thread.sve_state);
	task->thread.sve_state = NULL;
}

static void sve_free(struct task_struct *task)
{
	WARN_ON(test_tsk_thread_flag(task, TIF_SVE));

	__sve_free(task);
}

/*
 * TIF_SVE controls whether a task can use SVE without trapping while
 * in userspace, and also the way a task's FPSIMD/SVE state is stored
 * in thread_struct.
 *
 * The kernel uses this flag to track whether a user task is actively
 * using SVE, and therefore whether full SVE register state needs to
 * be tracked.  If not, the cheaper FPSIMD context handling code can
 * be used instead of the more costly SVE equivalents.
 *
 *  * TIF_SVE set:
 *
 *    The task can execute SVE instructions while in userspace without
 *    trapping to the kernel.
 *
 *    When stored, Z0-Z31 (incorporating Vn in bits[127:0] or the
184 * corresponding Zn), P0-P15 and FFR are encoded in in
185 * task->thread.sve_state, formatted appropriately for vector
186 * length task->thread.sve_vl.
187 *
188 * task->thread.sve_state must point to a valid buffer at least
189 * sve_state_size(task) bytes in size.
190 *
191 * During any syscall, the kernel may optionally clear TIF_SVE and
192 * discard the vector state except for the FPSIMD subset.
193 *
194 * * TIF_SVE clear:
195 *
196 * An attempt by the user task to execute an SVE instruction causes
197 * do_sve_acc() to be called, which does some preparation and then
198 * sets TIF_SVE.
199 *
200 * When stored, FPSIMD registers V0-V31 are encoded in
Dave Martin65896542018-03-28 10:50:49 +0100201 * task->thread.uw.fpsimd_state; bits [max : 128] for each of Z0-Z31 are
Dave Martinbc0ee472017-10-31 15:51:05 +0000202 * logically zero but not stored anywhere; P0-P15 and FFR are not
203 * stored and have unspecified values from userspace's point of
204 * view. For hygiene purposes, the kernel zeroes them on next use,
205 * but userspace is discouraged from relying on this.
206 *
207 * task->thread.sve_state does not need to be non-NULL, valid or any
208 * particular size: it must not be dereferenced.
209 *
Dave Martin65896542018-03-28 10:50:49 +0100210 * * FPSR and FPCR are always stored in task->thread.uw.fpsimd_state
211 * irrespective of whether TIF_SVE is clear or set, since these are
212 * not vector length dependent.
Dave Martinbc0ee472017-10-31 15:51:05 +0000213 */
214
215/*
216 * Update current's FPSIMD/SVE registers from thread_struct.
217 *
218 * This function should be called only when the FPSIMD/SVE state in
219 * thread_struct is known to be up to date, when preparing to enter
220 * userspace.
221 *
222 * Softirqs (and preemption) must be disabled.
223 */
224static void task_fpsimd_load(void)
225{
226 WARN_ON(!in_softirq() && !irqs_disabled());
227
228 if (system_supports_sve() && test_thread_flag(TIF_SVE))
Dave Martin2cf97d42018-04-12 17:04:39 +0100229 sve_load_state(sve_pffr(&current->thread),
Dave Martin65896542018-03-28 10:50:49 +0100230 &current->thread.uw.fpsimd_state.fpsr,
Dave Martinbc0ee472017-10-31 15:51:05 +0000231 sve_vq_from_vl(current->thread.sve_vl) - 1);
232 else
Dave Martin65896542018-03-28 10:50:49 +0100233 fpsimd_load_state(&current->thread.uw.fpsimd_state);
Dave Martinbc0ee472017-10-31 15:51:05 +0000234}
235
236/*
Dave Martind1797612018-04-06 14:55:59 +0100237 * Ensure FPSIMD/SVE storage in memory for the loaded context is up to
238 * date with respect to the CPU registers.
Dave Martinbc0ee472017-10-31 15:51:05 +0000239 *
240 * Softirqs (and preemption) must be disabled.
241 */
Dave Martine6b673b2018-04-06 14:55:59 +0100242void fpsimd_save(void)
Dave Martinbc0ee472017-10-31 15:51:05 +0000243{
Dave Martind1797612018-04-06 14:55:59 +0100244 struct user_fpsimd_state *st = __this_cpu_read(fpsimd_last_state.st);
Dave Martine6b673b2018-04-06 14:55:59 +0100245 /* set by fpsimd_bind_task_to_cpu() or fpsimd_bind_state_to_cpu() */
Dave Martind1797612018-04-06 14:55:59 +0100246
Dave Martinbc0ee472017-10-31 15:51:05 +0000247 WARN_ON(!in_softirq() && !irqs_disabled());
248
249 if (!test_thread_flag(TIF_FOREIGN_FPSTATE)) {
250 if (system_supports_sve() && test_thread_flag(TIF_SVE)) {
251 if (WARN_ON(sve_get_vl() != current->thread.sve_vl)) {
252 /*
253 * Can't save the user regs, so current would
254 * re-enter user with corrupt state.
255 * There's no way to recover, so kill it:
256 */
Dave Martinaf40ff62018-03-08 17:41:05 +0000257 force_signal_inject(SIGKILL, SI_KERNEL, 0);
Dave Martinbc0ee472017-10-31 15:51:05 +0000258 return;
259 }
260
Dave Martin2cf97d42018-04-12 17:04:39 +0100261 sve_save_state(sve_pffr(&current->thread), &st->fpsr);
Dave Martinbc0ee472017-10-31 15:51:05 +0000262 } else
Dave Martind1797612018-04-06 14:55:59 +0100263 fpsimd_save_state(st);
Dave Martinbc0ee472017-10-31 15:51:05 +0000264 }
265}
266
Dave Martin7582e222017-10-31 15:51:08 +0000267/*
268 * Helpers to translate bit indices in sve_vq_map to VQ values (and
269 * vice versa). This allows find_next_bit() to be used to find the
270 * _maximum_ VQ not exceeding a certain value.
271 */
272
273static unsigned int vq_to_bit(unsigned int vq)
274{
275 return SVE_VQ_MAX - vq;
276}
277
278static unsigned int bit_to_vq(unsigned int bit)
279{
280 if (WARN_ON(bit >= SVE_VQ_MAX))
281 bit = SVE_VQ_MAX - 1;
282
283 return SVE_VQ_MAX - bit;
284}
285
286/*
287 * All vector length selection from userspace comes through here.
288 * We're on a slow path, so some sanity-checks are included.
289 * If things go wrong there's a bug somewhere, but try to fall back to a
290 * safe choice.
291 */
292static unsigned int find_supported_vector_length(unsigned int vl)
293{
294 int bit;
295 int max_vl = sve_max_vl;
296
297 if (WARN_ON(!sve_vl_valid(vl)))
298 vl = SVE_VL_MIN;
299
300 if (WARN_ON(!sve_vl_valid(max_vl)))
301 max_vl = SVE_VL_MIN;
302
303 if (vl > max_vl)
304 vl = max_vl;
305
306 bit = find_next_bit(sve_vq_map, SVE_VQ_MAX,
307 vq_to_bit(sve_vq_from_vl(vl)));
308 return sve_vl_from_vq(bit_to_vq(bit));
309}
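
/*
 * Worked example of the scheme above (illustrative values): if sve_vq_map
 * has the bits for VQ 1, 2 and 4 set (vector lengths 16, 32 and 64 bytes),
 * then a request for vl == 48 (VQ 3) starts the search at bit
 * vq_to_bit(3) == SVE_VQ_MAX - 3.  Since bit indices grow as VQ shrinks,
 * the first set bit found is the one for VQ 2, and 32 bytes is returned:
 * the request is rounded down to the nearest supported vector length.
 */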

#ifdef CONFIG_SYSCTL

static int sve_proc_do_default_vl(struct ctl_table *table, int write,
				  void __user *buffer, size_t *lenp,
				  loff_t *ppos)
{
	int ret;
	int vl = sve_default_vl;
	struct ctl_table tmp_table = {
		.data = &vl,
		.maxlen = sizeof(vl),
	};

	ret = proc_dointvec(&tmp_table, write, buffer, lenp, ppos);
	if (ret || !write)
		return ret;

	/* Writing -1 has the special meaning "set to max": */
	if (vl == -1)
		vl = sve_max_vl;

	if (!sve_vl_valid(vl))
		return -EINVAL;

	sve_default_vl = find_supported_vector_length(vl);
	return 0;
}
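
/*
 * Example usage of this control from userspace (sketch; the path follows
 * from the "abi" sysctl directory and the procname registered below):
 *
 *	# echo 32 > /proc/sys/abi/sve_default_vector_length
 *	# echo -1 > /proc/sys/abi/sve_default_vector_length   (use sve_max_vl)
 *	# cat /proc/sys/abi/sve_default_vector_length
 *
 * Values are in bytes and are rounded down to a supported vector length.
 */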

static struct ctl_table sve_default_vl_table[] = {
	{
		.procname	= "sve_default_vector_length",
		.mode		= 0644,
		.proc_handler	= sve_proc_do_default_vl,
	},
	{ }
};

static int __init sve_sysctl_init(void)
{
	if (system_supports_sve())
		if (!register_sysctl("abi", sve_default_vl_table))
			return -EINVAL;

	return 0;
}

#else /* ! CONFIG_SYSCTL */
static int __init sve_sysctl_init(void) { return 0; }
#endif /* ! CONFIG_SYSCTL */

#define ZREG(sve_state, vq, n) ((char *)(sve_state) +		\
	(SVE_SIG_ZREG_OFFSET(vq, n) - SVE_SIG_REGS_OFFSET))
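
/*
 * Illustration: task->thread.sve_state uses the signal frame layout for the
 * register block, rebased so that Z0 starts at offset 0.  Each Zn slot is
 * vq * 16 bytes, so with vq == 4 (64-byte vectors), for example,
 * ZREG(sst, 4, 2) points 128 bytes into the buffer.
 */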

/*
 * Transfer the FPSIMD state in task->thread.uw.fpsimd_state to
 * task->thread.sve_state.
 *
 * Task can be a non-runnable task, or current.  In the latter case,
 * softirqs (and preemption) must be disabled.
 * task->thread.sve_state must point to at least sve_state_size(task)
 * bytes of allocated kernel memory.
 * task->thread.uw.fpsimd_state must be up to date before calling this
 * function.
 */
static void fpsimd_to_sve(struct task_struct *task)
{
	unsigned int vq;
	void *sst = task->thread.sve_state;
	struct user_fpsimd_state const *fst = &task->thread.uw.fpsimd_state;
	unsigned int i;

	if (!system_supports_sve())
		return;

	vq = sve_vq_from_vl(task->thread.sve_vl);
	for (i = 0; i < 32; ++i)
		memcpy(ZREG(sst, vq, i), &fst->vregs[i],
		       sizeof(fst->vregs[i]));
}

/*
 * Transfer the SVE state in task->thread.sve_state to
 * task->thread.uw.fpsimd_state.
 *
 * Task can be a non-runnable task, or current.  In the latter case,
 * softirqs (and preemption) must be disabled.
 * task->thread.sve_state must point to at least sve_state_size(task)
 * bytes of allocated kernel memory.
 * task->thread.sve_state must be up to date before calling this function.
 */
static void sve_to_fpsimd(struct task_struct *task)
{
	unsigned int vq;
	void const *sst = task->thread.sve_state;
	struct user_fpsimd_state *fst = &task->thread.uw.fpsimd_state;
	unsigned int i;

	if (!system_supports_sve())
		return;

	vq = sve_vq_from_vl(task->thread.sve_vl);
	for (i = 0; i < 32; ++i)
		memcpy(&fst->vregs[i], ZREG(sst, vq, i),
		       sizeof(fst->vregs[i]));
}

#ifdef CONFIG_ARM64_SVE

/*
 * Return how many bytes of memory are required to store the full SVE
 * state for task, given task's currently configured vector length.
 */
size_t sve_state_size(struct task_struct const *task)
{
	return SVE_SIG_REGS_SIZE(sve_vq_from_vl(task->thread.sve_vl));
}

/*
 * Ensure that task->thread.sve_state is allocated and sufficiently large.
 *
 * This function should be used only in preparation for replacing
 * task->thread.sve_state with new data.  The memory is always zeroed
 * here to prevent stale data from showing through: this is done in
 * the interest of testability and predictability: except in the
 * do_sve_acc() case, there is no ABI requirement to hide stale data
 * written previously by the task.
 */
void sve_alloc(struct task_struct *task)
{
	if (task->thread.sve_state) {
		memset(task->thread.sve_state, 0, sve_state_size(task));
		return;
	}

	/* This is a small allocation (maximum ~8KB) and Should Not Fail. */
	task->thread.sve_state =
		kzalloc(sve_state_size(task), GFP_KERNEL);

	/*
	 * If future SVE revisions can have larger vectors though,
	 * this may cease to be true:
	 */
	BUG_ON(!task->thread.sve_state);
}


/*
 * Ensure that task->thread.sve_state is up to date with respect to
 * the user task, irrespective of whether SVE is in use or not.
 *
 * This should only be called by ptrace.  task must be non-runnable.
 * task->thread.sve_state must point to at least sve_state_size(task)
 * bytes of allocated kernel memory.
 */
void fpsimd_sync_to_sve(struct task_struct *task)
{
	if (!test_tsk_thread_flag(task, TIF_SVE))
		fpsimd_to_sve(task);
}

/*
 * Ensure that task->thread.uw.fpsimd_state is up to date with respect to
 * the user task, irrespective of whether SVE is in use or not.
 *
 * This should only be called by ptrace.  task must be non-runnable.
 * task->thread.sve_state must point to at least sve_state_size(task)
 * bytes of allocated kernel memory.
 */
void sve_sync_to_fpsimd(struct task_struct *task)
{
	if (test_tsk_thread_flag(task, TIF_SVE))
		sve_to_fpsimd(task);
}

/*
 * Ensure that task->thread.sve_state is up to date with respect to
 * the task->thread.uw.fpsimd_state.
 *
 * This should only be called by ptrace to merge new FPSIMD register
 * values into a task for which SVE is currently active.
 * task must be non-runnable.
 * task->thread.sve_state must point to at least sve_state_size(task)
 * bytes of allocated kernel memory.
 * task->thread.uw.fpsimd_state must already have been initialised with
 * the new FPSIMD register values to be merged in.
 */
void sve_sync_from_fpsimd_zeropad(struct task_struct *task)
{
	unsigned int vq;
	void *sst = task->thread.sve_state;
	struct user_fpsimd_state const *fst = &task->thread.uw.fpsimd_state;
	unsigned int i;

	if (!test_tsk_thread_flag(task, TIF_SVE))
		return;

	vq = sve_vq_from_vl(task->thread.sve_vl);

	memset(sst, 0, SVE_SIG_REGS_SIZE(vq));

	for (i = 0; i < 32; ++i)
		memcpy(ZREG(sst, vq, i), &fst->vregs[i],
		       sizeof(fst->vregs[i]));
}

int sve_set_vector_length(struct task_struct *task,
			  unsigned long vl, unsigned long flags)
{
	if (flags & ~(unsigned long)(PR_SVE_VL_INHERIT |
				     PR_SVE_SET_VL_ONEXEC))
		return -EINVAL;

	if (!sve_vl_valid(vl))
		return -EINVAL;

	/*
	 * Clamp to the maximum vector length that VL-agnostic SVE code can
	 * work with.  A flag may be assigned in the future to allow setting
	 * of larger vector lengths without confusing older software.
	 */
	if (vl > SVE_VL_ARCH_MAX)
		vl = SVE_VL_ARCH_MAX;

	vl = find_supported_vector_length(vl);

	if (flags & (PR_SVE_VL_INHERIT |
		     PR_SVE_SET_VL_ONEXEC))
		task->thread.sve_vl_onexec = vl;
	else
		/* Reset VL to system default on next exec: */
		task->thread.sve_vl_onexec = 0;

	/* Only actually set the VL if not deferred: */
	if (flags & PR_SVE_SET_VL_ONEXEC)
		goto out;

	if (vl == task->thread.sve_vl)
		goto out;

	/*
	 * To ensure the FPSIMD bits of the SVE vector registers are preserved,
	 * write any live register state back to task_struct, and convert to a
	 * non-SVE thread.
	 */
	if (task == current) {
		local_bh_disable();

		fpsimd_save();
	}

	fpsimd_flush_task_state(task);
	if (test_and_clear_tsk_thread_flag(task, TIF_SVE))
		sve_to_fpsimd(task);

	if (task == current)
		local_bh_enable();

	/*
	 * Force reallocation of task SVE state to the correct size
	 * on next use:
	 */
	sve_free(task);

	task->thread.sve_vl = vl;

out:
	update_tsk_thread_flag(task, TIF_SVE_VL_INHERIT,
			       flags & PR_SVE_VL_INHERIT);

	return 0;
}

/*
 * Encode the current vector length and flags for return.
 * This is only required for prctl(): ptrace has separate fields
 *
 * flags are as for sve_set_vector_length().
 */
static int sve_prctl_status(unsigned long flags)
{
	int ret;

	if (flags & PR_SVE_SET_VL_ONEXEC)
		ret = current->thread.sve_vl_onexec;
	else
		ret = current->thread.sve_vl;

	if (test_thread_flag(TIF_SVE_VL_INHERIT))
		ret |= PR_SVE_VL_INHERIT;

	return ret;
}

/* PR_SVE_SET_VL */
int sve_set_current_vl(unsigned long arg)
{
	unsigned long vl, flags;
	int ret;

	vl = arg & PR_SVE_VL_LEN_MASK;
	flags = arg & ~vl;

	if (!system_supports_sve())
		return -EINVAL;

	ret = sve_set_vector_length(current, vl, flags);
	if (ret)
		return ret;

	return sve_prctl_status(flags);
}

/* PR_SVE_GET_VL */
int sve_get_current_vl(void)
{
	if (!system_supports_sve())
		return -EINVAL;

	return sve_prctl_status(0);
}
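
/*
 * Sketch of how these prctl()s are driven from userspace (illustrative only,
 * error handling omitted; the constants come from <linux/prctl.h>):
 *
 *	// Ask for a 32-byte (256-bit) vector length and keep it across
 *	// future execve() calls:
 *	prctl(PR_SVE_SET_VL, 32 | PR_SVE_VL_INHERIT);
 *
 *	// Query the current configuration; the low bits encode the VL in
 *	// bytes, and the PR_SVE_VL_INHERIT bit reports the inherit flag:
 *	int ret = prctl(PR_SVE_GET_VL);
 *	int vl = ret & PR_SVE_VL_LEN_MASK;
 *
 * As with the sysctl, an unsupported length is rounded down via
 * find_supported_vector_length().
 */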

static void sve_probe_vqs(DECLARE_BITMAP(map, SVE_VQ_MAX))
{
	unsigned int vq, vl;
	unsigned long zcr;

	bitmap_zero(map, SVE_VQ_MAX);

	zcr = ZCR_ELx_LEN_MASK;
	zcr = read_sysreg_s(SYS_ZCR_EL1) & ~zcr;

	for (vq = SVE_VQ_MAX; vq >= SVE_VQ_MIN; --vq) {
		write_sysreg_s(zcr | (vq - 1), SYS_ZCR_EL1); /* self-syncing */
		vl = sve_get_vl();
		vq = sve_vq_from_vl(vl); /* skip intervening lengths */
		set_bit(vq_to_bit(vq), map);
	}
}
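
/*
 * For instance (illustrative), on a CPU whose hardware implements 128-,
 * 256- and 512-bit vectors, the loop above leaves exactly the bits for
 * VQ 1, 2 and 4 set in the map: requesting each candidate LEN value and
 * reading back the resulting VL collapses unimplemented lengths onto the
 * next supported length below.
 */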

/*
 * Initialise the set of known supported VQs for the boot CPU.
 * This is called during kernel boot, before secondary CPUs are brought up.
 */
void __init sve_init_vq_map(void)
{
	sve_probe_vqs(sve_vq_map);
	bitmap_copy(sve_vq_partial_map, sve_vq_map, SVE_VQ_MAX);
}

/*
 * If we haven't committed to the set of supported VQs yet, filter out
 * those not supported by the current CPU.
 * This function is called during the bring-up of early secondary CPUs only.
 */
void sve_update_vq_map(void)
{
	DECLARE_BITMAP(tmp_map, SVE_VQ_MAX);

	sve_probe_vqs(tmp_map);
	bitmap_and(sve_vq_map, sve_vq_map, tmp_map, SVE_VQ_MAX);
	bitmap_or(sve_vq_partial_map, sve_vq_partial_map, tmp_map, SVE_VQ_MAX);
}

/*
 * Check whether the current CPU supports all VQs in the committed set.
 * This function is called during the bring-up of late secondary CPUs only.
 */
int sve_verify_vq_map(void)
{
	DECLARE_BITMAP(tmp_map, SVE_VQ_MAX);
	unsigned long b;

	sve_probe_vqs(tmp_map);

	bitmap_complement(tmp_map, tmp_map, SVE_VQ_MAX);
	if (bitmap_intersects(tmp_map, sve_vq_map, SVE_VQ_MAX)) {
		pr_warn("SVE: cpu%d: Required vector length(s) missing\n",
			smp_processor_id());
		return -EINVAL;
	}

	if (!IS_ENABLED(CONFIG_KVM) || !is_hyp_mode_available())
		return 0;

	/*
	 * For KVM, it is necessary to ensure that this CPU doesn't
	 * support any vector length that guests may have probed as
	 * unsupported.
	 */

	/* Recover the set of supported VQs: */
	bitmap_complement(tmp_map, tmp_map, SVE_VQ_MAX);
	/* Find VQs supported that are not globally supported: */
	bitmap_andnot(tmp_map, tmp_map, sve_vq_map, SVE_VQ_MAX);

	/* Find the lowest such VQ, if any: */
	b = find_last_bit(tmp_map, SVE_VQ_MAX);
	if (b >= SVE_VQ_MAX)
		return 0; /* no mismatches */

	/*
	 * Mismatches above sve_max_virtualisable_vl are fine, since
	 * no guest is allowed to configure ZCR_EL2.LEN to exceed this:
	 */
	if (sve_vl_from_vq(bit_to_vq(b)) <= sve_max_virtualisable_vl) {
		pr_warn("SVE: cpu%d: Unsupported vector length(s) present\n",
			smp_processor_id());
		return -EINVAL;
	}

	return 0;
}

static void __init sve_efi_setup(void)
{
	if (!IS_ENABLED(CONFIG_EFI))
		return;

	/*
	 * alloc_percpu() warns and prints a backtrace if this goes wrong.
	 * This is evidence of a crippled system and we are returning void,
	 * so no attempt is made to handle this situation here.
	 */
	if (!sve_vl_valid(sve_max_vl))
		goto fail;

	efi_sve_state = __alloc_percpu(
		SVE_SIG_REGS_SIZE(sve_vq_from_vl(sve_max_vl)), SVE_VQ_BYTES);
	if (!efi_sve_state)
		goto fail;

	return;

fail:
	panic("Cannot allocate percpu memory for EFI SVE save/restore");
}

/*
 * Enable SVE for EL1.
 * Intended for use by the cpufeatures code during CPU boot.
 */
void sve_kernel_enable(const struct arm64_cpu_capabilities *__always_unused p)
{
	write_sysreg(read_sysreg(CPACR_EL1) | CPACR_EL1_ZEN_EL1EN, CPACR_EL1);
	isb();
}

/*
 * Read the pseudo-ZCR used by cpufeatures to identify the supported SVE
 * vector length.
 *
 * Use only if SVE is present.
 * This function clobbers the SVE vector length.
 */
u64 read_zcr_features(void)
{
	u64 zcr;
	unsigned int vq_max;

	/*
	 * Set the maximum possible VL, and write zeroes to all other
	 * bits to see if they stick.
	 */
	sve_kernel_enable(NULL);
	write_sysreg_s(ZCR_ELx_LEN_MASK, SYS_ZCR_EL1);

	zcr = read_sysreg_s(SYS_ZCR_EL1);
	zcr &= ~(u64)ZCR_ELx_LEN_MASK; /* find sticky 1s outside LEN field */
	vq_max = sve_vq_from_vl(sve_get_vl());
	zcr |= vq_max - 1; /* set LEN field to maximum effective value */

	return zcr;
}

void __init sve_setup(void)
{
	u64 zcr;
	DECLARE_BITMAP(tmp_map, SVE_VQ_MAX);
	unsigned long b;

	if (!system_supports_sve())
		return;

	/*
	 * The SVE architecture mandates support for 128-bit vectors,
	 * so sve_vq_map must have at least SVE_VQ_MIN set.
	 * If something went wrong, at least try to patch it up:
	 */
	if (WARN_ON(!test_bit(vq_to_bit(SVE_VQ_MIN), sve_vq_map)))
		set_bit(vq_to_bit(SVE_VQ_MIN), sve_vq_map);

	zcr = read_sanitised_ftr_reg(SYS_ZCR_EL1);
	sve_max_vl = sve_vl_from_vq((zcr & ZCR_ELx_LEN_MASK) + 1);

	/*
	 * Sanity-check that the max VL we determined through CPU features
	 * corresponds properly to sve_vq_map.  If not, do our best:
	 */
	if (WARN_ON(sve_max_vl != find_supported_vector_length(sve_max_vl)))
		sve_max_vl = find_supported_vector_length(sve_max_vl);

	/*
	 * For the default VL, pick the maximum supported value <= 64.
	 * VL == 64 is guaranteed not to grow the signal frame.
	 */
	sve_default_vl = find_supported_vector_length(64);

	bitmap_andnot(tmp_map, sve_vq_partial_map, sve_vq_map,
		      SVE_VQ_MAX);

	b = find_last_bit(tmp_map, SVE_VQ_MAX);
	if (b >= SVE_VQ_MAX)
		/* No non-virtualisable VLs found */
		sve_max_virtualisable_vl = SVE_VQ_MAX;
	else if (WARN_ON(b == SVE_VQ_MAX - 1))
		/* No virtualisable VLs?  This is architecturally forbidden. */
		sve_max_virtualisable_vl = SVE_VQ_MIN;
	else /* b + 1 < SVE_VQ_MAX */
		sve_max_virtualisable_vl = sve_vl_from_vq(bit_to_vq(b + 1));

	if (sve_max_virtualisable_vl > sve_max_vl)
		sve_max_virtualisable_vl = sve_max_vl;

	pr_info("SVE: maximum available vector length %u bytes per vector\n",
		sve_max_vl);
	pr_info("SVE: default vector length %u bytes per vector\n",
		sve_default_vl);

	/* KVM decides whether to support mismatched systems. Just warn here: */
	if (sve_max_virtualisable_vl < sve_max_vl)
		pr_warn("SVE: unvirtualisable vector lengths present\n");

	sve_efi_setup();
}

/*
 * Called from the put_task_struct() path, which cannot get here
 * unless dead_task is really dead and not schedulable.
 */
void fpsimd_release_task(struct task_struct *dead_task)
{
	__sve_free(dead_task);
}

#endif /* CONFIG_ARM64_SVE */

/*
 * Trapped SVE access
 *
 * Storage is allocated for the full SVE state, the current FPSIMD
 * register contents are migrated across, and TIF_SVE is set so that
 * the SVE access trap will be disabled the next time this task
 * reaches ret_to_user.
 *
 * TIF_SVE should be clear on entry: otherwise, task_fpsimd_load()
 * would have disabled the SVE access trap for userspace during
 * ret_to_user, making an SVE access trap impossible in that case.
 */
asmlinkage void do_sve_acc(unsigned int esr, struct pt_regs *regs)
{
	/* Even if we chose not to use SVE, the hardware could still trap: */
	if (unlikely(!system_supports_sve()) || WARN_ON(is_compat_task())) {
		force_signal_inject(SIGILL, ILL_ILLOPC, regs->pc);
		return;
	}

	sve_alloc(current);

	local_bh_disable();

	fpsimd_save();

	/* Force ret_to_user to reload the registers: */
	fpsimd_flush_task_state(current);

	fpsimd_to_sve(current);
	if (test_and_set_thread_flag(TIF_SVE))
		WARN_ON(1); /* SVE access shouldn't have trapped */

	local_bh_enable();
}

/*
 * Trapped FP/ASIMD access.
 */
asmlinkage void do_fpsimd_acc(unsigned int esr, struct pt_regs *regs)
{
	/* TODO: implement lazy context saving/restoring */
	WARN_ON(1);
}

/*
 * Raise a SIGFPE for the current process.
 */
asmlinkage void do_fpsimd_exc(unsigned int esr, struct pt_regs *regs)
{
	unsigned int si_code = FPE_FLTUNK;

	if (esr & ESR_ELx_FP_EXC_TFV) {
		if (esr & FPEXC_IOF)
			si_code = FPE_FLTINV;
		else if (esr & FPEXC_DZF)
			si_code = FPE_FLTDIV;
		else if (esr & FPEXC_OFF)
			si_code = FPE_FLTOVF;
		else if (esr & FPEXC_UFF)
			si_code = FPE_FLTUND;
		else if (esr & FPEXC_IXF)
			si_code = FPE_FLTRES;
	}

	send_sig_fault(SIGFPE, si_code,
		       (void __user *)instruction_pointer(regs),
		       current);
}

void fpsimd_thread_switch(struct task_struct *next)
{
	bool wrong_task, wrong_cpu;

	if (!system_supports_fpsimd())
		return;

	/* Save unsaved fpsimd state, if any: */
	fpsimd_save();

	/*
	 * Fix up TIF_FOREIGN_FPSTATE to correctly describe next's
	 * state.  For kernel threads, FPSIMD registers are never loaded
	 * and wrong_task and wrong_cpu will always be true.
	 */
	wrong_task = __this_cpu_read(fpsimd_last_state.st) !=
					&next->thread.uw.fpsimd_state;
	wrong_cpu = next->thread.fpsimd_cpu != smp_processor_id();

	update_tsk_thread_flag(next, TIF_FOREIGN_FPSTATE,
			       wrong_task || wrong_cpu);
}

void fpsimd_flush_thread(void)
{
	int vl, supported_vl;

	if (!system_supports_fpsimd())
		return;

	local_bh_disable();

	fpsimd_flush_task_state(current);
	memset(&current->thread.uw.fpsimd_state, 0,
	       sizeof(current->thread.uw.fpsimd_state));

	if (system_supports_sve()) {
		clear_thread_flag(TIF_SVE);
		sve_free(current);

		/*
		 * Reset the task vector length as required.
		 * This is where we ensure that all user tasks have a valid
		 * vector length configured: no kernel task can become a user
		 * task without an exec and hence a call to this function.
		 * By the time the first call to this function is made, all
		 * early hardware probing is complete, so sve_default_vl
		 * should be valid.
		 * If a bug causes this to go wrong, we make some noise and
		 * try to fudge thread.sve_vl to a safe value here.
		 */
		vl = current->thread.sve_vl_onexec ?
			current->thread.sve_vl_onexec : sve_default_vl;

		if (WARN_ON(!sve_vl_valid(vl)))
			vl = SVE_VL_MIN;

		supported_vl = find_supported_vector_length(vl);
		if (WARN_ON(supported_vl != vl))
			vl = supported_vl;

		current->thread.sve_vl = vl;

		/*
		 * If the task is not set to inherit, ensure that the vector
		 * length will be reset by a subsequent exec:
		 */
		if (!test_thread_flag(TIF_SVE_VL_INHERIT))
			current->thread.sve_vl_onexec = 0;
	}

	local_bh_enable();
}

/*
 * Save the userland FPSIMD state of 'current' to memory, but only if the state
 * currently held in the registers does in fact belong to 'current'
 */
void fpsimd_preserve_current_state(void)
{
	if (!system_supports_fpsimd())
		return;

	local_bh_disable();
	fpsimd_save();
	local_bh_enable();
}

/*
 * Like fpsimd_preserve_current_state(), but ensure that
 * current->thread.uw.fpsimd_state is updated so that it can be copied to
 * the signal frame.
 */
void fpsimd_signal_preserve_current_state(void)
{
	fpsimd_preserve_current_state();
	if (system_supports_sve() && test_thread_flag(TIF_SVE))
		sve_to_fpsimd(current);
}

/*
 * Associate current's FPSIMD context with this cpu
 * Preemption must be disabled when calling this function.
 */
void fpsimd_bind_task_to_cpu(void)
{
	struct fpsimd_last_state_struct *last =
		this_cpu_ptr(&fpsimd_last_state);

	last->st = &current->thread.uw.fpsimd_state;
	current->thread.fpsimd_cpu = smp_processor_id();

	if (system_supports_sve()) {
		/* Toggle SVE trapping for userspace if needed */
		if (test_thread_flag(TIF_SVE))
			sve_user_enable();
		else
			sve_user_disable();

		/* Serialised by exception return to user */
	}
}

void fpsimd_bind_state_to_cpu(struct user_fpsimd_state *st)
{
	struct fpsimd_last_state_struct *last =
		this_cpu_ptr(&fpsimd_last_state);

	WARN_ON(!in_softirq() && !irqs_disabled());

	last->st = st;
}

/*
 * Load the userland FPSIMD state of 'current' from memory, but only if the
 * FPSIMD state already held in the registers is /not/ the most recent FPSIMD
 * state of 'current'
 */
void fpsimd_restore_current_state(void)
{
	if (!system_supports_fpsimd())
		return;

	local_bh_disable();

	if (test_and_clear_thread_flag(TIF_FOREIGN_FPSTATE)) {
		task_fpsimd_load();
		fpsimd_bind_task_to_cpu();
	}

	local_bh_enable();
}

/*
 * Load an updated userland FPSIMD state for 'current' from memory and set the
 * flag that indicates that the FPSIMD register contents are the most recent
 * FPSIMD state of 'current'
 */
void fpsimd_update_current_state(struct user_fpsimd_state const *state)
{
	if (!system_supports_fpsimd())
		return;

	local_bh_disable();

	current->thread.uw.fpsimd_state = *state;
	if (system_supports_sve() && test_thread_flag(TIF_SVE))
		fpsimd_to_sve(current);

	task_fpsimd_load();
	fpsimd_bind_task_to_cpu();

	clear_thread_flag(TIF_FOREIGN_FPSTATE);

	local_bh_enable();
}

/*
 * Invalidate live CPU copies of task t's FPSIMD state
 *
 * This function may be called with preemption enabled.  The barrier()
 * ensures that the assignment to fpsimd_cpu is visible to any
 * preemption/softirq that could race with set_tsk_thread_flag(), so
 * that TIF_FOREIGN_FPSTATE cannot be spuriously re-cleared.
 *
 * The final barrier ensures that TIF_FOREIGN_FPSTATE is seen set by any
 * subsequent code.
 */
void fpsimd_flush_task_state(struct task_struct *t)
{
	t->thread.fpsimd_cpu = NR_CPUS;

	barrier();
	set_tsk_thread_flag(t, TIF_FOREIGN_FPSTATE);

	barrier();
}

/*
 * Invalidate any task's FPSIMD state that is present on this cpu.
 * This function must be called with softirqs disabled.
 */
void fpsimd_flush_cpu_state(void)
{
	__this_cpu_write(fpsimd_last_state.st, NULL);
	set_thread_flag(TIF_FOREIGN_FPSTATE);
}

#ifdef CONFIG_KERNEL_MODE_NEON

DEFINE_PER_CPU(bool, kernel_neon_busy);
EXPORT_PER_CPU_SYMBOL(kernel_neon_busy);

/*
 * Kernel-side NEON support functions
 */

/*
 * kernel_neon_begin(): obtain the CPU FPSIMD registers for use by the calling
 * context
 *
 * Must not be called unless may_use_simd() returns true.
 * Task context in the FPSIMD registers is saved back to memory as necessary.
 *
 * A matching call to kernel_neon_end() must be made before returning from the
 * calling context.
 *
 * The caller may freely use the FPSIMD registers until kernel_neon_end() is
 * called.
 */
void kernel_neon_begin(void)
{
	if (WARN_ON(!system_supports_fpsimd()))
		return;

	BUG_ON(!may_use_simd());

	local_bh_disable();

	__this_cpu_write(kernel_neon_busy, true);

	/* Save unsaved fpsimd state, if any: */
	fpsimd_save();

	/* Invalidate any task state remaining in the fpsimd regs: */
	fpsimd_flush_cpu_state();

	preempt_disable();

	local_bh_enable();
}
EXPORT_SYMBOL(kernel_neon_begin);

/*
 * kernel_neon_end(): give the CPU FPSIMD registers back to the current task
 *
 * Must be called from a context in which kernel_neon_begin() was previously
 * called, with no call to kernel_neon_end() in the meantime.
 *
 * The caller must not use the FPSIMD registers after this function is called,
 * unless kernel_neon_begin() is called again in the meantime.
 */
void kernel_neon_end(void)
{
	bool busy;

	if (!system_supports_fpsimd())
		return;

	busy = __this_cpu_xchg(kernel_neon_busy, false);
	WARN_ON(!busy);	/* No matching kernel_neon_begin()? */

	preempt_enable();
}
EXPORT_SYMBOL(kernel_neon_end);
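
/*
 * Typical caller pattern (sketch based on the rules documented above; the
 * functions shown are hypothetical):
 *
 *	void my_accelerated_op(u8 *dst, const u8 *src, int len)
 *	{
 *		if (!may_use_simd()) {
 *			my_scalar_fallback(dst, src, len);
 *			return;
 *		}
 *
 *		kernel_neon_begin();
 *		// ... NEON/FPSIMD work, e.g. via an assembly helper ...
 *		kernel_neon_end();
 *	}
 *
 * Note that preemption remains disabled between begin and end, so
 * long-running work should be split into chunks, each bracketed by its own
 * kernel_neon_begin()/kernel_neon_end() pair.
 */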

#ifdef CONFIG_EFI

static DEFINE_PER_CPU(struct user_fpsimd_state, efi_fpsimd_state);
static DEFINE_PER_CPU(bool, efi_fpsimd_state_used);
static DEFINE_PER_CPU(bool, efi_sve_state_used);

/*
 * EFI runtime services support functions
 *
 * The ABI for EFI runtime services allows EFI to use FPSIMD during the call.
 * This means that for EFI (and only for EFI), we have to assume that FPSIMD
 * is always used rather than being an optional accelerator.
 *
 * These functions provide the necessary support for ensuring FPSIMD
 * save/restore in the contexts from which EFI is used.
 *
 * Do not use them for any other purpose -- if tempted to do so, you are
 * either doing something wrong or you need to propose some refactoring.
 */

/*
 * __efi_fpsimd_begin(): prepare FPSIMD for making an EFI runtime services call
 */
void __efi_fpsimd_begin(void)
{
	if (!system_supports_fpsimd())
		return;

	WARN_ON(preemptible());

	if (may_use_simd()) {
		kernel_neon_begin();
	} else {
		/*
		 * If !efi_sve_state, SVE can't be in use yet and doesn't need
		 * preserving:
		 */
		if (system_supports_sve() && likely(efi_sve_state)) {
			char *sve_state = this_cpu_ptr(efi_sve_state);

			__this_cpu_write(efi_sve_state_used, true);

			sve_save_state(sve_state + sve_ffr_offset(sve_max_vl),
				       &this_cpu_ptr(&efi_fpsimd_state)->fpsr);
		} else {
			fpsimd_save_state(this_cpu_ptr(&efi_fpsimd_state));
		}

		__this_cpu_write(efi_fpsimd_state_used, true);
	}
}

/*
 * __efi_fpsimd_end(): clean up FPSIMD after an EFI runtime services call
 */
void __efi_fpsimd_end(void)
{
	if (!system_supports_fpsimd())
		return;

	if (!__this_cpu_xchg(efi_fpsimd_state_used, false)) {
		kernel_neon_end();
	} else {
		if (system_supports_sve() &&
		    likely(__this_cpu_read(efi_sve_state_used))) {
			char const *sve_state = this_cpu_ptr(efi_sve_state);

			sve_load_state(sve_state + sve_ffr_offset(sve_max_vl),
				       &this_cpu_ptr(&efi_fpsimd_state)->fpsr,
				       sve_vq_from_vl(sve_get_vl()) - 1);

			__this_cpu_write(efi_sve_state_used, false);
		} else {
			fpsimd_load_state(this_cpu_ptr(&efi_fpsimd_state));
		}
	}
}

#endif /* CONFIG_EFI */

#endif /* CONFIG_KERNEL_MODE_NEON */

#ifdef CONFIG_CPU_PM
static int fpsimd_cpu_pm_notifier(struct notifier_block *self,
				  unsigned long cmd, void *v)
{
	switch (cmd) {
	case CPU_PM_ENTER:
		fpsimd_save();
		fpsimd_flush_cpu_state();
		break;
	case CPU_PM_EXIT:
		break;
	case CPU_PM_ENTER_FAILED:
	default:
		return NOTIFY_DONE;
	}
	return NOTIFY_OK;
}

static struct notifier_block fpsimd_cpu_pm_notifier_block = {
	.notifier_call = fpsimd_cpu_pm_notifier,
};

static void __init fpsimd_pm_init(void)
{
	cpu_pm_register_notifier(&fpsimd_cpu_pm_notifier_block);
}

#else
static inline void fpsimd_pm_init(void) { }
#endif /* CONFIG_CPU_PM */

#ifdef CONFIG_HOTPLUG_CPU
static int fpsimd_cpu_dead(unsigned int cpu)
{
	per_cpu(fpsimd_last_state.st, cpu) = NULL;
	return 0;
}

static inline void fpsimd_hotplug_init(void)
{
	cpuhp_setup_state_nocalls(CPUHP_ARM64_FPSIMD_DEAD, "arm64/fpsimd:dead",
				  NULL, fpsimd_cpu_dead);
}

#else
static inline void fpsimd_hotplug_init(void) { }
#endif

/*
 * FP/SIMD support code initialisation.
 */
static int __init fpsimd_init(void)
{
	if (elf_hwcap & HWCAP_FP) {
		fpsimd_pm_init();
		fpsimd_hotplug_init();
	} else {
		pr_notice("Floating-point is not implemented\n");
	}

	if (!(elf_hwcap & HWCAP_ASIMD))
		pr_notice("Advanced SIMD is not implemented\n");

	return sve_sysctl_init();
}
core_initcall(fpsimd_init);