/*
 * FP/SIMD context switching and fault handling
 *
 * Copyright (C) 2012 ARM Ltd.
 * Author: Catalin Marinas <catalin.marinas@arm.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program.  If not, see <http://www.gnu.org/licenses/>.
 */

#include <linux/bottom_half.h>
#include <linux/bug.h>
#include <linux/compat.h>
#include <linux/cpu.h>
#include <linux/cpu_pm.h>
#include <linux/kernel.h>
#include <linux/linkage.h>
#include <linux/irqflags.h>
#include <linux/init.h>
#include <linux/percpu.h>
#include <linux/preempt.h>
#include <linux/ptrace.h>
#include <linux/sched/signal.h>
#include <linux/sched/task_stack.h>
#include <linux/signal.h>
#include <linux/slab.h>

#include <asm/fpsimd.h>
#include <asm/cputype.h>
#include <asm/simd.h>
#include <asm/sigcontext.h>
#include <asm/sysreg.h>
#include <asm/traps.h>

#define FPEXC_IOF	(1 << 0)
#define FPEXC_DZF	(1 << 1)
#define FPEXC_OFF	(1 << 2)
#define FPEXC_UFF	(1 << 3)
#define FPEXC_IXF	(1 << 4)
#define FPEXC_IDF	(1 << 7)

/*
 * (Note: in this discussion, statements about FPSIMD apply equally to SVE.)
 *
 * In order to reduce the number of times the FPSIMD state is needlessly saved
 * and restored, we need to keep track of two things:
 * (a) for each task, we need to remember which CPU was the last one to have
 *     the task's FPSIMD state loaded into its FPSIMD registers;
 * (b) for each CPU, we need to remember which task's userland FPSIMD state has
 *     been loaded into its FPSIMD registers most recently, or whether it has
 *     been used to perform kernel mode NEON in the meantime.
 *
 * For (a), we add a 'cpu' field to struct fpsimd_state, which gets updated to
 * the id of the current CPU every time the state is loaded onto a CPU. For (b),
 * we add the per-cpu variable 'fpsimd_last_state' (below), which contains the
 * address of the userland FPSIMD state most recently loaded onto the CPU, or
 * NULL if kernel mode NEON has been performed after that.
 *
 * With this in place, we no longer have to restore the next FPSIMD state right
 * when switching between tasks. Instead, we can defer this check to userland
 * resume, at which time we verify whether the CPU's fpsimd_last_state and the
 * task's fpsimd_state.cpu are still mutually in sync. If this is the case, we
 * can omit the FPSIMD restore.
 *
 * As an optimization, we use the thread_info flag TIF_FOREIGN_FPSTATE to
 * indicate whether or not the userland FPSIMD state of the current task is
 * present in the registers. The flag is set unless the FPSIMD registers of this
 * CPU currently contain the most recent userland FPSIMD state of the current
 * task.
 *
 * In order to allow softirq handlers to use FPSIMD, kernel_neon_begin() may
 * save the task's FPSIMD context back to task_struct from softirq context.
 * To prevent this from racing with the manipulation of the task's FPSIMD state
 * from task context and thereby corrupting the state, it is necessary to
 * protect any manipulation of a task's fpsimd_state or TIF_FOREIGN_FPSTATE
 * flag with local_bh_disable() unless softirqs are already masked.
 *
 * For a certain task, the sequence may look something like this:
 * - the task gets scheduled in; if both the task's fpsimd_state.cpu field
 *   contains the id of the current CPU, and the CPU's fpsimd_last_state per-cpu
 *   variable points to the task's fpsimd_state, the TIF_FOREIGN_FPSTATE flag is
 *   cleared, otherwise it is set;
 *
 * - the task returns to userland; if TIF_FOREIGN_FPSTATE is set, the task's
 *   userland FPSIMD state is copied from memory to the registers, the task's
 *   fpsimd_state.cpu field is set to the id of the current CPU, the current
 *   CPU's fpsimd_last_state pointer is set to this task's fpsimd_state and the
 *   TIF_FOREIGN_FPSTATE flag is cleared;
 *
 * - the task executes an ordinary syscall; upon return to userland, the
 *   TIF_FOREIGN_FPSTATE flag will still be cleared, so no FPSIMD state is
 *   restored;
 *
 * - the task executes a syscall which executes some NEON instructions; this is
 *   preceded by a call to kernel_neon_begin(), which copies the task's FPSIMD
 *   register contents to memory, clears the fpsimd_last_state per-cpu variable
 *   and sets the TIF_FOREIGN_FPSTATE flag;
 *
 * - the task gets preempted after kernel_neon_end() is called; as we have not
 *   returned from the 2nd syscall yet, TIF_FOREIGN_FPSTATE is still set so
 *   whatever is in the FPSIMD registers is not saved to memory, but discarded.
 */
static DEFINE_PER_CPU(struct fpsimd_state *, fpsimd_last_state);

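/*
 * Illustrative sketch only, not part of this file's logic ('tsk' is a
 * placeholder task): the deferred check described above reduces to a
 * two-way link test between the per-cpu pointer and the task's 'cpu'
 * field, roughly:
 *
 *	struct fpsimd_state *st = &tsk->thread.fpsimd_state;
 *
 *	if (__this_cpu_read(fpsimd_last_state) == st &&
 *	    st->cpu == smp_processor_id())
 *		... registers already hold tsk's state: skip the reload ...
 *	else
 *		... stale: set TIF_FOREIGN_FPSTATE, reload at ret_to_user ...
 *
 * fpsimd_thread_switch() and fpsimd_restore_current_state() below
 * implement the real thing.
 */
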
/* Default VL for tasks that don't set it explicitly: */
static int sve_default_vl = SVE_VL_MIN;

/*
 * Call __sve_free() directly only if you know task can't be scheduled
 * or preempted.
 */
static void __sve_free(struct task_struct *task)
{
	kfree(task->thread.sve_state);
	task->thread.sve_state = NULL;
}

static void sve_free(struct task_struct *task)
{
	WARN_ON(test_tsk_thread_flag(task, TIF_SVE));

	__sve_free(task);
}

/* Offset of FFR in the SVE register dump */
static size_t sve_ffr_offset(int vl)
{
	return SVE_SIG_FFR_OFFSET(sve_vq_from_vl(vl)) - SVE_SIG_REGS_OFFSET;
}

static void *sve_pffr(struct task_struct *task)
{
	return (char *)task->thread.sve_state +
		sve_ffr_offset(task->thread.sve_vl);
}

static void change_cpacr(u64 val, u64 mask)
{
	u64 cpacr = read_sysreg(CPACR_EL1);
	u64 new = (cpacr & ~mask) | val;

	if (new != cpacr)
		write_sysreg(new, CPACR_EL1);
}

static void sve_user_disable(void)
{
	change_cpacr(0, CPACR_EL1_ZEN_EL0EN);
}

static void sve_user_enable(void)
{
	change_cpacr(CPACR_EL1_ZEN_EL0EN, CPACR_EL1_ZEN_EL0EN);
}

/*
 * TIF_SVE controls whether a task can use SVE without trapping while
 * in userspace, and also the way a task's FPSIMD/SVE state is stored
 * in thread_struct.
 *
 * The kernel uses this flag to track whether a user task is actively
 * using SVE, and therefore whether full SVE register state needs to
 * be tracked.  If not, the cheaper FPSIMD context handling code can
 * be used instead of the more costly SVE equivalents.
 *
 *  * TIF_SVE set:
 *
 *    The task can execute SVE instructions while in userspace without
 *    trapping to the kernel.
 *
 *    When stored, Z0-Z31 (incorporating Vn in bits[127:0] or the
 *    corresponding Zn), P0-P15 and FFR are encoded in
 *    task->thread.sve_state, formatted appropriately for vector
 *    length task->thread.sve_vl.
 *
 *    task->thread.sve_state must point to a valid buffer at least
 *    sve_state_size(task) bytes in size.
 *
 *    During any syscall, the kernel may optionally clear TIF_SVE and
 *    discard the vector state except for the FPSIMD subset.
 *
 *  * TIF_SVE clear:
 *
 *    An attempt by the user task to execute an SVE instruction causes
 *    do_sve_acc() to be called, which does some preparation and then
 *    sets TIF_SVE.
 *
 *    When stored, FPSIMD registers V0-V31 are encoded in
 *    task->thread.fpsimd_state; bits [max : 128] for each of Z0-Z31 are
 *    logically zero but not stored anywhere; P0-P15 and FFR are not
 *    stored and have unspecified values from userspace's point of
 *    view.  For hygiene purposes, the kernel zeroes them on next use,
 *    but userspace is discouraged from relying on this.
 *
 *    task->thread.sve_state does not need to be non-NULL, valid or any
 *    particular size: it must not be dereferenced.
 *
 *  * FPSR and FPCR are always stored in task->thread.fpsimd_state
 *    irrespective of whether TIF_SVE is clear or set, since these are
 *    not vector length dependent.
 */

/*
 * Update current's FPSIMD/SVE registers from thread_struct.
 *
 * This function should be called only when the FPSIMD/SVE state in
 * thread_struct is known to be up to date, when preparing to enter
 * userspace.
 *
 * Softirqs (and preemption) must be disabled.
 */
static void task_fpsimd_load(void)
{
	WARN_ON(!in_softirq() && !irqs_disabled());

	if (system_supports_sve() && test_thread_flag(TIF_SVE))
		sve_load_state(sve_pffr(current),
			       &current->thread.fpsimd_state.fpsr,
			       sve_vq_from_vl(current->thread.sve_vl) - 1);
	else
		fpsimd_load_state(&current->thread.fpsimd_state);

	if (system_supports_sve()) {
		/* Toggle SVE trapping for userspace if needed */
		if (test_thread_flag(TIF_SVE))
			sve_user_enable();
		else
			sve_user_disable();

		/* Serialised by exception return to user */
	}
}

/*
 * Ensure current's FPSIMD/SVE storage in thread_struct is up to date
 * with respect to the CPU registers.
 *
 * Softirqs (and preemption) must be disabled.
 */
static void task_fpsimd_save(void)
{
	WARN_ON(!in_softirq() && !irqs_disabled());

	if (!test_thread_flag(TIF_FOREIGN_FPSTATE)) {
		if (system_supports_sve() && test_thread_flag(TIF_SVE)) {
			if (WARN_ON(sve_get_vl() != current->thread.sve_vl)) {
				/*
				 * Can't save the user regs, so current would
				 * re-enter user with corrupt state.
				 * There's no way to recover, so kill it:
				 */
				force_signal_inject(
					SIGKILL, 0, current_pt_regs(), 0);
				return;
			}

			sve_save_state(sve_pffr(current),
				       &current->thread.fpsimd_state.fpsr);
		} else {
			fpsimd_save_state(&current->thread.fpsimd_state);
		}
	}
}

#define ZREG(sve_state, vq, n) ((char *)(sve_state) +		\
	(SVE_SIG_ZREG_OFFSET(vq, n) - SVE_SIG_REGS_OFFSET))

/*
 * Transfer the FPSIMD state in task->thread.fpsimd_state to
 * task->thread.sve_state.
 *
 * Task can be a non-runnable task, or current.  In the latter case,
 * softirqs (and preemption) must be disabled.
 * task->thread.sve_state must point to at least sve_state_size(task)
 * bytes of allocated kernel memory.
 * task->thread.fpsimd_state must be up to date before calling this function.
 */
static void fpsimd_to_sve(struct task_struct *task)
{
	unsigned int vq;
	void *sst = task->thread.sve_state;
	struct fpsimd_state const *fst = &task->thread.fpsimd_state;
	unsigned int i;

	if (!system_supports_sve())
		return;

	vq = sve_vq_from_vl(task->thread.sve_vl);
	for (i = 0; i < 32; ++i)
		memcpy(ZREG(sst, vq, i), &fst->vregs[i],
		       sizeof(fst->vregs[i]));
}

#ifdef CONFIG_ARM64_SVE

/*
 * Return how many bytes of memory are required to store the full SVE
 * state for task, given task's currently configured vector length.
 */
size_t sve_state_size(struct task_struct const *task)
{
	return SVE_SIG_REGS_SIZE(sve_vq_from_vl(task->thread.sve_vl));
}

/*
 * Ensure that task->thread.sve_state is allocated and sufficiently large.
 *
 * This function should be used only in preparation for replacing
 * task->thread.sve_state with new data.  The memory is always zeroed
 * here to prevent stale data from showing through; this is done in
 * the interest of testability and predictability: except in the
 * do_sve_acc() case, there is no ABI requirement to hide stale data
 * previously written by the task.
 */
void sve_alloc(struct task_struct *task)
{
	if (task->thread.sve_state) {
		memset(task->thread.sve_state, 0, sve_state_size(task));
		return;
	}

	/* This is a small allocation (maximum ~8KB) and Should Not Fail. */
	task->thread.sve_state =
		kzalloc(sve_state_size(task), GFP_KERNEL);

	/*
	 * If future SVE revisions can have larger vectors though,
	 * this may cease to be true:
	 */
	BUG_ON(!task->thread.sve_state);
}

/*
 * Called from the put_task_struct() path, which cannot get here
 * unless dead_task is really dead and not schedulable.
 */
void fpsimd_release_task(struct task_struct *dead_task)
{
	__sve_free(dead_task);
}

#endif /* CONFIG_ARM64_SVE */

/*
 * Trapped SVE access
 *
 * Storage is allocated for the full SVE state, the current FPSIMD
 * register contents are migrated across, and TIF_SVE is set so that
 * the SVE access trap will be disabled the next time this task
 * reaches ret_to_user.
 *
 * TIF_SVE should be clear on entry: otherwise, task_fpsimd_load()
 * would have disabled the SVE access trap for userspace during
 * ret_to_user, making an SVE access trap impossible in that case.
 */
asmlinkage void do_sve_acc(unsigned int esr, struct pt_regs *regs)
{
	/* Even if we chose not to use SVE, the hardware could still trap: */
	if (unlikely(!system_supports_sve()) || WARN_ON(is_compat_task())) {
		force_signal_inject(SIGILL, ILL_ILLOPC, regs, 0);
		return;
	}

	sve_alloc(current);

	local_bh_disable();

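	/*
	 * Preserve any live FPSIMD register contents to fpsimd_state,
	 * then spread them into the SVE layout so that the forced
	 * reload below (now in SVE form) presents the same V-register
	 * values the task had before trapping.
	 */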
	task_fpsimd_save();
	fpsimd_to_sve(current);

	/* Force ret_to_user to reload the registers: */
	fpsimd_flush_task_state(current);
	set_thread_flag(TIF_FOREIGN_FPSTATE);

	if (test_and_set_thread_flag(TIF_SVE))
		WARN_ON(1); /* SVE access shouldn't have trapped */

	local_bh_enable();
}

/*
 * Trapped FP/ASIMD access.
 */
asmlinkage void do_fpsimd_acc(unsigned int esr, struct pt_regs *regs)
{
	/* TODO: implement lazy context saving/restoring */
	WARN_ON(1);
}

/*
 * Raise a SIGFPE for the current process.
 */
asmlinkage void do_fpsimd_exc(unsigned int esr, struct pt_regs *regs)
{
	siginfo_t info;
	unsigned int si_code = 0;

	if (esr & FPEXC_IOF)
		si_code = FPE_FLTINV;
	else if (esr & FPEXC_DZF)
		si_code = FPE_FLTDIV;
	else if (esr & FPEXC_OFF)
		si_code = FPE_FLTOVF;
	else if (esr & FPEXC_UFF)
		si_code = FPE_FLTUND;
	else if (esr & FPEXC_IXF)
		si_code = FPE_FLTRES;

	memset(&info, 0, sizeof(info));
	info.si_signo = SIGFPE;
	info.si_code = si_code;
	info.si_addr = (void __user *)instruction_pointer(regs);

	send_sig_info(SIGFPE, &info, current);
}

void fpsimd_thread_switch(struct task_struct *next)
{
	if (!system_supports_fpsimd())
		return;
	/*
	 * Save the current FPSIMD state to memory, but only if whatever is in
	 * the registers is in fact the most recent userland FPSIMD state of
	 * 'current'.
	 */
	if (current->mm)
		task_fpsimd_save();

	if (next->mm) {
		/*
		 * If we are switching to a task whose most recent userland
		 * FPSIMD state is already in the registers of *this* cpu,
		 * we can skip loading the state from memory. Otherwise, set
		 * the TIF_FOREIGN_FPSTATE flag so the state will be loaded
		 * upon the next return to userland.
		 */
		struct fpsimd_state *st = &next->thread.fpsimd_state;

		if (__this_cpu_read(fpsimd_last_state) == st
		    && st->cpu == smp_processor_id())
			clear_tsk_thread_flag(next, TIF_FOREIGN_FPSTATE);
		else
			set_tsk_thread_flag(next, TIF_FOREIGN_FPSTATE);
	}
}

void fpsimd_flush_thread(void)
{
	int vl;

	if (!system_supports_fpsimd())
		return;

	local_bh_disable();

	memset(&current->thread.fpsimd_state, 0, sizeof(struct fpsimd_state));
	fpsimd_flush_task_state(current);

	if (system_supports_sve()) {
		clear_thread_flag(TIF_SVE);
		sve_free(current);

		/*
		 * Reset the task vector length as required.
		 * This is where we ensure that all user tasks have a valid
		 * vector length configured: no kernel task can become a user
		 * task without an exec and hence a call to this function.
		 * If a bug causes this to go wrong, we make some noise and
		 * try to fudge thread.sve_vl to a safe value here.
		 */
		vl = current->thread.sve_vl_onexec ?
			current->thread.sve_vl_onexec : sve_default_vl;

		if (WARN_ON(!sve_vl_valid(vl)))
			vl = SVE_VL_MIN;

		current->thread.sve_vl = vl;

		/*
		 * If the task is not set to inherit, ensure that the vector
		 * length will be reset by a subsequent exec:
		 */
		if (!test_thread_flag(TIF_SVE_VL_INHERIT))
			current->thread.sve_vl_onexec = 0;
	}

	set_thread_flag(TIF_FOREIGN_FPSTATE);

	local_bh_enable();
}

/*
 * Save the userland FPSIMD state of 'current' to memory, but only if the state
 * currently held in the registers does in fact belong to 'current'.
 *
 * Currently, SVE tasks can't exist, so just WARN in that case.
 * Subsequent patches will add full SVE support here.
 */
void fpsimd_preserve_current_state(void)
{
	if (!system_supports_fpsimd())
		return;

	local_bh_disable();

	if (!test_thread_flag(TIF_FOREIGN_FPSTATE))
		fpsimd_save_state(&current->thread.fpsimd_state);

	WARN_ON_ONCE(test_and_clear_thread_flag(TIF_SVE));

	local_bh_enable();
}

/*
 * Load the userland FPSIMD state of 'current' from memory, but only if the
 * FPSIMD state already held in the registers is /not/ the most recent FPSIMD
 * state of 'current'.
 */
void fpsimd_restore_current_state(void)
{
	if (!system_supports_fpsimd())
		return;

	local_bh_disable();

	if (test_and_clear_thread_flag(TIF_FOREIGN_FPSTATE)) {
		struct fpsimd_state *st = &current->thread.fpsimd_state;

		task_fpsimd_load();
		__this_cpu_write(fpsimd_last_state, st);
		st->cpu = smp_processor_id();
	}

	local_bh_enable();
}

/*
 * Load an updated userland FPSIMD state for 'current' from memory and set the
 * flag that indicates that the FPSIMD register contents are the most recent
 * FPSIMD state of 'current'.
 */
void fpsimd_update_current_state(struct fpsimd_state *state)
{
	if (!system_supports_fpsimd())
		return;

	local_bh_disable();

	fpsimd_load_state(state);
	if (test_and_clear_thread_flag(TIF_FOREIGN_FPSTATE)) {
		struct fpsimd_state *st = &current->thread.fpsimd_state;

		__this_cpu_write(fpsimd_last_state, st);
		st->cpu = smp_processor_id();
	}

	local_bh_enable();
}

/*
 * Invalidate live CPU copies of task t's FPSIMD state
 */
void fpsimd_flush_task_state(struct task_struct *t)
{
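	/*
	 * Setting ->cpu to NR_CPUS (an id no CPU can have) breaks the
	 * two-way link checked at return to userspace, forcing the next
	 * ret_to_user to reload the state from memory.
	 */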
	t->thread.fpsimd_state.cpu = NR_CPUS;
}

#ifdef CONFIG_KERNEL_MODE_NEON

DEFINE_PER_CPU(bool, kernel_neon_busy);
EXPORT_PER_CPU_SYMBOL(kernel_neon_busy);

/*
 * Kernel-side NEON support functions
 */

/*
 * kernel_neon_begin(): obtain the CPU FPSIMD registers for use by the calling
 * context
 *
 * Must not be called unless may_use_simd() returns true.
 * Task context in the FPSIMD registers is saved back to memory as necessary.
 *
 * A matching call to kernel_neon_end() must be made before returning from the
 * calling context.
 *
 * The caller may freely use the FPSIMD registers until kernel_neon_end() is
 * called.
 */
void kernel_neon_begin(void)
{
	if (WARN_ON(!system_supports_fpsimd()))
		return;

	BUG_ON(!may_use_simd());

	local_bh_disable();

	__this_cpu_write(kernel_neon_busy, true);

	/* Save unsaved task fpsimd state, if any: */
	if (current->mm && !test_and_set_thread_flag(TIF_FOREIGN_FPSTATE))
		fpsimd_save_state(&current->thread.fpsimd_state);

	/* Invalidate any task state remaining in the fpsimd regs: */
	__this_cpu_write(fpsimd_last_state, NULL);

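	/*
	 * Stay non-preemptible until kernel_neon_end(): the calling
	 * context now owns this CPU's FPSIMD regs for kernel use and
	 * must not migrate or be scheduled out while they hold kernel
	 * data.
	 */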
	preempt_disable();

	local_bh_enable();
}
EXPORT_SYMBOL(kernel_neon_begin);

/*
 * kernel_neon_end(): give the CPU FPSIMD registers back to the current task
 *
 * Must be called from a context in which kernel_neon_begin() was previously
 * called, with no call to kernel_neon_end() in the meantime.
 *
 * The caller must not use the FPSIMD registers after this function is called,
 * unless kernel_neon_begin() is called again in the meantime.
 */
void kernel_neon_end(void)
{
	bool busy;

	if (!system_supports_fpsimd())
		return;

	busy = __this_cpu_xchg(kernel_neon_busy, false);
	WARN_ON(!busy);	/* No matching kernel_neon_begin()? */

	preempt_enable();
}
EXPORT_SYMBOL(kernel_neon_end);

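/*
 * Illustrative usage sketch, not part of this file: a typical caller
 * checks may_use_simd() first and brackets its NEON code with the
 * begin/end pair, falling back to a scalar path otherwise:
 *
 *	if (may_use_simd()) {
 *		kernel_neon_begin();
 *		... use the FPSIMD/NEON registers freely ...
 *		kernel_neon_end();
 *	} else {
 *		... scalar fallback ...
 *	}
 */
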
#ifdef CONFIG_EFI

static DEFINE_PER_CPU(struct fpsimd_state, efi_fpsimd_state);
static DEFINE_PER_CPU(bool, efi_fpsimd_state_used);

/*
 * EFI runtime services support functions
 *
 * The ABI for EFI runtime services allows EFI to use FPSIMD during the call.
 * This means that for EFI (and only for EFI), we have to assume that FPSIMD
 * is always used rather than being an optional accelerator.
 *
 * These functions provide the necessary support for ensuring FPSIMD
 * save/restore in the contexts from which EFI is used.
 *
 * Do not use them for any other purpose -- if tempted to do so, you are
 * either doing something wrong or you need to propose some refactoring.
 */

/*
 * __efi_fpsimd_begin(): prepare FPSIMD for making an EFI runtime services call
 */
void __efi_fpsimd_begin(void)
{
	if (!system_supports_fpsimd())
		return;

	WARN_ON(preemptible());

	if (may_use_simd()) {
		kernel_neon_begin();
	} else {
		fpsimd_save_state(this_cpu_ptr(&efi_fpsimd_state));
		__this_cpu_write(efi_fpsimd_state_used, true);
	}
}

/*
 * __efi_fpsimd_end(): clean up FPSIMD after an EFI runtime services call
 */
void __efi_fpsimd_end(void)
{
	if (!system_supports_fpsimd())
		return;

	if (__this_cpu_xchg(efi_fpsimd_state_used, false))
		fpsimd_load_state(this_cpu_ptr(&efi_fpsimd_state));
	else
		kernel_neon_end();
}

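/*
 * Illustrative sketch, not part of this file (the real callers live in
 * the arm64 EFI glue code): an EFI runtime service invocation is
 * bracketed as
 *
 *	__efi_fpsimd_begin();
 *	... invoke the EFI runtime service ...
 *	__efi_fpsimd_end();
 */
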
#endif /* CONFIG_EFI */

#endif /* CONFIG_KERNEL_MODE_NEON */

#ifdef CONFIG_CPU_PM
static int fpsimd_cpu_pm_notifier(struct notifier_block *self,
				  unsigned long cmd, void *v)
{
	switch (cmd) {
	case CPU_PM_ENTER:
		if (current->mm)
			task_fpsimd_save();
		this_cpu_write(fpsimd_last_state, NULL);
		break;
	case CPU_PM_EXIT:
		if (current->mm)
			set_thread_flag(TIF_FOREIGN_FPSTATE);
		break;
	case CPU_PM_ENTER_FAILED:
	default:
		return NOTIFY_DONE;
	}
	return NOTIFY_OK;
}

static struct notifier_block fpsimd_cpu_pm_notifier_block = {
	.notifier_call = fpsimd_cpu_pm_notifier,
};

static void __init fpsimd_pm_init(void)
{
	cpu_pm_register_notifier(&fpsimd_cpu_pm_notifier_block);
}

#else
static inline void fpsimd_pm_init(void) { }
#endif /* CONFIG_CPU_PM */

#ifdef CONFIG_HOTPLUG_CPU
static int fpsimd_cpu_dead(unsigned int cpu)
{
	per_cpu(fpsimd_last_state, cpu) = NULL;
	return 0;
}

static inline void fpsimd_hotplug_init(void)
{
	cpuhp_setup_state_nocalls(CPUHP_ARM64_FPSIMD_DEAD, "arm64/fpsimd:dead",
				  NULL, fpsimd_cpu_dead);
}

#else
static inline void fpsimd_hotplug_init(void) { }
#endif

/*
 * FP/SIMD support code initialisation.
 */
static int __init fpsimd_init(void)
{
	if (elf_hwcap & HWCAP_FP) {
		fpsimd_pm_init();
		fpsimd_hotplug_init();
	} else {
		pr_notice("Floating-point is not implemented\n");
	}

	if (!(elf_hwcap & HWCAP_ASIMD))
		pr_notice("Advanced SIMD is not implemented\n");

	return 0;
}
late_initcall(fpsimd_init);