s390/kernel: remove save_fpu_regs() parameter and use __LC_CURRENT instead
All calls to save_fpu_regs() specify the fpu structure of the current task
pointer as parameter. The task pointer of the current task can also be
retrieved from the CPU lowcore directly. Remove the parameter definition,
load the __LC_CURRENT task pointer from the CPU lowcore, and rebase the FPU
structure onto the task structure. Apply the same approach for the
load_fpu_regs() function.
Reviewed-by: Heiko Carstens <heiko.carstens@de.ibm.com>
Signed-off-by: Hendrik Brueckner <brueckner@linux.vnet.ibm.com>
Signed-off-by: Martin Schwidefsky <schwidefsky@de.ibm.com>
diff --git a/arch/s390/include/asm/fpu-internal.h b/arch/s390/include/asm/fpu-internal.h
index 237f8fc..55dc2c0 100644
--- a/arch/s390/include/asm/fpu-internal.h
+++ b/arch/s390/include/asm/fpu-internal.h
@@ -28,7 +28,7 @@
};
};
-void save_fpu_regs(struct fpu *fpu);
+void save_fpu_regs(void);
#define is_vx_fpu(fpu) (!!((fpu)->flags & FPU_USE_VX))
#define is_vx_task(tsk) (!!((tsk)->thread.fpu.flags & FPU_USE_VX))
diff --git a/arch/s390/include/asm/switch_to.h b/arch/s390/include/asm/switch_to.h
index 0a4a315..dcadfde 100644
--- a/arch/s390/include/asm/switch_to.h
+++ b/arch/s390/include/asm/switch_to.h
@@ -30,7 +30,7 @@
#define switch_to(prev,next,last) do { \
if (prev->mm) { \
- save_fpu_regs(&prev->thread.fpu); \
+ save_fpu_regs(); \
save_access_regs(&prev->thread.acrs[0]); \
save_ri_cb(prev->thread.ri_cb); \
} \
diff --git a/arch/s390/kernel/asm-offsets.c b/arch/s390/kernel/asm-offsets.c
index 6bc42c0..48c9af7 100644
--- a/arch/s390/kernel/asm-offsets.c
+++ b/arch/s390/kernel/asm-offsets.c
@@ -28,16 +28,14 @@
DEFINE(__TASK_pid, offsetof(struct task_struct, pid));
BLANK();
DEFINE(__THREAD_ksp, offsetof(struct thread_struct, ksp));
- DEFINE(__THREAD_fpu, offsetof(struct task_struct, thread.fpu));
+ DEFINE(__THREAD_FPU_fpc, offsetof(struct thread_struct, fpu.fpc));
+ DEFINE(__THREAD_FPU_flags, offsetof(struct thread_struct, fpu.flags));
+ DEFINE(__THREAD_FPU_regs, offsetof(struct thread_struct, fpu.regs));
DEFINE(__THREAD_per_cause, offsetof(struct thread_struct, per_event.cause));
DEFINE(__THREAD_per_address, offsetof(struct thread_struct, per_event.address));
DEFINE(__THREAD_per_paid, offsetof(struct thread_struct, per_event.paid));
DEFINE(__THREAD_trap_tdb, offsetof(struct thread_struct, trap_tdb));
BLANK();
- DEFINE(__FPU_fpc, offsetof(struct fpu, fpc));
- DEFINE(__FPU_flags, offsetof(struct fpu, flags));
- DEFINE(__FPU_regs, offsetof(struct fpu, regs));
- BLANK();
DEFINE(__TI_task, offsetof(struct thread_info, task));
DEFINE(__TI_flags, offsetof(struct thread_info, flags));
DEFINE(__TI_sysc_table, offsetof(struct thread_info, sys_call_table));
diff --git a/arch/s390/kernel/compat_signal.c b/arch/s390/kernel/compat_signal.c
index 0b46fd4..eb46642 100644
--- a/arch/s390/kernel/compat_signal.c
+++ b/arch/s390/kernel/compat_signal.c
@@ -154,7 +154,7 @@
static void store_sigregs(void)
{
save_access_regs(current->thread.acrs);
- save_fpu_regs(&current->thread.fpu);
+ save_fpu_regs();
}
/* Load registers after signal return */
@@ -286,7 +286,7 @@
if (__copy_from_user(&set.sig, &frame->sc.oldmask, _SIGMASK_COPY_SIZE32))
goto badframe;
set_current_blocked(&set);
- save_fpu_regs(&current->thread.fpu);
+ save_fpu_regs();
if (restore_sigregs32(regs, &frame->sregs))
goto badframe;
if (restore_sigregs_ext32(regs, &frame->sregs_ext))
@@ -309,7 +309,7 @@
set_current_blocked(&set);
if (compat_restore_altstack(&frame->uc.uc_stack))
goto badframe;
- save_fpu_regs(&current->thread.fpu);
+ save_fpu_regs();
if (restore_sigregs32(regs, &frame->uc.uc_mcontext))
goto badframe;
if (restore_sigregs_ext32(regs, &frame->uc.uc_mcontext_ext))
diff --git a/arch/s390/kernel/entry.S b/arch/s390/kernel/entry.S
index 21c1219..5a966de 100644
--- a/arch/s390/kernel/entry.S
+++ b/arch/s390/kernel/entry.S
@@ -183,7 +183,6 @@
xc __SF_EMPTY+16(16,%r15),__SF_EMPTY+16(%r15) # host id & reason
tm __LC_CPU_FLAGS+7,_CIF_FPU # load guest fp/vx registers ?
jno .Lsie_load_guest_gprs
- lg %r12,__LC_THREAD_INFO # load fp/vx regs save area
brasl %r14,load_fpu_regs # load guest fp/vx regs
.Lsie_load_guest_gprs:
lmg %r0,%r13,0(%r3) # load guest gprs 0-13
@@ -752,14 +751,16 @@
* of the register contents at system call or io return.
*/
ENTRY(save_fpu_regs)
+ lg %r2,__LC_CURRENT
+ aghi %r2,__TASK_thread
tm __LC_CPU_FLAGS+7,_CIF_FPU
bor %r14
- stfpc __FPU_fpc(%r2)
+ stfpc __THREAD_FPU_fpc(%r2)
.Lsave_fpu_regs_fpc_end:
- lg %r3,__FPU_regs(%r2)
+ lg %r3,__THREAD_FPU_regs(%r2)
ltgr %r3,%r3
jz .Lsave_fpu_regs_done # no save area -> set CIF_FPU
- tm __FPU_flags+3(%r2),FPU_USE_VX
+ tm __THREAD_FPU_flags+3(%r2),FPU_USE_VX
jz .Lsave_fpu_regs_fp # no -> store FP regs
.Lsave_fpu_regs_vx_low:
VSTM %v0,%v15,0,%r3 # vstm 0,15,0(3)
@@ -794,20 +795,19 @@
* FP/VX state, the vector-enablement control, CR0.46, is either set or cleared.
*
* There are special calling conventions to fit into sysc and io return work:
- * %r12: __LC_THREAD_INFO
* %r15: <kernel stack>
* The function requires:
* %r4 and __SF_EMPTY+32(%r15)
*/
load_fpu_regs:
+ lg %r4,__LC_CURRENT
+ aghi %r4,__TASK_thread
tm __LC_CPU_FLAGS+7,_CIF_FPU
bnor %r14
- lg %r4,__TI_task(%r12)
- la %r4,__THREAD_fpu(%r4)
- lfpc __FPU_fpc(%r4)
+ lfpc __THREAD_FPU_fpc(%r4)
stctg %c0,%c0,__SF_EMPTY+32(%r15) # store CR0
- tm __FPU_flags+3(%r4),FPU_USE_VX # VX-enabled task ?
- lg %r4,__FPU_regs(%r4) # %r4 <- reg save area
+ tm __THREAD_FPU_flags+3(%r4),FPU_USE_VX # VX-enabled task ?
+ lg %r4,__THREAD_FPU_regs(%r4) # %r4 <- reg save area
jz .Lload_fpu_regs_fp_ctl # -> no VX, load FP regs
.Lload_fpu_regs_vx_ctl:
tm __SF_EMPTY+32+5(%r15),2 # test VX control
@@ -1190,13 +1190,15 @@
jhe 2f
clg %r9,BASED(.Lcleanup_save_fpu_fpc_end)
jhe 1f
+ lg %r2,__LC_CURRENT
+ aghi %r2,__TASK_thread
0: # Store floating-point controls
- stfpc __FPU_fpc(%r2)
+ stfpc __THREAD_FPU_fpc(%r2)
1: # Load register save area and check if VX is active
- lg %r3,__FPU_regs(%r2)
+ lg %r3,__THREAD_FPU_regs(%r2)
ltgr %r3,%r3
jz 5f # no save area -> set CIF_FPU
- tm __FPU_flags+3(%r2),FPU_USE_VX
+ tm __THREAD_FPU_flags+3(%r2),FPU_USE_VX
jz 4f # no VX -> store FP regs
2: # Store vector registers (V0-V15)
VSTM %v0,%v15,0,%r3 # vstm 0,15,0(3)
@@ -1250,11 +1251,11 @@
jhe 5f
clg %r9,BASED(.Lcleanup_load_fpu_regs_vx_ctl)
jhe 6f
- lg %r4,__TI_task(%r12)
- la %r4,__THREAD_fpu(%r4)
- lfpc __FPU_fpc(%r4)
- tm __FPU_flags+3(%r4),FPU_USE_VX # VX-enabled task ?
- lg %r4,__FPU_regs(%r4) # %r4 <- reg save area
+ lg %r4,__LC_CURRENT
+ aghi %r4,__TASK_thread
+ lfpc __THREAD_FPU_fpc(%r4)
+ tm __THREAD_FPU_flags+3(%r4),FPU_USE_VX # VX-enabled task ?
+ lg %r4,__THREAD_FPU_regs(%r4) # %r4 <- reg save area
jz 3f # -> no VX, load FP regs
6: # Set VX-enablement control
stctg %c0,%c0,__SF_EMPTY+32(%r15) # store CR0
diff --git a/arch/s390/kernel/process.c b/arch/s390/kernel/process.c
index 9cf0063..f2dac9f 100644
--- a/arch/s390/kernel/process.c
+++ b/arch/s390/kernel/process.c
@@ -104,7 +104,7 @@
* The CIF_FPU flag is set in any case to lazy clear or restore a saved
* state when switching to a different task or returning to user space.
*/
- save_fpu_regs(&current->thread.fpu);
+ save_fpu_regs();
dst->thread.fpu.fpc = current->thread.fpu.fpc;
if (is_vx_task(current))
convert_vx_to_fp(dst->thread.fpu.fprs,
@@ -196,7 +196,7 @@
*/
int dump_fpu (struct pt_regs * regs, s390_fp_regs *fpregs)
{
- save_fpu_regs(&current->thread.fpu);
+ save_fpu_regs();
fpregs->fpc = current->thread.fpu.fpc;
fpregs->pad = 0;
if (is_vx_task(current))
diff --git a/arch/s390/kernel/ptrace.c b/arch/s390/kernel/ptrace.c
index 8c52588..8b1c8e3 100644
--- a/arch/s390/kernel/ptrace.c
+++ b/arch/s390/kernel/ptrace.c
@@ -943,7 +943,7 @@
_s390_fp_regs fp_regs;
if (target == current)
- save_fpu_regs(&target->thread.fpu);
+ save_fpu_regs();
fp_regs.fpc = target->thread.fpu.fpc;
fpregs_store(&fp_regs, &target->thread.fpu);
@@ -961,7 +961,7 @@
freg_t fprs[__NUM_FPRS];
if (target == current)
- save_fpu_regs(&target->thread.fpu);
+ save_fpu_regs();
/* If setting FPC, must validate it first. */
if (count > 0 && pos < offsetof(s390_fp_regs, fprs)) {
@@ -1049,7 +1049,7 @@
return -ENODEV;
if (is_vx_task(target)) {
if (target == current)
- save_fpu_regs(&target->thread.fpu);
+ save_fpu_regs();
for (i = 0; i < __NUM_VXRS_LOW; i++)
vxrs[i] = *((__u64 *)(target->thread.fpu.vxrs + i) + 1);
} else
@@ -1072,7 +1072,7 @@
if (rc)
return rc;
} else if (target == current)
- save_fpu_regs(&target->thread.fpu);
+ save_fpu_regs();
rc = user_regset_copyin(&pos, &count, &kbuf, &ubuf, vxrs, 0, -1);
if (rc == 0)
@@ -1093,7 +1093,7 @@
return -ENODEV;
if (is_vx_task(target)) {
if (target == current)
- save_fpu_regs(&target->thread.fpu);
+ save_fpu_regs();
memcpy(vxrs, target->thread.fpu.vxrs + __NUM_VXRS_LOW,
sizeof(vxrs));
} else
@@ -1115,7 +1115,7 @@
if (rc)
return rc;
} else if (target == current)
- save_fpu_regs(&target->thread.fpu);
+ save_fpu_regs();
rc = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
target->thread.fpu.vxrs + __NUM_VXRS_LOW, 0, -1);
diff --git a/arch/s390/kernel/signal.c b/arch/s390/kernel/signal.c
index 2f4c7e2..9549af1 100644
--- a/arch/s390/kernel/signal.c
+++ b/arch/s390/kernel/signal.c
@@ -105,7 +105,7 @@
static void store_sigregs(void)
{
save_access_regs(current->thread.acrs);
- save_fpu_regs(&current->thread.fpu);
+ save_fpu_regs();
}
/* Load registers after signal return */
@@ -222,7 +222,7 @@
if (__copy_from_user(&set.sig, &frame->sc.oldmask, _SIGMASK_COPY_SIZE))
goto badframe;
set_current_blocked(&set);
- save_fpu_regs(&current->thread.fpu);
+ save_fpu_regs();
if (restore_sigregs(regs, &frame->sregs))
goto badframe;
if (restore_sigregs_ext(regs, &frame->sregs_ext))
@@ -246,7 +246,7 @@
set_current_blocked(&set);
if (restore_altstack(&frame->uc.uc_stack))
goto badframe;
- save_fpu_regs(&current->thread.fpu);
+ save_fpu_regs();
if (restore_sigregs(regs, &frame->uc.uc_mcontext))
goto badframe;
if (restore_sigregs_ext(regs, &frame->uc.uc_mcontext_ext))
diff --git a/arch/s390/kernel/traps.c b/arch/s390/kernel/traps.c
index 76f7693..9861613 100644
--- a/arch/s390/kernel/traps.c
+++ b/arch/s390/kernel/traps.c
@@ -236,7 +236,7 @@
return -ENOMEM;
preempt_disable();
if (tsk == current)
- save_fpu_regs(&tsk->thread.fpu);
+ save_fpu_regs();
/* Copy the 16 floating point registers */
convert_fp_to_vx(vxrs, tsk->thread.fpu.fprs);
fprs = tsk->thread.fpu.fprs;
@@ -257,7 +257,7 @@
}
/* get vector interrupt code from fpc */
- save_fpu_regs(&current->thread.fpu);
+ save_fpu_regs();
vic = (current->thread.fpu.fpc & 0xf00) >> 8;
switch (vic) {
case 1: /* invalid vector operation */
@@ -295,7 +295,7 @@
location = get_trap_ip(regs);
- save_fpu_regs(&current->thread.fpu);
+ save_fpu_regs();
/* Check for vector register enablement */
if (MACHINE_HAS_VX && !is_vx_task(current) &&
(current->thread.fpu.fpc & FPC_DXC_MASK) == 0xfe00) {
diff --git a/arch/s390/kvm/kvm-s390.c b/arch/s390/kvm/kvm-s390.c
index c0cceaf..1903f02 100644
--- a/arch/s390/kvm/kvm-s390.c
+++ b/arch/s390/kvm/kvm-s390.c
@@ -1224,7 +1224,7 @@
void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
{
/* Save host register state */
- save_fpu_regs(&current->thread.fpu);
+ save_fpu_regs();
save_fpu_to(&vcpu->arch.host_fpregs);
if (test_kvm_facility(vcpu->kvm, 129)) {
@@ -1256,7 +1256,7 @@
atomic_clear_mask(CPUSTAT_RUNNING, &vcpu->arch.sie_block->cpuflags);
gmap_disable(vcpu->arch.gmap);
- save_fpu_regs(&current->thread.fpu);
+ save_fpu_regs();
if (test_kvm_facility(vcpu->kvm, 129))
/*
@@ -1671,7 +1671,7 @@
return -EINVAL;
memcpy(vcpu->arch.guest_fpregs.fprs, &fpu->fprs, sizeof(fpu->fprs));
vcpu->arch.guest_fpregs.fpc = fpu->fpc;
- save_fpu_regs(&current->thread.fpu);
+ save_fpu_regs();
load_fpu_from(&vcpu->arch.guest_fpregs);
return 0;
}
@@ -2241,7 +2241,7 @@
* copying in vcpu load/put. Lets update our copies before we save
* it into the save area
*/
- save_fpu_regs(&current->thread.fpu);
+ save_fpu_regs();
if (test_kvm_facility(vcpu->kvm, 129)) {
/*
* If the vector extension is available, the vector registers
@@ -2288,7 +2288,7 @@
*
* Let's update our copies before we save it into the save area.
*/
- save_fpu_regs(&current->thread.fpu);
+ save_fpu_regs();
return kvm_s390_store_adtl_status_unloaded(vcpu, addr);
}