/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __ASM_QSPINLOCK_PARAVIRT_H
#define __ASM_QSPINLOCK_PARAVIRT_H

/*
 * For x86-64, PV_CALLEE_SAVE_REGS_THUNK() saves and restores 8 64-bit
 * registers. For i386, however, only 1 32-bit register needs to be saved
 * and restored. So an optimized version of __pv_queued_spin_unlock() is
 * hand-coded for 64-bit, but it isn't worthwhile to do it for 32-bit.
 */
#ifdef CONFIG_64BIT

PV_CALLEE_SAVE_REGS_THUNK(__pv_queued_spin_unlock_slowpath);
#define __pv_queued_spin_unlock	__pv_queued_spin_unlock
#define PV_UNLOCK		"__raw_callee_save___pv_queued_spin_unlock"
#define PV_UNLOCK_SLOWPATH	"__raw_callee_save___pv_queued_spin_unlock_slowpath"
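
/*
 * PV_UNLOCK and PV_UNLOCK_SLOWPATH name the callee-save thunk entry
 * points emitted by PV_CALLEE_SAVE_REGS_THUNK(). As a rough sketch
 * (not the literal macro expansion), such a thunk looks like:
 *
 *	__raw_callee_save___pv_queued_spin_unlock_slowpath:
 *		push	<caller-clobbered registers>
 *		call	__pv_queued_spin_unlock_slowpath
 *		pop	<caller-clobbered registers>
 *		ret
 *
 * so the clobber list at the call site can be kept minimal. The
 * hand-written PV_UNLOCK body below stands in for the generic thunk on
 * the unlock path and only saves the registers it actually uses.
 */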

/*
 * Optimized assembly version of __raw_callee_save___pv_queued_spin_unlock
 * which combines the register saving thunk and the body of the following
 * C code:
 *
 * void __pv_queued_spin_unlock(struct qspinlock *lock)
 * {
 *	u8 lockval = cmpxchg(&lock->locked, _Q_LOCKED_VAL, 0);
 *
 *	if (likely(lockval == _Q_LOCKED_VAL))
 *		return;
 *	pv_queued_spin_unlock_slowpath(lock, lockval);
 * }
 *
 * For x86-64,
 *	rdi = lock              (first argument)
 *	rsi = lockval           (second argument)
 *	rdx = internal variable (set to 0)
 */
asm    (".pushsection .text;"
	".globl " PV_UNLOCK ";"
	".type " PV_UNLOCK ", @function;"
	".align 4,0x90;"
	PV_UNLOCK ": "
	FRAME_BEGIN
	"push  %rdx;"
	"mov   $0x1,%eax;"
	"xor   %edx,%edx;"
	LOCK_PREFIX "cmpxchg %dl,(%rdi);"
	"cmp   $0x1,%al;"
	"jne   .slowpath;"
	"pop   %rdx;"
	FRAME_END
	"ret;"
	".slowpath: "
	"push   %rsi;"
	"movzbl %al,%esi;"
	"call " PV_UNLOCK_SLOWPATH ";"
	"pop    %rsi;"
	"pop    %rdx;"
	FRAME_END
	"ret;"
	".size " PV_UNLOCK ", .-" PV_UNLOCK ";"
	".popsection");

#else /* CONFIG_64BIT */

extern void __pv_queued_spin_unlock(struct qspinlock *lock);
PV_CALLEE_SAVE_REGS_THUNK(__pv_queued_spin_unlock);

#endif /* CONFIG_64BIT */
#endif