#ifndef _ASM_X86_QSPINLOCK_H
#define _ASM_X86_QSPINLOCK_H

#include <asm/cpufeature.h>
#include <asm-generic/qspinlock_types.h>

#define queued_spin_unlock queued_spin_unlock
/**
 * queued_spin_unlock - release a queued spinlock
 * @lock : Pointer to queued spinlock structure
 *
 * A smp_store_release() on the least-significant byte.
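 *
 * The locked byte is the least-significant byte of lock->val on
 * little-endian x86, so a plain release store of zero is sufficient
 * and the unlock path needs no atomic read-modify-write.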
 */
static inline void queued_spin_unlock(struct qspinlock *lock)
{
	smp_store_release((u8 *)lock, 0);
}

#define virt_queued_spin_lock virt_queued_spin_lock

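/*
 * Under a hypervisor, the fair queueing behaviour hurts: if the vCPU at
 * the head of the queue is preempted, every waiter behind it stalls as
 * well. So when running as a guest, fall back to a simple test-and-set
 * spinlock and return true, which makes the caller skip the generic
 * queueing slowpath; on bare metal, return false and let the normal
 * queued-spinlock path run.
 */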
static inline bool virt_queued_spin_lock(struct qspinlock *lock)
{
	if (!static_cpu_has(X86_FEATURE_HYPERVISOR))
		return false;

	while (atomic_cmpxchg(&lock->val, 0, _Q_LOCKED_VAL) != 0)
		cpu_relax();

	return true;
}

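/*
 * The generic header picks up the overrides defined above (the
 * queued_spin_unlock and virt_queued_spin_lock #defines) and supplies
 * the rest of the queued-spinlock API.
 */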
#include <asm-generic/qspinlock.h>

#endif /* _ASM_X86_QSPINLOCK_H */