/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _ASM_X86_BARRIER_H
#define _ASM_X86_BARRIER_H

#include <asm/alternative.h>
#include <asm/nops.h>

/*
 * Force strict CPU ordering.
 * And yes, this might be required on UP too when we're talking
 * to devices.
 */

#ifdef CONFIG_X86_32
#define mb() asm volatile(ALTERNATIVE("lock; addl $0,0(%%esp)", "mfence", \
				      X86_FEATURE_XMM2) ::: "memory", "cc")
#define rmb() asm volatile(ALTERNATIVE("lock; addl $0,0(%%esp)", "lfence", \
				       X86_FEATURE_XMM2) ::: "memory", "cc")
#define wmb() asm volatile(ALTERNATIVE("lock; addl $0,0(%%esp)", "sfence", \
				       X86_FEATURE_XMM2) ::: "memory", "cc")
#else
#define mb()	asm volatile("mfence":::"memory")
#define rmb()	asm volatile("lfence":::"memory")
#define wmb()	asm volatile("sfence" ::: "memory")
#endif

#ifdef CONFIG_X86_PPRO_FENCE
#define dma_rmb()	rmb()
#else
#define dma_rmb()	barrier()
#endif
#define dma_wmb()	barrier()
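/*
 * Illustrative use of the DMA barriers above (not part of the original
 * header; the descriptor layout and field names are hypothetical).  A
 * driver publishing a descriptor in coherent DMA memory must order the
 * payload writes before the write that hands ownership to the device:
 *
 *	desc->addr  = buf_dma;
 *	desc->len   = buf_len;
 *	dma_wmb();			  payload visible before OWN flag
 *	desc->flags = DESC_OWNED_BY_HW;
 *
 * On x86, writes to coherent memory are already ordered strongly enough
 * that dma_wmb() only needs to be a compiler barrier(), as defined above.
 */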

#define __smp_mb()	mb()
#define __smp_rmb()	dma_rmb()
#define __smp_wmb()	barrier()
#define __smp_store_mb(var, value) do { (void)xchg(&var, value); } while (0)

#if defined(CONFIG_X86_PPRO_FENCE)

/*
 * For this option x86 doesn't have a strong TSO memory
 * model and we should fall back to full barriers.
 */

#define __smp_store_release(p, v)					\
do {									\
	compiletime_assert_atomic_type(*p);				\
	__smp_mb();							\
	WRITE_ONCE(*p, v);						\
} while (0)

#define __smp_load_acquire(p)						\
({									\
	typeof(*p) ___p1 = READ_ONCE(*p);				\
	compiletime_assert_atomic_type(*p);				\
	__smp_mb();							\
	___p1;								\
})

#else /* regular x86 TSO memory ordering */

#define __smp_store_release(p, v)					\
do {									\
	compiletime_assert_atomic_type(*p);				\
	barrier();							\
	WRITE_ONCE(*p, v);						\
} while (0)

#define __smp_load_acquire(p)						\
({									\
	typeof(*p) ___p1 = READ_ONCE(*p);				\
	compiletime_assert_atomic_type(*p);				\
	barrier();							\
	___p1;								\
})

#endif
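
/*
 * Illustrative pairing of the release/acquire operations above (not part
 * of the original header; "data" and "ready" are hypothetical fields, and
 * callers would normally use the smp_store_release()/smp_load_acquire()
 * wrappers provided via <asm-generic/barrier.h>):
 *
 *	CPU 0					CPU 1
 *	data = compute();			if (smp_load_acquire(&ready))
 *	smp_store_release(&ready, 1);			use(data);
 *
 * On regular x86 (TSO) both sides compile to a plain store/load plus a
 * compiler-only barrier(); only the CONFIG_X86_PPRO_FENCE case falls back
 * to a full __smp_mb().
 */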

/* Atomic operations are already serializing on x86 */
#define __smp_mb__before_atomic()	barrier()
#define __smp_mb__after_atomic()	barrier()
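/*
 * Illustrative use (not part of the original header; "obj" and its fields
 * are hypothetical): order a prior store before a value-less atomic RMW:
 *
 *	obj->dead = 1;
 *	smp_mb__before_atomic();
 *	atomic_dec(&obj->ref_count);
 *
 * On x86 the LOCK-prefixed RMW already acts as a full memory barrier, so
 * only a compiler barrier() is required here.
 */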

#include <asm-generic/barrier.h>

#endif /* _ASM_X86_BARRIER_H */