/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _ASM_X86_BARRIER_H
#define _ASM_X86_BARRIER_H

#include <asm/alternative.h>
#include <asm/nops.h>

/*
 * Force strict CPU ordering.
 * And yes, this might be required on UP too when we're talking
 * to devices.
 */

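/*
 * mb()/rmb()/wmb() order all accesses, loads and stores respectively,
 * including accesses to device memory.  The *fence instructions need
 * SSE2 (X86_FEATURE_XMM2); on 32-bit, ALTERNATIVE() patches them in at
 * boot, and older CPUs fall back to a locked add to the top of the
 * stack, which is also fully ordered.  64-bit CPUs always have SSE2.
 */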
#ifdef CONFIG_X86_32
#define mb() asm volatile(ALTERNATIVE("lock; addl $0,0(%%esp)", "mfence", \
				      X86_FEATURE_XMM2) ::: "memory", "cc")
#define rmb() asm volatile(ALTERNATIVE("lock; addl $0,0(%%esp)", "lfence", \
				       X86_FEATURE_XMM2) ::: "memory", "cc")
#define wmb() asm volatile(ALTERNATIVE("lock; addl $0,0(%%esp)", "sfence", \
				       X86_FEATURE_XMM2) ::: "memory", "cc")
#else
#define mb()	asm volatile("mfence" ::: "memory")
#define rmb()	asm volatile("lfence" ::: "memory")
#define wmb()	asm volatile("sfence" ::: "memory")
#endif

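/*
 * dma_rmb()/dma_wmb() order accesses to coherent DMA memory shared with
 * a device.  The x86 CPU already keeps such accesses in order, so a
 * compiler barrier is enough, except that dma_rmb() needs a real fence
 * when CONFIG_X86_PPRO_FENCE is set.
 */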
#ifdef CONFIG_X86_PPRO_FENCE
#define dma_rmb()	rmb()
#else
#define dma_rmb()	barrier()
#endif
#define dma_wmb()	barrier()

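/*
 * The __smp_*() primitives order accesses between CPUs only.
 * asm-generic/barrier.h, included at the end of this file, wraps them
 * into smp_mb(), smp_rmb(), smp_wmb() and friends, and reduces them to
 * compiler barriers on !CONFIG_SMP builds.
 */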
#define __smp_mb()	mb()
#define __smp_rmb()	dma_rmb()
#define __smp_wmb()	barrier()
#define __smp_store_mb(var, value) do { (void)xchg(&var, value); } while (0)
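/*
 * __smp_store_mb() relies on xchg(): with a memory operand, xchg is
 * implicitly locked, so the store and the full barrier come from a
 * single instruction.
 */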

#if defined(CONFIG_X86_PPRO_FENCE)

/*
 * With CONFIG_X86_PPRO_FENCE the CPU cannot be relied on to provide the
 * usual strong TSO memory model, so fall back to full barriers.
 */

#define __smp_store_release(p, v)					\
do {									\
	compiletime_assert_atomic_type(*p);				\
	__smp_mb();							\
	WRITE_ONCE(*p, v);						\
} while (0)

#define __smp_load_acquire(p)						\
({									\
	typeof(*p) ___p1 = READ_ONCE(*p);				\
	compiletime_assert_atomic_type(*p);				\
	__smp_mb();							\
	___p1;								\
})

#else /* regular x86 TSO memory ordering */

#define __smp_store_release(p, v)					\
do {									\
	compiletime_assert_atomic_type(*p);				\
	barrier();							\
	WRITE_ONCE(*p, v);						\
} while (0)

#define __smp_load_acquire(p)						\
({									\
	typeof(*p) ___p1 = READ_ONCE(*p);				\
	compiletime_assert_atomic_type(*p);				\
	barrier();							\
	___p1;								\
})

#endif

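/*
 * Typical acquire/release pairing through the generic smp_* wrappers
 * (illustrative only, not part of this header; 'data', 'ready' and
 * do_something() are made-up names):
 *
 *	CPU 0 (producer)		CPU 1 (consumer)
 *	data = 42;			if (smp_load_acquire(&ready))
 *	smp_store_release(&ready, 1);		do_something(data);
 *
 * The release orders the store to 'data' before the store to 'ready';
 * the acquire orders the load of 'ready' before the load of 'data'.
 * On TSO x86 both are plain accesses plus a compiler barrier.
 */
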
/* Atomic operations are already serializing on x86 */
#define __smp_mb__before_atomic()	barrier()
#define __smp_mb__after_atomic()	barrier()
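/*
 * The lock-prefixed read-modify-write instructions behind the atomic_*()
 * operations already act as full memory barriers on x86, so only a
 * compiler barrier is needed around them.
 */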

#include <asm-generic/barrier.h>

#endif /* _ASM_X86_BARRIER_H */