#ifndef _ASM_UM_BARRIER_H_
#define _ASM_UM_BARRIER_H_

#include <asm/asm.h>
#include <asm/segment.h>
#include <asm/cpufeatures.h>
#include <asm/cmpxchg.h>
#include <asm/nops.h>

#include <linux/kernel.h>
#include <linux/irqflags.h>

/*
 * Force strict CPU ordering.
 * And yes, this is required on UP too when we're talking
 * to devices.
 */
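/*
 * Illustrative sketch (not part of the original header): the classic
 * pattern these barriers exist for is publishing data to a device.
 * The names (desc, dma_addr, doorbell) are hypothetical, made up for
 * this example.
 *
 *	desc->addr = dma_addr;
 *	desc->len  = len;
 *	wmb();			// descriptor stores visible before the kick
 *	writel(1, doorbell);	// device may now fetch the descriptor
 *
 * Without the wmb(), the compiler or CPU could let the doorbell store
 * become visible before the descriptor stores, and the device would
 * fetch stale data.
 */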
#ifdef CONFIG_X86_32

#define mb() alternative("lock; addl $0,0(%%esp)", "mfence", X86_FEATURE_XMM2)
#define rmb() alternative("lock; addl $0,0(%%esp)", "lfence", X86_FEATURE_XMM2)
#define wmb() alternative("lock; addl $0,0(%%esp)", "sfence", X86_FEATURE_XMM)

#else /* CONFIG_X86_32 */

#define mb() asm volatile("mfence" : : : "memory")
#define rmb() asm volatile("lfence" : : : "memory")
#define wmb() asm volatile("sfence" : : : "memory")

#endif /* CONFIG_X86_32 */
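/*
 * Note on the two variants above: 32-bit CPUs may predate the fence
 * instructions (sfence arrived with SSE, lfence/mfence with SSE2), so
 * alternative() boot-patches in the real fence when the corresponding
 * feature bit is set and otherwise falls back to a serializing locked
 * add to the top of the stack, which is also a full barrier on x86.
 * 64-bit CPUs always have SSE2, so the fences can be emitted
 * unconditionally.
 */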

#ifdef CONFIG_X86_PPRO_FENCE
#define dma_rmb() rmb()
#else /* CONFIG_X86_PPRO_FENCE */
#define dma_rmb() barrier()
#endif /* CONFIG_X86_PPRO_FENCE */
#define dma_wmb() barrier()
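/*
 * x86 orders accesses to cacheable (coherent DMA) memory strongly
 * enough that dma_rmb()/dma_wmb() only need to stop the compiler from
 * reordering; no fence instruction is required. The exception is the
 * old Pentium Pro memory-ordering errata (CONFIG_X86_PPRO_FENCE),
 * where dma_rmb() must be a real rmb().
 *
 * Illustrative use (hypothetical ring-buffer driver, names made up):
 *
 *	if (desc->status & DESC_DONE) {	// flag written by the device
 *		dma_rmb();		// order flag read before data read
 *		process(desc->data);
 *	}
 */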

#include <asm-generic/barrier.h>
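/*
 * The asm-generic header above fills in the remaining barrier
 * flavours (smp_mb() and friends) in terms of the primitives
 * defined in this file.
 */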

#endif