/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _ASM_UM_BARRIER_H_
#define _ASM_UM_BARRIER_H_

#include <asm/cpufeatures.h>
#include <asm/alternative.h>

/*
 * Force strict CPU ordering.
 * And yes, this is required on UP too when we're talking
 * to devices.
 */
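/*
 * Illustrative sketch (hypothetical device and register names, not part
 * of this header): a driver typically uses wmb() to order a descriptor
 * write in memory against the MMIO doorbell that tells the device to
 * fetch it:
 *
 *	desc->addr = buf_dma_addr;	// descriptor in coherent RAM
 *	wmb();				// descriptor before doorbell
 *	writel(1, regs + DEV_DOORBELL);	// device may fetch it now
 *
 * Without the wmb(), the doorbell store could become visible before the
 * descriptor store, and the device would fetch stale data, even with a
 * single CPU.
 */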
#ifdef CONFIG_X86_32

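/*
 * alternative(oldinstr, newinstr, feature) is patched at boot: when the
 * CPU advertises the SSE/SSE2 feature bit, the dedicated fence
 * instruction is used; older CPUs keep the "lock; addl" to the stack,
 * which is also a serializing memory operation.
 */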
#define mb() alternative("lock; addl $0,0(%%esp)", "mfence", X86_FEATURE_XMM2)
#define rmb() alternative("lock; addl $0,0(%%esp)", "lfence", X86_FEATURE_XMM2)
#define wmb() alternative("lock; addl $0,0(%%esp)", "sfence", X86_FEATURE_XMM)

#else /* CONFIG_X86_32 */

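/* SSE2 is architectural on x86-64, so the fences can be used directly. */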
#define mb() asm volatile("mfence" : : : "memory")
#define rmb() asm volatile("lfence" : : : "memory")
#define wmb() asm volatile("sfence" : : : "memory")

#endif /* CONFIG_X86_32 */

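/* asm-generic/barrier.h supplies defaults for everything not defined here. */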
#include <asm-generic/barrier.h>

#endif