#ifndef _ASM_UM_BARRIER_H_
#define _ASM_UM_BARRIER_H_

#include <asm/asm.h>
#include <asm/segment.h>
#include <asm/cpufeatures.h>
#include <asm/cmpxchg.h>
#include <asm/nops.h>

#include <linux/kernel.h>
#include <linux/irqflags.h>

/*
 * Force strict CPU ordering.
 * And yes, this is required on UP too when we're talking
 * to devices.
 */
#ifdef CONFIG_X86_32

#define mb() alternative("lock; addl $0,0(%%esp)", "mfence", X86_FEATURE_XMM2)
#define rmb() alternative("lock; addl $0,0(%%esp)", "lfence", X86_FEATURE_XMM2)
#define wmb() alternative("lock; addl $0,0(%%esp)", "sfence", X86_FEATURE_XMM)

#else /* CONFIG_X86_32 */

#define mb() asm volatile("mfence" : : : "memory")
#define rmb() asm volatile("lfence" : : : "memory")
#define wmb() asm volatile("sfence" : : : "memory")

#endif /* CONFIG_X86_32 */
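
/*
 * Illustrative sketch, not part of the original header: a typical
 * producer-side publish pattern where wmb() orders the payload store
 * before the flag store that makes it visible to another observer
 * (another CPU, or a device). The function and parameter names are
 * hypothetical, for illustration only; the reader side would pair
 * this with rmb() between loading the flag and loading the payload.
 */
static inline void __example_publish(volatile unsigned int *data,
				     volatile unsigned int *ready)
{
	*data = 42;	/* write the payload first */
	wmb();		/* order the payload store before the flag store */
	*ready = 1;	/* observer may now see the flag and trust *data */
}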

#ifdef CONFIG_X86_PPRO_FENCE
#define dma_rmb() rmb()
#else /* CONFIG_X86_PPRO_FENCE */
#define dma_rmb() barrier()
#endif /* CONFIG_X86_PPRO_FENCE */
#define dma_wmb() barrier()
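
/*
 * Illustrative sketch, not part of the original header: dma_rmb() is
 * the cheaper barrier for ordering reads from coherent DMA memory,
 * e.g. checking a descriptor's status field before trusting the
 * payload the device wrote along with it. The descriptor layout and
 * names below are hypothetical.
 */
struct __example_dma_desc {
	unsigned int status;	/* set by the device once data is valid */
	unsigned int data;
};

static inline int __example_dma_poll(volatile struct __example_dma_desc *desc)
{
	if (!desc->status)
		return -1;	/* device has not completed this descriptor */
	dma_rmb();		/* read status before the payload it guards */
	return desc->data;	/* safe to consume after the barrier */
}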

#include <asm-generic/barrier.h>

#endif