#ifndef ASM_X86_CMPXCHG_H
#define ASM_X86_CMPXCHG_H

#include <linux/compiler.h>
#include <asm/alternative.h> /* Provides LOCK_PREFIX */

#define __HAVE_ARCH_CMPXCHG 1

/*
 * Non-existent functions to indicate usage errors at link time
 * (or at compile time, if the compiler implements __compiletime_error()).
 */
extern void __xchg_wrong_size(void)
	__compiletime_error("Bad argument size for xchg");
extern void __cmpxchg_wrong_size(void)
	__compiletime_error("Bad argument size for cmpxchg");
extern void __xadd_wrong_size(void)
	__compiletime_error("Bad argument size for xadd");
extern void __add_wrong_size(void)
	__compiletime_error("Bad argument size for add");

/*
 * Constants for operation sizes. On 32-bit, the 64-bit size is set to
 * -1 because sizeof will never return -1, thereby making those switch
 * case statements guaranteed dead code which the compiler will
 * eliminate, and allowing the "missing symbol in the default case" to
 * indicate a usage error.
 */
#define __X86_CASE_B	1
#define __X86_CASE_W	2
#define __X86_CASE_L	4
#ifdef CONFIG_64BIT
#define __X86_CASE_Q	8
#else
#define __X86_CASE_Q	-1		/* sizeof will never return -1 */
#endif
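
/*
 * Illustrative sketch (not part of the original header): a call site whose
 * operand size matches none of the cases above falls through to the default
 * branch, so the build fails with the __compiletime_error() message instead
 * of silently generating wrong code. The 3-byte type below is hypothetical:
 *
 *	struct { char b[3]; } odd, val;
 *	xchg(&odd, val);	// sizeof == 3: no case matches, so
 *				// __xchg_wrong_size() aborts the build
 */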

/*
 * An exchange-type operation, which takes a value and a pointer, and
 * returns the old value.
 */
#define __xchg_op(ptr, arg, op, lock)					\
	({								\
		__typeof__ (*(ptr)) __ret = (arg);			\
		switch (sizeof(*(ptr))) {				\
		case __X86_CASE_B:					\
			asm volatile (lock #op "b %b0, %1\n"		\
				      : "+q" (__ret), "+m" (*(ptr))	\
				      : : "memory", "cc");		\
			break;						\
		case __X86_CASE_W:					\
			asm volatile (lock #op "w %w0, %1\n"		\
				      : "+r" (__ret), "+m" (*(ptr))	\
				      : : "memory", "cc");		\
			break;						\
		case __X86_CASE_L:					\
			asm volatile (lock #op "l %0, %1\n"		\
				      : "+r" (__ret), "+m" (*(ptr))	\
				      : : "memory", "cc");		\
			break;						\
		case __X86_CASE_Q:					\
			asm volatile (lock #op "q %q0, %1\n"		\
				      : "+r" (__ret), "+m" (*(ptr))	\
				      : : "memory", "cc");		\
			break;						\
		default:						\
			__ ## op ## _wrong_size();			\
		}							\
		__ret;							\
	})

/*
 * Note: no "lock" prefix even on SMP: xchg always implies lock anyway.
 * Since this is generally used to protect other memory information, we
 * use "asm volatile" and "memory" clobbers to prevent gcc from moving
 * information around.
 */
#define xchg(ptr, v)	__xchg_op((ptr), (v), xchg, "")
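
/*
 * Example usage (illustrative sketch, not part of the original header;
 * 'dev', 'flags' and FLAG_PENDING are hypothetical names): atomically
 * replace a word and act on its previous contents.
 *
 *	unsigned long old;
 *
 *	old = xchg(&dev->flags, 0);	// store 0, fetch old value atomically
 *	if (old & FLAG_PENDING)
 *		handle_pending(dev);
 */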

/*
 * Atomic compare and exchange.  Compare OLD with MEM, if identical,
 * store NEW in MEM.  Return the initial value in MEM.  Success is
 * indicated by comparing RETURN with OLD.
 */
#define __raw_cmpxchg(ptr, old, new, size, lock)			\
({									\
	__typeof__(*(ptr)) __ret;					\
	__typeof__(*(ptr)) __old = (old);				\
	__typeof__(*(ptr)) __new = (new);				\
	switch (size) {							\
	case __X86_CASE_B:						\
	{								\
		volatile u8 *__ptr = (volatile u8 *)(ptr);		\
		asm volatile(lock "cmpxchgb %2,%1"			\
			     : "=a" (__ret), "+m" (*__ptr)		\
			     : "q" (__new), "0" (__old)			\
			     : "memory");				\
		break;							\
	}								\
	case __X86_CASE_W:						\
	{								\
		volatile u16 *__ptr = (volatile u16 *)(ptr);		\
		asm volatile(lock "cmpxchgw %2,%1"			\
			     : "=a" (__ret), "+m" (*__ptr)		\
			     : "r" (__new), "0" (__old)			\
			     : "memory");				\
		break;							\
	}								\
	case __X86_CASE_L:						\
	{								\
		volatile u32 *__ptr = (volatile u32 *)(ptr);		\
		asm volatile(lock "cmpxchgl %2,%1"			\
			     : "=a" (__ret), "+m" (*__ptr)		\
			     : "r" (__new), "0" (__old)			\
			     : "memory");				\
		break;							\
	}								\
	case __X86_CASE_Q:						\
	{								\
		volatile u64 *__ptr = (volatile u64 *)(ptr);		\
		asm volatile(lock "cmpxchgq %2,%1"			\
			     : "=a" (__ret), "+m" (*__ptr)		\
			     : "r" (__new), "0" (__old)			\
			     : "memory");				\
		break;							\
	}								\
	default:							\
		__cmpxchg_wrong_size();					\
	}								\
	__ret;								\
})

#define __cmpxchg(ptr, old, new, size)					\
	__raw_cmpxchg((ptr), (old), (new), (size), LOCK_PREFIX)

#define __sync_cmpxchg(ptr, old, new, size)				\
	__raw_cmpxchg((ptr), (old), (new), (size), "lock; ")

#define __cmpxchg_local(ptr, old, new, size)				\
	__raw_cmpxchg((ptr), (old), (new), (size), "")

#ifdef CONFIG_X86_32
# include <asm/cmpxchg_32.h>
#else
# include <asm/cmpxchg_64.h>
#endif

#define cmpxchg(ptr, old, new)						\
	__cmpxchg(ptr, old, new, sizeof(*(ptr)))

#define sync_cmpxchg(ptr, old, new)					\
	__sync_cmpxchg(ptr, old, new, sizeof(*(ptr)))

#define cmpxchg_local(ptr, old, new)					\
	__cmpxchg_local(ptr, old, new, sizeof(*(ptr)))
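
/*
 * Example usage (illustrative sketch; 'counter' is a hypothetical shared
 * u32): the classic compare-and-swap retry loop built on cmpxchg().
 *
 *	u32 old, new;
 *
 *	do {
 *		old = ACCESS_ONCE(counter);
 *		new = old + 1;
 *	} while (cmpxchg(&counter, old, new) != old);
 */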

/*
 * xadd() adds "inc" to "*ptr" and atomically returns the previous
 * value of "*ptr".
 *
 * xadd() is locked when multiple CPUs are online
 * xadd_sync() is always locked
 * xadd_local() is never locked
 */
#define __xadd(ptr, inc, lock)	__xchg_op((ptr), (inc), xadd, lock)
#define xadd(ptr, inc)		__xadd((ptr), (inc), LOCK_PREFIX)
#define xadd_sync(ptr, inc)	__xadd((ptr), (inc), "lock; ")
#define xadd_local(ptr, inc)	__xadd((ptr), (inc), "")
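
/*
 * Example usage (illustrative sketch; 'next_ticket' is a hypothetical
 * shared u32): fetch-and-add returning the value before the increment,
 * as used in ticket-lock style schemes.
 *
 *	u32 my_ticket = xadd(&next_ticket, 1);	// old value; counter is now +1
 */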

#define __add(ptr, inc, lock)						\
	({								\
		__typeof__ (*(ptr)) __ret = (inc);			\
		switch (sizeof(*(ptr))) {				\
		case __X86_CASE_B:					\
			asm volatile (lock "addb %b1, %0\n"		\
				      : "+m" (*(ptr)) : "qi" (inc)	\
				      : "memory", "cc");		\
			break;						\
		case __X86_CASE_W:					\
			asm volatile (lock "addw %w1, %0\n"		\
				      : "+m" (*(ptr)) : "ri" (inc)	\
				      : "memory", "cc");		\
			break;						\
		case __X86_CASE_L:					\
			asm volatile (lock "addl %1, %0\n"		\
				      : "+m" (*(ptr)) : "ri" (inc)	\
				      : "memory", "cc");		\
			break;						\
		case __X86_CASE_Q:					\
			asm volatile (lock "addq %1, %0\n"		\
				      : "+m" (*(ptr)) : "ri" (inc)	\
				      : "memory", "cc");		\
			break;						\
		default:						\
			__add_wrong_size();				\
		}							\
		__ret;							\
	})

/*
 * add_*() adds "inc" to "*ptr"
 *
 * __add() takes a lock prefix
 * add_smp() is locked when multiple CPUs are online
 * add_sync() is always locked
 */
#define add_smp(ptr, inc)	__add((ptr), (inc), LOCK_PREFIX)
#define add_sync(ptr, inc)	__add((ptr), (inc), "lock; ")
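
/*
 * Example usage (illustrative sketch; 'stats.events' is a hypothetical
 * field): bump a counter when the old value is not needed.
 *
 *	add_smp(&stats.events, 1);	// LOCK-prefixed only when SMP is possible
 */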

#define __cmpxchg_double(pfx, p1, p2, o1, o2, n1, n2)			\
({									\
	bool __ret;							\
	__typeof__(*(p1)) __old1 = (o1), __new1 = (n1);			\
	__typeof__(*(p2)) __old2 = (o2), __new2 = (n2);			\
	BUILD_BUG_ON(sizeof(*(p1)) != sizeof(long));			\
	BUILD_BUG_ON(sizeof(*(p2)) != sizeof(long));			\
	VM_BUG_ON((unsigned long)(p1) % (2 * sizeof(long)));		\
	VM_BUG_ON((unsigned long)((p1) + 1) != (unsigned long)(p2));	\
	asm volatile(pfx "cmpxchg%c4b %2; sete %0"			\
		     : "=a" (__ret), "+d" (__old2),			\
		       "+m" (*(p1)), "+m" (*(p2))			\
		     : "i" (2 * sizeof(long)), "a" (__old1),		\
		       "b" (__new1), "c" (__new2));			\
	__ret;								\
})

#define cmpxchg_double(p1, p2, o1, o2, n1, n2) \
	__cmpxchg_double(LOCK_PREFIX, p1, p2, o1, o2, n1, n2)

#define cmpxchg_double_local(p1, p2, o1, o2, n1, n2) \
	__cmpxchg_double(, p1, p2, o1, o2, n1, n2)
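
/*
 * Example usage (illustrative sketch, not part of the original header):
 * atomically replace an adjacent pointer/sequence pair, similar to what
 * the SLUB allocator does for its freelist. The two words must be
 * contiguous and aligned to 2 * sizeof(long); all names are hypothetical.
 *
 *	struct {
 *		void *ptr;
 *		unsigned long seq;
 *	} __aligned(2 * sizeof(long)) pair;
 *
 *	if (cmpxchg_double(&pair.ptr, &pair.seq,
 *			   old_ptr, old_seq,
 *			   new_ptr, new_seq))
 *		// both words were updated in a single atomic operation
 */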

#endif /* ASM_X86_CMPXCHG_H */