// SPDX-License-Identifier: GPL-2.0
/*
 * atomic32.c: 32-bit atomic_t implementation
 *
 * Copyright (C) 2004 Keith M Wesolowski
 * Copyright (C) 2007 Kyle McMartin
 *
 * Based on asm-parisc/atomic.h Copyright (C) 2000 Philipp Rumpf
 */

#include <linux/atomic.h>
#include <linux/spinlock.h>
#include <linux/module.h>

#ifdef CONFIG_SMP
#define ATOMIC_HASH_SIZE	4
#define ATOMIC_HASH(a)	(&__atomic_hash[(((unsigned long)a)>>8) & (ATOMIC_HASH_SIZE-1)])

spinlock_t __atomic_hash[ATOMIC_HASH_SIZE] = {
	[0 ... (ATOMIC_HASH_SIZE-1)] = __SPIN_LOCK_UNLOCKED(__atomic_hash)
};

#else /* SMP */

static DEFINE_SPINLOCK(dummy);
#define ATOMIC_HASH_SIZE	1
#define ATOMIC_HASH(a)		(&dummy)

#endif /* SMP */
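/*
 * Design note: 32-bit SPARC (pre-V9) has no compare-and-swap
 * instruction, only ldstub/swap, so atomic_t is emulated with the
 * small hash of spinlocks above.  ATOMIC_HASH() indexes the table
 * with address bits [9:8] of the word: an atomic_t at ...0x300, say,
 * hashes to __atomic_hash[3], so atomics on different words usually
 * take different locks and can proceed in parallel on SMP.
 */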
#define ATOMIC_FETCH_OP(op, c_op)					\
int atomic_fetch_##op(int i, atomic_t *v)				\
{									\
	int ret;							\
	unsigned long flags;						\
	spin_lock_irqsave(ATOMIC_HASH(v), flags);			\
									\
	ret = v->counter;						\
	v->counter c_op i;						\
									\
	spin_unlock_irqrestore(ATOMIC_HASH(v), flags);			\
	return ret;							\
}									\
EXPORT_SYMBOL(atomic_fetch_##op);
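/*
 * For reference, ATOMIC_FETCH_OP(add, +=) below expands to roughly:
 *
 *	int atomic_fetch_add(int i, atomic_t *v)
 *	{
 *		int ret;
 *		unsigned long flags;
 *
 *		spin_lock_irqsave(ATOMIC_HASH(v), flags);
 *		ret = v->counter;
 *		v->counter += i;
 *		spin_unlock_irqrestore(ATOMIC_HASH(v), flags);
 *		return ret;
 *	}
 *
 * i.e. it returns the value the counter held *before* the operation.
 */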

#define ATOMIC_OP_RETURN(op, c_op)					\
int atomic_##op##_return(int i, atomic_t *v)				\
{									\
	int ret;							\
	unsigned long flags;						\
	spin_lock_irqsave(ATOMIC_HASH(v), flags);			\
									\
	ret = (v->counter c_op i);					\
									\
	spin_unlock_irqrestore(ATOMIC_HASH(v), flags);			\
	return ret;							\
}									\
EXPORT_SYMBOL(atomic_##op##_return);

ATOMIC_OP_RETURN(add, +=)

ATOMIC_FETCH_OP(add, +=)
ATOMIC_FETCH_OP(and, &=)
ATOMIC_FETCH_OP(or, |=)
ATOMIC_FETCH_OP(xor, ^=)

#undef ATOMIC_FETCH_OP
#undef ATOMIC_OP_RETURN
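/*
 * The remaining atomic_*() operations (atomic_add(), atomic_inc(),
 * atomic_sub_and_test(), ...) are presumably derived from the
 * *_return() and fetch_*() primitives above in the arch's atomic
 * header; only the lock-taking primitives need to live here.
 */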

int atomic_xchg(atomic_t *v, int new)
{
	int ret;
	unsigned long flags;

	spin_lock_irqsave(ATOMIC_HASH(v), flags);
	ret = v->counter;
	v->counter = new;
	spin_unlock_irqrestore(ATOMIC_HASH(v), flags);
	return ret;
}
EXPORT_SYMBOL(atomic_xchg);

int atomic_cmpxchg(atomic_t *v, int old, int new)
{
	int ret;
	unsigned long flags;

	spin_lock_irqsave(ATOMIC_HASH(v), flags);
	ret = v->counter;
	if (likely(ret == old))
		v->counter = new;

	spin_unlock_irqrestore(ATOMIC_HASH(v), flags);
	return ret;
}
EXPORT_SYMBOL(atomic_cmpxchg);
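/*
 * Illustrative usage (not from the original file): arbitrary
 * read-modify-write operations can be built from atomic_cmpxchg()
 * with a retry loop, e.g. an increment that saturates at INT_MAX:
 *
 *	int old, new;
 *
 *	do {
 *		old = atomic_read(v);
 *		new = (old == INT_MAX) ? old : old + 1;
 *	} while (atomic_cmpxchg(v, old, new) != old);
 */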

int atomic_fetch_add_unless(atomic_t *v, int a, int u)
{
	int ret;
	unsigned long flags;

	spin_lock_irqsave(ATOMIC_HASH(v), flags);
	ret = v->counter;
	if (ret != u)
		v->counter += a;
	spin_unlock_irqrestore(ATOMIC_HASH(v), flags);
	return ret;
}
EXPORT_SYMBOL(atomic_fetch_add_unless);
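/*
 * atomic_fetch_add_unless() returns the old value whether or not the
 * add happened; the generic atomic_add_unless(), which returns true
 * iff it added, is built on top as
 * "atomic_fetch_add_unless(v, a, u) != u".
 */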

/*
 * Atomic operations are already serializing on this implementation:
 * even atomic_set() must take the hashed lock, so that a plain store
 * cannot land in the middle of a locked read-modify-write of the same
 * counter.
 */
void atomic_set(atomic_t *v, int i)
{
	unsigned long flags;

	spin_lock_irqsave(ATOMIC_HASH(v), flags);
	v->counter = i;
	spin_unlock_irqrestore(ATOMIC_HASH(v), flags);
}
EXPORT_SYMBOL(atomic_set);

unsigned long ___set_bit(unsigned long *addr, unsigned long mask)
{
	unsigned long old, flags;

	spin_lock_irqsave(ATOMIC_HASH(addr), flags);
	old = *addr;
	*addr = old | mask;
	spin_unlock_irqrestore(ATOMIC_HASH(addr), flags);

	return old & mask;
}
EXPORT_SYMBOL(___set_bit);

unsigned long ___clear_bit(unsigned long *addr, unsigned long mask)
{
	unsigned long old, flags;

	spin_lock_irqsave(ATOMIC_HASH(addr), flags);
	old = *addr;
	*addr = old & ~mask;
	spin_unlock_irqrestore(ATOMIC_HASH(addr), flags);

	return old & mask;
}
EXPORT_SYMBOL(___clear_bit);

unsigned long ___change_bit(unsigned long *addr, unsigned long mask)
{
	unsigned long old, flags;

	spin_lock_irqsave(ATOMIC_HASH(addr), flags);
	old = *addr;
	*addr = old ^ mask;
	spin_unlock_irqrestore(ATOMIC_HASH(addr), flags);

	return old & mask;
}
EXPORT_SYMBOL(___change_bit);
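/*
 * The ___*_bit() helpers back the arch's test_and_set_bit() family:
 * the caller has already turned a bit number into an aligned word
 * pointer plus an in-word mask, and the "old & mask" return value is
 * nonzero iff the bit was previously set.
 */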

unsigned long __cmpxchg_u32(volatile u32 *ptr, u32 old, u32 new)
{
	unsigned long flags;
	u32 prev;

	spin_lock_irqsave(ATOMIC_HASH(ptr), flags);
	if ((prev = *ptr) == old)
		*ptr = new;
	spin_unlock_irqrestore(ATOMIC_HASH(ptr), flags);

	return (unsigned long)prev;
}
EXPORT_SYMBOL(__cmpxchg_u32);

u64 __cmpxchg_u64(u64 *ptr, u64 old, u64 new)
{
	unsigned long flags;
	u64 prev;

	spin_lock_irqsave(ATOMIC_HASH(ptr), flags);
	if ((prev = *ptr) == old)
		*ptr = new;
	spin_unlock_irqrestore(ATOMIC_HASH(ptr), flags);

	return prev;
}
EXPORT_SYMBOL(__cmpxchg_u64);

unsigned long __xchg_u32(volatile u32 *ptr, u32 new)
{
	unsigned long flags;
	u32 prev;

	spin_lock_irqsave(ATOMIC_HASH(ptr), flags);
	prev = *ptr;
	*ptr = new;
	spin_unlock_irqrestore(ATOMIC_HASH(ptr), flags);

	return (unsigned long)prev;
}
EXPORT_SYMBOL(__xchg_u32);
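/*
 * __xchg_u32(), __cmpxchg_u32() and __cmpxchg_u64() are presumably
 * the backends for the xchg()/cmpxchg()/cmpxchg64() macros in the
 * arch's cmpxchg header.  Atomicity relies on every writer of these
 * words going through the same hashed locks; mixing in plain stores
 * from elsewhere would break it.
 */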