/*
 * Atomic operations that C can't guarantee us.  Useful for
 * resource counting etc.
 *
 * But use these as seldom as possible since they are much slower
 * than regular operations.
 *
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 1996, 97, 99, 2000, 03, 04, 06 by Ralf Baechle
 */
#ifndef _ASM_ATOMIC_H
#define _ASM_ATOMIC_H

#include <linux/irqflags.h>
#include <linux/types.h>
#include <asm/barrier.h>
#include <asm/compiler.h>
#include <asm/cpu-features.h>
#include <asm/cmpxchg.h>
#include <asm/llsc.h>
#include <asm/sync.h>
#include <asm/war.h>

#define ATOMIC_OPS(pfx, type)						\
static __always_inline type arch_##pfx##_read(const pfx##_t *v)	\
{									\
	return READ_ONCE(v->counter);					\
}									\
									\
static __always_inline void arch_##pfx##_set(pfx##_t *v, type i)	\
{									\
	WRITE_ONCE(v->counter, i);					\
}									\
									\
static __always_inline type						\
arch_##pfx##_cmpxchg(pfx##_t *v, type o, type n)			\
{									\
	return arch_cmpxchg(&v->counter, o, n);				\
}									\
									\
static __always_inline type arch_##pfx##_xchg(pfx##_t *v, type n)	\
{									\
	return arch_xchg(&v->counter, n);				\
}

ATOMIC_OPS(atomic, int)

#ifdef CONFIG_64BIT
# define ATOMIC64_INIT(i)	{ (i) }
ATOMIC_OPS(atomic64, s64)
#endif
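
/*
 * Illustrative use only (hypothetical caller, not part of this header):
 * generic code normally reaches these operations through the atomic_*()
 * wrappers in <linux/atomic.h> rather than calling arch_atomic_*() directly.
 *
 *	static atomic_t refs = ATOMIC_INIT(1);
 *
 *	void get_ref(void)
 *	{
 *		arch_atomic_add(1, &refs);	/* generated by ATOMIC_OP() below *​/
 *	}
 */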

#define ATOMIC_OP(pfx, op, type, c_op, asm_op, ll, sc)			\
static __inline__ void arch_##pfx##_##op(type i, pfx##_t * v)		\
{									\
	type temp;							\
									\
	if (!kernel_uses_llsc) {					\
		unsigned long flags;					\
									\
		raw_local_irq_save(flags);				\
		v->counter c_op i;					\
		raw_local_irq_restore(flags);				\
		return;							\
	}								\
									\
	__asm__ __volatile__(						\
	"	.set	push				\n"		\
	"	.set	" MIPS_ISA_LEVEL "		\n"		\
	"	" __SYNC(full, loongson3_war) "		\n"		\
	"1:	" #ll "	%0, %1		# " #pfx "_" #op "\n"		\
	"	" #asm_op " %0, %2			\n"		\
	"	" #sc "	%0, %1				\n"		\
	"\t" __SC_BEQZ "%0, 1b				\n"		\
	"	.set	pop				\n"		\
	: "=&r" (temp), "+" GCC_OFF_SMALL_ASM() (v->counter)		\
	: "Ir" (i) : __LLSC_CLOBBER);					\
}
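
/*
 * Illustrative expansion (approximate, not emitted verbatim):
 * ATOMIC_OPS(atomic, add, int, +=, addu, ll, sc) below turns the asm
 * template above into arch_atomic_add(), whose LL/SC retry loop is roughly:
 *
 *	1: ll   temp, v->counter	# load-linked old value
 *	   addu temp, temp, i		# apply the operation
 *	   sc   temp, v->counter	# store iff no intervening write;
 *	   beqz temp, 1b		#   temp = success flag, retry on 0
 */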

#define ATOMIC_OP_RETURN(pfx, op, type, c_op, asm_op, ll, sc)		\
static __inline__ type							\
arch_##pfx##_##op##_return_relaxed(type i, pfx##_t * v)			\
{									\
	type temp, result;						\
									\
	if (!kernel_uses_llsc) {					\
		unsigned long flags;					\
									\
		raw_local_irq_save(flags);				\
		result = v->counter;					\
		result c_op i;						\
		v->counter = result;					\
		raw_local_irq_restore(flags);				\
		return result;						\
	}								\
									\
	__asm__ __volatile__(						\
	"	.set	push				\n"		\
	"	.set	" MIPS_ISA_LEVEL "		\n"		\
	"	" __SYNC(full, loongson3_war) "		\n"		\
	"1:	" #ll "	%1, %2		# " #pfx "_" #op "_return\n"	\
	"	" #asm_op " %0, %1, %3			\n"		\
	"	" #sc "	%0, %2				\n"		\
	"\t" __SC_BEQZ "%0, 1b				\n"		\
	"	" #asm_op " %0, %1, %3			\n"		\
	"	.set	pop				\n"		\
	: "=&r" (result), "=&r" (temp),					\
	  "+" GCC_OFF_SMALL_ASM() (v->counter)				\
	: "Ir" (i) : __LLSC_CLOBBER);					\
									\
	return result;							\
}
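
/*
 * Note on the loop above: sc writes its success flag into %0 (result),
 * clobbering the freshly computed value, so the trailing asm_op after the
 * branch recomputes the return value from %1 (temp), which still holds
 * the value loaded by ll.
 */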

#define ATOMIC_FETCH_OP(pfx, op, type, c_op, asm_op, ll, sc)		\
static __inline__ type							\
arch_##pfx##_fetch_##op##_relaxed(type i, pfx##_t * v)			\
{									\
	type temp, result;						\
									\
	if (!kernel_uses_llsc) {					\
		unsigned long flags;					\
									\
		raw_local_irq_save(flags);				\
		result = v->counter;					\
		v->counter c_op i;					\
		raw_local_irq_restore(flags);				\
		return result;						\
	}								\
									\
	__asm__ __volatile__(						\
	"	.set	push				\n"		\
	"	.set	" MIPS_ISA_LEVEL "		\n"		\
	"	" __SYNC(full, loongson3_war) "		\n"		\
	"1:	" #ll "	%1, %2		# " #pfx "_fetch_" #op "\n"	\
	"	" #asm_op " %0, %1, %3			\n"		\
	"	" #sc "	%0, %2				\n"		\
	"\t" __SC_BEQZ "%0, 1b				\n"		\
	"	.set	pop				\n"		\
	"	move	%0, %1				\n"		\
	: "=&r" (result), "=&r" (temp),					\
	  "+" GCC_OFF_SMALL_ASM() (v->counter)				\
	: "Ir" (i) : __LLSC_CLOBBER);					\
									\
	return result;							\
}
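
/*
 * Note on the loop above: the fetch_*() variants return the *old* value,
 * so once the sc has succeeded the final "move %0, %1" copies the value
 * loaded by ll (temp) into result.
 */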

#undef ATOMIC_OPS
#define ATOMIC_OPS(pfx, op, type, c_op, asm_op, ll, sc)			\
	ATOMIC_OP(pfx, op, type, c_op, asm_op, ll, sc)			\
	ATOMIC_OP_RETURN(pfx, op, type, c_op, asm_op, ll, sc)		\
	ATOMIC_FETCH_OP(pfx, op, type, c_op, asm_op, ll, sc)

ATOMIC_OPS(atomic, add, int, +=, addu, ll, sc)
ATOMIC_OPS(atomic, sub, int, -=, subu, ll, sc)

#define arch_atomic_add_return_relaxed	arch_atomic_add_return_relaxed
#define arch_atomic_sub_return_relaxed	arch_atomic_sub_return_relaxed
#define arch_atomic_fetch_add_relaxed	arch_atomic_fetch_add_relaxed
#define arch_atomic_fetch_sub_relaxed	arch_atomic_fetch_sub_relaxed
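
/*
 * Only the _relaxed forms are implemented here; the self-#defines above
 * advertise them to the generic layer in <linux/atomic.h>, which builds
 * the acquire/release/fully-ordered variants from the relaxed ones plus
 * the barriers in <asm/barrier.h>.
 */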

#ifdef CONFIG_64BIT
ATOMIC_OPS(atomic64, add, s64, +=, daddu, lld, scd)
ATOMIC_OPS(atomic64, sub, s64, -=, dsubu, lld, scd)
# define arch_atomic64_add_return_relaxed	arch_atomic64_add_return_relaxed
# define arch_atomic64_sub_return_relaxed	arch_atomic64_sub_return_relaxed
# define arch_atomic64_fetch_add_relaxed	arch_atomic64_fetch_add_relaxed
# define arch_atomic64_fetch_sub_relaxed	arch_atomic64_fetch_sub_relaxed
#endif /* CONFIG_64BIT */

#undef ATOMIC_OPS
#define ATOMIC_OPS(pfx, op, type, c_op, asm_op, ll, sc)			\
	ATOMIC_OP(pfx, op, type, c_op, asm_op, ll, sc)			\
	ATOMIC_FETCH_OP(pfx, op, type, c_op, asm_op, ll, sc)

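/*
 * The bitwise ops only come in plain and fetch_*() flavours: the kernel's
 * atomic API has no atomic_and_return() and friends, so ATOMIC_OP_RETURN()
 * is dropped from the template here.
 */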
ATOMIC_OPS(atomic, and, int, &=, and, ll, sc)
ATOMIC_OPS(atomic, or, int, |=, or, ll, sc)
ATOMIC_OPS(atomic, xor, int, ^=, xor, ll, sc)

#define arch_atomic_fetch_and_relaxed	arch_atomic_fetch_and_relaxed
#define arch_atomic_fetch_or_relaxed	arch_atomic_fetch_or_relaxed
#define arch_atomic_fetch_xor_relaxed	arch_atomic_fetch_xor_relaxed

#ifdef CONFIG_64BIT
ATOMIC_OPS(atomic64, and, s64, &=, and, lld, scd)
ATOMIC_OPS(atomic64, or, s64, |=, or, lld, scd)
ATOMIC_OPS(atomic64, xor, s64, ^=, xor, lld, scd)
# define arch_atomic64_fetch_and_relaxed	arch_atomic64_fetch_and_relaxed
# define arch_atomic64_fetch_or_relaxed		arch_atomic64_fetch_or_relaxed
# define arch_atomic64_fetch_xor_relaxed	arch_atomic64_fetch_xor_relaxed
#endif

#undef ATOMIC_OPS
#undef ATOMIC_FETCH_OP
#undef ATOMIC_OP_RETURN
#undef ATOMIC_OP

/*
 * atomic_sub_if_positive - conditionally subtract integer from atomic variable
 * @i: integer value to subtract
 * @v: pointer of type atomic_t
 *
 * Atomically test @v and subtract @i from it if @v is greater than or
 * equal to @i.  The function returns the old value of @v minus @i.
 */
#define ATOMIC_SIP_OP(pfx, type, op, ll, sc)				\
static __inline__ type arch_##pfx##_sub_if_positive(type i, pfx##_t * v) \
{									\
	type temp, result;						\
									\
	smp_mb__before_atomic();					\
									\
	if (!kernel_uses_llsc) {					\
		unsigned long flags;					\
									\
		raw_local_irq_save(flags);				\
		result = v->counter;					\
		result -= i;						\
		if (result >= 0)					\
			v->counter = result;				\
		raw_local_irq_restore(flags);				\
		smp_mb__after_atomic();					\
		return result;						\
	}								\
									\
	__asm__ __volatile__(						\
	"	.set	push				\n"		\
	"	.set	" MIPS_ISA_LEVEL "		\n"		\
	"	" __SYNC(full, loongson3_war) "		\n"		\
	"1:	" #ll "	%1, %2	# " #pfx "_sub_if_positive\n"		\
	"	.set	pop				\n"		\
	"	" #op "	%0, %1, %3			\n"		\
	"	move	%1, %0				\n"		\
	"	bltz	%0, 2f				\n"		\
	"	.set	push				\n"		\
	"	.set	" MIPS_ISA_LEVEL "		\n"		\
	"	" #sc "	%1, %2				\n"		\
	"	" __SC_BEQZ "%1, 1b			\n"		\
	"2:	" __SYNC(full, loongson3_war) "		\n"		\
	"	.set	pop				\n"		\
	: "=&r" (result), "=&r" (temp),					\
	  "+" GCC_OFF_SMALL_ASM() (v->counter)				\
	: "Ir" (i)							\
	: __LLSC_CLOBBER);						\
									\
	/*								\
	 * In the Loongson3 workaround case we already have a		\
	 * completion barrier at 2: above, which is needed due to the	\
	 * bltz that can branch to code outside of the LL/SC loop.  As	\
	 * such, we don't need to emit another barrier here.		\
	 */								\
	if (__SYNC_loongson3_war == 0)					\
		smp_mb__after_atomic();					\
									\
	return result;							\
}

ATOMIC_SIP_OP(atomic, int, subu, ll, sc)
#define arch_atomic_dec_if_positive(v)	arch_atomic_sub_if_positive(1, v)

#ifdef CONFIG_64BIT
ATOMIC_SIP_OP(atomic64, s64, dsubu, lld, scd)
#define arch_atomic64_dec_if_positive(v) arch_atomic64_sub_if_positive(1, v)
#endif
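
/*
 * Illustrative use (hypothetical caller, not from this file): the
 * dec_if_positive() form suits semaphore-like counters that must never
 * drop below zero.
 *
 *	if (arch_atomic_dec_if_positive(&sem_count) < 0)
 *		slow_path_wait();	/* hypothetical slow path *​/
 *
 * The return value is the decremented count; it is only stored back to
 * the counter when it is non-negative.
 */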

#undef ATOMIC_SIP_OP

#endif /* _ASM_ATOMIC_H */