/*
 * Copyright (C) 2008 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#ifndef ART_RUNTIME_ATOMIC_H_
#define ART_RUNTIME_ATOMIC_H_

#ifdef __clang__
#define ART_HAVE_STDATOMIC 1
#endif

#include <stdint.h>
#if ART_HAVE_STDATOMIC
#include <atomic>
#endif
#include <limits>
#include <vector>

#include "base/logging.h"
#include "base/macros.h"

namespace art {

class Mutex;

#if ART_HAVE_STDATOMIC
template<typename T>
class Atomic : public std::atomic<T> {
 public:
  COMPILE_ASSERT(sizeof(T) == sizeof(std::atomic<T>),
                 std_atomic_size_differs_from_that_of_underlying_type);
  COMPILE_ASSERT(alignof(T) == alignof(std::atomic<T>),
                 std_atomic_alignment_differs_from_that_of_underlying_type);

  Atomic() : std::atomic<T>() { }

  explicit Atomic(T value) : std::atomic<T>(value) { }

  // Load from memory without ordering or synchronization constraints.
  T LoadRelaxed() const {
    return this->load(std::memory_order_relaxed);
  }

  // Load from memory with a total ordering.
  T LoadSequentiallyConsistent() const {
    return this->load(std::memory_order_seq_cst);
  }

  // Store to memory without ordering or synchronization constraints.
  void StoreRelaxed(T desired) {
    this->store(desired, std::memory_order_relaxed);
  }

  // Store to memory with a total ordering.
  void StoreSequentiallyConsistent(T desired) {
    this->store(desired, std::memory_order_seq_cst);
  }

  // Atomically replace the value with the desired value if it matches the expected value.
  // Doesn't imply ordering or synchronization constraints.
  bool CompareExchangeWeakRelaxed(T expected_value, T desired_value) {
    return this->compare_exchange_weak(expected_value, desired_value, std::memory_order_relaxed);
  }

  // Atomically replace the value with the desired value if it matches the expected value. Prior
  // writes made to other memory locations by the thread that did the release become visible in
  // this thread.
  bool CompareExchangeWeakAcquire(T expected_value, T desired_value) {
    return this->compare_exchange_weak(expected_value, desired_value, std::memory_order_acquire);
  }

  // Atomically replace the value with the desired value if it matches the expected value. Prior
  // writes to other memory locations become visible to threads that do a consume or an acquire
  // on the same location.
  bool CompareExchangeWeakRelease(T expected_value, T desired_value) {
    return this->compare_exchange_weak(expected_value, desired_value, std::memory_order_release);
  }

  T FetchAndAddSequentiallyConsistent(const T value) {
    return this->fetch_add(value, std::memory_order_seq_cst);  // Return old value.
  }

  T FetchAndSubSequentiallyConsistent(const T value) {
    return this->fetch_sub(value, std::memory_order_seq_cst);  // Return old value.
  }

  volatile T* Address() {
    return reinterpret_cast<T*>(this);
  }

  static T MaxValue() {
    return std::numeric_limits<T>::max();
  }
};
#else
template<typename T>
class Atomic {
 public:
  Atomic() : value_(0) { }

  explicit Atomic(T value) : value_(value) { }

  // Load from memory without ordering or synchronization constraints.
  T LoadRelaxed() const {
    return value_;
  }

  // Load from memory with a total ordering.
  T LoadSequentiallyConsistent() const;

  // Store to memory without ordering or synchronization constraints.
  void StoreRelaxed(T desired) {
    value_ = desired;
  }

  // Store to memory with a total ordering.
  void StoreSequentiallyConsistent(T desired);

  // Atomically replace the value with the desired value if it matches the expected value.
  // Doesn't imply ordering or synchronization constraints.
  bool CompareExchangeWeakRelaxed(T expected_value, T desired_value) {
    // TODO: make this relaxed.
    return __sync_bool_compare_and_swap(&value_, expected_value, desired_value);
  }

  // Atomically replace the value with the desired value if it matches the expected value. Prior
  // writes made to other memory locations by the thread that did the release become visible in
  // this thread.
  bool CompareExchangeWeakAcquire(T expected_value, T desired_value) {
    // TODO: make this acquire.
    return __sync_bool_compare_and_swap(&value_, expected_value, desired_value);
  }

  // Atomically replace the value with the desired value if it matches the expected value. Prior
  // writes to other memory locations become visible to threads that do a consume or an acquire
  // on the same location.
  bool CompareExchangeWeakRelease(T expected_value, T desired_value) {
    // TODO: make this release.
    return __sync_bool_compare_and_swap(&value_, expected_value, desired_value);
  }

  volatile T* Address() {
    return &value_;
  }

  T FetchAndAddSequentiallyConsistent(const T value) {
    return __sync_fetch_and_add(&value_, value);  // Return old value.
  }

  T FetchAndSubSequentiallyConsistent(const T value) {
    return __sync_fetch_and_sub(&value_, value);  // Return old value.
  }

  T operator++() {  // Prefix operator.
    return __sync_add_and_fetch(&value_, 1);  // Return new value.
  }

  T operator++(int) {  // Postfix operator.
    return __sync_fetch_and_add(&value_, 1);  // Return old value.
  }

  T operator--() {  // Prefix operator.
    return __sync_sub_and_fetch(&value_, 1);  // Return new value.
  }

  T operator--(int) {  // Postfix operator.
    return __sync_fetch_and_sub(&value_, 1);  // Return old value.
  }

  static T MaxValue() {
    return std::numeric_limits<T>::max();
  }

 private:
  T value_;
};
#endif

typedef Atomic<int32_t> AtomicInteger;
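
// A minimal usage sketch (illustrative only; "hit_count" and "RecordHit" are hypothetical names,
// not part of this header). It shows a relaxed compare-and-swap loop that saturates a counter at
// MaxValue(). Note that CompareExchangeWeakRelaxed takes the expected value by value, so on
// failure (including spurious failure of the weak CAS) the loop must reload before retrying.
//
//   AtomicInteger hit_count(0);
//
//   void RecordHit() {
//     int32_t old_value = hit_count.LoadRelaxed();
//     while (old_value != AtomicInteger::MaxValue() &&
//            !hit_count.CompareExchangeWeakRelaxed(old_value, old_value + 1)) {
//       old_value = hit_count.LoadRelaxed();  // Re-read the current value and retry.
//     }
//   }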

// NOTE: Two "quasiatomic" operations on the exact same memory address
// are guaranteed to operate atomically with respect to each other,
// but no guarantees are made about quasiatomic operations mixed with
// non-quasiatomic operations on the same address, nor about
// quasiatomic operations that are performed on partially-overlapping
// memory.
class QuasiAtomic {
#if defined(__mips__) && !defined(__LP64__)
  static constexpr bool kNeedSwapMutexes = true;
#else
  static constexpr bool kNeedSwapMutexes = false;
#endif

 public:
  static void Startup();

  static void Shutdown();

  // Reads the 64-bit value at "addr" without tearing.
  static int64_t Read64(volatile const int64_t* addr) {
    if (!kNeedSwapMutexes) {
      int64_t value;
#if defined(__LP64__)
      value = *addr;
#else
#if defined(__arm__)
#if defined(__ARM_FEATURE_LPAE)
      // With LPAE support (such as Cortex-A15), ldrd is defined not to tear.
      __asm__ __volatile__("@ QuasiAtomic::Read64\n"
                           "ldrd %0, %H0, %1"
                           : "=r" (value)
                           : "m" (*addr));
#else
      // Exclusive loads are defined not to tear, so clearing the exclusive state isn't necessary.
      __asm__ __volatile__("@ QuasiAtomic::Read64\n"
                           "ldrexd %0, %H0, %1"
                           : "=r" (value)
                           : "Q" (*addr));
#endif
#elif defined(__i386__)
      __asm__ __volatile__(
          "movq %1, %0\n"
          : "=x" (value)
          : "m" (*addr));
#else
      LOG(FATAL) << "Unsupported architecture";
#endif
#endif  // defined(__LP64__)
      return value;
    } else {
      return SwapMutexRead64(addr);
    }
  }

  // Writes to the 64-bit value at "addr" without tearing.
  static void Write64(volatile int64_t* addr, int64_t value) {
    if (!kNeedSwapMutexes) {
#if defined(__LP64__)
      *addr = value;
#else
#if defined(__arm__)
#if defined(__ARM_FEATURE_LPAE)
      // If the ARM architecture has LPAE (such as Cortex-A15), strd is defined not to tear.
      __asm__ __volatile__("@ QuasiAtomic::Write64\n"
                           "strd %1, %H1, %0"
                           : "=m" (*addr)
                           : "r" (value));
#else
      // The write is done as a swap so that the cache-line is in the exclusive state for the store.
      int64_t prev;
      int status;
      do {
        __asm__ __volatile__("@ QuasiAtomic::Write64\n"
                             "ldrexd %0, %H0, %2\n"
                             "strexd %1, %3, %H3, %2"
                             : "=&r" (prev), "=&r" (status), "+Q" (*addr)
                             : "r" (value)
                             : "cc");
      } while (UNLIKELY(status != 0));
#endif
#elif defined(__i386__)
      __asm__ __volatile__(
          "movq %1, %0"
          : "=m" (*addr)
          : "x" (value));
#else
      LOG(FATAL) << "Unsupported architecture";
#endif
#endif  // defined(__LP64__)
    } else {
      SwapMutexWrite64(addr, value);
    }
  }

  // Atomically compare the value at "addr" to "old_value"; if they are equal, replace it with
  // "new_value" and return true. Otherwise, don't swap and return false.
  static bool Cas64(int64_t old_value, int64_t new_value, volatile int64_t* addr) {
    if (!kNeedSwapMutexes) {
      return __sync_bool_compare_and_swap(addr, old_value, new_value);
    } else {
      return SwapMutexCas64(old_value, new_value, addr);
    }
  }

  // Does the architecture provide reasonable atomic long operations or do we fall back on mutexes?
  static bool LongAtomicsUseMutexes() {
    return kNeedSwapMutexes;
  }

  static void MembarLoadStore() {
#if defined(__arm__) || defined(__aarch64__)
    __asm__ __volatile__("dmb ish" : : : "memory");
#elif defined(__i386__) || defined(__x86_64__)
    __asm__ __volatile__("" : : : "memory");
#elif defined(__mips__)
    __asm__ __volatile__("sync" : : : "memory");
#else
#error Unexpected architecture
#endif
  }

  static void MembarLoadLoad() {
#if defined(__arm__) || defined(__aarch64__)
    __asm__ __volatile__("dmb ish" : : : "memory");
#elif defined(__i386__) || defined(__x86_64__)
    __asm__ __volatile__("" : : : "memory");
#elif defined(__mips__)
    __asm__ __volatile__("sync" : : : "memory");
#else
#error Unexpected architecture
#endif
  }

  static void MembarStoreStore() {
#if defined(__arm__) || defined(__aarch64__)
    __asm__ __volatile__("dmb ishst" : : : "memory");
#elif defined(__i386__) || defined(__x86_64__)
    __asm__ __volatile__("" : : : "memory");
#elif defined(__mips__)
    __asm__ __volatile__("sync" : : : "memory");
#else
#error Unexpected architecture
#endif
  }

  static void MembarStoreLoad() {
#if defined(__arm__) || defined(__aarch64__)
    __asm__ __volatile__("dmb ish" : : : "memory");
#elif defined(__i386__) || defined(__x86_64__)
    __asm__ __volatile__("mfence" : : : "memory");
#elif defined(__mips__)
    __asm__ __volatile__("sync" : : : "memory");
#else
#error Unexpected architecture
#endif
  }
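
  // A sketch of how the barriers above pair up in a publish/consume pattern (hypothetical code;
  // "g_shared", "Foo", "Init" and "Use" are made-up names, not part of this class):
  //
  //   // Producer: make the object's initialization visible before publishing the pointer.
  //   obj->Init();
  //   QuasiAtomic::MembarStoreStore();
  //   g_shared = obj;
  //
  //   // Consumer: order the pointer load before the loads done through it.
  //   Foo* local = g_shared;
  //   QuasiAtomic::MembarLoadLoad();
  //   if (local != nullptr) {
  //     local->Use();
  //   }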

 private:
  static Mutex* GetSwapMutex(const volatile int64_t* addr);
  static int64_t SwapMutexRead64(volatile const int64_t* addr);
  static void SwapMutexWrite64(volatile int64_t* addr, int64_t val);
  static bool SwapMutexCas64(int64_t old_value, int64_t new_value, volatile int64_t* addr);

  // We stripe across a bunch of different mutexes to reduce contention.
  static constexpr size_t kSwapMutexCount = 32;
  static std::vector<Mutex*>* gSwapMutexes;

  DISALLOW_COPY_AND_ASSIGN(QuasiAtomic);
};
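
// An illustrative sketch of the quasiatomic 64-bit primitives (hypothetical names, not part of
// this header): a 64-bit timestamp that may be read and updated from several threads without
// tearing on 32-bit targets.
//
//   volatile int64_t g_last_timestamp = 0;
//
//   int64_t ReadTimestamp() {
//     return QuasiAtomic::Read64(&g_last_timestamp);
//   }
//
//   // Single CAS attempt; returns whether "now" was installed as the new latest value.
//   bool AdvanceTimestamp(int64_t now) {
//     int64_t old_value = QuasiAtomic::Read64(&g_last_timestamp);
//     if (old_value >= now) {
//       return false;  // Someone else already published a later value.
//     }
//     return QuasiAtomic::Cas64(old_value, now, &g_last_timestamp);
//   }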

#if !ART_HAVE_STDATOMIC
template<typename T>
inline T Atomic<T>::LoadSequentiallyConsistent() const {
  T result = value_;
  QuasiAtomic::MembarLoadLoad();
  return result;
}

template<typename T>
inline void Atomic<T>::StoreSequentiallyConsistent(T desired) {
  QuasiAtomic::MembarStoreStore();
  value_ = desired;
  QuasiAtomic::MembarStoreLoad();
}

#endif

}  // namespace art

#endif  // ART_RUNTIME_ATOMIC_H_