/*
 * Memory barrier definitions.  This is based on information published
 * in the Processor Abstraction Layer and the System Abstraction Layer
 * manual.
 *
 * Copyright (C) 1998-2003 Hewlett-Packard Co
 *	David Mosberger-Tang <davidm@hpl.hp.com>
 * Copyright (C) 1999 Asit Mallick <asit.k.mallick@intel.com>
 * Copyright (C) 1999 Don Dugger <don.dugger@intel.com>
 */
#ifndef _ASM_IA64_BARRIER_H
#define _ASM_IA64_BARRIER_H

#include <linux/compiler.h>

/*
 * Macros to force memory ordering.  In these descriptions, "previous"
 * and "subsequent" refer to program order; "visible" means that all
 * architecturally visible effects of a memory access have occurred
 * (at a minimum, this means the memory has been read or written).
 *
 *   wmb():	Guarantees that all preceding stores to memory-
 *		like regions are visible before any subsequent
 *		stores, i.e., later stores become visible only
 *		after all earlier stores have.
 *   rmb():	Like wmb(), but for reads.
 *   mb():	wmb()/rmb() combo, i.e., all previous memory
 *		accesses are visible before all subsequent
 *		accesses and vice versa.  This is also known as
 *		a "fence."
 *
 * Note: "mb()" and its variants cannot be used as a fence to order
 * accesses to memory-mapped I/O registers.  For that, mf.a needs to
 * be used.  However, we don't want to always use mf.a because (a)
 * it's (presumably) much slower than mf and (b) mf.a is supported for
 * sequential memory pages only.
 */
#define mb()	ia64_mf()
#define rmb()	mb()
#define wmb()	mb()
#define read_barrier_depends()	do { } while (0)
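
/*
 * Illustrative sketch (not part of the original header): the classic
 * producer/consumer pairing of wmb() and rmb().  The variables "data"
 * and "flag" are hypothetical:
 *
 *	int data, flag;
 *
 *	void producer(void)
 *	{
 *		data = 42;
 *		wmb();				make the data store visible first
 *		ACCESS_ONCE(flag) = 1;
 *	}
 *
 *	void consumer(void)
 *	{
 *		while (!ACCESS_ONCE(flag))
 *			cpu_relax();
 *		rmb();				read flag before reading data
 *		BUG_ON(data != 42);
 *	}
 */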

#ifdef CONFIG_SMP
# define smp_mb()	mb()
# define smp_rmb()	rmb()
# define smp_wmb()	wmb()
# define smp_read_barrier_depends()	read_barrier_depends()

#else

# define smp_mb()	barrier()
# define smp_rmb()	barrier()
# define smp_wmb()	barrier()
# define smp_read_barrier_depends()	do { } while (0)

#endif

#define smp_mb__before_atomic()	barrier()
#define smp_mb__after_atomic()	barrier()

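/*
 * Illustrative sketch (not part of the original header): these barriers
 * pair with non-value-returning atomic RMW ops, which by themselves
 * imply no ordering.  "obj", "dead" and "refcount" are hypothetical:
 *
 *	obj->dead = 1;
 *	smp_mb__before_atomic();	order the dead store ...
 *	atomic_dec(&obj->refcount);	... before the reference drop
 */
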
/*
 * IA64 GCC turns volatile stores into st.rel and volatile loads into
 * ld.acq, so there is no need for asm trickery!
 */

#define smp_store_release(p, v)						\
do {									\
	compiletime_assert_atomic_type(*p);				\
	barrier();							\
	ACCESS_ONCE(*p) = (v);						\
} while (0)

#define smp_load_acquire(p)						\
({									\
	typeof(*p) ___p1 = ACCESS_ONCE(*p);				\
	compiletime_assert_atomic_type(*p);				\
	barrier();							\
	___p1;								\
})

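/*
 * Illustrative sketch (not part of the original header): publishing
 * data with release/acquire semantics.  "msg" and "msg_ready" are
 * hypothetical:
 *
 *	writer:
 *		msg = 42;
 *		smp_store_release(&msg_ready, 1);
 *
 *	reader:
 *		while (!smp_load_acquire(&msg_ready))
 *			cpu_relax();
 *		r = msg;		guaranteed to observe 42
 */
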
/*
 * XXX check on this --- I suspect what Linus really wants here is
 * acquire vs release semantics but we can't discuss this stuff with
 * Linus just yet.  Grrr...
 */
#define set_mb(var, value)	do { (var) = (value); mb(); } while (0)
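
/*
 * Illustrative sketch (not part of the original header): set_mb() is
 * the store-then-fence idiom used, e.g., in sleep/wakeup paths
 * ("condition" is hypothetical):
 *
 *	set_mb(current->state, TASK_INTERRUPTIBLE);
 *	if (!condition)
 *		schedule();
 */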

/*
 * The group barrier in front of the rsm & ssm is necessary to ensure
 * that none of the previous instructions in the same group are
 * affected by the rsm/ssm.
 */

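/*
 * Illustrative sketch (hypothetical; the actual rsm/ssm wrappers live
 * in other ia64 headers): a ";;" stop bit closes the current
 * instruction group so that rsm cannot affect instructions issued
 * before it:
 *
 *	asm volatile (";;" ::: "memory");		close the group, then
 *	asm volatile ("rsm psr.i" ::: "memory");	disable interrupts
 */
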
#endif /* _ASM_IA64_BARRIER_H */