locking: Convert raw_rwlock to arch_rwlock
Not strictly necessary for -rt as -rt does not have non-sleeping
rwlocks, but it's odd not to have a consistent naming convention.
No functional change.
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Acked-by: Peter Zijlstra <peterz@infradead.org>
Acked-by: David S. Miller <davem@davemloft.net>
Acked-by: Ingo Molnar <mingo@elte.hu>
Cc: linux-arch@vger.kernel.org
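Note: the rename is purely mechanical; the lock word itself is
untouched. As a sketch, the corresponding spinlock_types.h side of
this series looks roughly like the following (illustrative, assuming
the single-word sparc32 layout; the exact header may differ):

	/* before */
	typedef struct {
		volatile unsigned int lock;
	} raw_rwlock_t;

	/* after: same layout, arch_ prefix to match arch_spinlock_t */
	typedef struct {
		volatile unsigned int lock;
	} arch_rwlock_t;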
diff --git a/arch/sparc/include/asm/spinlock_32.h b/arch/sparc/include/asm/spinlock_32.h
index 9b0f2f5..06d37e5 100644
--- a/arch/sparc/include/asm/spinlock_32.h
+++ b/arch/sparc/include/asm/spinlock_32.h
@@ -65,7 +65,7 @@
* Sort of like atomic_t's on Sparc, but even more clever.
*
* ------------------------------------
- * | 24-bit counter           | wlock | raw_rwlock_t
+ * | 24-bit counter           | wlock | arch_rwlock_t
* ------------------------------------
*  31                       8  7    0
*
@@ -76,9 +76,9 @@
*
* Unfortunately this scheme limits us to ~16,000,000 cpus.
*/
-static inline void arch_read_lock(raw_rwlock_t *rw)
+static inline void arch_read_lock(arch_rwlock_t *rw)
{
- register raw_rwlock_t *lp asm("g1");
+ register arch_rwlock_t *lp asm("g1");
lp = rw;
__asm__ __volatile__(
"mov %%o7, %%g4\n\t"
@@ -96,9 +96,9 @@
local_irq_restore(flags); \
} while(0)
-static inline void arch_read_unlock(raw_rwlock_t *rw)
+static inline void arch_read_unlock(arch_rwlock_t *rw)
{
- register raw_rwlock_t *lp asm("g1");
+ register arch_rwlock_t *lp asm("g1");
lp = rw;
__asm__ __volatile__(
"mov %%o7, %%g4\n\t"
@@ -116,9 +116,9 @@
local_irq_restore(flags); \
} while(0)
-static inline void __raw_write_lock(raw_rwlock_t *rw)
+static inline void __raw_write_lock(arch_rwlock_t *rw)
{
- register raw_rwlock_t *lp asm("g1");
+ register arch_rwlock_t *lp asm("g1");
lp = rw;
__asm__ __volatile__(
"mov %%o7, %%g4\n\t"
@@ -130,7 +130,7 @@
*(volatile __u32 *)&lp->lock = ~0U;
}
-static inline int __raw_write_trylock(raw_rwlock_t *rw)
+static inline int __raw_write_trylock(arch_rwlock_t *rw)
{
unsigned int val;
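The ~0U store above marks the whole word write-held: with every bit
set, both the counter field and the wlock byte read as busy to anyone
who looks. A C11 sketch of the trylock logic (illustrative; it folds
the kernel's two-step ldstub-then-check sequence into one
compare-and-swap):

	#include <stdatomic.h>
	#include <stdbool.h>

	static bool write_trylock_sketch(_Atomic unsigned int *lock)
	{
		unsigned int expected = 0;	/* no readers, wlock clear */

		/* on success every bit is set, mirroring the ~0U store */
		return atomic_compare_exchange_strong(lock, &expected, ~0U);
	}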
@@ -150,9 +150,9 @@
return (val == 0);
}
-static inline int arch_read_trylock(raw_rwlock_t *rw)
+static inline int arch_read_trylock(arch_rwlock_t *rw)
{
- register raw_rwlock_t *lp asm("g1");
+ register arch_rwlock_t *lp asm("g1");
register int res asm("o0");
lp = rw;
__asm__ __volatile__(