mirror of
https://github.com/lkl/linux.git
synced 2025-12-19 16:13:19 +09:00
locking/atomic: Introduce atomic_try_cmpxchg()
Add a new cmpxchg interface:
bool try_cmpxchg(u{8,16,32,64} *ptr, u{8,16,32,64} *val, u{8,16,32,64} new);
Where the boolean returns the result of the compare; and thus if the
exchange happened; and in case of failure, the new value of *ptr is
returned in *val.
This allows simplification/improvement of loops like:
for (;;) {
new = val $op $imm;
old = cmpxchg(ptr, val, new);
if (old == val)
break;
val = old;
}
into:
do {
} while (!try_cmpxchg(ptr, &val, val $op $imm));
while also generating better code (GCC6 and onwards).
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Cc: Andrew Morton <akpm@linux-foundation.org>
Cc: Andy Lutomirski <luto@kernel.org>
Cc: Borislav Petkov <bp@alien8.de>
Cc: Brian Gerst <brgerst@gmail.com>
Cc: Denys Vlasenko <dvlasenk@redhat.com>
Cc: H. Peter Anvin <hpa@zytor.com>
Cc: Josh Poimboeuf <jpoimboe@redhat.com>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: linux-kernel@vger.kernel.org
Signed-off-by: Ingo Molnar <mingo@kernel.org>
This commit is contained in:
commit a9ebf306f5 (parent bf7b3ac2e3)
committed by Ingo Molnar
@@ -423,6 +423,27 @@
#endif

#endif /* atomic_cmpxchg_relaxed */

#ifndef atomic_try_cmpxchg

/*
 * __atomic_try_cmpxchg() - cmpxchg returning a boolean success result.
 *
 * @type: ordering suffix pasted onto atomic_cmpxchg (empty, _relaxed,
 *        _acquire or _release)
 * @_p:   the atomic variable to operate on
 * @_po:  pointer to the expected old value; on failure it is updated
 *        with the value actually found
 * @_n:   new value to store on success
 *
 * Built on atomic_cmpxchg##type(), which returns the previous value:
 * the exchange happened iff that previous value equals the expected
 * one.  The previous value is unconditionally written back through
 * @_po, so after a failed attempt the caller's copy is already
 * refreshed for the next iteration of a try-loop.
 */
#define __atomic_try_cmpxchg(type, _p, _po, _n)				\
({									\
	typeof(_po) __po = (_po);					\
	typeof(*(_po)) __o = *__po;					\
	*__po = atomic_cmpxchg##type((_p), __o, (_n));			\
	(*__po == __o);							\
})

#define atomic_try_cmpxchg(_p, _po, _n)		__atomic_try_cmpxchg(, _p, _po, _n)
#define atomic_try_cmpxchg_relaxed(_p, _po, _n)	__atomic_try_cmpxchg(_relaxed, _p, _po, _n)
#define atomic_try_cmpxchg_acquire(_p, _po, _n)	__atomic_try_cmpxchg(_acquire, _p, _po, _n)
#define atomic_try_cmpxchg_release(_p, _po, _n)	__atomic_try_cmpxchg(_release, _p, _po, _n)

#else /* atomic_try_cmpxchg */
/* The architecture supplies its own; alias the ordering variants to it. */
#define atomic_try_cmpxchg_relaxed	atomic_try_cmpxchg
#define atomic_try_cmpxchg_acquire	atomic_try_cmpxchg
#define atomic_try_cmpxchg_release	atomic_try_cmpxchg
#endif /* atomic_try_cmpxchg */
/* cmpxchg_relaxed */

#ifndef cmpxchg_relaxed
#define cmpxchg_relaxed cmpxchg
@@ -996,6 +1017,27 @@ static inline int atomic_dec_if_positive(atomic_t *v)
#endif

#endif /* atomic64_cmpxchg_relaxed */

#ifndef atomic64_try_cmpxchg

/*
 * __atomic64_try_cmpxchg() - 64-bit cmpxchg returning a boolean
 * success result; mirrors __atomic_try_cmpxchg() for atomic64_t.
 *
 * @type: ordering suffix pasted onto atomic64_cmpxchg (empty,
 *        _relaxed, _acquire or _release)
 * @_p:   the atomic64 variable to operate on
 * @_po:  pointer to the expected old value; on failure it is updated
 *        with the value actually found
 * @_n:   new value to store on success
 *
 * atomic64_cmpxchg##type() returns the previous value; the exchange
 * happened iff it equals the expected value.  The previous value is
 * written back through @_po so a failed attempt leaves the caller's
 * copy refreshed for the next try-loop iteration.
 */
#define __atomic64_try_cmpxchg(type, _p, _po, _n)			\
({									\
	typeof(_po) __po = (_po);					\
	typeof(*(_po)) __o = *__po;					\
	*__po = atomic64_cmpxchg##type((_p), __o, (_n));		\
	(*__po == __o);							\
})

#define atomic64_try_cmpxchg(_p, _po, _n)		__atomic64_try_cmpxchg(, _p, _po, _n)
#define atomic64_try_cmpxchg_relaxed(_p, _po, _n)	__atomic64_try_cmpxchg(_relaxed, _p, _po, _n)
#define atomic64_try_cmpxchg_acquire(_p, _po, _n)	__atomic64_try_cmpxchg(_acquire, _p, _po, _n)
#define atomic64_try_cmpxchg_release(_p, _po, _n)	__atomic64_try_cmpxchg(_release, _p, _po, _n)

#else /* atomic64_try_cmpxchg */
/* The architecture supplies its own; alias the ordering variants to it. */
#define atomic64_try_cmpxchg_relaxed	atomic64_try_cmpxchg
#define atomic64_try_cmpxchg_acquire	atomic64_try_cmpxchg
#define atomic64_try_cmpxchg_release	atomic64_try_cmpxchg
#endif /* atomic64_try_cmpxchg */
#ifndef atomic64_andnot
static inline void atomic64_andnot(long long i, atomic64_t *v)
{
Reference in New Issue
Block a user