locking/refcounts: Use atomic_try_cmpxchg()

Generates better code (GCC-6.2.1):

  text        filename
  1576        defconfig-build/lib/refcount.o.pre
  1488        defconfig-build/lib/refcount.o.post
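
A note on the pattern change: atomic_cmpxchg() returns the value it
observed, so the caller must compare that against the expected value and
reload on failure. atomic_try_cmpxchg() instead returns a boolean and, on
failure, writes the observed value back through its second argument. That
collapses the open-coded reload-and-retry loop into a plain do/while, and
on x86 it lets the compiler branch on the flags set by CMPXCHG itself
instead of re-comparing the returned value, which is where the text
savings above come from.

Below is a minimal user-space sketch of the two loop shapes (eliding the
kernel's extra saturation checks), using C11's
atomic_compare_exchange_strong() as a stand-in for atomic_try_cmpxchg(),
since it has the same update-expected-on-failure semantics; the function
names are illustrative, not the kernel API, and the sketch uses the
seq_cst default rather than the kernel's _relaxed/_release orderings:

  #include <stdatomic.h>
  #include <stdbool.h>
  #include <limits.h>

  /* Old shape: open-coded cmpxchg loop with an explicit reload. */
  static bool add_not_zero_old(atomic_uint *refs, unsigned int i)
  {
      unsigned int old, new, val = atomic_load(refs);

      for (;;) {
          if (!val)
              return false;

          new = val + i;
          if (new < val)
              new = UINT_MAX;    /* saturate on overflow */

          old = val;
          atomic_compare_exchange_strong(refs, &old, new);
          if (old == val)        /* unchanged: the exchange happened */
              break;

          val = old;             /* retry with the observed value */
      }

      return true;
  }

  /* New shape: a failed compare updates 'val' in place, so the loop
   * needs no separate success test and no reload. */
  static bool add_not_zero_new(atomic_uint *refs, unsigned int i)
  {
      unsigned int new, val = atomic_load(refs);

      do {
          if (!val)
              return false;

          new = val + i;
          if (new < val)
              new = UINT_MAX;    /* saturate on overflow */
      } while (!atomic_compare_exchange_strong(refs, &val, new));

      return true;
  }

The do/while form is exactly what the hunks below switch to, with the
kernel's _relaxed/_release variants supplying the required ordering.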

Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Cc: Andrew Morton <akpm@linux-foundation.org>
Cc: Andy Lutomirski <luto@kernel.org>
Cc: Borislav Petkov <bp@alien8.de>
Cc: Brian Gerst <brgerst@gmail.com>
Cc: Denys Vlasenko <dvlasenk@redhat.com>
Cc: H. Peter Anvin <hpa@zytor.com>
Cc: Josh Poimboeuf <jpoimboe@redhat.com>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: linux-kernel@vger.kernel.org
Signed-off-by: Ingo Molnar <mingo@kernel.org>
Peter Zijlstra 2017-02-01 16:07:55 +01:00, committed by Ingo Molnar
Parent: a9ebf306f5
Commit: b78c0d4712
1 changed file: 15 additions, 32 deletions

lib/refcount.c

@@ -57,9 +57,9 @@
  */
 bool refcount_add_not_zero(unsigned int i, refcount_t *r)
 {
-        unsigned int old, new, val = atomic_read(&r->refs);
+        unsigned int new, val = atomic_read(&r->refs);
 
-        for (;;) {
+        do {
                 if (!val)
                         return false;
 
@@ -69,12 +69,8 @@ bool refcount_add_not_zero(unsigned int i, refcount_t *r)
                 new = val + i;
                 if (new < val)
                         new = UINT_MAX;
 
-                old = atomic_cmpxchg_relaxed(&r->refs, val, new);
-                if (old == val)
-                        break;
-                val = old;
-        }
+        } while (!atomic_try_cmpxchg_relaxed(&r->refs, &val, new));
 
         WARN_ONCE(new == UINT_MAX, "refcount_t: saturated; leaking memory.\n");
 
@@ -118,9 +114,9 @@ EXPORT_SYMBOL_GPL(refcount_add);
  */
 bool refcount_inc_not_zero(refcount_t *r)
 {
-        unsigned int old, new, val = atomic_read(&r->refs);
+        unsigned int new, val = atomic_read(&r->refs);
 
-        for (;;) {
+        do {
                 new = val + 1;
 
                 if (!val)
@@ -129,12 +125,7 @@ bool refcount_inc_not_zero(refcount_t *r)
                 if (unlikely(!new))
                         return true;
 
-                old = atomic_cmpxchg_relaxed(&r->refs, val, new);
-                if (old == val)
-                        break;
-
-                val = old;
-        }
+        } while (!atomic_try_cmpxchg_relaxed(&r->refs, &val, new));
 
         WARN_ONCE(new == UINT_MAX, "refcount_t: saturated; leaking memory.\n");
 
@@ -182,9 +173,9 @@ EXPORT_SYMBOL_GPL(refcount_inc);
  */
 bool refcount_sub_and_test(unsigned int i, refcount_t *r)
 {
-        unsigned int old, new, val = atomic_read(&r->refs);
+        unsigned int new, val = atomic_read(&r->refs);
 
-        for (;;) {
+        do {
                 if (unlikely(val == UINT_MAX))
                         return false;
 
@@ -194,12 +185,7 @@ bool refcount_sub_and_test(unsigned int i, refcount_t *r)
                         return false;
                 }
 
-                old = atomic_cmpxchg_release(&r->refs, val, new);
-                if (old == val)
-                        break;
-
-                val = old;
-        }
+        } while (!atomic_try_cmpxchg_release(&r->refs, &val, new));
 
         return !new;
 }
@@ -258,7 +244,9 @@ EXPORT_SYMBOL_GPL(refcount_dec);
  */
 bool refcount_dec_if_one(refcount_t *r)
 {
-        return atomic_cmpxchg_release(&r->refs, 1, 0) == 1;
+        int val = 1;
+
+        return atomic_try_cmpxchg_release(&r->refs, &val, 0);
 }
 EXPORT_SYMBOL_GPL(refcount_dec_if_one);
 
@@ -275,9 +263,9 @@ EXPORT_SYMBOL_GPL(refcount_dec_if_one);
  */
 bool refcount_dec_not_one(refcount_t *r)
 {
-        unsigned int old, new, val = atomic_read(&r->refs);
+        unsigned int new, val = atomic_read(&r->refs);
 
-        for (;;) {
+        do {
                 if (unlikely(val == UINT_MAX))
                         return true;
 
@@ -290,12 +278,7 @@ bool refcount_dec_not_one(refcount_t *r)
                         return true;
                 }
 
-                old = atomic_cmpxchg_release(&r->refs, val, new);
-                if (old == val)
-                        break;
-
-                val = old;
-        }
+        } while (!atomic_try_cmpxchg_release(&r->refs, &val, new));
 
         return true;
 }