path: root/arch/ia64/lib/dec_and_lock.c
/*
 * Copyright (C) 2003 Jerome Marchand, Bull S.A.
 *	Cleaned up by David Mosberger-Tang <davidm@hpl.hp.com>
 *
 * This file is released under the GPLv2, or at your option any later version.
 *
 * ia64 version of "atomic_dec_and_lock()" using the atomic "cmpxchg" instruction.  This
 * code is an adaptation of the x86 version of "atomic_dec_and_lock()".
 */

#include <linux/compiler.h>
#include <linux/module.h>
#include <linux/spinlock.h>
#include <asm/atomic.h>

/*
 * Decrement REFCOUNT and, if the count reaches zero, acquire the spinlock.
 * Both operations have to be done atomically, so that the count cannot drop
 * to zero without the spinlock being acquired first.  Returns 1 with LOCK
 * held if the count dropped to zero, otherwise returns 0 without taking LOCK.
 */
int
_atomic_dec_and_lock (atomic_t *refcount, spinlock_t *lock)
{
	int old, new;

	do {
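		/*
		 * Snapshot the current count and compute the decremented
		 * value; the cmpxchg at the bottom of the loop commits NEW
		 * only if no other CPU changed the counter in the meantime,
		 * otherwise we go around and retry.
		 */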
		old = atomic_read(refcount);
		new = old - 1;

		if (unlikely (old == 1)) {
			/* oops, we may be decrementing to zero, do it the slow way... */
			spin_lock(lock);
			if (atomic_dec_and_test(refcount))
				return 1;
			spin_unlock(lock);
			return 0;
		}
	} while (cmpxchg(&refcount->counter, old, new) != old);
	return 0;
}

EXPORT_SYMBOL(_atomic_dec_and_lock);
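
/*
 * Usage sketch (not part of this file): callers normally reach this routine
 * through the atomic_dec_and_lock() wrapper in <linux/spinlock.h>, typically
 * from a "put" path so that the final reference drop, the unlink from a
 * shared structure, and the free all happen under one lock.  The example
 * below is only an illustration: struct my_obj, my_obj_put() and my_obj_lock
 * are hypothetical names, and it additionally assumes <linux/list.h> and
 * <linux/slab.h> for list_del() and kfree().
 */
#if 0	/* illustrative only, not compiled as part of this file */
struct my_obj {
	atomic_t		refcount;
	struct list_head	node;
};

static DEFINE_SPINLOCK(my_obj_lock);

static void my_obj_put(struct my_obj *obj)
{
	/* Returns 1 with my_obj_lock held only when the count hits zero. */
	if (atomic_dec_and_lock(&obj->refcount, &my_obj_lock)) {
		list_del(&obj->node);
		spin_unlock(&my_obj_lock);
		kfree(obj);
	}
}
#endif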