path: root/arch/powerpc/include/asm/mutex.h
/*
 * Optimised mutex implementation of include/asm-generic/mutex-dec.h algorithm
 */
#ifndef _ASM_POWERPC_MUTEX_H
#define _ASM_POWERPC_MUTEX_H

static inline int __mutex_cmpxchg_lock(atomic_t *v, int old, int new)
{
	int t;

	__asm__ __volatile__ (
"1:	lwarx	%0,0,%1		# mutex trylock\n\
	cmpw	0,%0,%2\n\
	bne-	2f\n"
	PPC405_ERR77(0,%1)
"	stwcx.	%3,0,%1\n\
	bne-	1b"
	PPC_ACQUIRE_BARRIER
	"\n\
2:"
	: "=&r" (t)
	: "r" (&v->counter), "r" (old), "r" (new)
	: "cc", "memory");

	return t;
}
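
/*
 * Illustrative sketch only (not part of this header): the lwarx/stwcx.
 * loop above is an atomic compare-and-swap that returns the value it
 * observed, roughly equivalent to:
 *
 *	t = v->counter;			// lwarx: load with reservation
 *	if (t == old)
 *		v->counter = new;	// stwcx.: store only if the reservation
 *					// still holds, else retry from lwarx
 *	return t;			// caller succeeded iff t == old
 *
 * PPC_ACQUIRE_BARRIER is executed only on the successful store path, so
 * the acquire ordering cost is paid only when the lock is actually taken.
 */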

static inline int __mutex_dec_return_lock(atomic_t *v)
{
	int t;

	__asm__ __volatile__(
"1:	lwarx	%0,0,%1		# mutex lock\n\
	addic	%0,%0,-1\n"
	PPC405_ERR77(0,%1)
"	stwcx.	%0,0,%1\n\
	bne-	1b"
	PPC_ACQUIRE_BARRIER
	: "=&r" (t)
	: "r" (&v->counter)
	: "cc", "memory");

	return t;
}

static inline int __mutex_inc_return_unlock(atomic_t *v)
{
	int t;

	__asm__ __volatile__(
	PPC_RELEASE_BARRIER
"1:	lwarx	%0,0,%1		# mutex unlock\n\
	addic	%0,%0,1\n"
	PPC405_ERR77(0,%1)
"	stwcx.	%0,0,%1 \n\
	bne-	1b"
	: "=&r" (t)
	: "r" (&v->counter)
	: "cc", "memory");

	return t;
}
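
/*
 * For orientation (an approximation, not what this file compiles to):
 * the two helpers above are acquire/release flavoured versions of
 * atomic_dec_return() and atomic_inc_return():
 *
 *	lock:	t = atomic_dec_return(v);	then an acquire barrier
 *	unlock:	release barrier;		then t = atomic_inc_return(v)
 *
 * Placing PPC_ACQUIRE_BARRIER after the decrement prevents accesses in
 * the critical section from being performed before the lock is taken;
 * placing PPC_RELEASE_BARRIER before the increment prevents them from
 * leaking past the unlock.
 */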

/**
 *  __mutex_fastpath_lock - try to take the lock by moving the count
 *                          from 1 to 0
 *  @count: pointer of type atomic_t
 *  @fail_fn: function to call if the original value was not 1
 *
 * Change the count from 1 to a value lower than 1, and call <fail_fn> if
 * it wasn't 1 originally. This function MUST leave the value lower than
 * 1 even when the "1" assertion wasn't true.
 */
static inline void
__mutex_fastpath_lock(atomic_t *count, void (*fail_fn)(atomic_t *))
{
	if (unlikely(__mutex_dec_return_lock(count) < 0))
		fail_fn(count);
}

/**
 *  __mutex_fastpath_lock_retval - try to take the lock by moving the count
 *                                 from 1 to 0
 *  @count: pointer of type atomic_t
 *  @fail_fn: function to call if the original value was not 1
 *
 * Change the count from 1 to a value lower than 1, and call <fail_fn> if
 * it wasn't 1 originally. This function returns 0 if the fastpath succeeds,
 * or anything the slow path function returns.
 */
static inline int
__mutex_fastpath_lock_retval(atomic_t *count, int (*fail_fn)(atomic_t *))
{
	if (unlikely(__mutex_dec_return_lock(count) < 0))
		return fail_fn(count);
	return 0;
}
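
/*
 * Usage sketch, simplified from the generic mutex core of the same era
 * (the real wrapper also does might_sleep() and owner tracking): the
 * retval variant lets interruptible lockers propagate an error such as
 * -EINTR from the slow path:
 *
 *	int mutex_lock_interruptible(struct mutex *lock)
 *	{
 *		return __mutex_fastpath_lock_retval(&lock->count,
 *				__mutex_lock_interruptible_slowpath);
 *	}
 */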

/**
 *  __mutex_fastpath_unlock - try to promote the count from 0 to 1
 *  @count: pointer of type atomic_t
 *  @fail_fn: function to call if the original value was not 0
 *
 * Try to promote the count from 0 to 1. If it wasn't 0, call <fail_fn>.
 * In the failure case, this function is allowed to either set the value to
 * 1, or to set it to a value lower than 1.
 */
static inline void
__mutex_fastpath_unlock(atomic_t *count, void (*fail_fn)(atomic_t *))
{
	if (unlikely(__mutex_inc_return_unlock(count) <= 0))
		fail_fn(count);
}
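
/*
 * Usage sketch (simplified; the real generic wrappers also handle
 * debugging and owner tracking): mutex_lock()/mutex_unlock() in the
 * generic mutex core hand the contended case to out-of-line slowpaths:
 *
 *	void mutex_lock(struct mutex *lock)
 *	{
 *		__mutex_fastpath_lock(&lock->count, __mutex_lock_slowpath);
 *	}
 *
 *	void mutex_unlock(struct mutex *lock)
 *	{
 *		__mutex_fastpath_unlock(&lock->count, __mutex_unlock_slowpath);
 *	}
 *
 * The count encodes the lock state: 1 is unlocked, 0 is locked with no
 * waiters, and a negative value is locked with possible waiters.
 */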

#define __mutex_slowpath_needs_to_unlock()		1

/**
 * __mutex_fastpath_trylock - try to acquire the mutex, without waiting
 *
 *  @count: pointer of type atomic_t
 *  @fail_fn: fallback function
 *
 * Change the count from 1 to 0 and return 1 (success); if the count was
 * not 1, return 0 (failure).
 */
static inline int
__mutex_fastpath_trylock(atomic_t *count, int (*fail_fn)(atomic_t *))
{
	if (likely(__mutex_cmpxchg_lock(count, 1, 0) == 1))
		return 1;
	return 0;
}
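
/*
 * Note that fail_fn is not used above: on this architecture the cmpxchg
 * alone decides success or failure.  An illustrative caller (simplified):
 *
 *	int mutex_trylock(struct mutex *lock)
 *	{
 *		return __mutex_fastpath_trylock(&lock->count,
 *				__mutex_trylock_slowpath);
 *	}
 */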

#endif