/* atomic-ops.S: kernel atomic operations
 *
 * For an explanation of how atomic ops work in this arch, see:
 *   Documentation/frv/atomic-ops.txt
 *
 * Copyright (C) 2004 Red Hat, Inc. All Rights Reserved.
 * Written by David Howells (dhowells@redhat.com)
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */
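
#
# All of the routines in this file use the same retry pattern, summarised
# briefly here; Documentation/frv/atomic-ops.txt (referenced above) is the
# authoritative description:
#
#  - ORCC gr0,gr0,gr0,icc3 sets ICC3.Z, and CKEQ then sets CC7 true.
#  - LD.P and ORCR are packed into a single VLIW bundle, so fetching the old
#    value and setting CC3 true happen as one atomic step.
#  - CST.P writes the new value back only if CC3 is still true, and CORCC
#    (ORing gr29, which the kernel keeps non-zero, with itself) clears ICC3.Z
#    only if that store actually happened.
#  - If the sequence is interrupted between the load and the store, the
#    kernel's exception paths are relied upon to clear CC3: the store is then
#    suppressed, ICC3.Z stays set, and the BEQ ICC3 branch retries from 0:.
#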

#include <asm/spr-regs.h>

	.text
	.balign 4

###############################################################################
#
# unsigned long atomic_test_and_ANDNOT_mask(unsigned long mask, volatile unsigned long *v);
#
###############################################################################
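#
# Roughly equivalent C, as a readability sketch only (gr8 = mask, gr9 = v on
# entry, per the code below):
#
#	old = *v;
#	*v = old & ~mask;	/* done atomically by the retry loop */
#	return old;		/* old value handed back in gr8 */
#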
	.globl		atomic_test_and_ANDNOT_mask
	.type		atomic_test_and_ANDNOT_mask,@function
atomic_test_and_ANDNOT_mask:
	not.p		gr8,gr10
0:
	orcc		gr0,gr0,gr0,icc3		/* set ICC3.Z */
	ckeq		icc3,cc7
	ld.p		@(gr9,gr0),gr8			/* LD.P/ORCR must be atomic */
	orcr		cc7,cc7,cc3			/* set CC3 to true */
	and		gr8,gr10,gr11
	cst.p		gr11,@(gr9,gr0)		,cc3,#1
	corcc		gr29,gr29,gr0		,cc3,#1	/* clear ICC3.Z if store happens */
	beq		icc3,#0,0b
	bralr

	.size		atomic_test_and_ANDNOT_mask, .-atomic_test_and_ANDNOT_mask

###############################################################################
#
# unsigned long atomic_test_and_OR_mask(unsigned long mask, volatile unsigned long *v);
#
###############################################################################
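#
# Roughly equivalent C, as a readability sketch only (gr8 = mask, gr9 = v on
# entry, per the code below):
#
#	old = *v;
#	*v = old | mask;	/* done atomically by the retry loop */
#	return old;		/* old value handed back in gr8 */
#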
	.globl		atomic_test_and_OR_mask
	.type		atomic_test_and_OR_mask,@function
atomic_test_and_OR_mask:
	or.p		gr8,gr8,gr10
0:
	orcc		gr0,gr0,gr0,icc3		/* set ICC3.Z */
	ckeq		icc3,cc7
	ld.p		@(gr9,gr0),gr8			/* LD.P/ORCR must be atomic */
	orcr		cc7,cc7,cc3			/* set CC3 to true */
	or		gr8,gr10,gr11
	cst.p		gr11,@(gr9,gr0)		,cc3,#1
	corcc		gr29,gr29,gr0		,cc3,#1	/* clear ICC3.Z if store happens */
	beq		icc3,#0,0b
	bralr

	.size		atomic_test_and_OR_mask, .-atomic_test_and_OR_mask

###############################################################################
#
# unsigned long atomic_test_and_XOR_mask(unsigned long mask, volatile unsigned long *v);
#
###############################################################################
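#
# Roughly equivalent C, as a readability sketch only (gr8 = mask, gr9 = v on
# entry, per the code below):
#
#	old = *v;
#	*v = old ^ mask;	/* done atomically by the retry loop */
#	return old;		/* old value handed back in gr8 */
#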
	.globl		atomic_test_and_XOR_mask
	.type		atomic_test_and_XOR_mask,@function
atomic_test_and_XOR_mask:
	or.p		gr8,gr8,gr10
0:
	orcc		gr0,gr0,gr0,icc3		/* set ICC3.Z */
	ckeq		icc3,cc7
	ld.p		@(gr9,gr0),gr8			/* LD.P/ORCR must be atomic */
	orcr		cc7,cc7,cc3			/* set CC3 to true */
	xor		gr8,gr10,gr11
	cst.p		gr11,@(gr9,gr0)		,cc3,#1
	corcc		gr29,gr29,gr0		,cc3,#1	/* clear ICC3.Z if store happens */
	beq		icc3,#0,0b
	bralr

	.size		atomic_test_and_XOR_mask, .-atomic_test_and_XOR_mask

###############################################################################
#
# int atomic_add_return(int i, atomic_t *v)
#
###############################################################################
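#
# Roughly equivalent C, as a readability sketch only (gr8 = i, gr9 = v on
# entry, per the code below):
#
#	new = *v + i;
#	*v = new;		/* done atomically by the retry loop */
#	return new;		/* new value handed back in gr8 */
#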
	.globl		atomic_add_return
	.type		atomic_add_return,@function
atomic_add_return:
	or.p		gr8,gr8,gr10
0:
	orcc		gr0,gr0,gr0,icc3		/* set ICC3.Z */
	ckeq		icc3,cc7
	ld.p		@(gr9,gr0),gr8			/* LD.P/ORCR must be atomic */
	orcr		cc7,cc7,cc3			/* set CC3 to true */
	add		gr8,gr10,gr8
	cst.p		gr8,@(gr9,gr0)		,cc3,#1
	corcc		gr29,gr29,gr0		,cc3,#1	/* clear ICC3.Z if store happens */
	beq		icc3,#0,0b
	bralr

	.size		atomic_add_return, .-atomic_add_return

###############################################################################
#
# int atomic_sub_return(int i, atomic_t *v)
#
###############################################################################
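#
# Roughly equivalent C, as a readability sketch only (gr8 = i, gr9 = v on
# entry, per the code below):
#
#	new = *v - i;
#	*v = new;		/* done atomically by the retry loop */
#	return new;		/* new value handed back in gr8 */
#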
	.globl		atomic_sub_return
	.type		atomic_sub_return,@function
atomic_sub_return:
	or.p		gr8,gr8,gr10
0:
	orcc		gr0,gr0,gr0,icc3		/* set ICC3.Z */
	ckeq		icc3,cc7
	ld.p		@(gr9,gr0),gr8			/* LD.P/ORCR must be atomic */
	orcr		cc7,cc7,cc3			/* set CC3 to true */
	sub		gr8,gr10,gr8
	cst.p		gr8,@(gr9,gr0)		,cc3,#1
	corcc		gr29,gr29,gr0		,cc3,#1	/* clear ICC3.Z if store happens */
	beq		icc3,#0,0b
	bralr

	.size		atomic_sub_return, .-atomic_sub_return

###############################################################################
#
# uint32_t __xchg_32(uint32_t i, uint32_t *v)
#
###############################################################################
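#
# Roughly equivalent C, as a readability sketch only (gr8 = i, gr9 = v on
# entry, per the code below):
#
#	old = *v;
#	*v = i;			/* done atomically by the retry loop */
#	return old;		/* old value handed back in gr8 */
#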
	.globl		__xchg_32
	.type		__xchg_32,@function
__xchg_32:
	or.p		gr8,gr8,gr10
0:
	orcc		gr0,gr0,gr0,icc3		/* set ICC3.Z */
	ckeq		icc3,cc7
	ld.p		@(gr9,gr0),gr8			/* LD.P/ORCR must be atomic */
	orcr		cc7,cc7,cc3			/* set CC3 to true */
	cst.p		gr10,@(gr9,gr0)		,cc3,#1
	corcc		gr29,gr29,gr0		,cc3,#1	/* clear ICC3.Z if store happens */
	beq		icc3,#0,0b
	bralr

	.size		__xchg_32, .-__xchg_32

###############################################################################
#
# uint32_t __cmpxchg_32(uint32_t *v, uint32_t test, uint32_t new)
#
###############################################################################
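#
# Roughly equivalent C, as a readability sketch only (gr8 = v, gr9 = test,
# gr10 = new on entry, per the code below):
#
#	old = *v;
#	if (old == test)
#		*v = new;	/* store retried if the sequence is interrupted */
#	return old;		/* old value handed back in gr8 */
#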
	.globl		__cmpxchg_32
	.type		__cmpxchg_32,@function
__cmpxchg_32:
	or.p		gr8,gr8,gr11
0:
	orcc		gr0,gr0,gr0,icc3
	ckeq		icc3,cc7
	ld.p		@(gr11,gr0),gr8
	orcr		cc7,cc7,cc3
	subcc		gr8,gr9,gr7,icc0
	bne		icc0,#0,1f
	cst.p		gr10,@(gr11,gr0)	,cc3,#1
	corcc		gr29,gr29,gr0		,cc3,#1
	beq		icc3,#0,0b
1:
	bralr

	.size		__cmpxchg_32, .-__cmpxchg_32