/* kernel atomic64 operations
*
* For an explanation of how atomic ops work in this arch, see:
* Documentation/frv/atomic-ops.txt
*
* Copyright (C) 2009 Red Hat, Inc. All Rights Reserved.
* Written by David Howells (dhowells@redhat.com)
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* as published by the Free Software Foundation; either version
* 2 of the License, or (at your option) any later version.
*/
#include <asm/spr-regs.h>

	.text
	.balign 4

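###############################################################################
#
# Overview of the pattern shared by all routines below (a summary; the
# authoritative description is Documentation/frv/atomic-ops.txt, cited in the
# header above):
#
#  - ORCC gr0,gr0,gr0,icc3 sets ICC3.Z, and CKEQ then sets CC7 true.
#  - The LDD.P/ORCR pair sits in a single VLIW packet, so CC3 is set true
#    atomically with the load of the old 64-bit value.
#  - The CSTD.P store and the CORCC that clears ICC3.Z are both predicated
#    on CC3 (the ",cc3,#1" suffix).
#  - If the sequence is disturbed between the load and the store (for
#    example by an interrupt or exception), the store is cancelled, ICC3.Z
#    stays set and BEQ icc3,#0,0b retries from the top.
#
# Roughly equivalent C, as an illustrative sketch only; conditional_store()
# and op() are hypothetical stand-ins for the predicated CSTD.P/CORCC pair
# and the arithmetic step, not real kernel helpers:
#
#	do {
#		old = *v;
#		new = op(old);			/* e.g. old + 1 */
#	} while (!conditional_store(v, new));
#
###############################################################################
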
###############################################################################
#
# long long atomic64_inc_return(atomic64_t *v)
#
###############################################################################
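# Register usage (inferred from the code, not stated explicitly here): v
# arrives in gr8 and is moved to gr10; the incremented 64-bit value is
# computed and returned in the gr8/gr9 pair (gr9 holds the low word, as the
# carry propagates from gr9 into gr8).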
	.globl		atomic64_inc_return
	.type		atomic64_inc_return,@function
atomic64_inc_return:
	or.p		gr8,gr8,gr10
0:
	orcc		gr0,gr0,gr0,icc3		/* set ICC3.Z */
	ckeq		icc3,cc7
	ldd.p		@(gr10,gr0),gr8			/* LDD.P/ORCR must be atomic */
	orcr		cc7,cc7,cc3			/* set CC3 to true */
	addicc		gr9,#1,gr9,icc0
	addxi		gr8,#0,gr8,icc0
	cstd.p		gr8,@(gr10,gr0)		,cc3,#1
	corcc		gr29,gr29,gr0		,cc3,#1	/* clear ICC3.Z if store happens */
	beq		icc3,#0,0b
	bralr

	.size		atomic64_inc_return, .-atomic64_inc_return

###############################################################################
#
# long long atomic64_dec_return(atomic64_t *v)
#
###############################################################################
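# Register usage (inferred from the code): identical to atomic64_inc_return
# above; only the SUBICC/SUBXI pair differs, decrementing instead of
# incrementing.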
	.globl		atomic64_dec_return
	.type		atomic64_dec_return,@function
atomic64_dec_return:
	or.p		gr8,gr8,gr10
0:
	orcc		gr0,gr0,gr0,icc3		/* set ICC3.Z */
	ckeq		icc3,cc7
	ldd.p		@(gr10,gr0),gr8			/* LDD.P/ORCR must be atomic */
	orcr		cc7,cc7,cc3			/* set CC3 to true */
	subicc		gr9,#1,gr9,icc0
	subxi		gr8,#0,gr8,icc0
	cstd.p		gr8,@(gr10,gr0)		,cc3,#1
	corcc		gr29,gr29,gr0		,cc3,#1	/* clear ICC3.Z if store happens */
	beq		icc3,#0,0b
	bralr

	.size		atomic64_dec_return, .-atomic64_dec_return

###############################################################################
#
# long long atomic64_add_return(long long i, atomic64_t *v)
#
###############################################################################
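# Register usage (inferred from the code, not stated explicitly here): the
# 64-bit addend i arrives in gr8/gr9 and is copied to gr4/gr5 so that gr8/gr9
# can receive the loaded value; v arrives in gr10; the sum is returned in
# gr8/gr9.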
	.globl		atomic64_add_return
	.type		atomic64_add_return,@function
atomic64_add_return:
	or.p		gr8,gr8,gr4
	or		gr9,gr9,gr5
0:
	orcc		gr0,gr0,gr0,icc3		/* set ICC3.Z */
	ckeq		icc3,cc7
	ldd.p		@(gr10,gr0),gr8			/* LDD.P/ORCR must be atomic */
	orcr		cc7,cc7,cc3			/* set CC3 to true */
	addcc		gr9,gr5,gr9,icc0
	addx		gr8,gr4,gr8,icc0
	cstd.p		gr8,@(gr10,gr0)		,cc3,#1
	corcc		gr29,gr29,gr0		,cc3,#1	/* clear ICC3.Z if store happens */
	beq		icc3,#0,0b
	bralr

	.size		atomic64_add_return, .-atomic64_add_return

###############################################################################
#
# long long atomic64_sub_return(long long i, atomic64_t *v)
#
###############################################################################
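# Register usage (inferred from the code): identical to atomic64_add_return
# above; only the SUBCC/SUBX pair differs, subtracting i instead of adding it.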
	.globl		atomic64_sub_return
	.type		atomic64_sub_return,@function
atomic64_sub_return:
	or.p		gr8,gr8,gr4
	or		gr9,gr9,gr5
0:
	orcc		gr0,gr0,gr0,icc3		/* set ICC3.Z */
	ckeq		icc3,cc7
	ldd.p		@(gr10,gr0),gr8			/* LDD.P/ORCR must be atomic */
	orcr		cc7,cc7,cc3			/* set CC3 to true */
	subcc		gr9,gr5,gr9,icc0
	subx		gr8,gr4,gr8,icc0
	cstd.p		gr8,@(gr10,gr0)		,cc3,#1
	corcc		gr29,gr29,gr0		,cc3,#1	/* clear ICC3.Z if store happens */
	beq		icc3,#0,0b
	bralr

	.size		atomic64_sub_return, .-atomic64_sub_return

###############################################################################
#
# uint64_t __xchg_64(uint64_t i, uint64_t *v)
#
###############################################################################
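# Register usage (inferred from the code, not stated explicitly here): the
# new value i arrives in gr8/gr9 and is copied to gr4/gr5; v arrives in gr10;
# the old value is loaded into gr8/gr9 and returned, while the gr4/gr5 pair
# is conditionally stored back.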
	.globl		__xchg_64
	.type		__xchg_64,@function
__xchg_64:
	or.p		gr8,gr8,gr4
	or		gr9,gr9,gr5
0:
	orcc		gr0,gr0,gr0,icc3		/* set ICC3.Z */
	ckeq		icc3,cc7
	ldd.p		@(gr10,gr0),gr8			/* LDD.P/ORCR must be atomic */
	orcr		cc7,cc7,cc3			/* set CC3 to true */
	cstd.p		gr4,@(gr10,gr0)		,cc3,#1
	corcc		gr29,gr29,gr0		,cc3,#1	/* clear ICC3.Z if store happens */
	beq		icc3,#0,0b
	bralr

	.size		__xchg_64, .-__xchg_64

###############################################################################
#
# uint64_t __cmpxchg_64(uint64_t test, uint64_t new, uint64_t *v)
#
###############################################################################
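# Register usage (inferred from the code, not stated explicitly here): test
# arrives in gr8/gr9 (copied to gr4/gr5), new in gr10/gr11, v in gr12.  The
# old value is loaded into gr8/gr9 and returned whether or not the store
# happens; if either word differs from test, the BNELR instructions return
# early, otherwise gr10/gr11 is conditionally stored.  The caller can detect
# success by comparing the returned value with test.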
	.globl		__cmpxchg_64
	.type		__cmpxchg_64,@function
__cmpxchg_64:
	or.p		gr8,gr8,gr4
	or		gr9,gr9,gr5
0:
	orcc		gr0,gr0,gr0,icc3		/* set ICC3.Z */
	ckeq		icc3,cc7
	ldd.p		@(gr12,gr0),gr8			/* LDD.P/ORCR must be atomic */
	orcr		cc7,cc7,cc3
	subcc		gr8,gr4,gr0,icc0
	subcc.p		gr9,gr5,gr0,icc1
	bnelr		icc0,#0
	bnelr		icc1,#0
	cstd.p		gr10,@(gr12,gr0)	,cc3,#1
	corcc		gr29,gr29,gr0		,cc3,#1	/* clear ICC3.Z if store happens */
	beq		icc3,#0,0b
	bralr

	.size		__cmpxchg_64, .-__cmpxchg_64