/*
 *  arch/xtensa/lib/strncpy_user.S
 *
 *  This file is subject to the terms and conditions of the GNU General
 *  Public License.  See the file "COPYING" in the main directory of
 *  this archive for more details.
 *
 *  Returns: -EFAULT if an exception occurred before the terminator
 *  was copied, len if the entire buffer was filled without finding a
 *  terminator, else the length of the copied string.
 *
 *  Copyright (C) 2002 Tensilica Inc.
 */

#include <asm/variant/core.h>
#include <linux/errno.h>

/* Load or store instructions that may cause exceptions use the EX macro. */

#define EX(insn,reg1,reg2,offset,handler)	\
9:	insn	reg1, reg2, offset;		\
	.section __ex_table, "a";		\
	.word	9b, handler;			\
	.previous
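
/*
 * Each EX() site tags the potentially faulting instruction with a local
 * label "9:" and records the pair (fault address, fixup address) in the
 * kernel's __ex_table section.  When a user-space access faults, the
 * exception handler searches this table for the faulting PC and, on a
 * match, resumes execution at the fixup handler instead of killing the
 * task.  For example, EX(l8ui, a9, a3, 0, fixup_l) expands to:
 *
 *	9:	l8ui	a9, a3, 0
 *		.section __ex_table, "a"
 *		.word	9b, fixup_l
 *		.previous
 */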

/*
 * char *__strncpy_user(char *dst, const char *src, size_t len)
 */
.text
.begin literal
.align	4
.Lmask0:
	.byte	0xff, 0x00, 0x00, 0x00
.Lmask1:
	.byte	0x00, 0xff, 0x00, 0x00
.Lmask2:
	.byte	0x00, 0x00, 0xff, 0x00
.Lmask3:
	.byte	0x00, 0x00, 0x00, 0xff
.end literal
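
/*
 * Each mask above has 0xff in exactly one byte lane; because they are
 * defined as byte sequences, mask N selects the lane holding byte N in
 * memory order on both little- and big-endian cores.  Combined with
 * BNONE (branch if no masked bit is set), "bnone a9, a5, .Lz0" branches
 * iff byte 0 of the word in a9 is zero, i.e. the terminating NUL,
 * giving a per-byte NUL test on four bytes at a time.
 */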

# Register use
#   a0/ return address
#   a1/ stack pointer
#   a2/ return value
#   a3/ src
#   a4/ len
#   a5/ mask0
#   a6/ mask1
#   a7/ mask2
#   a8/ mask3
#   a9/ tmp
#   a10/ tmp
#   a11/ dst
#   a12/ tmp

.align	4
.global	__strncpy_user
.type	__strncpy_user,@function
__strncpy_user:
	entry	sp, 16		# minimal stack frame
	# a2/ dst, a3/ src, a4/ len
	mov	a11, a2		# leave dst in return value register
	beqz	a4, .Lret	# if len is zero
	l32r	a5, .Lmask0	# mask for byte 0
	l32r	a6, .Lmask1	# mask for byte 1
	l32r	a7, .Lmask2	# mask for byte 2
	l32r	a8, .Lmask3	# mask for byte 3
	bbsi.l	a3, 0, .Lsrc1mod2 # if only  8-bit aligned
	bbsi.l	a3, 1, .Lsrc2mod4 # if only 16-bit aligned
.Lsrcaligned:	# return here when src is word-aligned
	srli	a12, a4, 2	# number of loop iterations with 4B per loop
	movi	a9, 3
	bnone	a11, a9, .Laligned	# if dst is word-aligned
	j	.Ldstunaligned		# dst unaligned: use byte copy

.Lsrc1mod2:	# src address is odd
	EX(l8ui, a9, a3, 0, fixup_l)	# get byte 0
	addi	a3, a3, 1		# advance src pointer
	EX(s8i, a9, a11, 0, fixup_s)	# store byte 0
	beqz	a9, .Lret		# if byte 0 is zero
	addi	a11, a11, 1		# advance dst pointer
	addi	a4, a4, -1		# decrement len
	beqz	a4, .Lret		# if len is zero
	bbci.l	a3, 1, .Lsrcaligned	# if src is now word-aligned

.Lsrc2mod4:	# src address is 2 mod 4
	EX(l8ui, a9, a3, 0, fixup_l)	# get byte 0
	/* 1-cycle interlock */
	EX(s8i, a9, a11, 0, fixup_s)	# store byte 0
	beqz	a9, .Lret		# if byte 0 is zero
	addi	a11, a11, 1		# advance dst pointer
	addi	a4, a4, -1		# decrement len
	beqz	a4, .Lret		# if len is zero
	EX(l8ui, a9, a3, 1, fixup_l)	# get byte 1
	addi	a3, a3, 2		# advance src pointer
	EX(s8i, a9, a11, 0, fixup_s)	# store byte 1
	beqz	a9, .Lret		# if byte 1 is zero
	addi	a11, a11, 1		# advance dst pointer
	addi	a4, a4, -1		# decrement len
	bnez	a4, .Lsrcaligned	# if len is nonzero
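
/*
 * Common exit.  a2 still holds the original dst and a11 the current
 * dst position.  When a NUL was just stored, a11 was deliberately not
 * advanced past it, so a11 - a2 is the string length; when len was
 * exhausted, a11 points one past the last byte copied and the result
 * equals the original len.
 */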
.Lret:
	sub	a2, a11, a2		# compute strlen
	retw

/*
 * dst is word-aligned, src is word-aligned
 */
	.align	4		# 1 mod 4 alignment for LOOPNEZ
	.byte	0		# (0 mod 4 alignment for LBEG)
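/*
 * LOOPNEZ is a 24-bit instruction, so placing it at an address that is
 * 1 mod 4 makes the first instruction of the loop body (the LBEG
 * target) start on a word boundary, presumably keeping instruction
 * fetch of the loop aligned.
 */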
.Laligned:
#if XCHAL_HAVE_LOOPS
	loopnez	a12, .Loop1done
#else
	beqz	a12, .Loop1done
	slli	a12, a12, 2
	add	a12, a12, a11	# a12 = end of last 4B chunk
#endif
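/*
 * On cores with the loop option, LOOPNEZ sets up a zero-overhead
 * hardware loop (LBEG/LEND/LCOUNT) that runs a12 times with no
 * back-branch cost; cores without it fall back to an explicit end
 * pointer in a12 and the conditional branch at the bottom of the loop.
 */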
.Loop1:
	EX(l32i, a9, a3, 0, fixup_l)	# get word from src
	addi	a3, a3, 4		# advance src pointer
	bnone	a9, a5, .Lz0		# if byte 0 is zero
	bnone	a9, a6, .Lz1		# if byte 1 is zero
	bnone	a9, a7, .Lz2		# if byte 2 is zero
	EX(s32i, a9, a11, 0, fixup_s)	# store word to dst
	bnone	a9, a8, .Lz3		# if byte 3 is zero
	addi	a11, a11, 4		# advance dst pointer
#if !XCHAL_HAVE_LOOPS
	blt	a11, a12, .Loop1
#endif
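
/*
 * Note the ordering above: bytes 0-2 are tested before the word is
 * stored, so a word containing an early NUL is never written whole
 * (the .LzN handlers store only the bytes up to and including the
 * NUL).  Byte 3 is tested after the store, since in that case the
 * full word, terminator included, is exactly what dst should get.
 */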

.Loop1done:
	bbci.l	a4, 1, .L100
	# copy 2 bytes
	EX(l16ui, a9, a3, 0, fixup_l)
	addi	a3, a3, 2		# advance src pointer
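	/*
	 * The halfword just loaded sits in different lanes of a9
	 * depending on endianness: on big-endian cores the first memory
	 * byte is in bits 15-8, where mask2/mask3 look; on little-endian
	 * cores it is in bits 7-0, covered by mask0/mask1.  Hence the
	 * #ifdef below.
	 */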
#ifdef __XTENSA_EB__
	bnone	a9, a7, .Lz0		# if byte 2 is zero
	bnone	a9, a8, .Lz1		# if byte 3 is zero
#else
	bnone	a9, a5, .Lz0		# if byte 0 is zero
	bnone	a9, a6, .Lz1		# if byte 1 is zero
#endif
	EX(s16i, a9, a11, 0, fixup_s)
	addi	a11, a11, 2		# advance dst pointer
.L100:
	bbci.l	a4, 0, .Lret
	EX(l8ui, a9, a3, 0, fixup_l)
	/* slot */
	EX(s8i, a9, a11, 0, fixup_s)
	beqz	a9, .Lret		# if byte is zero
	addi	a11, a11, 1-3		# advance dst ptr 1, but also cancel
					# the effect of adding 3 in .Lz3 code
	/* fall thru to .Lz3 and "retw" */

.Lz3:	# byte 3 is zero
	addi	a11, a11, 3		# advance dst pointer
	sub	a2, a11, a2		# compute strlen
	retw
.Lz0:	# byte 0 is zero
#ifdef __XTENSA_EB__
	movi	a9, 0
#endif /* __XTENSA_EB__ */
	EX(s8i, a9, a11, 0, fixup_s)
	sub	a2, a11, a2		# compute strlen
	retw
.Lz1:	# byte 1 is zero
#ifdef __XTENSA_EB__
	extui	a9, a9, 16, 16
#endif /* __XTENSA_EB__ */
	EX(s16i, a9, a11, 0, fixup_s)
	addi	a11, a11, 1		# advance dst pointer
	sub	a2, a11, a2		# compute strlen
	retw
.Lz2:	# byte 2 is zero
#ifdef __XTENSA_EB__
	extui	a9, a9, 16, 16
#endif /* __XTENSA_EB__ */
	EX(s16i, a9, a11, 0, fixup_s)
	movi	a9, 0
	EX(s8i, a9, a11, 2, fixup_s)
	addi	a11, a11, 2		# advance dst pointer
	sub	a2, a11, a2		# compute strlen
	retw

	.align	4		# 1 mod 4 alignment for LOOPNEZ
	.byte	0		# (0 mod 4 alignment for LBEG)
.Ldstunaligned:
/*
 * for now just use byte copy loop
 */
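/*
 * A faster variant could load aligned words from src and recombine
 * them with SSA8/SRC funnel shifts for the unaligned dst, much as the
 * Xtensa memcpy does for misaligned operands; the byte loop below is
 * simply correct for every alignment at some cost in speed.
 */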
#if XCHAL_HAVE_LOOPS
	loopnez	a4, .Lunalignedend
#else
	beqz	a4, .Lunalignedend
	add	a12, a11, a4		# a12 = ending address
#endif /* XCHAL_HAVE_LOOPS */
.Lnextbyte:
	EX(l8ui, a9, a3, 0, fixup_l)
	addi	a3, a3, 1
	EX(s8i, a9, a11, 0, fixup_s)
	beqz	a9, .Lunalignedend
	addi	a11, a11, 1
#if !XCHAL_HAVE_LOOPS
	blt	a11, a12, .Lnextbyte
#endif

.Lunalignedend:
	sub	a2, a11, a2		# compute strlen
	retw


	.section .fixup, "ax"
	.align	4

	/* For now, just return -EFAULT.  Future implementations might
	 * like to clear remaining kernel space, like the fixup
	 * implementation in memset().  Thus, we differentiate between
	 * load/store fixups. */

fixup_s:
fixup_l:
	movi	a2, -EFAULT
	retw
