author     Ralf Baechle <ralf@linux-mips.org>   2013-01-22 12:59:30 +0100
committer  Ralf Baechle <ralf@linux-mips.org>   2013-02-01 10:00:22 +0100
commit     7034228792cc561e79ff8600f02884bd4c80e287 (patch)
tree       89b77af37d087d9de236fc5d21f60bf552d0a2c6 /arch/mips/lib
parent     405ab01c70e18058d9c01a1256769a61fc65413e (diff)
MIPS: Whitespace cleanup.

Having received another series of whitespace patches I decided to do this
once and for all rather than dealing with these kinds of patches trickling
in forever.

Signed-off-by: Ralf Baechle <ralf@linux-mips.org>
Diffstat (limited to 'arch/mips/lib')
-rw-r--r--  arch/mips/lib/bitops.c        |  4
-rw-r--r--  arch/mips/lib/csum_partial.S  | 42
-rw-r--r--  arch/mips/lib/delay.c         |  2
-rw-r--r--  arch/mips/lib/dump_tlb.c      |  2
-rw-r--r--  arch/mips/lib/memcpy.S        | 34
-rw-r--r--  arch/mips/lib/memset.S        |  4
-rw-r--r--  arch/mips/lib/r3k_dump_tlb.c  |  2
-rw-r--r--  arch/mips/lib/strncpy_user.S  |  2
-rw-r--r--  arch/mips/lib/strnlen_user.S  |  6
-rw-r--r--  arch/mips/lib/uncached.c      |  2
10 files changed, 50 insertions(+), 50 deletions(-)
diff --git a/arch/mips/lib/bitops.c b/arch/mips/lib/bitops.c
index 239a9c9..81f1dcf 100644
--- a/arch/mips/lib/bitops.c
+++ b/arch/mips/lib/bitops.c
@@ -56,7 +56,7 @@ EXPORT_SYMBOL(__mips_clear_bit);
/**
- * __mips_change_bit - Toggle a bit in memory. This is called by change_bit()
+ * __mips_change_bit - Toggle a bit in memory. This is called by change_bit()
* if it cannot find a faster solution.
* @nr: Bit to change
* @addr: Address to start counting from
@@ -155,7 +155,7 @@ EXPORT_SYMBOL(__mips_test_and_clear_bit);
/**
- * __mips_test_and_change_bit - Change a bit and return its old value. This is
+ * __mips_test_and_change_bit - Change a bit and return its old value. This is
* called by test_and_change_bit() if it cannot find a faster solution.
* @nr: Bit to change
* @addr: Address to count from
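For context, a minimal C sketch (not the kernel's exact code) of the generic fallback the kernel-doc above describes: change_bit() toggles bit 'nr' of the bitmap at 'addr' by XOR-ing a mask into the word holding that bit. BITS_PER_LONG is defined locally here for self-containment; the real __mips_change_bit() also serializes the read-modify-write, which this sketch omits.

    #define BITS_PER_LONG (8 * sizeof(unsigned long))

    static void sketch_change_bit(unsigned long nr, volatile unsigned long *addr)
    {
            volatile unsigned long *word = addr + nr / BITS_PER_LONG;
            unsigned long mask = 1UL << (nr % BITS_PER_LONG);

            *word ^= mask;  /* toggle: 0 -> 1, 1 -> 0 */
    }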
diff --git a/arch/mips/lib/csum_partial.S b/arch/mips/lib/csum_partial.S
index 6b876ca..507147a 100644
--- a/arch/mips/lib/csum_partial.S
+++ b/arch/mips/lib/csum_partial.S
@@ -67,8 +67,8 @@
#define CSUM_BIGCHUNK1(src, offset, sum, _t0, _t1, _t2, _t3) \
LOAD _t0, (offset + UNIT(0))(src); \
LOAD _t1, (offset + UNIT(1))(src); \
- LOAD _t2, (offset + UNIT(2))(src); \
- LOAD _t3, (offset + UNIT(3))(src); \
+ LOAD _t2, (offset + UNIT(2))(src); \
+ LOAD _t3, (offset + UNIT(3))(src); \
ADDC(sum, _t0); \
ADDC(sum, _t1); \
ADDC(sum, _t2); \
@@ -285,7 +285,7 @@ LEAF(csum_partial)
1:
#endif
.set reorder
- /* Add the passed partial csum. */
+ /* Add the passed partial csum. */
ADDC32(sum, a2)
jr ra
.set noreorder
@@ -298,7 +298,7 @@ LEAF(csum_partial)
* csum_partial_copy_nocheck(src, dst, len, sum)
* __csum_partial_copy_user(src, dst, len, sum, errp)
*
- * See "Spec" in memcpy.S for details. Unlike __copy_user, all
+ * See "Spec" in memcpy.S for details. Unlike __copy_user, all
* function in this file use the standard calling convention.
*/
@@ -371,16 +371,16 @@ LEAF(csum_partial)
#ifdef CONFIG_CPU_LITTLE_ENDIAN
#define LDFIRST LOADR
-#define LDREST LOADL
+#define LDREST LOADL
#define STFIRST STORER
-#define STREST STOREL
+#define STREST STOREL
#define SHIFT_DISCARD SLLV
#define SHIFT_DISCARD_REVERT SRLV
#else
#define LDFIRST LOADL
-#define LDREST LOADR
+#define LDREST LOADR
#define STFIRST STOREL
-#define STREST STORER
+#define STREST STORER
#define SHIFT_DISCARD SRLV
#define SHIFT_DISCARD_REVERT SLLV
#endif
@@ -430,7 +430,7 @@ FEXPORT(csum_partial_copy_nocheck)
* src and dst are aligned; need to compute rem
*/
.Lboth_aligned:
- SRL t0, len, LOG_NBYTES+3 # +3 for 8 units/iter
+ SRL t0, len, LOG_NBYTES+3 # +3 for 8 units/iter
beqz t0, .Lcleanup_both_aligned # len < 8*NBYTES
nop
SUB len, 8*NBYTES # subtract here for bgez loop
@@ -518,7 +518,7 @@ EXC( STORE t0, 0(dst), .Ls_exc)
/*
* src and dst are aligned, need to copy rem bytes (rem < NBYTES)
* A loop would do only a byte at a time with possible branch
- * mispredicts. Can't do an explicit LOAD dst,mask,or,STORE
+ * mispredicts. Can't do an explicit LOAD dst,mask,or,STORE
* because can't assume read-access to dst. Instead, use
* STREST dst, which doesn't require read access to dst.
*
@@ -532,7 +532,7 @@ EXC( STORE t0, 0(dst), .Ls_exc)
li bits, 8*NBYTES
SLL rem, len, 3 # rem = number of bits to keep
EXC( LOAD t0, 0(src), .Ll_exc)
- SUB bits, bits, rem # bits = number of bits to discard
+ SUB bits, bits, rem # bits = number of bits to discard
SHIFT_DISCARD t0, t0, bits
EXC( STREST t0, -1(t1), .Ls_exc)
SHIFT_DISCARD_REVERT t0, t0, bits
@@ -551,7 +551,7 @@ EXC( STREST t0, -1(t1), .Ls_exc)
* Set match = (src and dst have same alignment)
*/
#define match rem
-EXC( LDFIRST t3, FIRST(0)(src), .Ll_exc)
+EXC( LDFIRST t3, FIRST(0)(src), .Ll_exc)
ADD t2, zero, NBYTES
EXC( LDREST t3, REST(0)(src), .Ll_exc_copy)
SUB t2, t2, t1 # t2 = number of bytes copied
@@ -568,9 +568,9 @@ EXC( STFIRST t3, FIRST(0)(dst), .Ls_exc)
ADD src, src, t2
.Lsrc_unaligned_dst_aligned:
- SRL t0, len, LOG_NBYTES+2 # +2 for 4 units/iter
+ SRL t0, len, LOG_NBYTES+2 # +2 for 4 units/iter
beqz t0, .Lcleanup_src_unaligned
- and rem, len, (4*NBYTES-1) # rem = len % 4*NBYTES
+ and rem, len, (4*NBYTES-1) # rem = len % 4*NBYTES
1:
/*
* Avoid consecutive LD*'s to the same register since some mips
@@ -578,13 +578,13 @@ EXC( STFIRST t3, FIRST(0)(dst), .Ls_exc)
* It's OK to load FIRST(N+1) before REST(N) because the two addresses
* are to the same unit (unless src is aligned, but it's not).
*/
-EXC( LDFIRST t0, FIRST(0)(src), .Ll_exc)
-EXC( LDFIRST t1, FIRST(1)(src), .Ll_exc_copy)
- SUB len, len, 4*NBYTES
+EXC( LDFIRST t0, FIRST(0)(src), .Ll_exc)
+EXC( LDFIRST t1, FIRST(1)(src), .Ll_exc_copy)
+ SUB len, len, 4*NBYTES
EXC( LDREST t0, REST(0)(src), .Ll_exc_copy)
EXC( LDREST t1, REST(1)(src), .Ll_exc_copy)
-EXC( LDFIRST t2, FIRST(2)(src), .Ll_exc_copy)
-EXC( LDFIRST t3, FIRST(3)(src), .Ll_exc_copy)
+EXC( LDFIRST t2, FIRST(2)(src), .Ll_exc_copy)
+EXC( LDFIRST t3, FIRST(3)(src), .Ll_exc_copy)
EXC( LDREST t2, REST(2)(src), .Ll_exc_copy)
EXC( LDREST t3, REST(3)(src), .Ll_exc_copy)
ADD src, src, 4*NBYTES
@@ -634,7 +634,7 @@ EXC( STORE t0, 0(dst), .Ls_exc)
#define SHIFT_INC -8
#endif
move t2, zero # partial word
- li t3, SHIFT_START # shift
+ li t3, SHIFT_START # shift
/* use .Ll_exc_copy here to return correct sum on fault */
#define COPY_BYTE(N) \
EXC( lbu t0, N(src), .Ll_exc_copy); \
@@ -642,7 +642,7 @@ EXC( lbu t0, N(src), .Ll_exc_copy); \
EXC( sb t0, N(dst), .Ls_exc); \
SLLV t0, t0, t3; \
addu t3, SHIFT_INC; \
- beqz len, .Lcopy_bytes_done; \
+ beqz len, .Lcopy_bytes_done; \
or t2, t0
COPY_BYTE(0)
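A plain-C sketch of the idea behind the LDFIRST/LDREST pairs above (MIPS lwl/lwr; the same pairing reappears in memcpy.S below): an unaligned word is assembled from two aligned loads merged by shifts, so no single load ever crosses a word boundary. This assumes little-endian, 32-bit words, and tolerates reading up to 3 bytes past the requested word within the same aligned region -- acceptable for a sketch, not production C.

    #include <stdint.h>

    static uint32_t sketch_load_unaligned32(const uint8_t *p)
    {
            uintptr_t off = (uintptr_t)p & 3;       /* misalignment in bytes */
            const uint32_t *base = (const uint32_t *)((uintptr_t)p - off);

            if (off == 0)
                    return base[0];                 /* already aligned */
            /* low bytes from the "first" aligned word, high bytes from the "rest" */
            return (uint32_t)(base[0] >> (8 * off)) |
                   (uint32_t)(base[1] << (8 * (4 - off)));
    }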
diff --git a/arch/mips/lib/delay.c b/arch/mips/lib/delay.c
index 288f795..44713af 100644
--- a/arch/mips/lib/delay.c
+++ b/arch/mips/lib/delay.c
@@ -36,7 +36,7 @@ EXPORT_SYMBOL(__delay);
* Division by multiplication: you don't have to worry about
* loss of precision.
*
- * Use only for very small delays ( < 1 msec). Should probably use a
+ * Use only for very small delays ( < 1 msec). Should probably use a
* lookup table, really, as the multiplications take much too long with
* short delays. This is a "reasonable" implementation, though (and the
* first constant multiplications gets optimized away if the delay is
diff --git a/arch/mips/lib/dump_tlb.c b/arch/mips/lib/dump_tlb.c
index a99c1d3..32b9f21 100644
--- a/arch/mips/lib/dump_tlb.c
+++ b/arch/mips/lib/dump_tlb.c
@@ -63,7 +63,7 @@ static void dump_tlb(int first, int last)
tlb_read();
BARRIER();
pagemask = read_c0_pagemask();
- entryhi = read_c0_entryhi();
+ entryhi = read_c0_entryhi();
entrylo0 = read_c0_entrylo0();
entrylo1 = read_c0_entrylo1();
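The shape of the loop the hunk above sits in, sketched from the R4k TLB model: select a slot via the Index register, execute tlbr, then read back the registers it latched. Accessor names follow <asm/mipsregs.h> (read_c0_*/write_c0_*); decoding and printing are omitted.

    #include <asm/mipsregs.h>

    static void sketch_dump_tlb(int first, int last)
    {
            unsigned long pagemask, entryhi, entrylo0, entrylo1;
            int i;

            for (i = first; i <= last; i++) {
                    write_c0_index(i);              /* choose the TLB slot */
                    tlb_read();                     /* tlbr latches it into c0 regs */
                    pagemask = read_c0_pagemask();
                    entryhi  = read_c0_entryhi();
                    entrylo0 = read_c0_entrylo0();
                    entrylo1 = read_c0_entrylo1();
                    /* decode and print the fields here */
            }
    }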
diff --git a/arch/mips/lib/memcpy.S b/arch/mips/lib/memcpy.S
index 65192c0..c5c40da 100644
--- a/arch/mips/lib/memcpy.S
+++ b/arch/mips/lib/memcpy.S
@@ -156,15 +156,15 @@
#ifdef CONFIG_CPU_LITTLE_ENDIAN
#define LDFIRST LOADR
-#define LDREST LOADL
+#define LDREST LOADL
#define STFIRST STORER
-#define STREST STOREL
+#define STREST STOREL
#define SHIFT_DISCARD SLLV
#else
#define LDFIRST LOADL
-#define LDREST LOADR
+#define LDREST LOADR
#define STFIRST STOREL
-#define STREST STORER
+#define STREST STORER
#define SHIFT_DISCARD SRLV
#endif
@@ -235,7 +235,7 @@ __copy_user_common:
* src and dst are aligned; need to compute rem
*/
.Lboth_aligned:
- SRL t0, len, LOG_NBYTES+3 # +3 for 8 units/iter
+ SRL t0, len, LOG_NBYTES+3 # +3 for 8 units/iter
beqz t0, .Lcleanup_both_aligned # len < 8*NBYTES
and rem, len, (8*NBYTES-1) # rem = len % (8*NBYTES)
PREF( 0, 3*32(src) )
@@ -313,7 +313,7 @@ EXC( STORE t0, 0(dst), .Ls_exc_p1u)
/*
* src and dst are aligned, need to copy rem bytes (rem < NBYTES)
* A loop would do only a byte at a time with possible branch
- * mispredicts. Can't do an explicit LOAD dst,mask,or,STORE
+ * mispredicts. Can't do an explicit LOAD dst,mask,or,STORE
* because can't assume read-access to dst. Instead, use
* STREST dst, which doesn't require read access to dst.
*
@@ -327,7 +327,7 @@ EXC( STORE t0, 0(dst), .Ls_exc_p1u)
li bits, 8*NBYTES
SLL rem, len, 3 # rem = number of bits to keep
EXC( LOAD t0, 0(src), .Ll_exc)
- SUB bits, bits, rem # bits = number of bits to discard
+ SUB bits, bits, rem # bits = number of bits to discard
SHIFT_DISCARD t0, t0, bits
EXC( STREST t0, -1(t1), .Ls_exc)
jr ra
@@ -343,7 +343,7 @@ EXC( STREST t0, -1(t1), .Ls_exc)
* Set match = (src and dst have same alignment)
*/
#define match rem
-EXC( LDFIRST t3, FIRST(0)(src), .Ll_exc)
+EXC( LDFIRST t3, FIRST(0)(src), .Ll_exc)
ADD t2, zero, NBYTES
EXC( LDREST t3, REST(0)(src), .Ll_exc_copy)
SUB t2, t2, t1 # t2 = number of bytes copied
@@ -357,10 +357,10 @@ EXC( STFIRST t3, FIRST(0)(dst), .Ls_exc)
ADD src, src, t2
.Lsrc_unaligned_dst_aligned:
- SRL t0, len, LOG_NBYTES+2 # +2 for 4 units/iter
+ SRL t0, len, LOG_NBYTES+2 # +2 for 4 units/iter
PREF( 0, 3*32(src) )
beqz t0, .Lcleanup_src_unaligned
- and rem, len, (4*NBYTES-1) # rem = len % 4*NBYTES
+ and rem, len, (4*NBYTES-1) # rem = len % 4*NBYTES
PREF( 1, 3*32(dst) )
1:
/*
@@ -370,13 +370,13 @@ EXC( STFIRST t3, FIRST(0)(dst), .Ls_exc)
* are to the same unit (unless src is aligned, but it's not).
*/
R10KCBARRIER(0(ra))
-EXC( LDFIRST t0, FIRST(0)(src), .Ll_exc)
-EXC( LDFIRST t1, FIRST(1)(src), .Ll_exc_copy)
- SUB len, len, 4*NBYTES
+EXC( LDFIRST t0, FIRST(0)(src), .Ll_exc)
+EXC( LDFIRST t1, FIRST(1)(src), .Ll_exc_copy)
+ SUB len, len, 4*NBYTES
EXC( LDREST t0, REST(0)(src), .Ll_exc_copy)
EXC( LDREST t1, REST(1)(src), .Ll_exc_copy)
-EXC( LDFIRST t2, FIRST(2)(src), .Ll_exc_copy)
-EXC( LDFIRST t3, FIRST(3)(src), .Ll_exc_copy)
+EXC( LDFIRST t2, FIRST(2)(src), .Ll_exc_copy)
+EXC( LDFIRST t3, FIRST(3)(src), .Ll_exc_copy)
EXC( LDREST t2, REST(2)(src), .Ll_exc_copy)
EXC( LDREST t3, REST(3)(src), .Ll_exc_copy)
PREF( 0, 9*32(src) ) # 0 is PREF_LOAD (not streamed)
@@ -388,7 +388,7 @@ EXC( STORE t0, UNIT(0)(dst), .Ls_exc_p4u)
EXC( STORE t1, UNIT(1)(dst), .Ls_exc_p3u)
EXC( STORE t2, UNIT(2)(dst), .Ls_exc_p2u)
EXC( STORE t3, UNIT(3)(dst), .Ls_exc_p1u)
- PREF( 1, 9*32(dst) ) # 1 is PREF_STORE (not streamed)
+ PREF( 1, 9*32(dst) ) # 1 is PREF_STORE (not streamed)
.set reorder /* DADDI_WAR */
ADD dst, dst, 4*NBYTES
bne len, rem, 1b
@@ -502,7 +502,7 @@ EXC( lb t1, 0(src), .Ll_exc)
#define SEXC(n) \
- .set reorder; /* DADDI_WAR */ \
+ .set reorder; /* DADDI_WAR */ \
.Ls_exc_p ## n ## u: \
ADD len, len, n*NBYTES; \
jr ra; \
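The PREF(0, ...) / PREF(1, ...) hints in the memcpy.S hunks above correspond to load and store prefetches. A GCC-flavoured C analogue, assuming the same 3*32-byte lookahead the assembly uses, is __builtin_prefetch:

    static void sketch_prefetch(const unsigned char *src, unsigned char *dst)
    {
            /* read-prefetch ahead of the loads, write-prefetch ahead of the stores */
            __builtin_prefetch(src + 3 * 32, 0);    /* rw = 0: prepare for a load  */
            __builtin_prefetch(dst + 3 * 32, 1);    /* rw = 1: prepare for a store */
    }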
diff --git a/arch/mips/lib/memset.S b/arch/mips/lib/memset.S
index 606c8a9..053d3b0 100644
--- a/arch/mips/lib/memset.S
+++ b/arch/mips/lib/memset.S
@@ -21,8 +21,8 @@
#define EX(insn,reg,addr,handler) \
9: insn reg, addr; \
- .section __ex_table,"a"; \
- PTR 9b, handler; \
+ .section __ex_table,"a"; \
+ PTR 9b, handler; \
.previous
.macro f_fill64 dst, offset, val, fixup
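A conceptual sketch of the __ex_table mechanism the EX() macro above feeds: each entry pairs the address of an instruction that may fault on a user access (the "9:" label) with the address of fixup code, and on a fault the handler searches the table so the access resumes at the fixup instead of killing the kernel. Field and function names here are illustrative, not the exact kernel ones.

    struct ex_entry {
            unsigned long insn;     /* address of the guarded instruction */
            unsigned long fixup;    /* where to continue after a fault there */
    };

    static unsigned long find_fixup(const struct ex_entry *tbl, unsigned int n,
                                    unsigned long fault_pc)
    {
            unsigned int i;

            for (i = 0; i < n; i++)
                    if (tbl[i].insn == fault_pc)
                            return tbl[i].fixup;
            return 0;       /* no entry: the fault is a real error */
    }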
diff --git a/arch/mips/lib/r3k_dump_tlb.c b/arch/mips/lib/r3k_dump_tlb.c
index 9cee907..91615c2 100644
--- a/arch/mips/lib/r3k_dump_tlb.c
+++ b/arch/mips/lib/r3k_dump_tlb.c
@@ -30,7 +30,7 @@ static void dump_tlb(int first, int last)
"tlbr\n\t"
"nop\n\t"
".set\treorder");
- entryhi = read_c0_entryhi();
+ entryhi = read_c0_entryhi();
entrylo0 = read_c0_entrylo0();
/* Unused entries have a virtual address of KSEG0. */
diff --git a/arch/mips/lib/strncpy_user.S b/arch/mips/lib/strncpy_user.S
index 7201b2f..bad5394 100644
--- a/arch/mips/lib/strncpy_user.S
+++ b/arch/mips/lib/strncpy_user.S
@@ -23,7 +23,7 @@
/*
* Ugly special case have to check: we might get passed a user space
- * pointer which wraps into the kernel space. We don't deal with that. If
+ * pointer which wraps into the kernel space. We don't deal with that. If
* it happens at most some bytes of the exceptions handlers will be copied.
*/
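The wrap case that comment tolerates, made explicit: with 'addr' already below 'limit', an overflow-safe check that an n-byte copy stays inside the user range. This is a hypothetical helper for illustration, not something strncpy_user.S actually performs.

    static int sketch_range_wraps(unsigned long addr, unsigned long n,
                                  unsigned long limit)
    {
            return n > limit - addr;        /* true if addr + n would cross or wrap */
    }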
diff --git a/arch/mips/lib/strnlen_user.S b/arch/mips/lib/strnlen_user.S
index 6445716..beea03c 100644
--- a/arch/mips/lib/strnlen_user.S
+++ b/arch/mips/lib/strnlen_user.S
@@ -21,9 +21,9 @@
* maximum of a1 or 0 in case of error.
*
* Note: for performance reasons we deliberately accept that a user may
- * make strlen_user and strnlen_user access the first few KSEG0
- * bytes. There's nothing secret there. On 64-bit accessing beyond
- * the maximum is a tad hairier ...
+ * make strlen_user and strnlen_user access the first few KSEG0
+ * bytes. There's nothing secret there. On 64-bit accessing beyond
+ * the maximum is a tad hairier ...
*/
LEAF(__strnlen_user_asm)
LONG_L v0, TI_ADDR_LIMIT($28) # pointer ok?
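A behavioural sketch of what the assembly above implements: scan at most 'n' bytes of a user string, returning the length including the terminating NUL, or 0 when the pointer fails the address-limit check (the TI_ADDR_LIMIT load). Plain dereferences stand in for the exception-guarded user loads, and the exact over-length return value has varied across kernel versions, so treat the final return as an assumption of this sketch.

    static long sketch_strnlen_user(const char *s, long n, unsigned long addr_limit)
    {
            long len;

            if ((unsigned long)s >= addr_limit)
                    return 0;                       /* pointer not OK */
            for (len = 0; len < n; len++)
                    if (s[len] == '\0')             /* guarded load in reality */
                            return len + 1;         /* count the NUL too */
            return n;                               /* no NUL within the bound */
    }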
diff --git a/arch/mips/lib/uncached.c b/arch/mips/lib/uncached.c
index a6d1c77..65e3dfc 100644
--- a/arch/mips/lib/uncached.c
+++ b/arch/mips/lib/uncached.c
@@ -4,7 +4,7 @@
* for more details.
*
* Copyright (C) 2005 Thiemo Seufer
- * Copyright (C) 2005 MIPS Technologies, Inc. All rights reserved.
+ * Copyright (C) 2005 MIPS Technologies, Inc. All rights reserved.
* Author: Maciej W. Rozycki <macro@mips.com>
*/