Diffstat (limited to 'libavcodec/arm')
-rw-r--r--   libavcodec/arm/mlpdsp_armv6.S        2
-rw-r--r--   libavcodec/arm/mpegvideo_armv5te.c   2
-rw-r--r--   libavcodec/arm/simple_idct_arm.S     6
-rw-r--r--   libavcodec/arm/vp8dsp_neon.S         2
4 files changed, 6 insertions, 6 deletions
diff --git a/libavcodec/arm/mlpdsp_armv6.S b/libavcodec/arm/mlpdsp_armv6.S
index 3c88021..de9db46 100644
--- a/libavcodec/arm/mlpdsp_armv6.S
+++ b/libavcodec/arm/mlpdsp_armv6.S
@@ -240,7 +240,7 @@ DAT3 .req v4
DAT4 .req v5
DAT5 .req v6
DAT6 .req sl // use these rather than the otherwise unused
-DAT7 .req fp // ip and lr so that we can load them usinf LDRD
+DAT7 .req fp // ip and lr so that we can load them using LDRD
.macro output4words tail, head, r0, r1, r2, r3, r4, r5, r6, r7, pointer_dead=0
.if \head
diff --git a/libavcodec/arm/mpegvideo_armv5te.c b/libavcodec/arm/mpegvideo_armv5te.c
index 3c44cd8..4bb7b6e 100644
--- a/libavcodec/arm/mpegvideo_armv5te.c
+++ b/libavcodec/arm/mpegvideo_armv5te.c
@@ -28,7 +28,7 @@ void ff_dct_unquantize_h263_armv5te(int16_t *block, int qmul, int qadd, int coun
#ifdef ENABLE_ARM_TESTS
/**
- * h263 dequantizer supplementary function, it is performance critical and needs to
+ * H.263 dequantizer supplementary function, it is performance critical and needs to
* have optimized implementations for each architecture. Is also used as a reference
* implementation in regression tests
*/
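
For reference, the helper this comment describes follows the usual H.263 inverse-quantization rule: each nonzero coefficient is scaled by qmul and biased away from zero by qadd. The following is a minimal C sketch under that assumption, with FFmpeg's usual argument order; the function name is illustrative, not the actual symbol in this file:

    #include <stdint.h>

    /* Hypothetical reference dequantizer: scale each nonzero coefficient
     * by qmul and push it away from zero by qadd, as H.263 requires. */
    static void h263_dequant_ref(int16_t *block, int qmul, int qadd, int count)
    {
        for (int i = 0; i < count; i++) {
            int level = block[i];
            if (level)
                block[i] = level < 0 ? level * qmul - qadd
                                     : level * qmul + qadd;
        }
    }
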
diff --git a/libavcodec/arm/simple_idct_arm.S b/libavcodec/arm/simple_idct_arm.S
index bf9ee3d..a651927 100644
--- a/libavcodec/arm/simple_idct_arm.S
+++ b/libavcodec/arm/simple_idct_arm.S
@@ -64,7 +64,7 @@ function ff_simple_idct_arm, export=1
__row_loop:
- @@ read the row and check if it is null, almost null, or not, according to strongarm specs, it is not necessary to optimize ldr accesses (i.e. split 32bits in 2 16bits words), at least it gives more usable registers :)
+ @@ read the row and check if it is null, almost null, or not, according to strongarm specs, it is not necessary to optimize ldr accesses (i.e. split 32 bits in two 16-bit words), at least it gives more usable registers :)
ldr r1, [r14, #0] @ R1=(int32)(R12)[0]=ROWr32[0] (relative row cast to a 32b pointer)
ldr r2, [r14, #4] @ R2=(int32)(R12)[1]=ROWr32[1]
ldr r3, [r14, #8] @ R3=ROWr32[2]
@@ -234,8 +234,8 @@ __end_a_evaluation:
@@ row[7] = (a0 - b0) >> ROW_SHIFT;
add r8, r6, r0 @ R8=a0+b0
add r9, r2, r1 @ R9=a1+b1
- @@ put 2 16 bits half-words in a 32bits word
- @@ ROWr32[0]=ROWr16[0] | (ROWr16[1]<<16) (only Little Endian compliant then!!!)
+ @@ put two 16-bit half-words in a 32-bit word
+ @@ ROWr32[0]=ROWr16[0] | (ROWr16[1]<<16) (only little-endian compliant then!!!)
ldr r10, =MASK_MSHW @ R10=0xFFFF0000
and r9, r10, r9, lsl #ROW_SHIFT2MSHW @ R9=0xFFFF0000 & ((a1+b1)<<5)
mvn r11, r10 @ R11= NOT R10= 0x0000FFFF
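
The packing trick commented above can be written in C roughly as below. This is an illustrative sketch, not part of the patch, and it assumes ROW_SHIFT is 11 as in simple_idct; the single 32-bit store only matches the int16_t row layout on a little-endian CPU, which is exactly what the comment warns about:

    #include <stdint.h>
    #include <string.h>

    /* Store two shifted 16-bit results with one 32-bit write.
     * row[0] receives a0+b0 and row[1] receives a1+b1 -- but only on
     * little-endian targets, where the low half-word comes first. */
    static void store_row_pair(int16_t *row, int32_t a0b0, int32_t a1b1)
    {
        uint32_t word = (uint16_t)(a0b0 >> 11) |
                        ((uint32_t)(uint16_t)(a1b1 >> 11) << 16);
        memcpy(row, &word, sizeof word);
    }
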
diff --git a/libavcodec/arm/vp8dsp_neon.S b/libavcodec/arm/vp8dsp_neon.S
index 544332c..02236a4 100644
--- a/libavcodec/arm/vp8dsp_neon.S
+++ b/libavcodec/arm/vp8dsp_neon.S
@@ -322,7 +322,7 @@ endfunc
vmov.i16 q12, #3
vsubl.s8 q10, d8, d6 @ QS0 - PS0
- vsubl.s8 q11, d9, d7 @ (widened to 16bit)
+ vsubl.s8 q11, d9, d7 @ (widened to 16 bits)
veor q2, q2, q13 @ PS1 = P1 ^ 0x80
veor q5, q5, q13 @ QS1 = Q1 ^ 0x80
vmul.i16 q10, q10, q12 @ w = 3 * (QS0 - PS0)
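
In scalar terms, the step touched here does roughly the following. This is a sketch for illustration only (the real code processes 16 pixels at a time in q registers), assuming the usual VP8 convention of flipping pixels into signed range with x ^ 0x80:

    #include <stdint.h>

    /* One lane of the filter-weight computation: the 8-bit signed
     * difference QS0 - PS0 is widened to 16 bits before the multiply
     * by 3, so the intermediate cannot wrap around. */
    static int16_t filter_weight_term(uint8_t p0, uint8_t q0)
    {
        int8_t ps0 = (int8_t)(p0 ^ 0x80);            /* PS0 = P0 ^ 0x80 */
        int8_t qs0 = (int8_t)(q0 ^ 0x80);            /* QS0 = Q0 ^ 0x80 */
        int16_t d  = (int16_t)qs0 - (int16_t)ps0;    /* widened to 16 bits */
        return (int16_t)(3 * d);                     /* w = 3 * (QS0 - PS0) */
    }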