Diffstat (limited to 'libavcodec/alpha')
-rw-r--r--  libavcodec/alpha/asm.h                |  2
-rw-r--r--  libavcodec/alpha/dsputil_alpha.c      | 14
-rw-r--r--  libavcodec/alpha/dsputil_alpha_asm.S  | 26
-rw-r--r--  libavcodec/alpha/motion_est_alpha.c   |  8
-rw-r--r--  libavcodec/alpha/motion_est_mvi_asm.S |  4
-rw-r--r--  libavcodec/alpha/mpegvideo_alpha.c    |  8
-rw-r--r--  libavcodec/alpha/simple_idct_alpha.c  |  8
7 files changed, 35 insertions, 35 deletions
diff --git a/libavcodec/alpha/asm.h b/libavcodec/alpha/asm.h
index 6dc997b..ac4c04c 100644
--- a/libavcodec/alpha/asm.h
+++ b/libavcodec/alpha/asm.h
@@ -126,7 +126,7 @@ struct unaligned_long { uint64_t l; } __attribute__((packed));
 #define minsw4 __builtin_alpha_minsw4
 #define maxub8 __builtin_alpha_maxub8
 #define maxsb8 __builtin_alpha_maxsb8
-#define maxuw4 __builtin_alpha_maxuw4 
+#define maxuw4 __builtin_alpha_maxuw4
 #define maxsw4 __builtin_alpha_maxsw4
 #define perr   __builtin_alpha_perr
 #define pklb   __builtin_alpha_pklb
diff --git a/libavcodec/alpha/dsputil_alpha.c b/libavcodec/alpha/dsputil_alpha.c
index 496f461..2160184 100644
--- a/libavcodec/alpha/dsputil_alpha.c
+++ b/libavcodec/alpha/dsputil_alpha.c
@@ -28,11 +28,11 @@ void put_pixels_axp_asm(uint8_t *block, const uint8_t *pixels,
                         int line_size, int h);
 void put_pixels_clamped_mvi_asm(const DCTELEM *block, uint8_t *pixels,
                                 int line_size);
-void add_pixels_clamped_mvi_asm(const DCTELEM *block, uint8_t *pixels, 
+void add_pixels_clamped_mvi_asm(const DCTELEM *block, uint8_t *pixels,
                                 int line_size);
 void (*put_pixels_clamped_axp_p)(const DCTELEM *block, uint8_t *pixels,
                                  int line_size);
-void (*add_pixels_clamped_axp_p)(const DCTELEM *block, uint8_t *pixels, 
+void (*add_pixels_clamped_axp_p)(const DCTELEM *block, uint8_t *pixels,
                                  int line_size);
 
 void get_pixels_mvi(DCTELEM *restrict block,
@@ -48,7 +48,7 @@ int pix_abs16x16_xy2_mvi(void *v, uint8_t *pix1, uint8_t *pix2, int line_size, i
 #if 0
 /* These functions were the base for the optimized assembler routines,
    and remain here for documentation purposes. */
-static void put_pixels_clamped_mvi(const DCTELEM *block, uint8_t *pixels, 
+static void put_pixels_clamped_mvi(const DCTELEM *block, uint8_t *pixels,
                                    int line_size)
 {
     int i = 8;
@@ -72,7 +72,7 @@ static void put_pixels_clamped_mvi(const DCTELEM *block, uint8_t *pixels,
     } while (--i);
 }
 
-void add_pixels_clamped_mvi(const DCTELEM *block, uint8_t *pixels, 
+void add_pixels_clamped_mvi(const DCTELEM *block, uint8_t *pixels,
                             int line_size)
 {
     int h = 8;
@@ -97,7 +97,7 @@ void add_pixels_clamped_mvi(const DCTELEM *block, uint8_t *pixels,
         shorts0 ^= signs0;
         /* Clamp. */
         shorts0 = maxsw4(shorts0, 0);
-        shorts0 = minsw4(shorts0, clampmask); 
+        shorts0 = minsw4(shorts0, clampmask);
 
         /* Next 4.  */
         pix1 = unpkbw(ldl(pixels + 4));
@@ -142,7 +142,7 @@ static inline uint64_t avg2_no_rnd(uint64_t a, uint64_t b)
 
 static inline uint64_t avg2(uint64_t a, uint64_t b)
 {
-    return (a | b) - (((a ^ b) & BYTE_VEC(0xfe)) >> 1); 
+    return (a | b) - (((a ^ b) & BYTE_VEC(0xfe)) >> 1);
 }
 
 #if 0
@@ -353,7 +353,7 @@ void dsputil_init_alpha(DSPContext* c, AVCodecContext *avctx)
 
     put_pixels_clamped_axp_p = c->put_pixels_clamped;
     add_pixels_clamped_axp_p = c->add_pixels_clamped;
-    
+
     c->idct_put = simple_idct_put_axp;
     c->idct_add = simple_idct_add_axp;
     c->idct = simple_idct_axp;
diff --git a/libavcodec/alpha/dsputil_alpha_asm.S b/libavcodec/alpha/dsputil_alpha_asm.S
index 6519a95..d4b18f0 100644
--- a/libavcodec/alpha/dsputil_alpha_asm.S
+++ b/libavcodec/alpha/dsputil_alpha_asm.S
@@ -34,7 +34,7 @@
 #define tf a4
 #define tg a3
 #define th v0
-        
+
         .set noat
         .set noreorder
         .arch pca56
@@ -132,13 +132,13 @@ $aligned:
 
         stq     t2, 0(t5)
         stq     t3, 0(t6)
-        
+
         bne     a3, $aligned
         ret
         .end put_pixels_axp_asm
 
 /************************************************************************
- * void put_pixels_clamped_mvi_asm(const DCTELEM *block, uint8_t *pixels, 
+ * void put_pixels_clamped_mvi_asm(const DCTELEM *block, uint8_t *pixels,
  *                                 int line_size)
  */
         .align 6
@@ -172,17 +172,17 @@ put_pixels_clamped_mvi_asm:
         addq    a1, a2, ta
         maxsw4  t3, zero, t3
         minsw4  t0, t8, t0
-        
+
         minsw4  t1, t8, t1
         minsw4  t2, t8, t2
         minsw4  t3, t8, t3
         pkwb    t0, t0
-        
+
         pkwb    t1, t1
         pkwb    t2, t2
         pkwb    t3, t3
         stl     t0, 0(a1)
-        
+
         stl     t1, 4(a1)
         addq    ta, a2, a1
         stl     t2, 0(ta)
@@ -193,7 +193,7 @@ put_pixels_clamped_mvi_asm:
         .end put_pixels_clamped_mvi_asm
 
 /************************************************************************
- * void add_pixels_clamped_mvi_asm(const DCTELEM *block, uint8_t *pixels, 
+ * void add_pixels_clamped_mvi_asm(const DCTELEM *block, uint8_t *pixels,
  *                                 int line_size)
  */
         .align 6
@@ -236,18 +236,18 @@ add_pixels_clamped_mvi_asm:
         bic     t0, tg, t0      # 0 2
         unpkbw  t7, t7          # 2 0
         and     t3, tg, t5      # 1 1
-        addq    t0, t1, t0      # 0 3 
+        addq    t0, t1, t0      # 0 3
 
         xor     t0, t2, t0      # 0 4
         unpkbw  ta, ta          # 3 0
         and     t6, tg, t8      # 2 1
         maxsw4  t0, zero, t0    # 0 5
-        
+
         bic     t3, tg, t3      # 1 2
         bic     t6, tg, t6      # 2 2
         minsw4  t0, tf, t0      # 0 6
         addq    t3, t4, t3      # 1 3
-        
+
         pkwb    t0, t0          # 0 7
         xor     t3, t5, t3      # 1 4
         maxsw4  t3, zero, t3    # 1 5
@@ -260,14 +260,14 @@ add_pixels_clamped_mvi_asm:
         maxsw4  t6, zero, t6    # 2 5
         addq    t9, ta, t9      # 3 3
 
-        stl     t0, 0(a1)       # 0 8 
+        stl     t0, 0(a1)       # 0 8
         minsw4  t6, tf, t6      # 2 6
         xor     t9, tb, t9      # 3 4
         maxsw4  t9, zero, t9    # 3 5
 
         lda     a0, 32(a0)      # block += 16;
         pkwb    t3, t3          # 1 7
-        
+
         minsw4  t9, tf, t9      # 3 6
         subq    th, 2, th
         pkwb    t6, t6          # 2 7
@@ -279,5 +279,5 @@ add_pixels_clamped_mvi_asm:
 
         stl     t9, 4(te)       # 3 8
         bne     th, 1b
-        ret     
+        ret
         .end add_pixels_clamped_mvi_asm
diff --git a/libavcodec/alpha/motion_est_alpha.c b/libavcodec/alpha/motion_est_alpha.c
index 8b8a0a2..98d3644 100644
--- a/libavcodec/alpha/motion_est_alpha.c
+++ b/libavcodec/alpha/motion_est_alpha.c
@@ -30,7 +30,7 @@ void get_pixels_mvi(DCTELEM *restrict block,
 
         p = ldq(pixels);
         stq(unpkbw(p), block);
-        stq(unpkbw(p >> 32), block + 4); 
+        stq(unpkbw(p >> 32), block + 4);
 
         pixels += line_size;
         block += 8;
@@ -187,7 +187,7 @@ int pix_abs16x16_x2_mvi(void *v, uint8_t *pix1, uint8_t *pix2, int line_size, in
         /* |.......l|lllllllr|rrrrrrr*|
            This case is special because disalign1 would be 8, which
            gets treated as 0 by extqh.  At least it is a bit faster
-           that way :) */ 
+           that way :) */
         do {
             uint64_t p1_l, p1_r, p2_l, p2_r;
             uint64_t l, m, r;
@@ -201,7 +201,7 @@ int pix_abs16x16_x2_mvi(void *v, uint8_t *pix1, uint8_t *pix2, int line_size, in
             p2_r = avg2(extql(m, disalign) | extqh(r, disalign), r);
             pix1 += line_size;
             pix2 += line_size;
-            
+
             result += perr(p1_l, p2_l)
                     + perr(p1_r, p2_r);
         } while (--h);
@@ -288,7 +288,7 @@ int pix_abs16x16_y2_mvi(void *v, uint8_t *pix1, uint8_t *pix2, int line_size, in
 int pix_abs16x16_xy2_mvi(void *v, uint8_t *pix1, uint8_t *pix2, int line_size, int h)
 {
     int result = 0;
-    
+
     uint64_t p1_l, p1_r;
     uint64_t p2_l, p2_r, p2_x;
 
diff --git a/libavcodec/alpha/motion_est_mvi_asm.S b/libavcodec/alpha/motion_est_mvi_asm.S
index 9e6b75f..276d310 100644
--- a/libavcodec/alpha/motion_est_mvi_asm.S
+++ b/libavcodec/alpha/motion_est_mvi_asm.S
@@ -29,7 +29,7 @@
 #define tf a4
 #define tg a3
 #define th v0
-        
+
         .set noat
         .set noreorder
         .arch pca56
@@ -91,7 +91,7 @@ $unaligned:
         ldq     t4, 8(a0)       # ref right
         addq    a0, a2, a0      # pix1
         addq    a1, a2, a1      # pix2
-        /* load line 1 */ 
+        /* load line 1 */
         ldq_u   t5, 0(a1)       # left_u
         ldq_u   t6, 8(a1)       # mid
         ldq_u   t7, 16(a1)      # right_u
diff --git a/libavcodec/alpha/mpegvideo_alpha.c b/libavcodec/alpha/mpegvideo_alpha.c
index 2f05e3e..9414a8d 100644
--- a/libavcodec/alpha/mpegvideo_alpha.c
+++ b/libavcodec/alpha/mpegvideo_alpha.c
@@ -32,18 +32,18 @@ static void dct_unquantize_h263_intra_axp(MpegEncContext *s, DCTELEM *block,
 
     qadd = WORD_VEC((qscale - 1) | 1);
     qmul = qscale << 1;
-    /* This mask kills spill from negative subwords to the next subword. */ 
+    /* This mask kills spill from negative subwords to the next subword. */
     correction = WORD_VEC((qmul - 1) + 1); /* multiplication / addition */
 
     if (!s->h263_aic) {
-        if (n < 4) 
+        if (n < 4)
             block0 = block[0] * s->y_dc_scale;
         else
             block0 = block[0] * s->c_dc_scale;
     } else {
         qadd = 0;
     }
-    n_coeffs = 63; // does not always use zigzag table 
+    n_coeffs = 63; // does not always use zigzag table
 
     for(i = 0; i <= n_coeffs; block += 4, i += 4) {
         uint64_t levels, negmask, zeros, add;
@@ -95,7 +95,7 @@ static void dct_unquantize_h263_inter_axp(MpegEncContext *s, DCTELEM *block,
 
     qadd = WORD_VEC((qscale - 1) | 1);
     qmul = qscale << 1;
-    /* This mask kills spill from negative subwords to the next subword. */ 
+    /* This mask kills spill from negative subwords to the next subword. */
     correction = WORD_VEC((qmul - 1) + 1); /* multiplication / addition */
 
     n_coeffs = s->intra_scantable.raster_end[s->block_last_index[n]];
diff --git a/libavcodec/alpha/simple_idct_alpha.c b/libavcodec/alpha/simple_idct_alpha.c
index 293a2f9..9519ae1 100644
--- a/libavcodec/alpha/simple_idct_alpha.c
+++ b/libavcodec/alpha/simple_idct_alpha.c
@@ -29,7 +29,7 @@
 
 extern void (*put_pixels_clamped_axp_p)(const DCTELEM *block, uint8_t *pixels,
                                         int line_size);
-extern void (*add_pixels_clamped_axp_p)(const DCTELEM *block, uint8_t *pixels, 
+extern void (*add_pixels_clamped_axp_p)(const DCTELEM *block, uint8_t *pixels,
                                         int line_size);
 
 // cos(i * M_PI / 16) * sqrt(2) * (1 << 14)
@@ -55,7 +55,7 @@ static inline int idct_row(DCTELEM *row)
 
     if (l == 0 && r == 0)
         return 0;
-    
+
     a0 = W4 * sextw(l) + (1 << (ROW_SHIFT - 1));
 
     if (((l & ~0xffffUL) | r) == 0) {
@@ -63,7 +63,7 @@ static inline int idct_row(DCTELEM *row)
         t2 = (uint16_t) a0;
         t2 |= t2 << 16;
         t2 |= t2 << 32;
-        
+
         stq(t2, row);
         stq(t2, row + 4);
         return 1;
@@ -123,7 +123,7 @@ static inline int idct_row(DCTELEM *row)
             b3 -= W5 * t;
         }
 
-        
+
         t = extwl(r, 2); /* row[5] */
         if (t) {
             t = sextw(t);