author     Lauri Kasanen <cand@gmx.com>   2019-03-20 13:17:43 +0200
committer  Lauri Kasanen <cand@gmx.com>   2019-03-27 09:05:08 +0200
commit     681957b88d18f51d0f52add4f7bb1c9fc5d8323b
tree       0fc81a9cee658eb654280c8a206ecf1b2e30c560 /libswscale/ppc
parent     81a4719d8eaffd2e0251777cd121ecb54b239784
swscale/ppc: VSX-optimize yuv2rgb_full
./ffmpeg -f lavfi -i yuvtestsrc=duration=1:size=1200x1440 \
  -s 1200x1440 -f null -vframes 100 -pix_fmt $i -nostats \
  -cpuflags 0 -v error -

This uses 32-bit mul, so POWER8 only. The following output formats get
about 4.5x speedup:

rgb24
  39980 UNITS in yuv2packed1,  32768 runs,  0 skips
   8774 UNITS in yuv2packed1,  32768 runs,  0 skips
bgr24
  40069 UNITS in yuv2packed1,  32768 runs,  0 skips
   8772 UNITS in yuv2packed1,  32766 runs,  2 skips
rgba
  39759 UNITS in yuv2packed1,  32768 runs,  0 skips
   8681 UNITS in yuv2packed1,  32767 runs,  1 skips
bgra
  39729 UNITS in yuv2packed1,  32768 runs,  0 skips
   8696 UNITS in yuv2packed1,  32766 runs,  2 skips
argb
  39766 UNITS in yuv2packed1,  32768 runs,  0 skips
   8672 UNITS in yuv2packed1,  32766 runs,  2 skips
bgra
  39784 UNITS in yuv2packed1,  32768 runs,  0 skips
   8659 UNITS in yuv2packed1,  32767 runs,  1 skips
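
For orientation, the constants in the VSX kernel below (the 1 << 21 rounding bias,
the shift by 22, the clip at 1 << 30, and the 128 << 7 chroma offset) follow
swscale's usual fixed-point per-pixel conversion. A minimal scalar sketch of that
math for the single chroma-buffer case (uvalpha < 2048) follows; it is an
illustration, not part of the patch, and the coefficients are simply taken as
parameters instead of being read from SwsContext:

#include <stdint.h>

/* Clamp an intermediate channel value to 8 bits, mirroring
 * vec_max(zero32) / vec_min(rgbclip) / vec_sr(shift22) and the
 * saturation done later by vec_packsu. */
uint8_t clip_channel(int32_t v)
{
    if (v < 0)         v = 0;
    if (v > (1 << 30)) v = 1 << 30;
    v >>= 22;
    return v > 255 ? 255 : (uint8_t) v;
}

/* One pixel of the single chroma-buffer path: Y is scaled by 4, biased
 * and multiplied by y_coeff, then the chroma contributions are added. */
void yuv2rgb_pixel_sketch(int16_t y, int16_t u, int16_t v,
                          int32_t y_offset, int32_t y_coeff,
                          int32_t v2r, int32_t v2g, int32_t u2g, int32_t u2b,
                          uint8_t rgb[3])
{
    int32_t Y = (int32_t) y << 2;                 /* vec_sl(vy, shift2)        */
    Y = (Y - y_offset) * y_coeff + (1 << 21);     /* y_offset, y_coeff, y_add  */
    int32_t U = ((int32_t) u - (128 << 7)) * 4;   /* sub7, mul4                */
    int32_t V = ((int32_t) v - (128 << 7)) * 4;

    rgb[0] = clip_channel(Y + V * v2r);           /* R */
    rgb[1] = clip_channel(Y + V * v2g + U * u2g); /* G */
    rgb[2] = clip_channel(Y + U * u2b);           /* B */
}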
Diffstat (limited to 'libswscale/ppc')
-rw-r--r--  libswscale/ppc/swscale_vsx.c | 291
1 file changed, 291 insertions, 0 deletions
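
The 24-bit outputs in the diff are assembled with two vec_perm passes
(perm3rg0/perm3tb0 and perm3rg1/perm3tb1), while the 32-bit outputs interleave
the four per-channel byte vectors with a byte merge followed by a halfword merge.
The following scalar model is my illustration, not part of the commit, and it
glosses over the little-endian lane-order details of the real intrinsics; it only
shows how the two-step merge produces packed RGBA pixels:

#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* Model of vec_mergeh on bytes: interleave lanes 0..7 of a and b. */
void mergeh_u8(const uint8_t a[16], const uint8_t b[16], uint8_t out[16])
{
    for (int i = 0; i < 8; i++) {
        out[2 * i]     = a[i];
        out[2 * i + 1] = b[i];
    }
}

/* Model of vec_mergeh on 16-bit lanes: interleave byte pairs. */
void mergeh_u16(const uint8_t a[16], const uint8_t b[16], uint8_t out[16])
{
    for (int i = 0; i < 4; i++) {
        memcpy(out + 4 * i,     a + 2 * i, 2);
        memcpy(out + 4 * i + 2, b + 2 * i, 2);
    }
}

int main(void)
{
    /* rd/gd/bd/ad: eight pixels' worth of R, G, B, A bytes in lanes 0..7,
     * which is what vec_packsu leaves behind in the kernel. */
    uint8_t rd[16] = {0}, gd[16] = {0}, bd[16] = {0}, ad[16] = {0};
    uint8_t rg[16], ba[16], px[16];
    for (int i = 0; i < 8; i++) {
        rd[i] = 0x10 + i; gd[i] = 0x20 + i; bd[i] = 0x30 + i; ad[i] = 0xff;
    }
    mergeh_u8(rd, gd, rg);   /* r0 g0 r1 g1 ...                        */
    mergeh_u8(bd, ad, ba);   /* b0 a0 b1 a1 ...                        */
    mergeh_u16(rg, ba, px);  /* r0 g0 b0 a0 r1 g1 b1 a1 ... (4 pixels) */
    for (int i = 0; i < 16; i++)
        printf("%02x ", px[i]);
    printf("\n");            /* 10 20 30 ff 11 21 31 ff 12 22 32 ff ... */
    return 0;
}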
diff --git a/libswscale/ppc/swscale_vsx.c b/libswscale/ppc/swscale_vsx.c
index 01eb46c..062ab0d 100644
--- a/libswscale/ppc/swscale_vsx.c
+++ b/libswscale/ppc/swscale_vsx.c
@@ -422,6 +422,248 @@ yuv2NBPSX(16, BE, 1, 16, int32_t)
yuv2NBPSX(16, LE, 0, 16, int32_t)
#endif
+static av_always_inline void
+yuv2rgb_full_1_vsx_template(SwsContext *c, const int16_t *buf0,
+ const int16_t *ubuf[2], const int16_t *vbuf[2],
+ const int16_t *abuf0, uint8_t *dest, int dstW,
+ int uvalpha, int y, enum AVPixelFormat target,
+ int hasAlpha)
+{
+ const int16_t *ubuf0 = ubuf[0], *vbuf0 = vbuf[0];
+ const int16_t *ubuf1 = ubuf[1], *vbuf1 = vbuf[1];
+ vector int16_t vy, vu, vv, A = vec_splat_s16(0), tmp16;
+ vector int32_t vy32_l, vy32_r, vu32_l, vu32_r, vv32_l, vv32_r, tmp32, tmp32_2;
+ vector int32_t R_l, R_r, G_l, G_r, B_l, B_r;
+ vector uint16_t rd16, gd16, bd16;
+ vector uint8_t rd, bd, gd, ad, out0, out1, tmp8;
+ const vector uint16_t zero16 = vec_splat_u16(0);
+ const vector int32_t y_offset = vec_splats(c->yuv2rgb_y_offset);
+ const vector int32_t y_coeff = vec_splats(c->yuv2rgb_y_coeff);
+ const vector int32_t y_add = vec_splats(1 << 21);
+ const vector int32_t v2r_coeff = vec_splats(c->yuv2rgb_v2r_coeff);
+ const vector int32_t v2g_coeff = vec_splats(c->yuv2rgb_v2g_coeff);
+ const vector int32_t u2g_coeff = vec_splats(c->yuv2rgb_u2g_coeff);
+ const vector int32_t u2b_coeff = vec_splats(c->yuv2rgb_u2b_coeff);
+ const vector int32_t rgbclip = vec_splats(1 << 30);
+ const vector int32_t zero32 = vec_splat_s32(0);
+ const vector uint32_t shift2 = vec_splat_u32(2);
+ const vector uint32_t shift22 = vec_splats(22U);
+ const vector uint16_t sub7 = vec_splats((uint16_t) (128 << 7));
+ const vector uint16_t sub8 = vec_splats((uint16_t) (128 << 8));
+ const vector int16_t mul4 = vec_splat_s16(4);
+ const vector int16_t mul8 = vec_splat_s16(8);
+ const vector int16_t add64 = vec_splat_s16(64);
+ const vector uint16_t shift7 = vec_splat_u16(7);
+ const vector int16_t max255 = vec_splat_s16(255);
+ int i;
+
+ // Various permutations
+ const vector uint8_t perm3rg0 = (vector uint8_t) {0x0, 0x10, 0,
+ 0x1, 0x11, 0,
+ 0x2, 0x12, 0,
+ 0x3, 0x13, 0,
+ 0x4, 0x14, 0,
+ 0x5 };
+ const vector uint8_t perm3rg1 = (vector uint8_t) { 0x15, 0,
+ 0x6, 0x16, 0,
+ 0x7, 0x17, 0 };
+ const vector uint8_t perm3tb0 = (vector uint8_t) {0x0, 0x1, 0x10,
+ 0x3, 0x4, 0x11,
+ 0x6, 0x7, 0x12,
+ 0x9, 0xa, 0x13,
+ 0xc, 0xd, 0x14,
+ 0xf };
+ const vector uint8_t perm3tb1 = (vector uint8_t) { 0x0, 0x15,
+ 0x2, 0x3, 0x16,
+ 0x5, 0x6, 0x17 };
+
+ for (i = 0; i < dstW; i += 8) { // The x86 asm also overwrites padding bytes.
+ vy = vec_ld(0, &buf0[i]);
+ vy32_l = vec_unpackh(vy);
+ vy32_r = vec_unpackl(vy);
+ vy32_l = vec_sl(vy32_l, shift2);
+ vy32_r = vec_sl(vy32_r, shift2);
+
+ vu = vec_ld(0, &ubuf0[i]);
+ vv = vec_ld(0, &vbuf0[i]);
+ if (uvalpha < 2048) {
+ vu = (vector int16_t) vec_sub((vector uint16_t) vu, sub7);
+ vv = (vector int16_t) vec_sub((vector uint16_t) vv, sub7);
+
+ tmp32 = vec_mule(vu, mul4);
+ tmp32_2 = vec_mulo(vu, mul4);
+ vu32_l = vec_mergeh(tmp32, tmp32_2);
+ vu32_r = vec_mergel(tmp32, tmp32_2);
+ tmp32 = vec_mule(vv, mul4);
+ tmp32_2 = vec_mulo(vv, mul4);
+ vv32_l = vec_mergeh(tmp32, tmp32_2);
+ vv32_r = vec_mergel(tmp32, tmp32_2);
+ } else {
+ tmp16 = vec_ld(0, &ubuf1[i]);
+ vu = vec_add(vu, tmp16);
+ vu = (vector int16_t) vec_sub((vector uint16_t) vu, sub8);
+ tmp16 = vec_ld(0, &vbuf1[i]);
+ vv = vec_add(vv, tmp16);
+ vv = (vector int16_t) vec_sub((vector uint16_t) vv, sub8);
+
+ vu32_l = vec_mule(vu, mul8);
+ vu32_r = vec_mulo(vu, mul8);
+ vv32_l = vec_mule(vv, mul8);
+ vv32_r = vec_mulo(vv, mul8);
+ }
+
+ if (hasAlpha) {
+ A = vec_ld(0, &abuf0[i]);
+ A = vec_add(A, add64);
+ A = vec_sr(A, shift7);
+ A = vec_max(A, max255);
+ ad = vec_packsu(A, (vector int16_t) zero16);
+ } else {
+ ad = vec_splats((uint8_t) 255);
+ }
+
+ vy32_l = vec_sub(vy32_l, y_offset);
+ vy32_r = vec_sub(vy32_r, y_offset);
+ vy32_l = vec_mul(vy32_l, y_coeff);
+ vy32_r = vec_mul(vy32_r, y_coeff);
+ vy32_l = vec_add(vy32_l, y_add);
+ vy32_r = vec_add(vy32_r, y_add);
+
+ R_l = vec_mul(vv32_l, v2r_coeff);
+ R_l = vec_add(R_l, vy32_l);
+ R_r = vec_mul(vv32_r, v2r_coeff);
+ R_r = vec_add(R_r, vy32_r);
+ G_l = vec_mul(vv32_l, v2g_coeff);
+ tmp32 = vec_mul(vu32_l, u2g_coeff);
+ G_l = vec_add(G_l, vy32_l);
+ G_l = vec_add(G_l, tmp32);
+ G_r = vec_mul(vv32_r, v2g_coeff);
+ tmp32 = vec_mul(vu32_r, u2g_coeff);
+ G_r = vec_add(G_r, vy32_r);
+ G_r = vec_add(G_r, tmp32);
+
+ B_l = vec_mul(vu32_l, u2b_coeff);
+ B_l = vec_add(B_l, vy32_l);
+ B_r = vec_mul(vu32_r, u2b_coeff);
+ B_r = vec_add(B_r, vy32_r);
+
+ R_l = vec_max(R_l, zero32);
+ R_r = vec_max(R_r, zero32);
+ G_l = vec_max(G_l, zero32);
+ G_r = vec_max(G_r, zero32);
+ B_l = vec_max(B_l, zero32);
+ B_r = vec_max(B_r, zero32);
+
+ R_l = vec_min(R_l, rgbclip);
+ R_r = vec_min(R_r, rgbclip);
+ G_l = vec_min(G_l, rgbclip);
+ G_r = vec_min(G_r, rgbclip);
+ B_l = vec_min(B_l, rgbclip);
+ B_r = vec_min(B_r, rgbclip);
+
+ R_l = vec_sr(R_l, shift22);
+ R_r = vec_sr(R_r, shift22);
+ G_l = vec_sr(G_l, shift22);
+ G_r = vec_sr(G_r, shift22);
+ B_l = vec_sr(B_l, shift22);
+ B_r = vec_sr(B_r, shift22);
+
+ rd16 = vec_packsu(R_l, R_r);
+ gd16 = vec_packsu(G_l, G_r);
+ bd16 = vec_packsu(B_l, B_r);
+ rd = vec_packsu(rd16, zero16);
+ gd = vec_packsu(gd16, zero16);
+ bd = vec_packsu(bd16, zero16);
+
+ switch(target) {
+ case AV_PIX_FMT_RGB24:
+ out0 = vec_perm(rd, gd, perm3rg0);
+ out0 = vec_perm(out0, bd, perm3tb0);
+ out1 = vec_perm(rd, gd, perm3rg1);
+ out1 = vec_perm(out1, bd, perm3tb1);
+
+ vec_vsx_st(out0, 0, dest);
+ vec_vsx_st(out1, 16, dest);
+
+ dest += 24;
+ break;
+ case AV_PIX_FMT_BGR24:
+ out0 = vec_perm(bd, gd, perm3rg0);
+ out0 = vec_perm(out0, rd, perm3tb0);
+ out1 = vec_perm(bd, gd, perm3rg1);
+ out1 = vec_perm(out1, rd, perm3tb1);
+
+ vec_vsx_st(out0, 0, dest);
+ vec_vsx_st(out1, 16, dest);
+
+ dest += 24;
+ break;
+ case AV_PIX_FMT_BGRA:
+ out0 = vec_mergeh(bd, gd);
+ out1 = vec_mergeh(rd, ad);
+
+ tmp8 = (vector uint8_t) vec_mergeh((vector uint16_t) out0, (vector uint16_t) out1);
+ vec_vsx_st(tmp8, 0, dest);
+ tmp8 = (vector uint8_t) vec_mergel((vector uint16_t) out0, (vector uint16_t) out1);
+ vec_vsx_st(tmp8, 16, dest);
+
+ dest += 32;
+ break;
+ case AV_PIX_FMT_RGBA:
+ out0 = vec_mergeh(rd, gd);
+ out1 = vec_mergeh(bd, ad);
+
+ tmp8 = (vector uint8_t) vec_mergeh((vector uint16_t) out0, (vector uint16_t) out1);
+ vec_vsx_st(tmp8, 0, dest);
+ tmp8 = (vector uint8_t) vec_mergel((vector uint16_t) out0, (vector uint16_t) out1);
+ vec_vsx_st(tmp8, 16, dest);
+
+ dest += 32;
+ break;
+ case AV_PIX_FMT_ARGB:
+ out0 = vec_mergeh(ad, rd);
+ out1 = vec_mergeh(gd, bd);
+
+ tmp8 = (vector uint8_t) vec_mergeh((vector uint16_t) out0, (vector uint16_t) out1);
+ vec_vsx_st(tmp8, 0, dest);
+ tmp8 = (vector uint8_t) vec_mergel((vector uint16_t) out0, (vector uint16_t) out1);
+ vec_vsx_st(tmp8, 16, dest);
+
+ dest += 32;
+ break;
+ case AV_PIX_FMT_ABGR:
+ out0 = vec_mergeh(ad, bd);
+ out1 = vec_mergeh(gd, rd);
+
+ tmp8 = (vector uint8_t) vec_mergeh((vector uint16_t) out0, (vector uint16_t) out1);
+ vec_vsx_st(tmp8, 0, dest);
+ tmp8 = (vector uint8_t) vec_mergel((vector uint16_t) out0, (vector uint16_t) out1);
+ vec_vsx_st(tmp8, 16, dest);
+
+ dest += 32;
+ break;
+ }
+ }
+}
+
+#define YUV2RGBWRAPPER(name, base, ext, fmt, hasAlpha) \
+static void name ## ext ## _1_vsx(SwsContext *c, const int16_t *buf0, \
+ const int16_t *ubuf[2], const int16_t *vbuf[2], \
+ const int16_t *abuf0, uint8_t *dest, int dstW, \
+ int uvalpha, int y) \
+{ \
+ name ## base ## _1_vsx_template(c, buf0, ubuf, vbuf, abuf0, dest, \
+ dstW, uvalpha, y, fmt, hasAlpha); \
+}
+
+YUV2RGBWRAPPER(yuv2, rgb_full, bgrx32_full, AV_PIX_FMT_BGRA, 0)
+YUV2RGBWRAPPER(yuv2, rgb_full, rgbx32_full, AV_PIX_FMT_RGBA, 0)
+YUV2RGBWRAPPER(yuv2, rgb_full, xrgb32_full, AV_PIX_FMT_ARGB, 0)
+YUV2RGBWRAPPER(yuv2, rgb_full, xbgr32_full, AV_PIX_FMT_ABGR, 0)
+
+YUV2RGBWRAPPER(yuv2, rgb_full, rgb24_full, AV_PIX_FMT_RGB24, 0)
+YUV2RGBWRAPPER(yuv2, rgb_full, bgr24_full, AV_PIX_FMT_BGR24, 0)
+
#endif /* !HAVE_BIGENDIAN */
#endif /* HAVE_VSX */
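
The wrapper macro in the hunk above only stamps out thin entry points with the
yuv2packed1 signature; for example, YUV2RGBWRAPPER(yuv2, rgb_full, rgb24_full,
AV_PIX_FMT_RGB24, 0) expands to roughly:

static void yuv2rgb24_full_1_vsx(SwsContext *c, const int16_t *buf0,
                                 const int16_t *ubuf[2], const int16_t *vbuf[2],
                                 const int16_t *abuf0, uint8_t *dest, int dstW,
                                 int uvalpha, int y)
{
    yuv2rgb_full_1_vsx_template(c, buf0, ubuf, vbuf, abuf0, dest,
                                dstW, uvalpha, y, AV_PIX_FMT_RGB24, 0);
}

These are the names the init hunk below assigns to c->yuv2packed1.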
@@ -480,5 +722,54 @@ av_cold void ff_sws_init_swscale_vsx(SwsContext *c)
#endif /* !HAVE_BIGENDIAN */
}
}
+
+ if (c->flags & SWS_BITEXACT)
+ return;
+
+#if !HAVE_BIGENDIAN
+ if (c->flags & SWS_FULL_CHR_H_INT) {
+ switch (dstFormat) {
+ case AV_PIX_FMT_RGB24:
+ if (HAVE_POWER8 && cpu_flags & AV_CPU_FLAG_POWER8) {
+ c->yuv2packed1 = yuv2rgb24_full_1_vsx;
+ }
+ break;
+ case AV_PIX_FMT_BGR24:
+ if (HAVE_POWER8 && cpu_flags & AV_CPU_FLAG_POWER8) {
+ c->yuv2packed1 = yuv2bgr24_full_1_vsx;
+ }
+ break;
+ case AV_PIX_FMT_BGRA:
+ if (HAVE_POWER8 && cpu_flags & AV_CPU_FLAG_POWER8) {
+ if (!c->needAlpha) {
+ c->yuv2packed1 = yuv2bgrx32_full_1_vsx;
+ }
+ }
+ break;
+ case AV_PIX_FMT_RGBA:
+ if (HAVE_POWER8 && cpu_flags & AV_CPU_FLAG_POWER8) {
+ if (!c->needAlpha) {
+ c->yuv2packed1 = yuv2rgbx32_full_1_vsx;
+ }
+ }
+ break;
+ case AV_PIX_FMT_ARGB:
+ if (HAVE_POWER8 && cpu_flags & AV_CPU_FLAG_POWER8) {
+ if (!c->needAlpha) {
+ c->yuv2packed1 = yuv2xrgb32_full_1_vsx;
+ }
+ }
+ break;
+ case AV_PIX_FMT_ABGR:
+ if (HAVE_POWER8 && cpu_flags & AV_CPU_FLAG_POWER8) {
+ if (!c->needAlpha) {
+ c->yuv2packed1 = yuv2xbgr32_full_1_vsx;
+ }
+ }
+ break;
+ }
+ }
+#endif /* !HAVE_BIGENDIAN */
+
#endif /* HAVE_VSX */
}
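
The new functions are only installed when the caller requests full horizontal
chroma interpolation, bit-exact mode is off, and the CPU reports POWER8. Below is
a hedged usage sketch (not part of the commit) of a libswscale setup that enables
that path; whether the single-input yuv2packed1 variant is actually taken further
depends on the vertical scaler's runtime choices:

#include <libswscale/swscale.h>

/* Convert one YUV420P frame to packed RGB24 via the full-chroma path.
 * src/src_stride describe the source planes; rgb/rgb_stride the output. */
int convert_frame(const uint8_t *const src[4], const int src_stride[4],
                  int w, int h, uint8_t *rgb, int rgb_stride)
{
    /* SWS_FULL_CHR_H_INT selects the *_full output functions patched here;
     * SWS_BITEXACT must stay unset or the init code above returns early. */
    struct SwsContext *sws = sws_getContext(w, h, AV_PIX_FMT_YUV420P,
                                            w, h, AV_PIX_FMT_RGB24,
                                            SWS_BILINEAR | SWS_FULL_CHR_H_INT,
                                            NULL, NULL, NULL);
    if (!sws)
        return -1;

    uint8_t *dst[4]        = { rgb };
    int      dst_stride[4] = { rgb_stride };
    sws_scale(sws, src, src_stride, 0, h, dst, dst_stride);
    sws_freeContext(sws);
    return 0;
}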