author     Lauri Kasanen <cand@gmx.com>   2019-01-13 10:26:20 +0200
committer  Lauri Kasanen <cand@gmx.com>   2019-02-05 09:34:53 +0200
commit     8522d219ce805ce69ff302f259e6f083fdb4887c (patch)
tree       676d1b3db053d630f59b8dd3cac32ccf41dd5bcd /libswscale/ppc
parent     fc6022e1088df068b72159e8836c59643795e39e (diff)
libswscale/ppc: VSX-optimize 9-16 bit yuv2planeX
./ffmpeg_g -f rawvideo -pix_fmt rgb24 -s hd1080 -i /dev/zero -pix_fmt yuv420p16be \
-s 1920x1728 -f null -vframes 100 -v error -nostats -
The 9-14 bit functions get about a 6x speedup, and the 16-bit function about 15x.
FATE passes; each format was tested with an image-to-video conversion.
Only POWER8 has 32-bit vector multiplies, so POWER7 is locked out of the
16-bit function. This applies to the vec_mulo/vec_mule functions on 32-bit
elements too, not just vmuluwm.
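For illustration only (not part of the patch): a minimal sketch of the intrinsic-level
distinction described above, assuming the <altivec.h> intrinsics on a POWER target built
with -maltivec/-mvsx; the helper names are invented.

    #include <altivec.h>

    /* 9-14 bit case: samples and coefficients are int16_t, so the classic
     * even/odd multiplies are enough -- they take 16-bit lanes and return
     * widened 32-bit products, and exist on pre-POWER8 AltiVec/VSX. */
    static inline vector signed int mul16_even(vector signed short a,
                                               vector signed short b)
    {
        return vec_mule(a, b); /* products of the even 16-bit lanes */
    }

    static inline vector signed int mul16_odd(vector signed short a,
                                              vector signed short b)
    {
        return vec_mulo(a, b); /* products of the odd 16-bit lanes */
    }

    #ifdef __POWER8_VECTOR__
    /* 16-bit case: the intermediates are already int32_t, so a per-lane
     * 32-bit multiply (vmuluwm) is needed; vec_mul on 32-bit integer
     * vectors, like the 32-bit vec_mule/vec_mulo, is POWER8-only. */
    static inline vector signed int mul32(vector signed int a,
                                          vector signed int b)
    {
        return vec_mul(a, b);
    }
    #endif

This is why the patch guards the 16-bit yuv2planeX with HAVE_POWER8 at build time and
AV_CPU_FLAG_POWER8 at run time, while the nbps (9-14 bit) path stays plain VSX.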
With TIMER_REPORT skips disabled:
yuv420p9le
12412 UNITS in planarX, 131072 runs, 0 skips
73136 UNITS in planarX, 131072 runs, 0 skips
yuv420p9be
12481 UNITS in planarX, 131072 runs, 0 skips
73410 UNITS in planarX, 131072 runs, 0 skips
yuv420p10le
12322 UNITS in planarX, 131072 runs, 0 skips
72546 UNITS in planarX, 131072 runs, 0 skips
yuv420p10be
12291 UNITS in planarX, 131072 runs, 0 skips
72935 UNITS in planarX, 131072 runs, 0 skips
yuv420p12le
12316 UNITS in planarX, 131072 runs, 0 skips
72708 UNITS in planarX, 131072 runs, 0 skips
yuv420p12be
12319 UNITS in planarX, 131072 runs, 0 skips
72577 UNITS in planarX, 131072 runs, 0 skips
yuv420p14le
12259 UNITS in planarX, 131072 runs, 0 skips
72516 UNITS in planarX, 131072 runs, 0 skips
yuv420p14be
12440 UNITS in planarX, 131072 runs, 0 skips
72962 UNITS in planarX, 131072 runs, 0 skips
yuv420p16le
10548 UNITS in planarX, 131072 runs, 0 skips
73429 UNITS in planarX, 131072 runs, 0 skips
yuv420p16be
10634 UNITS in planarX, 131072 runs, 0 skips
150959 UNITS in planarX, 131072 runs, 0 skips
Signed-off-by: Lauri Kasanen <cand@gmx.com>
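For context (not part of the commit): the "UNITS in planarX" lines above are the output
format of FFmpeg's libavutil/timer.h macros. A hypothetical harness around the function
under test might look roughly like the following; the wrapper name, the internal-header
include, and the exact timer placement are assumptions.

    #include "libswscale/swscale_internal.h" /* SwsContext, yuv2planeX member */
    #include "libavutil/timer.h"             /* START_TIMER / STOP_TIMER */

    /* Time one yuv2planeX call per invocation. STOP_TIMER("planarX") prints
     * lines of the form "<cycles> UNITS in planarX, <N> runs, <M> skips";
     * the commit message notes the skip logic was disabled, hence 0 skips. */
    static void yuv2planeX_timed(SwsContext *c,
                                 const int16_t *filter, int filterSize,
                                 const int16_t **src, uint8_t *dest, int dstW,
                                 const uint8_t *dither, int offset)
    {
        START_TIMER
        c->yuv2planeX(filter, filterSize, src, dest, dstW, dither, offset);
        STOP_TIMER("planarX")
    }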
Diffstat (limited to 'libswscale/ppc')
-rw-r--r--  libswscale/ppc/swscale_ppc_template.c |   4
-rw-r--r--  libswscale/ppc/swscale_vsx.c          | 186
2 files changed, 184 insertions, 6 deletions
diff --git a/libswscale/ppc/swscale_ppc_template.c b/libswscale/ppc/swscale_ppc_template.c
index 00e4b99..11decab 100644
--- a/libswscale/ppc/swscale_ppc_template.c
+++ b/libswscale/ppc/swscale_ppc_template.c
@@ -21,7 +21,7 @@
  * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
  */
 
-static void FUNC(yuv2planeX_16)(const int16_t *filter, int filterSize,
+static void FUNC(yuv2planeX_8_16)(const int16_t *filter, int filterSize,
                                 const int16_t **src, uint8_t *dest,
                                 const uint8_t *dither, int offset, int x)
 {
@@ -88,7 +88,7 @@ static void FUNC(yuv2planeX)(const int16_t *filter, int filterSize,
     yuv2planeX_u(filter, filterSize, src, dest, dst_u, dither, offset, 0);
 
     for (i = dst_u; i < dstW - 15; i += 16)
-        FUNC(yuv2planeX_16)(filter, filterSize, src, dest + i, dither,
+        FUNC(yuv2planeX_8_16)(filter, filterSize, src, dest + i, dither,
                             offset, i);
 
     yuv2planeX_u(filter, filterSize, src, dest, dstW, dither, offset, i);
diff --git a/libswscale/ppc/swscale_vsx.c b/libswscale/ppc/swscale_vsx.c
index 70da6ae..f6c7f1d 100644
--- a/libswscale/ppc/swscale_vsx.c
+++ b/libswscale/ppc/swscale_vsx.c
@@ -83,6 +83,8 @@
 #include "swscale_ppc_template.c"
 #undef FUNC
 
+#undef vzero
+
 #endif /* !HAVE_BIGENDIAN */
 
 static void yuv2plane1_8_u(const int16_t *src, uint8_t *dest, int dstW,
@@ -180,6 +182,76 @@ static void yuv2plane1_nbps_vsx(const int16_t *src, uint16_t *dest, int dstW,
     yuv2plane1_nbps_u(src, dest, dstW, big_endian, output_bits, i);
 }
 
+static void yuv2planeX_nbps_u(const int16_t *filter, int filterSize,
+                              const int16_t **src, uint16_t *dest, int dstW,
+                              int big_endian, int output_bits, int start)
+{
+    int i;
+    int shift = 11 + 16 - output_bits;
+
+    for (i = start; i < dstW; i++) {
+        int val = 1 << (shift - 1);
+        int j;
+
+        for (j = 0; j < filterSize; j++)
+            val += src[j][i] * filter[j];
+
+        output_pixel(&dest[i], val);
+    }
+}
+
+static void yuv2planeX_nbps_vsx(const int16_t *filter, int filterSize,
+                                const int16_t **src, uint16_t *dest, int dstW,
+                                int big_endian, int output_bits)
+{
+    const int dst_u = -(uintptr_t)dest & 7;
+    const int shift = 11 + 16 - output_bits;
+    const int add = (1 << (shift - 1));
+    const int clip = (1 << output_bits) - 1;
+    const uint16_t swap = big_endian ? 8 : 0;
+    const vector uint32_t vadd = (vector uint32_t) {add, add, add, add};
+    const vector uint32_t vshift = (vector uint32_t) {shift, shift, shift, shift};
+    const vector uint16_t vswap = (vector uint16_t) {swap, swap, swap, swap, swap, swap, swap, swap};
+    const vector uint16_t vlargest = (vector uint16_t) {clip, clip, clip, clip, clip, clip, clip, clip};
+    const vector int16_t vzero = vec_splat_s16(0);
+    const vector uint8_t vperm = (vector uint8_t) {0, 1, 8, 9, 2, 3, 10, 11, 4, 5, 12, 13, 6, 7, 14, 15};
+    vector int16_t vfilter[MAX_FILTER_SIZE], vin;
+    vector uint16_t v;
+    vector uint32_t vleft, vright, vtmp;
+    int i, j;
+
+    for (i = 0; i < filterSize; i++) {
+        vfilter[i] = (vector int16_t) {filter[i], filter[i], filter[i], filter[i],
+                                       filter[i], filter[i], filter[i], filter[i]};
+    }
+
+    yuv2planeX_nbps_u(filter, filterSize, src, dest, dst_u, big_endian, output_bits, 0);
+
+    for (i = dst_u; i < dstW - 7; i += 8) {
+        vleft = vright = vadd;
+
+        for (j = 0; j < filterSize; j++) {
+            vin = vec_vsx_ld(0, &src[j][i]);
+            vtmp = (vector uint32_t) vec_mule(vin, vfilter[j]);
+            vleft = vec_add(vleft, vtmp);
+            vtmp = (vector uint32_t) vec_mulo(vin, vfilter[j]);
+            vright = vec_add(vright, vtmp);
+        }
+
+        vleft = vec_sra(vleft, vshift);
+        vright = vec_sra(vright, vshift);
+        v = vec_packsu(vleft, vright);
+        v = (vector uint16_t) vec_max((vector int16_t) v, vzero);
+        v = vec_min(v, vlargest);
+        v = vec_rl(v, vswap);
+        v = vec_perm(v, v, vperm);
+        vec_st(v, 0, &dest[i]);
+    }
+
+    yuv2planeX_nbps_u(filter, filterSize, src, dest, dstW, big_endian, output_bits, i);
+}
+
+
 #undef output_pixel
 
 #define output_pixel(pos, val, bias, signedness) \
@@ -234,7 +306,88 @@ static void yuv2plane1_16_vsx(const int32_t *src, uint16_t *dest, int dstW,
     yuv2plane1_16_u(src, dest, dstW, big_endian, output_bits, i);
 }
 
+#if HAVE_POWER8
+
+static void yuv2planeX_16_u(const int16_t *filter, int filterSize,
+                            const int32_t **src, uint16_t *dest, int dstW,
+                            int big_endian, int output_bits, int start)
+{
+    int i;
+    int shift = 15;
+
+    for (i = start; i < dstW; i++) {
+        int val = 1 << (shift - 1);
+        int j;
+
+        /* range of val is [0,0x7FFFFFFF], so 31 bits, but with lanczos/spline
+         * filters (or anything with negative coeffs, the range can be slightly
+         * wider in both directions. To account for this overflow, we subtract
+         * a constant so it always fits in the signed range (assuming a
+         * reasonable filterSize), and re-add that at the end. */
+        val -= 0x40000000;
+        for (j = 0; j < filterSize; j++)
+            val += src[j][i] * (unsigned)filter[j];
+
+        output_pixel(&dest[i], val, 0x8000, int);
+    }
+}
+
+static void yuv2planeX_16_vsx(const int16_t *filter, int filterSize,
+                              const int32_t **src, uint16_t *dest, int dstW,
+                              int big_endian, int output_bits)
+{
+    const int dst_u = -(uintptr_t)dest & 7;
+    const int shift = 15;
+    const int bias = 0x8000;
+    const int add = (1 << (shift - 1)) - 0x40000000;
+    const uint16_t swap = big_endian ? 8 : 0;
+    const vector uint32_t vadd = (vector uint32_t) {add, add, add, add};
+    const vector uint32_t vshift = (vector uint32_t) {shift, shift, shift, shift};
+    const vector uint16_t vswap = (vector uint16_t) {swap, swap, swap, swap, swap, swap, swap, swap};
+    const vector uint16_t vbias = (vector uint16_t) {bias, bias, bias, bias, bias, bias, bias, bias};
+    vector int32_t vfilter[MAX_FILTER_SIZE];
+    vector uint16_t v;
+    vector uint32_t vleft, vright, vtmp;
+    vector int32_t vin32l, vin32r;
+    int i, j;
+
+    for (i = 0; i < filterSize; i++) {
+        vfilter[i] = (vector int32_t) {filter[i], filter[i], filter[i], filter[i]};
+    }
+
+    yuv2planeX_16_u(filter, filterSize, src, dest, dst_u, big_endian, output_bits, 0);
+
+    for (i = dst_u; i < dstW - 7; i += 8) {
+        vleft = vright = vadd;
+
+        for (j = 0; j < filterSize; j++) {
+            vin32l = vec_vsx_ld(0, &src[j][i]);
+            vin32r = vec_vsx_ld(0, &src[j][i + 4]);
+
+            vtmp = (vector uint32_t) vec_mul(vin32l, vfilter[j]);
+            vleft = vec_add(vleft, vtmp);
+            vtmp = (vector uint32_t) vec_mul(vin32r, vfilter[j]);
+            vright = vec_add(vright, vtmp);
+        }
+
+        vleft = vec_sra(vleft, vshift);
+        vright = vec_sra(vright, vshift);
+        v = (vector uint16_t) vec_packs((vector int32_t) vleft, (vector int32_t) vright);
+        v = vec_add(v, vbias);
+        v = vec_rl(v, vswap);
+        vec_st(v, 0, &dest[i]);
+    }
+
+    yuv2planeX_16_u(filter, filterSize, src, dest, dstW, big_endian, output_bits, i);
+}
+
+#endif /* HAVE_POWER8 */
+
 #define yuv2NBPS(bits, BE_LE, is_be, template_size, typeX_t) \
+    yuv2NBPS1(bits, BE_LE, is_be, template_size, typeX_t) \
+    yuv2NBPSX(bits, BE_LE, is_be, template_size, typeX_t)
+
+#define yuv2NBPS1(bits, BE_LE, is_be, template_size, typeX_t) \
 static void yuv2plane1_ ## bits ## BE_LE ## _vsx(const int16_t *src, \
                                                  uint8_t *dest, int dstW, \
                                                  const uint8_t *dither, int offset) \
@@ -243,6 +396,16 @@ static void yuv2plane1_ ## bits ## BE_LE ## _vsx(const int16_t *src, \
                        (uint16_t *) dest, dstW, is_be, bits); \
 }
 
+#define yuv2NBPSX(bits, BE_LE, is_be, template_size, typeX_t) \
+static void yuv2planeX_ ## bits ## BE_LE ## _vsx(const int16_t *filter, int filterSize, \
+                                                 const int16_t **src, uint8_t *dest, int dstW, \
+                                                 const uint8_t *dither, int offset)\
+{ \
+    yuv2planeX_## template_size ## _vsx(filter, \
+                                        filterSize, (const typeX_t **) src, \
+                                        (uint16_t *) dest, dstW, is_be, bits); \
+}
+
 yuv2NBPS( 9, BE, 1, nbps, int16_t)
 yuv2NBPS( 9, LE, 0, nbps, int16_t)
 yuv2NBPS(10, BE, 1, nbps, int16_t)
@@ -251,8 +414,13 @@ yuv2NBPS(12, BE, 1, nbps, int16_t)
 yuv2NBPS(12, LE, 0, nbps, int16_t)
 yuv2NBPS(14, BE, 1, nbps, int16_t)
 yuv2NBPS(14, LE, 0, nbps, int16_t)
-yuv2NBPS(16, BE, 1, 16, int32_t)
-yuv2NBPS(16, LE, 0, 16, int32_t)
+
+yuv2NBPS1(16, BE, 1, 16, int32_t)
+yuv2NBPS1(16, LE, 0, 16, int32_t)
+#if HAVE_POWER8
+yuv2NBPSX(16, BE, 1, 16, int32_t)
+yuv2NBPSX(16, LE, 0, 16, int32_t)
+#endif
 
 #endif /* !HAVE_BIGENDIAN */
 
@@ -262,8 +430,9 @@ av_cold void ff_sws_init_swscale_vsx(SwsContext *c)
 {
 #if HAVE_VSX
     enum AVPixelFormat dstFormat = c->dstFormat;
+    const int cpu_flags = av_get_cpu_flags();
 
-    if (!(av_get_cpu_flags() & AV_CPU_FLAG_VSX))
+    if (!(cpu_flags & AV_CPU_FLAG_VSX))
         return;
 
 #if !HAVE_BIGENDIAN
@@ -286,20 +455,29 @@ av_cold void ff_sws_init_swscale_vsx(SwsContext *c)
 #if !HAVE_BIGENDIAN
         case 9:
            c->yuv2plane1 = isBE(dstFormat) ? yuv2plane1_9BE_vsx : yuv2plane1_9LE_vsx;
+            c->yuv2planeX = isBE(dstFormat) ? yuv2planeX_9BE_vsx : yuv2planeX_9LE_vsx;
            break;
        case 10:
            c->yuv2plane1 = isBE(dstFormat) ? yuv2plane1_10BE_vsx : yuv2plane1_10LE_vsx;
+            c->yuv2planeX = isBE(dstFormat) ? yuv2planeX_10BE_vsx : yuv2planeX_10LE_vsx;
            break;
        case 12:
            c->yuv2plane1 = isBE(dstFormat) ? yuv2plane1_12BE_vsx : yuv2plane1_12LE_vsx;
+            c->yuv2planeX = isBE(dstFormat) ? yuv2planeX_12BE_vsx : yuv2planeX_12LE_vsx;
            break;
        case 14:
            c->yuv2plane1 = isBE(dstFormat) ? yuv2plane1_14BE_vsx : yuv2plane1_14LE_vsx;
+            c->yuv2planeX = isBE(dstFormat) ? yuv2planeX_14BE_vsx : yuv2planeX_14LE_vsx;
            break;
        case 16:
            c->yuv2plane1 = isBE(dstFormat) ? yuv2plane1_16BE_vsx : yuv2plane1_16LE_vsx;
+#if HAVE_POWER8
+            if (cpu_flags & AV_CPU_FLAG_POWER8) {
+                c->yuv2planeX = isBE(dstFormat) ? yuv2planeX_16BE_vsx : yuv2planeX_16LE_vsx;
+            }
+#endif /* HAVE_POWER8 */
            break;
-#endif
+#endif /* !HAVE_BIGENDIAN */
        }
    }
 #endif /* HAVE_VSX */