author | Michael Niedermayer <michaelni@gmx.at> | 2010-09-25 16:43:42 +0000 |
---|---|---|
committer | Michael Niedermayer <michaelni@gmx.at> | 2010-09-25 16:43:42 +0000 |
commit | acbac56789750361b7d623cd85a136b58ce1fc31 (patch) | |
tree | df0c01b93a2123da2dc5b091b5eecd983247e33e /libavfilter/x86 | |
parent | b299c4e4d342bd7894a9fe9b805a5c0f8b6603a1 (diff) | |
yadif filter, based on Stefano's port of my yadif from MPlayer.
Compared to Stefano's, 2-frame output works with ffplay.
Originally committed as revision 25196 to svn://svn.ffmpeg.org/ffmpeg/trunk
Diffstat (limited to 'libavfilter/x86')
-rw-r--r-- | libavfilter/x86/yadif.c | 240 |
1 file changed, 240 insertions, 0 deletions
diff --git a/libavfilter/x86/yadif.c b/libavfilter/x86/yadif.c
new file mode 100644
index 0000000..e92da8c
--- /dev/null
+++ b/libavfilter/x86/yadif.c
@@ -0,0 +1,240 @@
+/*
+ * Copyright (C) 2006 Michael Niedermayer <michaelni@gmx.at>
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with FFmpeg; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
+ */
+
+#include "libavutil/cpu.h"
+#include "libavutil/x86_cpu.h"
+#include "libavfilter/yadif.h"
+
+#if HAVE_MMX
+
+#define LOAD4(mem,dst) \
+            "movd "mem", "#dst" \n\t"\
+            "punpcklbw %%mm7, "#dst" \n\t"
+
+#define PABS(tmp,dst) \
+            "pxor "#tmp", "#tmp" \n\t"\
+            "psubw "#dst", "#tmp" \n\t"\
+            "pmaxsw "#tmp", "#dst" \n\t"
+
+#define CHECK(pj,mj) \
+            "movq "#pj"(%[cur],%[mrefs]), %%mm2 \n\t" /* cur[x-refs-1+j] */\
+            "movq "#mj"(%[cur],%[prefs]), %%mm3 \n\t" /* cur[x+refs-1-j] */\
+            "movq %%mm2, %%mm4 \n\t"\
+            "movq %%mm2, %%mm5 \n\t"\
+            "pxor %%mm3, %%mm4 \n\t"\
+            "pavgb %%mm3, %%mm5 \n\t"\
+            "pand %[pb1], %%mm4 \n\t"\
+            "psubusb %%mm4, %%mm5 \n\t"\
+            "psrlq $8, %%mm5 \n\t"\
+            "punpcklbw %%mm7, %%mm5 \n\t" /* (cur[x-refs+j] + cur[x+refs-j])>>1 */\
+            "movq %%mm2, %%mm4 \n\t"\
+            "psubusb %%mm3, %%mm2 \n\t"\
+            "psubusb %%mm4, %%mm3 \n\t"\
+            "pmaxub %%mm3, %%mm2 \n\t"\
+            "movq %%mm2, %%mm3 \n\t"\
+            "movq %%mm2, %%mm4 \n\t" /* ABS(cur[x-refs-1+j] - cur[x+refs-1-j]) */\
+            "psrlq $8, %%mm3 \n\t" /* ABS(cur[x-refs +j] - cur[x+refs -j]) */\
+            "psrlq $16, %%mm4 \n\t" /* ABS(cur[x-refs+1+j] - cur[x+refs+1-j]) */\
+            "punpcklbw %%mm7, %%mm2 \n\t"\
+            "punpcklbw %%mm7, %%mm3 \n\t"\
+            "punpcklbw %%mm7, %%mm4 \n\t"\
+            "paddw %%mm3, %%mm2 \n\t"\
+            "paddw %%mm4, %%mm2 \n\t" /* score */
+
+#define CHECK1 \
+            "movq %%mm0, %%mm3 \n\t"\
+            "pcmpgtw %%mm2, %%mm3 \n\t" /* if(score < spatial_score) */\
+            "pminsw %%mm2, %%mm0 \n\t" /* spatial_score= score; */\
+            "movq %%mm3, %%mm6 \n\t"\
+            "pand %%mm3, %%mm5 \n\t"\
+            "pandn %%mm1, %%mm3 \n\t"\
+            "por %%mm5, %%mm3 \n\t"\
+            "movq %%mm3, %%mm1 \n\t" /* spatial_pred= (cur[x-refs+j] + cur[x+refs-j])>>1; */
+
+#define CHECK2 /* pretend not to have checked dir=2 if dir=1 was bad.\
+                  hurts both quality and speed, but matches the C version. */\
+            "paddw %[pw1], %%mm6 \n\t"\
+            "psllw $14, %%mm6 \n\t"\
+            "paddsw %%mm6, %%mm2 \n\t"\
+            "movq %%mm0, %%mm3 \n\t"\
+            "pcmpgtw %%mm2, %%mm3 \n\t"\
+            "pminsw %%mm2, %%mm0 \n\t"\
+            "pand %%mm3, %%mm5 \n\t"\
+            "pandn %%mm1, %%mm3 \n\t"\
+            "por %%mm5, %%mm3 \n\t"\
+            "movq %%mm3, %%mm1 \n\t"
+
+void ff_yadif_filter_line_mmx(uint8_t *dst,
+                              uint8_t *prev, uint8_t *cur, uint8_t *next,
+                              int w, int refs, int parity, int mode)
+{
+    static const uint64_t pw_1 = 0x0001000100010001ULL;
+    static const uint64_t pb_1 = 0x0101010101010101ULL;
+    uint64_t tmp0, tmp1, tmp2, tmp3;
+    int x;
+
+#define FILTER\
+    for(x=0; x<w; x+=4){\
+        __asm__ volatile(\
+            "pxor %%mm7, %%mm7 \n\t"\
+            LOAD4("(%[cur],%[mrefs])", %%mm0) /* c = cur[x-refs] */\
+            LOAD4("(%[cur],%[prefs])", %%mm1) /* e = cur[x+refs] */\
+            LOAD4("(%["prev2"])", %%mm2) /* prev2[x] */\
+            LOAD4("(%["next2"])", %%mm3) /* next2[x] */\
+            "movq %%mm3, %%mm4 \n\t"\
+            "paddw %%mm2, %%mm3 \n\t"\
+            "psraw $1, %%mm3 \n\t" /* d = (prev2[x] + next2[x])>>1 */\
+            "movq %%mm0, %[tmp0] \n\t" /* c */\
+            "movq %%mm3, %[tmp1] \n\t" /* d */\
+            "movq %%mm1, %[tmp2] \n\t" /* e */\
+            "psubw %%mm4, %%mm2 \n\t"\
+            PABS( %%mm4, %%mm2) /* temporal_diff0 */\
+            LOAD4("(%[prev],%[mrefs])", %%mm3) /* prev[x-refs] */\
+            LOAD4("(%[prev],%[prefs])", %%mm4) /* prev[x+refs] */\
+            "psubw %%mm0, %%mm3 \n\t"\
+            "psubw %%mm1, %%mm4 \n\t"\
+            PABS( %%mm5, %%mm3)\
+            PABS( %%mm5, %%mm4)\
+            "paddw %%mm4, %%mm3 \n\t" /* temporal_diff1 */\
+            "psrlw $1, %%mm2 \n\t"\
+            "psrlw $1, %%mm3 \n\t"\
+            "pmaxsw %%mm3, %%mm2 \n\t"\
+            LOAD4("(%[next],%[mrefs])", %%mm3) /* next[x-refs] */\
+            LOAD4("(%[next],%[prefs])", %%mm4) /* next[x+refs] */\
+            "psubw %%mm0, %%mm3 \n\t"\
+            "psubw %%mm1, %%mm4 \n\t"\
+            PABS( %%mm5, %%mm3)\
+            PABS( %%mm5, %%mm4)\
+            "paddw %%mm4, %%mm3 \n\t" /* temporal_diff2 */\
+            "psrlw $1, %%mm3 \n\t"\
+            "pmaxsw %%mm3, %%mm2 \n\t"\
+            "movq %%mm2, %[tmp3] \n\t" /* diff */\
+\
+            "paddw %%mm0, %%mm1 \n\t"\
+            "paddw %%mm0, %%mm0 \n\t"\
+            "psubw %%mm1, %%mm0 \n\t"\
+            "psrlw $1, %%mm1 \n\t" /* spatial_pred */\
+            PABS( %%mm2, %%mm0) /* ABS(c-e) */\
+\
+            "movq -1(%[cur],%[mrefs]), %%mm2 \n\t" /* cur[x-refs-1] */\
+            "movq -1(%[cur],%[prefs]), %%mm3 \n\t" /* cur[x+refs-1] */\
+            "movq %%mm2, %%mm4 \n\t"\
+            "psubusb %%mm3, %%mm2 \n\t"\
+            "psubusb %%mm4, %%mm3 \n\t"\
+            "pmaxub %%mm3, %%mm2 \n\t"\
+            "pshufw $9,%%mm2, %%mm3 \n\t"\
+            "punpcklbw %%mm7, %%mm2 \n\t" /* ABS(cur[x-refs-1] - cur[x+refs-1]) */\
+            "punpcklbw %%mm7, %%mm3 \n\t" /* ABS(cur[x-refs+1] - cur[x+refs+1]) */\
+            "paddw %%mm2, %%mm0 \n\t"\
+            "paddw %%mm3, %%mm0 \n\t"\
+            "psubw %[pw1], %%mm0 \n\t" /* spatial_score */\
+\
+            CHECK(-2,0)\
+            CHECK1\
+            CHECK(-3,1)\
+            CHECK2\
+            CHECK(0,-2)\
+            CHECK1\
+            CHECK(1,-3)\
+            CHECK2\
+\
+            /* if(p->mode<2) ... */\
+            "movq %[tmp3], %%mm6 \n\t" /* diff */\
+            "cmp $2, %[mode] \n\t"\
+            "jge 1f \n\t"\
+            LOAD4("(%["prev2"],%[mrefs],2)", %%mm2) /* prev2[x-2*refs] */\
+            LOAD4("(%["next2"],%[mrefs],2)", %%mm4) /* next2[x-2*refs] */\
+            LOAD4("(%["prev2"],%[prefs],2)", %%mm3) /* prev2[x+2*refs] */\
+            LOAD4("(%["next2"],%[prefs],2)", %%mm5) /* next2[x+2*refs] */\
+            "paddw %%mm4, %%mm2 \n\t"\
+            "paddw %%mm5, %%mm3 \n\t"\
+            "psrlw $1, %%mm2 \n\t" /* b */\
+            "psrlw $1, %%mm3 \n\t" /* f */\
+            "movq %[tmp0], %%mm4 \n\t" /* c */\
+            "movq %[tmp1], %%mm5 \n\t" /* d */\
+            "movq %[tmp2], %%mm7 \n\t" /* e */\
+            "psubw %%mm4, %%mm2 \n\t" /* b-c */\
+            "psubw %%mm7, %%mm3 \n\t" /* f-e */\
+            "movq %%mm5, %%mm0 \n\t"\
+            "psubw %%mm4, %%mm5 \n\t" /* d-c */\
+            "psubw %%mm7, %%mm0 \n\t" /* d-e */\
+            "movq %%mm2, %%mm4 \n\t"\
+            "pminsw %%mm3, %%mm2 \n\t"\
+            "pmaxsw %%mm4, %%mm3 \n\t"\
+            "pmaxsw %%mm5, %%mm2 \n\t"\
+            "pminsw %%mm5, %%mm3 \n\t"\
+            "pmaxsw %%mm0, %%mm2 \n\t" /* max */\
+            "pminsw %%mm0, %%mm3 \n\t" /* min */\
+            "pxor %%mm4, %%mm4 \n\t"\
+            "pmaxsw %%mm3, %%mm6 \n\t"\
+            "psubw %%mm2, %%mm4 \n\t" /* -max */\
+            "pmaxsw %%mm4, %%mm6 \n\t" /* diff= MAX3(diff, min, -max); */\
+            "1: \n\t"\
+\
+            "movq %[tmp1], %%mm2 \n\t" /* d */\
+            "movq %%mm2, %%mm3 \n\t"\
+            "psubw %%mm6, %%mm2 \n\t" /* d-diff */\
+            "paddw %%mm6, %%mm3 \n\t" /* d+diff */\
+            "pmaxsw %%mm2, %%mm1 \n\t"\
+            "pminsw %%mm3, %%mm1 \n\t" /* d = clip(spatial_pred, d-diff, d+diff); */\
+            "packuswb %%mm1, %%mm1 \n\t"\
+\
+            :[tmp0]"=m"(tmp0),\
+             [tmp1]"=m"(tmp1),\
+             [tmp2]"=m"(tmp2),\
+             [tmp3]"=m"(tmp3)\
+            :[prev] "r"(prev),\
+             [cur] "r"(cur),\
+             [next] "r"(next),\
+             [prefs]"r"((x86_reg)refs),\
+             [mrefs]"r"((x86_reg)-refs),\
+             [pw1] "m"(pw_1),\
+             [pb1] "m"(pb_1),\
+             [mode] "g"(mode)\
+        );\
+        __asm__ volatile("movd %%mm1, %0" :"=m"(*dst));\
+        dst += 4;\
+        prev+= 4;\
+        cur += 4;\
+        next+= 4;\
+    }
+
+    if (parity) {
+#define prev2 "prev"
+#define next2 "cur"
+        FILTER
+#undef prev2
+#undef next2
+    } else {
+#define prev2 "cur"
+#define next2 "next"
+        FILTER
+#undef prev2
+#undef next2
+    }
+}
+#undef LOAD4
+#undef PABS
+#undef CHECK
+#undef CHECK1
+#undef CHECK2
+#undef FILTER
+
+#endif /* HAVE_MMX */