diff options
Diffstat (limited to 'libavcodec/x86/videodsp.asm')
-rw-r--r-- | libavcodec/x86/videodsp.asm | 60 |
1 file changed, 30 insertions, 30 deletions
diff --git a/libavcodec/x86/videodsp.asm b/libavcodec/x86/videodsp.asm index 53b9e82..1ac0257 100644 --- a/libavcodec/x86/videodsp.asm +++ b/libavcodec/x86/videodsp.asm @@ -2,20 +2,20 @@ ;* Core video DSP functions ;* Copyright (c) 2012 Ronald S. Bultje <rsbultje@gmail.com> ;* -;* This file is part of Libav. +;* This file is part of FFmpeg. ;* -;* Libav is free software; you can redistribute it and/or +;* FFmpeg is free software; you can redistribute it and/or ;* modify it under the terms of the GNU Lesser General Public ;* License as published by the Free Software Foundation; either ;* version 2.1 of the License, or (at your option) any later version. ;* -;* Libav is distributed in the hope that it will be useful, +;* FFmpeg is distributed in the hope that it will be useful, ;* but WITHOUT ANY WARRANTY; without even the implied warranty of ;* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU ;* Lesser General Public License for more details. ;* ;* You should have received a copy of the GNU Lesser General Public -;* License along with Libav; if not, write to the Free Software +;* License along with FFmpeg; if not, write to the Free Software ;* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA ;****************************************************************************** @@ -54,13 +54,13 @@ SECTION .text ; | | <- bottom is copied from last line in body of source ; '----' <- bh %if ARCH_X86_64 -cglobal emu_edge_vvar, 7, 8, 1, dst, src, dst_stride, src_stride, \ +cglobal emu_edge_vvar, 7, 8, 1, dst, dst_stride, src, src_stride, \ start_y, end_y, bh, w %else ; x86-32 cglobal emu_edge_vvar, 1, 6, 1, dst, src, start_y, end_y, bh, w %define src_strideq r3mp -%define dst_strideq r2mp - mov srcq, r1mp +%define dst_strideq r1mp + mov srcq, r2mp mov start_yq, r4mp mov end_yq, r5mp mov bhq, r6mp @@ -262,30 +262,30 @@ hvar_fn %rep 1+%2-%1 %if %%n <= 3 %if ARCH_X86_64 -cglobal emu_edge_vfix %+ %%n, 6, 8, 0, dst, src, dst_stride, 
src_stride, \ +cglobal emu_edge_vfix %+ %%n, 6, 8, 0, dst, dst_stride, src, src_stride, \ start_y, end_y, val, bh mov bhq, r6mp ; r6mp = bhmp %else ; x86-32 cglobal emu_edge_vfix %+ %%n, 0, 6, 0, val, dst, src, start_y, end_y, bh mov dstq, r0mp - mov srcq, r1mp + mov srcq, r2mp mov start_yq, r4mp mov end_yq, r5mp mov bhq, r6mp -%define dst_strideq r2mp +%define dst_strideq r1mp %define src_strideq r3mp %endif ; x86-64/32 %else %if ARCH_X86_64 -cglobal emu_edge_vfix %+ %%n, 7, 7, 1, dst, src, dst_stride, src_stride, \ +cglobal emu_edge_vfix %+ %%n, 7, 7, 1, dst, dst_stride, src, src_stride, \ start_y, end_y, bh %else ; x86-32 cglobal emu_edge_vfix %+ %%n, 1, 5, 1, dst, src, start_y, end_y, bh - mov srcq, r1mp + mov srcq, r2mp mov start_yq, r4mp mov end_yq, r5mp mov bhq, r6mp -%define dst_strideq r2mp +%define dst_strideq r1mp %define src_strideq r3mp %endif ; x86-64/32 %endif @@ -344,10 +344,6 @@ VERTICAL_EXTEND 16, 22 ; obviously not the same on both sides. %macro READ_V_PIXEL 2 -%if %1 == 2 - movzx valw, byte %2 - imul valw, 0x0101 -%else movzx vald, byte %2 imul vald, 0x01010101 %if %1 >= 8 @@ -356,13 +352,15 @@ VERTICAL_EXTEND 16, 22 pshufd m0, m0, q0000 %else punpckldq m0, m0 -%endif -%endif ; %1 >= 8 -%endif +%endif ; mmsize == 16 +%endif ; %1 > 16 %endmacro ; READ_V_PIXEL %macro WRITE_V_PIXEL 2 %assign %%off 0 + +%if %1 >= 8 + %rep %1/mmsize movu [%2+%%off], m0 %assign %%off %%off+mmsize @@ -378,27 +376,29 @@ VERTICAL_EXTEND 16, 22 %assign %%off %%off+8 %endif %endif ; %1-%%off >= 8 -%endif +%endif ; mmsize == 16 %if %1-%%off >= 4 %if %1 > 8 && %1-%%off > 4 movq [%2+%1-8], m0 %assign %%off %1 -%elif %1 >= 8 && %1-%%off >= 4 - movd [%2+%%off], m0 -%assign %%off %%off+4 %else - mov [%2+%%off], vald + movd [%2+%%off], m0 %assign %%off %%off+4 %endif %endif ; %1-%%off >= 4 -%if %1-%%off >= 2 -%if %1 >= 8 - movd [%2+%1-4], m0 -%else +%else ; %1 < 8 + +%rep %1/4 + mov [%2+%%off], vald +%assign %%off %%off+4 +%endrep ; %1/4 + +%endif ; %1 >=/< 8 + +%if %1-%%off == 2 
mov [%2+%%off], valw -%endif %endif ; (%1-%%off)/2 %endmacro ; WRITE_V_PIXEL |