;******************************************************************************
;*
;* Copyright (c) 2000-2001 Fabrice Bellard
;* Copyright (c) Nick Kurshev
;* Copyright (c) 2002 Michael Niedermayer
;* Copyright (c) 2002 Zdenek Kabelac
;* Copyright (c) 2013 Daniel Kang
;*
;* SIMD-optimized halfpel functions
;*
;* This file is part of FFmpeg.
;*
;* FFmpeg is free software; you can redistribute it and/or
;* modify it under the terms of the GNU Lesser General Public
;* License as published by the Free Software Foundation; either
;* version 2.1 of the License, or (at your option) any later version.
;*
;* FFmpeg is distributed in the hope that it will be useful,
;* but WITHOUT ANY WARRANTY; without even the implied warranty of
;* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
;* Lesser General Public License for more details.
;*
;* You should have received a copy of the GNU Lesser General Public
;* License along with FFmpeg; if not, write to the Free Software
;* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
;******************************************************************************

; NOTE(review): all functions below share the cglobal argument convention
;   r0  = block      (destination, assumed suitably aligned for mova stores)
;   r1  = pixels     (source; loaded with movu where misalignment is possible)
;   r2  = line_size  (stride in bytes)
;   r3d = h          (row count; loops consume 2 or 4 rows per iteration,
;                     so h is presumably a multiple of that — confirm at callers)
; "x2"  = horizontal half-pel (average of src and src+1)
; "y2"  = vertical half-pel   (average of a row and the row below)
; "xy2" = diagonal half-pel   (average of the 2x2 neighbourhood)
; "put" overwrites the destination; "avg" additionally pavgb's with the
; existing destination contents.

%include "libavutil/x86/x86util.asm"

SECTION_RODATA

cextern pb_1
cextern pw_1
cextern pw_2

; Shuffle mask for pshufb: interleaves the low 8 bytes with the high 8 bytes
; of an xmm register. Used by SSSE3_PIXELS_XY2 to restore pixel order after
; packuswb has placed even-position results in the low half and odd-position
; results in the high half.
pb_interleave16: db 0, 8, 1, 9, 2, 10, 3, 11, 4, 12, 5, 13, 6, 14, 7, 15

cextern pw_8192

SECTION .text

; void ff_put_pixels8_x2(uint8_t *block, const uint8_t *pixels, ptrdiff_t line_size, int h)
;
; Rounded horizontal half-pel copy: dst[x] = (src[x] + src[x+1] + 1) >> 1
; (pavgb rounds up). The macro body is shared between the mmxext 8-pixel
; version and the sse2 16-pixel version; under sse2, pavgb cannot take an
; unaligned memory operand safely, so the second operand is loaded with movu
; into m2/m3 first. Processes 4 rows per loop iteration.
%macro PUT_PIXELS8_X2 0
%if cpuflag(sse2)
cglobal put_pixels16_x2, 4,5,4
%else
cglobal put_pixels8_x2, 4,5
%endif
    lea          r4, [r2*2]           ; r4 = 2*line_size (loop row step)
.loop:
    movu         m0, [r1+1]
    movu         m1, [r1+r2+1]
%if cpuflag(sse2)
    movu         m2, [r1]
    movu         m3, [r1+r2]
    pavgb        m0, m2
    pavgb        m1, m3
%else
    pavgb        m0, [r1]
    pavgb        m1, [r1+r2]
%endif
    mova       [r0], m0
    mova    [r0+r2], m1
    add          r1, r4
    add          r0, r4
    ; second pair of rows (same pattern, unrolled)
    movu         m0, [r1+1]
    movu         m1, [r1+r2+1]
%if cpuflag(sse2)
    movu         m2, [r1]
    movu         m3, [r1+r2]
    pavgb        m0, m2
    pavgb        m1, m3
%else
    pavgb        m0, [r1]
    pavgb        m1, [r1+r2]
%endif
    add          r1, r4
    mova       [r0], m0
    mova    [r0+r2], m1
    add          r0, r4
    sub         r3d, 4                ; 4 rows done per iteration
    jne .loop
    RET
%endmacro

INIT_MMX mmxext
PUT_PIXELS8_X2

; void ff_put_pixels16_x2(uint8_t *block, const uint8_t *pixels, ptrdiff_t line_size, int h)
; The 8_X2 macro can easily be used here
INIT_XMM sse2
PUT_PIXELS8_X2

; void ff_put_no_rnd_pixels8_x2(uint8_t *block, const uint8_t *pixels, ptrdiff_t line_size, int h)
;
; No-rounding horizontal half-pel: approximates (a + b) >> 1 by computing
; pavgb(a - 1, b); psubusb saturates at 0, so the result is presumably inexact
; when a == 0 — the bit-exact variants below (_exact) use the complement trick
; instead. Processes 4 rows per iteration.
INIT_MMX mmxext
cglobal put_no_rnd_pixels8_x2, 4,5
    mova         m6, [pb_1]           ; constant 1 per byte, for the -1 bias
    lea          r4, [r2*2]
.loop:
    mova         m0, [r1]
    mova         m2, [r1+r2]
    mova         m1, [r1+1]
    mova         m3, [r1+r2+1]
    add          r1, r4
    psubusb      m0, m6               ; a-1 (saturating) cancels pavgb's +1 rounding
    psubusb      m2, m6
    pavgb        m0, m1
    pavgb        m2, m3
    mova       [r0], m0
    mova    [r0+r2], m2
    mova         m0, [r1]
    mova         m1, [r1+1]
    mova         m2, [r1+r2]
    mova         m3, [r1+r2+1]
    add          r0, r4
    add          r1, r4
    psubusb      m0, m6
    psubusb      m2, m6
    pavgb        m0, m1
    pavgb        m2, m3
    mova       [r0], m0
    mova    [r0+r2], m2
    add          r0, r4
    sub         r3d, 4
    jne .loop
    RET

; Bit-exact no-rounding horizontal half-pel, for put (mmxext, 8 px) and
; put/avg (sse2, 16 px). Uses the complement identity
;   ~pavgb(~a, ~b) == (a + b) >> 1
; so the truncating average is exact for all inputs (unlike the psubusb
; approximation above). m4 = all-ones (pcmpeqb with itself) is the XOR mask.
; Processes 4 rows per iteration.
%macro NO_RND_PIXELS_X2 1
%if cpuflag(sse2)
cglobal %1_no_rnd_pixels16_x2, 4,5,5
%else
; void ff_put_no_rnd_pixels8_x2_exact(uint8_t *block, const uint8_t *pixels, ptrdiff_t line_size, int h)
cglobal %1_no_rnd_pixels8_x2_exact, 4,5
%endif
    lea          r4, [r2*3]           ; r4 = 3*line_size (offset of 4th row)
    pcmpeqb      m4, m4               ; m4 = 0xFF.. complement mask
.loop:
    movu         m0, [r1]
    movu         m2, [r1+r2]
    movu         m1, [r1+1]
    movu         m3, [r1+r2+1]
    pxor         m0, m4               ; complement inputs
    pxor         m2, m4
    pxor         m1, m4
    pxor         m3, m4
    pavgb        m0, m1
    pavgb        m2, m3
    pxor         m0, m4               ; complement result -> truncating average
    pxor         m2, m4
%ifidn %1, avg
    pavgb        m0, [r0]
    pavgb        m2, [r0+r2]
%endif
    mova       [r0], m0
    mova    [r0+r2], m2
    ; rows 3 and 4 of this iteration
    movu         m0, [r1+r2*2]
    movu         m1, [r1+r2*2+1]
    movu         m2, [r1+r4]
    movu         m3, [r1+r4+1]
    pxor         m0, m4
    pxor         m1, m4
    pxor         m2, m4
    pxor         m3, m4
    pavgb        m0, m1
    pavgb        m2, m3
    pxor         m0, m4
    pxor         m2, m4
%ifidn %1, avg
    pavgb        m0, [r0+r2*2]
    pavgb        m2, [r0+r4]
%endif
    mova [r0+r2*2], m0
    mova    [r0+r4], m2
    lea          r1, [r1+r2*4]
    lea          r0, [r0+r2*4]
    sub         r3d, 4
    jg .loop
    RET
%endmacro

INIT_MMX mmxext
NO_RND_PIXELS_X2 put
INIT_XMM sse2
NO_RND_PIXELS_X2 avg
NO_RND_PIXELS_X2 put

; void ff_put_pixels8_y2(uint8_t *block, const uint8_t *pixels, ptrdiff_t line_size, int h)
;
; Rounded vertical half-pel: dst row = pavgb(src row, src row+1). Each source
; row is loaded once and carried across loop halves in a rotating register
; (m0/m2 alternate as "previous row"); r0 is pre-biased by -line_size so
; stores use the same [r0+r2]/[r0+r4] offsets as the loads. 4 rows/iteration.
%macro PUT_PIXELS8_Y2 0
%if cpuflag(sse2)
cglobal put_pixels16_y2, 4,5,3
%else
cglobal put_pixels8_y2, 4,5
%endif
    lea          r4, [r2*2]
    movu         m0, [r1]             ; prime the pipeline with row 0
    sub          r0, r2               ; bias dst so offsets match src offsets
.loop:
    movu         m1, [r1+r2]
    movu         m2, [r1+r4]
    add          r1, r4
    pavgb        m0, m1
    pavgb        m1, m2
    mova    [r0+r2], m0
    mova    [r0+r4], m1
    movu         m1, [r1+r2]
    movu         m0, [r1+r4]          ; m0 becomes "previous row" for next iter
    add          r0, r4
    add          r1, r4
    pavgb        m2, m1
    pavgb        m1, m0
    mova    [r0+r2], m2
    mova    [r0+r4], m1
    add          r0, r4
    sub         r3d, 4
    jne .loop
    RET
%endmacro

INIT_MMX mmxext
PUT_PIXELS8_Y2
; actually, put_pixels16_y2_sse2
INIT_XMM sse2
PUT_PIXELS8_Y2

; void ff_put_no_rnd_pixels8_y2(uint8_t *block, const uint8_t *pixels, ptrdiff_t line_size, int h)
;
; No-rounding vertical half-pel via the same psubusb(-1)+pavgb approximation
; as put_no_rnd_pixels8_x2 (inexact when the biased byte is 0). Same rotating
; row-register structure as PUT_PIXELS8_Y2. 4 rows/iteration.
INIT_MMX mmxext
cglobal put_no_rnd_pixels8_y2, 4,5
    mova         m6, [pb_1]
    lea          r4, [r2+r2]
    mova         m0, [r1]
    sub          r0, r2
.loop:
    mova         m1, [r1+r2]
    mova         m2, [r1+r4]
    add          r1, r4
    psubusb      m1, m6               ; bias the shared middle row once
    pavgb        m0, m1
    pavgb        m1, m2
    mova    [r0+r2], m0
    mova    [r0+r4], m1
    mova         m1, [r1+r2]
    mova         m0, [r1+r4]
    add          r0, r4
    add          r1, r4
    psubusb      m1, m6
    pavgb        m2, m1
    pavgb        m1, m0
    mova    [r0+r2], m2
    mova    [r0+r4], m1
    add          r0, r4
    sub         r3d, 4
    jne .loop
    RET

; Bit-exact no-rounding vertical half-pel (put mmxext 8 px; put/avg sse2
; 16 px), using the complement trick (see NO_RND_PIXELS_X2). The previous
; row is kept complemented in a rotating register across iterations.
; 4 rows/iteration.
%macro NO_RND_PIXELS_Y2 1
%if cpuflag(sse2)
cglobal %1_no_rnd_pixels16_y2, 4,5,4
%else
; void ff_put_no_rnd_pixels8_y2_exact(uint8_t *block, const uint8_t *pixels, ptrdiff_t line_size, int h)
cglobal %1_no_rnd_pixels8_y2_exact, 4,5
%endif
    lea          r4, [r2*3]
    movu         m0, [r1]
    pcmpeqb      m3, m3               ; complement mask
    add          r1, r2
    pxor         m0, m3               ; keep previous row complemented
.loop:
    movu         m1, [r1]
    movu         m2, [r1+r2]
    pxor         m1, m3
    pxor         m2, m3
    pavgb        m0, m1
    pavgb        m1, m2
    pxor         m0, m3               ; un-complement only the stored results
    pxor         m1, m3
%ifidn %1, avg
    pavgb        m0, [r0]
    pavgb        m1, [r0+r2]
%endif
    mova       [r0], m0
    mova    [r0+r2], m1
    movu         m1, [r1+r2*2]
    movu         m0, [r1+r4]
    pxor         m1, m3
    pxor         m0, m3
    pavgb        m2, m1
    pavgb        m1, m0
    pxor         m2, m3
    pxor         m1, m3
%ifidn %1, avg
    pavgb        m2, [r0+r2*2]
    pavgb        m1, [r0+r4]
%endif
    mova [r0+r2*2], m2
    mova    [r0+r4], m1
    lea          r1, [r1+r2*4]
    lea          r0, [r0+r2*4]
    sub         r3d, 4
    jg .loop
    RET
%endmacro

INIT_MMX mmxext
NO_RND_PIXELS_Y2 put
INIT_XMM sse2
NO_RND_PIXELS_Y2 avg
NO_RND_PIXELS_Y2 put

; void ff_avg_pixels8_x2(uint8_t *block, const uint8_t *pixels, ptrdiff_t line_size, int h)
;
; Rounded horizontal half-pel, averaged into the existing destination:
; dst = pavgb(dst, pavgb(src, src+1)). Same mmxext/sse2 sharing and 4-row
; unroll as PUT_PIXELS8_X2.
%macro AVG_PIXELS8_X2 0
%if cpuflag(sse2)
cglobal avg_pixels16_x2, 4,5,4
%else
cglobal avg_pixels8_x2, 4,5
%endif
    lea          r4, [r2*2]
.loop:
    movu         m0, [r1]
    movu         m2, [r1+r2]
%if cpuflag(sse2)
    movu         m1, [r1+1]
    movu         m3, [r1+r2+1]
    pavgb        m0, m1
    pavgb        m2, m3
%else
    pavgb        m0, [r1+1]
    pavgb        m2, [r1+r2+1]
%endif
    pavgb        m0, [r0]             ; blend with current destination
    pavgb        m2, [r0+r2]
    add          r1, r4
    mova       [r0], m0
    mova    [r0+r2], m2
    movu         m0, [r1]
    movu         m2, [r1+r2]
%if cpuflag(sse2)
    movu         m1, [r1+1]
    movu         m3, [r1+r2+1]
    pavgb        m0, m1
    pavgb        m2, m3
%else
    pavgb        m0, [r1+1]
    pavgb        m2, [r1+r2+1]
%endif
    add          r0, r4
    add          r1, r4
    pavgb        m0, [r0]
    pavgb        m2, [r0+r2]
    mova       [r0], m0
    mova    [r0+r2], m2
    add          r0, r4
    sub         r3d, 4
    jne .loop
    RET
%endmacro

INIT_MMX mmxext
AVG_PIXELS8_X2
; actually avg_pixels16_x2
INIT_XMM sse2
AVG_PIXELS8_X2

; void ff_avg_pixels8_y2(uint8_t *block, const uint8_t *pixels, ptrdiff_t line_size, int h)
;
; Rounded vertical half-pel averaged into the destination; rotating
; previous-row register and -line_size dst bias as in PUT_PIXELS8_Y2.
; 4 rows/iteration.
%macro AVG_PIXELS8_Y2 0
%if cpuflag(sse2)
cglobal avg_pixels16_y2, 4,5,3
%else
cglobal avg_pixels8_y2, 4,5
%endif
    lea          r4, [r2*2]
    movu         m0, [r1]
    sub          r0, r2
.loop:
    movu         m1, [r1+r2]
    movu         m2, [r1+r4]
    add          r1, r4
    pavgb        m0, m1
    pavgb        m1, m2
    pavgb        m0, [r0+r2]          ; blend with current destination
    pavgb        m1, [r0+r4]
    mova    [r0+r2], m0
    mova    [r0+r4], m1
    movu         m1, [r1+r2]
    movu         m0, [r1+r4]
    pavgb        m2, m1
    pavgb        m1, m0
    add          r0, r4
    add          r1, r4
    pavgb        m2, [r0+r2]
    pavgb        m1, [r0+r4]
    mova    [r0+r2], m2
    mova    [r0+r4], m1
    add          r0, r4
    sub         r3d, 4
    jne .loop
    RET
%endmacro

INIT_MMX mmxext
AVG_PIXELS8_Y2
; actually avg_pixels16_y2
INIT_XMM sse2
AVG_PIXELS8_Y2

; void ff_put_no_rnd_pixels8_xy2(uint8_t *block, const uint8_t *pixels, ptrdiff_t line_size, int h)
;
; SSSE3 8-pixel diagonal half-pel. Each row's horizontal pair sums
; (src[x] + src[x+1]) are formed by interleaving the row with its +1 shift
; (punpcklbw) and pmaddubsw against pb_1; the sum for the previous row is
; carried across loop halves in a rotating register (m2/m0). Final value:
;   _no_rnd: (rowsum_prev + rowsum_cur + pw_1) >> 2   (truncating)
;   rounded: pmulhrsw by pw_8192, i.e. (x*8192 + 0x4000) >> 15 == (x+2) >> 2
; %1 = put/avg, %2 = word constant, %3 optionally "_no_rnd" (name suffix and
; rounding-mode selector). r4 is the running row offset. 2 rows/iteration.
%macro SET_PIXELS8_XY2 2-3
cglobal %1%3_pixels8_xy2, 4,5,5
    mova         m4, [pb_1]           ; pmaddubsw multiplier: sum byte pairs
    mova         m3, [%2]             ; rounding constant (pw_1 or pw_8192)
    movh         m0, [r1]
    movh         m2, [r1+1]
    punpcklbw    m2, m0
    pmaddubsw    m2, m4               ; m2 = horizontal pair sums of row 0
    xor          r4, r4
    add          r1, r2
.loop:
    movh         m0, [r1+r4]
    movh         m1, [r1+r4+1]
    punpcklbw    m0, m1
    pmaddubsw    m0, m4               ; current row's pair sums
%ifidn %3, _no_rnd
    paddw        m2, m3               ; +1 then truncating >>2
    paddw        m2, m0
    psrlw        m2, 2
%else
    paddw        m2, m0
    pmulhrsw     m2, m3               ; rounded divide by 4
%endif
%ifidn %1, avg
    movh         m1, [r0+r4]
    packuswb     m2, m2
    pavgb        m2, m1               ; blend with destination
%else
    packuswb     m2, m2
%endif
    movh    [r0+r4], m2
    add          r4, r2
    ; second row: registers m0/m2 swap roles
    movh         m1, [r1+r4]
    movh         m2, [r1+r4+1]
    punpcklbw    m2, m1
    pmaddubsw    m2, m4
%ifidn %3, _no_rnd
    paddw        m0, m3
    paddw        m0, m2
    psrlw        m0, 2
%else
    paddw        m0, m2
    pmulhrsw     m0, m3
%endif
%ifidn %1, avg
    movh         m1, [r0+r4]
    packuswb     m0, m0
    pavgb        m0, m1
%else
    packuswb     m0, m0
%endif
    movh    [r0+r4], m0
    add          r4, r2
    sub         r3d, 2
    jnz .loop
    RET
%endmacro

INIT_XMM ssse3
SET_PIXELS8_XY2 put, pw_1, _no_rnd
SET_PIXELS8_XY2 avg, pw_8192
SET_PIXELS8_XY2 put, pw_8192

; void ff_avg_pixels16_xy2(uint8_t *block, const uint8_t *pixels, ptrdiff_t line_size, int h)
;
; SSE2 16-pixel diagonal half-pel. Bytes are widened to words (punpck with
; m7 = 0), horizontal pair sums built by adding the row to its +1 shift, the
; previous row's low/high sums carried in rotating register pairs
; (m4/m5 <-> m0/m1), then (prev + cur + %2) >> 2 per lane. %2 = pw_2 gives
; classic rounding, pw_1 + "_no_rnd" suffix gives the no-round variant.
; 2 rows/iteration.
%macro SET_PIXELS_XY2 2-3
cglobal %1%3_pixels16_xy2, 4,5,8
    pxor         m7, m7               ; zero, for byte->word unpack
    mova         m6, [%2]             ; rounding bias (pw_2 or pw_1)
    movu         m0, [r1]
    movu         m4, [r1+1]
    mova         m1, m0
    mova         m5, m4
    punpcklbw    m0, m7
    punpcklbw    m4, m7
    punpckhbw    m1, m7
    punpckhbw    m5, m7
    paddw        m4, m0               ; m4/m5 = row 0 pair sums (lo/hi)
    paddw        m5, m1
    xor          r4, r4
    add          r1, r2
.loop:
    movu         m0, [r1+r4]
    movu         m2, [r1+r4+1]
    mova         m1, m0
    mova         m3, m2
    punpcklbw    m0, m7
    punpcklbw    m2, m7
    punpckhbw    m1, m7
    punpckhbw    m3, m7
    paddw        m0, m2               ; m0/m1 = current row pair sums (lo/hi)
    paddw        m1, m3
    paddw        m4, m6               ; prev + bias + cur, then >>2
    paddw        m5, m6
    paddw        m4, m0
    paddw        m5, m1
    psrlw        m4, 2
    psrlw        m5, 2
%ifidn %1, avg
    mova         m3, [r0+r4]
    packuswb     m4, m5
    pavgb        m4, m3               ; blend with destination
%else
    packuswb     m4, m5
%endif
    mova    [r0+r4], m4
    add          r4, r2
    ; second row: m0/m1 become "previous", m4/m5 reloaded as "current"
    movu         m2, [r1+r4]
    movu         m4, [r1+r4+1]
    mova         m3, m2
    mova         m5, m4
    punpcklbw    m2, m7
    punpcklbw    m4, m7
    punpckhbw    m3, m7
    punpckhbw    m5, m7
    paddw        m4, m2
    paddw        m5, m3
    paddw        m0, m6
    paddw        m1, m6
    paddw        m0, m4
    paddw        m1, m5
    psrlw        m0, 2
    psrlw        m1, 2
%ifidn %1, avg
    mova         m3, [r0+r4]
    packuswb     m0, m1
    pavgb        m0, m3
%else
    packuswb     m0, m1
%endif
    mova    [r0+r4], m0
    add          r4, r2
    sub         r3d, 2
    jnz .loop
    RET
%endmacro

INIT_XMM sse2
SET_PIXELS_XY2 put, pw_2
SET_PIXELS_XY2 avg, pw_2
SET_PIXELS_XY2 put, pw_1, _no_rnd
SET_PIXELS_XY2 avg, pw_1, _no_rnd

; SSSE3 16-pixel rounded diagonal half-pel (put/avg). pmaddubsw against pb_1
; turns [r1] into even-position pair sums and [r1+1] into odd-position pair
; sums; previous-row sums rotate between m0/m1 and m2/m3. pmulhrsw by
; pw_8192 is a rounded >>2; after packuswb the even results sit in the low
; 8 bytes and odd results in the high 8 bytes, so pshufb with
; pb_interleave16 restores pixel order. %2 = xmm register count for cglobal
; (avg needs one extra for the destination load). 2 rows/iteration.
%macro SSSE3_PIXELS_XY2 1-2
cglobal %1_pixels16_xy2, 4,5,%2
    mova         m4, [pb_interleave16]
    mova         m5, [pb_1]
    movu         m0, [r1]
    movu         m1, [r1+1]
    pmaddubsw    m0, m5               ; row 0 even-position pair sums
    pmaddubsw    m1, m5               ; row 0 odd-position pair sums
    xor          r4, r4
    add          r1, r2
.loop:
    movu         m2, [r1+r4]
    movu         m3, [r1+r4+1]
    pmaddubsw    m2, m5
    pmaddubsw    m3, m5
    paddw        m0, m2               ; prev + current row sums
    paddw        m1, m3
    pmulhrsw     m0, [pw_8192]        ; rounded divide by 4
    pmulhrsw     m1, [pw_8192]
%ifidn %1, avg
    mova         m6, [r0+r4]
    packuswb     m0, m1
    pshufb       m0, m4               ; re-interleave even/odd results
    pavgb        m0, m6               ; blend with destination
%else
    packuswb     m0, m1
    pshufb       m0, m4
%endif
    mova    [r0+r4], m0
    add          r4, r2
    ; second row: m2/m3 now hold the "previous" sums
    movu         m0, [r1+r4]
    movu         m1, [r1+r4+1]
    pmaddubsw    m0, m5
    pmaddubsw    m1, m5
    paddw        m2, m0
    paddw        m3, m1
    pmulhrsw     m2, [pw_8192]
    pmulhrsw     m3, [pw_8192]
%ifidn %1, avg
    mova         m6, [r0+r4]
    packuswb     m2, m3
    pshufb       m2, m4
    pavgb        m2, m6
%else
    packuswb     m2, m3
    pshufb       m2, m4
%endif
    mova    [r0+r4], m2
    add          r4, r2
    sub         r3d, 2
    jnz .loop
    RET
%endmacro

INIT_XMM ssse3
SSSE3_PIXELS_XY2 put, 6
SSSE3_PIXELS_XY2 avg, 7