|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
%include "libavutil/x86/x86util.asm" |
|
|
|
cextern pw_9 |
|
cextern pw_128 |
|
|
|
SECTION .text |
|
|
|
%if HAVE_MMX_INLINE |
|
|
|
|
|
|
|
|
|
; OP_PUT dst, src: store variant used by the TRANSFER_* macros.
; Intentionally empty - a plain "put" leaves the computed pixels untouched
; for the following mova store, with no blending against the destination.
%macro OP_PUT 2

%endmacro
|
|
|
; OP_AVG dst, src: averaging variant used by the TRANSFER_* macros.
; Blends the computed pixels with the bytes already at the destination.
%macro OP_AVG 2

pavgb %1, %2 ; %1 = (%1 + %2 + 1) >> 1 per unsigned byte (rounds up)

%endmacro
|
|
|
; NORMALIZE_MMX shift: round and downscale the two word accumulators m3/m4.
; m7 must already hold the broadcast rounding bias (see LOAD_ROUNDER_MMX).
%macro NORMALIZE_MMX 1

paddw m3, m7 ; + rounder

paddw m4, m7 ; + rounder

psraw m3, %1 ; signed >> shift

psraw m4, %1 ; signed >> shift

%endmacro
|
|
|
; TRANSFER_DO_PACK op, dst: pack the word accumulators m3/m4 down to 8
; unsigned bytes, apply op (OP_PUT / OP_AVG) against the destination,
; then store the result.
%macro TRANSFER_DO_PACK 2

packuswb m3, m4 ; clamp words to [0,255], pack to bytes

%1 m3, [%2] ; OP_PUT: no-op; OP_AVG: average with dest

mova [%2], m3

%endmacro
|
|
|
; TRANSFER_DONT_PACK op, dst: store the word accumulators m3/m4 without
; packing them to bytes.  Applies op (OP_PUT / OP_AVG) to each half against
; the destination, then writes both mmsize-byte halves back.
; Bugfix: the second op line previously applied %1 to m3 again instead of
; m4, so with OP_AVG the high half (m4) was stored without being blended
; while m3 was averaged twice.
%macro TRANSFER_DONT_PACK 2

%1 m3, [%2]

%1 m4, [%2 + mmsize]

mova [%2], m3

mova [mmsize + %2], m4

%endmacro
|
|
|
|
|
; DO_UNPACK reg: widen the low bytes of reg to words (m0 must be zero).
%macro DO_UNPACK 1

punpcklbw %1, m0

%endmacro
|
; DONT_UNPACK reg: counterpart of DO_UNPACK for sources that are already
; 16-bit data - intentionally a no-op.
%macro DONT_UNPACK 1

%endmacro
|
|
|
|
|
; LOAD_ROUNDER_MMX src: broadcast a 16-bit rounding constant to all four
; words of m7 (m7 is the implicit rounder input of NORMALIZE_MMX and
; SHIFT2_LINE).
%macro LOAD_ROUNDER_MMX 1

movd m7, %1 ; m7 = 0 0 0 rnd

punpcklwd m7, m7 ; m7 = 0 0 rnd rnd

punpckldq m7, m7 ; m7 = rnd rnd rnd rnd

%endmacro
|
|
|
; SHIFT2_LINE off, r0, r1, r2, r3: compute one output row of the 4-tap
; (-1, 9, 9, -1) vertical filter and advance src by one line.
; On entry m<r1>/m<r2> hold the two middle rows already widened to words;
; the outer taps are fetched here:
;   m<r1> = ((m<r1> + m<r2>) * 9 - src[-2*stride] - src[+stride] + rnd) >> shift
; Requires: m0 = 0, m6 = pw_9, m7 = rounder, stride_neg2 = -2*stride.
; The freshly loaded row lands in m<r3> so callers can rotate registers
; between consecutive rows.
%macro SHIFT2_LINE 5

paddw m%3, m%4 ; sum of the two middle rows

movh m%2, [srcq + stride_neg2] ; outer tap above

pmullw m%3, m6 ; * 9

punpcklbw m%2, m0

movh m%5, [srcq + strideq] ; outer tap below (next middle row)

psubw m%3, m%2

punpcklbw m%5, m0

paddw m%3, m7 ; + rounder

psubw m%3, m%5

psraw m%3, shift

movu [dstq + %1], m%3 ; store as 16-bit intermediate, no packing

add srcq, strideq

%endmacro
|
|
|
INIT_MMX mmx

;------------------------------------------------------------------------------
; vc1_put_ver_16b_shift2 - vertical first pass of the VC-1 shift2
; interpolation.  Args: dst (16-bit intermediate), src (pixels), stride,
; plus rnd (r3) and shift (r4).  Filters the source columns with taps
; (-1, 9, 9, -1) (see SHIFT2_LINE) and writes 24-byte rows of words to dst;
; presumably consumed by the *_hor_16b_shift2 functions below - their src
; row stride is also 24.
;------------------------------------------------------------------------------
%if ARCH_X86_32

cglobal vc1_put_ver_16b_shift2, 3,6,0, dst, src, stride

DECLARE_REG_TMP 3, 4, 5

%define rnd r3mp

%define shift qword r4m

%else

cglobal vc1_put_ver_16b_shift2, 4,7,0, dst, src, stride

DECLARE_REG_TMP 4, 5, 6

%define rnd r3d

; shift is an operand of psraw, so it must live in memory or an mm register:

%if WIN64

%define shift r4mp ; on WIN64 the 5th argument is already on the stack

%else

%define shift m5 ; on UNIX64 park it in an otherwise unused mm register

mova shift, r4q

%endif

%endif

%define stride_neg2 t0q ; -2 * stride (offset of the upper outer tap)

%define stride_9minus4 t1q ; 9 * stride - 4 (rewind applied after each strip)

%define i t2q ; strip counter

mov stride_neg2, strideq

neg stride_neg2

add stride_neg2, stride_neg2 ; stride_neg2 = -2 * stride

lea stride_9minus4, [strideq * 9 - 4]

mov i, 3 ; three 4-pixel-wide strips (3 * 8 bytes = one 24-byte dst row)

LOAD_ROUNDER_MMX rnd ; m7 = rounder broadcast to words

mova m6, [pw_9] ; center filter tap for SHIFT2_LINE

pxor m0, m0 ; zero, for byte->word unpacking

.loop:

movh m2, [srcq] ; preload the first two rows of the strip

add srcq, strideq

movh m3, [srcq]

punpcklbw m2, m0

punpcklbw m3, m0

; eight rows per strip: dst offset advances 24 bytes per row while the
; registers rotate (1,2,3,4) so each loaded row is reused as a filter tap

SHIFT2_LINE 0, 1, 2, 3, 4

SHIFT2_LINE 24, 2, 3, 4, 1

SHIFT2_LINE 48, 3, 4, 1, 2

SHIFT2_LINE 72, 4, 1, 2, 3

SHIFT2_LINE 96, 1, 2, 3, 4

SHIFT2_LINE 120, 2, 3, 4, 1

SHIFT2_LINE 144, 3, 4, 1, 2

SHIFT2_LINE 168, 4, 1, 2, 3

sub srcq, stride_9minus4 ; back to the top row, 4 pixels to the right

add dstq, 8 ; next 4 output words

dec i

jnz .loop

RET

%undef rnd

%undef shift

%undef stride_neg2

%undef stride_9minus4

%undef i
|
|
|
|
|
|
|
|
|
|
|
;------------------------------------------------------------------------------
; HOR_16B_SHIFT2 op, opname
; Generates vc1_<opname>_hor_16b_shift2(dst, stride, src, rnd): horizontal
; second pass over a 16-bit intermediate with 24-byte rows (as produced by
; vc1_put_ver_16b_shift2).  For 8 rows of 8 pixels it computes
;   ((b + c) * 9 - (a + d) + rounder) >> 7
; per pixel, re-adds 128, packs to bytes and stores via op (OP_PUT/OP_AVG).
;------------------------------------------------------------------------------
%macro HOR_16B_SHIFT2 2

cglobal vc1_%2_hor_16b_shift2, 4, 5, 0, dst, stride, src, rnd, h

mov hq, 8 ; 8 output rows

sub srcq, 2 ; step back one 16-bit sample (left outer tap)

sub rndd, (-1+9+9-1) * 1024 ; subtract 16*1024 - presumably cancels a
                            ; bias carried by the vertical pass; confirm

LOAD_ROUNDER_MMX rndd ; m7 = adjusted rounder

mova m5, [pw_9] ; center tap

mova m6, [pw_128] ; offset re-added after the shift

pxor m0, m0



.loop:

mova m1, [srcq + 2 * 0] ; tap a, low/high mmsize halves

mova m2, [srcq + 2 * 0 + mmsize]

mova m3, [srcq + 2 * 1] ; taps b + c (the two center samples)

mova m4, [srcq + 2 * 1 + mmsize]

paddw m3, [srcq + 2 * 2]

paddw m4, [srcq + 2 * 2 + mmsize]

paddw m1, [srcq + 2 * 3] ; taps a + d (the two outer samples)

paddw m2, [srcq + 2 * 3 + mmsize]

pmullw m3, m5 ; (b + c) * 9

pmullw m4, m5

psubw m3, m1 ; - (a + d)

psubw m4, m2

NORMALIZE_MMX 7 ; (+ rounder) >> 7



paddw m3, m6 ; + 128: back into unsigned pixel range before the pack

paddw m4, m6

TRANSFER_DO_PACK %1, dstq ; pack to bytes and put/avg into dst

add srcq, 24 ; next intermediate row

add dstq, strideq

dec hq

jnz .loop



RET

%endmacro
|
|
|
; Instantiate both flavors of the horizontal second pass:

INIT_MMX mmx

HOR_16B_SHIFT2 OP_PUT, put ; plain store variant



INIT_MMX mmxext

HOR_16B_SHIFT2 OP_AVG, avg ; averaging variant (pavgb requires MMXEXT)

%endif ; HAVE_MMX_INLINE
|
|
|
; INV_TRANS_INIT: shared setup for the vc1_inv_trans_*_dc routines.
; On entry blockd holds the computed DC correction (may be negative).
; Splits it into two unsigned-saturating byte corrections:
;   m0 = clamp(dc, 0, 255)  broadcast to bytes (applied with paddusb)
;   m1 = clamp(-dc, 0, 255) broadcast to bytes (applied with psubusb)
; so "dest = clip(dest + dc)" works for either sign of dc.
%macro INV_TRANS_INIT 0

movsxdifnidn linesizeq, linesized

movd m0, blockd

SPLATW m0, m0 ; broadcast dc to all words

pxor m1, m1

psubw m1, m0 ; m1 = -dc (words)

packuswb m0, m0 ; clamp to [0,255] and replicate as bytes

packuswb m1, m1



DEFINE_ARGS dest, linesize, linesize3

lea linesize3q, [linesizeq*3] ; 3*linesize, addresses the 4th row

%endmacro
|
|
|
; INV_TRANS_PROCESS mov-suffix: add the DC correction to four rows at dest.
; %1 selects the access width: 'h' -> movh (4 pixels/row),
; 'a' -> mova (8 pixels/row).  paddusb m0 / psubusb m1 implement a
; saturating add of the signed dc; at most one of m0/m1 is non-zero
; (see INV_TRANS_INIT).
%macro INV_TRANS_PROCESS 1

mov%1 m2, [destq+linesizeq*0]

mov%1 m3, [destq+linesizeq*1]

mov%1 m4, [destq+linesizeq*2]

mov%1 m5, [destq+linesize3q]

paddusb m2, m0 ; + max(dc, 0), saturating

paddusb m3, m0

paddusb m4, m0

paddusb m5, m0

psubusb m2, m1 ; - max(-dc, 0), saturating

psubusb m3, m1

psubusb m4, m1

psubusb m5, m1

mov%1 [linesizeq*0+destq], m2

mov%1 [linesizeq*1+destq], m3

mov%1 [linesizeq*2+destq], m4

mov%1 [linesize3q +destq], m5

%endmacro
|
|
|
|
|
INIT_MMX mmxext

; vc1_inv_trans_4x4_dc(dest, linesize, block): DC-only 4x4 inverse
; transform.  dc = (17 * ((17 * block[0] + 4) >> 3) + 64) >> 7, added
; (with clipping) to a 4x4 block of pixels at dest.
cglobal vc1_inv_trans_4x4_dc, 3,4,0, dest, linesize, block

movsx r3d, WORD [blockq] ; dc = block[0]

mov blockd, r3d

shl blockd, 4 ; 16*dc

lea blockd, [blockq+r3+4] ; 17*dc + 4

sar blockd, 3 ; >> 3

mov r3d, blockd ; dc' = (17*dc + 4) >> 3

shl blockd, 4 ; 16*dc'

lea blockd, [blockq+r3+64] ; 17*dc' + 64

sar blockd, 7 ; >> 7



INV_TRANS_INIT



INV_TRANS_PROCESS h ; 4-pixel rows

RET
|
|
|
INIT_MMX mmxext

; vc1_inv_trans_4x8_dc(dest, linesize, block): DC-only 4x8 inverse
; transform.  dc = (12 * ((17 * block[0] + 4) >> 3) + 64) >> 7, added
; (with clipping) to a 4-wide, 8-tall block of pixels at dest.
cglobal vc1_inv_trans_4x8_dc, 3,4,0, dest, linesize, block

movsx r3d, WORD [blockq] ; dc = block[0]

mov blockd, r3d

shl blockd, 4 ; 16*dc

lea blockd, [blockq+r3+4] ; 17*dc + 4

sar blockd, 3 ; dc' = (17*dc + 4) >> 3

shl blockd, 2 ; 4*dc'

lea blockd, [blockq*3+64] ; 12*dc' + 64

sar blockd, 7 ; >> 7



INV_TRANS_INIT



INV_TRANS_PROCESS h ; first four 4-pixel rows

lea destq, [destq+linesizeq*4] ; advance to rows 4..7

INV_TRANS_PROCESS h

RET
|
|
|
INIT_MMX mmxext

; vc1_inv_trans_8x4_dc(dest, linesize, block): DC-only 8x4 inverse
; transform.  dc = (17 * ((3 * block[0] + 1) >> 1) + 64) >> 7, added
; (with clipping) to an 8-wide, 4-tall block of pixels at dest.
cglobal vc1_inv_trans_8x4_dc, 3,4,0, dest, linesize, block

movsx blockd, WORD [blockq] ; dc = block[0]

lea blockd, [blockq*3+1] ; 3*dc + 1

sar blockd, 1 ; dc' = (3*dc + 1) >> 1

mov r3d, blockd

shl blockd, 4 ; 16*dc'

lea blockd, [blockq+r3+64] ; 17*dc' + 64

sar blockd, 7 ; >> 7



INV_TRANS_INIT



INV_TRANS_PROCESS a ; 8-pixel rows

RET
|
|
|
INIT_MMX mmxext

; vc1_inv_trans_8x8_dc(dest, linesize, block): DC-only 8x8 inverse
; transform.  dc = (3 * ((3 * block[0] + 1) >> 1) + 16) >> 5, added
; (with clipping) to an 8x8 block of pixels at dest.
cglobal vc1_inv_trans_8x8_dc, 3,3,0, dest, linesize, block

movsx blockd, WORD [blockq] ; dc = block[0]

lea blockd, [blockq*3+1] ; 3*dc + 1

sar blockd, 1 ; dc' = (3*dc + 1) >> 1

lea blockd, [blockq*3+16] ; 3*dc' + 16

sar blockd, 5 ; >> 5



INV_TRANS_INIT



INV_TRANS_PROCESS a ; first four 8-pixel rows

lea destq, [destq+linesizeq*4] ; advance to rows 4..7

INV_TRANS_PROCESS a

RET
|
|