name (string, 1–473k chars) | code (string, 7–647k chars) | asm (string, 4–3.39M chars) | file (string, 8–196 chars)
---|---|---|---|
final_filter_fast | static void final_filter_fast(int32_t *dst, int dst_stride, const int32_t *A,
const int32_t *B, int buf_stride,
const void *dgd8, int dgd_stride, int width,
int height, int highbd) {
const int nb0 = 5;
const int nb1 = 4;
const __m128i rounding0 =
round_for_shift(SGRPROJ_SGR_BITS + nb0 - SGRPROJ_RST_BITS);
const __m128i rounding1 =
round_for_shift(SGRPROJ_SGR_BITS + nb1 - SGRPROJ_RST_BITS);
const uint8_t *dgd_real =
highbd ? (const uint8_t *)CONVERT_TO_SHORTPTR(dgd8) : dgd8;
for (int i = 0; i < height; ++i) {
if (!(i & 1)) { // even row
for (int j = 0; j < width; j += 4) {
const __m128i a =
cross_sum_fast_even_row(A + i * buf_stride + j, buf_stride);
const __m128i b =
cross_sum_fast_even_row(B + i * buf_stride + j, buf_stride);
const __m128i raw =
xx_loadl_64(dgd_real + ((i * dgd_stride + j) << highbd));
const __m128i src =
highbd ? _mm_cvtepu16_epi32(raw) : _mm_cvtepu8_epi32(raw);
__m128i v = _mm_add_epi32(_mm_madd_epi16(a, src), b);
__m128i w = _mm_srai_epi32(_mm_add_epi32(v, rounding0),
SGRPROJ_SGR_BITS + nb0 - SGRPROJ_RST_BITS);
xx_storeu_128(dst + i * dst_stride + j, w);
}
} else { // odd row
for (int j = 0; j < width; j += 4) {
const __m128i a = cross_sum_fast_odd_row(A + i * buf_stride + j);
const __m128i b = cross_sum_fast_odd_row(B + i * buf_stride + j);
const __m128i raw =
xx_loadl_64(dgd_real + ((i * dgd_stride + j) << highbd));
const __m128i src =
highbd ? _mm_cvtepu16_epi32(raw) : _mm_cvtepu8_epi32(raw);
__m128i v = _mm_add_epi32(_mm_madd_epi16(a, src), b);
__m128i w = _mm_srai_epi32(_mm_add_epi32(v, rounding1),
SGRPROJ_SGR_BITS + nb1 - SGRPROJ_RST_BITS);
xx_storeu_128(dst + i * dst_stride + j, w);
}
}
}
} | subq $0x2b8, %rsp # imm = 0x2B8
movl 0x2d8(%rsp), %eax
movl 0x2d0(%rsp), %eax
movl 0x2c8(%rsp), %eax
movl 0x2c0(%rsp), %eax
movq %rdi, 0x168(%rsp)
movl %esi, 0x164(%rsp)
movq %rdx, 0x158(%rsp)
movq %rcx, 0x150(%rsp)
movl %r8d, 0x14c(%rsp)
movq %r9, 0x140(%rsp)
movl $0x5, 0x13c(%rsp)
movl $0x4, 0x138(%rsp)
movl $0x9, %edi
callq 0x7db840
movdqa %xmm0, 0x120(%rsp)
movl $0x8, %edi
callq 0x7db840
movdqa %xmm0, 0x110(%rsp)
cmpl $0x0, 0x2d8(%rsp)
je 0x7d9b9a
movq 0x140(%rsp), %rax
shlq %rax
movq %rax, 0x28(%rsp)
jmp 0x7d9ba7
movq 0x140(%rsp), %rax
movq %rax, 0x28(%rsp)
movq 0x28(%rsp), %rax
movq %rax, 0x108(%rsp)
movl $0x0, 0x104(%rsp)
movl 0x104(%rsp), %eax
cmpl 0x2d0(%rsp), %eax
jge 0x7da0b1
movl 0x104(%rsp), %eax
andl $0x1, %eax
cmpl $0x0, %eax
jne 0x7d9e5a
movl $0x0, 0x100(%rsp)
movl 0x100(%rsp), %eax
cmpl 0x2c8(%rsp), %eax
jge 0x7d9e55
movq 0x158(%rsp), %rdi
movl 0x104(%rsp), %eax
imull 0x14c(%rsp), %eax
cltq
shlq $0x2, %rax
addq %rax, %rdi
movslq 0x100(%rsp), %rax
shlq $0x2, %rax
addq %rax, %rdi
movl 0x14c(%rsp), %esi
callq 0x7dbfb0
movdqa %xmm0, 0xf0(%rsp)
movq 0x150(%rsp), %rdi
movl 0x104(%rsp), %eax
imull 0x14c(%rsp), %eax
cltq
shlq $0x2, %rax
addq %rax, %rdi
movslq 0x100(%rsp), %rax
shlq $0x2, %rax
addq %rax, %rdi
movl 0x14c(%rsp), %esi
callq 0x7dbfb0
movdqa %xmm0, 0xe0(%rsp)
movq 0x108(%rsp), %rdi
movl 0x104(%rsp), %eax
imull 0x2c0(%rsp), %eax
addl 0x100(%rsp), %eax
movl 0x2d8(%rsp), %ecx
shll %cl, %eax
cltq
addq %rax, %rdi
callq 0x7db810
movdqa %xmm0, 0xd0(%rsp)
cmpl $0x0, 0x2d8(%rsp)
je 0x7d9cf2
movaps 0xd0(%rsp), %xmm0
movaps %xmm0, 0x180(%rsp)
pmovzxwd 0x180(%rsp), %xmm0
movaps %xmm0, 0x10(%rsp)
jmp 0x7d9d11
movaps 0xd0(%rsp), %xmm0
movaps %xmm0, 0x2a0(%rsp)
pmovzxbd 0x2a0(%rsp), %xmm0
movaps %xmm0, 0x10(%rsp)
movaps 0x10(%rsp), %xmm0
movdqa %xmm0, 0xc0(%rsp)
movdqa 0xf0(%rsp), %xmm1
movdqa 0xc0(%rsp), %xmm0
movdqa %xmm1, 0x280(%rsp)
movdqa %xmm0, 0x270(%rsp)
movdqa 0x280(%rsp), %xmm1
movdqa 0x270(%rsp), %xmm0
pmaddwd %xmm0, %xmm1
movdqa 0xe0(%rsp), %xmm0
movdqa %xmm1, 0x200(%rsp)
movdqa %xmm0, 0x1f0(%rsp)
movdqa 0x200(%rsp), %xmm0
movdqa 0x1f0(%rsp), %xmm1
paddd %xmm1, %xmm0
movdqa %xmm0, 0xb0(%rsp)
movdqa 0xb0(%rsp), %xmm1
movdqa 0x120(%rsp), %xmm0
movdqa %xmm1, 0x1e0(%rsp)
movdqa %xmm0, 0x1d0(%rsp)
movdqa 0x1e0(%rsp), %xmm0
movdqa 0x1d0(%rsp), %xmm1
paddd %xmm1, %xmm0
movdqa %xmm0, 0x240(%rsp)
movl $0x9, 0x23c(%rsp)
movdqa 0x240(%rsp), %xmm0
movl 0x23c(%rsp), %eax
movd %eax, %xmm1
psrad %xmm1, %xmm0
movdqa %xmm0, 0xa0(%rsp)
movq 0x168(%rsp), %rdi
movl 0x104(%rsp), %eax
imull 0x164(%rsp), %eax
cltq
shlq $0x2, %rax
addq %rax, %rdi
movslq 0x100(%rsp), %rax
shlq $0x2, %rax
addq %rax, %rdi
movdqa 0xa0(%rsp), %xmm0
callq 0x7db8a0
movl 0x100(%rsp), %eax
addl $0x4, %eax
movl %eax, 0x100(%rsp)
jmp 0x7d9bf1
jmp 0x7da099
movl $0x0, 0x9c(%rsp)
movl 0x9c(%rsp), %eax
cmpl 0x2c8(%rsp), %eax
jge 0x7da097
movq 0x158(%rsp), %rdi
movl 0x104(%rsp), %eax
imull 0x14c(%rsp), %eax
cltq
shlq $0x2, %rax
addq %rax, %rdi
movslq 0x9c(%rsp), %rax
shlq $0x2, %rax
addq %rax, %rdi
callq 0x7dc260
movdqa %xmm0, 0x80(%rsp)
movq 0x150(%rsp), %rdi
movl 0x104(%rsp), %eax
imull 0x14c(%rsp), %eax
cltq
shlq $0x2, %rax
addq %rax, %rdi
movslq 0x9c(%rsp), %rax
shlq $0x2, %rax
addq %rax, %rdi
callq 0x7dc260
movdqa %xmm0, 0x70(%rsp)
movq 0x108(%rsp), %rdi
movl 0x104(%rsp), %eax
imull 0x2c0(%rsp), %eax
addl 0x9c(%rsp), %eax
movl 0x2d8(%rsp), %ecx
shll %cl, %eax
cltq
addq %rax, %rdi
callq 0x7db810
movdqa %xmm0, 0x60(%rsp)
cmpl $0x0, 0x2d8(%rsp)
je 0x7d9f4e
movaps 0x60(%rsp), %xmm0
movaps %xmm0, 0x170(%rsp)
pmovzxwd 0x170(%rsp), %xmm0
movaps %xmm0, (%rsp)
jmp 0x7d9f69
movaps 0x60(%rsp), %xmm0
movaps %xmm0, 0x290(%rsp)
pmovzxbd 0x290(%rsp), %xmm0
movaps %xmm0, (%rsp)
movaps (%rsp), %xmm0
movdqa %xmm0, 0x50(%rsp)
movdqa 0x80(%rsp), %xmm1
movdqa 0x50(%rsp), %xmm0
movdqa %xmm1, 0x260(%rsp)
movdqa %xmm0, 0x250(%rsp)
movdqa 0x260(%rsp), %xmm1
movdqa 0x250(%rsp), %xmm0
pmaddwd %xmm0, %xmm1
movdqa 0x70(%rsp), %xmm0
movdqa %xmm1, 0x1c0(%rsp)
movdqa %xmm0, 0x1b0(%rsp)
movdqa 0x1c0(%rsp), %xmm0
movdqa 0x1b0(%rsp), %xmm1
paddd %xmm1, %xmm0
movdqa %xmm0, 0x40(%rsp)
movdqa 0x40(%rsp), %xmm1
movdqa 0x110(%rsp), %xmm0
movdqa %xmm1, 0x1a0(%rsp)
movdqa %xmm0, 0x190(%rsp)
movdqa 0x1a0(%rsp), %xmm0
movdqa 0x190(%rsp), %xmm1
paddd %xmm1, %xmm0
movdqa %xmm0, 0x220(%rsp)
movl $0x8, 0x21c(%rsp)
movdqa 0x220(%rsp), %xmm0
movl 0x21c(%rsp), %eax
movd %eax, %xmm1
psrad %xmm1, %xmm0
movdqa %xmm0, 0x30(%rsp)
movq 0x168(%rsp), %rdi
movl 0x104(%rsp), %eax
imull 0x164(%rsp), %eax
cltq
shlq $0x2, %rax
addq %rax, %rdi
movslq 0x9c(%rsp), %rax
shlq $0x2, %rax
addq %rax, %rdi
movdqa 0x30(%rsp), %xmm0
callq 0x7db8a0
movl 0x9c(%rsp), %eax
addl $0x4, %eax
movl %eax, 0x9c(%rsp)
jmp 0x7d9e65
jmp 0x7da099
jmp 0x7da09b
movl 0x104(%rsp), %eax
addl $0x1, %eax
movl %eax, 0x104(%rsp)
jmp 0x7d9bbf
addq $0x2b8, %rsp # imm = 0x2B8
retq
nopl (%rax)
| /m-ab-s[P]aom/av1/common/x86/selfguided_sse4.c |
final_filter | static void final_filter(int32_t *dst, int dst_stride, const int32_t *A,
const int32_t *B, int buf_stride, const void *dgd8,
int dgd_stride, int width, int height, int highbd) {
const int nb = 5;
const __m128i rounding =
round_for_shift(SGRPROJ_SGR_BITS + nb - SGRPROJ_RST_BITS);
const uint8_t *dgd_real =
highbd ? (const uint8_t *)CONVERT_TO_SHORTPTR(dgd8) : dgd8;
for (int i = 0; i < height; ++i) {
for (int j = 0; j < width; j += 4) {
const __m128i a = cross_sum(A + i * buf_stride + j, buf_stride);
const __m128i b = cross_sum(B + i * buf_stride + j, buf_stride);
const __m128i raw =
xx_loadl_64(dgd_real + ((i * dgd_stride + j) << highbd));
const __m128i src =
highbd ? _mm_cvtepu16_epi32(raw) : _mm_cvtepu8_epi32(raw);
__m128i v = _mm_add_epi32(_mm_madd_epi16(a, src), b);
__m128i w = _mm_srai_epi32(_mm_add_epi32(v, rounding),
SGRPROJ_SGR_BITS + nb - SGRPROJ_RST_BITS);
xx_storeu_128(dst + i * dst_stride + j, w);
}
}
} | subq $0x188, %rsp # imm = 0x188
movl 0x1a8(%rsp), %eax
movl 0x1a0(%rsp), %eax
movl 0x198(%rsp), %eax
movl 0x190(%rsp), %eax
movq %rdi, 0xd8(%rsp)
movl %esi, 0xd4(%rsp)
movq %rdx, 0xc8(%rsp)
movq %rcx, 0xc0(%rsp)
movl %r8d, 0xbc(%rsp)
movq %r9, 0xb0(%rsp)
movl $0x5, 0xac(%rsp)
movl $0x9, %edi
callq 0x7db840
movdqa %xmm0, 0x90(%rsp)
cmpl $0x0, 0x1a8(%rsp)
je 0x7daa5c
movq 0xb0(%rsp), %rax
shlq %rax
movq %rax, 0x18(%rsp)
jmp 0x7daa69
movq 0xb0(%rsp), %rax
movq %rax, 0x18(%rsp)
movq 0x18(%rsp), %rax
movq %rax, 0x88(%rsp)
movl $0x0, 0x84(%rsp)
movl 0x84(%rsp), %eax
cmpl 0x1a0(%rsp), %eax
jge 0x7dacf2
movl $0x0, 0x80(%rsp)
movl 0x80(%rsp), %eax
cmpl 0x198(%rsp), %eax
jge 0x7dacda
movq 0xc8(%rsp), %rdi
movl 0x84(%rsp), %eax
imull 0xbc(%rsp), %eax
cltq
shlq $0x2, %rax
addq %rax, %rdi
movslq 0x80(%rsp), %rax
shlq $0x2, %rax
addq %rax, %rdi
movl 0xbc(%rsp), %esi
callq 0x7dc3b0
movdqa %xmm0, 0x70(%rsp)
movq 0xc0(%rsp), %rdi
movl 0x84(%rsp), %eax
imull 0xbc(%rsp), %eax
cltq
shlq $0x2, %rax
addq %rax, %rdi
movslq 0x80(%rsp), %rax
shlq $0x2, %rax
addq %rax, %rdi
movl 0xbc(%rsp), %esi
callq 0x7dc3b0
movdqa %xmm0, 0x60(%rsp)
movq 0x88(%rsp), %rdi
movl 0x84(%rsp), %eax
imull 0x190(%rsp), %eax
addl 0x80(%rsp), %eax
movl 0x1a8(%rsp), %ecx
shll %cl, %eax
cltq
addq %rax, %rdi
callq 0x7db810
movdqa %xmm0, 0x50(%rsp)
cmpl $0x0, 0x1a8(%rsp)
je 0x7dab94
movaps 0x50(%rsp), %xmm0
movaps %xmm0, 0xe0(%rsp)
pmovzxwd 0xe0(%rsp), %xmm0
movaps %xmm0, (%rsp)
jmp 0x7dabaf
movaps 0x50(%rsp), %xmm0
movaps %xmm0, 0x170(%rsp)
pmovzxbd 0x170(%rsp), %xmm0
movaps %xmm0, (%rsp)
movaps (%rsp), %xmm0
movdqa %xmm0, 0x40(%rsp)
movdqa 0x70(%rsp), %xmm1
movdqa 0x40(%rsp), %xmm0
movdqa %xmm1, 0x160(%rsp)
movdqa %xmm0, 0x150(%rsp)
movdqa 0x160(%rsp), %xmm1
movdqa 0x150(%rsp), %xmm0
pmaddwd %xmm0, %xmm1
movdqa 0x60(%rsp), %xmm0
movdqa %xmm1, 0x120(%rsp)
movdqa %xmm0, 0x110(%rsp)
movdqa 0x120(%rsp), %xmm0
movdqa 0x110(%rsp), %xmm1
paddd %xmm1, %xmm0
movdqa %xmm0, 0x30(%rsp)
movdqa 0x30(%rsp), %xmm1
movdqa 0x90(%rsp), %xmm0
movdqa %xmm1, 0x100(%rsp)
movdqa %xmm0, 0xf0(%rsp)
movdqa 0x100(%rsp), %xmm0
movdqa 0xf0(%rsp), %xmm1
paddd %xmm1, %xmm0
movdqa %xmm0, 0x140(%rsp)
movl $0x9, 0x13c(%rsp)
movdqa 0x140(%rsp), %xmm0
movl 0x13c(%rsp), %eax
movd %eax, %xmm1
psrad %xmm1, %xmm0
movdqa %xmm0, 0x20(%rsp)
movq 0xd8(%rsp), %rdi
movl 0x84(%rsp), %eax
imull 0xd4(%rsp), %eax
cltq
shlq $0x2, %rax
addq %rax, %rdi
movslq 0x80(%rsp), %rax
shlq $0x2, %rax
addq %rax, %rdi
movdqa 0x20(%rsp), %xmm0
callq 0x7db8a0
movl 0x80(%rsp), %eax
addl $0x4, %eax
movl %eax, 0x80(%rsp)
jmp 0x7daaa0
jmp 0x7dacdc
movl 0x84(%rsp), %eax
addl $0x1, %eax
movl %eax, 0x84(%rsp)
jmp 0x7daa81
addq $0x188, %rsp # imm = 0x188
retq
nopw (%rax,%rax)
| /m-ab-s[P]aom/av1/common/x86/selfguided_sse4.c |
fwd_txfm2d_sse4_1 | static inline void fwd_txfm2d_sse4_1(const int16_t *input, int32_t *output,
const int stride,
const TXFM_2D_FLIP_CFG *cfg,
int32_t *txfm_buf) {
// TODO(sarahparker) This does not currently support rectangular transforms
// and will break without splitting txfm_size out into row and col size.
// Rectangular transforms use c code only, so it should be ok for now.
// It will be corrected when there are sse implementations for rectangular
// transforms.
assert(cfg->tx_size < TX_SIZES);
const int txfm_size = tx_size_wide[cfg->tx_size];
const int8_t *shift = cfg->shift;
const int8_t *stage_range_col = cfg->stage_range_col;
const int8_t *stage_range_row = cfg->stage_range_row;
const int8_t cos_bit_col = cfg->cos_bit_col;
const int8_t cos_bit_row = cfg->cos_bit_row;
const TxfmFuncSSE2 txfm_func_col = fwd_txfm_type_to_func(cfg->txfm_type_col);
const TxfmFuncSSE2 txfm_func_row = fwd_txfm_type_to_func(cfg->txfm_type_row);
__m128i *buf_128 = (__m128i *)txfm_buf;
__m128i *out_128 = (__m128i *)output;
int num_per_128 = 4;
int txfm2d_size_128 = txfm_size * txfm_size / num_per_128;
int16_array_with_stride_to_int32_array_without_stride(input, stride, txfm_buf,
txfm_size);
av1_round_shift_array_32_sse4_1(buf_128, out_128, txfm2d_size_128, -shift[0]);
txfm_func_col(out_128, buf_128, cos_bit_col, stage_range_col);
av1_round_shift_array_32_sse4_1(buf_128, out_128, txfm2d_size_128, -shift[1]);
transpose_32(txfm_size, out_128, buf_128);
txfm_func_row(buf_128, out_128, cos_bit_row, stage_range_row);
av1_round_shift_array_32_sse4_1(out_128, out_128, txfm2d_size_128, -shift[2]);
} | subq $0x78, %rsp
movq %rdi, 0x70(%rsp)
movq %rsi, 0x68(%rsp)
movl %edx, 0x64(%rsp)
movq %rcx, 0x58(%rsp)
movq %r8, 0x50(%rsp)
movq 0x58(%rsp), %rax
movzbl (%rax), %eax
movl %eax, %ecx
leaq 0x32ebe3(%rip), %rax # 0xb19fd0
movl (%rax,%rcx,4), %eax
movl %eax, 0x4c(%rsp)
movq 0x58(%rsp), %rax
movq 0x10(%rax), %rax
movq %rax, 0x40(%rsp)
movq 0x58(%rsp), %rax
addq $0x1a, %rax
movq %rax, 0x38(%rsp)
movq 0x58(%rsp), %rax
addq $0x26, %rax
movq %rax, 0x30(%rsp)
movq 0x58(%rsp), %rax
movb 0x18(%rax), %al
movb %al, 0x2f(%rsp)
movq 0x58(%rsp), %rax
movb 0x19(%rax), %al
movb %al, 0x2e(%rsp)
movq 0x58(%rsp), %rax
movzbl 0x32(%rax), %edi
callq 0x7eb810
movq %rax, 0x20(%rsp)
movq 0x58(%rsp), %rax
movzbl 0x33(%rax), %edi
callq 0x7eb810
movq %rax, 0x18(%rsp)
movq 0x50(%rsp), %rax
movq %rax, 0x10(%rsp)
movq 0x68(%rsp), %rax
movq %rax, 0x8(%rsp)
movl $0x4, 0x4(%rsp)
movl 0x4c(%rsp), %eax
imull 0x4c(%rsp), %eax
cltd
idivl 0x4(%rsp)
movl %eax, (%rsp)
movq 0x70(%rsp), %rdi
movl 0x64(%rsp), %esi
movq 0x50(%rsp), %rdx
movl 0x4c(%rsp), %ecx
callq 0x7eb880
movq 0x10(%rsp), %rdi
movq 0x8(%rsp), %rsi
movl (%rsp), %edx
movq 0x40(%rsp), %rax
movsbl (%rax), %eax
xorl %ecx, %ecx
subl %eax, %ecx
callq 0x7eb910
movq 0x20(%rsp), %rax
movq 0x8(%rsp), %rdi
movq 0x10(%rsp), %rsi
movb 0x2f(%rsp), %dl
movq 0x38(%rsp), %rcx
movsbl %dl, %edx
callq *%rax
movq 0x10(%rsp), %rdi
movq 0x8(%rsp), %rsi
movl (%rsp), %edx
movq 0x40(%rsp), %rax
movsbl 0x1(%rax), %eax
xorl %ecx, %ecx
subl %eax, %ecx
callq 0x7eb910
movl 0x4c(%rsp), %edi
movq 0x8(%rsp), %rsi
movq 0x10(%rsp), %rdx
callq 0x7eba00
movq 0x18(%rsp), %rax
movq 0x10(%rsp), %rdi
movq 0x8(%rsp), %rsi
movb 0x2e(%rsp), %dl
movq 0x30(%rsp), %rcx
movsbl %dl, %edx
callq *%rax
movq 0x8(%rsp), %rdi
movq 0x8(%rsp), %rsi
movl (%rsp), %edx
movq 0x40(%rsp), %rax
movsbl 0x2(%rax), %eax
xorl %ecx, %ecx
subl %eax, %ecx
callq 0x7eb910
addq $0x78, %rsp
retq
nop
| /m-ab-s[P]aom/av1/encoder/x86/av1_fwd_txfm2d_sse4.c |
fwd_txfm2d_64x64_sse4_1 | static inline void fwd_txfm2d_64x64_sse4_1(const int16_t *input,
int32_t *output, const int stride,
const TXFM_2D_FLIP_CFG *cfg,
int32_t *txfm_buf) {
assert(cfg->tx_size < TX_SIZES);
const int txfm_size = tx_size_wide[cfg->tx_size];
const int8_t *shift = cfg->shift;
const int8_t *stage_range_col = cfg->stage_range_col;
const int8_t cos_bit_col = cfg->cos_bit_col;
const int8_t cos_bit_row = cfg->cos_bit_row;
const TxfmFuncSSE2 txfm_func_col = fwd_txfm_type_to_func(cfg->txfm_type_col);
__m128i *buf_128 = (__m128i *)txfm_buf;
__m128i *out_128 = (__m128i *)output;
const int num_per_128 = 4;
int txfm2d_size_128 = txfm_size * txfm_size / num_per_128;
int col_num = txfm_size / num_per_128;
int16_array_with_stride_to_int32_array_without_stride(input, stride, output,
txfm_size);
/*col wise transform*/
txfm_func_col(out_128, buf_128, cos_bit_col, stage_range_col);
av1_round_shift_array_32_sse4_1(buf_128, out_128, txfm2d_size_128, -shift[1]);
transpose_32(txfm_size, out_128, buf_128);
/*row wise transform*/
for (int col = 0; col < (col_num >> 1); col++) {
av1_fdct64_sse4_1((buf_128 + col), (out_128 + col), cos_bit_row, col_num,
(col_num >> 1));
}
txfm2d_size_128 = (col_num >> 1) * (txfm_size >> 1);
av1_round_shift_array_32_sse4_1(out_128, out_128, txfm2d_size_128, -shift[2]);
} | subq $0x78, %rsp
movq %rdi, 0x70(%rsp)
movq %rsi, 0x68(%rsp)
movl %edx, 0x64(%rsp)
movq %rcx, 0x58(%rsp)
movq %r8, 0x50(%rsp)
movq 0x58(%rsp), %rax
movzbl (%rax), %eax
movl %eax, %ecx
leaq 0x32e9e3(%rip), %rax # 0xb19fd0
movl (%rax,%rcx,4), %eax
movl %eax, 0x4c(%rsp)
movq 0x58(%rsp), %rax
movq 0x10(%rax), %rax
movq %rax, 0x40(%rsp)
movq 0x58(%rsp), %rax
addq $0x1a, %rax
movq %rax, 0x38(%rsp)
movq 0x58(%rsp), %rax
movb 0x18(%rax), %al
movb %al, 0x37(%rsp)
movq 0x58(%rsp), %rax
movb 0x19(%rax), %al
movb %al, 0x36(%rsp)
movq 0x58(%rsp), %rax
movzbl 0x32(%rax), %edi
callq 0x7eb810
movq %rax, 0x28(%rsp)
movq 0x50(%rsp), %rax
movq %rax, 0x20(%rsp)
movq 0x68(%rsp), %rax
movq %rax, 0x18(%rsp)
movl $0x4, 0x14(%rsp)
movl 0x4c(%rsp), %eax
imull 0x4c(%rsp), %eax
movl $0x4, %ecx
cltd
idivl %ecx
movl %eax, 0x10(%rsp)
movl 0x4c(%rsp), %eax
movl $0x4, %ecx
cltd
idivl %ecx
movl %eax, 0xc(%rsp)
movq 0x70(%rsp), %rdi
movl 0x64(%rsp), %esi
movq 0x68(%rsp), %rdx
movl 0x4c(%rsp), %ecx
callq 0x7eb880
movq 0x28(%rsp), %rax
movq 0x18(%rsp), %rdi
movq 0x20(%rsp), %rsi
movb 0x37(%rsp), %dl
movq 0x38(%rsp), %rcx
movsbl %dl, %edx
callq *%rax
movq 0x20(%rsp), %rdi
movq 0x18(%rsp), %rsi
movl 0x10(%rsp), %edx
movq 0x40(%rsp), %rax
movsbl 0x1(%rax), %eax
xorl %ecx, %ecx
subl %eax, %ecx
callq 0x7eb910
movl 0x4c(%rsp), %edi
movq 0x18(%rsp), %rsi
movq 0x20(%rsp), %rdx
callq 0x7eba00
movl $0x0, 0x8(%rsp)
movl 0x8(%rsp), %eax
movl 0xc(%rsp), %ecx
sarl %ecx
cmpl %ecx, %eax
jge 0x7eb740
movq 0x20(%rsp), %rdi
movslq 0x8(%rsp), %rax
shlq $0x4, %rax
addq %rax, %rdi
movq 0x18(%rsp), %rsi
movslq 0x8(%rsp), %rax
shlq $0x4, %rax
addq %rax, %rsi
movb 0x36(%rsp), %al
movl 0xc(%rsp), %ecx
movl 0xc(%rsp), %r8d
sarl %r8d
movsbl %al, %edx
callq 0xa64110
movl 0x8(%rsp), %eax
addl $0x1, %eax
movl %eax, 0x8(%rsp)
jmp 0x7eb6eb
movl 0xc(%rsp), %eax
sarl %eax
movl 0x4c(%rsp), %ecx
sarl %ecx
imull %ecx, %eax
movl %eax, 0x10(%rsp)
movq 0x18(%rsp), %rdi
movq 0x18(%rsp), %rsi
movl 0x10(%rsp), %edx
movq 0x40(%rsp), %rax
movsbl 0x2(%rax), %eax
xorl %ecx, %ecx
subl %eax, %ecx
callq 0x7eb910
addq $0x78, %rsp
retq
nopl (%rax,%rax)
| /m-ab-s[P]aom/av1/encoder/x86/av1_fwd_txfm2d_sse4.c |
av1_lowbd_fwd_txfm_sse4_1 | void av1_lowbd_fwd_txfm_sse4_1(const int16_t *src_diff, tran_low_t *coeff,
int diff_stride, TxfmParam *txfm_param) {
FwdTxfm2dFunc fwd_txfm2d_func = fwd_txfm2d_func_ls[txfm_param->tx_size];
if (txfm_param->lossless && txfm_param->tx_size == TX_4X4) {
av1_lowbd_fwd_txfm_c(src_diff, coeff, diff_stride, txfm_param);
} else {
fwd_txfm2d_func(src_diff, coeff, diff_stride, txfm_param->tx_type,
txfm_param->bd);
}
} | subq $0x28, %rsp
movq %rdi, 0x20(%rsp)
movq %rsi, 0x18(%rsp)
movl %edx, 0x14(%rsp)
movq %rcx, 0x8(%rsp)
movq 0x8(%rsp), %rax
movzbl 0x1(%rax), %eax
movl %eax, %ecx
leaq 0x3ba5a7(%rip), %rax # 0xba5d50
movq (%rax,%rcx,8), %rax
movq %rax, (%rsp)
movq 0x8(%rsp), %rax
cmpl $0x0, 0x4(%rax)
je 0x7eb7e4
movq 0x8(%rsp), %rax
movzbl 0x1(%rax), %eax
cmpl $0x0, %eax
jne 0x7eb7e4
movq 0x20(%rsp), %rdi
movq 0x18(%rsp), %rsi
movl 0x14(%rsp), %edx
movq 0x8(%rsp), %rcx
callq 0x164020
jmp 0x7eb80b
movq (%rsp), %rax
movq 0x20(%rsp), %rdi
movq 0x18(%rsp), %rsi
movl 0x14(%rsp), %edx
movq 0x8(%rsp), %rcx
movb (%rcx), %cl
movq 0x8(%rsp), %r8
movl 0x8(%r8), %r8d
movzbl %cl, %ecx
callq *%rax
addq $0x28, %rsp
retq
| /m-ab-s[P]aom/av1/encoder/x86/av1_fwd_txfm2d_sse4.c |
fwd_txfm_type_to_func | static inline TxfmFuncSSE2 fwd_txfm_type_to_func(TXFM_TYPE txfm_type) {
switch (txfm_type) {
case TXFM_TYPE_DCT32: return fdct32_sse4_1;
case TXFM_TYPE_DCT64: return fdct64_new_sse4_1;
case TXFM_TYPE_IDENTITY32: return idtx32x32_sse4_1;
default: assert(0);
}
return NULL;
} | movb %dil, %al
movb %al, -0x9(%rsp)
movzbl -0x9(%rsp), %eax
movl %eax, -0x10(%rsp)
subl $0x3, %eax
je 0x7eb83d
jmp 0x7eb827
movl -0x10(%rsp), %eax
subl $0x4, %eax
je 0x7eb84b
jmp 0x7eb832
movl -0x10(%rsp), %eax
subl $0xb, %eax
je 0x7eb859
jmp 0x7eb867
leaq 0x29c(%rip), %rax # 0x7ebae0
movq %rax, -0x8(%rsp)
jmp 0x7eb872
leaq 0x31e(%rip), %rax # 0x7ebb70
movq %rax, -0x8(%rsp)
jmp 0x7eb872
leaq 0x3a0(%rip), %rax # 0x7ebc00
movq %rax, -0x8(%rsp)
jmp 0x7eb872
jmp 0x7eb869
movq $0x0, -0x8(%rsp)
movq -0x8(%rsp), %rax
retq
nopl (%rax,%rax)
| /m-ab-s[P]aom/av1/encoder/x86/av1_fwd_txfm2d_sse4.c |
transpose_32 | static inline void transpose_32(int txfm_size, const __m128i *input,
__m128i *output) {
const int num_per_128 = 4;
const int row_size = txfm_size;
const int col_size = txfm_size / num_per_128;
int r, c;
// transpose each 4x4 block internally
for (r = 0; r < row_size; r += 4) {
for (c = 0; c < col_size; c++) {
transpose_32_4x4(col_size, &input[r * col_size + c],
&output[c * 4 * col_size + r / 4]);
}
}
} | subq $0x38, %rsp
movl %edi, 0x34(%rsp)
movq %rsi, 0x28(%rsp)
movq %rdx, 0x20(%rsp)
movl $0x4, 0x1c(%rsp)
movl 0x34(%rsp), %eax
movl %eax, 0x18(%rsp)
movl 0x34(%rsp), %eax
movl $0x4, %ecx
cltd
idivl %ecx
movl %eax, 0x14(%rsp)
movl $0x0, 0x10(%rsp)
movl 0x10(%rsp), %eax
cmpl 0x18(%rsp), %eax
jge 0x7ebad7
movl $0x0, 0xc(%rsp)
movl 0xc(%rsp), %eax
cmpl 0x14(%rsp), %eax
jge 0x7ebac5
movl 0x14(%rsp), %edi
movq 0x28(%rsp), %rsi
movl 0x10(%rsp), %eax
imull 0x14(%rsp), %eax
addl 0xc(%rsp), %eax
cltq
shlq $0x4, %rax
addq %rax, %rsi
movq 0x20(%rsp), %rax
movq %rax, (%rsp)
movl 0xc(%rsp), %eax
shll $0x2, %eax
imull 0x14(%rsp), %eax
movl %eax, 0x8(%rsp)
movl 0x10(%rsp), %eax
movl $0x4, %ecx
cltd
idivl %ecx
movq (%rsp), %rdx
movl %eax, %ecx
movl 0x8(%rsp), %eax
addl %ecx, %eax
cltq
shlq $0x4, %rax
addq %rax, %rdx
callq 0x7ebd40
movl 0xc(%rsp), %eax
addl $0x1, %eax
movl %eax, 0xc(%rsp)
jmp 0x7eba50
jmp 0x7ebac7
movl 0x10(%rsp), %eax
addl $0x4, %eax
movl %eax, 0x10(%rsp)
jmp 0x7eba3a
addq $0x38, %rsp
retq
nopl (%rax)
| /m-ab-s[P]aom/av1/encoder/x86/av1_txfm1d_sse4.h |
fdct64_new_sse4_1 | static void fdct64_new_sse4_1(__m128i *input, __m128i *output,
const int8_t cos_bit, const int8_t *stage_range) {
const int txfm_size = 64;
const int num_per_128 = 4;
int col_num = txfm_size / num_per_128;
(void)stage_range;
for (int col = 0; col < col_num; col++) {
av1_fdct64_sse4_1((input + col), (output + col), cos_bit, col_num, col_num);
}
} | subq $0x38, %rsp
movb %dl, %al
movq %rdi, 0x30(%rsp)
movq %rsi, 0x28(%rsp)
movb %al, 0x27(%rsp)
movq %rcx, 0x18(%rsp)
movl $0x40, 0x14(%rsp)
movl $0x4, 0x10(%rsp)
movl $0x10, 0xc(%rsp)
movl $0x0, 0x8(%rsp)
movl 0x8(%rsp), %eax
cmpl 0xc(%rsp), %eax
jge 0x7ebbf7
movq 0x30(%rsp), %rdi
movslq 0x8(%rsp), %rax
shlq $0x4, %rax
addq %rax, %rdi
movq 0x28(%rsp), %rsi
movslq 0x8(%rsp), %rax
shlq $0x4, %rax
addq %rax, %rsi
movb 0x27(%rsp), %al
movl 0xc(%rsp), %ecx
movl 0xc(%rsp), %r8d
movsbl %al, %edx
callq 0xa64110
movl 0x8(%rsp), %eax
addl $0x1, %eax
movl %eax, 0x8(%rsp)
jmp 0x7ebba9
addq $0x38, %rsp
retq
nopl (%rax)
| /m-ab-s[P]aom/av1/encoder/x86/av1_fwd_txfm2d_sse4.c |
transpose_32_4x4 | static inline void transpose_32_4x4(int stride, const __m128i *input,
__m128i *output) {
__m128i temp0 = _mm_unpacklo_epi32(input[0 * stride], input[2 * stride]);
__m128i temp1 = _mm_unpackhi_epi32(input[0 * stride], input[2 * stride]);
__m128i temp2 = _mm_unpacklo_epi32(input[1 * stride], input[3 * stride]);
__m128i temp3 = _mm_unpackhi_epi32(input[1 * stride], input[3 * stride]);
output[0 * stride] = _mm_unpacklo_epi32(temp0, temp2);
output[1 * stride] = _mm_unpackhi_epi32(temp0, temp2);
output[2 * stride] = _mm_unpacklo_epi32(temp1, temp3);
output[3 * stride] = _mm_unpackhi_epi32(temp1, temp3);
} | subq $0xe8, %rsp
movl %edi, -0x24(%rsp)
movq %rsi, -0x30(%rsp)
movq %rdx, -0x38(%rsp)
movq -0x30(%rsp), %rax
movl -0x24(%rsp), %ecx
movaps (%rax), %xmm1
addl %ecx, %ecx
movslq %ecx, %rcx
shlq $0x4, %rcx
movaps (%rax,%rcx), %xmm0
movaps %xmm1, 0x50(%rsp)
movaps %xmm0, 0x40(%rsp)
movaps 0x50(%rsp), %xmm0
movaps 0x40(%rsp), %xmm1
punpckldq %xmm1, %xmm0 # xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1]
movaps %xmm0, -0x50(%rsp)
movq -0x30(%rsp), %rax
movl -0x24(%rsp), %ecx
movaps (%rax), %xmm1
addl %ecx, %ecx
movslq %ecx, %rcx
shlq $0x4, %rcx
movaps (%rax,%rcx), %xmm0
movaps %xmm1, 0xd0(%rsp)
movaps %xmm0, 0xc0(%rsp)
movaps 0xd0(%rsp), %xmm0
movaps 0xc0(%rsp), %xmm1
punpckhdq %xmm1, %xmm0 # xmm0 = xmm0[2],xmm1[2],xmm0[3],xmm1[3]
movaps %xmm0, -0x60(%rsp)
movq -0x30(%rsp), %rax
movslq -0x24(%rsp), %rcx
movl %ecx, %edx
shlq $0x4, %rcx
movaps (%rax,%rcx), %xmm1
movl %edx, %ecx
leal (%rcx,%rcx,2), %ecx
movslq %ecx, %rcx
shlq $0x4, %rcx
movaps (%rax,%rcx), %xmm0
movaps %xmm1, 0x30(%rsp)
movaps %xmm0, 0x20(%rsp)
movaps 0x30(%rsp), %xmm0
movaps 0x20(%rsp), %xmm1
punpckldq %xmm1, %xmm0 # xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1]
movaps %xmm0, -0x70(%rsp)
movq -0x30(%rsp), %rax
movslq -0x24(%rsp), %rcx
movl %ecx, %edx
shlq $0x4, %rcx
movaps (%rax,%rcx), %xmm1
movl %edx, %ecx
leal (%rcx,%rcx,2), %ecx
movslq %ecx, %rcx
shlq $0x4, %rcx
movaps (%rax,%rcx), %xmm0
movaps %xmm1, 0xb0(%rsp)
movaps %xmm0, 0xa0(%rsp)
movaps 0xb0(%rsp), %xmm0
movaps 0xa0(%rsp), %xmm1
punpckhdq %xmm1, %xmm0 # xmm0 = xmm0[2],xmm1[2],xmm0[3],xmm1[3]
movaps %xmm0, -0x80(%rsp)
movaps -0x50(%rsp), %xmm1
movaps -0x70(%rsp), %xmm0
movaps %xmm1, 0x10(%rsp)
movaps %xmm0, (%rsp)
movaps 0x10(%rsp), %xmm0
movaps (%rsp), %xmm1
punpckldq %xmm1, %xmm0 # xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1]
movq -0x38(%rsp), %rax
movaps %xmm0, (%rax)
movaps -0x50(%rsp), %xmm1
movaps -0x70(%rsp), %xmm0
movaps %xmm1, 0x90(%rsp)
movaps %xmm0, 0x80(%rsp)
movaps 0x90(%rsp), %xmm0
movaps 0x80(%rsp), %xmm1
punpckhdq %xmm1, %xmm0 # xmm0 = xmm0[2],xmm1[2],xmm0[3],xmm1[3]
movq -0x38(%rsp), %rax
movslq -0x24(%rsp), %rcx
shlq $0x4, %rcx
movaps %xmm0, (%rax,%rcx)
movaps -0x60(%rsp), %xmm1
movaps -0x80(%rsp), %xmm0
movaps %xmm1, -0x10(%rsp)
movaps %xmm0, -0x20(%rsp)
movaps -0x10(%rsp), %xmm0
movaps -0x20(%rsp), %xmm1
punpckldq %xmm1, %xmm0 # xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1]
movq -0x38(%rsp), %rax
movl -0x24(%rsp), %ecx
addl %ecx, %ecx
movslq %ecx, %rcx
shlq $0x4, %rcx
movaps %xmm0, (%rax,%rcx)
movaps -0x60(%rsp), %xmm1
movaps -0x80(%rsp), %xmm0
movaps %xmm1, 0x70(%rsp)
movaps %xmm0, 0x60(%rsp)
movaps 0x70(%rsp), %xmm0
movaps 0x60(%rsp), %xmm1
punpckhdq %xmm1, %xmm0 # xmm0 = xmm0[2],xmm1[2],xmm0[3],xmm1[3]
movq -0x38(%rsp), %rax
imull $0x3, -0x24(%rsp), %ecx
movslq %ecx, %rcx
shlq $0x4, %rcx
addq %rcx, %rax
movdqa %xmm0, (%rax)
addq $0xe8, %rsp
retq
nopl (%rax)
| /m-ab-s[P]aom/av1/encoder/x86/av1_txfm1d_sse4.h |
lowbd_fwd_txfm2d_64x64_sse4_1 | static void lowbd_fwd_txfm2d_64x64_sse4_1(const int16_t *input, int32_t *output,
int stride, TX_TYPE tx_type, int bd) {
(void)bd;
(void)tx_type;
assert(tx_type == DCT_DCT);
const TX_SIZE tx_size = TX_64X64;
__m128i buf0[64], buf1[512];
const int8_t *shift = av1_fwd_txfm_shift_ls[tx_size];
const int txw_idx = get_txw_idx(tx_size);
const int txh_idx = get_txh_idx(tx_size);
const int cos_bit_col = av1_fwd_cos_bit_col[txw_idx][txh_idx];
const int cos_bit_row = av1_fwd_cos_bit_row[txw_idx][txh_idx];
const int width = tx_size_wide[tx_size];
const int height = tx_size_high[tx_size];
const transform_1d_sse2 col_txfm = av1_fdct8x64_new_sse2;
const int width_div8 = (width >> 3);
const int height_div8 = (height >> 3);
for (int i = 0; i < width_div8; i++) {
load_buffer_16bit_to_16bit(input + 8 * i, stride, buf0, height);
round_shift_16bit(buf0, height, shift[0]);
col_txfm(buf0, buf0, cos_bit_col);
round_shift_16bit(buf0, height, shift[1]);
for (int j = 0; j < AOMMIN(4, height_div8); ++j) {
transpose_16bit_8x8(buf0 + j * 8, buf1 + j * width + 8 * i);
}
}
for (int i = 0; i < AOMMIN(4, height_div8); i++) {
__m128i bufA[64];
__m128i bufB[64];
__m128i *buf = buf1 + width * i;
for (int j = 0; j < width; ++j) {
bufA[j] = _mm_cvtepi16_epi32(buf[j]);
bufB[j] = _mm_cvtepi16_epi32(_mm_unpackhi_epi64(buf[j], buf[j]));
}
av1_fdct64_sse4_1(bufA, bufA, cos_bit_row, 1, 1);
av1_fdct64_sse4_1(bufB, bufB, cos_bit_row, 1, 1);
av1_round_shift_array_32_sse4_1(bufA, bufA, 32, -shift[2]);
av1_round_shift_array_32_sse4_1(bufB, bufB, 32, -shift[2]);
store_output_32bit_w8(output + i * 8, bufA, bufB, 32, 32);
}
} | subq $0x2cb8, %rsp # imm = 0x2CB8
movb %cl, %al
movq %rdi, 0x2c68(%rsp)
movq %rsi, 0x2c60(%rsp)
movl %edx, 0x2c5c(%rsp)
movb %al, 0x2c5b(%rsp)
movl %r8d, 0x2c54(%rsp)
movb $0x4, 0x2c53(%rsp)
leaq 0x3b9b92(%rip), %rax # 0xba5b10
movq 0x20(%rax), %rax
movq %rax, 0x848(%rsp)
movl $0x4, %edi
callq 0x7ecb90
movl %eax, 0x844(%rsp)
movl $0x4, %edi
callq 0x7ecbb0
movl %eax, 0x840(%rsp)
movslq 0x844(%rsp), %rcx
leaq 0x32b055(%rip), %rax # 0xb17010
imulq $0x5, %rcx, %rcx
addq %rcx, %rax
movslq 0x840(%rsp), %rcx
movsbl (%rax,%rcx), %eax
movl %eax, 0x83c(%rsp)
movslq 0x844(%rsp), %rcx
leaq 0x32b04c(%rip), %rax # 0xb17030
imulq $0x5, %rcx, %rcx
addq %rcx, %rax
movslq 0x840(%rsp), %rcx
movsbl (%rax,%rcx), %eax
movl %eax, 0x838(%rsp)
movl 0x32dfdc(%rip), %eax # 0xb19fe0
movl %eax, 0x834(%rsp)
movl 0x32e01f(%rip), %eax # 0xb1a030
movl %eax, 0x830(%rsp)
leaq 0x235c61(%rip), %rax # 0xa21c80
movq %rax, 0x828(%rsp)
movl $0x8, 0x824(%rsp)
movl $0x8, 0x820(%rsp)
movl $0x0, 0x81c(%rsp)
cmpl $0x8, 0x81c(%rsp)
jge 0x7ec171
movq 0x2c68(%rsp), %rdi
movl 0x81c(%rsp), %eax
shll $0x3, %eax
cltq
shlq %rax
addq %rax, %rdi
movl 0x2c5c(%rsp), %esi
leaq 0x2850(%rsp), %rdx
movl $0x40, %ecx
callq 0x7ecbd0
leaq 0x2850(%rsp), %rdi
movq 0x848(%rsp), %rax
movsbl (%rax), %edx
movl $0x40, %esi
callq 0x7ecc40
leaq 0x2850(%rsp), %rdi
leaq 0x2850(%rsp), %rsi
movl 0x83c(%rsp), %eax
movsbl %al, %edx
callq 0xa21c80
leaq 0x2850(%rsp), %rdi
movq 0x848(%rsp), %rax
movsbl 0x1(%rax), %edx
movl $0x40, %esi
callq 0x7ecc40
movl $0x0, 0x818(%rsp)
cmpl $0x4, 0x818(%rsp)
jge 0x7ec159
leaq 0x2850(%rsp), %rdi
movl 0x818(%rsp), %eax
shll $0x3, %eax
cltq
shlq $0x4, %rax
addq %rax, %rdi
leaq 0x850(%rsp), %rsi
movl 0x818(%rsp), %eax
shll $0x6, %eax
cltq
shlq $0x4, %rax
addq %rax, %rsi
movl 0x81c(%rsp), %eax
shll $0x3, %eax
cltq
shlq $0x4, %rax
addq %rax, %rsi
callq 0x7ece50
movl 0x818(%rsp), %eax
addl $0x1, %eax
movl %eax, 0x818(%rsp)
jmp 0x7ec0ee
jmp 0x7ec15b
movl 0x81c(%rsp), %eax
addl $0x1, %eax
movl %eax, 0x81c(%rsp)
jmp 0x7ec048
movl $0x0, 0x814(%rsp)
cmpl $0x4, 0x814(%rsp)
jge 0x7ec344
leaq 0x850(%rsp), %rax
movl 0x814(%rsp), %ecx
shll $0x6, %ecx
movslq %ecx, %rcx
shlq $0x4, %rcx
addq %rcx, %rax
movq %rax, 0x8(%rsp)
movl $0x0, 0x4(%rsp)
cmpl $0x40, 0x4(%rsp)
jge 0x7ec260
movq 0x8(%rsp), %rax
movslq 0x4(%rsp), %rcx
shlq $0x4, %rcx
movaps (%rax,%rcx), %xmm0
movaps %xmm0, 0x2c80(%rsp)
pmovsxwd 0x2c80(%rsp), %xmm0
movslq 0x4(%rsp), %rax
shlq $0x4, %rax
movaps %xmm0, 0x410(%rsp,%rax)
movq 0x8(%rsp), %rax
movslq 0x4(%rsp), %rcx
shlq $0x4, %rcx
movaps (%rax,%rcx), %xmm0
movaps %xmm0, 0x2ca0(%rsp)
movaps %xmm0, 0x2c90(%rsp)
movaps 0x2ca0(%rsp), %xmm0
movaps 0x2c90(%rsp), %xmm1
punpckhqdq %xmm1, %xmm0 # xmm0 = xmm0[1],xmm1[1]
movaps %xmm0, 0x2c70(%rsp)
pmovsxwd 0x2c70(%rsp), %xmm0
movslq 0x4(%rsp), %rcx
leaq 0x10(%rsp), %rax
shlq $0x4, %rcx
addq %rcx, %rax
movdqa %xmm0, (%rax)
movl 0x4(%rsp), %eax
addl $0x1, %eax
movl %eax, 0x4(%rsp)
jmp 0x7ec1b3
leaq 0x410(%rsp), %rdi
leaq 0x410(%rsp), %rsi
movl 0x838(%rsp), %eax
movl $0x1, %r8d
movsbl %al, %edx
movl %r8d, %ecx
callq 0xa64110
leaq 0x10(%rsp), %rdi
leaq 0x10(%rsp), %rsi
movl 0x838(%rsp), %eax
movl $0x1, %r8d
movsbl %al, %edx
movl %r8d, %ecx
callq 0xa64110
leaq 0x410(%rsp), %rdi
leaq 0x410(%rsp), %rsi
movq 0x848(%rsp), %rax
movsbl 0x2(%rax), %eax
xorl %ecx, %ecx
subl %eax, %ecx
movl $0x20, %edx
callq 0x7eb910
leaq 0x10(%rsp), %rdi
leaq 0x10(%rsp), %rsi
movq 0x848(%rsp), %rax
movsbl 0x2(%rax), %eax
xorl %ecx, %ecx
subl %eax, %ecx
movl $0x20, %edx
callq 0x7eb910
movq 0x2c60(%rsp), %rdi
movl 0x814(%rsp), %eax
shll $0x3, %eax
cltq
shlq $0x2, %rax
addq %rax, %rdi
leaq 0x410(%rsp), %rsi
leaq 0x10(%rsp), %rdx
movl $0x20, %r8d
movl %r8d, %ecx
callq 0x7ed3a0
movl 0x814(%rsp), %eax
addl $0x1, %eax
movl %eax, 0x814(%rsp)
jmp 0x7ec17c
addq $0x2cb8, %rsp # imm = 0x2CB8
retq
nopl (%rax)
| /m-ab-s[P]aom/av1/encoder/x86/av1_fwd_txfm2d_sse4.c |
lowbd_fwd_txfm2d_64x32_sse4_1 | static void lowbd_fwd_txfm2d_64x32_sse4_1(const int16_t *input, int32_t *output,
int stride, TX_TYPE tx_type, int bd) {
(void)bd;
const TX_SIZE tx_size = TX_64X32;
__m128i buf0[64], buf1[256];
const int8_t *shift = av1_fwd_txfm_shift_ls[tx_size];
const int txw_idx = get_txw_idx(tx_size);
const int txh_idx = get_txh_idx(tx_size);
const int cos_bit_col = av1_fwd_cos_bit_col[txw_idx][txh_idx];
const int cos_bit_row = av1_fwd_cos_bit_row[txw_idx][txh_idx];
const int width = tx_size_wide[tx_size];
const int height = tx_size_high[tx_size];
const transform_1d_sse2 col_txfm = col_txfm8x32_arr[tx_type];
const int width_div8 = (width >> 3);
const int height_div8 = (height >> 3);
for (int i = 0; i < width_div8; i++) {
load_buffer_16bit_to_16bit(input + 8 * i, stride, buf0, height);
round_shift_16bit(buf0, height, shift[0]);
col_txfm(buf0, buf0, cos_bit_col);
round_shift_16bit(buf0, height, shift[1]);
for (int j = 0; j < AOMMIN(4, height_div8); ++j) {
transpose_16bit_8x8(buf0 + j * 8, buf1 + j * width + 8 * i);
}
}
assert(tx_type == DCT_DCT);
for (int i = 0; i < AOMMIN(4, height_div8); i++) {
__m128i bufA[64];
__m128i bufB[64];
__m128i *buf = buf1 + width * i;
for (int j = 0; j < width; ++j) {
bufA[j] = _mm_cvtepi16_epi32(buf[j]);
bufB[j] = _mm_cvtepi16_epi32(_mm_unpackhi_epi64(buf[j], buf[j]));
}
av1_fdct64_sse4_1(bufA, bufA, cos_bit_row, 1, 1);
av1_fdct64_sse4_1(bufB, bufB, cos_bit_row, 1, 1);
av1_round_shift_rect_array_32_sse4_1(bufA, bufA, 32, -shift[2], NewSqrt2);
av1_round_shift_rect_array_32_sse4_1(bufB, bufB, 32, -shift[2], NewSqrt2);
store_output_32bit_w8(output + i * 8, bufA, bufB, 32, 32);
}
} | subq $0x1cb8, %rsp # imm = 0x1CB8
movb %cl, %al
movq %rdi, 0x1c68(%rsp)
movq %rsi, 0x1c60(%rsp)
movl %edx, 0x1c5c(%rsp)
movb %al, 0x1c5b(%rsp)
movl %r8d, 0x1c54(%rsp)
movb $0xc, 0x1c53(%rsp)
leaq 0x3b9372(%rip), %rax # 0xba5b10
movq 0x60(%rax), %rax
movq %rax, 0x848(%rsp)
movl $0xc, %edi
callq 0x7ecb90
movl %eax, 0x844(%rsp)
movl $0xc, %edi
callq 0x7ecbb0
movl %eax, 0x840(%rsp)
movslq 0x844(%rsp), %rcx
leaq 0x32a835(%rip), %rax # 0xb17010
imulq $0x5, %rcx, %rcx
addq %rcx, %rax
movslq 0x840(%rsp), %rcx
movsbl (%rax,%rcx), %eax
movl %eax, 0x83c(%rsp)
movslq 0x844(%rsp), %rcx
leaq 0x32a82c(%rip), %rax # 0xb17030
imulq $0x5, %rcx, %rcx
addq %rcx, %rax
movslq 0x840(%rsp), %rcx
movsbl (%rax,%rcx), %eax
movl %eax, 0x838(%rsp)
movl 0x32d7dc(%rip), %eax # 0xb1a000
movl %eax, 0x834(%rsp)
movl 0x32d81f(%rip), %eax # 0xb1a050
movl %eax, 0x830(%rsp)
movzbl 0x1c5b(%rsp), %eax
movl %eax, %ecx
leaq 0x3b06f7(%rip), %rax # 0xb9cf40
movq (%rax,%rcx,8), %rax
movq %rax, 0x828(%rsp)
movl $0x8, 0x824(%rsp)
movl $0x4, 0x820(%rsp)
movl $0x0, 0x81c(%rsp)
cmpl $0x8, 0x81c(%rsp)
jge 0x7ec9a4
movq 0x1c68(%rsp), %rdi
movl 0x81c(%rsp), %eax
shll $0x3, %eax
cltq
shlq %rax
addq %rax, %rdi
movl 0x1c5c(%rsp), %esi
leaq 0x1850(%rsp), %rdx
movl $0x20, %ecx
callq 0x7ecbd0
leaq 0x1850(%rsp), %rdi
movq 0x848(%rsp), %rax
movsbl (%rax), %edx
movl $0x20, %esi
callq 0x7ecc40
movq 0x828(%rsp), %rax
leaq 0x1850(%rsp), %rdi
leaq 0x1850(%rsp), %rsi
movl 0x83c(%rsp), %ecx
movsbl %cl, %edx
callq *%rax
leaq 0x1850(%rsp), %rdi
movq 0x848(%rsp), %rax
movsbl 0x1(%rax), %edx
movl $0x20, %esi
callq 0x7ecc40
movl $0x0, 0x818(%rsp)
cmpl $0x4, 0x818(%rsp)
jge 0x7ec98c
leaq 0x1850(%rsp), %rdi
movl 0x818(%rsp), %eax
shll $0x3, %eax
cltq
shlq $0x4, %rax
addq %rax, %rdi
leaq 0x850(%rsp), %rsi
movl 0x818(%rsp), %eax
shll $0x6, %eax
cltq
shlq $0x4, %rax
addq %rax, %rsi
movl 0x81c(%rsp), %eax
shll $0x3, %eax
cltq
shlq $0x4, %rax
addq %rax, %rsi
callq 0x7ece50
movl 0x818(%rsp), %eax
addl $0x1, %eax
movl %eax, 0x818(%rsp)
jmp 0x7ec921
jmp 0x7ec98e
movl 0x81c(%rsp), %eax
addl $0x1, %eax
movl %eax, 0x81c(%rsp)
jmp 0x7ec876
movl $0x0, 0x814(%rsp)
cmpl $0x4, 0x814(%rsp)
jge 0x7ecb83
leaq 0x850(%rsp), %rax
movl 0x814(%rsp), %ecx
shll $0x6, %ecx
movslq %ecx, %rcx
shlq $0x4, %rcx
addq %rcx, %rax
movq %rax, 0x8(%rsp)
movl $0x0, 0x4(%rsp)
cmpl $0x40, 0x4(%rsp)
jge 0x7eca93
movq 0x8(%rsp), %rax
movslq 0x4(%rsp), %rcx
shlq $0x4, %rcx
movaps (%rax,%rcx), %xmm0
movaps %xmm0, 0x1c80(%rsp)
pmovsxwd 0x1c80(%rsp), %xmm0
movslq 0x4(%rsp), %rax
shlq $0x4, %rax
movaps %xmm0, 0x410(%rsp,%rax)
movq 0x8(%rsp), %rax
movslq 0x4(%rsp), %rcx
shlq $0x4, %rcx
movaps (%rax,%rcx), %xmm0
movaps %xmm0, 0x1ca0(%rsp)
movaps %xmm0, 0x1c90(%rsp)
movaps 0x1ca0(%rsp), %xmm0
movaps 0x1c90(%rsp), %xmm1
punpckhqdq %xmm1, %xmm0 # xmm0 = xmm0[1],xmm1[1]
movaps %xmm0, 0x1c70(%rsp)
pmovsxwd 0x1c70(%rsp), %xmm0
movslq 0x4(%rsp), %rcx
leaq 0x10(%rsp), %rax
shlq $0x4, %rcx
addq %rcx, %rax
movdqa %xmm0, (%rax)
movl 0x4(%rsp), %eax
addl $0x1, %eax
movl %eax, 0x4(%rsp)
jmp 0x7ec9e6
leaq 0x410(%rsp), %rdi
leaq 0x410(%rsp), %rsi
movl 0x838(%rsp), %eax
movl $0x1, %r8d
movsbl %al, %edx
movl %r8d, %ecx
callq 0xa64110
leaq 0x10(%rsp), %rdi
leaq 0x10(%rsp), %rsi
movl 0x838(%rsp), %eax
movl $0x1, %r8d
movsbl %al, %edx
movl %r8d, %ecx
callq 0xa64110
leaq 0x410(%rsp), %rdi
leaq 0x410(%rsp), %rsi
movq 0x848(%rsp), %rax
movsbl 0x2(%rax), %eax
xorl %ecx, %ecx
subl %eax, %ecx
movl $0x20, %edx
movl $0x16a1, %r8d # imm = 0x16A1
callq 0x7ed4a0
leaq 0x10(%rsp), %rdi
leaq 0x10(%rsp), %rsi
movq 0x848(%rsp), %rax
movsbl 0x2(%rax), %eax
xorl %ecx, %ecx
subl %eax, %ecx
movl $0x20, %edx
movl $0x16a1, %r8d # imm = 0x16A1
callq 0x7ed4a0
movq 0x1c60(%rsp), %rdi
movl 0x814(%rsp), %eax
shll $0x3, %eax
cltq
shlq $0x2, %rax
addq %rax, %rdi
leaq 0x410(%rsp), %rsi
leaq 0x10(%rsp), %rdx
movl $0x20, %r8d
movl %r8d, %ecx
callq 0x7ed3a0
movl 0x814(%rsp), %eax
addl $0x1, %eax
movl %eax, 0x814(%rsp)
jmp 0x7ec9af
addq $0x1cb8, %rsp # imm = 0x1CB8
retq
nopl (%rax,%rax)
| /m-ab-s[P]aom/av1/encoder/x86/av1_fwd_txfm2d_sse4.c |
transpose_32bit_4x4 | static inline void transpose_32bit_4x4(const __m128i *const in,
__m128i *const out) {
// Unpack 32 bit elements. Goes from:
// in[0]: 00 01 02 03
// in[1]: 10 11 12 13
// in[2]: 20 21 22 23
// in[3]: 30 31 32 33
// to:
// a0: 00 10 01 11
// a1: 20 30 21 31
// a2: 02 12 03 13
// a3: 22 32 23 33
const __m128i a0 = _mm_unpacklo_epi32(in[0], in[1]);
const __m128i a1 = _mm_unpacklo_epi32(in[2], in[3]);
const __m128i a2 = _mm_unpackhi_epi32(in[0], in[1]);
const __m128i a3 = _mm_unpackhi_epi32(in[2], in[3]);
// Unpack 64 bit elements resulting in:
// out[0]: 00 10 20 30
// out[1]: 01 11 21 31
// out[2]: 02 12 22 32
// out[3]: 03 13 23 33
out[0] = _mm_unpacklo_epi64(a0, a1);
out[1] = _mm_unpackhi_epi64(a0, a1);
out[2] = _mm_unpacklo_epi64(a2, a3);
out[3] = _mm_unpackhi_epi64(a2, a3);
} | subq $0xd8, %rsp
movq %rdi, -0x38(%rsp)
movq %rsi, -0x40(%rsp)
movq -0x38(%rsp), %rax
movaps (%rax), %xmm1
movaps 0x10(%rax), %xmm0
movaps %xmm1, (%rsp)
movaps %xmm0, -0x10(%rsp)
movaps (%rsp), %xmm0
movaps -0x10(%rsp), %xmm1
punpckldq %xmm1, %xmm0 # xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1]
movaps %xmm0, -0x50(%rsp)
movq -0x38(%rsp), %rax
movaps 0x20(%rax), %xmm1
movaps 0x30(%rax), %xmm0
movaps %xmm1, -0x20(%rsp)
movaps %xmm0, -0x30(%rsp)
movaps -0x20(%rsp), %xmm0
movaps -0x30(%rsp), %xmm1
punpckldq %xmm1, %xmm0 # xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1]
movaps %xmm0, -0x60(%rsp)
movq -0x38(%rsp), %rax
movaps (%rax), %xmm1
movaps 0x10(%rax), %xmm0
movaps %xmm1, 0x40(%rsp)
movaps %xmm0, 0x30(%rsp)
movaps 0x40(%rsp), %xmm0
movaps 0x30(%rsp), %xmm1
punpckhdq %xmm1, %xmm0 # xmm0 = xmm0[2],xmm1[2],xmm0[3],xmm1[3]
movaps %xmm0, -0x70(%rsp)
movq -0x38(%rsp), %rax
movaps 0x20(%rax), %xmm1
movaps 0x30(%rax), %xmm0
movaps %xmm1, 0x20(%rsp)
movaps %xmm0, 0x10(%rsp)
movaps 0x20(%rsp), %xmm0
movaps 0x10(%rsp), %xmm1
punpckhdq %xmm1, %xmm0 # xmm0 = xmm0[2],xmm1[2],xmm0[3],xmm1[3]
movaps %xmm0, -0x80(%rsp)
movaps -0x50(%rsp), %xmm1
movaps -0x60(%rsp), %xmm0
movaps %xmm1, 0x80(%rsp)
movaps %xmm0, 0x70(%rsp)
movaps 0x80(%rsp), %xmm0
movaps 0x70(%rsp), %xmm1
punpcklqdq %xmm1, %xmm0 # xmm0 = xmm0[0],xmm1[0]
movq -0x40(%rsp), %rax
movaps %xmm0, (%rax)
movaps -0x50(%rsp), %xmm1
movaps -0x60(%rsp), %xmm0
movaps %xmm1, 0xc0(%rsp)
movaps %xmm0, 0xb0(%rsp)
movaps 0xc0(%rsp), %xmm0
movaps 0xb0(%rsp), %xmm1
punpckhqdq %xmm1, %xmm0 # xmm0 = xmm0[1],xmm1[1]
movq -0x40(%rsp), %rax
movaps %xmm0, 0x10(%rax)
movaps -0x70(%rsp), %xmm1
movaps -0x80(%rsp), %xmm0
movaps %xmm1, 0x60(%rsp)
movaps %xmm0, 0x50(%rsp)
movaps 0x60(%rsp), %xmm0
movaps 0x50(%rsp), %xmm1
punpcklqdq %xmm1, %xmm0 # xmm0 = xmm0[0],xmm1[0]
movq -0x40(%rsp), %rax
movaps %xmm0, 0x20(%rax)
movaps -0x70(%rsp), %xmm1
movaps -0x80(%rsp), %xmm0
movaps %xmm1, 0xa0(%rsp)
movaps %xmm0, 0x90(%rsp)
movaps 0xa0(%rsp), %xmm0
movaps 0x90(%rsp), %xmm1
punpckhqdq %xmm1, %xmm0 # xmm0 = xmm0[1],xmm1[1]
movq -0x40(%rsp), %rax
movdqa %xmm0, 0x30(%rax)
addq $0xd8, %rsp
retq
nopw (%rax,%rax)
| /m-ab-s[P]aom/aom_dsp/x86/transpose_sse2.h |
load_buffer_4x4 | static inline void load_buffer_4x4(const int16_t *input, __m128i *in,
int stride, int flipud, int fliplr,
int shift) {
if (!flipud) {
in[0] = _mm_loadl_epi64((const __m128i *)(input + 0 * stride));
in[1] = _mm_loadl_epi64((const __m128i *)(input + 1 * stride));
in[2] = _mm_loadl_epi64((const __m128i *)(input + 2 * stride));
in[3] = _mm_loadl_epi64((const __m128i *)(input + 3 * stride));
} else {
in[0] = _mm_loadl_epi64((const __m128i *)(input + 3 * stride));
in[1] = _mm_loadl_epi64((const __m128i *)(input + 2 * stride));
in[2] = _mm_loadl_epi64((const __m128i *)(input + 1 * stride));
in[3] = _mm_loadl_epi64((const __m128i *)(input + 0 * stride));
}
if (fliplr) {
in[0] = _mm_shufflelo_epi16(in[0], 0x1b);
in[1] = _mm_shufflelo_epi16(in[1], 0x1b);
in[2] = _mm_shufflelo_epi16(in[2], 0x1b);
in[3] = _mm_shufflelo_epi16(in[3], 0x1b);
}
in[0] = _mm_cvtepi16_epi32(in[0]);
in[1] = _mm_cvtepi16_epi32(in[1]);
in[2] = _mm_cvtepi16_epi32(in[2]);
in[3] = _mm_cvtepi16_epi32(in[3]);
in[0] = _mm_slli_epi32(in[0], shift);
in[1] = _mm_slli_epi32(in[1], shift);
in[2] = _mm_slli_epi32(in[2], shift);
in[3] = _mm_slli_epi32(in[3], shift);
} | subq $0x168, %rsp # imm = 0x168
movq %rdi, -0x68(%rsp)
movq %rsi, -0x70(%rsp)
movl %edx, -0x74(%rsp)
movl %ecx, -0x78(%rsp)
movl %r8d, -0x7c(%rsp)
movl %r9d, -0x80(%rsp)
cmpl $0x0, -0x78(%rsp)
jne 0x7ef1ef
movq -0x68(%rsp), %rax
movq %rax, 0x98(%rsp)
movq 0x98(%rsp), %rax
movq (%rax), %xmm0
movaps %xmm0, 0x80(%rsp)
movaps 0x80(%rsp), %xmm0
movq -0x70(%rsp), %rax
movaps %xmm0, (%rax)
movq -0x68(%rsp), %rax
movslq -0x74(%rsp), %rcx
leaq (%rax,%rcx,2), %rax
movq %rax, 0x78(%rsp)
movq 0x78(%rsp), %rax
movq (%rax), %xmm0
movaps %xmm0, 0x60(%rsp)
movaps 0x60(%rsp), %xmm0
movq -0x70(%rsp), %rax
movaps %xmm0, 0x10(%rax)
movq -0x68(%rsp), %rax
movl -0x74(%rsp), %ecx
addl %ecx, %ecx
movslq %ecx, %rcx
leaq (%rax,%rcx,2), %rax
movq %rax, 0x58(%rsp)
movq 0x58(%rsp), %rax
movq (%rax), %xmm0
movaps %xmm0, 0x40(%rsp)
movaps 0x40(%rsp), %xmm0
movq -0x70(%rsp), %rax
movaps %xmm0, 0x20(%rax)
movq -0x68(%rsp), %rax
movl -0x74(%rsp), %edx
movl %edx, %ecx
leal (%rcx,%rcx,2), %ecx
movslq %ecx, %rcx
leaq (%rax,%rcx,2), %rax
movq %rax, 0x38(%rsp)
movq 0x38(%rsp), %rax
movq (%rax), %xmm0
movdqa %xmm0, 0x20(%rsp)
movdqa 0x20(%rsp), %xmm0
movq -0x70(%rsp), %rax
movdqa %xmm0, 0x30(%rax)
jmp 0x7ef2ad
movq -0x68(%rsp), %rax
movl -0x74(%rsp), %edx
movl %edx, %ecx
leal (%rcx,%rcx,2), %ecx
movslq %ecx, %rcx
leaq (%rax,%rcx,2), %rax
movq %rax, 0x18(%rsp)
movq 0x18(%rsp), %rax
movq (%rax), %xmm0
movaps %xmm0, (%rsp)
movaps (%rsp), %xmm0
movq -0x70(%rsp), %rax
movaps %xmm0, (%rax)
movq -0x68(%rsp), %rax
movl -0x74(%rsp), %ecx
addl %ecx, %ecx
movslq %ecx, %rcx
leaq (%rax,%rcx,2), %rax
movq %rax, -0x8(%rsp)
movq -0x8(%rsp), %rax
movq (%rax), %xmm0
movaps %xmm0, -0x20(%rsp)
movaps -0x20(%rsp), %xmm0
movq -0x70(%rsp), %rax
movaps %xmm0, 0x10(%rax)
movq -0x68(%rsp), %rax
movslq -0x74(%rsp), %rcx
leaq (%rax,%rcx,2), %rax
movq %rax, -0x28(%rsp)
movq -0x28(%rsp), %rax
movq (%rax), %xmm0
movaps %xmm0, -0x40(%rsp)
movaps -0x40(%rsp), %xmm0
movq -0x70(%rsp), %rax
movaps %xmm0, 0x20(%rax)
movq -0x68(%rsp), %rax
movq %rax, -0x48(%rsp)
movq -0x48(%rsp), %rax
movq (%rax), %xmm0
movdqa %xmm0, -0x60(%rsp)
movdqa -0x60(%rsp), %xmm0
movq -0x70(%rsp), %rax
movdqa %xmm0, 0x30(%rax)
cmpl $0x0, -0x7c(%rsp)
je 0x7ef300
movq -0x70(%rsp), %rax
movaps (%rax), %xmm0
pshuflw $0x1b, %xmm0, %xmm0 # xmm0 = xmm0[3,2,1,0,4,5,6,7]
movaps %xmm0, (%rax)
movq -0x70(%rsp), %rax
movaps 0x10(%rax), %xmm0
pshuflw $0x1b, %xmm0, %xmm0 # xmm0 = xmm0[3,2,1,0,4,5,6,7]
movaps %xmm0, 0x10(%rax)
movq -0x70(%rsp), %rax
movaps 0x20(%rax), %xmm0
pshuflw $0x1b, %xmm0, %xmm0 # xmm0 = xmm0[3,2,1,0,4,5,6,7]
movaps %xmm0, 0x20(%rax)
movq -0x70(%rsp), %rax
movaps 0x30(%rax), %xmm0
pshuflw $0x1b, %xmm0, %xmm0 # xmm0 = xmm0[3,2,1,0,4,5,6,7]
movq -0x70(%rsp), %rax
movdqa %xmm0, 0x30(%rax)
movq -0x70(%rsp), %rax
movaps (%rax), %xmm0
movaps %xmm0, 0xd0(%rsp)
pmovsxwd 0xd0(%rsp), %xmm0
movq -0x70(%rsp), %rax
movaps %xmm0, (%rax)
movq -0x70(%rsp), %rax
movaps 0x10(%rax), %xmm0
movaps %xmm0, 0xc0(%rsp)
pmovsxwd 0xc0(%rsp), %xmm0
movq -0x70(%rsp), %rax
movaps %xmm0, 0x10(%rax)
movq -0x70(%rsp), %rax
movaps 0x20(%rax), %xmm0
movaps %xmm0, 0xb0(%rsp)
pmovsxwd 0xb0(%rsp), %xmm0
movq -0x70(%rsp), %rax
movaps %xmm0, 0x20(%rax)
movq -0x70(%rsp), %rax
movaps 0x30(%rax), %xmm0
movaps %xmm0, 0xa0(%rsp)
pmovsxwd 0xa0(%rsp), %xmm0
movq -0x70(%rsp), %rax
movdqa %xmm0, 0x30(%rax)
movq -0x70(%rsp), %rax
movdqa (%rax), %xmm0
movl -0x80(%rsp), %eax
movdqa %xmm0, 0x150(%rsp)
movl %eax, 0x14c(%rsp)
movdqa 0x150(%rsp), %xmm0
movl 0x14c(%rsp), %eax
movd %eax, %xmm1
pslld %xmm1, %xmm0
movq -0x70(%rsp), %rax
movdqa %xmm0, (%rax)
movq -0x70(%rsp), %rax
movdqa 0x10(%rax), %xmm0
movl -0x80(%rsp), %eax
movdqa %xmm0, 0x130(%rsp)
movl %eax, 0x12c(%rsp)
movdqa 0x130(%rsp), %xmm0
movl 0x12c(%rsp), %eax
movd %eax, %xmm1
pslld %xmm1, %xmm0
movq -0x70(%rsp), %rax
movdqa %xmm0, 0x10(%rax)
movq -0x70(%rsp), %rax
movdqa 0x20(%rax), %xmm0
movl -0x80(%rsp), %eax
movdqa %xmm0, 0x110(%rsp)
movl %eax, 0x10c(%rsp)
movdqa 0x110(%rsp), %xmm0
movl 0x10c(%rsp), %eax
movd %eax, %xmm1
pslld %xmm1, %xmm0
movq -0x70(%rsp), %rax
movdqa %xmm0, 0x20(%rax)
movq -0x70(%rsp), %rax
movdqa 0x30(%rax), %xmm0
movl -0x80(%rsp), %eax
movdqa %xmm0, 0xf0(%rsp)
movl %eax, 0xec(%rsp)
movdqa 0xf0(%rsp), %xmm0
movl 0xec(%rsp), %eax
movd %eax, %xmm1
pslld %xmm1, %xmm0
movq -0x70(%rsp), %rax
movdqa %xmm0, 0x30(%rax)
addq $0x168, %rsp # imm = 0x168
retq
nopw %cs:(%rax,%rax)
| /m-ab-s[P]aom/av1/encoder/x86/highbd_fwd_txfm_sse4.c |
fadst4x4_sse4_1 | static void fadst4x4_sse4_1(__m128i *in, __m128i *out, int bit,
const int num_col) {
const int32_t *sinpi = sinpi_arr(bit);
const __m128i rnding = _mm_set1_epi32(1 << (bit - 1));
const __m128i sinpi1 = _mm_set1_epi32((int)sinpi[1]);
const __m128i sinpi2 = _mm_set1_epi32((int)sinpi[2]);
const __m128i sinpi3 = _mm_set1_epi32((int)sinpi[3]);
const __m128i sinpi4 = _mm_set1_epi32((int)sinpi[4]);
__m128i t;
__m128i s0, s1, s2, s3, s4, s5, s6, s7;
__m128i x0, x1, x2, x3;
__m128i u0, u1, u2, u3;
int idx = 0 * num_col;
s0 = _mm_mullo_epi32(in[idx], sinpi1);
s1 = _mm_mullo_epi32(in[idx], sinpi4);
t = _mm_add_epi32(in[idx], in[idx + num_col]);
idx += num_col;
s2 = _mm_mullo_epi32(in[idx], sinpi2);
s3 = _mm_mullo_epi32(in[idx], sinpi1);
idx += num_col;
s4 = _mm_mullo_epi32(in[idx], sinpi3);
idx += num_col;
s5 = _mm_mullo_epi32(in[idx], sinpi4);
s6 = _mm_mullo_epi32(in[idx], sinpi2);
s7 = _mm_sub_epi32(t, in[idx]);
t = _mm_add_epi32(s0, s2);
x0 = _mm_add_epi32(t, s5);
x1 = _mm_mullo_epi32(s7, sinpi3);
t = _mm_sub_epi32(s1, s3);
x2 = _mm_add_epi32(t, s6);
x3 = s4;
s0 = _mm_add_epi32(x0, x3);
s1 = x1;
s2 = _mm_sub_epi32(x2, x3);
t = _mm_sub_epi32(x2, x0);
s3 = _mm_add_epi32(t, x3);
u0 = _mm_add_epi32(s0, rnding);
u0 = _mm_srai_epi32(u0, bit);
u1 = _mm_add_epi32(s1, rnding);
u1 = _mm_srai_epi32(u1, bit);
u2 = _mm_add_epi32(s2, rnding);
u2 = _mm_srai_epi32(u2, bit);
u3 = _mm_add_epi32(s3, rnding);
u3 = _mm_srai_epi32(u3, bit);
out[0] = u0;
out[1] = u1;
out[2] = u2;
out[3] = u3;
} | subq $0x598, %rsp # imm = 0x598
movq %rdi, 0x188(%rsp)
movq %rsi, 0x180(%rsp)
movl %edx, 0x17c(%rsp)
movl %ecx, 0x178(%rsp)
movl 0x17c(%rsp), %edi
callq 0x804060
movq %rax, 0x170(%rsp)
movb 0x17c(%rsp), %cl
decb %cl
movl $0x1, %eax
shll %cl, %eax
movl %eax, 0x3ec(%rsp)
movl 0x3ec(%rsp), %eax
movl %eax, 0x50c(%rsp)
movl %eax, 0x508(%rsp)
movl %eax, 0x504(%rsp)
movl %eax, 0x500(%rsp)
movl 0x504(%rsp), %edx
movl 0x508(%rsp), %ecx
movl 0x50c(%rsp), %eax
movd 0x500(%rsp), %xmm0
pinsrd $0x1, %edx, %xmm0
pinsrd $0x2, %ecx, %xmm0
pinsrd $0x3, %eax, %xmm0
movaps %xmm0, 0x4f0(%rsp)
movaps 0x4f0(%rsp), %xmm0
movaps %xmm0, 0x160(%rsp)
movq 0x170(%rsp), %rax
movl 0x4(%rax), %eax
movl %eax, 0x3e8(%rsp)
movl 0x3e8(%rsp), %eax
movl %eax, 0x52c(%rsp)
movl %eax, 0x528(%rsp)
movl %eax, 0x524(%rsp)
movl %eax, 0x520(%rsp)
movl 0x524(%rsp), %edx
movl 0x528(%rsp), %ecx
movl 0x52c(%rsp), %eax
movd 0x520(%rsp), %xmm0
pinsrd $0x1, %edx, %xmm0
pinsrd $0x2, %ecx, %xmm0
pinsrd $0x3, %eax, %xmm0
movaps %xmm0, 0x510(%rsp)
movaps 0x510(%rsp), %xmm0
movaps %xmm0, 0x150(%rsp)
movq 0x170(%rsp), %rax
movl 0x8(%rax), %eax
movl %eax, 0x3e4(%rsp)
movl 0x3e4(%rsp), %eax
movl %eax, 0x54c(%rsp)
movl %eax, 0x548(%rsp)
movl %eax, 0x544(%rsp)
movl %eax, 0x540(%rsp)
movl 0x544(%rsp), %edx
movl 0x548(%rsp), %ecx
movl 0x54c(%rsp), %eax
movd 0x540(%rsp), %xmm0
pinsrd $0x1, %edx, %xmm0
pinsrd $0x2, %ecx, %xmm0
pinsrd $0x3, %eax, %xmm0
movaps %xmm0, 0x530(%rsp)
movaps 0x530(%rsp), %xmm0
movaps %xmm0, 0x140(%rsp)
movq 0x170(%rsp), %rax
movl 0xc(%rax), %eax
movl %eax, 0x3e0(%rsp)
movl 0x3e0(%rsp), %eax
movl %eax, 0x56c(%rsp)
movl %eax, 0x568(%rsp)
movl %eax, 0x564(%rsp)
movl %eax, 0x560(%rsp)
movl 0x564(%rsp), %edx
movl 0x568(%rsp), %ecx
movl 0x56c(%rsp), %eax
movd 0x560(%rsp), %xmm0
pinsrd $0x1, %edx, %xmm0
pinsrd $0x2, %ecx, %xmm0
pinsrd $0x3, %eax, %xmm0
movaps %xmm0, 0x550(%rsp)
movaps 0x550(%rsp), %xmm0
movaps %xmm0, 0x130(%rsp)
movq 0x170(%rsp), %rax
movl 0x10(%rax), %eax
movl %eax, 0x3dc(%rsp)
movl 0x3dc(%rsp), %eax
movl %eax, 0x594(%rsp)
movl %eax, 0x590(%rsp)
movl %eax, 0x58c(%rsp)
movl %eax, 0x588(%rsp)
movl 0x58c(%rsp), %edx
movl 0x590(%rsp), %ecx
movl 0x594(%rsp), %eax
movd 0x588(%rsp), %xmm0
pinsrd $0x1, %edx, %xmm0
pinsrd $0x2, %ecx, %xmm0
pinsrd $0x3, %eax, %xmm0
movdqa %xmm0, 0x570(%rsp)
movdqa 0x570(%rsp), %xmm0
movdqa %xmm0, 0x120(%rsp)
imull $0x0, 0x178(%rsp), %eax
movl %eax, 0xc(%rsp)
movq 0x188(%rsp), %rax
movslq 0xc(%rsp), %rcx
shlq $0x4, %rcx
addq %rcx, %rax
movdqa (%rax), %xmm1
movdqa 0x150(%rsp), %xmm0
movdqa %xmm1, 0x4e0(%rsp)
movdqa %xmm0, 0x4d0(%rsp)
movdqa 0x4e0(%rsp), %xmm0
movdqa 0x4d0(%rsp), %xmm1
pmulld %xmm1, %xmm0
movdqa %xmm0, 0x100(%rsp)
movq 0x188(%rsp), %rax
movslq 0xc(%rsp), %rcx
shlq $0x4, %rcx
addq %rcx, %rax
movdqa (%rax), %xmm1
movdqa 0x120(%rsp), %xmm0
movdqa %xmm1, 0x4c0(%rsp)
movdqa %xmm0, 0x4b0(%rsp)
movdqa 0x4c0(%rsp), %xmm0
movdqa 0x4b0(%rsp), %xmm1
pmulld %xmm1, %xmm0
movdqa %xmm0, 0xf0(%rsp)
movq 0x188(%rsp), %rax
movslq 0xc(%rsp), %rcx
shlq $0x4, %rcx
addq %rcx, %rax
movdqa (%rax), %xmm1
movq 0x188(%rsp), %rax
movl 0xc(%rsp), %ecx
addl 0x178(%rsp), %ecx
movslq %ecx, %rcx
shlq $0x4, %rcx
addq %rcx, %rax
movdqa (%rax), %xmm0
movdqa %xmm1, 0x2c0(%rsp)
movdqa %xmm0, 0x2b0(%rsp)
movdqa 0x2c0(%rsp), %xmm0
movdqa 0x2b0(%rsp), %xmm1
paddd %xmm1, %xmm0
movdqa %xmm0, 0x110(%rsp)
movl 0x178(%rsp), %eax
addl 0xc(%rsp), %eax
movl %eax, 0xc(%rsp)
movq 0x188(%rsp), %rax
movslq 0xc(%rsp), %rcx
shlq $0x4, %rcx
addq %rcx, %rax
movdqa (%rax), %xmm1
movdqa 0x140(%rsp), %xmm0
movdqa %xmm1, 0x4a0(%rsp)
movdqa %xmm0, 0x490(%rsp)
movdqa 0x4a0(%rsp), %xmm0
movdqa 0x490(%rsp), %xmm1
pmulld %xmm1, %xmm0
movdqa %xmm0, 0xe0(%rsp)
movq 0x188(%rsp), %rax
movslq 0xc(%rsp), %rcx
shlq $0x4, %rcx
addq %rcx, %rax
movdqa (%rax), %xmm1
movdqa 0x150(%rsp), %xmm0
movdqa %xmm1, 0x480(%rsp)
movdqa %xmm0, 0x470(%rsp)
movdqa 0x480(%rsp), %xmm0
movdqa 0x470(%rsp), %xmm1
pmulld %xmm1, %xmm0
movdqa %xmm0, 0xd0(%rsp)
movl 0x178(%rsp), %eax
addl 0xc(%rsp), %eax
movl %eax, 0xc(%rsp)
movq 0x188(%rsp), %rax
movslq 0xc(%rsp), %rcx
shlq $0x4, %rcx
addq %rcx, %rax
movdqa (%rax), %xmm1
movdqa 0x130(%rsp), %xmm0
movdqa %xmm1, 0x460(%rsp)
movdqa %xmm0, 0x450(%rsp)
movdqa 0x460(%rsp), %xmm0
movdqa 0x450(%rsp), %xmm1
pmulld %xmm1, %xmm0
movdqa %xmm0, 0xc0(%rsp)
movl 0x178(%rsp), %eax
addl 0xc(%rsp), %eax
movl %eax, 0xc(%rsp)
movq 0x188(%rsp), %rax
movslq 0xc(%rsp), %rcx
shlq $0x4, %rcx
addq %rcx, %rax
movdqa (%rax), %xmm1
movdqa 0x120(%rsp), %xmm0
movdqa %xmm1, 0x440(%rsp)
movdqa %xmm0, 0x430(%rsp)
movdqa 0x440(%rsp), %xmm0
movdqa 0x430(%rsp), %xmm1
pmulld %xmm1, %xmm0
movdqa %xmm0, 0xb0(%rsp)
movq 0x188(%rsp), %rax
movslq 0xc(%rsp), %rcx
shlq $0x4, %rcx
addq %rcx, %rax
movdqa (%rax), %xmm1
movdqa 0x140(%rsp), %xmm0
movdqa %xmm1, 0x420(%rsp)
movdqa %xmm0, 0x410(%rsp)
movdqa 0x420(%rsp), %xmm0
movdqa 0x410(%rsp), %xmm1
pmulld %xmm1, %xmm0
movdqa %xmm0, 0xa0(%rsp)
movdqa 0x110(%rsp), %xmm1
movq 0x188(%rsp), %rax
movslq 0xc(%rsp), %rcx
shlq $0x4, %rcx
addq %rcx, %rax
movdqa (%rax), %xmm0
movdqa %xmm1, 0x340(%rsp)
movdqa %xmm0, 0x330(%rsp)
movdqa 0x340(%rsp), %xmm0
movdqa 0x330(%rsp), %xmm1
psubd %xmm1, %xmm0
movdqa %xmm0, 0x90(%rsp)
movdqa 0x100(%rsp), %xmm1
movdqa 0xe0(%rsp), %xmm0
movdqa %xmm1, 0x2a0(%rsp)
movdqa %xmm0, 0x290(%rsp)
movdqa 0x2a0(%rsp), %xmm0
movdqa 0x290(%rsp), %xmm1
paddd %xmm1, %xmm0
movdqa %xmm0, 0x110(%rsp)
movdqa 0x110(%rsp), %xmm1
movdqa 0xb0(%rsp), %xmm0
movdqa %xmm1, 0x280(%rsp)
movdqa %xmm0, 0x270(%rsp)
movdqa 0x280(%rsp), %xmm0
movdqa 0x270(%rsp), %xmm1
paddd %xmm1, %xmm0
movdqa %xmm0, 0x80(%rsp)
movdqa 0x90(%rsp), %xmm1
movdqa 0x130(%rsp), %xmm0
movdqa %xmm1, 0x400(%rsp)
movdqa %xmm0, 0x3f0(%rsp)
movdqa 0x400(%rsp), %xmm0
movdqa 0x3f0(%rsp), %xmm1
pmulld %xmm1, %xmm0
movdqa %xmm0, 0x70(%rsp)
movdqa 0xf0(%rsp), %xmm1
movdqa 0xd0(%rsp), %xmm0
movdqa %xmm1, 0x320(%rsp)
movdqa %xmm0, 0x310(%rsp)
movdqa 0x320(%rsp), %xmm0
movdqa 0x310(%rsp), %xmm1
psubd %xmm1, %xmm0
movdqa %xmm0, 0x110(%rsp)
movdqa 0x110(%rsp), %xmm1
movdqa 0xa0(%rsp), %xmm0
movdqa %xmm1, 0x260(%rsp)
movdqa %xmm0, 0x250(%rsp)
movdqa 0x260(%rsp), %xmm0
movdqa 0x250(%rsp), %xmm1
paddd %xmm1, %xmm0
movdqa %xmm0, 0x60(%rsp)
movdqa 0xc0(%rsp), %xmm0
movdqa %xmm0, 0x50(%rsp)
movdqa 0x80(%rsp), %xmm1
movdqa 0x50(%rsp), %xmm0
movdqa %xmm1, 0x240(%rsp)
movdqa %xmm0, 0x230(%rsp)
movdqa 0x240(%rsp), %xmm0
movdqa 0x230(%rsp), %xmm1
paddd %xmm1, %xmm0
movdqa %xmm0, 0x100(%rsp)
movdqa 0x70(%rsp), %xmm0
movdqa %xmm0, 0xf0(%rsp)
movdqa 0x60(%rsp), %xmm1
movdqa 0x50(%rsp), %xmm0
movdqa %xmm1, 0x300(%rsp)
movdqa %xmm0, 0x2f0(%rsp)
movdqa 0x300(%rsp), %xmm0
movdqa 0x2f0(%rsp), %xmm1
psubd %xmm1, %xmm0
movdqa %xmm0, 0xe0(%rsp)
movdqa 0x60(%rsp), %xmm1
movdqa 0x80(%rsp), %xmm0
movdqa %xmm1, 0x2e0(%rsp)
movdqa %xmm0, 0x2d0(%rsp)
movdqa 0x2e0(%rsp), %xmm0
movdqa 0x2d0(%rsp), %xmm1
psubd %xmm1, %xmm0
movdqa %xmm0, 0x110(%rsp)
movdqa 0x110(%rsp), %xmm1
movdqa 0x50(%rsp), %xmm0
movdqa %xmm1, 0x220(%rsp)
movdqa %xmm0, 0x210(%rsp)
movdqa 0x220(%rsp), %xmm0
movdqa 0x210(%rsp), %xmm1
paddd %xmm1, %xmm0
movdqa %xmm0, 0xd0(%rsp)
movdqa 0x100(%rsp), %xmm1
movdqa 0x160(%rsp), %xmm0
movdqa %xmm1, 0x200(%rsp)
movdqa %xmm0, 0x1f0(%rsp)
movdqa 0x200(%rsp), %xmm0
movdqa 0x1f0(%rsp), %xmm1
paddd %xmm1, %xmm0
movdqa %xmm0, 0x40(%rsp)
movdqa 0x40(%rsp), %xmm0
movl 0x17c(%rsp), %eax
movdqa %xmm0, 0x3c0(%rsp)
movl %eax, 0x3bc(%rsp)
movdqa 0x3c0(%rsp), %xmm0
movl 0x3bc(%rsp), %eax
movd %eax, %xmm1
psrad %xmm1, %xmm0
movdqa %xmm0, 0x40(%rsp)
movdqa 0xf0(%rsp), %xmm1
movdqa 0x160(%rsp), %xmm0
movdqa %xmm1, 0x1e0(%rsp)
movdqa %xmm0, 0x1d0(%rsp)
movdqa 0x1e0(%rsp), %xmm0
movdqa 0x1d0(%rsp), %xmm1
paddd %xmm1, %xmm0
movdqa %xmm0, 0x30(%rsp)
movdqa 0x30(%rsp), %xmm0
movl 0x17c(%rsp), %eax
movdqa %xmm0, 0x3a0(%rsp)
movl %eax, 0x39c(%rsp)
movdqa 0x3a0(%rsp), %xmm0
movl 0x39c(%rsp), %eax
movd %eax, %xmm1
psrad %xmm1, %xmm0
movdqa %xmm0, 0x30(%rsp)
movdqa 0xe0(%rsp), %xmm1
movdqa 0x160(%rsp), %xmm0
movdqa %xmm1, 0x1c0(%rsp)
movdqa %xmm0, 0x1b0(%rsp)
movdqa 0x1c0(%rsp), %xmm0
movdqa 0x1b0(%rsp), %xmm1
paddd %xmm1, %xmm0
movdqa %xmm0, 0x20(%rsp)
movdqa 0x20(%rsp), %xmm0
movl 0x17c(%rsp), %eax
movdqa %xmm0, 0x380(%rsp)
movl %eax, 0x37c(%rsp)
movdqa 0x380(%rsp), %xmm0
movl 0x37c(%rsp), %eax
movd %eax, %xmm1
psrad %xmm1, %xmm0
movdqa %xmm0, 0x20(%rsp)
movdqa 0xd0(%rsp), %xmm1
movdqa 0x160(%rsp), %xmm0
movdqa %xmm1, 0x1a0(%rsp)
movdqa %xmm0, 0x190(%rsp)
movdqa 0x1a0(%rsp), %xmm0
movdqa 0x190(%rsp), %xmm1
paddd %xmm1, %xmm0
movdqa %xmm0, 0x10(%rsp)
movdqa 0x10(%rsp), %xmm0
movl 0x17c(%rsp), %eax
movdqa %xmm0, 0x360(%rsp)
movl %eax, 0x35c(%rsp)
movdqa 0x360(%rsp), %xmm0
movl 0x35c(%rsp), %eax
movd %eax, %xmm1
psrad %xmm1, %xmm0
movdqa %xmm0, 0x10(%rsp)
movdqa 0x40(%rsp), %xmm0
movq 0x180(%rsp), %rax
movdqa %xmm0, (%rax)
movdqa 0x30(%rsp), %xmm0
movq 0x180(%rsp), %rax
movdqa %xmm0, 0x10(%rax)
movdqa 0x20(%rsp), %xmm0
movq 0x180(%rsp), %rax
movdqa %xmm0, 0x20(%rax)
movdqa 0x10(%rsp), %xmm0
movq 0x180(%rsp), %rax
movdqa %xmm0, 0x30(%rax)
addq $0x598, %rsp # imm = 0x598
retq
| /m-ab-s[P]aom/av1/encoder/x86/highbd_fwd_txfm_sse4.c |
load_buffer_8x8 | static inline void load_buffer_8x8(const int16_t *input, __m128i *in,
int stride, int flipud, int fliplr,
int shift) {
__m128i u;
if (!flipud) {
in[0] = _mm_load_si128((const __m128i *)(input + 0 * stride));
in[1] = _mm_load_si128((const __m128i *)(input + 1 * stride));
in[2] = _mm_load_si128((const __m128i *)(input + 2 * stride));
in[3] = _mm_load_si128((const __m128i *)(input + 3 * stride));
in[4] = _mm_load_si128((const __m128i *)(input + 4 * stride));
in[5] = _mm_load_si128((const __m128i *)(input + 5 * stride));
in[6] = _mm_load_si128((const __m128i *)(input + 6 * stride));
in[7] = _mm_load_si128((const __m128i *)(input + 7 * stride));
} else {
in[0] = _mm_load_si128((const __m128i *)(input + 7 * stride));
in[1] = _mm_load_si128((const __m128i *)(input + 6 * stride));
in[2] = _mm_load_si128((const __m128i *)(input + 5 * stride));
in[3] = _mm_load_si128((const __m128i *)(input + 4 * stride));
in[4] = _mm_load_si128((const __m128i *)(input + 3 * stride));
in[5] = _mm_load_si128((const __m128i *)(input + 2 * stride));
in[6] = _mm_load_si128((const __m128i *)(input + 1 * stride));
in[7] = _mm_load_si128((const __m128i *)(input + 0 * stride));
}
// Mirror each loaded row left<->right when fliplr is set.
if (fliplr) {
in[0] = mm_reverse_epi16(in[0]);
in[1] = mm_reverse_epi16(in[1]);
in[2] = mm_reverse_epi16(in[2]);
in[3] = mm_reverse_epi16(in[3]);
in[4] = mm_reverse_epi16(in[4]);
in[5] = mm_reverse_epi16(in[5]);
in[6] = mm_reverse_epi16(in[6]);
in[7] = mm_reverse_epi16(in[7]);
}
// Widen to 32 bits: rows 4..7 are expanded into in[8..15] first so that
// rows 3..0 can then be expanded in place into in[0..7] without clobbering
// data that has not been read yet (a scalar sketch of the resulting layout
// follows this entry's listing).
u = _mm_unpackhi_epi64(in[4], in[4]);
in[8] = _mm_cvtepi16_epi32(in[4]);
in[9] = _mm_cvtepi16_epi32(u);
u = _mm_unpackhi_epi64(in[5], in[5]);
in[10] = _mm_cvtepi16_epi32(in[5]);
in[11] = _mm_cvtepi16_epi32(u);
u = _mm_unpackhi_epi64(in[6], in[6]);
in[12] = _mm_cvtepi16_epi32(in[6]);
in[13] = _mm_cvtepi16_epi32(u);
u = _mm_unpackhi_epi64(in[7], in[7]);
in[14] = _mm_cvtepi16_epi32(in[7]);
in[15] = _mm_cvtepi16_epi32(u);
u = _mm_unpackhi_epi64(in[3], in[3]);
in[6] = _mm_cvtepi16_epi32(in[3]);
in[7] = _mm_cvtepi16_epi32(u);
u = _mm_unpackhi_epi64(in[2], in[2]);
in[4] = _mm_cvtepi16_epi32(in[2]);
in[5] = _mm_cvtepi16_epi32(u);
u = _mm_unpackhi_epi64(in[1], in[1]);
in[2] = _mm_cvtepi16_epi32(in[1]);
in[3] = _mm_cvtepi16_epi32(u);
u = _mm_unpackhi_epi64(in[0], in[0]);
in[0] = _mm_cvtepi16_epi32(in[0]);
in[1] = _mm_cvtepi16_epi32(u);
in[0] = _mm_slli_epi32(in[0], shift);
in[1] = _mm_slli_epi32(in[1], shift);
in[2] = _mm_slli_epi32(in[2], shift);
in[3] = _mm_slli_epi32(in[3], shift);
in[4] = _mm_slli_epi32(in[4], shift);
in[5] = _mm_slli_epi32(in[5], shift);
in[6] = _mm_slli_epi32(in[6], shift);
in[7] = _mm_slli_epi32(in[7], shift);
in[8] = _mm_slli_epi32(in[8], shift);
in[9] = _mm_slli_epi32(in[9], shift);
in[10] = _mm_slli_epi32(in[10], shift);
in[11] = _mm_slli_epi32(in[11], shift);
in[12] = _mm_slli_epi32(in[12], shift);
in[13] = _mm_slli_epi32(in[13], shift);
in[14] = _mm_slli_epi32(in[14], shift);
in[15] = _mm_slli_epi32(in[15], shift);
} | subq $0x4b8, %rsp # imm = 0x4B8
movq %rdi, 0x28(%rsp)
movq %rsi, 0x20(%rsp)
movl %edx, 0x1c(%rsp)
movl %ecx, 0x18(%rsp)
movl %r8d, 0x14(%rsp)
movl %r9d, 0x10(%rsp)
cmpl $0x0, 0x18(%rsp)
jne 0x7f198f
movq 0x28(%rsp), %rax
imull $0x0, 0x1c(%rsp), %ecx
movslq %ecx, %rcx
shlq %rcx
addq %rcx, %rax
movq %rax, 0x4b0(%rsp)
movq 0x4b0(%rsp), %rax
movdqa (%rax), %xmm0
movq 0x20(%rsp), %rax
movdqa %xmm0, (%rax)
movq 0x28(%rsp), %rax
movl 0x1c(%rsp), %ecx
shll $0x0, %ecx
movslq %ecx, %rcx
shlq %rcx
addq %rcx, %rax
movq %rax, 0x4a8(%rsp)
movq 0x4a8(%rsp), %rax
movdqa (%rax), %xmm0
movq 0x20(%rsp), %rax
movdqa %xmm0, 0x10(%rax)
movq 0x28(%rsp), %rax
movl 0x1c(%rsp), %ecx
shll %ecx
movslq %ecx, %rcx
shlq %rcx
addq %rcx, %rax
movq %rax, 0x4a0(%rsp)
movq 0x4a0(%rsp), %rax
movdqa (%rax), %xmm0
movq 0x20(%rsp), %rax
movdqa %xmm0, 0x20(%rax)
movq 0x28(%rsp), %rax
imull $0x3, 0x1c(%rsp), %ecx
movslq %ecx, %rcx
shlq %rcx
addq %rcx, %rax
movq %rax, 0x498(%rsp)
movq 0x498(%rsp), %rax
movdqa (%rax), %xmm0
movq 0x20(%rsp), %rax
movdqa %xmm0, 0x30(%rax)
movq 0x28(%rsp), %rax
movl 0x1c(%rsp), %ecx
shll $0x2, %ecx
movslq %ecx, %rcx
shlq %rcx
addq %rcx, %rax
movq %rax, 0x490(%rsp)
movq 0x490(%rsp), %rax
movdqa (%rax), %xmm0
movq 0x20(%rsp), %rax
movdqa %xmm0, 0x40(%rax)
movq 0x28(%rsp), %rax
imull $0x5, 0x1c(%rsp), %ecx
movslq %ecx, %rcx
shlq %rcx
addq %rcx, %rax
movq %rax, 0x488(%rsp)
movq 0x488(%rsp), %rax
movdqa (%rax), %xmm0
movq 0x20(%rsp), %rax
movdqa %xmm0, 0x50(%rax)
movq 0x28(%rsp), %rax
imull $0x6, 0x1c(%rsp), %ecx
movslq %ecx, %rcx
shlq %rcx
addq %rcx, %rax
movq %rax, 0x480(%rsp)
movq 0x480(%rsp), %rax
movdqa (%rax), %xmm0
movq 0x20(%rsp), %rax
movdqa %xmm0, 0x60(%rax)
movq 0x28(%rsp), %rax
imull $0x7, 0x1c(%rsp), %ecx
movslq %ecx, %rcx
shlq %rcx
addq %rcx, %rax
movq %rax, 0x478(%rsp)
movq 0x478(%rsp), %rax
movdqa (%rax), %xmm0
movq 0x20(%rsp), %rax
movdqa %xmm0, 0x70(%rax)
jmp 0x7f1b1b
movq 0x28(%rsp), %rax
imull $0x7, 0x1c(%rsp), %ecx
movslq %ecx, %rcx
shlq %rcx
addq %rcx, %rax
movq %rax, 0x470(%rsp)
movq 0x470(%rsp), %rax
movdqa (%rax), %xmm0
movq 0x20(%rsp), %rax
movdqa %xmm0, (%rax)
movq 0x28(%rsp), %rax
imull $0x6, 0x1c(%rsp), %ecx
movslq %ecx, %rcx
shlq %rcx
addq %rcx, %rax
movq %rax, 0x468(%rsp)
movq 0x468(%rsp), %rax
movdqa (%rax), %xmm0
movq 0x20(%rsp), %rax
movdqa %xmm0, 0x10(%rax)
movq 0x28(%rsp), %rax
imull $0x5, 0x1c(%rsp), %ecx
movslq %ecx, %rcx
shlq %rcx
addq %rcx, %rax
movq %rax, 0x460(%rsp)
movq 0x460(%rsp), %rax
movdqa (%rax), %xmm0
movq 0x20(%rsp), %rax
movdqa %xmm0, 0x20(%rax)
movq 0x28(%rsp), %rax
movl 0x1c(%rsp), %ecx
shll $0x2, %ecx
movslq %ecx, %rcx
shlq %rcx
addq %rcx, %rax
movq %rax, 0x458(%rsp)
movq 0x458(%rsp), %rax
movdqa (%rax), %xmm0
movq 0x20(%rsp), %rax
movdqa %xmm0, 0x30(%rax)
movq 0x28(%rsp), %rax
imull $0x3, 0x1c(%rsp), %ecx
movslq %ecx, %rcx
shlq %rcx
addq %rcx, %rax
movq %rax, 0x450(%rsp)
movq 0x450(%rsp), %rax
movdqa (%rax), %xmm0
movq 0x20(%rsp), %rax
movdqa %xmm0, 0x40(%rax)
movq 0x28(%rsp), %rax
movl 0x1c(%rsp), %ecx
shll %ecx
movslq %ecx, %rcx
shlq %rcx
addq %rcx, %rax
movq %rax, 0x448(%rsp)
movq 0x448(%rsp), %rax
movdqa (%rax), %xmm0
movq 0x20(%rsp), %rax
movdqa %xmm0, 0x50(%rax)
movq 0x28(%rsp), %rax
movl 0x1c(%rsp), %ecx
shll $0x0, %ecx
movslq %ecx, %rcx
shlq %rcx
addq %rcx, %rax
movq %rax, 0x440(%rsp)
movq 0x440(%rsp), %rax
movdqa (%rax), %xmm0
movq 0x20(%rsp), %rax
movdqa %xmm0, 0x60(%rax)
movq 0x28(%rsp), %rax
imull $0x0, 0x1c(%rsp), %ecx
movslq %ecx, %rcx
shlq %rcx
addq %rcx, %rax
movq %rax, 0x438(%rsp)
movq 0x438(%rsp), %rax
movdqa (%rax), %xmm0
movq 0x20(%rsp), %rax
movdqa %xmm0, 0x70(%rax)
cmpl $0x0, 0x14(%rsp)
je 0x7f1bec
movq 0x20(%rsp), %rax
movdqa (%rax), %xmm0
callq 0x804080
movq 0x20(%rsp), %rax
movdqa %xmm0, (%rax)
movq 0x20(%rsp), %rax
movdqa 0x10(%rax), %xmm0
callq 0x804080
movq 0x20(%rsp), %rax
movdqa %xmm0, 0x10(%rax)
movq 0x20(%rsp), %rax
movdqa 0x20(%rax), %xmm0
callq 0x804080
movq 0x20(%rsp), %rax
movdqa %xmm0, 0x20(%rax)
movq 0x20(%rsp), %rax
movdqa 0x30(%rax), %xmm0
callq 0x804080
movq 0x20(%rsp), %rax
movdqa %xmm0, 0x30(%rax)
movq 0x20(%rsp), %rax
movdqa 0x40(%rax), %xmm0
callq 0x804080
movq 0x20(%rsp), %rax
movdqa %xmm0, 0x40(%rax)
movq 0x20(%rsp), %rax
movdqa 0x50(%rax), %xmm0
callq 0x804080
movq 0x20(%rsp), %rax
movdqa %xmm0, 0x50(%rax)
movq 0x20(%rsp), %rax
movdqa 0x60(%rax), %xmm0
callq 0x804080
movq 0x20(%rsp), %rax
movdqa %xmm0, 0x60(%rax)
movq 0x20(%rsp), %rax
movdqa 0x70(%rax), %xmm0
callq 0x804080
movq 0x20(%rsp), %rax
movdqa %xmm0, 0x70(%rax)
movq 0x20(%rsp), %rax
movaps 0x40(%rax), %xmm0
movaps %xmm0, 0x420(%rsp)
movaps %xmm0, 0x410(%rsp)
movaps 0x420(%rsp), %xmm0
movaps 0x410(%rsp), %xmm1
punpckhqdq %xmm1, %xmm0 # xmm0 = xmm0[1],xmm1[1]
movaps %xmm0, (%rsp)
movq 0x20(%rsp), %rax
movaps 0x40(%rax), %xmm0
movaps %xmm0, 0x120(%rsp)
pmovsxwd 0x120(%rsp), %xmm0
movq 0x20(%rsp), %rax
movaps %xmm0, 0x80(%rax)
movaps (%rsp), %xmm0
movaps %xmm0, 0x110(%rsp)
pmovsxwd 0x110(%rsp), %xmm0
movq 0x20(%rsp), %rax
movaps %xmm0, 0x90(%rax)
movq 0x20(%rsp), %rax
movaps 0x50(%rax), %xmm0
movaps %xmm0, 0x400(%rsp)
movaps %xmm0, 0x3f0(%rsp)
movaps 0x400(%rsp), %xmm0
movaps 0x3f0(%rsp), %xmm1
punpckhqdq %xmm1, %xmm0 # xmm0 = xmm0[1],xmm1[1]
movaps %xmm0, (%rsp)
movq 0x20(%rsp), %rax
movaps 0x50(%rax), %xmm0
movaps %xmm0, 0x100(%rsp)
pmovsxwd 0x100(%rsp), %xmm0
movq 0x20(%rsp), %rax
movaps %xmm0, 0xa0(%rax)
movaps (%rsp), %xmm0
movaps %xmm0, 0xf0(%rsp)
pmovsxwd 0xf0(%rsp), %xmm0
movq 0x20(%rsp), %rax
movaps %xmm0, 0xb0(%rax)
movq 0x20(%rsp), %rax
movaps 0x60(%rax), %xmm0
movaps %xmm0, 0x3e0(%rsp)
movaps %xmm0, 0x3d0(%rsp)
movaps 0x3e0(%rsp), %xmm0
movaps 0x3d0(%rsp), %xmm1
punpckhqdq %xmm1, %xmm0 # xmm0 = xmm0[1],xmm1[1]
movaps %xmm0, (%rsp)
movq 0x20(%rsp), %rax
movaps 0x60(%rax), %xmm0
movaps %xmm0, 0xe0(%rsp)
pmovsxwd 0xe0(%rsp), %xmm0
movq 0x20(%rsp), %rax
movaps %xmm0, 0xc0(%rax)
movaps (%rsp), %xmm0
movaps %xmm0, 0xd0(%rsp)
pmovsxwd 0xd0(%rsp), %xmm0
movq 0x20(%rsp), %rax
movaps %xmm0, 0xd0(%rax)
movq 0x20(%rsp), %rax
movaps 0x70(%rax), %xmm0
movaps %xmm0, 0x3c0(%rsp)
movaps %xmm0, 0x3b0(%rsp)
movaps 0x3c0(%rsp), %xmm0
movaps 0x3b0(%rsp), %xmm1
punpckhqdq %xmm1, %xmm0 # xmm0 = xmm0[1],xmm1[1]
movaps %xmm0, (%rsp)
movq 0x20(%rsp), %rax
movaps 0x70(%rax), %xmm0
movaps %xmm0, 0xc0(%rsp)
pmovsxwd 0xc0(%rsp), %xmm0
movq 0x20(%rsp), %rax
movaps %xmm0, 0xe0(%rax)
movaps (%rsp), %xmm0
movaps %xmm0, 0xb0(%rsp)
pmovsxwd 0xb0(%rsp), %xmm0
movq 0x20(%rsp), %rax
movaps %xmm0, 0xf0(%rax)
movq 0x20(%rsp), %rax
movaps 0x30(%rax), %xmm0
movaps %xmm0, 0x3a0(%rsp)
movaps %xmm0, 0x390(%rsp)
movaps 0x3a0(%rsp), %xmm0
movaps 0x390(%rsp), %xmm1
punpckhqdq %xmm1, %xmm0 # xmm0 = xmm0[1],xmm1[1]
movaps %xmm0, (%rsp)
movq 0x20(%rsp), %rax
movaps 0x30(%rax), %xmm0
movaps %xmm0, 0xa0(%rsp)
pmovsxwd 0xa0(%rsp), %xmm0
movq 0x20(%rsp), %rax
movaps %xmm0, 0x60(%rax)
movaps (%rsp), %xmm0
movaps %xmm0, 0x90(%rsp)
pmovsxwd 0x90(%rsp), %xmm0
movq 0x20(%rsp), %rax
movaps %xmm0, 0x70(%rax)
movq 0x20(%rsp), %rax
movaps 0x20(%rax), %xmm0
movaps %xmm0, 0x380(%rsp)
movaps %xmm0, 0x370(%rsp)
movaps 0x380(%rsp), %xmm0
movaps 0x370(%rsp), %xmm1
punpckhqdq %xmm1, %xmm0 # xmm0 = xmm0[1],xmm1[1]
movaps %xmm0, (%rsp)
movq 0x20(%rsp), %rax
movaps 0x20(%rax), %xmm0
movaps %xmm0, 0x80(%rsp)
pmovsxwd 0x80(%rsp), %xmm0
movq 0x20(%rsp), %rax
movaps %xmm0, 0x40(%rax)
movaps (%rsp), %xmm0
movaps %xmm0, 0x70(%rsp)
pmovsxwd 0x70(%rsp), %xmm0
movq 0x20(%rsp), %rax
movaps %xmm0, 0x50(%rax)
movq 0x20(%rsp), %rax
movaps 0x10(%rax), %xmm0
movaps %xmm0, 0x360(%rsp)
movaps %xmm0, 0x350(%rsp)
movaps 0x360(%rsp), %xmm0
movaps 0x350(%rsp), %xmm1
punpckhqdq %xmm1, %xmm0 # xmm0 = xmm0[1],xmm1[1]
movaps %xmm0, (%rsp)
movq 0x20(%rsp), %rax
movaps 0x10(%rax), %xmm0
movaps %xmm0, 0x60(%rsp)
pmovsxwd 0x60(%rsp), %xmm0
movq 0x20(%rsp), %rax
movaps %xmm0, 0x20(%rax)
movaps (%rsp), %xmm0
movaps %xmm0, 0x50(%rsp)
pmovsxwd 0x50(%rsp), %xmm0
movq 0x20(%rsp), %rax
movaps %xmm0, 0x30(%rax)
movq 0x20(%rsp), %rax
movaps (%rax), %xmm0
movaps %xmm0, 0x340(%rsp)
movaps %xmm0, 0x330(%rsp)
movaps 0x340(%rsp), %xmm0
movaps 0x330(%rsp), %xmm1
punpckhqdq %xmm1, %xmm0 # xmm0 = xmm0[1],xmm1[1]
movaps %xmm0, (%rsp)
movq 0x20(%rsp), %rax
movaps (%rax), %xmm0
movaps %xmm0, 0x40(%rsp)
pmovsxwd 0x40(%rsp), %xmm0
movq 0x20(%rsp), %rax
movaps %xmm0, (%rax)
movaps (%rsp), %xmm0
movaps %xmm0, 0x30(%rsp)
pmovsxwd 0x30(%rsp), %xmm0
movq 0x20(%rsp), %rax
movdqa %xmm0, 0x10(%rax)
movq 0x20(%rsp), %rax
movdqa (%rax), %xmm0
movl 0x10(%rsp), %eax
movdqa %xmm0, 0x320(%rsp)
movl %eax, 0x31c(%rsp)
movdqa 0x320(%rsp), %xmm0
movl 0x31c(%rsp), %eax
movd %eax, %xmm1
pslld %xmm1, %xmm0
movq 0x20(%rsp), %rax
movdqa %xmm0, (%rax)
movq 0x20(%rsp), %rax
movdqa 0x10(%rax), %xmm0
movl 0x10(%rsp), %eax
movdqa %xmm0, 0x300(%rsp)
movl %eax, 0x2fc(%rsp)
movdqa 0x300(%rsp), %xmm0
movl 0x2fc(%rsp), %eax
movd %eax, %xmm1
pslld %xmm1, %xmm0
movq 0x20(%rsp), %rax
movdqa %xmm0, 0x10(%rax)
movq 0x20(%rsp), %rax
movdqa 0x20(%rax), %xmm0
movl 0x10(%rsp), %eax
movdqa %xmm0, 0x2e0(%rsp)
movl %eax, 0x2dc(%rsp)
movdqa 0x2e0(%rsp), %xmm0
movl 0x2dc(%rsp), %eax
movd %eax, %xmm1
pslld %xmm1, %xmm0
movq 0x20(%rsp), %rax
movdqa %xmm0, 0x20(%rax)
movq 0x20(%rsp), %rax
movdqa 0x30(%rax), %xmm0
movl 0x10(%rsp), %eax
movdqa %xmm0, 0x2c0(%rsp)
movl %eax, 0x2bc(%rsp)
movdqa 0x2c0(%rsp), %xmm0
movl 0x2bc(%rsp), %eax
movd %eax, %xmm1
pslld %xmm1, %xmm0
movq 0x20(%rsp), %rax
movdqa %xmm0, 0x30(%rax)
movq 0x20(%rsp), %rax
movdqa 0x40(%rax), %xmm0
movl 0x10(%rsp), %eax
movdqa %xmm0, 0x2a0(%rsp)
movl %eax, 0x29c(%rsp)
movdqa 0x2a0(%rsp), %xmm0
movl 0x29c(%rsp), %eax
movd %eax, %xmm1
pslld %xmm1, %xmm0
movq 0x20(%rsp), %rax
movdqa %xmm0, 0x40(%rax)
movq 0x20(%rsp), %rax
movdqa 0x50(%rax), %xmm0
movl 0x10(%rsp), %eax
movdqa %xmm0, 0x280(%rsp)
movl %eax, 0x27c(%rsp)
movdqa 0x280(%rsp), %xmm0
movl 0x27c(%rsp), %eax
movd %eax, %xmm1
pslld %xmm1, %xmm0
movq 0x20(%rsp), %rax
movdqa %xmm0, 0x50(%rax)
movq 0x20(%rsp), %rax
movdqa 0x60(%rax), %xmm0
movl 0x10(%rsp), %eax
movdqa %xmm0, 0x260(%rsp)
movl %eax, 0x25c(%rsp)
movdqa 0x260(%rsp), %xmm0
movl 0x25c(%rsp), %eax
movd %eax, %xmm1
pslld %xmm1, %xmm0
movq 0x20(%rsp), %rax
movdqa %xmm0, 0x60(%rax)
movq 0x20(%rsp), %rax
movdqa 0x70(%rax), %xmm0
movl 0x10(%rsp), %eax
movdqa %xmm0, 0x240(%rsp)
movl %eax, 0x23c(%rsp)
movdqa 0x240(%rsp), %xmm0
movl 0x23c(%rsp), %eax
movd %eax, %xmm1
pslld %xmm1, %xmm0
movq 0x20(%rsp), %rax
movdqa %xmm0, 0x70(%rax)
movq 0x20(%rsp), %rax
movdqa 0x80(%rax), %xmm0
movl 0x10(%rsp), %eax
movdqa %xmm0, 0x220(%rsp)
movl %eax, 0x21c(%rsp)
movdqa 0x220(%rsp), %xmm0
movl 0x21c(%rsp), %eax
movd %eax, %xmm1
pslld %xmm1, %xmm0
movq 0x20(%rsp), %rax
movdqa %xmm0, 0x80(%rax)
movq 0x20(%rsp), %rax
movdqa 0x90(%rax), %xmm0
movl 0x10(%rsp), %eax
movdqa %xmm0, 0x200(%rsp)
movl %eax, 0x1fc(%rsp)
movdqa 0x200(%rsp), %xmm0
movl 0x1fc(%rsp), %eax
movd %eax, %xmm1
pslld %xmm1, %xmm0
movq 0x20(%rsp), %rax
movdqa %xmm0, 0x90(%rax)
movq 0x20(%rsp), %rax
movdqa 0xa0(%rax), %xmm0
movl 0x10(%rsp), %eax
movdqa %xmm0, 0x1e0(%rsp)
movl %eax, 0x1dc(%rsp)
movdqa 0x1e0(%rsp), %xmm0
movl 0x1dc(%rsp), %eax
movd %eax, %xmm1
pslld %xmm1, %xmm0
movq 0x20(%rsp), %rax
movdqa %xmm0, 0xa0(%rax)
movq 0x20(%rsp), %rax
movdqa 0xb0(%rax), %xmm0
movl 0x10(%rsp), %eax
movdqa %xmm0, 0x1c0(%rsp)
movl %eax, 0x1bc(%rsp)
movdqa 0x1c0(%rsp), %xmm0
movl 0x1bc(%rsp), %eax
movd %eax, %xmm1
pslld %xmm1, %xmm0
movq 0x20(%rsp), %rax
movdqa %xmm0, 0xb0(%rax)
movq 0x20(%rsp), %rax
movdqa 0xc0(%rax), %xmm0
movl 0x10(%rsp), %eax
movdqa %xmm0, 0x1a0(%rsp)
movl %eax, 0x19c(%rsp)
movdqa 0x1a0(%rsp), %xmm0
movl 0x19c(%rsp), %eax
movd %eax, %xmm1
pslld %xmm1, %xmm0
movq 0x20(%rsp), %rax
movdqa %xmm0, 0xc0(%rax)
movq 0x20(%rsp), %rax
movdqa 0xd0(%rax), %xmm0
movl 0x10(%rsp), %eax
movdqa %xmm0, 0x180(%rsp)
movl %eax, 0x17c(%rsp)
movdqa 0x180(%rsp), %xmm0
movl 0x17c(%rsp), %eax
movd %eax, %xmm1
pslld %xmm1, %xmm0
movq 0x20(%rsp), %rax
movdqa %xmm0, 0xd0(%rax)
movq 0x20(%rsp), %rax
movdqa 0xe0(%rax), %xmm0
movl 0x10(%rsp), %eax
movdqa %xmm0, 0x160(%rsp)
movl %eax, 0x15c(%rsp)
movdqa 0x160(%rsp), %xmm0
movl 0x15c(%rsp), %eax
movd %eax, %xmm1
pslld %xmm1, %xmm0
movq 0x20(%rsp), %rax
movdqa %xmm0, 0xe0(%rax)
movq 0x20(%rsp), %rax
movdqa 0xf0(%rax), %xmm0
movl 0x10(%rsp), %eax
movdqa %xmm0, 0x140(%rsp)
movl %eax, 0x13c(%rsp)
movdqa 0x140(%rsp), %xmm0
movl 0x13c(%rsp), %eax
movd %eax, %xmm1
pslld %xmm1, %xmm0
movq 0x20(%rsp), %rax
movdqa %xmm0, 0xf0(%rax)
addq $0x4b8, %rsp # imm = 0x4B8
retq
nopw (%rax,%rax)
| /m-ab-s[P]aom/av1/encoder/x86/highbd_fwd_txfm_sse4.c |
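The unpack/convert tail of load_buffer_8x8() above is easy to misread, so the plain-C sketch below models the same behavior (load_buffer_8x8_model and its main() driver are illustrative names, not libaom code): it applies the optional vertical/horizontal flips, widens the 16-bit samples to 32 bits, applies the pre-shift, and leaves columns 0..3 of row r in out[2*r] and columns 4..7 in out[2*r+1], which is the lane layout the intrinsic version produces in in[0..15].

#include <stdint.h>
#include <stdio.h>

// Scalar model of load_buffer_8x8(): same flips, widening and pre-shift,
// with out[2*r] = columns 0..3 of row r and out[2*r+1] = columns 4..7.
static void load_buffer_8x8_model(const int16_t *input, int32_t out[16][4],
                                  int stride, int flipud, int fliplr,
                                  int shift) {
  for (int r = 0; r < 8; ++r) {
    const int src_r = flipud ? 7 - r : r;    // vertical flip
    for (int c = 0; c < 8; ++c) {
      const int src_c = fliplr ? 7 - c : c;  // horizontal flip
      out[2 * r + (c >> 2)][c & 3] = (int32_t)input[src_r * stride + src_c]
                                     << shift;
    }
  }
}

int main(void) {
  int16_t blk[8 * 8];
  int32_t out[16][4];
  for (int i = 0; i < 64; ++i) blk[i] = (int16_t)i;
  load_buffer_8x8_model(blk, out, 8, 0, 0, 2);
  // Row 0, columns 4..7, shifted left by 2: prints 16 20 24 28.
  printf("%d %d %d %d\n", out[1][0], out[1][1], out[1][2], out[1][3]);
  return 0;
}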
write_buffer_8x8 | static inline void write_buffer_8x8(const __m128i *res, int32_t *output) {
_mm_store_si128((__m128i *)(output + 0 * 4), res[0]);
_mm_store_si128((__m128i *)(output + 1 * 4), res[1]);
_mm_store_si128((__m128i *)(output + 2 * 4), res[2]);
_mm_store_si128((__m128i *)(output + 3 * 4), res[3]);
_mm_store_si128((__m128i *)(output + 4 * 4), res[4]);
_mm_store_si128((__m128i *)(output + 5 * 4), res[5]);
_mm_store_si128((__m128i *)(output + 6 * 4), res[6]);
_mm_store_si128((__m128i *)(output + 7 * 4), res[7]);
_mm_store_si128((__m128i *)(output + 8 * 4), res[8]);
_mm_store_si128((__m128i *)(output + 9 * 4), res[9]);
_mm_store_si128((__m128i *)(output + 10 * 4), res[10]);
_mm_store_si128((__m128i *)(output + 11 * 4), res[11]);
_mm_store_si128((__m128i *)(output + 12 * 4), res[12]);
_mm_store_si128((__m128i *)(output + 13 * 4), res[13]);
_mm_store_si128((__m128i *)(output + 14 * 4), res[14]);
_mm_store_si128((__m128i *)(output + 15 * 4), res[15]);
} | subq $0x188, %rsp # imm = 0x188
movq %rdi, -0x78(%rsp)
movq %rsi, -0x80(%rsp)
movq -0x80(%rsp), %rax
movq -0x78(%rsp), %rcx
movdqa (%rcx), %xmm0
movq %rax, 0x180(%rsp)
movdqa %xmm0, 0x170(%rsp)
movdqa 0x170(%rsp), %xmm0
movq 0x180(%rsp), %rax
movdqa %xmm0, (%rax)
movq -0x80(%rsp), %rax
addq $0x10, %rax
movq -0x78(%rsp), %rcx
movdqa 0x10(%rcx), %xmm0
movq %rax, 0x168(%rsp)
movdqa %xmm0, 0x150(%rsp)
movdqa 0x150(%rsp), %xmm0
movq 0x168(%rsp), %rax
movdqa %xmm0, (%rax)
movq -0x80(%rsp), %rax
addq $0x20, %rax
movq -0x78(%rsp), %rcx
movdqa 0x20(%rcx), %xmm0
movq %rax, 0x148(%rsp)
movdqa %xmm0, 0x130(%rsp)
movdqa 0x130(%rsp), %xmm0
movq 0x148(%rsp), %rax
movdqa %xmm0, (%rax)
movq -0x80(%rsp), %rax
addq $0x30, %rax
movq -0x78(%rsp), %rcx
movdqa 0x30(%rcx), %xmm0
movq %rax, 0x128(%rsp)
movdqa %xmm0, 0x110(%rsp)
movdqa 0x110(%rsp), %xmm0
movq 0x128(%rsp), %rax
movdqa %xmm0, (%rax)
movq -0x80(%rsp), %rax
addq $0x40, %rax
movq -0x78(%rsp), %rcx
movdqa 0x40(%rcx), %xmm0
movq %rax, 0x108(%rsp)
movdqa %xmm0, 0xf0(%rsp)
movdqa 0xf0(%rsp), %xmm0
movq 0x108(%rsp), %rax
movdqa %xmm0, (%rax)
movq -0x80(%rsp), %rax
addq $0x50, %rax
movq -0x78(%rsp), %rcx
movdqa 0x50(%rcx), %xmm0
movq %rax, 0xe8(%rsp)
movdqa %xmm0, 0xd0(%rsp)
movdqa 0xd0(%rsp), %xmm0
movq 0xe8(%rsp), %rax
movdqa %xmm0, (%rax)
movq -0x80(%rsp), %rax
addq $0x60, %rax
movq -0x78(%rsp), %rcx
movdqa 0x60(%rcx), %xmm0
movq %rax, 0xc8(%rsp)
movdqa %xmm0, 0xb0(%rsp)
movdqa 0xb0(%rsp), %xmm0
movq 0xc8(%rsp), %rax
movdqa %xmm0, (%rax)
movq -0x80(%rsp), %rax
addq $0x70, %rax
movq -0x78(%rsp), %rcx
movdqa 0x70(%rcx), %xmm0
movq %rax, 0xa8(%rsp)
movdqa %xmm0, 0x90(%rsp)
movdqa 0x90(%rsp), %xmm0
movq 0xa8(%rsp), %rax
movdqa %xmm0, (%rax)
movq -0x80(%rsp), %rax
addq $0x80, %rax
movq -0x78(%rsp), %rcx
movdqa 0x80(%rcx), %xmm0
movq %rax, 0x88(%rsp)
movdqa %xmm0, 0x70(%rsp)
movdqa 0x70(%rsp), %xmm0
movq 0x88(%rsp), %rax
movdqa %xmm0, (%rax)
movq -0x80(%rsp), %rax
addq $0x90, %rax
movq -0x78(%rsp), %rcx
movdqa 0x90(%rcx), %xmm0
movq %rax, 0x68(%rsp)
movdqa %xmm0, 0x50(%rsp)
movdqa 0x50(%rsp), %xmm0
movq 0x68(%rsp), %rax
movdqa %xmm0, (%rax)
movq -0x80(%rsp), %rax
addq $0xa0, %rax
movq -0x78(%rsp), %rcx
movdqa 0xa0(%rcx), %xmm0
movq %rax, 0x48(%rsp)
movdqa %xmm0, 0x30(%rsp)
movdqa 0x30(%rsp), %xmm0
movq 0x48(%rsp), %rax
movdqa %xmm0, (%rax)
movq -0x80(%rsp), %rax
addq $0xb0, %rax
movq -0x78(%rsp), %rcx
movdqa 0xb0(%rcx), %xmm0
movq %rax, 0x28(%rsp)
movdqa %xmm0, 0x10(%rsp)
movdqa 0x10(%rsp), %xmm0
movq 0x28(%rsp), %rax
movdqa %xmm0, (%rax)
movq -0x80(%rsp), %rax
addq $0xc0, %rax
movq -0x78(%rsp), %rcx
movdqa 0xc0(%rcx), %xmm0
movq %rax, 0x8(%rsp)
movdqa %xmm0, -0x10(%rsp)
movdqa -0x10(%rsp), %xmm0
movq 0x8(%rsp), %rax
movdqa %xmm0, (%rax)
movq -0x80(%rsp), %rax
addq $0xd0, %rax
movq -0x78(%rsp), %rcx
movdqa 0xd0(%rcx), %xmm0
movq %rax, -0x18(%rsp)
movdqa %xmm0, -0x30(%rsp)
movdqa -0x30(%rsp), %xmm0
movq -0x18(%rsp), %rax
movdqa %xmm0, (%rax)
movq -0x80(%rsp), %rax
addq $0xe0, %rax
movq -0x78(%rsp), %rcx
movdqa 0xe0(%rcx), %xmm0
movq %rax, -0x38(%rsp)
movdqa %xmm0, -0x50(%rsp)
movdqa -0x50(%rsp), %xmm0
movq -0x38(%rsp), %rax
movdqa %xmm0, (%rax)
movq -0x80(%rsp), %rax
addq $0xf0, %rax
movq -0x78(%rsp), %rcx
movdqa 0xf0(%rcx), %xmm0
movq %rax, -0x58(%rsp)
movdqa %xmm0, -0x70(%rsp)
movdqa -0x70(%rsp), %xmm0
movq -0x58(%rsp), %rax
movdqa %xmm0, (%rax)
addq $0x188, %rsp # imm = 0x188
retq
nopw %cs:(%rax,%rax)
| /m-ab-s[P]aom/av1/encoder/x86/highbd_fwd_txfm_sse4.c |
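write_buffer_8x8() above is sixteen aligned 128-bit stores, so the destination must be 16-byte aligned and receives the 8x8 block of 32-bit coefficients contiguously, four lanes per store. The minimal standalone check of that layout below is not libaom code (buffer names and values are made up); build it with SSE intrinsics enabled, e.g. -msse4.1.

#include <smmintrin.h>
#include <stdalign.h>
#include <stdint.h>
#include <stdio.h>

int main(void) {
  __m128i res[16];
  alignas(16) int32_t output[64];  // _mm_store_si128 needs 16-byte alignment
  // Fill lane c of res[k] with 4*k + c, then store the vectors back-to-back
  // the way write_buffer_8x8() does; output[] then holds 0..63 row-major.
  for (int k = 0; k < 16; ++k)
    res[k] = _mm_setr_epi32(4 * k + 0, 4 * k + 1, 4 * k + 2, 4 * k + 3);
  for (int k = 0; k < 16; ++k)
    _mm_store_si128((__m128i *)(output + 4 * k), res[k]);
  printf("output[8*3 + 5] = %d\n", output[8 * 3 + 5]);  // prints 29
  return 0;
}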
fadst16x16_sse4_1 | static void fadst16x16_sse4_1(__m128i *in, __m128i *out, int bit,
const int num_cols) {
const int32_t *cospi = cospi_arr(bit);
const __m128i cospi32 = _mm_set1_epi32(cospi[32]);
const __m128i cospi48 = _mm_set1_epi32(cospi[48]);
const __m128i cospi16 = _mm_set1_epi32(cospi[16]);
const __m128i cospim16 = _mm_set1_epi32(-cospi[16]);
const __m128i cospim48 = _mm_set1_epi32(-cospi[48]);
const __m128i cospi8 = _mm_set1_epi32(cospi[8]);
const __m128i cospi56 = _mm_set1_epi32(cospi[56]);
const __m128i cospim56 = _mm_set1_epi32(-cospi[56]);
const __m128i cospim8 = _mm_set1_epi32(-cospi[8]);
const __m128i cospi24 = _mm_set1_epi32(cospi[24]);
const __m128i cospim24 = _mm_set1_epi32(-cospi[24]);
const __m128i cospim40 = _mm_set1_epi32(-cospi[40]);
const __m128i cospi40 = _mm_set1_epi32(cospi[40]);
const __m128i cospi2 = _mm_set1_epi32(cospi[2]);
const __m128i cospi62 = _mm_set1_epi32(cospi[62]);
const __m128i cospim2 = _mm_set1_epi32(-cospi[2]);
const __m128i cospi10 = _mm_set1_epi32(cospi[10]);
const __m128i cospi54 = _mm_set1_epi32(cospi[54]);
const __m128i cospim10 = _mm_set1_epi32(-cospi[10]);
const __m128i cospi18 = _mm_set1_epi32(cospi[18]);
const __m128i cospi46 = _mm_set1_epi32(cospi[46]);
const __m128i cospim18 = _mm_set1_epi32(-cospi[18]);
const __m128i cospi26 = _mm_set1_epi32(cospi[26]);
const __m128i cospi38 = _mm_set1_epi32(cospi[38]);
const __m128i cospim26 = _mm_set1_epi32(-cospi[26]);
const __m128i cospi34 = _mm_set1_epi32(cospi[34]);
const __m128i cospi30 = _mm_set1_epi32(cospi[30]);
const __m128i cospim34 = _mm_set1_epi32(-cospi[34]);
const __m128i cospi42 = _mm_set1_epi32(cospi[42]);
const __m128i cospi22 = _mm_set1_epi32(cospi[22]);
const __m128i cospim42 = _mm_set1_epi32(-cospi[42]);
const __m128i cospi50 = _mm_set1_epi32(cospi[50]);
const __m128i cospi14 = _mm_set1_epi32(cospi[14]);
const __m128i cospim50 = _mm_set1_epi32(-cospi[50]);
const __m128i cospi58 = _mm_set1_epi32(cospi[58]);
const __m128i cospi6 = _mm_set1_epi32(cospi[6]);
const __m128i cospim58 = _mm_set1_epi32(-cospi[58]);
const __m128i rnding = _mm_set1_epi32(1 << (bit - 1));
const __m128i zero = _mm_setzero_si128();
__m128i u[16], v[16], x, y;
int col;
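// Each 32-bit lane of these vectors carries an independent 16-point input,
// so one pass of the loop below transforms four columns at once.
// half_btf_sse4_1(&w0, &n0, &w1, &n1, &rnding, bit) is assumed here to return
// the rounded butterfly (w0 * n0 + w1 * n1 + rnding) >> bit per lane, i.e.
// the same pattern that stage 2 inlines explicitly with cospi32.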
for (col = 0; col < num_cols; ++col) {
// stage 0
// stage 1
u[0] = in[0 * num_cols + col];
u[1] = _mm_sub_epi32(zero, in[15 * num_cols + col]);
u[2] = _mm_sub_epi32(zero, in[7 * num_cols + col]);
u[3] = in[8 * num_cols + col];
u[4] = _mm_sub_epi32(zero, in[3 * num_cols + col]);
u[5] = in[12 * num_cols + col];
u[6] = in[4 * num_cols + col];
u[7] = _mm_sub_epi32(zero, in[11 * num_cols + col]);
u[8] = _mm_sub_epi32(zero, in[1 * num_cols + col]);
u[9] = in[14 * num_cols + col];
u[10] = in[6 * num_cols + col];
u[11] = _mm_sub_epi32(zero, in[9 * num_cols + col]);
u[12] = in[2 * num_cols + col];
u[13] = _mm_sub_epi32(zero, in[13 * num_cols + col]);
u[14] = _mm_sub_epi32(zero, in[5 * num_cols + col]);
u[15] = in[10 * num_cols + col];
// stage 2
v[0] = u[0];
v[1] = u[1];
x = _mm_mullo_epi32(u[2], cospi32);
y = _mm_mullo_epi32(u[3], cospi32);
v[2] = _mm_add_epi32(x, y);
v[2] = _mm_add_epi32(v[2], rnding);
v[2] = _mm_srai_epi32(v[2], bit);
v[3] = _mm_sub_epi32(x, y);
v[3] = _mm_add_epi32(v[3], rnding);
v[3] = _mm_srai_epi32(v[3], bit);
v[4] = u[4];
v[5] = u[5];
x = _mm_mullo_epi32(u[6], cospi32);
y = _mm_mullo_epi32(u[7], cospi32);
v[6] = _mm_add_epi32(x, y);
v[6] = _mm_add_epi32(v[6], rnding);
v[6] = _mm_srai_epi32(v[6], bit);
v[7] = _mm_sub_epi32(x, y);
v[7] = _mm_add_epi32(v[7], rnding);
v[7] = _mm_srai_epi32(v[7], bit);
v[8] = u[8];
v[9] = u[9];
x = _mm_mullo_epi32(u[10], cospi32);
y = _mm_mullo_epi32(u[11], cospi32);
v[10] = _mm_add_epi32(x, y);
v[10] = _mm_add_epi32(v[10], rnding);
v[10] = _mm_srai_epi32(v[10], bit);
v[11] = _mm_sub_epi32(x, y);
v[11] = _mm_add_epi32(v[11], rnding);
v[11] = _mm_srai_epi32(v[11], bit);
v[12] = u[12];
v[13] = u[13];
x = _mm_mullo_epi32(u[14], cospi32);
y = _mm_mullo_epi32(u[15], cospi32);
v[14] = _mm_add_epi32(x, y);
v[14] = _mm_add_epi32(v[14], rnding);
v[14] = _mm_srai_epi32(v[14], bit);
v[15] = _mm_sub_epi32(x, y);
v[15] = _mm_add_epi32(v[15], rnding);
v[15] = _mm_srai_epi32(v[15], bit);
// stage 3
u[0] = _mm_add_epi32(v[0], v[2]);
u[1] = _mm_add_epi32(v[1], v[3]);
u[2] = _mm_sub_epi32(v[0], v[2]);
u[3] = _mm_sub_epi32(v[1], v[3]);
u[4] = _mm_add_epi32(v[4], v[6]);
u[5] = _mm_add_epi32(v[5], v[7]);
u[6] = _mm_sub_epi32(v[4], v[6]);
u[7] = _mm_sub_epi32(v[5], v[7]);
u[8] = _mm_add_epi32(v[8], v[10]);
u[9] = _mm_add_epi32(v[9], v[11]);
u[10] = _mm_sub_epi32(v[8], v[10]);
u[11] = _mm_sub_epi32(v[9], v[11]);
u[12] = _mm_add_epi32(v[12], v[14]);
u[13] = _mm_add_epi32(v[13], v[15]);
u[14] = _mm_sub_epi32(v[12], v[14]);
u[15] = _mm_sub_epi32(v[13], v[15]);
// stage 4
v[0] = u[0];
v[1] = u[1];
v[2] = u[2];
v[3] = u[3];
v[4] = half_btf_sse4_1(&cospi16, &u[4], &cospi48, &u[5], &rnding, bit);
v[5] = half_btf_sse4_1(&cospi48, &u[4], &cospim16, &u[5], &rnding, bit);
v[6] = half_btf_sse4_1(&cospim48, &u[6], &cospi16, &u[7], &rnding, bit);
v[7] = half_btf_sse4_1(&cospi16, &u[6], &cospi48, &u[7], &rnding, bit);
v[8] = u[8];
v[9] = u[9];
v[10] = u[10];
v[11] = u[11];
v[12] = half_btf_sse4_1(&cospi16, &u[12], &cospi48, &u[13], &rnding, bit);
v[13] = half_btf_sse4_1(&cospi48, &u[12], &cospim16, &u[13], &rnding, bit);
v[14] = half_btf_sse4_1(&cospim48, &u[14], &cospi16, &u[15], &rnding, bit);
v[15] = half_btf_sse4_1(&cospi16, &u[14], &cospi48, &u[15], &rnding, bit);
// stage 5
u[0] = _mm_add_epi32(v[0], v[4]);
u[1] = _mm_add_epi32(v[1], v[5]);
u[2] = _mm_add_epi32(v[2], v[6]);
u[3] = _mm_add_epi32(v[3], v[7]);
u[4] = _mm_sub_epi32(v[0], v[4]);
u[5] = _mm_sub_epi32(v[1], v[5]);
u[6] = _mm_sub_epi32(v[2], v[6]);
u[7] = _mm_sub_epi32(v[3], v[7]);
u[8] = _mm_add_epi32(v[8], v[12]);
u[9] = _mm_add_epi32(v[9], v[13]);
u[10] = _mm_add_epi32(v[10], v[14]);
u[11] = _mm_add_epi32(v[11], v[15]);
u[12] = _mm_sub_epi32(v[8], v[12]);
u[13] = _mm_sub_epi32(v[9], v[13]);
u[14] = _mm_sub_epi32(v[10], v[14]);
u[15] = _mm_sub_epi32(v[11], v[15]);
// stage 6
v[0] = u[0];
v[1] = u[1];
v[2] = u[2];
v[3] = u[3];
v[4] = u[4];
v[5] = u[5];
v[6] = u[6];
v[7] = u[7];
v[8] = half_btf_sse4_1(&cospi8, &u[8], &cospi56, &u[9], &rnding, bit);
v[9] = half_btf_sse4_1(&cospi56, &u[8], &cospim8, &u[9], &rnding, bit);
v[10] = half_btf_sse4_1(&cospi40, &u[10], &cospi24, &u[11], &rnding, bit);
v[11] = half_btf_sse4_1(&cospi24, &u[10], &cospim40, &u[11], &rnding, bit);
v[12] = half_btf_sse4_1(&cospim56, &u[12], &cospi8, &u[13], &rnding, bit);
v[13] = half_btf_sse4_1(&cospi8, &u[12], &cospi56, &u[13], &rnding, bit);
v[14] = half_btf_sse4_1(&cospim24, &u[14], &cospi40, &u[15], &rnding, bit);
v[15] = half_btf_sse4_1(&cospi40, &u[14], &cospi24, &u[15], &rnding, bit);
// stage 7
u[0] = _mm_add_epi32(v[0], v[8]);
u[1] = _mm_add_epi32(v[1], v[9]);
u[2] = _mm_add_epi32(v[2], v[10]);
u[3] = _mm_add_epi32(v[3], v[11]);
u[4] = _mm_add_epi32(v[4], v[12]);
u[5] = _mm_add_epi32(v[5], v[13]);
u[6] = _mm_add_epi32(v[6], v[14]);
u[7] = _mm_add_epi32(v[7], v[15]);
u[8] = _mm_sub_epi32(v[0], v[8]);
u[9] = _mm_sub_epi32(v[1], v[9]);
u[10] = _mm_sub_epi32(v[2], v[10]);
u[11] = _mm_sub_epi32(v[3], v[11]);
u[12] = _mm_sub_epi32(v[4], v[12]);
u[13] = _mm_sub_epi32(v[5], v[13]);
u[14] = _mm_sub_epi32(v[6], v[14]);
u[15] = _mm_sub_epi32(v[7], v[15]);
// stage 8
v[0] = half_btf_sse4_1(&cospi2, &u[0], &cospi62, &u[1], &rnding, bit);
v[1] = half_btf_sse4_1(&cospi62, &u[0], &cospim2, &u[1], &rnding, bit);
v[2] = half_btf_sse4_1(&cospi10, &u[2], &cospi54, &u[3], &rnding, bit);
v[3] = half_btf_sse4_1(&cospi54, &u[2], &cospim10, &u[3], &rnding, bit);
v[4] = half_btf_sse4_1(&cospi18, &u[4], &cospi46, &u[5], &rnding, bit);
v[5] = half_btf_sse4_1(&cospi46, &u[4], &cospim18, &u[5], &rnding, bit);
v[6] = half_btf_sse4_1(&cospi26, &u[6], &cospi38, &u[7], &rnding, bit);
v[7] = half_btf_sse4_1(&cospi38, &u[6], &cospim26, &u[7], &rnding, bit);
v[8] = half_btf_sse4_1(&cospi34, &u[8], &cospi30, &u[9], &rnding, bit);
v[9] = half_btf_sse4_1(&cospi30, &u[8], &cospim34, &u[9], &rnding, bit);
v[10] = half_btf_sse4_1(&cospi42, &u[10], &cospi22, &u[11], &rnding, bit);
v[11] = half_btf_sse4_1(&cospi22, &u[10], &cospim42, &u[11], &rnding, bit);
v[12] = half_btf_sse4_1(&cospi50, &u[12], &cospi14, &u[13], &rnding, bit);
v[13] = half_btf_sse4_1(&cospi14, &u[12], &cospim50, &u[13], &rnding, bit);
v[14] = half_btf_sse4_1(&cospi58, &u[14], &cospi6, &u[15], &rnding, bit);
v[15] = half_btf_sse4_1(&cospi6, &u[14], &cospim58, &u[15], &rnding, bit);
// stage 9
out[0 * num_cols + col] = v[1];
out[1 * num_cols + col] = v[14];
out[2 * num_cols + col] = v[3];
out[3 * num_cols + col] = v[12];
out[4 * num_cols + col] = v[5];
out[5 * num_cols + col] = v[10];
out[6 * num_cols + col] = v[7];
out[7 * num_cols + col] = v[8];
out[8 * num_cols + col] = v[9];
out[9 * num_cols + col] = v[6];
out[10 * num_cols + col] = v[11];
out[11 * num_cols + col] = v[4];
out[12 * num_cols + col] = v[13];
out[13 * num_cols + col] = v[2];
out[14 * num_cols + col] = v[15];
out[15 * num_cols + col] = v[0];
}
} | subq $0x1538, %rsp # imm = 0x1538
movq %rdi, 0x4b8(%rsp)
movq %rsi, 0x4b0(%rsp)
movl %edx, 0x4ac(%rsp)
movl %ecx, 0x4a8(%rsp)
movl 0x4ac(%rsp), %edi
callq 0x804040
movq %rax, 0x4a0(%rsp)
movq 0x4a0(%rsp), %rax
movl 0x80(%rax), %eax
movl %eax, 0xf5c(%rsp)
movl 0xf5c(%rsp), %eax
movl %eax, 0x107c(%rsp)
movl %eax, 0x1078(%rsp)
movl %eax, 0x1074(%rsp)
movl %eax, 0x1070(%rsp)
movl 0x1074(%rsp), %edx
movl 0x1078(%rsp), %ecx
movl 0x107c(%rsp), %eax
movd 0x1070(%rsp), %xmm0
pinsrd $0x1, %edx, %xmm0
pinsrd $0x2, %ecx, %xmm0
pinsrd $0x3, %eax, %xmm0
movaps %xmm0, 0x1060(%rsp)
movaps 0x1060(%rsp), %xmm0
movaps %xmm0, 0x490(%rsp)
movq 0x4a0(%rsp), %rax
movl 0xc0(%rax), %eax
movl %eax, 0xf58(%rsp)
movl 0xf58(%rsp), %eax
movl %eax, 0x109c(%rsp)
movl %eax, 0x1098(%rsp)
movl %eax, 0x1094(%rsp)
movl %eax, 0x1090(%rsp)
movl 0x1094(%rsp), %edx
movl 0x1098(%rsp), %ecx
movl 0x109c(%rsp), %eax
movd 0x1090(%rsp), %xmm0
pinsrd $0x1, %edx, %xmm0
pinsrd $0x2, %ecx, %xmm0
pinsrd $0x3, %eax, %xmm0
movaps %xmm0, 0x1080(%rsp)
movaps 0x1080(%rsp), %xmm0
movaps %xmm0, 0x480(%rsp)
movq 0x4a0(%rsp), %rax
movl 0x40(%rax), %eax
movl %eax, 0xf54(%rsp)
movl 0xf54(%rsp), %eax
movl %eax, 0x10bc(%rsp)
movl %eax, 0x10b8(%rsp)
movl %eax, 0x10b4(%rsp)
movl %eax, 0x10b0(%rsp)
movl 0x10b4(%rsp), %edx
movl 0x10b8(%rsp), %ecx
movl 0x10bc(%rsp), %eax
movd 0x10b0(%rsp), %xmm0
pinsrd $0x1, %edx, %xmm0
pinsrd $0x2, %ecx, %xmm0
pinsrd $0x3, %eax, %xmm0
movaps %xmm0, 0x10a0(%rsp)
movaps 0x10a0(%rsp), %xmm0
movaps %xmm0, 0x470(%rsp)
movq 0x4a0(%rsp), %rax
movl 0x40(%rax), %eax
negl %eax
movl %eax, 0xf50(%rsp)
movl 0xf50(%rsp), %eax
movl %eax, 0x10dc(%rsp)
movl %eax, 0x10d8(%rsp)
movl %eax, 0x10d4(%rsp)
movl %eax, 0x10d0(%rsp)
movl 0x10d4(%rsp), %edx
movl 0x10d8(%rsp), %ecx
movl 0x10dc(%rsp), %eax
movd 0x10d0(%rsp), %xmm0
pinsrd $0x1, %edx, %xmm0
pinsrd $0x2, %ecx, %xmm0
pinsrd $0x3, %eax, %xmm0
movaps %xmm0, 0x10c0(%rsp)
movaps 0x10c0(%rsp), %xmm0
movaps %xmm0, 0x460(%rsp)
movq 0x4a0(%rsp), %rax
movl 0xc0(%rax), %eax
negl %eax
movl %eax, 0xf4c(%rsp)
movl 0xf4c(%rsp), %eax
movl %eax, 0x10fc(%rsp)
movl %eax, 0x10f8(%rsp)
movl %eax, 0x10f4(%rsp)
movl %eax, 0x10f0(%rsp)
movl 0x10f4(%rsp), %edx
movl 0x10f8(%rsp), %ecx
movl 0x10fc(%rsp), %eax
movd 0x10f0(%rsp), %xmm0
pinsrd $0x1, %edx, %xmm0
pinsrd $0x2, %ecx, %xmm0
pinsrd $0x3, %eax, %xmm0
movaps %xmm0, 0x10e0(%rsp)
movaps 0x10e0(%rsp), %xmm0
movaps %xmm0, 0x450(%rsp)
movq 0x4a0(%rsp), %rax
movl 0x20(%rax), %eax
movl %eax, 0xf48(%rsp)
movl 0xf48(%rsp), %eax
movl %eax, 0x111c(%rsp)
movl %eax, 0x1118(%rsp)
movl %eax, 0x1114(%rsp)
movl %eax, 0x1110(%rsp)
movl 0x1114(%rsp), %edx
movl 0x1118(%rsp), %ecx
movl 0x111c(%rsp), %eax
movd 0x1110(%rsp), %xmm0
pinsrd $0x1, %edx, %xmm0
pinsrd $0x2, %ecx, %xmm0
pinsrd $0x3, %eax, %xmm0
movaps %xmm0, 0x1100(%rsp)
movaps 0x1100(%rsp), %xmm0
movaps %xmm0, 0x440(%rsp)
movq 0x4a0(%rsp), %rax
movl 0xe0(%rax), %eax
movl %eax, 0xf44(%rsp)
movl 0xf44(%rsp), %eax
movl %eax, 0x113c(%rsp)
movl %eax, 0x1138(%rsp)
movl %eax, 0x1134(%rsp)
movl %eax, 0x1130(%rsp)
movl 0x1134(%rsp), %edx
movl 0x1138(%rsp), %ecx
movl 0x113c(%rsp), %eax
movd 0x1130(%rsp), %xmm0
pinsrd $0x1, %edx, %xmm0
pinsrd $0x2, %ecx, %xmm0
pinsrd $0x3, %eax, %xmm0
movaps %xmm0, 0x1120(%rsp)
movaps 0x1120(%rsp), %xmm0
movaps %xmm0, 0x430(%rsp)
movq 0x4a0(%rsp), %rax
movl 0xe0(%rax), %eax
negl %eax
movl %eax, 0xf40(%rsp)
movl 0xf40(%rsp), %eax
movl %eax, 0x115c(%rsp)
movl %eax, 0x1158(%rsp)
movl %eax, 0x1154(%rsp)
movl %eax, 0x1150(%rsp)
movl 0x1154(%rsp), %edx
movl 0x1158(%rsp), %ecx
movl 0x115c(%rsp), %eax
movd 0x1150(%rsp), %xmm0
pinsrd $0x1, %edx, %xmm0
pinsrd $0x2, %ecx, %xmm0
pinsrd $0x3, %eax, %xmm0
movaps %xmm0, 0x1140(%rsp)
movaps 0x1140(%rsp), %xmm0
movaps %xmm0, 0x420(%rsp)
movq 0x4a0(%rsp), %rax
movl 0x20(%rax), %eax
negl %eax
movl %eax, 0xf3c(%rsp)
movl 0xf3c(%rsp), %eax
movl %eax, 0x117c(%rsp)
movl %eax, 0x1178(%rsp)
movl %eax, 0x1174(%rsp)
movl %eax, 0x1170(%rsp)
movl 0x1174(%rsp), %edx
movl 0x1178(%rsp), %ecx
movl 0x117c(%rsp), %eax
movd 0x1170(%rsp), %xmm0
pinsrd $0x1, %edx, %xmm0
pinsrd $0x2, %ecx, %xmm0
pinsrd $0x3, %eax, %xmm0
movaps %xmm0, 0x1160(%rsp)
movaps 0x1160(%rsp), %xmm0
movaps %xmm0, 0x410(%rsp)
movq 0x4a0(%rsp), %rax
movl 0x60(%rax), %eax
movl %eax, 0xf38(%rsp)
movl 0xf38(%rsp), %eax
movl %eax, 0x119c(%rsp)
movl %eax, 0x1198(%rsp)
movl %eax, 0x1194(%rsp)
movl %eax, 0x1190(%rsp)
movl 0x1194(%rsp), %edx
movl 0x1198(%rsp), %ecx
movl 0x119c(%rsp), %eax
movd 0x1190(%rsp), %xmm0
pinsrd $0x1, %edx, %xmm0
pinsrd $0x2, %ecx, %xmm0
pinsrd $0x3, %eax, %xmm0
movaps %xmm0, 0x1180(%rsp)
movaps 0x1180(%rsp), %xmm0
movaps %xmm0, 0x400(%rsp)
movq 0x4a0(%rsp), %rax
movl 0x60(%rax), %eax
negl %eax
movl %eax, 0xf34(%rsp)
movl 0xf34(%rsp), %eax
movl %eax, 0x11bc(%rsp)
movl %eax, 0x11b8(%rsp)
movl %eax, 0x11b4(%rsp)
movl %eax, 0x11b0(%rsp)
movl 0x11b4(%rsp), %edx
movl 0x11b8(%rsp), %ecx
movl 0x11bc(%rsp), %eax
movd 0x11b0(%rsp), %xmm0
pinsrd $0x1, %edx, %xmm0
pinsrd $0x2, %ecx, %xmm0
pinsrd $0x3, %eax, %xmm0
movaps %xmm0, 0x11a0(%rsp)
movaps 0x11a0(%rsp), %xmm0
movaps %xmm0, 0x3f0(%rsp)
movq 0x4a0(%rsp), %rax
movl 0xa0(%rax), %eax
negl %eax
movl %eax, 0xf30(%rsp)
movl 0xf30(%rsp), %eax
movl %eax, 0x11dc(%rsp)
movl %eax, 0x11d8(%rsp)
movl %eax, 0x11d4(%rsp)
movl %eax, 0x11d0(%rsp)
movl 0x11d4(%rsp), %edx
movl 0x11d8(%rsp), %ecx
movl 0x11dc(%rsp), %eax
movd 0x11d0(%rsp), %xmm0
pinsrd $0x1, %edx, %xmm0
pinsrd $0x2, %ecx, %xmm0
pinsrd $0x3, %eax, %xmm0
movaps %xmm0, 0x11c0(%rsp)
movaps 0x11c0(%rsp), %xmm0
movaps %xmm0, 0x3e0(%rsp)
movq 0x4a0(%rsp), %rax
movl 0xa0(%rax), %eax
movl %eax, 0xf2c(%rsp)
movl 0xf2c(%rsp), %eax
movl %eax, 0x11fc(%rsp)
movl %eax, 0x11f8(%rsp)
movl %eax, 0x11f4(%rsp)
movl %eax, 0x11f0(%rsp)
movl 0x11f4(%rsp), %edx
movl 0x11f8(%rsp), %ecx
movl 0x11fc(%rsp), %eax
movd 0x11f0(%rsp), %xmm0
pinsrd $0x1, %edx, %xmm0
pinsrd $0x2, %ecx, %xmm0
pinsrd $0x3, %eax, %xmm0
movaps %xmm0, 0x11e0(%rsp)
movaps 0x11e0(%rsp), %xmm0
movaps %xmm0, 0x3d0(%rsp)
movq 0x4a0(%rsp), %rax
movl 0x8(%rax), %eax
movl %eax, 0xf28(%rsp)
movl 0xf28(%rsp), %eax
movl %eax, 0x121c(%rsp)
movl %eax, 0x1218(%rsp)
movl %eax, 0x1214(%rsp)
movl %eax, 0x1210(%rsp)
movl 0x1214(%rsp), %edx
movl 0x1218(%rsp), %ecx
movl 0x121c(%rsp), %eax
movd 0x1210(%rsp), %xmm0
pinsrd $0x1, %edx, %xmm0
pinsrd $0x2, %ecx, %xmm0
pinsrd $0x3, %eax, %xmm0
movaps %xmm0, 0x1200(%rsp)
movaps 0x1200(%rsp), %xmm0
movaps %xmm0, 0x3c0(%rsp)
movq 0x4a0(%rsp), %rax
movl 0xf8(%rax), %eax
movl %eax, 0xf24(%rsp)
movl 0xf24(%rsp), %eax
movl %eax, 0x123c(%rsp)
movl %eax, 0x1238(%rsp)
movl %eax, 0x1234(%rsp)
movl %eax, 0x1230(%rsp)
movl 0x1234(%rsp), %edx
movl 0x1238(%rsp), %ecx
movl 0x123c(%rsp), %eax
movd 0x1230(%rsp), %xmm0
pinsrd $0x1, %edx, %xmm0
pinsrd $0x2, %ecx, %xmm0
pinsrd $0x3, %eax, %xmm0
movaps %xmm0, 0x1220(%rsp)
movaps 0x1220(%rsp), %xmm0
movaps %xmm0, 0x3b0(%rsp)
movq 0x4a0(%rsp), %rax
movl 0x8(%rax), %eax
negl %eax
movl %eax, 0xf20(%rsp)
movl 0xf20(%rsp), %eax
movl %eax, 0x125c(%rsp)
movl %eax, 0x1258(%rsp)
movl %eax, 0x1254(%rsp)
movl %eax, 0x1250(%rsp)
movl 0x1254(%rsp), %edx
movl 0x1258(%rsp), %ecx
movl 0x125c(%rsp), %eax
movd 0x1250(%rsp), %xmm0
pinsrd $0x1, %edx, %xmm0
pinsrd $0x2, %ecx, %xmm0
pinsrd $0x3, %eax, %xmm0
movaps %xmm0, 0x1240(%rsp)
movaps 0x1240(%rsp), %xmm0
movaps %xmm0, 0x3a0(%rsp)
movq 0x4a0(%rsp), %rax
movl 0x28(%rax), %eax
movl %eax, 0xf1c(%rsp)
movl 0xf1c(%rsp), %eax
movl %eax, 0x127c(%rsp)
movl %eax, 0x1278(%rsp)
movl %eax, 0x1274(%rsp)
movl %eax, 0x1270(%rsp)
movl 0x1274(%rsp), %edx
movl 0x1278(%rsp), %ecx
movl 0x127c(%rsp), %eax
movd 0x1270(%rsp), %xmm0
pinsrd $0x1, %edx, %xmm0
pinsrd $0x2, %ecx, %xmm0
pinsrd $0x3, %eax, %xmm0
movaps %xmm0, 0x1260(%rsp)
movaps 0x1260(%rsp), %xmm0
movaps %xmm0, 0x390(%rsp)
movq 0x4a0(%rsp), %rax
movl 0xd8(%rax), %eax
movl %eax, 0xf18(%rsp)
movl 0xf18(%rsp), %eax
movl %eax, 0x129c(%rsp)
movl %eax, 0x1298(%rsp)
movl %eax, 0x1294(%rsp)
movl %eax, 0x1290(%rsp)
movl 0x1294(%rsp), %edx
movl 0x1298(%rsp), %ecx
movl 0x129c(%rsp), %eax
movd 0x1290(%rsp), %xmm0
pinsrd $0x1, %edx, %xmm0
pinsrd $0x2, %ecx, %xmm0
pinsrd $0x3, %eax, %xmm0
movaps %xmm0, 0x1280(%rsp)
movaps 0x1280(%rsp), %xmm0
movaps %xmm0, 0x380(%rsp)
movq 0x4a0(%rsp), %rax
movl 0x28(%rax), %eax
negl %eax
movl %eax, 0xf14(%rsp)
movl 0xf14(%rsp), %eax
movl %eax, 0x12bc(%rsp)
movl %eax, 0x12b8(%rsp)
movl %eax, 0x12b4(%rsp)
movl %eax, 0x12b0(%rsp)
movl 0x12b4(%rsp), %edx
movl 0x12b8(%rsp), %ecx
movl 0x12bc(%rsp), %eax
movd 0x12b0(%rsp), %xmm0
pinsrd $0x1, %edx, %xmm0
pinsrd $0x2, %ecx, %xmm0
pinsrd $0x3, %eax, %xmm0
movaps %xmm0, 0x12a0(%rsp)
movaps 0x12a0(%rsp), %xmm0
movaps %xmm0, 0x370(%rsp)
movq 0x4a0(%rsp), %rax
movl 0x48(%rax), %eax
movl %eax, 0xf10(%rsp)
movl 0xf10(%rsp), %eax
movl %eax, 0x12dc(%rsp)
movl %eax, 0x12d8(%rsp)
movl %eax, 0x12d4(%rsp)
movl %eax, 0x12d0(%rsp)
movl 0x12d4(%rsp), %edx
movl 0x12d8(%rsp), %ecx
movl 0x12dc(%rsp), %eax
movd 0x12d0(%rsp), %xmm0
pinsrd $0x1, %edx, %xmm0
pinsrd $0x2, %ecx, %xmm0
pinsrd $0x3, %eax, %xmm0
movaps %xmm0, 0x12c0(%rsp)
movaps 0x12c0(%rsp), %xmm0
movaps %xmm0, 0x360(%rsp)
movq 0x4a0(%rsp), %rax
movl 0xb8(%rax), %eax
movl %eax, 0xf0c(%rsp)
movl 0xf0c(%rsp), %eax
movl %eax, 0x12fc(%rsp)
movl %eax, 0x12f8(%rsp)
movl %eax, 0x12f4(%rsp)
movl %eax, 0x12f0(%rsp)
movl 0x12f4(%rsp), %edx
movl 0x12f8(%rsp), %ecx
movl 0x12fc(%rsp), %eax
movd 0x12f0(%rsp), %xmm0
pinsrd $0x1, %edx, %xmm0
pinsrd $0x2, %ecx, %xmm0
pinsrd $0x3, %eax, %xmm0
movaps %xmm0, 0x12e0(%rsp)
movaps 0x12e0(%rsp), %xmm0
movaps %xmm0, 0x350(%rsp)
movq 0x4a0(%rsp), %rax
movl 0x48(%rax), %eax
negl %eax
movl %eax, 0xf08(%rsp)
movl 0xf08(%rsp), %eax
movl %eax, 0x131c(%rsp)
movl %eax, 0x1318(%rsp)
movl %eax, 0x1314(%rsp)
movl %eax, 0x1310(%rsp)
movl 0x1314(%rsp), %edx
movl 0x1318(%rsp), %ecx
movl 0x131c(%rsp), %eax
movd 0x1310(%rsp), %xmm0
pinsrd $0x1, %edx, %xmm0
pinsrd $0x2, %ecx, %xmm0
pinsrd $0x3, %eax, %xmm0
movaps %xmm0, 0x1300(%rsp)
movaps 0x1300(%rsp), %xmm0
movaps %xmm0, 0x340(%rsp)
movq 0x4a0(%rsp), %rax
movl 0x68(%rax), %eax
movl %eax, 0xf04(%rsp)
movl 0xf04(%rsp), %eax
movl %eax, 0x133c(%rsp)
movl %eax, 0x1338(%rsp)
movl %eax, 0x1334(%rsp)
movl %eax, 0x1330(%rsp)
movl 0x1334(%rsp), %edx
movl 0x1338(%rsp), %ecx
movl 0x133c(%rsp), %eax
movd 0x1330(%rsp), %xmm0
pinsrd $0x1, %edx, %xmm0
pinsrd $0x2, %ecx, %xmm0
pinsrd $0x3, %eax, %xmm0
movaps %xmm0, 0x1320(%rsp)
movaps 0x1320(%rsp), %xmm0
movaps %xmm0, 0x330(%rsp)
movq 0x4a0(%rsp), %rax
movl 0x98(%rax), %eax
movl %eax, 0xf00(%rsp)
movl 0xf00(%rsp), %eax
movl %eax, 0x135c(%rsp)
movl %eax, 0x1358(%rsp)
movl %eax, 0x1354(%rsp)
movl %eax, 0x1350(%rsp)
movl 0x1354(%rsp), %edx
movl 0x1358(%rsp), %ecx
movl 0x135c(%rsp), %eax
movd 0x1350(%rsp), %xmm0
pinsrd $0x1, %edx, %xmm0
pinsrd $0x2, %ecx, %xmm0
pinsrd $0x3, %eax, %xmm0
movaps %xmm0, 0x1340(%rsp)
movaps 0x1340(%rsp), %xmm0
movaps %xmm0, 0x320(%rsp)
movq 0x4a0(%rsp), %rax
movl 0x68(%rax), %eax
negl %eax
movl %eax, 0xefc(%rsp)
movl 0xefc(%rsp), %eax
movl %eax, 0x137c(%rsp)
movl %eax, 0x1378(%rsp)
movl %eax, 0x1374(%rsp)
movl %eax, 0x1370(%rsp)
movl 0x1374(%rsp), %edx
movl 0x1378(%rsp), %ecx
movl 0x137c(%rsp), %eax
movd 0x1370(%rsp), %xmm0
pinsrd $0x1, %edx, %xmm0
pinsrd $0x2, %ecx, %xmm0
pinsrd $0x3, %eax, %xmm0
movaps %xmm0, 0x1360(%rsp)
movaps 0x1360(%rsp), %xmm0
movaps %xmm0, 0x310(%rsp)
movq 0x4a0(%rsp), %rax
movl 0x88(%rax), %eax
movl %eax, 0xef8(%rsp)
movl 0xef8(%rsp), %eax
movl %eax, 0x139c(%rsp)
movl %eax, 0x1398(%rsp)
movl %eax, 0x1394(%rsp)
movl %eax, 0x1390(%rsp)
movl 0x1394(%rsp), %edx
movl 0x1398(%rsp), %ecx
movl 0x139c(%rsp), %eax
movd 0x1390(%rsp), %xmm0
pinsrd $0x1, %edx, %xmm0
pinsrd $0x2, %ecx, %xmm0
pinsrd $0x3, %eax, %xmm0
movaps %xmm0, 0x1380(%rsp)
movaps 0x1380(%rsp), %xmm0
movaps %xmm0, 0x300(%rsp)
movq 0x4a0(%rsp), %rax
movl 0x78(%rax), %eax
movl %eax, 0xef4(%rsp)
movl 0xef4(%rsp), %eax
movl %eax, 0x13bc(%rsp)
movl %eax, 0x13b8(%rsp)
movl %eax, 0x13b4(%rsp)
movl %eax, 0x13b0(%rsp)
movl 0x13b4(%rsp), %edx
movl 0x13b8(%rsp), %ecx
movl 0x13bc(%rsp), %eax
movd 0x13b0(%rsp), %xmm0
pinsrd $0x1, %edx, %xmm0
pinsrd $0x2, %ecx, %xmm0
pinsrd $0x3, %eax, %xmm0
movaps %xmm0, 0x13a0(%rsp)
movaps 0x13a0(%rsp), %xmm0
movaps %xmm0, 0x2f0(%rsp)
movq 0x4a0(%rsp), %rax
movl 0x88(%rax), %eax
negl %eax
movl %eax, 0xef0(%rsp)
movl 0xef0(%rsp), %eax
movl %eax, 0x13dc(%rsp)
movl %eax, 0x13d8(%rsp)
movl %eax, 0x13d4(%rsp)
movl %eax, 0x13d0(%rsp)
movl 0x13d4(%rsp), %edx
movl 0x13d8(%rsp), %ecx
movl 0x13dc(%rsp), %eax
movd 0x13d0(%rsp), %xmm0
pinsrd $0x1, %edx, %xmm0
pinsrd $0x2, %ecx, %xmm0
pinsrd $0x3, %eax, %xmm0
movaps %xmm0, 0x13c0(%rsp)
movaps 0x13c0(%rsp), %xmm0
movaps %xmm0, 0x2e0(%rsp)
movq 0x4a0(%rsp), %rax
movl 0xa8(%rax), %eax
movl %eax, 0xeec(%rsp)
movl 0xeec(%rsp), %eax
movl %eax, 0x13fc(%rsp)
movl %eax, 0x13f8(%rsp)
movl %eax, 0x13f4(%rsp)
movl %eax, 0x13f0(%rsp)
movl 0x13f4(%rsp), %edx
movl 0x13f8(%rsp), %ecx
movl 0x13fc(%rsp), %eax
movd 0x13f0(%rsp), %xmm0
pinsrd $0x1, %edx, %xmm0
pinsrd $0x2, %ecx, %xmm0
pinsrd $0x3, %eax, %xmm0
movaps %xmm0, 0x13e0(%rsp)
movaps 0x13e0(%rsp), %xmm0
movaps %xmm0, 0x2d0(%rsp)
movq 0x4a0(%rsp), %rax
movl 0x58(%rax), %eax
movl %eax, 0xee8(%rsp)
movl 0xee8(%rsp), %eax
movl %eax, 0x141c(%rsp)
movl %eax, 0x1418(%rsp)
movl %eax, 0x1414(%rsp)
movl %eax, 0x1410(%rsp)
movl 0x1414(%rsp), %edx
movl 0x1418(%rsp), %ecx
movl 0x141c(%rsp), %eax
movd 0x1410(%rsp), %xmm0
pinsrd $0x1, %edx, %xmm0
pinsrd $0x2, %ecx, %xmm0
pinsrd $0x3, %eax, %xmm0
movaps %xmm0, 0x1400(%rsp)
movaps 0x1400(%rsp), %xmm0
movaps %xmm0, 0x2c0(%rsp)
movq 0x4a0(%rsp), %rax
movl 0xa8(%rax), %eax
negl %eax
movl %eax, 0xee4(%rsp)
movl 0xee4(%rsp), %eax
movl %eax, 0x143c(%rsp)
movl %eax, 0x1438(%rsp)
movl %eax, 0x1434(%rsp)
movl %eax, 0x1430(%rsp)
movl 0x1434(%rsp), %edx
movl 0x1438(%rsp), %ecx
movl 0x143c(%rsp), %eax
movd 0x1430(%rsp), %xmm0
pinsrd $0x1, %edx, %xmm0
pinsrd $0x2, %ecx, %xmm0
pinsrd $0x3, %eax, %xmm0
movaps %xmm0, 0x1420(%rsp)
movaps 0x1420(%rsp), %xmm0
movaps %xmm0, 0x2b0(%rsp)
movq 0x4a0(%rsp), %rax
movl 0xc8(%rax), %eax
movl %eax, 0xee0(%rsp)
movl 0xee0(%rsp), %eax
movl %eax, 0x145c(%rsp)
movl %eax, 0x1458(%rsp)
movl %eax, 0x1454(%rsp)
movl %eax, 0x1450(%rsp)
movl 0x1454(%rsp), %edx
movl 0x1458(%rsp), %ecx
movl 0x145c(%rsp), %eax
movd 0x1450(%rsp), %xmm0
pinsrd $0x1, %edx, %xmm0
pinsrd $0x2, %ecx, %xmm0
pinsrd $0x3, %eax, %xmm0
movaps %xmm0, 0x1440(%rsp)
movaps 0x1440(%rsp), %xmm0
movaps %xmm0, 0x2a0(%rsp)
movq 0x4a0(%rsp), %rax
movl 0x38(%rax), %eax
movl %eax, 0xedc(%rsp)
movl 0xedc(%rsp), %eax
movl %eax, 0x147c(%rsp)
movl %eax, 0x1478(%rsp)
movl %eax, 0x1474(%rsp)
movl %eax, 0x1470(%rsp)
movl 0x1474(%rsp), %edx
movl 0x1478(%rsp), %ecx
movl 0x147c(%rsp), %eax
movd 0x1470(%rsp), %xmm0
pinsrd $0x1, %edx, %xmm0
pinsrd $0x2, %ecx, %xmm0
pinsrd $0x3, %eax, %xmm0
movaps %xmm0, 0x1460(%rsp)
movaps 0x1460(%rsp), %xmm0
movaps %xmm0, 0x290(%rsp)
movq 0x4a0(%rsp), %rax
movl 0xc8(%rax), %eax
negl %eax
movl %eax, 0xed8(%rsp)
movl 0xed8(%rsp), %eax
movl %eax, 0x149c(%rsp)
movl %eax, 0x1498(%rsp)
movl %eax, 0x1494(%rsp)
movl %eax, 0x1490(%rsp)
movl 0x1494(%rsp), %edx
movl 0x1498(%rsp), %ecx
movl 0x149c(%rsp), %eax
movd 0x1490(%rsp), %xmm0
pinsrd $0x1, %edx, %xmm0
pinsrd $0x2, %ecx, %xmm0
pinsrd $0x3, %eax, %xmm0
movaps %xmm0, 0x1480(%rsp)
movaps 0x1480(%rsp), %xmm0
movaps %xmm0, 0x280(%rsp)
movq 0x4a0(%rsp), %rax
movl 0xe8(%rax), %eax
movl %eax, 0xed4(%rsp)
movl 0xed4(%rsp), %eax
movl %eax, 0x14bc(%rsp)
movl %eax, 0x14b8(%rsp)
movl %eax, 0x14b4(%rsp)
movl %eax, 0x14b0(%rsp)
movl 0x14b4(%rsp), %edx
movl 0x14b8(%rsp), %ecx
movl 0x14bc(%rsp), %eax
movd 0x14b0(%rsp), %xmm0
pinsrd $0x1, %edx, %xmm0
pinsrd $0x2, %ecx, %xmm0
pinsrd $0x3, %eax, %xmm0
movaps %xmm0, 0x14a0(%rsp)
movaps 0x14a0(%rsp), %xmm0
movaps %xmm0, 0x270(%rsp)
movq 0x4a0(%rsp), %rax
movl 0x18(%rax), %eax
movl %eax, 0xed0(%rsp)
movl 0xed0(%rsp), %eax
movl %eax, 0x14dc(%rsp)
movl %eax, 0x14d8(%rsp)
movl %eax, 0x14d4(%rsp)
movl %eax, 0x14d0(%rsp)
movl 0x14d4(%rsp), %edx
movl 0x14d8(%rsp), %ecx
movl 0x14dc(%rsp), %eax
movd 0x14d0(%rsp), %xmm0
pinsrd $0x1, %edx, %xmm0
pinsrd $0x2, %ecx, %xmm0
pinsrd $0x3, %eax, %xmm0
movaps %xmm0, 0x14c0(%rsp)
movaps 0x14c0(%rsp), %xmm0
movaps %xmm0, 0x260(%rsp)
movq 0x4a0(%rsp), %rax
movl 0xe8(%rax), %eax
negl %eax
movl %eax, 0xecc(%rsp)
movl 0xecc(%rsp), %eax
movl %eax, 0x14fc(%rsp)
movl %eax, 0x14f8(%rsp)
movl %eax, 0x14f4(%rsp)
movl %eax, 0x14f0(%rsp)
movl 0x14f4(%rsp), %edx
movl 0x14f8(%rsp), %ecx
movl 0x14fc(%rsp), %eax
movd 0x14f0(%rsp), %xmm0
pinsrd $0x1, %edx, %xmm0
pinsrd $0x2, %ecx, %xmm0
pinsrd $0x3, %eax, %xmm0
movaps %xmm0, 0x14e0(%rsp)
movaps 0x14e0(%rsp), %xmm0
movaps %xmm0, 0x250(%rsp)
movb 0x4ac(%rsp), %cl
decb %cl
movl $0x1, %eax
shll %cl, %eax
movl %eax, 0xec8(%rsp)
movl 0xec8(%rsp), %eax
movl %eax, 0x151c(%rsp)
movl %eax, 0x1518(%rsp)
movl %eax, 0x1514(%rsp)
movl %eax, 0x1510(%rsp)
movl 0x1514(%rsp), %edx
movl 0x1518(%rsp), %ecx
movl 0x151c(%rsp), %eax
movd 0x1510(%rsp), %xmm0
pinsrd $0x1, %edx, %xmm0
pinsrd $0x2, %ecx, %xmm0
pinsrd $0x3, %eax, %xmm0
movaps %xmm0, 0x1500(%rsp)
movaps 0x1500(%rsp), %xmm0
movaps %xmm0, 0x240(%rsp)
xorps %xmm0, %xmm0
movaps %xmm0, 0x1520(%rsp)
movdqa 0x1520(%rsp), %xmm0
movdqa %xmm0, 0x230(%rsp)
movl $0x0, 0xc(%rsp)
movl 0xc(%rsp), %eax
cmpl 0x4a8(%rsp), %eax
jge 0x800870
movq 0x4b8(%rsp), %rax
imull $0x0, 0x4a8(%rsp), %ecx
addl 0xc(%rsp), %ecx
movslq %ecx, %rcx
shlq $0x4, %rcx
addq %rcx, %rax
movdqa (%rax), %xmm0
movdqa %xmm0, 0x130(%rsp)
movdqa 0x230(%rsp), %xmm1
movq 0x4b8(%rsp), %rax
imull $0xf, 0x4a8(%rsp), %ecx
addl 0xc(%rsp), %ecx
movslq %ecx, %rcx
shlq $0x4, %rcx
addq %rcx, %rax
movdqa (%rax), %xmm0
movdqa %xmm1, 0xdb0(%rsp)
movdqa %xmm0, 0xda0(%rsp)
movdqa 0xdb0(%rsp), %xmm0
movdqa 0xda0(%rsp), %xmm1
psubd %xmm1, %xmm0
movdqa %xmm0, 0x140(%rsp)
movdqa 0x230(%rsp), %xmm1
movq 0x4b8(%rsp), %rax
imull $0x7, 0x4a8(%rsp), %ecx
addl 0xc(%rsp), %ecx
movslq %ecx, %rcx
shlq $0x4, %rcx
addq %rcx, %rax
movdqa (%rax), %xmm0
movdqa %xmm1, 0xd90(%rsp)
movdqa %xmm0, 0xd80(%rsp)
movdqa 0xd90(%rsp), %xmm0
movdqa 0xd80(%rsp), %xmm1
psubd %xmm1, %xmm0
movdqa %xmm0, 0x150(%rsp)
movq 0x4b8(%rsp), %rax
movl 0x4a8(%rsp), %ecx
shll $0x3, %ecx
addl 0xc(%rsp), %ecx
movslq %ecx, %rcx
shlq $0x4, %rcx
addq %rcx, %rax
movdqa (%rax), %xmm0
movdqa %xmm0, 0x160(%rsp)
movdqa 0x230(%rsp), %xmm1
movq 0x4b8(%rsp), %rax
imull $0x3, 0x4a8(%rsp), %ecx
addl 0xc(%rsp), %ecx
movslq %ecx, %rcx
shlq $0x4, %rcx
addq %rcx, %rax
movdqa (%rax), %xmm0
movdqa %xmm1, 0xd70(%rsp)
movdqa %xmm0, 0xd60(%rsp)
movdqa 0xd70(%rsp), %xmm0
movdqa 0xd60(%rsp), %xmm1
psubd %xmm1, %xmm0
movdqa %xmm0, 0x170(%rsp)
movq 0x4b8(%rsp), %rax
imull $0xc, 0x4a8(%rsp), %ecx
addl 0xc(%rsp), %ecx
movslq %ecx, %rcx
shlq $0x4, %rcx
addq %rcx, %rax
movdqa (%rax), %xmm0
movdqa %xmm0, 0x180(%rsp)
movq 0x4b8(%rsp), %rax
movl 0x4a8(%rsp), %ecx
shll $0x2, %ecx
addl 0xc(%rsp), %ecx
movslq %ecx, %rcx
shlq $0x4, %rcx
addq %rcx, %rax
movdqa (%rax), %xmm0
movdqa %xmm0, 0x190(%rsp)
movdqa 0x230(%rsp), %xmm1
movq 0x4b8(%rsp), %rax
imull $0xb, 0x4a8(%rsp), %ecx
addl 0xc(%rsp), %ecx
movslq %ecx, %rcx
shlq $0x4, %rcx
addq %rcx, %rax
movdqa (%rax), %xmm0
movdqa %xmm1, 0xd50(%rsp)
movdqa %xmm0, 0xd40(%rsp)
movdqa 0xd50(%rsp), %xmm0
movdqa 0xd40(%rsp), %xmm1
psubd %xmm1, %xmm0
movdqa %xmm0, 0x1a0(%rsp)
movdqa 0x230(%rsp), %xmm1
movq 0x4b8(%rsp), %rax
movl 0x4a8(%rsp), %ecx
shll $0x0, %ecx
addl 0xc(%rsp), %ecx
movslq %ecx, %rcx
shlq $0x4, %rcx
addq %rcx, %rax
movdqa (%rax), %xmm0
movdqa %xmm1, 0xd30(%rsp)
movdqa %xmm0, 0xd20(%rsp)
movdqa 0xd30(%rsp), %xmm0
movdqa 0xd20(%rsp), %xmm1
psubd %xmm1, %xmm0
movdqa %xmm0, 0x1b0(%rsp)
movq 0x4b8(%rsp), %rax
imull $0xe, 0x4a8(%rsp), %ecx
addl 0xc(%rsp), %ecx
movslq %ecx, %rcx
shlq $0x4, %rcx
addq %rcx, %rax
movdqa (%rax), %xmm0
movdqa %xmm0, 0x1c0(%rsp)
movq 0x4b8(%rsp), %rax
imull $0x6, 0x4a8(%rsp), %ecx
addl 0xc(%rsp), %ecx
movslq %ecx, %rcx
shlq $0x4, %rcx
addq %rcx, %rax
movdqa (%rax), %xmm0
movdqa %xmm0, 0x1d0(%rsp)
movdqa 0x230(%rsp), %xmm1
movq 0x4b8(%rsp), %rax
imull $0x9, 0x4a8(%rsp), %ecx
addl 0xc(%rsp), %ecx
movslq %ecx, %rcx
shlq $0x4, %rcx
addq %rcx, %rax
movdqa (%rax), %xmm0
movdqa %xmm1, 0xd10(%rsp)
movdqa %xmm0, 0xd00(%rsp)
movdqa 0xd10(%rsp), %xmm0
movdqa 0xd00(%rsp), %xmm1
psubd %xmm1, %xmm0
movdqa %xmm0, 0x1e0(%rsp)
movq 0x4b8(%rsp), %rax
movl 0x4a8(%rsp), %ecx
shll %ecx
addl 0xc(%rsp), %ecx
movslq %ecx, %rcx
shlq $0x4, %rcx
addq %rcx, %rax
movdqa (%rax), %xmm0
movdqa %xmm0, 0x1f0(%rsp)
movdqa 0x230(%rsp), %xmm1
movq 0x4b8(%rsp), %rax
imull $0xd, 0x4a8(%rsp), %ecx
addl 0xc(%rsp), %ecx
movslq %ecx, %rcx
shlq $0x4, %rcx
addq %rcx, %rax
movdqa (%rax), %xmm0
movdqa %xmm1, 0xcf0(%rsp)
movdqa %xmm0, 0xce0(%rsp)
movdqa 0xcf0(%rsp), %xmm0
movdqa 0xce0(%rsp), %xmm1
psubd %xmm1, %xmm0
movdqa %xmm0, 0x200(%rsp)
movdqa 0x230(%rsp), %xmm1
movq 0x4b8(%rsp), %rax
imull $0x5, 0x4a8(%rsp), %ecx
addl 0xc(%rsp), %ecx
movslq %ecx, %rcx
shlq $0x4, %rcx
addq %rcx, %rax
movdqa (%rax), %xmm0
movdqa %xmm1, 0xcd0(%rsp)
movdqa %xmm0, 0xcc0(%rsp)
movdqa 0xcd0(%rsp), %xmm0
movdqa 0xcc0(%rsp), %xmm1
psubd %xmm1, %xmm0
movdqa %xmm0, 0x210(%rsp)
movq 0x4b8(%rsp), %rax
imull $0xa, 0x4a8(%rsp), %ecx
addl 0xc(%rsp), %ecx
movslq %ecx, %rcx
shlq $0x4, %rcx
addq %rcx, %rax
movdqa (%rax), %xmm0
movdqa %xmm0, 0x220(%rsp)
movdqa 0x130(%rsp), %xmm0
movdqa %xmm0, 0x30(%rsp)
movdqa 0x140(%rsp), %xmm0
movdqa %xmm0, 0x40(%rsp)
movdqa 0x150(%rsp), %xmm1
movdqa 0x490(%rsp), %xmm0
movdqa %xmm1, 0x1050(%rsp)
movdqa %xmm0, 0x1040(%rsp)
movdqa 0x1050(%rsp), %xmm0
movdqa 0x1040(%rsp), %xmm1
pmulld %xmm1, %xmm0
movdqa %xmm0, 0x20(%rsp)
movdqa 0x160(%rsp), %xmm1
movdqa 0x490(%rsp), %xmm0
movdqa %xmm1, 0x1030(%rsp)
movdqa %xmm0, 0x1020(%rsp)
movdqa 0x1030(%rsp), %xmm0
movdqa 0x1020(%rsp), %xmm1
pmulld %xmm1, %xmm0
movdqa %xmm0, 0x10(%rsp)
movdqa 0x20(%rsp), %xmm1
movdqa 0x10(%rsp), %xmm0
movdqa %xmm1, 0x930(%rsp)
movdqa %xmm0, 0x920(%rsp)
movdqa 0x930(%rsp), %xmm0
movdqa 0x920(%rsp), %xmm1
paddd %xmm1, %xmm0
movdqa %xmm0, 0x50(%rsp)
movdqa 0x50(%rsp), %xmm1
movdqa 0x240(%rsp), %xmm0
movdqa %xmm1, 0x910(%rsp)
movdqa %xmm0, 0x900(%rsp)
movdqa 0x910(%rsp), %xmm0
movdqa 0x900(%rsp), %xmm1
paddd %xmm1, %xmm0
movdqa %xmm0, 0x50(%rsp)
movdqa 0x50(%rsp), %xmm0
movl 0x4ac(%rsp), %eax
movdqa %xmm0, 0xeb0(%rsp)
movl %eax, 0xeac(%rsp)
movdqa 0xeb0(%rsp), %xmm0
movl 0xeac(%rsp), %eax
movd %eax, %xmm1
psrad %xmm1, %xmm0
movdqa %xmm0, 0x50(%rsp)
movdqa 0x20(%rsp), %xmm1
movdqa 0x10(%rsp), %xmm0
movdqa %xmm1, 0xcb0(%rsp)
movdqa %xmm0, 0xca0(%rsp)
movdqa 0xcb0(%rsp), %xmm0
movdqa 0xca0(%rsp), %xmm1
psubd %xmm1, %xmm0
movdqa %xmm0, 0x60(%rsp)
movdqa 0x60(%rsp), %xmm1
movdqa 0x240(%rsp), %xmm0
movdqa %xmm1, 0x8f0(%rsp)
movdqa %xmm0, 0x8e0(%rsp)
movdqa 0x8f0(%rsp), %xmm0
movdqa 0x8e0(%rsp), %xmm1
paddd %xmm1, %xmm0
movdqa %xmm0, 0x60(%rsp)
movdqa 0x60(%rsp), %xmm0
movl 0x4ac(%rsp), %eax
movdqa %xmm0, 0xe90(%rsp)
movl %eax, 0xe8c(%rsp)
movdqa 0xe90(%rsp), %xmm0
movl 0xe8c(%rsp), %eax
movd %eax, %xmm1
psrad %xmm1, %xmm0
movdqa %xmm0, 0x60(%rsp)
movdqa 0x170(%rsp), %xmm0
movdqa %xmm0, 0x70(%rsp)
movdqa 0x180(%rsp), %xmm0
movdqa %xmm0, 0x80(%rsp)
movdqa 0x190(%rsp), %xmm1
movdqa 0x490(%rsp), %xmm0
movdqa %xmm1, 0x1010(%rsp)
movdqa %xmm0, 0x1000(%rsp)
movdqa 0x1010(%rsp), %xmm0
movdqa 0x1000(%rsp), %xmm1
pmulld %xmm1, %xmm0
movdqa %xmm0, 0x20(%rsp)
movdqa 0x1a0(%rsp), %xmm1
movdqa 0x490(%rsp), %xmm0
movdqa %xmm1, 0xff0(%rsp)
movdqa %xmm0, 0xfe0(%rsp)
movdqa 0xff0(%rsp), %xmm0
movdqa 0xfe0(%rsp), %xmm1
pmulld %xmm1, %xmm0
movdqa %xmm0, 0x10(%rsp)
movdqa 0x20(%rsp), %xmm1
movdqa 0x10(%rsp), %xmm0
movdqa %xmm1, 0x8d0(%rsp)
movdqa %xmm0, 0x8c0(%rsp)
movdqa 0x8d0(%rsp), %xmm0
movdqa 0x8c0(%rsp), %xmm1
paddd %xmm1, %xmm0
movdqa %xmm0, 0x90(%rsp)
movdqa 0x90(%rsp), %xmm1
movdqa 0x240(%rsp), %xmm0
movdqa %xmm1, 0x8b0(%rsp)
movdqa %xmm0, 0x8a0(%rsp)
movdqa 0x8b0(%rsp), %xmm0
movdqa 0x8a0(%rsp), %xmm1
paddd %xmm1, %xmm0
movdqa %xmm0, 0x90(%rsp)
movdqa 0x90(%rsp), %xmm0
movl 0x4ac(%rsp), %eax
movdqa %xmm0, 0xe70(%rsp)
movl %eax, 0xe6c(%rsp)
movdqa 0xe70(%rsp), %xmm0
movl 0xe6c(%rsp), %eax
movd %eax, %xmm1
psrad %xmm1, %xmm0
movdqa %xmm0, 0x90(%rsp)
movdqa 0x20(%rsp), %xmm1
movdqa 0x10(%rsp), %xmm0
movdqa %xmm1, 0xc90(%rsp)
movdqa %xmm0, 0xc80(%rsp)
movdqa 0xc90(%rsp), %xmm0
movdqa 0xc80(%rsp), %xmm1
psubd %xmm1, %xmm0
movdqa %xmm0, 0xa0(%rsp)
movdqa 0xa0(%rsp), %xmm1
movdqa 0x240(%rsp), %xmm0
movdqa %xmm1, 0x890(%rsp)
movdqa %xmm0, 0x880(%rsp)
movdqa 0x890(%rsp), %xmm0
movdqa 0x880(%rsp), %xmm1
paddd %xmm1, %xmm0
movdqa %xmm0, 0xa0(%rsp)
movdqa 0xa0(%rsp), %xmm0
movl 0x4ac(%rsp), %eax
movdqa %xmm0, 0xe50(%rsp)
movl %eax, 0xe4c(%rsp)
movdqa 0xe50(%rsp), %xmm0
movl 0xe4c(%rsp), %eax
movd %eax, %xmm1
psrad %xmm1, %xmm0
movdqa %xmm0, 0xa0(%rsp)
movdqa 0x1b0(%rsp), %xmm0
movdqa %xmm0, 0xb0(%rsp)
movdqa 0x1c0(%rsp), %xmm0
movdqa %xmm0, 0xc0(%rsp)
movdqa 0x1d0(%rsp), %xmm1
movdqa 0x490(%rsp), %xmm0
movdqa %xmm1, 0xfd0(%rsp)
movdqa %xmm0, 0xfc0(%rsp)
movdqa 0xfd0(%rsp), %xmm0
movdqa 0xfc0(%rsp), %xmm1
pmulld %xmm1, %xmm0
movdqa %xmm0, 0x20(%rsp)
movdqa 0x1e0(%rsp), %xmm1
movdqa 0x490(%rsp), %xmm0
movdqa %xmm1, 0xfb0(%rsp)
movdqa %xmm0, 0xfa0(%rsp)
movdqa 0xfb0(%rsp), %xmm0
movdqa 0xfa0(%rsp), %xmm1
pmulld %xmm1, %xmm0
movdqa %xmm0, 0x10(%rsp)
movdqa 0x20(%rsp), %xmm1
movdqa 0x10(%rsp), %xmm0
movdqa %xmm1, 0x870(%rsp)
movdqa %xmm0, 0x860(%rsp)
movdqa 0x870(%rsp), %xmm0
movdqa 0x860(%rsp), %xmm1
paddd %xmm1, %xmm0
movdqa %xmm0, 0xd0(%rsp)
movdqa 0xd0(%rsp), %xmm1
movdqa 0x240(%rsp), %xmm0
movdqa %xmm1, 0x850(%rsp)
movdqa %xmm0, 0x840(%rsp)
movdqa 0x850(%rsp), %xmm0
movdqa 0x840(%rsp), %xmm1
paddd %xmm1, %xmm0
movdqa %xmm0, 0xd0(%rsp)
movdqa 0xd0(%rsp), %xmm0
movl 0x4ac(%rsp), %eax
movdqa %xmm0, 0xe30(%rsp)
movl %eax, 0xe2c(%rsp)
movdqa 0xe30(%rsp), %xmm0
movl 0xe2c(%rsp), %eax
movd %eax, %xmm1
psrad %xmm1, %xmm0
movdqa %xmm0, 0xd0(%rsp)
movdqa 0x20(%rsp), %xmm1
movdqa 0x10(%rsp), %xmm0
movdqa %xmm1, 0xc70(%rsp)
movdqa %xmm0, 0xc60(%rsp)
movdqa 0xc70(%rsp), %xmm0
movdqa 0xc60(%rsp), %xmm1
psubd %xmm1, %xmm0
movdqa %xmm0, 0xe0(%rsp)
movdqa 0xe0(%rsp), %xmm1
movdqa 0x240(%rsp), %xmm0
movdqa %xmm1, 0x830(%rsp)
movdqa %xmm0, 0x820(%rsp)
movdqa 0x830(%rsp), %xmm0
movdqa 0x820(%rsp), %xmm1
paddd %xmm1, %xmm0
movdqa %xmm0, 0xe0(%rsp)
movdqa 0xe0(%rsp), %xmm0
movl 0x4ac(%rsp), %eax
movdqa %xmm0, 0xe10(%rsp)
movl %eax, 0xe0c(%rsp)
movdqa 0xe10(%rsp), %xmm0
movl 0xe0c(%rsp), %eax
movd %eax, %xmm1
psrad %xmm1, %xmm0
movdqa %xmm0, 0xe0(%rsp)
movdqa 0x1f0(%rsp), %xmm0
movdqa %xmm0, 0xf0(%rsp)
movdqa 0x200(%rsp), %xmm0
movdqa %xmm0, 0x100(%rsp)
movdqa 0x210(%rsp), %xmm1
movdqa 0x490(%rsp), %xmm0
movdqa %xmm1, 0xf90(%rsp)
movdqa %xmm0, 0xf80(%rsp)
movdqa 0xf90(%rsp), %xmm0
movdqa 0xf80(%rsp), %xmm1
pmulld %xmm1, %xmm0
movdqa %xmm0, 0x20(%rsp)
movdqa 0x220(%rsp), %xmm1
movdqa 0x490(%rsp), %xmm0
movdqa %xmm1, 0xf70(%rsp)
movdqa %xmm0, 0xf60(%rsp)
movdqa 0xf70(%rsp), %xmm0
movdqa 0xf60(%rsp), %xmm1
pmulld %xmm1, %xmm0
movdqa %xmm0, 0x10(%rsp)
movdqa 0x20(%rsp), %xmm1
movdqa 0x10(%rsp), %xmm0
movdqa %xmm1, 0x810(%rsp)
movdqa %xmm0, 0x800(%rsp)
movdqa 0x810(%rsp), %xmm0
movdqa 0x800(%rsp), %xmm1
paddd %xmm1, %xmm0
movdqa %xmm0, 0x110(%rsp)
movdqa 0x110(%rsp), %xmm1
movdqa 0x240(%rsp), %xmm0
movdqa %xmm1, 0x7f0(%rsp)
movdqa %xmm0, 0x7e0(%rsp)
movdqa 0x7f0(%rsp), %xmm0
movdqa 0x7e0(%rsp), %xmm1
paddd %xmm1, %xmm0
movdqa %xmm0, 0x110(%rsp)
movdqa 0x110(%rsp), %xmm0
movl 0x4ac(%rsp), %eax
movdqa %xmm0, 0xdf0(%rsp)
movl %eax, 0xdec(%rsp)
movdqa 0xdf0(%rsp), %xmm0
movl 0xdec(%rsp), %eax
movd %eax, %xmm1
psrad %xmm1, %xmm0
movdqa %xmm0, 0x110(%rsp)
movdqa 0x20(%rsp), %xmm1
movdqa 0x10(%rsp), %xmm0
movdqa %xmm1, 0xc50(%rsp)
movdqa %xmm0, 0xc40(%rsp)
movdqa 0xc50(%rsp), %xmm0
movdqa 0xc40(%rsp), %xmm1
psubd %xmm1, %xmm0
movdqa %xmm0, 0x120(%rsp)
movdqa 0x120(%rsp), %xmm1
movdqa 0x240(%rsp), %xmm0
movdqa %xmm1, 0x7d0(%rsp)
movdqa %xmm0, 0x7c0(%rsp)
movdqa 0x7d0(%rsp), %xmm0
movdqa 0x7c0(%rsp), %xmm1
paddd %xmm1, %xmm0
movdqa %xmm0, 0x120(%rsp)
movdqa 0x120(%rsp), %xmm0
movl 0x4ac(%rsp), %eax
movdqa %xmm0, 0xdd0(%rsp)
movl %eax, 0xdcc(%rsp)
movdqa 0xdd0(%rsp), %xmm0
movl 0xdcc(%rsp), %eax
movd %eax, %xmm1
psrad %xmm1, %xmm0
movdqa %xmm0, 0x120(%rsp)
movdqa 0x30(%rsp), %xmm1
movdqa 0x50(%rsp), %xmm0
movdqa %xmm1, 0x7b0(%rsp)
movdqa %xmm0, 0x7a0(%rsp)
movdqa 0x7b0(%rsp), %xmm0
movdqa 0x7a0(%rsp), %xmm1
paddd %xmm1, %xmm0
movdqa %xmm0, 0x130(%rsp)
movdqa 0x40(%rsp), %xmm1
movdqa 0x60(%rsp), %xmm0
movdqa %xmm1, 0x790(%rsp)
movdqa %xmm0, 0x780(%rsp)
movdqa 0x790(%rsp), %xmm0
movdqa 0x780(%rsp), %xmm1
paddd %xmm1, %xmm0
movdqa %xmm0, 0x140(%rsp)
movdqa 0x30(%rsp), %xmm1
movdqa 0x50(%rsp), %xmm0
movdqa %xmm1, 0xc30(%rsp)
movdqa %xmm0, 0xc20(%rsp)
movdqa 0xc30(%rsp), %xmm0
movdqa 0xc20(%rsp), %xmm1
psubd %xmm1, %xmm0
movdqa %xmm0, 0x150(%rsp)
movdqa 0x40(%rsp), %xmm1
movdqa 0x60(%rsp), %xmm0
movdqa %xmm1, 0xc10(%rsp)
movdqa %xmm0, 0xc00(%rsp)
movdqa 0xc10(%rsp), %xmm0
movdqa 0xc00(%rsp), %xmm1
psubd %xmm1, %xmm0
movdqa %xmm0, 0x160(%rsp)
movdqa 0x70(%rsp), %xmm1
movdqa 0x90(%rsp), %xmm0
movdqa %xmm1, 0x770(%rsp)
movdqa %xmm0, 0x760(%rsp)
movdqa 0x770(%rsp), %xmm0
movdqa 0x760(%rsp), %xmm1
paddd %xmm1, %xmm0
movdqa %xmm0, 0x170(%rsp)
movdqa 0x80(%rsp), %xmm1
movdqa 0xa0(%rsp), %xmm0
movdqa %xmm1, 0x750(%rsp)
movdqa %xmm0, 0x740(%rsp)
movdqa 0x750(%rsp), %xmm0
movdqa 0x740(%rsp), %xmm1
paddd %xmm1, %xmm0
movdqa %xmm0, 0x180(%rsp)
movdqa 0x70(%rsp), %xmm1
movdqa 0x90(%rsp), %xmm0
movdqa %xmm1, 0xbf0(%rsp)
movdqa %xmm0, 0xbe0(%rsp)
movdqa 0xbf0(%rsp), %xmm0
movdqa 0xbe0(%rsp), %xmm1
psubd %xmm1, %xmm0
movdqa %xmm0, 0x190(%rsp)
movdqa 0x80(%rsp), %xmm1
movdqa 0xa0(%rsp), %xmm0
movdqa %xmm1, 0xbd0(%rsp)
movdqa %xmm0, 0xbc0(%rsp)
movdqa 0xbd0(%rsp), %xmm0
movdqa 0xbc0(%rsp), %xmm1
psubd %xmm1, %xmm0
movdqa %xmm0, 0x1a0(%rsp)
movdqa 0xb0(%rsp), %xmm1
movdqa 0xd0(%rsp), %xmm0
movdqa %xmm1, 0x730(%rsp)
movdqa %xmm0, 0x720(%rsp)
movdqa 0x730(%rsp), %xmm0
movdqa 0x720(%rsp), %xmm1
paddd %xmm1, %xmm0
movdqa %xmm0, 0x1b0(%rsp)
movdqa 0xc0(%rsp), %xmm1
movdqa 0xe0(%rsp), %xmm0
movdqa %xmm1, 0x710(%rsp)
movdqa %xmm0, 0x700(%rsp)
movdqa 0x710(%rsp), %xmm0
movdqa 0x700(%rsp), %xmm1
paddd %xmm1, %xmm0
movdqa %xmm0, 0x1c0(%rsp)
movdqa 0xb0(%rsp), %xmm1
movdqa 0xd0(%rsp), %xmm0
movdqa %xmm1, 0xbb0(%rsp)
movdqa %xmm0, 0xba0(%rsp)
movdqa 0xbb0(%rsp), %xmm0
movdqa 0xba0(%rsp), %xmm1
psubd %xmm1, %xmm0
movdqa %xmm0, 0x1d0(%rsp)
movdqa 0xc0(%rsp), %xmm1
movdqa 0xe0(%rsp), %xmm0
movdqa %xmm1, 0xb90(%rsp)
movdqa %xmm0, 0xb80(%rsp)
movdqa 0xb90(%rsp), %xmm0
movdqa 0xb80(%rsp), %xmm1
psubd %xmm1, %xmm0
movdqa %xmm0, 0x1e0(%rsp)
movdqa 0xf0(%rsp), %xmm1
movdqa 0x110(%rsp), %xmm0
movdqa %xmm1, 0x6f0(%rsp)
movdqa %xmm0, 0x6e0(%rsp)
movdqa 0x6f0(%rsp), %xmm0
movdqa 0x6e0(%rsp), %xmm1
paddd %xmm1, %xmm0
movdqa %xmm0, 0x1f0(%rsp)
movdqa 0x100(%rsp), %xmm1
movdqa 0x120(%rsp), %xmm0
movdqa %xmm1, 0x6d0(%rsp)
movdqa %xmm0, 0x6c0(%rsp)
movdqa 0x6d0(%rsp), %xmm0
movdqa 0x6c0(%rsp), %xmm1
paddd %xmm1, %xmm0
movdqa %xmm0, 0x200(%rsp)
movdqa 0xf0(%rsp), %xmm1
movdqa 0x110(%rsp), %xmm0
movdqa %xmm1, 0xb70(%rsp)
movdqa %xmm0, 0xb60(%rsp)
movdqa 0xb70(%rsp), %xmm0
movdqa 0xb60(%rsp), %xmm1
psubd %xmm1, %xmm0
movdqa %xmm0, 0x210(%rsp)
movdqa 0x100(%rsp), %xmm1
movdqa 0x120(%rsp), %xmm0
movdqa %xmm1, 0xb50(%rsp)
movdqa %xmm0, 0xb40(%rsp)
movdqa 0xb50(%rsp), %xmm0
movdqa 0xb40(%rsp), %xmm1
psubd %xmm1, %xmm0
movdqa %xmm0, 0x220(%rsp)
movdqa 0x130(%rsp), %xmm0
movdqa %xmm0, 0x30(%rsp)
movdqa 0x140(%rsp), %xmm0
movdqa %xmm0, 0x40(%rsp)
movdqa 0x150(%rsp), %xmm0
movdqa %xmm0, 0x50(%rsp)
movdqa 0x160(%rsp), %xmm0
movdqa %xmm0, 0x60(%rsp)
leaq 0x130(%rsp), %rsi
addq $0x40, %rsi
leaq 0x130(%rsp), %rcx
addq $0x50, %rcx
movl 0x4ac(%rsp), %r9d
leaq 0x470(%rsp), %rdi
leaq 0x480(%rsp), %rdx
leaq 0x240(%rsp), %r8
callq 0x8059c0
movdqa %xmm0, 0x70(%rsp)
leaq 0x130(%rsp), %rsi
addq $0x40, %rsi
leaq 0x130(%rsp), %rcx
addq $0x50, %rcx
movl 0x4ac(%rsp), %r9d
leaq 0x480(%rsp), %rdi
leaq 0x460(%rsp), %rdx
leaq 0x240(%rsp), %r8
callq 0x8059c0
movdqa %xmm0, 0x80(%rsp)
leaq 0x130(%rsp), %rsi
addq $0x60, %rsi
leaq 0x130(%rsp), %rcx
addq $0x70, %rcx
movl 0x4ac(%rsp), %r9d
leaq 0x450(%rsp), %rdi
leaq 0x470(%rsp), %rdx
leaq 0x240(%rsp), %r8
callq 0x8059c0
movdqa %xmm0, 0x90(%rsp)
leaq 0x130(%rsp), %rsi
addq $0x60, %rsi
leaq 0x130(%rsp), %rcx
addq $0x70, %rcx
movl 0x4ac(%rsp), %r9d
leaq 0x470(%rsp), %rdi
leaq 0x480(%rsp), %rdx
leaq 0x240(%rsp), %r8
callq 0x8059c0
movdqa %xmm0, 0xa0(%rsp)
movdqa 0x1b0(%rsp), %xmm0
movdqa %xmm0, 0xb0(%rsp)
movdqa 0x1c0(%rsp), %xmm0
movdqa %xmm0, 0xc0(%rsp)
movdqa 0x1d0(%rsp), %xmm0
movdqa %xmm0, 0xd0(%rsp)
movdqa 0x1e0(%rsp), %xmm0
movdqa %xmm0, 0xe0(%rsp)
leaq 0x130(%rsp), %rsi
addq $0xc0, %rsi
leaq 0x130(%rsp), %rcx
addq $0xd0, %rcx
movl 0x4ac(%rsp), %r9d
leaq 0x470(%rsp), %rdi
leaq 0x480(%rsp), %rdx
leaq 0x240(%rsp), %r8
callq 0x8059c0
movdqa %xmm0, 0xf0(%rsp)
leaq 0x130(%rsp), %rsi
addq $0xc0, %rsi
leaq 0x130(%rsp), %rcx
addq $0xd0, %rcx
movl 0x4ac(%rsp), %r9d
leaq 0x480(%rsp), %rdi
leaq 0x460(%rsp), %rdx
leaq 0x240(%rsp), %r8
callq 0x8059c0
movdqa %xmm0, 0x100(%rsp)
leaq 0x130(%rsp), %rsi
addq $0xe0, %rsi
leaq 0x130(%rsp), %rcx
addq $0xf0, %rcx
movl 0x4ac(%rsp), %r9d
leaq 0x450(%rsp), %rdi
leaq 0x470(%rsp), %rdx
leaq 0x240(%rsp), %r8
callq 0x8059c0
movdqa %xmm0, 0x110(%rsp)
leaq 0x130(%rsp), %rsi
addq $0xe0, %rsi
leaq 0x130(%rsp), %rcx
addq $0xf0, %rcx
movl 0x4ac(%rsp), %r9d
leaq 0x470(%rsp), %rdi
leaq 0x480(%rsp), %rdx
leaq 0x240(%rsp), %r8
callq 0x8059c0
movdqa %xmm0, 0x120(%rsp)
movdqa 0x30(%rsp), %xmm1
movdqa 0x70(%rsp), %xmm0
movdqa %xmm1, 0x6b0(%rsp)
movdqa %xmm0, 0x6a0(%rsp)
movdqa 0x6b0(%rsp), %xmm0
movdqa 0x6a0(%rsp), %xmm1
paddd %xmm1, %xmm0
movdqa %xmm0, 0x130(%rsp)
movdqa 0x40(%rsp), %xmm1
movdqa 0x80(%rsp), %xmm0
movdqa %xmm1, 0x690(%rsp)
movdqa %xmm0, 0x680(%rsp)
movdqa 0x690(%rsp), %xmm0
movdqa 0x680(%rsp), %xmm1
paddd %xmm1, %xmm0
movdqa %xmm0, 0x140(%rsp)
movdqa 0x50(%rsp), %xmm1
movdqa 0x90(%rsp), %xmm0
movdqa %xmm1, 0x670(%rsp)
movdqa %xmm0, 0x660(%rsp)
movdqa 0x670(%rsp), %xmm0
movdqa 0x660(%rsp), %xmm1
paddd %xmm1, %xmm0
movdqa %xmm0, 0x150(%rsp)
movdqa 0x60(%rsp), %xmm1
movdqa 0xa0(%rsp), %xmm0
movdqa %xmm1, 0x650(%rsp)
movdqa %xmm0, 0x640(%rsp)
movdqa 0x650(%rsp), %xmm0
movdqa 0x640(%rsp), %xmm1
paddd %xmm1, %xmm0
movdqa %xmm0, 0x160(%rsp)
movdqa 0x30(%rsp), %xmm1
movdqa 0x70(%rsp), %xmm0
movdqa %xmm1, 0xb30(%rsp)
movdqa %xmm0, 0xb20(%rsp)
movdqa 0xb30(%rsp), %xmm0
movdqa 0xb20(%rsp), %xmm1
psubd %xmm1, %xmm0
movdqa %xmm0, 0x170(%rsp)
movdqa 0x40(%rsp), %xmm1
movdqa 0x80(%rsp), %xmm0
movdqa %xmm1, 0xb10(%rsp)
movdqa %xmm0, 0xb00(%rsp)
movdqa 0xb10(%rsp), %xmm0
movdqa 0xb00(%rsp), %xmm1
psubd %xmm1, %xmm0
movdqa %xmm0, 0x180(%rsp)
movdqa 0x50(%rsp), %xmm1
movdqa 0x90(%rsp), %xmm0
movdqa %xmm1, 0xaf0(%rsp)
movdqa %xmm0, 0xae0(%rsp)
movdqa 0xaf0(%rsp), %xmm0
movdqa 0xae0(%rsp), %xmm1
psubd %xmm1, %xmm0
movdqa %xmm0, 0x190(%rsp)
movdqa 0x60(%rsp), %xmm1
movdqa 0xa0(%rsp), %xmm0
movdqa %xmm1, 0xad0(%rsp)
movdqa %xmm0, 0xac0(%rsp)
movdqa 0xad0(%rsp), %xmm0
movdqa 0xac0(%rsp), %xmm1
psubd %xmm1, %xmm0
movdqa %xmm0, 0x1a0(%rsp)
movdqa 0xb0(%rsp), %xmm1
movdqa 0xf0(%rsp), %xmm0
movdqa %xmm1, 0x630(%rsp)
movdqa %xmm0, 0x620(%rsp)
movdqa 0x630(%rsp), %xmm0
movdqa 0x620(%rsp), %xmm1
paddd %xmm1, %xmm0
movdqa %xmm0, 0x1b0(%rsp)
movdqa 0xc0(%rsp), %xmm1
movdqa 0x100(%rsp), %xmm0
movdqa %xmm1, 0x610(%rsp)
movdqa %xmm0, 0x600(%rsp)
movdqa 0x610(%rsp), %xmm0
movdqa 0x600(%rsp), %xmm1
paddd %xmm1, %xmm0
movdqa %xmm0, 0x1c0(%rsp)
movdqa 0xd0(%rsp), %xmm1
movdqa 0x110(%rsp), %xmm0
movdqa %xmm1, 0x5f0(%rsp)
movdqa %xmm0, 0x5e0(%rsp)
movdqa 0x5f0(%rsp), %xmm0
movdqa 0x5e0(%rsp), %xmm1
paddd %xmm1, %xmm0
movdqa %xmm0, 0x1d0(%rsp)
movdqa 0xe0(%rsp), %xmm1
movdqa 0x120(%rsp), %xmm0
movdqa %xmm1, 0x5d0(%rsp)
movdqa %xmm0, 0x5c0(%rsp)
movdqa 0x5d0(%rsp), %xmm0
movdqa 0x5c0(%rsp), %xmm1
paddd %xmm1, %xmm0
movdqa %xmm0, 0x1e0(%rsp)
movdqa 0xb0(%rsp), %xmm1
movdqa 0xf0(%rsp), %xmm0
movdqa %xmm1, 0xab0(%rsp)
movdqa %xmm0, 0xaa0(%rsp)
movdqa 0xab0(%rsp), %xmm0
movdqa 0xaa0(%rsp), %xmm1
psubd %xmm1, %xmm0
movdqa %xmm0, 0x1f0(%rsp)
movdqa 0xc0(%rsp), %xmm1
movdqa 0x100(%rsp), %xmm0
movdqa %xmm1, 0xa90(%rsp)
movdqa %xmm0, 0xa80(%rsp)
movdqa 0xa90(%rsp), %xmm0
movdqa 0xa80(%rsp), %xmm1
psubd %xmm1, %xmm0
movdqa %xmm0, 0x200(%rsp)
movdqa 0xd0(%rsp), %xmm1
movdqa 0x110(%rsp), %xmm0
movdqa %xmm1, 0xa70(%rsp)
movdqa %xmm0, 0xa60(%rsp)
movdqa 0xa70(%rsp), %xmm0
movdqa 0xa60(%rsp), %xmm1
psubd %xmm1, %xmm0
movdqa %xmm0, 0x210(%rsp)
movdqa 0xe0(%rsp), %xmm1
movdqa 0x120(%rsp), %xmm0
movdqa %xmm1, 0xa50(%rsp)
movdqa %xmm0, 0xa40(%rsp)
movdqa 0xa50(%rsp), %xmm0
movdqa 0xa40(%rsp), %xmm1
psubd %xmm1, %xmm0
movdqa %xmm0, 0x220(%rsp)
movdqa 0x130(%rsp), %xmm0
movdqa %xmm0, 0x30(%rsp)
movdqa 0x140(%rsp), %xmm0
movdqa %xmm0, 0x40(%rsp)
movdqa 0x150(%rsp), %xmm0
movdqa %xmm0, 0x50(%rsp)
movdqa 0x160(%rsp), %xmm0
movdqa %xmm0, 0x60(%rsp)
movdqa 0x170(%rsp), %xmm0
movdqa %xmm0, 0x70(%rsp)
movdqa 0x180(%rsp), %xmm0
movdqa %xmm0, 0x80(%rsp)
movdqa 0x190(%rsp), %xmm0
movdqa %xmm0, 0x90(%rsp)
movdqa 0x1a0(%rsp), %xmm0
movdqa %xmm0, 0xa0(%rsp)
leaq 0x130(%rsp), %rsi
addq $0x80, %rsi
leaq 0x130(%rsp), %rcx
addq $0x90, %rcx
movl 0x4ac(%rsp), %r9d
leaq 0x440(%rsp), %rdi
leaq 0x430(%rsp), %rdx
leaq 0x240(%rsp), %r8
callq 0x8059c0
movdqa %xmm0, 0xb0(%rsp)
leaq 0x130(%rsp), %rsi
addq $0x80, %rsi
leaq 0x130(%rsp), %rcx
addq $0x90, %rcx
movl 0x4ac(%rsp), %r9d
leaq 0x430(%rsp), %rdi
leaq 0x410(%rsp), %rdx
leaq 0x240(%rsp), %r8
callq 0x8059c0
movdqa %xmm0, 0xc0(%rsp)
leaq 0x130(%rsp), %rsi
addq $0xa0, %rsi
leaq 0x130(%rsp), %rcx
addq $0xb0, %rcx
movl 0x4ac(%rsp), %r9d
leaq 0x3d0(%rsp), %rdi
leaq 0x400(%rsp), %rdx
leaq 0x240(%rsp), %r8
callq 0x8059c0
movdqa %xmm0, 0xd0(%rsp)
leaq 0x130(%rsp), %rsi
addq $0xa0, %rsi
leaq 0x130(%rsp), %rcx
addq $0xb0, %rcx
movl 0x4ac(%rsp), %r9d
leaq 0x400(%rsp), %rdi
leaq 0x3e0(%rsp), %rdx
leaq 0x240(%rsp), %r8
callq 0x8059c0
movdqa %xmm0, 0xe0(%rsp)
leaq 0x130(%rsp), %rsi
addq $0xc0, %rsi
leaq 0x130(%rsp), %rcx
addq $0xd0, %rcx
movl 0x4ac(%rsp), %r9d
leaq 0x420(%rsp), %rdi
leaq 0x440(%rsp), %rdx
leaq 0x240(%rsp), %r8
callq 0x8059c0
movdqa %xmm0, 0xf0(%rsp)
leaq 0x130(%rsp), %rsi
addq $0xc0, %rsi
leaq 0x130(%rsp), %rcx
addq $0xd0, %rcx
movl 0x4ac(%rsp), %r9d
leaq 0x440(%rsp), %rdi
leaq 0x430(%rsp), %rdx
leaq 0x240(%rsp), %r8
callq 0x8059c0
movdqa %xmm0, 0x100(%rsp)
leaq 0x130(%rsp), %rsi
addq $0xe0, %rsi
leaq 0x130(%rsp), %rcx
addq $0xf0, %rcx
movl 0x4ac(%rsp), %r9d
leaq 0x3f0(%rsp), %rdi
leaq 0x3d0(%rsp), %rdx
leaq 0x240(%rsp), %r8
callq 0x8059c0
movdqa %xmm0, 0x110(%rsp)
leaq 0x130(%rsp), %rsi
addq $0xe0, %rsi
leaq 0x130(%rsp), %rcx
addq $0xf0, %rcx
movl 0x4ac(%rsp), %r9d
leaq 0x3d0(%rsp), %rdi
leaq 0x400(%rsp), %rdx
leaq 0x240(%rsp), %r8
callq 0x8059c0
movdqa %xmm0, 0x120(%rsp)
movdqa 0x30(%rsp), %xmm1
movdqa 0xb0(%rsp), %xmm0
movdqa %xmm1, 0x5b0(%rsp)
movdqa %xmm0, 0x5a0(%rsp)
movdqa 0x5b0(%rsp), %xmm0
movdqa 0x5a0(%rsp), %xmm1
paddd %xmm1, %xmm0
movdqa %xmm0, 0x130(%rsp)
movdqa 0x40(%rsp), %xmm1
movdqa 0xc0(%rsp), %xmm0
movdqa %xmm1, 0x590(%rsp)
movdqa %xmm0, 0x580(%rsp)
movdqa 0x590(%rsp), %xmm0
movdqa 0x580(%rsp), %xmm1
paddd %xmm1, %xmm0
movdqa %xmm0, 0x140(%rsp)
movdqa 0x50(%rsp), %xmm1
movdqa 0xd0(%rsp), %xmm0
movdqa %xmm1, 0x570(%rsp)
movdqa %xmm0, 0x560(%rsp)
movdqa 0x570(%rsp), %xmm0
movdqa 0x560(%rsp), %xmm1
paddd %xmm1, %xmm0
movdqa %xmm0, 0x150(%rsp)
movdqa 0x60(%rsp), %xmm1
movdqa 0xe0(%rsp), %xmm0
movdqa %xmm1, 0x550(%rsp)
movdqa %xmm0, 0x540(%rsp)
movdqa 0x550(%rsp), %xmm0
movdqa 0x540(%rsp), %xmm1
paddd %xmm1, %xmm0
movdqa %xmm0, 0x160(%rsp)
movdqa 0x70(%rsp), %xmm1
movdqa 0xf0(%rsp), %xmm0
movdqa %xmm1, 0x530(%rsp)
movdqa %xmm0, 0x520(%rsp)
movdqa 0x530(%rsp), %xmm0
movdqa 0x520(%rsp), %xmm1
paddd %xmm1, %xmm0
movdqa %xmm0, 0x170(%rsp)
movdqa 0x80(%rsp), %xmm1
movdqa 0x100(%rsp), %xmm0
movdqa %xmm1, 0x510(%rsp)
movdqa %xmm0, 0x500(%rsp)
movdqa 0x510(%rsp), %xmm0
movdqa 0x500(%rsp), %xmm1
paddd %xmm1, %xmm0
movdqa %xmm0, 0x180(%rsp)
movdqa 0x90(%rsp), %xmm1
movdqa 0x110(%rsp), %xmm0
movdqa %xmm1, 0x4f0(%rsp)
movdqa %xmm0, 0x4e0(%rsp)
movdqa 0x4f0(%rsp), %xmm0
movdqa 0x4e0(%rsp), %xmm1
paddd %xmm1, %xmm0
movdqa %xmm0, 0x190(%rsp)
movdqa 0xa0(%rsp), %xmm1
movdqa 0x120(%rsp), %xmm0
movdqa %xmm1, 0x4d0(%rsp)
movdqa %xmm0, 0x4c0(%rsp)
movdqa 0x4d0(%rsp), %xmm0
movdqa 0x4c0(%rsp), %xmm1
paddd %xmm1, %xmm0
movdqa %xmm0, 0x1a0(%rsp)
movdqa 0x30(%rsp), %xmm1
movdqa 0xb0(%rsp), %xmm0
movdqa %xmm1, 0xa30(%rsp)
movdqa %xmm0, 0xa20(%rsp)
movdqa 0xa30(%rsp), %xmm0
movdqa 0xa20(%rsp), %xmm1
psubd %xmm1, %xmm0
movdqa %xmm0, 0x1b0(%rsp)
movdqa 0x40(%rsp), %xmm1
movdqa 0xc0(%rsp), %xmm0
movdqa %xmm1, 0xa10(%rsp)
movdqa %xmm0, 0xa00(%rsp)
movdqa 0xa10(%rsp), %xmm0
movdqa 0xa00(%rsp), %xmm1
psubd %xmm1, %xmm0
movdqa %xmm0, 0x1c0(%rsp)
movdqa 0x50(%rsp), %xmm1
movdqa 0xd0(%rsp), %xmm0
movdqa %xmm1, 0x9f0(%rsp)
movdqa %xmm0, 0x9e0(%rsp)
movdqa 0x9f0(%rsp), %xmm0
movdqa 0x9e0(%rsp), %xmm1
psubd %xmm1, %xmm0
movdqa %xmm0, 0x1d0(%rsp)
movdqa 0x60(%rsp), %xmm1
movdqa 0xe0(%rsp), %xmm0
movdqa %xmm1, 0x9d0(%rsp)
movdqa %xmm0, 0x9c0(%rsp)
movdqa 0x9d0(%rsp), %xmm0
movdqa 0x9c0(%rsp), %xmm1
psubd %xmm1, %xmm0
movdqa %xmm0, 0x1e0(%rsp)
movdqa 0x70(%rsp), %xmm1
movdqa 0xf0(%rsp), %xmm0
movdqa %xmm1, 0x9b0(%rsp)
movdqa %xmm0, 0x9a0(%rsp)
movdqa 0x9b0(%rsp), %xmm0
movdqa 0x9a0(%rsp), %xmm1
psubd %xmm1, %xmm0
movdqa %xmm0, 0x1f0(%rsp)
movdqa 0x80(%rsp), %xmm1
movdqa 0x100(%rsp), %xmm0
movdqa %xmm1, 0x990(%rsp)
movdqa %xmm0, 0x980(%rsp)
movdqa 0x990(%rsp), %xmm0
movdqa 0x980(%rsp), %xmm1
psubd %xmm1, %xmm0
movdqa %xmm0, 0x200(%rsp)
movdqa 0x90(%rsp), %xmm1
movdqa 0x110(%rsp), %xmm0
movdqa %xmm1, 0x970(%rsp)
movdqa %xmm0, 0x960(%rsp)
movdqa 0x970(%rsp), %xmm0
movdqa 0x960(%rsp), %xmm1
psubd %xmm1, %xmm0
movdqa %xmm0, 0x210(%rsp)
movdqa 0xa0(%rsp), %xmm1
movdqa 0x120(%rsp), %xmm0
movdqa %xmm1, 0x950(%rsp)
movdqa %xmm0, 0x940(%rsp)
movdqa 0x950(%rsp), %xmm0
movdqa 0x940(%rsp), %xmm1
psubd %xmm1, %xmm0
movdqa %xmm0, 0x220(%rsp)
leaq 0x130(%rsp), %rsi
leaq 0x130(%rsp), %rcx
addq $0x10, %rcx
movl 0x4ac(%rsp), %r9d
leaq 0x3c0(%rsp), %rdi
leaq 0x3b0(%rsp), %rdx
leaq 0x240(%rsp), %r8
callq 0x8059c0
movdqa %xmm0, 0x30(%rsp)
leaq 0x130(%rsp), %rsi
leaq 0x130(%rsp), %rcx
addq $0x10, %rcx
movl 0x4ac(%rsp), %r9d
leaq 0x3b0(%rsp), %rdi
leaq 0x3a0(%rsp), %rdx
leaq 0x240(%rsp), %r8
callq 0x8059c0
movdqa %xmm0, 0x40(%rsp)
leaq 0x130(%rsp), %rsi
addq $0x20, %rsi
leaq 0x130(%rsp), %rcx
addq $0x30, %rcx
movl 0x4ac(%rsp), %r9d
leaq 0x390(%rsp), %rdi
leaq 0x380(%rsp), %rdx
leaq 0x240(%rsp), %r8
callq 0x8059c0
movdqa %xmm0, 0x50(%rsp)
leaq 0x130(%rsp), %rsi
addq $0x20, %rsi
leaq 0x130(%rsp), %rcx
addq $0x30, %rcx
movl 0x4ac(%rsp), %r9d
leaq 0x380(%rsp), %rdi
leaq 0x370(%rsp), %rdx
leaq 0x240(%rsp), %r8
callq 0x8059c0
movdqa %xmm0, 0x60(%rsp)
leaq 0x130(%rsp), %rsi
addq $0x40, %rsi
leaq 0x130(%rsp), %rcx
addq $0x50, %rcx
movl 0x4ac(%rsp), %r9d
leaq 0x360(%rsp), %rdi
leaq 0x350(%rsp), %rdx
leaq 0x240(%rsp), %r8
callq 0x8059c0
movdqa %xmm0, 0x70(%rsp)
leaq 0x130(%rsp), %rsi
addq $0x40, %rsi
leaq 0x130(%rsp), %rcx
addq $0x50, %rcx
movl 0x4ac(%rsp), %r9d
leaq 0x350(%rsp), %rdi
leaq 0x340(%rsp), %rdx
leaq 0x240(%rsp), %r8
callq 0x8059c0
movdqa %xmm0, 0x80(%rsp)
leaq 0x130(%rsp), %rsi
addq $0x60, %rsi
leaq 0x130(%rsp), %rcx
addq $0x70, %rcx
movl 0x4ac(%rsp), %r9d
leaq 0x330(%rsp), %rdi
leaq 0x320(%rsp), %rdx
leaq 0x240(%rsp), %r8
callq 0x8059c0
movdqa %xmm0, 0x90(%rsp)
leaq 0x130(%rsp), %rsi
addq $0x60, %rsi
leaq 0x130(%rsp), %rcx
addq $0x70, %rcx
movl 0x4ac(%rsp), %r9d
leaq 0x320(%rsp), %rdi
leaq 0x310(%rsp), %rdx
leaq 0x240(%rsp), %r8
callq 0x8059c0
movdqa %xmm0, 0xa0(%rsp)
leaq 0x130(%rsp), %rsi
addq $0x80, %rsi
leaq 0x130(%rsp), %rcx
addq $0x90, %rcx
movl 0x4ac(%rsp), %r9d
leaq 0x300(%rsp), %rdi
leaq 0x2f0(%rsp), %rdx
leaq 0x240(%rsp), %r8
callq 0x8059c0
movdqa %xmm0, 0xb0(%rsp)
leaq 0x130(%rsp), %rsi
addq $0x80, %rsi
leaq 0x130(%rsp), %rcx
addq $0x90, %rcx
movl 0x4ac(%rsp), %r9d
leaq 0x2f0(%rsp), %rdi
leaq 0x2e0(%rsp), %rdx
leaq 0x240(%rsp), %r8
callq 0x8059c0
movdqa %xmm0, 0xc0(%rsp)
leaq 0x130(%rsp), %rsi
addq $0xa0, %rsi
leaq 0x130(%rsp), %rcx
addq $0xb0, %rcx
movl 0x4ac(%rsp), %r9d
leaq 0x2d0(%rsp), %rdi
leaq 0x2c0(%rsp), %rdx
leaq 0x240(%rsp), %r8
callq 0x8059c0
movdqa %xmm0, 0xd0(%rsp)
leaq 0x130(%rsp), %rsi
addq $0xa0, %rsi
leaq 0x130(%rsp), %rcx
addq $0xb0, %rcx
movl 0x4ac(%rsp), %r9d
leaq 0x2c0(%rsp), %rdi
leaq 0x2b0(%rsp), %rdx
leaq 0x240(%rsp), %r8
callq 0x8059c0
movdqa %xmm0, 0xe0(%rsp)
leaq 0x130(%rsp), %rsi
addq $0xc0, %rsi
leaq 0x130(%rsp), %rcx
addq $0xd0, %rcx
movl 0x4ac(%rsp), %r9d
leaq 0x2a0(%rsp), %rdi
leaq 0x290(%rsp), %rdx
leaq 0x240(%rsp), %r8
callq 0x8059c0
movdqa %xmm0, 0xf0(%rsp)
leaq 0x130(%rsp), %rsi
addq $0xc0, %rsi
leaq 0x130(%rsp), %rcx
addq $0xd0, %rcx
movl 0x4ac(%rsp), %r9d
leaq 0x290(%rsp), %rdi
leaq 0x280(%rsp), %rdx
leaq 0x240(%rsp), %r8
callq 0x8059c0
movdqa %xmm0, 0x100(%rsp)
leaq 0x130(%rsp), %rsi
addq $0xe0, %rsi
leaq 0x130(%rsp), %rcx
addq $0xf0, %rcx
movl 0x4ac(%rsp), %r9d
leaq 0x270(%rsp), %rdi
leaq 0x260(%rsp), %rdx
leaq 0x240(%rsp), %r8
callq 0x8059c0
movdqa %xmm0, 0x110(%rsp)
leaq 0x130(%rsp), %rsi
addq $0xe0, %rsi
leaq 0x130(%rsp), %rcx
addq $0xf0, %rcx
movl 0x4ac(%rsp), %r9d
leaq 0x260(%rsp), %rdi
leaq 0x250(%rsp), %rdx
leaq 0x240(%rsp), %r8
callq 0x8059c0
movdqa %xmm0, 0x120(%rsp)
movdqa 0x40(%rsp), %xmm0
movq 0x4b0(%rsp), %rax
imull $0x0, 0x4a8(%rsp), %ecx
addl 0xc(%rsp), %ecx
movslq %ecx, %rcx
shlq $0x4, %rcx
addq %rcx, %rax
movdqa %xmm0, (%rax)
movdqa 0x110(%rsp), %xmm0
movq 0x4b0(%rsp), %rax
movl 0x4a8(%rsp), %ecx
shll $0x0, %ecx
addl 0xc(%rsp), %ecx
movslq %ecx, %rcx
shlq $0x4, %rcx
addq %rcx, %rax
movdqa %xmm0, (%rax)
movdqa 0x60(%rsp), %xmm0
movq 0x4b0(%rsp), %rax
movl 0x4a8(%rsp), %ecx
shll %ecx
addl 0xc(%rsp), %ecx
movslq %ecx, %rcx
shlq $0x4, %rcx
addq %rcx, %rax
movdqa %xmm0, (%rax)
movdqa 0xf0(%rsp), %xmm0
movq 0x4b0(%rsp), %rax
imull $0x3, 0x4a8(%rsp), %ecx
addl 0xc(%rsp), %ecx
movslq %ecx, %rcx
shlq $0x4, %rcx
addq %rcx, %rax
movdqa %xmm0, (%rax)
movdqa 0x80(%rsp), %xmm0
movq 0x4b0(%rsp), %rax
movl 0x4a8(%rsp), %ecx
shll $0x2, %ecx
addl 0xc(%rsp), %ecx
movslq %ecx, %rcx
shlq $0x4, %rcx
addq %rcx, %rax
movdqa %xmm0, (%rax)
movdqa 0xd0(%rsp), %xmm0
movq 0x4b0(%rsp), %rax
imull $0x5, 0x4a8(%rsp), %ecx
addl 0xc(%rsp), %ecx
movslq %ecx, %rcx
shlq $0x4, %rcx
addq %rcx, %rax
movdqa %xmm0, (%rax)
movdqa 0xa0(%rsp), %xmm0
movq 0x4b0(%rsp), %rax
imull $0x6, 0x4a8(%rsp), %ecx
addl 0xc(%rsp), %ecx
movslq %ecx, %rcx
shlq $0x4, %rcx
addq %rcx, %rax
movdqa %xmm0, (%rax)
movdqa 0xb0(%rsp), %xmm0
movq 0x4b0(%rsp), %rax
imull $0x7, 0x4a8(%rsp), %ecx
addl 0xc(%rsp), %ecx
movslq %ecx, %rcx
shlq $0x4, %rcx
addq %rcx, %rax
movdqa %xmm0, (%rax)
movdqa 0xc0(%rsp), %xmm0
movq 0x4b0(%rsp), %rax
movl 0x4a8(%rsp), %ecx
shll $0x3, %ecx
addl 0xc(%rsp), %ecx
movslq %ecx, %rcx
shlq $0x4, %rcx
addq %rcx, %rax
movdqa %xmm0, (%rax)
movdqa 0x90(%rsp), %xmm0
movq 0x4b0(%rsp), %rax
imull $0x9, 0x4a8(%rsp), %ecx
addl 0xc(%rsp), %ecx
movslq %ecx, %rcx
shlq $0x4, %rcx
addq %rcx, %rax
movdqa %xmm0, (%rax)
movdqa 0xe0(%rsp), %xmm0
movq 0x4b0(%rsp), %rax
imull $0xa, 0x4a8(%rsp), %ecx
addl 0xc(%rsp), %ecx
movslq %ecx, %rcx
shlq $0x4, %rcx
addq %rcx, %rax
movdqa %xmm0, (%rax)
movdqa 0x70(%rsp), %xmm0
movq 0x4b0(%rsp), %rax
imull $0xb, 0x4a8(%rsp), %ecx
addl 0xc(%rsp), %ecx
movslq %ecx, %rcx
shlq $0x4, %rcx
addq %rcx, %rax
movdqa %xmm0, (%rax)
movdqa 0x100(%rsp), %xmm0
movq 0x4b0(%rsp), %rax
imull $0xc, 0x4a8(%rsp), %ecx
addl 0xc(%rsp), %ecx
movslq %ecx, %rcx
shlq $0x4, %rcx
addq %rcx, %rax
movdqa %xmm0, (%rax)
movdqa 0x50(%rsp), %xmm0
movq 0x4b0(%rsp), %rax
imull $0xd, 0x4a8(%rsp), %ecx
addl 0xc(%rsp), %ecx
movslq %ecx, %rcx
shlq $0x4, %rcx
addq %rcx, %rax
movdqa %xmm0, (%rax)
movdqa 0x120(%rsp), %xmm0
movq 0x4b0(%rsp), %rax
imull $0xe, 0x4a8(%rsp), %ecx
addl 0xc(%rsp), %ecx
movslq %ecx, %rcx
shlq $0x4, %rcx
addq %rcx, %rax
movdqa %xmm0, (%rax)
movdqa 0x30(%rsp), %xmm0
movq 0x4b0(%rsp), %rax
imull $0xf, 0x4a8(%rsp), %ecx
addl 0xc(%rsp), %ecx
movslq %ecx, %rcx
shlq $0x4, %rcx
addq %rcx, %rax
movdqa %xmm0, (%rax)
movl 0xc(%rsp), %eax
addl $0x1, %eax
movl %eax, 0xc(%rsp)
jmp 0x7fe296
addq $0x1538, %rsp # imm = 0x1538
retq
nopl (%rax,%rax)
| /m-ab-s[P]aom/av1/encoder/x86/highbd_fwd_txfm_sse4.c |
idtx16x16_sse4_1 | static void idtx16x16_sse4_1(__m128i *in, __m128i *out, int bit, int col_num) {
(void)bit;
__m128i fact = _mm_set1_epi32(2 * NewSqrt2);
__m128i offset = _mm_set1_epi32(1 << (NewSqrt2Bits - 1));
__m128i a_low;
int num_iters = 16 * col_num;
for (int i = 0; i < num_iters; i++) {
a_low = _mm_mullo_epi32(in[i], fact);
a_low = _mm_add_epi32(a_low, offset);
out[i] = _mm_srai_epi32(a_low, NewSqrt2Bits);
}
} | subq $0x98, %rsp
movq %rdi, -0x28(%rsp)
movq %rsi, -0x30(%rsp)
movl %edx, -0x34(%rsp)
movl %ecx, -0x38(%rsp)
movl $0x2d42, 0x2c(%rsp) # imm = 0x2D42
movl 0x2c(%rsp), %eax
movl %eax, 0x6c(%rsp)
movl %eax, 0x68(%rsp)
movl %eax, 0x64(%rsp)
movl %eax, 0x60(%rsp)
movl 0x64(%rsp), %edx
movl 0x68(%rsp), %ecx
movl 0x6c(%rsp), %eax
movd 0x60(%rsp), %xmm0
pinsrd $0x1, %edx, %xmm0
pinsrd $0x2, %ecx, %xmm0
pinsrd $0x3, %eax, %xmm0
movaps %xmm0, 0x50(%rsp)
movaps 0x50(%rsp), %xmm0
movaps %xmm0, -0x50(%rsp)
movl $0x800, 0x28(%rsp) # imm = 0x800
movl 0x28(%rsp), %eax
movl %eax, 0x94(%rsp)
movl %eax, 0x90(%rsp)
movl %eax, 0x8c(%rsp)
movl %eax, 0x88(%rsp)
movl 0x8c(%rsp), %edx
movl 0x90(%rsp), %ecx
movl 0x94(%rsp), %eax
movd 0x88(%rsp), %xmm0
pinsrd $0x1, %edx, %xmm0
pinsrd $0x2, %ecx, %xmm0
pinsrd $0x3, %eax, %xmm0
movdqa %xmm0, 0x70(%rsp)
movdqa 0x70(%rsp), %xmm0
movdqa %xmm0, -0x60(%rsp)
movl -0x38(%rsp), %eax
shll $0x4, %eax
movl %eax, -0x74(%rsp)
movl $0x0, -0x78(%rsp)
movl -0x78(%rsp), %eax
cmpl -0x74(%rsp), %eax
jge 0x800a2a
movq -0x28(%rsp), %rax
movslq -0x78(%rsp), %rcx
shlq $0x4, %rcx
addq %rcx, %rax
movdqa (%rax), %xmm1
movdqa -0x50(%rsp), %xmm0
movdqa %xmm1, 0x40(%rsp)
movdqa %xmm0, 0x30(%rsp)
movdqa 0x40(%rsp), %xmm0
movdqa 0x30(%rsp), %xmm1
pmulld %xmm1, %xmm0
movdqa %xmm0, -0x70(%rsp)
movdqa -0x70(%rsp), %xmm1
movdqa -0x60(%rsp), %xmm0
movdqa %xmm1, -0x10(%rsp)
movdqa %xmm0, -0x20(%rsp)
movdqa -0x10(%rsp), %xmm0
movdqa -0x20(%rsp), %xmm1
paddd %xmm1, %xmm0
movdqa %xmm0, -0x70(%rsp)
movdqa -0x70(%rsp), %xmm0
movdqa %xmm0, 0x10(%rsp)
movl $0xc, 0xc(%rsp)
movdqa 0x10(%rsp), %xmm0
movl 0xc(%rsp), %eax
movd %eax, %xmm1
psrad %xmm1, %xmm0
movq -0x30(%rsp), %rax
movslq -0x78(%rsp), %rcx
shlq $0x4, %rcx
addq %rcx, %rax
movdqa %xmm0, (%rax)
movl -0x78(%rsp), %eax
addl $0x1, %eax
movl %eax, -0x78(%rsp)
jmp 0x800965
addq $0x98, %rsp
retq
nopw %cs:(%rax,%rax)
| /m-ab-s[P]aom/av1/encoder/x86/highbd_fwd_txfm_sse4.c |
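The identity transform in the row above is a pure per-lane rescale: each 32-bit coefficient is multiplied by 2 * NewSqrt2 and then rounded and shifted back down by NewSqrt2Bits. A scalar sketch of that lane arithmetic follows; the helper name is mine, and the constants are the ones visible in the listing itself (0x2D42 = 11586 = 2 * 5793, 0x800 = 1 << 11, shift count 0xC = 12).

#include <stdint.h>

/* Scalar sketch of one lane of idtx16x16_sse4_1: scale by 2 * NewSqrt2 (11586)
   and round-shift by NewSqrt2Bits (12). _mm_mullo_epi32 keeps only the low 32
   bits of the product, so the multiply stays in 32-bit arithmetic here too,
   and the >> mirrors the arithmetic shift performed by _mm_srai_epi32. */
static inline int32_t idtx16_lane_sketch(int32_t x) {
  return (x * 11586 + (1 << 11)) >> 12;
}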
av1_fwd_txfm2d_32x16_sse4_1 | void av1_fwd_txfm2d_32x16_sse4_1(const int16_t *input, int32_t *coeff,
int stride, TX_TYPE tx_type, int bd) {
__m128i in[128];
__m128i *outcoef128 = (__m128i *)coeff;
const int8_t *shift = av1_fwd_txfm_shift_ls[TX_32X16];
const int txw_idx = get_txw_idx(TX_32X16);
const int txh_idx = get_txh_idx(TX_32X16);
const fwd_transform_1d_sse4_1 col_txfm = row_highbd_txfm8x32_arr[tx_type];
const fwd_transform_1d_sse4_1 row_txfm = col_highbd_txfm8x32_arr[tx_type];
int bitcol = av1_fwd_cos_bit_col[txw_idx][txh_idx];
int bitrow = av1_fwd_cos_bit_row[txw_idx][txh_idx];
// column transform
load_buffer_32x8n(input, in, stride, 0, 0, shift[0], 16);
col_txfm(in, in, bitcol, 8);
col_txfm_16x16_rounding(&in[0], -shift[1]);
col_txfm_16x16_rounding(&in[64], -shift[1]);
transpose_8nx8n(in, outcoef128, 32, 16);
// row transform
for (int i = 0; i < 4; i++) {
row_txfm((outcoef128 + i), (in + i), bitrow, 4);
}
av1_round_shift_rect_array_32_sse4_1(in, outcoef128, 128, -shift[2],
NewSqrt2);
(void)bd;
} | subq $0x868, %rsp # imm = 0x868
movb %cl, %al
movq %rdi, 0x860(%rsp)
movq %rsi, 0x858(%rsp)
movl %edx, 0x854(%rsp)
movb %al, 0x853(%rsp)
movl %r8d, 0x84c(%rsp)
movq 0x858(%rsp), %rax
movq %rax, 0x38(%rsp)
leaq 0x3a314d(%rip), %rax # 0xba5b10
movq 0x50(%rax), %rax
movq %rax, 0x30(%rsp)
movl $0xa, %edi
callq 0x7ef0b0
movl %eax, 0x2c(%rsp)
movl $0xa, %edi
callq 0x7ef0d0
movl %eax, 0x28(%rsp)
movzbl 0x853(%rsp), %eax
movl %eax, %ecx
leaq 0x39a947(%rip), %rax # 0xb9d340
movq (%rax,%rcx,8), %rax
movq %rax, 0x20(%rsp)
movzbl 0x853(%rsp), %eax
movl %eax, %ecx
leaq 0x39a8ad(%rip), %rax # 0xb9d2c0
movq (%rax,%rcx,8), %rax
movq %rax, 0x18(%rsp)
movslq 0x2c(%rsp), %rcx
leaq 0x3145e8(%rip), %rax # 0xb17010
imulq $0x5, %rcx, %rcx
addq %rcx, %rax
movslq 0x28(%rsp), %rcx
movsbl (%rax,%rcx), %eax
movl %eax, 0x14(%rsp)
movslq 0x2c(%rsp), %rcx
leaq 0x3145e8(%rip), %rax # 0xb17030
imulq $0x5, %rcx, %rcx
addq %rcx, %rax
movslq 0x28(%rsp), %rcx
movsbl (%rax,%rcx), %eax
movl %eax, 0x10(%rsp)
movq 0x860(%rsp), %rdi
leaq 0x40(%rsp), %rsi
movl 0x854(%rsp), %edx
movq 0x30(%rsp), %rax
movsbl (%rax), %r9d
xorl %r8d, %r8d
movl %r8d, %ecx
movl $0x10, (%rsp)
callq 0x802530
movq 0x20(%rsp), %rax
leaq 0x40(%rsp), %rdi
leaq 0x40(%rsp), %rsi
movl 0x14(%rsp), %edx
movl $0x8, %ecx
callq *%rax
leaq 0x40(%rsp), %rdi
movq 0x30(%rsp), %rax
movsbl 0x1(%rax), %eax
xorl %esi, %esi
subl %eax, %esi
callq 0x7fada0
leaq 0x40(%rsp), %rdi
addq $0x400, %rdi # imm = 0x400
movq 0x30(%rsp), %rax
movsbl 0x1(%rax), %eax
xorl %esi, %esi
subl %eax, %esi
callq 0x7fada0
leaq 0x40(%rsp), %rdi
movq 0x38(%rsp), %rsi
movl $0x20, %edx
movl $0x10, %ecx
callq 0x801a60
movl $0x0, 0xc(%rsp)
cmpl $0x4, 0xc(%rsp)
jge 0x802b41
movq 0x18(%rsp), %rax
movq 0x38(%rsp), %rdi
movslq 0xc(%rsp), %rcx
shlq $0x4, %rcx
addq %rcx, %rdi
leaq 0x40(%rsp), %rsi
movslq 0xc(%rsp), %rcx
shlq $0x4, %rcx
addq %rcx, %rsi
movl 0x10(%rsp), %edx
movl $0x4, %ecx
callq *%rax
movl 0xc(%rsp), %eax
addl $0x1, %eax
movl %eax, 0xc(%rsp)
jmp 0x802afb
leaq 0x40(%rsp), %rdi
movq 0x38(%rsp), %rsi
movq 0x30(%rsp), %rax
movsbl 0x2(%rax), %eax
xorl %ecx, %ecx
subl %eax, %ecx
movl $0x80, %edx
movl $0x16a1, %r8d # imm = 0x16A1
callq 0x800e20
addq $0x868, %rsp # imm = 0x868
retq
| /m-ab-s[P]aom/av1/encoder/x86/highbd_fwd_txfm_sse4.c |
load_buffer_4x8 | static inline void load_buffer_4x8(const int16_t *input, __m128i *out,
int stride, int flipud, int fliplr,
int shift) {
const int16_t *topL = input;
const int16_t *botL = input + 4 * stride;
const int16_t *tmp;
if (flipud) {
tmp = topL;
topL = botL;
botL = tmp;
}
load_buffer_4x4(topL, out, stride, flipud, fliplr, shift);
load_buffer_4x4(botL, out + 4, stride, flipud, fliplr, shift);
} | subq $0x38, %rsp
movq %rdi, 0x30(%rsp)
movq %rsi, 0x28(%rsp)
movl %edx, 0x24(%rsp)
movl %ecx, 0x20(%rsp)
movl %r8d, 0x1c(%rsp)
movl %r9d, 0x18(%rsp)
movq 0x30(%rsp), %rax
movq %rax, 0x10(%rsp)
movq 0x30(%rsp), %rax
movl 0x24(%rsp), %ecx
shll $0x2, %ecx
movslq %ecx, %rcx
shlq %rcx
addq %rcx, %rax
movq %rax, 0x8(%rsp)
cmpl $0x0, 0x20(%rsp)
je 0x803237
movq 0x10(%rsp), %rax
movq %rax, (%rsp)
movq 0x8(%rsp), %rax
movq %rax, 0x10(%rsp)
movq (%rsp), %rax
movq %rax, 0x8(%rsp)
movq 0x10(%rsp), %rdi
movq 0x28(%rsp), %rsi
movl 0x24(%rsp), %edx
movl 0x20(%rsp), %ecx
movl 0x1c(%rsp), %r8d
movl 0x18(%rsp), %r9d
callq 0x7ef0f0
movq 0x8(%rsp), %rdi
movq 0x28(%rsp), %rsi
addq $0x40, %rsi
movl 0x24(%rsp), %edx
movl 0x20(%rsp), %ecx
movl 0x1c(%rsp), %r8d
movl 0x18(%rsp), %r9d
callq 0x7ef0f0
addq $0x38, %rsp
retq
nopw %cs:(%rax,%rax)
| /m-ab-s[P]aom/av1/encoder/x86/highbd_fwd_txfm_sse4.c |
col_txfm_4x8_rounding | static inline void col_txfm_4x8_rounding(__m128i *in, int shift) {
const __m128i rounding = _mm_set1_epi32(1 << (shift - 1));
in[0] = _mm_add_epi32(in[0], rounding);
in[1] = _mm_add_epi32(in[1], rounding);
in[2] = _mm_add_epi32(in[2], rounding);
in[3] = _mm_add_epi32(in[3], rounding);
in[4] = _mm_add_epi32(in[4], rounding);
in[5] = _mm_add_epi32(in[5], rounding);
in[6] = _mm_add_epi32(in[6], rounding);
in[7] = _mm_add_epi32(in[7], rounding);
in[0] = _mm_srai_epi32(in[0], shift);
in[1] = _mm_srai_epi32(in[1], shift);
in[2] = _mm_srai_epi32(in[2], shift);
in[3] = _mm_srai_epi32(in[3], shift);
in[4] = _mm_srai_epi32(in[4], shift);
in[5] = _mm_srai_epi32(in[5], shift);
in[6] = _mm_srai_epi32(in[6], shift);
in[7] = _mm_srai_epi32(in[7], shift);
} | subq $0x1d8, %rsp # imm = 0x1D8
movq %rdi, -0x68(%rsp)
movl %esi, -0x6c(%rsp)
movb -0x6c(%rsp), %cl
decb %cl
movl $0x1, %eax
shll %cl, %eax
movl %eax, 0x1ac(%rsp)
movl 0x1ac(%rsp), %eax
movl %eax, 0x1d4(%rsp)
movl %eax, 0x1d0(%rsp)
movl %eax, 0x1cc(%rsp)
movl %eax, 0x1c8(%rsp)
movl 0x1cc(%rsp), %edx
movl 0x1d0(%rsp), %ecx
movl 0x1d4(%rsp), %eax
movd 0x1c8(%rsp), %xmm0
pinsrd $0x1, %edx, %xmm0
pinsrd $0x2, %ecx, %xmm0
pinsrd $0x3, %eax, %xmm0
movdqa %xmm0, 0x1b0(%rsp)
movdqa 0x1b0(%rsp), %xmm0
movdqa %xmm0, -0x80(%rsp)
movq -0x68(%rsp), %rax
movdqa (%rax), %xmm1
movdqa -0x80(%rsp), %xmm0
movdqa %xmm1, 0x90(%rsp)
movdqa %xmm0, 0x80(%rsp)
movdqa 0x90(%rsp), %xmm0
movdqa 0x80(%rsp), %xmm1
paddd %xmm1, %xmm0
movq -0x68(%rsp), %rax
movdqa %xmm0, (%rax)
movq -0x68(%rsp), %rax
movdqa 0x10(%rax), %xmm1
movdqa -0x80(%rsp), %xmm0
movdqa %xmm1, 0x70(%rsp)
movdqa %xmm0, 0x60(%rsp)
movdqa 0x70(%rsp), %xmm0
movdqa 0x60(%rsp), %xmm1
paddd %xmm1, %xmm0
movq -0x68(%rsp), %rax
movdqa %xmm0, 0x10(%rax)
movq -0x68(%rsp), %rax
movdqa 0x20(%rax), %xmm1
movdqa -0x80(%rsp), %xmm0
movdqa %xmm1, 0x50(%rsp)
movdqa %xmm0, 0x40(%rsp)
movdqa 0x50(%rsp), %xmm0
movdqa 0x40(%rsp), %xmm1
paddd %xmm1, %xmm0
movq -0x68(%rsp), %rax
movdqa %xmm0, 0x20(%rax)
movq -0x68(%rsp), %rax
movdqa 0x30(%rax), %xmm1
movdqa -0x80(%rsp), %xmm0
movdqa %xmm1, 0x30(%rsp)
movdqa %xmm0, 0x20(%rsp)
movdqa 0x30(%rsp), %xmm0
movdqa 0x20(%rsp), %xmm1
paddd %xmm1, %xmm0
movq -0x68(%rsp), %rax
movdqa %xmm0, 0x30(%rax)
movq -0x68(%rsp), %rax
movdqa 0x40(%rax), %xmm1
movdqa -0x80(%rsp), %xmm0
movdqa %xmm1, 0x10(%rsp)
movdqa %xmm0, (%rsp)
movdqa 0x10(%rsp), %xmm0
movdqa (%rsp), %xmm1
paddd %xmm1, %xmm0
movq -0x68(%rsp), %rax
movdqa %xmm0, 0x40(%rax)
movq -0x68(%rsp), %rax
movdqa 0x50(%rax), %xmm1
movdqa -0x80(%rsp), %xmm0
movdqa %xmm1, -0x10(%rsp)
movdqa %xmm0, -0x20(%rsp)
movdqa -0x10(%rsp), %xmm0
movdqa -0x20(%rsp), %xmm1
paddd %xmm1, %xmm0
movq -0x68(%rsp), %rax
movdqa %xmm0, 0x50(%rax)
movq -0x68(%rsp), %rax
movdqa 0x60(%rax), %xmm1
movdqa -0x80(%rsp), %xmm0
movdqa %xmm1, -0x30(%rsp)
movdqa %xmm0, -0x40(%rsp)
movdqa -0x30(%rsp), %xmm0
movdqa -0x40(%rsp), %xmm1
paddd %xmm1, %xmm0
movq -0x68(%rsp), %rax
movdqa %xmm0, 0x60(%rax)
movq -0x68(%rsp), %rax
movdqa 0x70(%rax), %xmm1
movdqa -0x80(%rsp), %xmm0
movdqa %xmm1, -0x50(%rsp)
movdqa %xmm0, -0x60(%rsp)
movdqa -0x50(%rsp), %xmm0
movdqa -0x60(%rsp), %xmm1
paddd %xmm1, %xmm0
movq -0x68(%rsp), %rax
movdqa %xmm0, 0x70(%rax)
movq -0x68(%rsp), %rax
movdqa (%rax), %xmm0
movl -0x6c(%rsp), %eax
movdqa %xmm0, 0x190(%rsp)
movl %eax, 0x18c(%rsp)
movdqa 0x190(%rsp), %xmm0
movl 0x18c(%rsp), %eax
movd %eax, %xmm1
psrad %xmm1, %xmm0
movq -0x68(%rsp), %rax
movdqa %xmm0, (%rax)
movq -0x68(%rsp), %rax
movdqa 0x10(%rax), %xmm0
movl -0x6c(%rsp), %eax
movdqa %xmm0, 0x170(%rsp)
movl %eax, 0x16c(%rsp)
movdqa 0x170(%rsp), %xmm0
movl 0x16c(%rsp), %eax
movd %eax, %xmm1
psrad %xmm1, %xmm0
movq -0x68(%rsp), %rax
movdqa %xmm0, 0x10(%rax)
movq -0x68(%rsp), %rax
movdqa 0x20(%rax), %xmm0
movl -0x6c(%rsp), %eax
movdqa %xmm0, 0x150(%rsp)
movl %eax, 0x14c(%rsp)
movdqa 0x150(%rsp), %xmm0
movl 0x14c(%rsp), %eax
movd %eax, %xmm1
psrad %xmm1, %xmm0
movq -0x68(%rsp), %rax
movdqa %xmm0, 0x20(%rax)
movq -0x68(%rsp), %rax
movdqa 0x30(%rax), %xmm0
movl -0x6c(%rsp), %eax
movdqa %xmm0, 0x130(%rsp)
movl %eax, 0x12c(%rsp)
movdqa 0x130(%rsp), %xmm0
movl 0x12c(%rsp), %eax
movd %eax, %xmm1
psrad %xmm1, %xmm0
movq -0x68(%rsp), %rax
movdqa %xmm0, 0x30(%rax)
movq -0x68(%rsp), %rax
movdqa 0x40(%rax), %xmm0
movl -0x6c(%rsp), %eax
movdqa %xmm0, 0x110(%rsp)
movl %eax, 0x10c(%rsp)
movdqa 0x110(%rsp), %xmm0
movl 0x10c(%rsp), %eax
movd %eax, %xmm1
psrad %xmm1, %xmm0
movq -0x68(%rsp), %rax
movdqa %xmm0, 0x40(%rax)
movq -0x68(%rsp), %rax
movdqa 0x50(%rax), %xmm0
movl -0x6c(%rsp), %eax
movdqa %xmm0, 0xf0(%rsp)
movl %eax, 0xec(%rsp)
movdqa 0xf0(%rsp), %xmm0
movl 0xec(%rsp), %eax
movd %eax, %xmm1
psrad %xmm1, %xmm0
movq -0x68(%rsp), %rax
movdqa %xmm0, 0x50(%rax)
movq -0x68(%rsp), %rax
movdqa 0x60(%rax), %xmm0
movl -0x6c(%rsp), %eax
movdqa %xmm0, 0xd0(%rsp)
movl %eax, 0xcc(%rsp)
movdqa 0xd0(%rsp), %xmm0
movl 0xcc(%rsp), %eax
movd %eax, %xmm1
psrad %xmm1, %xmm0
movq -0x68(%rsp), %rax
movdqa %xmm0, 0x60(%rax)
movq -0x68(%rsp), %rax
movdqa 0x70(%rax), %xmm0
movl -0x6c(%rsp), %eax
movdqa %xmm0, 0xb0(%rsp)
movl %eax, 0xac(%rsp)
movdqa 0xb0(%rsp), %xmm0
movl 0xac(%rsp), %eax
movd %eax, %xmm1
psrad %xmm1, %xmm0
movq -0x68(%rsp), %rax
movdqa %xmm0, 0x70(%rax)
addq $0x1d8, %rsp # imm = 0x1D8
retq
nopl (%rax)
| /m-ab-s[P]aom/av1/encoder/x86/highbd_fwd_txfm_sse4.c |
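col_txfm_4x8_rounding, like the other *_rounding helpers in this file, applies the same add-half-then-arithmetic-shift pattern to each of its eight vectors. A per-lane scalar sketch, with a name of my choosing:

#include <stdint.h>

/* One lane of the rounding step: add 1 << (shift - 1), then arithmetic shift
   right by shift, which is what each paddd/psrad pair above does four lanes
   at a time. */
static inline int32_t round_shift_lane_sketch(int32_t x, int shift) {
  return (x + (1 << (shift - 1))) >> shift;
}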
load_buffer_8x4 | static inline void load_buffer_8x4(const int16_t *input, __m128i *out,
int stride, int flipud, int fliplr,
int shift) {
const int16_t *topL = input;
const int16_t *topR = input + 4;
const int16_t *tmp;
if (fliplr) {
tmp = topL;
topL = topR;
topR = tmp;
}
load_buffer_4x4(topL, out, stride, flipud, fliplr, shift);
load_buffer_4x4(topR, out + 4, stride, flipud, fliplr, shift);
} | subq $0x38, %rsp
movq %rdi, 0x30(%rsp)
movq %rsi, 0x28(%rsp)
movl %edx, 0x24(%rsp)
movl %ecx, 0x20(%rsp)
movl %r8d, 0x1c(%rsp)
movl %r9d, 0x18(%rsp)
movq 0x30(%rsp), %rax
movq %rax, 0x10(%rsp)
movq 0x30(%rsp), %rax
addq $0x8, %rax
movq %rax, 0x8(%rsp)
cmpl $0x0, 0x1c(%rsp)
je 0x80392b
movq 0x10(%rsp), %rax
movq %rax, (%rsp)
movq 0x8(%rsp), %rax
movq %rax, 0x10(%rsp)
movq (%rsp), %rax
movq %rax, 0x8(%rsp)
movq 0x10(%rsp), %rdi
movq 0x28(%rsp), %rsi
movl 0x24(%rsp), %edx
movl 0x20(%rsp), %ecx
movl 0x1c(%rsp), %r8d
movl 0x18(%rsp), %r9d
callq 0x7ef0f0
movq 0x8(%rsp), %rdi
movq 0x28(%rsp), %rsi
addq $0x40, %rsi
movl 0x24(%rsp), %edx
movl 0x20(%rsp), %ecx
movl 0x1c(%rsp), %r8d
movl 0x18(%rsp), %r9d
callq 0x7ef0f0
addq $0x38, %rsp
retq
nopw %cs:(%rax,%rax)
| /m-ab-s[P]aom/av1/encoder/x86/highbd_fwd_txfm_sse4.c |
av1_fwd_txfm2d_16x64_sse4_1 | void av1_fwd_txfm2d_16x64_sse4_1(const int16_t *input, int32_t *coeff,
int stride, TX_TYPE tx_type, int bd) {
__m128i in[256];
__m128i *outcoeff128 = (__m128i *)coeff;
const int8_t *shift = av1_fwd_txfm_shift_ls[TX_16X64];
const int txw_idx = get_txw_idx(TX_16X64);
const int txh_idx = get_txh_idx(TX_16X64);
const int txfm_size_col = tx_size_wide[TX_16X64];
const int txfm_size_row = tx_size_high[TX_16X64];
int bitcol = av1_fwd_cos_bit_col[txw_idx][txh_idx];
int bitrow = av1_fwd_cos_bit_row[txw_idx][txh_idx];
int ud_flip, lr_flip;
get_flip_cfg(tx_type, &ud_flip, &lr_flip);
const int num_col = txfm_size_col >> 2;
  // col transform
for (int i = 0; i < txfm_size_row; i += num_col) {
load_buffer_4x4(input + (i + 0) * stride, in + (i + 0) * num_col, num_col,
ud_flip, lr_flip, shift[0]);
load_buffer_4x4(input + (i + 1) * stride, in + (i + 1) * num_col, num_col,
ud_flip, lr_flip, shift[0]);
load_buffer_4x4(input + (i + 2) * stride, in + (i + 2) * num_col, num_col,
ud_flip, lr_flip, shift[0]);
load_buffer_4x4(input + (i + 3) * stride, in + (i + 3) * num_col, num_col,
ud_flip, lr_flip, shift[0]);
}
for (int i = 0; i < num_col; i++) {
av1_fdct64_sse4_1(in + i, outcoeff128 + i, bitcol, num_col, num_col);
}
col_txfm_16x16_rounding(outcoeff128, -shift[1]);
col_txfm_16x16_rounding(outcoeff128 + 64, -shift[1]);
col_txfm_16x16_rounding(outcoeff128 + 128, -shift[1]);
col_txfm_16x16_rounding(outcoeff128 + 192, -shift[1]);
transpose_8nx8n(outcoeff128, in, txfm_size_col, 32);
fdct16x16_sse4_1(in, outcoeff128, bitrow, 8);
(void)bd;
} | subq $0x1068, %rsp # imm = 0x1068
movb %cl, %al
movq %rdi, 0x1060(%rsp)
movq %rsi, 0x1058(%rsp)
movl %edx, 0x1054(%rsp)
movb %al, 0x1053(%rsp)
movl %r8d, 0x104c(%rsp)
movq 0x1058(%rsp), %rax
movq %rax, 0x38(%rsp)
leaq 0x3a214d(%rip), %rax # 0xba5b10
movq 0x88(%rax), %rax
movq %rax, 0x30(%rsp)
movl $0x11, %edi
callq 0x7ef0b0
movl %eax, 0x2c(%rsp)
movl $0x11, %edi
callq 0x7ef0d0
movl %eax, 0x28(%rsp)
movl 0x316863(%rip), %eax # 0xb1a254
movl %eax, 0x24(%rsp)
movl 0x3168a9(%rip), %eax # 0xb1a2a4
movl %eax, 0x20(%rsp)
movslq 0x2c(%rsp), %rcx
leaq 0x313605(%rip), %rax # 0xb17010
imulq $0x5, %rcx, %rcx
addq %rcx, %rax
movslq 0x28(%rsp), %rcx
movsbl (%rax,%rcx), %eax
movl %eax, 0x1c(%rsp)
movslq 0x2c(%rsp), %rcx
leaq 0x313605(%rip), %rax # 0xb17030
imulq $0x5, %rcx, %rcx
addq %rcx, %rax
movslq 0x28(%rsp), %rcx
movsbl (%rax,%rcx), %eax
movl %eax, 0x18(%rsp)
leaq 0x14(%rsp), %rsi
leaq 0x10(%rsp), %rdx
movzbl 0x1053(%rsp), %edi
callq 0x800ca0
movl $0x4, 0xc(%rsp)
movl $0x0, 0x8(%rsp)
cmpl $0x40, 0x8(%rsp)
jge 0x803bcd
movq 0x1060(%rsp), %rdi
movl 0x8(%rsp), %eax
addl $0x0, %eax
imull 0x1054(%rsp), %eax
cltq
shlq %rax
addq %rax, %rdi
leaq 0x40(%rsp), %rsi
movl 0x8(%rsp), %eax
addl $0x0, %eax
shll $0x2, %eax
cltq
shlq $0x4, %rax
addq %rax, %rsi
movl 0x14(%rsp), %ecx
movl 0x10(%rsp), %r8d
movq 0x30(%rsp), %rax
movsbl (%rax), %r9d
movl $0x4, %edx
callq 0x7ef0f0
movq 0x1060(%rsp), %rdi
movl 0x8(%rsp), %eax
addl $0x1, %eax
imull 0x1054(%rsp), %eax
cltq
shlq %rax
addq %rax, %rdi
leaq 0x40(%rsp), %rsi
movl 0x8(%rsp), %eax
addl $0x1, %eax
shll $0x2, %eax
cltq
shlq $0x4, %rax
addq %rax, %rsi
movl 0x14(%rsp), %ecx
movl 0x10(%rsp), %r8d
movq 0x30(%rsp), %rax
movsbl (%rax), %r9d
movl $0x4, %edx
callq 0x7ef0f0
movq 0x1060(%rsp), %rdi
movl 0x8(%rsp), %eax
addl $0x2, %eax
imull 0x1054(%rsp), %eax
cltq
shlq %rax
addq %rax, %rdi
leaq 0x40(%rsp), %rsi
movl 0x8(%rsp), %eax
addl $0x2, %eax
shll $0x2, %eax
cltq
shlq $0x4, %rax
addq %rax, %rsi
movl 0x14(%rsp), %ecx
movl 0x10(%rsp), %r8d
movq 0x30(%rsp), %rax
movsbl (%rax), %r9d
movl $0x4, %edx
callq 0x7ef0f0
movq 0x1060(%rsp), %rdi
movl 0x8(%rsp), %eax
addl $0x3, %eax
imull 0x1054(%rsp), %eax
cltq
shlq %rax
addq %rax, %rdi
leaq 0x40(%rsp), %rsi
movl 0x8(%rsp), %eax
addl $0x3, %eax
shll $0x2, %eax
cltq
shlq $0x4, %rax
addq %rax, %rsi
movl 0x14(%rsp), %ecx
movl 0x10(%rsp), %r8d
movq 0x30(%rsp), %rax
movsbl (%rax), %r9d
movl $0x4, %edx
callq 0x7ef0f0
movl 0x8(%rsp), %eax
addl $0x4, %eax
movl %eax, 0x8(%rsp)
jmp 0x803a66
movl $0x0, 0x4(%rsp)
cmpl $0x4, 0x4(%rsp)
jge 0x803c20
leaq 0x40(%rsp), %rdi
movslq 0x4(%rsp), %rax
shlq $0x4, %rax
addq %rax, %rdi
movq 0x38(%rsp), %rsi
movslq 0x4(%rsp), %rax
shlq $0x4, %rax
addq %rax, %rsi
movl 0x1c(%rsp), %eax
movl $0x4, %r8d
movsbl %al, %edx
movl %r8d, %ecx
callq 0xa64110
movl 0x4(%rsp), %eax
addl $0x1, %eax
movl %eax, 0x4(%rsp)
jmp 0x803bd5
movq 0x38(%rsp), %rdi
movq 0x30(%rsp), %rax
movsbl 0x1(%rax), %eax
xorl %esi, %esi
subl %eax, %esi
callq 0x7fada0
movq 0x38(%rsp), %rdi
addq $0x400, %rdi # imm = 0x400
movq 0x30(%rsp), %rax
movsbl 0x1(%rax), %eax
xorl %esi, %esi
subl %eax, %esi
callq 0x7fada0
movq 0x38(%rsp), %rdi
addq $0x800, %rdi # imm = 0x800
movq 0x30(%rsp), %rax
movsbl 0x1(%rax), %eax
xorl %esi, %esi
subl %eax, %esi
callq 0x7fada0
movq 0x38(%rsp), %rdi
addq $0xc00, %rdi # imm = 0xC00
movq 0x30(%rsp), %rax
movsbl 0x1(%rax), %eax
xorl %esi, %esi
subl %eax, %esi
callq 0x7fada0
movq 0x38(%rsp), %rdi
leaq 0x40(%rsp), %rsi
movl $0x10, %edx
movl $0x20, %ecx
callq 0x801a60
leaq 0x40(%rsp), %rdi
movq 0x38(%rsp), %rsi
movl 0x18(%rsp), %edx
movl $0x8, %ecx
callq 0x7f6f90
addq $0x1068, %rsp # imm = 0x1068
retq
nopw (%rax,%rax)
| /m-ab-s[P]aom/av1/encoder/x86/highbd_fwd_txfm_sse4.c |
av1_fwd_txfm2d_64x16_sse4_1 | void av1_fwd_txfm2d_64x16_sse4_1(const int16_t *input, int32_t *coeff,
int stride, TX_TYPE tx_type, int bd) {
__m128i in[256];
__m128i *outcoeff128 = (__m128i *)coeff;
const int8_t *shift = av1_fwd_txfm_shift_ls[TX_64X16];
const int txw_idx = get_txw_idx(TX_64X16);
const int txh_idx = get_txh_idx(TX_64X16);
const int txfm_size_col = tx_size_wide[TX_64X16];
const int txfm_size_row = tx_size_high[TX_64X16];
int bitcol = av1_fwd_cos_bit_col[txw_idx][txh_idx];
int bitrow = av1_fwd_cos_bit_row[txw_idx][txh_idx];
int ud_flip, lr_flip;
get_flip_cfg(tx_type, &ud_flip, &lr_flip);
  // col transform
for (int i = 0; i < txfm_size_row; i++) {
load_buffer_4x4(input + 0 + i * stride, in + 0 + i * txfm_size_row, 4,
ud_flip, lr_flip, shift[0]);
load_buffer_4x4(input + 16 + i * stride, in + 4 + i * txfm_size_row, 4,
ud_flip, lr_flip, shift[0]);
load_buffer_4x4(input + 32 + i * stride, in + 8 + i * txfm_size_row, 4,
ud_flip, lr_flip, shift[0]);
load_buffer_4x4(input + 48 + i * stride, in + 12 + i * txfm_size_row, 4,
ud_flip, lr_flip, shift[0]);
}
fdct16x16_sse4_1(in, outcoeff128, bitcol, txfm_size_row);
col_txfm_16x16_rounding(outcoeff128, -shift[1]);
col_txfm_16x16_rounding(outcoeff128 + 64, -shift[1]);
col_txfm_16x16_rounding(outcoeff128 + 128, -shift[1]);
col_txfm_16x16_rounding(outcoeff128 + 192, -shift[1]);
transpose_8nx8n(outcoeff128, in, txfm_size_col, txfm_size_row);
for (int i = 0; i < 4; i++) {
av1_fdct64_sse4_1(in + i, outcoeff128 + i, bitrow, 4, 4);
}
memset(coeff + txfm_size_row * 32, 0, txfm_size_row * 32 * sizeof(*coeff));
(void)bd;
} | subq $0x1068, %rsp # imm = 0x1068
movb %cl, %al
movq %rdi, 0x1060(%rsp)
movq %rsi, 0x1058(%rsp)
movl %edx, 0x1054(%rsp)
movb %al, 0x1053(%rsp)
movl %r8d, 0x104c(%rsp)
movq 0x1058(%rsp), %rax
movq %rax, 0x38(%rsp)
leaq 0x3a1dfd(%rip), %rax # 0xba5b10
movq 0x90(%rax), %rax
movq %rax, 0x30(%rsp)
movl $0x12, %edi
callq 0x7ef0b0
movl %eax, 0x2c(%rsp)
movl $0x12, %edi
callq 0x7ef0d0
movl %eax, 0x28(%rsp)
movl 0x316517(%rip), %eax # 0xb1a258
movl %eax, 0x24(%rsp)
movl 0x31655d(%rip), %eax # 0xb1a2a8
movl %eax, 0x20(%rsp)
movslq 0x2c(%rsp), %rcx
leaq 0x3132b5(%rip), %rax # 0xb17010
imulq $0x5, %rcx, %rcx
addq %rcx, %rax
movslq 0x28(%rsp), %rcx
movsbl (%rax,%rcx), %eax
movl %eax, 0x1c(%rsp)
movslq 0x2c(%rsp), %rcx
leaq 0x3132b5(%rip), %rax # 0xb17030
imulq $0x5, %rcx, %rcx
addq %rcx, %rax
movslq 0x28(%rsp), %rcx
movsbl (%rax,%rcx), %eax
movl %eax, 0x18(%rsp)
leaq 0x14(%rsp), %rsi
leaq 0x10(%rsp), %rdx
movzbl 0x1053(%rsp), %edi
callq 0x800ca0
movl $0x0, 0xc(%rsp)
cmpl $0x10, 0xc(%rsp)
jge 0x803f1b
movq 0x1060(%rsp), %rdi
movl 0xc(%rsp), %eax
imull 0x1054(%rsp), %eax
cltq
shlq %rax
addq %rax, %rdi
leaq 0x40(%rsp), %rsi
movl 0xc(%rsp), %eax
shll $0x4, %eax
cltq
shlq $0x4, %rax
addq %rax, %rsi
movl 0x14(%rsp), %ecx
movl 0x10(%rsp), %r8d
movq 0x30(%rsp), %rax
movsbl (%rax), %r9d
movl $0x4, %edx
callq 0x7ef0f0
movq 0x1060(%rsp), %rdi
addq $0x20, %rdi
movl 0xc(%rsp), %eax
imull 0x1054(%rsp), %eax
cltq
shlq %rax
addq %rax, %rdi
leaq 0x40(%rsp), %rsi
addq $0x40, %rsi
movl 0xc(%rsp), %eax
shll $0x4, %eax
cltq
shlq $0x4, %rax
addq %rax, %rsi
movl 0x14(%rsp), %ecx
movl 0x10(%rsp), %r8d
movq 0x30(%rsp), %rax
movsbl (%rax), %r9d
movl $0x4, %edx
callq 0x7ef0f0
movq 0x1060(%rsp), %rdi
addq $0x40, %rdi
movl 0xc(%rsp), %eax
imull 0x1054(%rsp), %eax
cltq
shlq %rax
addq %rax, %rdi
leaq 0x40(%rsp), %rsi
addq $0x80, %rsi
movl 0xc(%rsp), %eax
shll $0x4, %eax
cltq
shlq $0x4, %rax
addq %rax, %rsi
movl 0x14(%rsp), %ecx
movl 0x10(%rsp), %r8d
movq 0x30(%rsp), %rax
movsbl (%rax), %r9d
movl $0x4, %edx
callq 0x7ef0f0
movq 0x1060(%rsp), %rdi
addq $0x60, %rdi
movl 0xc(%rsp), %eax
imull 0x1054(%rsp), %eax
cltq
shlq %rax
addq %rax, %rdi
leaq 0x40(%rsp), %rsi
addq $0xc0, %rsi
movl 0xc(%rsp), %eax
shll $0x4, %eax
cltq
shlq $0x4, %rax
addq %rax, %rsi
movl 0x14(%rsp), %ecx
movl 0x10(%rsp), %r8d
movq 0x30(%rsp), %rax
movsbl (%rax), %r9d
movl $0x4, %edx
callq 0x7ef0f0
movl 0xc(%rsp), %eax
addl $0x1, %eax
movl %eax, 0xc(%rsp)
jmp 0x803dae
leaq 0x40(%rsp), %rdi
movq 0x38(%rsp), %rsi
movl 0x1c(%rsp), %edx
movl $0x10, %ecx
callq 0x7f6f90
movq 0x38(%rsp), %rdi
movq 0x30(%rsp), %rax
movsbl 0x1(%rax), %eax
xorl %esi, %esi
subl %eax, %esi
callq 0x7fada0
movq 0x38(%rsp), %rdi
addq $0x400, %rdi # imm = 0x400
movq 0x30(%rsp), %rax
movsbl 0x1(%rax), %eax
xorl %esi, %esi
subl %eax, %esi
callq 0x7fada0
movq 0x38(%rsp), %rdi
addq $0x800, %rdi # imm = 0x800
movq 0x30(%rsp), %rax
movsbl 0x1(%rax), %eax
xorl %esi, %esi
subl %eax, %esi
callq 0x7fada0
movq 0x38(%rsp), %rdi
addq $0xc00, %rdi # imm = 0xC00
movq 0x30(%rsp), %rax
movsbl 0x1(%rax), %eax
xorl %esi, %esi
subl %eax, %esi
callq 0x7fada0
movq 0x38(%rsp), %rdi
leaq 0x40(%rsp), %rsi
movl $0x40, %edx
movl $0x10, %ecx
callq 0x801a60
movl $0x0, 0x8(%rsp)
cmpl $0x4, 0x8(%rsp)
jge 0x804010
leaq 0x40(%rsp), %rdi
movslq 0x8(%rsp), %rax
shlq $0x4, %rax
addq %rax, %rdi
movq 0x38(%rsp), %rsi
movslq 0x8(%rsp), %rax
shlq $0x4, %rax
addq %rax, %rsi
movl 0x18(%rsp), %eax
movl $0x4, %r8d
movsbl %al, %edx
movl %r8d, %ecx
callq 0xa64110
movl 0x8(%rsp), %eax
addl $0x1, %eax
movl %eax, 0x8(%rsp)
jmp 0x803fc5
movq 0x1058(%rsp), %rdi
addq $0x800, %rdi # imm = 0x800
xorl %esi, %esi
movl $0x800, %edx # imm = 0x800
callq 0x18280
addq $0x1068, %rsp # imm = 0x1068
retq
nopw %cs:(%rax,%rax)
| /m-ab-s[P]aom/av1/encoder/x86/highbd_fwd_txfm_sse4.c |
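The trailing memset in av1_fwd_txfm2d_64x16_sse4_1 clears the half of the coefficient buffer that holds the discarded high-frequency outputs of the 64-point dimension: with txfm_size_row = 16, the cleared region is 16 * 32 * sizeof(int32_t) = 2048 bytes, matching both the $0x800 length and the coeff + 0x800 destination set up right before the memset call in the listing. A small compile-time cross-check of that arithmetic (a sketch, not from the source):

#include <stdint.h>

/* 16 rows of 32 discarded 32-bit coefficients = 0x800 bytes, as in the listing. */
_Static_assert(16 * 32 * sizeof(int32_t) == 0x800,
               "upper half of the 64-point row transform is zeroed");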
fdct4x8_sse4_1 | static void fdct4x8_sse4_1(__m128i *in, __m128i *out, int bit,
const int col_num) {
const int32_t *cospi = cospi_arr(bit);
const __m128i cospi32 = _mm_set1_epi32(cospi[32]);
const __m128i cospim32 = _mm_set1_epi32(-cospi[32]);
const __m128i cospi48 = _mm_set1_epi32(cospi[48]);
const __m128i cospi16 = _mm_set1_epi32(cospi[16]);
const __m128i cospi56 = _mm_set1_epi32(cospi[56]);
const __m128i cospi8 = _mm_set1_epi32(cospi[8]);
const __m128i cospi24 = _mm_set1_epi32(cospi[24]);
const __m128i cospi40 = _mm_set1_epi32(cospi[40]);
const __m128i rnding = _mm_set1_epi32(1 << (bit - 1));
__m128i u[8], v[8];
int startidx = 0 * col_num;
int endidx = 7 * col_num;
// Even 8 points 0, 2, ..., 14
// stage 0
// stage 1
u[0] = _mm_add_epi32(in[startidx], in[endidx]);
v[7] = _mm_sub_epi32(in[startidx], in[endidx]); // v[7]
startidx += col_num;
endidx -= col_num;
u[1] = _mm_add_epi32(in[startidx], in[endidx]);
u[6] = _mm_sub_epi32(in[startidx], in[endidx]);
startidx += col_num;
endidx -= col_num;
u[2] = _mm_add_epi32(in[startidx], in[endidx]);
u[5] = _mm_sub_epi32(in[startidx], in[endidx]);
startidx += col_num;
endidx -= col_num;
u[3] = _mm_add_epi32(in[startidx], in[endidx]);
v[4] = _mm_sub_epi32(in[startidx], in[endidx]); // v[4]
// stage 2
v[0] = _mm_add_epi32(u[0], u[3]);
v[3] = _mm_sub_epi32(u[0], u[3]);
v[1] = _mm_add_epi32(u[1], u[2]);
v[2] = _mm_sub_epi32(u[1], u[2]);
v[5] = _mm_mullo_epi32(u[5], cospim32);
v[6] = _mm_mullo_epi32(u[6], cospi32);
v[5] = _mm_add_epi32(v[5], v[6]);
v[5] = _mm_add_epi32(v[5], rnding);
v[5] = _mm_srai_epi32(v[5], bit);
u[0] = _mm_mullo_epi32(u[5], cospi32);
v[6] = _mm_mullo_epi32(u[6], cospim32);
v[6] = _mm_sub_epi32(u[0], v[6]);
v[6] = _mm_add_epi32(v[6], rnding);
v[6] = _mm_srai_epi32(v[6], bit);
// stage 3
// type 0
v[0] = _mm_mullo_epi32(v[0], cospi32);
v[1] = _mm_mullo_epi32(v[1], cospi32);
u[0] = _mm_add_epi32(v[0], v[1]);
u[0] = _mm_add_epi32(u[0], rnding);
u[0] = _mm_srai_epi32(u[0], bit);
u[1] = _mm_sub_epi32(v[0], v[1]);
u[1] = _mm_add_epi32(u[1], rnding);
u[1] = _mm_srai_epi32(u[1], bit);
// type 1
v[0] = _mm_mullo_epi32(v[2], cospi48);
v[1] = _mm_mullo_epi32(v[3], cospi16);
u[2] = _mm_add_epi32(v[0], v[1]);
u[2] = _mm_add_epi32(u[2], rnding);
u[2] = _mm_srai_epi32(u[2], bit);
v[0] = _mm_mullo_epi32(v[2], cospi16);
v[1] = _mm_mullo_epi32(v[3], cospi48);
u[3] = _mm_sub_epi32(v[1], v[0]);
u[3] = _mm_add_epi32(u[3], rnding);
u[3] = _mm_srai_epi32(u[3], bit);
u[4] = _mm_add_epi32(v[4], v[5]);
u[5] = _mm_sub_epi32(v[4], v[5]);
u[6] = _mm_sub_epi32(v[7], v[6]);
u[7] = _mm_add_epi32(v[7], v[6]);
// stage 4
// stage 5
v[0] = _mm_mullo_epi32(u[4], cospi56);
v[1] = _mm_mullo_epi32(u[7], cospi8);
v[0] = _mm_add_epi32(v[0], v[1]);
v[0] = _mm_add_epi32(v[0], rnding);
out[1 * col_num] = _mm_srai_epi32(v[0], bit); // buf0[4]
v[0] = _mm_mullo_epi32(u[4], cospi8);
v[1] = _mm_mullo_epi32(u[7], cospi56);
v[0] = _mm_sub_epi32(v[1], v[0]);
v[0] = _mm_add_epi32(v[0], rnding);
out[7 * col_num] = _mm_srai_epi32(v[0], bit); // buf0[7]
v[0] = _mm_mullo_epi32(u[5], cospi24);
v[1] = _mm_mullo_epi32(u[6], cospi40);
v[0] = _mm_add_epi32(v[0], v[1]);
v[0] = _mm_add_epi32(v[0], rnding);
out[5 * col_num] = _mm_srai_epi32(v[0], bit); // buf0[5]
v[0] = _mm_mullo_epi32(u[5], cospi40);
v[1] = _mm_mullo_epi32(u[6], cospi24);
v[0] = _mm_sub_epi32(v[1], v[0]);
v[0] = _mm_add_epi32(v[0], rnding);
out[3 * col_num] = _mm_srai_epi32(v[0], bit); // buf0[6]
out[0 * col_num] = u[0]; // buf0[0]
out[4 * col_num] = u[1]; // buf0[1]
out[2 * col_num] = u[2]; // buf0[2]
out[6 * col_num] = u[3]; // buf0[3]
} | subq $0xb18, %rsp # imm = 0xB18
movq %rdi, 0x1b8(%rsp)
movq %rsi, 0x1b0(%rsp)
movl %edx, 0x1ac(%rsp)
movl %ecx, 0x1a8(%rsp)
movl 0x1ac(%rsp), %edi
callq 0x804040
movq %rax, 0x1a0(%rsp)
movq 0x1a0(%rsp), %rax
movl 0x80(%rax), %eax
movl %eax, 0x7ac(%rsp)
movl 0x7ac(%rsp), %eax
movl %eax, 0xa0c(%rsp)
movl %eax, 0xa08(%rsp)
movl %eax, 0xa04(%rsp)
movl %eax, 0xa00(%rsp)
movl 0xa04(%rsp), %edx
movl 0xa08(%rsp), %ecx
movl 0xa0c(%rsp), %eax
movd 0xa00(%rsp), %xmm0
pinsrd $0x1, %edx, %xmm0
pinsrd $0x2, %ecx, %xmm0
pinsrd $0x3, %eax, %xmm0
movaps %xmm0, 0x9f0(%rsp)
movaps 0x9f0(%rsp), %xmm0
movaps %xmm0, 0x190(%rsp)
movq 0x1a0(%rsp), %rax
movl 0x80(%rax), %eax
negl %eax
movl %eax, 0x7a8(%rsp)
movl 0x7a8(%rsp), %eax
movl %eax, 0xa2c(%rsp)
movl %eax, 0xa28(%rsp)
movl %eax, 0xa24(%rsp)
movl %eax, 0xa20(%rsp)
movl 0xa24(%rsp), %edx
movl 0xa28(%rsp), %ecx
movl 0xa2c(%rsp), %eax
movd 0xa20(%rsp), %xmm0
pinsrd $0x1, %edx, %xmm0
pinsrd $0x2, %ecx, %xmm0
pinsrd $0x3, %eax, %xmm0
movaps %xmm0, 0xa10(%rsp)
movaps 0xa10(%rsp), %xmm0
movaps %xmm0, 0x180(%rsp)
movq 0x1a0(%rsp), %rax
movl 0xc0(%rax), %eax
movl %eax, 0x7a4(%rsp)
movl 0x7a4(%rsp), %eax
movl %eax, 0xa4c(%rsp)
movl %eax, 0xa48(%rsp)
movl %eax, 0xa44(%rsp)
movl %eax, 0xa40(%rsp)
movl 0xa44(%rsp), %edx
movl 0xa48(%rsp), %ecx
movl 0xa4c(%rsp), %eax
movd 0xa40(%rsp), %xmm0
pinsrd $0x1, %edx, %xmm0
pinsrd $0x2, %ecx, %xmm0
pinsrd $0x3, %eax, %xmm0
movaps %xmm0, 0xa30(%rsp)
movaps 0xa30(%rsp), %xmm0
movaps %xmm0, 0x170(%rsp)
movq 0x1a0(%rsp), %rax
movl 0x40(%rax), %eax
movl %eax, 0x7a0(%rsp)
movl 0x7a0(%rsp), %eax
movl %eax, 0xa6c(%rsp)
movl %eax, 0xa68(%rsp)
movl %eax, 0xa64(%rsp)
movl %eax, 0xa60(%rsp)
movl 0xa64(%rsp), %edx
movl 0xa68(%rsp), %ecx
movl 0xa6c(%rsp), %eax
movd 0xa60(%rsp), %xmm0
pinsrd $0x1, %edx, %xmm0
pinsrd $0x2, %ecx, %xmm0
pinsrd $0x3, %eax, %xmm0
movaps %xmm0, 0xa50(%rsp)
movaps 0xa50(%rsp), %xmm0
movaps %xmm0, 0x160(%rsp)
movq 0x1a0(%rsp), %rax
movl 0xe0(%rax), %eax
movl %eax, 0x79c(%rsp)
movl 0x79c(%rsp), %eax
movl %eax, 0xa8c(%rsp)
movl %eax, 0xa88(%rsp)
movl %eax, 0xa84(%rsp)
movl %eax, 0xa80(%rsp)
movl 0xa84(%rsp), %edx
movl 0xa88(%rsp), %ecx
movl 0xa8c(%rsp), %eax
movd 0xa80(%rsp), %xmm0
pinsrd $0x1, %edx, %xmm0
pinsrd $0x2, %ecx, %xmm0
pinsrd $0x3, %eax, %xmm0
movaps %xmm0, 0xa70(%rsp)
movaps 0xa70(%rsp), %xmm0
movaps %xmm0, 0x150(%rsp)
movq 0x1a0(%rsp), %rax
movl 0x20(%rax), %eax
movl %eax, 0x798(%rsp)
movl 0x798(%rsp), %eax
movl %eax, 0xaac(%rsp)
movl %eax, 0xaa8(%rsp)
movl %eax, 0xaa4(%rsp)
movl %eax, 0xaa0(%rsp)
movl 0xaa4(%rsp), %edx
movl 0xaa8(%rsp), %ecx
movl 0xaac(%rsp), %eax
movd 0xaa0(%rsp), %xmm0
pinsrd $0x1, %edx, %xmm0
pinsrd $0x2, %ecx, %xmm0
pinsrd $0x3, %eax, %xmm0
movaps %xmm0, 0xa90(%rsp)
movaps 0xa90(%rsp), %xmm0
movaps %xmm0, 0x140(%rsp)
movq 0x1a0(%rsp), %rax
movl 0x60(%rax), %eax
movl %eax, 0x794(%rsp)
movl 0x794(%rsp), %eax
movl %eax, 0xacc(%rsp)
movl %eax, 0xac8(%rsp)
movl %eax, 0xac4(%rsp)
movl %eax, 0xac0(%rsp)
movl 0xac4(%rsp), %edx
movl 0xac8(%rsp), %ecx
movl 0xacc(%rsp), %eax
movd 0xac0(%rsp), %xmm0
pinsrd $0x1, %edx, %xmm0
pinsrd $0x2, %ecx, %xmm0
pinsrd $0x3, %eax, %xmm0
movaps %xmm0, 0xab0(%rsp)
movaps 0xab0(%rsp), %xmm0
movaps %xmm0, 0x130(%rsp)
movq 0x1a0(%rsp), %rax
movl 0xa0(%rax), %eax
movl %eax, 0x790(%rsp)
movl 0x790(%rsp), %eax
movl %eax, 0xaec(%rsp)
movl %eax, 0xae8(%rsp)
movl %eax, 0xae4(%rsp)
movl %eax, 0xae0(%rsp)
movl 0xae4(%rsp), %edx
movl 0xae8(%rsp), %ecx
movl 0xaec(%rsp), %eax
movd 0xae0(%rsp), %xmm0
pinsrd $0x1, %edx, %xmm0
pinsrd $0x2, %ecx, %xmm0
pinsrd $0x3, %eax, %xmm0
movaps %xmm0, 0xad0(%rsp)
movaps 0xad0(%rsp), %xmm0
movaps %xmm0, 0x120(%rsp)
movb 0x1ac(%rsp), %cl
decb %cl
movl $0x1, %eax
shll %cl, %eax
movl %eax, 0x78c(%rsp)
movl 0x78c(%rsp), %eax
movl %eax, 0xb14(%rsp)
movl %eax, 0xb10(%rsp)
movl %eax, 0xb0c(%rsp)
movl %eax, 0xb08(%rsp)
movl 0xb0c(%rsp), %edx
movl 0xb10(%rsp), %ecx
movl 0xb14(%rsp), %eax
movd 0xb08(%rsp), %xmm0
pinsrd $0x1, %edx, %xmm0
pinsrd $0x2, %ecx, %xmm0
pinsrd $0x3, %eax, %xmm0
movdqa %xmm0, 0xaf0(%rsp)
movdqa 0xaf0(%rsp), %xmm0
movdqa %xmm0, 0x110(%rsp)
imull $0x0, 0x1a8(%rsp), %eax
movl %eax, 0xc(%rsp)
imull $0x7, 0x1a8(%rsp), %eax
movl %eax, 0x8(%rsp)
movq 0x1b8(%rsp), %rax
movslq 0xc(%rsp), %rcx
shlq $0x4, %rcx
addq %rcx, %rax
movdqa (%rax), %xmm1
movq 0x1b8(%rsp), %rax
movslq 0x8(%rsp), %rcx
shlq $0x4, %rcx
addq %rcx, %rax
movdqa (%rax), %xmm0
movdqa %xmm1, 0x490(%rsp)
movdqa %xmm0, 0x480(%rsp)
movdqa 0x490(%rsp), %xmm0
movdqa 0x480(%rsp), %xmm1
paddd %xmm1, %xmm0
movdqa %xmm0, 0x90(%rsp)
movq 0x1b8(%rsp), %rax
movslq 0xc(%rsp), %rcx
shlq $0x4, %rcx
addq %rcx, %rax
movdqa (%rax), %xmm1
movq 0x1b8(%rsp), %rax
movslq 0x8(%rsp), %rcx
shlq $0x4, %rcx
addq %rcx, %rax
movdqa (%rax), %xmm0
movdqa %xmm1, 0x630(%rsp)
movdqa %xmm0, 0x620(%rsp)
movdqa 0x630(%rsp), %xmm0
movdqa 0x620(%rsp), %xmm1
psubd %xmm1, %xmm0
movdqa %xmm0, 0x80(%rsp)
movl 0x1a8(%rsp), %eax
addl 0xc(%rsp), %eax
movl %eax, 0xc(%rsp)
movl 0x1a8(%rsp), %ecx
movl 0x8(%rsp), %eax
subl %ecx, %eax
movl %eax, 0x8(%rsp)
movq 0x1b8(%rsp), %rax
movslq 0xc(%rsp), %rcx
shlq $0x4, %rcx
addq %rcx, %rax
movdqa (%rax), %xmm1
movq 0x1b8(%rsp), %rax
movslq 0x8(%rsp), %rcx
shlq $0x4, %rcx
addq %rcx, %rax
movdqa (%rax), %xmm0
movdqa %xmm1, 0x470(%rsp)
movdqa %xmm0, 0x460(%rsp)
movdqa 0x470(%rsp), %xmm0
movdqa 0x460(%rsp), %xmm1
paddd %xmm1, %xmm0
movdqa %xmm0, 0xa0(%rsp)
movq 0x1b8(%rsp), %rax
movslq 0xc(%rsp), %rcx
shlq $0x4, %rcx
addq %rcx, %rax
movdqa (%rax), %xmm1
movq 0x1b8(%rsp), %rax
movslq 0x8(%rsp), %rcx
shlq $0x4, %rcx
addq %rcx, %rax
movdqa (%rax), %xmm0
movdqa %xmm1, 0x610(%rsp)
movdqa %xmm0, 0x600(%rsp)
movdqa 0x610(%rsp), %xmm0
movdqa 0x600(%rsp), %xmm1
psubd %xmm1, %xmm0
movdqa %xmm0, 0xf0(%rsp)
movl 0x1a8(%rsp), %eax
addl 0xc(%rsp), %eax
movl %eax, 0xc(%rsp)
movl 0x1a8(%rsp), %ecx
movl 0x8(%rsp), %eax
subl %ecx, %eax
movl %eax, 0x8(%rsp)
movq 0x1b8(%rsp), %rax
movslq 0xc(%rsp), %rcx
shlq $0x4, %rcx
addq %rcx, %rax
movdqa (%rax), %xmm1
movq 0x1b8(%rsp), %rax
movslq 0x8(%rsp), %rcx
shlq $0x4, %rcx
addq %rcx, %rax
movdqa (%rax), %xmm0
movdqa %xmm1, 0x450(%rsp)
movdqa %xmm0, 0x440(%rsp)
movdqa 0x450(%rsp), %xmm0
movdqa 0x440(%rsp), %xmm1
paddd %xmm1, %xmm0
movdqa %xmm0, 0xb0(%rsp)
movq 0x1b8(%rsp), %rax
movslq 0xc(%rsp), %rcx
shlq $0x4, %rcx
addq %rcx, %rax
movdqa (%rax), %xmm1
movq 0x1b8(%rsp), %rax
movslq 0x8(%rsp), %rcx
shlq $0x4, %rcx
addq %rcx, %rax
movdqa (%rax), %xmm0
movdqa %xmm1, 0x5f0(%rsp)
movdqa %xmm0, 0x5e0(%rsp)
movdqa 0x5f0(%rsp), %xmm0
movdqa 0x5e0(%rsp), %xmm1
psubd %xmm1, %xmm0
movdqa %xmm0, 0xe0(%rsp)
movl 0x1a8(%rsp), %eax
addl 0xc(%rsp), %eax
movl %eax, 0xc(%rsp)
movl 0x1a8(%rsp), %ecx
movl 0x8(%rsp), %eax
subl %ecx, %eax
movl %eax, 0x8(%rsp)
movq 0x1b8(%rsp), %rax
movslq 0xc(%rsp), %rcx
shlq $0x4, %rcx
addq %rcx, %rax
movdqa (%rax), %xmm1
movq 0x1b8(%rsp), %rax
movslq 0x8(%rsp), %rcx
shlq $0x4, %rcx
addq %rcx, %rax
movdqa (%rax), %xmm0
movdqa %xmm1, 0x430(%rsp)
movdqa %xmm0, 0x420(%rsp)
movdqa 0x430(%rsp), %xmm0
movdqa 0x420(%rsp), %xmm1
paddd %xmm1, %xmm0
movdqa %xmm0, 0xc0(%rsp)
movq 0x1b8(%rsp), %rax
movslq 0xc(%rsp), %rcx
shlq $0x4, %rcx
addq %rcx, %rax
movdqa (%rax), %xmm1
movq 0x1b8(%rsp), %rax
movslq 0x8(%rsp), %rcx
shlq $0x4, %rcx
addq %rcx, %rax
movdqa (%rax), %xmm0
movdqa %xmm1, 0x5d0(%rsp)
movdqa %xmm0, 0x5c0(%rsp)
movdqa 0x5d0(%rsp), %xmm0
movdqa 0x5c0(%rsp), %xmm1
psubd %xmm1, %xmm0
movdqa %xmm0, 0x50(%rsp)
movdqa 0x90(%rsp), %xmm1
movdqa 0xc0(%rsp), %xmm0
movdqa %xmm1, 0x410(%rsp)
movdqa %xmm0, 0x400(%rsp)
movdqa 0x410(%rsp), %xmm0
movdqa 0x400(%rsp), %xmm1
paddd %xmm1, %xmm0
movdqa %xmm0, 0x10(%rsp)
movdqa 0x90(%rsp), %xmm1
movdqa 0xc0(%rsp), %xmm0
movdqa %xmm1, 0x5b0(%rsp)
movdqa %xmm0, 0x5a0(%rsp)
movdqa 0x5b0(%rsp), %xmm0
movdqa 0x5a0(%rsp), %xmm1
psubd %xmm1, %xmm0
movdqa %xmm0, 0x40(%rsp)
movdqa 0xa0(%rsp), %xmm1
movdqa 0xb0(%rsp), %xmm0
movdqa %xmm1, 0x3f0(%rsp)
movdqa %xmm0, 0x3e0(%rsp)
movdqa 0x3f0(%rsp), %xmm0
movdqa 0x3e0(%rsp), %xmm1
paddd %xmm1, %xmm0
movdqa %xmm0, 0x20(%rsp)
movdqa 0xa0(%rsp), %xmm1
movdqa 0xb0(%rsp), %xmm0
movdqa %xmm1, 0x590(%rsp)
movdqa %xmm0, 0x580(%rsp)
movdqa 0x590(%rsp), %xmm0
movdqa 0x580(%rsp), %xmm1
psubd %xmm1, %xmm0
movdqa %xmm0, 0x30(%rsp)
movdqa 0xe0(%rsp), %xmm1
movdqa 0x180(%rsp), %xmm0
movdqa %xmm1, 0x9e0(%rsp)
movdqa %xmm0, 0x9d0(%rsp)
movdqa 0x9e0(%rsp), %xmm0
movdqa 0x9d0(%rsp), %xmm1
pmulld %xmm1, %xmm0
movdqa %xmm0, 0x60(%rsp)
movdqa 0xf0(%rsp), %xmm1
movdqa 0x190(%rsp), %xmm0
movdqa %xmm1, 0x9c0(%rsp)
movdqa %xmm0, 0x9b0(%rsp)
movdqa 0x9c0(%rsp), %xmm0
movdqa 0x9b0(%rsp), %xmm1
pmulld %xmm1, %xmm0
movdqa %xmm0, 0x70(%rsp)
movdqa 0x60(%rsp), %xmm1
movdqa 0x70(%rsp), %xmm0
movdqa %xmm1, 0x3d0(%rsp)
movdqa %xmm0, 0x3c0(%rsp)
movdqa 0x3d0(%rsp), %xmm0
movdqa 0x3c0(%rsp), %xmm1
paddd %xmm1, %xmm0
movdqa %xmm0, 0x60(%rsp)
movdqa 0x60(%rsp), %xmm1
movdqa 0x110(%rsp), %xmm0
movdqa %xmm1, 0x3b0(%rsp)
movdqa %xmm0, 0x3a0(%rsp)
movdqa 0x3b0(%rsp), %xmm0
movdqa 0x3a0(%rsp), %xmm1
paddd %xmm1, %xmm0
movdqa %xmm0, 0x60(%rsp)
movdqa 0x60(%rsp), %xmm0
movl 0x1ac(%rsp), %eax
movdqa %xmm0, 0x770(%rsp)
movl %eax, 0x76c(%rsp)
movdqa 0x770(%rsp), %xmm0
movl 0x76c(%rsp), %eax
movd %eax, %xmm1
psrad %xmm1, %xmm0
movdqa %xmm0, 0x60(%rsp)
movdqa 0xe0(%rsp), %xmm1
movdqa 0x190(%rsp), %xmm0
movdqa %xmm1, 0x9a0(%rsp)
movdqa %xmm0, 0x990(%rsp)
movdqa 0x9a0(%rsp), %xmm0
movdqa 0x990(%rsp), %xmm1
pmulld %xmm1, %xmm0
movdqa %xmm0, 0x90(%rsp)
movdqa 0xf0(%rsp), %xmm1
movdqa 0x180(%rsp), %xmm0
movdqa %xmm1, 0x980(%rsp)
movdqa %xmm0, 0x970(%rsp)
movdqa 0x980(%rsp), %xmm0
movdqa 0x970(%rsp), %xmm1
pmulld %xmm1, %xmm0
movdqa %xmm0, 0x70(%rsp)
movdqa 0x90(%rsp), %xmm1
movdqa 0x70(%rsp), %xmm0
movdqa %xmm1, 0x570(%rsp)
movdqa %xmm0, 0x560(%rsp)
movdqa 0x570(%rsp), %xmm0
movdqa 0x560(%rsp), %xmm1
psubd %xmm1, %xmm0
movdqa %xmm0, 0x70(%rsp)
movdqa 0x70(%rsp), %xmm1
movdqa 0x110(%rsp), %xmm0
movdqa %xmm1, 0x390(%rsp)
movdqa %xmm0, 0x380(%rsp)
movdqa 0x390(%rsp), %xmm0
movdqa 0x380(%rsp), %xmm1
paddd %xmm1, %xmm0
movdqa %xmm0, 0x70(%rsp)
movdqa 0x70(%rsp), %xmm0
movl 0x1ac(%rsp), %eax
movdqa %xmm0, 0x750(%rsp)
movl %eax, 0x74c(%rsp)
movdqa 0x750(%rsp), %xmm0
movl 0x74c(%rsp), %eax
movd %eax, %xmm1
psrad %xmm1, %xmm0
movdqa %xmm0, 0x70(%rsp)
movdqa 0x10(%rsp), %xmm1
movdqa 0x190(%rsp), %xmm0
movdqa %xmm1, 0x960(%rsp)
movdqa %xmm0, 0x950(%rsp)
movdqa 0x960(%rsp), %xmm0
movdqa 0x950(%rsp), %xmm1
pmulld %xmm1, %xmm0
movdqa %xmm0, 0x10(%rsp)
movdqa 0x20(%rsp), %xmm1
movdqa 0x190(%rsp), %xmm0
movdqa %xmm1, 0x940(%rsp)
movdqa %xmm0, 0x930(%rsp)
movdqa 0x940(%rsp), %xmm0
movdqa 0x930(%rsp), %xmm1
pmulld %xmm1, %xmm0
movdqa %xmm0, 0x20(%rsp)
movdqa 0x10(%rsp), %xmm1
movdqa 0x20(%rsp), %xmm0
movdqa %xmm1, 0x370(%rsp)
movdqa %xmm0, 0x360(%rsp)
movdqa 0x370(%rsp), %xmm0
movdqa 0x360(%rsp), %xmm1
paddd %xmm1, %xmm0
movdqa %xmm0, 0x90(%rsp)
movdqa 0x90(%rsp), %xmm1
movdqa 0x110(%rsp), %xmm0
movdqa %xmm1, 0x350(%rsp)
movdqa %xmm0, 0x340(%rsp)
movdqa 0x350(%rsp), %xmm0
movdqa 0x340(%rsp), %xmm1
paddd %xmm1, %xmm0
movdqa %xmm0, 0x90(%rsp)
movdqa 0x90(%rsp), %xmm0
movl 0x1ac(%rsp), %eax
movdqa %xmm0, 0x730(%rsp)
movl %eax, 0x72c(%rsp)
movdqa 0x730(%rsp), %xmm0
movl 0x72c(%rsp), %eax
movd %eax, %xmm1
psrad %xmm1, %xmm0
movdqa %xmm0, 0x90(%rsp)
movdqa 0x10(%rsp), %xmm1
movdqa 0x20(%rsp), %xmm0
movdqa %xmm1, 0x550(%rsp)
movdqa %xmm0, 0x540(%rsp)
movdqa 0x550(%rsp), %xmm0
movdqa 0x540(%rsp), %xmm1
psubd %xmm1, %xmm0
movdqa %xmm0, 0xa0(%rsp)
movdqa 0xa0(%rsp), %xmm1
movdqa 0x110(%rsp), %xmm0
movdqa %xmm1, 0x330(%rsp)
movdqa %xmm0, 0x320(%rsp)
movdqa 0x330(%rsp), %xmm0
movdqa 0x320(%rsp), %xmm1
paddd %xmm1, %xmm0
movdqa %xmm0, 0xa0(%rsp)
movdqa 0xa0(%rsp), %xmm0
movl 0x1ac(%rsp), %eax
movdqa %xmm0, 0x710(%rsp)
movl %eax, 0x70c(%rsp)
movdqa 0x710(%rsp), %xmm0
movl 0x70c(%rsp), %eax
movd %eax, %xmm1
psrad %xmm1, %xmm0
movdqa %xmm0, 0xa0(%rsp)
movdqa 0x30(%rsp), %xmm1
movdqa 0x170(%rsp), %xmm0
movdqa %xmm1, 0x920(%rsp)
movdqa %xmm0, 0x910(%rsp)
movdqa 0x920(%rsp), %xmm0
movdqa 0x910(%rsp), %xmm1
pmulld %xmm1, %xmm0
movdqa %xmm0, 0x10(%rsp)
movdqa 0x40(%rsp), %xmm1
movdqa 0x160(%rsp), %xmm0
movdqa %xmm1, 0x900(%rsp)
movdqa %xmm0, 0x8f0(%rsp)
movdqa 0x900(%rsp), %xmm0
movdqa 0x8f0(%rsp), %xmm1
pmulld %xmm1, %xmm0
movdqa %xmm0, 0x20(%rsp)
movdqa 0x10(%rsp), %xmm1
movdqa 0x20(%rsp), %xmm0
movdqa %xmm1, 0x310(%rsp)
movdqa %xmm0, 0x300(%rsp)
movdqa 0x310(%rsp), %xmm0
movdqa 0x300(%rsp), %xmm1
paddd %xmm1, %xmm0
movdqa %xmm0, 0xb0(%rsp)
movdqa 0xb0(%rsp), %xmm1
movdqa 0x110(%rsp), %xmm0
movdqa %xmm1, 0x2f0(%rsp)
movdqa %xmm0, 0x2e0(%rsp)
movdqa 0x2f0(%rsp), %xmm0
movdqa 0x2e0(%rsp), %xmm1
paddd %xmm1, %xmm0
movdqa %xmm0, 0xb0(%rsp)
movdqa 0xb0(%rsp), %xmm0
movl 0x1ac(%rsp), %eax
movdqa %xmm0, 0x6f0(%rsp)
movl %eax, 0x6ec(%rsp)
movdqa 0x6f0(%rsp), %xmm0
movl 0x6ec(%rsp), %eax
movd %eax, %xmm1
psrad %xmm1, %xmm0
movdqa %xmm0, 0xb0(%rsp)
movdqa 0x30(%rsp), %xmm1
movdqa 0x160(%rsp), %xmm0
movdqa %xmm1, 0x8e0(%rsp)
movdqa %xmm0, 0x8d0(%rsp)
movdqa 0x8e0(%rsp), %xmm0
movdqa 0x8d0(%rsp), %xmm1
pmulld %xmm1, %xmm0
movdqa %xmm0, 0x10(%rsp)
movdqa 0x40(%rsp), %xmm1
movdqa 0x170(%rsp), %xmm0
movdqa %xmm1, 0x8c0(%rsp)
movdqa %xmm0, 0x8b0(%rsp)
movdqa 0x8c0(%rsp), %xmm0
movdqa 0x8b0(%rsp), %xmm1
pmulld %xmm1, %xmm0
movdqa %xmm0, 0x20(%rsp)
movdqa 0x20(%rsp), %xmm1
movdqa 0x10(%rsp), %xmm0
movdqa %xmm1, 0x530(%rsp)
movdqa %xmm0, 0x520(%rsp)
movdqa 0x530(%rsp), %xmm0
movdqa 0x520(%rsp), %xmm1
psubd %xmm1, %xmm0
movdqa %xmm0, 0xc0(%rsp)
movdqa 0xc0(%rsp), %xmm1
movdqa 0x110(%rsp), %xmm0
movdqa %xmm1, 0x2d0(%rsp)
movdqa %xmm0, 0x2c0(%rsp)
movdqa 0x2d0(%rsp), %xmm0
movdqa 0x2c0(%rsp), %xmm1
paddd %xmm1, %xmm0
movdqa %xmm0, 0xc0(%rsp)
movdqa 0xc0(%rsp), %xmm0
movl 0x1ac(%rsp), %eax
movdqa %xmm0, 0x6d0(%rsp)
movl %eax, 0x6cc(%rsp)
movdqa 0x6d0(%rsp), %xmm0
movl 0x6cc(%rsp), %eax
movd %eax, %xmm1
psrad %xmm1, %xmm0
movdqa %xmm0, 0xc0(%rsp)
movdqa 0x50(%rsp), %xmm1
movdqa 0x60(%rsp), %xmm0
movdqa %xmm1, 0x2b0(%rsp)
movdqa %xmm0, 0x2a0(%rsp)
movdqa 0x2b0(%rsp), %xmm0
movdqa 0x2a0(%rsp), %xmm1
paddd %xmm1, %xmm0
movdqa %xmm0, 0xd0(%rsp)
movdqa 0x50(%rsp), %xmm1
movdqa 0x60(%rsp), %xmm0
movdqa %xmm1, 0x510(%rsp)
movdqa %xmm0, 0x500(%rsp)
movdqa 0x510(%rsp), %xmm0
movdqa 0x500(%rsp), %xmm1
psubd %xmm1, %xmm0
movdqa %xmm0, 0xe0(%rsp)
movdqa 0x80(%rsp), %xmm1
movdqa 0x70(%rsp), %xmm0
movdqa %xmm1, 0x4f0(%rsp)
movdqa %xmm0, 0x4e0(%rsp)
movdqa 0x4f0(%rsp), %xmm0
movdqa 0x4e0(%rsp), %xmm1
psubd %xmm1, %xmm0
movdqa %xmm0, 0xf0(%rsp)
movdqa 0x80(%rsp), %xmm1
movdqa 0x70(%rsp), %xmm0
movdqa %xmm1, 0x290(%rsp)
movdqa %xmm0, 0x280(%rsp)
movdqa 0x290(%rsp), %xmm0
movdqa 0x280(%rsp), %xmm1
paddd %xmm1, %xmm0
movdqa %xmm0, 0x100(%rsp)
movdqa 0xd0(%rsp), %xmm1
movdqa 0x150(%rsp), %xmm0
movdqa %xmm1, 0x8a0(%rsp)
movdqa %xmm0, 0x890(%rsp)
movdqa 0x8a0(%rsp), %xmm0
movdqa 0x890(%rsp), %xmm1
pmulld %xmm1, %xmm0
movdqa %xmm0, 0x10(%rsp)
movdqa 0x100(%rsp), %xmm1
movdqa 0x140(%rsp), %xmm0
movdqa %xmm1, 0x880(%rsp)
movdqa %xmm0, 0x870(%rsp)
movdqa 0x880(%rsp), %xmm0
movdqa 0x870(%rsp), %xmm1
pmulld %xmm1, %xmm0
movdqa %xmm0, 0x20(%rsp)
movdqa 0x10(%rsp), %xmm1
movdqa 0x20(%rsp), %xmm0
movdqa %xmm1, 0x270(%rsp)
movdqa %xmm0, 0x260(%rsp)
movdqa 0x270(%rsp), %xmm0
movdqa 0x260(%rsp), %xmm1
paddd %xmm1, %xmm0
movdqa %xmm0, 0x10(%rsp)
movdqa 0x10(%rsp), %xmm1
movdqa 0x110(%rsp), %xmm0
movdqa %xmm1, 0x250(%rsp)
movdqa %xmm0, 0x240(%rsp)
movdqa 0x250(%rsp), %xmm0
movdqa 0x240(%rsp), %xmm1
paddd %xmm1, %xmm0
movdqa %xmm0, 0x10(%rsp)
movdqa 0x10(%rsp), %xmm0
movl 0x1ac(%rsp), %eax
movdqa %xmm0, 0x6b0(%rsp)
movl %eax, 0x6ac(%rsp)
movdqa 0x6b0(%rsp), %xmm0
movl 0x6ac(%rsp), %eax
movd %eax, %xmm1
psrad %xmm1, %xmm0
movq 0x1b0(%rsp), %rax
movl 0x1a8(%rsp), %ecx
shll $0x0, %ecx
movslq %ecx, %rcx
shlq $0x4, %rcx
addq %rcx, %rax
movdqa %xmm0, (%rax)
movdqa 0xd0(%rsp), %xmm1
movdqa 0x140(%rsp), %xmm0
movdqa %xmm1, 0x860(%rsp)
movdqa %xmm0, 0x850(%rsp)
movdqa 0x860(%rsp), %xmm0
movdqa 0x850(%rsp), %xmm1
pmulld %xmm1, %xmm0
movdqa %xmm0, 0x10(%rsp)
movdqa 0x100(%rsp), %xmm1
movdqa 0x150(%rsp), %xmm0
movdqa %xmm1, 0x840(%rsp)
movdqa %xmm0, 0x830(%rsp)
movdqa 0x840(%rsp), %xmm0
movdqa 0x830(%rsp), %xmm1
pmulld %xmm1, %xmm0
movdqa %xmm0, 0x20(%rsp)
movdqa 0x20(%rsp), %xmm1
movdqa 0x10(%rsp), %xmm0
movdqa %xmm1, 0x4d0(%rsp)
movdqa %xmm0, 0x4c0(%rsp)
movdqa 0x4d0(%rsp), %xmm0
movdqa 0x4c0(%rsp), %xmm1
psubd %xmm1, %xmm0
movdqa %xmm0, 0x10(%rsp)
movdqa 0x10(%rsp), %xmm1
movdqa 0x110(%rsp), %xmm0
movdqa %xmm1, 0x230(%rsp)
movdqa %xmm0, 0x220(%rsp)
movdqa 0x230(%rsp), %xmm0
movdqa 0x220(%rsp), %xmm1
paddd %xmm1, %xmm0
movdqa %xmm0, 0x10(%rsp)
movdqa 0x10(%rsp), %xmm0
movl 0x1ac(%rsp), %eax
movdqa %xmm0, 0x690(%rsp)
movl %eax, 0x68c(%rsp)
movdqa 0x690(%rsp), %xmm0
movl 0x68c(%rsp), %eax
movd %eax, %xmm1
psrad %xmm1, %xmm0
movq 0x1b0(%rsp), %rax
imull $0x7, 0x1a8(%rsp), %ecx
movslq %ecx, %rcx
shlq $0x4, %rcx
addq %rcx, %rax
movdqa %xmm0, (%rax)
movdqa 0xe0(%rsp), %xmm1
movdqa 0x130(%rsp), %xmm0
movdqa %xmm1, 0x820(%rsp)
movdqa %xmm0, 0x810(%rsp)
movdqa 0x820(%rsp), %xmm0
movdqa 0x810(%rsp), %xmm1
pmulld %xmm1, %xmm0
movdqa %xmm0, 0x10(%rsp)
movdqa 0xf0(%rsp), %xmm1
movdqa 0x120(%rsp), %xmm0
movdqa %xmm1, 0x800(%rsp)
movdqa %xmm0, 0x7f0(%rsp)
movdqa 0x800(%rsp), %xmm0
movdqa 0x7f0(%rsp), %xmm1
pmulld %xmm1, %xmm0
movdqa %xmm0, 0x20(%rsp)
movdqa 0x10(%rsp), %xmm1
movdqa 0x20(%rsp), %xmm0
movdqa %xmm1, 0x210(%rsp)
movdqa %xmm0, 0x200(%rsp)
movdqa 0x210(%rsp), %xmm0
movdqa 0x200(%rsp), %xmm1
paddd %xmm1, %xmm0
movdqa %xmm0, 0x10(%rsp)
movdqa 0x10(%rsp), %xmm1
movdqa 0x110(%rsp), %xmm0
movdqa %xmm1, 0x1f0(%rsp)
movdqa %xmm0, 0x1e0(%rsp)
movdqa 0x1f0(%rsp), %xmm0
movdqa 0x1e0(%rsp), %xmm1
paddd %xmm1, %xmm0
movdqa %xmm0, 0x10(%rsp)
movdqa 0x10(%rsp), %xmm0
movl 0x1ac(%rsp), %eax
movdqa %xmm0, 0x670(%rsp)
movl %eax, 0x66c(%rsp)
movdqa 0x670(%rsp), %xmm0
movl 0x66c(%rsp), %eax
movd %eax, %xmm1
psrad %xmm1, %xmm0
movq 0x1b0(%rsp), %rax
imull $0x5, 0x1a8(%rsp), %ecx
movslq %ecx, %rcx
shlq $0x4, %rcx
addq %rcx, %rax
movdqa %xmm0, (%rax)
movdqa 0xe0(%rsp), %xmm1
movdqa 0x120(%rsp), %xmm0
movdqa %xmm1, 0x7e0(%rsp)
movdqa %xmm0, 0x7d0(%rsp)
movdqa 0x7e0(%rsp), %xmm0
movdqa 0x7d0(%rsp), %xmm1
pmulld %xmm1, %xmm0
movdqa %xmm0, 0x10(%rsp)
movdqa 0xf0(%rsp), %xmm1
movdqa 0x130(%rsp), %xmm0
movdqa %xmm1, 0x7c0(%rsp)
movdqa %xmm0, 0x7b0(%rsp)
movdqa 0x7c0(%rsp), %xmm0
movdqa 0x7b0(%rsp), %xmm1
pmulld %xmm1, %xmm0
movdqa %xmm0, 0x20(%rsp)
movdqa 0x20(%rsp), %xmm1
movdqa 0x10(%rsp), %xmm0
movdqa %xmm1, 0x4b0(%rsp)
movdqa %xmm0, 0x4a0(%rsp)
movdqa 0x4b0(%rsp), %xmm0
movdqa 0x4a0(%rsp), %xmm1
psubd %xmm1, %xmm0
movdqa %xmm0, 0x10(%rsp)
movdqa 0x10(%rsp), %xmm1
movdqa 0x110(%rsp), %xmm0
movdqa %xmm1, 0x1d0(%rsp)
movdqa %xmm0, 0x1c0(%rsp)
movdqa 0x1d0(%rsp), %xmm0
movdqa 0x1c0(%rsp), %xmm1
paddd %xmm1, %xmm0
movdqa %xmm0, 0x10(%rsp)
movdqa 0x10(%rsp), %xmm0
movl 0x1ac(%rsp), %eax
movdqa %xmm0, 0x650(%rsp)
movl %eax, 0x64c(%rsp)
movdqa 0x650(%rsp), %xmm0
movl 0x64c(%rsp), %eax
movd %eax, %xmm1
psrad %xmm1, %xmm0
movq 0x1b0(%rsp), %rax
imull $0x3, 0x1a8(%rsp), %ecx
movslq %ecx, %rcx
shlq $0x4, %rcx
addq %rcx, %rax
movdqa %xmm0, (%rax)
movdqa 0x90(%rsp), %xmm0
movq 0x1b0(%rsp), %rax
imull $0x0, 0x1a8(%rsp), %ecx
movslq %ecx, %rcx
shlq $0x4, %rcx
addq %rcx, %rax
movdqa %xmm0, (%rax)
movdqa 0xa0(%rsp), %xmm0
movq 0x1b0(%rsp), %rax
movl 0x1a8(%rsp), %ecx
shll $0x2, %ecx
movslq %ecx, %rcx
shlq $0x4, %rcx
addq %rcx, %rax
movdqa %xmm0, (%rax)
movdqa 0xb0(%rsp), %xmm0
movq 0x1b0(%rsp), %rax
movl 0x1a8(%rsp), %ecx
shll %ecx
movslq %ecx, %rcx
shlq $0x4, %rcx
addq %rcx, %rax
movdqa %xmm0, (%rax)
movdqa 0xc0(%rsp), %xmm0
movq 0x1b0(%rsp), %rax
imull $0x6, 0x1a8(%rsp), %ecx
movslq %ecx, %rcx
shlq $0x4, %rcx
addq %rcx, %rax
movdqa %xmm0, (%rax)
addq $0xb18, %rsp # imm = 0xB18
retq
nopl (%rax)
| /m-ab-s[P]aom/av1/encoder/x86/highbd_fwd_txfm_sse4.c |
convert_8x8_to_16x16 | static inline void convert_8x8_to_16x16(const __m128i *in, __m128i *out) {
int row_index = 0;
int dst_index = 0;
int src_index = 0;
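  // The input holds four 8x8 quadrants in order TL, TR, BL, BR (two __m128i
  // per 8-wide row); interleave left/right so each 16-wide output row is four
  // consecutive vectors.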
  // row 0, 1, ..., 7
do {
out[dst_index] = in[src_index];
out[dst_index + 1] = in[src_index + 1];
out[dst_index + 2] = in[src_index + 16];
out[dst_index + 3] = in[src_index + 17];
dst_index += 4;
src_index += 2;
row_index += 1;
} while (row_index < 8);
// row 8, 9, ..., 15
src_index += 16;
do {
out[dst_index] = in[src_index];
out[dst_index + 1] = in[src_index + 1];
out[dst_index + 2] = in[src_index + 16];
out[dst_index + 3] = in[src_index + 17];
dst_index += 4;
src_index += 2;
row_index += 1;
} while (row_index < 16);
} | movq %rdi, -0x8(%rsp)
movq %rsi, -0x10(%rsp)
movl $0x0, -0x14(%rsp)
movl $0x0, -0x18(%rsp)
movl $0x0, -0x1c(%rsp)
movq -0x8(%rsp), %rax
movslq -0x1c(%rsp), %rcx
shlq $0x4, %rcx
addq %rcx, %rax
movdqa (%rax), %xmm0
movq -0x10(%rsp), %rax
movslq -0x18(%rsp), %rcx
shlq $0x4, %rcx
addq %rcx, %rax
movdqa %xmm0, (%rax)
movq -0x8(%rsp), %rax
movl -0x1c(%rsp), %ecx
addl $0x1, %ecx
movslq %ecx, %rcx
shlq $0x4, %rcx
addq %rcx, %rax
movdqa (%rax), %xmm0
movq -0x10(%rsp), %rax
movl -0x18(%rsp), %ecx
addl $0x1, %ecx
movslq %ecx, %rcx
shlq $0x4, %rcx
addq %rcx, %rax
movdqa %xmm0, (%rax)
movq -0x8(%rsp), %rax
movl -0x1c(%rsp), %ecx
addl $0x10, %ecx
movslq %ecx, %rcx
shlq $0x4, %rcx
addq %rcx, %rax
movdqa (%rax), %xmm0
movq -0x10(%rsp), %rax
movl -0x18(%rsp), %ecx
addl $0x2, %ecx
movslq %ecx, %rcx
shlq $0x4, %rcx
addq %rcx, %rax
movdqa %xmm0, (%rax)
movq -0x8(%rsp), %rax
movl -0x1c(%rsp), %ecx
addl $0x11, %ecx
movslq %ecx, %rcx
shlq $0x4, %rcx
addq %rcx, %rax
movdqa (%rax), %xmm0
movq -0x10(%rsp), %rax
movl -0x18(%rsp), %ecx
addl $0x3, %ecx
movslq %ecx, %rcx
shlq $0x4, %rcx
addq %rcx, %rax
movdqa %xmm0, (%rax)
movl -0x18(%rsp), %eax
addl $0x4, %eax
movl %eax, -0x18(%rsp)
movl -0x1c(%rsp), %eax
addl $0x2, %eax
movl %eax, -0x1c(%rsp)
movl -0x14(%rsp), %eax
addl $0x1, %eax
movl %eax, -0x14(%rsp)
cmpl $0x8, -0x14(%rsp)
jl 0x8057c2
movl -0x1c(%rsp), %eax
addl $0x10, %eax
movl %eax, -0x1c(%rsp)
movq -0x8(%rsp), %rax
movslq -0x1c(%rsp), %rcx
shlq $0x4, %rcx
addq %rcx, %rax
movdqa (%rax), %xmm0
movq -0x10(%rsp), %rax
movslq -0x18(%rsp), %rcx
shlq $0x4, %rcx
addq %rcx, %rax
movdqa %xmm0, (%rax)
movq -0x8(%rsp), %rax
movl -0x1c(%rsp), %ecx
addl $0x1, %ecx
movslq %ecx, %rcx
shlq $0x4, %rcx
addq %rcx, %rax
movdqa (%rax), %xmm0
movq -0x10(%rsp), %rax
movl -0x18(%rsp), %ecx
addl $0x1, %ecx
movslq %ecx, %rcx
shlq $0x4, %rcx
addq %rcx, %rax
movdqa %xmm0, (%rax)
movq -0x8(%rsp), %rax
movl -0x1c(%rsp), %ecx
addl $0x10, %ecx
movslq %ecx, %rcx
shlq $0x4, %rcx
addq %rcx, %rax
movdqa (%rax), %xmm0
movq -0x10(%rsp), %rax
movl -0x18(%rsp), %ecx
addl $0x2, %ecx
movslq %ecx, %rcx
shlq $0x4, %rcx
addq %rcx, %rax
movdqa %xmm0, (%rax)
movq -0x8(%rsp), %rax
movl -0x1c(%rsp), %ecx
addl $0x11, %ecx
movslq %ecx, %rcx
shlq $0x4, %rcx
addq %rcx, %rax
movdqa (%rax), %xmm0
movq -0x10(%rsp), %rax
movl -0x18(%rsp), %ecx
addl $0x3, %ecx
movslq %ecx, %rcx
shlq $0x4, %rcx
addq %rcx, %rax
movdqa %xmm0, (%rax)
movl -0x18(%rsp), %eax
addl $0x4, %eax
movl %eax, -0x18(%rsp)
movl -0x1c(%rsp), %eax
addl $0x2, %eax
movl %eax, -0x1c(%rsp)
movl -0x14(%rsp), %eax
addl $0x1, %eax
movl %eax, -0x14(%rsp)
cmpl $0x10, -0x14(%rsp)
jl 0x8058bf
retq
nopw %cs:(%rax,%rax)
| /m-ab-s[P]aom/av1/encoder/x86/highbd_fwd_txfm_sse4.c |
half_btf_sse4_1 | static inline __m128i half_btf_sse4_1(const __m128i *w0, const __m128i *n0,
const __m128i *w1, const __m128i *n1,
const __m128i *rounding, int bit) {
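  // Half butterfly: computes (*w0 * *n0 + *w1 * *n1 + *rounding) >> bit in
  // each 32-bit lane.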
__m128i x, y;
x = _mm_mullo_epi32(*w0, *n0);
y = _mm_mullo_epi32(*w1, *n1);
x = _mm_add_epi32(x, y);
x = _mm_add_epi32(x, *rounding);
x = _mm_srai_epi32(x, bit);
return x;
} | subq $0x78, %rsp
movq %rdi, -0x38(%rsp)
movq %rsi, -0x40(%rsp)
movq %rdx, -0x48(%rsp)
movq %rcx, -0x50(%rsp)
movq %r8, -0x58(%rsp)
movl %r9d, -0x5c(%rsp)
movq -0x38(%rsp), %rax
movdqa (%rax), %xmm1
movq -0x40(%rsp), %rax
movdqa (%rax), %xmm0
movdqa %xmm1, 0x60(%rsp)
movdqa %xmm0, 0x50(%rsp)
movdqa 0x60(%rsp), %xmm0
movdqa 0x50(%rsp), %xmm1
pmulld %xmm1, %xmm0
movdqa %xmm0, -0x70(%rsp)
movq -0x48(%rsp), %rax
movdqa (%rax), %xmm1
movq -0x50(%rsp), %rax
movdqa (%rax), %xmm0
movdqa %xmm1, 0x40(%rsp)
movdqa %xmm0, 0x30(%rsp)
movdqa 0x40(%rsp), %xmm0
movdqa 0x30(%rsp), %xmm1
pmulld %xmm1, %xmm0
movdqa %xmm0, -0x80(%rsp)
movdqa -0x70(%rsp), %xmm1
movdqa -0x80(%rsp), %xmm0
movdqa %xmm1, (%rsp)
movdqa %xmm0, -0x10(%rsp)
movdqa (%rsp), %xmm0
movdqa -0x10(%rsp), %xmm1
paddd %xmm1, %xmm0
movdqa %xmm0, -0x70(%rsp)
movdqa -0x70(%rsp), %xmm1
movq -0x58(%rsp), %rax
movdqa (%rax), %xmm0
movdqa %xmm1, -0x20(%rsp)
movdqa %xmm0, -0x30(%rsp)
movdqa -0x20(%rsp), %xmm0
movdqa -0x30(%rsp), %xmm1
paddd %xmm1, %xmm0
movdqa %xmm0, -0x70(%rsp)
movdqa -0x70(%rsp), %xmm0
movl -0x5c(%rsp), %eax
movdqa %xmm0, 0x20(%rsp)
movl %eax, 0x1c(%rsp)
movdqa 0x20(%rsp), %xmm0
movl 0x1c(%rsp), %eax
movd %eax, %xmm1
psrad %xmm1, %xmm0
movdqa %xmm0, -0x70(%rsp)
movdqa -0x70(%rsp), %xmm0
addq $0x78, %rsp
retq
| /m-ab-s[P]aom/av1/common/x86/highbd_txfm_utility_sse4.h |
idtx32x8_sse4_1 | static void idtx32x8_sse4_1(__m128i *in, __m128i *out, int bit, int col_num) {
(void)bit;
(void)col_num;
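  // Identity transform stage: every output coefficient is simply twice the
  // corresponding input (out = in + in).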
for (int j = 0; j < 2; j++) {
out[j + 8 * 0] = _mm_add_epi32(in[j + 8 * 0], in[j + 8 * 0]);
out[j + 8 * 1] = _mm_add_epi32(in[j + 8 * 1], in[j + 8 * 1]);
out[j + 8 * 2] = _mm_add_epi32(in[j + 8 * 2], in[j + 8 * 2]);
out[j + 8 * 3] = _mm_add_epi32(in[j + 8 * 3], in[j + 8 * 3]);
out[j + 8 * 4] = _mm_add_epi32(in[j + 8 * 4], in[j + 8 * 4]);
out[j + 8 * 5] = _mm_add_epi32(in[j + 8 * 5], in[j + 8 * 5]);
out[j + 8 * 6] = _mm_add_epi32(in[j + 8 * 6], in[j + 8 * 6]);
out[j + 8 * 7] = _mm_add_epi32(in[j + 8 * 7], in[j + 8 * 7]);
}
} | subq $0xa8, %rsp
movq %rdi, -0x68(%rsp)
movq %rsi, -0x70(%rsp)
movl %edx, -0x74(%rsp)
movl %ecx, -0x78(%rsp)
movl $0x0, -0x7c(%rsp)
cmpl $0x2, -0x7c(%rsp)
jge 0x805f46
movq -0x68(%rsp), %rax
movl -0x7c(%rsp), %ecx
addl $0x0, %ecx
movslq %ecx, %rcx
shlq $0x4, %rcx
addq %rcx, %rax
movdqa (%rax), %xmm1
movq -0x68(%rsp), %rax
movl -0x7c(%rsp), %ecx
addl $0x0, %ecx
movslq %ecx, %rcx
shlq $0x4, %rcx
addq %rcx, %rax
movdqa (%rax), %xmm0
movdqa %xmm1, 0x90(%rsp)
movdqa %xmm0, 0x80(%rsp)
movdqa 0x90(%rsp), %xmm0
movdqa 0x80(%rsp), %xmm1
paddd %xmm1, %xmm0
movq -0x70(%rsp), %rax
movl -0x7c(%rsp), %ecx
addl $0x0, %ecx
movslq %ecx, %rcx
shlq $0x4, %rcx
addq %rcx, %rax
movdqa %xmm0, (%rax)
movq -0x68(%rsp), %rax
movl -0x7c(%rsp), %ecx
addl $0x8, %ecx
movslq %ecx, %rcx
shlq $0x4, %rcx
addq %rcx, %rax
movdqa (%rax), %xmm1
movq -0x68(%rsp), %rax
movl -0x7c(%rsp), %ecx
addl $0x8, %ecx
movslq %ecx, %rcx
shlq $0x4, %rcx
addq %rcx, %rax
movdqa (%rax), %xmm0
movdqa %xmm1, 0x70(%rsp)
movdqa %xmm0, 0x60(%rsp)
movdqa 0x70(%rsp), %xmm0
movdqa 0x60(%rsp), %xmm1
paddd %xmm1, %xmm0
movq -0x70(%rsp), %rax
movl -0x7c(%rsp), %ecx
addl $0x8, %ecx
movslq %ecx, %rcx
shlq $0x4, %rcx
addq %rcx, %rax
movdqa %xmm0, (%rax)
movq -0x68(%rsp), %rax
movl -0x7c(%rsp), %ecx
addl $0x10, %ecx
movslq %ecx, %rcx
shlq $0x4, %rcx
addq %rcx, %rax
movdqa (%rax), %xmm1
movq -0x68(%rsp), %rax
movl -0x7c(%rsp), %ecx
addl $0x10, %ecx
movslq %ecx, %rcx
shlq $0x4, %rcx
addq %rcx, %rax
movdqa (%rax), %xmm0
movdqa %xmm1, 0x50(%rsp)
movdqa %xmm0, 0x40(%rsp)
movdqa 0x50(%rsp), %xmm0
movdqa 0x40(%rsp), %xmm1
paddd %xmm1, %xmm0
movq -0x70(%rsp), %rax
movl -0x7c(%rsp), %ecx
addl $0x10, %ecx
movslq %ecx, %rcx
shlq $0x4, %rcx
addq %rcx, %rax
movdqa %xmm0, (%rax)
movq -0x68(%rsp), %rax
movl -0x7c(%rsp), %ecx
addl $0x18, %ecx
movslq %ecx, %rcx
shlq $0x4, %rcx
addq %rcx, %rax
movdqa (%rax), %xmm1
movq -0x68(%rsp), %rax
movl -0x7c(%rsp), %ecx
addl $0x18, %ecx
movslq %ecx, %rcx
shlq $0x4, %rcx
addq %rcx, %rax
movdqa (%rax), %xmm0
movdqa %xmm1, 0x30(%rsp)
movdqa %xmm0, 0x20(%rsp)
movdqa 0x30(%rsp), %xmm0
movdqa 0x20(%rsp), %xmm1
paddd %xmm1, %xmm0
movq -0x70(%rsp), %rax
movl -0x7c(%rsp), %ecx
addl $0x18, %ecx
movslq %ecx, %rcx
shlq $0x4, %rcx
addq %rcx, %rax
movdqa %xmm0, (%rax)
movq -0x68(%rsp), %rax
movl -0x7c(%rsp), %ecx
addl $0x20, %ecx
movslq %ecx, %rcx
shlq $0x4, %rcx
addq %rcx, %rax
movdqa (%rax), %xmm1
movq -0x68(%rsp), %rax
movl -0x7c(%rsp), %ecx
addl $0x20, %ecx
movslq %ecx, %rcx
shlq $0x4, %rcx
addq %rcx, %rax
movdqa (%rax), %xmm0
movdqa %xmm1, 0x10(%rsp)
movdqa %xmm0, (%rsp)
movdqa 0x10(%rsp), %xmm0
movdqa (%rsp), %xmm1
paddd %xmm1, %xmm0
movq -0x70(%rsp), %rax
movl -0x7c(%rsp), %ecx
addl $0x20, %ecx
movslq %ecx, %rcx
shlq $0x4, %rcx
addq %rcx, %rax
movdqa %xmm0, (%rax)
movq -0x68(%rsp), %rax
movl -0x7c(%rsp), %ecx
addl $0x28, %ecx
movslq %ecx, %rcx
shlq $0x4, %rcx
addq %rcx, %rax
movdqa (%rax), %xmm1
movq -0x68(%rsp), %rax
movl -0x7c(%rsp), %ecx
addl $0x28, %ecx
movslq %ecx, %rcx
shlq $0x4, %rcx
addq %rcx, %rax
movdqa (%rax), %xmm0
movdqa %xmm1, -0x10(%rsp)
movdqa %xmm0, -0x20(%rsp)
movdqa -0x10(%rsp), %xmm0
movdqa -0x20(%rsp), %xmm1
paddd %xmm1, %xmm0
movq -0x70(%rsp), %rax
movl -0x7c(%rsp), %ecx
addl $0x28, %ecx
movslq %ecx, %rcx
shlq $0x4, %rcx
addq %rcx, %rax
movdqa %xmm0, (%rax)
movq -0x68(%rsp), %rax
movl -0x7c(%rsp), %ecx
addl $0x30, %ecx
movslq %ecx, %rcx
shlq $0x4, %rcx
addq %rcx, %rax
movdqa (%rax), %xmm1
movq -0x68(%rsp), %rax
movl -0x7c(%rsp), %ecx
addl $0x30, %ecx
movslq %ecx, %rcx
shlq $0x4, %rcx
addq %rcx, %rax
movdqa (%rax), %xmm0
movdqa %xmm1, -0x30(%rsp)
movdqa %xmm0, -0x40(%rsp)
movdqa -0x30(%rsp), %xmm0
movdqa -0x40(%rsp), %xmm1
paddd %xmm1, %xmm0
movq -0x70(%rsp), %rax
movl -0x7c(%rsp), %ecx
addl $0x30, %ecx
movslq %ecx, %rcx
shlq $0x4, %rcx
addq %rcx, %rax
movdqa %xmm0, (%rax)
movq -0x68(%rsp), %rax
movl -0x7c(%rsp), %ecx
addl $0x38, %ecx
movslq %ecx, %rcx
shlq $0x4, %rcx
addq %rcx, %rax
movdqa (%rax), %xmm1
movq -0x68(%rsp), %rax
movl -0x7c(%rsp), %ecx
addl $0x38, %ecx
movslq %ecx, %rcx
shlq $0x4, %rcx
addq %rcx, %rax
movdqa (%rax), %xmm0
movdqa %xmm1, -0x50(%rsp)
movdqa %xmm0, -0x60(%rsp)
movdqa -0x50(%rsp), %xmm0
movdqa -0x60(%rsp), %xmm1
paddd %xmm1, %xmm0
movq -0x70(%rsp), %rax
movl -0x7c(%rsp), %ecx
addl $0x38, %ecx
movslq %ecx, %rcx
shlq $0x4, %rcx
addq %rcx, %rax
movdqa %xmm0, (%rax)
movl -0x7c(%rsp), %eax
addl $0x1, %eax
movl %eax, -0x7c(%rsp)
jmp 0x805bd1
addq $0xa8, %rsp
retq
nop
| /m-ab-s[P]aom/av1/encoder/x86/highbd_fwd_txfm_sse4.c |
av1_get_horver_correlation_full_sse4_1 | void av1_get_horver_correlation_full_sse4_1(const int16_t *diff, int stride,
int width, int height, float *hcorr,
float *vcorr) {
// The following notation is used:
// x - current pixel
// y - right neighbour pixel
// z - below neighbour pixel
// w - down-right neighbour pixel
int64_t xy_sum = 0, xz_sum = 0;
int64_t x_sum = 0, x2_sum = 0;
// Process horizontal and vertical correlations through the body in 4x4
// blocks. This excludes the final row and column and possibly one extra
  // column, depending on how 3 divides into width and height.
int32_t xy_tmp[4] = { 0 }, xz_tmp[4] = { 0 };
int32_t x_tmp[4] = { 0 }, x2_tmp[4] = { 0 };
__m128i xy_sum_32 = _mm_setzero_si128();
__m128i xz_sum_32 = _mm_setzero_si128();
__m128i x_sum_32 = _mm_setzero_si128();
__m128i x2_sum_32 = _mm_setzero_si128();
for (int i = 0; i <= height - 4; i += 3) {
for (int j = 0; j <= width - 4; j += 3) {
horver_correlation_4x4(&diff[i * stride + j], stride, &xy_sum_32,
&xz_sum_32, &x_sum_32, &x2_sum_32);
}
xx_storeu_128(xy_tmp, xy_sum_32);
xx_storeu_128(xz_tmp, xz_sum_32);
xx_storeu_128(x_tmp, x_sum_32);
xx_storeu_128(x2_tmp, x2_sum_32);
xy_sum += (int64_t)xy_tmp[3] + xy_tmp[2] + xy_tmp[1];
xz_sum += (int64_t)xz_tmp[3] + xz_tmp[2] + xz_tmp[0];
x_sum += (int64_t)x_tmp[3] + x_tmp[2] + x_tmp[1] + x_tmp[0];
x2_sum += (int64_t)x2_tmp[2] + x2_tmp[1] + x2_tmp[0];
xy_sum_32 = _mm_setzero_si128();
xz_sum_32 = _mm_setzero_si128();
x_sum_32 = _mm_setzero_si128();
x2_sum_32 = _mm_setzero_si128();
}
// x_sum now covers every pixel except the final 1-2 rows and 1-2 cols
int64_t x_finalrow = 0, x_finalcol = 0, x2_finalrow = 0, x2_finalcol = 0;
// Do we have 2 rows remaining or just the one? Note that width and height
// are powers of 2, so each modulo 3 must be 1 or 2.
if (height % 3 == 1) { // Just horiz corrs on the final row
const int16_t x0 = diff[(height - 1) * stride];
x_sum += x0;
x_finalrow += x0;
x2_sum += x0 * x0;
x2_finalrow += x0 * x0;
for (int j = 0; j < width - 1; ++j) {
const int16_t x = diff[(height - 1) * stride + j];
const int16_t y = diff[(height - 1) * stride + j + 1];
xy_sum += x * y;
x_sum += y;
x2_sum += y * y;
x_finalrow += y;
x2_finalrow += y * y;
}
} else { // Two rows remaining to do
const int16_t x0 = diff[(height - 2) * stride];
const int16_t z0 = diff[(height - 1) * stride];
x_sum += x0 + z0;
x2_sum += x0 * x0 + z0 * z0;
x_finalrow += z0;
x2_finalrow += z0 * z0;
for (int j = 0; j < width - 1; ++j) {
const int16_t x = diff[(height - 2) * stride + j];
const int16_t y = diff[(height - 2) * stride + j + 1];
const int16_t z = diff[(height - 1) * stride + j];
const int16_t w = diff[(height - 1) * stride + j + 1];
// Horizontal and vertical correlations for the penultimate row:
xy_sum += x * y;
xz_sum += x * z;
// Now just horizontal correlations for the final row:
xy_sum += z * w;
x_sum += y + w;
x2_sum += y * y + w * w;
x_finalrow += w;
x2_finalrow += w * w;
}
}
// Do we have 2 columns remaining or just the one?
if (width % 3 == 1) { // Just vert corrs on the final col
const int16_t x0 = diff[width - 1];
x_sum += x0;
x_finalcol += x0;
x2_sum += x0 * x0;
x2_finalcol += x0 * x0;
for (int i = 0; i < height - 1; ++i) {
const int16_t x = diff[i * stride + width - 1];
const int16_t z = diff[(i + 1) * stride + width - 1];
xz_sum += x * z;
x_finalcol += z;
x2_finalcol += z * z;
// So the bottom-right elements don't get counted twice:
if (i < height - (height % 3 == 1 ? 2 : 3)) {
x_sum += z;
x2_sum += z * z;
}
}
} else { // Two cols remaining
const int16_t x0 = diff[width - 2];
const int16_t y0 = diff[width - 1];
x_sum += x0 + y0;
x2_sum += x0 * x0 + y0 * y0;
x_finalcol += y0;
x2_finalcol += y0 * y0;
for (int i = 0; i < height - 1; ++i) {
const int16_t x = diff[i * stride + width - 2];
const int16_t y = diff[i * stride + width - 1];
const int16_t z = diff[(i + 1) * stride + width - 2];
const int16_t w = diff[(i + 1) * stride + width - 1];
// Horizontal and vertical correlations for the penultimate col:
// Skip these on the last iteration of this loop if we also had two
  // rows remaining; otherwise the final horizontal and vertical correlations
  // get erroneously processed twice.
if (i < height - 2 || height % 3 == 1) {
xy_sum += x * y;
xz_sum += x * z;
}
x_finalcol += w;
x2_finalcol += w * w;
// So the bottom-right elements don't get counted twice:
if (i < height - (height % 3 == 1 ? 2 : 3)) {
x_sum += z + w;
x2_sum += z * z + w * w;
}
// Now just vertical correlations for the final column:
xz_sum += y * w;
}
}
// Calculate the simple sums and squared-sums
int64_t x_firstrow = 0, x_firstcol = 0;
int64_t x2_firstrow = 0, x2_firstcol = 0;
for (int j = 0; j < width; ++j) {
x_firstrow += diff[j];
x2_firstrow += diff[j] * diff[j];
}
for (int i = 0; i < height; ++i) {
x_firstcol += diff[i * stride];
x2_firstcol += diff[i * stride] * diff[i * stride];
}
int64_t xhor_sum = x_sum - x_finalcol;
int64_t xver_sum = x_sum - x_finalrow;
int64_t y_sum = x_sum - x_firstcol;
int64_t z_sum = x_sum - x_firstrow;
int64_t x2hor_sum = x2_sum - x2_finalcol;
int64_t x2ver_sum = x2_sum - x2_finalrow;
int64_t y2_sum = x2_sum - x2_firstcol;
int64_t z2_sum = x2_sum - x2_firstrow;
const float num_hor = (float)(height * (width - 1));
const float num_ver = (float)((height - 1) * width);
const float xhor_var_n = x2hor_sum - (xhor_sum * xhor_sum) / num_hor;
const float xver_var_n = x2ver_sum - (xver_sum * xver_sum) / num_ver;
const float y_var_n = y2_sum - (y_sum * y_sum) / num_hor;
const float z_var_n = z2_sum - (z_sum * z_sum) / num_ver;
const float xy_var_n = xy_sum - (xhor_sum * y_sum) / num_hor;
const float xz_var_n = xz_sum - (xver_sum * z_sum) / num_ver;
if (xhor_var_n > 0 && y_var_n > 0) {
*hcorr = xy_var_n / sqrtf(xhor_var_n * y_var_n);
*hcorr = *hcorr < 0 ? 0 : *hcorr;
} else {
*hcorr = 1.0;
}
if (xver_var_n > 0 && z_var_n > 0) {
*vcorr = xz_var_n / sqrtf(xver_var_n * z_var_n);
*vcorr = *vcorr < 0 ? 0 : *vcorr;
} else {
*vcorr = 1.0;
}
} | subq $0x258, %rsp # imm = 0x258
movq %rdi, 0x1c8(%rsp)
movl %esi, 0x1c4(%rsp)
movl %edx, 0x1c0(%rsp)
movl %ecx, 0x1bc(%rsp)
movq %r8, 0x1b0(%rsp)
movq %r9, 0x1a8(%rsp)
movq $0x0, 0x1a0(%rsp)
movq $0x0, 0x198(%rsp)
movq $0x0, 0x190(%rsp)
movq $0x0, 0x188(%rsp)
xorps %xmm0, %xmm0
movaps %xmm0, 0x170(%rsp)
movaps %xmm0, 0x160(%rsp)
movaps %xmm0, 0x150(%rsp)
movaps %xmm0, 0x140(%rsp)
xorps %xmm0, %xmm0
movaps %xmm0, 0x240(%rsp)
movaps 0x240(%rsp), %xmm1
movaps %xmm1, 0x130(%rsp)
movaps %xmm0, 0x230(%rsp)
movaps 0x230(%rsp), %xmm1
movaps %xmm1, 0x120(%rsp)
movaps %xmm0, 0x220(%rsp)
movaps 0x220(%rsp), %xmm1
movaps %xmm1, 0x110(%rsp)
movaps %xmm0, 0x210(%rsp)
movdqa 0x210(%rsp), %xmm0
movdqa %xmm0, 0x100(%rsp)
movl $0x0, 0xfc(%rsp)
movl 0xfc(%rsp), %eax
movl 0x1bc(%rsp), %ecx
subl $0x4, %ecx
cmpl %ecx, %eax
jg 0x806283
movl $0x0, 0xf8(%rsp)
movl 0xf8(%rsp), %eax
movl 0x1c0(%rsp), %ecx
subl $0x4, %ecx
cmpl %ecx, %eax
jg 0x8060e5
movq 0x1c8(%rsp), %rdi
movl 0xfc(%rsp), %eax
imull 0x1c4(%rsp), %eax
addl 0xf8(%rsp), %eax
cltq
shlq %rax
addq %rax, %rdi
movl 0x1c4(%rsp), %esi
leaq 0x130(%rsp), %rdx
leaq 0x120(%rsp), %rcx
leaq 0x110(%rsp), %r8
leaq 0x100(%rsp), %r9
callq 0x8071b0
movl 0xf8(%rsp), %eax
addl $0x3, %eax
movl %eax, 0xf8(%rsp)
jmp 0x80606b
movaps 0x130(%rsp), %xmm0
leaq 0x170(%rsp), %rdi
callq 0x8076b0
movaps 0x120(%rsp), %xmm0
leaq 0x160(%rsp), %rdi
callq 0x8076b0
movaps 0x110(%rsp), %xmm0
leaq 0x150(%rsp), %rdi
callq 0x8076b0
movaps 0x100(%rsp), %xmm0
leaq 0x140(%rsp), %rdi
callq 0x8076b0
movslq 0x17c(%rsp), %rcx
movslq 0x178(%rsp), %rax
addq %rax, %rcx
movslq 0x174(%rsp), %rax
addq %rax, %rcx
movq 0x1a0(%rsp), %rax
addq %rcx, %rax
movq %rax, 0x1a0(%rsp)
movslq 0x16c(%rsp), %rcx
movslq 0x168(%rsp), %rax
addq %rax, %rcx
movslq 0x160(%rsp), %rax
addq %rax, %rcx
movq 0x198(%rsp), %rax
addq %rcx, %rax
movq %rax, 0x198(%rsp)
movslq 0x15c(%rsp), %rcx
movslq 0x158(%rsp), %rax
addq %rax, %rcx
movslq 0x154(%rsp), %rax
addq %rax, %rcx
movslq 0x150(%rsp), %rax
addq %rax, %rcx
movq 0x190(%rsp), %rax
addq %rcx, %rax
movq %rax, 0x190(%rsp)
movslq 0x148(%rsp), %rcx
movslq 0x144(%rsp), %rax
addq %rax, %rcx
movslq 0x140(%rsp), %rax
addq %rax, %rcx
movq 0x188(%rsp), %rax
addq %rcx, %rax
movq %rax, 0x188(%rsp)
xorps %xmm0, %xmm0
movaps %xmm0, 0x200(%rsp)
movaps 0x200(%rsp), %xmm1
movaps %xmm1, 0x130(%rsp)
movaps %xmm0, 0x1f0(%rsp)
movaps 0x1f0(%rsp), %xmm1
movaps %xmm1, 0x120(%rsp)
movaps %xmm0, 0x1e0(%rsp)
movaps 0x1e0(%rsp), %xmm1
movaps %xmm1, 0x110(%rsp)
movaps %xmm0, 0x1d0(%rsp)
movdqa 0x1d0(%rsp), %xmm0
movdqa %xmm0, 0x100(%rsp)
movl 0xfc(%rsp), %eax
addl $0x3, %eax
movl %eax, 0xfc(%rsp)
jmp 0x806047
movq $0x0, 0xf0(%rsp)
movq $0x0, 0xe8(%rsp)
movq $0x0, 0xe0(%rsp)
movq $0x0, 0xd8(%rsp)
movl 0x1bc(%rsp), %eax
movl $0x3, %ecx
cltd
idivl %ecx
cmpl $0x1, %edx
jne 0x8064b3
movq 0x1c8(%rsp), %rax
movl 0x1bc(%rsp), %ecx
subl $0x1, %ecx
imull 0x1c4(%rsp), %ecx
movslq %ecx, %rcx
movw (%rax,%rcx,2), %ax
movw %ax, 0xd6(%rsp)
movswq 0xd6(%rsp), %rax
addq 0x190(%rsp), %rax
movq %rax, 0x190(%rsp)
movswq 0xd6(%rsp), %rax
addq 0xf0(%rsp), %rax
movq %rax, 0xf0(%rsp)
movswl 0xd6(%rsp), %eax
movswl 0xd6(%rsp), %ecx
imull %ecx, %eax
cltq
addq 0x188(%rsp), %rax
movq %rax, 0x188(%rsp)
movswl 0xd6(%rsp), %eax
movswl 0xd6(%rsp), %ecx
imull %ecx, %eax
cltq
addq 0xe0(%rsp), %rax
movq %rax, 0xe0(%rsp)
movl $0x0, 0xd0(%rsp)
movl 0xd0(%rsp), %eax
movl 0x1c0(%rsp), %ecx
subl $0x1, %ecx
cmpl %ecx, %eax
jge 0x8064ae
movq 0x1c8(%rsp), %rax
movl 0x1bc(%rsp), %ecx
subl $0x1, %ecx
imull 0x1c4(%rsp), %ecx
addl 0xd0(%rsp), %ecx
movslq %ecx, %rcx
movw (%rax,%rcx,2), %ax
movw %ax, 0xce(%rsp)
movq 0x1c8(%rsp), %rax
movl 0x1bc(%rsp), %ecx
subl $0x1, %ecx
imull 0x1c4(%rsp), %ecx
addl 0xd0(%rsp), %ecx
addl $0x1, %ecx
movslq %ecx, %rcx
movw (%rax,%rcx,2), %ax
movw %ax, 0xcc(%rsp)
movswl 0xce(%rsp), %eax
movswl 0xcc(%rsp), %ecx
imull %ecx, %eax
cltq
addq 0x1a0(%rsp), %rax
movq %rax, 0x1a0(%rsp)
movswq 0xcc(%rsp), %rax
addq 0x190(%rsp), %rax
movq %rax, 0x190(%rsp)
movswl 0xcc(%rsp), %eax
movswl 0xcc(%rsp), %ecx
imull %ecx, %eax
cltq
addq 0x188(%rsp), %rax
movq %rax, 0x188(%rsp)
movswq 0xcc(%rsp), %rax
addq 0xf0(%rsp), %rax
movq %rax, 0xf0(%rsp)
movswl 0xcc(%rsp), %eax
movswl 0xcc(%rsp), %ecx
imull %ecx, %eax
cltq
addq 0xe0(%rsp), %rax
movq %rax, 0xe0(%rsp)
movl 0xd0(%rsp), %eax
addl $0x1, %eax
movl %eax, 0xd0(%rsp)
jmp 0x80637b
jmp 0x8067ae
movq 0x1c8(%rsp), %rax
movl 0x1bc(%rsp), %ecx
subl $0x2, %ecx
imull 0x1c4(%rsp), %ecx
movslq %ecx, %rcx
movw (%rax,%rcx,2), %ax
movw %ax, 0xca(%rsp)
movq 0x1c8(%rsp), %rax
movl 0x1bc(%rsp), %ecx
subl $0x1, %ecx
imull 0x1c4(%rsp), %ecx
movslq %ecx, %rcx
movw (%rax,%rcx,2), %ax
movw %ax, 0xc8(%rsp)
movswl 0xca(%rsp), %eax
movswl 0xc8(%rsp), %ecx
addl %ecx, %eax
cltq
addq 0x190(%rsp), %rax
movq %rax, 0x190(%rsp)
movswl 0xca(%rsp), %eax
movswl 0xca(%rsp), %ecx
imull %ecx, %eax
movswl 0xc8(%rsp), %ecx
movswl 0xc8(%rsp), %edx
imull %edx, %ecx
addl %ecx, %eax
cltq
addq 0x188(%rsp), %rax
movq %rax, 0x188(%rsp)
movswq 0xc8(%rsp), %rax
addq 0xf0(%rsp), %rax
movq %rax, 0xf0(%rsp)
movswl 0xc8(%rsp), %eax
movswl 0xc8(%rsp), %ecx
imull %ecx, %eax
cltq
addq 0xe0(%rsp), %rax
movq %rax, 0xe0(%rsp)
movl $0x0, 0xc4(%rsp)
movl 0xc4(%rsp), %eax
movl 0x1c0(%rsp), %ecx
subl $0x1, %ecx
cmpl %ecx, %eax
jge 0x8067ac
movq 0x1c8(%rsp), %rax
movl 0x1bc(%rsp), %ecx
subl $0x2, %ecx
imull 0x1c4(%rsp), %ecx
addl 0xc4(%rsp), %ecx
movslq %ecx, %rcx
movw (%rax,%rcx,2), %ax
movw %ax, 0xc2(%rsp)
movq 0x1c8(%rsp), %rax
movl 0x1bc(%rsp), %ecx
subl $0x2, %ecx
imull 0x1c4(%rsp), %ecx
addl 0xc4(%rsp), %ecx
addl $0x1, %ecx
movslq %ecx, %rcx
movw (%rax,%rcx,2), %ax
movw %ax, 0xc0(%rsp)
movq 0x1c8(%rsp), %rax
movl 0x1bc(%rsp), %ecx
subl $0x1, %ecx
imull 0x1c4(%rsp), %ecx
addl 0xc4(%rsp), %ecx
movslq %ecx, %rcx
movw (%rax,%rcx,2), %ax
movw %ax, 0xbe(%rsp)
movq 0x1c8(%rsp), %rax
movl 0x1bc(%rsp), %ecx
subl $0x1, %ecx
imull 0x1c4(%rsp), %ecx
addl 0xc4(%rsp), %ecx
addl $0x1, %ecx
movslq %ecx, %rcx
movw (%rax,%rcx,2), %ax
movw %ax, 0xbc(%rsp)
movswl 0xc2(%rsp), %eax
movswl 0xc0(%rsp), %ecx
imull %ecx, %eax
cltq
addq 0x1a0(%rsp), %rax
movq %rax, 0x1a0(%rsp)
movswl 0xc2(%rsp), %eax
movswl 0xbe(%rsp), %ecx
imull %ecx, %eax
cltq
addq 0x198(%rsp), %rax
movq %rax, 0x198(%rsp)
movswl 0xbe(%rsp), %eax
movswl 0xbc(%rsp), %ecx
imull %ecx, %eax
cltq
addq 0x1a0(%rsp), %rax
movq %rax, 0x1a0(%rsp)
movswl 0xc0(%rsp), %eax
movswl 0xbc(%rsp), %ecx
addl %ecx, %eax
cltq
addq 0x190(%rsp), %rax
movq %rax, 0x190(%rsp)
movswl 0xc0(%rsp), %eax
movswl 0xc0(%rsp), %ecx
imull %ecx, %eax
movswl 0xbc(%rsp), %ecx
movswl 0xbc(%rsp), %edx
imull %edx, %ecx
addl %ecx, %eax
cltq
addq 0x188(%rsp), %rax
movq %rax, 0x188(%rsp)
movswq 0xbc(%rsp), %rax
addq 0xf0(%rsp), %rax
movq %rax, 0xf0(%rsp)
movswl 0xbc(%rsp), %eax
movswl 0xbc(%rsp), %ecx
imull %ecx, %eax
cltq
addq 0xe0(%rsp), %rax
movq %rax, 0xe0(%rsp)
movl 0xc4(%rsp), %eax
addl $0x1, %eax
movl %eax, 0xc4(%rsp)
jmp 0x8065ac
jmp 0x8067ae
movl 0x1c0(%rsp), %eax
movl $0x3, %ecx
cltd
idivl %ecx
cmpl $0x1, %edx
jne 0x8069e5
movq 0x1c8(%rsp), %rax
movl 0x1c0(%rsp), %ecx
subl $0x1, %ecx
movslq %ecx, %rcx
movw (%rax,%rcx,2), %ax
movw %ax, 0xba(%rsp)
movswq 0xba(%rsp), %rax
addq 0x190(%rsp), %rax
movq %rax, 0x190(%rsp)
movswq 0xba(%rsp), %rax
addq 0xe8(%rsp), %rax
movq %rax, 0xe8(%rsp)
movswl 0xba(%rsp), %eax
movswl 0xba(%rsp), %ecx
imull %ecx, %eax
cltq
addq 0x188(%rsp), %rax
movq %rax, 0x188(%rsp)
movswl 0xba(%rsp), %eax
movswl 0xba(%rsp), %ecx
imull %ecx, %eax
cltq
addq 0xd8(%rsp), %rax
movq %rax, 0xd8(%rsp)
movl $0x0, 0xb4(%rsp)
movl 0xb4(%rsp), %eax
movl 0x1bc(%rsp), %ecx
subl $0x1, %ecx
cmpl %ecx, %eax
jge 0x8069e0
movq 0x1c8(%rsp), %rax
movl 0xb4(%rsp), %ecx
imull 0x1c4(%rsp), %ecx
addl 0x1c0(%rsp), %ecx
subl $0x1, %ecx
movslq %ecx, %rcx
movw (%rax,%rcx,2), %ax
movw %ax, 0xb2(%rsp)
movq 0x1c8(%rsp), %rax
movl 0xb4(%rsp), %ecx
addl $0x1, %ecx
imull 0x1c4(%rsp), %ecx
addl 0x1c0(%rsp), %ecx
subl $0x1, %ecx
movslq %ecx, %rcx
movw (%rax,%rcx,2), %ax
movw %ax, 0xb0(%rsp)
movswl 0xb2(%rsp), %eax
movswl 0xb0(%rsp), %ecx
imull %ecx, %eax
cltq
addq 0x198(%rsp), %rax
movq %rax, 0x198(%rsp)
movswq 0xb0(%rsp), %rax
addq 0xe8(%rsp), %rax
movq %rax, 0xe8(%rsp)
movswl 0xb0(%rsp), %eax
movswl 0xb0(%rsp), %ecx
imull %ecx, %eax
cltq
addq 0xd8(%rsp), %rax
movq %rax, 0xd8(%rsp)
movl 0xb4(%rsp), %eax
movl %eax, 0x14(%rsp)
movl 0x1bc(%rsp), %ecx
movl 0x1bc(%rsp), %eax
movl $0x3, %esi
cltd
idivl %esi
movl 0x14(%rsp), %eax
movl %edx, %edi
movl $0x3, %edx
movl $0x2, %esi
cmpl $0x1, %edi
cmovel %esi, %edx
subl %edx, %ecx
cmpl %ecx, %eax
jge 0x8069c8
movswq 0xb0(%rsp), %rax
addq 0x190(%rsp), %rax
movq %rax, 0x190(%rsp)
movswl 0xb0(%rsp), %eax
movswl 0xb0(%rsp), %ecx
imull %ecx, %eax
cltq
addq 0x188(%rsp), %rax
movq %rax, 0x188(%rsp)
jmp 0x8069ca
movl 0xb4(%rsp), %eax
addl $0x1, %eax
movl %eax, 0xb4(%rsp)
jmp 0x80686e
jmp 0x806d36
movq 0x1c8(%rsp), %rax
movl 0x1c0(%rsp), %ecx
subl $0x2, %ecx
movslq %ecx, %rcx
movw (%rax,%rcx,2), %ax
movw %ax, 0xae(%rsp)
movq 0x1c8(%rsp), %rax
movl 0x1c0(%rsp), %ecx
subl $0x1, %ecx
movslq %ecx, %rcx
movw (%rax,%rcx,2), %ax
movw %ax, 0xac(%rsp)
movswl 0xae(%rsp), %eax
movswl 0xac(%rsp), %ecx
addl %ecx, %eax
cltq
addq 0x190(%rsp), %rax
movq %rax, 0x190(%rsp)
movswl 0xae(%rsp), %eax
movswl 0xae(%rsp), %ecx
imull %ecx, %eax
movswl 0xac(%rsp), %ecx
movswl 0xac(%rsp), %edx
imull %edx, %ecx
addl %ecx, %eax
cltq
addq 0x188(%rsp), %rax
movq %rax, 0x188(%rsp)
movswq 0xac(%rsp), %rax
addq 0xe8(%rsp), %rax
movq %rax, 0xe8(%rsp)
movswl 0xac(%rsp), %eax
movswl 0xac(%rsp), %ecx
imull %ecx, %eax
cltq
addq 0xd8(%rsp), %rax
movq %rax, 0xd8(%rsp)
movl $0x0, 0xa8(%rsp)
movl 0xa8(%rsp), %eax
movl 0x1bc(%rsp), %ecx
subl $0x1, %ecx
cmpl %ecx, %eax
jge 0x806d34
movq 0x1c8(%rsp), %rax
movl 0xa8(%rsp), %ecx
imull 0x1c4(%rsp), %ecx
addl 0x1c0(%rsp), %ecx
subl $0x2, %ecx
movslq %ecx, %rcx
movw (%rax,%rcx,2), %ax
movw %ax, 0xa6(%rsp)
movq 0x1c8(%rsp), %rax
movl 0xa8(%rsp), %ecx
imull 0x1c4(%rsp), %ecx
addl 0x1c0(%rsp), %ecx
subl $0x1, %ecx
movslq %ecx, %rcx
movw (%rax,%rcx,2), %ax
movw %ax, 0xa4(%rsp)
movq 0x1c8(%rsp), %rax
movl 0xa8(%rsp), %ecx
addl $0x1, %ecx
imull 0x1c4(%rsp), %ecx
addl 0x1c0(%rsp), %ecx
subl $0x2, %ecx
movslq %ecx, %rcx
movw (%rax,%rcx,2), %ax
movw %ax, 0xa2(%rsp)
movq 0x1c8(%rsp), %rax
movl 0xa8(%rsp), %ecx
addl $0x1, %ecx
imull 0x1c4(%rsp), %ecx
addl 0x1c0(%rsp), %ecx
subl $0x1, %ecx
movslq %ecx, %rcx
movw (%rax,%rcx,2), %ax
movw %ax, 0xa0(%rsp)
movl 0xa8(%rsp), %eax
movl 0x1bc(%rsp), %ecx
subl $0x2, %ecx
cmpl %ecx, %eax
jl 0x806bd6
movl 0x1bc(%rsp), %eax
movl $0x3, %ecx
cltd
idivl %ecx
cmpl $0x1, %edx
jne 0x806c20
movswl 0xa6(%rsp), %eax
movswl 0xa4(%rsp), %ecx
imull %ecx, %eax
cltq
addq 0x1a0(%rsp), %rax
movq %rax, 0x1a0(%rsp)
movswl 0xa6(%rsp), %eax
movswl 0xa2(%rsp), %ecx
imull %ecx, %eax
cltq
addq 0x198(%rsp), %rax
movq %rax, 0x198(%rsp)
movswq 0xa0(%rsp), %rax
addq 0xe8(%rsp), %rax
movq %rax, 0xe8(%rsp)
movswl 0xa0(%rsp), %eax
movswl 0xa0(%rsp), %ecx
imull %ecx, %eax
cltq
addq 0xd8(%rsp), %rax
movq %rax, 0xd8(%rsp)
movl 0xa8(%rsp), %eax
movl %eax, 0x10(%rsp)
movl 0x1bc(%rsp), %ecx
movl 0x1bc(%rsp), %eax
movl $0x3, %esi
cltd
idivl %esi
movl 0x10(%rsp), %eax
movl %edx, %edi
movl $0x3, %edx
movl $0x2, %esi
cmpl $0x1, %edi
cmovel %esi, %edx
subl %edx, %ecx
cmpl %ecx, %eax
jge 0x806cf9
movswl 0xa2(%rsp), %eax
movswl 0xa0(%rsp), %ecx
addl %ecx, %eax
cltq
addq 0x190(%rsp), %rax
movq %rax, 0x190(%rsp)
movswl 0xa2(%rsp), %eax
movswl 0xa2(%rsp), %ecx
imull %ecx, %eax
movswl 0xa0(%rsp), %ecx
movswl 0xa0(%rsp), %edx
imull %edx, %ecx
addl %ecx, %eax
cltq
addq 0x188(%rsp), %rax
movq %rax, 0x188(%rsp)
movswl 0xa4(%rsp), %eax
movswl 0xa0(%rsp), %ecx
imull %ecx, %eax
cltq
addq 0x198(%rsp), %rax
movq %rax, 0x198(%rsp)
movl 0xa8(%rsp), %eax
addl $0x1, %eax
movl %eax, 0xa8(%rsp)
jmp 0x806ace
jmp 0x806d36
movq $0x0, 0x98(%rsp)
movq $0x0, 0x90(%rsp)
movq $0x0, 0x88(%rsp)
movq $0x0, 0x80(%rsp)
movl $0x0, 0x7c(%rsp)
movl 0x7c(%rsp), %eax
cmpl 0x1c0(%rsp), %eax
jge 0x806de1
movq 0x1c8(%rsp), %rax
movslq 0x7c(%rsp), %rcx
movswq (%rax,%rcx,2), %rax
addq 0x98(%rsp), %rax
movq %rax, 0x98(%rsp)
movq 0x1c8(%rsp), %rax
movslq 0x7c(%rsp), %rcx
movswl (%rax,%rcx,2), %eax
movq 0x1c8(%rsp), %rcx
movslq 0x7c(%rsp), %rdx
movswl (%rcx,%rdx,2), %ecx
imull %ecx, %eax
cltq
addq 0x88(%rsp), %rax
movq %rax, 0x88(%rsp)
movl 0x7c(%rsp), %eax
addl $0x1, %eax
movl %eax, 0x7c(%rsp)
jmp 0x806d6e
movl $0x0, 0x78(%rsp)
movl 0x78(%rsp), %eax
cmpl 0x1bc(%rsp), %eax
jge 0x806e81
movq 0x1c8(%rsp), %rax
movl 0x78(%rsp), %ecx
imull 0x1c4(%rsp), %ecx
movslq %ecx, %rcx
movswq (%rax,%rcx,2), %rax
addq 0x90(%rsp), %rax
movq %rax, 0x90(%rsp)
movq 0x1c8(%rsp), %rax
movl 0x78(%rsp), %ecx
imull 0x1c4(%rsp), %ecx
movslq %ecx, %rcx
movswl (%rax,%rcx,2), %eax
movq 0x1c8(%rsp), %rcx
movl 0x78(%rsp), %edx
imull 0x1c4(%rsp), %edx
movslq %edx, %rdx
movswl (%rcx,%rdx,2), %ecx
imull %ecx, %eax
cltq
addq 0x80(%rsp), %rax
movq %rax, 0x80(%rsp)
movl 0x78(%rsp), %eax
addl $0x1, %eax
movl %eax, 0x78(%rsp)
jmp 0x806de9
movq 0x190(%rsp), %rax
subq 0xe8(%rsp), %rax
movq %rax, 0x70(%rsp)
movq 0x190(%rsp), %rax
subq 0xf0(%rsp), %rax
movq %rax, 0x68(%rsp)
movq 0x190(%rsp), %rax
subq 0x90(%rsp), %rax
movq %rax, 0x60(%rsp)
movq 0x190(%rsp), %rax
subq 0x98(%rsp), %rax
movq %rax, 0x58(%rsp)
movq 0x188(%rsp), %rax
subq 0xd8(%rsp), %rax
movq %rax, 0x50(%rsp)
movq 0x188(%rsp), %rax
subq 0xe0(%rsp), %rax
movq %rax, 0x48(%rsp)
movq 0x188(%rsp), %rax
subq 0x80(%rsp), %rax
movq %rax, 0x40(%rsp)
movq 0x188(%rsp), %rax
subq 0x88(%rsp), %rax
movq %rax, 0x38(%rsp)
movl 0x1bc(%rsp), %eax
movl 0x1c0(%rsp), %ecx
subl $0x1, %ecx
imull %ecx, %eax
cvtsi2ss %eax, %xmm0
movss %xmm0, 0x34(%rsp)
movl 0x1bc(%rsp), %eax
subl $0x1, %eax
imull 0x1c0(%rsp), %eax
cvtsi2ss %eax, %xmm0
movss %xmm0, 0x30(%rsp)
cvtsi2ssq 0x50(%rsp), %xmm0
movq 0x70(%rsp), %rax
imulq 0x70(%rsp), %rax
cvtsi2ss %rax, %xmm1
divss 0x34(%rsp), %xmm1
subss %xmm1, %xmm0
movss %xmm0, 0x2c(%rsp)
cvtsi2ssq 0x48(%rsp), %xmm0
movq 0x68(%rsp), %rax
imulq 0x68(%rsp), %rax
cvtsi2ss %rax, %xmm1
divss 0x30(%rsp), %xmm1
subss %xmm1, %xmm0
movss %xmm0, 0x28(%rsp)
cvtsi2ssq 0x40(%rsp), %xmm0
movq 0x60(%rsp), %rax
imulq 0x60(%rsp), %rax
cvtsi2ss %rax, %xmm1
divss 0x34(%rsp), %xmm1
subss %xmm1, %xmm0
movss %xmm0, 0x24(%rsp)
cvtsi2ssq 0x38(%rsp), %xmm0
movq 0x58(%rsp), %rax
imulq 0x58(%rsp), %rax
cvtsi2ss %rax, %xmm1
divss 0x30(%rsp), %xmm1
subss %xmm1, %xmm0
movss %xmm0, 0x20(%rsp)
cvtsi2ssq 0x1a0(%rsp), %xmm0
movq 0x70(%rsp), %rax
imulq 0x60(%rsp), %rax
cvtsi2ss %rax, %xmm1
divss 0x34(%rsp), %xmm1
subss %xmm1, %xmm0
movss %xmm0, 0x1c(%rsp)
cvtsi2ssq 0x198(%rsp), %xmm0
movq 0x68(%rsp), %rax
imulq 0x58(%rsp), %rax
cvtsi2ss %rax, %xmm1
divss 0x30(%rsp), %xmm1
subss %xmm1, %xmm0
movss %xmm0, 0x18(%rsp)
movss 0x2c(%rsp), %xmm0
xorps %xmm1, %xmm1
ucomiss %xmm1, %xmm0
jbe 0x8070ea
movss 0x24(%rsp), %xmm0
xorps %xmm1, %xmm1
ucomiss %xmm1, %xmm0
jbe 0x8070ea
movss 0x1c(%rsp), %xmm0
movss %xmm0, 0xc(%rsp)
movss 0x2c(%rsp), %xmm0
mulss 0x24(%rsp), %xmm0
callq 0x188f0
movaps %xmm0, %xmm1
movss 0xc(%rsp), %xmm0
divss %xmm1, %xmm0
movq 0x1b0(%rsp), %rax
movss %xmm0, (%rax)
movq 0x1b0(%rsp), %rax
xorps %xmm0, %xmm0
ucomiss (%rax), %xmm0
jbe 0x8070c4
xorps %xmm0, %xmm0
movss %xmm0, 0x8(%rsp)
jmp 0x8070d6
movq 0x1b0(%rsp), %rax
movss (%rax), %xmm0
movss %xmm0, 0x8(%rsp)
movss 0x8(%rsp), %xmm0
movq 0x1b0(%rsp), %rax
movss %xmm0, (%rax)
jmp 0x8070fe
movq 0x1b0(%rsp), %rax
movss 0x27c3be(%rip), %xmm0 # 0xa834b8
movss %xmm0, (%rax)
movss 0x28(%rsp), %xmm0
xorps %xmm1, %xmm1
ucomiss %xmm1, %xmm0
jbe 0x807192
movss 0x20(%rsp), %xmm0
xorps %xmm1, %xmm1
ucomiss %xmm1, %xmm0
jbe 0x807192
movss 0x18(%rsp), %xmm0
movss %xmm0, 0x4(%rsp)
movss 0x28(%rsp), %xmm0
mulss 0x20(%rsp), %xmm0
callq 0x188f0
movaps %xmm0, %xmm1
movss 0x4(%rsp), %xmm0
divss %xmm1, %xmm0
movq 0x1a8(%rsp), %rax
movss %xmm0, (%rax)
movq 0x1a8(%rsp), %rax
xorps %xmm0, %xmm0
ucomiss (%rax), %xmm0
jbe 0x80716e
xorps %xmm0, %xmm0
movss %xmm0, (%rsp)
jmp 0x80717f
movq 0x1a8(%rsp), %rax
movss (%rax), %xmm0
movss %xmm0, (%rsp)
movss (%rsp), %xmm0
movq 0x1a8(%rsp), %rax
movss %xmm0, (%rax)
jmp 0x8071a6
movq 0x1a8(%rsp), %rax
movss 0x27c316(%rip), %xmm0 # 0xa834b8
movss %xmm0, (%rax)
addq $0x258, %rsp # imm = 0x258
retq
nop
| /m-ab-s[P]aom/av1/encoder/x86/rdopt_sse4.c |
horver_correlation_4x4 | static inline void horver_correlation_4x4(const int16_t *diff, int stride,
__m128i *xy_sum_32,
__m128i *xz_sum_32, __m128i *x_sum_32,
__m128i *x2_sum_32) {
// Pixels in this 4x4 [ a b c d ]
// are referred to as: [ e f g h ]
// [ i j k l ]
// [ m n o p ]
const __m128i pixelsa = xx_loadu_2x64(&diff[0 * stride], &diff[2 * stride]);
const __m128i pixelsb = xx_loadu_2x64(&diff[1 * stride], &diff[3 * stride]);
// pixelsa = [d c b a l k j i] as i16
// pixelsb = [h g f e p o n m] as i16
const __m128i slli_a = _mm_slli_epi64(pixelsa, 16);
const __m128i slli_b = _mm_slli_epi64(pixelsb, 16);
// slli_a = [c b a 0 k j i 0] as i16
// slli_b = [g f e 0 o n m 0] as i16
const __m128i xy_madd_a = _mm_madd_epi16(pixelsa, slli_a);
const __m128i xy_madd_b = _mm_madd_epi16(pixelsb, slli_b);
// xy_madd_a = [bc+cd ab jk+kl ij] as i32
// xy_madd_b = [fg+gh ef no+op mn] as i32
const __m128i xy32 = _mm_hadd_epi32(xy_madd_b, xy_madd_a);
// xy32 = [ab+bc+cd ij+jk+kl ef+fg+gh mn+no+op] as i32
*xy_sum_32 = _mm_add_epi32(*xy_sum_32, xy32);
const __m128i xz_madd_a = _mm_madd_epi16(slli_a, slli_b);
// xz_madd_a = [bf+cg ae jn+ko im] i32
const __m128i swap_b = _mm_srli_si128(slli_b, 8);
// swap_b = [0 0 0 0 g f e 0] as i16
const __m128i xz_madd_b = _mm_madd_epi16(slli_a, swap_b);
// xz_madd_b = [0 0 gk+fj ei] i32
const __m128i xz32 = _mm_hadd_epi32(xz_madd_b, xz_madd_a);
// xz32 = [ae+bf+cg im+jn+ko 0 ei+fj+gk] i32
*xz_sum_32 = _mm_add_epi32(*xz_sum_32, xz32);
// Now calculate the straight sums, x_sum += a+b+c+e+f+g+i+j+k
// (sum up every element in slli_a and swap_b)
const __m128i sum_slli_a = _mm_hadd_epi16(slli_a, slli_a);
const __m128i sum_slli_a32 = _mm_cvtepi16_epi32(sum_slli_a);
// sum_slli_a32 = [c+b a k+j i] as i32
const __m128i swap_b32 = _mm_cvtepi16_epi32(swap_b);
// swap_b32 = [g f e 0] as i32
*x_sum_32 = _mm_add_epi32(*x_sum_32, sum_slli_a32);
*x_sum_32 = _mm_add_epi32(*x_sum_32, swap_b32);
// sum = [c+b+g a+f k+j+e i] as i32
// Also sum their squares
const __m128i slli_a_2 = _mm_madd_epi16(slli_a, slli_a);
const __m128i swap_b_2 = _mm_madd_epi16(swap_b, swap_b);
// slli_a_2 = [c2+b2 a2 k2+j2 i2]
// swap_b_2 = [0 0 g2+f2 e2]
const __m128i sum2 = _mm_hadd_epi32(slli_a_2, swap_b_2);
// sum2 = [0 g2+f2+e2 c2+b2+a2 k2+j2+i2]
*x2_sum_32 = _mm_add_epi32(*x2_sum_32, sum2);
} | subq $0x388, %rsp # imm = 0x388
movq %rdi, 0x140(%rsp)
movl %esi, 0x13c(%rsp)
movq %rdx, 0x130(%rsp)
movq %rcx, 0x128(%rsp)
movq %r8, 0x120(%rsp)
movq %r9, 0x118(%rsp)
movq 0x140(%rsp), %rdi
movl 0x13c(%rsp), %eax
addl %eax, %eax
cltq
leaq (%rdi,%rax,2), %rsi
callq 0x8076f0
movaps %xmm0, 0x100(%rsp)
movq 0x140(%rsp), %rax
movslq 0x13c(%rsp), %rcx
movl %ecx, %edx
leaq (%rax,%rcx,2), %rdi
movl %edx, %ecx
leal (%rcx,%rcx,2), %ecx
movslq %ecx, %rcx
leaq (%rax,%rcx,2), %rsi
callq 0x8076f0
movaps %xmm0, 0xf0(%rsp)
movaps 0x100(%rsp), %xmm0
movaps %xmm0, 0x170(%rsp)
movl $0x10, 0x16c(%rsp)
movaps 0x170(%rsp), %xmm0
movd 0x16c(%rsp), %xmm1
psllq %xmm1, %xmm0
movaps %xmm0, 0xe0(%rsp)
movaps 0xf0(%rsp), %xmm0
movaps %xmm0, 0x150(%rsp)
movl $0x10, 0x14c(%rsp)
movaps 0x150(%rsp), %xmm0
movd 0x14c(%rsp), %xmm1
psllq %xmm1, %xmm0
movaps %xmm0, 0xd0(%rsp)
movaps 0x100(%rsp), %xmm1
movaps 0xe0(%rsp), %xmm0
movaps %xmm1, 0x230(%rsp)
movaps %xmm0, 0x220(%rsp)
movaps 0x230(%rsp), %xmm0
movaps 0x220(%rsp), %xmm1
pmaddwd %xmm1, %xmm0
movaps %xmm0, 0xc0(%rsp)
movaps 0xf0(%rsp), %xmm1
movaps 0xd0(%rsp), %xmm0
movaps %xmm1, 0x210(%rsp)
movaps %xmm0, 0x200(%rsp)
movaps 0x210(%rsp), %xmm0
movaps 0x200(%rsp), %xmm1
pmaddwd %xmm1, %xmm0
movaps %xmm0, 0xb0(%rsp)
movaps 0xb0(%rsp), %xmm1
movaps 0xc0(%rsp), %xmm0
movaps %xmm1, 0x290(%rsp)
movaps %xmm0, 0x280(%rsp)
movaps 0x290(%rsp), %xmm0
movaps 0x280(%rsp), %xmm1
phaddd %xmm1, %xmm0
movaps %xmm0, 0xa0(%rsp)
movq 0x130(%rsp), %rax
movaps (%rax), %xmm1
movaps 0xa0(%rsp), %xmm0
movaps %xmm1, 0x330(%rsp)
movaps %xmm0, 0x320(%rsp)
movaps 0x330(%rsp), %xmm0
movaps 0x320(%rsp), %xmm1
paddd %xmm1, %xmm0
movq 0x130(%rsp), %rax
movaps %xmm0, (%rax)
movaps 0xe0(%rsp), %xmm1
movaps 0xd0(%rsp), %xmm0
movaps %xmm1, 0x1f0(%rsp)
movaps %xmm0, 0x1e0(%rsp)
movaps 0x1f0(%rsp), %xmm0
movaps 0x1e0(%rsp), %xmm1
pmaddwd %xmm1, %xmm0
movaps %xmm0, 0x90(%rsp)
movaps 0xd0(%rsp), %xmm0
psrldq $0x8, %xmm0 # xmm0 = xmm0[8,9,10,11,12,13,14,15],zero,zero,zero,zero,zero,zero,zero,zero
movaps %xmm0, 0x80(%rsp)
movaps 0xe0(%rsp), %xmm1
movaps 0x80(%rsp), %xmm0
movaps %xmm1, 0x1d0(%rsp)
movaps %xmm0, 0x1c0(%rsp)
movaps 0x1d0(%rsp), %xmm0
movaps 0x1c0(%rsp), %xmm1
pmaddwd %xmm1, %xmm0
movaps %xmm0, 0x70(%rsp)
movaps 0x70(%rsp), %xmm1
movaps 0x90(%rsp), %xmm0
movaps %xmm1, 0x270(%rsp)
movaps %xmm0, 0x260(%rsp)
movaps 0x270(%rsp), %xmm0
movaps 0x260(%rsp), %xmm1
phaddd %xmm1, %xmm0
movaps %xmm0, 0x60(%rsp)
movq 0x128(%rsp), %rax
movaps (%rax), %xmm1
movaps 0x60(%rsp), %xmm0
movaps %xmm1, 0x310(%rsp)
movaps %xmm0, 0x300(%rsp)
movaps 0x310(%rsp), %xmm0
movaps 0x300(%rsp), %xmm1
paddd %xmm1, %xmm0
movq 0x128(%rsp), %rax
movaps %xmm0, (%rax)
movaps 0xe0(%rsp), %xmm0
movaps %xmm0, 0x350(%rsp)
movaps %xmm0, 0x340(%rsp)
movaps 0x350(%rsp), %xmm0
movaps 0x340(%rsp), %xmm1
phaddw %xmm1, %xmm0
movaps %xmm0, 0x50(%rsp)
movaps 0x50(%rsp), %xmm0
movaps %xmm0, 0x370(%rsp)
pmovsxwd 0x370(%rsp), %xmm0
movaps %xmm0, 0x40(%rsp)
movaps 0x80(%rsp), %xmm0
movaps %xmm0, 0x360(%rsp)
pmovsxwd 0x360(%rsp), %xmm0
movdqa %xmm0, 0x30(%rsp)
movq 0x120(%rsp), %rax
movdqa (%rax), %xmm1
movdqa 0x40(%rsp), %xmm0
movdqa %xmm1, 0x2f0(%rsp)
movdqa %xmm0, 0x2e0(%rsp)
movdqa 0x2f0(%rsp), %xmm0
movdqa 0x2e0(%rsp), %xmm1
paddd %xmm1, %xmm0
movq 0x120(%rsp), %rax
movdqa %xmm0, (%rax)
movq 0x120(%rsp), %rax
movdqa (%rax), %xmm1
movdqa 0x30(%rsp), %xmm0
movdqa %xmm1, 0x2d0(%rsp)
movdqa %xmm0, 0x2c0(%rsp)
movdqa 0x2d0(%rsp), %xmm0
movdqa 0x2c0(%rsp), %xmm1
paddd %xmm1, %xmm0
movq 0x120(%rsp), %rax
movdqa %xmm0, (%rax)
movdqa 0xe0(%rsp), %xmm1
movdqa 0xe0(%rsp), %xmm0
movdqa %xmm1, 0x1b0(%rsp)
movdqa %xmm0, 0x1a0(%rsp)
movdqa 0x1b0(%rsp), %xmm0
movdqa 0x1a0(%rsp), %xmm1
pmaddwd %xmm1, %xmm0
movdqa %xmm0, 0x20(%rsp)
movdqa 0x80(%rsp), %xmm1
movdqa 0x80(%rsp), %xmm0
movdqa %xmm1, 0x190(%rsp)
movdqa %xmm0, 0x180(%rsp)
movdqa 0x190(%rsp), %xmm0
movdqa 0x180(%rsp), %xmm1
pmaddwd %xmm1, %xmm0
movdqa %xmm0, 0x10(%rsp)
movdqa 0x20(%rsp), %xmm1
movdqa 0x10(%rsp), %xmm0
movdqa %xmm1, 0x250(%rsp)
movdqa %xmm0, 0x240(%rsp)
movdqa 0x250(%rsp), %xmm0
movdqa 0x240(%rsp), %xmm1
phaddd %xmm1, %xmm0
movdqa %xmm0, (%rsp)
movq 0x118(%rsp), %rax
movdqa (%rax), %xmm1
movdqa (%rsp), %xmm0
movdqa %xmm1, 0x2b0(%rsp)
movdqa %xmm0, 0x2a0(%rsp)
movdqa 0x2b0(%rsp), %xmm0
movdqa 0x2a0(%rsp), %xmm1
paddd %xmm1, %xmm0
movq 0x118(%rsp), %rax
movdqa %xmm0, (%rax)
addq $0x388, %rsp # imm = 0x388
retq
nopw %cs:(%rax,%rax)
| /m-ab-s[P]aom/av1/encoder/x86/rdopt_sse4.c |
av1_compute_stats_highbd_sse4_1 | void av1_compute_stats_highbd_sse4_1(int wiener_win, const uint8_t *dgd8,
const uint8_t *src8, int16_t *dgd_avg,
int16_t *src_avg, int h_start, int h_end,
int v_start, int v_end, int dgd_stride,
int src_stride, int64_t *M, int64_t *H,
aom_bit_depth_t bit_depth) {
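  // The 7x7 (WIENER_WIN) and 5x5 (WIENER_WIN_CHROMA) Wiener windows have
  // SIMD fast paths; any other window size falls back to the C version.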
if (wiener_win == WIENER_WIN) {
(void)dgd_avg;
(void)src_avg;
compute_stats_highbd_win7_opt_sse4_1(dgd8, src8, h_start, h_end, v_start,
v_end, dgd_stride, src_stride, M, H,
bit_depth);
} else if (wiener_win == WIENER_WIN_CHROMA) {
(void)dgd_avg;
(void)src_avg;
compute_stats_highbd_win5_opt_sse4_1(dgd8, src8, h_start, h_end, v_start,
v_end, dgd_stride, src_stride, M, H,
bit_depth);
} else {
av1_compute_stats_highbd_c(wiener_win, dgd8, src8, dgd_avg, src_avg,
h_start, h_end, v_start, v_end, dgd_stride,
src_stride, M, H, bit_depth);
}
} | pushq %rbp
pushq %r15
pushq %r14
pushq %r12
pushq %rbx
subq $0x70, %rsp
movl 0xd8(%rsp), %eax
movq 0xd0(%rsp), %rax
movq 0xc8(%rsp), %rax
movl 0xc0(%rsp), %eax
movl 0xb8(%rsp), %eax
movl 0xb0(%rsp), %eax
movl 0xa8(%rsp), %eax
movl 0xa0(%rsp), %eax
movl %edi, 0x6c(%rsp)
movq %rsi, 0x60(%rsp)
movq %rdx, 0x58(%rsp)
movq %rcx, 0x50(%rsp)
movq %r8, 0x48(%rsp)
movl %r9d, 0x44(%rsp)
cmpl $0x7, 0x6c(%rsp)
jne 0x807823
movq 0x60(%rsp), %rdi
movq 0x58(%rsp), %rsi
movl 0x44(%rsp), %edx
movl 0xa0(%rsp), %ecx
movl 0xa8(%rsp), %r8d
movl 0xb0(%rsp), %r9d
movl 0xb8(%rsp), %ebp
movl 0xc0(%rsp), %ebx
movq 0xc8(%rsp), %r11
movq 0xd0(%rsp), %r10
movl 0xd8(%rsp), %eax
movl %ebp, (%rsp)
movl %ebx, 0x8(%rsp)
movq %r11, 0x10(%rsp)
movq %r10, 0x18(%rsp)
movl %eax, 0x20(%rsp)
callq 0x807930
jmp 0x807918
cmpl $0x5, 0x6c(%rsp)
jne 0x807893
movq 0x60(%rsp), %rdi
movq 0x58(%rsp), %rsi
movl 0x44(%rsp), %edx
movl 0xa0(%rsp), %ecx
movl 0xa8(%rsp), %r8d
movl 0xb0(%rsp), %r9d
movl 0xb8(%rsp), %ebp
movl 0xc0(%rsp), %ebx
movq 0xc8(%rsp), %r11
movq 0xd0(%rsp), %r10
movl 0xd8(%rsp), %eax
movl %ebp, (%rsp)
movl %ebx, 0x8(%rsp)
movq %r11, 0x10(%rsp)
movq %r10, 0x18(%rsp)
movl %eax, 0x20(%rsp)
callq 0x807e70
jmp 0x807916
movl 0x6c(%rsp), %edi
movq 0x60(%rsp), %rsi
movq 0x58(%rsp), %rdx
movq 0x50(%rsp), %rcx
movq 0x48(%rsp), %r8
movl 0x44(%rsp), %r9d
movl 0xa0(%rsp), %r12d
movl 0xa8(%rsp), %r15d
movl 0xb0(%rsp), %r14d
movl 0xb8(%rsp), %ebp
movl 0xc0(%rsp), %ebx
movq 0xc8(%rsp), %r11
movq 0xd0(%rsp), %r10
movl 0xd8(%rsp), %eax
movl %r12d, (%rsp)
movl %r15d, 0x8(%rsp)
movl %r14d, 0x10(%rsp)
movl %ebp, 0x18(%rsp)
movl %ebx, 0x20(%rsp)
movq %r11, 0x28(%rsp)
movq %r10, 0x30(%rsp)
movl %eax, 0x38(%rsp)
callq 0x1bc790
jmp 0x807918
addq $0x70, %rsp
popq %rbx
popq %r12
popq %r14
popq %r15
popq %rbp
retq
nopw %cs:(%rax,%rax)
| /m-ab-s[P]aom/av1/encoder/x86/pickrst_sse4.c |
xy_y_convolve_4tap_2x2_sse2 | static inline __m128i xy_y_convolve_4tap_2x2_sse2(const int16_t *const src,
__m128i s_32[4],
__m128i ss_128[2],
const __m128i coeffs[2]) {
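  // Vertical 4-tap convolve of a 2-pixel-wide column: load the next two rows,
  // interleave them with the previously loaded rows into 16-bit pairs, run the
  // madd-based 4-tap filter, then slide the window down for the next call.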
s_32[3] = _mm_cvtsi32_si128(loadu_int32(src + 3 * 2));
const __m128i src23 = _mm_unpacklo_epi32(s_32[2], s_32[3]);
s_32[2] = _mm_cvtsi32_si128(loadu_int32(src + 4 * 2));
const __m128i src34 = _mm_unpacklo_epi32(s_32[3], s_32[2]);
ss_128[1] = _mm_unpacklo_epi16(src23, src34);
const __m128i r = convolve16_4tap_sse2(ss_128, coeffs);
ss_128[0] = ss_128[1];
return r;
} | subq $0xf8, %rsp
movq %rdi, 0x48(%rsp)
movq %rsi, 0x40(%rsp)
movq %rdx, 0x38(%rsp)
movq %rcx, 0x30(%rsp)
movq 0x48(%rsp), %rdi
addq $0xc, %rdi
callq 0x8b1ea0
movl %eax, 0x8c(%rsp)
vmovd 0x8c(%rsp), %xmm0
vmovdqa %xmm0, 0x70(%rsp)
vmovdqa 0x70(%rsp), %xmm0
movq 0x40(%rsp), %rax
vmovdqa %xmm0, 0x30(%rax)
movq 0x40(%rsp), %rax
vmovdqa 0x20(%rax), %xmm1
vmovdqa 0x30(%rax), %xmm0
vmovdqa %xmm1, 0xc0(%rsp)
vmovdqa %xmm0, 0xb0(%rsp)
vmovdqa 0xc0(%rsp), %xmm0
vmovdqa 0xb0(%rsp), %xmm1
vpunpckldq %xmm1, %xmm0, %xmm0 # xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1]
vmovdqa %xmm0, 0x20(%rsp)
movq 0x48(%rsp), %rdi
addq $0x10, %rdi
callq 0x8b1ea0
movl %eax, 0x6c(%rsp)
vmovd 0x6c(%rsp), %xmm0
vmovdqa %xmm0, 0x50(%rsp)
vmovdqa 0x50(%rsp), %xmm0
movq 0x40(%rsp), %rax
vmovdqa %xmm0, 0x20(%rax)
movq 0x40(%rsp), %rax
vmovdqa 0x20(%rax), %xmm0
vmovdqa 0x30(%rax), %xmm1
vmovdqa %xmm1, 0xa0(%rsp)
vmovdqa %xmm0, 0x90(%rsp)
vmovdqa 0xa0(%rsp), %xmm0
vmovdqa 0x90(%rsp), %xmm1
vpunpckldq %xmm1, %xmm0, %xmm0 # xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1]
vmovdqa %xmm0, 0x10(%rsp)
vmovdqa 0x20(%rsp), %xmm1
vmovdqa 0x10(%rsp), %xmm0
vmovdqa %xmm1, 0xe0(%rsp)
vmovdqa %xmm0, 0xd0(%rsp)
vmovdqa 0xe0(%rsp), %xmm0
vmovdqa 0xd0(%rsp), %xmm1
vpunpcklwd %xmm1, %xmm0, %xmm0 # xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1],xmm0[2],xmm1[2],xmm0[3],xmm1[3]
movq 0x38(%rsp), %rax
vmovdqa %xmm0, 0x10(%rax)
movq 0x38(%rsp), %rdi
movq 0x30(%rsp), %rsi
callq 0x8b8f70
vmovdqa %xmm0, (%rsp)
movq 0x38(%rsp), %rax
vmovdqa 0x10(%rax), %xmm0
movq 0x38(%rsp), %rax
vmovdqa %xmm0, (%rax)
vmovdqa (%rsp), %xmm0
addq $0xf8, %rsp
retq
nopl (%rax)
| /m-ab-s[P]aom/third_party/SVT-AV1/convolve_avx2.h |
prepare_coeffs_4tap_avx2 | static inline void prepare_coeffs_4tap_avx2(
const InterpFilterParams *const filter_params, const int32_t subpel_q4,
__m256i *const coeffs /* [2] */) {
const int16_t *filter = av1_get_interp_filter_subpel_kernel(
filter_params, subpel_q4 & SUBPEL_MASK);
const __m128i coeff_8 = _mm_loadu_si128((__m128i *)filter);
const __m256i coeff = _mm256_broadcastsi128_si256(coeff_8);
// coeffs 2 3 2 3 2 3 2 3
coeffs[0] = _mm256_shuffle_epi32(coeff, 0x55);
// coeffs 4 5 4 5 4 5 4 5
coeffs[1] = _mm256_shuffle_epi32(coeff, 0xaa);
} | pushq %rbp
movq %rsp, %rbp
andq $-0x20, %rsp
subq $0x80, %rsp
movq %rdi, 0x50(%rsp)
movl %esi, 0x4c(%rsp)
movq %rdx, 0x40(%rsp)
movq 0x50(%rsp), %rdi
movl 0x4c(%rsp), %esi
andl $0xf, %esi
callq 0x8a82e0
movq %rax, 0x38(%rsp)
movq 0x38(%rsp), %rax
movq %rax, 0x58(%rsp)
movq 0x58(%rsp), %rax
vmovdqu (%rax), %xmm0
vmovdqa %xmm0, 0x20(%rsp)
vmovdqa 0x20(%rsp), %xmm0
vmovdqa %xmm0, 0x60(%rsp)
vbroadcastf128 0x60(%rsp), %ymm0 # ymm0 = mem[0,1,0,1]
vmovaps %ymm0, (%rsp)
vmovaps (%rsp), %ymm0
vpshufd $0x55, %ymm0, %ymm0 # ymm0 = ymm0[1,1,1,1,5,5,5,5]
movq 0x40(%rsp), %rax
vmovaps %ymm0, (%rax)
vmovaps (%rsp), %ymm0
vpshufd $0xaa, %ymm0, %ymm0 # ymm0 = ymm0[2,2,2,2,6,6,6,6]
movq 0x40(%rsp), %rax
vmovdqa %ymm0, 0x20(%rax)
movq %rbp, %rsp
popq %rbp
vzeroupper
retq
nopw %cs:(%rax,%rax)
| /m-ab-s[P]aom/third_party/SVT-AV1/convolve_avx2.h |
xy_y_convolve_4tap_8x2_half_pel_avx2 | static inline void xy_y_convolve_4tap_8x2_half_pel_avx2(
const int16_t *const src, const __m256i coeffs[1], __m256i s_256[4],
__m256i r[2]) {
__m256i a_256[2];
s_256[2] = _mm256_loadu_si256((__m256i *)(src + 2 * 8));
s_256[3] = _mm256_loadu_si256((__m256i *)(src + 3 * 8));
a_256[0] = _mm256_add_epi16(s_256[0], s_256[3]);
a_256[1] = _mm256_add_epi16(s_256[1], s_256[2]);
xy_y_convolve_2tap_16_avx2(a_256[0], a_256[1], coeffs, r);
s_256[0] = s_256[2];
s_256[1] = s_256[3];
} | pushq %rbp
movq %rsp, %rbp
andq $-0x20, %rsp
subq $0x120, %rsp # imm = 0x120
movq %rdi, 0x68(%rsp)
movq %rsi, 0x60(%rsp)
movq %rdx, 0x58(%rsp)
movq %rcx, 0x50(%rsp)
movq 0x68(%rsp), %rax
addq $0x20, %rax
movq %rax, 0x78(%rsp)
movq 0x78(%rsp), %rax
vmovdqu (%rax), %ymm0
movq 0x58(%rsp), %rax
vmovdqa %ymm0, 0x40(%rax)
movq 0x68(%rsp), %rax
addq $0x30, %rax
movq %rax, 0x70(%rsp)
movq 0x70(%rsp), %rax
vmovdqu (%rax), %ymm0
movq 0x58(%rsp), %rax
vmovdqa %ymm0, 0x60(%rax)
movq 0x58(%rsp), %rax
vmovdqa (%rax), %ymm1
movq 0x58(%rsp), %rax
vmovdqa 0x60(%rax), %ymm0
vmovdqa %ymm1, 0xe0(%rsp)
vmovdqa %ymm0, 0xc0(%rsp)
vmovdqa 0xe0(%rsp), %ymm0
vmovdqa 0xc0(%rsp), %ymm1
vpaddw %ymm1, %ymm0, %ymm0
vmovdqa %ymm0, (%rsp)
movq 0x58(%rsp), %rax
vmovdqa 0x20(%rax), %ymm1
movq 0x58(%rsp), %rax
vmovdqa 0x40(%rax), %ymm0
vmovdqa %ymm1, 0xa0(%rsp)
vmovdqa %ymm0, 0x80(%rsp)
vmovdqa 0xa0(%rsp), %ymm0
vmovdqa 0x80(%rsp), %ymm1
vpaddw %ymm1, %ymm0, %ymm0
vmovdqa %ymm0, 0x20(%rsp)
vmovdqa (%rsp), %ymm0
vmovdqa 0x20(%rsp), %ymm1
movq 0x60(%rsp), %rdi
movq 0x50(%rsp), %rsi
callq 0x8b7e90
movq 0x58(%rsp), %rax
vmovdqa 0x40(%rax), %ymm0
movq 0x58(%rsp), %rax
vmovdqa %ymm0, (%rax)
movq 0x58(%rsp), %rax
vmovdqa 0x60(%rax), %ymm0
movq 0x58(%rsp), %rax
vmovdqa %ymm0, 0x20(%rax)
movq %rbp, %rsp
popq %rbp
vzeroupper
retq
| /m-ab-s[P]aom/third_party/SVT-AV1/convolve_avx2.h |
xy_y_convolve_4tap_16x2_half_pelavx2 | static inline void xy_y_convolve_4tap_16x2_half_pelavx2(
const int16_t *const src, __m256i s_256[5], const __m256i coeffs[1],
__m256i r[4]) {
__m256i a_256[2];
s_256[3] = _mm256_loadu_si256((__m256i *)(src + 3 * 16));
s_256[4] = _mm256_loadu_si256((__m256i *)(src + 4 * 16));
a_256[0] = _mm256_add_epi16(s_256[0], s_256[3]);
a_256[1] = _mm256_add_epi16(s_256[1], s_256[2]);
xy_y_convolve_2tap_16_avx2(a_256[0], a_256[1], coeffs, r + 0);
a_256[0] = _mm256_add_epi16(s_256[1], s_256[4]);
a_256[1] = _mm256_add_epi16(s_256[2], s_256[3]);
xy_y_convolve_2tap_16_avx2(a_256[0], a_256[1], coeffs, r + 2);
s_256[0] = s_256[2];
s_256[1] = s_256[3];
s_256[2] = s_256[4];
} | pushq %rbp
movq %rsp, %rbp
andq $-0x20, %rsp
subq $0x1a0, %rsp # imm = 0x1A0
movq %rdi, 0x68(%rsp)
movq %rsi, 0x60(%rsp)
movq %rdx, 0x58(%rsp)
movq %rcx, 0x50(%rsp)
movq 0x68(%rsp), %rax
addq $0x60, %rax
movq %rax, 0x78(%rsp)
movq 0x78(%rsp), %rax
vmovdqu (%rax), %ymm0
movq 0x60(%rsp), %rax
vmovdqa %ymm0, 0x60(%rax)
movq 0x68(%rsp), %rax
addq $0x80, %rax
movq %rax, 0x70(%rsp)
movq 0x70(%rsp), %rax
vmovdqu (%rax), %ymm0
movq 0x60(%rsp), %rax
vmovdqa %ymm0, 0x80(%rax)
movq 0x60(%rsp), %rax
vmovdqa (%rax), %ymm1
movq 0x60(%rsp), %rax
vmovdqa 0x60(%rax), %ymm0
vmovdqa %ymm1, 0x160(%rsp)
vmovdqa %ymm0, 0x140(%rsp)
vmovdqa 0x160(%rsp), %ymm0
vmovdqa 0x140(%rsp), %ymm1
vpaddw %ymm1, %ymm0, %ymm0
vmovdqa %ymm0, (%rsp)
movq 0x60(%rsp), %rax
vmovdqa 0x20(%rax), %ymm1
movq 0x60(%rsp), %rax
vmovdqa 0x40(%rax), %ymm0
vmovdqa %ymm1, 0x120(%rsp)
vmovdqa %ymm0, 0x100(%rsp)
vmovdqa 0x120(%rsp), %ymm0
vmovdqa 0x100(%rsp), %ymm1
vpaddw %ymm1, %ymm0, %ymm0
vmovdqa %ymm0, 0x20(%rsp)
vmovdqa (%rsp), %ymm0
vmovdqa 0x20(%rsp), %ymm1
movq 0x58(%rsp), %rdi
movq 0x50(%rsp), %rsi
callq 0x8b7e90
movq 0x60(%rsp), %rax
vmovdqa 0x20(%rax), %ymm1
movq 0x60(%rsp), %rax
vmovdqa 0x80(%rax), %ymm0
vmovdqa %ymm1, 0xe0(%rsp)
vmovdqa %ymm0, 0xc0(%rsp)
vmovdqa 0xe0(%rsp), %ymm0
vmovdqa 0xc0(%rsp), %ymm1
vpaddw %ymm1, %ymm0, %ymm0
vmovdqa %ymm0, (%rsp)
movq 0x60(%rsp), %rax
vmovdqa 0x40(%rax), %ymm1
movq 0x60(%rsp), %rax
vmovdqa 0x60(%rax), %ymm0
vmovdqa %ymm1, 0xa0(%rsp)
vmovdqa %ymm0, 0x80(%rsp)
vmovdqa 0xa0(%rsp), %ymm0
vmovdqa 0x80(%rsp), %ymm1
vpaddw %ymm1, %ymm0, %ymm0
vmovdqa %ymm0, 0x20(%rsp)
vmovdqa (%rsp), %ymm0
vmovdqa 0x20(%rsp), %ymm1
movq 0x58(%rsp), %rdi
movq 0x50(%rsp), %rsi
addq $0x40, %rsi
callq 0x8b7e90
movq 0x60(%rsp), %rax
vmovdqa 0x40(%rax), %ymm0
movq 0x60(%rsp), %rax
vmovdqa %ymm0, (%rax)
movq 0x60(%rsp), %rax
vmovdqa 0x60(%rax), %ymm0
movq 0x60(%rsp), %rax
vmovdqa %ymm0, 0x20(%rax)
movq 0x60(%rsp), %rax
vmovdqa 0x80(%rax), %ymm0
movq 0x60(%rsp), %rax
vmovdqa %ymm0, 0x40(%rax)
movq %rbp, %rsp
popq %rbp
vzeroupper
retq
| /m-ab-s[P]aom/third_party/SVT-AV1/convolve_avx2.h |
prepare_coeffs_6tap_avx2 | static inline void prepare_coeffs_6tap_avx2(
const InterpFilterParams *const filter_params, const int32_t subpel_q4,
    __m256i *const coeffs /* [3] */) {
const int16_t *const filter = av1_get_interp_filter_subpel_kernel(
filter_params, subpel_q4 & SUBPEL_MASK);
const __m128i coeffs_8 = _mm_loadu_si128((__m128i *)filter);
const __m256i coeff = _mm256_broadcastsi128_si256(coeffs_8);
// coeffs 1 2 1 2 1 2 1 2
coeffs[0] = _mm256_shuffle_epi8(coeff, _mm256_set1_epi32(0x05040302u));
// coeffs 3 4 3 4 3 4 3 4
coeffs[1] = _mm256_shuffle_epi8(coeff, _mm256_set1_epi32(0x09080706u));
// coeffs 5 6 5 6 5 6 5 6
coeffs[2] = _mm256_shuffle_epi8(coeff, _mm256_set1_epi32(0x0D0C0B0Au));
} | pushq %rbp
movq %rsp, %rbp
andq $-0x20, %rsp
subq $0x240, %rsp # imm = 0x240
movq %rdi, 0x60(%rsp)
movl %esi, 0x5c(%rsp)
movq %rdx, 0x50(%rsp)
movq 0x60(%rsp), %rdi
movl 0x5c(%rsp), %esi
andl $0xf, %esi
callq 0x8a82e0
movq %rax, 0x48(%rsp)
movq 0x48(%rsp), %rax
movq %rax, 0x78(%rsp)
movq 0x78(%rsp), %rax
vmovdqu (%rax), %xmm0
vmovdqa %xmm0, 0x30(%rsp)
vmovdqa 0x30(%rsp), %xmm0
vmovdqa %xmm0, 0x150(%rsp)
vbroadcastf128 0x150(%rsp), %ymm0 # ymm0 = mem[0,1,0,1]
vmovaps %ymm0, (%rsp)
vmovaps (%rsp), %ymm1
movl $0x5040302, 0x74(%rsp) # imm = 0x5040302
movl 0x74(%rsp), %eax
movl %eax, 0xbc(%rsp)
movl %eax, 0xb8(%rsp)
movl %eax, 0xb4(%rsp)
movl %eax, 0xb0(%rsp)
movl %eax, 0xac(%rsp)
movl %eax, 0xa8(%rsp)
movl %eax, 0xa4(%rsp)
movl %eax, 0xa0(%rsp)
movl 0xa4(%rsp), %r8d
movl 0xa8(%rsp), %edi
movl 0xac(%rsp), %esi
movl 0xb4(%rsp), %edx
movl 0xb8(%rsp), %ecx
movl 0xbc(%rsp), %eax
vmovd 0xa0(%rsp), %xmm0
vpinsrd $0x1, %r8d, %xmm0, %xmm0
vpinsrd $0x2, %edi, %xmm0, %xmm0
vpinsrd $0x3, %esi, %xmm0, %xmm0
vmovd 0xb0(%rsp), %xmm2
vpinsrd $0x1, %edx, %xmm2, %xmm2
vpinsrd $0x2, %ecx, %xmm2, %xmm2
vpinsrd $0x3, %eax, %xmm2, %xmm2
vmovdqa %xmm2, 0x90(%rsp)
vmovdqa %xmm0, 0x80(%rsp)
vmovaps 0x80(%rsp), %ymm0
vmovaps %ymm1, 0x200(%rsp)
vmovaps %ymm0, 0x1e0(%rsp)
vmovaps 0x200(%rsp), %ymm0
vmovaps 0x1e0(%rsp), %ymm1
vpshufb %ymm1, %ymm0, %ymm0
movq 0x50(%rsp), %rax
vmovaps %ymm0, (%rax)
vmovaps (%rsp), %ymm1
movl $0x9080706, 0x70(%rsp) # imm = 0x9080706
movl 0x70(%rsp), %eax
movl %eax, 0xfc(%rsp)
movl %eax, 0xf8(%rsp)
movl %eax, 0xf4(%rsp)
movl %eax, 0xf0(%rsp)
movl %eax, 0xec(%rsp)
movl %eax, 0xe8(%rsp)
movl %eax, 0xe4(%rsp)
movl %eax, 0xe0(%rsp)
movl 0xe4(%rsp), %r8d
movl 0xe8(%rsp), %edi
movl 0xec(%rsp), %esi
movl 0xf4(%rsp), %edx
movl 0xf8(%rsp), %ecx
movl 0xfc(%rsp), %eax
vmovd 0xe0(%rsp), %xmm0
vpinsrd $0x1, %r8d, %xmm0, %xmm0
vpinsrd $0x2, %edi, %xmm0, %xmm0
vpinsrd $0x3, %esi, %xmm0, %xmm0
vmovd 0xf0(%rsp), %xmm2
vpinsrd $0x1, %edx, %xmm2, %xmm2
vpinsrd $0x2, %ecx, %xmm2, %xmm2
vpinsrd $0x3, %eax, %xmm2, %xmm2
vmovdqa %xmm2, 0xd0(%rsp)
vmovdqa %xmm0, 0xc0(%rsp)
vmovaps 0xc0(%rsp), %ymm0
vmovaps %ymm1, 0x1c0(%rsp)
vmovaps %ymm0, 0x1a0(%rsp)
vmovaps 0x1c0(%rsp), %ymm0
vmovaps 0x1a0(%rsp), %ymm1
vpshufb %ymm1, %ymm0, %ymm0
movq 0x50(%rsp), %rax
vmovaps %ymm0, 0x20(%rax)
vmovaps (%rsp), %ymm1
movl $0xd0c0b0a, 0x6c(%rsp) # imm = 0xD0C0B0A
movl 0x6c(%rsp), %eax
movl %eax, 0x14c(%rsp)
movl %eax, 0x148(%rsp)
movl %eax, 0x144(%rsp)
movl %eax, 0x140(%rsp)
movl %eax, 0x13c(%rsp)
movl %eax, 0x138(%rsp)
movl %eax, 0x134(%rsp)
movl %eax, 0x130(%rsp)
movl 0x134(%rsp), %edx
movl 0x138(%rsp), %ecx
movl 0x13c(%rsp), %eax
movl 0x144(%rsp), %r8d
movl 0x148(%rsp), %edi
movl 0x14c(%rsp), %esi
vmovd 0x140(%rsp), %xmm0
vpinsrd $0x1, %r8d, %xmm0, %xmm0
vpinsrd $0x2, %edi, %xmm0, %xmm0
vpinsrd $0x3, %esi, %xmm0, %xmm2
vmovd 0x130(%rsp), %xmm0
vpinsrd $0x1, %edx, %xmm0, %xmm0
vpinsrd $0x2, %ecx, %xmm0, %xmm0
vpinsrd $0x3, %eax, %xmm0, %xmm3
vmovaps %xmm3, %xmm0
vinserti128 $0x1, %xmm2, %ymm0, %ymm0
vmovdqa %ymm0, 0x100(%rsp)
vmovdqa 0x100(%rsp), %ymm0
vmovdqa %ymm1, 0x180(%rsp)
vmovdqa %ymm0, 0x160(%rsp)
vmovdqa 0x180(%rsp), %ymm0
vmovdqa 0x160(%rsp), %ymm1
vpshufb %ymm1, %ymm0, %ymm0
movq 0x50(%rsp), %rax
vmovdqa %ymm0, 0x40(%rax)
movq %rbp, %rsp
popq %rbp
vzeroupper
retq
nopw (%rax,%rax)
| /m-ab-s[P]aom/third_party/SVT-AV1/convolve_avx2.h |
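In prepare_coeffs_6tap_avx2 the byte-shuffle masks 0x05040302, 0x09080706 and 0x0D0C0B0A select bytes 2..5, 6..9 and 10..13 of the broadcast kernel, i.e. the 16-bit tap pairs (1,2), (3,4) and (5,6); entries 0 and 7 of the stored 8-entry kernel are skipped, consistent with 6-tap filters being stored with zero end taps (an assumption about the filter tables, which are not shown here). A small sketch with a hypothetical main() and example taps, assuming an AVX2 build:

#include <immintrin.h>
#include <stdint.h>
#include <stdio.h>

int main(void) {
  // Example 8-entry kernel with zero end taps, as a 6-tap filter would be stored.
  const int16_t filter[8] = { 0, 3, -13, 74, 74, -13, 3, 0 };
  const __m128i coeffs_8 = _mm_loadu_si128((const __m128i *)filter);
  const __m256i coeff = _mm256_broadcastsi128_si256(coeffs_8);
  // Bytes 2..5 of every 128-bit lane: the 16-bit taps 1 and 2.
  const __m256i c12 = _mm256_shuffle_epi8(coeff, _mm256_set1_epi32(0x05040302));
  int16_t lane[16];
  _mm256_storeu_si256((__m256i *)lane, c12);
  printf("first pair: %d %d\n", lane[0], lane[1]);  // 3 -13
  return 0;
}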
xy_y_convolve_6tap_8x2_avx2 | static inline void xy_y_convolve_6tap_8x2_avx2(const int16_t *const src,
__m256i ss_256[6],
const __m256i coeffs[3],
__m256i r[2]) {
__m256i s_256[2];
s_256[0] = _mm256_loadu_si256((__m256i *)(src + 4 * 8));
s_256[1] = _mm256_loadu_si256((__m256i *)(src + 5 * 8));
ss_256[2] = _mm256_unpacklo_epi16(s_256[0], s_256[1]);
ss_256[5] = _mm256_unpackhi_epi16(s_256[0], s_256[1]);
xy_y_convolve_6tap_16_avx2(ss_256, coeffs, r);
ss_256[0] = ss_256[1];
ss_256[1] = ss_256[2];
ss_256[3] = ss_256[4];
ss_256[4] = ss_256[5];
} | pushq %rbp
movq %rsp, %rbp
andq $-0x20, %rsp
subq $0x100, %rsp # imm = 0x100
movq %rdi, 0x58(%rsp)
movq %rsi, 0x50(%rsp)
movq %rdx, 0x48(%rsp)
movq %rcx, 0x40(%rsp)
movq 0x58(%rsp), %rax
addq $0x40, %rax
movq %rax, 0xe8(%rsp)
movq 0xe8(%rsp), %rax
vmovups (%rax), %ymm0
vmovaps %ymm0, (%rsp)
movq 0x58(%rsp), %rax
addq $0x50, %rax
movq %rax, 0xe0(%rsp)
movq 0xe0(%rsp), %rax
vmovups (%rax), %ymm0
vmovaps %ymm0, 0x20(%rsp)
vmovaps (%rsp), %ymm1
vmovaps 0x20(%rsp), %ymm0
vmovaps %ymm1, 0x80(%rsp)
vmovaps %ymm0, 0x60(%rsp)
vmovaps 0x80(%rsp), %ymm0
vmovaps 0x60(%rsp), %ymm1
vpunpcklwd %ymm1, %ymm0, %ymm0 # ymm0 = ymm0[0],ymm1[0],ymm0[1],ymm1[1],ymm0[2],ymm1[2],ymm0[3],ymm1[3],ymm0[8],ymm1[8],ymm0[9],ymm1[9],ymm0[10],ymm1[10],ymm0[11],ymm1[11]
movq 0x50(%rsp), %rax
vmovaps %ymm0, 0x40(%rax)
vmovaps (%rsp), %ymm1
vmovaps 0x20(%rsp), %ymm0
vmovaps %ymm1, 0xc0(%rsp)
vmovaps %ymm0, 0xa0(%rsp)
vmovaps 0xc0(%rsp), %ymm0
vmovaps 0xa0(%rsp), %ymm1
vpunpckhwd %ymm1, %ymm0, %ymm0 # ymm0 = ymm0[4],ymm1[4],ymm0[5],ymm1[5],ymm0[6],ymm1[6],ymm0[7],ymm1[7],ymm0[12],ymm1[12],ymm0[13],ymm1[13],ymm0[14],ymm1[14],ymm0[15],ymm1[15]
movq 0x50(%rsp), %rax
vmovdqa %ymm0, 0xa0(%rax)
movq 0x50(%rsp), %rdi
movq 0x48(%rsp), %rsi
movq 0x40(%rsp), %rdx
vzeroupper
callq 0x8ba7b0
movq 0x50(%rsp), %rax
vmovdqa 0x20(%rax), %ymm0
movq 0x50(%rsp), %rax
vmovdqa %ymm0, (%rax)
movq 0x50(%rsp), %rax
vmovdqa 0x40(%rax), %ymm0
movq 0x50(%rsp), %rax
vmovdqa %ymm0, 0x20(%rax)
movq 0x50(%rsp), %rax
vmovdqa 0x80(%rax), %ymm0
movq 0x50(%rsp), %rax
vmovdqa %ymm0, 0x60(%rax)
movq 0x50(%rsp), %rax
vmovdqa 0xa0(%rax), %ymm0
movq 0x50(%rsp), %rax
vmovdqa %ymm0, 0x80(%rax)
movq %rbp, %rsp
popq %rbp
vzeroupper
retq
nopw %cs:(%rax,%rax)
| /m-ab-s[P]aom/third_party/SVT-AV1/convolve_avx2.h |
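The vpunpcklwd/vpunpckhwd annotations in the disassembly above spell out an AVX2 detail the callers depend on: 256-bit unpacks interleave within each 128-bit half (elements 0-3 with 8-11, 4-7 with 12-15) and never cross lanes, which is why the low and high interleaves are kept in separate ss_256 slots. A tiny demo of that in-lane order; the main() and test values are illustrative, AVX2 build assumed.

#include <immintrin.h>
#include <stdint.h>
#include <stdio.h>

int main(void) {
  int16_t a[16], b[16], lo[16];
  for (int i = 0; i < 16; ++i) { a[i] = (int16_t)i; b[i] = (int16_t)(100 + i); }
  const __m256i va = _mm256_loadu_si256((const __m256i *)a);
  const __m256i vb = _mm256_loadu_si256((const __m256i *)b);
  _mm256_storeu_si256((__m256i *)lo, _mm256_unpacklo_epi16(va, vb));
  // Prints 0 100 1 101 2 102 3 103 8 108 9 109 10 110 11 111: the low unpack
  // takes elements 0..3 of each 128-bit lane, not elements 0..7 of the vector.
  for (int i = 0; i < 16; ++i) printf("%d ", lo[i]);
  printf("\n");
  return 0;
}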
xy_y_convolve_6tap_8x2_half_pel_avx2 | static inline void xy_y_convolve_6tap_8x2_half_pel_avx2(
const int16_t *const src, const __m256i coeffs[2], __m256i s_256[6],
__m256i r[2]) {
__m256i a_256[2], ss_256[4];
s_256[4] = _mm256_loadu_si256((__m256i *)(src + 4 * 8));
s_256[5] = _mm256_loadu_si256((__m256i *)(src + 5 * 8));
a_256[0] = _mm256_add_epi16(s_256[0], s_256[5]);
a_256[1] = _mm256_add_epi16(s_256[1], s_256[4]);
ss_256[0] = _mm256_unpacklo_epi16(a_256[0], a_256[1]);
ss_256[1] = _mm256_unpacklo_epi16(s_256[2], s_256[3]);
ss_256[2] = _mm256_unpackhi_epi16(a_256[0], a_256[1]);
ss_256[3] = _mm256_unpackhi_epi16(s_256[2], s_256[3]);
xy_y_convolve_4tap_16_avx2(ss_256, coeffs, r);
s_256[0] = s_256[2];
s_256[1] = s_256[3];
s_256[2] = s_256[4];
s_256[3] = s_256[5];
} | pushq %rbp
movq %rsp, %rbp
andq $-0x20, %rsp
subq $0x2a0, %rsp # imm = 0x2A0
movq %rdi, 0xd8(%rsp)
movq %rsi, 0xd0(%rsp)
movq %rdx, 0xc8(%rsp)
movq %rcx, 0xc0(%rsp)
movq 0xd8(%rsp), %rax
addq $0x40, %rax
movq %rax, 0x1f8(%rsp)
movq 0x1f8(%rsp), %rax
vmovups (%rax), %ymm0
movq 0xc8(%rsp), %rax
vmovaps %ymm0, 0x80(%rax)
movq 0xd8(%rsp), %rax
addq $0x50, %rax
movq %rax, 0x1f0(%rsp)
movq 0x1f0(%rsp), %rax
vmovups (%rax), %ymm0
movq 0xc8(%rsp), %rax
vmovaps %ymm0, 0xa0(%rax)
movq 0xc8(%rsp), %rax
vmovaps (%rax), %ymm1
vmovaps 0xa0(%rax), %ymm0
vmovaps %ymm1, 0x260(%rsp)
vmovaps %ymm0, 0x240(%rsp)
vmovaps 0x260(%rsp), %ymm0
vmovaps 0x240(%rsp), %ymm1
vpaddw %ymm1, %ymm0, %ymm0
vmovaps %ymm0, 0x80(%rsp)
movq 0xc8(%rsp), %rax
vmovaps 0x20(%rax), %ymm1
vmovaps 0x80(%rax), %ymm0
vmovaps %ymm1, 0x220(%rsp)
vmovaps %ymm0, 0x200(%rsp)
vmovaps 0x220(%rsp), %ymm0
vmovaps 0x200(%rsp), %ymm1
vpaddw %ymm1, %ymm0, %ymm0
vmovaps %ymm0, 0xa0(%rsp)
vmovaps 0x80(%rsp), %ymm1
vmovaps 0xa0(%rsp), %ymm0
vmovaps %ymm1, 0x140(%rsp)
vmovaps %ymm0, 0x120(%rsp)
vmovaps 0x140(%rsp), %ymm0
vmovaps 0x120(%rsp), %ymm1
vpunpcklwd %ymm1, %ymm0, %ymm0 # ymm0 = ymm0[0],ymm1[0],ymm0[1],ymm1[1],ymm0[2],ymm1[2],ymm0[3],ymm1[3],ymm0[8],ymm1[8],ymm0[9],ymm1[9],ymm0[10],ymm1[10],ymm0[11],ymm1[11]
vmovaps %ymm0, (%rsp)
movq 0xc8(%rsp), %rax
vmovaps 0x40(%rax), %ymm1
vmovaps 0x60(%rax), %ymm0
vmovaps %ymm1, 0x100(%rsp)
vmovaps %ymm0, 0xe0(%rsp)
vmovaps 0x100(%rsp), %ymm0
vmovaps 0xe0(%rsp), %ymm1
vpunpcklwd %ymm1, %ymm0, %ymm0 # ymm0 = ymm0[0],ymm1[0],ymm0[1],ymm1[1],ymm0[2],ymm1[2],ymm0[3],ymm1[3],ymm0[8],ymm1[8],ymm0[9],ymm1[9],ymm0[10],ymm1[10],ymm0[11],ymm1[11]
vmovaps %ymm0, 0x20(%rsp)
vmovaps 0x80(%rsp), %ymm1
vmovaps 0xa0(%rsp), %ymm0
vmovaps %ymm1, 0x1c0(%rsp)
vmovaps %ymm0, 0x1a0(%rsp)
vmovaps 0x1c0(%rsp), %ymm0
vmovaps 0x1a0(%rsp), %ymm1
vpunpckhwd %ymm1, %ymm0, %ymm0 # ymm0 = ymm0[4],ymm1[4],ymm0[5],ymm1[5],ymm0[6],ymm1[6],ymm0[7],ymm1[7],ymm0[12],ymm1[12],ymm0[13],ymm1[13],ymm0[14],ymm1[14],ymm0[15],ymm1[15]
vmovaps %ymm0, 0x40(%rsp)
movq 0xc8(%rsp), %rax
vmovaps 0x40(%rax), %ymm1
vmovaps 0x60(%rax), %ymm0
vmovaps %ymm1, 0x180(%rsp)
vmovaps %ymm0, 0x160(%rsp)
vmovaps 0x180(%rsp), %ymm0
vmovaps 0x160(%rsp), %ymm1
vpunpckhwd %ymm1, %ymm0, %ymm0 # ymm0 = ymm0[4],ymm1[4],ymm0[5],ymm1[5],ymm0[6],ymm1[6],ymm0[7],ymm1[7],ymm0[12],ymm1[12],ymm0[13],ymm1[13],ymm0[14],ymm1[14],ymm0[15],ymm1[15]
vmovdqa %ymm0, 0x60(%rsp)
movq %rsp, %rdi
movq 0xd0(%rsp), %rsi
movq 0xc0(%rsp), %rdx
vzeroupper
callq 0x8b91a0
movq 0xc8(%rsp), %rax
vmovdqa 0x40(%rax), %ymm0
movq 0xc8(%rsp), %rax
vmovdqa %ymm0, (%rax)
movq 0xc8(%rsp), %rax
vmovdqa 0x60(%rax), %ymm0
movq 0xc8(%rsp), %rax
vmovdqa %ymm0, 0x20(%rax)
movq 0xc8(%rsp), %rax
vmovdqa 0x80(%rax), %ymm0
movq 0xc8(%rsp), %rax
vmovdqa %ymm0, 0x40(%rax)
movq 0xc8(%rsp), %rax
vmovdqa 0xa0(%rax), %ymm0
movq 0xc8(%rsp), %rax
vmovdqa %ymm0, 0x60(%rax)
movq %rbp, %rsp
popq %rbp
vzeroupper
retq
nopl (%rax)
| /m-ab-s[P]aom/third_party/SVT-AV1/convolve_avx2.h |
xy_y_convolve_6tap_16x2_avx2 | static inline void xy_y_convolve_6tap_16x2_avx2(
const int16_t *const src, const ptrdiff_t stride, __m256i s_256[6],
__m256i ss_256[6], __m256i tt_256[6], const __m256i coeffs[3],
__m256i r[4]) {
s_256[5] = _mm256_loadu_si256((__m256i *)(src + 5 * stride));
ss_256[2] = _mm256_unpacklo_epi16(s_256[4], s_256[5]);
ss_256[5] = _mm256_unpackhi_epi16(s_256[4], s_256[5]);
s_256[4] = _mm256_loadu_si256((__m256i *)(src + 6 * stride));
tt_256[2] = _mm256_unpacklo_epi16(s_256[5], s_256[4]);
tt_256[5] = _mm256_unpackhi_epi16(s_256[5], s_256[4]);
xy_y_convolve_6tap_16_avx2(ss_256, coeffs, r + 0);
xy_y_convolve_6tap_16_avx2(tt_256, coeffs, r + 2);
ss_256[0] = ss_256[1];
ss_256[1] = ss_256[2];
ss_256[3] = ss_256[4];
ss_256[4] = ss_256[5];
tt_256[0] = tt_256[1];
tt_256[1] = tt_256[2];
tt_256[3] = tt_256[4];
tt_256[4] = tt_256[5];
} | pushq %rbp
movq %rsp, %rbp
andq $-0x20, %rsp
subq $0x160, %rsp # imm = 0x160
movq 0x10(%rbp), %rax
movq %rdi, 0x38(%rsp)
movq %rsi, 0x30(%rsp)
movq %rdx, 0x28(%rsp)
movq %rcx, 0x20(%rsp)
movq %r8, 0x18(%rsp)
movq %r9, 0x10(%rsp)
movq 0x38(%rsp), %rax
movq 0x30(%rsp), %rcx
leaq (%rcx,%rcx,4), %rcx
leaq (%rax,%rcx,2), %rax
movq %rax, 0x148(%rsp)
movq 0x148(%rsp), %rax
vmovups (%rax), %ymm0
movq 0x28(%rsp), %rax
vmovaps %ymm0, 0xa0(%rax)
movq 0x28(%rsp), %rax
vmovaps 0x80(%rax), %ymm1
vmovaps 0xa0(%rax), %ymm0
vmovaps %ymm1, 0xa0(%rsp)
vmovaps %ymm0, 0x80(%rsp)
vmovaps 0xa0(%rsp), %ymm0
vmovaps 0x80(%rsp), %ymm1
vpunpcklwd %ymm1, %ymm0, %ymm0 # ymm0 = ymm0[0],ymm1[0],ymm0[1],ymm1[1],ymm0[2],ymm1[2],ymm0[3],ymm1[3],ymm0[8],ymm1[8],ymm0[9],ymm1[9],ymm0[10],ymm1[10],ymm0[11],ymm1[11]
movq 0x20(%rsp), %rax
vmovaps %ymm0, 0x40(%rax)
movq 0x28(%rsp), %rax
vmovaps 0x80(%rax), %ymm1
vmovaps 0xa0(%rax), %ymm0
vmovaps %ymm1, 0x120(%rsp)
vmovaps %ymm0, 0x100(%rsp)
vmovaps 0x120(%rsp), %ymm0
vmovaps 0x100(%rsp), %ymm1
vpunpckhwd %ymm1, %ymm0, %ymm0 # ymm0 = ymm0[4],ymm1[4],ymm0[5],ymm1[5],ymm0[6],ymm1[6],ymm0[7],ymm1[7],ymm0[12],ymm1[12],ymm0[13],ymm1[13],ymm0[14],ymm1[14],ymm0[15],ymm1[15]
movq 0x20(%rsp), %rax
vmovaps %ymm0, 0xa0(%rax)
movq 0x38(%rsp), %rax
movq 0x30(%rsp), %rcx
leaq (%rcx,%rcx,2), %rcx
leaq (%rax,%rcx,4), %rax
movq %rax, 0x140(%rsp)
movq 0x140(%rsp), %rax
vmovups (%rax), %ymm0
movq 0x28(%rsp), %rax
vmovaps %ymm0, 0x80(%rax)
movq 0x28(%rsp), %rax
vmovaps 0x80(%rax), %ymm0
vmovaps 0xa0(%rax), %ymm1
vmovaps %ymm1, 0x60(%rsp)
vmovaps %ymm0, 0x40(%rsp)
vmovaps 0x60(%rsp), %ymm0
vmovaps 0x40(%rsp), %ymm1
vpunpcklwd %ymm1, %ymm0, %ymm0 # ymm0 = ymm0[0],ymm1[0],ymm0[1],ymm1[1],ymm0[2],ymm1[2],ymm0[3],ymm1[3],ymm0[8],ymm1[8],ymm0[9],ymm1[9],ymm0[10],ymm1[10],ymm0[11],ymm1[11]
movq 0x18(%rsp), %rax
vmovaps %ymm0, 0x40(%rax)
movq 0x28(%rsp), %rax
vmovaps 0x80(%rax), %ymm0
vmovaps 0xa0(%rax), %ymm1
vmovaps %ymm1, 0xe0(%rsp)
vmovaps %ymm0, 0xc0(%rsp)
vmovaps 0xe0(%rsp), %ymm0
vmovaps 0xc0(%rsp), %ymm1
vpunpckhwd %ymm1, %ymm0, %ymm0 # ymm0 = ymm0[4],ymm1[4],ymm0[5],ymm1[5],ymm0[6],ymm1[6],ymm0[7],ymm1[7],ymm0[12],ymm1[12],ymm0[13],ymm1[13],ymm0[14],ymm1[14],ymm0[15],ymm1[15]
movq 0x18(%rsp), %rax
vmovdqa %ymm0, 0xa0(%rax)
movq 0x20(%rsp), %rdi
movq 0x10(%rsp), %rsi
movq 0x10(%rbp), %rdx
vzeroupper
callq 0x8ba7b0
movq 0x18(%rsp), %rdi
movq 0x10(%rsp), %rsi
movq 0x10(%rbp), %rdx
addq $0x40, %rdx
callq 0x8ba7b0
movq 0x20(%rsp), %rax
vmovdqa 0x20(%rax), %ymm0
movq 0x20(%rsp), %rax
vmovdqa %ymm0, (%rax)
movq 0x20(%rsp), %rax
vmovdqa 0x40(%rax), %ymm0
movq 0x20(%rsp), %rax
vmovdqa %ymm0, 0x20(%rax)
movq 0x20(%rsp), %rax
vmovdqa 0x80(%rax), %ymm0
movq 0x20(%rsp), %rax
vmovdqa %ymm0, 0x60(%rax)
movq 0x20(%rsp), %rax
vmovdqa 0xa0(%rax), %ymm0
movq 0x20(%rsp), %rax
vmovdqa %ymm0, 0x80(%rax)
movq 0x18(%rsp), %rax
vmovdqa 0x20(%rax), %ymm0
movq 0x18(%rsp), %rax
vmovdqa %ymm0, (%rax)
movq 0x18(%rsp), %rax
vmovdqa 0x40(%rax), %ymm0
movq 0x18(%rsp), %rax
vmovdqa %ymm0, 0x20(%rax)
movq 0x18(%rsp), %rax
vmovdqa 0x80(%rax), %ymm0
movq 0x18(%rsp), %rax
vmovdqa %ymm0, 0x60(%rax)
movq 0x18(%rsp), %rax
vmovdqa 0xa0(%rax), %ymm0
movq 0x18(%rsp), %rax
vmovdqa %ymm0, 0x80(%rax)
movq %rbp, %rsp
popq %rbp
vzeroupper
retq
nopw %cs:(%rax,%rax)
| /m-ab-s[P]aom/third_party/SVT-AV1/convolve_avx2.h |
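xy_y_convolve_6tap_16x2_avx2 keeps two interleaved sliding windows, ss_256 for the even output row and tt_256 (offset by one source row) for the odd one, so each call emits two rows while loading only two new source rows. A scalar sketch of the same two-rows-per-iteration vertical 6-tap loop; the function name, buffer layout and tap values are illustrative only.

#include <stddef.h>
#include <stdint.h>

// Two output rows per iteration over one pixel column, mirroring the ss/tt
// double window above (r0 uses rows y..y+5, r1 uses rows y+1..y+6).
static void vert_6tap_col_x2(const int16_t *src, ptrdiff_t stride, int h,
                             const int16_t c[6], int32_t *out) {
  for (int y = 0; y < h; y += 2) {  // h assumed even
    int32_t r0 = 0, r1 = 0;
    for (int k = 0; k < 6; ++k) {
      r0 += (int32_t)src[(y + k) * stride] * c[k];
      r1 += (int32_t)src[(y + 1 + k) * stride] * c[k];
    }
    out[y + 0] = r0;
    out[y + 1] = r1;
  }
}

int main(void) {
  const int16_t c[6] = { 3, -13, 74, 74, -13, 3 };
  int16_t col[16];
  int32_t out[8];
  for (int i = 0; i < 16; ++i) col[i] = (int16_t)(i * 10);
  vert_6tap_col_x2(col, 1, 8, c, out);  // stride 1: contiguous column
  return 0;
}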
xy_y_convolve_6tap_16x2_half_pel_avx2 | static inline void xy_y_convolve_6tap_16x2_half_pel_avx2(
const int16_t *const src, const ptrdiff_t stride, __m256i s_256[6],
__m256i ss_256[4], const __m256i coeffs[2], __m256i r[4]) {
__m256i a_256[2];
s_256[5] = _mm256_loadu_si256((__m256i *)(src + 5 * stride));
a_256[0] = _mm256_add_epi16(s_256[0], s_256[5]);
a_256[1] = _mm256_add_epi16(s_256[1], s_256[4]);
ss_256[0] = _mm256_unpacklo_epi16(a_256[0], a_256[1]);
ss_256[1] = _mm256_unpacklo_epi16(s_256[2], s_256[3]);
ss_256[2] = _mm256_unpackhi_epi16(a_256[0], a_256[1]);
ss_256[3] = _mm256_unpackhi_epi16(s_256[2], s_256[3]);
xy_y_convolve_4tap_16_avx2(ss_256, coeffs, r + 0);
a_256[1] = _mm256_add_epi16(s_256[2], s_256[5]);
s_256[0] = s_256[2];
s_256[2] = s_256[4];
s_256[4] = _mm256_loadu_si256((__m256i *)(src + 6 * stride));
a_256[0] = _mm256_add_epi16(s_256[1], s_256[4]);
s_256[1] = s_256[3];
s_256[3] = s_256[5];
ss_256[0] = _mm256_unpacklo_epi16(a_256[0], a_256[1]);
ss_256[1] = _mm256_unpacklo_epi16(s_256[1], s_256[2]);
ss_256[2] = _mm256_unpackhi_epi16(a_256[0], a_256[1]);
ss_256[3] = _mm256_unpackhi_epi16(s_256[1], s_256[2]);
xy_y_convolve_4tap_16_avx2(ss_256, coeffs, r + 2);
} | pushq %rbp
movq %rsp, %rbp
andq $-0x20, %rsp
subq $0x3c0, %rsp # imm = 0x3C0
movq %rdi, 0x78(%rsp)
movq %rsi, 0x70(%rsp)
movq %rdx, 0x68(%rsp)
movq %rcx, 0x60(%rsp)
movq %r8, 0x58(%rsp)
movq %r9, 0x50(%rsp)
movq 0x78(%rsp), %rax
movq 0x70(%rsp), %rcx
leaq (%rcx,%rcx,4), %rcx
leaq (%rax,%rcx,2), %rax
movq %rax, 0x298(%rsp)
movq 0x298(%rsp), %rax
vmovups (%rax), %ymm0
movq 0x68(%rsp), %rax
vmovaps %ymm0, 0xa0(%rax)
movq 0x68(%rsp), %rax
vmovaps (%rax), %ymm1
vmovaps 0xa0(%rax), %ymm0
vmovaps %ymm1, 0x380(%rsp)
vmovaps %ymm0, 0x360(%rsp)
vmovaps 0x380(%rsp), %ymm0
vmovaps 0x360(%rsp), %ymm1
vpaddw %ymm1, %ymm0, %ymm0
vmovaps %ymm0, (%rsp)
movq 0x68(%rsp), %rax
vmovaps 0x20(%rax), %ymm1
vmovaps 0x80(%rax), %ymm0
vmovaps %ymm1, 0x340(%rsp)
vmovaps %ymm0, 0x320(%rsp)
vmovaps 0x340(%rsp), %ymm0
vmovaps 0x320(%rsp), %ymm1
vpaddw %ymm1, %ymm0, %ymm0
vmovaps %ymm0, 0x20(%rsp)
vmovaps (%rsp), %ymm1
vmovaps 0x20(%rsp), %ymm0
vmovaps %ymm1, 0x160(%rsp)
vmovaps %ymm0, 0x140(%rsp)
vmovaps 0x160(%rsp), %ymm0
vmovaps 0x140(%rsp), %ymm1
vpunpcklwd %ymm1, %ymm0, %ymm0 # ymm0 = ymm0[0],ymm1[0],ymm0[1],ymm1[1],ymm0[2],ymm1[2],ymm0[3],ymm1[3],ymm0[8],ymm1[8],ymm0[9],ymm1[9],ymm0[10],ymm1[10],ymm0[11],ymm1[11]
movq 0x60(%rsp), %rax
vmovaps %ymm0, (%rax)
movq 0x68(%rsp), %rax
vmovaps 0x40(%rax), %ymm1
vmovaps 0x60(%rax), %ymm0
vmovaps %ymm1, 0x120(%rsp)
vmovaps %ymm0, 0x100(%rsp)
vmovaps 0x120(%rsp), %ymm0
vmovaps 0x100(%rsp), %ymm1
vpunpcklwd %ymm1, %ymm0, %ymm0 # ymm0 = ymm0[0],ymm1[0],ymm0[1],ymm1[1],ymm0[2],ymm1[2],ymm0[3],ymm1[3],ymm0[8],ymm1[8],ymm0[9],ymm1[9],ymm0[10],ymm1[10],ymm0[11],ymm1[11]
movq 0x60(%rsp), %rax
vmovaps %ymm0, 0x20(%rax)
vmovaps (%rsp), %ymm1
vmovaps 0x20(%rsp), %ymm0
vmovaps %ymm1, 0x260(%rsp)
vmovaps %ymm0, 0x240(%rsp)
vmovaps 0x260(%rsp), %ymm0
vmovaps 0x240(%rsp), %ymm1
vpunpckhwd %ymm1, %ymm0, %ymm0 # ymm0 = ymm0[4],ymm1[4],ymm0[5],ymm1[5],ymm0[6],ymm1[6],ymm0[7],ymm1[7],ymm0[12],ymm1[12],ymm0[13],ymm1[13],ymm0[14],ymm1[14],ymm0[15],ymm1[15]
movq 0x60(%rsp), %rax
vmovaps %ymm0, 0x40(%rax)
movq 0x68(%rsp), %rax
vmovaps 0x40(%rax), %ymm1
vmovaps 0x60(%rax), %ymm0
vmovaps %ymm1, 0x220(%rsp)
vmovaps %ymm0, 0x200(%rsp)
vmovaps 0x220(%rsp), %ymm0
vmovaps 0x200(%rsp), %ymm1
vpunpckhwd %ymm1, %ymm0, %ymm0 # ymm0 = ymm0[4],ymm1[4],ymm0[5],ymm1[5],ymm0[6],ymm1[6],ymm0[7],ymm1[7],ymm0[12],ymm1[12],ymm0[13],ymm1[13],ymm0[14],ymm1[14],ymm0[15],ymm1[15]
movq 0x60(%rsp), %rax
vmovaps %ymm0, 0x60(%rax)
movq 0x60(%rsp), %rdi
movq 0x58(%rsp), %rsi
movq 0x50(%rsp), %rdx
vzeroupper
callq 0x8b91a0
movq 0x68(%rsp), %rax
vmovaps 0x40(%rax), %ymm1
vmovaps 0xa0(%rax), %ymm0
vmovaps %ymm1, 0x300(%rsp)
vmovaps %ymm0, 0x2e0(%rsp)
vmovaps 0x300(%rsp), %ymm0
vmovaps 0x2e0(%rsp), %ymm1
vpaddw %ymm1, %ymm0, %ymm0
vmovaps %ymm0, 0x20(%rsp)
movq 0x68(%rsp), %rax
vmovaps 0x40(%rax), %ymm0
vmovaps %ymm0, (%rax)
movq 0x68(%rsp), %rax
vmovaps 0x80(%rax), %ymm0
vmovaps %ymm0, 0x40(%rax)
movq 0x78(%rsp), %rax
movq 0x70(%rsp), %rcx
leaq (%rcx,%rcx,2), %rcx
leaq (%rax,%rcx,4), %rax
movq %rax, 0x290(%rsp)
movq 0x290(%rsp), %rax
vmovups (%rax), %ymm0
movq 0x68(%rsp), %rax
vmovaps %ymm0, 0x80(%rax)
movq 0x68(%rsp), %rax
vmovaps 0x20(%rax), %ymm1
vmovaps 0x80(%rax), %ymm0
vmovaps %ymm1, 0x2c0(%rsp)
vmovaps %ymm0, 0x2a0(%rsp)
vmovaps 0x2c0(%rsp), %ymm0
vmovaps 0x2a0(%rsp), %ymm1
vpaddw %ymm1, %ymm0, %ymm0
vmovaps %ymm0, (%rsp)
movq 0x68(%rsp), %rax
vmovaps 0x60(%rax), %ymm0
vmovaps %ymm0, 0x20(%rax)
movq 0x68(%rsp), %rax
vmovaps 0xa0(%rax), %ymm0
vmovaps %ymm0, 0x60(%rax)
vmovaps (%rsp), %ymm1
vmovaps 0x20(%rsp), %ymm0
vmovaps %ymm1, 0xe0(%rsp)
vmovaps %ymm0, 0xc0(%rsp)
vmovaps 0xe0(%rsp), %ymm0
vmovaps 0xc0(%rsp), %ymm1
vpunpcklwd %ymm1, %ymm0, %ymm0 # ymm0 = ymm0[0],ymm1[0],ymm0[1],ymm1[1],ymm0[2],ymm1[2],ymm0[3],ymm1[3],ymm0[8],ymm1[8],ymm0[9],ymm1[9],ymm0[10],ymm1[10],ymm0[11],ymm1[11]
movq 0x60(%rsp), %rax
vmovaps %ymm0, (%rax)
movq 0x68(%rsp), %rax
vmovaps 0x20(%rax), %ymm1
vmovaps 0x40(%rax), %ymm0
vmovaps %ymm1, 0xa0(%rsp)
vmovaps %ymm0, 0x80(%rsp)
vmovaps 0xa0(%rsp), %ymm0
vmovaps 0x80(%rsp), %ymm1
vpunpcklwd %ymm1, %ymm0, %ymm0 # ymm0 = ymm0[0],ymm1[0],ymm0[1],ymm1[1],ymm0[2],ymm1[2],ymm0[3],ymm1[3],ymm0[8],ymm1[8],ymm0[9],ymm1[9],ymm0[10],ymm1[10],ymm0[11],ymm1[11]
movq 0x60(%rsp), %rax
vmovaps %ymm0, 0x20(%rax)
vmovaps (%rsp), %ymm1
vmovaps 0x20(%rsp), %ymm0
vmovaps %ymm1, 0x1e0(%rsp)
vmovaps %ymm0, 0x1c0(%rsp)
vmovaps 0x1e0(%rsp), %ymm0
vmovaps 0x1c0(%rsp), %ymm1
vpunpckhwd %ymm1, %ymm0, %ymm0 # ymm0 = ymm0[4],ymm1[4],ymm0[5],ymm1[5],ymm0[6],ymm1[6],ymm0[7],ymm1[7],ymm0[12],ymm1[12],ymm0[13],ymm1[13],ymm0[14],ymm1[14],ymm0[15],ymm1[15]
movq 0x60(%rsp), %rax
vmovaps %ymm0, 0x40(%rax)
movq 0x68(%rsp), %rax
vmovaps 0x20(%rax), %ymm1
vmovaps 0x40(%rax), %ymm0
vmovaps %ymm1, 0x1a0(%rsp)
vmovaps %ymm0, 0x180(%rsp)
vmovaps 0x1a0(%rsp), %ymm0
vmovaps 0x180(%rsp), %ymm1
vpunpckhwd %ymm1, %ymm0, %ymm0 # ymm0 = ymm0[4],ymm1[4],ymm0[5],ymm1[5],ymm0[6],ymm1[6],ymm0[7],ymm1[7],ymm0[12],ymm1[12],ymm0[13],ymm1[13],ymm0[14],ymm1[14],ymm0[15],ymm1[15]
movq 0x60(%rsp), %rax
vmovdqa %ymm0, 0x60(%rax)
movq 0x60(%rsp), %rdi
movq 0x58(%rsp), %rsi
movq 0x50(%rsp), %rdx
addq $0x40, %rdx
vzeroupper
callq 0x8b91a0
movq %rbp, %rsp
popq %rbp
retq
nop
| /m-ab-s[P]aom/third_party/SVT-AV1/convolve_avx2.h |
prepare_coeffs_8tap_sse2 | static inline void prepare_coeffs_8tap_sse2(
const InterpFilterParams *const filter_params, const int32_t subpel_q4,
__m128i *const coeffs /* [4] */) {
const int16_t *filter = av1_get_interp_filter_subpel_kernel(
filter_params, subpel_q4 & SUBPEL_MASK);
const __m128i coeff = _mm_loadu_si128((__m128i *)filter);
// coeffs 0 1 0 1 0 1 0 1
coeffs[0] = _mm_shuffle_epi32(coeff, 0x00);
// coeffs 2 3 2 3 2 3 2 3
coeffs[1] = _mm_shuffle_epi32(coeff, 0x55);
// coeffs 4 5 4 5 4 5 4 5
coeffs[2] = _mm_shuffle_epi32(coeff, 0xaa);
// coeffs 6 7 6 7 6 7 6 7
coeffs[3] = _mm_shuffle_epi32(coeff, 0xff);
} | subq $0x38, %rsp
movq %rdi, 0x28(%rsp)
movl %esi, 0x24(%rsp)
movq %rdx, 0x18(%rsp)
movq 0x28(%rsp), %rdi
movl 0x24(%rsp), %esi
andl $0xf, %esi
callq 0x8a82e0
movq %rax, 0x10(%rsp)
movq 0x10(%rsp), %rax
movq %rax, 0x30(%rsp)
movq 0x30(%rsp), %rax
vmovdqu (%rax), %xmm0
vmovdqa %xmm0, (%rsp)
vpbroadcastd (%rsp), %xmm0
movq 0x18(%rsp), %rax
vmovdqa %xmm0, (%rax)
vpbroadcastd 0x4(%rsp), %xmm0
movq 0x18(%rsp), %rax
vmovdqa %xmm0, 0x10(%rax)
vpbroadcastd 0x8(%rsp), %xmm0
movq 0x18(%rsp), %rax
vmovdqa %xmm0, 0x20(%rax)
vpbroadcastd 0xc(%rsp), %xmm0
movq 0x18(%rsp), %rax
vmovdqa %xmm0, 0x30(%rax)
addq $0x38, %rsp
retq
nopw (%rax,%rax)
| /m-ab-s[P]aom/third_party/SVT-AV1/convolve_avx2.h |
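prepare_coeffs_8tap_sse2 produces, for k = 0..3, a register whose every 32-bit lane is the pack of taps 2k and 2k+1, exactly the operand shape madd_epi16 wants. A short sketch checking one of those registers against a hand-built pack; the main() and tap values are illustrative, SSE2 intrinsics only.

#include <immintrin.h>
#include <stdint.h>
#include <stdio.h>

int main(void) {
  const int16_t filter[8] = { -1, 3, -11, 73, 73, -11, 3, -1 };
  const __m128i coeff = _mm_loadu_si128((const __m128i *)filter);
  // 0x55 replicates dword 1 of the kernel, i.e. the 32-bit pack of taps 2 and 3.
  const __m128i c23 = _mm_shuffle_epi32(coeff, 0x55);
  const int32_t pack23 =
      ((int32_t)(uint16_t)filter[3] << 16) | (uint16_t)filter[2];
  const __m128i ref = _mm_set1_epi32(pack23);
  // 65535 means all 16 bytes compared equal.
  printf("%d\n", _mm_movemask_epi8(_mm_cmpeq_epi8(c23, ref)));
  return 0;
}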
xy_y_convolve_8tap_8x2_avx2 | static inline void xy_y_convolve_8tap_8x2_avx2(const int16_t *const src,
__m256i ss_256[8],
const __m256i coeffs[4],
__m256i r[2]) {
__m256i s_256[2];
s_256[0] = _mm256_loadu_si256((__m256i *)(src + 6 * 8));
s_256[1] = _mm256_loadu_si256((__m256i *)(src + 7 * 8));
ss_256[3] = _mm256_unpacklo_epi16(s_256[0], s_256[1]);
ss_256[7] = _mm256_unpackhi_epi16(s_256[0], s_256[1]);
xy_y_convolve_8tap_16_avx2(ss_256, coeffs, r);
ss_256[0] = ss_256[1];
ss_256[1] = ss_256[2];
ss_256[2] = ss_256[3];
ss_256[4] = ss_256[5];
ss_256[5] = ss_256[6];
ss_256[6] = ss_256[7];
} | pushq %rbp
movq %rsp, %rbp
andq $-0x20, %rsp
subq $0x100, %rsp # imm = 0x100
movq %rdi, 0x58(%rsp)
movq %rsi, 0x50(%rsp)
movq %rdx, 0x48(%rsp)
movq %rcx, 0x40(%rsp)
movq 0x58(%rsp), %rax
addq $0x60, %rax
movq %rax, 0xe8(%rsp)
movq 0xe8(%rsp), %rax
vmovups (%rax), %ymm0
vmovaps %ymm0, (%rsp)
movq 0x58(%rsp), %rax
addq $0x70, %rax
movq %rax, 0xe0(%rsp)
movq 0xe0(%rsp), %rax
vmovups (%rax), %ymm0
vmovaps %ymm0, 0x20(%rsp)
vmovaps (%rsp), %ymm1
vmovaps 0x20(%rsp), %ymm0
vmovaps %ymm1, 0x80(%rsp)
vmovaps %ymm0, 0x60(%rsp)
vmovaps 0x80(%rsp), %ymm0
vmovaps 0x60(%rsp), %ymm1
vpunpcklwd %ymm1, %ymm0, %ymm0 # ymm0 = ymm0[0],ymm1[0],ymm0[1],ymm1[1],ymm0[2],ymm1[2],ymm0[3],ymm1[3],ymm0[8],ymm1[8],ymm0[9],ymm1[9],ymm0[10],ymm1[10],ymm0[11],ymm1[11]
movq 0x50(%rsp), %rax
vmovaps %ymm0, 0x60(%rax)
vmovaps (%rsp), %ymm1
vmovaps 0x20(%rsp), %ymm0
vmovaps %ymm1, 0xc0(%rsp)
vmovaps %ymm0, 0xa0(%rsp)
vmovaps 0xc0(%rsp), %ymm0
vmovaps 0xa0(%rsp), %ymm1
vpunpckhwd %ymm1, %ymm0, %ymm0 # ymm0 = ymm0[4],ymm1[4],ymm0[5],ymm1[5],ymm0[6],ymm1[6],ymm0[7],ymm1[7],ymm0[12],ymm1[12],ymm0[13],ymm1[13],ymm0[14],ymm1[14],ymm0[15],ymm1[15]
movq 0x50(%rsp), %rax
vmovdqa %ymm0, 0xe0(%rax)
movq 0x50(%rsp), %rdi
movq 0x48(%rsp), %rsi
movq 0x40(%rsp), %rdx
vzeroupper
callq 0x8bbdd0
movq 0x50(%rsp), %rax
vmovdqa 0x20(%rax), %ymm0
movq 0x50(%rsp), %rax
vmovdqa %ymm0, (%rax)
movq 0x50(%rsp), %rax
vmovdqa 0x40(%rax), %ymm0
movq 0x50(%rsp), %rax
vmovdqa %ymm0, 0x20(%rax)
movq 0x50(%rsp), %rax
vmovdqa 0x60(%rax), %ymm0
movq 0x50(%rsp), %rax
vmovdqa %ymm0, 0x40(%rax)
movq 0x50(%rsp), %rax
vmovdqa 0xa0(%rax), %ymm0
movq 0x50(%rsp), %rax
vmovdqa %ymm0, 0x80(%rax)
movq 0x50(%rsp), %rax
vmovdqa 0xc0(%rax), %ymm0
movq 0x50(%rsp), %rax
vmovdqa %ymm0, 0xa0(%rax)
movq 0x50(%rsp), %rax
vmovdqa 0xe0(%rax), %ymm0
movq 0x50(%rsp), %rax
vmovdqa %ymm0, 0xc0(%rax)
movq %rbp, %rsp
popq %rbp
vzeroupper
retq
nopw (%rax,%rax)
| /m-ab-s[P]aom/third_party/SVT-AV1/convolve_avx2.h |
xy_y_convolve_8tap_8x2_half_pel_avx2 | static inline void xy_y_convolve_8tap_8x2_half_pel_avx2(
const int16_t *const src, const __m256i coeffs[2], __m256i s_256[8],
__m256i r[2]) {
__m256i a_256[4], ss_256[4];
s_256[6] = _mm256_loadu_si256((__m256i *)(src + 6 * 8));
s_256[7] = _mm256_loadu_si256((__m256i *)(src + 7 * 8));
a_256[0] = _mm256_add_epi16(s_256[0], s_256[7]);
a_256[1] = _mm256_add_epi16(s_256[1], s_256[6]);
a_256[2] = _mm256_add_epi16(s_256[2], s_256[5]);
a_256[3] = _mm256_add_epi16(s_256[3], s_256[4]);
ss_256[0] = _mm256_unpacklo_epi16(a_256[0], a_256[1]);
ss_256[1] = _mm256_unpacklo_epi16(a_256[2], a_256[3]);
ss_256[2] = _mm256_unpackhi_epi16(a_256[0], a_256[1]);
ss_256[3] = _mm256_unpackhi_epi16(a_256[2], a_256[3]);
xy_y_convolve_4tap_16_avx2(ss_256, coeffs, r);
s_256[0] = s_256[2];
s_256[1] = s_256[3];
s_256[2] = s_256[4];
s_256[3] = s_256[5];
s_256[4] = s_256[6];
s_256[5] = s_256[7];
} | pushq %rbp
movq %rsp, %rbp
andq $-0x20, %rsp
subq $0x360, %rsp # imm = 0x360
movq %rdi, 0x118(%rsp)
movq %rsi, 0x110(%rsp)
movq %rdx, 0x108(%rsp)
movq %rcx, 0x100(%rsp)
movq 0x118(%rsp), %rax
addq $0x60, %rax
movq %rax, 0x238(%rsp)
movq 0x238(%rsp), %rax
vmovups (%rax), %ymm0
movq 0x108(%rsp), %rax
vmovaps %ymm0, 0xc0(%rax)
movq 0x118(%rsp), %rax
addq $0x70, %rax
movq %rax, 0x230(%rsp)
movq 0x230(%rsp), %rax
vmovups (%rax), %ymm0
movq 0x108(%rsp), %rax
vmovaps %ymm0, 0xe0(%rax)
movq 0x108(%rsp), %rax
vmovaps (%rax), %ymm1
vmovaps 0xe0(%rax), %ymm0
vmovaps %ymm1, 0x320(%rsp)
vmovaps %ymm0, 0x300(%rsp)
vmovaps 0x320(%rsp), %ymm0
vmovaps 0x300(%rsp), %ymm1
vpaddw %ymm1, %ymm0, %ymm0
vmovaps %ymm0, 0x80(%rsp)
movq 0x108(%rsp), %rax
vmovaps 0x20(%rax), %ymm1
vmovaps 0xc0(%rax), %ymm0
vmovaps %ymm1, 0x2e0(%rsp)
vmovaps %ymm0, 0x2c0(%rsp)
vmovaps 0x2e0(%rsp), %ymm0
vmovaps 0x2c0(%rsp), %ymm1
vpaddw %ymm1, %ymm0, %ymm0
vmovaps %ymm0, 0xa0(%rsp)
movq 0x108(%rsp), %rax
vmovaps 0x40(%rax), %ymm1
vmovaps 0xa0(%rax), %ymm0
vmovaps %ymm1, 0x2a0(%rsp)
vmovaps %ymm0, 0x280(%rsp)
vmovaps 0x2a0(%rsp), %ymm0
vmovaps 0x280(%rsp), %ymm1
vpaddw %ymm1, %ymm0, %ymm0
vmovaps %ymm0, 0xc0(%rsp)
movq 0x108(%rsp), %rax
vmovaps 0x60(%rax), %ymm1
vmovaps 0x80(%rax), %ymm0
vmovaps %ymm1, 0x260(%rsp)
vmovaps %ymm0, 0x240(%rsp)
vmovaps 0x260(%rsp), %ymm0
vmovaps 0x240(%rsp), %ymm1
vpaddw %ymm1, %ymm0, %ymm0
vmovaps %ymm0, 0xe0(%rsp)
vmovaps 0x80(%rsp), %ymm1
vmovaps 0xa0(%rsp), %ymm0
vmovaps %ymm1, 0x180(%rsp)
vmovaps %ymm0, 0x160(%rsp)
vmovaps 0x180(%rsp), %ymm0
vmovaps 0x160(%rsp), %ymm1
vpunpcklwd %ymm1, %ymm0, %ymm0 # ymm0 = ymm0[0],ymm1[0],ymm0[1],ymm1[1],ymm0[2],ymm1[2],ymm0[3],ymm1[3],ymm0[8],ymm1[8],ymm0[9],ymm1[9],ymm0[10],ymm1[10],ymm0[11],ymm1[11]
vmovaps %ymm0, (%rsp)
vmovaps 0xc0(%rsp), %ymm1
vmovaps 0xe0(%rsp), %ymm0
vmovaps %ymm1, 0x140(%rsp)
vmovaps %ymm0, 0x120(%rsp)
vmovaps 0x140(%rsp), %ymm0
vmovaps 0x120(%rsp), %ymm1
vpunpcklwd %ymm1, %ymm0, %ymm0 # ymm0 = ymm0[0],ymm1[0],ymm0[1],ymm1[1],ymm0[2],ymm1[2],ymm0[3],ymm1[3],ymm0[8],ymm1[8],ymm0[9],ymm1[9],ymm0[10],ymm1[10],ymm0[11],ymm1[11]
vmovaps %ymm0, 0x20(%rsp)
vmovaps 0x80(%rsp), %ymm1
vmovaps 0xa0(%rsp), %ymm0
vmovaps %ymm1, 0x200(%rsp)
vmovaps %ymm0, 0x1e0(%rsp)
vmovaps 0x200(%rsp), %ymm0
vmovaps 0x1e0(%rsp), %ymm1
vpunpckhwd %ymm1, %ymm0, %ymm0 # ymm0 = ymm0[4],ymm1[4],ymm0[5],ymm1[5],ymm0[6],ymm1[6],ymm0[7],ymm1[7],ymm0[12],ymm1[12],ymm0[13],ymm1[13],ymm0[14],ymm1[14],ymm0[15],ymm1[15]
vmovaps %ymm0, 0x40(%rsp)
vmovaps 0xc0(%rsp), %ymm1
vmovaps 0xe0(%rsp), %ymm0
vmovaps %ymm1, 0x1c0(%rsp)
vmovaps %ymm0, 0x1a0(%rsp)
vmovaps 0x1c0(%rsp), %ymm0
vmovaps 0x1a0(%rsp), %ymm1
vpunpckhwd %ymm1, %ymm0, %ymm0 # ymm0 = ymm0[4],ymm1[4],ymm0[5],ymm1[5],ymm0[6],ymm1[6],ymm0[7],ymm1[7],ymm0[12],ymm1[12],ymm0[13],ymm1[13],ymm0[14],ymm1[14],ymm0[15],ymm1[15]
vmovdqa %ymm0, 0x60(%rsp)
movq %rsp, %rdi
movq 0x110(%rsp), %rsi
movq 0x100(%rsp), %rdx
vzeroupper
callq 0x8b91a0
movq 0x108(%rsp), %rax
vmovdqa 0x40(%rax), %ymm0
movq 0x108(%rsp), %rax
vmovdqa %ymm0, (%rax)
movq 0x108(%rsp), %rax
vmovdqa 0x60(%rax), %ymm0
movq 0x108(%rsp), %rax
vmovdqa %ymm0, 0x20(%rax)
movq 0x108(%rsp), %rax
vmovdqa 0x80(%rax), %ymm0
movq 0x108(%rsp), %rax
vmovdqa %ymm0, 0x40(%rax)
movq 0x108(%rsp), %rax
vmovdqa 0xa0(%rax), %ymm0
movq 0x108(%rsp), %rax
vmovdqa %ymm0, 0x60(%rax)
movq 0x108(%rsp), %rax
vmovdqa 0xc0(%rax), %ymm0
movq 0x108(%rsp), %rax
vmovdqa %ymm0, 0x80(%rax)
movq 0x108(%rsp), %rax
vmovdqa 0xe0(%rax), %ymm0
movq 0x108(%rsp), %rax
vmovdqa %ymm0, 0xa0(%rax)
movq %rbp, %rsp
popq %rbp
vzeroupper
retq
nopw %cs:(%rax,%rax)
| /m-ab-s[P]aom/third_party/SVT-AV1/convolve_avx2.h |
load_16bit_7rows_avx2 | static inline void load_16bit_7rows_avx2(const int16_t *const src,
const ptrdiff_t stride,
__m256i dst[7]) {
dst[0] = _mm256_loadu_si256((__m256i *)(src + 0 * stride));
dst[1] = _mm256_loadu_si256((__m256i *)(src + 1 * stride));
dst[2] = _mm256_loadu_si256((__m256i *)(src + 2 * stride));
dst[3] = _mm256_loadu_si256((__m256i *)(src + 3 * stride));
dst[4] = _mm256_loadu_si256((__m256i *)(src + 4 * stride));
dst[5] = _mm256_loadu_si256((__m256i *)(src + 5 * stride));
dst[6] = _mm256_loadu_si256((__m256i *)(src + 6 * stride));
} | movq %rdi, -0x40(%rsp)
movq %rsi, -0x48(%rsp)
movq %rdx, -0x50(%rsp)
movq -0x40(%rsp), %rax
imulq $0x0, -0x48(%rsp), %rcx
shlq %rcx
addq %rcx, %rax
movq %rax, -0x8(%rsp)
movq -0x8(%rsp), %rax
vmovdqu (%rax), %ymm0
movq -0x50(%rsp), %rax
vmovdqa %ymm0, (%rax)
movq -0x40(%rsp), %rax
movq -0x48(%rsp), %rcx
shlq $0x0, %rcx
shlq %rcx
addq %rcx, %rax
movq %rax, -0x10(%rsp)
movq -0x10(%rsp), %rax
vmovdqu (%rax), %ymm0
movq -0x50(%rsp), %rax
vmovdqa %ymm0, 0x20(%rax)
movq -0x40(%rsp), %rax
movq -0x48(%rsp), %rcx
shlq %rcx
shlq %rcx
addq %rcx, %rax
movq %rax, -0x18(%rsp)
movq -0x18(%rsp), %rax
vmovdqu (%rax), %ymm0
movq -0x50(%rsp), %rax
vmovdqa %ymm0, 0x40(%rax)
movq -0x40(%rsp), %rax
imulq $0x3, -0x48(%rsp), %rcx
shlq %rcx
addq %rcx, %rax
movq %rax, -0x20(%rsp)
movq -0x20(%rsp), %rax
vmovdqu (%rax), %ymm0
movq -0x50(%rsp), %rax
vmovdqa %ymm0, 0x60(%rax)
movq -0x40(%rsp), %rax
movq -0x48(%rsp), %rcx
shlq $0x2, %rcx
shlq %rcx
addq %rcx, %rax
movq %rax, -0x28(%rsp)
movq -0x28(%rsp), %rax
vmovdqu (%rax), %ymm0
movq -0x50(%rsp), %rax
vmovdqa %ymm0, 0x80(%rax)
movq -0x40(%rsp), %rax
imulq $0x5, -0x48(%rsp), %rcx
shlq %rcx
addq %rcx, %rax
movq %rax, -0x30(%rsp)
movq -0x30(%rsp), %rax
vmovdqu (%rax), %ymm0
movq -0x50(%rsp), %rax
vmovdqa %ymm0, 0xa0(%rax)
movq -0x40(%rsp), %rax
imulq $0x6, -0x48(%rsp), %rcx
shlq %rcx
addq %rcx, %rax
movq %rax, -0x38(%rsp)
movq -0x38(%rsp), %rax
vmovdqu (%rax), %ymm0
movq -0x50(%rsp), %rax
vmovdqa %ymm0, 0xc0(%rax)
vzeroupper
retq
nopw %cs:(%rax,%rax)
| /m-ab-s[P]aom/third_party/SVT-AV1/convolve_avx2.h |
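Loading seven rows up front in load_16bit_7rows_avx2 matches the prologue an 8-tap vertical pass needs: taps-1 rows of history, with the eighth row fetched inside the per-row loop while the window slides down. A scalar sketch of that prologue/steady-state split; names, sizes and tap values are made up for illustration.

#include <stddef.h>
#include <stdint.h>

// Keep the last 8 rows of one pixel column in a small window: fill 7 rows up
// front, then load one new row per output row and slide.
static void vert_8tap_col(const int16_t *src, ptrdiff_t stride, int h,
                          const int16_t c[8], int32_t *out) {
  int16_t win[8];
  for (int k = 0; k < 7; ++k) win[k] = src[k * stride];   // prologue: 7 rows
  for (int y = 0; y < h; ++y) {
    win[7] = src[(y + 7) * stride];                       // one new row
    int32_t acc = 0;
    for (int k = 0; k < 8; ++k) acc += (int32_t)win[k] * c[k];
    out[y] = acc;
    for (int k = 0; k < 7; ++k) win[k] = win[k + 1];      // slide the window
  }
}

int main(void) {
  const int16_t c[8] = { -1, 3, -11, 73, 73, -11, 3, -1 };
  int16_t col[16];
  int32_t out[8];
  for (int i = 0; i < 16; ++i) col[i] = (int16_t)i;
  vert_8tap_col(col, 1, 8, c, out);  // needs h + 7 = 15 source rows
  return 0;
}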
xy_y_convolve_8tap_16x2_half_pel_avx2 | static inline void xy_y_convolve_8tap_16x2_half_pel_avx2(
const int16_t *const src, const ptrdiff_t stride, const __m256i coeffs[4],
__m256i s_256[8], __m256i r[4]) {
__m256i a_256[4], ss_256[4];
s_256[7] = _mm256_loadu_si256((__m256i *)(src + 7 * stride));
a_256[0] = _mm256_add_epi16(s_256[0], s_256[7]);
a_256[1] = _mm256_add_epi16(s_256[1], s_256[6]);
a_256[2] = _mm256_add_epi16(s_256[2], s_256[5]);
a_256[3] = _mm256_add_epi16(s_256[3], s_256[4]);
ss_256[0] = _mm256_unpacklo_epi16(a_256[0], a_256[1]);
ss_256[1] = _mm256_unpacklo_epi16(a_256[2], a_256[3]);
ss_256[2] = _mm256_unpackhi_epi16(a_256[0], a_256[1]);
ss_256[3] = _mm256_unpackhi_epi16(a_256[2], a_256[3]);
xy_y_convolve_4tap_16_avx2(ss_256, coeffs, r + 0);
a_256[1] = _mm256_add_epi16(s_256[2], s_256[7]);
a_256[2] = _mm256_add_epi16(s_256[3], s_256[6]);
a_256[3] = _mm256_add_epi16(s_256[4], s_256[5]);
s_256[0] = s_256[2];
s_256[2] = s_256[4];
s_256[4] = s_256[6];
s_256[6] = _mm256_loadu_si256((__m256i *)(src + 8 * stride));
a_256[0] = _mm256_add_epi16(s_256[1], s_256[6]);
s_256[1] = s_256[3];
s_256[3] = s_256[5];
s_256[5] = s_256[7];
ss_256[0] = _mm256_unpacklo_epi16(a_256[0], a_256[1]);
ss_256[1] = _mm256_unpacklo_epi16(a_256[2], a_256[3]);
ss_256[2] = _mm256_unpackhi_epi16(a_256[0], a_256[1]);
ss_256[3] = _mm256_unpackhi_epi16(a_256[2], a_256[3]);
xy_y_convolve_4tap_16_avx2(ss_256, coeffs, r + 2);
} | pushq %rbp
movq %rsp, %rbp
andq $-0x20, %rsp
subq $0x580, %rsp # imm = 0x580
movq %rdi, 0x138(%rsp)
movq %rsi, 0x130(%rsp)
movq %rdx, 0x128(%rsp)
movq %rcx, 0x120(%rsp)
movq %r8, 0x118(%rsp)
movq 0x138(%rsp), %rax
movq 0x130(%rsp), %rcx
movq %rcx, %rdx
addq %rdx, %rdx
shlq $0x4, %rcx
subq %rdx, %rcx
addq %rcx, %rax
movq %rax, 0x358(%rsp)
movq 0x358(%rsp), %rax
vmovups (%rax), %ymm0
movq 0x120(%rsp), %rax
vmovaps %ymm0, 0xe0(%rax)
movq 0x120(%rsp), %rax
vmovaps (%rax), %ymm1
vmovaps 0xe0(%rax), %ymm0
vmovaps %ymm1, 0x540(%rsp)
vmovaps %ymm0, 0x520(%rsp)
vmovaps 0x540(%rsp), %ymm0
vmovaps 0x520(%rsp), %ymm1
vpaddw %ymm1, %ymm0, %ymm0
vmovaps %ymm0, 0x80(%rsp)
movq 0x120(%rsp), %rax
vmovaps 0x20(%rax), %ymm1
vmovaps 0xc0(%rax), %ymm0
vmovaps %ymm1, 0x500(%rsp)
vmovaps %ymm0, 0x4e0(%rsp)
vmovaps 0x500(%rsp), %ymm0
vmovaps 0x4e0(%rsp), %ymm1
vpaddw %ymm1, %ymm0, %ymm0
vmovaps %ymm0, 0xa0(%rsp)
movq 0x120(%rsp), %rax
vmovaps 0x40(%rax), %ymm1
vmovaps 0xa0(%rax), %ymm0
vmovaps %ymm1, 0x4c0(%rsp)
vmovaps %ymm0, 0x4a0(%rsp)
vmovaps 0x4c0(%rsp), %ymm0
vmovaps 0x4a0(%rsp), %ymm1
vpaddw %ymm1, %ymm0, %ymm0
vmovaps %ymm0, 0xc0(%rsp)
movq 0x120(%rsp), %rax
vmovaps 0x60(%rax), %ymm1
vmovaps 0x80(%rax), %ymm0
vmovaps %ymm1, 0x480(%rsp)
vmovaps %ymm0, 0x460(%rsp)
vmovaps 0x480(%rsp), %ymm0
vmovaps 0x460(%rsp), %ymm1
vpaddw %ymm1, %ymm0, %ymm0
vmovaps %ymm0, 0xe0(%rsp)
vmovaps 0x80(%rsp), %ymm1
vmovaps 0xa0(%rsp), %ymm0
vmovaps %ymm1, 0x220(%rsp)
vmovaps %ymm0, 0x200(%rsp)
vmovaps 0x220(%rsp), %ymm0
vmovaps 0x200(%rsp), %ymm1
vpunpcklwd %ymm1, %ymm0, %ymm0 # ymm0 = ymm0[0],ymm1[0],ymm0[1],ymm1[1],ymm0[2],ymm1[2],ymm0[3],ymm1[3],ymm0[8],ymm1[8],ymm0[9],ymm1[9],ymm0[10],ymm1[10],ymm0[11],ymm1[11]
vmovaps %ymm0, (%rsp)
vmovaps 0xc0(%rsp), %ymm1
vmovaps 0xe0(%rsp), %ymm0
vmovaps %ymm1, 0x1e0(%rsp)
vmovaps %ymm0, 0x1c0(%rsp)
vmovaps 0x1e0(%rsp), %ymm0
vmovaps 0x1c0(%rsp), %ymm1
vpunpcklwd %ymm1, %ymm0, %ymm0 # ymm0 = ymm0[0],ymm1[0],ymm0[1],ymm1[1],ymm0[2],ymm1[2],ymm0[3],ymm1[3],ymm0[8],ymm1[8],ymm0[9],ymm1[9],ymm0[10],ymm1[10],ymm0[11],ymm1[11]
vmovaps %ymm0, 0x20(%rsp)
vmovaps 0x80(%rsp), %ymm1
vmovaps 0xa0(%rsp), %ymm0
vmovaps %ymm1, 0x320(%rsp)
vmovaps %ymm0, 0x300(%rsp)
vmovaps 0x320(%rsp), %ymm0
vmovaps 0x300(%rsp), %ymm1
vpunpckhwd %ymm1, %ymm0, %ymm0 # ymm0 = ymm0[4],ymm1[4],ymm0[5],ymm1[5],ymm0[6],ymm1[6],ymm0[7],ymm1[7],ymm0[12],ymm1[12],ymm0[13],ymm1[13],ymm0[14],ymm1[14],ymm0[15],ymm1[15]
vmovaps %ymm0, 0x40(%rsp)
vmovaps 0xc0(%rsp), %ymm1
vmovaps 0xe0(%rsp), %ymm0
vmovaps %ymm1, 0x2e0(%rsp)
vmovaps %ymm0, 0x2c0(%rsp)
vmovaps 0x2e0(%rsp), %ymm0
vmovaps 0x2c0(%rsp), %ymm1
vpunpckhwd %ymm1, %ymm0, %ymm0 # ymm0 = ymm0[4],ymm1[4],ymm0[5],ymm1[5],ymm0[6],ymm1[6],ymm0[7],ymm1[7],ymm0[12],ymm1[12],ymm0[13],ymm1[13],ymm0[14],ymm1[14],ymm0[15],ymm1[15]
vmovaps %ymm0, 0x60(%rsp)
movq 0x128(%rsp), %rsi
movq 0x118(%rsp), %rdx
movq %rsp, %rdi
vzeroupper
callq 0x8b91a0
movq 0x120(%rsp), %rax
vmovaps 0x40(%rax), %ymm1
vmovaps 0xe0(%rax), %ymm0
vmovaps %ymm1, 0x440(%rsp)
vmovaps %ymm0, 0x420(%rsp)
vmovaps 0x440(%rsp), %ymm0
vmovaps 0x420(%rsp), %ymm1
vpaddw %ymm1, %ymm0, %ymm0
vmovaps %ymm0, 0xa0(%rsp)
movq 0x120(%rsp), %rax
vmovaps 0x60(%rax), %ymm1
vmovaps 0xc0(%rax), %ymm0
vmovaps %ymm1, 0x400(%rsp)
vmovaps %ymm0, 0x3e0(%rsp)
vmovaps 0x400(%rsp), %ymm0
vmovaps 0x3e0(%rsp), %ymm1
vpaddw %ymm1, %ymm0, %ymm0
vmovaps %ymm0, 0xc0(%rsp)
movq 0x120(%rsp), %rax
vmovaps 0x80(%rax), %ymm1
vmovaps 0xa0(%rax), %ymm0
vmovaps %ymm1, 0x3c0(%rsp)
vmovaps %ymm0, 0x3a0(%rsp)
vmovaps 0x3c0(%rsp), %ymm0
vmovaps 0x3a0(%rsp), %ymm1
vpaddw %ymm1, %ymm0, %ymm0
vmovaps %ymm0, 0xe0(%rsp)
movq 0x120(%rsp), %rax
vmovaps 0x40(%rax), %ymm0
vmovaps %ymm0, (%rax)
movq 0x120(%rsp), %rax
vmovaps 0x80(%rax), %ymm0
vmovaps %ymm0, 0x40(%rax)
movq 0x120(%rsp), %rax
vmovaps 0xc0(%rax), %ymm0
vmovaps %ymm0, 0x80(%rax)
movq 0x138(%rsp), %rax
movq 0x130(%rsp), %rcx
shlq $0x4, %rcx
addq %rcx, %rax
movq %rax, 0x350(%rsp)
movq 0x350(%rsp), %rax
vmovups (%rax), %ymm0
movq 0x120(%rsp), %rax
vmovaps %ymm0, 0xc0(%rax)
movq 0x120(%rsp), %rax
vmovaps 0x20(%rax), %ymm1
vmovaps 0xc0(%rax), %ymm0
vmovaps %ymm1, 0x380(%rsp)
vmovaps %ymm0, 0x360(%rsp)
vmovaps 0x380(%rsp), %ymm0
vmovaps 0x360(%rsp), %ymm1
vpaddw %ymm1, %ymm0, %ymm0
vmovaps %ymm0, 0x80(%rsp)
movq 0x120(%rsp), %rax
vmovaps 0x60(%rax), %ymm0
vmovaps %ymm0, 0x20(%rax)
movq 0x120(%rsp), %rax
vmovaps 0xa0(%rax), %ymm0
vmovaps %ymm0, 0x60(%rax)
movq 0x120(%rsp), %rax
vmovaps 0xe0(%rax), %ymm0
vmovaps %ymm0, 0xa0(%rax)
vmovaps 0x80(%rsp), %ymm1
vmovaps 0xa0(%rsp), %ymm0
vmovaps %ymm1, 0x1a0(%rsp)
vmovaps %ymm0, 0x180(%rsp)
vmovaps 0x1a0(%rsp), %ymm0
vmovaps 0x180(%rsp), %ymm1
vpunpcklwd %ymm1, %ymm0, %ymm0 # ymm0 = ymm0[0],ymm1[0],ymm0[1],ymm1[1],ymm0[2],ymm1[2],ymm0[3],ymm1[3],ymm0[8],ymm1[8],ymm0[9],ymm1[9],ymm0[10],ymm1[10],ymm0[11],ymm1[11]
vmovaps %ymm0, (%rsp)
vmovaps 0xc0(%rsp), %ymm1
vmovaps 0xe0(%rsp), %ymm0
vmovaps %ymm1, 0x160(%rsp)
vmovaps %ymm0, 0x140(%rsp)
vmovaps 0x160(%rsp), %ymm0
vmovaps 0x140(%rsp), %ymm1
vpunpcklwd %ymm1, %ymm0, %ymm0 # ymm0 = ymm0[0],ymm1[0],ymm0[1],ymm1[1],ymm0[2],ymm1[2],ymm0[3],ymm1[3],ymm0[8],ymm1[8],ymm0[9],ymm1[9],ymm0[10],ymm1[10],ymm0[11],ymm1[11]
vmovaps %ymm0, 0x20(%rsp)
vmovaps 0x80(%rsp), %ymm1
vmovaps 0xa0(%rsp), %ymm0
vmovaps %ymm1, 0x2a0(%rsp)
vmovaps %ymm0, 0x280(%rsp)
vmovaps 0x2a0(%rsp), %ymm0
vmovaps 0x280(%rsp), %ymm1
vpunpckhwd %ymm1, %ymm0, %ymm0 # ymm0 = ymm0[4],ymm1[4],ymm0[5],ymm1[5],ymm0[6],ymm1[6],ymm0[7],ymm1[7],ymm0[12],ymm1[12],ymm0[13],ymm1[13],ymm0[14],ymm1[14],ymm0[15],ymm1[15]
vmovaps %ymm0, 0x40(%rsp)
vmovaps 0xc0(%rsp), %ymm1
vmovaps 0xe0(%rsp), %ymm0
vmovaps %ymm1, 0x260(%rsp)
vmovaps %ymm0, 0x240(%rsp)
vmovaps 0x260(%rsp), %ymm0
vmovaps 0x240(%rsp), %ymm1
vpunpckhwd %ymm1, %ymm0, %ymm0 # ymm0 = ymm0[4],ymm1[4],ymm0[5],ymm1[5],ymm0[6],ymm1[6],ymm0[7],ymm1[7],ymm0[12],ymm1[12],ymm0[13],ymm1[13],ymm0[14],ymm1[14],ymm0[15],ymm1[15]
vmovdqa %ymm0, 0x60(%rsp)
movq %rsp, %rdi
movq 0x128(%rsp), %rsi
movq 0x118(%rsp), %rdx
addq $0x40, %rdx
vzeroupper
callq 0x8b91a0
movq %rbp, %rsp
popq %rbp
retq
nopw %cs:(%rax,%rax)
| /m-ab-s[P]aom/third_party/SVT-AV1/convolve_avx2.h |
convolve16_8tap_sse2 | static inline __m128i convolve16_8tap_sse2(const __m128i ss[4],
const __m128i coeffs[4]) {
const __m128i res_01 = _mm_madd_epi16(ss[0], coeffs[0]);
const __m128i res_23 = _mm_madd_epi16(ss[1], coeffs[1]);
const __m128i res_45 = _mm_madd_epi16(ss[2], coeffs[2]);
const __m128i res_67 = _mm_madd_epi16(ss[3], coeffs[3]);
const __m128i res_0123 = _mm_add_epi32(res_01, res_23);
const __m128i res_4567 = _mm_add_epi32(res_45, res_67);
return _mm_add_epi32(res_0123, res_4567);
} | subq $0xd8, %rsp
movq %rdi, -0x18(%rsp)
movq %rsi, -0x20(%rsp)
movq -0x18(%rsp), %rax
vmovdqa (%rax), %xmm1
movq -0x20(%rsp), %rax
vmovdqa (%rax), %xmm0
vmovdqa %xmm1, 0x60(%rsp)
vmovdqa %xmm0, 0x50(%rsp)
vmovdqa 0x60(%rsp), %xmm0
vmovdqa 0x50(%rsp), %xmm1
vpmaddwd %xmm1, %xmm0, %xmm0
vmovdqa %xmm0, -0x30(%rsp)
movq -0x18(%rsp), %rax
vmovdqa 0x10(%rax), %xmm1
movq -0x20(%rsp), %rax
vmovdqa 0x10(%rax), %xmm0
vmovdqa %xmm1, 0x40(%rsp)
vmovdqa %xmm0, 0x30(%rsp)
vmovdqa 0x40(%rsp), %xmm0
vmovdqa 0x30(%rsp), %xmm1
vpmaddwd %xmm1, %xmm0, %xmm0
vmovdqa %xmm0, -0x40(%rsp)
movq -0x18(%rsp), %rax
vmovdqa 0x20(%rax), %xmm1
movq -0x20(%rsp), %rax
vmovdqa 0x20(%rax), %xmm0
vmovdqa %xmm1, 0x20(%rsp)
vmovdqa %xmm0, 0x10(%rsp)
vmovdqa 0x20(%rsp), %xmm0
vmovdqa 0x10(%rsp), %xmm1
vpmaddwd %xmm1, %xmm0, %xmm0
vmovdqa %xmm0, -0x50(%rsp)
movq -0x18(%rsp), %rax
vmovdqa 0x30(%rax), %xmm1
movq -0x20(%rsp), %rax
vmovdqa 0x30(%rax), %xmm0
vmovdqa %xmm1, (%rsp)
vmovdqa %xmm0, -0x10(%rsp)
vmovdqa (%rsp), %xmm0
vmovdqa -0x10(%rsp), %xmm1
vpmaddwd %xmm1, %xmm0, %xmm0
vmovdqa %xmm0, -0x60(%rsp)
vmovdqa -0x30(%rsp), %xmm1
vmovdqa -0x40(%rsp), %xmm0
vmovdqa %xmm1, 0xc0(%rsp)
vmovdqa %xmm0, 0xb0(%rsp)
vmovdqa 0xc0(%rsp), %xmm0
vmovdqa 0xb0(%rsp), %xmm1
vpaddd %xmm1, %xmm0, %xmm0
vmovdqa %xmm0, -0x70(%rsp)
vmovdqa -0x50(%rsp), %xmm1
vmovdqa -0x60(%rsp), %xmm0
vmovdqa %xmm1, 0xa0(%rsp)
vmovdqa %xmm0, 0x90(%rsp)
vmovdqa 0xa0(%rsp), %xmm0
vmovdqa 0x90(%rsp), %xmm1
vpaddd %xmm1, %xmm0, %xmm0
vmovdqa %xmm0, -0x80(%rsp)
vmovdqa -0x70(%rsp), %xmm1
vmovdqa -0x80(%rsp), %xmm0
vmovdqa %xmm1, 0x80(%rsp)
vmovdqa %xmm0, 0x70(%rsp)
vmovdqa 0x80(%rsp), %xmm0
vmovdqa 0x70(%rsp), %xmm1
vpaddd %xmm1, %xmm0, %xmm0
addq $0xd8, %rsp
retq
nop
| /m-ab-s[P]aom/third_party/SVT-AV1/convolve_avx2.h |
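convolve16_8tap_sse2 (and the AVX2 twin below) expects the eight source rows of each output pixel interleaved two at a time, which is what the unpack steps in the callers arrange; each madd_epi16 lane then yields c[2k]*s[2k] + c[2k+1]*s[2k+1], and the three adds finish the 8-tap dot product. A scalar restatement of that identity; names and values are illustrative.

#include <assert.h>
#include <stdint.h>

// What one madd_epi16 lane computes on interleaved data.
static int32_t madd_pair(int16_t a0, int16_t a1, int16_t b0, int16_t b1) {
  return (int32_t)a0 * b0 + (int32_t)a1 * b1;
}

int main(void) {
  const int16_t c[8] = { -1, 3, -11, 73, 73, -11, 3, -1 };
  const int16_t s[8] = { 10, 20, 35, 50, 70, 90, 110, 120 };
  int32_t direct = 0;
  for (int k = 0; k < 8; ++k) direct += (int32_t)s[k] * c[k];
  // Pairwise tree used by convolve16_8tap_*: four madds, then three adds.
  const int32_t res_01 = madd_pair(s[0], s[1], c[0], c[1]);
  const int32_t res_23 = madd_pair(s[2], s[3], c[2], c[3]);
  const int32_t res_45 = madd_pair(s[4], s[5], c[4], c[5]);
  const int32_t res_67 = madd_pair(s[6], s[7], c[6], c[7]);
  assert(direct == (res_01 + res_23) + (res_45 + res_67));
  return 0;
}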
convolve16_8tap_avx2 | static inline __m256i convolve16_8tap_avx2(const __m256i ss[4],
const __m256i coeffs[4]) {
const __m256i res_01 = _mm256_madd_epi16(ss[0], coeffs[0]);
const __m256i res_23 = _mm256_madd_epi16(ss[1], coeffs[1]);
const __m256i res_45 = _mm256_madd_epi16(ss[2], coeffs[2]);
const __m256i res_67 = _mm256_madd_epi16(ss[3], coeffs[3]);
const __m256i res_0123 = _mm256_add_epi32(res_01, res_23);
const __m256i res_4567 = _mm256_add_epi32(res_45, res_67);
return _mm256_add_epi32(res_0123, res_4567);
} | pushq %rbp
movq %rsp, %rbp
andq $-0x20, %rsp
subq $0x2c0, %rsp # imm = 0x2C0
movq %rdi, 0xd8(%rsp)
movq %rsi, 0xd0(%rsp)
movq 0xd8(%rsp), %rax
vmovdqa (%rax), %ymm1
movq 0xd0(%rsp), %rax
vmovdqa (%rax), %ymm0
vmovdqa %ymm1, 0x280(%rsp)
vmovdqa %ymm0, 0x260(%rsp)
vmovdqa 0x280(%rsp), %ymm0
vmovdqa 0x260(%rsp), %ymm1
vpmaddwd %ymm1, %ymm0, %ymm0
vmovdqa %ymm0, 0xa0(%rsp)
movq 0xd8(%rsp), %rax
vmovdqa 0x20(%rax), %ymm1
movq 0xd0(%rsp), %rax
vmovdqa 0x20(%rax), %ymm0
vmovdqa %ymm1, 0x240(%rsp)
vmovdqa %ymm0, 0x220(%rsp)
vmovdqa 0x240(%rsp), %ymm0
vmovdqa 0x220(%rsp), %ymm1
vpmaddwd %ymm1, %ymm0, %ymm0
vmovdqa %ymm0, 0x80(%rsp)
movq 0xd8(%rsp), %rax
vmovdqa 0x40(%rax), %ymm1
movq 0xd0(%rsp), %rax
vmovdqa 0x40(%rax), %ymm0
vmovdqa %ymm1, 0x200(%rsp)
vmovdqa %ymm0, 0x1e0(%rsp)
vmovdqa 0x200(%rsp), %ymm0
vmovdqa 0x1e0(%rsp), %ymm1
vpmaddwd %ymm1, %ymm0, %ymm0
vmovdqa %ymm0, 0x60(%rsp)
movq 0xd8(%rsp), %rax
vmovdqa 0x60(%rax), %ymm1
movq 0xd0(%rsp), %rax
vmovdqa 0x60(%rax), %ymm0
vmovdqa %ymm1, 0x1c0(%rsp)
vmovdqa %ymm0, 0x1a0(%rsp)
vmovdqa 0x1c0(%rsp), %ymm0
vmovdqa 0x1a0(%rsp), %ymm1
vpmaddwd %ymm1, %ymm0, %ymm0
vmovdqa %ymm0, 0x40(%rsp)
vmovdqa 0xa0(%rsp), %ymm1
vmovdqa 0x80(%rsp), %ymm0
vmovdqa %ymm1, 0x180(%rsp)
vmovdqa %ymm0, 0x160(%rsp)
vmovdqa 0x180(%rsp), %ymm0
vmovdqa 0x160(%rsp), %ymm1
vpaddd %ymm1, %ymm0, %ymm0
vmovdqa %ymm0, 0x20(%rsp)
vmovdqa 0x60(%rsp), %ymm1
vmovdqa 0x40(%rsp), %ymm0
vmovdqa %ymm1, 0x140(%rsp)
vmovdqa %ymm0, 0x120(%rsp)
vmovdqa 0x140(%rsp), %ymm0
vmovdqa 0x120(%rsp), %ymm1
vpaddd %ymm1, %ymm0, %ymm0
vmovdqa %ymm0, (%rsp)
vmovdqa 0x20(%rsp), %ymm1
vmovdqa (%rsp), %ymm0
vmovdqa %ymm1, 0x100(%rsp)
vmovdqa %ymm0, 0xe0(%rsp)
vmovdqa 0x100(%rsp), %ymm0
vmovdqa 0xe0(%rsp), %ymm1
vpaddd %ymm1, %ymm0, %ymm0
movq %rbp, %rsp
popq %rbp
retq
nopw %cs:(%rax,%rax)
| /m-ab-s[P]aom/third_party/SVT-AV1/convolve_avx2.h |
av1_convolve_y_sr_avx2 | void av1_convolve_y_sr_avx2(const uint8_t *src, int32_t src_stride,
uint8_t *dst, int32_t dst_stride, int32_t w,
int32_t h,
const InterpFilterParams *filter_params_y,
const int32_t subpel_y_qn) {
#if CONFIG_SVT_AV1
const int vert_tap = get_filter_tap(filter_params_y, subpel_y_qn);
if (vert_tap == 12) {
av1_convolve_y_sr_general_avx2(src, src_stride, dst, dst_stride, w, h,
filter_params_y, subpel_y_qn);
} else {
av1_convolve_y_sr_specialized_avx2(src, src_stride, dst, dst_stride, w, h,
filter_params_y, subpel_y_qn);
}
#else
av1_convolve_y_sr_general_avx2(src, src_stride, dst, dst_stride, w, h,
filter_params_y, subpel_y_qn);
#endif
} | pushq %rbp
movq %rsp, %rbp
andq $-0x20, %rsp
subq $0x3ea0, %rsp # imm = 0x3EA0
movl 0x18(%rbp), %eax
movq 0x10(%rbp), %rax
movq %rdi, 0x48(%rsp)
movl %esi, 0x44(%rsp)
movq %rdx, 0x38(%rsp)
movl %ecx, 0x34(%rsp)
movl %r8d, 0x30(%rsp)
movl %r9d, 0x2c(%rsp)
movq 0x10(%rbp), %rdi
movl 0x18(%rbp), %esi
callq 0x8c1af0
movl %eax, 0x28(%rsp)
cmpl $0xc, 0x28(%rsp)
jne 0x8bbe9e
movq 0x48(%rsp), %rdi
movl 0x44(%rsp), %esi
movq 0x38(%rsp), %rdx
movl 0x34(%rsp), %ecx
movl 0x30(%rsp), %r8d
movl 0x2c(%rsp), %r9d
movq 0x10(%rbp), %r10
movl 0x18(%rbp), %eax
movq %r10, (%rsp)
movl %eax, 0x8(%rsp)
callq 0x8c1bb0
jmp 0x8c1ae6
movq 0x48(%rsp), %r10
movl 0x44(%rsp), %r9d
movq 0x38(%rsp), %r8
movl 0x34(%rsp), %edi
movl 0x30(%rsp), %esi
movl 0x2c(%rsp), %edx
movq 0x10(%rbp), %rcx
movl 0x18(%rbp), %eax
movq %r10, 0x2188(%rsp)
movl %r9d, 0x2184(%rsp)
movq %r8, 0x2178(%rsp)
movl %edi, 0x2174(%rsp)
movl %esi, 0x2170(%rsp)
movl %edx, 0x216c(%rsp)
movq %rcx, 0x2160(%rsp)
movl %eax, 0x215c(%rsp)
movq 0x2160(%rsp), %rdi
movl 0x215c(%rsp), %esi
callq 0x8c1af0
movl %eax, 0x207c(%rsp)
cmpl $0x2, 0x207c(%rsp)
jne 0x8bda59
movq 0x2188(%rsp), %rax
movq %rax, 0x2070(%rsp)
movl 0x216c(%rsp), %eax
movl %eax, 0x2154(%rsp)
cmpl $0x8, 0x215c(%rsp)
je 0x8bcc25
cmpl $0x8, 0x2170(%rsp)
jg 0x8bc4c5
movq 0x2160(%rsp), %rdi
movl 0x215c(%rsp), %esi
leaq 0x2110(%rsp), %rdx
callq 0x8cbd20
cmpl $0x2, 0x2170(%rsp)
jne 0x8bc084
movq 0x2070(%rsp), %rax
movswl (%rax), %eax
movl %eax, 0x21ac(%rsp)
vmovd 0x21ac(%rsp), %xmm0
vmovdqa %xmm0, 0x2190(%rsp)
vmovdqa 0x2190(%rsp), %xmm0
vmovdqa %xmm0, 0x2050(%rsp)
movq 0x2070(%rsp), %rdi
movslq 0x2184(%rsp), %rsi
leaq 0x2110(%rsp), %rdx
leaq 0x2050(%rsp), %rcx
callq 0x8cbeb0
vmovdqa %xmm0, 0x2040(%rsp)
vmovdqa 0x2040(%rsp), %xmm0
callq 0x8cc000
vmovdqa %xmm0, 0x2030(%rsp)
vmovdqa 0x2030(%rsp), %xmm0
movq 0x2178(%rsp), %rdi
movslq 0x2174(%rsp), %rsi
callq 0x8cc100
movl 0x2184(%rsp), %ecx
shll %ecx
movq 0x2070(%rsp), %rax
movslq %ecx, %rcx
addq %rcx, %rax
movq %rax, 0x2070(%rsp)
movl 0x2174(%rsp), %ecx
shll %ecx
movq 0x2178(%rsp), %rax
movslq %ecx, %rcx
addq %rcx, %rax
movq %rax, 0x2178(%rsp)
movl 0x2154(%rsp), %eax
subl $0x2, %eax
movl %eax, 0x2154(%rsp)
cmpl $0x0, 0x2154(%rsp)
jne 0x8bbfbf
jmp 0x8bc4c0
cmpl $0x4, 0x2170(%rsp)
jne 0x8bc18f
movq 0x2070(%rsp), %rdi
callq 0x8cc180
movl %eax, 0x21cc(%rsp)
vmovd 0x21cc(%rsp), %xmm0
vmovdqa %xmm0, 0x21b0(%rsp)
vmovdqa 0x21b0(%rsp), %xmm0
vmovdqa %xmm0, 0x2010(%rsp)
movq 0x2070(%rsp), %rdi
movslq 0x2184(%rsp), %rsi
leaq 0x2110(%rsp), %rdx
leaq 0x2010(%rsp), %rcx
callq 0x8cc1a0
vmovdqa %xmm0, 0x2000(%rsp)
vmovdqa 0x2000(%rsp), %xmm0
callq 0x8cc000
vmovdqa %xmm0, 0x1ff0(%rsp)
vmovdqa 0x1ff0(%rsp), %xmm0
movq 0x2178(%rsp), %rdi
movslq 0x2174(%rsp), %rsi
callq 0x8cc2f0
movl 0x2184(%rsp), %ecx
shll %ecx
movq 0x2070(%rsp), %rax
movslq %ecx, %rcx
addq %rcx, %rax
movq %rax, 0x2070(%rsp)
movl 0x2174(%rsp), %ecx
shll %ecx
movq 0x2178(%rsp), %rax
movslq %ecx, %rcx
addq %rcx, %rax
movq %rax, 0x2178(%rsp)
movl 0x2154(%rsp), %eax
subl $0x2, %eax
movl %eax, 0x2154(%rsp)
cmpl $0x0, 0x2154(%rsp)
jne 0x8bc0ca
jmp 0x8bc4be
movq 0x2070(%rsp), %rax
movq %rax, 0x3528(%rsp)
movq 0x3528(%rsp), %rax
vmovq (%rax), %xmm0
vmovdqa %xmm0, 0x3510(%rsp)
vmovdqa 0x3510(%rsp), %xmm0
vmovdqa %xmm0, 0x1fd0(%rsp)
movq 0x2070(%rsp), %rax
movslq 0x2184(%rsp), %rcx
addq %rcx, %rax
movq %rax, 0x3508(%rsp)
movq 0x3508(%rsp), %rax
vmovq (%rax), %xmm0
vmovdqa %xmm0, 0x34f0(%rsp)
vmovdqa 0x34f0(%rsp), %xmm0
vmovdqa %xmm0, 0x1fe0(%rsp)
vmovdqa 0x1fd0(%rsp), %xmm1
vmovdqa 0x1fe0(%rsp), %xmm0
vmovdqa %xmm1, 0x37a0(%rsp)
vmovdqa %xmm0, 0x3790(%rsp)
vmovdqa 0x37a0(%rsp), %xmm0
vmovdqa 0x3790(%rsp), %xmm1
vpunpcklqdq %xmm1, %xmm0, %xmm0 # xmm0 = xmm0[0],xmm1[0]
vmovdqa %xmm0, 0x1fb0(%rsp)
movq 0x2070(%rsp), %rax
movl 0x2184(%rsp), %ecx
addl %ecx, %ecx
movslq %ecx, %rcx
addq %rcx, %rax
movq %rax, 0x34e8(%rsp)
movq 0x34e8(%rsp), %rax
vmovq (%rax), %xmm0
vmovdqa %xmm0, 0x34d0(%rsp)
vmovdqa 0x34d0(%rsp), %xmm0
vmovdqa %xmm0, 0x1fd0(%rsp)
vmovdqa 0x1fd0(%rsp), %xmm0
vmovdqa 0x1fe0(%rsp), %xmm1
vmovdqa %xmm1, 0x3780(%rsp)
vmovdqa %xmm0, 0x3770(%rsp)
vmovdqa 0x3780(%rsp), %xmm0
vmovdqa 0x3770(%rsp), %xmm1
vpunpcklqdq %xmm1, %xmm0, %xmm0 # xmm0 = xmm0[0],xmm1[0]
vmovdqa %xmm0, 0x1fc0(%rsp)
vmovdqa 0x1fb0(%rsp), %xmm1
vmovdqa 0x1fc0(%rsp), %xmm0
vmovdqa %xmm1, 0x37c0(%rsp)
vmovdqa %xmm0, 0x37b0(%rsp)
vmovdqa 0x37c0(%rsp), %xmm0
vmovdqa 0x37b0(%rsp), %xmm1
vpunpcklbw %xmm1, %xmm0, %xmm0 # xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1],xmm0[2],xmm1[2],xmm0[3],xmm1[3],xmm0[4],xmm1[4],xmm0[5],xmm1[5],xmm0[6],xmm1[6],xmm0[7],xmm1[7]
vmovdqa %xmm0, 0x1fa0(%rsp)
vmovdqa 0x1fb0(%rsp), %xmm1
vmovdqa 0x1fc0(%rsp), %xmm0
vmovdqa %xmm1, 0x3960(%rsp)
vmovdqa %xmm0, 0x3950(%rsp)
vmovdqa 0x3960(%rsp), %xmm0
vmovdqa 0x3950(%rsp), %xmm1
vpunpckhbw %xmm1, %xmm0, %xmm0 # xmm0 = xmm0[8],xmm1[8],xmm0[9],xmm1[9],xmm0[10],xmm1[10],xmm0[11],xmm1[11],xmm0[12],xmm1[12],xmm0[13],xmm1[13],xmm0[14],xmm1[14],xmm0[15],xmm1[15]
vmovdqa %xmm0, 0x1f90(%rsp)
leaq 0x1fa0(%rsp), %rdi
leaq 0x2110(%rsp), %rsi
movq %rsi, 0x20(%rsp)
callq 0x8cc390
movq 0x20(%rsp), %rsi
vmovdqa %xmm0, 0x1f80(%rsp)
leaq 0x1f90(%rsp), %rdi
callq 0x8cc390
vmovdqa %xmm0, 0x1f70(%rsp)
vmovdqa 0x1f80(%rsp), %xmm0
callq 0x8cc000
vmovdqa %xmm0, 0x1f60(%rsp)
vmovdqa 0x1f70(%rsp), %xmm0
callq 0x8cc000
vmovdqa %xmm0, 0x1f50(%rsp)
vmovdqa 0x1f60(%rsp), %xmm1
vmovdqa 0x1f50(%rsp), %xmm0
vmovdqa %xmm1, 0x3980(%rsp)
vmovdqa %xmm0, 0x3970(%rsp)
vmovdqa 0x3980(%rsp), %xmm0
vmovdqa 0x3970(%rsp), %xmm1
vpackuswb %xmm1, %xmm0, %xmm0
vmovdqa %xmm0, 0x1f40(%rsp)
movq 0x2178(%rsp), %rax
vmovdqa 0x1f40(%rsp), %xmm0
movq %rax, 0x3448(%rsp)
vmovdqa %xmm0, 0x3430(%rsp)
movq 0x3430(%rsp), %rcx
movq 0x3448(%rsp), %rax
movq %rcx, (%rax)
movq 0x2178(%rsp), %rdi
movslq 0x2174(%rsp), %rax
addq %rax, %rdi
vmovdqa 0x1f40(%rsp), %xmm0
callq 0x8cc3d0
movl 0x2184(%rsp), %ecx
shll %ecx
movq 0x2070(%rsp), %rax
movslq %ecx, %rcx
addq %rcx, %rax
movq %rax, 0x2070(%rsp)
movl 0x2174(%rsp), %ecx
shll %ecx
movq 0x2178(%rsp), %rax
movslq %ecx, %rcx
addq %rcx, %rax
movq %rax, 0x2178(%rsp)
movl 0x2154(%rsp), %eax
subl $0x2, %eax
movl %eax, 0x2154(%rsp)
cmpl $0x0, 0x2154(%rsp)
jne 0x8bc1c6
jmp 0x8bc4be
jmp 0x8bc4c0
jmp 0x8bcc20
movq 0x2160(%rsp), %rdi
movl 0x215c(%rsp), %esi
leaq 0x2080(%rsp), %rdx
callq 0x8cc410
cmpl $0x10, 0x2170(%rsp)
jne 0x8bc5c0
movq 0x2070(%rsp), %rax
movq %rax, 0x2648(%rsp)
movq 0x2648(%rsp), %rax
vmovdqu (%rax), %xmm0
vmovdqa %xmm0, 0x1f20(%rsp)
movq 0x2070(%rsp), %rdi
movslq 0x2184(%rsp), %rsi
leaq 0x2080(%rsp), %rdx
leaq 0x1f20(%rsp), %rcx
leaq 0x1ee0(%rsp), %r8
callq 0x8cc690
movq 0x2178(%rsp), %rsi
movslq 0x2174(%rsp), %rdx
leaq 0x1ee0(%rsp), %rdi
callq 0x8cc860
movl 0x2184(%rsp), %ecx
shll %ecx
movq 0x2070(%rsp), %rax
movslq %ecx, %rcx
addq %rcx, %rax
movq %rax, 0x2070(%rsp)
movl 0x2174(%rsp), %ecx
shll %ecx
movq 0x2178(%rsp), %rax
movslq %ecx, %rcx
addq %rcx, %rax
movq %rax, 0x2178(%rsp)
movl 0x2154(%rsp), %eax
subl $0x2, %eax
movl %eax, 0x2154(%rsp)
cmpl $0x0, 0x2154(%rsp)
jne 0x8bc514
jmp 0x8bcc1e
cmpl $0x20, 0x2170(%rsp)
jne 0x8bc6d9
movq 0x2070(%rsp), %rax
movq %rax, 0x3990(%rsp)
movq 0x3990(%rsp), %rax
vmovdqu (%rax), %ymm0
vmovdqa %ymm0, 0x1ea0(%rsp)
movq 0x2070(%rsp), %rdi
movslq 0x2184(%rsp), %rax
addq %rax, %rdi
vmovdqa 0x1ea0(%rsp), %ymm0
leaq 0x1ea0(%rsp), %rdx
addq $0x20, %rdx
movq 0x2178(%rsp), %rcx
leaq 0x2080(%rsp), %rsi
callq 0x8cc8d0
movq 0x2070(%rsp), %rdi
movl 0x2184(%rsp), %eax
shll %eax
cltq
addq %rax, %rdi
vmovdqa 0x1ec0(%rsp), %ymm0
movq 0x2178(%rsp), %rcx
movslq 0x2174(%rsp), %rax
addq %rax, %rcx
leaq 0x2080(%rsp), %rsi
leaq 0x1ea0(%rsp), %rdx
callq 0x8cc8d0
movl 0x2184(%rsp), %ecx
shll %ecx
movq 0x2070(%rsp), %rax
movslq %ecx, %rcx
addq %rcx, %rax
movq %rax, 0x2070(%rsp)
movl 0x2174(%rsp), %ecx
shll %ecx
movq 0x2178(%rsp), %rax
movslq %ecx, %rcx
addq %rcx, %rax
movq %rax, 0x2178(%rsp)
movl 0x2154(%rsp), %eax
subl $0x2, %eax
movl %eax, 0x2154(%rsp)
cmpl $0x0, 0x2154(%rsp)
jne 0x8bc5f3
jmp 0x8bcc1c
cmpl $0x40, 0x2170(%rsp)
jne 0x8bc8b7
movq 0x2070(%rsp), %rax
movq %rax, 0x39a0(%rsp)
movq 0x39a0(%rsp), %rax
vmovdqu (%rax), %ymm0
vmovdqa %ymm0, 0x1e20(%rsp)
movq 0x2070(%rsp), %rax
addq $0x20, %rax
movq %rax, 0x3998(%rsp)
movq 0x3998(%rsp), %rax
vmovdqu (%rax), %ymm0
vmovdqa %ymm0, 0x1e40(%rsp)
movq 0x2070(%rsp), %rdi
movslq 0x2184(%rsp), %rax
addq %rax, %rdi
vmovdqa 0x1e20(%rsp), %ymm0
leaq 0x1e20(%rsp), %rdx
addq $0x40, %rdx
movq 0x2178(%rsp), %rcx
leaq 0x2080(%rsp), %rsi
callq 0x8cc8d0
movq 0x2070(%rsp), %rdi
movslq 0x2184(%rsp), %rax
addq %rax, %rdi
addq $0x20, %rdi
vmovdqa 0x1e40(%rsp), %ymm0
leaq 0x1e20(%rsp), %rdx
addq $0x40, %rdx
addq $0x20, %rdx
movq 0x2178(%rsp), %rcx
addq $0x20, %rcx
leaq 0x2080(%rsp), %rsi
callq 0x8cc8d0
movq 0x2070(%rsp), %rdi
movl 0x2184(%rsp), %eax
shll %eax
cltq
addq %rax, %rdi
vmovdqa 0x1e60(%rsp), %ymm0
movq 0x2178(%rsp), %rcx
movslq 0x2174(%rsp), %rax
addq %rax, %rcx
leaq 0x2080(%rsp), %rsi
leaq 0x1e20(%rsp), %rdx
callq 0x8cc8d0
movq 0x2070(%rsp), %rdi
movl 0x2184(%rsp), %eax
shll %eax
cltq
addq %rax, %rdi
addq $0x20, %rdi
vmovdqa 0x1e80(%rsp), %ymm0
leaq 0x1e20(%rsp), %rdx
addq $0x20, %rdx
movq 0x2178(%rsp), %rcx
movslq 0x2174(%rsp), %rax
addq %rax, %rcx
addq $0x20, %rcx
leaq 0x2080(%rsp), %rsi
callq 0x8cc8d0
movl 0x2184(%rsp), %ecx
shll %ecx
movq 0x2070(%rsp), %rax
movslq %ecx, %rcx
addq %rcx, %rax
movq %rax, 0x2070(%rsp)
movl 0x2174(%rsp), %ecx
shll %ecx
movq 0x2178(%rsp), %rax
movslq %ecx, %rcx
addq %rcx, %rax
movq %rax, 0x2178(%rsp)
movl 0x2154(%rsp), %eax
subl $0x2, %eax
movl %eax, 0x2154(%rsp)
cmpl $0x0, 0x2154(%rsp)
jne 0x8bc735
jmp 0x8bcc1a
movq 0x2070(%rsp), %rax
movq %rax, 0x39c0(%rsp)
movq 0x39c0(%rsp), %rax
vmovdqu (%rax), %ymm0
vmovdqa %ymm0, 0x1d20(%rsp)
movq 0x2070(%rsp), %rax
addq $0x20, %rax
movq %rax, 0x39b8(%rsp)
movq 0x39b8(%rsp), %rax
vmovdqu (%rax), %ymm0
vmovdqa %ymm0, 0x1d40(%rsp)
movq 0x2070(%rsp), %rax
addq $0x40, %rax
movq %rax, 0x39b0(%rsp)
movq 0x39b0(%rsp), %rax
vmovdqu (%rax), %ymm0
vmovdqa %ymm0, 0x1d60(%rsp)
movq 0x2070(%rsp), %rax
addq $0x60, %rax
movq %rax, 0x39a8(%rsp)
movq 0x39a8(%rsp), %rax
vmovdqu (%rax), %ymm0
vmovdqa %ymm0, 0x1d80(%rsp)
movq 0x2070(%rsp), %rdi
movslq 0x2184(%rsp), %rax
addq %rax, %rdi
vmovdqa 0x1d20(%rsp), %ymm0
leaq 0x1d20(%rsp), %rdx
addq $0x80, %rdx
movq 0x2178(%rsp), %rcx
leaq 0x2080(%rsp), %rsi
callq 0x8cc8d0
movq 0x2070(%rsp), %rdi
movslq 0x2184(%rsp), %rax
addq %rax, %rdi
addq $0x20, %rdi
vmovdqa 0x1d40(%rsp), %ymm0
leaq 0x1d20(%rsp), %rdx
addq $0x80, %rdx
addq $0x20, %rdx
movq 0x2178(%rsp), %rcx
addq $0x20, %rcx
leaq 0x2080(%rsp), %rsi
callq 0x8cc8d0
movq 0x2070(%rsp), %rdi
movslq 0x2184(%rsp), %rax
addq %rax, %rdi
addq $0x40, %rdi
vmovdqa 0x1d60(%rsp), %ymm0
leaq 0x1d20(%rsp), %rdx
addq $0x80, %rdx
addq $0x40, %rdx
movq 0x2178(%rsp), %rcx
addq $0x40, %rcx
leaq 0x2080(%rsp), %rsi
callq 0x8cc8d0
movq 0x2070(%rsp), %rdi
movslq 0x2184(%rsp), %rax
addq %rax, %rdi
addq $0x60, %rdi
vmovdqa 0x1d80(%rsp), %ymm0
leaq 0x1d20(%rsp), %rdx
addq $0x80, %rdx
addq $0x60, %rdx
movq 0x2178(%rsp), %rcx
addq $0x60, %rcx
leaq 0x2080(%rsp), %rsi
callq 0x8cc8d0
movq 0x2070(%rsp), %rdi
movl 0x2184(%rsp), %eax
shll %eax
cltq
addq %rax, %rdi
vmovdqa 0x1da0(%rsp), %ymm0
movq 0x2178(%rsp), %rcx
movslq 0x2174(%rsp), %rax
addq %rax, %rcx
leaq 0x2080(%rsp), %rsi
leaq 0x1d20(%rsp), %rdx
callq 0x8cc8d0
movq 0x2070(%rsp), %rdi
movl 0x2184(%rsp), %eax
shll %eax
cltq
addq %rax, %rdi
addq $0x20, %rdi
vmovdqa 0x1dc0(%rsp), %ymm0
leaq 0x1d20(%rsp), %rdx
addq $0x20, %rdx
movq 0x2178(%rsp), %rcx
movslq 0x2174(%rsp), %rax
addq %rax, %rcx
addq $0x20, %rcx
leaq 0x2080(%rsp), %rsi
callq 0x8cc8d0
movq 0x2070(%rsp), %rdi
movl 0x2184(%rsp), %eax
shll %eax
cltq
addq %rax, %rdi
addq $0x40, %rdi
vmovdqa 0x1de0(%rsp), %ymm0
leaq 0x1d20(%rsp), %rdx
addq $0x40, %rdx
movq 0x2178(%rsp), %rcx
movslq 0x2174(%rsp), %rax
addq %rax, %rcx
addq $0x40, %rcx
leaq 0x2080(%rsp), %rsi
callq 0x8cc8d0
movq 0x2070(%rsp), %rdi
movl 0x2184(%rsp), %eax
shll %eax
cltq
addq %rax, %rdi
addq $0x60, %rdi
vmovdqa 0x1e00(%rsp), %ymm0
leaq 0x1d20(%rsp), %rdx
addq $0x60, %rdx
movq 0x2178(%rsp), %rcx
movslq 0x2174(%rsp), %rax
addq %rax, %rcx
addq $0x60, %rcx
leaq 0x2080(%rsp), %rsi
callq 0x8cc8d0
movl 0x2184(%rsp), %ecx
shll %ecx
movq 0x2070(%rsp), %rax
movslq %ecx, %rcx
addq %rcx, %rax
movq %rax, 0x2070(%rsp)
movl 0x2174(%rsp), %ecx
shll %ecx
movq 0x2178(%rsp), %rax
movslq %ecx, %rcx
addq %rcx, %rax
movq %rax, 0x2178(%rsp)
movl 0x2154(%rsp), %eax
subl $0x2, %eax
movl %eax, 0x2154(%rsp)
cmpl $0x0, 0x2154(%rsp)
jne 0x8bc957
jmp 0x8bcc1a
jmp 0x8bcc1c
jmp 0x8bcc1e
jmp 0x8bcc20
jmp 0x8bda54
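# The block below appears to be a plain-averaging variant of the same two-row
# loop: consecutive source rows are combined with vpavgb instead of the
# coefficient helper, presumably when the two-tap kernel reduces to a simple
# average. 0x2170(%rsp) again selects the width, starting with the narrow
# (<= 8) cases; width 2 uses 16-bit loads and stores.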
cmpl $0x8, 0x2170(%rsp)
jg 0x8bd26a
cmpl $0x2, 0x2170(%rsp)
jne 0x8bce5c
movq 0x2070(%rsp), %rax
movswl (%rax), %eax
movl %eax, 0x222c(%rsp)
vmovd 0x222c(%rsp), %xmm0
vmovdqa %xmm0, 0x2210(%rsp)
vmovdqa 0x2210(%rsp), %xmm0
vmovdqa %xmm0, 0x1d00(%rsp)
movq 0x2070(%rsp), %rax
movslq 0x2184(%rsp), %rcx
movswl (%rax,%rcx), %eax
movl %eax, 0x220c(%rsp)
vmovd 0x220c(%rsp), %xmm0
vmovdqa %xmm0, 0x21f0(%rsp)
vmovdqa 0x21f0(%rsp), %xmm0
vmovdqa %xmm0, 0x1d10(%rsp)
vmovdqa 0x1d00(%rsp), %xmm1
vmovdqa 0x1d10(%rsp), %xmm0
vmovdqa %xmm1, 0x3ac0(%rsp)
vmovdqa %xmm0, 0x3ab0(%rsp)
vmovdqa 0x3ac0(%rsp), %xmm0
vmovdqa 0x3ab0(%rsp), %xmm1
vpavgb %xmm1, %xmm0, %xmm0
vmovdqa %xmm0, 0x1cf0(%rsp)
vmovdqa 0x1cf0(%rsp), %xmm0
vmovdqa %xmm0, 0x34c0(%rsp)
vmovdqa 0x34c0(%rsp), %xmm0
vmovdqa %xmm0, 0x34b0(%rsp)
movl 0x34b0(%rsp), %eax
movw %ax, %cx
movq 0x2178(%rsp), %rax
movw %cx, (%rax)
movq 0x2070(%rsp), %rax
movl 0x2184(%rsp), %ecx
addl %ecx, %ecx
movslq %ecx, %rcx
movswl (%rax,%rcx), %eax
movl %eax, 0x21ec(%rsp)
vmovd 0x21ec(%rsp), %xmm0
vmovdqa %xmm0, 0x21d0(%rsp)
vmovdqa 0x21d0(%rsp), %xmm0
vmovdqa %xmm0, 0x1d00(%rsp)
vmovdqa 0x1d00(%rsp), %xmm0
vmovdqa 0x1d10(%rsp), %xmm1
vmovdqa %xmm1, 0x3aa0(%rsp)
vmovdqa %xmm0, 0x3a90(%rsp)
vmovdqa 0x3aa0(%rsp), %xmm0
vmovdqa 0x3a90(%rsp), %xmm1
vpavgb %xmm1, %xmm0, %xmm0
vmovdqa %xmm0, 0x1ce0(%rsp)
vmovdqa 0x1ce0(%rsp), %xmm0
vmovdqa %xmm0, 0x34a0(%rsp)
vmovdqa 0x34a0(%rsp), %xmm0
vmovdqa %xmm0, 0x3490(%rsp)
movl 0x3490(%rsp), %eax
movw %ax, %dx
movq 0x2178(%rsp), %rax
movslq 0x2174(%rsp), %rcx
movw %dx, (%rax,%rcx)
movl 0x2184(%rsp), %ecx
shll %ecx
movq 0x2070(%rsp), %rax
movslq %ecx, %rcx
addq %rcx, %rax
movq %rax, 0x2070(%rsp)
movl 0x2174(%rsp), %ecx
shll %ecx
movq 0x2178(%rsp), %rax
movslq %ecx, %rcx
addq %rcx, %rax
movq %rax, 0x2178(%rsp)
movl 0x2154(%rsp), %eax
subl $0x2, %eax
movl %eax, 0x2154(%rsp)
cmpl $0x0, 0x2154(%rsp)
jne 0x8bcc77
jmp 0x8bd265
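# Likely width == 4 of the vpavgb path: 32-bit chunks appear to be loaded
# through the helper at 0x8cc180, averaged with the row below, and stored via
# the helper at 0x8cb680.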
cmpl $0x4, 0x2170(%rsp)
jne 0x8bd04a
movq 0x2070(%rsp), %rdi
callq 0x8cc180
movl %eax, 0x228c(%rsp)
vmovd 0x228c(%rsp), %xmm0
vmovdqa %xmm0, 0x2270(%rsp)
vmovdqa 0x2270(%rsp), %xmm0
vmovdqa %xmm0, 0x1cc0(%rsp)
movq 0x2070(%rsp), %rdi
movslq 0x2184(%rsp), %rax
addq %rax, %rdi
callq 0x8cc180
movl %eax, 0x226c(%rsp)
vmovd 0x226c(%rsp), %xmm0
vmovdqa %xmm0, 0x2250(%rsp)
vmovdqa 0x2250(%rsp), %xmm0
vmovdqa %xmm0, 0x1cd0(%rsp)
vmovdqa 0x1cc0(%rsp), %xmm1
vmovdqa 0x1cd0(%rsp), %xmm0
vmovdqa %xmm1, 0x3b00(%rsp)
vmovdqa %xmm0, 0x3af0(%rsp)
vmovdqa 0x3b00(%rsp), %xmm0
vmovdqa 0x3af0(%rsp), %xmm1
vpavgb %xmm1, %xmm0, %xmm0
vmovdqa %xmm0, 0x1cb0(%rsp)
movq 0x2178(%rsp), %rdi
vmovdqa 0x1cb0(%rsp), %xmm0
callq 0x8cb680
movq 0x2070(%rsp), %rdi
movl 0x2184(%rsp), %eax
addl %eax, %eax
cltq
addq %rax, %rdi
callq 0x8cc180
movl %eax, 0x224c(%rsp)
vmovd 0x224c(%rsp), %xmm0
vmovdqa %xmm0, 0x2230(%rsp)
vmovdqa 0x2230(%rsp), %xmm0
vmovdqa %xmm0, 0x1cc0(%rsp)
vmovdqa 0x1cd0(%rsp), %xmm1
vmovdqa 0x1cc0(%rsp), %xmm0
vmovdqa %xmm1, 0x3ae0(%rsp)
vmovdqa %xmm0, 0x3ad0(%rsp)
vmovdqa 0x3ae0(%rsp), %xmm0
vmovdqa 0x3ad0(%rsp), %xmm1
vpavgb %xmm1, %xmm0, %xmm0
vmovdqa %xmm0, 0x1ca0(%rsp)
movq 0x2178(%rsp), %rdi
movslq 0x2174(%rsp), %rax
addq %rax, %rdi
vmovdqa 0x1ca0(%rsp), %xmm0
callq 0x8cb680
movl 0x2184(%rsp), %ecx
shll %ecx
movq 0x2070(%rsp), %rax
movslq %ecx, %rcx
addq %rcx, %rax
movq %rax, 0x2070(%rsp)
movl 0x2174(%rsp), %ecx
shll %ecx
movq 0x2178(%rsp), %rax
movslq %ecx, %rcx
addq %rcx, %rax
movq %rax, 0x2178(%rsp)
movl 0x2154(%rsp), %eax
subl $0x2, %eax
movl %eax, 0x2154(%rsp)
cmpl $0x0, 0x2154(%rsp)
jne 0x8bcea2
jmp 0x8bd263
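# Remaining narrow case (likely width == 8): 64-bit vmovq loads of rows i and
# i+1, vpavgb, then 64-bit stores to two destination rows per iteration.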
movq 0x2070(%rsp), %rax
movq %rax, 0x3588(%rsp)
movq 0x3588(%rsp), %rax
vmovq (%rax), %xmm0
vmovdqa %xmm0, 0x3570(%rsp)
vmovdqa 0x3570(%rsp), %xmm0
vmovdqa %xmm0, 0x1c80(%rsp)
movq 0x2070(%rsp), %rax
movslq 0x2184(%rsp), %rcx
addq %rcx, %rax
movq %rax, 0x3568(%rsp)
movq 0x3568(%rsp), %rax
vmovq (%rax), %xmm0
vmovdqa %xmm0, 0x3550(%rsp)
vmovdqa 0x3550(%rsp), %xmm0
vmovdqa %xmm0, 0x1c90(%rsp)
vmovdqa 0x1c80(%rsp), %xmm1
vmovdqa 0x1c90(%rsp), %xmm0
vmovdqa %xmm1, 0x3b40(%rsp)
vmovdqa %xmm0, 0x3b30(%rsp)
vmovdqa 0x3b40(%rsp), %xmm0
vmovdqa 0x3b30(%rsp), %xmm1
vpavgb %xmm1, %xmm0, %xmm0
vmovdqa %xmm0, 0x1c70(%rsp)
movq 0x2178(%rsp), %rax
vmovdqa 0x1c70(%rsp), %xmm0
movq %rax, 0x3488(%rsp)
vmovdqa %xmm0, 0x3470(%rsp)
movq 0x3470(%rsp), %rcx
movq 0x3488(%rsp), %rax
movq %rcx, (%rax)
movq 0x2070(%rsp), %rax
movl 0x2184(%rsp), %ecx
addl %ecx, %ecx
movslq %ecx, %rcx
addq %rcx, %rax
movq %rax, 0x3548(%rsp)
movq 0x3548(%rsp), %rax
vmovq (%rax), %xmm0
vmovdqa %xmm0, 0x3530(%rsp)
vmovdqa 0x3530(%rsp), %xmm0
vmovdqa %xmm0, 0x1c80(%rsp)
vmovdqa 0x1c80(%rsp), %xmm0
vmovdqa 0x1c90(%rsp), %xmm1
vmovdqa %xmm1, 0x3b20(%rsp)
vmovdqa %xmm0, 0x3b10(%rsp)
vmovdqa 0x3b20(%rsp), %xmm0
vmovdqa 0x3b10(%rsp), %xmm1
vpavgb %xmm1, %xmm0, %xmm0
vmovdqa %xmm0, 0x1c60(%rsp)
movq 0x2178(%rsp), %rax
movslq 0x2174(%rsp), %rcx
addq %rcx, %rax
vmovdqa 0x1c60(%rsp), %xmm0
movq %rax, 0x3468(%rsp)
vmovdqa %xmm0, 0x3450(%rsp)
movq 0x3450(%rsp), %rcx
movq 0x3468(%rsp), %rax
movq %rcx, (%rax)
movl 0x2184(%rsp), %ecx
shll %ecx
movq 0x2070(%rsp), %rax
movslq %ecx, %rcx
addq %rcx, %rax
movq %rax, 0x2070(%rsp)
movl 0x2174(%rsp), %ecx
shll %ecx
movq 0x2178(%rsp), %rax
movslq %ecx, %rcx
addq %rcx, %rax
movq %rax, 0x2178(%rsp)
movl 0x2154(%rsp), %eax
subl $0x2, %eax
movl %eax, 0x2154(%rsp)
cmpl $0x0, 0x2154(%rsp)
jne 0x8bd081
jmp 0x8bd263
jmp 0x8bd265
jmp 0x8bda52
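# Likely width == 16: full 128-bit unaligned loads of consecutive rows,
# vpavgb, and 128-bit stores, still two output rows per iteration.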
cmpl $0x10, 0x2170(%rsp)
jne 0x8bd462
movq 0x2070(%rsp), %rax
movq %rax, 0x2660(%rsp)
movq 0x2660(%rsp), %rax
vmovdqu (%rax), %xmm0
vmovdqa %xmm0, 0x1c40(%rsp)
movq 0x2070(%rsp), %rax
movslq 0x2184(%rsp), %rcx
addq %rcx, %rax
movq %rax, 0x2658(%rsp)
movq 0x2658(%rsp), %rax
vmovdqu (%rax), %xmm0
vmovdqa %xmm0, 0x1c50(%rsp)
vmovdqa 0x1c40(%rsp), %xmm1
vmovdqa 0x1c50(%rsp), %xmm0
vmovdqa %xmm1, 0x3b80(%rsp)
vmovdqa %xmm0, 0x3b70(%rsp)
vmovdqa 0x3b80(%rsp), %xmm0
vmovdqa 0x3b70(%rsp), %xmm1
vpavgb %xmm1, %xmm0, %xmm0
vmovdqa %xmm0, 0x1c30(%rsp)
movq 0x2178(%rsp), %rax
vmovdqa 0x1c30(%rsp), %xmm0
movq %rax, 0x3428(%rsp)
vmovdqa %xmm0, 0x3410(%rsp)
vmovdqa 0x3410(%rsp), %xmm0
movq 0x3428(%rsp), %rax
vmovdqu %xmm0, (%rax)
movq 0x2070(%rsp), %rax
movl 0x2184(%rsp), %ecx
shll %ecx
movslq %ecx, %rcx
addq %rcx, %rax
movq %rax, 0x2650(%rsp)
movq 0x2650(%rsp), %rax
vmovdqu (%rax), %xmm0
vmovdqa %xmm0, 0x1c40(%rsp)
vmovdqa 0x1c50(%rsp), %xmm1
vmovdqa 0x1c40(%rsp), %xmm0
vmovdqa %xmm1, 0x3b60(%rsp)
vmovdqa %xmm0, 0x3b50(%rsp)
vmovdqa 0x3b60(%rsp), %xmm0
vmovdqa 0x3b50(%rsp), %xmm1
vpavgb %xmm1, %xmm0, %xmm0
vmovdqa %xmm0, 0x1c20(%rsp)
movq 0x2178(%rsp), %rax
movslq 0x2174(%rsp), %rcx
addq %rcx, %rax
vmovdqa 0x1c20(%rsp), %xmm0
movq %rax, 0x3408(%rsp)
vmovdqa %xmm0, 0x33f0(%rsp)
vmovdqa 0x33f0(%rsp), %xmm0
movq 0x3408(%rsp), %rax
vmovdqu %xmm0, (%rax)
movl 0x2184(%rsp), %ecx
shll %ecx
movq 0x2070(%rsp), %rax
movslq %ecx, %rcx
addq %rcx, %rax
movq %rax, 0x2070(%rsp)
movl 0x2174(%rsp), %ecx
shll %ecx
movq 0x2178(%rsp), %rax
movslq %ecx, %rcx
addq %rcx, %rax
movq %rax, 0x2178(%rsp)
movl 0x2154(%rsp), %eax
subl $0x2, %eax
movl %eax, 0x2154(%rsp)
cmpl $0x0, 0x2154(%rsp)
jne 0x8bd29d
jmp 0x8bda50
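# Likely width == 32 of the averaging path: the current 32-byte row is cached
# at 0x1be0(%rsp); the helper at 0x8cc940 appears to average it with the next
# row and store the result, and is called twice per iteration (one output row
# each).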
cmpl $0x20, 0x2170(%rsp)
jne 0x8bd56b
movq 0x2070(%rsp), %rax
movq %rax, 0x39c8(%rsp)
movq 0x39c8(%rsp), %rax
vmovdqu (%rax), %ymm0
vmovdqa %ymm0, 0x1be0(%rsp)
movq 0x2070(%rsp), %rdi
movslq 0x2184(%rsp), %rax
addq %rax, %rdi
vmovdqa 0x1be0(%rsp), %ymm0
leaq 0x1be0(%rsp), %rsi
addq $0x20, %rsi
movq 0x2178(%rsp), %rdx
callq 0x8cc940
movq 0x2070(%rsp), %rdi
movl 0x2184(%rsp), %eax
shll %eax
cltq
addq %rax, %rdi
vmovdqa 0x1c00(%rsp), %ymm0
movq 0x2178(%rsp), %rdx
movslq 0x2174(%rsp), %rax
addq %rax, %rdx
leaq 0x1be0(%rsp), %rsi
callq 0x8cc940
movl 0x2184(%rsp), %ecx
shll %ecx
movq 0x2070(%rsp), %rax
movslq %ecx, %rcx
addq %rcx, %rax
movq %rax, 0x2070(%rsp)
movl 0x2174(%rsp), %ecx
shll %ecx
movq 0x2178(%rsp), %rax
movslq %ecx, %rcx
addq %rcx, %rax
movq %rax, 0x2178(%rsp)
movl 0x2154(%rsp), %eax
subl $0x2, %eax
movl %eax, 0x2154(%rsp)
cmpl $0x0, 0x2154(%rsp)
jne 0x8bd495
jmp 0x8bda4e
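# Likely width == 64: two 32-byte halves cached at 0x1b60/0x1b80(%rsp), with
# four calls to the averaging helper at 0x8cc940 per two-row iteration.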
cmpl $0x40, 0x2170(%rsp)
jne 0x8bd729
movq 0x2070(%rsp), %rax
movq %rax, 0x39d8(%rsp)
movq 0x39d8(%rsp), %rax
vmovdqu (%rax), %ymm0
vmovdqa %ymm0, 0x1b60(%rsp)
movq 0x2070(%rsp), %rax
addq $0x20, %rax
movq %rax, 0x39d0(%rsp)
movq 0x39d0(%rsp), %rax
vmovdqu (%rax), %ymm0
vmovdqa %ymm0, 0x1b80(%rsp)
movq 0x2070(%rsp), %rdi
movslq 0x2184(%rsp), %rax
addq %rax, %rdi
vmovdqa 0x1b60(%rsp), %ymm0
leaq 0x1b60(%rsp), %rsi
addq $0x40, %rsi
movq 0x2178(%rsp), %rdx
callq 0x8cc940
movq 0x2070(%rsp), %rdi
movslq 0x2184(%rsp), %rax
addq %rax, %rdi
addq $0x20, %rdi
vmovdqa 0x1b80(%rsp), %ymm0
leaq 0x1b60(%rsp), %rsi
addq $0x40, %rsi
addq $0x20, %rsi
movq 0x2178(%rsp), %rdx
addq $0x20, %rdx
callq 0x8cc940
movq 0x2070(%rsp), %rdi
movl 0x2184(%rsp), %eax
shll %eax
cltq
addq %rax, %rdi
vmovdqa 0x1ba0(%rsp), %ymm0
movq 0x2178(%rsp), %rdx
movslq 0x2174(%rsp), %rax
addq %rax, %rdx
leaq 0x1b60(%rsp), %rsi
callq 0x8cc940
movq 0x2070(%rsp), %rdi
movl 0x2184(%rsp), %eax
shll %eax
cltq
addq %rax, %rdi
addq $0x20, %rdi
vmovdqa 0x1bc0(%rsp), %ymm0
leaq 0x1b60(%rsp), %rsi
addq $0x20, %rsi
movq 0x2178(%rsp), %rdx
movslq 0x2174(%rsp), %rax
addq %rax, %rdx
addq $0x20, %rdx
callq 0x8cc940
movl 0x2184(%rsp), %ecx
shll %ecx
movq 0x2070(%rsp), %rax
movslq %ecx, %rcx
addq %rcx, %rax
movq %rax, 0x2070(%rsp)
movl 0x2174(%rsp), %ecx
shll %ecx
movq 0x2178(%rsp), %rax
movslq %ecx, %rcx
addq %rcx, %rax
movq %rax, 0x2178(%rsp)
movl 0x2154(%rsp), %eax
subl $0x2, %eax
movl %eax, 0x2154(%rsp)
cmpl $0x0, 0x2154(%rsp)
jne 0x8bd5c7
jmp 0x8bda4c
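# Fall-through case (presumably width == 128): four cached 32-byte chunks at
# 0x1a60..0x1ac0(%rsp) and eight calls to 0x8cc940 per two-row iteration.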
movq 0x2070(%rsp), %rax
movq %rax, 0x39f8(%rsp)
movq 0x39f8(%rsp), %rax
vmovdqu (%rax), %ymm0
vmovdqa %ymm0, 0x1a60(%rsp)
movq 0x2070(%rsp), %rax
addq $0x20, %rax
movq %rax, 0x39f0(%rsp)
movq 0x39f0(%rsp), %rax
vmovdqu (%rax), %ymm0
vmovdqa %ymm0, 0x1a80(%rsp)
movq 0x2070(%rsp), %rax
addq $0x40, %rax
movq %rax, 0x39e8(%rsp)
movq 0x39e8(%rsp), %rax
vmovdqu (%rax), %ymm0
vmovdqa %ymm0, 0x1aa0(%rsp)
movq 0x2070(%rsp), %rax
addq $0x60, %rax
movq %rax, 0x39e0(%rsp)
movq 0x39e0(%rsp), %rax
vmovdqu (%rax), %ymm0
vmovdqa %ymm0, 0x1ac0(%rsp)
movq 0x2070(%rsp), %rdi
movslq 0x2184(%rsp), %rax
addq %rax, %rdi
vmovdqa 0x1a60(%rsp), %ymm0
leaq 0x1a60(%rsp), %rsi
addq $0x80, %rsi
movq 0x2178(%rsp), %rdx
callq 0x8cc940
movq 0x2070(%rsp), %rdi
movslq 0x2184(%rsp), %rax
addq %rax, %rdi
addq $0x20, %rdi
vmovdqa 0x1a80(%rsp), %ymm0
leaq 0x1a60(%rsp), %rsi
addq $0x80, %rsi
addq $0x20, %rsi
movq 0x2178(%rsp), %rdx
addq $0x20, %rdx
callq 0x8cc940
movq 0x2070(%rsp), %rdi
movslq 0x2184(%rsp), %rax
addq %rax, %rdi
addq $0x40, %rdi
vmovdqa 0x1aa0(%rsp), %ymm0
leaq 0x1a60(%rsp), %rsi
addq $0x80, %rsi
addq $0x40, %rsi
movq 0x2178(%rsp), %rdx
addq $0x40, %rdx
callq 0x8cc940
movq 0x2070(%rsp), %rdi
movslq 0x2184(%rsp), %rax
addq %rax, %rdi
addq $0x60, %rdi
vmovdqa 0x1ac0(%rsp), %ymm0
leaq 0x1a60(%rsp), %rsi
addq $0x80, %rsi
addq $0x60, %rsi
movq 0x2178(%rsp), %rdx
addq $0x60, %rdx
callq 0x8cc940
movq 0x2070(%rsp), %rdi
movl 0x2184(%rsp), %eax
shll %eax
cltq
addq %rax, %rdi
vmovdqa 0x1ae0(%rsp), %ymm0
movq 0x2178(%rsp), %rdx
movslq 0x2174(%rsp), %rax
addq %rax, %rdx
leaq 0x1a60(%rsp), %rsi
callq 0x8cc940
movq 0x2070(%rsp), %rdi
movl 0x2184(%rsp), %eax
shll %eax
cltq
addq %rax, %rdi
addq $0x20, %rdi
vmovdqa 0x1b00(%rsp), %ymm0
leaq 0x1a60(%rsp), %rsi
addq $0x20, %rsi
movq 0x2178(%rsp), %rdx
movslq 0x2174(%rsp), %rax
addq %rax, %rdx
addq $0x20, %rdx
callq 0x8cc940
movq 0x2070(%rsp), %rdi
movl 0x2184(%rsp), %eax
shll %eax
cltq
addq %rax, %rdi
addq $0x40, %rdi
vmovdqa 0x1b20(%rsp), %ymm0
leaq 0x1a60(%rsp), %rsi
addq $0x40, %rsi
movq 0x2178(%rsp), %rdx
movslq 0x2174(%rsp), %rax
addq %rax, %rdx
addq $0x40, %rdx
callq 0x8cc940
movq 0x2070(%rsp), %rdi
movl 0x2184(%rsp), %eax
shll %eax
cltq
addq %rax, %rdi
addq $0x60, %rdi
vmovdqa 0x1b40(%rsp), %ymm0
leaq 0x1a60(%rsp), %rsi
addq $0x60, %rsi
movq 0x2178(%rsp), %rdx
movslq 0x2174(%rsp), %rax
addq %rax, %rdx
addq $0x60, %rdx
callq 0x8cc940
movl 0x2184(%rsp), %ecx
shll %ecx
movq 0x2070(%rsp), %rax
movslq %ecx, %rcx
addq %rcx, %rax
movq %rax, 0x2070(%rsp)
movl 0x2174(%rsp), %ecx
shll %ecx
movq 0x2178(%rsp), %rax
movslq %ecx, %rcx
addq %rcx, %rax
movq %rax, 0x2178(%rsp)
movl 0x2154(%rsp), %eax
subl $0x2, %eax
movl %eax, 0x2154(%rsp)
cmpl $0x0, 0x2154(%rsp)
jne 0x8bd7c9
jmp 0x8bda4c
jmp 0x8bda4e
jmp 0x8bda50
jmp 0x8bda52
jmp 0x8bda54
jmp 0x8c1ae4
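# 0x207c(%rsp) appears to select the vertical filter length; the branch below
# is likely the 4-tap case. The source pointer is rewound by one stride
# (0x2188 - 0x2184) so three rows can be preloaded, and the coefficients
# appear to be expanded into a local buffer first (0x2110 via the call to
# 0x8cca00 for widths <= 4, 0x2080 via 0x8ccf50 for wider blocks). Adjacent
# rows are interleaved byte-wise (vpunpcklbw/vpunpckhbw) before each
# per-width filter-helper call.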
cmpl $0x4, 0x207c(%rsp)
jne 0x8bea30
movq 0x2188(%rsp), %rax
movslq 0x2184(%rsp), %rdx
xorl %ecx, %ecx
subq %rdx, %rcx
addq %rcx, %rax
movq %rax, 0x1a58(%rsp)
movl 0x216c(%rsp), %eax
movl %eax, 0x2154(%rsp)
cmpl $0x4, 0x2170(%rsp)
jg 0x8bdfa4
movq 0x2160(%rsp), %rdi
movl 0x215c(%rsp), %esi
leaq 0x2110(%rsp), %rdx
callq 0x8cca00
cmpl $0x2, 0x2170(%rsp)
jne 0x8bdd39
movq 0x1a58(%rsp), %rdi
callq 0x8ccc70
cwtl
movl %eax, 0x22ec(%rsp)
vmovd 0x22ec(%rsp), %xmm0
vmovdqa %xmm0, 0x22d0(%rsp)
vmovdqa 0x22d0(%rsp), %xmm0
vmovdqa %xmm0, 0x1a10(%rsp)
movq 0x1a58(%rsp), %rdi
movslq 0x2184(%rsp), %rax
addq %rax, %rdi
callq 0x8ccc70
cwtl
movl %eax, 0x22cc(%rsp)
vmovd 0x22cc(%rsp), %xmm0
vmovdqa %xmm0, 0x22b0(%rsp)
vmovdqa 0x22b0(%rsp), %xmm0
vmovdqa %xmm0, 0x1a20(%rsp)
movq 0x1a58(%rsp), %rdi
movl 0x2184(%rsp), %eax
addl %eax, %eax
cltq
addq %rax, %rdi
callq 0x8ccc70
cwtl
movl %eax, 0x22ac(%rsp)
vmovd 0x22ac(%rsp), %xmm0
vmovdqa %xmm0, 0x2290(%rsp)
vmovdqa 0x2290(%rsp), %xmm0
vmovdqa %xmm0, 0x1a30(%rsp)
vmovdqa 0x1a10(%rsp), %xmm1
vmovdqa 0x1a20(%rsp), %xmm0
vmovdqa %xmm1, 0x3bc0(%rsp)
vmovdqa %xmm0, 0x3bb0(%rsp)
vmovdqa 0x3bc0(%rsp), %xmm0
vmovdqa 0x3bb0(%rsp), %xmm1
vpunpcklwd %xmm1, %xmm0, %xmm0 # xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1],xmm0[2],xmm1[2],xmm0[3],xmm1[3]
vmovdqa %xmm0, 0x19e0(%rsp)
vmovdqa 0x1a20(%rsp), %xmm1
vmovdqa 0x1a30(%rsp), %xmm0
vmovdqa %xmm1, 0x3ba0(%rsp)
vmovdqa %xmm0, 0x3b90(%rsp)
vmovdqa 0x3ba0(%rsp), %xmm0
vmovdqa 0x3b90(%rsp), %xmm1
vpunpcklwd %xmm1, %xmm0, %xmm0 # xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1],xmm0[2],xmm1[2],xmm0[3],xmm1[3]
vmovdqa %xmm0, 0x19d0(%rsp)
vmovdqa 0x19e0(%rsp), %xmm1
vmovdqa 0x19d0(%rsp), %xmm0
vmovdqa %xmm1, 0x37e0(%rsp)
vmovdqa %xmm0, 0x37d0(%rsp)
vmovdqa 0x37e0(%rsp), %xmm0
vmovdqa 0x37d0(%rsp), %xmm1
vpunpcklbw %xmm1, %xmm0, %xmm0 # xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1],xmm0[2],xmm1[2],xmm0[3],xmm1[3],xmm0[4],xmm1[4],xmm0[5],xmm1[5],xmm0[6],xmm1[6],xmm0[7],xmm1[7]
vmovdqa %xmm0, 0x19f0(%rsp)
movl 0x2184(%rsp), %ecx
shll %ecx
movq 0x1a58(%rsp), %rax
movslq %ecx, %rcx
addq %rcx, %rax
movq %rax, 0x1a58(%rsp)
movq 0x1a58(%rsp), %rdi
movslq 0x2184(%rsp), %rsi
leaq 0x2110(%rsp), %rdx
leaq 0x1a10(%rsp), %rcx
leaq 0x19f0(%rsp), %r8
callq 0x8ccc90
vmovdqa %xmm0, 0x19c0(%rsp)
vmovdqa 0x19c0(%rsp), %xmm0
callq 0x8cc000
vmovdqa %xmm0, 0x19b0(%rsp)
vmovdqa 0x19b0(%rsp), %xmm0
movq 0x2178(%rsp), %rdi
movslq 0x2174(%rsp), %rsi
callq 0x8cc100
vmovdqa 0x1a00(%rsp), %xmm0
vmovdqa %xmm0, 0x19f0(%rsp)
movl 0x2174(%rsp), %ecx
shll %ecx
movq 0x2178(%rsp), %rax
movslq %ecx, %rcx
addq %rcx, %rax
movq %rax, 0x2178(%rsp)
movl 0x2154(%rsp), %eax
subl $0x2, %eax
movl %eax, 0x2154(%rsp)
cmpl $0x0, 0x2154(%rsp)
jne 0x8bdc5a
jmp 0x8bdf9f
movq 0x1a58(%rsp), %rdi
callq 0x8cc180
movl %eax, 0x234c(%rsp)
vmovd 0x234c(%rsp), %xmm0
vmovdqa %xmm0, 0x2330(%rsp)
vmovdqa 0x2330(%rsp), %xmm0
vmovdqa %xmm0, 0x1970(%rsp)
movq 0x1a58(%rsp), %rdi
movslq 0x2184(%rsp), %rax
addq %rax, %rdi
callq 0x8cc180
movl %eax, 0x232c(%rsp)
vmovd 0x232c(%rsp), %xmm0
vmovdqa %xmm0, 0x2310(%rsp)
vmovdqa 0x2310(%rsp), %xmm0
vmovdqa %xmm0, 0x1980(%rsp)
movq 0x1a58(%rsp), %rdi
movl 0x2184(%rsp), %eax
addl %eax, %eax
cltq
addq %rax, %rdi
callq 0x8cc180
movl %eax, 0x230c(%rsp)
vmovd 0x230c(%rsp), %xmm0
vmovdqa %xmm0, 0x22f0(%rsp)
vmovdqa 0x22f0(%rsp), %xmm0
vmovdqa %xmm0, 0x1990(%rsp)
vmovdqa 0x1970(%rsp), %xmm1
vmovdqa 0x1980(%rsp), %xmm0
vmovdqa %xmm1, 0x3d40(%rsp)
vmovdqa %xmm0, 0x3d30(%rsp)
vmovdqa 0x3d40(%rsp), %xmm0
vmovdqa 0x3d30(%rsp), %xmm1
vpunpckldq %xmm1, %xmm0, %xmm0 # xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1]
vmovdqa %xmm0, 0x1940(%rsp)
vmovdqa 0x1980(%rsp), %xmm1
vmovdqa 0x1990(%rsp), %xmm0
vmovdqa %xmm1, 0x3d20(%rsp)
vmovdqa %xmm0, 0x3d10(%rsp)
vmovdqa 0x3d20(%rsp), %xmm0
vmovdqa 0x3d10(%rsp), %xmm1
vpunpckldq %xmm1, %xmm0, %xmm0 # xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1]
vmovdqa %xmm0, 0x1930(%rsp)
vmovdqa 0x1940(%rsp), %xmm1
vmovdqa 0x1930(%rsp), %xmm0
vmovdqa %xmm1, 0x3800(%rsp)
vmovdqa %xmm0, 0x37f0(%rsp)
vmovdqa 0x3800(%rsp), %xmm0
vmovdqa 0x37f0(%rsp), %xmm1
vpunpcklbw %xmm1, %xmm0, %xmm0 # xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1],xmm0[2],xmm1[2],xmm0[3],xmm1[3],xmm0[4],xmm1[4],xmm0[5],xmm1[5],xmm0[6],xmm1[6],xmm0[7],xmm1[7]
vmovdqa %xmm0, 0x1950(%rsp)
movl 0x2184(%rsp), %ecx
shll %ecx
movq 0x1a58(%rsp), %rax
movslq %ecx, %rcx
addq %rcx, %rax
movq %rax, 0x1a58(%rsp)
movq 0x1a58(%rsp), %rdi
movslq 0x2184(%rsp), %rsi
leaq 0x2110(%rsp), %rdx
leaq 0x1970(%rsp), %rcx
leaq 0x1950(%rsp), %r8
callq 0x8ccdf0
vmovdqa %xmm0, 0x1920(%rsp)
vmovdqa 0x1920(%rsp), %xmm0
callq 0x8cc000
vmovdqa %xmm0, 0x1910(%rsp)
vmovdqa 0x1910(%rsp), %xmm0
movq 0x2178(%rsp), %rdi
movslq 0x2174(%rsp), %rsi
callq 0x8cc2f0
vmovdqa 0x1960(%rsp), %xmm0
vmovdqa %xmm0, 0x1950(%rsp)
movl 0x2174(%rsp), %ecx
shll %ecx
movq 0x2178(%rsp), %rax
movslq %ecx, %rcx
addq %rcx, %rax
movq %rax, 0x2178(%rsp)
movl 0x2154(%rsp), %eax
subl $0x2, %eax
movl %eax, 0x2154(%rsp)
cmpl $0x0, 0x2154(%rsp)
jne 0x8bdec3
jmp 0x8bdf9f
jmp 0x8bea2b
movq 0x2160(%rsp), %rdi
movl 0x215c(%rsp), %esi
leaq 0x2080(%rsp), %rdx
callq 0x8ccf50
cmpl $0x8, 0x2170(%rsp)
jne 0x8be204
movq 0x1a58(%rsp), %rax
movq %rax, 0x35e8(%rsp)
movq 0x35e8(%rsp), %rax
vmovq (%rax), %xmm0
vmovdqa %xmm0, 0x35d0(%rsp)
vmovdqa 0x35d0(%rsp), %xmm0
vmovdqa %xmm0, 0x18d0(%rsp)
movq 0x1a58(%rsp), %rax
movslq 0x2184(%rsp), %rcx
addq %rcx, %rax
movq %rax, 0x35c8(%rsp)
movq 0x35c8(%rsp), %rax
vmovq (%rax), %xmm0
vmovdqa %xmm0, 0x35b0(%rsp)
vmovdqa 0x35b0(%rsp), %xmm0
vmovdqa %xmm0, 0x18e0(%rsp)
movq 0x1a58(%rsp), %rax
movl 0x2184(%rsp), %ecx
addl %ecx, %ecx
movslq %ecx, %rcx
addq %rcx, %rax
movq %rax, 0x35a8(%rsp)
movq 0x35a8(%rsp), %rax
vmovq (%rax), %xmm0
vmovdqa %xmm0, 0x3590(%rsp)
vmovdqa 0x3590(%rsp), %xmm0
vmovdqa %xmm0, 0x18f0(%rsp)
vmovdqa 0x18d0(%rsp), %xmm0
vmovdqa %xmm0, 0x26f0(%rsp)
vmovdqa 0x26f0(%rsp), %xmm0
vmovdqa 0x18e0(%rsp), %xmm1
vmovdqa %xmm1, 0x1870(%rsp)
vmovdqa %xmm0, 0x1860(%rsp)
vmovdqa 0x18e0(%rsp), %xmm0
vmovdqa %xmm0, 0x26e0(%rsp)
vmovdqa 0x26e0(%rsp), %xmm0
vmovdqa 0x18f0(%rsp), %xmm1
vmovdqa %xmm1, 0x1850(%rsp)
vmovdqa %xmm0, 0x1840(%rsp)
vmovaps 0x1860(%rsp), %ymm1
vmovaps 0x1840(%rsp), %ymm0
vmovaps %ymm1, 0x2880(%rsp)
vmovaps %ymm0, 0x2860(%rsp)
vmovaps 0x2880(%rsp), %ymm0
vmovaps 0x2860(%rsp), %ymm1
vpunpcklbw %ymm1, %ymm0, %ymm0 # ymm0 = ymm0[0],ymm1[0],ymm0[1],ymm1[1],ymm0[2],ymm1[2],ymm0[3],ymm1[3],ymm0[4],ymm1[4],ymm0[5],ymm1[5],ymm0[6],ymm1[6],ymm0[7],ymm1[7],ymm0[16],ymm1[16],ymm0[17],ymm1[17],ymm0[18],ymm1[18],ymm0[19],ymm1[19],ymm0[20],ymm1[20],ymm0[21],ymm1[21],ymm0[22],ymm1[22],ymm0[23],ymm1[23]
vmovdqa %ymm0, 0x1880(%rsp)
movl 0x2184(%rsp), %ecx
shll %ecx
movq 0x1a58(%rsp), %rax
movslq %ecx, %rcx
addq %rcx, %rax
movq %rax, 0x1a58(%rsp)
movq 0x1a58(%rsp), %rdi
movslq 0x2184(%rsp), %rsi
leaq 0x2080(%rsp), %rdx
leaq 0x18d0(%rsp), %rcx
leaq 0x1880(%rsp), %r8
callq 0x8ccfd0
vmovdqa %ymm0, 0x1820(%rsp)
vmovdqa 0x1820(%rsp), %ymm0
movq 0x2178(%rsp), %rdi
movslq 0x2174(%rsp), %rsi
callq 0x8cd130
vmovdqa 0x18a0(%rsp), %ymm0
vmovdqa %ymm0, 0x1880(%rsp)
movl 0x2174(%rsp), %ecx
shll %ecx
movq 0x2178(%rsp), %rax
movslq %ecx, %rcx
addq %rcx, %rax
movq %rax, 0x2178(%rsp)
movl 0x2154(%rsp), %eax
subl $0x2, %eax
movl %eax, 0x2154(%rsp)
cmpl $0x0, 0x2154(%rsp)
jne 0x8be13c
jmp 0x8bea29
cmpl $0x10, 0x2170(%rsp)
jne 0x8be468
movq 0x1a58(%rsp), %rax
movq %rax, 0x2678(%rsp)
movq 0x2678(%rsp), %rax
vmovdqu (%rax), %xmm0
vmovdqa %xmm0, 0x17e0(%rsp)
movq 0x1a58(%rsp), %rax
movslq 0x2184(%rsp), %rcx
addq %rcx, %rax
movq %rax, 0x2670(%rsp)
movq 0x2670(%rsp), %rax
vmovdqu (%rax), %xmm0
vmovdqa %xmm0, 0x17f0(%rsp)
movq 0x1a58(%rsp), %rax
movl 0x2184(%rsp), %ecx
addl %ecx, %ecx
movslq %ecx, %rcx
addq %rcx, %rax
movq %rax, 0x2668(%rsp)
movq 0x2668(%rsp), %rax
vmovdqu (%rax), %xmm0
vmovdqa %xmm0, 0x1800(%rsp)
vmovdqa 0x17e0(%rsp), %xmm0
vmovdqa %xmm0, 0x2710(%rsp)
vmovdqa 0x2710(%rsp), %xmm0
vmovdqa 0x17f0(%rsp), %xmm1
vmovdqa %xmm1, 0x1710(%rsp)
vmovdqa %xmm0, 0x1700(%rsp)
vmovdqa 0x17f0(%rsp), %xmm0
vmovdqa %xmm0, 0x2700(%rsp)
vmovdqa 0x2700(%rsp), %xmm0
vmovdqa 0x1800(%rsp), %xmm1
vmovdqa %xmm1, 0x16f0(%rsp)
vmovdqa %xmm0, 0x16e0(%rsp)
vmovaps 0x1700(%rsp), %ymm1
vmovaps 0x16e0(%rsp), %ymm0
vmovaps %ymm1, 0x28c0(%rsp)
vmovaps %ymm0, 0x28a0(%rsp)
vmovaps 0x28c0(%rsp), %ymm0
vmovaps 0x28a0(%rsp), %ymm1
vpunpcklbw %ymm1, %ymm0, %ymm0 # ymm0 = ymm0[0],ymm1[0],ymm0[1],ymm1[1],ymm0[2],ymm1[2],ymm0[3],ymm1[3],ymm0[4],ymm1[4],ymm0[5],ymm1[5],ymm0[6],ymm1[6],ymm0[7],ymm1[7],ymm0[16],ymm1[16],ymm0[17],ymm1[17],ymm0[18],ymm1[18],ymm0[19],ymm1[19],ymm0[20],ymm1[20],ymm0[21],ymm1[21],ymm0[22],ymm1[22],ymm0[23],ymm1[23]
vmovaps %ymm0, 0x1760(%rsp)
vmovaps 0x1700(%rsp), %ymm1
vmovaps 0x16e0(%rsp), %ymm0
vmovaps %ymm1, 0x2f00(%rsp)
vmovaps %ymm0, 0x2ee0(%rsp)
vmovaps 0x2f00(%rsp), %ymm0
vmovaps 0x2ee0(%rsp), %ymm1
vpunpckhbw %ymm1, %ymm0, %ymm0 # ymm0 = ymm0[8],ymm1[8],ymm0[9],ymm1[9],ymm0[10],ymm1[10],ymm0[11],ymm1[11],ymm0[12],ymm1[12],ymm0[13],ymm1[13],ymm0[14],ymm1[14],ymm0[15],ymm1[15],ymm0[24],ymm1[24],ymm0[25],ymm1[25],ymm0[26],ymm1[26],ymm0[27],ymm1[27],ymm0[28],ymm1[28],ymm0[29],ymm1[29],ymm0[30],ymm1[30],ymm0[31],ymm1[31]
vmovdqa %ymm0, 0x17a0(%rsp)
movl 0x2184(%rsp), %ecx
shll %ecx
movq 0x1a58(%rsp), %rax
movslq %ecx, %rcx
addq %rcx, %rax
movq %rax, 0x1a58(%rsp)
movq 0x1a58(%rsp), %rdi
movslq 0x2184(%rsp), %rsi
leaq 0x2080(%rsp), %rdx
leaq 0x17e0(%rsp), %rcx
leaq 0x1760(%rsp), %r8
leaq 0x1720(%rsp), %r9
vzeroupper
callq 0x8cd180
movq 0x2178(%rsp), %rsi
movslq 0x2174(%rsp), %rdx
leaq 0x1720(%rsp), %rdi
callq 0x8cc860
vmovdqa 0x1780(%rsp), %ymm0
vmovdqa %ymm0, 0x1760(%rsp)
vmovdqa 0x17c0(%rsp), %ymm0
vmovdqa %ymm0, 0x17a0(%rsp)
movl 0x2174(%rsp), %ecx
shll %ecx
movq 0x2178(%rsp), %rax
movslq %ecx, %rcx
addq %rcx, %rax
movq %rax, 0x2178(%rsp)
movl 0x2154(%rsp), %eax
subl $0x2, %eax
movl %eax, 0x2154(%rsp)
cmpl $0x0, 0x2154(%rsp)
jne 0x8be38d
jmp 0x8bea27
cmpl $0x20, 0x2170(%rsp)
jne 0x8be715
movq 0x1a58(%rsp), %rax
movq %rax, 0x3a10(%rsp)
movq 0x3a10(%rsp), %rax
vmovups (%rax), %ymm0
vmovaps %ymm0, 0x1660(%rsp)
movq 0x1a58(%rsp), %rax
movslq 0x2184(%rsp), %rcx
addq %rcx, %rax
movq %rax, 0x3a08(%rsp)
movq 0x3a08(%rsp), %rax
vmovups (%rax), %ymm0
vmovaps %ymm0, 0x1680(%rsp)
movq 0x1a58(%rsp), %rax
movl 0x2184(%rsp), %ecx
addl %ecx, %ecx
movslq %ecx, %rcx
addq %rcx, %rax
movq %rax, 0x3a00(%rsp)
movq 0x3a00(%rsp), %rax
vmovups (%rax), %ymm0
vmovaps %ymm0, 0x16a0(%rsp)
vmovaps 0x1660(%rsp), %ymm1
vmovaps 0x1680(%rsp), %ymm0
vmovaps %ymm1, 0x2940(%rsp)
vmovaps %ymm0, 0x2920(%rsp)
vmovaps 0x2940(%rsp), %ymm0
vmovaps 0x2920(%rsp), %ymm1
vpunpcklbw %ymm1, %ymm0, %ymm0 # ymm0 = ymm0[0],ymm1[0],ymm0[1],ymm1[1],ymm0[2],ymm1[2],ymm0[3],ymm1[3],ymm0[4],ymm1[4],ymm0[5],ymm1[5],ymm0[6],ymm1[6],ymm0[7],ymm1[7],ymm0[16],ymm1[16],ymm0[17],ymm1[17],ymm0[18],ymm1[18],ymm0[19],ymm1[19],ymm0[20],ymm1[20],ymm0[21],ymm1[21],ymm0[22],ymm1[22],ymm0[23],ymm1[23]
vmovaps %ymm0, 0x15e0(%rsp)
vmovaps 0x1660(%rsp), %ymm1
vmovaps 0x1680(%rsp), %ymm0
vmovaps %ymm1, 0x2f80(%rsp)
vmovaps %ymm0, 0x2f60(%rsp)
vmovaps 0x2f80(%rsp), %ymm0
vmovaps 0x2f60(%rsp), %ymm1
vpunpckhbw %ymm1, %ymm0, %ymm0 # ymm0 = ymm0[8],ymm1[8],ymm0[9],ymm1[9],ymm0[10],ymm1[10],ymm0[11],ymm1[11],ymm0[12],ymm1[12],ymm0[13],ymm1[13],ymm0[14],ymm1[14],ymm0[15],ymm1[15],ymm0[24],ymm1[24],ymm0[25],ymm1[25],ymm0[26],ymm1[26],ymm0[27],ymm1[27],ymm0[28],ymm1[28],ymm0[29],ymm1[29],ymm0[30],ymm1[30],ymm0[31],ymm1[31]
vmovaps %ymm0, 0x1620(%rsp)
vmovaps 0x1680(%rsp), %ymm1
vmovaps 0x16a0(%rsp), %ymm0
vmovaps %ymm1, 0x2900(%rsp)
vmovaps %ymm0, 0x28e0(%rsp)
vmovaps 0x2900(%rsp), %ymm0
vmovaps 0x28e0(%rsp), %ymm1
vpunpcklbw %ymm1, %ymm0, %ymm0 # ymm0 = ymm0[0],ymm1[0],ymm0[1],ymm1[1],ymm0[2],ymm1[2],ymm0[3],ymm1[3],ymm0[4],ymm1[4],ymm0[5],ymm1[5],ymm0[6],ymm1[6],ymm0[7],ymm1[7],ymm0[16],ymm1[16],ymm0[17],ymm1[17],ymm0[18],ymm1[18],ymm0[19],ymm1[19],ymm0[20],ymm1[20],ymm0[21],ymm1[21],ymm0[22],ymm1[22],ymm0[23],ymm1[23]
vmovaps %ymm0, 0x1560(%rsp)
vmovaps 0x1680(%rsp), %ymm1
vmovaps 0x16a0(%rsp), %ymm0
vmovaps %ymm1, 0x2f40(%rsp)
vmovaps %ymm0, 0x2f20(%rsp)
vmovaps 0x2f40(%rsp), %ymm0
vmovaps 0x2f20(%rsp), %ymm1
vpunpckhbw %ymm1, %ymm0, %ymm0 # ymm0 = ymm0[8],ymm1[8],ymm0[9],ymm1[9],ymm0[10],ymm1[10],ymm0[11],ymm1[11],ymm0[12],ymm1[12],ymm0[13],ymm1[13],ymm0[14],ymm1[14],ymm0[15],ymm1[15],ymm0[24],ymm1[24],ymm0[25],ymm1[25],ymm0[26],ymm1[26],ymm0[27],ymm1[27],ymm0[28],ymm1[28],ymm0[29],ymm1[29],ymm0[30],ymm1[30],ymm0[31],ymm1[31]
vmovdqa %ymm0, 0x15a0(%rsp)
movl 0x2184(%rsp), %ecx
shll %ecx
movq 0x1a58(%rsp), %rax
movslq %ecx, %rcx
addq %rcx, %rax
movq %rax, 0x1a58(%rsp)
movq 0x1a58(%rsp), %rdi
movslq 0x2184(%rsp), %rsi
leaq 0x2080(%rsp), %rdx
leaq 0x1660(%rsp), %rcx
leaq 0x15e0(%rsp), %r8
leaq 0x1560(%rsp), %r9
leaq 0x14e0(%rsp), %rax
movq %rax, (%rsp)
vzeroupper
callq 0x8cd320
movq 0x2178(%rsp), %rsi
movl 0x2174(%rsp), %edx
leaq 0x14e0(%rsp), %rdi
callq 0x8cd510
vmovdqa 0x1600(%rsp), %ymm0
vmovdqa %ymm0, 0x15e0(%rsp)
vmovdqa 0x1640(%rsp), %ymm0
vmovdqa %ymm0, 0x1620(%rsp)
vmovdqa 0x1580(%rsp), %ymm0
vmovdqa %ymm0, 0x1560(%rsp)
vmovdqa 0x15c0(%rsp), %ymm0
vmovdqa %ymm0, 0x15a0(%rsp)
movl 0x2174(%rsp), %ecx
shll %ecx
movq 0x2178(%rsp), %rax
movslq %ecx, %rcx
addq %rcx, %rax
movq %rax, 0x2178(%rsp)
movl 0x2154(%rsp), %eax
subl $0x2, %eax
movl %eax, 0x2154(%rsp)
cmpl $0x0, 0x2154(%rsp)
jne 0x8be60b
jmp 0x8bea25
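# Fall-through for wide blocks in this 4-tap path: an outer column loop over
# 0x2158(%rsp) steps 32 pixels at a time up to the width in 0x2170(%rsp),
# keeping per-column source/destination pointers at 0x12d8/0x12d0(%rsp) and
# running the same two-row inner loop for each column strip.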
movl $0x0, 0x2158(%rsp)
movq 0x1a58(%rsp), %rax
movslq 0x2158(%rsp), %rcx
addq %rcx, %rax
movq %rax, 0x12d8(%rsp)
movq 0x2178(%rsp), %rax
movslq 0x2158(%rsp), %rcx
addq %rcx, %rax
movq %rax, 0x12d0(%rsp)
movq 0x12d8(%rsp), %rax
movq %rax, 0x3a28(%rsp)
movq 0x3a28(%rsp), %rax
vmovups (%rax), %ymm0
vmovaps %ymm0, 0x1460(%rsp)
movq 0x12d8(%rsp), %rax
movslq 0x2184(%rsp), %rcx
addq %rcx, %rax
movq %rax, 0x3a20(%rsp)
movq 0x3a20(%rsp), %rax
vmovups (%rax), %ymm0
vmovaps %ymm0, 0x1480(%rsp)
movq 0x12d8(%rsp), %rax
movl 0x2184(%rsp), %ecx
addl %ecx, %ecx
movslq %ecx, %rcx
addq %rcx, %rax
movq %rax, 0x3a18(%rsp)
movq 0x3a18(%rsp), %rax
vmovups (%rax), %ymm0
vmovaps %ymm0, 0x14a0(%rsp)
vmovaps 0x1460(%rsp), %ymm1
vmovaps 0x1480(%rsp), %ymm0
vmovaps %ymm1, 0x29c0(%rsp)
vmovaps %ymm0, 0x29a0(%rsp)
vmovaps 0x29c0(%rsp), %ymm0
vmovaps 0x29a0(%rsp), %ymm1
vpunpcklbw %ymm1, %ymm0, %ymm0 # ymm0 = ymm0[0],ymm1[0],ymm0[1],ymm1[1],ymm0[2],ymm1[2],ymm0[3],ymm1[3],ymm0[4],ymm1[4],ymm0[5],ymm1[5],ymm0[6],ymm1[6],ymm0[7],ymm1[7],ymm0[16],ymm1[16],ymm0[17],ymm1[17],ymm0[18],ymm1[18],ymm0[19],ymm1[19],ymm0[20],ymm1[20],ymm0[21],ymm1[21],ymm0[22],ymm1[22],ymm0[23],ymm1[23]
vmovaps %ymm0, 0x13e0(%rsp)
vmovaps 0x1460(%rsp), %ymm1
vmovaps 0x1480(%rsp), %ymm0
vmovaps %ymm1, 0x3000(%rsp)
vmovaps %ymm0, 0x2fe0(%rsp)
vmovaps 0x3000(%rsp), %ymm0
vmovaps 0x2fe0(%rsp), %ymm1
vpunpckhbw %ymm1, %ymm0, %ymm0 # ymm0 = ymm0[8],ymm1[8],ymm0[9],ymm1[9],ymm0[10],ymm1[10],ymm0[11],ymm1[11],ymm0[12],ymm1[12],ymm0[13],ymm1[13],ymm0[14],ymm1[14],ymm0[15],ymm1[15],ymm0[24],ymm1[24],ymm0[25],ymm1[25],ymm0[26],ymm1[26],ymm0[27],ymm1[27],ymm0[28],ymm1[28],ymm0[29],ymm1[29],ymm0[30],ymm1[30],ymm0[31],ymm1[31]
vmovaps %ymm0, 0x1420(%rsp)
vmovaps 0x1480(%rsp), %ymm1
vmovaps 0x14a0(%rsp), %ymm0
vmovaps %ymm1, 0x2980(%rsp)
vmovaps %ymm0, 0x2960(%rsp)
vmovaps 0x2980(%rsp), %ymm0
vmovaps 0x2960(%rsp), %ymm1
vpunpcklbw %ymm1, %ymm0, %ymm0 # ymm0 = ymm0[0],ymm1[0],ymm0[1],ymm1[1],ymm0[2],ymm1[2],ymm0[3],ymm1[3],ymm0[4],ymm1[4],ymm0[5],ymm1[5],ymm0[6],ymm1[6],ymm0[7],ymm1[7],ymm0[16],ymm1[16],ymm0[17],ymm1[17],ymm0[18],ymm1[18],ymm0[19],ymm1[19],ymm0[20],ymm1[20],ymm0[21],ymm1[21],ymm0[22],ymm1[22],ymm0[23],ymm1[23]
vmovaps %ymm0, 0x1360(%rsp)
vmovaps 0x1480(%rsp), %ymm1
vmovaps 0x14a0(%rsp), %ymm0
vmovaps %ymm1, 0x2fc0(%rsp)
vmovaps %ymm0, 0x2fa0(%rsp)
vmovaps 0x2fc0(%rsp), %ymm0
vmovaps 0x2fa0(%rsp), %ymm1
vpunpckhbw %ymm1, %ymm0, %ymm0 # ymm0 = ymm0[8],ymm1[8],ymm0[9],ymm1[9],ymm0[10],ymm1[10],ymm0[11],ymm1[11],ymm0[12],ymm1[12],ymm0[13],ymm1[13],ymm0[14],ymm1[14],ymm0[15],ymm1[15],ymm0[24],ymm1[24],ymm0[25],ymm1[25],ymm0[26],ymm1[26],ymm0[27],ymm1[27],ymm0[28],ymm1[28],ymm0[29],ymm1[29],ymm0[30],ymm1[30],ymm0[31],ymm1[31]
vmovdqa %ymm0, 0x13a0(%rsp)
movl 0x216c(%rsp), %eax
movl %eax, 0x2154(%rsp)
movl 0x2184(%rsp), %ecx
shll %ecx
movq 0x12d8(%rsp), %rax
movslq %ecx, %rcx
addq %rcx, %rax
movq %rax, 0x12d8(%rsp)
movq 0x12d8(%rsp), %rdi
movslq 0x2184(%rsp), %rsi
leaq 0x2080(%rsp), %rdx
leaq 0x1460(%rsp), %rcx
leaq 0x13e0(%rsp), %r8
leaq 0x1360(%rsp), %r9
leaq 0x12e0(%rsp), %rax
movq %rax, (%rsp)
vzeroupper
callq 0x8cd320
movq 0x12d0(%rsp), %rsi
movl 0x2174(%rsp), %edx
leaq 0x12e0(%rsp), %rdi
callq 0x8cd510
vmovdqa 0x1400(%rsp), %ymm0
vmovdqa %ymm0, 0x13e0(%rsp)
vmovdqa 0x1440(%rsp), %ymm0
vmovdqa %ymm0, 0x1420(%rsp)
vmovdqa 0x1380(%rsp), %ymm0
vmovdqa %ymm0, 0x1360(%rsp)
vmovdqa 0x13c0(%rsp), %ymm0
vmovdqa %ymm0, 0x13a0(%rsp)
movl 0x2174(%rsp), %ecx
shll %ecx
movq 0x12d0(%rsp), %rax
movslq %ecx, %rcx
addq %rcx, %rax
movq %rax, 0x12d0(%rsp)
movl 0x2154(%rsp), %eax
subl $0x2, %eax
movl %eax, 0x2154(%rsp)
cmpl $0x0, 0x2154(%rsp)
jne 0x8be8f9
movl 0x2158(%rsp), %eax
addl $0x20, %eax
movl %eax, 0x2158(%rsp)
movl 0x2158(%rsp), %eax
cmpl 0x2170(%rsp), %eax
jl 0x8be720
jmp 0x8bea25
jmp 0x8bea27
jmp 0x8bea29
jmp 0x8bea2b
jmp 0x8c1ae2
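# Likely the 6-tap case: the source pointer is rewound by two strides
# (0x2188 - 2*0x2184) so five rows can be preloaded before the two-row loop;
# coefficient setup mirrors the 4-tap path (0x8cd560 for widths <= 4,
# 0x8cdba0 for wider blocks).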
cmpl $0x6, 0x207c(%rsp)
jne 0x8bfee3
movq 0x2188(%rsp), %rax
movl 0x2184(%rsp), %ecx
shll %ecx
movslq %ecx, %rdx
xorl %ecx, %ecx
subq %rdx, %rcx
addq %rcx, %rax
movq %rax, 0x12c8(%rsp)
cmpl $0x4, 0x2170(%rsp)
jg 0x8bf257
movq 0x2160(%rsp), %rdi
movl 0x215c(%rsp), %esi
leaq 0x2110(%rsp), %rdx
callq 0x8cd560
movl 0x216c(%rsp), %eax
movl %eax, 0x2154(%rsp)
cmpl $0x2, 0x2170(%rsp)
jne 0x8bee81
movq 0x12c8(%rsp), %rdi
callq 0x8ccc70
cwtl
movl %eax, 0x23ec(%rsp)
vmovd 0x23ec(%rsp), %xmm0
vmovdqa %xmm0, 0x23d0(%rsp)
vmovdqa 0x23d0(%rsp), %xmm0
vmovdqa %xmm0, 0x1260(%rsp)
movq 0x12c8(%rsp), %rdi
movslq 0x2184(%rsp), %rax
addq %rax, %rdi
callq 0x8ccc70
cwtl
movl %eax, 0x23cc(%rsp)
vmovd 0x23cc(%rsp), %xmm0
vmovdqa %xmm0, 0x23b0(%rsp)
vmovdqa 0x23b0(%rsp), %xmm0
vmovdqa %xmm0, 0x1270(%rsp)
movq 0x12c8(%rsp), %rdi
movl 0x2184(%rsp), %eax
addl %eax, %eax
cltq
addq %rax, %rdi
callq 0x8ccc70
cwtl
movl %eax, 0x23ac(%rsp)
vmovd 0x23ac(%rsp), %xmm0
vmovdqa %xmm0, 0x2390(%rsp)
vmovdqa 0x2390(%rsp), %xmm0
vmovdqa %xmm0, 0x1280(%rsp)
movq 0x12c8(%rsp), %rdi
movl 0x2184(%rsp), %ecx
movl %ecx, %eax
leal (%rax,%rax,2), %eax
cltq
addq %rax, %rdi
callq 0x8ccc70
cwtl
movl %eax, 0x238c(%rsp)
vmovd 0x238c(%rsp), %xmm0
vmovdqa %xmm0, 0x2370(%rsp)
vmovdqa 0x2370(%rsp), %xmm0
vmovdqa %xmm0, 0x1290(%rsp)
movq 0x12c8(%rsp), %rdi
movl 0x2184(%rsp), %eax
shll $0x2, %eax
cltq
addq %rax, %rdi
callq 0x8ccc70
cwtl
movl %eax, 0x236c(%rsp)
vmovd 0x236c(%rsp), %xmm0
vmovdqa %xmm0, 0x2350(%rsp)
vmovdqa 0x2350(%rsp), %xmm0
vmovdqa %xmm0, 0x12a0(%rsp)
vmovdqa 0x1260(%rsp), %xmm1
vmovdqa 0x1270(%rsp), %xmm0
vmovdqa %xmm1, 0x3c40(%rsp)
vmovdqa %xmm0, 0x3c30(%rsp)
vmovdqa 0x3c40(%rsp), %xmm0
vmovdqa 0x3c30(%rsp), %xmm1
vpunpcklwd %xmm1, %xmm0, %xmm0 # xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1],xmm0[2],xmm1[2],xmm0[3],xmm1[3]
vmovdqa %xmm0, 0x1220(%rsp)
vmovdqa 0x1270(%rsp), %xmm1
vmovdqa 0x1280(%rsp), %xmm0
vmovdqa %xmm1, 0x3c20(%rsp)
vmovdqa %xmm0, 0x3c10(%rsp)
vmovdqa 0x3c20(%rsp), %xmm0
vmovdqa 0x3c10(%rsp), %xmm1
vpunpcklwd %xmm1, %xmm0, %xmm0 # xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1],xmm0[2],xmm1[2],xmm0[3],xmm1[3]
vmovdqa %xmm0, 0x1210(%rsp)
vmovdqa 0x1280(%rsp), %xmm1
vmovdqa 0x1290(%rsp), %xmm0
vmovdqa %xmm1, 0x3c00(%rsp)
vmovdqa %xmm0, 0x3bf0(%rsp)
vmovdqa 0x3c00(%rsp), %xmm0
vmovdqa 0x3bf0(%rsp), %xmm1
vpunpcklwd %xmm1, %xmm0, %xmm0 # xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1],xmm0[2],xmm1[2],xmm0[3],xmm1[3]
vmovdqa %xmm0, 0x1200(%rsp)
vmovdqa 0x1290(%rsp), %xmm1
vmovdqa 0x12a0(%rsp), %xmm0
vmovdqa %xmm1, 0x3be0(%rsp)
vmovdqa %xmm0, 0x3bd0(%rsp)
vmovdqa 0x3be0(%rsp), %xmm0
vmovdqa 0x3bd0(%rsp), %xmm1
vpunpcklwd %xmm1, %xmm0, %xmm0 # xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1],xmm0[2],xmm1[2],xmm0[3],xmm1[3]
vmovdqa %xmm0, 0x11f0(%rsp)
vmovdqa 0x1220(%rsp), %xmm1
vmovdqa 0x1210(%rsp), %xmm0
vmovdqa %xmm1, 0x3840(%rsp)
vmovdqa %xmm0, 0x3830(%rsp)
vmovdqa 0x3840(%rsp), %xmm0
vmovdqa 0x3830(%rsp), %xmm1
vpunpcklbw %xmm1, %xmm0, %xmm0 # xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1],xmm0[2],xmm1[2],xmm0[3],xmm1[3],xmm0[4],xmm1[4],xmm0[5],xmm1[5],xmm0[6],xmm1[6],xmm0[7],xmm1[7]
vmovdqa %xmm0, 0x1230(%rsp)
vmovdqa 0x1200(%rsp), %xmm1
vmovdqa 0x11f0(%rsp), %xmm0
vmovdqa %xmm1, 0x3820(%rsp)
vmovdqa %xmm0, 0x3810(%rsp)
vmovdqa 0x3820(%rsp), %xmm0
vmovdqa 0x3810(%rsp), %xmm1
vpunpcklbw %xmm1, %xmm0, %xmm0 # xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1],xmm0[2],xmm1[2],xmm0[3],xmm1[3],xmm0[4],xmm1[4],xmm0[5],xmm1[5],xmm0[6],xmm1[6],xmm0[7],xmm1[7]
vmovdqa %xmm0, 0x1240(%rsp)
movl 0x2184(%rsp), %ecx
shll %ecx
movq 0x12c8(%rsp), %rax
movslq %ecx, %rcx
addq %rcx, %rax
movq %rax, 0x12c8(%rsp)
movq 0x12c8(%rsp), %rdi
movslq 0x2184(%rsp), %rsi
leaq 0x2110(%rsp), %rdx
leaq 0x1260(%rsp), %rcx
leaq 0x1230(%rsp), %r8
callq 0x8cd8e0
vmovdqa %xmm0, 0x11e0(%rsp)
vmovdqa 0x11e0(%rsp), %xmm0
callq 0x8cc000
vmovdqa %xmm0, 0x11d0(%rsp)
vmovdqa 0x11d0(%rsp), %xmm0
movq 0x2178(%rsp), %rdi
movslq 0x2174(%rsp), %rsi
callq 0x8cc100
vmovdqa 0x1240(%rsp), %xmm0
vmovdqa %xmm0, 0x1230(%rsp)
vmovdqa 0x1250(%rsp), %xmm0
vmovdqa %xmm0, 0x1240(%rsp)
movl 0x2174(%rsp), %ecx
shll %ecx
movq 0x2178(%rsp), %rax
movslq %ecx, %rcx
addq %rcx, %rax
movq %rax, 0x2178(%rsp)
movl 0x2154(%rsp), %eax
subl $0x2, %eax
movl %eax, 0x2154(%rsp)
cmpl $0x0, 0x2154(%rsp)
jne 0x8bed90
jmp 0x8bf252
movq 0x12c8(%rsp), %rdi
callq 0x8cc180
movl %eax, 0x248c(%rsp)
vmovd 0x248c(%rsp), %xmm0
vmovdqa %xmm0, 0x2470(%rsp)
vmovdqa 0x2470(%rsp), %xmm0
vmovdqa %xmm0, 0x1170(%rsp)
movq 0x12c8(%rsp), %rdi
movslq 0x2184(%rsp), %rax
addq %rax, %rdi
callq 0x8cc180
movl %eax, 0x246c(%rsp)
vmovd 0x246c(%rsp), %xmm0
vmovdqa %xmm0, 0x2450(%rsp)
vmovdqa 0x2450(%rsp), %xmm0
vmovdqa %xmm0, 0x1180(%rsp)
movq 0x12c8(%rsp), %rdi
movl 0x2184(%rsp), %eax
addl %eax, %eax
cltq
addq %rax, %rdi
callq 0x8cc180
movl %eax, 0x244c(%rsp)
vmovd 0x244c(%rsp), %xmm0
vmovdqa %xmm0, 0x2430(%rsp)
vmovdqa 0x2430(%rsp), %xmm0
vmovdqa %xmm0, 0x1190(%rsp)
movq 0x12c8(%rsp), %rdi
movl 0x2184(%rsp), %ecx
movl %ecx, %eax
leal (%rax,%rax,2), %eax
cltq
addq %rax, %rdi
callq 0x8cc180
movl %eax, 0x242c(%rsp)
vmovd 0x242c(%rsp), %xmm0
vmovdqa %xmm0, 0x2410(%rsp)
vmovdqa 0x2410(%rsp), %xmm0
vmovdqa %xmm0, 0x11a0(%rsp)
movq 0x12c8(%rsp), %rdi
movl 0x2184(%rsp), %eax
shll $0x2, %eax
cltq
addq %rax, %rdi
callq 0x8cc180
movl %eax, 0x240c(%rsp)
vmovd 0x240c(%rsp), %xmm0
vmovdqa %xmm0, 0x23f0(%rsp)
vmovdqa 0x23f0(%rsp), %xmm0
vmovdqa %xmm0, 0x11b0(%rsp)
vmovdqa 0x1170(%rsp), %xmm1
vmovdqa 0x1180(%rsp), %xmm0
vmovdqa %xmm1, 0x3dc0(%rsp)
vmovdqa %xmm0, 0x3db0(%rsp)
vmovdqa 0x3dc0(%rsp), %xmm0
vmovdqa 0x3db0(%rsp), %xmm1
vpunpckldq %xmm1, %xmm0, %xmm0 # xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1]
vmovdqa %xmm0, 0x1130(%rsp)
vmovdqa 0x1180(%rsp), %xmm1
vmovdqa 0x1190(%rsp), %xmm0
vmovdqa %xmm1, 0x3da0(%rsp)
vmovdqa %xmm0, 0x3d90(%rsp)
vmovdqa 0x3da0(%rsp), %xmm0
vmovdqa 0x3d90(%rsp), %xmm1
vpunpckldq %xmm1, %xmm0, %xmm0 # xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1]
vmovdqa %xmm0, 0x1120(%rsp)
vmovdqa 0x1190(%rsp), %xmm1
vmovdqa 0x11a0(%rsp), %xmm0
vmovdqa %xmm1, 0x3d80(%rsp)
vmovdqa %xmm0, 0x3d70(%rsp)
vmovdqa 0x3d80(%rsp), %xmm0
vmovdqa 0x3d70(%rsp), %xmm1
vpunpckldq %xmm1, %xmm0, %xmm0 # xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1]
vmovdqa %xmm0, 0x1110(%rsp)
vmovdqa 0x11a0(%rsp), %xmm1
vmovdqa 0x11b0(%rsp), %xmm0
vmovdqa %xmm1, 0x3d60(%rsp)
vmovdqa %xmm0, 0x3d50(%rsp)
vmovdqa 0x3d60(%rsp), %xmm0
vmovdqa 0x3d50(%rsp), %xmm1
vpunpckldq %xmm1, %xmm0, %xmm0 # xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1]
vmovdqa %xmm0, 0x1100(%rsp)
vmovdqa 0x1130(%rsp), %xmm1
vmovdqa 0x1120(%rsp), %xmm0
vmovdqa %xmm1, 0x3880(%rsp)
vmovdqa %xmm0, 0x3870(%rsp)
vmovdqa 0x3880(%rsp), %xmm0
vmovdqa 0x3870(%rsp), %xmm1
vpunpcklbw %xmm1, %xmm0, %xmm0 # xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1],xmm0[2],xmm1[2],xmm0[3],xmm1[3],xmm0[4],xmm1[4],xmm0[5],xmm1[5],xmm0[6],xmm1[6],xmm0[7],xmm1[7]
vmovdqa %xmm0, 0x1140(%rsp)
vmovdqa 0x1110(%rsp), %xmm1
vmovdqa 0x1100(%rsp), %xmm0
vmovdqa %xmm1, 0x3860(%rsp)
vmovdqa %xmm0, 0x3850(%rsp)
vmovdqa 0x3860(%rsp), %xmm0
vmovdqa 0x3850(%rsp), %xmm1
vpunpcklbw %xmm1, %xmm0, %xmm0 # xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1],xmm0[2],xmm1[2],xmm0[3],xmm1[3],xmm0[4],xmm1[4],xmm0[5],xmm1[5],xmm0[6],xmm1[6],xmm0[7],xmm1[7]
vmovdqa %xmm0, 0x1150(%rsp)
movl 0x2184(%rsp), %ecx
shll %ecx
movq 0x12c8(%rsp), %rax
movslq %ecx, %rcx
addq %rcx, %rax
movq %rax, 0x12c8(%rsp)
movq 0x12c8(%rsp), %rdi
movslq 0x2184(%rsp), %rsi
leaq 0x2110(%rsp), %rdx
leaq 0x1170(%rsp), %rcx
leaq 0x1140(%rsp), %r8
callq 0x8cda40
vmovdqa %xmm0, 0x10f0(%rsp)
vmovdqa 0x10f0(%rsp), %xmm0
callq 0x8cc000
vmovdqa %xmm0, 0x10e0(%rsp)
vmovdqa 0x10e0(%rsp), %xmm0
movq 0x2178(%rsp), %rdi
movslq 0x2174(%rsp), %rsi
callq 0x8cc2f0
vmovdqa 0x1150(%rsp), %xmm0
vmovdqa %xmm0, 0x1140(%rsp)
vmovdqa 0x1160(%rsp), %xmm0
vmovdqa %xmm0, 0x1150(%rsp)
movl 0x2174(%rsp), %ecx
shll %ecx
movq 0x2178(%rsp), %rax
movslq %ecx, %rcx
addq %rcx, %rax
movq %rax, 0x2178(%rsp)
movl 0x2154(%rsp), %eax
subl $0x2, %eax
movl %eax, 0x2154(%rsp)
cmpl $0x0, 0x2154(%rsp)
jne 0x8bf164
jmp 0x8bf252
jmp 0x8bfede
movq 0x2160(%rsp), %rdi
movl 0x215c(%rsp), %esi
leaq 0x2080(%rsp), %rdx
callq 0x8cdba0
cmpl $0x8, 0x2170(%rsp)
jne 0x8bf616
movq 0x12c8(%rsp), %rax
movq %rax, 0x3688(%rsp)
movq 0x3688(%rsp), %rax
vmovq (%rax), %xmm0
vmovdqa %xmm0, 0x3670(%rsp)
vmovdqa 0x3670(%rsp), %xmm0
vmovdqa %xmm0, 0x1080(%rsp)
movq 0x12c8(%rsp), %rax
movslq 0x2184(%rsp), %rcx
addq %rcx, %rax
movq %rax, 0x3668(%rsp)
movq 0x3668(%rsp), %rax
vmovq (%rax), %xmm0
vmovdqa %xmm0, 0x3650(%rsp)
vmovdqa 0x3650(%rsp), %xmm0
vmovdqa %xmm0, 0x1090(%rsp)
movq 0x12c8(%rsp), %rax
movl 0x2184(%rsp), %ecx
addl %ecx, %ecx
movslq %ecx, %rcx
addq %rcx, %rax
movq %rax, 0x3648(%rsp)
movq 0x3648(%rsp), %rax
vmovq (%rax), %xmm0
vmovdqa %xmm0, 0x3630(%rsp)
vmovdqa 0x3630(%rsp), %xmm0
vmovdqa %xmm0, 0x10a0(%rsp)
movq 0x12c8(%rsp), %rax
movl 0x2184(%rsp), %edx
movl %edx, %ecx
leal (%rcx,%rcx,2), %ecx
movslq %ecx, %rcx
addq %rcx, %rax
movq %rax, 0x3628(%rsp)
movq 0x3628(%rsp), %rax
vmovq (%rax), %xmm0
vmovdqa %xmm0, 0x3610(%rsp)
vmovdqa 0x3610(%rsp), %xmm0
vmovdqa %xmm0, 0x10b0(%rsp)
movq 0x12c8(%rsp), %rax
movl 0x2184(%rsp), %ecx
shll $0x2, %ecx
movslq %ecx, %rcx
addq %rcx, %rax
movq %rax, 0x3608(%rsp)
movq 0x3608(%rsp), %rax
vmovq (%rax), %xmm0
vmovdqa %xmm0, 0x35f0(%rsp)
vmovdqa 0x35f0(%rsp), %xmm0
vmovdqa %xmm0, 0x10c0(%rsp)
vmovdqa 0x1080(%rsp), %xmm0
vmovdqa %xmm0, 0x2750(%rsp)
vmovdqa 0x2750(%rsp), %xmm0
vmovdqa 0x1090(%rsp), %xmm1
vmovdqa %xmm1, 0x1010(%rsp)
vmovdqa %xmm0, 0x1000(%rsp)
vmovdqa 0x1090(%rsp), %xmm0
vmovdqa %xmm0, 0x2740(%rsp)
vmovdqa 0x2740(%rsp), %xmm0
vmovdqa 0x10a0(%rsp), %xmm1
vmovdqa %xmm1, 0xff0(%rsp)
vmovdqa %xmm0, 0xfe0(%rsp)
vmovdqa 0x10a0(%rsp), %xmm0
vmovdqa %xmm0, 0x2730(%rsp)
vmovdqa 0x2730(%rsp), %xmm0
vmovdqa 0x10b0(%rsp), %xmm1
vmovdqa %xmm1, 0xfd0(%rsp)
vmovdqa %xmm0, 0xfc0(%rsp)
vmovdqa 0x10b0(%rsp), %xmm0
vmovdqa %xmm0, 0x2720(%rsp)
vmovdqa 0x2720(%rsp), %xmm0
vmovdqa 0x10c0(%rsp), %xmm1
vmovdqa %xmm1, 0xfb0(%rsp)
vmovdqa %xmm0, 0xfa0(%rsp)
vmovaps 0x1000(%rsp), %ymm1
vmovaps 0xfe0(%rsp), %ymm0
vmovaps %ymm1, 0x2a40(%rsp)
vmovaps %ymm0, 0x2a20(%rsp)
vmovaps 0x2a40(%rsp), %ymm0
vmovaps 0x2a20(%rsp), %ymm1
vpunpcklbw %ymm1, %ymm0, %ymm0 # ymm0 = ymm0[0],ymm1[0],ymm0[1],ymm1[1],ymm0[2],ymm1[2],ymm0[3],ymm1[3],ymm0[4],ymm1[4],ymm0[5],ymm1[5],ymm0[6],ymm1[6],ymm0[7],ymm1[7],ymm0[16],ymm1[16],ymm0[17],ymm1[17],ymm0[18],ymm1[18],ymm0[19],ymm1[19],ymm0[20],ymm1[20],ymm0[21],ymm1[21],ymm0[22],ymm1[22],ymm0[23],ymm1[23]
vmovaps %ymm0, 0x1020(%rsp)
vmovaps 0xfc0(%rsp), %ymm1
vmovaps 0xfa0(%rsp), %ymm0
vmovaps %ymm1, 0x2a00(%rsp)
vmovaps %ymm0, 0x29e0(%rsp)
vmovaps 0x2a00(%rsp), %ymm0
vmovaps 0x29e0(%rsp), %ymm1
vpunpcklbw %ymm1, %ymm0, %ymm0 # ymm0 = ymm0[0],ymm1[0],ymm0[1],ymm1[1],ymm0[2],ymm1[2],ymm0[3],ymm1[3],ymm0[4],ymm1[4],ymm0[5],ymm1[5],ymm0[6],ymm1[6],ymm0[7],ymm1[7],ymm0[16],ymm1[16],ymm0[17],ymm1[17],ymm0[18],ymm1[18],ymm0[19],ymm1[19],ymm0[20],ymm1[20],ymm0[21],ymm1[21],ymm0[22],ymm1[22],ymm0[23],ymm1[23]
vmovdqa %ymm0, 0x1040(%rsp)
movl 0x216c(%rsp), %eax
movl %eax, 0x2154(%rsp)
movl 0x2184(%rsp), %ecx
shll %ecx
movq 0x12c8(%rsp), %rax
movslq %ecx, %rcx
addq %rcx, %rax
movq %rax, 0x12c8(%rsp)
movq 0x12c8(%rsp), %rdi
movslq 0x2184(%rsp), %rsi
leaq 0x2080(%rsp), %rdx
leaq 0x1080(%rsp), %rcx
leaq 0x1020(%rsp), %r8
callq 0x8cdc20
vmovdqa %ymm0, 0xf80(%rsp)
vmovdqa 0xf80(%rsp), %ymm0
movq 0x2178(%rsp), %rdi
movslq 0x2174(%rsp), %rsi
callq 0x8cd130
vmovdqa 0x1040(%rsp), %ymm0
vmovdqa %ymm0, 0x1020(%rsp)
vmovdqa 0x1060(%rsp), %ymm0
vmovdqa %ymm0, 0x1040(%rsp)
movl 0x2174(%rsp), %ecx
shll %ecx
movq 0x2178(%rsp), %rax
movslq %ecx, %rcx
addq %rcx, %rax
movq %rax, 0x2178(%rsp)
movl 0x2154(%rsp), %eax
subl $0x2, %eax
movl %eax, 0x2154(%rsp)
cmpl $0x0, 0x2154(%rsp)
jne 0x8bf53c
jmp 0x8bfedc
cmpl $0x10, 0x2170(%rsp)
jne 0x8bfa0a
movq 0x12c8(%rsp), %rax
movq %rax, 0x26a0(%rsp)
movq 0x26a0(%rsp), %rax
vmovdqu (%rax), %xmm0
vmovdqa %xmm0, 0xf20(%rsp)
movq 0x12c8(%rsp), %rax
movslq 0x2184(%rsp), %rcx
addq %rcx, %rax
movq %rax, 0x2698(%rsp)
movq 0x2698(%rsp), %rax
vmovdqu (%rax), %xmm0
vmovdqa %xmm0, 0xf30(%rsp)
movq 0x12c8(%rsp), %rax
movl 0x2184(%rsp), %ecx
addl %ecx, %ecx
movslq %ecx, %rcx
addq %rcx, %rax
movq %rax, 0x2690(%rsp)
movq 0x2690(%rsp), %rax
vmovdqu (%rax), %xmm0
vmovdqa %xmm0, 0xf40(%rsp)
movq 0x12c8(%rsp), %rax
movl 0x2184(%rsp), %edx
movl %edx, %ecx
leal (%rcx,%rcx,2), %ecx
movslq %ecx, %rcx
addq %rcx, %rax
movq %rax, 0x2688(%rsp)
movq 0x2688(%rsp), %rax
vmovdqu (%rax), %xmm0
vmovdqa %xmm0, 0xf50(%rsp)
movq 0x12c8(%rsp), %rax
movl 0x2184(%rsp), %ecx
shll $0x2, %ecx
movslq %ecx, %rcx
addq %rcx, %rax
movq %rax, 0x2680(%rsp)
movq 0x2680(%rsp), %rax
vmovdqu (%rax), %xmm0
vmovdqa %xmm0, 0xf60(%rsp)
vmovdqa 0xf20(%rsp), %xmm0
vmovdqa %xmm0, 0x2790(%rsp)
vmovdqa 0x2790(%rsp), %xmm0
vmovdqa 0xf30(%rsp), %xmm1
vmovdqa %xmm1, 0xe10(%rsp)
vmovdqa %xmm0, 0xe00(%rsp)
vmovdqa 0xf30(%rsp), %xmm0
vmovdqa %xmm0, 0x2780(%rsp)
vmovdqa 0x2780(%rsp), %xmm0
vmovdqa 0xf40(%rsp), %xmm1
vmovdqa %xmm1, 0xdf0(%rsp)
vmovdqa %xmm0, 0xde0(%rsp)
vmovdqa 0xf40(%rsp), %xmm0
vmovdqa %xmm0, 0x2770(%rsp)
vmovdqa 0x2770(%rsp), %xmm0
vmovdqa 0xf50(%rsp), %xmm1
vmovdqa %xmm1, 0xdd0(%rsp)
vmovdqa %xmm0, 0xdc0(%rsp)
vmovdqa 0xf50(%rsp), %xmm0
vmovdqa %xmm0, 0x2760(%rsp)
vmovdqa 0x2760(%rsp), %xmm0
vmovdqa 0xf60(%rsp), %xmm1
vmovdqa %xmm1, 0xdb0(%rsp)
vmovdqa %xmm0, 0xda0(%rsp)
vmovaps 0xe00(%rsp), %ymm1
vmovaps 0xde0(%rsp), %ymm0
vmovaps %ymm1, 0x2ac0(%rsp)
vmovaps %ymm0, 0x2aa0(%rsp)
vmovaps 0x2ac0(%rsp), %ymm0
vmovaps 0x2aa0(%rsp), %ymm1
vpunpcklbw %ymm1, %ymm0, %ymm0 # ymm0 = ymm0[0],ymm1[0],ymm0[1],ymm1[1],ymm0[2],ymm1[2],ymm0[3],ymm1[3],ymm0[4],ymm1[4],ymm0[5],ymm1[5],ymm0[6],ymm1[6],ymm0[7],ymm1[7],ymm0[16],ymm1[16],ymm0[17],ymm1[17],ymm0[18],ymm1[18],ymm0[19],ymm1[19],ymm0[20],ymm1[20],ymm0[21],ymm1[21],ymm0[22],ymm1[22],ymm0[23],ymm1[23]
vmovaps %ymm0, 0xe60(%rsp)
vmovaps 0xdc0(%rsp), %ymm1
vmovaps 0xda0(%rsp), %ymm0
vmovaps %ymm1, 0x2a80(%rsp)
vmovaps %ymm0, 0x2a60(%rsp)
vmovaps 0x2a80(%rsp), %ymm0
vmovaps 0x2a60(%rsp), %ymm1
vpunpcklbw %ymm1, %ymm0, %ymm0 # ymm0 = ymm0[0],ymm1[0],ymm0[1],ymm1[1],ymm0[2],ymm1[2],ymm0[3],ymm1[3],ymm0[4],ymm1[4],ymm0[5],ymm1[5],ymm0[6],ymm1[6],ymm0[7],ymm1[7],ymm0[16],ymm1[16],ymm0[17],ymm1[17],ymm0[18],ymm1[18],ymm0[19],ymm1[19],ymm0[20],ymm1[20],ymm0[21],ymm1[21],ymm0[22],ymm1[22],ymm0[23],ymm1[23]
vmovaps %ymm0, 0xe80(%rsp)
vmovaps 0xe00(%rsp), %ymm1
vmovaps 0xde0(%rsp), %ymm0
vmovaps %ymm1, 0x3080(%rsp)
vmovaps %ymm0, 0x3060(%rsp)
vmovaps 0x3080(%rsp), %ymm0
vmovaps 0x3060(%rsp), %ymm1
vpunpckhbw %ymm1, %ymm0, %ymm0 # ymm0 = ymm0[8],ymm1[8],ymm0[9],ymm1[9],ymm0[10],ymm1[10],ymm0[11],ymm1[11],ymm0[12],ymm1[12],ymm0[13],ymm1[13],ymm0[14],ymm1[14],ymm0[15],ymm1[15],ymm0[24],ymm1[24],ymm0[25],ymm1[25],ymm0[26],ymm1[26],ymm0[27],ymm1[27],ymm0[28],ymm1[28],ymm0[29],ymm1[29],ymm0[30],ymm1[30],ymm0[31],ymm1[31]
vmovaps %ymm0, 0xec0(%rsp)
vmovaps 0xdc0(%rsp), %ymm1
vmovaps 0xda0(%rsp), %ymm0
vmovaps %ymm1, 0x3040(%rsp)
vmovaps %ymm0, 0x3020(%rsp)
vmovaps 0x3040(%rsp), %ymm0
vmovaps 0x3020(%rsp), %ymm1
vpunpckhbw %ymm1, %ymm0, %ymm0 # ymm0 = ymm0[8],ymm1[8],ymm0[9],ymm1[9],ymm0[10],ymm1[10],ymm0[11],ymm1[11],ymm0[12],ymm1[12],ymm0[13],ymm1[13],ymm0[14],ymm1[14],ymm0[15],ymm1[15],ymm0[24],ymm1[24],ymm0[25],ymm1[25],ymm0[26],ymm1[26],ymm0[27],ymm1[27],ymm0[28],ymm1[28],ymm0[29],ymm1[29],ymm0[30],ymm1[30],ymm0[31],ymm1[31]
vmovdqa %ymm0, 0xee0(%rsp)
movl 0x216c(%rsp), %eax
movl %eax, 0x2154(%rsp)
movl 0x2184(%rsp), %ecx
shll %ecx
movq 0x12c8(%rsp), %rax
movslq %ecx, %rcx
addq %rcx, %rax
movq %rax, 0x12c8(%rsp)
movq 0x12c8(%rsp), %rdi
movslq 0x2184(%rsp), %rsi
leaq 0x2080(%rsp), %rdx
leaq 0xf20(%rsp), %rcx
leaq 0xe60(%rsp), %r8
leaq 0xe20(%rsp), %r9
vzeroupper
callq 0x8cdd80
movq 0x2178(%rsp), %rsi
movslq 0x2174(%rsp), %rdx
leaq 0xe20(%rsp), %rdi
callq 0x8cc860
vmovdqa 0xe80(%rsp), %ymm0
vmovdqa %ymm0, 0xe60(%rsp)
vmovdqa 0xea0(%rsp), %ymm0
vmovdqa %ymm0, 0xe80(%rsp)
vmovdqa 0xee0(%rsp), %ymm0
vmovdqa %ymm0, 0xec0(%rsp)
vmovdqa 0xf00(%rsp), %ymm0
vmovdqa %ymm0, 0xee0(%rsp)
movl 0x2174(%rsp), %ecx
shll %ecx
movq 0x2178(%rsp), %rax
movslq %ecx, %rcx
addq %rcx, %rax
movq %rax, 0x2178(%rsp)
movl 0x2154(%rsp), %eax
subl $0x2, %eax
movl %eax, 0x2154(%rsp)
cmpl $0x0, 0x2154(%rsp)
jne 0x8bf90b
jmp 0x8bfeda
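# Wide-block fall-through of the 6-tap path: the same 32-pixel column loop as
# in the 4-tap case, with per-column pointers kept at 0xad8/0xad0(%rsp).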
movl $0x0, 0x2158(%rsp)
movq 0x12c8(%rsp), %rax
movslq 0x2158(%rsp), %rcx
addq %rcx, %rax
movq %rax, 0xad8(%rsp)
movq 0x2178(%rsp), %rax
movslq 0x2158(%rsp), %rcx
addq %rcx, %rax
movq %rax, 0xad0(%rsp)
movq 0xad8(%rsp), %rax
movq %rax, 0x3a50(%rsp)
movq 0x3a50(%rsp), %rax
vmovups (%rax), %ymm0
vmovaps %ymm0, 0xce0(%rsp)
movq 0xad8(%rsp), %rax
movslq 0x2184(%rsp), %rcx
addq %rcx, %rax
movq %rax, 0x3a48(%rsp)
movq 0x3a48(%rsp), %rax
vmovups (%rax), %ymm0
vmovaps %ymm0, 0xd00(%rsp)
movq 0xad8(%rsp), %rax
movl 0x2184(%rsp), %ecx
addl %ecx, %ecx
movslq %ecx, %rcx
addq %rcx, %rax
movq %rax, 0x3a40(%rsp)
movq 0x3a40(%rsp), %rax
vmovups (%rax), %ymm0
vmovaps %ymm0, 0xd20(%rsp)
movq 0xad8(%rsp), %rax
movl 0x2184(%rsp), %edx
movl %edx, %ecx
leal (%rcx,%rcx,2), %ecx
movslq %ecx, %rcx
addq %rcx, %rax
movq %rax, 0x3a38(%rsp)
movq 0x3a38(%rsp), %rax
vmovups (%rax), %ymm0
vmovaps %ymm0, 0xd40(%rsp)
movq 0xad8(%rsp), %rax
movl 0x2184(%rsp), %ecx
shll $0x2, %ecx
movslq %ecx, %rcx
addq %rcx, %rax
movq %rax, 0x3a30(%rsp)
movq 0x3a30(%rsp), %rax
vmovups (%rax), %ymm0
vmovaps %ymm0, 0xd60(%rsp)
vmovaps 0xce0(%rsp), %ymm1
vmovaps 0xd00(%rsp), %ymm0
vmovaps %ymm1, 0x2bc0(%rsp)
vmovaps %ymm0, 0x2ba0(%rsp)
vmovaps 0x2bc0(%rsp), %ymm0
vmovaps 0x2ba0(%rsp), %ymm1
vpunpcklbw %ymm1, %ymm0, %ymm0 # ymm0 = ymm0[0],ymm1[0],ymm0[1],ymm1[1],ymm0[2],ymm1[2],ymm0[3],ymm1[3],ymm0[4],ymm1[4],ymm0[5],ymm1[5],ymm0[6],ymm1[6],ymm0[7],ymm1[7],ymm0[16],ymm1[16],ymm0[17],ymm1[17],ymm0[18],ymm1[18],ymm0[19],ymm1[19],ymm0[20],ymm1[20],ymm0[21],ymm1[21],ymm0[22],ymm1[22],ymm0[23],ymm1[23]
vmovaps %ymm0, 0xc20(%rsp)
vmovaps 0xd20(%rsp), %ymm1
vmovaps 0xd40(%rsp), %ymm0
vmovaps %ymm1, 0x2b80(%rsp)
vmovaps %ymm0, 0x2b60(%rsp)
vmovaps 0x2b80(%rsp), %ymm0
vmovaps 0x2b60(%rsp), %ymm1
vpunpcklbw %ymm1, %ymm0, %ymm0 # ymm0 = ymm0[0],ymm1[0],ymm0[1],ymm1[1],ymm0[2],ymm1[2],ymm0[3],ymm1[3],ymm0[4],ymm1[4],ymm0[5],ymm1[5],ymm0[6],ymm1[6],ymm0[7],ymm1[7],ymm0[16],ymm1[16],ymm0[17],ymm1[17],ymm0[18],ymm1[18],ymm0[19],ymm1[19],ymm0[20],ymm1[20],ymm0[21],ymm1[21],ymm0[22],ymm1[22],ymm0[23],ymm1[23]
vmovaps %ymm0, 0xc40(%rsp)
vmovaps 0xce0(%rsp), %ymm1
vmovaps 0xd00(%rsp), %ymm0
vmovaps %ymm1, 0x3180(%rsp)
vmovaps %ymm0, 0x3160(%rsp)
vmovaps 0x3180(%rsp), %ymm0
vmovaps 0x3160(%rsp), %ymm1
vpunpckhbw %ymm1, %ymm0, %ymm0 # ymm0 = ymm0[8],ymm1[8],ymm0[9],ymm1[9],ymm0[10],ymm1[10],ymm0[11],ymm1[11],ymm0[12],ymm1[12],ymm0[13],ymm1[13],ymm0[14],ymm1[14],ymm0[15],ymm1[15],ymm0[24],ymm1[24],ymm0[25],ymm1[25],ymm0[26],ymm1[26],ymm0[27],ymm1[27],ymm0[28],ymm1[28],ymm0[29],ymm1[29],ymm0[30],ymm1[30],ymm0[31],ymm1[31]
vmovaps %ymm0, 0xc80(%rsp)
vmovaps 0xd20(%rsp), %ymm1
vmovaps 0xd40(%rsp), %ymm0
vmovaps %ymm1, 0x3140(%rsp)
vmovaps %ymm0, 0x3120(%rsp)
vmovaps 0x3140(%rsp), %ymm0
vmovaps 0x3120(%rsp), %ymm1
vpunpckhbw %ymm1, %ymm0, %ymm0 # ymm0 = ymm0[8],ymm1[8],ymm0[9],ymm1[9],ymm0[10],ymm1[10],ymm0[11],ymm1[11],ymm0[12],ymm1[12],ymm0[13],ymm1[13],ymm0[14],ymm1[14],ymm0[15],ymm1[15],ymm0[24],ymm1[24],ymm0[25],ymm1[25],ymm0[26],ymm1[26],ymm0[27],ymm1[27],ymm0[28],ymm1[28],ymm0[29],ymm1[29],ymm0[30],ymm1[30],ymm0[31],ymm1[31]
vmovaps %ymm0, 0xca0(%rsp)
vmovaps 0xd00(%rsp), %ymm1
vmovaps 0xd20(%rsp), %ymm0
vmovaps %ymm1, 0x2b40(%rsp)
vmovaps %ymm0, 0x2b20(%rsp)
vmovaps 0x2b40(%rsp), %ymm0
vmovaps 0x2b20(%rsp), %ymm1
vpunpcklbw %ymm1, %ymm0, %ymm0 # ymm0 = ymm0[0],ymm1[0],ymm0[1],ymm1[1],ymm0[2],ymm1[2],ymm0[3],ymm1[3],ymm0[4],ymm1[4],ymm0[5],ymm1[5],ymm0[6],ymm1[6],ymm0[7],ymm1[7],ymm0[16],ymm1[16],ymm0[17],ymm1[17],ymm0[18],ymm1[18],ymm0[19],ymm1[19],ymm0[20],ymm1[20],ymm0[21],ymm1[21],ymm0[22],ymm1[22],ymm0[23],ymm1[23]
vmovaps %ymm0, 0xb60(%rsp)
vmovaps 0xd40(%rsp), %ymm1
vmovaps 0xd60(%rsp), %ymm0
vmovaps %ymm1, 0x2b00(%rsp)
vmovaps %ymm0, 0x2ae0(%rsp)
vmovaps 0x2b00(%rsp), %ymm0
vmovaps 0x2ae0(%rsp), %ymm1
vpunpcklbw %ymm1, %ymm0, %ymm0 # ymm0 = ymm0[0],ymm1[0],ymm0[1],ymm1[1],ymm0[2],ymm1[2],ymm0[3],ymm1[3],ymm0[4],ymm1[4],ymm0[5],ymm1[5],ymm0[6],ymm1[6],ymm0[7],ymm1[7],ymm0[16],ymm1[16],ymm0[17],ymm1[17],ymm0[18],ymm1[18],ymm0[19],ymm1[19],ymm0[20],ymm1[20],ymm0[21],ymm1[21],ymm0[22],ymm1[22],ymm0[23],ymm1[23]
vmovaps %ymm0, 0xb80(%rsp)
vmovaps 0xd00(%rsp), %ymm1
vmovaps 0xd20(%rsp), %ymm0
vmovaps %ymm1, 0x3100(%rsp)
vmovaps %ymm0, 0x30e0(%rsp)
vmovaps 0x3100(%rsp), %ymm0
vmovaps 0x30e0(%rsp), %ymm1
vpunpckhbw %ymm1, %ymm0, %ymm0 # ymm0 = ymm0[8],ymm1[8],ymm0[9],ymm1[9],ymm0[10],ymm1[10],ymm0[11],ymm1[11],ymm0[12],ymm1[12],ymm0[13],ymm1[13],ymm0[14],ymm1[14],ymm0[15],ymm1[15],ymm0[24],ymm1[24],ymm0[25],ymm1[25],ymm0[26],ymm1[26],ymm0[27],ymm1[27],ymm0[28],ymm1[28],ymm0[29],ymm1[29],ymm0[30],ymm1[30],ymm0[31],ymm1[31]
vmovaps %ymm0, 0xbc0(%rsp)
vmovaps 0xd40(%rsp), %ymm1
vmovaps 0xd60(%rsp), %ymm0
vmovaps %ymm1, 0x30c0(%rsp)
vmovaps %ymm0, 0x30a0(%rsp)
vmovaps 0x30c0(%rsp), %ymm0
vmovaps 0x30a0(%rsp), %ymm1
vpunpckhbw %ymm1, %ymm0, %ymm0 # ymm0 = ymm0[8],ymm1[8],ymm0[9],ymm1[9],ymm0[10],ymm1[10],ymm0[11],ymm1[11],ymm0[12],ymm1[12],ymm0[13],ymm1[13],ymm0[14],ymm1[14],ymm0[15],ymm1[15],ymm0[24],ymm1[24],ymm0[25],ymm1[25],ymm0[26],ymm1[26],ymm0[27],ymm1[27],ymm0[28],ymm1[28],ymm0[29],ymm1[29],ymm0[30],ymm1[30],ymm0[31],ymm1[31]
vmovdqa %ymm0, 0xbe0(%rsp)
movl 0x216c(%rsp), %eax
movl %eax, 0x2154(%rsp)
movl 0x2184(%rsp), %ecx
shll %ecx
movq 0xad8(%rsp), %rax
movslq %ecx, %rcx
addq %rcx, %rax
movq %rax, 0xad8(%rsp)
movq 0xad8(%rsp), %rdi
movslq 0x2184(%rsp), %rsi
leaq 0x2080(%rsp), %rdx
leaq 0xce0(%rsp), %rcx
leaq 0xc20(%rsp), %r8
leaq 0xb60(%rsp), %r9
leaq 0xae0(%rsp), %rax
movq %rax, (%rsp)
vzeroupper
callq 0x8cdf20
movq 0xad0(%rsp), %rsi
movl 0x2174(%rsp), %edx
leaq 0xae0(%rsp), %rdi
callq 0x8cd510
vmovdqa 0xc40(%rsp), %ymm0
vmovdqa %ymm0, 0xc20(%rsp)
vmovdqa 0xc60(%rsp), %ymm0
vmovdqa %ymm0, 0xc40(%rsp)
vmovdqa 0xca0(%rsp), %ymm0
vmovdqa %ymm0, 0xc80(%rsp)
vmovdqa 0xcc0(%rsp), %ymm0
vmovdqa %ymm0, 0xca0(%rsp)
vmovdqa 0xb80(%rsp), %ymm0
vmovdqa %ymm0, 0xb60(%rsp)
vmovdqa 0xba0(%rsp), %ymm0
vmovdqa %ymm0, 0xb80(%rsp)
vmovdqa 0xbe0(%rsp), %ymm0
vmovdqa %ymm0, 0xbc0(%rsp)
vmovdqa 0xc00(%rsp), %ymm0
vmovdqa %ymm0, 0xbe0(%rsp)
movl 0x2174(%rsp), %ecx
shll %ecx
movq 0xad0(%rsp), %rax
movslq %ecx, %rcx
addq %rcx, %rax
movq %rax, 0xad0(%rsp)
movl 0x2154(%rsp), %eax
subl $0x2, %eax
movl %eax, 0x2154(%rsp)
cmpl $0x0, 0x2154(%rsp)
jne 0x8bfd66
movl 0x2158(%rsp), %eax
addl $0x20, %eax
movl %eax, 0x2158(%rsp)
movl 0x2158(%rsp), %eax
cmpl 0x2170(%rsp), %eax
jl 0x8bfa15
jmp 0x8bfeda
jmp 0x8bfedc
jmp 0x8bfede
jmp 0x8c1ae0
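# Likely the 8-tap case: the source pointer is rewound by three strides
# (imull $0x3 by 0x2184) and seven rows are preloaded; widths <= 4 appear to
# prepare the coefficients via the helper at 0x8ce140 into 0x2110(%rsp).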
cmpl $0x8, 0x207c(%rsp)
jne 0x8c1ade
movq 0x2188(%rsp), %rax
imull $0x3, 0x2184(%rsp), %ecx
movslq %ecx, %rdx
xorl %ecx, %ecx
subq %rdx, %rcx
addq %rcx, %rax
movq %rax, 0xac8(%rsp)
cmpl $0x4, 0x2170(%rsp)
jg 0x8c09e9
movq 0x2160(%rsp), %rdi
movl 0x215c(%rsp), %esi
leaq 0x2110(%rsp), %rdx
callq 0x8ce140
movl 0x216c(%rsp), %eax
movl %eax, 0x2154(%rsp)
cmpl $0x2, 0x2170(%rsp)
jne 0x8c04a4
movq 0xac8(%rsp), %rdi
callq 0x8ccc70
cwtl
movl %eax, 0x256c(%rsp)
vmovd 0x256c(%rsp), %xmm0
vmovdqa %xmm0, 0x2550(%rsp)
vmovdqa 0x2550(%rsp), %xmm0
vmovdqa %xmm0, 0xa40(%rsp)
movq 0xac8(%rsp), %rdi
movslq 0x2184(%rsp), %rax
addq %rax, %rdi
callq 0x8ccc70
cwtl
movl %eax, 0x254c(%rsp)
vmovd 0x254c(%rsp), %xmm0
vmovdqa %xmm0, 0x2530(%rsp)
vmovdqa 0x2530(%rsp), %xmm0
vmovdqa %xmm0, 0xa50(%rsp)
movq 0xac8(%rsp), %rdi
movl 0x2184(%rsp), %eax
addl %eax, %eax
cltq
addq %rax, %rdi
callq 0x8ccc70
cwtl
movl %eax, 0x252c(%rsp)
vmovd 0x252c(%rsp), %xmm0
vmovdqa %xmm0, 0x2510(%rsp)
vmovdqa 0x2510(%rsp), %xmm0
vmovdqa %xmm0, 0xa60(%rsp)
movq 0xac8(%rsp), %rdi
movl 0x2184(%rsp), %ecx
movl %ecx, %eax
leal (%rax,%rax,2), %eax
cltq
addq %rax, %rdi
callq 0x8ccc70
cwtl
movl %eax, 0x250c(%rsp)
vmovd 0x250c(%rsp), %xmm0
vmovdqa %xmm0, 0x24f0(%rsp)
vmovdqa 0x24f0(%rsp), %xmm0
vmovdqa %xmm0, 0xa70(%rsp)
movq 0xac8(%rsp), %rdi
movl 0x2184(%rsp), %eax
shll $0x2, %eax
cltq
addq %rax, %rdi
callq 0x8ccc70
cwtl
movl %eax, 0x24ec(%rsp)
vmovd 0x24ec(%rsp), %xmm0
vmovdqa %xmm0, 0x24d0(%rsp)
vmovdqa 0x24d0(%rsp), %xmm0
vmovdqa %xmm0, 0xa80(%rsp)
movq 0xac8(%rsp), %rdi
movl 0x2184(%rsp), %ecx
movl %ecx, %eax
leal (%rax,%rax,4), %eax
cltq
addq %rax, %rdi
callq 0x8ccc70
cwtl
movl %eax, 0x24cc(%rsp)
vmovd 0x24cc(%rsp), %xmm0
vmovdqa %xmm0, 0x24b0(%rsp)
vmovdqa 0x24b0(%rsp), %xmm0
vmovdqa %xmm0, 0xa90(%rsp)
movq 0xac8(%rsp), %rdi
movl 0x2184(%rsp), %ecx
addl %ecx, %ecx
movl %ecx, %eax
leal (%rax,%rax,2), %eax
cltq
addq %rax, %rdi
callq 0x8ccc70
cwtl
movl %eax, 0x24ac(%rsp)
vmovd 0x24ac(%rsp), %xmm0
vmovdqa %xmm0, 0x2490(%rsp)
vmovdqa 0x2490(%rsp), %xmm0
vmovdqa %xmm0, 0xaa0(%rsp)
vmovdqa 0xa40(%rsp), %xmm1
vmovdqa 0xa50(%rsp), %xmm0
vmovdqa %xmm1, 0x3d00(%rsp)
vmovdqa %xmm0, 0x3cf0(%rsp)
vmovdqa 0x3d00(%rsp), %xmm0
vmovdqa 0x3cf0(%rsp), %xmm1
vpunpcklwd %xmm1, %xmm0, %xmm0 # xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1],xmm0[2],xmm1[2],xmm0[3],xmm1[3]
vmovdqa %xmm0, 0x9f0(%rsp)
vmovdqa 0xa50(%rsp), %xmm1
vmovdqa 0xa60(%rsp), %xmm0
vmovdqa %xmm1, 0x3ce0(%rsp)
vmovdqa %xmm0, 0x3cd0(%rsp)
vmovdqa 0x3ce0(%rsp), %xmm0
vmovdqa 0x3cd0(%rsp), %xmm1
vpunpcklwd %xmm1, %xmm0, %xmm0 # xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1],xmm0[2],xmm1[2],xmm0[3],xmm1[3]
vmovdqa %xmm0, 0x9e0(%rsp)
vmovdqa 0xa60(%rsp), %xmm1
vmovdqa 0xa70(%rsp), %xmm0
vmovdqa %xmm1, 0x3cc0(%rsp)
vmovdqa %xmm0, 0x3cb0(%rsp)
vmovdqa 0x3cc0(%rsp), %xmm0
vmovdqa 0x3cb0(%rsp), %xmm1
vpunpcklwd %xmm1, %xmm0, %xmm0 # xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1],xmm0[2],xmm1[2],xmm0[3],xmm1[3]
vmovdqa %xmm0, 0x9d0(%rsp)
vmovdqa 0xa70(%rsp), %xmm1
vmovdqa 0xa80(%rsp), %xmm0
vmovdqa %xmm1, 0x3ca0(%rsp)
vmovdqa %xmm0, 0x3c90(%rsp)
vmovdqa 0x3ca0(%rsp), %xmm0
vmovdqa 0x3c90(%rsp), %xmm1
vpunpcklwd %xmm1, %xmm0, %xmm0 # xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1],xmm0[2],xmm1[2],xmm0[3],xmm1[3]
vmovdqa %xmm0, 0x9c0(%rsp)
vmovdqa 0xa80(%rsp), %xmm1
vmovdqa 0xa90(%rsp), %xmm0
vmovdqa %xmm1, 0x3c80(%rsp)
vmovdqa %xmm0, 0x3c70(%rsp)
vmovdqa 0x3c80(%rsp), %xmm0
vmovdqa 0x3c70(%rsp), %xmm1
vpunpcklwd %xmm1, %xmm0, %xmm0 # xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1],xmm0[2],xmm1[2],xmm0[3],xmm1[3]
vmovdqa %xmm0, 0x9b0(%rsp)
vmovdqa 0xa90(%rsp), %xmm1
vmovdqa 0xaa0(%rsp), %xmm0
vmovdqa %xmm1, 0x3c60(%rsp)
vmovdqa %xmm0, 0x3c50(%rsp)
vmovdqa 0x3c60(%rsp), %xmm0
vmovdqa 0x3c50(%rsp), %xmm1
vpunpcklwd %xmm1, %xmm0, %xmm0 # xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1],xmm0[2],xmm1[2],xmm0[3],xmm1[3]
vmovdqa %xmm0, 0x9a0(%rsp)
vmovdqa 0x9f0(%rsp), %xmm1
vmovdqa 0x9e0(%rsp), %xmm0
vmovdqa %xmm1, 0x38e0(%rsp)
vmovdqa %xmm0, 0x38d0(%rsp)
vmovdqa 0x38e0(%rsp), %xmm0
vmovdqa 0x38d0(%rsp), %xmm1
vpunpcklbw %xmm1, %xmm0, %xmm0 # xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1],xmm0[2],xmm1[2],xmm0[3],xmm1[3],xmm0[4],xmm1[4],xmm0[5],xmm1[5],xmm0[6],xmm1[6],xmm0[7],xmm1[7]
vmovdqa %xmm0, 0xa00(%rsp)
vmovdqa 0x9d0(%rsp), %xmm1
vmovdqa 0x9c0(%rsp), %xmm0
vmovdqa %xmm1, 0x38c0(%rsp)
vmovdqa %xmm0, 0x38b0(%rsp)
vmovdqa 0x38c0(%rsp), %xmm0
vmovdqa 0x38b0(%rsp), %xmm1
vpunpcklbw %xmm1, %xmm0, %xmm0 # xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1],xmm0[2],xmm1[2],xmm0[3],xmm1[3],xmm0[4],xmm1[4],xmm0[5],xmm1[5],xmm0[6],xmm1[6],xmm0[7],xmm1[7]
vmovdqa %xmm0, 0xa10(%rsp)
vmovdqa 0x9b0(%rsp), %xmm1
vmovdqa 0x9a0(%rsp), %xmm0
vmovdqa %xmm1, 0x38a0(%rsp)
vmovdqa %xmm0, 0x3890(%rsp)
vmovdqa 0x38a0(%rsp), %xmm0
vmovdqa 0x3890(%rsp), %xmm1
vpunpcklbw %xmm1, %xmm0, %xmm0 # xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1],xmm0[2],xmm1[2],xmm0[3],xmm1[3],xmm0[4],xmm1[4],xmm0[5],xmm1[5],xmm0[6],xmm1[6],xmm0[7],xmm1[7]
vmovdqa %xmm0, 0xa20(%rsp)
movq 0xac8(%rsp), %rdi
movslq 0x2184(%rsp), %rsi
leaq 0x2110(%rsp), %rdx
leaq 0xa40(%rsp), %rcx
leaq 0xa00(%rsp), %r8
callq 0x8ce5c0
vmovdqa %xmm0, 0x990(%rsp)
vmovdqa 0x990(%rsp), %xmm0
callq 0x8cc000
vmovdqa %xmm0, 0x980(%rsp)
vmovdqa 0x980(%rsp), %xmm0
movq 0x2178(%rsp), %rdi
movslq 0x2174(%rsp), %rsi
callq 0x8cc100
vmovdqa 0xa10(%rsp), %xmm0
vmovdqa %xmm0, 0xa00(%rsp)
vmovdqa 0xa20(%rsp), %xmm0
vmovdqa %xmm0, 0xa10(%rsp)
vmovdqa 0xa30(%rsp), %xmm0
vmovdqa %xmm0, 0xa20(%rsp)
movl 0x2184(%rsp), %ecx
shll %ecx
movq 0xac8(%rsp), %rax
movslq %ecx, %rcx
addq %rcx, %rax
movq %rax, 0xac8(%rsp)
movl 0x2174(%rsp), %ecx
shll %ecx
movq 0x2178(%rsp), %rax
movslq %ecx, %rcx
addq %rcx, %rax
movq %rax, 0x2178(%rsp)
movl 0x2154(%rsp), %eax
subl $0x2, %eax
movl %eax, 0x2154(%rsp)
cmpl $0x0, 0x2154(%rsp)
jne 0x8c03a1
jmp 0x8c09e4
movq 0xac8(%rsp), %rdi
callq 0x8cc180
movl %eax, 0x2644(%rsp)
vmovd 0x2644(%rsp), %xmm0
vmovdqa %xmm0, 0x2630(%rsp)
vmovdqa 0x2630(%rsp), %xmm0
vmovdqa %xmm0, 0x900(%rsp)
movq 0xac8(%rsp), %rdi
movslq 0x2184(%rsp), %rax
addq %rax, %rdi
callq 0x8cc180
movl %eax, 0x262c(%rsp)
vmovd 0x262c(%rsp), %xmm0
vmovdqa %xmm0, 0x2610(%rsp)
vmovdqa 0x2610(%rsp), %xmm0
vmovdqa %xmm0, 0x910(%rsp)
movq 0xac8(%rsp), %rdi
movl 0x2184(%rsp), %eax
addl %eax, %eax
cltq
addq %rax, %rdi
callq 0x8cc180
movl %eax, 0x260c(%rsp)
vmovd 0x260c(%rsp), %xmm0
vmovdqa %xmm0, 0x25f0(%rsp)
vmovdqa 0x25f0(%rsp), %xmm0
vmovdqa %xmm0, 0x920(%rsp)
movq 0xac8(%rsp), %rdi
movl 0x2184(%rsp), %ecx
movl %ecx, %eax
leal (%rax,%rax,2), %eax
cltq
addq %rax, %rdi
callq 0x8cc180
movl %eax, 0x25ec(%rsp)
vmovd 0x25ec(%rsp), %xmm0
vmovdqa %xmm0, 0x25d0(%rsp)
vmovdqa 0x25d0(%rsp), %xmm0
vmovdqa %xmm0, 0x930(%rsp)
movq 0xac8(%rsp), %rdi
movl 0x2184(%rsp), %eax
shll $0x2, %eax
cltq
addq %rax, %rdi
callq 0x8cc180
movl %eax, 0x25cc(%rsp)
vmovd 0x25cc(%rsp), %xmm0
vmovdqa %xmm0, 0x25b0(%rsp)
vmovdqa 0x25b0(%rsp), %xmm0
vmovdqa %xmm0, 0x940(%rsp)
movq 0xac8(%rsp), %rdi
movl 0x2184(%rsp), %ecx
movl %ecx, %eax
leal (%rax,%rax,4), %eax
cltq
addq %rax, %rdi
callq 0x8cc180
movl %eax, 0x25ac(%rsp)
vmovd 0x25ac(%rsp), %xmm0
vmovdqa %xmm0, 0x2590(%rsp)
vmovdqa 0x2590(%rsp), %xmm0
vmovdqa %xmm0, 0x950(%rsp)
movq 0xac8(%rsp), %rdi
movl 0x2184(%rsp), %ecx
addl %ecx, %ecx
movl %ecx, %eax
leal (%rax,%rax,2), %eax
cltq
addq %rax, %rdi
callq 0x8cc180
movl %eax, 0x258c(%rsp)
vmovd 0x258c(%rsp), %xmm0
vmovdqa %xmm0, 0x2570(%rsp)
vmovdqa 0x2570(%rsp), %xmm0
vmovdqa %xmm0, 0x960(%rsp)
vmovdqa 0x900(%rsp), %xmm1
vmovdqa 0x910(%rsp), %xmm0
vmovdqa %xmm1, 0x3e80(%rsp)
vmovdqa %xmm0, 0x3e70(%rsp)
vmovdqa 0x3e80(%rsp), %xmm0
vmovdqa 0x3e70(%rsp), %xmm1
vpunpckldq %xmm1, %xmm0, %xmm0 # xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1]
vmovdqa %xmm0, 0x8b0(%rsp)
vmovdqa 0x910(%rsp), %xmm1
vmovdqa 0x920(%rsp), %xmm0
vmovdqa %xmm1, 0x3e60(%rsp)
vmovdqa %xmm0, 0x3e50(%rsp)
vmovdqa 0x3e60(%rsp), %xmm0
vmovdqa 0x3e50(%rsp), %xmm1
vpunpckldq %xmm1, %xmm0, %xmm0 # xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1]
vmovdqa %xmm0, 0x8a0(%rsp)
vmovdqa 0x920(%rsp), %xmm1
vmovdqa 0x930(%rsp), %xmm0
vmovdqa %xmm1, 0x3e40(%rsp)
vmovdqa %xmm0, 0x3e30(%rsp)
vmovdqa 0x3e40(%rsp), %xmm0
vmovdqa 0x3e30(%rsp), %xmm1
vpunpckldq %xmm1, %xmm0, %xmm0 # xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1]
vmovdqa %xmm0, 0x890(%rsp)
vmovdqa 0x930(%rsp), %xmm1
vmovdqa 0x940(%rsp), %xmm0
vmovdqa %xmm1, 0x3e20(%rsp)
vmovdqa %xmm0, 0x3e10(%rsp)
vmovdqa 0x3e20(%rsp), %xmm0
vmovdqa 0x3e10(%rsp), %xmm1
vpunpckldq %xmm1, %xmm0, %xmm0 # xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1]
vmovdqa %xmm0, 0x880(%rsp)
vmovdqa 0x940(%rsp), %xmm1
vmovdqa 0x950(%rsp), %xmm0
vmovdqa %xmm1, 0x3e00(%rsp)
vmovdqa %xmm0, 0x3df0(%rsp)
vmovdqa 0x3e00(%rsp), %xmm0
vmovdqa 0x3df0(%rsp), %xmm1
vpunpckldq %xmm1, %xmm0, %xmm0 # xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1]
vmovdqa %xmm0, 0x870(%rsp)
vmovdqa 0x950(%rsp), %xmm1
vmovdqa 0x960(%rsp), %xmm0
vmovdqa %xmm1, 0x3de0(%rsp)
vmovdqa %xmm0, 0x3dd0(%rsp)
vmovdqa 0x3de0(%rsp), %xmm0
vmovdqa 0x3dd0(%rsp), %xmm1
vpunpckldq %xmm1, %xmm0, %xmm0 # xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1]
vmovdqa %xmm0, 0x860(%rsp)
vmovdqa 0x8b0(%rsp), %xmm1
vmovdqa 0x8a0(%rsp), %xmm0
vmovdqa %xmm1, 0x3940(%rsp)
vmovdqa %xmm0, 0x3930(%rsp)
vmovdqa 0x3940(%rsp), %xmm0
vmovdqa 0x3930(%rsp), %xmm1
vpunpcklbw %xmm1, %xmm0, %xmm0 # xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1],xmm0[2],xmm1[2],xmm0[3],xmm1[3],xmm0[4],xmm1[4],xmm0[5],xmm1[5],xmm0[6],xmm1[6],xmm0[7],xmm1[7]
vmovdqa %xmm0, 0x8c0(%rsp)
vmovdqa 0x890(%rsp), %xmm1
vmovdqa 0x880(%rsp), %xmm0
vmovdqa %xmm1, 0x3920(%rsp)
vmovdqa %xmm0, 0x3910(%rsp)
vmovdqa 0x3920(%rsp), %xmm0
vmovdqa 0x3910(%rsp), %xmm1
vpunpcklbw %xmm1, %xmm0, %xmm0 # xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1],xmm0[2],xmm1[2],xmm0[3],xmm1[3],xmm0[4],xmm1[4],xmm0[5],xmm1[5],xmm0[6],xmm1[6],xmm0[7],xmm1[7]
vmovdqa %xmm0, 0x8d0(%rsp)
vmovdqa 0x870(%rsp), %xmm1
vmovdqa 0x860(%rsp), %xmm0
vmovdqa %xmm1, 0x3900(%rsp)
vmovdqa %xmm0, 0x38f0(%rsp)
vmovdqa 0x3900(%rsp), %xmm0
vmovdqa 0x38f0(%rsp), %xmm1
vpunpcklbw %xmm1, %xmm0, %xmm0 # xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1],xmm0[2],xmm1[2],xmm0[3],xmm1[3],xmm0[4],xmm1[4],xmm0[5],xmm1[5],xmm0[6],xmm1[6],xmm0[7],xmm1[7]
vmovdqa %xmm0, 0x8e0(%rsp)
movq 0xac8(%rsp), %rdi
movslq 0x2184(%rsp), %rsi
leaq 0x2110(%rsp), %rdx
leaq 0x900(%rsp), %rcx
leaq 0x8c0(%rsp), %r8
callq 0x8ce730
vmovdqa %xmm0, 0x850(%rsp)
vmovdqa 0x850(%rsp), %xmm0
callq 0x8cc000
vmovdqa %xmm0, 0x840(%rsp)
vmovdqa 0x840(%rsp), %xmm0
movq 0x2178(%rsp), %rdi
movslq 0x2174(%rsp), %rsi
callq 0x8cc2f0
vmovdqa 0x8d0(%rsp), %xmm0
vmovdqa %xmm0, 0x8c0(%rsp)
vmovdqa 0x8e0(%rsp), %xmm0
vmovdqa %xmm0, 0x8d0(%rsp)
vmovdqa 0x8f0(%rsp), %xmm0
vmovdqa %xmm0, 0x8e0(%rsp)
movl 0x2184(%rsp), %ecx
shll %ecx
movq 0xac8(%rsp), %rax
movslq %ecx, %rcx
addq %rcx, %rax
movq %rax, 0xac8(%rsp)
movl 0x2174(%rsp), %ecx
shll %ecx
movq 0x2178(%rsp), %rax
movslq %ecx, %rcx
addq %rcx, %rax
movq %rax, 0x2178(%rsp)
movl 0x2154(%rsp), %eax
subl $0x2, %eax
movl %eax, 0x2154(%rsp)
cmpl $0x0, 0x2154(%rsp)
jne 0x8c08e4
jmp 0x8c09e4
jmp 0x8c1adc
movq 0x2160(%rsp), %rdi
movl 0x215c(%rsp), %esi
leaq 0x2080(%rsp), %rdx
callq 0x8ce8a0
cmpl $0x8, 0x2170(%rsp)
jne 0x8c0efd
movq 0xac8(%rsp), %rax
movq %rax, 0x3768(%rsp)
movq 0x3768(%rsp), %rax
vmovq (%rax), %xmm0
vmovdqa %xmm0, 0x3750(%rsp)
vmovdqa 0x3750(%rsp), %xmm0
vmovdqa %xmm0, 0x7c0(%rsp)
movq 0xac8(%rsp), %rax
movslq 0x2184(%rsp), %rcx
addq %rcx, %rax
movq %rax, 0x3748(%rsp)
movq 0x3748(%rsp), %rax
vmovq (%rax), %xmm0
vmovdqa %xmm0, 0x3730(%rsp)
vmovdqa 0x3730(%rsp), %xmm0
vmovdqa %xmm0, 0x7d0(%rsp)
movq 0xac8(%rsp), %rax
movl 0x2184(%rsp), %ecx
addl %ecx, %ecx
movslq %ecx, %rcx
addq %rcx, %rax
movq %rax, 0x3728(%rsp)
movq 0x3728(%rsp), %rax
vmovq (%rax), %xmm0
vmovdqa %xmm0, 0x3710(%rsp)
vmovdqa 0x3710(%rsp), %xmm0
vmovdqa %xmm0, 0x7e0(%rsp)
movq 0xac8(%rsp), %rax
movl 0x2184(%rsp), %edx
movl %edx, %ecx
leal (%rcx,%rcx,2), %ecx
movslq %ecx, %rcx
addq %rcx, %rax
movq %rax, 0x3708(%rsp)
movq 0x3708(%rsp), %rax
vmovq (%rax), %xmm0
vmovdqa %xmm0, 0x36f0(%rsp)
vmovdqa 0x36f0(%rsp), %xmm0
vmovdqa %xmm0, 0x7f0(%rsp)
movq 0xac8(%rsp), %rax
movl 0x2184(%rsp), %ecx
shll $0x2, %ecx
movslq %ecx, %rcx
addq %rcx, %rax
movq %rax, 0x36e8(%rsp)
movq 0x36e8(%rsp), %rax
vmovq (%rax), %xmm0
vmovdqa %xmm0, 0x36d0(%rsp)
vmovdqa 0x36d0(%rsp), %xmm0
vmovdqa %xmm0, 0x800(%rsp)
movq 0xac8(%rsp), %rax
movl 0x2184(%rsp), %edx
movl %edx, %ecx
leal (%rcx,%rcx,4), %ecx
movslq %ecx, %rcx
addq %rcx, %rax
movq %rax, 0x36c8(%rsp)
movq 0x36c8(%rsp), %rax
vmovq (%rax), %xmm0
vmovdqa %xmm0, 0x36b0(%rsp)
vmovdqa 0x36b0(%rsp), %xmm0
vmovdqa %xmm0, 0x810(%rsp)
movq 0xac8(%rsp), %rax
movl 0x2184(%rsp), %edx
addl %edx, %edx
movl %edx, %ecx
leal (%rcx,%rcx,2), %ecx
movslq %ecx, %rcx
addq %rcx, %rax
movq %rax, 0x36a8(%rsp)
movq 0x36a8(%rsp), %rax
vmovq (%rax), %xmm0
vmovdqa %xmm0, 0x3690(%rsp)
vmovdqa 0x3690(%rsp), %xmm0
vmovdqa %xmm0, 0x820(%rsp)
vmovdqa 0x7c0(%rsp), %xmm0
vmovdqa %xmm0, 0x27f0(%rsp)
vmovdqa 0x27f0(%rsp), %xmm0
vmovdqa 0x7d0(%rsp), %xmm1
vmovdqa %xmm1, 0x730(%rsp)
vmovdqa %xmm0, 0x720(%rsp)
vmovdqa 0x7d0(%rsp), %xmm0
vmovdqa %xmm0, 0x27e0(%rsp)
vmovdqa 0x27e0(%rsp), %xmm0
vmovdqa 0x7e0(%rsp), %xmm1
vmovdqa %xmm1, 0x710(%rsp)
vmovdqa %xmm0, 0x700(%rsp)
vmovdqa 0x7e0(%rsp), %xmm0
vmovdqa %xmm0, 0x27d0(%rsp)
vmovdqa 0x27d0(%rsp), %xmm0
vmovdqa 0x7f0(%rsp), %xmm1
vmovdqa %xmm1, 0x6f0(%rsp)
vmovdqa %xmm0, 0x6e0(%rsp)
vmovdqa 0x7f0(%rsp), %xmm0
vmovdqa %xmm0, 0x27c0(%rsp)
vmovdqa 0x27c0(%rsp), %xmm0
vmovdqa 0x800(%rsp), %xmm1
vmovdqa %xmm1, 0x6d0(%rsp)
vmovdqa %xmm0, 0x6c0(%rsp)
vmovdqa 0x800(%rsp), %xmm0
vmovdqa %xmm0, 0x27b0(%rsp)
vmovdqa 0x27b0(%rsp), %xmm0
vmovdqa 0x810(%rsp), %xmm1
vmovdqa %xmm1, 0x6b0(%rsp)
vmovdqa %xmm0, 0x6a0(%rsp)
vmovdqa 0x810(%rsp), %xmm0
vmovdqa %xmm0, 0x27a0(%rsp)
vmovdqa 0x27a0(%rsp), %xmm0
vmovdqa 0x820(%rsp), %xmm1
vmovdqa %xmm1, 0x690(%rsp)
vmovdqa %xmm0, 0x680(%rsp)
vmovaps 0x720(%rsp), %ymm1
vmovaps 0x700(%rsp), %ymm0
vmovaps %ymm1, 0x2c80(%rsp)
vmovaps %ymm0, 0x2c60(%rsp)
vmovaps 0x2c80(%rsp), %ymm0
vmovaps 0x2c60(%rsp), %ymm1
vpunpcklbw %ymm1, %ymm0, %ymm0 # ymm0 = ymm0[0],ymm1[0],ymm0[1],ymm1[1],ymm0[2],ymm1[2],ymm0[3],ymm1[3],ymm0[4],ymm1[4],ymm0[5],ymm1[5],ymm0[6],ymm1[6],ymm0[7],ymm1[7],ymm0[16],ymm1[16],ymm0[17],ymm1[17],ymm0[18],ymm1[18],ymm0[19],ymm1[19],ymm0[20],ymm1[20],ymm0[21],ymm1[21],ymm0[22],ymm1[22],ymm0[23],ymm1[23]
vmovaps %ymm0, 0x740(%rsp)
vmovaps 0x6e0(%rsp), %ymm1
vmovaps 0x6c0(%rsp), %ymm0
vmovaps %ymm1, 0x2c40(%rsp)
vmovaps %ymm0, 0x2c20(%rsp)
vmovaps 0x2c40(%rsp), %ymm0
vmovaps 0x2c20(%rsp), %ymm1
vpunpcklbw %ymm1, %ymm0, %ymm0 # ymm0 = ymm0[0],ymm1[0],ymm0[1],ymm1[1],ymm0[2],ymm1[2],ymm0[3],ymm1[3],ymm0[4],ymm1[4],ymm0[5],ymm1[5],ymm0[6],ymm1[6],ymm0[7],ymm1[7],ymm0[16],ymm1[16],ymm0[17],ymm1[17],ymm0[18],ymm1[18],ymm0[19],ymm1[19],ymm0[20],ymm1[20],ymm0[21],ymm1[21],ymm0[22],ymm1[22],ymm0[23],ymm1[23]
vmovaps %ymm0, 0x760(%rsp)
vmovaps 0x6a0(%rsp), %ymm1
vmovaps 0x680(%rsp), %ymm0
vmovaps %ymm1, 0x2c00(%rsp)
vmovaps %ymm0, 0x2be0(%rsp)
vmovaps 0x2c00(%rsp), %ymm0
vmovaps 0x2be0(%rsp), %ymm1
vpunpcklbw %ymm1, %ymm0, %ymm0 # ymm0 = ymm0[0],ymm1[0],ymm0[1],ymm1[1],ymm0[2],ymm1[2],ymm0[3],ymm1[3],ymm0[4],ymm1[4],ymm0[5],ymm1[5],ymm0[6],ymm1[6],ymm0[7],ymm1[7],ymm0[16],ymm1[16],ymm0[17],ymm1[17],ymm0[18],ymm1[18],ymm0[19],ymm1[19],ymm0[20],ymm1[20],ymm0[21],ymm1[21],ymm0[22],ymm1[22],ymm0[23],ymm1[23]
vmovdqa %ymm0, 0x780(%rsp)
movl 0x216c(%rsp), %eax
movl %eax, 0x2154(%rsp)
movq 0xac8(%rsp), %rdi
movslq 0x2184(%rsp), %rsi
leaq 0x2080(%rsp), %rdx
leaq 0x7c0(%rsp), %rcx
leaq 0x740(%rsp), %r8
callq 0x8ce920
vmovdqa %ymm0, 0x660(%rsp)
vmovdqa 0x660(%rsp), %ymm0
movq 0x2178(%rsp), %rdi
movslq 0x2174(%rsp), %rsi
callq 0x8cd130
vmovdqa 0x760(%rsp), %ymm0
vmovdqa %ymm0, 0x740(%rsp)
vmovdqa 0x780(%rsp), %ymm0
vmovdqa %ymm0, 0x760(%rsp)
vmovdqa 0x7a0(%rsp), %ymm0
vmovdqa %ymm0, 0x780(%rsp)
movl 0x2184(%rsp), %ecx
shll %ecx
movq 0xac8(%rsp), %rax
movslq %ecx, %rcx
addq %rcx, %rax
movq %rax, 0xac8(%rsp)
movl 0x2174(%rsp), %ecx
shll %ecx
movq 0x2178(%rsp), %rax
movslq %ecx, %rcx
addq %rcx, %rax
movq %rax, 0x2178(%rsp)
movl 0x2154(%rsp), %eax
subl $0x2, %eax
movl %eax, 0x2154(%rsp)
cmpl $0x0, 0x2154(%rsp)
jne 0x8c0e11
jmp 0x8c1ada
cmpl $0x10, 0x2170(%rsp)
jne 0x8c1477
movq 0xac8(%rsp), %rax
movq %rax, 0x26d8(%rsp)
movq 0x26d8(%rsp), %rax
vmovdqu (%rax), %xmm0
vmovdqa %xmm0, 0x5e0(%rsp)
movq 0xac8(%rsp), %rax
movslq 0x2184(%rsp), %rcx
addq %rcx, %rax
movq %rax, 0x26d0(%rsp)
movq 0x26d0(%rsp), %rax
vmovdqu (%rax), %xmm0
vmovdqa %xmm0, 0x5f0(%rsp)
movq 0xac8(%rsp), %rax
movl 0x2184(%rsp), %ecx
addl %ecx, %ecx
movslq %ecx, %rcx
addq %rcx, %rax
movq %rax, 0x26c8(%rsp)
movq 0x26c8(%rsp), %rax
vmovdqu (%rax), %xmm0
vmovdqa %xmm0, 0x600(%rsp)
movq 0xac8(%rsp), %rax
movl 0x2184(%rsp), %edx
movl %edx, %ecx
leal (%rcx,%rcx,2), %ecx
movslq %ecx, %rcx
addq %rcx, %rax
movq %rax, 0x26c0(%rsp)
movq 0x26c0(%rsp), %rax
vmovdqu (%rax), %xmm0
vmovdqa %xmm0, 0x610(%rsp)
movq 0xac8(%rsp), %rax
movl 0x2184(%rsp), %ecx
shll $0x2, %ecx
movslq %ecx, %rcx
addq %rcx, %rax
movq %rax, 0x26b8(%rsp)
movq 0x26b8(%rsp), %rax
vmovdqu (%rax), %xmm0
vmovdqa %xmm0, 0x620(%rsp)
movq 0xac8(%rsp), %rax
movl 0x2184(%rsp), %edx
movl %edx, %ecx
leal (%rcx,%rcx,4), %ecx
movslq %ecx, %rcx
addq %rcx, %rax
movq %rax, 0x26b0(%rsp)
movq 0x26b0(%rsp), %rax
vmovdqu (%rax), %xmm0
vmovdqa %xmm0, 0x630(%rsp)
movq 0xac8(%rsp), %rax
movl 0x2184(%rsp), %edx
addl %edx, %edx
movl %edx, %ecx
leal (%rcx,%rcx,2), %ecx
movslq %ecx, %rcx
addq %rcx, %rax
movq %rax, 0x26a8(%rsp)
movq 0x26a8(%rsp), %rax
vmovdqu (%rax), %xmm0
vmovdqa %xmm0, 0x640(%rsp)
vmovdqa 0x5e0(%rsp), %xmm0
vmovdqa %xmm0, 0x2850(%rsp)
vmovdqa 0x2850(%rsp), %xmm0
vmovdqa 0x5f0(%rsp), %xmm1
vmovdqa %xmm1, 0x490(%rsp)
vmovdqa %xmm0, 0x480(%rsp)
vmovdqa 0x5f0(%rsp), %xmm0
vmovdqa %xmm0, 0x2840(%rsp)
vmovdqa 0x2840(%rsp), %xmm0
vmovdqa 0x600(%rsp), %xmm1
vmovdqa %xmm1, 0x470(%rsp)
vmovdqa %xmm0, 0x460(%rsp)
vmovdqa 0x600(%rsp), %xmm0
vmovdqa %xmm0, 0x2830(%rsp)
vmovdqa 0x2830(%rsp), %xmm0
vmovdqa 0x610(%rsp), %xmm1
vmovdqa %xmm1, 0x450(%rsp)
vmovdqa %xmm0, 0x440(%rsp)
vmovdqa 0x610(%rsp), %xmm0
vmovdqa %xmm0, 0x2820(%rsp)
vmovdqa 0x2820(%rsp), %xmm0
vmovdqa 0x620(%rsp), %xmm1
vmovdqa %xmm1, 0x430(%rsp)
vmovdqa %xmm0, 0x420(%rsp)
vmovdqa 0x620(%rsp), %xmm0
vmovdqa %xmm0, 0x2810(%rsp)
vmovdqa 0x2810(%rsp), %xmm0
vmovdqa 0x630(%rsp), %xmm1
vmovdqa %xmm1, 0x410(%rsp)
vmovdqa %xmm0, 0x400(%rsp)
vmovdqa 0x630(%rsp), %xmm0
vmovdqa %xmm0, 0x2800(%rsp)
vmovdqa 0x2800(%rsp), %xmm0
vmovdqa 0x640(%rsp), %xmm1
vmovdqa %xmm1, 0x3f0(%rsp)
vmovdqa %xmm0, 0x3e0(%rsp)
vmovaps 0x480(%rsp), %ymm1
vmovaps 0x460(%rsp), %ymm0
vmovaps %ymm1, 0x2d40(%rsp)
vmovaps %ymm0, 0x2d20(%rsp)
vmovaps 0x2d40(%rsp), %ymm0
vmovaps 0x2d20(%rsp), %ymm1
vpunpcklbw %ymm1, %ymm0, %ymm0 # ymm0 = ymm0[0],ymm1[0],ymm0[1],ymm1[1],ymm0[2],ymm1[2],ymm0[3],ymm1[3],ymm0[4],ymm1[4],ymm0[5],ymm1[5],ymm0[6],ymm1[6],ymm0[7],ymm1[7],ymm0[16],ymm1[16],ymm0[17],ymm1[17],ymm0[18],ymm1[18],ymm0[19],ymm1[19],ymm0[20],ymm1[20],ymm0[21],ymm1[21],ymm0[22],ymm1[22],ymm0[23],ymm1[23]
vmovaps %ymm0, 0x4e0(%rsp)
vmovaps 0x440(%rsp), %ymm1
vmovaps 0x420(%rsp), %ymm0
vmovaps %ymm1, 0x2d00(%rsp)
vmovaps %ymm0, 0x2ce0(%rsp)
vmovaps 0x2d00(%rsp), %ymm0
vmovaps 0x2ce0(%rsp), %ymm1
vpunpcklbw %ymm1, %ymm0, %ymm0 # ymm0 = ymm0[0],ymm1[0],ymm0[1],ymm1[1],ymm0[2],ymm1[2],ymm0[3],ymm1[3],ymm0[4],ymm1[4],ymm0[5],ymm1[5],ymm0[6],ymm1[6],ymm0[7],ymm1[7],ymm0[16],ymm1[16],ymm0[17],ymm1[17],ymm0[18],ymm1[18],ymm0[19],ymm1[19],ymm0[20],ymm1[20],ymm0[21],ymm1[21],ymm0[22],ymm1[22],ymm0[23],ymm1[23]
vmovaps %ymm0, 0x500(%rsp)
vmovaps 0x400(%rsp), %ymm1
vmovaps 0x3e0(%rsp), %ymm0
vmovaps %ymm1, 0x2cc0(%rsp)
vmovaps %ymm0, 0x2ca0(%rsp)
vmovaps 0x2cc0(%rsp), %ymm0
vmovaps 0x2ca0(%rsp), %ymm1
vpunpcklbw %ymm1, %ymm0, %ymm0 # ymm0 = ymm0[0],ymm1[0],ymm0[1],ymm1[1],ymm0[2],ymm1[2],ymm0[3],ymm1[3],ymm0[4],ymm1[4],ymm0[5],ymm1[5],ymm0[6],ymm1[6],ymm0[7],ymm1[7],ymm0[16],ymm1[16],ymm0[17],ymm1[17],ymm0[18],ymm1[18],ymm0[19],ymm1[19],ymm0[20],ymm1[20],ymm0[21],ymm1[21],ymm0[22],ymm1[22],ymm0[23],ymm1[23]
vmovaps %ymm0, 0x520(%rsp)
vmovaps 0x480(%rsp), %ymm1
vmovaps 0x460(%rsp), %ymm0
vmovaps %ymm1, 0x3240(%rsp)
vmovaps %ymm0, 0x3220(%rsp)
vmovaps 0x3240(%rsp), %ymm0
vmovaps 0x3220(%rsp), %ymm1
vpunpckhbw %ymm1, %ymm0, %ymm0 # ymm0 = ymm0[8],ymm1[8],ymm0[9],ymm1[9],ymm0[10],ymm1[10],ymm0[11],ymm1[11],ymm0[12],ymm1[12],ymm0[13],ymm1[13],ymm0[14],ymm1[14],ymm0[15],ymm1[15],ymm0[24],ymm1[24],ymm0[25],ymm1[25],ymm0[26],ymm1[26],ymm0[27],ymm1[27],ymm0[28],ymm1[28],ymm0[29],ymm1[29],ymm0[30],ymm1[30],ymm0[31],ymm1[31]
vmovaps %ymm0, 0x560(%rsp)
vmovaps 0x440(%rsp), %ymm1
vmovaps 0x420(%rsp), %ymm0
vmovaps %ymm1, 0x3200(%rsp)
vmovaps %ymm0, 0x31e0(%rsp)
vmovaps 0x3200(%rsp), %ymm0
vmovaps 0x31e0(%rsp), %ymm1
vpunpckhbw %ymm1, %ymm0, %ymm0 # ymm0 = ymm0[8],ymm1[8],ymm0[9],ymm1[9],ymm0[10],ymm1[10],ymm0[11],ymm1[11],ymm0[12],ymm1[12],ymm0[13],ymm1[13],ymm0[14],ymm1[14],ymm0[15],ymm1[15],ymm0[24],ymm1[24],ymm0[25],ymm1[25],ymm0[26],ymm1[26],ymm0[27],ymm1[27],ymm0[28],ymm1[28],ymm0[29],ymm1[29],ymm0[30],ymm1[30],ymm0[31],ymm1[31]
vmovaps %ymm0, 0x580(%rsp)
vmovaps 0x400(%rsp), %ymm1
vmovaps 0x3e0(%rsp), %ymm0
vmovaps %ymm1, 0x31c0(%rsp)
vmovaps %ymm0, 0x31a0(%rsp)
vmovaps 0x31c0(%rsp), %ymm0
vmovaps 0x31a0(%rsp), %ymm1
vpunpckhbw %ymm1, %ymm0, %ymm0 # ymm0 = ymm0[8],ymm1[8],ymm0[9],ymm1[9],ymm0[10],ymm1[10],ymm0[11],ymm1[11],ymm0[12],ymm1[12],ymm0[13],ymm1[13],ymm0[14],ymm1[14],ymm0[15],ymm1[15],ymm0[24],ymm1[24],ymm0[25],ymm1[25],ymm0[26],ymm1[26],ymm0[27],ymm1[27],ymm0[28],ymm1[28],ymm0[29],ymm1[29],ymm0[30],ymm1[30],ymm0[31],ymm1[31]
vmovdqa %ymm0, 0x5a0(%rsp)
movl 0x216c(%rsp), %eax
movl %eax, 0x2154(%rsp)
movq 0xac8(%rsp), %rdi
movslq 0x2184(%rsp), %rsi
leaq 0x2080(%rsp), %rdx
leaq 0x5e0(%rsp), %rcx
leaq 0x4e0(%rsp), %r8
leaq 0x4a0(%rsp), %r9
vzeroupper
callq 0x8cea90
movq 0x2178(%rsp), %rsi
movslq 0x2174(%rsp), %rdx
leaq 0x4a0(%rsp), %rdi
callq 0x8cc860
vmovdqa 0x500(%rsp), %ymm0
vmovdqa %ymm0, 0x4e0(%rsp)
vmovdqa 0x520(%rsp), %ymm0
vmovdqa %ymm0, 0x500(%rsp)
vmovdqa 0x540(%rsp), %ymm0
vmovdqa %ymm0, 0x520(%rsp)
vmovdqa 0x580(%rsp), %ymm0
vmovdqa %ymm0, 0x560(%rsp)
vmovdqa 0x5a0(%rsp), %ymm0
vmovdqa %ymm0, 0x580(%rsp)
vmovdqa 0x5c0(%rsp), %ymm0
vmovdqa %ymm0, 0x5a0(%rsp)
movl 0x2184(%rsp), %ecx
shll %ecx
movq 0xac8(%rsp), %rax
movslq %ecx, %rcx
addq %rcx, %rax
movq %rax, 0xac8(%rsp)
movl 0x2174(%rsp), %ecx
shll %ecx
movq 0x2178(%rsp), %rax
movslq %ecx, %rcx
addq %rcx, %rax
movq %rax, 0x2178(%rsp)
movl 0x2154(%rsp), %eax
subl $0x2, %eax
movl %eax, 0x2154(%rsp)
cmpl $0x0, 0x2154(%rsp)
jne 0x8c1354
jmp 0x8c1ad8
movl $0x0, 0x2158(%rsp)
movq 0xac8(%rsp), %rax
movslq 0x2158(%rsp), %rcx
addq %rcx, %rax
movq %rax, 0x58(%rsp)
movq 0x2178(%rsp), %rax
movslq 0x2158(%rsp), %rcx
addq %rcx, %rax
movq %rax, 0x50(%rsp)
movq 0x58(%rsp), %rax
movq %rax, 0x3a88(%rsp)
movq 0x3a88(%rsp), %rax
vmovups (%rax), %ymm0
vmovaps %ymm0, 0x2e0(%rsp)
movq 0x58(%rsp), %rax
movslq 0x2184(%rsp), %rcx
addq %rcx, %rax
movq %rax, 0x3a80(%rsp)
movq 0x3a80(%rsp), %rax
vmovups (%rax), %ymm0
vmovaps %ymm0, 0x300(%rsp)
movq 0x58(%rsp), %rax
movl 0x2184(%rsp), %ecx
addl %ecx, %ecx
movslq %ecx, %rcx
addq %rcx, %rax
movq %rax, 0x3a78(%rsp)
movq 0x3a78(%rsp), %rax
vmovups (%rax), %ymm0
vmovaps %ymm0, 0x320(%rsp)
movq 0x58(%rsp), %rax
movl 0x2184(%rsp), %edx
movl %edx, %ecx
leal (%rcx,%rcx,2), %ecx
movslq %ecx, %rcx
addq %rcx, %rax
movq %rax, 0x3a70(%rsp)
movq 0x3a70(%rsp), %rax
vmovups (%rax), %ymm0
vmovaps %ymm0, 0x340(%rsp)
movq 0x58(%rsp), %rax
movl 0x2184(%rsp), %ecx
shll $0x2, %ecx
movslq %ecx, %rcx
addq %rcx, %rax
movq %rax, 0x3a68(%rsp)
movq 0x3a68(%rsp), %rax
vmovups (%rax), %ymm0
vmovaps %ymm0, 0x360(%rsp)
movq 0x58(%rsp), %rax
movl 0x2184(%rsp), %edx
movl %edx, %ecx
leal (%rcx,%rcx,4), %ecx
movslq %ecx, %rcx
addq %rcx, %rax
movq %rax, 0x3a60(%rsp)
movq 0x3a60(%rsp), %rax
vmovups (%rax), %ymm0
vmovaps %ymm0, 0x380(%rsp)
movq 0x58(%rsp), %rax
movl 0x2184(%rsp), %edx
addl %edx, %edx
movl %edx, %ecx
leal (%rcx,%rcx,2), %ecx
movslq %ecx, %rcx
addq %rcx, %rax
movq %rax, 0x3a58(%rsp)
movq 0x3a58(%rsp), %rax
vmovups (%rax), %ymm0
vmovaps %ymm0, 0x3a0(%rsp)
vmovaps 0x2e0(%rsp), %ymm1
vmovaps 0x300(%rsp), %ymm0
vmovaps %ymm1, 0x2ec0(%rsp)
vmovaps %ymm0, 0x2ea0(%rsp)
vmovaps 0x2ec0(%rsp), %ymm0
vmovaps 0x2ea0(%rsp), %ymm1
vpunpcklbw %ymm1, %ymm0, %ymm0 # ymm0 = ymm0[0],ymm1[0],ymm0[1],ymm1[1],ymm0[2],ymm1[2],ymm0[3],ymm1[3],ymm0[4],ymm1[4],ymm0[5],ymm1[5],ymm0[6],ymm1[6],ymm0[7],ymm1[7],ymm0[16],ymm1[16],ymm0[17],ymm1[17],ymm0[18],ymm1[18],ymm0[19],ymm1[19],ymm0[20],ymm1[20],ymm0[21],ymm1[21],ymm0[22],ymm1[22],ymm0[23],ymm1[23]
vmovaps %ymm0, 0x1e0(%rsp)
vmovaps 0x320(%rsp), %ymm1
vmovaps 0x340(%rsp), %ymm0
vmovaps %ymm1, 0x2e80(%rsp)
vmovaps %ymm0, 0x2e60(%rsp)
vmovaps 0x2e80(%rsp), %ymm0
vmovaps 0x2e60(%rsp), %ymm1
vpunpcklbw %ymm1, %ymm0, %ymm0 # ymm0 = ymm0[0],ymm1[0],ymm0[1],ymm1[1],ymm0[2],ymm1[2],ymm0[3],ymm1[3],ymm0[4],ymm1[4],ymm0[5],ymm1[5],ymm0[6],ymm1[6],ymm0[7],ymm1[7],ymm0[16],ymm1[16],ymm0[17],ymm1[17],ymm0[18],ymm1[18],ymm0[19],ymm1[19],ymm0[20],ymm1[20],ymm0[21],ymm1[21],ymm0[22],ymm1[22],ymm0[23],ymm1[23]
vmovaps %ymm0, 0x200(%rsp)
vmovaps 0x360(%rsp), %ymm1
vmovaps 0x380(%rsp), %ymm0
vmovaps %ymm1, 0x2e40(%rsp)
vmovaps %ymm0, 0x2e20(%rsp)
vmovaps 0x2e40(%rsp), %ymm0
vmovaps 0x2e20(%rsp), %ymm1
vpunpcklbw %ymm1, %ymm0, %ymm0 # ymm0 = ymm0[0],ymm1[0],ymm0[1],ymm1[1],ymm0[2],ymm1[2],ymm0[3],ymm1[3],ymm0[4],ymm1[4],ymm0[5],ymm1[5],ymm0[6],ymm1[6],ymm0[7],ymm1[7],ymm0[16],ymm1[16],ymm0[17],ymm1[17],ymm0[18],ymm1[18],ymm0[19],ymm1[19],ymm0[20],ymm1[20],ymm0[21],ymm1[21],ymm0[22],ymm1[22],ymm0[23],ymm1[23]
vmovaps %ymm0, 0x220(%rsp)
vmovaps 0x2e0(%rsp), %ymm1
vmovaps 0x300(%rsp), %ymm0
vmovaps %ymm1, 0x33c0(%rsp)
vmovaps %ymm0, 0x33a0(%rsp)
vmovaps 0x33c0(%rsp), %ymm0
vmovaps 0x33a0(%rsp), %ymm1
vpunpckhbw %ymm1, %ymm0, %ymm0 # ymm0 = ymm0[8],ymm1[8],ymm0[9],ymm1[9],ymm0[10],ymm1[10],ymm0[11],ymm1[11],ymm0[12],ymm1[12],ymm0[13],ymm1[13],ymm0[14],ymm1[14],ymm0[15],ymm1[15],ymm0[24],ymm1[24],ymm0[25],ymm1[25],ymm0[26],ymm1[26],ymm0[27],ymm1[27],ymm0[28],ymm1[28],ymm0[29],ymm1[29],ymm0[30],ymm1[30],ymm0[31],ymm1[31]
vmovaps %ymm0, 0x260(%rsp)
vmovaps 0x320(%rsp), %ymm1
vmovaps 0x340(%rsp), %ymm0
vmovaps %ymm1, 0x3380(%rsp)
vmovaps %ymm0, 0x3360(%rsp)
vmovaps 0x3380(%rsp), %ymm0
vmovaps 0x3360(%rsp), %ymm1
vpunpckhbw %ymm1, %ymm0, %ymm0 # ymm0 = ymm0[8],ymm1[8],ymm0[9],ymm1[9],ymm0[10],ymm1[10],ymm0[11],ymm1[11],ymm0[12],ymm1[12],ymm0[13],ymm1[13],ymm0[14],ymm1[14],ymm0[15],ymm1[15],ymm0[24],ymm1[24],ymm0[25],ymm1[25],ymm0[26],ymm1[26],ymm0[27],ymm1[27],ymm0[28],ymm1[28],ymm0[29],ymm1[29],ymm0[30],ymm1[30],ymm0[31],ymm1[31]
vmovaps %ymm0, 0x280(%rsp)
vmovaps 0x360(%rsp), %ymm1
vmovaps 0x380(%rsp), %ymm0
vmovaps %ymm1, 0x3340(%rsp)
vmovaps %ymm0, 0x3320(%rsp)
vmovaps 0x3340(%rsp), %ymm0
vmovaps 0x3320(%rsp), %ymm1
vpunpckhbw %ymm1, %ymm0, %ymm0 # ymm0 = ymm0[8],ymm1[8],ymm0[9],ymm1[9],ymm0[10],ymm1[10],ymm0[11],ymm1[11],ymm0[12],ymm1[12],ymm0[13],ymm1[13],ymm0[14],ymm1[14],ymm0[15],ymm1[15],ymm0[24],ymm1[24],ymm0[25],ymm1[25],ymm0[26],ymm1[26],ymm0[27],ymm1[27],ymm0[28],ymm1[28],ymm0[29],ymm1[29],ymm0[30],ymm1[30],ymm0[31],ymm1[31]
vmovaps %ymm0, 0x2a0(%rsp)
vmovaps 0x300(%rsp), %ymm1
vmovaps 0x320(%rsp), %ymm0
vmovaps %ymm1, 0x2e00(%rsp)
vmovaps %ymm0, 0x2de0(%rsp)
vmovaps 0x2e00(%rsp), %ymm0
vmovaps 0x2de0(%rsp), %ymm1
vpunpcklbw %ymm1, %ymm0, %ymm0 # ymm0 = ymm0[0],ymm1[0],ymm0[1],ymm1[1],ymm0[2],ymm1[2],ymm0[3],ymm1[3],ymm0[4],ymm1[4],ymm0[5],ymm1[5],ymm0[6],ymm1[6],ymm0[7],ymm1[7],ymm0[16],ymm1[16],ymm0[17],ymm1[17],ymm0[18],ymm1[18],ymm0[19],ymm1[19],ymm0[20],ymm1[20],ymm0[21],ymm1[21],ymm0[22],ymm1[22],ymm0[23],ymm1[23]
vmovaps %ymm0, 0xe0(%rsp)
vmovaps 0x340(%rsp), %ymm1
vmovaps 0x360(%rsp), %ymm0
vmovaps %ymm1, 0x2dc0(%rsp)
vmovaps %ymm0, 0x2da0(%rsp)
vmovaps 0x2dc0(%rsp), %ymm0
vmovaps 0x2da0(%rsp), %ymm1
vpunpcklbw %ymm1, %ymm0, %ymm0 # ymm0 = ymm0[0],ymm1[0],ymm0[1],ymm1[1],ymm0[2],ymm1[2],ymm0[3],ymm1[3],ymm0[4],ymm1[4],ymm0[5],ymm1[5],ymm0[6],ymm1[6],ymm0[7],ymm1[7],ymm0[16],ymm1[16],ymm0[17],ymm1[17],ymm0[18],ymm1[18],ymm0[19],ymm1[19],ymm0[20],ymm1[20],ymm0[21],ymm1[21],ymm0[22],ymm1[22],ymm0[23],ymm1[23]
vmovaps %ymm0, 0x100(%rsp)
vmovaps 0x380(%rsp), %ymm1
vmovaps 0x3a0(%rsp), %ymm0
vmovaps %ymm1, 0x2d80(%rsp)
vmovaps %ymm0, 0x2d60(%rsp)
vmovaps 0x2d80(%rsp), %ymm0
vmovaps 0x2d60(%rsp), %ymm1
vpunpcklbw %ymm1, %ymm0, %ymm0 # ymm0 = ymm0[0],ymm1[0],ymm0[1],ymm1[1],ymm0[2],ymm1[2],ymm0[3],ymm1[3],ymm0[4],ymm1[4],ymm0[5],ymm1[5],ymm0[6],ymm1[6],ymm0[7],ymm1[7],ymm0[16],ymm1[16],ymm0[17],ymm1[17],ymm0[18],ymm1[18],ymm0[19],ymm1[19],ymm0[20],ymm1[20],ymm0[21],ymm1[21],ymm0[22],ymm1[22],ymm0[23],ymm1[23]
vmovaps %ymm0, 0x120(%rsp)
vmovaps 0x300(%rsp), %ymm1
vmovaps 0x320(%rsp), %ymm0
vmovaps %ymm1, 0x3300(%rsp)
vmovaps %ymm0, 0x32e0(%rsp)
vmovaps 0x3300(%rsp), %ymm0
vmovaps 0x32e0(%rsp), %ymm1
vpunpckhbw %ymm1, %ymm0, %ymm0 # ymm0 = ymm0[8],ymm1[8],ymm0[9],ymm1[9],ymm0[10],ymm1[10],ymm0[11],ymm1[11],ymm0[12],ymm1[12],ymm0[13],ymm1[13],ymm0[14],ymm1[14],ymm0[15],ymm1[15],ymm0[24],ymm1[24],ymm0[25],ymm1[25],ymm0[26],ymm1[26],ymm0[27],ymm1[27],ymm0[28],ymm1[28],ymm0[29],ymm1[29],ymm0[30],ymm1[30],ymm0[31],ymm1[31]
vmovaps %ymm0, 0x160(%rsp)
vmovaps 0x340(%rsp), %ymm1
vmovaps 0x360(%rsp), %ymm0
vmovaps %ymm1, 0x32c0(%rsp)
vmovaps %ymm0, 0x32a0(%rsp)
vmovaps 0x32c0(%rsp), %ymm0
vmovaps 0x32a0(%rsp), %ymm1
vpunpckhbw %ymm1, %ymm0, %ymm0 # ymm0 = ymm0[8],ymm1[8],ymm0[9],ymm1[9],ymm0[10],ymm1[10],ymm0[11],ymm1[11],ymm0[12],ymm1[12],ymm0[13],ymm1[13],ymm0[14],ymm1[14],ymm0[15],ymm1[15],ymm0[24],ymm1[24],ymm0[25],ymm1[25],ymm0[26],ymm1[26],ymm0[27],ymm1[27],ymm0[28],ymm1[28],ymm0[29],ymm1[29],ymm0[30],ymm1[30],ymm0[31],ymm1[31]
vmovaps %ymm0, 0x180(%rsp)
vmovaps 0x380(%rsp), %ymm1
vmovaps 0x3a0(%rsp), %ymm0
vmovaps %ymm1, 0x3280(%rsp)
vmovaps %ymm0, 0x3260(%rsp)
vmovaps 0x3280(%rsp), %ymm0
vmovaps 0x3260(%rsp), %ymm1
vpunpckhbw %ymm1, %ymm0, %ymm0 # ymm0 = ymm0[8],ymm1[8],ymm0[9],ymm1[9],ymm0[10],ymm1[10],ymm0[11],ymm1[11],ymm0[12],ymm1[12],ymm0[13],ymm1[13],ymm0[14],ymm1[14],ymm0[15],ymm1[15],ymm0[24],ymm1[24],ymm0[25],ymm1[25],ymm0[26],ymm1[26],ymm0[27],ymm1[27],ymm0[28],ymm1[28],ymm0[29],ymm1[29],ymm0[30],ymm1[30],ymm0[31],ymm1[31]
vmovdqa %ymm0, 0x1a0(%rsp)
movl 0x216c(%rsp), %eax
movl %eax, 0x2154(%rsp)
movq 0x58(%rsp), %rdi
movslq 0x2184(%rsp), %rsi
leaq 0x2080(%rsp), %rdx
leaq 0x2e0(%rsp), %rcx
leaq 0x1e0(%rsp), %r8
leaq 0xe0(%rsp), %r9
leaq 0x60(%rsp), %rax
movq %rax, (%rsp)
vzeroupper
callq 0x8cec40
movq 0x50(%rsp), %rsi
movl 0x2174(%rsp), %edx
leaq 0x60(%rsp), %rdi
callq 0x8cd510
vmovdqa 0x200(%rsp), %ymm0
vmovdqa %ymm0, 0x1e0(%rsp)
vmovdqa 0x220(%rsp), %ymm0
vmovdqa %ymm0, 0x200(%rsp)
vmovdqa 0x240(%rsp), %ymm0
vmovdqa %ymm0, 0x220(%rsp)
vmovdqa 0x280(%rsp), %ymm0
vmovdqa %ymm0, 0x260(%rsp)
vmovdqa 0x2a0(%rsp), %ymm0
vmovdqa %ymm0, 0x280(%rsp)
vmovdqa 0x2c0(%rsp), %ymm0
vmovdqa %ymm0, 0x2a0(%rsp)
vmovdqa 0x100(%rsp), %ymm0
vmovdqa %ymm0, 0xe0(%rsp)
vmovdqa 0x120(%rsp), %ymm0
vmovdqa %ymm0, 0x100(%rsp)
vmovdqa 0x140(%rsp), %ymm0
vmovdqa %ymm0, 0x120(%rsp)
vmovdqa 0x180(%rsp), %ymm0
vmovdqa %ymm0, 0x160(%rsp)
vmovdqa 0x1a0(%rsp), %ymm0
vmovdqa %ymm0, 0x180(%rsp)
vmovdqa 0x1c0(%rsp), %ymm0
vmovdqa %ymm0, 0x1a0(%rsp)
movl 0x2184(%rsp), %ecx
shll %ecx
movq 0x58(%rsp), %rax
movslq %ecx, %rcx
addq %rcx, %rax
movq %rax, 0x58(%rsp)
movl 0x2174(%rsp), %ecx
shll %ecx
movq 0x50(%rsp), %rax
movslq %ecx, %rcx
addq %rcx, %rax
movq %rax, 0x50(%rsp)
movl 0x2154(%rsp), %eax
subl $0x2, %eax
movl %eax, 0x2154(%rsp)
cmpl $0x0, 0x2154(%rsp)
jne 0x8c1934
movl 0x2158(%rsp), %eax
addl $0x20, %eax
movl %eax, 0x2158(%rsp)
movl 0x2158(%rsp), %eax
cmpl 0x2170(%rsp), %eax
jl 0x8c1482
jmp 0x8c1ad8
jmp 0x8c1ada
jmp 0x8c1adc
jmp 0x8c1ade
jmp 0x8c1ae0
jmp 0x8c1ae2
jmp 0x8c1ae4
jmp 0x8c1ae6
movq %rbp, %rsp
popq %rbp
vzeroupper
retq
nop
| /m-ab-s[P]aom/av1/common/x86/convolve_avx2.c |
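The entry that follows, av1_convolve_x_sr_avx2 from convolve_avx2.c, is a thin dispatcher: when CONFIG_SVT_AV1 is enabled it queries the horizontal filter's tap count and routes 12-tap filters to the general AVX2 kernel, while every other tap count goes to the specialized kernel. Below is a minimal standalone sketch of that dispatch pattern only; get_tap(), convolve_general(), and convolve_specialized() are hypothetical stand-ins, not the real libaom/SVT-AV1 API.

/* Sketch only: hypothetical stand-ins mirroring the tap-count dispatch in
 * av1_convolve_x_sr_avx2; these are not the actual libaom entry points. */
#include <stdio.h>

static void convolve_general(int w, int h)     { printf("general path, %dx%d\n", w, h); }
static void convolve_specialized(int w, int h) { printf("specialized path, %dx%d\n", w, h); }

/* Hypothetical helper: report how many taps the chosen interpolation filter has. */
static int get_tap(int filter_len) { return filter_len; }

static void convolve_x_dispatch(int filter_len, int w, int h) {
  const int horz_tap = get_tap(filter_len);
  if (horz_tap == 12)
    convolve_general(w, h);       /* 12-tap: handled by the general kernel */
  else
    convolve_specialized(w, h);   /* other tap counts: specialized kernels */
}

int main(void) {
  convolve_x_dispatch(8, 64, 64);   /* -> specialized path */
  convolve_x_dispatch(12, 64, 64);  /* -> general path */
  return 0;
}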
av1_convolve_x_sr_avx2 | void av1_convolve_x_sr_avx2(const uint8_t *src, int32_t src_stride,
uint8_t *dst, int32_t dst_stride, int32_t w,
int32_t h,
const InterpFilterParams *filter_params_x,
const int32_t subpel_x_qn,
ConvolveParams *conv_params) {
#if CONFIG_SVT_AV1
const int horz_tap = get_filter_tap(filter_params_x, subpel_x_qn);
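  // 12-tap filters take the general AVX2 path below; all other tap counts
  // are handled by the specialized AVX2 path.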
if (horz_tap == 12) {
av1_convolve_x_sr_general_avx2(src, src_stride, dst, dst_stride, w, h,
filter_params_x, subpel_x_qn, conv_params);
} else {
av1_convolve_x_sr_specialized_avx2(src, src_stride, dst, dst_stride, w, h,
filter_params_x, subpel_x_qn,
conv_params);
}
#else
av1_convolve_x_sr_general_avx2(src, src_stride, dst, dst_stride, w, h,
filter_params_x, subpel_x_qn, conv_params);
#endif
} | pushq %rbp
movq %rsp, %rbp
andq $-0x20, %rsp
subq $0x10a0, %rsp # imm = 0x10A0
movq 0x20(%rbp), %rax
movl 0x18(%rbp), %eax
movq 0x10(%rbp), %rax
movq %rdi, 0x38(%rsp)
movl %esi, 0x34(%rsp)
movq %rdx, 0x28(%rsp)
movl %ecx, 0x24(%rsp)
movl %r8d, 0x20(%rsp)
movl %r9d, 0x1c(%rsp)
movq 0x10(%rbp), %rdi
movl 0x18(%rbp), %esi
callq 0x8c1af0
movl %eax, 0x18(%rsp)
cmpl $0xc, 0x18(%rsp)
jne 0x8c571d
movq 0x38(%rsp), %rdi
movl 0x34(%rsp), %esi
movq 0x28(%rsp), %rdx
movl 0x24(%rsp), %ecx
movl 0x20(%rsp), %r8d
movl 0x1c(%rsp), %r9d
movq 0x10(%rbp), %r11
movl 0x18(%rbp), %r10d
movq 0x20(%rbp), %rax
movq %r11, (%rsp)
movl %r10d, 0x8(%rsp)
movq %rax, 0x10(%rsp)
callq 0x8c7df0
jmp 0x8c7de3
movq 0x38(%rsp), %r11
movl 0x34(%rsp), %r10d
movq 0x28(%rsp), %r9
movl 0x24(%rsp), %r8d
movl 0x20(%rsp), %edi
movl 0x1c(%rsp), %esi
movq 0x10(%rbp), %rdx
movl 0x18(%rbp), %ecx
movq 0x20(%rbp), %rax
movq %r11, 0x6d0(%rsp)
movl %r10d, 0x6cc(%rsp)
movq %r9, 0x6c0(%rsp)
movl %r8d, 0x6bc(%rsp)
movl %edi, 0x6b8(%rsp)
movl %esi, 0x6b4(%rsp)
movq %rdx, 0x6a8(%rsp)
movl %ecx, 0x6a4(%rsp)
movq %rax, 0x698(%rsp)
movl 0x6b4(%rsp), %eax
movl %eax, 0x694(%rsp)
movq 0x6a8(%rsp), %rdi
movl 0x6a4(%rsp), %esi
callq 0x8c1af0
movl %eax, 0x5bc(%rsp)
cmpl $0x2, 0x5bc(%rsp)
jne 0x8c6639
movq 0x6d0(%rsp), %rax
movq %rax, 0x5b0(%rsp)
cmpl $0x8, 0x6a4(%rsp)
je 0x8c5dde
cmpl $0x8, 0x6b8(%rsp)
jg 0x8c5af6
movq 0x6a8(%rsp), %rdi
movl 0x6a4(%rsp), %esi
leaq 0x650(%rsp), %rdx
callq 0x8cbd20
cmpl $0x2, 0x6b8(%rsp)
jne 0x8c58d5
jmp 0x8c5818
movq 0x5b0(%rsp), %rdi
movslq 0x6cc(%rsp), %rsi
leaq 0x650(%rsp), %rdx
callq 0x8d0fc0
vmovdqa %xmm0, 0x5a0(%rsp)
vmovdqa 0x5a0(%rsp), %xmm0
callq 0x8d12d0
vmovdqa %xmm0, 0x590(%rsp)
vmovdqa 0x590(%rsp), %xmm0
movq 0x6c0(%rsp), %rdi
movslq 0x6bc(%rsp), %rsi
callq 0x8cc100
movl 0x6cc(%rsp), %ecx
shll %ecx
movq 0x5b0(%rsp), %rax
movslq %ecx, %rcx
addq %rcx, %rax
movq %rax, 0x5b0(%rsp)
movl 0x6bc(%rsp), %ecx
shll %ecx
movq 0x6c0(%rsp), %rax
movslq %ecx, %rcx
addq %rcx, %rax
movq %rax, 0x6c0(%rsp)
movl 0x694(%rsp), %eax
subl $0x2, %eax
movl %eax, 0x694(%rsp)
cmpl $0x0, 0x694(%rsp)
jne 0x8c5818
jmp 0x8c5af1
cmpl $0x4, 0x6b8(%rsp)
jne 0x8c59a2
jmp 0x8c58e5
movq 0x5b0(%rsp), %rdi
movslq 0x6cc(%rsp), %rsi
leaq 0x650(%rsp), %rdx
callq 0x8d13d0
vmovdqa %xmm0, 0x580(%rsp)
vmovdqa 0x580(%rsp), %xmm0
callq 0x8d12d0
vmovdqa %xmm0, 0x570(%rsp)
vmovdqa 0x570(%rsp), %xmm0
movq 0x6c0(%rsp), %rdi
movslq 0x6bc(%rsp), %rsi
callq 0x8cc2f0
movl 0x6cc(%rsp), %ecx
shll %ecx
movq 0x5b0(%rsp), %rax
movslq %ecx, %rcx
addq %rcx, %rax
movq %rax, 0x5b0(%rsp)
movl 0x6bc(%rsp), %ecx
shll %ecx
movq 0x6c0(%rsp), %rax
movslq %ecx, %rcx
addq %rcx, %rax
movq %rax, 0x6c0(%rsp)
movl 0x694(%rsp), %eax
subl $0x2, %eax
movl %eax, 0x694(%rsp)
cmpl $0x0, 0x694(%rsp)
jne 0x8c58e5
jmp 0x8c5aef
jmp 0x8c59a4
movq 0x5b0(%rsp), %rdi
movslq 0x6cc(%rsp), %rsi
leaq 0x650(%rsp), %rdx
leaq 0x550(%rsp), %rcx
callq 0x8d16e0
vmovdqa 0x550(%rsp), %xmm0
callq 0x8d12d0
vmovdqa %xmm0, 0x550(%rsp)
vmovdqa 0x560(%rsp), %xmm0
callq 0x8d12d0
vmovdqa %xmm0, 0x560(%rsp)
vmovdqa 0x550(%rsp), %xmm1
vmovdqa 0x560(%rsp), %xmm0
vmovdqa %xmm1, 0x930(%rsp)
vmovdqa %xmm0, 0x920(%rsp)
vmovdqa 0x930(%rsp), %xmm0
vmovdqa 0x920(%rsp), %xmm1
vpackuswb %xmm1, %xmm0, %xmm0
vmovdqa %xmm0, 0x540(%rsp)
movq 0x6c0(%rsp), %rax
vmovdqa 0x540(%rsp), %xmm0
movq %rax, 0x878(%rsp)
vmovdqa %xmm0, 0x860(%rsp)
movq 0x860(%rsp), %rcx
movq 0x878(%rsp), %rax
movq %rcx, (%rax)
movq 0x6c0(%rsp), %rdi
movslq 0x6bc(%rsp), %rax
addq %rax, %rdi
vmovdqa 0x540(%rsp), %xmm0
callq 0x8cc3d0
movl 0x6cc(%rsp), %ecx
shll %ecx
movq 0x5b0(%rsp), %rax
movslq %ecx, %rcx
addq %rcx, %rax
movq %rax, 0x5b0(%rsp)
movl 0x6bc(%rsp), %ecx
shll %ecx
movq 0x6c0(%rsp), %rax
movslq %ecx, %rcx
addq %rcx, %rax
movq %rax, 0x6c0(%rsp)
movl 0x694(%rsp), %eax
subl $0x2, %eax
movl %eax, 0x694(%rsp)
cmpl $0x0, 0x694(%rsp)
jne 0x8c59a4
jmp 0x8c5aef
jmp 0x8c5af1
jmp 0x8c5dd9
movq 0x6a8(%rsp), %rdi
movl 0x6a4(%rsp), %esi
leaq 0x5c0(%rsp), %rdx
callq 0x8cc410
cmpl $0x10, 0x6b8(%rsp)
jne 0x8c5bc6
jmp 0x8c5b22
movq 0x5b0(%rsp), %rdi
movslq 0x6cc(%rsp), %rsi
leaq 0x5c0(%rsp), %rdx
leaq 0x500(%rsp), %rcx
callq 0x8d1820
movq 0x6c0(%rsp), %rsi
movslq 0x6bc(%rsp), %rdx
leaq 0x500(%rsp), %rdi
callq 0x8d1940
movl 0x6cc(%rsp), %ecx
shll %ecx
movq 0x5b0(%rsp), %rax
movslq %ecx, %rcx
addq %rcx, %rax
movq %rax, 0x5b0(%rsp)
movl 0x6bc(%rsp), %ecx
shll %ecx
movq 0x6c0(%rsp), %rax
movslq %ecx, %rcx
addq %rcx, %rax
movq %rax, 0x6c0(%rsp)
movl 0x694(%rsp), %eax
subl $0x2, %eax
movl %eax, 0x694(%rsp)
cmpl $0x0, 0x694(%rsp)
jne 0x8c5b22
jmp 0x8c5dd7
cmpl $0x20, 0x6b8(%rsp)
jne 0x8c5c44
jmp 0x8c5bd2
movq 0x5b0(%rsp), %rdi
movq 0x6c0(%rsp), %rdx
leaq 0x5c0(%rsp), %rsi
callq 0x8d19b0
movl 0x6cc(%rsp), %ecx
movq 0x5b0(%rsp), %rax
movslq %ecx, %rcx
addq %rcx, %rax
movq %rax, 0x5b0(%rsp)
movl 0x6bc(%rsp), %ecx
movq 0x6c0(%rsp), %rax
movslq %ecx, %rcx
addq %rcx, %rax
movq %rax, 0x6c0(%rsp)
movl 0x694(%rsp), %eax
addl $-0x1, %eax
movl %eax, 0x694(%rsp)
cmpl $0x0, %eax
jne 0x8c5bd2
jmp 0x8c5dd5
cmpl $0x40, 0x6b8(%rsp)
jne 0x8c5cef
jmp 0x8c5c54
movq 0x5b0(%rsp), %rdi
movq 0x6c0(%rsp), %rdx
leaq 0x5c0(%rsp), %rsi
callq 0x8d19b0
movq 0x5b0(%rsp), %rdi
addq $0x20, %rdi
movq 0x6c0(%rsp), %rdx
addq $0x20, %rdx
leaq 0x5c0(%rsp), %rsi
callq 0x8d19b0
movl 0x6cc(%rsp), %ecx
movq 0x5b0(%rsp), %rax
movslq %ecx, %rcx
addq %rcx, %rax
movq %rax, 0x5b0(%rsp)
movl 0x6bc(%rsp), %ecx
movq 0x6c0(%rsp), %rax
movslq %ecx, %rcx
addq %rcx, %rax
movq %rax, 0x6c0(%rsp)
movl 0x694(%rsp), %eax
addl $-0x1, %eax
movl %eax, 0x694(%rsp)
cmpl $0x0, %eax
jne 0x8c5c54
jmp 0x8c5dd3
jmp 0x8c5cf1
movq 0x5b0(%rsp), %rdi
movq 0x6c0(%rsp), %rdx
leaq 0x5c0(%rsp), %rsi
callq 0x8d19b0
movq 0x5b0(%rsp), %rdi
addq $0x20, %rdi
movq 0x6c0(%rsp), %rdx
addq $0x20, %rdx
leaq 0x5c0(%rsp), %rsi
callq 0x8d19b0
movq 0x5b0(%rsp), %rdi
addq $0x40, %rdi
movq 0x6c0(%rsp), %rdx
addq $0x40, %rdx
leaq 0x5c0(%rsp), %rsi
callq 0x8d19b0
movq 0x5b0(%rsp), %rdi
addq $0x60, %rdi
movq 0x6c0(%rsp), %rdx
addq $0x60, %rdx
leaq 0x5c0(%rsp), %rsi
callq 0x8d19b0
movl 0x6cc(%rsp), %ecx
movq 0x5b0(%rsp), %rax
movslq %ecx, %rcx
addq %rcx, %rax
movq %rax, 0x5b0(%rsp)
movl 0x6bc(%rsp), %ecx
movq 0x6c0(%rsp), %rax
movslq %ecx, %rcx
addq %rcx, %rax
movq %rax, 0x6c0(%rsp)
movl 0x694(%rsp), %eax
addl $-0x1, %eax
movl %eax, 0x694(%rsp)
cmpl $0x0, %eax
jne 0x8c5cf1
jmp 0x8c5dd3
jmp 0x8c5dd5
jmp 0x8c5dd7
jmp 0x8c5dd9
jmp 0x8c6634
cmpl $0x2, 0x6b8(%rsp)
jne 0x8c5f26
jmp 0x8c5dee
movq 0x5b0(%rsp), %rdi
movslq 0x6cc(%rsp), %rsi
callq 0x8d1a00
vmovdqa %xmm0, 0x4f0(%rsp)
vmovdqa 0x4f0(%rsp), %xmm0
vpsrldq $0x1, %xmm0, %xmm0 # xmm0 = xmm0[1,2,3,4,5,6,7,8,9,10,11,12,13,14,15],zero
vmovdqa %xmm0, 0x4e0(%rsp)
vmovdqa 0x4f0(%rsp), %xmm1
vmovdqa 0x4e0(%rsp), %xmm0
vmovdqa %xmm1, 0x9a0(%rsp)
vmovdqa %xmm0, 0x990(%rsp)
vmovdqa 0x9a0(%rsp), %xmm0
vmovdqa 0x990(%rsp), %xmm1
vpavgb %xmm1, %xmm0, %xmm0
vmovdqa %xmm0, 0x4d0(%rsp)
vmovdqa 0x4d0(%rsp), %xmm0
vmovdqa %xmm0, 0x910(%rsp)
vmovdqa 0x910(%rsp), %xmm0
vmovdqa %xmm0, 0x900(%rsp)
movl 0x900(%rsp), %eax
movw %ax, %cx
movq 0x6c0(%rsp), %rax
movw %cx, (%rax)
vmovdqa 0x4d0(%rsp), %xmm0
vpextrw $0x2, %xmm0, %eax
movw %ax, %dx
movq 0x6c0(%rsp), %rax
movslq 0x6bc(%rsp), %rcx
movw %dx, (%rax,%rcx)
movl 0x6cc(%rsp), %ecx
shll %ecx
movq 0x5b0(%rsp), %rax
movslq %ecx, %rcx
addq %rcx, %rax
movq %rax, 0x5b0(%rsp)
movl 0x6bc(%rsp), %ecx
shll %ecx
movq 0x6c0(%rsp), %rax
movslq %ecx, %rcx
addq %rcx, %rax
movq %rax, 0x6c0(%rsp)
movl 0x694(%rsp), %eax
subl $0x2, %eax
movl %eax, 0x694(%rsp)
cmpl $0x0, 0x694(%rsp)
jne 0x8c5dee
jmp 0x8c6632
cmpl $0x4, 0x6b8(%rsp)
jne 0x8c6040
jmp 0x8c5f36
movq 0x5b0(%rsp), %rdi
movslq 0x6cc(%rsp), %rsi
callq 0x8d1a30
vmovdqa %xmm0, 0x4c0(%rsp)
vmovdqa 0x4c0(%rsp), %xmm0
vpsrldq $0x1, %xmm0, %xmm0 # xmm0 = xmm0[1,2,3,4,5,6,7,8,9,10,11,12,13,14,15],zero
vmovdqa %xmm0, 0x4b0(%rsp)
vmovdqa 0x4c0(%rsp), %xmm1
vmovdqa 0x4b0(%rsp), %xmm0
vmovdqa %xmm1, 0x9c0(%rsp)
vmovdqa %xmm0, 0x9b0(%rsp)
vmovdqa 0x9c0(%rsp), %xmm0
vmovdqa 0x9b0(%rsp), %xmm1
vpavgb %xmm1, %xmm0, %xmm0
vmovdqa %xmm0, 0x4a0(%rsp)
movq 0x6c0(%rsp), %rdi
vmovdqa 0x4a0(%rsp), %xmm0
callq 0x8cb680
movl 0x4a8(%rsp), %edx
movq 0x6c0(%rsp), %rax
movslq 0x6bc(%rsp), %rcx
movl %edx, (%rax,%rcx)
movl 0x6cc(%rsp), %ecx
shll %ecx
movq 0x5b0(%rsp), %rax
movslq %ecx, %rcx
addq %rcx, %rax
movq %rax, 0x5b0(%rsp)
movl 0x6bc(%rsp), %ecx
shll %ecx
movq 0x6c0(%rsp), %rax
movslq %ecx, %rcx
addq %rcx, %rax
movq %rax, 0x6c0(%rsp)
movl 0x694(%rsp), %eax
subl $0x2, %eax
movl %eax, 0x694(%rsp)
cmpl $0x0, 0x694(%rsp)
jne 0x8c5f36
jmp 0x8c6630
cmpl $0x8, 0x6b8(%rsp)
jne 0x8c6230
jmp 0x8c6050
movq 0x5b0(%rsp), %rax
movq %rax, 0x6e0(%rsp)
movq 0x6e0(%rsp), %rax
vmovdqu (%rax), %xmm0
vmovdqa %xmm0, 0x490(%rsp)
movq 0x5b0(%rsp), %rax
movslq 0x6cc(%rsp), %rcx
addq %rcx, %rax
movq %rax, 0x6d8(%rsp)
movq 0x6d8(%rsp), %rax
vmovdqu (%rax), %xmm0
vmovdqa %xmm0, 0x480(%rsp)
vmovdqa 0x490(%rsp), %xmm0
vpsrldq $0x1, %xmm0, %xmm0 # xmm0 = xmm0[1,2,3,4,5,6,7,8,9,10,11,12,13,14,15],zero
vmovdqa %xmm0, 0x470(%rsp)
vmovdqa 0x480(%rsp), %xmm0
vpsrldq $0x1, %xmm0, %xmm0 # xmm0 = xmm0[1,2,3,4,5,6,7,8,9,10,11,12,13,14,15],zero
vmovdqa %xmm0, 0x460(%rsp)
vmovdqa 0x490(%rsp), %xmm1
vmovdqa 0x470(%rsp), %xmm0
vmovdqa %xmm1, 0xa00(%rsp)
vmovdqa %xmm0, 0x9f0(%rsp)
vmovdqa 0xa00(%rsp), %xmm0
vmovdqa 0x9f0(%rsp), %xmm1
vpavgb %xmm1, %xmm0, %xmm0
vmovdqa %xmm0, 0x450(%rsp)
vmovdqa 0x480(%rsp), %xmm1
vmovdqa 0x460(%rsp), %xmm0
vmovdqa %xmm1, 0x9e0(%rsp)
vmovdqa %xmm0, 0x9d0(%rsp)
vmovdqa 0x9e0(%rsp), %xmm0
vmovdqa 0x9d0(%rsp), %xmm1
vpavgb %xmm1, %xmm0, %xmm0
vmovdqa %xmm0, 0x440(%rsp)
movq 0x6c0(%rsp), %rax
vmovdqa 0x450(%rsp), %xmm0
movq %rax, 0x8b8(%rsp)
vmovdqa %xmm0, 0x8a0(%rsp)
movq 0x8a0(%rsp), %rcx
movq 0x8b8(%rsp), %rax
movq %rcx, (%rax)
movq 0x6c0(%rsp), %rax
movslq 0x6bc(%rsp), %rcx
addq %rcx, %rax
vmovdqa 0x440(%rsp), %xmm0
movq %rax, 0x898(%rsp)
vmovdqa %xmm0, 0x880(%rsp)
movq 0x880(%rsp), %rcx
movq 0x898(%rsp), %rax
movq %rcx, (%rax)
movl 0x6cc(%rsp), %ecx
shll %ecx
movq 0x5b0(%rsp), %rax
movslq %ecx, %rcx
addq %rcx, %rax
movq %rax, 0x5b0(%rsp)
movl 0x6bc(%rsp), %ecx
shll %ecx
movq 0x6c0(%rsp), %rax
movslq %ecx, %rcx
addq %rcx, %rax
movq %rax, 0x6c0(%rsp)
movl 0x694(%rsp), %eax
subl $0x2, %eax
movl %eax, 0x694(%rsp)
cmpl $0x0, 0x694(%rsp)
jne 0x8c6050
jmp 0x8c662e
cmpl $0x10, 0x6b8(%rsp)
jne 0x8c6453
jmp 0x8c6240
movq 0x5b0(%rsp), %rax
movq %rax, 0x700(%rsp)
movq 0x700(%rsp), %rax
vmovdqu (%rax), %xmm0
vmovdqa %xmm0, 0x430(%rsp)
movq 0x5b0(%rsp), %rax
addq $0x1, %rax
movq %rax, 0x6f8(%rsp)
movq 0x6f8(%rsp), %rax
vmovdqu (%rax), %xmm0
vmovdqa %xmm0, 0x420(%rsp)
movq 0x5b0(%rsp), %rax
movslq 0x6cc(%rsp), %rcx
addq %rcx, %rax
movq %rax, 0x6f0(%rsp)
movq 0x6f0(%rsp), %rax
vmovdqu (%rax), %xmm0
vmovdqa %xmm0, 0x410(%rsp)
movq 0x5b0(%rsp), %rax
movslq 0x6cc(%rsp), %rcx
addq %rcx, %rax
addq $0x1, %rax
movq %rax, 0x6e8(%rsp)
movq 0x6e8(%rsp), %rax
vmovdqu (%rax), %xmm0
vmovdqa %xmm0, 0x400(%rsp)
vmovdqa 0x430(%rsp), %xmm1
vmovdqa 0x420(%rsp), %xmm0
vmovdqa %xmm1, 0xa40(%rsp)
vmovdqa %xmm0, 0xa30(%rsp)
vmovdqa 0xa40(%rsp), %xmm0
vmovdqa 0xa30(%rsp), %xmm1
vpavgb %xmm1, %xmm0, %xmm0
vmovdqa %xmm0, 0x3f0(%rsp)
vmovdqa 0x410(%rsp), %xmm1
vmovdqa 0x400(%rsp), %xmm0
vmovdqa %xmm1, 0xa20(%rsp)
vmovdqa %xmm0, 0xa10(%rsp)
vmovdqa 0xa20(%rsp), %xmm0
vmovdqa 0xa10(%rsp), %xmm1
vpavgb %xmm1, %xmm0, %xmm0
vmovdqa %xmm0, 0x3e0(%rsp)
movq 0x6c0(%rsp), %rax
vmovdqa 0x3f0(%rsp), %xmm0
movq %rax, 0x838(%rsp)
vmovdqa %xmm0, 0x820(%rsp)
vmovdqa 0x820(%rsp), %xmm0
movq 0x838(%rsp), %rax
vmovdqu %xmm0, (%rax)
movq 0x6c0(%rsp), %rax
movslq 0x6bc(%rsp), %rcx
addq %rcx, %rax
vmovdqa 0x3e0(%rsp), %xmm0
movq %rax, 0x818(%rsp)
vmovdqa %xmm0, 0x800(%rsp)
vmovdqa 0x800(%rsp), %xmm0
movq 0x818(%rsp), %rax
vmovdqu %xmm0, (%rax)
movl 0x6cc(%rsp), %ecx
shll %ecx
movq 0x5b0(%rsp), %rax
movslq %ecx, %rcx
addq %rcx, %rax
movq %rax, 0x5b0(%rsp)
movl 0x6bc(%rsp), %ecx
shll %ecx
movq 0x6c0(%rsp), %rax
movslq %ecx, %rcx
addq %rcx, %rax
movq %rax, 0x6c0(%rsp)
movl 0x694(%rsp), %eax
subl $0x2, %eax
movl %eax, 0x694(%rsp)
cmpl $0x0, 0x694(%rsp)
jne 0x8c6240
jmp 0x8c662c
cmpl $0x20, 0x6b8(%rsp)
jne 0x8c64c9
jmp 0x8c645f
movq 0x5b0(%rsp), %rdi
movq 0x6c0(%rsp), %rsi
callq 0x8d1a60
movl 0x6cc(%rsp), %ecx
movq 0x5b0(%rsp), %rax
movslq %ecx, %rcx
addq %rcx, %rax
movq %rax, 0x5b0(%rsp)
movl 0x6bc(%rsp), %ecx
movq 0x6c0(%rsp), %rax
movslq %ecx, %rcx
addq %rcx, %rax
movq %rax, 0x6c0(%rsp)
movl 0x694(%rsp), %eax
addl $-0x1, %eax
movl %eax, 0x694(%rsp)
cmpl $0x0, %eax
jne 0x8c645f
jmp 0x8c662a
cmpl $0x40, 0x6b8(%rsp)
jne 0x8c6564
jmp 0x8c64d9
movq 0x5b0(%rsp), %rdi
movq 0x6c0(%rsp), %rsi
callq 0x8d1a60
movq 0x5b0(%rsp), %rdi
addq $0x20, %rdi
movq 0x6c0(%rsp), %rsi
addq $0x20, %rsi
callq 0x8d1a60
movl 0x6cc(%rsp), %ecx
movq 0x5b0(%rsp), %rax
movslq %ecx, %rcx
addq %rcx, %rax
movq %rax, 0x5b0(%rsp)
movl 0x6bc(%rsp), %ecx
movq 0x6c0(%rsp), %rax
movslq %ecx, %rcx
addq %rcx, %rax
movq %rax, 0x6c0(%rsp)
movl 0x694(%rsp), %eax
addl $-0x1, %eax
movl %eax, 0x694(%rsp)
cmpl $0x0, %eax
jne 0x8c64d9
jmp 0x8c6628
jmp 0x8c6566
movq 0x5b0(%rsp), %rdi
movq 0x6c0(%rsp), %rsi
callq 0x8d1a60
movq 0x5b0(%rsp), %rdi
addq $0x20, %rdi
movq 0x6c0(%rsp), %rsi
addq $0x20, %rsi
callq 0x8d1a60
movq 0x5b0(%rsp), %rdi
addq $0x40, %rdi
movq 0x6c0(%rsp), %rsi
addq $0x40, %rsi
callq 0x8d1a60
movq 0x5b0(%rsp), %rdi
addq $0x60, %rdi
movq 0x6c0(%rsp), %rsi
addq $0x60, %rsi
callq 0x8d1a60
movl 0x6cc(%rsp), %ecx
movq 0x5b0(%rsp), %rax
movslq %ecx, %rcx
addq %rcx, %rax
movq %rax, 0x5b0(%rsp)
movl 0x6bc(%rsp), %ecx
movq 0x6c0(%rsp), %rax
movslq %ecx, %rcx
addq %rcx, %rax
movq %rax, 0x6c0(%rsp)
movl 0x694(%rsp), %eax
addl $-0x1, %eax
movl %eax, 0x694(%rsp)
cmpl $0x0, %eax
jne 0x8c6566
jmp 0x8c6628
jmp 0x8c662a
jmp 0x8c662c
jmp 0x8c662e
jmp 0x8c6630
jmp 0x8c6632
jmp 0x8c6634
jmp 0x8c7de1
cmpl $0x4, 0x5bc(%rsp)
jne 0x8c6d12
movq 0x6d0(%rsp), %rax
addq $-0x1, %rax
movq %rax, 0x3d8(%rsp)
movq 0x6a8(%rsp), %rdi
movl 0x6a4(%rsp), %esi
leaq 0x650(%rsp), %rdx
callq 0x8cca00
cmpl $0x2, 0x6b8(%rsp)
jne 0x8c6744
jmp 0x8c6687
movq 0x3d8(%rsp), %rdi
movslq 0x6cc(%rsp), %rsi
leaq 0x650(%rsp), %rdx
callq 0x8d1b20
vmovdqa %xmm0, 0x3c0(%rsp)
vmovdqa 0x3c0(%rsp), %xmm0
callq 0x8d12d0
vmovdqa %xmm0, 0x3b0(%rsp)
vmovdqa 0x3b0(%rsp), %xmm0
movq 0x6c0(%rsp), %rdi
movslq 0x6bc(%rsp), %rsi
callq 0x8cc100
movl 0x6cc(%rsp), %ecx
shll %ecx
movq 0x3d8(%rsp), %rax
movslq %ecx, %rcx
addq %rcx, %rax
movq %rax, 0x3d8(%rsp)
movl 0x6bc(%rsp), %ecx
shll %ecx
movq 0x6c0(%rsp), %rax
movslq %ecx, %rcx
addq %rcx, %rax
movq %rax, 0x6c0(%rsp)
movl 0x694(%rsp), %eax
subl $0x2, %eax
movl %eax, 0x694(%rsp)
cmpl $0x0, 0x694(%rsp)
jne 0x8c6687
jmp 0x8c6d0d
cmpl $0x4, 0x6b8(%rsp)
jne 0x8c6811
jmp 0x8c6754
movq 0x3d8(%rsp), %rdi
movslq 0x6cc(%rsp), %rsi
leaq 0x650(%rsp), %rdx
callq 0x8d20f0
vmovdqa %xmm0, 0x3a0(%rsp)
vmovdqa 0x3a0(%rsp), %xmm0
callq 0x8d12d0
vmovdqa %xmm0, 0x390(%rsp)
vmovdqa 0x390(%rsp), %xmm0
movq 0x6c0(%rsp), %rdi
movslq 0x6bc(%rsp), %rsi
callq 0x8cc2f0
movl 0x6cc(%rsp), %ecx
shll %ecx
movq 0x3d8(%rsp), %rax
movslq %ecx, %rcx
addq %rcx, %rax
movq %rax, 0x3d8(%rsp)
movl 0x6bc(%rsp), %ecx
shll %ecx
movq 0x6c0(%rsp), %rax
movslq %ecx, %rcx
addq %rcx, %rax
movq %rax, 0x6c0(%rsp)
movl 0x694(%rsp), %eax
subl $0x2, %eax
movl %eax, 0x694(%rsp)
cmpl $0x0, 0x694(%rsp)
jne 0x8c6754
jmp 0x8c6d0b
cmpl $0x8, 0x6b8(%rsp)
jne 0x8c6aa5
movq 0x6a8(%rsp), %rdi
movl 0x6a4(%rsp), %esi
leaq 0x5c0(%rsp), %rdx
callq 0x8cae40
leaq 0x2540fe(%rip), %rax # 0xb1a940
movq %rax, 0x950(%rsp)
movq 0x950(%rsp), %rax
vmovdqu (%rax), %ymm0
vmovdqa %ymm0, 0x340(%rsp)
leaq 0x2540fa(%rip), %rax # 0xb1a960
movq %rax, 0x948(%rsp)
movq 0x948(%rsp), %rax
vmovdqu (%rax), %ymm0
vmovdqa %ymm0, 0x360(%rsp)
movl $0x0, 0x33c(%rsp)
movl 0x33c(%rsp), %eax
cmpl 0x6b4(%rsp), %eax
jge 0x8c6aa0
movq 0x3d8(%rsp), %rax
movl 0x33c(%rsp), %ecx
movl 0x6cc(%rsp), %edx
imull %edx, %ecx
movslq %ecx, %rcx
addq %rcx, %rax
movq %rax, 0x710(%rsp)
movq 0x710(%rsp), %rax
vmovdqu (%rax), %xmm0
vmovdqa %xmm0, 0x730(%rsp)
vmovdqa 0x730(%rsp), %xmm0
movq 0x3d8(%rsp), %rax
movl 0x33c(%rsp), %ecx
movl 0x6cc(%rsp), %edx
imull %edx, %ecx
addl %edx, %ecx
movslq %ecx, %rcx
addq %rcx, %rax
movq %rax, 0x708(%rsp)
movq 0x708(%rsp), %rax
vmovdqu (%rax), %xmm1
vmovdqa %xmm1, 0x720(%rsp)
vmovdqa 0x720(%rsp), %xmm1
vmovdqa %xmm1, 0x310(%rsp)
vmovdqa %xmm0, 0x300(%rsp)
vmovaps 0x300(%rsp), %ymm0
leaq 0x5e0(%rsp), %rdi
leaq 0x340(%rsp), %rsi
callq 0x8d0cb0
vmovaps %ymm0, 0x2e0(%rsp)
vmovaps 0x2e0(%rsp), %ymm0
callq 0x8d26c0
vmovaps %ymm0, 0x2e0(%rsp)
vmovaps 0x2e0(%rsp), %ymm0
vmovaps %ymm0, 0x760(%rsp)
vmovaps %ymm0, 0x740(%rsp)
vmovaps 0x760(%rsp), %ymm0
vmovaps 0x740(%rsp), %ymm1
vpackuswb %ymm1, %ymm0, %ymm0
vmovaps %ymm0, 0x2c0(%rsp)
vmovaps 0x2c0(%rsp), %ymm0
vmovaps %ymm0, 0x7c0(%rsp)
vmovaps 0x7c0(%rsp), %ymm0
vmovdqa %xmm0, 0x2b0(%rsp)
vmovaps 0x2c0(%rsp), %ymm0
vextracti128 $0x1, %ymm0, 0x2a0(%rsp)
movq 0x6c0(%rsp), %rax
movl 0x33c(%rsp), %ecx
movl 0x6bc(%rsp), %edx
imull %edx, %ecx
movslq %ecx, %rcx
addq %rcx, %rax
vmovdqa 0x2b0(%rsp), %xmm0
movq %rax, 0x8f8(%rsp)
vmovdqa %xmm0, 0x8e0(%rsp)
movq 0x8e0(%rsp), %rcx
movq 0x8f8(%rsp), %rax
movq %rcx, (%rax)
movq 0x6c0(%rsp), %rax
movl 0x33c(%rsp), %ecx
movl 0x6bc(%rsp), %edx
imull %edx, %ecx
addl %edx, %ecx
movslq %ecx, %rcx
addq %rcx, %rax
vmovdqa 0x2a0(%rsp), %xmm0
movq %rax, 0x8d8(%rsp)
vmovdqa %xmm0, 0x8c0(%rsp)
movq 0x8c0(%rsp), %rcx
movq 0x8d8(%rsp), %rax
movq %rcx, (%rax)
movl 0x33c(%rsp), %eax
addl $0x2, %eax
movl %eax, 0x33c(%rsp)
jmp 0x8c688e
jmp 0x8c6d09
movq 0x6a8(%rsp), %rdi
movl 0x6a4(%rsp), %esi
leaq 0x5c0(%rsp), %rdx
callq 0x8cae40
leaq 0x253e78(%rip), %rax # 0xb1a940
movq %rax, 0x968(%rsp)
movq 0x968(%rsp), %rax
vmovdqu (%rax), %ymm0
vmovdqa %ymm0, 0x260(%rsp)
leaq 0x253e74(%rip), %rax # 0xb1a960
movq %rax, 0x960(%rsp)
movq 0x960(%rsp), %rax
vmovdqu (%rax), %ymm0
vmovdqa %ymm0, 0x280(%rsp)
movl $0x0, 0x25c(%rsp)
movl 0x25c(%rsp), %eax
cmpl 0x6b4(%rsp), %eax
jge 0x8c6d07
movl $0x0, 0x258(%rsp)
movl 0x258(%rsp), %eax
cmpl 0x6b8(%rsp), %eax
jge 0x8c6cf1
movq 0x3d8(%rsp), %rax
movl 0x25c(%rsp), %ecx
movl 0x6cc(%rsp), %edx
imull %edx, %ecx
movl 0x258(%rsp), %edx
addl %edx, %ecx
movslq %ecx, %rcx
addq %rcx, %rax
movq %rax, 0x958(%rsp)
movq 0x958(%rsp), %rax
vmovups (%rax), %ymm0
movq 0x3d8(%rsp), %rax
movl 0x25c(%rsp), %esi
movl 0x6cc(%rsp), %ecx
imull %ecx, %esi
movl 0x258(%rsp), %edx
movl %edx, %ecx
movl %esi, %edx
leal 0x8(%rcx,%rdx), %ecx
movslq %ecx, %rcx
addq %rcx, %rax
movq %rax, 0x718(%rsp)
movq 0x718(%rsp), %rax
vmovdqu (%rax), %xmm1
vinserti128 $0x1, %xmm1, %ymm0, %ymm0
vmovaps %ymm0, 0x220(%rsp)
vmovaps 0x220(%rsp), %ymm0
leaq 0x5e0(%rsp), %rdi
leaq 0x260(%rsp), %rsi
callq 0x8d0cb0
vmovaps %ymm0, 0x200(%rsp)
vmovaps 0x200(%rsp), %ymm0
callq 0x8d26c0
vmovaps %ymm0, 0x200(%rsp)
vmovaps 0x200(%rsp), %ymm0
vmovaps %ymm0, 0x7a0(%rsp)
vmovaps %ymm0, 0x780(%rsp)
vmovaps 0x7a0(%rsp), %ymm0
vmovaps 0x780(%rsp), %ymm1
vpackuswb %ymm1, %ymm0, %ymm0
vmovaps %ymm0, 0x1e0(%rsp)
vmovaps 0x1e0(%rsp), %ymm0
vpermq $0xd8, %ymm0, %ymm0 # ymm0 = ymm0[0,2,1,3]
vmovaps %ymm0, 0x1e0(%rsp)
vmovaps 0x1e0(%rsp), %ymm0
vmovaps %ymm0, 0x7e0(%rsp)
vmovdqa 0x7e0(%rsp), %xmm0
vmovdqa %xmm0, 0x1d0(%rsp)
movq 0x6c0(%rsp), %rax
movl 0x25c(%rsp), %ecx
imull 0x6bc(%rsp), %ecx
addl 0x258(%rsp), %ecx
movslq %ecx, %rcx
addq %rcx, %rax
vmovdqa 0x1d0(%rsp), %xmm0
movq %rax, 0x858(%rsp)
vmovdqa %xmm0, 0x840(%rsp)
vmovdqa 0x840(%rsp), %xmm0
movq 0x858(%rsp), %rax
vmovdqu %xmm0, (%rax)
movl 0x258(%rsp), %eax
addl $0x10, %eax
movl %eax, 0x258(%rsp)
jmp 0x8c6b33
movl 0x25c(%rsp), %eax
addl $0x1, %eax
movl %eax, 0x25c(%rsp)
jmp 0x8c6b14
jmp 0x8c6d09
jmp 0x8c6d0b
jmp 0x8c6d0d
jmp 0x8c7ddf
leaq 0x253c27(%rip), %rax # 0xb1a940
movq %rax, 0x988(%rsp)
movq 0x988(%rsp), %rax
vmovdqu (%rax), %ymm0
vmovdqa %ymm0, 0x140(%rsp)
leaq 0x253c23(%rip), %rax # 0xb1a960
movq %rax, 0x980(%rsp)
movq 0x980(%rsp), %rax
vmovdqu (%rax), %ymm0
vmovdqa %ymm0, 0x160(%rsp)
leaq 0x253c1f(%rip), %rax # 0xb1a980
movq %rax, 0x978(%rsp)
movq 0x978(%rsp), %rax
vmovdqu (%rax), %ymm0
vmovdqa %ymm0, 0x180(%rsp)
cmpl $0x6, 0x5bc(%rsp)
jne 0x8c7188
movq 0x6d0(%rsp), %rax
addq $-0x2, %rax
movq %rax, 0x138(%rsp)
movq 0x6a8(%rsp), %rdi
movl 0x6a4(%rsp), %esi
leaq 0x5c0(%rsp), %rdx
vzeroupper
callq 0x8cdba0
cmpl $0x8, 0x6b8(%rsp)
jne 0x8c6e7d
jmp 0x8c6dcf
movq 0x138(%rsp), %rdi
movslq 0x6cc(%rsp), %rsi
leaq 0x5c0(%rsp), %rdx
leaq 0x140(%rsp), %rcx
callq 0x8d28c0
vmovdqa %ymm0, 0x100(%rsp)
vmovdqa 0x100(%rsp), %ymm0
movq 0x6c0(%rsp), %rdi
movslq 0x6bc(%rsp), %rsi
callq 0x8d2910
movl 0x6cc(%rsp), %ecx
shll %ecx
movq 0x138(%rsp), %rax
movslq %ecx, %rcx
addq %rcx, %rax
movq %rax, 0x138(%rsp)
movl 0x6bc(%rsp), %ecx
shll %ecx
movq 0x6c0(%rsp), %rax
movslq %ecx, %rcx
addq %rcx, %rax
movq %rax, 0x6c0(%rsp)
movl 0x694(%rsp), %eax
subl $0x2, %eax
movl %eax, 0x694(%rsp)
cmpl $0x0, 0x694(%rsp)
jne 0x8c6dcf
jmp 0x8c7183
cmpl $0x10, 0x6b8(%rsp)
jne 0x8c6f38
jmp 0x8c6e8d
movq 0x138(%rsp), %rdi
movl 0x6cc(%rsp), %esi
leaq 0x5c0(%rsp), %rdx
leaq 0x140(%rsp), %rcx
leaq 0xc0(%rsp), %r8
callq 0x8d2960
movq 0x6c0(%rsp), %rsi
movslq 0x6bc(%rsp), %rdx
leaq 0xc0(%rsp), %rdi
callq 0x8d1940
movl 0x6cc(%rsp), %ecx
shll %ecx
movq 0x138(%rsp), %rax
movslq %ecx, %rcx
addq %rcx, %rax
movq %rax, 0x138(%rsp)
movl 0x6bc(%rsp), %ecx
shll %ecx
movq 0x6c0(%rsp), %rax
movslq %ecx, %rcx
addq %rcx, %rax
movq %rax, 0x6c0(%rsp)
movl 0x694(%rsp), %eax
subl $0x2, %eax
movl %eax, 0x694(%rsp)
cmpl $0x0, 0x694(%rsp)
jne 0x8c6e8d
jmp 0x8c7181
cmpl $0x20, 0x6b8(%rsp)
jne 0x8c6fbe
jmp 0x8c6f44
movq 0x138(%rsp), %rdi
movq 0x6c0(%rsp), %rcx
leaq 0x5c0(%rsp), %rsi
leaq 0x140(%rsp), %rdx
callq 0x8d29d0
movl 0x6cc(%rsp), %ecx
movq 0x138(%rsp), %rax
movslq %ecx, %rcx
addq %rcx, %rax
movq %rax, 0x138(%rsp)
movl 0x6bc(%rsp), %ecx
movq 0x6c0(%rsp), %rax
movslq %ecx, %rcx
addq %rcx, %rax
movq %rax, 0x6c0(%rsp)
movl 0x694(%rsp), %eax
addl $-0x1, %eax
movl %eax, 0x694(%rsp)
cmpl $0x0, %eax
jne 0x8c6f44
jmp 0x8c717f
cmpl $0x40, 0x6b8(%rsp)
jne 0x8c7079
jmp 0x8c6fce
movq 0x138(%rsp), %rdi
movq 0x6c0(%rsp), %rcx
leaq 0x5c0(%rsp), %rsi
leaq 0x140(%rsp), %rdx
callq 0x8d29d0
movq 0x138(%rsp), %rdi
addq $0x20, %rdi
movq 0x6c0(%rsp), %rcx
addq $0x20, %rcx
leaq 0x5c0(%rsp), %rsi
leaq 0x140(%rsp), %rdx
callq 0x8d29d0
movl 0x6cc(%rsp), %ecx
movq 0x138(%rsp), %rax
movslq %ecx, %rcx
addq %rcx, %rax
movq %rax, 0x138(%rsp)
movl 0x6bc(%rsp), %ecx
movq 0x6c0(%rsp), %rax
movslq %ecx, %rcx
addq %rcx, %rax
movq %rax, 0x6c0(%rsp)
movl 0x694(%rsp), %eax
addl $-0x1, %eax
movl %eax, 0x694(%rsp)
cmpl $0x0, %eax
jne 0x8c6fce
jmp 0x8c717d
jmp 0x8c707b
movq 0x138(%rsp), %rdi
movq 0x6c0(%rsp), %rcx
leaq 0x5c0(%rsp), %rsi
leaq 0x140(%rsp), %rdx
callq 0x8d29d0
movq 0x138(%rsp), %rdi
addq $0x20, %rdi
movq 0x6c0(%rsp), %rcx
addq $0x20, %rcx
leaq 0x5c0(%rsp), %rsi
leaq 0x140(%rsp), %rdx
callq 0x8d29d0
movq 0x138(%rsp), %rdi
addq $0x40, %rdi
movq 0x6c0(%rsp), %rcx
addq $0x40, %rcx
leaq 0x5c0(%rsp), %rsi
leaq 0x140(%rsp), %rdx
callq 0x8d29d0
movq 0x138(%rsp), %rdi
addq $0x60, %rdi
movq 0x6c0(%rsp), %rcx
addq $0x60, %rcx
leaq 0x5c0(%rsp), %rsi
leaq 0x140(%rsp), %rdx
callq 0x8d29d0
movl 0x6cc(%rsp), %ecx
movq 0x138(%rsp), %rax
movslq %ecx, %rcx
addq %rcx, %rax
movq %rax, 0x138(%rsp)
movl 0x6bc(%rsp), %ecx
movq 0x6c0(%rsp), %rax
movslq %ecx, %rcx
addq %rcx, %rax
movq %rax, 0x6c0(%rsp)
movl 0x694(%rsp), %eax
addl $-0x1, %eax
movl %eax, 0x694(%rsp)
cmpl $0x0, %eax
jne 0x8c707b
jmp 0x8c717d
jmp 0x8c717f
jmp 0x8c7181
jmp 0x8c7183
jmp 0x8c7ddd
cmpl $0x8, 0x5bc(%rsp)
jne 0x8c7ddb
movq 0x6d0(%rsp), %rax
addq $-0x3, %rax
movq %rax, 0xb8(%rsp)
leaq 0x2537ef(%rip), %rax # 0xb1a9a0
movq %rax, 0x970(%rsp)
movq 0x970(%rsp), %rax
vmovdqu (%rax), %ymm0
vmovdqa %ymm0, 0x1a0(%rsp)
movq 0x6a8(%rsp), %rdi
movl 0x6a4(%rsp), %esi
leaq 0x5c0(%rsp), %rdx
vzeroupper
callq 0x8ce8a0
cmpl $0x8, 0x6b8(%rsp)
jne 0x8c72ab
jmp 0x8c71fd
movq 0xb8(%rsp), %rdi
movslq 0x6cc(%rsp), %rsi
leaq 0x5c0(%rsp), %rdx
leaq 0x140(%rsp), %rcx
callq 0x8d2a20
vmovdqa %ymm0, 0x80(%rsp)
vmovdqa 0x80(%rsp), %ymm0
movq 0x6c0(%rsp), %rdi
movslq 0x6bc(%rsp), %rsi
callq 0x8d2910
movl 0x6cc(%rsp), %ecx
shll %ecx
movq 0xb8(%rsp), %rax
movslq %ecx, %rcx
addq %rcx, %rax
movq %rax, 0xb8(%rsp)
movl 0x6bc(%rsp), %ecx
shll %ecx
movq 0x6c0(%rsp), %rax
movslq %ecx, %rcx
addq %rcx, %rax
movq %rax, 0x6c0(%rsp)
movl 0x694(%rsp), %eax
subl $0x2, %eax
movl %eax, 0x694(%rsp)
cmpl $0x0, 0x694(%rsp)
jne 0x8c71fd
jmp 0x8c7dd9
cmpl $0x10, 0x6b8(%rsp)
jne 0x8c73ec
jmp 0x8c72bb
movq 0xb8(%rsp), %rcx
movl 0x6cc(%rsp), %eax
movq %rcx, 0xa78(%rsp)
movl %eax, 0xa74(%rsp)
leaq 0x5c0(%rsp), %rax
movq %rax, 0xa68(%rsp)
leaq 0x140(%rsp), %rax
movq %rax, 0xa60(%rsp)
leaq 0x40(%rsp), %rax
movq %rax, 0xa58(%rsp)
movq 0xa78(%rsp), %rdi
movslq 0xa74(%rsp), %rsi
movq 0xa68(%rsp), %rdx
movq 0xa60(%rsp), %rcx
callq 0x8d2a20
movq 0xa58(%rsp), %rax
vmovdqa %ymm0, (%rax)
movq 0xa78(%rsp), %rdi
addq $0x8, %rdi
movslq 0xa74(%rsp), %rsi
movq 0xa68(%rsp), %rdx
movq 0xa60(%rsp), %rcx
callq 0x8d2a20
movq 0xa58(%rsp), %rax
vmovdqa %ymm0, 0x20(%rax)
movq 0x6c0(%rsp), %rsi
movslq 0x6bc(%rsp), %rdx
leaq 0x40(%rsp), %rdi
vzeroupper
callq 0x8d1940
movl 0x6cc(%rsp), %ecx
shll %ecx
movq 0xb8(%rsp), %rax
movslq %ecx, %rcx
addq %rcx, %rax
movq %rax, 0xb8(%rsp)
movl 0x6bc(%rsp), %ecx
shll %ecx
movq 0x6c0(%rsp), %rax
movslq %ecx, %rcx
addq %rcx, %rax
movq %rax, 0x6c0(%rsp)
movl 0x694(%rsp), %eax
subl $0x2, %eax
movl %eax, 0x694(%rsp)
cmpl $0x0, 0x694(%rsp)
jne 0x8c72bb
jmp 0x8c7dd7
cmpl $0x20, 0x6b8(%rsp)
jne 0x8c7590
jmp 0x8c73fc
movq 0xb8(%rsp), %rcx
movq 0x6c0(%rsp), %rax
movq %rcx, 0xad8(%rsp)
leaq 0x5c0(%rsp), %rcx
movq %rcx, 0xad0(%rsp)
leaq 0x140(%rsp), %rcx
movq %rcx, 0xac8(%rsp)
movq %rax, 0xac0(%rsp)
movq 0xad8(%rsp), %rdx
movq 0xad0(%rsp), %rcx
movq 0xac8(%rsp), %rax
movq %rdx, 0x1078(%rsp)
movq %rcx, 0x1070(%rsp)
movq %rax, 0x1068(%rsp)
leaq 0xa80(%rsp), %rax
movq %rax, 0x1060(%rsp)
movq 0x1078(%rsp), %rax
movq %rax, 0x1088(%rsp)
movq 0x1088(%rsp), %rax
vmovdqu (%rax), %ymm0
vmovdqa %ymm0, 0x1040(%rsp)
movq 0x1078(%rsp), %rax
addq $0x8, %rax
movq %rax, 0x1080(%rsp)
movq 0x1080(%rsp), %rax
vmovdqu (%rax), %ymm0
vmovdqa %ymm0, 0x1020(%rsp)
vmovdqa 0x1040(%rsp), %ymm0
movq 0x1070(%rsp), %rdi
movq 0x1068(%rsp), %rsi
callq 0x8d2f50
movq 0x1060(%rsp), %rax
vmovdqa %ymm0, (%rax)
vmovdqa 0x1020(%rsp), %ymm0
movq 0x1070(%rsp), %rdi
movq 0x1068(%rsp), %rsi
callq 0x8d2f50
movq 0x1060(%rsp), %rax
vmovdqa %ymm0, 0x20(%rax)
movq 0xac0(%rsp), %rsi
leaq 0xa80(%rsp), %rdi
vzeroupper
callq 0x8d2c10
movl 0x6cc(%rsp), %ecx
movq 0xb8(%rsp), %rax
movslq %ecx, %rcx
addq %rcx, %rax
movq %rax, 0xb8(%rsp)
movl 0x6bc(%rsp), %ecx
movq 0x6c0(%rsp), %rax
movslq %ecx, %rcx
addq %rcx, %rax
movq %rax, 0x6c0(%rsp)
movl 0x694(%rsp), %eax
addl $-0x1, %eax
movl %eax, 0x694(%rsp)
cmpl $0x0, %eax
jne 0x8c73fc
jmp 0x8c7dd5
cmpl $0x40, 0x6b8(%rsp)
jne 0x8c7877
jmp 0x8c75a0
movq 0xb8(%rsp), %rcx
movq 0x6c0(%rsp), %rax
movq %rcx, 0xb98(%rsp)
leaq 0x5c0(%rsp), %rcx
movq %rcx, 0xb90(%rsp)
leaq 0x140(%rsp), %rcx
movq %rcx, 0xb88(%rsp)
movq %rax, 0xb80(%rsp)
movq 0xb98(%rsp), %rdx
movq 0xb90(%rsp), %rcx
movq 0xb88(%rsp), %rax
movq %rdx, 0xf88(%rsp)
movq %rcx, 0xf80(%rsp)
movq %rax, 0xf78(%rsp)
leaq 0xb40(%rsp), %rax
movq %rax, 0xf70(%rsp)
movq 0xf88(%rsp), %rax
movq %rax, 0xf98(%rsp)
movq 0xf98(%rsp), %rax
vmovdqu (%rax), %ymm0
vmovdqa %ymm0, 0xf40(%rsp)
movq 0xf88(%rsp), %rax
addq $0x8, %rax
movq %rax, 0xf90(%rsp)
movq 0xf90(%rsp), %rax
vmovdqu (%rax), %ymm0
vmovdqa %ymm0, 0xf20(%rsp)
vmovdqa 0xf40(%rsp), %ymm0
movq 0xf80(%rsp), %rdi
movq 0xf78(%rsp), %rsi
callq 0x8d2f50
movq 0xf70(%rsp), %rax
vmovdqa %ymm0, (%rax)
vmovdqa 0xf20(%rsp), %ymm0
movq 0xf80(%rsp), %rdi
movq 0xf78(%rsp), %rsi
callq 0x8d2f50
movq 0xf70(%rsp), %rax
vmovdqa %ymm0, 0x20(%rax)
movq 0xb80(%rsp), %rsi
leaq 0xb40(%rsp), %rdi
vzeroupper
callq 0x8d2c10
movq 0xb8(%rsp), %rcx
addq $0x20, %rcx
movq 0x6c0(%rsp), %rax
addq $0x20, %rax
movq %rcx, 0xb38(%rsp)
leaq 0x5c0(%rsp), %rcx
movq %rcx, 0xb30(%rsp)
leaq 0x140(%rsp), %rcx
movq %rcx, 0xb28(%rsp)
movq %rax, 0xb20(%rsp)
movq 0xb38(%rsp), %rdx
movq 0xb30(%rsp), %rcx
movq 0xb28(%rsp), %rax
movq %rdx, 0x1008(%rsp)
movq %rcx, 0x1000(%rsp)
movq %rax, 0xff8(%rsp)
leaq 0xae0(%rsp), %rax
movq %rax, 0xff0(%rsp)
movq 0x1008(%rsp), %rax
movq %rax, 0x1018(%rsp)
movq 0x1018(%rsp), %rax
vmovdqu (%rax), %ymm0
vmovdqa %ymm0, 0xfc0(%rsp)
movq 0x1008(%rsp), %rax
addq $0x8, %rax
movq %rax, 0x1010(%rsp)
movq 0x1010(%rsp), %rax
vmovdqu (%rax), %ymm0
vmovdqa %ymm0, 0xfa0(%rsp)
vmovdqa 0xfc0(%rsp), %ymm0
movq 0x1000(%rsp), %rdi
movq 0xff8(%rsp), %rsi
callq 0x8d2f50
movq 0xff0(%rsp), %rax
vmovdqa %ymm0, (%rax)
vmovdqa 0xfa0(%rsp), %ymm0
movq 0x1000(%rsp), %rdi
movq 0xff8(%rsp), %rsi
callq 0x8d2f50
movq 0xff0(%rsp), %rax
vmovdqa %ymm0, 0x20(%rax)
movq 0xb20(%rsp), %rsi
leaq 0xae0(%rsp), %rdi
vzeroupper
callq 0x8d2c10
movl 0x6cc(%rsp), %ecx
movq 0xb8(%rsp), %rax
movslq %ecx, %rcx
addq %rcx, %rax
movq %rax, 0xb8(%rsp)
movl 0x6bc(%rsp), %ecx
movq 0x6c0(%rsp), %rax
movslq %ecx, %rcx
addq %rcx, %rax
movq %rax, 0x6c0(%rsp)
movl 0x694(%rsp), %eax
addl $-0x1, %eax
movl %eax, 0x694(%rsp)
cmpl $0x0, %eax
jne 0x8c75a0
jmp 0x8c7dd3
jmp 0x8c7879
movq 0xb8(%rsp), %rcx
movq 0x6c0(%rsp), %rax
movq %rcx, 0xd18(%rsp)
leaq 0x5c0(%rsp), %rcx
movq %rcx, 0xd10(%rsp)
leaq 0x140(%rsp), %rcx
movq %rcx, 0xd08(%rsp)
movq %rax, 0xd00(%rsp)
movq 0xd18(%rsp), %rdx
movq 0xd10(%rsp), %rcx
movq 0xd08(%rsp), %rax
movq %rdx, 0xd88(%rsp)
movq %rcx, 0xd80(%rsp)
movq %rax, 0xd78(%rsp)
leaq 0xcc0(%rsp), %rax
movq %rax, 0xd70(%rsp)
movq 0xd88(%rsp), %rax
movq %rax, 0xd98(%rsp)
movq 0xd98(%rsp), %rax
vmovdqu (%rax), %ymm0
vmovdqa %ymm0, 0xd40(%rsp)
movq 0xd88(%rsp), %rax
addq $0x8, %rax
movq %rax, 0xd90(%rsp)
movq 0xd90(%rsp), %rax
vmovdqu (%rax), %ymm0
vmovdqa %ymm0, 0xd20(%rsp)
vmovdqa 0xd40(%rsp), %ymm0
movq 0xd80(%rsp), %rdi
movq 0xd78(%rsp), %rsi
callq 0x8d2f50
movq 0xd70(%rsp), %rax
vmovdqa %ymm0, (%rax)
vmovdqa 0xd20(%rsp), %ymm0
movq 0xd80(%rsp), %rdi
movq 0xd78(%rsp), %rsi
callq 0x8d2f50
movq 0xd70(%rsp), %rax
vmovdqa %ymm0, 0x20(%rax)
movq 0xd00(%rsp), %rsi
leaq 0xcc0(%rsp), %rdi
vzeroupper
callq 0x8d2c10
movq 0xb8(%rsp), %rcx
addq $0x20, %rcx
movq 0x6c0(%rsp), %rax
addq $0x20, %rax
movq %rcx, 0xcb8(%rsp)
leaq 0x5c0(%rsp), %rcx
movq %rcx, 0xcb0(%rsp)
leaq 0x140(%rsp), %rcx
movq %rcx, 0xca8(%rsp)
movq %rax, 0xca0(%rsp)
movq 0xcb8(%rsp), %rdx
movq 0xcb0(%rsp), %rcx
movq 0xca8(%rsp), %rax
movq %rdx, 0xe08(%rsp)
movq %rcx, 0xe00(%rsp)
movq %rax, 0xdf8(%rsp)
leaq 0xc60(%rsp), %rax
movq %rax, 0xdf0(%rsp)
movq 0xe08(%rsp), %rax
movq %rax, 0xe18(%rsp)
movq 0xe18(%rsp), %rax
vmovdqu (%rax), %ymm0
vmovdqa %ymm0, 0xdc0(%rsp)
movq 0xe08(%rsp), %rax
addq $0x8, %rax
movq %rax, 0xe10(%rsp)
movq 0xe10(%rsp), %rax
vmovdqu (%rax), %ymm0
vmovdqa %ymm0, 0xda0(%rsp)
vmovdqa 0xdc0(%rsp), %ymm0
movq 0xe00(%rsp), %rdi
movq 0xdf8(%rsp), %rsi
callq 0x8d2f50
movq 0xdf0(%rsp), %rax
vmovdqa %ymm0, (%rax)
vmovdqa 0xda0(%rsp), %ymm0
movq 0xe00(%rsp), %rdi
movq 0xdf8(%rsp), %rsi
callq 0x8d2f50
movq 0xdf0(%rsp), %rax
vmovdqa %ymm0, 0x20(%rax)
movq 0xca0(%rsp), %rsi
leaq 0xc60(%rsp), %rdi
vzeroupper
callq 0x8d2c10
movq 0xb8(%rsp), %rcx
addq $0x40, %rcx
movq 0x6c0(%rsp), %rax
addq $0x40, %rax
movq %rcx, 0xc58(%rsp)
leaq 0x5c0(%rsp), %rcx
movq %rcx, 0xc50(%rsp)
leaq 0x140(%rsp), %rcx
movq %rcx, 0xc48(%rsp)
movq %rax, 0xc40(%rsp)
movq 0xc58(%rsp), %rdx
movq 0xc50(%rsp), %rcx
movq 0xc48(%rsp), %rax
movq %rdx, 0xe88(%rsp)
movq %rcx, 0xe80(%rsp)
movq %rax, 0xe78(%rsp)
leaq 0xc00(%rsp), %rax
movq %rax, 0xe70(%rsp)
movq 0xe88(%rsp), %rax
movq %rax, 0xe98(%rsp)
movq 0xe98(%rsp), %rax
vmovdqu (%rax), %ymm0
vmovdqa %ymm0, 0xe40(%rsp)
movq 0xe88(%rsp), %rax
addq $0x8, %rax
movq %rax, 0xe90(%rsp)
movq 0xe90(%rsp), %rax
vmovdqu (%rax), %ymm0
vmovdqa %ymm0, 0xe20(%rsp)
vmovdqa 0xe40(%rsp), %ymm0
movq 0xe80(%rsp), %rdi
movq 0xe78(%rsp), %rsi
callq 0x8d2f50
movq 0xe70(%rsp), %rax
vmovdqa %ymm0, (%rax)
vmovdqa 0xe20(%rsp), %ymm0
movq 0xe80(%rsp), %rdi
movq 0xe78(%rsp), %rsi
callq 0x8d2f50
movq 0xe70(%rsp), %rax
vmovdqa %ymm0, 0x20(%rax)
movq 0xc40(%rsp), %rsi
leaq 0xc00(%rsp), %rdi
vzeroupper
callq 0x8d2c10
movq 0xb8(%rsp), %rcx
addq $0x60, %rcx
movq 0x6c0(%rsp), %rax
addq $0x60, %rax
movq %rcx, 0xbf8(%rsp)
leaq 0x5c0(%rsp), %rcx
movq %rcx, 0xbf0(%rsp)
leaq 0x140(%rsp), %rcx
movq %rcx, 0xbe8(%rsp)
movq %rax, 0xbe0(%rsp)
movq 0xbf8(%rsp), %rdx
movq 0xbf0(%rsp), %rcx
movq 0xbe8(%rsp), %rax
movq %rdx, 0xf08(%rsp)
movq %rcx, 0xf00(%rsp)
movq %rax, 0xef8(%rsp)
leaq 0xba0(%rsp), %rax
movq %rax, 0xef0(%rsp)
movq 0xf08(%rsp), %rax
movq %rax, 0xf18(%rsp)
movq 0xf18(%rsp), %rax
vmovdqu (%rax), %ymm0
vmovdqa %ymm0, 0xec0(%rsp)
movq 0xf08(%rsp), %rax
addq $0x8, %rax
movq %rax, 0xf10(%rsp)
movq 0xf10(%rsp), %rax
vmovdqu (%rax), %ymm0
vmovdqa %ymm0, 0xea0(%rsp)
vmovdqa 0xec0(%rsp), %ymm0
movq 0xf00(%rsp), %rdi
movq 0xef8(%rsp), %rsi
callq 0x8d2f50
movq 0xef0(%rsp), %rax
vmovdqa %ymm0, (%rax)
vmovdqa 0xea0(%rsp), %ymm0
movq 0xf00(%rsp), %rdi
movq 0xef8(%rsp), %rsi
callq 0x8d2f50
movq 0xef0(%rsp), %rax
vmovdqa %ymm0, 0x20(%rax)
movq 0xbe0(%rsp), %rsi
leaq 0xba0(%rsp), %rdi
vzeroupper
callq 0x8d2c10
movl 0x6cc(%rsp), %ecx
movq 0xb8(%rsp), %rax
movslq %ecx, %rcx
addq %rcx, %rax
movq %rax, 0xb8(%rsp)
movl 0x6bc(%rsp), %ecx
movq 0x6c0(%rsp), %rax
movslq %ecx, %rcx
addq %rcx, %rax
movq %rax, 0x6c0(%rsp)
movl 0x694(%rsp), %eax
addl $-0x1, %eax
movl %eax, 0x694(%rsp)
cmpl $0x0, %eax
jne 0x8c7879
jmp 0x8c7dd3
jmp 0x8c7dd5
jmp 0x8c7dd7
jmp 0x8c7dd9
jmp 0x8c7ddb
jmp 0x8c7ddd
jmp 0x8c7ddf
jmp 0x8c7de1
jmp 0x8c7de3
movq %rbp, %rsp
popq %rbp
vzeroupper
retq
nopl (%rax,%rax)
| /m-ab-s[P]aom/av1/common/x86/convolve_avx2.c |
av1_convolve_x_sr_general_avx2 | static inline void av1_convolve_x_sr_general_avx2(
const uint8_t *src, int src_stride, uint8_t *dst, int dst_stride, int w,
int h, const InterpFilterParams *filter_params_x, const int subpel_x_qn,
ConvolveParams *conv_params) {
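  // Intermediate rounding: the low-bitdepth paths shift by (round_0 - 1) first
  // and then by 'bits' = FILTER_BITS - round_0; the 12-tap path re-derives
  // these constants below for its 32-bit accumulation.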
const int bits = FILTER_BITS - conv_params->round_0;
const __m128i round_shift = _mm_cvtsi32_si128(bits);
__m256i round_0_const =
_mm256_set1_epi16((1 << (conv_params->round_0 - 1)) >> 1);
__m128i round_0_shift = _mm_cvtsi32_si128(conv_params->round_0 - 1);
__m256i round_const = _mm256_set1_epi16((1 << bits) >> 1);
int i, horiz_tap = get_filter_tap(filter_params_x, subpel_x_qn);
assert(bits >= 0);
assert((FILTER_BITS - conv_params->round_1) >= 0 ||
((conv_params->round_0 + conv_params->round_1) == 2 * FILTER_BITS));
assert(conv_params->round_0 > 0);
__m256i coeffs[6], filt[4];
filt[0] = _mm256_load_si256((__m256i const *)(filt_global_avx2));
filt[1] = _mm256_load_si256((__m256i const *)(filt_global_avx2 + 32));
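  // filt[] holds the byte-shuffle masks consumed by the convolve_lowbd_x*
  // helpers; the 6-tap and default paths load the remaining two masks later.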
if (horiz_tap == 6)
prepare_coeffs_6t_lowbd(filter_params_x, subpel_x_qn, coeffs);
else if (horiz_tap == 12) {
prepare_coeffs_12taps(filter_params_x, subpel_x_qn, coeffs);
} else {
prepare_coeffs_lowbd(filter_params_x, subpel_x_qn, coeffs);
}
// horz_filt as 4 tap
if (horiz_tap == 4) {
const int fo_horiz = 1;
const uint8_t *const src_ptr = src - fo_horiz;
if (w <= 8) {
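      // Narrow blocks: rows i and i + 1 are packed into the two 128-bit lanes
      // of one register so each iteration filters two rows at once.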
for (i = 0; i < h; i += 2) {
const __m256i data = _mm256_permute2x128_si256(
_mm256_castsi128_si256(
_mm_loadu_si128((__m128i *)(&src_ptr[i * src_stride]))),
_mm256_castsi128_si256(_mm_loadu_si128(
(__m128i *)(&src_ptr[i * src_stride + src_stride]))),
0x20);
__m256i res_16b = convolve_lowbd_x_4tap(data, coeffs + 1, filt);
res_16b = _mm256_sra_epi16(_mm256_add_epi16(res_16b, round_0_const),
round_0_shift);
res_16b = _mm256_sra_epi16(_mm256_add_epi16(res_16b, round_const),
round_shift);
/* rounding code */
// 8 bit conversion and saturation to uint8
__m256i res_8b = _mm256_packus_epi16(res_16b, res_16b);
const __m128i res_0 = _mm256_castsi256_si128(res_8b);
const __m128i res_1 = _mm256_extracti128_si256(res_8b, 1);
if (w > 4) {
_mm_storel_epi64((__m128i *)&dst[i * dst_stride], res_0);
_mm_storel_epi64((__m128i *)&dst[i * dst_stride + dst_stride], res_1);
} else if (w > 2) {
xx_storel_32(&dst[i * dst_stride], res_0);
xx_storel_32(&dst[i * dst_stride + dst_stride], res_1);
} else {
__m128i *const p_0 = (__m128i *)&dst[i * dst_stride];
__m128i *const p_1 = (__m128i *)&dst[i * dst_stride + dst_stride];
*(uint16_t *)p_0 = (uint16_t)_mm_cvtsi128_si32(res_0);
*(uint16_t *)p_1 = (uint16_t)_mm_cvtsi128_si32(res_1);
}
}
} else {
for (i = 0; i < h; ++i) {
for (int j = 0; j < w; j += 16) {
// 0 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 8 9 10 11 12 13 14 15 16 17
// 18 19 20 21 22 23
const __m256i data = _mm256_inserti128_si256(
_mm256_loadu_si256((__m256i *)&src_ptr[(i * src_stride) + j]),
_mm_loadu_si128((__m128i *)&src_ptr[(i * src_stride) + (j + 8)]),
1);
__m256i res_16b = convolve_lowbd_x_4tap(data, coeffs + 1, filt);
res_16b = _mm256_sra_epi16(_mm256_add_epi16(res_16b, round_0_const),
round_0_shift);
res_16b = _mm256_sra_epi16(_mm256_add_epi16(res_16b, round_const),
round_shift);
/* rounding code */
// 8 bit conversion and saturation to uint8
__m256i res_8b = _mm256_packus_epi16(res_16b, res_16b);
// Store values into the destination buffer
// 0 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15
res_8b = _mm256_permute4x64_epi64(res_8b, 216);
__m128i res = _mm256_castsi256_si128(res_8b);
_mm_storeu_si128((__m128i *)&dst[i * dst_stride + j], res);
}
}
}
} else if (horiz_tap == 6) {
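    // 6-tap path: mirrors the 4-tap flow above but uses convolve_lowbd_x_6tap
    // together with the two extra shuffle masks loaded just below.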
const int fo_horiz = horiz_tap / 2 - 1;
const uint8_t *const src_ptr = src - fo_horiz;
filt[2] = _mm256_load_si256((__m256i const *)(filt_global_avx2 + 32 * 2));
filt[3] = _mm256_load_si256((__m256i const *)(filt_global_avx2 + 32 * 3));
if (w <= 8) {
for (i = 0; i < h; i += 2) {
const __m256i data = _mm256_permute2x128_si256(
_mm256_castsi128_si256(
_mm_loadu_si128((__m128i *)(&src_ptr[i * src_stride]))),
_mm256_castsi128_si256(_mm_loadu_si128(
(__m128i *)(&src_ptr[i * src_stride + src_stride]))),
0x20);
__m256i res_16b = convolve_lowbd_x_6tap(data, coeffs, filt);
res_16b = _mm256_sra_epi16(_mm256_add_epi16(res_16b, round_0_const),
round_0_shift);
res_16b = _mm256_sra_epi16(_mm256_add_epi16(res_16b, round_const),
round_shift);
/* rounding code */
// 8 bit conversion and saturation to uint8
__m256i res_8b = _mm256_packus_epi16(res_16b, res_16b);
const __m128i res_0 = _mm256_castsi256_si128(res_8b);
const __m128i res_1 = _mm256_extracti128_si256(res_8b, 1);
if (w > 4) {
_mm_storel_epi64((__m128i *)&dst[i * dst_stride], res_0);
_mm_storel_epi64((__m128i *)&dst[i * dst_stride + dst_stride], res_1);
} else if (w > 2) {
xx_storel_32(&dst[i * dst_stride], res_0);
xx_storel_32(&dst[i * dst_stride + dst_stride], res_1);
} else {
__m128i *const p_0 = (__m128i *)&dst[i * dst_stride];
__m128i *const p_1 = (__m128i *)&dst[i * dst_stride + dst_stride];
          *(uint16_t *)p_0 = (uint16_t)_mm_cvtsi128_si32(res_0);
          *(uint16_t *)p_1 = (uint16_t)_mm_cvtsi128_si32(res_1);
}
}
} else {
for (i = 0; i < h; ++i) {
for (int j = 0; j < w; j += 16) {
// 0 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 8 9 10 11 12 13 14 15 16 17
// 18 19 20 21 22 23
const __m256i data = _mm256_inserti128_si256(
_mm256_loadu_si256((__m256i *)&src_ptr[(i * src_stride) + j]),
_mm_loadu_si128((__m128i *)&src_ptr[(i * src_stride) + (j + 8)]),
1);
__m256i res_16b = convolve_lowbd_x_6tap(data, coeffs, filt);
res_16b = _mm256_sra_epi16(_mm256_add_epi16(res_16b, round_0_const),
round_0_shift);
res_16b = _mm256_sra_epi16(_mm256_add_epi16(res_16b, round_const),
round_shift);
/* rounding code */
// 8 bit conversion and saturation to uint8
__m256i res_8b = _mm256_packus_epi16(res_16b, res_16b);
// Store values into the destination buffer
// 0 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15
res_8b = _mm256_permute4x64_epi64(res_8b, 216);
__m128i res = _mm256_castsi256_si128(res_8b);
_mm_storeu_si128((__m128i *)&dst[i * dst_stride + j], res);
}
}
}
} else if (horiz_tap == 12) { // horiz_tap == 12
const int fo_horiz = filter_params_x->taps / 2 - 1;
const uint8_t *const src_ptr = src - fo_horiz;
const __m256i v_zero = _mm256_setzero_si256();
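    // The 12-tap kernel accumulates in 32 bits, so redefine the rounding
    // constants and the first-stage shift for epi32 arithmetic.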
round_0_const = _mm256_set1_epi32((1 << (conv_params->round_0)) >> 1);
round_const = _mm256_set1_epi32((1 << bits) >> 1);
round_0_shift = _mm_cvtsi32_si128(conv_params->round_0);
__m256i s[6];
if (w <= 4) {
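      // Narrow blocks (w <= 4): one row per 128-bit lane, two rows per
      // iteration; each lane yields four 32-bit results.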
for (i = 0; i < h; i += 2) {
const __m256i data = _mm256_permute2x128_si256(
_mm256_castsi128_si256(
_mm_loadu_si128((__m128i *)(&src_ptr[i * src_stride]))),
_mm256_castsi128_si256(_mm_loadu_si128(
(__m128i *)(&src_ptr[i * src_stride + src_stride]))),
0x20);
// row0 0..7 row1 0..7
const __m256i s_16lo = _mm256_unpacklo_epi8(data, v_zero);
// row0 8..F row1 8..F
const __m256i s_16hi = _mm256_unpackhi_epi8(data, v_zero);
// row0 00 00 01 01 .. 03 03 row1 00 00 01 01 .. 03 03
const __m256i s_lolo = _mm256_unpacklo_epi16(s_16lo, s_16lo);
// row0 04 04 .. 07 07 row1 04 04 .. 07 07
const __m256i s_lohi = _mm256_unpackhi_epi16(s_16lo, s_16lo);
// row0 08 08 09 09 .. 0B 0B row1 08 08 09 09 .. 0B 0B
const __m256i s_hilo = _mm256_unpacklo_epi16(s_16hi, s_16hi);
// row0 0C 0C .. 0F 0F row1 0C 0C .. 0F 0F
const __m256i s_hihi = _mm256_unpackhi_epi16(s_16hi, s_16hi);
// 00 01 01 02 02 03 03 04 10 11 11 12 12 13 13 14
s[0] = _mm256_alignr_epi8(s_lohi, s_lolo, 2);
// 02 03 03 04 04 05 05 06 12 13 13 14 14 15 15 16
s[1] = _mm256_alignr_epi8(s_lohi, s_lolo, 10);
// 04 05 05 06 06 07 07 08 14 15 15 16 16 17 17 18
s[2] = _mm256_alignr_epi8(s_hilo, s_lohi, 2);
// 06 07 07 08 08 09 09 0A 16 17 17 18 18 19 19 1A
s[3] = _mm256_alignr_epi8(s_hilo, s_lohi, 10);
// 08 09 09 0A 0A 0B 0B 0C 18 19 19 1A 1A 1B 1B 1C
s[4] = _mm256_alignr_epi8(s_hihi, s_hilo, 2);
// 0A 0B 0B 0C 0C 0D 0D 0E 1A 1B 1B 1C 1C 1D 1D 1E
s[5] = _mm256_alignr_epi8(s_hihi, s_hilo, 10);
const __m256i res_lo = convolve_12taps(s, coeffs);
__m256i res_32b_lo = _mm256_sra_epi32(
_mm256_add_epi32(res_lo, round_0_const), round_0_shift);
        // 00 01 02 03 10 11 12 13
res_32b_lo = _mm256_sra_epi32(_mm256_add_epi32(res_32b_lo, round_const),
round_shift);
// 8 bit conversion and saturation to uint8
// 00 01 02 03 00 01 02 03 10 11 12 13 10 11 12 13
__m256i res_16b_lo = _mm256_packs_epi32(res_32b_lo, res_32b_lo);
// 00 01 02 03 00 01 02 03 00 01 02 03 00 01 02 03
// 10 11 12 13 10 11 12 13 10 11 12 13 10 11 12 13
__m256i res_8b_lo = _mm256_packus_epi16(res_16b_lo, res_16b_lo);
// 00 01 02 03 00 01 02 03 00 01 02 03 00 01 02 03
const __m128i res_0 = _mm256_extracti128_si256(res_8b_lo, 0);
// 10 11 12 13 10 11 12 13 10 11 12 13 10 11 12 13
const __m128i res_1 = _mm256_extracti128_si256(res_8b_lo, 1);
if (w > 2) {
// 00 01 02 03
*(int *)&dst[i * dst_stride] = _mm_cvtsi128_si32(res_0);
// 10 11 12 13
*(int *)&dst[i * dst_stride + dst_stride] = _mm_cvtsi128_si32(res_1);
} else {
// 00 01
*(uint16_t *)&dst[i * dst_stride] =
(uint16_t)_mm_cvtsi128_si32(res_0);
// 10 11
*(uint16_t *)&dst[i * dst_stride + dst_stride] =
(uint16_t)_mm_cvtsi128_si32(res_1);
}
}
} else {
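      // Wide blocks: eight outputs per iteration, computed as two four-pixel
      // groups held in the low and high 128-bit lanes.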
for (i = 0; i < h; i++) {
for (int j = 0; j < w; j += 8) {
const __m256i data = _mm256_permute2x128_si256(
_mm256_castsi128_si256(
_mm_loadu_si128((__m128i *)(&src_ptr[i * src_stride + j]))),
_mm256_castsi128_si256(_mm_loadu_si128(
(__m128i *)(&src_ptr[i * src_stride + j + 4]))),
0x20);
// row0 0..7 4..B
const __m256i s_16lo = _mm256_unpacklo_epi8(data, v_zero);
// row0 8..F C..13
const __m256i s_16hi = _mm256_unpackhi_epi8(data, v_zero);
// row0 00 00 01 01 .. 03 03 04 04 05 05 .. 07 07
const __m256i s_lolo = _mm256_unpacklo_epi16(s_16lo, s_16lo);
// row0 04 04 .. 07 07 08 08 .. 0B 0B
const __m256i s_lohi = _mm256_unpackhi_epi16(s_16lo, s_16lo);
// row0 08 08 09 09 .. 0B 0B 0C 0C 0D 0D .. 0F 0F
const __m256i s_hilo = _mm256_unpacklo_epi16(s_16hi, s_16hi);
// row0 0C 0C 0D 0D .. 0F 0F 10 10 11 11 .. 13 13
const __m256i s_hihi = _mm256_unpackhi_epi16(s_16hi, s_16hi);
s[0] = _mm256_alignr_epi8(s_lohi, s_lolo, 2);
s[1] = _mm256_alignr_epi8(s_lohi, s_lolo, 10);
s[2] = _mm256_alignr_epi8(s_hilo, s_lohi, 2);
s[3] = _mm256_alignr_epi8(s_hilo, s_lohi, 10);
s[4] = _mm256_alignr_epi8(s_hihi, s_hilo, 2);
s[5] = _mm256_alignr_epi8(s_hihi, s_hilo, 10);
const __m256i res_lo = convolve_12taps(s, coeffs);
__m256i res_32b_lo = _mm256_sra_epi32(
_mm256_add_epi32(res_lo, round_0_const), round_0_shift);
res_32b_lo = _mm256_sra_epi32(
_mm256_add_epi32(res_32b_lo, round_const), round_shift);
// 8 bit conversion and saturation to uint8
__m256i res_16b_lo = _mm256_packs_epi32(res_32b_lo, res_32b_lo);
__m256i res_8b_lo = _mm256_packus_epi16(res_16b_lo, res_16b_lo);
const __m128i res_0 = _mm256_extracti128_si256(res_8b_lo, 0);
const __m128i res_1 = _mm256_extracti128_si256(res_8b_lo, 1);
*(int *)&dst[i * dst_stride + j] = _mm_cvtsi128_si32(res_0);
*(int *)&dst[i * dst_stride + j + 4] = _mm_cvtsi128_si32(res_1);
}
}
}
} else {
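    // Fallback path: all other filter lengths go through convolve_lowbd_x with
    // the full coefficient set.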
const int fo_horiz = filter_params_x->taps / 2 - 1;
const uint8_t *const src_ptr = src - fo_horiz;
filt[2] = _mm256_load_si256((__m256i const *)(filt_global_avx2 + 32 * 2));
filt[3] = _mm256_load_si256((__m256i const *)(filt_global_avx2 + 32 * 3));
if (w <= 8) {
for (i = 0; i < h; i += 2) {
const __m256i data = _mm256_permute2x128_si256(
_mm256_castsi128_si256(
_mm_loadu_si128((__m128i *)(&src_ptr[i * src_stride]))),
_mm256_castsi128_si256(_mm_loadu_si128(
(__m128i *)(&src_ptr[i * src_stride + src_stride]))),
0x20);
__m256i res_16b = convolve_lowbd_x(data, coeffs, filt);
res_16b = _mm256_sra_epi16(_mm256_add_epi16(res_16b, round_0_const),
round_0_shift);
res_16b = _mm256_sra_epi16(_mm256_add_epi16(res_16b, round_const),
round_shift);
/* rounding code */
// 8 bit conversion and saturation to uint8
__m256i res_8b = _mm256_packus_epi16(res_16b, res_16b);
const __m128i res_0 = _mm256_castsi256_si128(res_8b);
const __m128i res_1 = _mm256_extracti128_si256(res_8b, 1);
if (w > 4) {
_mm_storel_epi64((__m128i *)&dst[i * dst_stride], res_0);
_mm_storel_epi64((__m128i *)&dst[i * dst_stride + dst_stride], res_1);
} else if (w > 2) {
xx_storel_32(&dst[i * dst_stride], res_0);
xx_storel_32(&dst[i * dst_stride + dst_stride], res_1);
} else {
__m128i *const p_0 = (__m128i *)&dst[i * dst_stride];
__m128i *const p_1 = (__m128i *)&dst[i * dst_stride + dst_stride];
*(uint16_t *)p_0 = (uint16_t)_mm_cvtsi128_si32(res_0);
*(uint16_t *)p_1 = (uint16_t)_mm_cvtsi128_si32(res_1);
}
}
} else {
for (i = 0; i < h; ++i) {
for (int j = 0; j < w; j += 16) {
// 0 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 8 9 10 11 12 13 14 15 16 17
// 18 19 20 21 22 23
const __m256i data = _mm256_inserti128_si256(
_mm256_loadu_si256((__m256i *)&src_ptr[(i * src_stride) + j]),
_mm_loadu_si128((__m128i *)&src_ptr[(i * src_stride) + (j + 8)]),
1);
__m256i res_16b = convolve_lowbd_x(data, coeffs, filt);
res_16b = _mm256_sra_epi16(_mm256_add_epi16(res_16b, round_0_const),
round_0_shift);
res_16b = _mm256_sra_epi16(_mm256_add_epi16(res_16b, round_const),
round_shift);
/* rounding code */
// 8 bit conversion and saturation to uint8
__m256i res_8b = _mm256_packus_epi16(res_16b, res_16b);
// Store values into the destination buffer
// 0 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15
res_8b = _mm256_permute4x64_epi64(res_8b, 216);
__m128i res = _mm256_castsi256_si128(res_8b);
_mm_storeu_si128((__m128i *)&dst[i * dst_stride + j], res);
}
}
}
}
} | pushq %rbp
movq %rsp, %rbp
andq $-0x20, %rsp
subq $0x1d80, %rsp # imm = 0x1D80
movq 0x20(%rbp), %rax
movl 0x18(%rbp), %eax
movq 0x10(%rbp), %rax
movq %rdi, 0x998(%rsp)
movl %esi, 0x994(%rsp)
movq %rdx, 0x988(%rsp)
movl %ecx, 0x984(%rsp)
movl %r8d, 0x980(%rsp)
movl %r9d, 0x97c(%rsp)
movq 0x20(%rbp), %rax
movl 0x14(%rax), %ecx
movl $0x7, %eax
subl %ecx, %eax
movl %eax, 0x978(%rsp)
movl 0x978(%rsp), %eax
movl %eax, 0x9f0(%rsp)
vmovd 0x9f0(%rsp), %xmm0
vmovdqa %xmm0, 0x9e0(%rsp)
vmovdqa 0x9e0(%rsp), %xmm0
vmovdqa %xmm0, 0x960(%rsp)
movq 0x20(%rbp), %rax
movb 0x14(%rax), %cl
decb %cl
movl $0x1, %eax
movl %eax, %edx
shll %cl, %edx
movl %edx, %ecx
shrl %ecx
movw %cx, 0x9f6(%rsp)
movw 0x9f6(%rsp), %cx
movw %cx, 0xc(%rsp)
movw %cx, 0x1c5e(%rsp)
movw %cx, 0x1c5c(%rsp)
movw %cx, 0x1c5a(%rsp)
movw %cx, 0x1c58(%rsp)
movw %cx, 0x1c56(%rsp)
movw %cx, 0x1c54(%rsp)
movw %cx, 0x1c52(%rsp)
movw %cx, 0x1c50(%rsp)
movw %cx, 0x1c4e(%rsp)
movw %cx, 0x1c4c(%rsp)
movw %cx, 0x1c4a(%rsp)
movw %cx, 0x1c48(%rsp)
movw %cx, 0x1c46(%rsp)
movw %cx, 0x1c44(%rsp)
movw %cx, 0x1c42(%rsp)
movw %cx, 0x1c40(%rsp)
movzwl 0x1c40(%rsp), %ecx
vmovd %ecx, %xmm0
movzwl 0x1c42(%rsp), %ecx
vpinsrw $0x1, %ecx, %xmm0, %xmm0
movzwl 0x1c44(%rsp), %ecx
vpinsrw $0x2, %ecx, %xmm0, %xmm0
movzwl 0x1c46(%rsp), %ecx
vpinsrw $0x3, %ecx, %xmm0, %xmm0
movzwl 0x1c48(%rsp), %ecx
vpinsrw $0x4, %ecx, %xmm0, %xmm0
movzwl 0x1c4a(%rsp), %ecx
vpinsrw $0x5, %ecx, %xmm0, %xmm0
movzwl 0x1c4c(%rsp), %ecx
vpinsrw $0x6, %ecx, %xmm0, %xmm0
movzwl 0x1c4e(%rsp), %ecx
vpinsrw $0x7, %ecx, %xmm0, %xmm0
movzwl 0x1c50(%rsp), %ecx
vmovd %ecx, %xmm1
movzwl 0x1c52(%rsp), %ecx
vpinsrw $0x1, %ecx, %xmm1, %xmm1
movzwl 0x1c54(%rsp), %ecx
vpinsrw $0x2, %ecx, %xmm1, %xmm1
movzwl 0x1c56(%rsp), %ecx
vpinsrw $0x3, %ecx, %xmm1, %xmm1
movzwl 0x1c58(%rsp), %ecx
vpinsrw $0x4, %ecx, %xmm1, %xmm1
movzwl 0x1c5a(%rsp), %ecx
vpinsrw $0x5, %ecx, %xmm1, %xmm1
movzwl 0x1c5c(%rsp), %ecx
vpinsrw $0x6, %ecx, %xmm1, %xmm1
movzwl 0x1c5e(%rsp), %ecx
vpinsrw $0x7, %ecx, %xmm1, %xmm1
vmovdqa %xmm1, 0x1c30(%rsp)
vmovdqa %xmm0, 0x1c20(%rsp)
vmovaps 0x1c20(%rsp), %ymm0
vmovaps %ymm0, 0x940(%rsp)
movq 0x20(%rbp), %rcx
movl 0x14(%rcx), %ecx
decl %ecx
movl %ecx, 0x9dc(%rsp)
vmovd 0x9dc(%rsp), %xmm0
vmovdqa %xmm0, 0x9c0(%rsp)
vmovdqa 0x9c0(%rsp), %xmm0
vmovdqa %xmm0, 0x930(%rsp)
movb 0x978(%rsp), %cl
shll %cl, %eax
shrl %eax
movw %ax, 0x9f4(%rsp)
movw 0x9f4(%rsp), %ax
movw %ax, 0xe(%rsp)
movw %ax, 0x1c9e(%rsp)
movw %ax, 0x1c9c(%rsp)
movw %ax, 0x1c9a(%rsp)
movw %ax, 0x1c98(%rsp)
movw %ax, 0x1c96(%rsp)
movw %ax, 0x1c94(%rsp)
movw %ax, 0x1c92(%rsp)
movw %ax, 0x1c90(%rsp)
movw %ax, 0x1c8e(%rsp)
movw %ax, 0x1c8c(%rsp)
movw %ax, 0x1c8a(%rsp)
movw %ax, 0x1c88(%rsp)
movw %ax, 0x1c86(%rsp)
movw %ax, 0x1c84(%rsp)
movw %ax, 0x1c82(%rsp)
movw %ax, 0x1c80(%rsp)
movzwl 0x1c90(%rsp), %eax
vmovd %eax, %xmm0
movzwl 0x1c92(%rsp), %eax
vpinsrw $0x1, %eax, %xmm0, %xmm0
movzwl 0x1c94(%rsp), %eax
vpinsrw $0x2, %eax, %xmm0, %xmm0
movzwl 0x1c96(%rsp), %eax
vpinsrw $0x3, %eax, %xmm0, %xmm0
movzwl 0x1c98(%rsp), %eax
vpinsrw $0x4, %eax, %xmm0, %xmm0
movzwl 0x1c9a(%rsp), %eax
vpinsrw $0x5, %eax, %xmm0, %xmm0
movzwl 0x1c9c(%rsp), %eax
vpinsrw $0x6, %eax, %xmm0, %xmm0
movzwl 0x1c9e(%rsp), %eax
vpinsrw $0x7, %eax, %xmm0, %xmm1
movzwl 0x1c80(%rsp), %eax
vmovd %eax, %xmm0
movzwl 0x1c82(%rsp), %eax
vpinsrw $0x1, %eax, %xmm0, %xmm0
movzwl 0x1c84(%rsp), %eax
vpinsrw $0x2, %eax, %xmm0, %xmm0
movzwl 0x1c86(%rsp), %eax
vpinsrw $0x3, %eax, %xmm0, %xmm0
movzwl 0x1c88(%rsp), %eax
vpinsrw $0x4, %eax, %xmm0, %xmm0
movzwl 0x1c8a(%rsp), %eax
vpinsrw $0x5, %eax, %xmm0, %xmm0
movzwl 0x1c8c(%rsp), %eax
vpinsrw $0x6, %eax, %xmm0, %xmm0
movzwl 0x1c8e(%rsp), %eax
vpinsrw $0x7, %eax, %xmm0, %xmm2
vmovaps %xmm2, %xmm0
vinserti128 $0x1, %xmm1, %ymm0, %ymm0
vmovdqa %ymm0, 0x1c60(%rsp)
vmovdqa 0x1c60(%rsp), %ymm0
vmovdqa %ymm0, 0x900(%rsp)
movq 0x10(%rbp), %rdi
movl 0x18(%rbp), %esi
vzeroupper
callq 0x8c1af0
movl %eax, 0x8f8(%rsp)
leaq 0x2526c0(%rip), %rax # 0xb1a8c0
movq %rax, 0x1d68(%rsp)
movq 0x1d68(%rsp), %rax
vmovdqa (%rax), %ymm0
vmovdqa %ymm0, 0x7a0(%rsp)
leaq 0x25269c(%rip), %rax # 0xb1a8c0
addq $0x20, %rax
movq %rax, 0x1d60(%rsp)
movq 0x1d60(%rsp), %rax
vmovdqa (%rax), %ymm0
vmovdqa %ymm0, 0x7c0(%rsp)
cmpl $0x6, 0x8f8(%rsp)
jne 0x8c8268
movq 0x10(%rbp), %rdi
movl 0x18(%rbp), %esi
leaq 0x820(%rsp), %rdx
vzeroupper
callq 0x8ca750
jmp 0x8c82a4
cmpl $0xc, 0x8f8(%rsp)
jne 0x8c828b
movq 0x10(%rbp), %rdi
movl 0x18(%rbp), %esi
leaq 0x820(%rsp), %rdx
vzeroupper
callq 0x8cad00
jmp 0x8c82a2
movq 0x10(%rbp), %rdi
movl 0x18(%rbp), %esi
leaq 0x820(%rsp), %rdx
vzeroupper
callq 0x8cae40
jmp 0x8c82a4
cmpl $0x4, 0x8f8(%rsp)
jne 0x8c89ee
movl $0x1, 0x79c(%rsp)
movq 0x998(%rsp), %rax
addq $-0x1, %rax
movq %rax, 0x790(%rsp)
cmpl $0x8, 0x980(%rsp)
jg 0x8c8716
movl $0x0, 0x8fc(%rsp)
movl 0x8fc(%rsp), %eax
cmpl 0x97c(%rsp), %eax
jge 0x8c8711
movq 0x790(%rsp), %rax
movl 0x8fc(%rsp), %ecx
movl 0x994(%rsp), %edx
imull %edx, %ecx
movslq %ecx, %rcx
addq %rcx, %rax
movq %rax, 0xa58(%rsp)
movq 0xa58(%rsp), %rax
vmovdqu (%rax), %xmm0
vmovdqa %xmm0, 0xaf0(%rsp)
vmovdqa 0xaf0(%rsp), %xmm0
movq 0x790(%rsp), %rax
movl 0x8fc(%rsp), %ecx
movl 0x994(%rsp), %edx
imull %edx, %ecx
addl %edx, %ecx
movslq %ecx, %rcx
addq %rcx, %rax
movq %rax, 0xa50(%rsp)
movq 0xa50(%rsp), %rax
vmovdqu (%rax), %xmm1
vmovdqa %xmm1, 0xae0(%rsp)
vmovdqa 0xae0(%rsp), %xmm1
vmovdqa %xmm1, 0x770(%rsp)
vmovdqa %xmm0, 0x760(%rsp)
vmovaps 0x760(%rsp), %ymm0
leaq 0x840(%rsp), %rdi
leaq 0x7a0(%rsp), %rsi
callq 0x8d0cb0
vmovaps %ymm0, 0x740(%rsp)
vmovaps 0x740(%rsp), %ymm1
vmovaps 0x940(%rsp), %ymm0
vmovaps %ymm1, 0x11e0(%rsp)
vmovaps %ymm0, 0x11c0(%rsp)
vmovaps 0x11e0(%rsp), %ymm0
vmovaps 0x11c0(%rsp), %ymm1
vpaddw %ymm1, %ymm0, %ymm1
vmovdqa 0x930(%rsp), %xmm0
vmovaps %ymm1, 0xee0(%rsp)
vmovdqa %xmm0, 0xed0(%rsp)
vmovaps 0xee0(%rsp), %ymm0
vmovdqa 0xed0(%rsp), %xmm1
vpsraw %xmm1, %ymm0, %ymm0
vmovaps %ymm0, 0x740(%rsp)
vmovaps 0x740(%rsp), %ymm1
vmovaps 0x900(%rsp), %ymm0
vmovaps %ymm1, 0x11a0(%rsp)
vmovaps %ymm0, 0x1180(%rsp)
vmovaps 0x11a0(%rsp), %ymm0
vmovaps 0x1180(%rsp), %ymm1
vpaddw %ymm1, %ymm0, %ymm1
vmovdqa 0x960(%rsp), %xmm0
vmovaps %ymm1, 0xea0(%rsp)
vmovdqa %xmm0, 0xe90(%rsp)
vmovaps 0xea0(%rsp), %ymm0
vmovdqa 0xe90(%rsp), %xmm1
vpsraw %xmm1, %ymm0, %ymm0
vmovaps %ymm0, 0x740(%rsp)
vmovaps 0x740(%rsp), %ymm0
vmovaps %ymm0, 0x13e0(%rsp)
vmovaps %ymm0, 0x13c0(%rsp)
vmovaps 0x13e0(%rsp), %ymm0
vmovaps 0x13c0(%rsp), %ymm1
vpackuswb %ymm1, %ymm0, %ymm0
vmovaps %ymm0, 0x720(%rsp)
vmovaps 0x720(%rsp), %ymm0
vmovaps %ymm0, 0x14a0(%rsp)
vmovaps 0x14a0(%rsp), %ymm0
vmovdqa %xmm0, 0x710(%rsp)
vmovdqa 0x730(%rsp), %xmm0
vmovdqa %xmm0, 0x700(%rsp)
cmpl $0x4, 0x980(%rsp)
jle 0x8c85c8
movq 0x988(%rsp), %rax
movl 0x8fc(%rsp), %ecx
movl 0x984(%rsp), %edx
imull %edx, %ecx
movslq %ecx, %rcx
addq %rcx, %rax
vmovdqa 0x710(%rsp), %xmm0
movq %rax, 0x15d8(%rsp)
vmovdqa %xmm0, 0x15c0(%rsp)
movq 0x15c0(%rsp), %rcx
movq 0x15d8(%rsp), %rax
movq %rcx, (%rax)
movq 0x988(%rsp), %rax
movl 0x8fc(%rsp), %ecx
movl 0x984(%rsp), %edx
imull %edx, %ecx
addl %edx, %ecx
movslq %ecx, %rcx
addq %rcx, %rax
vmovdqa 0x700(%rsp), %xmm0
movq %rax, 0x15b8(%rsp)
vmovdqa %xmm0, 0x15a0(%rsp)
movq 0x15a0(%rsp), %rcx
movq 0x15b8(%rsp), %rax
movq %rcx, (%rax)
jmp 0x8c86f9
cmpl $0x2, 0x980(%rsp)
jle 0x8c8635
movq 0x988(%rsp), %rdi
movl 0x8fc(%rsp), %eax
imull 0x984(%rsp), %eax
cltq
addq %rax, %rdi
vmovdqa 0x710(%rsp), %xmm0
vzeroupper
callq 0x8cb680
movq 0x988(%rsp), %rdi
movl 0x8fc(%rsp), %eax
imull 0x984(%rsp), %eax
addl 0x984(%rsp), %eax
cltq
addq %rax, %rdi
vmovdqa 0x700(%rsp), %xmm0
callq 0x8cb680
jmp 0x8c86f7
movq 0x988(%rsp), %rax
movl 0x8fc(%rsp), %ecx
movl 0x984(%rsp), %edx
imull %edx, %ecx
movslq %ecx, %rcx
addq %rcx, %rax
movq %rax, 0x6f8(%rsp)
movq 0x988(%rsp), %rax
movl 0x8fc(%rsp), %ecx
movl 0x984(%rsp), %edx
imull %edx, %ecx
addl %edx, %ecx
movslq %ecx, %rcx
addq %rcx, %rax
movq %rax, 0x6f0(%rsp)
vmovdqa 0x710(%rsp), %xmm0
vmovdqa %xmm0, 0x1750(%rsp)
vmovdqa 0x1750(%rsp), %xmm0
vmovdqa %xmm0, 0x1740(%rsp)
movl 0x1740(%rsp), %eax
movw %ax, %cx
movq 0x6f8(%rsp), %rax
movw %cx, (%rax)
vmovdqa 0x700(%rsp), %xmm0
vmovdqa %xmm0, 0x1730(%rsp)
vmovdqa 0x1730(%rsp), %xmm0
vmovdqa %xmm0, 0x1720(%rsp)
movl 0x1720(%rsp), %eax
movw %ax, %cx
movq 0x6f0(%rsp), %rax
movw %cx, (%rax)
jmp 0x8c86f9
jmp 0x8c86fb
movl 0x8fc(%rsp), %eax
addl $0x2, %eax
movl %eax, 0x8fc(%rsp)
jmp 0x8c82ea
jmp 0x8c89e9
movl $0x0, 0x8fc(%rsp)
movl 0x8fc(%rsp), %eax
cmpl 0x97c(%rsp), %eax
jge 0x8c89e7
movl $0x0, 0x6ec(%rsp)
movl 0x6ec(%rsp), %eax
cmpl 0x980(%rsp), %eax
jge 0x8c89cf
movq 0x790(%rsp), %rax
movl 0x8fc(%rsp), %ecx
movl 0x994(%rsp), %edx
imull %edx, %ecx
movl 0x6ec(%rsp), %edx
addl %edx, %ecx
movslq %ecx, %rcx
addq %rcx, %rax
movq %rax, 0x1d38(%rsp)
movq 0x1d38(%rsp), %rax
vmovups (%rax), %ymm0
movq 0x790(%rsp), %rax
movl 0x8fc(%rsp), %esi
movl 0x994(%rsp), %ecx
imull %ecx, %esi
movl 0x6ec(%rsp), %edx
movl %edx, %ecx
movl %esi, %edx
leal 0x8(%rcx,%rdx), %ecx
movslq %ecx, %rcx
addq %rcx, %rax
movq %rax, 0xa48(%rsp)
movq 0xa48(%rsp), %rax
vmovdqu (%rax), %xmm1
vinserti128 $0x1, %xmm1, %ymm0, %ymm0
vmovaps %ymm0, 0x6c0(%rsp)
vmovaps 0x6c0(%rsp), %ymm0
leaq 0x840(%rsp), %rdi
leaq 0x7a0(%rsp), %rsi
callq 0x8d0cb0
vmovaps %ymm0, 0x6a0(%rsp)
vmovaps 0x6a0(%rsp), %ymm1
vmovaps 0x940(%rsp), %ymm0
vmovaps %ymm1, 0x1160(%rsp)
vmovaps %ymm0, 0x1140(%rsp)
vmovaps 0x1160(%rsp), %ymm0
vmovaps 0x1140(%rsp), %ymm1
vpaddw %ymm1, %ymm0, %ymm1
vmovdqa 0x930(%rsp), %xmm0
vmovaps %ymm1, 0xe60(%rsp)
vmovdqa %xmm0, 0xe50(%rsp)
vmovaps 0xe60(%rsp), %ymm0
vmovdqa 0xe50(%rsp), %xmm1
vpsraw %xmm1, %ymm0, %ymm0
vmovaps %ymm0, 0x6a0(%rsp)
vmovaps 0x6a0(%rsp), %ymm1
vmovaps 0x900(%rsp), %ymm0
vmovaps %ymm1, 0x1120(%rsp)
vmovaps %ymm0, 0x1100(%rsp)
vmovaps 0x1120(%rsp), %ymm0
vmovaps 0x1100(%rsp), %ymm1
vpaddw %ymm1, %ymm0, %ymm1
vmovdqa 0x960(%rsp), %xmm0
vmovaps %ymm1, 0xe20(%rsp)
vmovdqa %xmm0, 0xe10(%rsp)
vmovaps 0xe20(%rsp), %ymm0
vmovdqa 0xe10(%rsp), %xmm1
vpsraw %xmm1, %ymm0, %ymm0
vmovaps %ymm0, 0x6a0(%rsp)
vmovaps 0x6a0(%rsp), %ymm0
vmovaps %ymm0, 0x13a0(%rsp)
vmovaps %ymm0, 0x1380(%rsp)
vmovaps 0x13a0(%rsp), %ymm0
vmovaps 0x1380(%rsp), %ymm1
vpackuswb %ymm1, %ymm0, %ymm0
vmovaps %ymm0, 0x680(%rsp)
vmovaps 0x680(%rsp), %ymm0
vpermq $0xd8, %ymm0, %ymm0 # ymm0 = ymm0[0,2,1,3]
vmovaps %ymm0, 0x680(%rsp)
vmovaps 0x680(%rsp), %ymm0
vmovaps %ymm0, 0x1480(%rsp)
vmovdqa 0x1480(%rsp), %xmm0
vmovdqa %xmm0, 0x670(%rsp)
movq 0x988(%rsp), %rax
movl 0x8fc(%rsp), %ecx
imull 0x984(%rsp), %ecx
addl 0x6ec(%rsp), %ecx
movslq %ecx, %rcx
addq %rcx, %rax
vmovdqa 0x670(%rsp), %xmm0
movq %rax, 0x1518(%rsp)
vmovdqa %xmm0, 0x1500(%rsp)
vmovdqa 0x1500(%rsp), %xmm0
movq 0x1518(%rsp), %rax
vmovdqu %xmm0, (%rax)
movl 0x6ec(%rsp), %eax
addl $0x10, %eax
movl %eax, 0x6ec(%rsp)
jmp 0x8c8740
jmp 0x8c89d1
movl 0x8fc(%rsp), %eax
addl $0x1, %eax
movl %eax, 0x8fc(%rsp)
jmp 0x8c8721
jmp 0x8c89e9
jmp 0x8ca70d
cmpl $0x6, 0x8f8(%rsp)
jne 0x8c91a2
movl 0x8f8(%rsp), %eax
movl $0x2, %ecx
cltd
idivl %ecx
subl $0x1, %eax
movl %eax, 0x66c(%rsp)
movq 0x998(%rsp), %rax
movslq 0x66c(%rsp), %rdx
xorl %ecx, %ecx
subq %rdx, %rcx
addq %rcx, %rax
movq %rax, 0x660(%rsp)
leaq 0x251e84(%rip), %rax # 0xb1a8c0
addq $0x40, %rax
movq %rax, 0x1d58(%rsp)
movq 0x1d58(%rsp), %rax
vmovdqa (%rax), %ymm0
vmovdqa %ymm0, 0x7e0(%rsp)
leaq 0x251e5c(%rip), %rax # 0xb1a8c0
addq $0x60, %rax
movq %rax, 0x1d50(%rsp)
movq 0x1d50(%rsp), %rax
vmovdqa (%rax), %ymm0
vmovdqa %ymm0, 0x800(%rsp)
cmpl $0x8, 0x980(%rsp)
jg 0x8c8eca
movl $0x0, 0x8fc(%rsp)
movl 0x8fc(%rsp), %eax
cmpl 0x97c(%rsp), %eax
jge 0x8c8ec5
movq 0x660(%rsp), %rax
movl 0x8fc(%rsp), %ecx
movl 0x994(%rsp), %edx
imull %edx, %ecx
movslq %ecx, %rcx
addq %rcx, %rax
movq %rax, 0xa40(%rsp)
movq 0xa40(%rsp), %rax
vmovdqu (%rax), %xmm0
vmovdqa %xmm0, 0xad0(%rsp)
vmovdqa 0xad0(%rsp), %xmm0
movq 0x660(%rsp), %rax
movl 0x8fc(%rsp), %ecx
movl 0x994(%rsp), %edx
imull %edx, %ecx
addl %edx, %ecx
movslq %ecx, %rcx
addq %rcx, %rax
movq %rax, 0xa38(%rsp)
movq 0xa38(%rsp), %rax
vmovdqu (%rax), %xmm1
vmovdqa %xmm1, 0xac0(%rsp)
vmovdqa 0xac0(%rsp), %xmm1
vmovdqa %xmm1, 0x650(%rsp)
vmovdqa %xmm0, 0x640(%rsp)
vmovaps 0x640(%rsp), %ymm0
leaq 0x820(%rsp), %rdi
leaq 0x7a0(%rsp), %rsi
callq 0x8d0d60
vmovaps %ymm0, 0x620(%rsp)
vmovaps 0x620(%rsp), %ymm1
vmovaps 0x940(%rsp), %ymm0
vmovaps %ymm1, 0x10e0(%rsp)
vmovaps %ymm0, 0x10c0(%rsp)
vmovaps 0x10e0(%rsp), %ymm0
vmovaps 0x10c0(%rsp), %ymm1
vpaddw %ymm1, %ymm0, %ymm1
vmovdqa 0x930(%rsp), %xmm0
vmovaps %ymm1, 0xde0(%rsp)
vmovdqa %xmm0, 0xdd0(%rsp)
vmovaps 0xde0(%rsp), %ymm0
vmovdqa 0xdd0(%rsp), %xmm1
vpsraw %xmm1, %ymm0, %ymm0
vmovaps %ymm0, 0x620(%rsp)
vmovaps 0x620(%rsp), %ymm1
vmovaps 0x900(%rsp), %ymm0
vmovaps %ymm1, 0x10a0(%rsp)
vmovaps %ymm0, 0x1080(%rsp)
vmovaps 0x10a0(%rsp), %ymm0
vmovaps 0x1080(%rsp), %ymm1
vpaddw %ymm1, %ymm0, %ymm1
vmovdqa 0x960(%rsp), %xmm0
vmovaps %ymm1, 0xda0(%rsp)
vmovdqa %xmm0, 0xd90(%rsp)
vmovaps 0xda0(%rsp), %ymm0
vmovdqa 0xd90(%rsp), %xmm1
vpsraw %xmm1, %ymm0, %ymm0
vmovaps %ymm0, 0x620(%rsp)
vmovaps 0x620(%rsp), %ymm0
vmovaps %ymm0, 0x1360(%rsp)
vmovaps %ymm0, 0x1340(%rsp)
vmovaps 0x1360(%rsp), %ymm0
vmovaps 0x1340(%rsp), %ymm1
vpackuswb %ymm1, %ymm0, %ymm0
vmovaps %ymm0, 0x600(%rsp)
vmovaps 0x600(%rsp), %ymm0
vmovaps %ymm0, 0x1460(%rsp)
vmovaps 0x1460(%rsp), %ymm0
vmovdqa %xmm0, 0x5f0(%rsp)
vmovdqa 0x610(%rsp), %xmm0
vmovdqa %xmm0, 0x5e0(%rsp)
cmpl $0x4, 0x980(%rsp)
jle 0x8c8d7c
movq 0x988(%rsp), %rax
movl 0x8fc(%rsp), %ecx
movl 0x984(%rsp), %edx
imull %edx, %ecx
movslq %ecx, %rcx
addq %rcx, %rax
vmovdqa 0x5f0(%rsp), %xmm0
movq %rax, 0x1598(%rsp)
vmovdqa %xmm0, 0x1580(%rsp)
movq 0x1580(%rsp), %rcx
movq 0x1598(%rsp), %rax
movq %rcx, (%rax)
movq 0x988(%rsp), %rax
movl 0x8fc(%rsp), %ecx
movl 0x984(%rsp), %edx
imull %edx, %ecx
addl %edx, %ecx
movslq %ecx, %rcx
addq %rcx, %rax
vmovdqa 0x5e0(%rsp), %xmm0
movq %rax, 0x1578(%rsp)
vmovdqa %xmm0, 0x1560(%rsp)
movq 0x1560(%rsp), %rcx
movq 0x1578(%rsp), %rax
movq %rcx, (%rax)
jmp 0x8c8ead
cmpl $0x2, 0x980(%rsp)
jle 0x8c8de9
movq 0x988(%rsp), %rdi
movl 0x8fc(%rsp), %eax
imull 0x984(%rsp), %eax
cltq
addq %rax, %rdi
vmovdqa 0x5f0(%rsp), %xmm0
vzeroupper
callq 0x8cb680
movq 0x988(%rsp), %rdi
movl 0x8fc(%rsp), %eax
imull 0x984(%rsp), %eax
addl 0x984(%rsp), %eax
cltq
addq %rax, %rdi
vmovdqa 0x5e0(%rsp), %xmm0
callq 0x8cb680
jmp 0x8c8eab
movq 0x988(%rsp), %rax
movl 0x8fc(%rsp), %ecx
movl 0x984(%rsp), %edx
imull %edx, %ecx
movslq %ecx, %rcx
addq %rcx, %rax
movq %rax, 0x5d8(%rsp)
movq 0x988(%rsp), %rax
movl 0x8fc(%rsp), %ecx
movl 0x984(%rsp), %edx
imull %edx, %ecx
addl %edx, %ecx
movslq %ecx, %rcx
addq %rcx, %rax
movq %rax, 0x5d0(%rsp)
vmovdqa 0x5f0(%rsp), %xmm0
vmovdqa %xmm0, 0x1710(%rsp)
vmovdqa 0x1710(%rsp), %xmm0
vmovdqa %xmm0, 0x1700(%rsp)
movl 0x1700(%rsp), %eax
movw %ax, %cx
movq 0x5d8(%rsp), %rax
movw %cx, (%rax)
vmovdqa 0x5e0(%rsp), %xmm0
vmovdqa %xmm0, 0x16f0(%rsp)
vmovdqa 0x16f0(%rsp), %xmm0
vmovdqa %xmm0, 0x16e0(%rsp)
movl 0x16e0(%rsp), %eax
movw %ax, %cx
movq 0x5d0(%rsp), %rax
movw %cx, (%rax)
jmp 0x8c8ead
jmp 0x8c8eaf
movl 0x8fc(%rsp), %eax
addl $0x2, %eax
movl %eax, 0x8fc(%rsp)
jmp 0x8c8a9e
jmp 0x8c919d
movl $0x0, 0x8fc(%rsp)
movl 0x8fc(%rsp), %eax
cmpl 0x97c(%rsp), %eax
jge 0x8c919b
movl $0x0, 0x5cc(%rsp)
movl 0x5cc(%rsp), %eax
cmpl 0x980(%rsp), %eax
jge 0x8c9183
movq 0x660(%rsp), %rax
movl 0x8fc(%rsp), %ecx
movl 0x994(%rsp), %edx
imull %edx, %ecx
movl 0x5cc(%rsp), %edx
addl %edx, %ecx
movslq %ecx, %rcx
addq %rcx, %rax
movq %rax, 0x1d30(%rsp)
movq 0x1d30(%rsp), %rax
vmovups (%rax), %ymm0
movq 0x660(%rsp), %rax
movl 0x8fc(%rsp), %esi
movl 0x994(%rsp), %ecx
imull %ecx, %esi
movl 0x5cc(%rsp), %edx
movl %edx, %ecx
movl %esi, %edx
leal 0x8(%rcx,%rdx), %ecx
movslq %ecx, %rcx
addq %rcx, %rax
movq %rax, 0xa30(%rsp)
movq 0xa30(%rsp), %rax
vmovdqu (%rax), %xmm1
vinserti128 $0x1, %xmm1, %ymm0, %ymm0
vmovaps %ymm0, 0x5a0(%rsp)
vmovaps 0x5a0(%rsp), %ymm0
leaq 0x820(%rsp), %rdi
leaq 0x7a0(%rsp), %rsi
callq 0x8d0d60
vmovaps %ymm0, 0x580(%rsp)
vmovaps 0x580(%rsp), %ymm1
vmovaps 0x940(%rsp), %ymm0
vmovaps %ymm1, 0x1060(%rsp)
vmovaps %ymm0, 0x1040(%rsp)
vmovaps 0x1060(%rsp), %ymm0
vmovaps 0x1040(%rsp), %ymm1
vpaddw %ymm1, %ymm0, %ymm1
vmovdqa 0x930(%rsp), %xmm0
vmovaps %ymm1, 0xd60(%rsp)
vmovdqa %xmm0, 0xd50(%rsp)
vmovaps 0xd60(%rsp), %ymm0
vmovdqa 0xd50(%rsp), %xmm1
vpsraw %xmm1, %ymm0, %ymm0
vmovaps %ymm0, 0x580(%rsp)
vmovaps 0x580(%rsp), %ymm1
vmovaps 0x900(%rsp), %ymm0
vmovaps %ymm1, 0x1020(%rsp)
vmovaps %ymm0, 0x1000(%rsp)
vmovaps 0x1020(%rsp), %ymm0
vmovaps 0x1000(%rsp), %ymm1
vpaddw %ymm1, %ymm0, %ymm1
vmovdqa 0x960(%rsp), %xmm0
vmovaps %ymm1, 0xd20(%rsp)
vmovdqa %xmm0, 0xd10(%rsp)
vmovaps 0xd20(%rsp), %ymm0
vmovdqa 0xd10(%rsp), %xmm1
vpsraw %xmm1, %ymm0, %ymm0
vmovaps %ymm0, 0x580(%rsp)
vmovaps 0x580(%rsp), %ymm0
vmovaps %ymm0, 0x1320(%rsp)
vmovaps %ymm0, 0x1300(%rsp)
vmovaps 0x1320(%rsp), %ymm0
vmovaps 0x1300(%rsp), %ymm1
vpackuswb %ymm1, %ymm0, %ymm0
vmovaps %ymm0, 0x560(%rsp)
vmovaps 0x560(%rsp), %ymm0
vpermq $0xd8, %ymm0, %ymm0 # ymm0 = ymm0[0,2,1,3]
vmovaps %ymm0, 0x560(%rsp)
vmovaps 0x560(%rsp), %ymm0
vmovaps %ymm0, 0x1440(%rsp)
vmovdqa 0x1440(%rsp), %xmm0
vmovdqa %xmm0, 0x550(%rsp)
movq 0x988(%rsp), %rax
movl 0x8fc(%rsp), %ecx
imull 0x984(%rsp), %ecx
addl 0x5cc(%rsp), %ecx
movslq %ecx, %rcx
addq %rcx, %rax
vmovdqa 0x550(%rsp), %xmm0
movq %rax, 0x14f8(%rsp)
vmovdqa %xmm0, 0x14e0(%rsp)
vmovdqa 0x14e0(%rsp), %xmm0
movq 0x14f8(%rsp), %rax
vmovdqu %xmm0, (%rax)
movl 0x5cc(%rsp), %eax
addl $0x10, %eax
movl %eax, 0x5cc(%rsp)
jmp 0x8c8ef4
jmp 0x8c9185
movl 0x8fc(%rsp), %eax
addl $0x1, %eax
movl %eax, 0x8fc(%rsp)
jmp 0x8c8ed5
jmp 0x8c919d
jmp 0x8ca70b
cmpl $0xc, 0x8f8(%rsp)
jne 0x8c9f8f
movq 0x10(%rbp), %rax
movzwl 0x8(%rax), %eax
shrl %eax
decl %eax
movl %eax, 0x54c(%rsp)
movq 0x998(%rsp), %rax
movslq 0x54c(%rsp), %rcx
subq %rcx, %rax
movq %rax, 0x540(%rsp)
vxorps %xmm0, %xmm0, %xmm0
vmovaps %ymm0, 0x1760(%rsp)
vmovaps 0x1760(%rsp), %ymm0
vmovaps %ymm0, 0x520(%rsp)
movq 0x20(%rbp), %rax
movb 0x14(%rax), %cl
movl $0x1, %eax
movl %eax, %edx
shll %cl, %edx
movl %edx, %ecx
sarl %ecx
movl %ecx, 0x179c(%rsp)
movl 0x179c(%rsp), %ecx
movl %ecx, 0x4(%rsp)
movl %ecx, 0x1cdc(%rsp)
movl %ecx, 0x1cd8(%rsp)
movl %ecx, 0x1cd4(%rsp)
movl %ecx, 0x1cd0(%rsp)
movl %ecx, 0x1ccc(%rsp)
movl %ecx, 0x1cc8(%rsp)
movl %ecx, 0x1cc4(%rsp)
movl %ecx, 0x1cc0(%rsp)
movl 0x1cc4(%rsp), %r9d
movl 0x1cc8(%rsp), %r8d
movl 0x1ccc(%rsp), %edi
movl 0x1cd4(%rsp), %esi
movl 0x1cd8(%rsp), %edx
movl 0x1cdc(%rsp), %ecx
vmovd 0x1cc0(%rsp), %xmm0
vpinsrd $0x1, %r9d, %xmm0, %xmm0
vpinsrd $0x2, %r8d, %xmm0, %xmm0
vpinsrd $0x3, %edi, %xmm0, %xmm0
vmovd 0x1cd0(%rsp), %xmm1
vpinsrd $0x1, %esi, %xmm1, %xmm1
vpinsrd $0x2, %edx, %xmm1, %xmm1
vpinsrd $0x3, %ecx, %xmm1, %xmm1
vmovdqa %xmm1, 0x1cb0(%rsp)
vmovdqa %xmm0, 0x1ca0(%rsp)
vmovaps 0x1ca0(%rsp), %ymm0
vmovaps %ymm0, 0x940(%rsp)
movb 0x978(%rsp), %cl
shll %cl, %eax
sarl %eax
movl %eax, 0x1798(%rsp)
movl 0x1798(%rsp), %eax
movl %eax, 0x8(%rsp)
movl %eax, 0x1d24(%rsp)
movl %eax, 0x1d20(%rsp)
movl %eax, 0x1d1c(%rsp)
movl %eax, 0x1d18(%rsp)
movl %eax, 0x1d14(%rsp)
movl %eax, 0x1d10(%rsp)
movl %eax, 0x1d0c(%rsp)
movl %eax, 0x1d08(%rsp)
movl 0x1d0c(%rsp), %r8d
movl 0x1d10(%rsp), %edi
movl 0x1d14(%rsp), %esi
movl 0x1d1c(%rsp), %edx
movl 0x1d20(%rsp), %ecx
movl 0x1d24(%rsp), %eax
vmovd 0x1d08(%rsp), %xmm0
vpinsrd $0x1, %r8d, %xmm0, %xmm0
vpinsrd $0x2, %edi, %xmm0, %xmm0
vpinsrd $0x3, %esi, %xmm0, %xmm0
vmovd 0x1d18(%rsp), %xmm1
vpinsrd $0x1, %edx, %xmm1, %xmm1
vpinsrd $0x2, %ecx, %xmm1, %xmm1
vpinsrd $0x3, %eax, %xmm1, %xmm1
vmovdqa %xmm1, 0x1cf0(%rsp)
vmovdqa %xmm0, 0x1ce0(%rsp)
vmovaps 0x1ce0(%rsp), %ymm0
vmovaps %ymm0, 0x900(%rsp)
movq 0x20(%rbp), %rax
movl 0x14(%rax), %eax
movl %eax, 0x9bc(%rsp)
vmovd 0x9bc(%rsp), %xmm0
vmovdqa %xmm0, 0x9a0(%rsp)
vmovdqa 0x9a0(%rsp), %xmm0
vmovdqa %xmm0, 0x930(%rsp)
cmpl $0x4, 0x980(%rsp)
jg 0x8c99f4
movl $0x0, 0x8fc(%rsp)
movl 0x8fc(%rsp), %eax
cmpl 0x97c(%rsp), %eax
jge 0x8c99ef
movq 0x540(%rsp), %rax
movl 0x8fc(%rsp), %ecx
movl 0x994(%rsp), %edx
imull %edx, %ecx
movslq %ecx, %rcx
addq %rcx, %rax
movq %rax, 0xa28(%rsp)
movq 0xa28(%rsp), %rax
vmovdqu (%rax), %xmm0
vmovdqa %xmm0, 0xab0(%rsp)
vmovdqa 0xab0(%rsp), %xmm0
movq 0x540(%rsp), %rax
movl 0x8fc(%rsp), %ecx
movl 0x994(%rsp), %edx
imull %edx, %ecx
addl %edx, %ecx
movslq %ecx, %rcx
addq %rcx, %rax
movq %rax, 0xa20(%rsp)
movq 0xa20(%rsp), %rax
vmovdqu (%rax), %xmm1
vmovdqa %xmm1, 0xaa0(%rsp)
vmovdqa 0xaa0(%rsp), %xmm1
vmovdqa %xmm1, 0x450(%rsp)
vmovdqa %xmm0, 0x440(%rsp)
vmovaps 0x440(%rsp), %ymm1
vmovaps 0x520(%rsp), %ymm0
vmovaps %ymm1, 0xb60(%rsp)
vmovaps %ymm0, 0xb40(%rsp)
vmovaps 0xb60(%rsp), %ymm0
vmovaps 0xb40(%rsp), %ymm1
vpunpcklbw %ymm1, %ymm0, %ymm0 # ymm0 = ymm0[0],ymm1[0],ymm0[1],ymm1[1],ymm0[2],ymm1[2],ymm0[3],ymm1[3],ymm0[4],ymm1[4],ymm0[5],ymm1[5],ymm0[6],ymm1[6],ymm0[7],ymm1[7],ymm0[16],ymm1[16],ymm0[17],ymm1[17],ymm0[18],ymm1[18],ymm0[19],ymm1[19],ymm0[20],ymm1[20],ymm0[21],ymm1[21],ymm0[22],ymm1[22],ymm0[23],ymm1[23]
vmovaps %ymm0, 0x420(%rsp)
vmovaps 0x440(%rsp), %ymm1
vmovaps 0x520(%rsp), %ymm0
vmovaps %ymm1, 0xbe0(%rsp)
vmovaps %ymm0, 0xbc0(%rsp)
vmovaps 0xbe0(%rsp), %ymm0
vmovaps 0xbc0(%rsp), %ymm1
vpunpckhbw %ymm1, %ymm0, %ymm0 # ymm0 = ymm0[8],ymm1[8],ymm0[9],ymm1[9],ymm0[10],ymm1[10],ymm0[11],ymm1[11],ymm0[12],ymm1[12],ymm0[13],ymm1[13],ymm0[14],ymm1[14],ymm0[15],ymm1[15],ymm0[24],ymm1[24],ymm0[25],ymm1[25],ymm0[26],ymm1[26],ymm0[27],ymm1[27],ymm0[28],ymm1[28],ymm0[29],ymm1[29],ymm0[30],ymm1[30],ymm0[31],ymm1[31]
vmovaps %ymm0, 0x400(%rsp)
vmovaps 0x420(%rsp), %ymm0
vmovaps %ymm0, 0x1880(%rsp)
vmovaps %ymm0, 0x1860(%rsp)
vmovaps 0x1880(%rsp), %ymm0
vmovaps 0x1860(%rsp), %ymm1
vpunpcklwd %ymm1, %ymm0, %ymm0 # ymm0 = ymm0[0],ymm1[0],ymm0[1],ymm1[1],ymm0[2],ymm1[2],ymm0[3],ymm1[3],ymm0[8],ymm1[8],ymm0[9],ymm1[9],ymm0[10],ymm1[10],ymm0[11],ymm1[11]
vmovaps %ymm0, 0x3e0(%rsp)
vmovaps 0x420(%rsp), %ymm0
vmovaps %ymm0, 0x1980(%rsp)
vmovaps %ymm0, 0x1960(%rsp)
vmovaps 0x1980(%rsp), %ymm0
vmovaps 0x1960(%rsp), %ymm1
vpunpckhwd %ymm1, %ymm0, %ymm0 # ymm0 = ymm0[4],ymm1[4],ymm0[5],ymm1[5],ymm0[6],ymm1[6],ymm0[7],ymm1[7],ymm0[12],ymm1[12],ymm0[13],ymm1[13],ymm0[14],ymm1[14],ymm0[15],ymm1[15]
vmovaps %ymm0, 0x3c0(%rsp)
vmovaps 0x400(%rsp), %ymm0
vmovaps %ymm0, 0x1840(%rsp)
vmovaps %ymm0, 0x1820(%rsp)
vmovaps 0x1840(%rsp), %ymm0
vmovaps 0x1820(%rsp), %ymm1
vpunpcklwd %ymm1, %ymm0, %ymm0 # ymm0 = ymm0[0],ymm1[0],ymm0[1],ymm1[1],ymm0[2],ymm1[2],ymm0[3],ymm1[3],ymm0[8],ymm1[8],ymm0[9],ymm1[9],ymm0[10],ymm1[10],ymm0[11],ymm1[11]
vmovaps %ymm0, 0x3a0(%rsp)
vmovaps 0x400(%rsp), %ymm0
vmovaps %ymm0, 0x1940(%rsp)
vmovaps %ymm0, 0x1920(%rsp)
vmovaps 0x1940(%rsp), %ymm0
vmovaps 0x1920(%rsp), %ymm1
vpunpckhwd %ymm1, %ymm0, %ymm0 # ymm0 = ymm0[4],ymm1[4],ymm0[5],ymm1[5],ymm0[6],ymm1[6],ymm0[7],ymm1[7],ymm0[12],ymm1[12],ymm0[13],ymm1[13],ymm0[14],ymm1[14],ymm0[15],ymm1[15]
vmovaps %ymm0, 0x380(%rsp)
vmovaps 0x3c0(%rsp), %ymm0
vmovaps 0x3e0(%rsp), %ymm1
vpalignr $0x2, %ymm1, %ymm0, %ymm0 # ymm0 = ymm1[2,3,4,5,6,7,8,9,10,11,12,13,14,15],ymm0[0,1],ymm1[18,19,20,21,22,23,24,25,26,27,28,29,30,31],ymm0[16,17]
vmovaps %ymm0, 0x460(%rsp)
vmovaps 0x3c0(%rsp), %ymm0
vmovaps 0x3e0(%rsp), %ymm1
vpalignr $0xa, %ymm1, %ymm0, %ymm0 # ymm0 = ymm1[10,11,12,13,14,15],ymm0[0,1,2,3,4,5,6,7,8,9],ymm1[26,27,28,29,30,31],ymm0[16,17,18,19,20,21,22,23,24,25]
vmovaps %ymm0, 0x480(%rsp)
vmovaps 0x3a0(%rsp), %ymm0
vmovaps 0x3c0(%rsp), %ymm1
vpalignr $0x2, %ymm1, %ymm0, %ymm0 # ymm0 = ymm1[2,3,4,5,6,7,8,9,10,11,12,13,14,15],ymm0[0,1],ymm1[18,19,20,21,22,23,24,25,26,27,28,29,30,31],ymm0[16,17]
vmovaps %ymm0, 0x4a0(%rsp)
vmovaps 0x3a0(%rsp), %ymm0
vmovaps 0x3c0(%rsp), %ymm1
vpalignr $0xa, %ymm1, %ymm0, %ymm0 # ymm0 = ymm1[10,11,12,13,14,15],ymm0[0,1,2,3,4,5,6,7,8,9],ymm1[26,27,28,29,30,31],ymm0[16,17,18,19,20,21,22,23,24,25]
vmovaps %ymm0, 0x4c0(%rsp)
vmovaps 0x380(%rsp), %ymm0
vmovaps 0x3a0(%rsp), %ymm1
vpalignr $0x2, %ymm1, %ymm0, %ymm0 # ymm0 = ymm1[2,3,4,5,6,7,8,9,10,11,12,13,14,15],ymm0[0,1],ymm1[18,19,20,21,22,23,24,25,26,27,28,29,30,31],ymm0[16,17]
vmovaps %ymm0, 0x4e0(%rsp)
vmovaps 0x380(%rsp), %ymm0
vmovaps 0x3a0(%rsp), %ymm1
vpalignr $0xa, %ymm1, %ymm0, %ymm0 # ymm0 = ymm1[10,11,12,13,14,15],ymm0[0,1,2,3,4,5,6,7,8,9],ymm1[26,27,28,29,30,31],ymm0[16,17,18,19,20,21,22,23,24,25]
vmovaps %ymm0, 0x500(%rsp)
leaq 0x460(%rsp), %rdi
leaq 0x820(%rsp), %rsi
callq 0x8cb830
vmovaps %ymm0, 0x360(%rsp)
vmovaps 0x360(%rsp), %ymm1
vmovaps 0x940(%rsp), %ymm0
vmovaps %ymm1, 0x1b80(%rsp)
vmovaps %ymm0, 0x1b60(%rsp)
vmovaps 0x1b80(%rsp), %ymm0
vmovaps 0x1b60(%rsp), %ymm1
vpaddd %ymm1, %ymm0, %ymm1
vmovdqa 0x930(%rsp), %xmm0
vmovaps %ymm1, 0x1a80(%rsp)
vmovdqa %xmm0, 0x1a70(%rsp)
vmovaps 0x1a80(%rsp), %ymm0
vmovdqa 0x1a70(%rsp), %xmm1
vpsrad %xmm1, %ymm0, %ymm0
vmovaps %ymm0, 0x340(%rsp)
vmovaps 0x340(%rsp), %ymm1
vmovaps 0x900(%rsp), %ymm0
vmovaps %ymm1, 0x1b40(%rsp)
vmovaps %ymm0, 0x1b20(%rsp)
vmovaps 0x1b40(%rsp), %ymm0
vmovaps 0x1b20(%rsp), %ymm1
vpaddd %ymm1, %ymm0, %ymm1
vmovdqa 0x960(%rsp), %xmm0
vmovaps %ymm1, 0x1a40(%rsp)
vmovdqa %xmm0, 0x1a30(%rsp)
vmovaps 0x1a40(%rsp), %ymm0
vmovdqa 0x1a30(%rsp), %xmm1
vpsrad %xmm1, %ymm0, %ymm0
vmovaps %ymm0, 0x340(%rsp)
vmovaps 0x340(%rsp), %ymm0
vmovaps %ymm0, 0x1c00(%rsp)
vmovaps %ymm0, 0x1be0(%rsp)
vmovaps 0x1c00(%rsp), %ymm0
vmovaps 0x1be0(%rsp), %ymm1
vpackssdw %ymm1, %ymm0, %ymm0
vmovaps %ymm0, 0x320(%rsp)
vmovaps 0x320(%rsp), %ymm0
vmovaps %ymm0, 0x12e0(%rsp)
vmovaps %ymm0, 0x12c0(%rsp)
vmovaps 0x12e0(%rsp), %ymm0
vmovaps 0x12c0(%rsp), %ymm1
vpackuswb %ymm1, %ymm0, %ymm0
vmovaps %ymm0, 0x300(%rsp)
vmovaps 0x300(%rsp), %ymm0
vmovdqa %xmm0, 0x2f0(%rsp)
vmovdqa 0x310(%rsp), %xmm0
vmovdqa %xmm0, 0x2e0(%rsp)
cmpl $0x2, 0x980(%rsp)
jle 0x8c9936
vmovdqa 0x2f0(%rsp), %xmm0
vmovdqa %xmm0, 0x16d0(%rsp)
vmovdqa 0x16d0(%rsp), %xmm0
vmovdqa %xmm0, 0x16c0(%rsp)
movl 0x16c0(%rsp), %edx
movq 0x988(%rsp), %rax
movl 0x8fc(%rsp), %ecx
movl 0x984(%rsp), %esi
imull %esi, %ecx
movslq %ecx, %rcx
movl %edx, (%rax,%rcx)
vmovdqa 0x2e0(%rsp), %xmm0
vmovdqa %xmm0, 0x16b0(%rsp)
vmovdqa 0x16b0(%rsp), %xmm0
vmovdqa %xmm0, 0x16a0(%rsp)
movl 0x16a0(%rsp), %edx
movq 0x988(%rsp), %rax
movl 0x8fc(%rsp), %ecx
imull 0x984(%rsp), %ecx
addl 0x984(%rsp), %ecx
movslq %ecx, %rcx
movl %edx, (%rax,%rcx)
jmp 0x8c99d7
vmovdqa 0x2f0(%rsp), %xmm0
vmovdqa %xmm0, 0x1690(%rsp)
vmovdqa 0x1690(%rsp), %xmm0
vmovdqa %xmm0, 0x1680(%rsp)
movl 0x1680(%rsp), %eax
movw %ax, %dx
movq 0x988(%rsp), %rax
movl 0x8fc(%rsp), %ecx
movl 0x984(%rsp), %esi
imull %esi, %ecx
movslq %ecx, %rcx
movw %dx, (%rax,%rcx)
vmovdqa 0x2e0(%rsp), %xmm0
vmovdqa %xmm0, 0x1670(%rsp)
vmovdqa 0x1670(%rsp), %xmm0
vmovdqa %xmm0, 0x1660(%rsp)
movl 0x1660(%rsp), %eax
movw %ax, %dx
movq 0x988(%rsp), %rax
movl 0x8fc(%rsp), %ecx
imull 0x984(%rsp), %ecx
addl 0x984(%rsp), %ecx
movslq %ecx, %rcx
movw %dx, (%rax,%rcx)
jmp 0x8c99d9
movl 0x8fc(%rsp), %eax
addl $0x2, %eax
movl %eax, 0x8fc(%rsp)
jmp 0x8c9406
jmp 0x8c9f8a
movl $0x0, 0x8fc(%rsp)
movl 0x8fc(%rsp), %eax
cmpl 0x97c(%rsp), %eax
jge 0x8c9f88
movl $0x0, 0x2dc(%rsp)
movl 0x2dc(%rsp), %eax
cmpl 0x980(%rsp), %eax
jge 0x8c9f70
movq 0x540(%rsp), %rax
movl 0x8fc(%rsp), %ecx
movl 0x994(%rsp), %edx
imull %edx, %ecx
movl 0x2dc(%rsp), %edx
addl %edx, %ecx
movslq %ecx, %rcx
addq %rcx, %rax
movq %rax, 0xa18(%rsp)
movq 0xa18(%rsp), %rax
vmovdqu (%rax), %xmm0
vmovdqa %xmm0, 0xa90(%rsp)
vmovdqa 0xa90(%rsp), %xmm0
movq 0x540(%rsp), %rcx
movl 0x8fc(%rsp), %eax
movl 0x994(%rsp), %edx
imull %edx, %eax
movl 0x2dc(%rsp), %edx
addl %edx, %eax
cltq
leaq 0x4(%rax,%rcx), %rax
movq %rax, 0xa10(%rsp)
movq 0xa10(%rsp), %rax
vmovdqu (%rax), %xmm1
vmovdqa %xmm1, 0xa80(%rsp)
vmovdqa 0xa80(%rsp), %xmm1
vmovdqa %xmm1, 0x2b0(%rsp)
vmovdqa %xmm0, 0x2a0(%rsp)
vmovaps 0x2a0(%rsp), %ymm1
vmovaps 0x520(%rsp), %ymm0
vmovaps %ymm1, 0xb20(%rsp)
vmovaps %ymm0, 0xb00(%rsp)
vmovaps 0xb20(%rsp), %ymm0
vmovaps 0xb00(%rsp), %ymm1
vpunpcklbw %ymm1, %ymm0, %ymm0 # ymm0 = ymm0[0],ymm1[0],ymm0[1],ymm1[1],ymm0[2],ymm1[2],ymm0[3],ymm1[3],ymm0[4],ymm1[4],ymm0[5],ymm1[5],ymm0[6],ymm1[6],ymm0[7],ymm1[7],ymm0[16],ymm1[16],ymm0[17],ymm1[17],ymm0[18],ymm1[18],ymm0[19],ymm1[19],ymm0[20],ymm1[20],ymm0[21],ymm1[21],ymm0[22],ymm1[22],ymm0[23],ymm1[23]
vmovaps %ymm0, 0x280(%rsp)
vmovaps 0x2a0(%rsp), %ymm1
vmovaps 0x520(%rsp), %ymm0
vmovaps %ymm1, 0xba0(%rsp)
vmovaps %ymm0, 0xb80(%rsp)
vmovaps 0xba0(%rsp), %ymm0
vmovaps 0xb80(%rsp), %ymm1
vpunpckhbw %ymm1, %ymm0, %ymm0 # ymm0 = ymm0[8],ymm1[8],ymm0[9],ymm1[9],ymm0[10],ymm1[10],ymm0[11],ymm1[11],ymm0[12],ymm1[12],ymm0[13],ymm1[13],ymm0[14],ymm1[14],ymm0[15],ymm1[15],ymm0[24],ymm1[24],ymm0[25],ymm1[25],ymm0[26],ymm1[26],ymm0[27],ymm1[27],ymm0[28],ymm1[28],ymm0[29],ymm1[29],ymm0[30],ymm1[30],ymm0[31],ymm1[31]
vmovaps %ymm0, 0x260(%rsp)
vmovaps 0x280(%rsp), %ymm0
vmovaps %ymm0, 0x1800(%rsp)
vmovaps %ymm0, 0x17e0(%rsp)
vmovaps 0x1800(%rsp), %ymm0
vmovaps 0x17e0(%rsp), %ymm1
vpunpcklwd %ymm1, %ymm0, %ymm0 # ymm0 = ymm0[0],ymm1[0],ymm0[1],ymm1[1],ymm0[2],ymm1[2],ymm0[3],ymm1[3],ymm0[8],ymm1[8],ymm0[9],ymm1[9],ymm0[10],ymm1[10],ymm0[11],ymm1[11]
vmovaps %ymm0, 0x240(%rsp)
vmovaps 0x280(%rsp), %ymm0
vmovaps %ymm0, 0x1900(%rsp)
vmovaps %ymm0, 0x18e0(%rsp)
vmovaps 0x1900(%rsp), %ymm0
vmovaps 0x18e0(%rsp), %ymm1
vpunpckhwd %ymm1, %ymm0, %ymm0 # ymm0 = ymm0[4],ymm1[4],ymm0[5],ymm1[5],ymm0[6],ymm1[6],ymm0[7],ymm1[7],ymm0[12],ymm1[12],ymm0[13],ymm1[13],ymm0[14],ymm1[14],ymm0[15],ymm1[15]
vmovaps %ymm0, 0x220(%rsp)
vmovaps 0x260(%rsp), %ymm0
vmovaps %ymm0, 0x17c0(%rsp)
vmovaps %ymm0, 0x17a0(%rsp)
vmovaps 0x17c0(%rsp), %ymm0
vmovaps 0x17a0(%rsp), %ymm1
vpunpcklwd %ymm1, %ymm0, %ymm0 # ymm0 = ymm0[0],ymm1[0],ymm0[1],ymm1[1],ymm0[2],ymm1[2],ymm0[3],ymm1[3],ymm0[8],ymm1[8],ymm0[9],ymm1[9],ymm0[10],ymm1[10],ymm0[11],ymm1[11]
vmovaps %ymm0, 0x200(%rsp)
vmovaps 0x260(%rsp), %ymm0
vmovaps %ymm0, 0x18c0(%rsp)
vmovaps %ymm0, 0x18a0(%rsp)
vmovaps 0x18c0(%rsp), %ymm0
vmovaps 0x18a0(%rsp), %ymm1
vpunpckhwd %ymm1, %ymm0, %ymm0 # ymm0 = ymm0[4],ymm1[4],ymm0[5],ymm1[5],ymm0[6],ymm1[6],ymm0[7],ymm1[7],ymm0[12],ymm1[12],ymm0[13],ymm1[13],ymm0[14],ymm1[14],ymm0[15],ymm1[15]
vmovaps %ymm0, 0x1e0(%rsp)
vmovaps 0x220(%rsp), %ymm0
vmovaps 0x240(%rsp), %ymm1
vpalignr $0x2, %ymm1, %ymm0, %ymm0 # ymm0 = ymm1[2,3,4,5,6,7,8,9,10,11,12,13,14,15],ymm0[0,1],ymm1[18,19,20,21,22,23,24,25,26,27,28,29,30,31],ymm0[16,17]
vmovaps %ymm0, 0x460(%rsp)
vmovaps 0x220(%rsp), %ymm0
vmovaps 0x240(%rsp), %ymm1
vpalignr $0xa, %ymm1, %ymm0, %ymm0 # ymm0 = ymm1[10,11,12,13,14,15],ymm0[0,1,2,3,4,5,6,7,8,9],ymm1[26,27,28,29,30,31],ymm0[16,17,18,19,20,21,22,23,24,25]
vmovaps %ymm0, 0x480(%rsp)
vmovaps 0x200(%rsp), %ymm0
vmovaps 0x220(%rsp), %ymm1
vpalignr $0x2, %ymm1, %ymm0, %ymm0 # ymm0 = ymm1[2,3,4,5,6,7,8,9,10,11,12,13,14,15],ymm0[0,1],ymm1[18,19,20,21,22,23,24,25,26,27,28,29,30,31],ymm0[16,17]
vmovaps %ymm0, 0x4a0(%rsp)
vmovaps 0x200(%rsp), %ymm0
vmovaps 0x220(%rsp), %ymm1
vpalignr $0xa, %ymm1, %ymm0, %ymm0 # ymm0 = ymm1[10,11,12,13,14,15],ymm0[0,1,2,3,4,5,6,7,8,9],ymm1[26,27,28,29,30,31],ymm0[16,17,18,19,20,21,22,23,24,25]
vmovaps %ymm0, 0x4c0(%rsp)
vmovaps 0x1e0(%rsp), %ymm0
vmovaps 0x200(%rsp), %ymm1
vpalignr $0x2, %ymm1, %ymm0, %ymm0 # ymm0 = ymm1[2,3,4,5,6,7,8,9,10,11,12,13,14,15],ymm0[0,1],ymm1[18,19,20,21,22,23,24,25,26,27,28,29,30,31],ymm0[16,17]
vmovaps %ymm0, 0x4e0(%rsp)
vmovaps 0x1e0(%rsp), %ymm0
vmovaps 0x200(%rsp), %ymm1
vpalignr $0xa, %ymm1, %ymm0, %ymm0 # ymm0 = ymm1[10,11,12,13,14,15],ymm0[0,1,2,3,4,5,6,7,8,9],ymm1[26,27,28,29,30,31],ymm0[16,17,18,19,20,21,22,23,24,25]
vmovaps %ymm0, 0x500(%rsp)
leaq 0x460(%rsp), %rdi
leaq 0x820(%rsp), %rsi
callq 0x8cb830
vmovaps %ymm0, 0x1c0(%rsp)
vmovaps 0x1c0(%rsp), %ymm1
vmovaps 0x940(%rsp), %ymm0
vmovaps %ymm1, 0x1b00(%rsp)
vmovaps %ymm0, 0x1ae0(%rsp)
vmovaps 0x1b00(%rsp), %ymm0
vmovaps 0x1ae0(%rsp), %ymm1
vpaddd %ymm1, %ymm0, %ymm1
vmovdqa 0x930(%rsp), %xmm0
vmovaps %ymm1, 0x1a00(%rsp)
vmovdqa %xmm0, 0x19f0(%rsp)
vmovaps 0x1a00(%rsp), %ymm0
vmovdqa 0x19f0(%rsp), %xmm1
vpsrad %xmm1, %ymm0, %ymm0
vmovaps %ymm0, 0x1a0(%rsp)
vmovaps 0x1a0(%rsp), %ymm1
vmovaps 0x900(%rsp), %ymm0
vmovaps %ymm1, 0x1ac0(%rsp)
vmovaps %ymm0, 0x1aa0(%rsp)
vmovaps 0x1ac0(%rsp), %ymm0
vmovaps 0x1aa0(%rsp), %ymm1
vpaddd %ymm1, %ymm0, %ymm1
vmovdqa 0x960(%rsp), %xmm0
vmovaps %ymm1, 0x19c0(%rsp)
vmovdqa %xmm0, 0x19b0(%rsp)
vmovaps 0x19c0(%rsp), %ymm0
vmovdqa 0x19b0(%rsp), %xmm1
vpsrad %xmm1, %ymm0, %ymm0
vmovaps %ymm0, 0x1a0(%rsp)
vmovaps 0x1a0(%rsp), %ymm0
vmovaps %ymm0, 0x1bc0(%rsp)
vmovaps %ymm0, 0x1ba0(%rsp)
vmovaps 0x1bc0(%rsp), %ymm0
vmovaps 0x1ba0(%rsp), %ymm1
vpackssdw %ymm1, %ymm0, %ymm0
vmovaps %ymm0, 0x180(%rsp)
vmovaps 0x180(%rsp), %ymm0
vmovaps %ymm0, 0x12a0(%rsp)
vmovaps %ymm0, 0x1280(%rsp)
vmovaps 0x12a0(%rsp), %ymm0
vmovaps 0x1280(%rsp), %ymm1
vpackuswb %ymm1, %ymm0, %ymm0
vmovaps %ymm0, 0x160(%rsp)
vmovaps 0x160(%rsp), %ymm0
vmovdqa %xmm0, 0x150(%rsp)
vmovaps 0x160(%rsp), %ymm0
vextracti128 $0x1, %ymm0, 0x140(%rsp)
vmovdqa 0x150(%rsp), %xmm0
vmovdqa %xmm0, 0x1650(%rsp)
vmovdqa 0x1650(%rsp), %xmm0
vmovdqa %xmm0, 0x1640(%rsp)
movl 0x1640(%rsp), %edx
movq 0x988(%rsp), %rax
movl 0x8fc(%rsp), %ecx
movl 0x984(%rsp), %esi
imull %esi, %ecx
movl 0x2dc(%rsp), %esi
addl %esi, %ecx
movslq %ecx, %rcx
movl %edx, (%rax,%rcx)
vmovdqa 0x140(%rsp), %xmm0
vmovdqa %xmm0, 0x1630(%rsp)
vmovdqa 0x1630(%rsp), %xmm0
vmovdqa %xmm0, 0x1620(%rsp)
movl 0x1620(%rsp), %edx
movq 0x988(%rsp), %rax
movl 0x8fc(%rsp), %ecx
imull 0x984(%rsp), %ecx
addl 0x2dc(%rsp), %ecx
addl $0x4, %ecx
movslq %ecx, %rcx
movl %edx, (%rax,%rcx)
movl 0x2dc(%rsp), %eax
addl $0x8, %eax
movl %eax, 0x2dc(%rsp)
jmp 0x8c9a1e
jmp 0x8c9f72
movl 0x8fc(%rsp), %eax
addl $0x1, %eax
movl %eax, 0x8fc(%rsp)
jmp 0x8c99ff
jmp 0x8c9f8a
jmp 0x8ca709
movq 0x10(%rbp), %rax
movzwl 0x8(%rax), %eax
movl $0x2, %ecx
cltd
idivl %ecx
subl $0x1, %eax
movl %eax, 0x13c(%rsp)
movq 0x998(%rsp), %rax
movslq 0x13c(%rsp), %rdx
xorl %ecx, %ecx
subq %rdx, %rcx
addq %rcx, %rax
movq %rax, 0x130(%rsp)
leaq 0x2508f0(%rip), %rax # 0xb1a8c0
addq $0x40, %rax
movq %rax, 0x1d48(%rsp)
movq 0x1d48(%rsp), %rax
vmovdqa (%rax), %ymm0
vmovdqa %ymm0, 0x7e0(%rsp)
leaq 0x2508c8(%rip), %rax # 0xb1a8c0
addq $0x60, %rax
movq %rax, 0x1d40(%rsp)
movq 0x1d40(%rsp), %rax
vmovdqa (%rax), %ymm0
vmovdqa %ymm0, 0x800(%rsp)
cmpl $0x8, 0x980(%rsp)
jg 0x8ca45e
movl $0x0, 0x8fc(%rsp)
movl 0x8fc(%rsp), %eax
cmpl 0x97c(%rsp), %eax
jge 0x8ca459
movq 0x130(%rsp), %rax
movl 0x8fc(%rsp), %ecx
movl 0x994(%rsp), %edx
imull %edx, %ecx
movslq %ecx, %rcx
addq %rcx, %rax
movq %rax, 0xa08(%rsp)
movq 0xa08(%rsp), %rax
vmovdqu (%rax), %xmm0
vmovdqa %xmm0, 0xa70(%rsp)
vmovdqa 0xa70(%rsp), %xmm0
movq 0x130(%rsp), %rax
movl 0x8fc(%rsp), %ecx
movl 0x994(%rsp), %edx
imull %edx, %ecx
addl %edx, %ecx
movslq %ecx, %rcx
addq %rcx, %rax
movq %rax, 0xa00(%rsp)
movq 0xa00(%rsp), %rax
vmovdqu (%rax), %xmm1
vmovdqa %xmm1, 0xa60(%rsp)
vmovdqa 0xa60(%rsp), %xmm1
vmovdqa %xmm1, 0x110(%rsp)
vmovdqa %xmm0, 0x100(%rsp)
vmovaps 0x100(%rsp), %ymm0
leaq 0x820(%rsp), %rdi
leaq 0x7a0(%rsp), %rsi
callq 0x8d0e70
vmovaps %ymm0, 0xe0(%rsp)
vmovaps 0xe0(%rsp), %ymm1
vmovaps 0x940(%rsp), %ymm0
vmovaps %ymm1, 0xfe0(%rsp)
vmovaps %ymm0, 0xfc0(%rsp)
vmovaps 0xfe0(%rsp), %ymm0
vmovaps 0xfc0(%rsp), %ymm1
vpaddw %ymm1, %ymm0, %ymm1
vmovdqa 0x930(%rsp), %xmm0
vmovaps %ymm1, 0xce0(%rsp)
vmovdqa %xmm0, 0xcd0(%rsp)
vmovaps 0xce0(%rsp), %ymm0
vmovdqa 0xcd0(%rsp), %xmm1
vpsraw %xmm1, %ymm0, %ymm0
vmovaps %ymm0, 0xe0(%rsp)
vmovaps 0xe0(%rsp), %ymm1
vmovaps 0x900(%rsp), %ymm0
vmovaps %ymm1, 0xfa0(%rsp)
vmovaps %ymm0, 0xf80(%rsp)
vmovaps 0xfa0(%rsp), %ymm0
vmovaps 0xf80(%rsp), %ymm1
vpaddw %ymm1, %ymm0, %ymm1
vmovdqa 0x960(%rsp), %xmm0
vmovaps %ymm1, 0xca0(%rsp)
vmovdqa %xmm0, 0xc90(%rsp)
vmovaps 0xca0(%rsp), %ymm0
vmovdqa 0xc90(%rsp), %xmm1
vpsraw %xmm1, %ymm0, %ymm0
vmovaps %ymm0, 0xe0(%rsp)
vmovaps 0xe0(%rsp), %ymm0
vmovaps %ymm0, 0x1260(%rsp)
vmovaps %ymm0, 0x1240(%rsp)
vmovaps 0x1260(%rsp), %ymm0
vmovaps 0x1240(%rsp), %ymm1
vpackuswb %ymm1, %ymm0, %ymm0
vmovaps %ymm0, 0xc0(%rsp)
vmovaps 0xc0(%rsp), %ymm0
vmovaps %ymm0, 0x1420(%rsp)
vmovaps 0x1420(%rsp), %ymm0
vmovdqa %xmm0, 0xb0(%rsp)
vmovdqa 0xd0(%rsp), %xmm0
vmovdqa %xmm0, 0xa0(%rsp)
cmpl $0x4, 0x980(%rsp)
jle 0x8ca310
movq 0x988(%rsp), %rax
movl 0x8fc(%rsp), %ecx
movl 0x984(%rsp), %edx
imull %edx, %ecx
movslq %ecx, %rcx
addq %rcx, %rax
vmovdqa 0xb0(%rsp), %xmm0
movq %rax, 0x1558(%rsp)
vmovdqa %xmm0, 0x1540(%rsp)
movq 0x1540(%rsp), %rcx
movq 0x1558(%rsp), %rax
movq %rcx, (%rax)
movq 0x988(%rsp), %rax
movl 0x8fc(%rsp), %ecx
movl 0x984(%rsp), %edx
imull %edx, %ecx
addl %edx, %ecx
movslq %ecx, %rcx
addq %rcx, %rax
vmovdqa 0xa0(%rsp), %xmm0
movq %rax, 0x1538(%rsp)
vmovdqa %xmm0, 0x1520(%rsp)
movq 0x1520(%rsp), %rcx
movq 0x1538(%rsp), %rax
movq %rcx, (%rax)
jmp 0x8ca441
cmpl $0x2, 0x980(%rsp)
jle 0x8ca37d
movq 0x988(%rsp), %rdi
movl 0x8fc(%rsp), %eax
imull 0x984(%rsp), %eax
cltq
addq %rax, %rdi
vmovdqa 0xb0(%rsp), %xmm0
vzeroupper
callq 0x8cb680
movq 0x988(%rsp), %rdi
movl 0x8fc(%rsp), %eax
imull 0x984(%rsp), %eax
addl 0x984(%rsp), %eax
cltq
addq %rax, %rdi
vmovdqa 0xa0(%rsp), %xmm0
callq 0x8cb680
jmp 0x8ca43f
movq 0x988(%rsp), %rax
movl 0x8fc(%rsp), %ecx
movl 0x984(%rsp), %edx
imull %edx, %ecx
movslq %ecx, %rcx
addq %rcx, %rax
movq %rax, 0x98(%rsp)
movq 0x988(%rsp), %rax
movl 0x8fc(%rsp), %ecx
movl 0x984(%rsp), %edx
imull %edx, %ecx
addl %edx, %ecx
movslq %ecx, %rcx
addq %rcx, %rax
movq %rax, 0x90(%rsp)
vmovdqa 0xb0(%rsp), %xmm0
vmovdqa %xmm0, 0x1610(%rsp)
vmovdqa 0x1610(%rsp), %xmm0
vmovdqa %xmm0, 0x1600(%rsp)
movl 0x1600(%rsp), %eax
movw %ax, %cx
movq 0x98(%rsp), %rax
movw %cx, (%rax)
vmovdqa 0xa0(%rsp), %xmm0
vmovdqa %xmm0, 0x15f0(%rsp)
vmovdqa 0x15f0(%rsp), %xmm0
vmovdqa %xmm0, 0x15e0(%rsp)
movl 0x15e0(%rsp), %eax
movw %ax, %cx
movq 0x90(%rsp), %rax
movw %cx, (%rax)
jmp 0x8ca441
jmp 0x8ca443
movl 0x8fc(%rsp), %eax
addl $0x2, %eax
movl %eax, 0x8fc(%rsp)
jmp 0x8ca032
jmp 0x8ca707
movl $0x0, 0x8fc(%rsp)
movl 0x8fc(%rsp), %eax
cmpl 0x97c(%rsp), %eax
jge 0x8ca705
movl $0x0, 0x8c(%rsp)
movl 0x8c(%rsp), %eax
cmpl 0x980(%rsp), %eax
jge 0x8ca6ed
movq 0x130(%rsp), %rax
movl 0x8fc(%rsp), %ecx
movl 0x994(%rsp), %edx
imull %edx, %ecx
movl 0x8c(%rsp), %edx
addl %edx, %ecx
movslq %ecx, %rcx
addq %rcx, %rax
movq %rax, 0x1d28(%rsp)
movq 0x1d28(%rsp), %rax
vmovups (%rax), %ymm0
movq 0x130(%rsp), %rax
movl 0x8fc(%rsp), %esi
movl 0x994(%rsp), %ecx
imull %ecx, %esi
movl 0x8c(%rsp), %edx
movl %edx, %ecx
movl %esi, %edx
leal 0x8(%rcx,%rdx), %ecx
movslq %ecx, %rcx
addq %rcx, %rax
movq %rax, 0x9f8(%rsp)
movq 0x9f8(%rsp), %rax
vmovdqu (%rax), %xmm1
vinserti128 $0x1, %xmm1, %ymm0, %ymm0
vmovaps %ymm0, 0x60(%rsp)
vmovaps 0x60(%rsp), %ymm0
leaq 0x820(%rsp), %rdi
leaq 0x7a0(%rsp), %rsi
callq 0x8d0e70
vmovaps %ymm0, 0x40(%rsp)
vmovaps 0x40(%rsp), %ymm1
vmovaps 0x940(%rsp), %ymm0
vmovaps %ymm1, 0xf60(%rsp)
vmovaps %ymm0, 0xf40(%rsp)
vmovaps 0xf60(%rsp), %ymm0
vmovaps 0xf40(%rsp), %ymm1
vpaddw %ymm1, %ymm0, %ymm1
vmovdqa 0x930(%rsp), %xmm0
vmovaps %ymm1, 0xc60(%rsp)
vmovdqa %xmm0, 0xc50(%rsp)
vmovaps 0xc60(%rsp), %ymm0
vmovdqa 0xc50(%rsp), %xmm1
vpsraw %xmm1, %ymm0, %ymm0
vmovaps %ymm0, 0x40(%rsp)
vmovaps 0x40(%rsp), %ymm1
vmovaps 0x900(%rsp), %ymm0
vmovaps %ymm1, 0xf20(%rsp)
vmovaps %ymm0, 0xf00(%rsp)
vmovaps 0xf20(%rsp), %ymm0
vmovaps 0xf00(%rsp), %ymm1
vpaddw %ymm1, %ymm0, %ymm1
vmovdqa 0x960(%rsp), %xmm0
vmovaps %ymm1, 0xc20(%rsp)
vmovdqa %xmm0, 0xc10(%rsp)
vmovaps 0xc20(%rsp), %ymm0
vmovdqa 0xc10(%rsp), %xmm1
vpsraw %xmm1, %ymm0, %ymm0
vmovaps %ymm0, 0x40(%rsp)
vmovaps 0x40(%rsp), %ymm0
vmovaps %ymm0, 0x1220(%rsp)
vmovaps %ymm0, 0x1200(%rsp)
vmovaps 0x1220(%rsp), %ymm0
vmovaps 0x1200(%rsp), %ymm1
vpackuswb %ymm1, %ymm0, %ymm0
vmovaps %ymm0, 0x20(%rsp)
vmovaps 0x20(%rsp), %ymm0
vpermq $0xd8, %ymm0, %ymm0 # ymm0 = ymm0[0,2,1,3]
vmovaps %ymm0, 0x20(%rsp)
vmovaps 0x20(%rsp), %ymm0
vmovaps %ymm0, 0x1400(%rsp)
vmovdqa 0x1400(%rsp), %xmm0
vmovdqa %xmm0, 0x10(%rsp)
movq 0x988(%rsp), %rax
movl 0x8fc(%rsp), %ecx
imull 0x984(%rsp), %ecx
addl 0x8c(%rsp), %ecx
movslq %ecx, %rcx
addq %rcx, %rax
vmovdqa 0x10(%rsp), %xmm0
movq %rax, 0x14d8(%rsp)
vmovdqa %xmm0, 0x14c0(%rsp)
vmovdqa 0x14c0(%rsp), %xmm0
movq 0x14d8(%rsp), %rax
vmovdqu %xmm0, (%rax)
movl 0x8c(%rsp), %eax
addl $0x10, %eax
movl %eax, 0x8c(%rsp)
jmp 0x8ca488
jmp 0x8ca6ef
movl 0x8fc(%rsp), %eax
addl $0x1, %eax
movl %eax, 0x8fc(%rsp)
jmp 0x8ca469
jmp 0x8ca707
jmp 0x8ca709
jmp 0x8ca70b
jmp 0x8ca70d
movq %rbp, %rsp
popq %rbp
vzeroupper
retq
nopw %cs:(%rax,%rax)
| /m-ab-s[P]aom/av1/common/x86/convolve_avx2.c |
calc_mask_d16_avx2 | static inline __m256i calc_mask_d16_avx2(const __m256i *data_src0,
const __m256i *data_src1,
const __m256i *round_const,
const __m256i *mask_base_16,
const __m256i *clip_diff, int round) {
  // Saturating subtractions in both directions; their per-lane maximum is the
  // absolute difference |src0 - src1| for unsigned 16-bit lanes.
  const __m256i diffa = _mm256_subs_epu16(*data_src0, *data_src1);
  const __m256i diffb = _mm256_subs_epu16(*data_src1, *data_src0);
  const __m256i diff = _mm256_max_epu16(diffa, diffb);
const __m256i diff_round =
_mm256_srli_epi16(_mm256_adds_epu16(diff, *round_const), round);
const __m256i diff_factor = _mm256_srli_epi16(diff_round, DIFF_FACTOR_LOG2);
const __m256i diff_mask = _mm256_adds_epi16(diff_factor, *mask_base_16);
const __m256i diff_clamp = _mm256_min_epi16(diff_mask, *clip_diff);
return diff_clamp;
} | pushq %rbp
movq %rsp, %rbp
andq $-0x20, %rsp
subq $0x340, %rsp # imm = 0x340
movq %rdi, 0x118(%rsp)
movq %rsi, 0x110(%rsp)
movq %rdx, 0x108(%rsp)
movq %rcx, 0x100(%rsp)
movq %r8, 0xf8(%rsp)
movl %r9d, 0xf4(%rsp)
movq 0x118(%rsp), %rax
vmovdqa (%rax), %ymm1
movq 0x110(%rsp), %rax
vmovdqa (%rax), %ymm0
vmovdqa %ymm1, 0x240(%rsp)
vmovdqa %ymm0, 0x220(%rsp)
vmovdqa 0x240(%rsp), %ymm0
vmovdqa 0x220(%rsp), %ymm1
vpsubusw %ymm1, %ymm0, %ymm0
vmovdqa %ymm0, 0xc0(%rsp)
movq 0x110(%rsp), %rax
vmovdqa (%rax), %ymm1
movq 0x118(%rsp), %rax
vmovdqa (%rax), %ymm0
vmovdqa %ymm1, 0x200(%rsp)
vmovdqa %ymm0, 0x1e0(%rsp)
vmovdqa 0x200(%rsp), %ymm0
vmovdqa 0x1e0(%rsp), %ymm1
vpsubusw %ymm1, %ymm0, %ymm0
vmovdqa %ymm0, 0xa0(%rsp)
vmovdqa 0xc0(%rsp), %ymm1
vmovdqa 0xa0(%rsp), %ymm0
vmovdqa %ymm1, 0x280(%rsp)
vmovdqa %ymm0, 0x260(%rsp)
vmovdqa 0x280(%rsp), %ymm0
vmovdqa 0x260(%rsp), %ymm1
vpmaxuw %ymm1, %ymm0, %ymm0
vmovdqa %ymm0, 0x80(%rsp)
vmovdqa 0x80(%rsp), %ymm1
movq 0x108(%rsp), %rax
vmovdqa (%rax), %ymm0
vmovdqa %ymm1, 0x2c0(%rsp)
vmovdqa %ymm0, 0x2a0(%rsp)
vmovdqa 0x2c0(%rsp), %ymm0
vmovdqa 0x2a0(%rsp), %ymm1
vpaddusw %ymm1, %ymm0, %ymm0
movl 0xf4(%rsp), %eax
vmovdqa %ymm0, 0x1c0(%rsp)
movl %eax, 0x1bc(%rsp)
vmovdqa 0x1c0(%rsp), %ymm0
movl 0x1bc(%rsp), %eax
vmovd %eax, %xmm1
vpsrlw %xmm1, %ymm0, %ymm0
vmovdqa %ymm0, 0x60(%rsp)
vmovdqa 0x60(%rsp), %ymm0
vmovdqa %ymm0, 0x180(%rsp)
movl $0x4, 0x17c(%rsp)
vmovdqa 0x180(%rsp), %ymm0
movl 0x17c(%rsp), %eax
vmovd %eax, %xmm1
vpsrlw %xmm1, %ymm0, %ymm0
vmovdqa %ymm0, 0x40(%rsp)
vmovdqa 0x40(%rsp), %ymm1
movq 0x100(%rsp), %rax
vmovdqa (%rax), %ymm0
vmovdqa %ymm1, 0x300(%rsp)
vmovdqa %ymm0, 0x2e0(%rsp)
vmovdqa 0x300(%rsp), %ymm0
vmovdqa 0x2e0(%rsp), %ymm1
vpaddsw %ymm1, %ymm0, %ymm0
vmovdqa %ymm0, 0x20(%rsp)
vmovdqa 0x20(%rsp), %ymm1
movq 0xf8(%rsp), %rax
vmovdqa (%rax), %ymm0
vmovdqa %ymm1, 0x140(%rsp)
vmovdqa %ymm0, 0x120(%rsp)
vmovdqa 0x140(%rsp), %ymm0
vmovdqa 0x120(%rsp), %ymm1
vpminsw %ymm1, %ymm0, %ymm0
vmovdqa %ymm0, (%rsp)
vmovdqa (%rsp), %ymm0
movq %rbp, %rsp
popq %rbp
retq
nopl (%rax,%rax)
| /m-ab-s[P]aom/av1/common/x86/reconinter_avx2.c |
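As a readability aid, here is a hypothetical scalar model of what one 16-bit lane of calc_mask_d16_avx2 above computes. The shift by 4 matches DIFF_FACTOR_LOG2 in the intrinsics; the rounding offset (1 << round) >> 1 and the parameter names mask_base/clip_diff are assumptions standing in for the caller-supplied vectors, and the saturating adds of the SIMD path are omitted since plain int arithmetic cannot overflow here:

#include <stdint.h>

// Hypothetical scalar equivalent of one 16-bit lane of calc_mask_d16_avx2.
// round, mask_base and clip_diff stand in for the vector arguments above.
static uint16_t calc_mask_d16_scalar(uint16_t src0, uint16_t src1, int round,
                                     int16_t mask_base, int16_t clip_diff) {
  const int diff = src0 > src1 ? src0 - src1 : src1 - src0;  // |src0 - src1|
  const int round_const = (1 << round) >> 1;                 // assumed rounding offset
  const int diff_factor = ((diff + round_const) >> round) >> 4;  // DIFF_FACTOR_LOG2 == 4
  const int mask = diff_factor + mask_base;
  return (uint16_t)(mask > clip_diff ? clip_diff : mask);
}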
calc_mask_d16_inv_avx2 | static inline __m256i calc_mask_d16_inv_avx2(const __m256i *data_src0,
const __m256i *data_src1,
const __m256i *round_const,
const __m256i *mask_base_16,
const __m256i *clip_diff,
int round) {
const __m256i diffa = _mm256_subs_epu16(*data_src0, *data_src1);
const __m256i diffb = _mm256_subs_epu16(*data_src1, *data_src0);
const __m256i diff = _mm256_max_epu16(diffa, diffb);
const __m256i diff_round =
_mm256_srli_epi16(_mm256_adds_epu16(diff, *round_const), round);
const __m256i diff_factor = _mm256_srli_epi16(diff_round, DIFF_FACTOR_LOG2);
const __m256i diff_mask = _mm256_adds_epi16(diff_factor, *mask_base_16);
const __m256i diff_clamp = _mm256_min_epi16(diff_mask, *clip_diff);
const __m256i diff_const_16 = _mm256_sub_epi16(*clip_diff, diff_clamp);
return diff_const_16;
} | pushq %rbp
movq %rsp, %rbp
andq $-0x20, %rsp
subq $0x3a0, %rsp # imm = 0x3A0
movq %rdi, 0x138(%rsp)
movq %rsi, 0x130(%rsp)
movq %rdx, 0x128(%rsp)
movq %rcx, 0x120(%rsp)
movq %r8, 0x118(%rsp)
movl %r9d, 0x114(%rsp)
movq 0x138(%rsp), %rax
vmovdqa (%rax), %ymm1
movq 0x130(%rsp), %rax
vmovdqa (%rax), %ymm0
vmovdqa %ymm1, 0x2a0(%rsp)
vmovdqa %ymm0, 0x280(%rsp)
vmovdqa 0x2a0(%rsp), %ymm0
vmovdqa 0x280(%rsp), %ymm1
vpsubusw %ymm1, %ymm0, %ymm0
vmovdqa %ymm0, 0xe0(%rsp)
movq 0x130(%rsp), %rax
vmovdqa (%rax), %ymm1
movq 0x138(%rsp), %rax
vmovdqa (%rax), %ymm0
vmovdqa %ymm1, 0x260(%rsp)
vmovdqa %ymm0, 0x240(%rsp)
vmovdqa 0x260(%rsp), %ymm0
vmovdqa 0x240(%rsp), %ymm1
vpsubusw %ymm1, %ymm0, %ymm0
vmovdqa %ymm0, 0xc0(%rsp)
vmovdqa 0xe0(%rsp), %ymm1
vmovdqa 0xc0(%rsp), %ymm0
vmovdqa %ymm1, 0x2e0(%rsp)
vmovdqa %ymm0, 0x2c0(%rsp)
vmovdqa 0x2e0(%rsp), %ymm0
vmovdqa 0x2c0(%rsp), %ymm1
vpmaxuw %ymm1, %ymm0, %ymm0
vmovdqa %ymm0, 0xa0(%rsp)
vmovdqa 0xa0(%rsp), %ymm1
movq 0x128(%rsp), %rax
vmovdqa (%rax), %ymm0
vmovdqa %ymm1, 0x320(%rsp)
vmovdqa %ymm0, 0x300(%rsp)
vmovdqa 0x320(%rsp), %ymm0
vmovdqa 0x300(%rsp), %ymm1
vpaddusw %ymm1, %ymm0, %ymm0
movl 0x114(%rsp), %eax
vmovdqa %ymm0, 0x220(%rsp)
movl %eax, 0x21c(%rsp)
vmovdqa 0x220(%rsp), %ymm0
movl 0x21c(%rsp), %eax
vmovd %eax, %xmm1
vpsrlw %xmm1, %ymm0, %ymm0
vmovdqa %ymm0, 0x80(%rsp)
vmovdqa 0x80(%rsp), %ymm0
vmovdqa %ymm0, 0x1e0(%rsp)
movl $0x4, 0x1dc(%rsp)
vmovdqa 0x1e0(%rsp), %ymm0
movl 0x1dc(%rsp), %eax
vmovd %eax, %xmm1
vpsrlw %xmm1, %ymm0, %ymm0
vmovdqa %ymm0, 0x60(%rsp)
vmovdqa 0x60(%rsp), %ymm1
movq 0x120(%rsp), %rax
vmovdqa (%rax), %ymm0
vmovdqa %ymm1, 0x360(%rsp)
vmovdqa %ymm0, 0x340(%rsp)
vmovdqa 0x360(%rsp), %ymm0
vmovdqa 0x340(%rsp), %ymm1
vpaddsw %ymm1, %ymm0, %ymm0
vmovdqa %ymm0, 0x40(%rsp)
vmovdqa 0x40(%rsp), %ymm1
movq 0x118(%rsp), %rax
vmovdqa (%rax), %ymm0
vmovdqa %ymm1, 0x1a0(%rsp)
vmovdqa %ymm0, 0x180(%rsp)
vmovdqa 0x1a0(%rsp), %ymm0
vmovdqa 0x180(%rsp), %ymm1
vpminsw %ymm1, %ymm0, %ymm0
vmovdqa %ymm0, 0x20(%rsp)
movq 0x118(%rsp), %rax
vmovdqa (%rax), %ymm1
vmovdqa 0x20(%rsp), %ymm0
vmovdqa %ymm1, 0x160(%rsp)
vmovdqa %ymm0, 0x140(%rsp)
vmovdqa 0x160(%rsp), %ymm0
vmovdqa 0x140(%rsp), %ymm1
vpsubw %ymm1, %ymm0, %ymm0
vmovdqa %ymm0, (%rsp)
vmovdqa (%rsp), %ymm0
movq %rbp, %rsp
popq %rbp
retq
nop
| /m-ab-s[P]aom/av1/common/x86/reconinter_avx2.c |
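The _inv variant differs from calc_mask_d16_avx2 only in its final step: it returns the complement of the clamped mask with respect to clip_diff. In terms of the hypothetical scalar helper sketched above:

// Hypothetical scalar form of calc_mask_d16_inv: the complement of the clamped mask.
static uint16_t calc_mask_d16_inv_scalar(uint16_t src0, uint16_t src1, int round,
                                         int16_t mask_base, int16_t clip_diff) {
  return (uint16_t)(clip_diff -
                    calc_mask_d16_scalar(src0, src1, round, mask_base, clip_diff));
}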
cross_sum_fast_even_row | static inline __m256i cross_sum_fast_even_row(const int32_t *buf, int stride) {
const __m256i xtl = yy_loadu_256(buf - 1 - stride);
const __m256i xt = yy_loadu_256(buf - stride);
const __m256i xtr = yy_loadu_256(buf + 1 - stride);
const __m256i xbl = yy_loadu_256(buf - 1 + stride);
const __m256i xb = yy_loadu_256(buf + stride);
const __m256i xbr = yy_loadu_256(buf + 1 + stride);
const __m256i fives =
_mm256_add_epi32(xtl, _mm256_add_epi32(xtr, _mm256_add_epi32(xbr, xbl)));
const __m256i sixes = _mm256_add_epi32(xt, xb);
const __m256i fives_plus_sixes = _mm256_add_epi32(fives, sixes);
return _mm256_add_epi32(
_mm256_add_epi32(_mm256_slli_epi32(fives_plus_sixes, 2),
fives_plus_sixes),
sixes);
} | pushq %rbp
movq %rsp, %rbp
andq $-0x20, %rsp
subq $0x340, %rsp # imm = 0x340
movq %rdi, 0x130(%rsp)
movl %esi, 0x12c(%rsp)
movq 0x130(%rsp), %rdi
addq $-0x4, %rdi
movslq 0x12c(%rsp), %rcx
xorl %eax, %eax
subq %rcx, %rax
shlq $0x2, %rax
addq %rax, %rdi
callq 0x930e70
vmovdqa %ymm0, 0x100(%rsp)
movq 0x130(%rsp), %rdi
movslq 0x12c(%rsp), %rcx
xorl %eax, %eax
subq %rcx, %rax
shlq $0x2, %rax
addq %rax, %rdi
callq 0x930e70
vmovdqa %ymm0, 0xe0(%rsp)
movq 0x130(%rsp), %rdi
addq $0x4, %rdi
movslq 0x12c(%rsp), %rcx
xorl %eax, %eax
subq %rcx, %rax
shlq $0x2, %rax
addq %rax, %rdi
callq 0x930e70
vmovdqa %ymm0, 0xc0(%rsp)
movq 0x130(%rsp), %rdi
addq $-0x4, %rdi
movslq 0x12c(%rsp), %rax
shlq $0x2, %rax
addq %rax, %rdi
callq 0x930e70
vmovdqa %ymm0, 0xa0(%rsp)
movq 0x130(%rsp), %rdi
movslq 0x12c(%rsp), %rax
shlq $0x2, %rax
addq %rax, %rdi
callq 0x930e70
vmovdqa %ymm0, 0x80(%rsp)
movq 0x130(%rsp), %rdi
addq $0x4, %rdi
movslq 0x12c(%rsp), %rax
shlq $0x2, %rax
addq %rax, %rdi
callq 0x930e70
vmovdqa %ymm0, 0x60(%rsp)
vmovdqa 0x100(%rsp), %ymm1
vmovdqa 0xc0(%rsp), %ymm2
vmovdqa 0x60(%rsp), %ymm3
vmovdqa 0xa0(%rsp), %ymm0
vmovdqa %ymm3, 0x300(%rsp)
vmovdqa %ymm0, 0x2e0(%rsp)
vmovdqa 0x300(%rsp), %ymm0
vmovdqa 0x2e0(%rsp), %ymm3
vpaddd %ymm3, %ymm0, %ymm0
vmovdqa %ymm2, 0x2c0(%rsp)
vmovdqa %ymm0, 0x2a0(%rsp)
vmovdqa 0x2c0(%rsp), %ymm0
vmovdqa 0x2a0(%rsp), %ymm2
vpaddd %ymm2, %ymm0, %ymm0
vmovdqa %ymm1, 0x280(%rsp)
vmovdqa %ymm0, 0x260(%rsp)
vmovdqa 0x280(%rsp), %ymm0
vmovdqa 0x260(%rsp), %ymm1
vpaddd %ymm1, %ymm0, %ymm0
vmovdqa %ymm0, 0x40(%rsp)
vmovdqa 0xe0(%rsp), %ymm1
vmovdqa 0x80(%rsp), %ymm0
vmovdqa %ymm1, 0x240(%rsp)
vmovdqa %ymm0, 0x220(%rsp)
vmovdqa 0x240(%rsp), %ymm0
vmovdqa 0x220(%rsp), %ymm1
vpaddd %ymm1, %ymm0, %ymm0
vmovdqa %ymm0, 0x20(%rsp)
vmovdqa 0x40(%rsp), %ymm1
vmovdqa 0x20(%rsp), %ymm0
vmovdqa %ymm1, 0x200(%rsp)
vmovdqa %ymm0, 0x1e0(%rsp)
vmovdqa 0x200(%rsp), %ymm0
vmovdqa 0x1e0(%rsp), %ymm1
vpaddd %ymm1, %ymm0, %ymm0
vmovdqa %ymm0, (%rsp)
vmovdqa (%rsp), %ymm0
vmovdqa %ymm0, 0x140(%rsp)
movl $0x2, 0x13c(%rsp)
vmovdqa 0x140(%rsp), %ymm0
movl 0x13c(%rsp), %eax
vmovd %eax, %xmm1
vpslld %xmm1, %ymm0, %ymm1
vmovdqa (%rsp), %ymm0
vmovdqa %ymm1, 0x1c0(%rsp)
vmovdqa %ymm0, 0x1a0(%rsp)
vmovdqa 0x1c0(%rsp), %ymm0
vmovdqa 0x1a0(%rsp), %ymm1
vpaddd %ymm1, %ymm0, %ymm1
vmovdqa 0x20(%rsp), %ymm0
vmovdqa %ymm1, 0x180(%rsp)
vmovdqa %ymm0, 0x160(%rsp)
vmovdqa 0x180(%rsp), %ymm0
vmovdqa 0x160(%rsp), %ymm1
vpaddd %ymm1, %ymm0, %ymm0
movq %rbp, %rsp
popq %rbp
retq
nopw %cs:(%rax,%rax)
| /m-ab-s[P]aom/av1/common/x86/selfguided_avx2.c |
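cross_sum_fast_even_row weights the diagonal neighbours of the rows above and below by 5 and the vertical neighbours by 6, computing 5*fives + 6*sixes multiply-free as 4*(fives + sixes) + (fives + sixes) + sixes. A hypothetical scalar model of one output lane follows; the odd-row variant after it applies the same weighting within the current row only, with fives = left + right and sixes = centre:

#include <stdint.h>

// Hypothetical scalar model of one lane of cross_sum_fast_even_row.
static int32_t cross_sum_fast_even_row_scalar(const int32_t *buf, int stride) {
  const int32_t fives = buf[-1 - stride] + buf[1 - stride] +
                        buf[-1 + stride] + buf[1 + stride];
  const int32_t sixes = buf[-stride] + buf[stride];
  const int32_t fives_plus_sixes = fives + sixes;
  // Same strength reduction as the intrinsics: (x << 2) + x + sixes.
  return (fives_plus_sixes << 2) + fives_plus_sixes + sixes;  // 5*fives + 6*sixes
}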
cross_sum_fast_odd_row | static inline __m256i cross_sum_fast_odd_row(const int32_t *buf) {
const __m256i xl = yy_loadu_256(buf - 1);
const __m256i x = yy_loadu_256(buf);
const __m256i xr = yy_loadu_256(buf + 1);
const __m256i fives = _mm256_add_epi32(xl, xr);
const __m256i sixes = x;
const __m256i fives_plus_sixes = _mm256_add_epi32(fives, sixes);
  // Calculate 5 * fives + 6 * sixes as 4 * (fives + sixes) + (fives + sixes) + sixes.
  return _mm256_add_epi32(
      _mm256_add_epi32(_mm256_slli_epi32(fives_plus_sixes, 2),
                       fives_plus_sixes),
      sixes);
} | pushq %rbp
movq %rsp, %rbp
andq $-0x20, %rsp
subq $0x220, %rsp # imm = 0x220
movq %rdi, 0xd0(%rsp)
movq 0xd0(%rsp), %rdi
addq $-0x4, %rdi
callq 0x930e70
vmovdqa %ymm0, 0xa0(%rsp)
movq 0xd0(%rsp), %rdi
callq 0x930e70
vmovdqa %ymm0, 0x80(%rsp)
movq 0xd0(%rsp), %rdi
addq $0x4, %rdi
callq 0x930e70
vmovdqa %ymm0, 0x60(%rsp)
vmovdqa 0xa0(%rsp), %ymm1
vmovdqa 0x60(%rsp), %ymm0
vmovdqa %ymm1, 0x1e0(%rsp)
vmovdqa %ymm0, 0x1c0(%rsp)
vmovdqa 0x1e0(%rsp), %ymm0
vmovdqa 0x1c0(%rsp), %ymm1
vpaddd %ymm1, %ymm0, %ymm0
vmovdqa %ymm0, 0x40(%rsp)
vmovdqa 0x80(%rsp), %ymm0
vmovdqa %ymm0, 0x20(%rsp)
vmovdqa 0x40(%rsp), %ymm1
vmovdqa 0x20(%rsp), %ymm0
vmovdqa %ymm1, 0x1a0(%rsp)
vmovdqa %ymm0, 0x180(%rsp)
vmovdqa 0x1a0(%rsp), %ymm0
vmovdqa 0x180(%rsp), %ymm1
vpaddd %ymm1, %ymm0, %ymm0
vmovdqa %ymm0, (%rsp)
vmovdqa (%rsp), %ymm0
vmovdqa %ymm0, 0xe0(%rsp)
movl $0x2, 0xdc(%rsp)
vmovdqa 0xe0(%rsp), %ymm0
movl 0xdc(%rsp), %eax
vmovd %eax, %xmm1
vpslld %xmm1, %ymm0, %ymm1
vmovdqa (%rsp), %ymm0
vmovdqa %ymm1, 0x160(%rsp)
vmovdqa %ymm0, 0x140(%rsp)
vmovdqa 0x160(%rsp), %ymm0
vmovdqa 0x140(%rsp), %ymm1
vpaddd %ymm1, %ymm0, %ymm1
vmovdqa 0x20(%rsp), %ymm0
vmovdqa %ymm1, 0x120(%rsp)
vmovdqa %ymm0, 0x100(%rsp)
vmovdqa 0x120(%rsp), %ymm0
vmovdqa 0x100(%rsp), %ymm1
vpaddd %ymm1, %ymm0, %ymm0
movq %rbp, %rsp
popq %rbp
retq
nopw %cs:(%rax,%rax)
| /m-ab-s[P]aom/av1/common/x86/selfguided_avx2.c |
cross_sum | static inline __m256i cross_sum(const int32_t *buf, int stride) {
const __m256i xtl = yy_loadu_256(buf - 1 - stride);
const __m256i xt = yy_loadu_256(buf - stride);
const __m256i xtr = yy_loadu_256(buf + 1 - stride);
const __m256i xl = yy_loadu_256(buf - 1);
const __m256i x = yy_loadu_256(buf);
const __m256i xr = yy_loadu_256(buf + 1);
const __m256i xbl = yy_loadu_256(buf - 1 + stride);
const __m256i xb = yy_loadu_256(buf + stride);
const __m256i xbr = yy_loadu_256(buf + 1 + stride);
const __m256i fours = _mm256_add_epi32(
xl, _mm256_add_epi32(xt, _mm256_add_epi32(xr, _mm256_add_epi32(xb, x))));
const __m256i threes =
_mm256_add_epi32(xtl, _mm256_add_epi32(xtr, _mm256_add_epi32(xbr, xbl)));
  // Calculate 4 * fours + 3 * threes as ((fours + threes) << 2) - threes.
  return _mm256_sub_epi32(_mm256_slli_epi32(_mm256_add_epi32(fours, threes), 2),
                          threes);
} | pushq %rbp
movq %rsp, %rbp
andq $-0x20, %rsp
subq $0x400, %rsp # imm = 0x400
movq %rdi, 0x170(%rsp)
movl %esi, 0x16c(%rsp)
movq 0x170(%rsp), %rdi
addq $-0x4, %rdi
movslq 0x16c(%rsp), %rcx
xorl %eax, %eax
subq %rcx, %rax
shlq $0x2, %rax
addq %rax, %rdi
callq 0x930e70
vmovdqa %ymm0, 0x140(%rsp)
movq 0x170(%rsp), %rdi
movslq 0x16c(%rsp), %rcx
xorl %eax, %eax
subq %rcx, %rax
shlq $0x2, %rax
addq %rax, %rdi
callq 0x930e70
vmovdqa %ymm0, 0x120(%rsp)
movq 0x170(%rsp), %rdi
addq $0x4, %rdi
movslq 0x16c(%rsp), %rcx
xorl %eax, %eax
subq %rcx, %rax
shlq $0x2, %rax
addq %rax, %rdi
callq 0x930e70
vmovdqa %ymm0, 0x100(%rsp)
movq 0x170(%rsp), %rdi
addq $-0x4, %rdi
callq 0x930e70
vmovdqa %ymm0, 0xe0(%rsp)
movq 0x170(%rsp), %rdi
callq 0x930e70
vmovdqa %ymm0, 0xc0(%rsp)
movq 0x170(%rsp), %rdi
addq $0x4, %rdi
callq 0x930e70
vmovdqa %ymm0, 0xa0(%rsp)
movq 0x170(%rsp), %rdi
addq $-0x4, %rdi
movslq 0x16c(%rsp), %rax
shlq $0x2, %rax
addq %rax, %rdi
callq 0x930e70
vmovdqa %ymm0, 0x80(%rsp)
movq 0x170(%rsp), %rdi
movslq 0x16c(%rsp), %rax
shlq $0x2, %rax
addq %rax, %rdi
callq 0x930e70
vmovdqa %ymm0, 0x60(%rsp)
movq 0x170(%rsp), %rdi
addq $0x4, %rdi
movslq 0x16c(%rsp), %rax
shlq $0x2, %rax
addq %rax, %rdi
callq 0x930e70
vmovdqa %ymm0, 0x40(%rsp)
vmovdqa 0xe0(%rsp), %ymm1
vmovdqa 0x120(%rsp), %ymm2
vmovdqa 0xa0(%rsp), %ymm3
vmovdqa 0x60(%rsp), %ymm4
vmovdqa 0xc0(%rsp), %ymm0
vmovdqa %ymm4, 0x3c0(%rsp)
vmovdqa %ymm0, 0x3a0(%rsp)
vmovdqa 0x3c0(%rsp), %ymm0
vmovdqa 0x3a0(%rsp), %ymm4
vpaddd %ymm4, %ymm0, %ymm0
vmovdqa %ymm3, 0x380(%rsp)
vmovdqa %ymm0, 0x360(%rsp)
vmovdqa 0x380(%rsp), %ymm0
vmovdqa 0x360(%rsp), %ymm3
vpaddd %ymm3, %ymm0, %ymm0
vmovdqa %ymm2, 0x340(%rsp)
vmovdqa %ymm0, 0x320(%rsp)
vmovdqa 0x340(%rsp), %ymm0
vmovdqa 0x320(%rsp), %ymm2
vpaddd %ymm2, %ymm0, %ymm0
vmovdqa %ymm1, 0x300(%rsp)
vmovdqa %ymm0, 0x2e0(%rsp)
vmovdqa 0x300(%rsp), %ymm0
vmovdqa 0x2e0(%rsp), %ymm1
vpaddd %ymm1, %ymm0, %ymm0
vmovdqa %ymm0, 0x20(%rsp)
vmovdqa 0x140(%rsp), %ymm1
vmovdqa 0x100(%rsp), %ymm2
vmovdqa 0x40(%rsp), %ymm3
vmovdqa 0x80(%rsp), %ymm0
vmovdqa %ymm3, 0x2c0(%rsp)
vmovdqa %ymm0, 0x2a0(%rsp)
vmovdqa 0x2c0(%rsp), %ymm0
vmovdqa 0x2a0(%rsp), %ymm3
vpaddd %ymm3, %ymm0, %ymm0
vmovdqa %ymm2, 0x280(%rsp)
vmovdqa %ymm0, 0x260(%rsp)
vmovdqa 0x280(%rsp), %ymm0
vmovdqa 0x260(%rsp), %ymm2
vpaddd %ymm2, %ymm0, %ymm0
vmovdqa %ymm1, 0x240(%rsp)
vmovdqa %ymm0, 0x220(%rsp)
vmovdqa 0x240(%rsp), %ymm0
vmovdqa 0x220(%rsp), %ymm1
vpaddd %ymm1, %ymm0, %ymm0
vmovdqa %ymm0, (%rsp)
vmovdqa 0x20(%rsp), %ymm1
vmovdqa (%rsp), %ymm0
vmovdqa %ymm1, 0x200(%rsp)
vmovdqa %ymm0, 0x1e0(%rsp)
vmovdqa 0x200(%rsp), %ymm0
vmovdqa 0x1e0(%rsp), %ymm1
vpaddd %ymm1, %ymm0, %ymm0
vmovdqa %ymm0, 0x180(%rsp)
movl $0x2, 0x17c(%rsp)
vmovdqa 0x180(%rsp), %ymm0
movl 0x17c(%rsp), %eax
vmovd %eax, %xmm1
vpslld %xmm1, %ymm0, %ymm1
vmovdqa (%rsp), %ymm0
vmovdqa %ymm1, 0x1c0(%rsp)
vmovdqa %ymm0, 0x1a0(%rsp)
vmovdqa 0x1c0(%rsp), %ymm0
vmovdqa 0x1a0(%rsp), %ymm1
vpsubd %ymm1, %ymm0, %ymm0
movq %rbp, %rsp
popq %rbp
retq
nop
| /m-ab-s[P]aom/av1/common/x86/selfguided_avx2.c |
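cross_sum covers the full 3x3 neighbourhood: the plus-shaped taps (centre, left, right, above, below) weigh 4 and the four diagonal taps weigh 3, folded into 4*(fours + threes) - threes to avoid multiplies. A hypothetical per-sample scalar sketch of the same weighting:

#include <stdint.h>

// Hypothetical scalar model of one lane of cross_sum: 4*fours + 3*threes.
static int32_t cross_sum_scalar(const int32_t *buf, int stride) {
  const int32_t fours =
      buf[-1] + buf[1] + buf[-stride] + buf[stride] + buf[0];
  const int32_t threes = buf[-1 - stride] + buf[1 - stride] +
                         buf[-1 + stride] + buf[1 + stride];
  return ((fours + threes) << 2) - threes;
}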
av1_warp_affine_avx2 | void av1_warp_affine_avx2(const int32_t *mat, const uint8_t *ref, int width,
int height, int stride, uint8_t *pred, int p_col,
int p_row, int p_width, int p_height, int p_stride,
int subsampling_x, int subsampling_y,
ConvolveParams *conv_params, int16_t alpha,
int16_t beta, int16_t gamma, int16_t delta) {
__m256i horz_out[8];
int i, j, k;
const int bd = 8;
const int reduce_bits_horiz = conv_params->round_0;
const int reduce_bits_vert = conv_params->is_compound
? conv_params->round_1
: 2 * FILTER_BITS - reduce_bits_horiz;
const int offset_bits_horiz = bd + FILTER_BITS - 1;
assert(IMPLIES(conv_params->is_compound, conv_params->dst != NULL));
const int offset_bits_vert = bd + 2 * FILTER_BITS - reduce_bits_horiz;
const __m256i reduce_bits_vert_const =
_mm256_set1_epi32(((1 << reduce_bits_vert) >> 1));
const __m256i res_add_const = _mm256_set1_epi32(1 << offset_bits_vert);
const int round_bits =
2 * FILTER_BITS - conv_params->round_0 - conv_params->round_1;
const int offset_bits = bd + 2 * FILTER_BITS - conv_params->round_0;
assert(IMPLIES(conv_params->do_average, conv_params->is_compound));
const __m256i round_const = _mm256_set1_epi16(
(1 << offset_bits_horiz) + ((1 << reduce_bits_horiz) >> 1));
const __m128i shift = _mm_cvtsi32_si128(reduce_bits_horiz);
__m256i res_sub_const, round_bits_const, wt;
unpack_weights_and_set_round_const_avx2(conv_params, round_bits, offset_bits,
&res_sub_const, &round_bits_const,
&wt);
__m256i res_add_const_1;
if (conv_params->is_compound == 1) {
res_add_const_1 = _mm256_add_epi32(reduce_bits_vert_const, res_add_const);
} else {
res_add_const_1 = _mm256_set1_epi32(-(1 << (bd + reduce_bits_vert - 1)) +
((1 << reduce_bits_vert) >> 1));
}
const int32_t const1 = alpha * (-4) + beta * (-4) +
(1 << (WARPEDDIFF_PREC_BITS - 1)) +
(WARPEDPIXEL_PREC_SHIFTS << WARPEDDIFF_PREC_BITS);
const int32_t const2 = gamma * (-4) + delta * (-4) +
(1 << (WARPEDDIFF_PREC_BITS - 1)) +
(WARPEDPIXEL_PREC_SHIFTS << WARPEDDIFF_PREC_BITS);
const int32_t const3 = ((1 << WARP_PARAM_REDUCE_BITS) - 1);
const int16_t const4 = (1 << (bd + FILTER_BITS - reduce_bits_horiz - 1));
const int16_t const5 = (1 << (FILTER_BITS - reduce_bits_horiz));
__m256i shuffle_src[4];
shuffle_src[0] = _mm256_load_si256((__m256i *)shuffle_src0);
shuffle_src[1] = _mm256_load_si256((__m256i *)shuffle_src1);
shuffle_src[2] = _mm256_load_si256((__m256i *)shuffle_src2);
shuffle_src[3] = _mm256_load_si256((__m256i *)shuffle_src3);
for (i = 0; i < p_height; i += 8) {
for (j = 0; j < p_width; j += 8) {
const int32_t src_x = (p_col + j + 4) << subsampling_x;
const int32_t src_y = (p_row + i + 4) << subsampling_y;
const int64_t dst_x =
(int64_t)mat[2] * src_x + (int64_t)mat[3] * src_y + (int64_t)mat[0];
const int64_t dst_y =
(int64_t)mat[4] * src_x + (int64_t)mat[5] * src_y + (int64_t)mat[1];
const int64_t x4 = dst_x >> subsampling_x;
const int64_t y4 = dst_y >> subsampling_y;
int32_t ix4 = (int32_t)(x4 >> WARPEDMODEL_PREC_BITS);
int32_t sx4 = x4 & ((1 << WARPEDMODEL_PREC_BITS) - 1);
int32_t iy4 = (int32_t)(y4 >> WARPEDMODEL_PREC_BITS);
int32_t sy4 = y4 & ((1 << WARPEDMODEL_PREC_BITS) - 1);
// Add in all the constant terms, including rounding and offset
sx4 += const1;
sy4 += const2;
sx4 &= ~const3;
sy4 &= ~const3;
// Horizontal filter
// If the block is aligned such that, after clamping, every sample
// would be taken from the leftmost/rightmost column, then we can
// skip the expensive horizontal filter.
if (ix4 <= -7) {
int iy, row = 0;
for (k = -7; k <= (AOMMIN(8, p_height - i) - 2); k += 2) {
iy = iy4 + k;
iy = clamp(iy, 0, height - 1);
const __m256i temp_0 =
_mm256_set1_epi16(const4 + ref[iy * stride] * const5);
iy = iy4 + k + 1;
iy = clamp(iy, 0, height - 1);
const __m256i temp_1 =
_mm256_set1_epi16(const4 + ref[iy * stride] * const5);
horz_out[row] = _mm256_blend_epi32(temp_0, temp_1, 0xf0);
row += 1;
}
iy = iy4 + k;
iy = clamp(iy, 0, height - 1);
horz_out[row] = _mm256_set1_epi16(const4 + ref[iy * stride] * const5);
} else if (ix4 >= width + 6) {
int iy, row = 0;
for (k = -7; k <= (AOMMIN(8, p_height - i) - 2); k += 2) {
iy = iy4 + k;
iy = clamp(iy, 0, height - 1);
const __m256i temp_0 = _mm256_set1_epi16(
const4 + ref[iy * stride + (width - 1)] * const5);
iy = iy4 + k + 1;
iy = clamp(iy, 0, height - 1);
const __m256i temp_1 = _mm256_set1_epi16(
const4 + ref[iy * stride + (width - 1)] * const5);
horz_out[row] = _mm256_blend_epi32(temp_0, temp_1, 0xf0);
row += 1;
}
iy = iy4 + k;
iy = clamp(iy, 0, height - 1);
horz_out[row] =
_mm256_set1_epi16(const4 + ref[iy * stride + (width - 1)] * const5);
} else if (((ix4 - 7) < 0) || ((ix4 + 9) > width)) {
const int out_of_boundary_left = -(ix4 - 6);
const int out_of_boundary_right = (ix4 + 8) - width;
int iy, sx, row = 0;
for (k = -7; k <= (AOMMIN(8, p_height - i) - 2); k += 2) {
iy = iy4 + k;
iy = clamp(iy, 0, height - 1);
__m128i src0 =
_mm_loadu_si128((__m128i *)(ref + iy * stride + ix4 - 7));
iy = iy4 + k + 1;
iy = clamp(iy, 0, height - 1);
__m128i src1 =
_mm_loadu_si128((__m128i *)(ref + iy * stride + ix4 - 7));
if (out_of_boundary_left >= 0) {
const __m128i shuffle_reg_left =
_mm_loadu_si128((__m128i *)warp_pad_left[out_of_boundary_left]);
src0 = _mm_shuffle_epi8(src0, shuffle_reg_left);
src1 = _mm_shuffle_epi8(src1, shuffle_reg_left);
}
if (out_of_boundary_right >= 0) {
const __m128i shuffle_reg_right = _mm_loadu_si128(
(__m128i *)warp_pad_right[out_of_boundary_right]);
src0 = _mm_shuffle_epi8(src0, shuffle_reg_right);
src1 = _mm_shuffle_epi8(src1, shuffle_reg_right);
}
sx = sx4 + beta * (k + 4);
const __m256i src_01 =
_mm256_inserti128_si256(_mm256_castsi128_si256(src0), src1, 0x1);
horizontal_filter_avx2(src_01, horz_out, sx, alpha, beta, row,
shuffle_src, &round_const, &shift);
row += 1;
}
iy = iy4 + k;
iy = clamp(iy, 0, height - 1);
__m128i src = _mm_loadu_si128((__m128i *)(ref + iy * stride + ix4 - 7));
if (out_of_boundary_left >= 0) {
const __m128i shuffle_reg_left =
_mm_loadu_si128((__m128i *)warp_pad_left[out_of_boundary_left]);
src = _mm_shuffle_epi8(src, shuffle_reg_left);
}
if (out_of_boundary_right >= 0) {
const __m128i shuffle_reg_right =
_mm_loadu_si128((__m128i *)warp_pad_right[out_of_boundary_right]);
src = _mm_shuffle_epi8(src, shuffle_reg_right);
}
sx = sx4 + beta * (k + 4);
const __m256i src_01 = _mm256_castsi128_si256(src);
__m256i coeff[4];
prepare_horizontal_filter_coeff(alpha, sx, coeff);
filter_src_pixels_avx2(src_01, horz_out, coeff, shuffle_src,
&round_const, &shift, row);
} else {
prepare_warp_horizontal_filter_avx2(
ref, horz_out, stride, ix4, iy4, sx4, alpha, beta, p_height, height,
i, &round_const, &shift, shuffle_src);
}
// Vertical filter
prepare_warp_vertical_filter_avx2(
pred, horz_out, conv_params, gamma, delta, p_height, p_stride,
p_width, i, j, sy4, reduce_bits_vert, &res_add_const_1, round_bits,
&res_sub_const, &round_bits_const, &wt);
}
}
} | pushq %rbp
movq %rsp, %rbp
pushq %r15
pushq %r14
pushq %r13
pushq %r12
pushq %rbx
andq $-0x20, %rsp
subq $0xa80, %rsp # imm = 0xA80
movw 0x68(%rbp), %ax
movw 0x60(%rbp), %ax
movw 0x58(%rbp), %ax
movw 0x50(%rbp), %ax
movq 0x48(%rbp), %rax
movl 0x40(%rbp), %eax
movl 0x38(%rbp), %eax
movl 0x30(%rbp), %eax
movl 0x28(%rbp), %eax
movl 0x20(%rbp), %eax
movl 0x18(%rbp), %eax
movl 0x10(%rbp), %eax
movq %rdi, 0x618(%rsp)
movq %rsi, 0x610(%rsp)
movl %edx, 0x60c(%rsp)
movl %ecx, 0x608(%rsp)
movl %r8d, 0x604(%rsp)
movq %r9, 0x5f8(%rsp)
movl $0x8, 0x4d0(%rsp)
movq 0x48(%rbp), %rax
movl 0x14(%rax), %eax
movl %eax, 0x4cc(%rsp)
movq 0x48(%rbp), %rax
cmpl $0x0, 0x20(%rax)
je 0x932362
movq 0x48(%rbp), %rax
movl 0x18(%rax), %eax
movl %eax, 0xbc(%rsp)
jmp 0x932375
movl $0xe, %eax
subl 0x4cc(%rsp), %eax
movl %eax, 0xbc(%rsp)
movl 0xbc(%rsp), %eax
movl %eax, 0x4c8(%rsp)
movl $0xe, 0x4c4(%rsp)
movl 0x4cc(%rsp), %ecx
movl $0x16, %eax
movl %eax, 0xb0(%rsp)
subl %ecx, %eax
movl %eax, 0x4c0(%rsp)
movb 0x4c8(%rsp), %cl
movl $0x1, %eax
movl %eax, %edx
shll %cl, %edx
movl %edx, %ecx
sarl %ecx
movl %ecx, 0x62c(%rsp)
movl 0x62c(%rsp), %ecx
movl %ecx, 0xac(%rsp)
movl %ecx, 0x81c(%rsp)
movl %ecx, 0x818(%rsp)
movl %ecx, 0x814(%rsp)
movl %ecx, 0x810(%rsp)
movl %ecx, 0x80c(%rsp)
movl %ecx, 0x808(%rsp)
movl %ecx, 0x804(%rsp)
movl %ecx, 0x800(%rsp)
movl 0x804(%rsp), %r9d
movl 0x808(%rsp), %r8d
movl 0x80c(%rsp), %edi
movl 0x814(%rsp), %esi
movl 0x818(%rsp), %edx
movl 0x81c(%rsp), %ecx
vmovd 0x800(%rsp), %xmm0
vpinsrd $0x1, %r9d, %xmm0, %xmm0
vpinsrd $0x2, %r8d, %xmm0, %xmm0
vpinsrd $0x3, %edi, %xmm0, %xmm0
vmovd 0x810(%rsp), %xmm1
vpinsrd $0x1, %esi, %xmm1, %xmm1
vpinsrd $0x2, %edx, %xmm1, %xmm1
vpinsrd $0x3, %ecx, %xmm1, %xmm1
vmovdqa %xmm1, 0x7f0(%rsp)
vmovdqa %xmm0, 0x7e0(%rsp)
vmovaps 0x7e0(%rsp), %ymm0
vmovaps %ymm0, 0x4a0(%rsp)
movb 0x4c0(%rsp), %cl
movl %eax, %edx
shll %cl, %edx
movl 0xb0(%rsp), %ecx
movl %edx, 0x628(%rsp)
movl 0x628(%rsp), %edx
movl %edx, 0xb4(%rsp)
movl %edx, 0x85c(%rsp)
movl %edx, 0x858(%rsp)
movl %edx, 0x854(%rsp)
movl %edx, 0x850(%rsp)
movl %edx, 0x84c(%rsp)
movl %edx, 0x848(%rsp)
movl %edx, 0x844(%rsp)
movl %edx, 0x840(%rsp)
movl 0x844(%rsp), %r10d
movl 0x848(%rsp), %r9d
movl 0x84c(%rsp), %r8d
movl 0x854(%rsp), %edi
movl 0x858(%rsp), %esi
movl 0x85c(%rsp), %edx
vmovd 0x840(%rsp), %xmm0
vpinsrd $0x1, %r10d, %xmm0, %xmm0
vpinsrd $0x2, %r9d, %xmm0, %xmm0
vpinsrd $0x3, %r8d, %xmm0, %xmm0
vmovd 0x850(%rsp), %xmm1
vpinsrd $0x1, %edi, %xmm1, %xmm1
vpinsrd $0x2, %esi, %xmm1, %xmm1
vpinsrd $0x3, %edx, %xmm1, %xmm1
vmovdqa %xmm1, 0x830(%rsp)
vmovdqa %xmm0, 0x820(%rsp)
vmovaps 0x820(%rsp), %ymm0
vmovaps %ymm0, 0x480(%rsp)
movq 0x48(%rbp), %rdx
movl 0x14(%rdx), %esi
movl 0x18(%rdx), %edx
addl %edx, %esi
movl $0xe, %edx
subl %esi, %edx
movl %edx, 0x47c(%rsp)
movq 0x48(%rbp), %rdx
movl 0x14(%rdx), %edx
subl %edx, %ecx
movl %ecx, 0x478(%rsp)
movb 0x4cc(%rsp), %cl
shll %cl, %eax
shrl %eax
addl $0x4000, %eax # imm = 0x4000
movw %ax, 0x63e(%rsp)
movw 0x63e(%rsp), %ax
movw %ax, 0xba(%rsp)
movw %ax, 0x8de(%rsp)
movw %ax, 0x8dc(%rsp)
movw %ax, 0x8da(%rsp)
movw %ax, 0x8d8(%rsp)
movw %ax, 0x8d6(%rsp)
movw %ax, 0x8d4(%rsp)
movw %ax, 0x8d2(%rsp)
movw %ax, 0x8d0(%rsp)
movw %ax, 0x8ce(%rsp)
movw %ax, 0x8cc(%rsp)
movw %ax, 0x8ca(%rsp)
movw %ax, 0x8c8(%rsp)
movw %ax, 0x8c6(%rsp)
movw %ax, 0x8c4(%rsp)
movw %ax, 0x8c2(%rsp)
movw %ax, 0x8c0(%rsp)
movzwl 0x8c0(%rsp), %eax
vmovd %eax, %xmm0
movzwl 0x8c2(%rsp), %eax
vpinsrw $0x1, %eax, %xmm0, %xmm0
movzwl 0x8c4(%rsp), %eax
vpinsrw $0x2, %eax, %xmm0, %xmm0
movzwl 0x8c6(%rsp), %eax
vpinsrw $0x3, %eax, %xmm0, %xmm0
movzwl 0x8c8(%rsp), %eax
vpinsrw $0x4, %eax, %xmm0, %xmm0
movzwl 0x8ca(%rsp), %eax
vpinsrw $0x5, %eax, %xmm0, %xmm0
movzwl 0x8cc(%rsp), %eax
vpinsrw $0x6, %eax, %xmm0, %xmm0
movzwl 0x8ce(%rsp), %eax
vpinsrw $0x7, %eax, %xmm0, %xmm0
movzwl 0x8d0(%rsp), %eax
vmovd %eax, %xmm1
movzwl 0x8d2(%rsp), %eax
vpinsrw $0x1, %eax, %xmm1, %xmm1
movzwl 0x8d4(%rsp), %eax
vpinsrw $0x2, %eax, %xmm1, %xmm1
movzwl 0x8d6(%rsp), %eax
vpinsrw $0x3, %eax, %xmm1, %xmm1
movzwl 0x8d8(%rsp), %eax
vpinsrw $0x4, %eax, %xmm1, %xmm1
movzwl 0x8da(%rsp), %eax
vpinsrw $0x5, %eax, %xmm1, %xmm1
movzwl 0x8dc(%rsp), %eax
vpinsrw $0x6, %eax, %xmm1, %xmm1
movzwl 0x8de(%rsp), %eax
vpinsrw $0x7, %eax, %xmm1, %xmm1
vmovdqa %xmm1, 0x8b0(%rsp)
vmovdqa %xmm0, 0x8a0(%rsp)
vmovaps 0x8a0(%rsp), %ymm0
vmovaps %ymm0, 0x440(%rsp)
movl 0x4cc(%rsp), %eax
movl %eax, 0x65c(%rsp)
vmovd 0x65c(%rsp), %xmm0
vmovdqa %xmm0, 0x640(%rsp)
vmovdqa 0x640(%rsp), %xmm0
vmovdqa %xmm0, 0x430(%rsp)
movq 0x48(%rbp), %rdi
movl 0x47c(%rsp), %esi
movl 0x478(%rsp), %edx
leaq 0x400(%rsp), %rcx
leaq 0x3e0(%rsp), %r8
leaq 0x3c0(%rsp), %r9
vzeroupper
callq 0x9341b0
movq 0x48(%rbp), %rax
cmpl $0x1, 0x20(%rax)
jne 0x9327f1
vmovdqa 0x4a0(%rsp), %ymm1
vmovdqa 0x480(%rsp), %ymm0
vmovdqa %ymm1, 0x680(%rsp)
vmovdqa %ymm0, 0x660(%rsp)
vmovdqa 0x680(%rsp), %ymm0
vmovdqa 0x660(%rsp), %ymm1
vpaddd %ymm1, %ymm0, %ymm0
vmovdqa %ymm0, 0x3a0(%rsp)
jmp 0x9328f9
movb 0x4c8(%rsp), %cl
movb %cl, 0xa3(%rsp)
addb $0x7, %cl
movl $0x1, %eax
movl %eax, %edx
shll %cl, %edx
movb 0xa3(%rsp), %cl
movl %edx, 0xa4(%rsp)
shll %cl, %eax
movl 0xa4(%rsp), %ecx
sarl %eax
subl %ecx, %eax
movl %eax, 0x624(%rsp)
movl 0x624(%rsp), %eax
movl %eax, 0xa8(%rsp)
movl %eax, 0x89c(%rsp)
movl %eax, 0x898(%rsp)
movl %eax, 0x894(%rsp)
movl %eax, 0x890(%rsp)
movl %eax, 0x88c(%rsp)
movl %eax, 0x888(%rsp)
movl %eax, 0x884(%rsp)
movl %eax, 0x880(%rsp)
movl 0x884(%rsp), %edx
movl 0x888(%rsp), %ecx
movl 0x88c(%rsp), %eax
movl 0x894(%rsp), %r8d
movl 0x898(%rsp), %edi
movl 0x89c(%rsp), %esi
vmovd 0x890(%rsp), %xmm0
vpinsrd $0x1, %r8d, %xmm0, %xmm0
vpinsrd $0x2, %edi, %xmm0, %xmm0
vpinsrd $0x3, %esi, %xmm0, %xmm1
vmovd 0x880(%rsp), %xmm0
vpinsrd $0x1, %edx, %xmm0, %xmm0
vpinsrd $0x2, %ecx, %xmm0, %xmm0
vpinsrd $0x3, %eax, %xmm0, %xmm2
vmovaps %xmm2, %xmm0
vinserti128 $0x1, %xmm1, %ymm0, %ymm0
vmovdqa %ymm0, 0x860(%rsp)
vmovdqa 0x860(%rsp), %ymm0
vmovdqa %ymm0, 0x3a0(%rsp)
movswl 0x50(%rbp), %eax
imull $-0x4, %eax, %eax
movswl 0x58(%rbp), %ecx
imull $-0x4, %ecx, %ecx
addl %ecx, %eax
addl $0x200, %eax # imm = 0x200
addl $0x10000, %eax # imm = 0x10000
movl %eax, 0x39c(%rsp)
movswl 0x60(%rbp), %eax
imull $-0x4, %eax, %eax
movswl 0x68(%rbp), %ecx
imull $-0x4, %ecx, %ecx
addl %ecx, %eax
addl $0x200, %eax # imm = 0x200
addl $0x10000, %eax # imm = 0x10000
movl %eax, 0x398(%rsp)
movl $0x3f, 0x394(%rsp)
movl $0xf, %ecx
subl 0x4cc(%rsp), %ecx
subl $0x1, %ecx
movl $0x1, %eax
shll %cl, %eax
movw %ax, 0x392(%rsp)
movl $0x7, %ecx
subl 0x4cc(%rsp), %ecx
movl $0x1, %eax
shll %cl, %eax
movw %ax, 0x390(%rsp)
leaq 0x1e84fa(%rip), %rax # 0xb1ae80
movq %rax, 0x6c0(%rsp)
movq 0x6c0(%rsp), %rax
vmovdqa (%rax), %ymm0
vmovdqa %ymm0, 0x300(%rsp)
leaq 0x1e84f6(%rip), %rax # 0xb1aea0
movq %rax, 0x6b8(%rsp)
movq 0x6b8(%rsp), %rax
vmovdqa (%rax), %ymm0
vmovdqa %ymm0, 0x320(%rsp)
leaq 0x1e84f2(%rip), %rax # 0xb1aec0
movq %rax, 0x6b0(%rsp)
movq 0x6b0(%rsp), %rax
vmovdqa (%rax), %ymm0
vmovdqa %ymm0, 0x340(%rsp)
leaq 0x1e84ee(%rip), %rax # 0xb1aee0
movq %rax, 0x6a8(%rsp)
movq 0x6a8(%rsp), %rax
vmovdqa (%rax), %ymm0
vmovdqa %ymm0, 0x360(%rsp)
movl $0x0, 0x4dc(%rsp)
movl 0x4dc(%rsp), %eax
cmpl 0x28(%rbp), %eax
jge 0x93419e
movl $0x0, 0x4d8(%rsp)
movl 0x4d8(%rsp), %eax
cmpl 0x20(%rbp), %eax
jge 0x934186
movl 0x10(%rbp), %eax
addl 0x4d8(%rsp), %eax
addl $0x4, %eax
movl 0x38(%rbp), %ecx
shll %cl, %eax
movl %eax, 0x2fc(%rsp)
movl 0x18(%rbp), %eax
addl 0x4dc(%rsp), %eax
addl $0x4, %eax
movl 0x40(%rbp), %ecx
shll %cl, %eax
movl %eax, 0x2f8(%rsp)
movq 0x618(%rsp), %rax
movslq 0x8(%rax), %rax
movslq 0x2fc(%rsp), %rcx
imulq %rcx, %rax
movq 0x618(%rsp), %rcx
movslq 0xc(%rcx), %rcx
movslq 0x2f8(%rsp), %rdx
imulq %rdx, %rcx
addq %rcx, %rax
movq 0x618(%rsp), %rcx
movslq (%rcx), %rcx
addq %rcx, %rax
movq %rax, 0x2f0(%rsp)
movq 0x618(%rsp), %rax
movslq 0x10(%rax), %rax
movslq 0x2fc(%rsp), %rcx
imulq %rcx, %rax
movq 0x618(%rsp), %rcx
movslq 0x14(%rcx), %rcx
movslq 0x2f8(%rsp), %rdx
imulq %rdx, %rcx
addq %rcx, %rax
movq 0x618(%rsp), %rcx
movslq 0x4(%rcx), %rcx
addq %rcx, %rax
movq %rax, 0x2e8(%rsp)
movq 0x2f0(%rsp), %rax
movl 0x38(%rbp), %ecx
sarq %cl, %rax
movq %rax, 0x2e0(%rsp)
movq 0x2e8(%rsp), %rax
movl 0x40(%rbp), %ecx
sarq %cl, %rax
movq %rax, 0x2d8(%rsp)
movq 0x2e0(%rsp), %rax
sarq $0x10, %rax
movl %eax, 0x2d4(%rsp)
movq 0x2e0(%rsp), %rax
andq $0xffff, %rax # imm = 0xFFFF
movl %eax, 0x2d0(%rsp)
movq 0x2d8(%rsp), %rax
sarq $0x10, %rax
movl %eax, 0x2cc(%rsp)
movq 0x2d8(%rsp), %rax
andq $0xffff, %rax # imm = 0xFFFF
movl %eax, 0x2c8(%rsp)
movl 0x39c(%rsp), %eax
addl 0x2d0(%rsp), %eax
movl %eax, 0x2d0(%rsp)
movl 0x398(%rsp), %eax
addl 0x2c8(%rsp), %eax
movl %eax, 0x2c8(%rsp)
movl 0x2d0(%rsp), %eax
andl $-0x40, %eax
movl %eax, 0x2d0(%rsp)
movl 0x2c8(%rsp), %eax
andl $-0x40, %eax
movl %eax, 0x2c8(%rsp)
cmpl $-0x7, 0x2d4(%rsp)
jg 0x9332b8
movl $0x0, 0x2c0(%rsp)
movl $0xfffffff9, 0x4d4(%rsp) # imm = 0xFFFFFFF9
movl 0x4d4(%rsp), %eax
movl %eax, 0x9c(%rsp)
movl 0x28(%rbp), %ecx
subl 0x4dc(%rsp), %ecx
movl $0x8, %eax
cmpl %ecx, %eax
jge 0x932c25
movl $0x8, %eax
movl %eax, 0x98(%rsp)
jmp 0x932c36
movl 0x28(%rbp), %eax
subl 0x4dc(%rsp), %eax
movl %eax, 0x98(%rsp)
movl 0x9c(%rsp), %eax
movl 0x98(%rsp), %ecx
subl $0x2, %ecx
cmpl %ecx, %eax
jg 0x9330a9
movl 0x2cc(%rsp), %eax
movl 0x4d4(%rsp), %ecx
addl %ecx, %eax
movl %eax, 0x2c4(%rsp)
movl 0x2c4(%rsp), %edi
movl 0x608(%rsp), %edx
decl %edx
xorl %esi, %esi
movl %esi, 0x90(%rsp)
vzeroupper
callq 0x9348a0
movl 0x90(%rsp), %esi
movl %eax, 0x2c4(%rsp)
movzwl 0x392(%rsp), %eax
movq 0x610(%rsp), %rcx
movl 0x2c4(%rsp), %edx
movl 0x604(%rsp), %edi
imull %edi, %edx
movslq %edx, %rdx
movzbl (%rcx,%rdx), %ecx
movzwl 0x390(%rsp), %edx
imull %edx, %ecx
addl %ecx, %eax
movw %ax, 0x63c(%rsp)
movw 0x63c(%rsp), %ax
movw %ax, 0x94(%rsp)
movw %ax, 0x91e(%rsp)
movw %ax, 0x91c(%rsp)
movw %ax, 0x91a(%rsp)
movw %ax, 0x918(%rsp)
movw %ax, 0x916(%rsp)
movw %ax, 0x914(%rsp)
movw %ax, 0x912(%rsp)
movw %ax, 0x910(%rsp)
movw %ax, 0x90e(%rsp)
movw %ax, 0x90c(%rsp)
movw %ax, 0x90a(%rsp)
movw %ax, 0x908(%rsp)
movw %ax, 0x906(%rsp)
movw %ax, 0x904(%rsp)
movw %ax, 0x902(%rsp)
movw %ax, 0x900(%rsp)
movzwl 0x900(%rsp), %eax
vmovd %eax, %xmm0
movzwl 0x902(%rsp), %eax
vpinsrw $0x1, %eax, %xmm0, %xmm0
movzwl 0x904(%rsp), %eax
vpinsrw $0x2, %eax, %xmm0, %xmm0
movzwl 0x906(%rsp), %eax
vpinsrw $0x3, %eax, %xmm0, %xmm0
movzwl 0x908(%rsp), %eax
vpinsrw $0x4, %eax, %xmm0, %xmm0
movzwl 0x90a(%rsp), %eax
vpinsrw $0x5, %eax, %xmm0, %xmm0
movzwl 0x90c(%rsp), %eax
vpinsrw $0x6, %eax, %xmm0, %xmm0
movzwl 0x90e(%rsp), %eax
vpinsrw $0x7, %eax, %xmm0, %xmm0
movzwl 0x910(%rsp), %eax
vmovd %eax, %xmm1
movzwl 0x912(%rsp), %eax
vpinsrw $0x1, %eax, %xmm1, %xmm1
movzwl 0x914(%rsp), %eax
vpinsrw $0x2, %eax, %xmm1, %xmm1
movzwl 0x916(%rsp), %eax
vpinsrw $0x3, %eax, %xmm1, %xmm1
movzwl 0x918(%rsp), %eax
vpinsrw $0x4, %eax, %xmm1, %xmm1
movzwl 0x91a(%rsp), %eax
vpinsrw $0x5, %eax, %xmm1, %xmm1
movzwl 0x91c(%rsp), %eax
vpinsrw $0x6, %eax, %xmm1, %xmm1
movzwl 0x91e(%rsp), %eax
vpinsrw $0x7, %eax, %xmm1, %xmm1
vmovdqa %xmm1, 0x8f0(%rsp)
vmovdqa %xmm0, 0x8e0(%rsp)
vmovaps 0x8e0(%rsp), %ymm0
vmovaps %ymm0, 0x2a0(%rsp)
movl 0x2cc(%rsp), %edx
movl 0x4d4(%rsp), %eax
movl %eax, %ecx
movl %edx, %eax
leal 0x1(%rax,%rcx), %eax
movl %eax, 0x2c4(%rsp)
movl 0x2c4(%rsp), %edi
movl 0x608(%rsp), %edx
decl %edx
vzeroupper
callq 0x9348a0
movl %eax, 0x2c4(%rsp)
movzwl 0x392(%rsp), %eax
movq 0x610(%rsp), %rcx
movl 0x2c4(%rsp), %edx
movl 0x604(%rsp), %esi
imull %esi, %edx
movslq %edx, %rdx
movzbl (%rcx,%rdx), %ecx
movzwl 0x390(%rsp), %edx
imull %edx, %ecx
addl %ecx, %eax
movw %ax, 0x63a(%rsp)
movw 0x63a(%rsp), %ax
movw %ax, 0x96(%rsp)
movw %ax, 0x95e(%rsp)
movw %ax, 0x95c(%rsp)
movw %ax, 0x95a(%rsp)
movw %ax, 0x958(%rsp)
movw %ax, 0x956(%rsp)
movw %ax, 0x954(%rsp)
movw %ax, 0x952(%rsp)
movw %ax, 0x950(%rsp)
movw %ax, 0x94e(%rsp)
movw %ax, 0x94c(%rsp)
movw %ax, 0x94a(%rsp)
movw %ax, 0x948(%rsp)
movw %ax, 0x946(%rsp)
movw %ax, 0x944(%rsp)
movw %ax, 0x942(%rsp)
movw %ax, 0x940(%rsp)
movzwl 0x940(%rsp), %eax
vmovd %eax, %xmm0
movzwl 0x942(%rsp), %eax
vpinsrw $0x1, %eax, %xmm0, %xmm0
movzwl 0x944(%rsp), %eax
vpinsrw $0x2, %eax, %xmm0, %xmm0
movzwl 0x946(%rsp), %eax
vpinsrw $0x3, %eax, %xmm0, %xmm0
movzwl 0x948(%rsp), %eax
vpinsrw $0x4, %eax, %xmm0, %xmm0
movzwl 0x94a(%rsp), %eax
vpinsrw $0x5, %eax, %xmm0, %xmm0
movzwl 0x94c(%rsp), %eax
vpinsrw $0x6, %eax, %xmm0, %xmm0
movzwl 0x94e(%rsp), %eax
vpinsrw $0x7, %eax, %xmm0, %xmm0
movzwl 0x950(%rsp), %eax
vmovd %eax, %xmm1
movzwl 0x952(%rsp), %eax
vpinsrw $0x1, %eax, %xmm1, %xmm1
movzwl 0x954(%rsp), %eax
vpinsrw $0x2, %eax, %xmm1, %xmm1
movzwl 0x956(%rsp), %eax
vpinsrw $0x3, %eax, %xmm1, %xmm1
movzwl 0x958(%rsp), %eax
vpinsrw $0x4, %eax, %xmm1, %xmm1
movzwl 0x95a(%rsp), %eax
vpinsrw $0x5, %eax, %xmm1, %xmm1
movzwl 0x95c(%rsp), %eax
vpinsrw $0x6, %eax, %xmm1, %xmm1
movzwl 0x95e(%rsp), %eax
vpinsrw $0x7, %eax, %xmm1, %xmm1
vmovdqa %xmm1, 0x930(%rsp)
vmovdqa %xmm0, 0x920(%rsp)
vmovaps 0x920(%rsp), %ymm0
vmovaps %ymm0, 0x280(%rsp)
vmovaps 0x2a0(%rsp), %ymm0
vmovaps 0x280(%rsp), %ymm1
vpblendd $0xf0, %ymm1, %ymm0, %ymm0 # ymm0 = ymm0[0,1,2,3],ymm1[4,5,6,7]
movslq 0x2c0(%rsp), %rcx
leaq 0x4e0(%rsp), %rax
shlq $0x5, %rcx
addq %rcx, %rax
vmovdqa %ymm0, (%rax)
movl 0x2c0(%rsp), %eax
addl $0x1, %eax
movl %eax, 0x2c0(%rsp)
movl 0x4d4(%rsp), %eax
addl $0x2, %eax
movl %eax, 0x4d4(%rsp)
jmp 0x932bf6
movl 0x2cc(%rsp), %eax
movl 0x4d4(%rsp), %ecx
addl %ecx, %eax
movl %eax, 0x2c4(%rsp)
movl 0x2c4(%rsp), %edi
movl 0x608(%rsp), %edx
decl %edx
xorl %esi, %esi
vzeroupper
callq 0x9348a0
movl %eax, 0x2c4(%rsp)
movzwl 0x392(%rsp), %eax
movq 0x610(%rsp), %rcx
movl 0x2c4(%rsp), %edx
movl 0x604(%rsp), %esi
imull %esi, %edx
movslq %edx, %rdx
movzbl (%rcx,%rdx), %ecx
movzwl 0x390(%rsp), %edx
imull %edx, %ecx
addl %ecx, %eax
movw %ax, 0x638(%rsp)
movw 0x638(%rsp), %ax
movw %ax, 0x8e(%rsp)
movw %ax, 0x99e(%rsp)
movw %ax, 0x99c(%rsp)
movw %ax, 0x99a(%rsp)
movw %ax, 0x998(%rsp)
movw %ax, 0x996(%rsp)
movw %ax, 0x994(%rsp)
movw %ax, 0x992(%rsp)
movw %ax, 0x990(%rsp)
movw %ax, 0x98e(%rsp)
movw %ax, 0x98c(%rsp)
movw %ax, 0x98a(%rsp)
movw %ax, 0x988(%rsp)
movw %ax, 0x986(%rsp)
movw %ax, 0x984(%rsp)
movw %ax, 0x982(%rsp)
movw %ax, 0x980(%rsp)
movzwl 0x990(%rsp), %eax
vmovd %eax, %xmm0
movzwl 0x992(%rsp), %eax
vpinsrw $0x1, %eax, %xmm0, %xmm0
movzwl 0x994(%rsp), %eax
vpinsrw $0x2, %eax, %xmm0, %xmm0
movzwl 0x996(%rsp), %eax
vpinsrw $0x3, %eax, %xmm0, %xmm0
movzwl 0x998(%rsp), %eax
vpinsrw $0x4, %eax, %xmm0, %xmm0
movzwl 0x99a(%rsp), %eax
vpinsrw $0x5, %eax, %xmm0, %xmm0
movzwl 0x99c(%rsp), %eax
vpinsrw $0x6, %eax, %xmm0, %xmm0
movzwl 0x99e(%rsp), %eax
vpinsrw $0x7, %eax, %xmm0, %xmm1
movzwl 0x980(%rsp), %eax
vmovd %eax, %xmm0
movzwl 0x982(%rsp), %eax
vpinsrw $0x1, %eax, %xmm0, %xmm0
movzwl 0x984(%rsp), %eax
vpinsrw $0x2, %eax, %xmm0, %xmm0
movzwl 0x986(%rsp), %eax
vpinsrw $0x3, %eax, %xmm0, %xmm0
movzwl 0x988(%rsp), %eax
vpinsrw $0x4, %eax, %xmm0, %xmm0
movzwl 0x98a(%rsp), %eax
vpinsrw $0x5, %eax, %xmm0, %xmm0
movzwl 0x98c(%rsp), %eax
vpinsrw $0x6, %eax, %xmm0, %xmm0
movzwl 0x98e(%rsp), %eax
vpinsrw $0x7, %eax, %xmm0, %xmm2
vmovaps %xmm2, %xmm0
vinserti128 $0x1, %xmm1, %ymm0, %ymm0
vmovdqa %ymm0, 0x960(%rsp)
vmovdqa 0x960(%rsp), %ymm0
movslq 0x2c0(%rsp), %rcx
leaq 0x4e0(%rsp), %rax
shlq $0x5, %rcx
addq %rcx, %rax
vmovdqa %ymm0, (%rax)
jmp 0x9340a0
movl 0x2d4(%rsp), %eax
movl 0x60c(%rsp), %ecx
addl $0x6, %ecx
cmpl %ecx, %eax
jl 0x9339d0
movl $0x0, 0x278(%rsp)
movl $0xfffffff9, 0x4d4(%rsp) # imm = 0xFFFFFFF9
movl 0x4d4(%rsp), %eax
movl %eax, 0x88(%rsp)
movl 0x28(%rbp), %ecx
subl 0x4dc(%rsp), %ecx
movl $0x8, %eax
cmpl %ecx, %eax
jge 0x933316
movl $0x8, %eax
movl %eax, 0x84(%rsp)
jmp 0x933327
movl 0x28(%rbp), %eax
subl 0x4dc(%rsp), %eax
movl %eax, 0x84(%rsp)
movl 0x88(%rsp), %eax
movl 0x84(%rsp), %ecx
subl $0x2, %ecx
cmpl %ecx, %eax
jg 0x9337b5
movl 0x2cc(%rsp), %eax
movl 0x4d4(%rsp), %ecx
addl %ecx, %eax
movl %eax, 0x27c(%rsp)
movl 0x27c(%rsp), %edi
movl 0x608(%rsp), %edx
decl %edx
xorl %esi, %esi
movl %esi, 0x7c(%rsp)
vzeroupper
callq 0x9348a0
movl 0x7c(%rsp), %esi
movl %eax, 0x27c(%rsp)
movzwl 0x392(%rsp), %eax
movq 0x610(%rsp), %rcx
movl 0x27c(%rsp), %r8d
movl 0x604(%rsp), %edx
imull %edx, %r8d
movl 0x60c(%rsp), %edi
movl %edi, %edx
movl %r8d, %edi
leal -0x1(%rdx,%rdi), %edx
movslq %edx, %rdx
movzbl (%rcx,%rdx), %ecx
movzwl 0x390(%rsp), %edx
imull %edx, %ecx
addl %ecx, %eax
movw %ax, 0x636(%rsp)
movw 0x636(%rsp), %ax
movw %ax, 0x80(%rsp)
movw %ax, 0x9de(%rsp)
movw %ax, 0x9dc(%rsp)
movw %ax, 0x9da(%rsp)
movw %ax, 0x9d8(%rsp)
movw %ax, 0x9d6(%rsp)
movw %ax, 0x9d4(%rsp)
movw %ax, 0x9d2(%rsp)
movw %ax, 0x9d0(%rsp)
movw %ax, 0x9ce(%rsp)
movw %ax, 0x9cc(%rsp)
movw %ax, 0x9ca(%rsp)
movw %ax, 0x9c8(%rsp)
movw %ax, 0x9c6(%rsp)
movw %ax, 0x9c4(%rsp)
movw %ax, 0x9c2(%rsp)
movw %ax, 0x9c0(%rsp)
movzwl 0x9c0(%rsp), %eax
vmovd %eax, %xmm0
movzwl 0x9c2(%rsp), %eax
vpinsrw $0x1, %eax, %xmm0, %xmm0
movzwl 0x9c4(%rsp), %eax
vpinsrw $0x2, %eax, %xmm0, %xmm0
movzwl 0x9c6(%rsp), %eax
vpinsrw $0x3, %eax, %xmm0, %xmm0
movzwl 0x9c8(%rsp), %eax
vpinsrw $0x4, %eax, %xmm0, %xmm0
movzwl 0x9ca(%rsp), %eax
vpinsrw $0x5, %eax, %xmm0, %xmm0
movzwl 0x9cc(%rsp), %eax
vpinsrw $0x6, %eax, %xmm0, %xmm0
movzwl 0x9ce(%rsp), %eax
vpinsrw $0x7, %eax, %xmm0, %xmm0
movzwl 0x9d0(%rsp), %eax
vmovd %eax, %xmm1
movzwl 0x9d2(%rsp), %eax
vpinsrw $0x1, %eax, %xmm1, %xmm1
movzwl 0x9d4(%rsp), %eax
vpinsrw $0x2, %eax, %xmm1, %xmm1
movzwl 0x9d6(%rsp), %eax
vpinsrw $0x3, %eax, %xmm1, %xmm1
movzwl 0x9d8(%rsp), %eax
vpinsrw $0x4, %eax, %xmm1, %xmm1
movzwl 0x9da(%rsp), %eax
vpinsrw $0x5, %eax, %xmm1, %xmm1
movzwl 0x9dc(%rsp), %eax
vpinsrw $0x6, %eax, %xmm1, %xmm1
movzwl 0x9de(%rsp), %eax
vpinsrw $0x7, %eax, %xmm1, %xmm1
vmovdqa %xmm1, 0x9b0(%rsp)
vmovdqa %xmm0, 0x9a0(%rsp)
vmovaps 0x9a0(%rsp), %ymm0
vmovaps %ymm0, 0x240(%rsp)
movl 0x2cc(%rsp), %edx
movl 0x4d4(%rsp), %eax
movl %eax, %ecx
movl %edx, %eax
leal 0x1(%rax,%rcx), %eax
movl %eax, 0x27c(%rsp)
movl 0x27c(%rsp), %edi
movl 0x608(%rsp), %edx
decl %edx
vzeroupper
callq 0x9348a0
movl %eax, 0x27c(%rsp)
movzwl 0x392(%rsp), %eax
movq 0x610(%rsp), %rcx
movl 0x27c(%rsp), %edi
movl 0x604(%rsp), %edx
imull %edx, %edi
movl 0x60c(%rsp), %esi
movl %esi, %edx
movl %edi, %esi
leal -0x1(%rdx,%rsi), %edx
movslq %edx, %rdx
movzbl (%rcx,%rdx), %ecx
movzwl 0x390(%rsp), %edx
imull %edx, %ecx
addl %ecx, %eax
movw %ax, 0x634(%rsp)
movw 0x634(%rsp), %ax
movw %ax, 0x82(%rsp)
movw %ax, 0xa1e(%rsp)
movw %ax, 0xa1c(%rsp)
movw %ax, 0xa1a(%rsp)
movw %ax, 0xa18(%rsp)
movw %ax, 0xa16(%rsp)
movw %ax, 0xa14(%rsp)
movw %ax, 0xa12(%rsp)
movw %ax, 0xa10(%rsp)
movw %ax, 0xa0e(%rsp)
movw %ax, 0xa0c(%rsp)
movw %ax, 0xa0a(%rsp)
movw %ax, 0xa08(%rsp)
movw %ax, 0xa06(%rsp)
movw %ax, 0xa04(%rsp)
movw %ax, 0xa02(%rsp)
movw %ax, 0xa00(%rsp)
movzwl 0xa00(%rsp), %eax
vmovd %eax, %xmm0
movzwl 0xa02(%rsp), %eax
vpinsrw $0x1, %eax, %xmm0, %xmm0
movzwl 0xa04(%rsp), %eax
vpinsrw $0x2, %eax, %xmm0, %xmm0
movzwl 0xa06(%rsp), %eax
vpinsrw $0x3, %eax, %xmm0, %xmm0
movzwl 0xa08(%rsp), %eax
vpinsrw $0x4, %eax, %xmm0, %xmm0
movzwl 0xa0a(%rsp), %eax
vpinsrw $0x5, %eax, %xmm0, %xmm0
movzwl 0xa0c(%rsp), %eax
vpinsrw $0x6, %eax, %xmm0, %xmm0
movzwl 0xa0e(%rsp), %eax
vpinsrw $0x7, %eax, %xmm0, %xmm0
movzwl 0xa10(%rsp), %eax
vmovd %eax, %xmm1
movzwl 0xa12(%rsp), %eax
vpinsrw $0x1, %eax, %xmm1, %xmm1
movzwl 0xa14(%rsp), %eax
vpinsrw $0x2, %eax, %xmm1, %xmm1
movzwl 0xa16(%rsp), %eax
vpinsrw $0x3, %eax, %xmm1, %xmm1
movzwl 0xa18(%rsp), %eax
vpinsrw $0x4, %eax, %xmm1, %xmm1
movzwl 0xa1a(%rsp), %eax
vpinsrw $0x5, %eax, %xmm1, %xmm1
movzwl 0xa1c(%rsp), %eax
vpinsrw $0x6, %eax, %xmm1, %xmm1
movzwl 0xa1e(%rsp), %eax
vpinsrw $0x7, %eax, %xmm1, %xmm1
vmovdqa %xmm1, 0x9f0(%rsp)
vmovdqa %xmm0, 0x9e0(%rsp)
vmovaps 0x9e0(%rsp), %ymm0
vmovaps %ymm0, 0x220(%rsp)
vmovaps 0x240(%rsp), %ymm0
vmovaps 0x220(%rsp), %ymm1
vpblendd $0xf0, %ymm1, %ymm0, %ymm0 # ymm0 = ymm0[0,1,2,3],ymm1[4,5,6,7]
movslq 0x278(%rsp), %rcx
leaq 0x4e0(%rsp), %rax
shlq $0x5, %rcx
addq %rcx, %rax
vmovdqa %ymm0, (%rax)
movl 0x278(%rsp), %eax
addl $0x1, %eax
movl %eax, 0x278(%rsp)
movl 0x4d4(%rsp), %eax
addl $0x2, %eax
movl %eax, 0x4d4(%rsp)
jmp 0x9332e7
movl 0x2cc(%rsp), %eax
movl 0x4d4(%rsp), %ecx
addl %ecx, %eax
movl %eax, 0x27c(%rsp)
movl 0x27c(%rsp), %edi
movl 0x608(%rsp), %edx
decl %edx
xorl %esi, %esi
vzeroupper
callq 0x9348a0
movl %eax, 0x27c(%rsp)
movzwl 0x392(%rsp), %eax
movq 0x610(%rsp), %rcx
movl 0x27c(%rsp), %edi
movl 0x604(%rsp), %edx
imull %edx, %edi
movl 0x60c(%rsp), %esi
movl %esi, %edx
movl %edi, %esi
leal -0x1(%rdx,%rsi), %edx
movslq %edx, %rdx
movzbl (%rcx,%rdx), %ecx
movzwl 0x390(%rsp), %edx
imull %edx, %ecx
addl %ecx, %eax
movw %ax, 0x632(%rsp)
movw 0x632(%rsp), %ax
movw %ax, 0x7a(%rsp)
movw %ax, 0xa66(%rsp)
movw %ax, 0xa64(%rsp)
movw %ax, 0xa62(%rsp)
movw %ax, 0xa60(%rsp)
movw %ax, 0xa5e(%rsp)
movw %ax, 0xa5c(%rsp)
movw %ax, 0xa5a(%rsp)
movw %ax, 0xa58(%rsp)
movw %ax, 0xa56(%rsp)
movw %ax, 0xa54(%rsp)
movw %ax, 0xa52(%rsp)
movw %ax, 0xa50(%rsp)
movw %ax, 0xa4e(%rsp)
movw %ax, 0xa4c(%rsp)
movw %ax, 0xa4a(%rsp)
movw %ax, 0xa48(%rsp)
movzwl 0xa58(%rsp), %eax
vmovd %eax, %xmm0
movzwl 0xa5a(%rsp), %eax
vpinsrw $0x1, %eax, %xmm0, %xmm0
movzwl 0xa5c(%rsp), %eax
vpinsrw $0x2, %eax, %xmm0, %xmm0
movzwl 0xa5e(%rsp), %eax
vpinsrw $0x3, %eax, %xmm0, %xmm0
movzwl 0xa60(%rsp), %eax
vpinsrw $0x4, %eax, %xmm0, %xmm0
movzwl 0xa62(%rsp), %eax
vpinsrw $0x5, %eax, %xmm0, %xmm0
movzwl 0xa64(%rsp), %eax
vpinsrw $0x6, %eax, %xmm0, %xmm0
movzwl 0xa66(%rsp), %eax
vpinsrw $0x7, %eax, %xmm0, %xmm1
movzwl 0xa48(%rsp), %eax
vmovd %eax, %xmm0
movzwl 0xa4a(%rsp), %eax
vpinsrw $0x1, %eax, %xmm0, %xmm0
movzwl 0xa4c(%rsp), %eax
vpinsrw $0x2, %eax, %xmm0, %xmm0
movzwl 0xa4e(%rsp), %eax
vpinsrw $0x3, %eax, %xmm0, %xmm0
movzwl 0xa50(%rsp), %eax
vpinsrw $0x4, %eax, %xmm0, %xmm0
movzwl 0xa52(%rsp), %eax
vpinsrw $0x5, %eax, %xmm0, %xmm0
movzwl 0xa54(%rsp), %eax
vpinsrw $0x6, %eax, %xmm0, %xmm0
movzwl 0xa56(%rsp), %eax
vpinsrw $0x7, %eax, %xmm0, %xmm2
vmovaps %xmm2, %xmm0
vinserti128 $0x1, %xmm1, %ymm0, %ymm0
vmovdqa %ymm0, 0xa20(%rsp)
vmovdqa 0xa20(%rsp), %ymm0
movslq 0x278(%rsp), %rcx
leaq 0x4e0(%rsp), %rax
shlq $0x5, %rcx
addq %rcx, %rax
vmovdqa %ymm0, (%rax)
jmp 0x93409e
movl 0x2d4(%rsp), %eax
subl $0x7, %eax
cmpl $0x0, %eax
jl 0x9339f6
movl 0x2d4(%rsp), %eax
addl $0x9, %eax
cmpl 0x60c(%rsp), %eax
jle 0x93400b
movl 0x2d4(%rsp), %ecx
subl $0x6, %ecx
xorl %eax, %eax
subl %ecx, %eax
movl %eax, 0x21c(%rsp)
movl 0x2d4(%rsp), %eax
addl $0x8, %eax
subl 0x60c(%rsp), %eax
movl %eax, 0x218(%rsp)
movl $0x0, 0x20c(%rsp)
movl $0xfffffff9, 0x4d4(%rsp) # imm = 0xFFFFFFF9
movl 0x4d4(%rsp), %eax
movl %eax, 0x74(%rsp)
movl 0x28(%rbp), %ecx
subl 0x4dc(%rsp), %ecx
movl $0x8, %eax
cmpl %ecx, %eax
jge 0x933a62
movl $0x8, %eax
movl %eax, 0x70(%rsp)
jmp 0x933a70
movl 0x28(%rbp), %eax
subl 0x4dc(%rsp), %eax
movl %eax, 0x70(%rsp)
movl 0x74(%rsp), %eax
movl 0x70(%rsp), %ecx
subl $0x2, %ecx
cmpl %ecx, %eax
jg 0x933de2
movl 0x2cc(%rsp), %eax
addl 0x4d4(%rsp), %eax
movl %eax, 0x214(%rsp)
movl 0x214(%rsp), %edi
movl 0x608(%rsp), %edx
subl $0x1, %edx
xorl %esi, %esi
vzeroupper
callq 0x9348a0
movl %eax, 0x214(%rsp)
movq 0x610(%rsp), %rax
movl 0x214(%rsp), %ecx
imull 0x604(%rsp), %ecx
movslq %ecx, %rcx
addq %rcx, %rax
movslq 0x2d4(%rsp), %rcx
addq %rcx, %rax
addq $-0x7, %rax
movq %rax, 0x6f8(%rsp)
movq 0x6f8(%rsp), %rax
vmovdqu (%rax), %xmm0
vmovdqa %xmm0, 0x1f0(%rsp)
movl 0x2cc(%rsp), %eax
addl 0x4d4(%rsp), %eax
addl $0x1, %eax
movl %eax, 0x214(%rsp)
movl 0x214(%rsp), %edi
movl 0x608(%rsp), %edx
subl $0x1, %edx
xorl %esi, %esi
callq 0x9348a0
movl %eax, 0x214(%rsp)
movq 0x610(%rsp), %rax
movl 0x214(%rsp), %ecx
imull 0x604(%rsp), %ecx
movslq %ecx, %rcx
addq %rcx, %rax
movslq 0x2d4(%rsp), %rcx
addq %rcx, %rax
addq $-0x7, %rax
movq %rax, 0x6f0(%rsp)
movq 0x6f0(%rsp), %rax
vmovdqu (%rax), %xmm0
vmovdqa %xmm0, 0x1e0(%rsp)
cmpl $0x0, 0x21c(%rsp)
jl 0x933c4c
movslq 0x21c(%rsp), %rcx
leaq 0x1e7360(%rip), %rax # 0xb1af00
shlq $0x4, %rcx
addq %rcx, %rax
movq %rax, 0x6e8(%rsp)
movq 0x6e8(%rsp), %rax
vmovdqu (%rax), %xmm0
vmovdqa %xmm0, 0x1d0(%rsp)
vmovdqa 0x1f0(%rsp), %xmm1
vmovdqa 0x1d0(%rsp), %xmm0
vmovdqa %xmm1, 0x7b0(%rsp)
vmovdqa %xmm0, 0x7a0(%rsp)
vmovdqa 0x7b0(%rsp), %xmm0
vmovdqa 0x7a0(%rsp), %xmm1
vpshufb %xmm1, %xmm0, %xmm0
vmovdqa %xmm0, 0x1f0(%rsp)
vmovdqa 0x1e0(%rsp), %xmm1
vmovdqa 0x1d0(%rsp), %xmm0
vmovdqa %xmm1, 0x790(%rsp)
vmovdqa %xmm0, 0x780(%rsp)
vmovdqa 0x790(%rsp), %xmm0
vmovdqa 0x780(%rsp), %xmm1
vpshufb %xmm1, %xmm0, %xmm0
vmovdqa %xmm0, 0x1e0(%rsp)
cmpl $0x0, 0x218(%rsp)
jl 0x933d15
movslq 0x218(%rsp), %rcx
leaq 0x1e7377(%rip), %rax # 0xb1afe0
shlq $0x4, %rcx
addq %rcx, %rax
movq %rax, 0x6e0(%rsp)
movq 0x6e0(%rsp), %rax
vmovdqu (%rax), %xmm0
vmovdqa %xmm0, 0x1c0(%rsp)
vmovdqa 0x1f0(%rsp), %xmm1
vmovdqa 0x1c0(%rsp), %xmm0
vmovdqa %xmm1, 0x770(%rsp)
vmovdqa %xmm0, 0x760(%rsp)
vmovdqa 0x770(%rsp), %xmm0
vmovdqa 0x760(%rsp), %xmm1
vpshufb %xmm1, %xmm0, %xmm0
vmovdqa %xmm0, 0x1f0(%rsp)
vmovdqa 0x1e0(%rsp), %xmm1
vmovdqa 0x1c0(%rsp), %xmm0
vmovdqa %xmm1, 0x750(%rsp)
vmovdqa %xmm0, 0x740(%rsp)
vmovdqa 0x750(%rsp), %xmm0
vmovdqa 0x740(%rsp), %xmm1
vpshufb %xmm1, %xmm0, %xmm0
vmovdqa %xmm0, 0x1e0(%rsp)
movl 0x2d0(%rsp), %eax
movswl 0x58(%rbp), %ecx
movl 0x4d4(%rsp), %edx
addl $0x4, %edx
imull %edx, %ecx
addl %ecx, %eax
movl %eax, 0x210(%rsp)
vmovdqa 0x1f0(%rsp), %xmm0
vmovdqa %xmm0, 0x7d0(%rsp)
vmovdqa 0x7d0(%rsp), %xmm2
vmovdqa 0x1e0(%rsp), %xmm1
vmovaps %xmm2, %xmm0
vinserti128 $0x1, %xmm1, %ymm0, %ymm0
vmovdqa %ymm0, 0x1a0(%rsp)
vmovdqa 0x1a0(%rsp), %ymm0
leaq 0x4e0(%rsp), %rdi
movl 0x210(%rsp), %esi
movswl 0x50(%rbp), %edx
movswl 0x58(%rbp), %ecx
movl 0x20c(%rsp), %r8d
leaq 0x300(%rsp), %r9
leaq 0x440(%rsp), %r10
leaq 0x430(%rsp), %rax
movq %r10, (%rsp)
movq %rax, 0x8(%rsp)
callq 0x9348f0
movl 0x20c(%rsp), %eax
addl $0x1, %eax
movl %eax, 0x20c(%rsp)
movl 0x4d4(%rsp), %eax
addl $0x2, %eax
movl %eax, 0x4d4(%rsp)
jmp 0x933a39
movl 0x2cc(%rsp), %eax
addl 0x4d4(%rsp), %eax
movl %eax, 0x214(%rsp)
movl 0x214(%rsp), %edi
movl 0x608(%rsp), %edx
subl $0x1, %edx
xorl %esi, %esi
vzeroupper
callq 0x9348a0
movl %eax, 0x214(%rsp)
movq 0x610(%rsp), %rax
movl 0x214(%rsp), %ecx
imull 0x604(%rsp), %ecx
movslq %ecx, %rcx
addq %rcx, %rax
movslq 0x2d4(%rsp), %rcx
addq %rcx, %rax
addq $-0x7, %rax
movq %rax, 0x6d8(%rsp)
movq 0x6d8(%rsp), %rax
vmovdqu (%rax), %xmm0
vmovdqa %xmm0, 0x190(%rsp)
cmpl $0x0, 0x21c(%rsp)
jl 0x933ee3
movslq 0x21c(%rsp), %rcx
leaq 0x1e7085(%rip), %rax # 0xb1af00
shlq $0x4, %rcx
addq %rcx, %rax
movq %rax, 0x6d0(%rsp)
movq 0x6d0(%rsp), %rax
vmovdqu (%rax), %xmm0
vmovdqa %xmm0, 0x180(%rsp)
vmovdqa 0x190(%rsp), %xmm1
vmovdqa 0x180(%rsp), %xmm0
vmovdqa %xmm1, 0x730(%rsp)
vmovdqa %xmm0, 0x720(%rsp)
vmovdqa 0x730(%rsp), %xmm0
vmovdqa 0x720(%rsp), %xmm1
vpshufb %xmm1, %xmm0, %xmm0
vmovdqa %xmm0, 0x190(%rsp)
cmpl $0x0, 0x218(%rsp)
jl 0x933f64
movslq 0x218(%rsp), %rcx
leaq 0x1e70e4(%rip), %rax # 0xb1afe0
shlq $0x4, %rcx
addq %rcx, %rax
movq %rax, 0x6c8(%rsp)
movq 0x6c8(%rsp), %rax
vmovdqu (%rax), %xmm0
vmovdqa %xmm0, 0x170(%rsp)
vmovdqa 0x190(%rsp), %xmm1
vmovdqa 0x170(%rsp), %xmm0
vmovdqa %xmm1, 0x710(%rsp)
vmovdqa %xmm0, 0x700(%rsp)
vmovdqa 0x710(%rsp), %xmm0
vmovdqa 0x700(%rsp), %xmm1
vpshufb %xmm1, %xmm0, %xmm0
vmovdqa %xmm0, 0x190(%rsp)
movl 0x2d0(%rsp), %eax
movswl 0x58(%rbp), %ecx
movl 0x4d4(%rsp), %edx
addl $0x4, %edx
imull %edx, %ecx
addl %ecx, %eax
movl %eax, 0x210(%rsp)
vmovdqa 0x190(%rsp), %xmm0
vmovdqa %xmm0, 0x7c0(%rsp)
vmovdqa 0x7c0(%rsp), %xmm1
vmovaps %xmm1, %xmm0
vmovdqa %ymm0, 0x140(%rsp)
movswl 0x50(%rbp), %edi
movl 0x210(%rsp), %esi
leaq 0xc0(%rsp), %rdx
vzeroupper
callq 0x9349a0
vmovdqa 0x140(%rsp), %ymm0
leaq 0x4e0(%rsp), %rdi
leaq 0xc0(%rsp), %rsi
leaq 0x300(%rsp), %rdx
movl 0x20c(%rsp), %r9d
leaq 0x440(%rsp), %rcx
leaq 0x430(%rsp), %r8
callq 0x934f50
jmp 0x93409c
movq 0x610(%rsp), %rdi
leaq 0x4e0(%rsp), %rsi
movl 0x604(%rsp), %edx
movl 0x2d4(%rsp), %ecx
movl 0x2cc(%rsp), %r8d
movl 0x2d0(%rsp), %r9d
movswl 0x50(%rbp), %r13d
movswl 0x58(%rbp), %r12d
movl 0x28(%rbp), %r15d
movl 0x608(%rsp), %r14d
movl 0x4dc(%rsp), %ebx
leaq 0x300(%rsp), %rax
leaq 0x440(%rsp), %r11
leaq 0x430(%rsp), %r10
movl %r13d, (%rsp)
movl %r12d, 0x8(%rsp)
movl %r15d, 0x10(%rsp)
movl %r14d, 0x18(%rsp)
movl %ebx, 0x20(%rsp)
movq %r11, 0x28(%rsp)
movq %r10, 0x30(%rsp)
movq %rax, 0x38(%rsp)
vzeroupper
callq 0x935320
jmp 0x93409e
jmp 0x9340a0
movq 0x5f8(%rsp), %rdi
leaq 0x4e0(%rsp), %rsi
movq 0x48(%rbp), %rdx
movw 0x60(%rbp), %cx
movw 0x68(%rbp), %r8w
movl 0x28(%rbp), %r9d
movl 0x30(%rbp), %eax
movl %eax, 0x5c(%rsp)
movl 0x20(%rbp), %eax
movl %eax, 0x60(%rsp)
movl 0x4dc(%rsp), %eax
movl %eax, 0x64(%rsp)
movl 0x4d8(%rsp), %r13d
movl 0x2c8(%rsp), %r12d
movl 0x4c8(%rsp), %r15d
movl 0x47c(%rsp), %ebx
leaq 0x3a0(%rsp), %r14
leaq 0x400(%rsp), %r11
leaq 0x3e0(%rsp), %r10
leaq 0x3c0(%rsp), %rax
movq %rax, 0x68(%rsp)
movl 0x5c(%rsp), %eax
movswl %cx, %ecx
movswl %r8w, %r8d
movl %eax, (%rsp)
movl 0x60(%rsp), %eax
movl %eax, 0x8(%rsp)
movl 0x64(%rsp), %eax
movl %eax, 0x10(%rsp)
movq 0x68(%rsp), %rax
movl %r13d, 0x18(%rsp)
movl %r12d, 0x20(%rsp)
movl %r15d, 0x28(%rsp)
movq %r14, 0x30(%rsp)
movl %ebx, 0x38(%rsp)
movq %r11, 0x40(%rsp)
movq %r10, 0x48(%rsp)
movq %rax, 0x50(%rsp)
vzeroupper
callq 0x935610
movl 0x4d8(%rsp), %eax
addl $0x8, %eax
movl %eax, 0x4d8(%rsp)
jmp 0x932a35
jmp 0x934188
movl 0x4dc(%rsp), %eax
addl $0x8, %eax
movl %eax, 0x4dc(%rsp)
jmp 0x932a1a
leaq -0x28(%rbp), %rsp
popq %rbx
popq %r12
popq %r13
popq %r14
popq %r15
popq %rbp
vzeroupper
retq
| /m-ab-s[P]aom/av1/common/x86/warp_plane_avx2.c |
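The per-block coordinate setup at the top of the main loop in av1_warp_affine_avx2 (src_x/src_y through ix4/sx4/iy4/sy4) is easier to follow in scalar form. The sketch below assumes the usual AOM precision constants, which the immediates in the assembly above are consistent with (WARPEDMODEL_PREC_BITS = 16, WARPEDDIFF_PREC_BITS = 10, WARPEDPIXEL_PREC_SHIFTS = 64, WARP_PARAM_REDUCE_BITS = 6); it is an illustration of the arithmetic, not a drop-in replacement:

#include <stdint.h>

// Hypothetical scalar sketch of the per-8x8-block coordinate setup used above.
// mat is the six-parameter affine model in WARPEDMODEL_PREC_BITS (16) precision.
static void warp_block_coords(const int32_t *mat, int p_col, int p_row, int i,
                              int j, int subsampling_x, int subsampling_y,
                              int32_t *ix4, int32_t *sx4, int32_t *iy4,
                              int32_t *sy4) {
  const int32_t src_x = (p_col + j + 4) << subsampling_x;
  const int32_t src_y = (p_row + i + 4) << subsampling_y;
  const int64_t dst_x =
      (int64_t)mat[2] * src_x + (int64_t)mat[3] * src_y + (int64_t)mat[0];
  const int64_t dst_y =
      (int64_t)mat[4] * src_x + (int64_t)mat[5] * src_y + (int64_t)mat[1];
  const int64_t x4 = dst_x >> subsampling_x;
  const int64_t y4 = dst_y >> subsampling_y;
  *ix4 = (int32_t)(x4 >> 16);     // integer pixel position
  *sx4 = (int32_t)(x4 & 0xffff);  // sub-pixel phase, 16 fractional bits
  *iy4 = (int32_t)(y4 >> 16);
  *sy4 = (int32_t)(y4 & 0xffff);
  // const1/const2 in the function above then add the filter-index bias
  // (64 << 10), the rounding offset (1 << 9) and the -4*alpha - 4*beta
  // (resp. -4*gamma - 4*delta) terms, after which the low 6 bits are masked:
  //   sx4 = (sx4 + const1) & ~63;  sy4 = (sy4 + const2) & ~63;
}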
prepare_horizontal_filter_coeff | static inline void prepare_horizontal_filter_coeff(int alpha, int sx,
__m256i *coeff) {
const __m128i tmp_0 = _mm_loadl_epi64(
(__m128i *)&av1_filter_8bit[(sx + 0 * alpha) >> WARPEDDIFF_PREC_BITS]);
const __m128i tmp_1 = _mm_loadl_epi64(
(__m128i *)&av1_filter_8bit[(sx + 1 * alpha) >> WARPEDDIFF_PREC_BITS]);
const __m128i tmp_2 = _mm_loadl_epi64(
(__m128i *)&av1_filter_8bit[(sx + 2 * alpha) >> WARPEDDIFF_PREC_BITS]);
const __m128i tmp_3 = _mm_loadl_epi64(
(__m128i *)&av1_filter_8bit[(sx + 3 * alpha) >> WARPEDDIFF_PREC_BITS]);
const __m128i tmp_4 = _mm_loadl_epi64(
(__m128i *)&av1_filter_8bit[(sx + 4 * alpha) >> WARPEDDIFF_PREC_BITS]);
const __m128i tmp_5 = _mm_loadl_epi64(
(__m128i *)&av1_filter_8bit[(sx + 5 * alpha) >> WARPEDDIFF_PREC_BITS]);
const __m128i tmp_6 = _mm_loadl_epi64(
(__m128i *)&av1_filter_8bit[(sx + 6 * alpha) >> WARPEDDIFF_PREC_BITS]);
const __m128i tmp_7 = _mm_loadl_epi64(
(__m128i *)&av1_filter_8bit[(sx + 7 * alpha) >> WARPEDDIFF_PREC_BITS]);
const __m128i tmp_8 = _mm_unpacklo_epi16(tmp_0, tmp_2);
const __m128i tmp_9 = _mm_unpacklo_epi16(tmp_1, tmp_3);
const __m128i tmp_10 = _mm_unpacklo_epi16(tmp_4, tmp_6);
const __m128i tmp_11 = _mm_unpacklo_epi16(tmp_5, tmp_7);
const __m128i tmp_12 = _mm_unpacklo_epi32(tmp_8, tmp_10);
const __m128i tmp_13 = _mm_unpackhi_epi32(tmp_8, tmp_10);
const __m128i tmp_14 = _mm_unpacklo_epi32(tmp_9, tmp_11);
const __m128i tmp_15 = _mm_unpackhi_epi32(tmp_9, tmp_11);
coeff[0] = _mm256_castsi128_si256(_mm_unpacklo_epi64(tmp_12, tmp_14));
coeff[1] = _mm256_castsi128_si256(_mm_unpackhi_epi64(tmp_12, tmp_14));
coeff[2] = _mm256_castsi128_si256(_mm_unpacklo_epi64(tmp_13, tmp_15));
coeff[3] = _mm256_castsi128_si256(_mm_unpackhi_epi64(tmp_13, tmp_15));
} | subq $0x358, %rsp # imm = 0x358
movl %edi, 0x8c(%rsp)
movl %esi, 0x88(%rsp)
movq %rdx, 0x80(%rsp)
movl 0x88(%rsp), %eax
sarl $0xa, %eax
movslq %eax, %rcx
leaq 0x1e4b7f(%rip), %rax # 0xb19550
leaq (%rax,%rcx,8), %rcx
movq %rcx, 0x1c8(%rsp)
movq 0x1c8(%rsp), %rcx
vmovq (%rcx), %xmm0
vmovdqa %xmm0, 0x1b0(%rsp)
vmovdqa 0x1b0(%rsp), %xmm0
vmovdqa %xmm0, 0x70(%rsp)
movl 0x88(%rsp), %ecx
movl 0x8c(%rsp), %edx
addl %edx, %ecx
sarl $0xa, %ecx
movslq %ecx, %rcx
leaq (%rax,%rcx,8), %rcx
movq %rcx, 0x1a8(%rsp)
movq 0x1a8(%rsp), %rcx
vmovq (%rcx), %xmm0
vmovdqa %xmm0, 0x190(%rsp)
vmovdqa 0x190(%rsp), %xmm0
vmovdqa %xmm0, 0x60(%rsp)
movl 0x88(%rsp), %esi
movl 0x8c(%rsp), %ecx
movl %ecx, %edx
movl %esi, %ecx
leal (%rcx,%rdx,2), %ecx
sarl $0xa, %ecx
movslq %ecx, %rcx
leaq (%rax,%rcx,8), %rcx
movq %rcx, 0x188(%rsp)
movq 0x188(%rsp), %rcx
vmovq (%rcx), %xmm0
vmovdqa %xmm0, 0x170(%rsp)
vmovdqa 0x170(%rsp), %xmm0
vmovdqa %xmm0, 0x50(%rsp)
movl 0x88(%rsp), %ecx
movl 0x8c(%rsp), %esi
movl %esi, %edx
leal (%rdx,%rdx,2), %edx
addl %edx, %ecx
sarl $0xa, %ecx
movslq %ecx, %rcx
leaq (%rax,%rcx,8), %rcx
movq %rcx, 0x168(%rsp)
movq 0x168(%rsp), %rcx
vmovq (%rcx), %xmm0
vmovdqa %xmm0, 0x150(%rsp)
vmovdqa 0x150(%rsp), %xmm0
vmovdqa %xmm0, 0x40(%rsp)
movl 0x88(%rsp), %esi
movl 0x8c(%rsp), %ecx
movl %ecx, %edx
movl %esi, %ecx
leal (%rcx,%rdx,4), %ecx
sarl $0xa, %ecx
movslq %ecx, %rcx
leaq (%rax,%rcx,8), %rcx
movq %rcx, 0x148(%rsp)
movq 0x148(%rsp), %rcx
vmovq (%rcx), %xmm0
vmovdqa %xmm0, 0x130(%rsp)
vmovdqa 0x130(%rsp), %xmm0
vmovdqa %xmm0, 0x30(%rsp)
movl 0x88(%rsp), %ecx
movl 0x8c(%rsp), %esi
movl %esi, %edx
leal (%rdx,%rdx,4), %edx
addl %edx, %ecx
sarl $0xa, %ecx
movslq %ecx, %rcx
leaq (%rax,%rcx,8), %rcx
movq %rcx, 0x128(%rsp)
movq 0x128(%rsp), %rcx
vmovq (%rcx), %xmm0
vmovdqa %xmm0, 0x110(%rsp)
vmovdqa 0x110(%rsp), %xmm0
vmovdqa %xmm0, 0x20(%rsp)
movl 0x88(%rsp), %edx
movl 0x8c(%rsp), %esi
movl %esi, %ecx
leal (%rcx,%rcx,2), %esi
movl %edx, %ecx
movl %esi, %edx
leal (%rcx,%rdx,2), %ecx
sarl $0xa, %ecx
movslq %ecx, %rcx
leaq (%rax,%rcx,8), %rcx
movq %rcx, 0x108(%rsp)
movq 0x108(%rsp), %rcx
vmovq (%rcx), %xmm0
vmovdqa %xmm0, 0xf0(%rsp)
vmovdqa 0xf0(%rsp), %xmm0
vmovdqa %xmm0, 0x10(%rsp)
movl 0x88(%rsp), %ecx
movl 0x8c(%rsp), %esi
movl %esi, %edx
shll $0x3, %edx
subl %esi, %edx
addl %edx, %ecx
sarl $0xa, %ecx
movslq %ecx, %rcx
leaq (%rax,%rcx,8), %rax
movq %rax, 0xe8(%rsp)
movq 0xe8(%rsp), %rax
vmovq (%rax), %xmm0
vmovdqa %xmm0, 0xd0(%rsp)
vmovdqa 0xd0(%rsp), %xmm0
vmovdqa %xmm0, (%rsp)
vmovdqa 0x70(%rsp), %xmm1
vmovdqa 0x50(%rsp), %xmm0
vmovdqa %xmm1, 0x240(%rsp)
vmovdqa %xmm0, 0x230(%rsp)
vmovdqa 0x240(%rsp), %xmm0
vmovdqa 0x230(%rsp), %xmm1
vpunpcklwd %xmm1, %xmm0, %xmm0 # xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1],xmm0[2],xmm1[2],xmm0[3],xmm1[3]
vmovdqa %xmm0, -0x10(%rsp)
vmovdqa 0x60(%rsp), %xmm1
vmovdqa 0x40(%rsp), %xmm0
vmovdqa %xmm1, 0x220(%rsp)
vmovdqa %xmm0, 0x210(%rsp)
vmovdqa 0x220(%rsp), %xmm0
vmovdqa 0x210(%rsp), %xmm1
vpunpcklwd %xmm1, %xmm0, %xmm0 # xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1],xmm0[2],xmm1[2],xmm0[3],xmm1[3]
vmovdqa %xmm0, -0x20(%rsp)
vmovdqa 0x30(%rsp), %xmm1
vmovdqa 0x10(%rsp), %xmm0
vmovdqa %xmm1, 0x200(%rsp)
vmovdqa %xmm0, 0x1f0(%rsp)
vmovdqa 0x200(%rsp), %xmm0
vmovdqa 0x1f0(%rsp), %xmm1
vpunpcklwd %xmm1, %xmm0, %xmm0 # xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1],xmm0[2],xmm1[2],xmm0[3],xmm1[3]
vmovdqa %xmm0, -0x30(%rsp)
vmovdqa 0x20(%rsp), %xmm1
vmovdqa (%rsp), %xmm0
vmovdqa %xmm1, 0x1e0(%rsp)
vmovdqa %xmm0, 0x1d0(%rsp)
vmovdqa 0x1e0(%rsp), %xmm0
vmovdqa 0x1d0(%rsp), %xmm1
vpunpcklwd %xmm1, %xmm0, %xmm0 # xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1],xmm0[2],xmm1[2],xmm0[3],xmm1[3]
vmovdqa %xmm0, -0x40(%rsp)
vmovdqa -0x10(%rsp), %xmm1
vmovdqa -0x30(%rsp), %xmm0
vmovdqa %xmm1, 0x280(%rsp)
vmovdqa %xmm0, 0x270(%rsp)
vmovdqa 0x280(%rsp), %xmm0
vmovdqa 0x270(%rsp), %xmm1
vpunpckldq %xmm1, %xmm0, %xmm0 # xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1]
vmovdqa %xmm0, -0x50(%rsp)
vmovdqa -0x10(%rsp), %xmm1
vmovdqa -0x30(%rsp), %xmm0
vmovdqa %xmm1, 0x2c0(%rsp)
vmovdqa %xmm0, 0x2b0(%rsp)
vmovdqa 0x2c0(%rsp), %xmm0
vmovdqa 0x2b0(%rsp), %xmm1
vpunpckhdq %xmm1, %xmm0, %xmm0 # xmm0 = xmm0[2],xmm1[2],xmm0[3],xmm1[3]
vmovdqa %xmm0, -0x60(%rsp)
vmovdqa -0x20(%rsp), %xmm1
vmovdqa -0x40(%rsp), %xmm0
vmovdqa %xmm1, 0x260(%rsp)
vmovdqa %xmm0, 0x250(%rsp)
vmovdqa 0x260(%rsp), %xmm0
vmovdqa 0x250(%rsp), %xmm1
vpunpckldq %xmm1, %xmm0, %xmm0 # xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1]
vmovdqa %xmm0, -0x70(%rsp)
vmovdqa -0x20(%rsp), %xmm1
vmovdqa -0x40(%rsp), %xmm0
vmovdqa %xmm1, 0x2a0(%rsp)
vmovdqa %xmm0, 0x290(%rsp)
vmovdqa 0x2a0(%rsp), %xmm0
vmovdqa 0x290(%rsp), %xmm1
vpunpckhdq %xmm1, %xmm0, %xmm0 # xmm0 = xmm0[2],xmm1[2],xmm0[3],xmm1[3]
vmovdqa %xmm0, -0x80(%rsp)
vmovdqa -0x50(%rsp), %xmm1
vmovdqa -0x70(%rsp), %xmm0
vmovdqa %xmm1, 0x300(%rsp)
vmovdqa %xmm0, 0x2f0(%rsp)
vmovdqa 0x300(%rsp), %xmm0
vmovdqa 0x2f0(%rsp), %xmm1
vpunpcklqdq %xmm1, %xmm0, %xmm0 # xmm0 = xmm0[0],xmm1[0]
vmovdqa %xmm0, 0xc0(%rsp)
vmovdqa 0xc0(%rsp), %xmm0
movq 0x80(%rsp), %rax
vmovdqa %xmm1, 0x10(%rax)
vmovdqa %xmm0, (%rax)
vmovdqa -0x50(%rsp), %xmm2
vmovdqa -0x70(%rsp), %xmm0
vmovdqa %xmm2, 0x340(%rsp)
vmovdqa %xmm0, 0x330(%rsp)
vmovdqa 0x340(%rsp), %xmm0
vmovdqa 0x330(%rsp), %xmm2
vpunpckhqdq %xmm2, %xmm0, %xmm0 # xmm0 = xmm0[1],xmm2[1]
vmovdqa %xmm0, 0xb0(%rsp)
vmovdqa 0xb0(%rsp), %xmm0
movq 0x80(%rsp), %rax
vmovdqa %xmm1, 0x30(%rax)
vmovdqa %xmm0, 0x20(%rax)
vmovdqa -0x60(%rsp), %xmm2
vmovdqa -0x80(%rsp), %xmm0
vmovdqa %xmm2, 0x2e0(%rsp)
vmovdqa %xmm0, 0x2d0(%rsp)
vmovdqa 0x2e0(%rsp), %xmm0
vmovdqa 0x2d0(%rsp), %xmm2
vpunpcklqdq %xmm2, %xmm0, %xmm0 # xmm0 = xmm0[0],xmm2[0]
vmovdqa %xmm0, 0xa0(%rsp)
vmovdqa 0xa0(%rsp), %xmm0
movq 0x80(%rsp), %rax
vmovdqa %xmm1, 0x50(%rax)
vmovdqa %xmm0, 0x40(%rax)
vmovdqa -0x60(%rsp), %xmm1
vmovdqa -0x80(%rsp), %xmm0
vmovdqa %xmm1, 0x320(%rsp)
vmovdqa %xmm0, 0x310(%rsp)
vmovdqa 0x320(%rsp), %xmm0
vmovdqa 0x310(%rsp), %xmm1
vpunpckhqdq %xmm1, %xmm0, %xmm0 # xmm0 = xmm0[1],xmm1[1]
vmovdqa %xmm0, 0x90(%rsp)
vmovdqa 0x90(%rsp), %xmm0
vmovdqa %xmm0, %xmm0
movq 0x80(%rsp), %rax
vmovdqa %ymm0, 0x60(%rax)
addq $0x358, %rsp # imm = 0x358
vzeroupper
retq
nopw (%rax,%rax)
| /m-ab-s[P]aom/av1/common/x86/warp_plane_avx2.c |
prepare_warp_horizontal_filter_avx2 | static inline void prepare_warp_horizontal_filter_avx2(
const uint8_t *ref, __m256i *horz_out, int stride, int32_t ix4, int32_t iy4,
int32_t sx4, int alpha, int beta, int p_height, int height, int i,
const __m256i *round_const, const __m128i *shift,
const __m256i *shuffle_src) {
if (alpha == 0 && beta == 0)
warp_horizontal_filter_alpha0_beta0_avx2(
ref, horz_out, stride, ix4, iy4, sx4, alpha, beta, p_height, height, i,
round_const, shift, shuffle_src);
else if (alpha == 0 && beta != 0)
warp_horizontal_filter_alpha0_avx2(ref, horz_out, stride, ix4, iy4, sx4,
alpha, beta, p_height, height, i,
round_const, shift, shuffle_src);
else if (alpha != 0 && beta == 0)
warp_horizontal_filter_beta0_avx2(ref, horz_out, stride, ix4, iy4, sx4,
alpha, beta, p_height, height, i,
round_const, shift, shuffle_src);
else
warp_horizontal_filter_avx2(ref, horz_out, stride, ix4, iy4, sx4, alpha,
beta, p_height, height, i, round_const, shift,
shuffle_src);
} | pushq %rbp
pushq %r15
pushq %r14
pushq %r12
pushq %rbx
subq $0x60, %rsp
movq 0xc8(%rsp), %rax
movq 0xc0(%rsp), %rax
movq 0xb8(%rsp), %rax
movl 0xb0(%rsp), %eax
movl 0xa8(%rsp), %eax
movl 0xa0(%rsp), %eax
movl 0x98(%rsp), %eax
movl 0x90(%rsp), %eax
movq %rdi, 0x58(%rsp)
movq %rsi, 0x50(%rsp)
movl %edx, 0x4c(%rsp)
movl %ecx, 0x48(%rsp)
movl %r8d, 0x44(%rsp)
movl %r9d, 0x40(%rsp)
cmpl $0x0, 0x90(%rsp)
jne 0x935428
cmpl $0x0, 0x98(%rsp)
jne 0x935428
movq 0x58(%rsp), %rdi
movq 0x50(%rsp), %rsi
movl 0x4c(%rsp), %edx
movl 0x48(%rsp), %ecx
movl 0x44(%rsp), %r8d
movl 0x40(%rsp), %r9d
movl 0x90(%rsp), %r12d
movl 0x98(%rsp), %r15d
movl 0xa0(%rsp), %r14d
movl 0xa8(%rsp), %ebp
movl 0xb0(%rsp), %ebx
movq 0xb8(%rsp), %r11
movq 0xc0(%rsp), %r10
movq 0xc8(%rsp), %rax
movl %r12d, (%rsp)
movl %r15d, 0x8(%rsp)
movl %r14d, 0x10(%rsp)
movl %ebp, 0x18(%rsp)
movl %ebx, 0x20(%rsp)
movq %r11, 0x28(%rsp)
movq %r10, 0x30(%rsp)
movq %rax, 0x38(%rsp)
callq 0x936570
jmp 0x9355fa
cmpl $0x0, 0x90(%rsp)
jne 0x9354cd
cmpl $0x0, 0x98(%rsp)
je 0x9354cd
movq 0x58(%rsp), %rdi
movq 0x50(%rsp), %rsi
movl 0x4c(%rsp), %edx
movl 0x48(%rsp), %ecx
movl 0x44(%rsp), %r8d
movl 0x40(%rsp), %r9d
movl 0x90(%rsp), %r12d
movl 0x98(%rsp), %r15d
movl 0xa0(%rsp), %r14d
movl 0xa8(%rsp), %ebp
movl 0xb0(%rsp), %ebx
movq 0xb8(%rsp), %r11
movq 0xc0(%rsp), %r10
movq 0xc8(%rsp), %rax
movl %r12d, (%rsp)
movl %r15d, 0x8(%rsp)
movl %r14d, 0x10(%rsp)
movl %ebp, 0x18(%rsp)
movl %ebx, 0x20(%rsp)
movq %r11, 0x28(%rsp)
movq %r10, 0x30(%rsp)
movq %rax, 0x38(%rsp)
callq 0x936880
jmp 0x9355f8
cmpl $0x0, 0x90(%rsp)
je 0x935572
cmpl $0x0, 0x98(%rsp)
jne 0x935572
movq 0x58(%rsp), %rdi
movq 0x50(%rsp), %rsi
movl 0x4c(%rsp), %edx
movl 0x48(%rsp), %ecx
movl 0x44(%rsp), %r8d
movl 0x40(%rsp), %r9d
movl 0x90(%rsp), %r12d
movl 0x98(%rsp), %r15d
movl 0xa0(%rsp), %r14d
movl 0xa8(%rsp), %ebp
movl 0xb0(%rsp), %ebx
movq 0xb8(%rsp), %r11
movq 0xc0(%rsp), %r10
movq 0xc8(%rsp), %rax
movl %r12d, (%rsp)
movl %r15d, 0x8(%rsp)
movl %r14d, 0x10(%rsp)
movl %ebp, 0x18(%rsp)
movl %ebx, 0x20(%rsp)
movq %r11, 0x28(%rsp)
movq %r10, 0x30(%rsp)
movq %rax, 0x38(%rsp)
callq 0x936bf0
jmp 0x9355f6
movq 0x58(%rsp), %rdi
movq 0x50(%rsp), %rsi
movl 0x4c(%rsp), %edx
movl 0x48(%rsp), %ecx
movl 0x44(%rsp), %r8d
movl 0x40(%rsp), %r9d
movl 0x90(%rsp), %r12d
movl 0x98(%rsp), %r15d
movl 0xa0(%rsp), %r14d
movl 0xa8(%rsp), %ebp
movl 0xb0(%rsp), %ebx
movq 0xb8(%rsp), %r11
movq 0xc0(%rsp), %r10
movq 0xc8(%rsp), %rax
movl %r12d, (%rsp)
movl %r15d, 0x8(%rsp)
movl %r14d, 0x10(%rsp)
movl %ebp, 0x18(%rsp)
movl %ebx, 0x20(%rsp)
movq %r11, 0x28(%rsp)
movq %r10, 0x30(%rsp)
movq %rax, 0x38(%rsp)
callq 0x936f00
jmp 0x9355f8
jmp 0x9355fa
addq $0x60, %rsp
popq %rbx
popq %r12
popq %r14
popq %r15
popq %rbp
retq
nopw (%rax,%rax)
| /m-ab-s[P]aom/av1/common/x86/warp_plane_avx2.c |
warp_horizontal_filter_avx2 | static inline void warp_horizontal_filter_avx2(
const uint8_t *ref, __m256i *horz_out, int stride, int32_t ix4, int32_t iy4,
int32_t sx4, int alpha, int beta, int p_height, int height, int i,
const __m256i *round_const, const __m128i *shift,
const __m256i *shuffle_src) {
int k, iy, sx, row = 0;
__m256i coeff[4];
for (k = -7; k <= (AOMMIN(8, p_height - i) - 2); k += 2) {
iy = iy4 + k;
iy = clamp(iy, 0, height - 1);
const __m128i src_0 =
_mm_loadu_si128((__m128i *)(ref + iy * stride + ix4 - 7));
iy = iy4 + k + 1;
iy = clamp(iy, 0, height - 1);
const __m128i src_1 =
_mm_loadu_si128((__m128i *)(ref + iy * stride + ix4 - 7));
const __m256i src_01 =
_mm256_inserti128_si256(_mm256_castsi128_si256(src_0), src_1, 0x1);
sx = sx4 + beta * (k + 4);
horizontal_filter_avx2(src_01, horz_out, sx, alpha, beta, row, shuffle_src,
round_const, shift);
row += 1;
}
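  // Filter the single source row left over after the two-row loop above; only
  // the low 128-bit lane of src_01 is loaded for it.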
iy = iy4 + k;
iy = clamp(iy, 0, height - 1);
const __m256i src_01 = _mm256_castsi128_si256(
_mm_loadu_si128((__m128i *)(ref + iy * stride + ix4 - 7)));
sx = sx4 + beta * (k + 4);
prepare_horizontal_filter_coeff(alpha, sx, coeff);
filter_src_pixels_avx2(src_01, horz_out, coeff, shuffle_src, round_const,
shift, row);
} | pushq %rbp
movq %rsp, %rbp
andq $-0x20, %rsp
subq $0x180, %rsp # imm = 0x180
movq 0x48(%rbp), %rax
movq 0x40(%rbp), %rax
movq 0x38(%rbp), %rax
movl 0x30(%rbp), %eax
movl 0x28(%rbp), %eax
movl 0x20(%rbp), %eax
movl 0x18(%rbp), %eax
movl 0x10(%rbp), %eax
movq %rdi, 0x130(%rsp)
movq %rsi, 0x128(%rsp)
movl %edx, 0x124(%rsp)
movl %ecx, 0x120(%rsp)
movl %r8d, 0x11c(%rsp)
movl %r9d, 0x118(%rsp)
movl $0x0, 0x108(%rsp)
movl $0xfffffff9, 0x114(%rsp) # imm = 0xFFFFFFF9
movl 0x114(%rsp), %eax
movl %eax, 0x1c(%rsp)
movl 0x20(%rbp), %ecx
subl 0x30(%rbp), %ecx
movl $0x8, %eax
cmpl %ecx, %eax
jge 0x936f93
movl $0x8, %eax
movl %eax, 0x18(%rsp)
jmp 0x936f9d
movl 0x20(%rbp), %eax
subl 0x30(%rbp), %eax
movl %eax, 0x18(%rsp)
movl 0x1c(%rsp), %eax
movl 0x18(%rsp), %ecx
subl $0x2, %ecx
cmpl %ecx, %eax
jg 0x93715d
movl 0x11c(%rsp), %eax
movl 0x114(%rsp), %ecx
addl %ecx, %eax
movl %eax, 0x110(%rsp)
movl 0x110(%rsp), %edi
movl 0x28(%rbp), %edx
decl %edx
xorl %esi, %esi
movl %esi, 0x14(%rsp)
vzeroupper
callq 0x9348a0
movl 0x14(%rsp), %esi
movl %eax, 0x110(%rsp)
movq 0x130(%rsp), %rcx
movl 0x110(%rsp), %eax
movl 0x124(%rsp), %edx
imull %edx, %eax
cltq
addq %rax, %rcx
movslq 0x120(%rsp), %rax
leaq -0x7(%rax,%rcx), %rax
movq %rax, 0x148(%rsp)
movq 0x148(%rsp), %rax
vmovdqu (%rax), %xmm0
vmovdqa %xmm0, 0x70(%rsp)
movl 0x11c(%rsp), %edx
movl 0x114(%rsp), %eax
movl %eax, %ecx
movl %edx, %eax
leal 0x1(%rax,%rcx), %eax
movl %eax, 0x110(%rsp)
movl 0x110(%rsp), %edi
movl 0x28(%rbp), %edx
decl %edx
callq 0x9348a0
movl %eax, 0x110(%rsp)
movq 0x130(%rsp), %rcx
movl 0x110(%rsp), %eax
movl 0x124(%rsp), %edx
imull %edx, %eax
cltq
addq %rax, %rcx
movslq 0x120(%rsp), %rax
leaq -0x7(%rax,%rcx), %rax
movq %rax, 0x140(%rsp)
movq 0x140(%rsp), %rax
vmovdqu (%rax), %xmm0
vmovdqa %xmm0, 0x60(%rsp)
vmovdqa 0x70(%rsp), %xmm0
vmovdqa %xmm0, 0x160(%rsp)
vmovdqa 0x160(%rsp), %xmm2
vmovdqa 0x60(%rsp), %xmm1
vmovaps %xmm2, %xmm0
vinserti128 $0x1, %xmm1, %ymm0, %ymm0
vmovdqa %ymm0, 0x40(%rsp)
movl 0x118(%rsp), %eax
movl 0x18(%rbp), %ecx
movl 0x114(%rsp), %edx
addl $0x4, %edx
imull %edx, %ecx
addl %ecx, %eax
movl %eax, 0x10c(%rsp)
vmovdqa 0x40(%rsp), %ymm0
movq 0x128(%rsp), %rdi
movl 0x10c(%rsp), %esi
movl 0x10(%rbp), %edx
movl 0x18(%rbp), %ecx
movl 0x108(%rsp), %r8d
movq 0x48(%rbp), %r9
movq 0x38(%rbp), %r10
movq 0x40(%rbp), %rax
movq %r10, (%rsp)
movq %rax, 0x8(%rsp)
callq 0x9348f0
movl 0x108(%rsp), %eax
addl $0x1, %eax
movl %eax, 0x108(%rsp)
movl 0x114(%rsp), %eax
addl $0x2, %eax
movl %eax, 0x114(%rsp)
jmp 0x936f6e
movl 0x11c(%rsp), %eax
movl 0x114(%rsp), %ecx
addl %ecx, %eax
movl %eax, 0x110(%rsp)
movl 0x110(%rsp), %edi
movl 0x28(%rbp), %edx
decl %edx
xorl %esi, %esi
vzeroupper
callq 0x9348a0
movl %eax, 0x110(%rsp)
movq 0x130(%rsp), %rcx
movl 0x110(%rsp), %eax
movl 0x124(%rsp), %edx
imull %edx, %eax
cltq
addq %rax, %rcx
movslq 0x120(%rsp), %rax
leaq -0x7(%rax,%rcx), %rax
movq %rax, 0x138(%rsp)
movq 0x138(%rsp), %rax
vmovdqu (%rax), %xmm0
vmovdqa %xmm0, 0x150(%rsp)
vmovdqa 0x150(%rsp), %xmm1
vmovaps %xmm1, %xmm0
vmovdqa %ymm0, 0x20(%rsp)
movl 0x118(%rsp), %eax
movl 0x18(%rbp), %ecx
movl 0x114(%rsp), %edx
addl $0x4, %edx
imull %edx, %ecx
addl %ecx, %eax
movl %eax, 0x10c(%rsp)
movl 0x10(%rbp), %edi
movl 0x10c(%rsp), %esi
leaq 0x80(%rsp), %rdx
vzeroupper
callq 0x9349a0
vmovdqa 0x20(%rsp), %ymm0
movq 0x128(%rsp), %rdi
leaq 0x80(%rsp), %rsi
movq 0x48(%rbp), %rdx
movq 0x38(%rbp), %rcx
movq 0x40(%rbp), %r8
movl 0x108(%rsp), %r9d
callq 0x934f50
movq %rbp, %rsp
popq %rbp
vzeroupper
retq
nopl (%rax)
| /m-ab-s[P]aom/av1/common/x86/warp_plane_avx2.c |
prepare_horizontal_filter_coeff_alpha0_avx2 | static inline void prepare_horizontal_filter_coeff_alpha0_avx2(int beta, int sx,
__m256i *coeff) {
const __m128i tmp_0 =
_mm_loadl_epi64((__m128i *)&av1_filter_8bit[sx >> WARPEDDIFF_PREC_BITS]);
const __m128i tmp_1 = _mm_loadl_epi64(
(__m128i *)&av1_filter_8bit[(sx + beta) >> WARPEDDIFF_PREC_BITS]);
const __m256i res_0 =
_mm256_inserti128_si256(_mm256_castsi128_si256(tmp_0), tmp_1, 0x1);
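  // With alpha == 0 the kernel does not vary across the row, so one kernel per
  // row (current row in the low lane, next row in the high lane) is broadcast
  // into tap-pair order by the shuffle masks below.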
coeff[0] = _mm256_shuffle_epi8(
res_0, _mm256_load_si256((__m256i *)shuffle_alpha0_mask01_avx2));
coeff[1] = _mm256_shuffle_epi8(
res_0, _mm256_load_si256((__m256i *)shuffle_alpha0_mask23_avx2));
coeff[2] = _mm256_shuffle_epi8(
res_0, _mm256_load_si256((__m256i *)shuffle_alpha0_mask45_avx2));
coeff[3] = _mm256_shuffle_epi8(
res_0, _mm256_load_si256((__m256i *)shuffle_alpha0_mask67_avx2));
} | pushq %rbp
movq %rsp, %rbp
andq $-0x20, %rsp
subq $0x1e0, %rsp # imm = 0x1E0
movl %edi, 0x4c(%rsp)
movl %esi, 0x48(%rsp)
movq %rdx, 0x40(%rsp)
movl 0x48(%rsp), %eax
sarl $0xa, %eax
movslq %eax, %rcx
leaq 0x1e22c3(%rip), %rax # 0xb19550
leaq (%rax,%rcx,8), %rcx
movq %rcx, 0xb8(%rsp)
movq 0xb8(%rsp), %rcx
vmovq (%rcx), %xmm0
vmovdqa %xmm0, 0xa0(%rsp)
vmovdqa 0xa0(%rsp), %xmm0
vmovdqa %xmm0, 0x30(%rsp)
movl 0x48(%rsp), %ecx
movl 0x4c(%rsp), %edx
addl %edx, %ecx
sarl $0xa, %ecx
movslq %ecx, %rcx
leaq (%rax,%rcx,8), %rax
movq %rax, 0x98(%rsp)
movq 0x98(%rsp), %rax
vmovq (%rax), %xmm0
vmovdqa %xmm0, 0x80(%rsp)
vmovdqa 0x80(%rsp), %xmm0
vmovdqa %xmm0, 0x20(%rsp)
vmovdqa 0x30(%rsp), %xmm0
vmovdqa %xmm0, 0x70(%rsp)
vmovdqa 0x70(%rsp), %xmm2
vmovdqa 0x20(%rsp), %xmm1
vmovaps %xmm2, %xmm0
vinserti128 $0x1, %xmm1, %ymm0, %ymm0
vmovdqa %ymm0, (%rsp)
vmovdqa (%rsp), %ymm1
leaq 0x1e3d90(%rip), %rax # 0xb1b0c0
movq %rax, 0x68(%rsp)
movq 0x68(%rsp), %rax
vmovdqa (%rax), %ymm0
vmovdqa %ymm1, 0x1a0(%rsp)
vmovdqa %ymm0, 0x180(%rsp)
vmovdqa 0x1a0(%rsp), %ymm0
vmovdqa 0x180(%rsp), %ymm1
vpshufb %ymm1, %ymm0, %ymm0
movq 0x40(%rsp), %rax
vmovdqa %ymm0, (%rax)
vmovdqa (%rsp), %ymm1
leaq 0x1e3d64(%rip), %rax # 0xb1b0e0
movq %rax, 0x60(%rsp)
movq 0x60(%rsp), %rax
vmovdqa (%rax), %ymm0
vmovdqa %ymm1, 0x160(%rsp)
vmovdqa %ymm0, 0x140(%rsp)
vmovdqa 0x160(%rsp), %ymm0
vmovdqa 0x140(%rsp), %ymm1
vpshufb %ymm1, %ymm0, %ymm0
movq 0x40(%rsp), %rax
vmovdqa %ymm0, 0x20(%rax)
vmovdqa (%rsp), %ymm1
leaq 0x1e3d37(%rip), %rax # 0xb1b100
movq %rax, 0x58(%rsp)
movq 0x58(%rsp), %rax
vmovdqa (%rax), %ymm0
vmovdqa %ymm1, 0x120(%rsp)
vmovdqa %ymm0, 0x100(%rsp)
vmovdqa 0x120(%rsp), %ymm0
vmovdqa 0x100(%rsp), %ymm1
vpshufb %ymm1, %ymm0, %ymm0
movq 0x40(%rsp), %rax
vmovdqa %ymm0, 0x40(%rax)
vmovdqa (%rsp), %ymm1
leaq 0x1e3d0a(%rip), %rax # 0xb1b120
movq %rax, 0x50(%rsp)
movq 0x50(%rsp), %rax
vmovdqa (%rax), %ymm0
vmovdqa %ymm1, 0xe0(%rsp)
vmovdqa %ymm0, 0xc0(%rsp)
vmovdqa 0xe0(%rsp), %ymm0
vmovdqa 0xc0(%rsp), %ymm1
vpshufb %ymm1, %ymm0, %ymm0
movq 0x40(%rsp), %rax
vmovdqa %ymm0, 0x60(%rax)
movq %rbp, %rsp
popq %rbp
vzeroupper
retq
nop
| /m-ab-s[P]aom/av1/common/x86/warp_plane_avx2.c |
convolve | static inline __m256i convolve(const __m256i *const s,
const __m256i *const coeffs) {
const __m256i res_0 = _mm256_madd_epi16(s[0], coeffs[0]);
const __m256i res_1 = _mm256_madd_epi16(s[1], coeffs[1]);
const __m256i res_2 = _mm256_madd_epi16(s[2], coeffs[2]);
const __m256i res_3 = _mm256_madd_epi16(s[3], coeffs[3]);
const __m256i res = _mm256_add_epi32(_mm256_add_epi32(res_0, res_1),
_mm256_add_epi32(res_2, res_3));
return res;
} | pushq %rbp
movq %rsp, %rbp
andq $-0x20, %rsp
subq $0x2a0, %rsp # imm = 0x2A0
movq %rdi, 0xb8(%rsp)
movq %rsi, 0xb0(%rsp)
movq 0xb8(%rsp), %rax
vmovdqa (%rax), %ymm1
movq 0xb0(%rsp), %rax
vmovdqa (%rax), %ymm0
vmovdqa %ymm1, 0x260(%rsp)
vmovdqa %ymm0, 0x240(%rsp)
vmovdqa 0x260(%rsp), %ymm0
vmovdqa 0x240(%rsp), %ymm1
vpmaddwd %ymm1, %ymm0, %ymm0
vmovdqa %ymm0, 0x80(%rsp)
movq 0xb8(%rsp), %rax
vmovdqa 0x20(%rax), %ymm1
movq 0xb0(%rsp), %rax
vmovdqa 0x20(%rax), %ymm0
vmovdqa %ymm1, 0x220(%rsp)
vmovdqa %ymm0, 0x200(%rsp)
vmovdqa 0x220(%rsp), %ymm0
vmovdqa 0x200(%rsp), %ymm1
vpmaddwd %ymm1, %ymm0, %ymm0
vmovdqa %ymm0, 0x60(%rsp)
movq 0xb8(%rsp), %rax
vmovdqa 0x40(%rax), %ymm1
movq 0xb0(%rsp), %rax
vmovdqa 0x40(%rax), %ymm0
vmovdqa %ymm1, 0x1e0(%rsp)
vmovdqa %ymm0, 0x1c0(%rsp)
vmovdqa 0x1e0(%rsp), %ymm0
vmovdqa 0x1c0(%rsp), %ymm1
vpmaddwd %ymm1, %ymm0, %ymm0
vmovdqa %ymm0, 0x40(%rsp)
movq 0xb8(%rsp), %rax
vmovdqa 0x60(%rax), %ymm1
movq 0xb0(%rsp), %rax
vmovdqa 0x60(%rax), %ymm0
vmovdqa %ymm1, 0x1a0(%rsp)
vmovdqa %ymm0, 0x180(%rsp)
vmovdqa 0x1a0(%rsp), %ymm0
vmovdqa 0x180(%rsp), %ymm1
vpmaddwd %ymm1, %ymm0, %ymm0
vmovdqa %ymm0, 0x20(%rsp)
vmovdqa 0x80(%rsp), %ymm1
vmovdqa 0x60(%rsp), %ymm0
vmovdqa %ymm1, 0x160(%rsp)
vmovdqa %ymm0, 0x140(%rsp)
vmovdqa 0x160(%rsp), %ymm0
vmovdqa 0x140(%rsp), %ymm1
vpaddd %ymm1, %ymm0, %ymm1
vmovdqa 0x40(%rsp), %ymm2
vmovdqa 0x20(%rsp), %ymm0
vmovdqa %ymm2, 0x120(%rsp)
vmovdqa %ymm0, 0x100(%rsp)
vmovdqa 0x120(%rsp), %ymm0
vmovdqa 0x100(%rsp), %ymm2
vpaddd %ymm2, %ymm0, %ymm0
vmovdqa %ymm1, 0xe0(%rsp)
vmovdqa %ymm0, 0xc0(%rsp)
vmovdqa 0xe0(%rsp), %ymm0
vmovdqa 0xc0(%rsp), %ymm1
vpaddd %ymm1, %ymm0, %ymm0
vmovdqa %ymm0, (%rsp)
vmovdqa (%rsp), %ymm0
movq %rbp, %rsp
popq %rbp
retq
nopw %cs:(%rax,%rax)
| /m-ab-s[P]aom/aom_dsp/x86/convolve_avx2.h |
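A note for readers tracing the AVX2 listings: convolve() above is the 8-tap core shared by the convolution kernels in convolve_avx2.h; each _mm256_madd_epi16 multiplies one pair of 16-bit taps per 32-bit lane and the final adds accumulate the four partial sums. As a point of reference, a minimal scalar sketch of what a single output lane computes is given below (the helper name convolve_8tap_scalar is hypothetical, not libaom API):

#include <stdint.h>

/* Scalar model of one 32-bit lane of convolve(): an 8-tap dot product.
 * In the SIMD version, s[k] and coeffs[k] carry taps 2k and 2k+1 of every
 * lane, so the four madd results sum to exactly this. */
static int32_t convolve_8tap_scalar(const int16_t src[8],
                                    const int16_t coef[8]) {
  int32_t sum = 0;
  for (int t = 0; t < 8; ++t) sum += (int32_t)src[t] * (int32_t)coef[t];
  return sum; /* callers add a rounding constant and shift afterwards */
}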
av1_highbd_dist_wtd_convolve_2d_copy_avx2 | void av1_highbd_dist_wtd_convolve_2d_copy_avx2(const uint16_t *src,
int src_stride, uint16_t *dst0,
int dst_stride0, int w, int h,
ConvolveParams *conv_params,
int bd) {
CONV_BUF_TYPE *dst = conv_params->dst;
int dst_stride = conv_params->dst_stride;
const int bits =
FILTER_BITS * 2 - conv_params->round_1 - conv_params->round_0;
const __m128i left_shift = _mm_cvtsi32_si128(bits);
const int do_average = conv_params->do_average;
const int use_dist_wtd_comp_avg = conv_params->use_dist_wtd_comp_avg;
const int w0 = conv_params->fwd_offset;
const int w1 = conv_params->bck_offset;
const __m256i wt0 = _mm256_set1_epi32(w0);
const __m256i wt1 = _mm256_set1_epi32(w1);
const __m256i zero = _mm256_setzero_si256();
int i, j;
const int offset_0 =
bd + 2 * FILTER_BITS - conv_params->round_0 - conv_params->round_1;
const int offset = (1 << offset_0) + (1 << (offset_0 - 1));
const __m256i offset_const = _mm256_set1_epi32(offset);
const __m256i offset_const_16b = _mm256_set1_epi16(offset);
const int rounding_shift =
2 * FILTER_BITS - conv_params->round_0 - conv_params->round_1;
const __m256i rounding_const = _mm256_set1_epi32((1 << rounding_shift) >> 1);
const __m256i clip_pixel_to_bd =
_mm256_set1_epi16(bd == 10 ? 1023 : (bd == 12 ? 4095 : 255));
assert(bits <= 4);
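  // Blocks whose width is a multiple of 16 are copied 16 pixels at a time, one
  // row per iteration; narrower widths take the two-rows-at-a-time path below,
  // eight pixels per step with 64-bit stores once fewer than eight remain.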
if (!(w % 16)) {
for (i = 0; i < h; i += 1) {
for (j = 0; j < w; j += 16) {
const __m256i src_16bit =
_mm256_loadu_si256((__m256i *)(&src[i * src_stride + j]));
const __m256i res = _mm256_sll_epi16(src_16bit, left_shift);
if (do_average) {
const __m256i data_0 =
_mm256_loadu_si256((__m256i *)(&dst[i * dst_stride + j]));
const __m256i data_ref_0_lo = _mm256_unpacklo_epi16(data_0, zero);
const __m256i data_ref_0_hi = _mm256_unpackhi_epi16(data_0, zero);
const __m256i res_32b_lo = _mm256_unpacklo_epi16(res, zero);
const __m256i res_unsigned_lo =
_mm256_add_epi32(res_32b_lo, offset_const);
const __m256i comp_avg_res_lo =
highbd_comp_avg(&data_ref_0_lo, &res_unsigned_lo, &wt0, &wt1,
use_dist_wtd_comp_avg);
const __m256i res_32b_hi = _mm256_unpackhi_epi16(res, zero);
const __m256i res_unsigned_hi =
_mm256_add_epi32(res_32b_hi, offset_const);
const __m256i comp_avg_res_hi =
highbd_comp_avg(&data_ref_0_hi, &res_unsigned_hi, &wt0, &wt1,
use_dist_wtd_comp_avg);
const __m256i round_result_lo = highbd_convolve_rounding(
&comp_avg_res_lo, &offset_const, &rounding_const, rounding_shift);
const __m256i round_result_hi = highbd_convolve_rounding(
&comp_avg_res_hi, &offset_const, &rounding_const, rounding_shift);
const __m256i res_16b =
_mm256_packus_epi32(round_result_lo, round_result_hi);
const __m256i res_clip = _mm256_min_epi16(res_16b, clip_pixel_to_bd);
_mm256_store_si256((__m256i *)(&dst0[i * dst_stride0 + j]), res_clip);
} else {
const __m256i res_unsigned_16b =
_mm256_adds_epu16(res, offset_const_16b);
_mm256_store_si256((__m256i *)(&dst[i * dst_stride + j]),
res_unsigned_16b);
}
}
}
} else if (!(w % 4)) {
for (i = 0; i < h; i += 2) {
for (j = 0; j < w; j += 8) {
const __m128i src_row_0 =
_mm_loadu_si128((__m128i *)(&src[i * src_stride + j]));
const __m128i src_row_1 =
_mm_loadu_si128((__m128i *)(&src[i * src_stride + j + src_stride]));
// since not all compilers yet support _mm256_set_m128i()
const __m256i src_10 = _mm256_insertf128_si256(
_mm256_castsi128_si256(src_row_0), src_row_1, 1);
const __m256i res = _mm256_sll_epi16(src_10, left_shift);
if (w - j < 8) {
if (do_average) {
const __m256i data_0 = _mm256_castsi128_si256(
_mm_loadl_epi64((__m128i *)(&dst[i * dst_stride + j])));
const __m256i data_1 = _mm256_castsi128_si256(_mm_loadl_epi64(
(__m128i *)(&dst[i * dst_stride + j + dst_stride])));
const __m256i data_01 =
_mm256_permute2x128_si256(data_0, data_1, 0x20);
const __m256i data_ref_0 = _mm256_unpacklo_epi16(data_01, zero);
const __m256i res_32b = _mm256_unpacklo_epi16(res, zero);
const __m256i res_unsigned_lo =
_mm256_add_epi32(res_32b, offset_const);
const __m256i comp_avg_res =
highbd_comp_avg(&data_ref_0, &res_unsigned_lo, &wt0, &wt1,
use_dist_wtd_comp_avg);
const __m256i round_result = highbd_convolve_rounding(
&comp_avg_res, &offset_const, &rounding_const, rounding_shift);
const __m256i res_16b =
_mm256_packus_epi32(round_result, round_result);
const __m256i res_clip =
_mm256_min_epi16(res_16b, clip_pixel_to_bd);
const __m128i res_0 = _mm256_castsi256_si128(res_clip);
const __m128i res_1 = _mm256_extracti128_si256(res_clip, 1);
_mm_storel_epi64((__m128i *)(&dst0[i * dst_stride0 + j]), res_0);
_mm_storel_epi64(
(__m128i *)(&dst0[i * dst_stride0 + j + dst_stride0]), res_1);
} else {
const __m256i res_unsigned_16b =
_mm256_adds_epu16(res, offset_const_16b);
const __m128i res_0 = _mm256_castsi256_si128(res_unsigned_16b);
const __m128i res_1 = _mm256_extracti128_si256(res_unsigned_16b, 1);
_mm_storel_epi64((__m128i *)(&dst[i * dst_stride + j]), res_0);
_mm_storel_epi64((__m128i *)(&dst[i * dst_stride + j + dst_stride]),
res_1);
}
} else {
if (do_average) {
const __m256i data_0 = _mm256_castsi128_si256(
_mm_loadu_si128((__m128i *)(&dst[i * dst_stride + j])));
const __m256i data_1 = _mm256_castsi128_si256(_mm_loadu_si128(
(__m128i *)(&dst[i * dst_stride + j + dst_stride])));
const __m256i data_01 =
_mm256_permute2x128_si256(data_0, data_1, 0x20);
const __m256i data_ref_0_lo = _mm256_unpacklo_epi16(data_01, zero);
const __m256i data_ref_0_hi = _mm256_unpackhi_epi16(data_01, zero);
const __m256i res_32b_lo = _mm256_unpacklo_epi16(res, zero);
const __m256i res_unsigned_lo =
_mm256_add_epi32(res_32b_lo, offset_const);
const __m256i comp_avg_res_lo =
highbd_comp_avg(&data_ref_0_lo, &res_unsigned_lo, &wt0, &wt1,
use_dist_wtd_comp_avg);
const __m256i res_32b_hi = _mm256_unpackhi_epi16(res, zero);
const __m256i res_unsigned_hi =
_mm256_add_epi32(res_32b_hi, offset_const);
const __m256i comp_avg_res_hi =
highbd_comp_avg(&data_ref_0_hi, &res_unsigned_hi, &wt0, &wt1,
use_dist_wtd_comp_avg);
const __m256i round_result_lo =
highbd_convolve_rounding(&comp_avg_res_lo, &offset_const,
&rounding_const, rounding_shift);
const __m256i round_result_hi =
highbd_convolve_rounding(&comp_avg_res_hi, &offset_const,
&rounding_const, rounding_shift);
const __m256i res_16b =
_mm256_packus_epi32(round_result_lo, round_result_hi);
const __m256i res_clip =
_mm256_min_epi16(res_16b, clip_pixel_to_bd);
const __m128i res_0 = _mm256_castsi256_si128(res_clip);
const __m128i res_1 = _mm256_extracti128_si256(res_clip, 1);
_mm_store_si128((__m128i *)(&dst0[i * dst_stride0 + j]), res_0);
_mm_store_si128(
(__m128i *)(&dst0[i * dst_stride0 + j + dst_stride0]), res_1);
} else {
const __m256i res_unsigned_16b =
_mm256_adds_epu16(res, offset_const_16b);
const __m128i res_0 = _mm256_castsi256_si128(res_unsigned_16b);
const __m128i res_1 = _mm256_extracti128_si256(res_unsigned_16b, 1);
_mm_store_si128((__m128i *)(&dst[i * dst_stride + j]), res_0);
_mm_store_si128((__m128i *)(&dst[i * dst_stride + j + dst_stride]),
res_1);
}
}
}
}
}
} | pushq %rbp
movq %rsp, %rbp
andq $-0x20, %rsp
subq $0x1380, %rsp # imm = 0x1380
movl 0x18(%rbp), %eax
movq 0x10(%rbp), %rax
movq %rdi, 0x828(%rsp)
movl %esi, 0x824(%rsp)
movq %rdx, 0x818(%rsp)
movl %ecx, 0x814(%rsp)
movl %r8d, 0x810(%rsp)
movl %r9d, 0x80c(%rsp)
movq 0x10(%rbp), %rax
movq 0x8(%rax), %rax
movq %rax, 0x800(%rsp)
movq 0x10(%rbp), %rax
movl 0x10(%rax), %eax
movl %eax, 0x7fc(%rsp)
movq 0x10(%rbp), %rcx
movl 0x14(%rcx), %eax
movl 0x18(%rcx), %ecx
addl %eax, %ecx
movl $0xe, %eax
movl %eax, 0x50(%rsp)
subl %ecx, %eax
movl %eax, 0x7f8(%rsp)
movl 0x7f8(%rsp), %eax
movl %eax, 0x84c(%rsp)
vmovd 0x84c(%rsp), %xmm0
vmovdqa %xmm0, 0x830(%rsp)
vmovdqa 0x830(%rsp), %xmm0
vmovdqa %xmm0, 0x7e0(%rsp)
movq 0x10(%rbp), %rax
movl (%rax), %eax
movl %eax, 0x7dc(%rsp)
movq 0x10(%rbp), %rax
movl 0x24(%rax), %eax
movl %eax, 0x7d8(%rsp)
movq 0x10(%rbp), %rax
movl 0x28(%rax), %eax
movl %eax, 0x7d4(%rsp)
movq 0x10(%rbp), %rax
movl 0x2c(%rax), %eax
movl %eax, 0x7d0(%rsp)
movl 0x7d4(%rsp), %eax
movl %eax, 0x85c(%rsp)
movl 0x85c(%rsp), %eax
movl %eax, 0x44(%rsp)
movl %eax, 0x121c(%rsp)
movl %eax, 0x1218(%rsp)
movl %eax, 0x1214(%rsp)
movl %eax, 0x1210(%rsp)
movl %eax, 0x120c(%rsp)
movl %eax, 0x1208(%rsp)
movl %eax, 0x1204(%rsp)
movl %eax, 0x1200(%rsp)
movl 0x1204(%rsp), %r8d
movl 0x1208(%rsp), %edi
movl 0x120c(%rsp), %esi
movl 0x1214(%rsp), %edx
movl 0x1218(%rsp), %ecx
movl 0x121c(%rsp), %eax
vmovd 0x1200(%rsp), %xmm0
vpinsrd $0x1, %r8d, %xmm0, %xmm0
vpinsrd $0x2, %edi, %xmm0, %xmm0
vpinsrd $0x3, %esi, %xmm0, %xmm0
vmovd 0x1210(%rsp), %xmm1
vpinsrd $0x1, %edx, %xmm1, %xmm1
vpinsrd $0x2, %ecx, %xmm1, %xmm1
vpinsrd $0x3, %eax, %xmm1, %xmm1
vmovdqa %xmm1, 0x11f0(%rsp)
vmovdqa %xmm0, 0x11e0(%rsp)
vmovaps 0x11e0(%rsp), %ymm0
vmovaps %ymm0, 0x7a0(%rsp)
movl 0x7d0(%rsp), %eax
movl %eax, 0x858(%rsp)
movl 0x858(%rsp), %eax
movl %eax, 0x48(%rsp)
movl %eax, 0x125c(%rsp)
movl %eax, 0x1258(%rsp)
movl %eax, 0x1254(%rsp)
movl %eax, 0x1250(%rsp)
movl %eax, 0x124c(%rsp)
movl %eax, 0x1248(%rsp)
movl %eax, 0x1244(%rsp)
movl %eax, 0x1240(%rsp)
movl 0x1244(%rsp), %r8d
movl 0x1248(%rsp), %edi
movl 0x124c(%rsp), %esi
movl 0x1254(%rsp), %edx
movl 0x1258(%rsp), %ecx
movl 0x125c(%rsp), %eax
vmovd 0x1240(%rsp), %xmm0
vpinsrd $0x1, %r8d, %xmm0, %xmm0
vpinsrd $0x2, %edi, %xmm0, %xmm0
vpinsrd $0x3, %esi, %xmm0, %xmm0
vmovd 0x1250(%rsp), %xmm1
vpinsrd $0x1, %edx, %xmm1, %xmm1
vpinsrd $0x2, %ecx, %xmm1, %xmm1
vpinsrd $0x3, %eax, %xmm1, %xmm1
vmovdqa %xmm1, 0x1230(%rsp)
vmovdqa %xmm0, 0x1220(%rsp)
vmovaps 0x1220(%rsp), %ymm0
vmovaps %ymm0, 0x780(%rsp)
vxorps %xmm0, %xmm0, %xmm0
vmovaps %ymm0, 0x860(%rsp)
vmovaps 0x860(%rsp), %ymm0
vmovaps %ymm0, 0x760(%rsp)
movl 0x18(%rbp), %eax
movq 0x10(%rbp), %rcx
movl 0x14(%rcx), %edx
movl 0x18(%rcx), %ecx
subl %edx, %eax
subl %ecx, %eax
addl $0xe, %eax
movl %eax, 0x754(%rsp)
movb 0x754(%rsp), %cl
movb %cl, 0x4f(%rsp)
movl $0x1, %eax
movl %eax, %edx
shll %cl, %edx
movb 0x4f(%rsp), %cl
decb %cl
movl %eax, %esi
shll %cl, %esi
movl 0x50(%rsp), %ecx
addl %esi, %edx
movl %edx, 0x750(%rsp)
movl 0x750(%rsp), %edx
movl %edx, 0x854(%rsp)
movl 0x854(%rsp), %edx
movl %edx, 0x54(%rsp)
movl %edx, 0x129c(%rsp)
movl %edx, 0x1298(%rsp)
movl %edx, 0x1294(%rsp)
movl %edx, 0x1290(%rsp)
movl %edx, 0x128c(%rsp)
movl %edx, 0x1288(%rsp)
movl %edx, 0x1284(%rsp)
movl %edx, 0x1280(%rsp)
movl 0x1284(%rsp), %r10d
movl 0x1288(%rsp), %r9d
movl 0x128c(%rsp), %r8d
movl 0x1294(%rsp), %edi
movl 0x1298(%rsp), %esi
movl 0x129c(%rsp), %edx
vmovd 0x1280(%rsp), %xmm0
vpinsrd $0x1, %r10d, %xmm0, %xmm0
vpinsrd $0x2, %r9d, %xmm0, %xmm0
vpinsrd $0x3, %r8d, %xmm0, %xmm0
vmovd 0x1290(%rsp), %xmm1
vpinsrd $0x1, %edi, %xmm1, %xmm1
vpinsrd $0x2, %esi, %xmm1, %xmm1
vpinsrd $0x3, %edx, %xmm1, %xmm1
vmovdqa %xmm1, 0x1270(%rsp)
vmovdqa %xmm0, 0x1260(%rsp)
vmovaps 0x1260(%rsp), %ymm0
vmovaps %ymm0, 0x720(%rsp)
movw 0x750(%rsp), %dx
movw %dx, 0x89e(%rsp)
movw 0x89e(%rsp), %dx
movw %dx, 0x5a(%rsp)
movw %dx, 0x131e(%rsp)
movw %dx, 0x131c(%rsp)
movw %dx, 0x131a(%rsp)
movw %dx, 0x1318(%rsp)
movw %dx, 0x1316(%rsp)
movw %dx, 0x1314(%rsp)
movw %dx, 0x1312(%rsp)
movw %dx, 0x1310(%rsp)
movw %dx, 0x130e(%rsp)
movw %dx, 0x130c(%rsp)
movw %dx, 0x130a(%rsp)
movw %dx, 0x1308(%rsp)
movw %dx, 0x1306(%rsp)
movw %dx, 0x1304(%rsp)
movw %dx, 0x1302(%rsp)
movw %dx, 0x1300(%rsp)
movzwl 0x1300(%rsp), %edx
vmovd %edx, %xmm0
movzwl 0x1302(%rsp), %edx
vpinsrw $0x1, %edx, %xmm0, %xmm0
movzwl 0x1304(%rsp), %edx
vpinsrw $0x2, %edx, %xmm0, %xmm0
movzwl 0x1306(%rsp), %edx
vpinsrw $0x3, %edx, %xmm0, %xmm0
movzwl 0x1308(%rsp), %edx
vpinsrw $0x4, %edx, %xmm0, %xmm0
movzwl 0x130a(%rsp), %edx
vpinsrw $0x5, %edx, %xmm0, %xmm0
movzwl 0x130c(%rsp), %edx
vpinsrw $0x6, %edx, %xmm0, %xmm0
movzwl 0x130e(%rsp), %edx
vpinsrw $0x7, %edx, %xmm0, %xmm0
movzwl 0x1310(%rsp), %edx
vmovd %edx, %xmm1
movzwl 0x1312(%rsp), %edx
vpinsrw $0x1, %edx, %xmm1, %xmm1
movzwl 0x1314(%rsp), %edx
vpinsrw $0x2, %edx, %xmm1, %xmm1
movzwl 0x1316(%rsp), %edx
vpinsrw $0x3, %edx, %xmm1, %xmm1
movzwl 0x1318(%rsp), %edx
vpinsrw $0x4, %edx, %xmm1, %xmm1
movzwl 0x131a(%rsp), %edx
vpinsrw $0x5, %edx, %xmm1, %xmm1
movzwl 0x131c(%rsp), %edx
vpinsrw $0x6, %edx, %xmm1, %xmm1
movzwl 0x131e(%rsp), %edx
vpinsrw $0x7, %edx, %xmm1, %xmm1
vmovdqa %xmm1, 0x12f0(%rsp)
vmovdqa %xmm0, 0x12e0(%rsp)
vmovaps 0x12e0(%rsp), %ymm0
vmovaps %ymm0, 0x700(%rsp)
movq 0x10(%rbp), %rsi
movl 0x14(%rsi), %edx
movl 0x18(%rsi), %esi
addl %esi, %edx
subl %edx, %ecx
movl %ecx, 0x6fc(%rsp)
movb 0x6fc(%rsp), %cl
shll %cl, %eax
sarl %eax
movl %eax, 0x850(%rsp)
movl 0x850(%rsp), %eax
movl %eax, 0x5c(%rsp)
movl %eax, 0x12dc(%rsp)
movl %eax, 0x12d8(%rsp)
movl %eax, 0x12d4(%rsp)
movl %eax, 0x12d0(%rsp)
movl %eax, 0x12cc(%rsp)
movl %eax, 0x12c8(%rsp)
movl %eax, 0x12c4(%rsp)
movl %eax, 0x12c0(%rsp)
movl 0x12c4(%rsp), %edx
movl 0x12c8(%rsp), %ecx
movl 0x12cc(%rsp), %eax
movl 0x12d4(%rsp), %r8d
movl 0x12d8(%rsp), %edi
movl 0x12dc(%rsp), %esi
vmovd 0x12d0(%rsp), %xmm0
vpinsrd $0x1, %r8d, %xmm0, %xmm0
vpinsrd $0x2, %edi, %xmm0, %xmm0
vpinsrd $0x3, %esi, %xmm0, %xmm1
vmovd 0x12c0(%rsp), %xmm0
vpinsrd $0x1, %edx, %xmm0, %xmm0
vpinsrd $0x2, %ecx, %xmm0, %xmm0
vpinsrd $0x3, %eax, %xmm0, %xmm2
vmovaps %xmm2, %xmm0
vinserti128 $0x1, %xmm1, %ymm0, %ymm0
vmovdqa %ymm0, 0x12a0(%rsp)
vmovdqa 0x12a0(%rsp), %ymm0
vmovdqa %ymm0, 0x6c0(%rsp)
cmpl $0xa, 0x18(%rbp)
jne 0x93f8f2
movl $0x3ff, %eax # imm = 0x3FF
movl %eax, 0x40(%rsp)
jmp 0x93f909
movl 0x18(%rbp), %edx
movl $0xff, %eax
movl $0xfff, %ecx # imm = 0xFFF
cmpl $0xc, %edx
cmovel %ecx, %eax
movl %eax, 0x40(%rsp)
movl 0x40(%rsp), %eax
movw %ax, 0x89c(%rsp)
movw 0x89c(%rsp), %ax
movw %ax, 0x3e(%rsp)
movw %ax, 0x136e(%rsp)
movw %ax, 0x136c(%rsp)
movw %ax, 0x136a(%rsp)
movw %ax, 0x1368(%rsp)
movw %ax, 0x1366(%rsp)
movw %ax, 0x1364(%rsp)
movw %ax, 0x1362(%rsp)
movw %ax, 0x1360(%rsp)
movw %ax, 0x135e(%rsp)
movw %ax, 0x135c(%rsp)
movw %ax, 0x135a(%rsp)
movw %ax, 0x1358(%rsp)
movw %ax, 0x1356(%rsp)
movw %ax, 0x1354(%rsp)
movw %ax, 0x1352(%rsp)
movw %ax, 0x1350(%rsp)
movzwl 0x1360(%rsp), %eax
vmovd %eax, %xmm0
movzwl 0x1362(%rsp), %eax
vpinsrw $0x1, %eax, %xmm0, %xmm0
movzwl 0x1364(%rsp), %eax
vpinsrw $0x2, %eax, %xmm0, %xmm0
movzwl 0x1366(%rsp), %eax
vpinsrw $0x3, %eax, %xmm0, %xmm0
movzwl 0x1368(%rsp), %eax
vpinsrw $0x4, %eax, %xmm0, %xmm0
movzwl 0x136a(%rsp), %eax
vpinsrw $0x5, %eax, %xmm0, %xmm0
movzwl 0x136c(%rsp), %eax
vpinsrw $0x6, %eax, %xmm0, %xmm0
movzwl 0x136e(%rsp), %eax
vpinsrw $0x7, %eax, %xmm0, %xmm1
movzwl 0x1350(%rsp), %eax
vmovd %eax, %xmm0
movzwl 0x1352(%rsp), %eax
vpinsrw $0x1, %eax, %xmm0, %xmm0
movzwl 0x1354(%rsp), %eax
vpinsrw $0x2, %eax, %xmm0, %xmm0
movzwl 0x1356(%rsp), %eax
vpinsrw $0x3, %eax, %xmm0, %xmm0
movzwl 0x1358(%rsp), %eax
vpinsrw $0x4, %eax, %xmm0, %xmm0
movzwl 0x135a(%rsp), %eax
vpinsrw $0x5, %eax, %xmm0, %xmm0
movzwl 0x135c(%rsp), %eax
vpinsrw $0x6, %eax, %xmm0, %xmm0
movzwl 0x135e(%rsp), %eax
vpinsrw $0x7, %eax, %xmm0, %xmm2
vmovaps %xmm2, %xmm0
vinserti128 $0x1, %xmm1, %ymm0, %ymm0
vmovdqa %ymm0, 0x1320(%rsp)
vmovdqa 0x1320(%rsp), %ymm0
vmovdqa %ymm0, 0x6a0(%rsp)
movl 0x810(%rsp), %eax
movl $0x10, %ecx
cltd
idivl %ecx
cmpl $0x0, %edx
jne 0x93ffce
movl $0x0, 0x75c(%rsp)
movl 0x75c(%rsp), %eax
cmpl 0x80c(%rsp), %eax
jge 0x93ffc9
movl $0x0, 0x758(%rsp)
movl 0x758(%rsp), %eax
cmpl 0x810(%rsp), %eax
jge 0x93ffb1
movq 0x828(%rsp), %rax
movl 0x75c(%rsp), %ecx
imull 0x824(%rsp), %ecx
addl 0x758(%rsp), %ecx
movslq %ecx, %rcx
shlq %rcx
addq %rcx, %rax
movq %rax, 0x8a8(%rsp)
movq 0x8a8(%rsp), %rax
vmovdqu (%rax), %ymm0
vmovdqa %ymm0, 0x680(%rsp)
vmovdqa 0x680(%rsp), %ymm1
vmovdqa 0x7e0(%rsp), %xmm0
vmovdqa %ymm1, 0x900(%rsp)
vmovdqa %xmm0, 0x8f0(%rsp)
vmovdqa 0x900(%rsp), %ymm0
vmovdqa 0x8f0(%rsp), %xmm1
vpsllw %xmm1, %ymm0, %ymm0
vmovdqa %ymm0, 0x660(%rsp)
cmpl $0x0, 0x7dc(%rsp)
je 0x93ff00
movq 0x800(%rsp), %rax
movl 0x75c(%rsp), %ecx
movl 0x7fc(%rsp), %edx
imull %edx, %ecx
movl 0x758(%rsp), %edx
addl %edx, %ecx
movslq %ecx, %rcx
leaq (%rax,%rcx,2), %rax
movq %rax, 0x8a0(%rsp)
movq 0x8a0(%rsp), %rax
vmovups (%rax), %ymm0
vmovaps %ymm0, 0x640(%rsp)
vmovaps 0x640(%rsp), %ymm1
vmovaps 0x760(%rsp), %ymm0
vmovaps %ymm1, 0xa80(%rsp)
vmovaps %ymm0, 0xa60(%rsp)
vmovaps 0xa80(%rsp), %ymm0
vmovaps 0xa60(%rsp), %ymm1
vpunpcklwd %ymm1, %ymm0, %ymm0 # ymm0 = ymm0[0],ymm1[0],ymm0[1],ymm1[1],ymm0[2],ymm1[2],ymm0[3],ymm1[3],ymm0[8],ymm1[8],ymm0[9],ymm1[9],ymm0[10],ymm1[10],ymm0[11],ymm1[11]
vmovaps %ymm0, 0x620(%rsp)
vmovaps 0x640(%rsp), %ymm1
vmovaps 0x760(%rsp), %ymm0
vmovaps %ymm1, 0xb80(%rsp)
vmovaps %ymm0, 0xb60(%rsp)
vmovaps 0xb80(%rsp), %ymm0
vmovaps 0xb60(%rsp), %ymm1
vpunpckhwd %ymm1, %ymm0, %ymm0 # ymm0 = ymm0[4],ymm1[4],ymm0[5],ymm1[5],ymm0[6],ymm1[6],ymm0[7],ymm1[7],ymm0[12],ymm1[12],ymm0[13],ymm1[13],ymm0[14],ymm1[14],ymm0[15],ymm1[15]
vmovaps %ymm0, 0x600(%rsp)
vmovaps 0x660(%rsp), %ymm1
vmovaps 0x760(%rsp), %ymm0
vmovaps %ymm1, 0xa40(%rsp)
vmovaps %ymm0, 0xa20(%rsp)
vmovaps 0xa40(%rsp), %ymm0
vmovaps 0xa20(%rsp), %ymm1
vpunpcklwd %ymm1, %ymm0, %ymm0 # ymm0 = ymm0[0],ymm1[0],ymm0[1],ymm1[1],ymm0[2],ymm1[2],ymm0[3],ymm1[3],ymm0[8],ymm1[8],ymm0[9],ymm1[9],ymm0[10],ymm1[10],ymm0[11],ymm1[11]
vmovaps %ymm0, 0x5e0(%rsp)
vmovaps 0x5e0(%rsp), %ymm1
vmovaps 0x720(%rsp), %ymm0
vmovaps %ymm1, 0xcc0(%rsp)
vmovaps %ymm0, 0xca0(%rsp)
vmovaps 0xcc0(%rsp), %ymm0
vmovaps 0xca0(%rsp), %ymm1
vpaddd %ymm1, %ymm0, %ymm0
vmovaps %ymm0, 0x5c0(%rsp)
movl 0x7d8(%rsp), %r8d
leaq 0x620(%rsp), %rdi
leaq 0x5c0(%rsp), %rsi
leaq 0x7a0(%rsp), %rdx
leaq 0x780(%rsp), %rcx
callq 0x940c50
vmovaps %ymm0, 0x5a0(%rsp)
vmovaps 0x660(%rsp), %ymm1
vmovaps 0x760(%rsp), %ymm0
vmovaps %ymm1, 0xb40(%rsp)
vmovaps %ymm0, 0xb20(%rsp)
vmovaps 0xb40(%rsp), %ymm0
vmovaps 0xb20(%rsp), %ymm1
vpunpckhwd %ymm1, %ymm0, %ymm0 # ymm0 = ymm0[4],ymm1[4],ymm0[5],ymm1[5],ymm0[6],ymm1[6],ymm0[7],ymm1[7],ymm0[12],ymm1[12],ymm0[13],ymm1[13],ymm0[14],ymm1[14],ymm0[15],ymm1[15]
vmovdqa %ymm0, 0x580(%rsp)
vmovdqa 0x580(%rsp), %ymm1
vmovdqa 0x720(%rsp), %ymm0
vmovdqa %ymm1, 0xc80(%rsp)
vmovdqa %ymm0, 0xc60(%rsp)
vmovdqa 0xc80(%rsp), %ymm0
vmovdqa 0xc60(%rsp), %ymm1
vpaddd %ymm1, %ymm0, %ymm0
vmovdqa %ymm0, 0x560(%rsp)
movl 0x7d8(%rsp), %r8d
leaq 0x600(%rsp), %rdi
leaq 0x560(%rsp), %rsi
leaq 0x7a0(%rsp), %rdx
leaq 0x780(%rsp), %rcx
callq 0x940c50
vmovdqa %ymm0, 0x540(%rsp)
movl 0x6fc(%rsp), %ecx
leaq 0x5a0(%rsp), %rdi
leaq 0x720(%rsp), %rsi
leaq 0x6c0(%rsp), %rdx
callq 0x940e30
vmovdqa %ymm0, 0x520(%rsp)
movl 0x6fc(%rsp), %ecx
leaq 0x540(%rsp), %rdi
leaq 0x720(%rsp), %rsi
leaq 0x6c0(%rsp), %rdx
callq 0x940e30
vmovdqa %ymm0, 0x500(%rsp)
vmovdqa 0x520(%rsp), %ymm1
vmovdqa 0x500(%rsp), %ymm0
vmovdqa %ymm1, 0xd80(%rsp)
vmovdqa %ymm0, 0xd60(%rsp)
vmovdqa 0xd80(%rsp), %ymm0
vmovdqa 0xd60(%rsp), %ymm1
vpackusdw %ymm1, %ymm0, %ymm0
vmovdqa %ymm0, 0x4e0(%rsp)
vmovdqa 0x4e0(%rsp), %ymm1
vmovdqa 0x6a0(%rsp), %ymm0
vmovdqa %ymm1, 0xe40(%rsp)
vmovdqa %ymm0, 0xe20(%rsp)
vmovdqa 0xe40(%rsp), %ymm0
vmovdqa 0xe20(%rsp), %ymm1
vpminsw %ymm1, %ymm0, %ymm0
vmovdqa %ymm0, 0x4c0(%rsp)
movq 0x818(%rsp), %rax
movl 0x75c(%rsp), %ecx
imull 0x814(%rsp), %ecx
addl 0x758(%rsp), %ecx
movslq %ecx, %rcx
shlq %rcx
addq %rcx, %rax
vmovdqa 0x4c0(%rsp), %ymm0
movq %rax, 0xed8(%rsp)
vmovdqa %ymm0, 0xea0(%rsp)
vmovdqa 0xea0(%rsp), %ymm0
movq 0xed8(%rsp), %rax
vmovdqa %ymm0, (%rax)
jmp 0x93ff99
vmovdqa 0x660(%rsp), %ymm1
vmovdqa 0x700(%rsp), %ymm0
vmovdqa %ymm1, 0xf80(%rsp)
vmovdqa %ymm0, 0xf60(%rsp)
vmovdqa 0xf80(%rsp), %ymm0
vmovdqa 0xf60(%rsp), %ymm1
vpaddusw %ymm1, %ymm0, %ymm0
vmovdqa %ymm0, 0x4a0(%rsp)
movq 0x800(%rsp), %rax
movl 0x75c(%rsp), %ecx
imull 0x7fc(%rsp), %ecx
addl 0x758(%rsp), %ecx
movslq %ecx, %rcx
shlq %rcx
addq %rcx, %rax
vmovdqa 0x4a0(%rsp), %ymm0
movq %rax, 0xe98(%rsp)
vmovdqa %ymm0, 0xe60(%rsp)
vmovdqa 0xe60(%rsp), %ymm0
movq 0xe98(%rsp), %rax
vmovdqa %ymm0, (%rax)
jmp 0x93ff9b
movl 0x758(%rsp), %eax
addl $0x10, %eax
movl %eax, 0x758(%rsp)
jmp 0x93fad7
jmp 0x93ffb3
movl 0x75c(%rsp), %eax
addl $0x1, %eax
movl %eax, 0x75c(%rsp)
jmp 0x93fab8
jmp 0x940c3b
movl 0x810(%rsp), %eax
movl $0x4, %ecx
cltd
idivl %ecx
cmpl $0x0, %edx
jne 0x940c39
movl $0x0, 0x75c(%rsp)
movl 0x75c(%rsp), %eax
cmpl 0x80c(%rsp), %eax
jge 0x940c37
movl $0x0, 0x758(%rsp)
movl 0x758(%rsp), %eax
cmpl 0x810(%rsp), %eax
jge 0x940c1f
movq 0x828(%rsp), %rax
movl 0x75c(%rsp), %ecx
movl 0x824(%rsp), %edx
imull %edx, %ecx
movl 0x758(%rsp), %edx
addl %edx, %ecx
movslq %ecx, %rcx
leaq (%rax,%rcx,2), %rax
movq %rax, 0xfc8(%rsp)
movq 0xfc8(%rsp), %rax
vmovdqu (%rax), %xmm0
vmovdqa %xmm0, 0x490(%rsp)
movq 0x828(%rsp), %rax
movl 0x75c(%rsp), %ecx
movl 0x824(%rsp), %edx
imull %edx, %ecx
movl 0x758(%rsp), %esi
addl %esi, %ecx
addl %edx, %ecx
movslq %ecx, %rcx
leaq (%rax,%rcx,2), %rax
movq %rax, 0xfc0(%rsp)
movq 0xfc0(%rsp), %rax
vmovdqu (%rax), %xmm0
vmovdqa %xmm0, 0x480(%rsp)
vmovdqa 0x490(%rsp), %xmm0
vmovdqa %xmm0, 0x1010(%rsp)
vmovdqa 0x1010(%rsp), %xmm2
vmovdqa 0x480(%rsp), %xmm1
vmovaps %xmm2, %xmm0
vinserti128 $0x1, %xmm1, %ymm0, %ymm0
vmovdqa %ymm0, 0x460(%rsp)
vmovdqa 0x460(%rsp), %ymm1
vmovdqa 0x7e0(%rsp), %xmm0
vmovdqa %ymm1, 0x8c0(%rsp)
vmovdqa %xmm0, 0x8b0(%rsp)
vmovdqa 0x8c0(%rsp), %ymm0
vmovdqa 0x8b0(%rsp), %xmm1
vpsllw %xmm1, %ymm0, %ymm0
vmovdqa %ymm0, 0x440(%rsp)
movl 0x810(%rsp), %eax
subl 0x758(%rsp), %eax
cmpl $0x8, %eax
jge 0x94061d
cmpl $0x0, 0x7dc(%rsp)
je 0x9404ef
movq 0x800(%rsp), %rax
movl 0x75c(%rsp), %ecx
movl 0x7fc(%rsp), %edx
imull %edx, %ecx
movl 0x758(%rsp), %edx
addl %edx, %ecx
movslq %ecx, %rcx
leaq (%rax,%rcx,2), %rax
movq %rax, 0x1058(%rsp)
movq 0x1058(%rsp), %rax
vmovq (%rax), %xmm0
vmovdqa %xmm0, 0x1040(%rsp)
vmovdqa 0x1040(%rsp), %xmm0
vmovdqa %xmm0, 0x1000(%rsp)
vmovdqa 0x1000(%rsp), %xmm0
vmovdqa %xmm1, 0x430(%rsp)
vmovdqa %xmm0, 0x420(%rsp)
movq 0x800(%rsp), %rax
movl 0x75c(%rsp), %ecx
movl 0x7fc(%rsp), %edx
imull %edx, %ecx
movl 0x758(%rsp), %esi
addl %esi, %ecx
addl %edx, %ecx
movslq %ecx, %rcx
leaq (%rax,%rcx,2), %rax
movq %rax, 0x1038(%rsp)
movq 0x1038(%rsp), %rax
vmovq (%rax), %xmm0
vmovdqa %xmm0, 0x1020(%rsp)
vmovdqa 0x1020(%rsp), %xmm0
vmovdqa %xmm0, 0xff0(%rsp)
vmovdqa 0xff0(%rsp), %xmm0
vmovdqa %xmm1, 0x410(%rsp)
vmovdqa %xmm0, 0x400(%rsp)
vmovaps 0x420(%rsp), %ymm0
vmovaps 0x400(%rsp), %ymm1
vperm2i128 $0x20, %ymm1, %ymm0, %ymm0 # ymm0 = ymm0[0,1],ymm1[0,1]
vmovaps %ymm0, 0x3e0(%rsp)
vmovaps 0x3e0(%rsp), %ymm1
vmovaps 0x760(%rsp), %ymm0
vmovaps %ymm1, 0xa00(%rsp)
vmovaps %ymm0, 0x9e0(%rsp)
vmovaps 0xa00(%rsp), %ymm0
vmovaps 0x9e0(%rsp), %ymm1
vpunpcklwd %ymm1, %ymm0, %ymm0 # ymm0 = ymm0[0],ymm1[0],ymm0[1],ymm1[1],ymm0[2],ymm1[2],ymm0[3],ymm1[3],ymm0[8],ymm1[8],ymm0[9],ymm1[9],ymm0[10],ymm1[10],ymm0[11],ymm1[11]
vmovaps %ymm0, 0x3c0(%rsp)
vmovaps 0x440(%rsp), %ymm1
vmovaps 0x760(%rsp), %ymm0
vmovaps %ymm1, 0x9c0(%rsp)
vmovaps %ymm0, 0x9a0(%rsp)
vmovaps 0x9c0(%rsp), %ymm0
vmovaps 0x9a0(%rsp), %ymm1
vpunpcklwd %ymm1, %ymm0, %ymm0 # ymm0 = ymm0[0],ymm1[0],ymm0[1],ymm1[1],ymm0[2],ymm1[2],ymm0[3],ymm1[3],ymm0[8],ymm1[8],ymm0[9],ymm1[9],ymm0[10],ymm1[10],ymm0[11],ymm1[11]
vmovaps %ymm0, 0x3a0(%rsp)
vmovaps 0x3a0(%rsp), %ymm1
vmovaps 0x720(%rsp), %ymm0
vmovaps %ymm1, 0xc40(%rsp)
vmovaps %ymm0, 0xc20(%rsp)
vmovaps 0xc40(%rsp), %ymm0
vmovaps 0xc20(%rsp), %ymm1
vpaddd %ymm1, %ymm0, %ymm0
vmovaps %ymm0, 0x380(%rsp)
movl 0x7d8(%rsp), %r8d
leaq 0x3c0(%rsp), %rdi
leaq 0x380(%rsp), %rsi
leaq 0x7a0(%rsp), %rdx
leaq 0x780(%rsp), %rcx
callq 0x940c50
vmovaps %ymm0, 0x360(%rsp)
movl 0x6fc(%rsp), %ecx
leaq 0x360(%rsp), %rdi
leaq 0x720(%rsp), %rsi
leaq 0x6c0(%rsp), %rdx
callq 0x940e30
vmovaps %ymm0, 0x340(%rsp)
vmovaps 0x340(%rsp), %ymm0
vmovaps %ymm0, 0xd40(%rsp)
vmovaps %ymm0, 0xd20(%rsp)
vmovaps 0xd40(%rsp), %ymm0
vmovaps 0xd20(%rsp), %ymm1
vpackusdw %ymm1, %ymm0, %ymm0
vmovaps %ymm0, 0x320(%rsp)
vmovaps 0x320(%rsp), %ymm1
vmovaps 0x6a0(%rsp), %ymm0
vmovaps %ymm1, 0xe00(%rsp)
vmovaps %ymm0, 0xde0(%rsp)
vmovaps 0xe00(%rsp), %ymm0
vmovaps 0xde0(%rsp), %ymm1
vpminsw %ymm1, %ymm0, %ymm0
vmovaps %ymm0, 0x300(%rsp)
vmovaps 0x300(%rsp), %ymm0
vmovaps %ymm0, 0x10c0(%rsp)
vmovaps 0x10c0(%rsp), %ymm0
vmovdqa %xmm0, 0x2f0(%rsp)
vmovaps 0x300(%rsp), %ymm0
vextracti128 $0x1, %ymm0, 0x2e0(%rsp)
movq 0x818(%rsp), %rax
movl 0x75c(%rsp), %ecx
movl 0x814(%rsp), %edx
imull %edx, %ecx
movl 0x758(%rsp), %edx
addl %edx, %ecx
movslq %ecx, %rcx
leaq (%rax,%rcx,2), %rax
vmovdqa 0x2f0(%rsp), %xmm0
movq %rax, 0x1158(%rsp)
vmovdqa %xmm0, 0x1140(%rsp)
movq 0x1140(%rsp), %rcx
movq 0x1158(%rsp), %rax
movq %rcx, (%rax)
movq 0x818(%rsp), %rax
movl 0x75c(%rsp), %ecx
movl 0x814(%rsp), %edx
imull %edx, %ecx
movl 0x758(%rsp), %esi
addl %esi, %ecx
addl %edx, %ecx
movslq %ecx, %rcx
leaq (%rax,%rcx,2), %rax
vmovdqa 0x2e0(%rsp), %xmm0
movq %rax, 0x1138(%rsp)
vmovdqa %xmm0, 0x1120(%rsp)
movq 0x1120(%rsp), %rcx
movq 0x1138(%rsp), %rax
movq %rcx, (%rax)
jmp 0x940618
vmovaps 0x440(%rsp), %ymm1
vmovaps 0x700(%rsp), %ymm0
vmovaps %ymm1, 0xf40(%rsp)
vmovaps %ymm0, 0xf20(%rsp)
vmovaps 0xf40(%rsp), %ymm0
vmovaps 0xf20(%rsp), %ymm1
vpaddusw %ymm1, %ymm0, %ymm0
vmovaps %ymm0, 0x2c0(%rsp)
vmovaps 0x2c0(%rsp), %ymm0
vmovaps %ymm0, 0x10a0(%rsp)
vmovaps 0x10a0(%rsp), %ymm0
vmovdqa %xmm0, 0x2b0(%rsp)
vmovaps 0x2c0(%rsp), %ymm0
vextracti128 $0x1, %ymm0, 0x2a0(%rsp)
movq 0x800(%rsp), %rax
movl 0x75c(%rsp), %ecx
movl 0x7fc(%rsp), %edx
imull %edx, %ecx
movl 0x758(%rsp), %edx
addl %edx, %ecx
movslq %ecx, %rcx
leaq (%rax,%rcx,2), %rax
vmovdqa 0x2b0(%rsp), %xmm0
movq %rax, 0x1118(%rsp)
vmovdqa %xmm0, 0x1100(%rsp)
movq 0x1100(%rsp), %rcx
movq 0x1118(%rsp), %rax
movq %rcx, (%rax)
movq 0x800(%rsp), %rax
movl 0x75c(%rsp), %ecx
movl 0x7fc(%rsp), %edx
imull %edx, %ecx
movl 0x758(%rsp), %esi
addl %esi, %ecx
addl %edx, %ecx
movslq %ecx, %rcx
leaq (%rax,%rcx,2), %rax
vmovdqa 0x2a0(%rsp), %xmm0
movq %rax, 0x10f8(%rsp)
vmovdqa %xmm0, 0x10e0(%rsp)
movq 0x10e0(%rsp), %rcx
movq 0x10f8(%rsp), %rax
movq %rcx, (%rax)
jmp 0x940c07
cmpl $0x0, 0x7dc(%rsp)
je 0x940ae5
movq 0x800(%rsp), %rax
movl 0x75c(%rsp), %ecx
movl 0x7fc(%rsp), %edx
imull %edx, %ecx
movl 0x758(%rsp), %edx
addl %edx, %ecx
movslq %ecx, %rcx
leaq (%rax,%rcx,2), %rax
movq %rax, 0xfb8(%rsp)
movq 0xfb8(%rsp), %rax
vmovdqu (%rax), %xmm0
vmovdqa %xmm0, 0xfe0(%rsp)
vmovdqa 0xfe0(%rsp), %xmm0
vmovdqa %xmm1, 0x290(%rsp)
vmovdqa %xmm0, 0x280(%rsp)
movq 0x800(%rsp), %rax
movl 0x75c(%rsp), %ecx
movl 0x7fc(%rsp), %edx
imull %edx, %ecx
movl 0x758(%rsp), %esi
addl %esi, %ecx
addl %edx, %ecx
movslq %ecx, %rcx
leaq (%rax,%rcx,2), %rax
movq %rax, 0xfb0(%rsp)
movq 0xfb0(%rsp), %rax
vmovdqu (%rax), %xmm0
vmovdqa %xmm0, 0xfd0(%rsp)
vmovdqa 0xfd0(%rsp), %xmm0
vmovdqa %xmm1, 0x270(%rsp)
vmovdqa %xmm0, 0x260(%rsp)
vmovaps 0x280(%rsp), %ymm0
vmovaps 0x260(%rsp), %ymm1
vperm2i128 $0x20, %ymm1, %ymm0, %ymm0 # ymm0 = ymm0[0,1],ymm1[0,1]
vmovaps %ymm0, 0x240(%rsp)
vmovaps 0x240(%rsp), %ymm1
vmovaps 0x760(%rsp), %ymm0
vmovaps %ymm1, 0x980(%rsp)
vmovaps %ymm0, 0x960(%rsp)
vmovaps 0x980(%rsp), %ymm0
vmovaps 0x960(%rsp), %ymm1
vpunpcklwd %ymm1, %ymm0, %ymm0 # ymm0 = ymm0[0],ymm1[0],ymm0[1],ymm1[1],ymm0[2],ymm1[2],ymm0[3],ymm1[3],ymm0[8],ymm1[8],ymm0[9],ymm1[9],ymm0[10],ymm1[10],ymm0[11],ymm1[11]
vmovaps %ymm0, 0x220(%rsp)
vmovaps 0x240(%rsp), %ymm1
vmovaps 0x760(%rsp), %ymm0
vmovaps %ymm1, 0xb00(%rsp)
vmovaps %ymm0, 0xae0(%rsp)
vmovaps 0xb00(%rsp), %ymm0
vmovaps 0xae0(%rsp), %ymm1
vpunpckhwd %ymm1, %ymm0, %ymm0 # ymm0 = ymm0[4],ymm1[4],ymm0[5],ymm1[5],ymm0[6],ymm1[6],ymm0[7],ymm1[7],ymm0[12],ymm1[12],ymm0[13],ymm1[13],ymm0[14],ymm1[14],ymm0[15],ymm1[15]
vmovaps %ymm0, 0x200(%rsp)
vmovaps 0x440(%rsp), %ymm1
vmovaps 0x760(%rsp), %ymm0
vmovaps %ymm1, 0x940(%rsp)
vmovaps %ymm0, 0x920(%rsp)
vmovaps 0x940(%rsp), %ymm0
vmovaps 0x920(%rsp), %ymm1
vpunpcklwd %ymm1, %ymm0, %ymm0 # ymm0 = ymm0[0],ymm1[0],ymm0[1],ymm1[1],ymm0[2],ymm1[2],ymm0[3],ymm1[3],ymm0[8],ymm1[8],ymm0[9],ymm1[9],ymm0[10],ymm1[10],ymm0[11],ymm1[11]
vmovaps %ymm0, 0x1e0(%rsp)
vmovaps 0x1e0(%rsp), %ymm1
vmovaps 0x720(%rsp), %ymm0
vmovaps %ymm1, 0xc00(%rsp)
vmovaps %ymm0, 0xbe0(%rsp)
vmovaps 0xc00(%rsp), %ymm0
vmovaps 0xbe0(%rsp), %ymm1
vpaddd %ymm1, %ymm0, %ymm0
vmovaps %ymm0, 0x1c0(%rsp)
movl 0x7d8(%rsp), %r8d
leaq 0x220(%rsp), %rdi
leaq 0x1c0(%rsp), %rsi
leaq 0x7a0(%rsp), %rdx
movq %rdx, 0x18(%rsp)
leaq 0x780(%rsp), %rcx
movq %rcx, 0x20(%rsp)
callq 0x940c50
movq 0x18(%rsp), %rdx
movq 0x20(%rsp), %rcx
vmovaps %ymm0, 0x1a0(%rsp)
vmovaps 0x440(%rsp), %ymm1
vmovaps 0x760(%rsp), %ymm0
vmovaps %ymm1, 0xac0(%rsp)
vmovaps %ymm0, 0xaa0(%rsp)
vmovaps 0xac0(%rsp), %ymm0
vmovaps 0xaa0(%rsp), %ymm1
vpunpckhwd %ymm1, %ymm0, %ymm0 # ymm0 = ymm0[4],ymm1[4],ymm0[5],ymm1[5],ymm0[6],ymm1[6],ymm0[7],ymm1[7],ymm0[12],ymm1[12],ymm0[13],ymm1[13],ymm0[14],ymm1[14],ymm0[15],ymm1[15]
vmovaps %ymm0, 0x180(%rsp)
vmovaps 0x180(%rsp), %ymm1
vmovaps 0x720(%rsp), %ymm0
vmovaps %ymm1, 0xbc0(%rsp)
vmovaps %ymm0, 0xba0(%rsp)
vmovaps 0xbc0(%rsp), %ymm0
vmovaps 0xba0(%rsp), %ymm1
vpaddd %ymm1, %ymm0, %ymm0
vmovaps %ymm0, 0x160(%rsp)
movl 0x7d8(%rsp), %r8d
leaq 0x200(%rsp), %rdi
leaq 0x160(%rsp), %rsi
callq 0x940c50
vmovaps %ymm0, 0x140(%rsp)
movl 0x6fc(%rsp), %ecx
leaq 0x1a0(%rsp), %rdi
leaq 0x720(%rsp), %rsi
movq %rsi, 0x28(%rsp)
leaq 0x6c0(%rsp), %rdx
movq %rdx, 0x30(%rsp)
callq 0x940e30
movq 0x28(%rsp), %rsi
movq 0x30(%rsp), %rdx
vmovaps %ymm0, 0x120(%rsp)
movl 0x6fc(%rsp), %ecx
leaq 0x140(%rsp), %rdi
callq 0x940e30
vmovaps %ymm0, 0x100(%rsp)
vmovaps 0x120(%rsp), %ymm1
vmovaps 0x100(%rsp), %ymm0
vmovaps %ymm1, 0xd00(%rsp)
vmovaps %ymm0, 0xce0(%rsp)
vmovaps 0xd00(%rsp), %ymm0
vmovaps 0xce0(%rsp), %ymm1
vpackusdw %ymm1, %ymm0, %ymm0
vmovaps %ymm0, 0xe0(%rsp)
vmovaps 0xe0(%rsp), %ymm1
vmovaps 0x6a0(%rsp), %ymm0
vmovaps %ymm1, 0xdc0(%rsp)
vmovaps %ymm0, 0xda0(%rsp)
vmovaps 0xdc0(%rsp), %ymm0
vmovaps 0xda0(%rsp), %ymm1
vpminsw %ymm1, %ymm0, %ymm0
vmovaps %ymm0, 0xc0(%rsp)
vmovaps 0xc0(%rsp), %ymm0
vmovaps %ymm0, 0x1080(%rsp)
vmovaps 0x1080(%rsp), %ymm0
vmovdqa %xmm0, 0xb0(%rsp)
vmovdqa 0xd0(%rsp), %xmm0
vmovdqa %xmm0, 0xa0(%rsp)
movq 0x818(%rsp), %rax
movl 0x75c(%rsp), %ecx
imull 0x814(%rsp), %ecx
addl 0x758(%rsp), %ecx
movslq %ecx, %rcx
shlq %rcx
addq %rcx, %rax
vmovdqa 0xb0(%rsp), %xmm0
movq %rax, 0x11d8(%rsp)
vmovdqa %xmm0, 0x11c0(%rsp)
vmovdqa 0x11c0(%rsp), %xmm0
movq 0x11d8(%rsp), %rax
vmovdqa %xmm0, (%rax)
movq 0x818(%rsp), %rax
movl 0x75c(%rsp), %ecx
imull 0x814(%rsp), %ecx
addl 0x758(%rsp), %ecx
addl 0x814(%rsp), %ecx
movslq %ecx, %rcx
shlq %rcx
addq %rcx, %rax
vmovdqa 0xa0(%rsp), %xmm0
movq %rax, 0x11b8(%rsp)
vmovdqa %xmm0, 0x11a0(%rsp)
vmovdqa 0x11a0(%rsp), %xmm0
movq 0x11b8(%rsp), %rax
vmovdqa %xmm0, (%rax)
jmp 0x940c05
vmovaps 0x440(%rsp), %ymm1
vmovaps 0x700(%rsp), %ymm0
vmovaps %ymm1, 0xf00(%rsp)
vmovaps %ymm0, 0xee0(%rsp)
vmovaps 0xf00(%rsp), %ymm0
vmovaps 0xee0(%rsp), %ymm1
vpaddusw %ymm1, %ymm0, %ymm0
vmovaps %ymm0, 0x80(%rsp)
vmovaps 0x80(%rsp), %ymm0
vmovaps %ymm0, 0x1060(%rsp)
vmovaps 0x1060(%rsp), %ymm0
vmovdqa %xmm0, 0x70(%rsp)
vmovdqa 0x90(%rsp), %xmm0
vmovdqa %xmm0, 0x60(%rsp)
movq 0x800(%rsp), %rax
movl 0x75c(%rsp), %ecx
imull 0x7fc(%rsp), %ecx
addl 0x758(%rsp), %ecx
movslq %ecx, %rcx
shlq %rcx
addq %rcx, %rax
vmovdqa 0x70(%rsp), %xmm0
movq %rax, 0x1198(%rsp)
vmovdqa %xmm0, 0x1180(%rsp)
vmovdqa 0x1180(%rsp), %xmm0
movq 0x1198(%rsp), %rax
vmovdqa %xmm0, (%rax)
movq 0x800(%rsp), %rax
movl 0x75c(%rsp), %ecx
imull 0x7fc(%rsp), %ecx
addl 0x758(%rsp), %ecx
addl 0x7fc(%rsp), %ecx
movslq %ecx, %rcx
shlq %rcx
addq %rcx, %rax
vmovdqa 0x60(%rsp), %xmm0
movq %rax, 0x1178(%rsp)
vmovdqa %xmm0, 0x1160(%rsp)
vmovdqa 0x1160(%rsp), %xmm0
movq 0x1178(%rsp), %rax
vmovdqa %xmm0, (%rax)
jmp 0x940c07
jmp 0x940c09
movl 0x758(%rsp), %eax
addl $0x8, %eax
movl %eax, 0x758(%rsp)
jmp 0x940010
jmp 0x940c21
movl 0x75c(%rsp), %eax
addl $0x2, %eax
movl %eax, 0x75c(%rsp)
jmp 0x93fff1
jmp 0x940c39
jmp 0x940c3b
movq %rbp, %rsp
popq %rbp
vzeroupper
retq
nopw %cs:(%rax,%rax)
| /m-ab-s[P]aom/av1/common/x86/highbd_jnt_convolve_avx2.c |
highbd_comp_avg | static inline __m256i highbd_comp_avg(const __m256i *const data_ref_0,
const __m256i *const res_unsigned,
const __m256i *const wt0,
const __m256i *const wt1,
const int use_dist_wtd_comp_avg) {
__m256i res;
if (use_dist_wtd_comp_avg) {
const __m256i wt0_res = _mm256_mullo_epi32(*data_ref_0, *wt0);
const __m256i wt1_res = _mm256_mullo_epi32(*res_unsigned, *wt1);
const __m256i wt_res = _mm256_add_epi32(wt0_res, wt1_res);
res = _mm256_srai_epi32(wt_res, DIST_PRECISION_BITS);
} else {
const __m256i wt_res = _mm256_add_epi32(*data_ref_0, *res_unsigned);
res = _mm256_srai_epi32(wt_res, 1);
}
return res;
} | pushq %rbp
movq %rsp, %rbp
andq $-0x20, %rsp
subq $0x280, %rsp # imm = 0x280
movq %rdi, 0xd8(%rsp)
movq %rsi, 0xd0(%rsp)
movq %rdx, 0xc8(%rsp)
movq %rcx, 0xc0(%rsp)
movl %r8d, 0xbc(%rsp)
cmpl $0x0, 0xbc(%rsp)
je 0x940d9a
movq 0xd8(%rsp), %rax
vmovdqa (%rax), %ymm1
movq 0xc8(%rsp), %rax
vmovdqa (%rax), %ymm0
vmovdqa %ymm1, 0x1c0(%rsp)
vmovdqa %ymm0, 0x1a0(%rsp)
vmovdqa 0x1c0(%rsp), %ymm0
vmovdqa 0x1a0(%rsp), %ymm1
vpmulld %ymm1, %ymm0, %ymm0
vmovdqa %ymm0, 0x60(%rsp)
movq 0xd0(%rsp), %rax
vmovdqa (%rax), %ymm1
movq 0xc0(%rsp), %rax
vmovdqa (%rax), %ymm0
vmovdqa %ymm1, 0x180(%rsp)
vmovdqa %ymm0, 0x160(%rsp)
vmovdqa 0x180(%rsp), %ymm0
vmovdqa 0x160(%rsp), %ymm1
vpmulld %ymm1, %ymm0, %ymm0
vmovdqa %ymm0, 0x40(%rsp)
vmovdqa 0x60(%rsp), %ymm1
vmovdqa 0x40(%rsp), %ymm0
vmovdqa %ymm1, 0x140(%rsp)
vmovdqa %ymm0, 0x120(%rsp)
vmovdqa 0x140(%rsp), %ymm0
vmovdqa 0x120(%rsp), %ymm1
vpaddd %ymm1, %ymm0, %ymm0
vmovdqa %ymm0, 0x20(%rsp)
vmovdqa 0x20(%rsp), %ymm0
vmovdqa %ymm0, 0x240(%rsp)
movl $0x4, 0x23c(%rsp)
vmovdqa 0x240(%rsp), %ymm0
movl 0x23c(%rsp), %eax
vmovd %eax, %xmm1
vpsrad %xmm1, %ymm0, %ymm0
vmovdqa %ymm0, 0x80(%rsp)
jmp 0x940e19
movq 0xd8(%rsp), %rax
vmovdqa (%rax), %ymm1
movq 0xd0(%rsp), %rax
vmovdqa (%rax), %ymm0
vmovdqa %ymm1, 0x100(%rsp)
vmovdqa %ymm0, 0xe0(%rsp)
vmovdqa 0x100(%rsp), %ymm0
vmovdqa 0xe0(%rsp), %ymm1
vpaddd %ymm1, %ymm0, %ymm0
vmovdqa %ymm0, (%rsp)
vmovdqa (%rsp), %ymm0
vmovdqa %ymm0, 0x200(%rsp)
movl $0x1, 0x1fc(%rsp)
vmovdqa 0x200(%rsp), %ymm0
movl 0x1fc(%rsp), %eax
vmovd %eax, %xmm1
vpsrad %xmm1, %ymm0, %ymm0
vmovdqa %ymm0, 0x80(%rsp)
vmovdqa 0x80(%rsp), %ymm0
movq %rbp, %rsp
popq %rbp
retq
nopw (%rax,%rax)
| /m-ab-s[P]aom/aom_dsp/x86/convolve_avx2.h |
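For the same listings, highbd_comp_avg() above blends the prediction already stored in the CONV_BUF with the new one, either with the explicit forward/backward weights (fwd_offset/bck_offset) or as a plain average; the vpsrad by 4 in the assembly is DIST_PRECISION_BITS. A scalar sketch of the per-lane arithmetic, assuming only what the listing shows (comp_avg_scalar is a hypothetical name, not libaom API):

#include <stdint.h>

#define DIST_PRECISION_BITS 4 /* matches the shift-by-4 visible in the asm */

/* Scalar model of one 32-bit lane of highbd_comp_avg(). */
static int32_t comp_avg_scalar(int32_t data_ref_0, int32_t res_unsigned,
                               int32_t w0, int32_t w1,
                               int use_dist_wtd_comp_avg) {
  if (use_dist_wtd_comp_avg)
    return (data_ref_0 * w0 + res_unsigned * w1) >> DIST_PRECISION_BITS;
  return (data_ref_0 + res_unsigned) >> 1;
}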
av1_highbd_dist_wtd_convolve_2d_avx2 | void av1_highbd_dist_wtd_convolve_2d_avx2(
const uint16_t *src, int src_stride, uint16_t *dst0, int dst_stride0, int w,
int h, const InterpFilterParams *filter_params_x,
const InterpFilterParams *filter_params_y, const int subpel_x_qn,
const int subpel_y_qn, ConvolveParams *conv_params, int bd) {
DECLARE_ALIGNED(32, int16_t, im_block[(MAX_SB_SIZE + MAX_FILTER_TAP) * 8]);
CONV_BUF_TYPE *dst = conv_params->dst;
int dst_stride = conv_params->dst_stride;
int im_h = h + filter_params_y->taps - 1;
int im_stride = 8;
int i, j;
const int fo_vert = filter_params_y->taps / 2 - 1;
const int fo_horiz = filter_params_x->taps / 2 - 1;
const uint16_t *const src_ptr = src - fo_vert * src_stride - fo_horiz;
// Check that, even with 12-bit input, the intermediate values will fit
// into an unsigned 16-bit intermediate array.
assert(bd + FILTER_BITS + 2 - conv_params->round_0 <= 16);
__m256i s[8], coeffs_y[4], coeffs_x[4];
const int do_average = conv_params->do_average;
const int use_dist_wtd_comp_avg = conv_params->use_dist_wtd_comp_avg;
const int w0 = conv_params->fwd_offset;
const int w1 = conv_params->bck_offset;
const __m256i wt0 = _mm256_set1_epi32(w0);
const __m256i wt1 = _mm256_set1_epi32(w1);
const __m256i zero = _mm256_setzero_si256();
const __m256i round_const_x = _mm256_set1_epi32(
((1 << conv_params->round_0) >> 1) + (1 << (bd + FILTER_BITS - 1)));
const __m128i round_shift_x = _mm_cvtsi32_si128(conv_params->round_0);
const __m256i round_const_y = _mm256_set1_epi32(
((1 << conv_params->round_1) >> 1) -
(1 << (bd + 2 * FILTER_BITS - conv_params->round_0 - 1)));
const __m128i round_shift_y = _mm_cvtsi32_si128(conv_params->round_1);
const int offset_0 =
bd + 2 * FILTER_BITS - conv_params->round_0 - conv_params->round_1;
const int offset = (1 << offset_0) + (1 << (offset_0 - 1));
const __m256i offset_const = _mm256_set1_epi32(offset);
const int rounding_shift =
2 * FILTER_BITS - conv_params->round_0 - conv_params->round_1;
const __m256i rounding_const = _mm256_set1_epi32((1 << rounding_shift) >> 1);
const __m256i clip_pixel_to_bd =
_mm256_set1_epi16(bd == 10 ? 1023 : (bd == 12 ? 4095 : 255));
prepare_coeffs(filter_params_x, subpel_x_qn, coeffs_x);
prepare_coeffs(filter_params_y, subpel_y_qn, coeffs_y);
for (j = 0; j < w; j += 8) {
/* Horizontal filter */
{
for (i = 0; i < im_h; i += 2) {
const __m256i row0 =
_mm256_loadu_si256((__m256i *)&src_ptr[i * src_stride + j]);
__m256i row1 = _mm256_setzero_si256();
if (i + 1 < im_h)
row1 =
_mm256_loadu_si256((__m256i *)&src_ptr[(i + 1) * src_stride + j]);
const __m256i r0 = _mm256_permute2x128_si256(row0, row1, 0x20);
const __m256i r1 = _mm256_permute2x128_si256(row0, row1, 0x31);
// even pixels
s[0] = _mm256_alignr_epi8(r1, r0, 0);
s[1] = _mm256_alignr_epi8(r1, r0, 4);
s[2] = _mm256_alignr_epi8(r1, r0, 8);
s[3] = _mm256_alignr_epi8(r1, r0, 12);
__m256i res_even = convolve(s, coeffs_x);
res_even = _mm256_sra_epi32(_mm256_add_epi32(res_even, round_const_x),
round_shift_x);
// odd pixels
s[0] = _mm256_alignr_epi8(r1, r0, 2);
s[1] = _mm256_alignr_epi8(r1, r0, 6);
s[2] = _mm256_alignr_epi8(r1, r0, 10);
s[3] = _mm256_alignr_epi8(r1, r0, 14);
__m256i res_odd = convolve(s, coeffs_x);
res_odd = _mm256_sra_epi32(_mm256_add_epi32(res_odd, round_const_x),
round_shift_x);
__m256i res_even1 = _mm256_packs_epi32(res_even, res_even);
__m256i res_odd1 = _mm256_packs_epi32(res_odd, res_odd);
__m256i res = _mm256_unpacklo_epi16(res_even1, res_odd1);
_mm256_store_si256((__m256i *)&im_block[i * im_stride], res);
}
}
/* Vertical filter */
{
__m256i s0 = _mm256_loadu_si256((__m256i *)(im_block + 0 * im_stride));
__m256i s1 = _mm256_loadu_si256((__m256i *)(im_block + 1 * im_stride));
__m256i s2 = _mm256_loadu_si256((__m256i *)(im_block + 2 * im_stride));
__m256i s3 = _mm256_loadu_si256((__m256i *)(im_block + 3 * im_stride));
__m256i s4 = _mm256_loadu_si256((__m256i *)(im_block + 4 * im_stride));
__m256i s5 = _mm256_loadu_si256((__m256i *)(im_block + 5 * im_stride));
s[0] = _mm256_unpacklo_epi16(s0, s1);
s[1] = _mm256_unpacklo_epi16(s2, s3);
s[2] = _mm256_unpacklo_epi16(s4, s5);
s[4] = _mm256_unpackhi_epi16(s0, s1);
s[5] = _mm256_unpackhi_epi16(s2, s3);
s[6] = _mm256_unpackhi_epi16(s4, s5);
for (i = 0; i < h; i += 2) {
const int16_t *data = &im_block[i * im_stride];
const __m256i s6 =
_mm256_loadu_si256((__m256i *)(data + 6 * im_stride));
const __m256i s7 =
_mm256_loadu_si256((__m256i *)(data + 7 * im_stride));
s[3] = _mm256_unpacklo_epi16(s6, s7);
s[7] = _mm256_unpackhi_epi16(s6, s7);
const __m256i res_a = convolve(s, coeffs_y);
const __m256i res_a_round = _mm256_sra_epi32(
_mm256_add_epi32(res_a, round_const_y), round_shift_y);
const __m256i res_unsigned_lo =
_mm256_add_epi32(res_a_round, offset_const);
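        // Narrow tail: fewer than 8 columns remain in this block, so only the
        // low half (four outputs per row) is needed and the high-half
        // convolve is skipped.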
if (w - j < 8) {
if (do_average) {
const __m256i data_0 = _mm256_castsi128_si256(
_mm_loadl_epi64((__m128i *)(&dst[i * dst_stride + j])));
const __m256i data_1 = _mm256_castsi128_si256(_mm_loadl_epi64(
(__m128i *)(&dst[i * dst_stride + j + dst_stride])));
const __m256i data_01 =
_mm256_permute2x128_si256(data_0, data_1, 0x20);
const __m256i data_ref_0 = _mm256_unpacklo_epi16(data_01, zero);
const __m256i comp_avg_res =
highbd_comp_avg(&data_ref_0, &res_unsigned_lo, &wt0, &wt1,
use_dist_wtd_comp_avg);
const __m256i round_result = highbd_convolve_rounding(
&comp_avg_res, &offset_const, &rounding_const, rounding_shift);
const __m256i res_16b =
_mm256_packus_epi32(round_result, round_result);
const __m256i res_clip =
_mm256_min_epi16(res_16b, clip_pixel_to_bd);
const __m128i res_0 = _mm256_castsi256_si128(res_clip);
const __m128i res_1 = _mm256_extracti128_si256(res_clip, 1);
_mm_storel_epi64((__m128i *)(&dst0[i * dst_stride0 + j]), res_0);
_mm_storel_epi64(
(__m128i *)(&dst0[i * dst_stride0 + j + dst_stride0]), res_1);
} else {
__m256i res_16b =
_mm256_packus_epi32(res_unsigned_lo, res_unsigned_lo);
const __m128i res_0 = _mm256_castsi256_si128(res_16b);
const __m128i res_1 = _mm256_extracti128_si256(res_16b, 1);
_mm_storel_epi64((__m128i *)(&dst[i * dst_stride + j]), res_0);
_mm_storel_epi64((__m128i *)(&dst[i * dst_stride + j + dst_stride]),
res_1);
}
} else {
const __m256i res_b = convolve(s + 4, coeffs_y);
const __m256i res_b_round = _mm256_sra_epi32(
_mm256_add_epi32(res_b, round_const_y), round_shift_y);
__m256i res_unsigned_hi = _mm256_add_epi32(res_b_round, offset_const);
if (do_average) {
const __m256i data_0 = _mm256_castsi128_si256(
_mm_loadu_si128((__m128i *)(&dst[i * dst_stride + j])));
const __m256i data_1 = _mm256_castsi128_si256(_mm_loadu_si128(
(__m128i *)(&dst[i * dst_stride + j + dst_stride])));
const __m256i data_01 =
_mm256_permute2x128_si256(data_0, data_1, 0x20);
const __m256i data_ref_0_lo = _mm256_unpacklo_epi16(data_01, zero);
const __m256i data_ref_0_hi = _mm256_unpackhi_epi16(data_01, zero);
const __m256i comp_avg_res_lo =
highbd_comp_avg(&data_ref_0_lo, &res_unsigned_lo, &wt0, &wt1,
use_dist_wtd_comp_avg);
const __m256i comp_avg_res_hi =
highbd_comp_avg(&data_ref_0_hi, &res_unsigned_hi, &wt0, &wt1,
use_dist_wtd_comp_avg);
const __m256i round_result_lo =
highbd_convolve_rounding(&comp_avg_res_lo, &offset_const,
&rounding_const, rounding_shift);
const __m256i round_result_hi =
highbd_convolve_rounding(&comp_avg_res_hi, &offset_const,
&rounding_const, rounding_shift);
const __m256i res_16b =
_mm256_packus_epi32(round_result_lo, round_result_hi);
const __m256i res_clip =
_mm256_min_epi16(res_16b, clip_pixel_to_bd);
const __m128i res_0 = _mm256_castsi256_si128(res_clip);
const __m128i res_1 = _mm256_extracti128_si256(res_clip, 1);
_mm_store_si128((__m128i *)(&dst0[i * dst_stride0 + j]), res_0);
_mm_store_si128(
(__m128i *)(&dst0[i * dst_stride0 + j + dst_stride0]), res_1);
} else {
__m256i res_16b =
_mm256_packus_epi32(res_unsigned_lo, res_unsigned_hi);
const __m128i res_0 = _mm256_castsi256_si128(res_16b);
const __m128i res_1 = _mm256_extracti128_si256(res_16b, 1);
_mm_store_si128((__m128i *)(&dst[i * dst_stride + j]), res_0);
_mm_store_si128((__m128i *)(&dst[i * dst_stride + j + dst_stride]),
res_1);
}
}
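        // Slide the vertical 8-tap window down two rows before computing the
        // next pair of output rows.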
s[0] = s[1];
s[1] = s[2];
s[2] = s[3];
s[4] = s[5];
s[5] = s[6];
s[6] = s[7];
}
}
}
} | pushq %rbp
movq %rsp, %rbp
andq $-0x20, %rsp
subq $0x1fc0, %rsp # imm = 0x1FC0
movl 0x38(%rbp), %eax
movq 0x30(%rbp), %rax
movl 0x28(%rbp), %eax
movl 0x20(%rbp), %eax
movq 0x18(%rbp), %rax
movq 0x10(%rbp), %rax
movq %rdi, 0x1328(%rsp)
movl %esi, 0x1324(%rsp)
movq %rdx, 0x1318(%rsp)
movl %ecx, 0x1314(%rsp)
movl %r8d, 0x1310(%rsp)
movl %r9d, 0x130c(%rsp)
movq 0x30(%rbp), %rax
movq 0x8(%rax), %rax
movq %rax, 0xa38(%rsp)
movq 0x30(%rbp), %rax
movl 0x10(%rax), %eax
movl %eax, 0xa34(%rsp)
movl 0x130c(%rsp), %ecx
movq 0x18(%rbp), %rax
movzwl 0x8(%rax), %edx
movl %ecx, %eax
movl %edx, %ecx
leal -0x1(%rax,%rcx), %eax
movl %eax, 0xa30(%rsp)
movl $0x8, 0xa2c(%rsp)
movq 0x18(%rbp), %rax
movzwl 0x8(%rax), %eax
shrl %eax
decl %eax
movl %eax, 0xa20(%rsp)
movq 0x10(%rbp), %rax
movzwl 0x8(%rax), %eax
shrl %eax
decl %eax
movl %eax, 0xa1c(%rsp)
movq 0x1328(%rsp), %rax
movl 0xa20(%rsp), %ecx
movl 0x1324(%rsp), %edx
imull %edx, %ecx
movslq %ecx, %rcx
addq %rcx, %rcx
subq %rcx, %rax
movslq 0xa1c(%rsp), %rcx
addq %rcx, %rcx
subq %rcx, %rax
movq %rax, 0xa10(%rsp)
movq 0x30(%rbp), %rax
movl (%rax), %eax
movl %eax, 0x7fc(%rsp)
movq 0x30(%rbp), %rax
movl 0x24(%rax), %eax
movl %eax, 0x7f8(%rsp)
movq 0x30(%rbp), %rax
movl 0x28(%rax), %eax
movl %eax, 0x7f4(%rsp)
movq 0x30(%rbp), %rax
movl 0x2c(%rax), %eax
movl %eax, 0x7f0(%rsp)
movl 0x7f4(%rsp), %eax
movl %eax, 0x137c(%rsp)
movl 0x137c(%rsp), %eax
movl %eax, 0x38(%rsp)
movl %eax, 0x1e1c(%rsp)
movl %eax, 0x1e18(%rsp)
movl %eax, 0x1e14(%rsp)
movl %eax, 0x1e10(%rsp)
movl %eax, 0x1e0c(%rsp)
movl %eax, 0x1e08(%rsp)
movl %eax, 0x1e04(%rsp)
movl %eax, 0x1e00(%rsp)
movl 0x1e04(%rsp), %r8d
movl 0x1e08(%rsp), %edi
movl 0x1e0c(%rsp), %esi
movl 0x1e14(%rsp), %edx
movl 0x1e18(%rsp), %ecx
movl 0x1e1c(%rsp), %eax
vmovd 0x1e00(%rsp), %xmm0
vpinsrd $0x1, %r8d, %xmm0, %xmm0
vpinsrd $0x2, %edi, %xmm0, %xmm0
vpinsrd $0x3, %esi, %xmm0, %xmm0
vmovd 0x1e10(%rsp), %xmm1
vpinsrd $0x1, %edx, %xmm1, %xmm1
vpinsrd $0x2, %ecx, %xmm1, %xmm1
vpinsrd $0x3, %eax, %xmm1, %xmm1
vmovdqa %xmm1, 0x1df0(%rsp)
vmovdqa %xmm0, 0x1de0(%rsp)
vmovaps 0x1de0(%rsp), %ymm0
vmovaps %ymm0, 0x7c0(%rsp)
movl 0x7f0(%rsp), %eax
movl %eax, 0x1378(%rsp)
movl 0x1378(%rsp), %eax
movl %eax, 0x3c(%rsp)
movl %eax, 0x1e5c(%rsp)
movl %eax, 0x1e58(%rsp)
movl %eax, 0x1e54(%rsp)
movl %eax, 0x1e50(%rsp)
movl %eax, 0x1e4c(%rsp)
movl %eax, 0x1e48(%rsp)
movl %eax, 0x1e44(%rsp)
movl %eax, 0x1e40(%rsp)
movl 0x1e44(%rsp), %r8d
movl 0x1e48(%rsp), %edi
movl 0x1e4c(%rsp), %esi
movl 0x1e54(%rsp), %edx
movl 0x1e58(%rsp), %ecx
movl 0x1e5c(%rsp), %eax
vmovd 0x1e40(%rsp), %xmm0
vpinsrd $0x1, %r8d, %xmm0, %xmm0
vpinsrd $0x2, %edi, %xmm0, %xmm0
vpinsrd $0x3, %esi, %xmm0, %xmm0
vmovd 0x1e50(%rsp), %xmm1
vpinsrd $0x1, %edx, %xmm1, %xmm1
vpinsrd $0x2, %ecx, %xmm1, %xmm1
vpinsrd $0x3, %eax, %xmm1, %xmm1
vmovdqa %xmm1, 0x1e30(%rsp)
vmovdqa %xmm0, 0x1e20(%rsp)
vmovaps 0x1e20(%rsp), %ymm0
vmovaps %ymm0, 0x7a0(%rsp)
vxorps %xmm0, %xmm0, %xmm0
vmovaps %ymm0, 0x13a0(%rsp)
vmovaps 0x13a0(%rsp), %ymm0
vmovaps %ymm0, 0x780(%rsp)
movq 0x30(%rbp), %rax
movb 0x14(%rax), %cl
movl $0x1, %eax
movl %eax, %edx
shll %cl, %edx
movl %edx, %ecx
sarl %ecx
movl %ecx, 0x40(%rsp)
movb 0x38(%rbp), %cl
addb $0x6, %cl
movl %eax, %edx
shll %cl, %edx
movl 0x40(%rsp), %ecx
addl %edx, %ecx
movl %ecx, 0x1374(%rsp)
movl 0x1374(%rsp), %ecx
movl %ecx, 0x44(%rsp)
movl %ecx, 0x1e9c(%rsp)
movl %ecx, 0x1e98(%rsp)
movl %ecx, 0x1e94(%rsp)
movl %ecx, 0x1e90(%rsp)
movl %ecx, 0x1e8c(%rsp)
movl %ecx, 0x1e88(%rsp)
movl %ecx, 0x1e84(%rsp)
movl %ecx, 0x1e80(%rsp)
movl 0x1e84(%rsp), %r9d
movl 0x1e88(%rsp), %r8d
movl 0x1e8c(%rsp), %edi
movl 0x1e94(%rsp), %esi
movl 0x1e98(%rsp), %edx
movl 0x1e9c(%rsp), %ecx
vmovd 0x1e80(%rsp), %xmm0
vpinsrd $0x1, %r9d, %xmm0, %xmm0
vpinsrd $0x2, %r8d, %xmm0, %xmm0
vpinsrd $0x3, %edi, %xmm0, %xmm0
vmovd 0x1e90(%rsp), %xmm1
vpinsrd $0x1, %esi, %xmm1, %xmm1
vpinsrd $0x2, %edx, %xmm1, %xmm1
vpinsrd $0x3, %ecx, %xmm1, %xmm1
vmovdqa %xmm1, 0x1e70(%rsp)
vmovdqa %xmm0, 0x1e60(%rsp)
vmovaps 0x1e60(%rsp), %ymm0
vmovaps %ymm0, 0x760(%rsp)
movq 0x30(%rbp), %rcx
movl 0x14(%rcx), %ecx
movl %ecx, 0x1364(%rsp)
vmovd 0x1364(%rsp), %xmm0
vmovdqa %xmm0, 0x1350(%rsp)
vmovdqa 0x1350(%rsp), %xmm0
vmovdqa %xmm0, 0x750(%rsp)
movq 0x30(%rbp), %rdx
movb 0x18(%rdx), %cl
movl %eax, %esi
shll %cl, %esi
movl %esi, %ecx
sarl %ecx
movl %ecx, 0x48(%rsp)
movl 0x38(%rbp), %ecx
movl 0x14(%rdx), %edx
subl %edx, %ecx
addb $0xd, %cl
movl %eax, %edx
shll %cl, %edx
movl 0x48(%rsp), %ecx
subl %edx, %ecx
movl %ecx, 0x1370(%rsp)
movl 0x1370(%rsp), %ecx
movl %ecx, 0x4c(%rsp)
movl %ecx, 0x1edc(%rsp)
movl %ecx, 0x1ed8(%rsp)
movl %ecx, 0x1ed4(%rsp)
movl %ecx, 0x1ed0(%rsp)
movl %ecx, 0x1ecc(%rsp)
movl %ecx, 0x1ec8(%rsp)
movl %ecx, 0x1ec4(%rsp)
movl %ecx, 0x1ec0(%rsp)
movl 0x1ec4(%rsp), %r9d
movl 0x1ec8(%rsp), %r8d
movl 0x1ecc(%rsp), %edi
movl 0x1ed4(%rsp), %esi
movl 0x1ed8(%rsp), %edx
movl 0x1edc(%rsp), %ecx
vmovd 0x1ec0(%rsp), %xmm0
vpinsrd $0x1, %r9d, %xmm0, %xmm0
vpinsrd $0x2, %r8d, %xmm0, %xmm0
vpinsrd $0x3, %edi, %xmm0, %xmm0
vmovd 0x1ed0(%rsp), %xmm1
vpinsrd $0x1, %esi, %xmm1, %xmm1
vpinsrd $0x2, %edx, %xmm1, %xmm1
vpinsrd $0x3, %ecx, %xmm1, %xmm1
vmovdqa %xmm1, 0x1eb0(%rsp)
vmovdqa %xmm0, 0x1ea0(%rsp)
vmovaps 0x1ea0(%rsp), %ymm0
vmovaps %ymm0, 0x720(%rsp)
movq 0x30(%rbp), %rcx
movl 0x18(%rcx), %ecx
movl %ecx, 0x134c(%rsp)
vmovd 0x134c(%rsp), %xmm0
vmovdqa %xmm0, 0x1330(%rsp)
vmovdqa 0x1330(%rsp), %xmm0
vmovdqa %xmm0, 0x710(%rsp)
movl 0x38(%rbp), %ecx
movq 0x30(%rbp), %rdx
movl 0x14(%rdx), %esi
movl 0x18(%rdx), %edx
subl %esi, %ecx
subl %edx, %ecx
addl $0xe, %ecx
movl %ecx, 0x70c(%rsp)
movb 0x70c(%rsp), %cl
movb %cl, 0x53(%rsp)
movl %eax, %edx
shll %cl, %edx
movb 0x53(%rsp), %cl
movl %edx, 0x54(%rsp)
decb %cl
movl %eax, %edx
shll %cl, %edx
movl 0x54(%rsp), %ecx
addl %edx, %ecx
movl %ecx, 0x708(%rsp)
movl 0x708(%rsp), %ecx
movl %ecx, 0x136c(%rsp)
movl 0x136c(%rsp), %ecx
movl %ecx, 0x58(%rsp)
movl %ecx, 0x1f1c(%rsp)
movl %ecx, 0x1f18(%rsp)
movl %ecx, 0x1f14(%rsp)
movl %ecx, 0x1f10(%rsp)
movl %ecx, 0x1f0c(%rsp)
movl %ecx, 0x1f08(%rsp)
movl %ecx, 0x1f04(%rsp)
movl %ecx, 0x1f00(%rsp)
movl 0x1f04(%rsp), %r9d
movl 0x1f08(%rsp), %r8d
movl 0x1f0c(%rsp), %edi
movl 0x1f14(%rsp), %esi
movl 0x1f18(%rsp), %edx
movl 0x1f1c(%rsp), %ecx
vmovd 0x1f00(%rsp), %xmm0
vpinsrd $0x1, %r9d, %xmm0, %xmm0
vpinsrd $0x2, %r8d, %xmm0, %xmm0
vpinsrd $0x3, %edi, %xmm0, %xmm0
vmovd 0x1f10(%rsp), %xmm1
vpinsrd $0x1, %esi, %xmm1, %xmm1
vpinsrd $0x2, %edx, %xmm1, %xmm1
vpinsrd $0x3, %ecx, %xmm1, %xmm1
vmovdqa %xmm1, 0x1ef0(%rsp)
vmovdqa %xmm0, 0x1ee0(%rsp)
vmovaps 0x1ee0(%rsp), %ymm0
vmovaps %ymm0, 0x6e0(%rsp)
movq 0x30(%rbp), %rcx
movl 0x14(%rcx), %edx
movl 0x18(%rcx), %ecx
addl %ecx, %edx
movl $0xe, %ecx
subl %edx, %ecx
movl %ecx, 0x6dc(%rsp)
movb 0x6dc(%rsp), %cl
shll %cl, %eax
sarl %eax
movl %eax, 0x1368(%rsp)
movl 0x1368(%rsp), %eax
movl %eax, 0x5c(%rsp)
movl %eax, 0x1f5c(%rsp)
movl %eax, 0x1f58(%rsp)
movl %eax, 0x1f54(%rsp)
movl %eax, 0x1f50(%rsp)
movl %eax, 0x1f4c(%rsp)
movl %eax, 0x1f48(%rsp)
movl %eax, 0x1f44(%rsp)
movl %eax, 0x1f40(%rsp)
movl 0x1f44(%rsp), %edx
movl 0x1f48(%rsp), %ecx
movl 0x1f4c(%rsp), %eax
movl 0x1f54(%rsp), %r8d
movl 0x1f58(%rsp), %edi
movl 0x1f5c(%rsp), %esi
vmovd 0x1f50(%rsp), %xmm0
vpinsrd $0x1, %r8d, %xmm0, %xmm0
vpinsrd $0x2, %edi, %xmm0, %xmm0
vpinsrd $0x3, %esi, %xmm0, %xmm1
vmovd 0x1f40(%rsp), %xmm0
vpinsrd $0x1, %edx, %xmm0, %xmm0
vpinsrd $0x2, %ecx, %xmm0, %xmm0
vpinsrd $0x3, %eax, %xmm0, %xmm2
vmovaps %xmm2, %xmm0
vinserti128 $0x1, %xmm1, %ymm0, %ymm0
vmovdqa %ymm0, 0x1f20(%rsp)
vmovdqa 0x1f20(%rsp), %ymm0
vmovdqa %ymm0, 0x6a0(%rsp)
cmpl $0xa, 0x38(%rbp)
jne 0x94166f
movl $0x3ff, %eax # imm = 0x3FF
movl %eax, 0x34(%rsp)
jmp 0x941686
movl 0x38(%rbp), %edx
movl $0xff, %eax
movl $0xfff, %ecx # imm = 0xFFF
cmpl $0xc, %edx
cmovel %ecx, %eax
movl %eax, 0x34(%rsp)
movl 0x34(%rsp), %eax
movw %ax, 0x13ce(%rsp)
movw 0x13ce(%rsp), %ax
movw %ax, 0x32(%rsp)
movw %ax, 0x1fae(%rsp)
movw %ax, 0x1fac(%rsp)
movw %ax, 0x1faa(%rsp)
movw %ax, 0x1fa8(%rsp)
movw %ax, 0x1fa6(%rsp)
movw %ax, 0x1fa4(%rsp)
movw %ax, 0x1fa2(%rsp)
movw %ax, 0x1fa0(%rsp)
movw %ax, 0x1f9e(%rsp)
movw %ax, 0x1f9c(%rsp)
movw %ax, 0x1f9a(%rsp)
movw %ax, 0x1f98(%rsp)
movw %ax, 0x1f96(%rsp)
movw %ax, 0x1f94(%rsp)
movw %ax, 0x1f92(%rsp)
movw %ax, 0x1f90(%rsp)
movzwl 0x1fa0(%rsp), %eax
vmovd %eax, %xmm0
movzwl 0x1fa2(%rsp), %eax
vpinsrw $0x1, %eax, %xmm0, %xmm0
movzwl 0x1fa4(%rsp), %eax
vpinsrw $0x2, %eax, %xmm0, %xmm0
movzwl 0x1fa6(%rsp), %eax
vpinsrw $0x3, %eax, %xmm0, %xmm0
movzwl 0x1fa8(%rsp), %eax
vpinsrw $0x4, %eax, %xmm0, %xmm0
movzwl 0x1faa(%rsp), %eax
vpinsrw $0x5, %eax, %xmm0, %xmm0
movzwl 0x1fac(%rsp), %eax
vpinsrw $0x6, %eax, %xmm0, %xmm0
movzwl 0x1fae(%rsp), %eax
vpinsrw $0x7, %eax, %xmm0, %xmm1
movzwl 0x1f90(%rsp), %eax
vmovd %eax, %xmm0
movzwl 0x1f92(%rsp), %eax
vpinsrw $0x1, %eax, %xmm0, %xmm0
movzwl 0x1f94(%rsp), %eax
vpinsrw $0x2, %eax, %xmm0, %xmm0
movzwl 0x1f96(%rsp), %eax
vpinsrw $0x3, %eax, %xmm0, %xmm0
movzwl 0x1f98(%rsp), %eax
vpinsrw $0x4, %eax, %xmm0, %xmm0
movzwl 0x1f9a(%rsp), %eax
vpinsrw $0x5, %eax, %xmm0, %xmm0
movzwl 0x1f9c(%rsp), %eax
vpinsrw $0x6, %eax, %xmm0, %xmm0
movzwl 0x1f9e(%rsp), %eax
vpinsrw $0x7, %eax, %xmm0, %xmm2
vmovaps %xmm2, %xmm0
vinserti128 $0x1, %xmm1, %ymm0, %ymm0
vmovdqa %ymm0, 0x1f60(%rsp)
vmovdqa 0x1f60(%rsp), %ymm0
vmovdqa %ymm0, 0x680(%rsp)
movq 0x10(%rbp), %rdi
movl 0x20(%rbp), %esi
leaq 0x800(%rsp), %rdx
vzeroupper
callq 0x942c40
movq 0x18(%rbp), %rdi
movl 0x28(%rbp), %esi
leaq 0x880(%rsp), %rdx
callq 0x942c40
movl $0x0, 0xa24(%rsp)
movl 0xa24(%rsp), %eax
cmpl 0x1310(%rsp), %eax
jge 0x942c2b
movl $0x0, 0xa28(%rsp)
movl 0xa28(%rsp), %eax
cmpl 0xa30(%rsp), %eax
jge 0x941cb9
movq 0xa10(%rsp), %rax
movl 0xa28(%rsp), %ecx
movl 0x1324(%rsp), %edx
imull %edx, %ecx
movl 0xa24(%rsp), %edx
addl %edx, %ecx
movslq %ecx, %rcx
leaq (%rax,%rcx,2), %rax
movq %rax, 0x1418(%rsp)
movq 0x1418(%rsp), %rax
vmovups (%rax), %ymm0
vmovaps %ymm0, 0x660(%rsp)
vxorps %xmm0, %xmm0, %xmm0
vmovaps %ymm0, 0x1380(%rsp)
vmovdqa 0x1380(%rsp), %ymm0
vmovdqa %ymm0, 0x640(%rsp)
movl 0xa28(%rsp), %eax
addl $0x1, %eax
cmpl 0xa30(%rsp), %eax
jge 0x94193a
movq 0xa10(%rsp), %rax
movl 0xa28(%rsp), %ecx
addl $0x1, %ecx
imull 0x1324(%rsp), %ecx
addl 0xa24(%rsp), %ecx
movslq %ecx, %rcx
shlq %rcx
addq %rcx, %rax
movq %rax, 0x1410(%rsp)
movq 0x1410(%rsp), %rax
vmovdqu (%rax), %ymm0
vmovdqa %ymm0, 0x640(%rsp)
vmovaps 0x660(%rsp), %ymm0
vmovaps 0x640(%rsp), %ymm1
vperm2i128 $0x20, %ymm1, %ymm0, %ymm0 # ymm0 = ymm0[0,1],ymm1[0,1]
vmovaps %ymm0, 0x620(%rsp)
vmovaps 0x660(%rsp), %ymm0
vmovaps 0x640(%rsp), %ymm1
vperm2i128 $0x31, %ymm1, %ymm0, %ymm0 # ymm0 = ymm0[2,3],ymm1[2,3]
vmovaps %ymm0, 0x600(%rsp)
vmovaps 0x620(%rsp), %ymm0
vmovaps %ymm0, 0x900(%rsp)
vmovaps 0x600(%rsp), %ymm0
vmovaps 0x620(%rsp), %ymm1
vpalignr $0x4, %ymm1, %ymm0, %ymm0 # ymm0 = ymm1[4,5,6,7,8,9,10,11,12,13,14,15],ymm0[0,1,2,3],ymm1[20,21,22,23,24,25,26,27,28,29,30,31],ymm0[16,17,18,19]
vmovaps %ymm0, 0x920(%rsp)
vmovaps 0x600(%rsp), %ymm0
vmovaps 0x620(%rsp), %ymm1
vpalignr $0x8, %ymm1, %ymm0, %ymm0 # ymm0 = ymm1[8,9,10,11,12,13,14,15],ymm0[0,1,2,3,4,5,6,7],ymm1[24,25,26,27,28,29,30,31],ymm0[16,17,18,19,20,21,22,23]
vmovaps %ymm0, 0x940(%rsp)
vmovaps 0x600(%rsp), %ymm0
vmovaps 0x620(%rsp), %ymm1
vpalignr $0xc, %ymm1, %ymm0, %ymm0 # ymm0 = ymm1[12,13,14,15],ymm0[0,1,2,3,4,5,6,7,8,9,10,11],ymm1[28,29,30,31],ymm0[16,17,18,19,20,21,22,23,24,25,26,27]
vmovaps %ymm0, 0x960(%rsp)
leaq 0x900(%rsp), %rdi
movq %rdi, 0x20(%rsp)
leaq 0x800(%rsp), %rsi
movq %rsi, 0x28(%rsp)
callq 0x942d00
movq 0x20(%rsp), %rdi
movq 0x28(%rsp), %rsi
vmovaps %ymm0, 0x5e0(%rsp)
vmovaps 0x5e0(%rsp), %ymm1
vmovaps 0x760(%rsp), %ymm0
vmovaps %ymm1, 0x1880(%rsp)
vmovaps %ymm0, 0x1860(%rsp)
vmovaps 0x1880(%rsp), %ymm0
vmovaps 0x1860(%rsp), %ymm1
vpaddd %ymm1, %ymm0, %ymm1
vmovdqa 0x750(%rsp), %xmm0
vmovaps %ymm1, 0x1d40(%rsp)
vmovdqa %xmm0, 0x1d30(%rsp)
vmovaps 0x1d40(%rsp), %ymm0
vmovdqa 0x1d30(%rsp), %xmm1
vpsrad %xmm1, %ymm0, %ymm0
vmovaps %ymm0, 0x5e0(%rsp)
vmovaps 0x600(%rsp), %ymm0
vmovaps 0x620(%rsp), %ymm1
vpalignr $0x2, %ymm1, %ymm0, %ymm0 # ymm0 = ymm1[2,3,4,5,6,7,8,9,10,11,12,13,14,15],ymm0[0,1],ymm1[18,19,20,21,22,23,24,25,26,27,28,29,30,31],ymm0[16,17]
vmovaps %ymm0, 0x900(%rsp)
vmovaps 0x600(%rsp), %ymm0
vmovaps 0x620(%rsp), %ymm1
vpalignr $0x6, %ymm1, %ymm0, %ymm0 # ymm0 = ymm1[6,7,8,9,10,11,12,13,14,15],ymm0[0,1,2,3,4,5],ymm1[22,23,24,25,26,27,28,29,30,31],ymm0[16,17,18,19,20,21]
vmovaps %ymm0, 0x920(%rsp)
vmovaps 0x600(%rsp), %ymm0
vmovaps 0x620(%rsp), %ymm1
vpalignr $0xa, %ymm1, %ymm0, %ymm0 # ymm0 = ymm1[10,11,12,13,14,15],ymm0[0,1,2,3,4,5,6,7,8,9],ymm1[26,27,28,29,30,31],ymm0[16,17,18,19,20,21,22,23,24,25]
vmovaps %ymm0, 0x940(%rsp)
vmovaps 0x600(%rsp), %ymm0
vmovaps 0x620(%rsp), %ymm1
vpalignr $0xe, %ymm1, %ymm0, %ymm0 # ymm0 = ymm1[14,15],ymm0[0,1,2,3,4,5,6,7,8,9,10,11,12,13],ymm1[30,31],ymm0[16,17,18,19,20,21,22,23,24,25,26,27,28,29]
vmovaps %ymm0, 0x960(%rsp)
callq 0x942d00
vmovaps %ymm0, 0x5c0(%rsp)
vmovaps 0x5c0(%rsp), %ymm1
vmovaps 0x760(%rsp), %ymm0
vmovaps %ymm1, 0x1840(%rsp)
vmovaps %ymm0, 0x1820(%rsp)
vmovaps 0x1840(%rsp), %ymm0
vmovaps 0x1820(%rsp), %ymm1
vpaddd %ymm1, %ymm0, %ymm1
vmovdqa 0x750(%rsp), %xmm0
vmovaps %ymm1, 0x1d00(%rsp)
vmovdqa %xmm0, 0x1cf0(%rsp)
vmovaps 0x1d00(%rsp), %ymm0
vmovdqa 0x1cf0(%rsp), %xmm1
vpsrad %xmm1, %ymm0, %ymm0
vmovaps %ymm0, 0x5c0(%rsp)
vmovaps 0x5e0(%rsp), %ymm0
vmovaps %ymm0, 0x1dc0(%rsp)
vmovaps %ymm0, 0x1da0(%rsp)
vmovaps 0x1dc0(%rsp), %ymm0
vmovaps 0x1da0(%rsp), %ymm1
vpackssdw %ymm1, %ymm0, %ymm0
vmovaps %ymm0, 0x5a0(%rsp)
vmovaps 0x5c0(%rsp), %ymm0
vmovaps %ymm0, 0x1d80(%rsp)
vmovaps %ymm0, 0x1d60(%rsp)
vmovaps 0x1d80(%rsp), %ymm0
vmovaps 0x1d60(%rsp), %ymm1
vpackssdw %ymm1, %ymm0, %ymm0
vmovaps %ymm0, 0x580(%rsp)
vmovaps 0x5a0(%rsp), %ymm1
vmovaps 0x580(%rsp), %ymm0
vmovaps %ymm1, 0x15c0(%rsp)
vmovaps %ymm0, 0x15a0(%rsp)
vmovaps 0x15c0(%rsp), %ymm0
vmovaps 0x15a0(%rsp), %ymm1
vpunpcklwd %ymm1, %ymm0, %ymm0 # ymm0 = ymm0[0],ymm1[0],ymm0[1],ymm1[1],ymm0[2],ymm1[2],ymm0[3],ymm1[3],ymm0[8],ymm1[8],ymm0[9],ymm1[9],ymm0[10],ymm1[10],ymm0[11],ymm1[11]
vmovdqa %ymm0, 0x560(%rsp)
movl 0xa28(%rsp), %eax
imull 0xa2c(%rsp), %eax
movslq %eax, %rcx
leaq 0xa40(%rsp), %rax
shlq %rcx
addq %rcx, %rax
vmovdqa 0x560(%rsp), %ymm0
movq %rax, 0x1a48(%rsp)
vmovdqa %ymm0, 0x1a20(%rsp)
vmovdqa 0x1a20(%rsp), %ymm0
movq 0x1a48(%rsp), %rax
vmovdqa %ymm0, (%rax)
movl 0xa28(%rsp), %eax
addl $0x2, %eax
movl %eax, 0xa28(%rsp)
jmp 0x941867
leaq 0xa40(%rsp), %rax
movq %rax, 0x1408(%rsp)
movq 0x1408(%rsp), %rax
vmovups (%rax), %ymm0
vmovaps %ymm0, 0x540(%rsp)
movslq 0xa2c(%rsp), %rax
leaq 0xa40(%rsp,%rax,2), %rax
movq %rax, 0x1400(%rsp)
movq 0x1400(%rsp), %rax
vmovups (%rax), %ymm0
vmovaps %ymm0, 0x520(%rsp)
movl 0xa2c(%rsp), %eax
addl %eax, %eax
cltq
leaq 0xa40(%rsp,%rax,2), %rax
movq %rax, 0x13f8(%rsp)
movq 0x13f8(%rsp), %rax
vmovups (%rax), %ymm0
vmovaps %ymm0, 0x500(%rsp)
movl 0xa2c(%rsp), %ecx
movl %ecx, %eax
leal (%rax,%rax,2), %eax
cltq
leaq 0xa40(%rsp,%rax,2), %rax
movq %rax, 0x13f0(%rsp)
movq 0x13f0(%rsp), %rax
vmovups (%rax), %ymm0
vmovaps %ymm0, 0x4e0(%rsp)
movl 0xa2c(%rsp), %eax
shll $0x2, %eax
cltq
leaq 0xa40(%rsp,%rax,2), %rax
movq %rax, 0x13e8(%rsp)
movq 0x13e8(%rsp), %rax
vmovups (%rax), %ymm0
vmovaps %ymm0, 0x4c0(%rsp)
movl 0xa2c(%rsp), %ecx
movl %ecx, %eax
leal (%rax,%rax,4), %eax
cltq
leaq 0xa40(%rsp,%rax,2), %rax
movq %rax, 0x13e0(%rsp)
movq 0x13e0(%rsp), %rax
vmovups (%rax), %ymm0
vmovaps %ymm0, 0x4a0(%rsp)
vmovaps 0x540(%rsp), %ymm1
vmovaps 0x520(%rsp), %ymm0
vmovaps %ymm1, 0x1580(%rsp)
vmovaps %ymm0, 0x1560(%rsp)
vmovaps 0x1580(%rsp), %ymm0
vmovaps 0x1560(%rsp), %ymm1
vpunpcklwd %ymm1, %ymm0, %ymm0 # ymm0 = ymm0[0],ymm1[0],ymm0[1],ymm1[1],ymm0[2],ymm1[2],ymm0[3],ymm1[3],ymm0[8],ymm1[8],ymm0[9],ymm1[9],ymm0[10],ymm1[10],ymm0[11],ymm1[11]
vmovaps %ymm0, 0x900(%rsp)
vmovaps 0x500(%rsp), %ymm1
vmovaps 0x4e0(%rsp), %ymm0
vmovaps %ymm1, 0x1540(%rsp)
vmovaps %ymm0, 0x1520(%rsp)
vmovaps 0x1540(%rsp), %ymm0
vmovaps 0x1520(%rsp), %ymm1
vpunpcklwd %ymm1, %ymm0, %ymm0 # ymm0 = ymm0[0],ymm1[0],ymm0[1],ymm1[1],ymm0[2],ymm1[2],ymm0[3],ymm1[3],ymm0[8],ymm1[8],ymm0[9],ymm1[9],ymm0[10],ymm1[10],ymm0[11],ymm1[11]
vmovaps %ymm0, 0x920(%rsp)
vmovaps 0x4c0(%rsp), %ymm1
vmovaps 0x4a0(%rsp), %ymm0
vmovaps %ymm1, 0x1500(%rsp)
vmovaps %ymm0, 0x14e0(%rsp)
vmovaps 0x1500(%rsp), %ymm0
vmovaps 0x14e0(%rsp), %ymm1
vpunpcklwd %ymm1, %ymm0, %ymm0 # ymm0 = ymm0[0],ymm1[0],ymm0[1],ymm1[1],ymm0[2],ymm1[2],ymm0[3],ymm1[3],ymm0[8],ymm1[8],ymm0[9],ymm1[9],ymm0[10],ymm1[10],ymm0[11],ymm1[11]
vmovaps %ymm0, 0x940(%rsp)
vmovaps 0x540(%rsp), %ymm1
vmovaps 0x520(%rsp), %ymm0
vmovaps %ymm1, 0x1700(%rsp)
vmovaps %ymm0, 0x16e0(%rsp)
vmovaps 0x1700(%rsp), %ymm0
vmovaps 0x16e0(%rsp), %ymm1
vpunpckhwd %ymm1, %ymm0, %ymm0 # ymm0 = ymm0[4],ymm1[4],ymm0[5],ymm1[5],ymm0[6],ymm1[6],ymm0[7],ymm1[7],ymm0[12],ymm1[12],ymm0[13],ymm1[13],ymm0[14],ymm1[14],ymm0[15],ymm1[15]
vmovaps %ymm0, 0x980(%rsp)
vmovaps 0x500(%rsp), %ymm1
vmovaps 0x4e0(%rsp), %ymm0
vmovaps %ymm1, 0x16c0(%rsp)
vmovaps %ymm0, 0x16a0(%rsp)
vmovaps 0x16c0(%rsp), %ymm0
vmovaps 0x16a0(%rsp), %ymm1
vpunpckhwd %ymm1, %ymm0, %ymm0 # ymm0 = ymm0[4],ymm1[4],ymm0[5],ymm1[5],ymm0[6],ymm1[6],ymm0[7],ymm1[7],ymm0[12],ymm1[12],ymm0[13],ymm1[13],ymm0[14],ymm1[14],ymm0[15],ymm1[15]
vmovaps %ymm0, 0x9a0(%rsp)
vmovaps 0x4c0(%rsp), %ymm1
vmovaps 0x4a0(%rsp), %ymm0
vmovaps %ymm1, 0x1680(%rsp)
vmovaps %ymm0, 0x1660(%rsp)
vmovaps 0x1680(%rsp), %ymm0
vmovaps 0x1660(%rsp), %ymm1
vpunpckhwd %ymm1, %ymm0, %ymm0 # ymm0 = ymm0[4],ymm1[4],ymm0[5],ymm1[5],ymm0[6],ymm1[6],ymm0[7],ymm1[7],ymm0[12],ymm1[12],ymm0[13],ymm1[13],ymm0[14],ymm1[14],ymm0[15],ymm1[15]
vmovdqa %ymm0, 0x9c0(%rsp)
movl $0x0, 0xa28(%rsp)
movl 0xa28(%rsp), %eax
cmpl 0x130c(%rsp), %eax
jge 0x942c13
movl 0xa28(%rsp), %eax
movl 0xa2c(%rsp), %ecx
imull %ecx, %eax
cltq
leaq 0xa40(%rsp,%rax,2), %rax
movq %rax, 0x498(%rsp)
movq 0x498(%rsp), %rax
movl 0xa2c(%rsp), %edx
addl %edx, %edx
movl %edx, %ecx
leal (%rcx,%rcx,2), %ecx
movslq %ecx, %rcx
leaq (%rax,%rcx,2), %rax
movq %rax, 0x13d8(%rsp)
movq 0x13d8(%rsp), %rax
vmovups (%rax), %ymm0
vmovaps %ymm0, 0x460(%rsp)
movq 0x498(%rsp), %rax
movl 0xa2c(%rsp), %edx
movl %edx, %ecx
shll $0x3, %ecx
subl %edx, %ecx
movslq %ecx, %rcx
leaq (%rax,%rcx,2), %rax
movq %rax, 0x13d0(%rsp)
movq 0x13d0(%rsp), %rax
vmovups (%rax), %ymm0
vmovaps %ymm0, 0x440(%rsp)
vmovaps 0x460(%rsp), %ymm1
vmovaps 0x440(%rsp), %ymm0
vmovaps %ymm1, 0x14c0(%rsp)
vmovaps %ymm0, 0x14a0(%rsp)
vmovaps 0x14c0(%rsp), %ymm0
vmovaps 0x14a0(%rsp), %ymm1
vpunpcklwd %ymm1, %ymm0, %ymm0 # ymm0 = ymm0[0],ymm1[0],ymm0[1],ymm1[1],ymm0[2],ymm1[2],ymm0[3],ymm1[3],ymm0[8],ymm1[8],ymm0[9],ymm1[9],ymm0[10],ymm1[10],ymm0[11],ymm1[11]
vmovaps %ymm0, 0x960(%rsp)
vmovaps 0x460(%rsp), %ymm1
vmovaps 0x440(%rsp), %ymm0
vmovaps %ymm1, 0x1640(%rsp)
vmovaps %ymm0, 0x1620(%rsp)
vmovaps 0x1640(%rsp), %ymm0
vmovaps 0x1620(%rsp), %ymm1
vpunpckhwd %ymm1, %ymm0, %ymm0 # ymm0 = ymm0[4],ymm1[4],ymm0[5],ymm1[5],ymm0[6],ymm1[6],ymm0[7],ymm1[7],ymm0[12],ymm1[12],ymm0[13],ymm1[13],ymm0[14],ymm1[14],ymm0[15],ymm1[15]
vmovdqa %ymm0, 0x9e0(%rsp)
leaq 0x900(%rsp), %rdi
leaq 0x880(%rsp), %rsi
callq 0x942d00
vmovdqa %ymm0, 0x420(%rsp)
vmovdqa 0x420(%rsp), %ymm1
vmovdqa 0x720(%rsp), %ymm0
vmovdqa %ymm1, 0x1800(%rsp)
vmovdqa %ymm0, 0x17e0(%rsp)
vmovdqa 0x1800(%rsp), %ymm0
vmovdqa 0x17e0(%rsp), %ymm1
vpaddd %ymm1, %ymm0, %ymm1
vmovdqa 0x710(%rsp), %xmm0
vmovdqa %ymm1, 0x1cc0(%rsp)
vmovdqa %xmm0, 0x1cb0(%rsp)
vmovdqa 0x1cc0(%rsp), %ymm0
vmovdqa 0x1cb0(%rsp), %xmm1
vpsrad %xmm1, %ymm0, %ymm0
vmovdqa %ymm0, 0x400(%rsp)
vmovdqa 0x400(%rsp), %ymm1
vmovdqa 0x6e0(%rsp), %ymm0
vmovdqa %ymm1, 0x17c0(%rsp)
vmovdqa %ymm0, 0x17a0(%rsp)
vmovdqa 0x17c0(%rsp), %ymm0
vmovdqa 0x17a0(%rsp), %ymm1
vpaddd %ymm1, %ymm0, %ymm0
vmovdqa %ymm0, 0x3e0(%rsp)
movl 0x1310(%rsp), %eax
subl 0xa24(%rsp), %eax
cmpl $0x8, %eax
jge 0x9425d8
cmpl $0x0, 0x7fc(%rsp)
je 0x9424b2
movq 0xa38(%rsp), %rax
movl 0xa28(%rsp), %ecx
movl 0xa34(%rsp), %edx
imull %edx, %ecx
movl 0xa24(%rsp), %edx
addl %edx, %ecx
movslq %ecx, %rcx
leaq (%rax,%rcx,2), %rax
movq %rax, 0x1ad8(%rsp)
movq 0x1ad8(%rsp), %rax
vmovq (%rax), %xmm0
vmovdqa %xmm0, 0x1ac0(%rsp)
vmovdqa 0x1ac0(%rsp), %xmm0
vmovdqa %xmm0, 0x1a90(%rsp)
vmovdqa 0x1a90(%rsp), %xmm0
vmovdqa %xmm1, 0x3d0(%rsp)
vmovdqa %xmm0, 0x3c0(%rsp)
movq 0xa38(%rsp), %rax
movl 0xa28(%rsp), %ecx
movl 0xa34(%rsp), %edx
imull %edx, %ecx
movl 0xa24(%rsp), %esi
addl %esi, %ecx
addl %edx, %ecx
movslq %ecx, %rcx
leaq (%rax,%rcx,2), %rax
movq %rax, 0x1ab8(%rsp)
movq 0x1ab8(%rsp), %rax
vmovq (%rax), %xmm0
vmovdqa %xmm0, 0x1aa0(%rsp)
vmovdqa 0x1aa0(%rsp), %xmm0
vmovdqa %xmm0, 0x1a80(%rsp)
vmovdqa 0x1a80(%rsp), %xmm0
vmovdqa %xmm1, 0x3b0(%rsp)
vmovdqa %xmm0, 0x3a0(%rsp)
vmovaps 0x3c0(%rsp), %ymm0
vmovaps 0x3a0(%rsp), %ymm1
vperm2i128 $0x20, %ymm1, %ymm0, %ymm0 # ymm0 = ymm0[0,1],ymm1[0,1]
vmovaps %ymm0, 0x380(%rsp)
vmovaps 0x380(%rsp), %ymm1
vmovaps 0x780(%rsp), %ymm0
vmovaps %ymm1, 0x1480(%rsp)
vmovaps %ymm0, 0x1460(%rsp)
vmovaps 0x1480(%rsp), %ymm0
vmovaps 0x1460(%rsp), %ymm1
vpunpcklwd %ymm1, %ymm0, %ymm0 # ymm0 = ymm0[0],ymm1[0],ymm0[1],ymm1[1],ymm0[2],ymm1[2],ymm0[3],ymm1[3],ymm0[8],ymm1[8],ymm0[9],ymm1[9],ymm0[10],ymm1[10],ymm0[11],ymm1[11]
vmovaps %ymm0, 0x360(%rsp)
movl 0x7f8(%rsp), %r8d
leaq 0x360(%rsp), %rdi
leaq 0x3e0(%rsp), %rsi
leaq 0x7c0(%rsp), %rdx
leaq 0x7a0(%rsp), %rcx
callq 0x940c50
vmovaps %ymm0, 0x340(%rsp)
movl 0x6dc(%rsp), %ecx
leaq 0x340(%rsp), %rdi
leaq 0x6e0(%rsp), %rsi
leaq 0x6a0(%rsp), %rdx
callq 0x940e30
vmovaps %ymm0, 0x320(%rsp)
vmovaps 0x320(%rsp), %ymm0
vmovaps %ymm0, 0x1980(%rsp)
vmovaps %ymm0, 0x1960(%rsp)
vmovaps 0x1980(%rsp), %ymm0
vmovaps 0x1960(%rsp), %ymm1
vpackusdw %ymm1, %ymm0, %ymm0
vmovaps %ymm0, 0x300(%rsp)
vmovaps 0x300(%rsp), %ymm1
vmovaps 0x680(%rsp), %ymm0
vmovaps %ymm1, 0x1a00(%rsp)
vmovaps %ymm0, 0x19e0(%rsp)
vmovaps 0x1a00(%rsp), %ymm0
vmovaps 0x19e0(%rsp), %ymm1
vpminsw %ymm1, %ymm0, %ymm0
vmovaps %ymm0, 0x2e0(%rsp)
vmovaps 0x2e0(%rsp), %ymm0
vmovaps %ymm0, 0x1b40(%rsp)
vmovaps 0x1b40(%rsp), %ymm0
vmovdqa %xmm0, 0x2d0(%rsp)
vmovaps 0x2e0(%rsp), %ymm0
vextracti128 $0x1, %ymm0, 0x2c0(%rsp)
movq 0x1318(%rsp), %rax
movl 0xa28(%rsp), %ecx
movl 0x1314(%rsp), %edx
imull %edx, %ecx
movl 0xa24(%rsp), %edx
addl %edx, %ecx
movslq %ecx, %rcx
leaq (%rax,%rcx,2), %rax
vmovdqa 0x2d0(%rsp), %xmm0
movq %rax, 0x1be8(%rsp)
vmovdqa %xmm0, 0x1bd0(%rsp)
movq 0x1bd0(%rsp), %rcx
movq 0x1be8(%rsp), %rax
movq %rcx, (%rax)
movq 0x1318(%rsp), %rax
movl 0xa28(%rsp), %ecx
movl 0x1314(%rsp), %edx
imull %edx, %ecx
movl 0xa24(%rsp), %esi
addl %esi, %ecx
addl %edx, %ecx
movslq %ecx, %rcx
leaq (%rax,%rcx,2), %rax
vmovdqa 0x2c0(%rsp), %xmm0
movq %rax, 0x1bc8(%rsp)
vmovdqa %xmm0, 0x1bb0(%rsp)
movq 0x1bb0(%rsp), %rcx
movq 0x1bc8(%rsp), %rax
movq %rcx, (%rax)
jmp 0x9425d3
vmovaps 0x3e0(%rsp), %ymm0
vmovaps %ymm0, 0x1940(%rsp)
vmovaps %ymm0, 0x1920(%rsp)
vmovaps 0x1940(%rsp), %ymm0
vmovaps 0x1920(%rsp), %ymm1
vpackusdw %ymm1, %ymm0, %ymm0
vmovaps %ymm0, 0x2a0(%rsp)
vmovaps 0x2a0(%rsp), %ymm0
vmovaps %ymm0, 0x1b20(%rsp)
vmovaps 0x1b20(%rsp), %ymm0
vmovdqa %xmm0, 0x290(%rsp)
vmovaps 0x2a0(%rsp), %ymm0
vextracti128 $0x1, %ymm0, 0x280(%rsp)
movq 0xa38(%rsp), %rax
movl 0xa28(%rsp), %ecx
movl 0xa34(%rsp), %edx
imull %edx, %ecx
movl 0xa24(%rsp), %edx
addl %edx, %ecx
movslq %ecx, %rcx
leaq (%rax,%rcx,2), %rax
vmovdqa 0x290(%rsp), %xmm0
movq %rax, 0x1ba8(%rsp)
vmovdqa %xmm0, 0x1b90(%rsp)
movq 0x1b90(%rsp), %rcx
movq 0x1ba8(%rsp), %rax
movq %rcx, (%rax)
movq 0xa38(%rsp), %rax
movl 0xa28(%rsp), %ecx
movl 0xa34(%rsp), %edx
imull %edx, %ecx
movl 0xa24(%rsp), %esi
addl %esi, %ecx
addl %edx, %ecx
movslq %ecx, %rcx
leaq (%rax,%rcx,2), %rax
vmovdqa 0x280(%rsp), %xmm0
movq %rax, 0x1b88(%rsp)
vmovdqa %xmm0, 0x1b70(%rsp)
movq 0x1b70(%rsp), %rcx
movq 0x1b88(%rsp), %rax
movq %rcx, (%rax)
jmp 0x942b91
leaq 0x900(%rsp), %rdi
addq $0x80, %rdi
leaq 0x880(%rsp), %rsi
callq 0x942d00
vmovdqa %ymm0, 0x260(%rsp)
vmovdqa 0x260(%rsp), %ymm1
vmovdqa 0x720(%rsp), %ymm0
vmovdqa %ymm1, 0x1780(%rsp)
vmovdqa %ymm0, 0x1760(%rsp)
vmovdqa 0x1780(%rsp), %ymm0
vmovdqa 0x1760(%rsp), %ymm1
vpaddd %ymm1, %ymm0, %ymm1
vmovdqa 0x710(%rsp), %xmm0
vmovdqa %ymm1, 0x1c80(%rsp)
vmovdqa %xmm0, 0x1c70(%rsp)
vmovdqa 0x1c80(%rsp), %ymm0
vmovdqa 0x1c70(%rsp), %xmm1
vpsrad %xmm1, %ymm0, %ymm0
vmovdqa %ymm0, 0x240(%rsp)
vmovdqa 0x240(%rsp), %ymm1
vmovdqa 0x6e0(%rsp), %ymm0
vmovdqa %ymm1, 0x1740(%rsp)
vmovdqa %ymm0, 0x1720(%rsp)
vmovdqa 0x1740(%rsp), %ymm0
vmovdqa 0x1720(%rsp), %ymm1
vpaddd %ymm1, %ymm0, %ymm0
vmovdqa %ymm0, 0x220(%rsp)
cmpl $0x0, 0x7fc(%rsp)
je 0x942a6e
movq 0xa38(%rsp), %rax
movl 0xa28(%rsp), %ecx
movl 0xa34(%rsp), %edx
imull %edx, %ecx
movl 0xa24(%rsp), %edx
addl %edx, %ecx
movslq %ecx, %rcx
leaq (%rax,%rcx,2), %rax
movq %rax, 0x1a58(%rsp)
movq 0x1a58(%rsp), %rax
vmovdqu (%rax), %xmm0
vmovdqa %xmm0, 0x1a70(%rsp)
vmovdqa 0x1a70(%rsp), %xmm0
vmovdqa %xmm1, 0x210(%rsp)
vmovdqa %xmm0, 0x200(%rsp)
movq 0xa38(%rsp), %rax
movl 0xa28(%rsp), %ecx
movl 0xa34(%rsp), %edx
imull %edx, %ecx
movl 0xa24(%rsp), %esi
addl %esi, %ecx
addl %edx, %ecx
movslq %ecx, %rcx
leaq (%rax,%rcx,2), %rax
movq %rax, 0x1a50(%rsp)
movq 0x1a50(%rsp), %rax
vmovdqu (%rax), %xmm0
vmovdqa %xmm0, 0x1a60(%rsp)
vmovdqa 0x1a60(%rsp), %xmm0
vmovdqa %xmm1, 0x1f0(%rsp)
vmovdqa %xmm0, 0x1e0(%rsp)
vmovaps 0x200(%rsp), %ymm0
vmovaps 0x1e0(%rsp), %ymm1
vperm2i128 $0x20, %ymm1, %ymm0, %ymm0 # ymm0 = ymm0[0,1],ymm1[0,1]
vmovaps %ymm0, 0x1c0(%rsp)
vmovaps 0x1c0(%rsp), %ymm1
vmovaps 0x780(%rsp), %ymm0
vmovaps %ymm1, 0x1440(%rsp)
vmovaps %ymm0, 0x1420(%rsp)
vmovaps 0x1440(%rsp), %ymm0
vmovaps 0x1420(%rsp), %ymm1
vpunpcklwd %ymm1, %ymm0, %ymm0 # ymm0 = ymm0[0],ymm1[0],ymm0[1],ymm1[1],ymm0[2],ymm1[2],ymm0[3],ymm1[3],ymm0[8],ymm1[8],ymm0[9],ymm1[9],ymm0[10],ymm1[10],ymm0[11],ymm1[11]
vmovaps %ymm0, 0x1a0(%rsp)
vmovaps 0x1c0(%rsp), %ymm1
vmovaps 0x780(%rsp), %ymm0
vmovaps %ymm1, 0x1600(%rsp)
vmovaps %ymm0, 0x15e0(%rsp)
vmovaps 0x1600(%rsp), %ymm0
vmovaps 0x15e0(%rsp), %ymm1
vpunpckhwd %ymm1, %ymm0, %ymm0 # ymm0 = ymm0[4],ymm1[4],ymm0[5],ymm1[5],ymm0[6],ymm1[6],ymm0[7],ymm1[7],ymm0[12],ymm1[12],ymm0[13],ymm1[13],ymm0[14],ymm1[14],ymm0[15],ymm1[15]
vmovaps %ymm0, 0x180(%rsp)
movl 0x7f8(%rsp), %r8d
leaq 0x1a0(%rsp), %rdi
leaq 0x3e0(%rsp), %rsi
leaq 0x7c0(%rsp), %rdx
movq %rdx, (%rsp)
leaq 0x7a0(%rsp), %rcx
movq %rcx, 0x8(%rsp)
callq 0x940c50
movq (%rsp), %rdx
movq 0x8(%rsp), %rcx
vmovaps %ymm0, 0x160(%rsp)
movl 0x7f8(%rsp), %r8d
leaq 0x180(%rsp), %rdi
leaq 0x220(%rsp), %rsi
callq 0x940c50
vmovaps %ymm0, 0x140(%rsp)
movl 0x6dc(%rsp), %ecx
leaq 0x160(%rsp), %rdi
leaq 0x6e0(%rsp), %rsi
movq %rsi, 0x10(%rsp)
leaq 0x6a0(%rsp), %rdx
movq %rdx, 0x18(%rsp)
callq 0x940e30
movq 0x10(%rsp), %rsi
movq 0x18(%rsp), %rdx
vmovaps %ymm0, 0x120(%rsp)
movl 0x6dc(%rsp), %ecx
leaq 0x140(%rsp), %rdi
callq 0x940e30
vmovaps %ymm0, 0x100(%rsp)
vmovaps 0x120(%rsp), %ymm1
vmovaps 0x100(%rsp), %ymm0
vmovaps %ymm1, 0x1900(%rsp)
vmovaps %ymm0, 0x18e0(%rsp)
vmovaps 0x1900(%rsp), %ymm0
vmovaps 0x18e0(%rsp), %ymm1
vpackusdw %ymm1, %ymm0, %ymm0
vmovaps %ymm0, 0xe0(%rsp)
vmovaps 0xe0(%rsp), %ymm1
vmovaps 0x680(%rsp), %ymm0
vmovaps %ymm1, 0x19c0(%rsp)
vmovaps %ymm0, 0x19a0(%rsp)
vmovaps 0x19c0(%rsp), %ymm0
vmovaps 0x19a0(%rsp), %ymm1
vpminsw %ymm1, %ymm0, %ymm0
vmovaps %ymm0, 0xc0(%rsp)
vmovaps 0xc0(%rsp), %ymm0
vmovaps %ymm0, 0x1b00(%rsp)
vmovaps 0x1b00(%rsp), %ymm0
vmovdqa %xmm0, 0xb0(%rsp)
vmovdqa 0xd0(%rsp), %xmm0
vmovdqa %xmm0, 0xa0(%rsp)
movq 0x1318(%rsp), %rax
movl 0xa28(%rsp), %ecx
imull 0x1314(%rsp), %ecx
addl 0xa24(%rsp), %ecx
movslq %ecx, %rcx
shlq %rcx
addq %rcx, %rax
vmovdqa 0xb0(%rsp), %xmm0
movq %rax, 0x1c68(%rsp)
vmovdqa %xmm0, 0x1c50(%rsp)
vmovdqa 0x1c50(%rsp), %xmm0
movq 0x1c68(%rsp), %rax
vmovdqa %xmm0, (%rax)
movq 0x1318(%rsp), %rax
movl 0xa28(%rsp), %ecx
imull 0x1314(%rsp), %ecx
addl 0xa24(%rsp), %ecx
addl 0x1314(%rsp), %ecx
movslq %ecx, %rcx
shlq %rcx
addq %rcx, %rax
vmovdqa 0xa0(%rsp), %xmm0
movq %rax, 0x1c48(%rsp)
vmovdqa %xmm0, 0x1c30(%rsp)
vmovdqa 0x1c30(%rsp), %xmm0
movq 0x1c48(%rsp), %rax
vmovdqa %xmm0, (%rax)
jmp 0x942b8f
vmovaps 0x3e0(%rsp), %ymm1
vmovaps 0x220(%rsp), %ymm0
vmovaps %ymm1, 0x18c0(%rsp)
vmovaps %ymm0, 0x18a0(%rsp)
vmovaps 0x18c0(%rsp), %ymm0
vmovaps 0x18a0(%rsp), %ymm1
vpackusdw %ymm1, %ymm0, %ymm0
vmovaps %ymm0, 0x80(%rsp)
vmovaps 0x80(%rsp), %ymm0
vmovaps %ymm0, 0x1ae0(%rsp)
vmovaps 0x1ae0(%rsp), %ymm0
vmovdqa %xmm0, 0x70(%rsp)
vmovdqa 0x90(%rsp), %xmm0
vmovdqa %xmm0, 0x60(%rsp)
movq 0xa38(%rsp), %rax
movl 0xa28(%rsp), %ecx
imull 0xa34(%rsp), %ecx
addl 0xa24(%rsp), %ecx
movslq %ecx, %rcx
shlq %rcx
addq %rcx, %rax
vmovdqa 0x70(%rsp), %xmm0
movq %rax, 0x1c28(%rsp)
vmovdqa %xmm0, 0x1c10(%rsp)
vmovdqa 0x1c10(%rsp), %xmm0
movq 0x1c28(%rsp), %rax
vmovdqa %xmm0, (%rax)
movq 0xa38(%rsp), %rax
movl 0xa28(%rsp), %ecx
imull 0xa34(%rsp), %ecx
addl 0xa24(%rsp), %ecx
addl 0xa34(%rsp), %ecx
movslq %ecx, %rcx
shlq %rcx
addq %rcx, %rax
vmovdqa 0x60(%rsp), %xmm0
movq %rax, 0x1c08(%rsp)
vmovdqa %xmm0, 0x1bf0(%rsp)
vmovdqa 0x1bf0(%rsp), %xmm0
movq 0x1c08(%rsp), %rax
vmovdqa %xmm0, (%rax)
jmp 0x942b91
vmovdqa 0x920(%rsp), %ymm0
vmovdqa %ymm0, 0x900(%rsp)
vmovdqa 0x940(%rsp), %ymm0
vmovdqa %ymm0, 0x920(%rsp)
vmovdqa 0x960(%rsp), %ymm0
vmovdqa %ymm0, 0x940(%rsp)
vmovdqa 0x9a0(%rsp), %ymm0
vmovdqa %ymm0, 0x980(%rsp)
vmovdqa 0x9c0(%rsp), %ymm0
vmovdqa %ymm0, 0x9a0(%rsp)
vmovdqa 0x9e0(%rsp), %ymm0
vmovdqa %ymm0, 0x9c0(%rsp)
movl 0xa28(%rsp), %eax
addl $0x2, %eax
movl %eax, 0xa28(%rsp)
jmp 0x941f6f
jmp 0x942c15
movl 0xa24(%rsp), %eax
addl $0x8, %eax
movl %eax, 0xa24(%rsp)
jmp 0x941848
movq %rbp, %rsp
popq %rbp
vzeroupper
retq
nopw %cs:(%rax,%rax)
| /m-ab-s[P]aom/av1/common/x86/highbd_jnt_convolve_avx2.c |
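Note on the compound (do_average) branch of the kernel above: the weighted average and the final rounding are folded into highbd_comp_avg and highbd_convolve_rounding, whose bodies are not part of this listing. The scalar sketch below models the per-pixel arithmetic under two stated assumptions: the distance-weighted average is (w0 * ref + w1 * res) >> DIST_PRECISION_BITS with w0 + w1 == 16, and the rounding helper subtracts offset_const before the arithmetic shift by rounding_shift. model_compound_round and its parameters are illustrative names, not libaom API.

#include <stdint.h>

#define FILTER_BITS 7
#define DIST_PRECISION_BITS 4  // assumed precision of fwd_offset/bck_offset

// Scalar model of one output pixel of the do_average branch above.
static int32_t model_compound_round(int32_t ref, int32_t res_unsigned, int w0,
                                    int w1, int use_dist_wtd, int bd,
                                    int round_0, int round_1) {
  const int offset_0 = bd + 2 * FILTER_BITS - round_0 - round_1;
  const int offset = (1 << offset_0) + (1 << (offset_0 - 1));
  const int rounding_shift = 2 * FILTER_BITS - round_0 - round_1;
  const int rounding = (1 << rounding_shift) >> 1;
  const int32_t avg =
      use_dist_wtd ? (w0 * ref + w1 * res_unsigned) >> DIST_PRECISION_BITS
                   : (ref + res_unsigned) >> 1;
  // Matches the assumed highbd_convolve_rounding: remove the offset, round.
  int32_t pel = (avg - offset + rounding) >> rounding_shift;
  const int32_t max_pel = (1 << bd) - 1;  // packus + min with clip_pixel_to_bd
  return pel < 0 ? 0 : (pel > max_pel ? max_pel : pel);
}

For instance, assuming the typical compound configuration round_0 = 3 and round_1 = 7 with bd = 10, this gives offset_0 = 14, offset = 24576 and rounding_shift = 4, which are the values the kernel splats into offset_const and rounding_const.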
prepare_coeffs | static inline void prepare_coeffs(const InterpFilterParams *const filter_params,
const int subpel_q4,
__m256i *const coeffs /* [4] */) {
const int16_t *filter = av1_get_interp_filter_subpel_kernel(
filter_params, subpel_q4 & SUBPEL_MASK);
const __m128i coeff_8 = _mm_loadu_si128((__m128i *)filter);
const __m256i coeff = _mm256_broadcastsi128_si256(coeff_8);
// coeffs 0 1 0 1 0 1 0 1
coeffs[0] = _mm256_shuffle_epi32(coeff, 0x00);
// coeffs 2 3 2 3 2 3 2 3
coeffs[1] = _mm256_shuffle_epi32(coeff, 0x55);
// coeffs 4 5 4 5 4 5 4 5
coeffs[2] = _mm256_shuffle_epi32(coeff, 0xaa);
// coeffs 6 7 6 7 6 7 6 7
coeffs[3] = _mm256_shuffle_epi32(coeff, 0xff);
} | pushq %rbp
movq %rsp, %rbp
andq $-0x20, %rsp
subq $0x80, %rsp
movq %rdi, 0x50(%rsp)
movl %esi, 0x4c(%rsp)
movq %rdx, 0x40(%rsp)
movq 0x50(%rsp), %rdi
movl 0x4c(%rsp), %esi
andl $0xf, %esi
callq 0x946020
movq %rax, 0x38(%rsp)
movq 0x38(%rsp), %rax
movq %rax, 0x58(%rsp)
movq 0x58(%rsp), %rax
vmovdqu (%rax), %xmm0
vmovdqa %xmm0, 0x20(%rsp)
vmovdqa 0x20(%rsp), %xmm0
vmovdqa %xmm0, 0x60(%rsp)
vbroadcastf128 0x60(%rsp), %ymm0 # ymm0 = mem[0,1,0,1]
vmovaps %ymm0, (%rsp)
vmovaps (%rsp), %ymm0
vpshufd $0x0, %ymm0, %ymm0 # ymm0 = ymm0[0,0,0,0,4,4,4,4]
movq 0x40(%rsp), %rax
vmovaps %ymm0, (%rax)
vmovaps (%rsp), %ymm0
vpshufd $0x55, %ymm0, %ymm0 # ymm0 = ymm0[1,1,1,1,5,5,5,5]
movq 0x40(%rsp), %rax
vmovaps %ymm0, 0x20(%rax)
vmovaps (%rsp), %ymm0
vpshufd $0xaa, %ymm0, %ymm0 # ymm0 = ymm0[2,2,2,2,6,6,6,6]
movq 0x40(%rsp), %rax
vmovaps %ymm0, 0x40(%rax)
vmovaps (%rsp), %ymm0
vpshufd $0xff, %ymm0, %ymm0 # ymm0 = ymm0[3,3,3,3,7,7,7,7]
movq 0x40(%rsp), %rax
vmovdqa %ymm0, 0x60(%rax)
movq %rbp, %rsp
popq %rbp
vzeroupper
retq
nopl (%rax,%rax)
| /m-ab-s[P]aom/aom_dsp/x86/convolve_avx2.h |
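Each register written by prepare_coeffs repeats one pair of adjacent taps in every 32-bit lane, which is exactly the layout _mm256_madd_epi16 expects when the sample registers carry adjacent pixel pairs (the alignr sequences in the callers build those pairs). A minimal standalone check, using made-up tap and pixel values purely for illustration (build with -mavx2):

#include <immintrin.h>
#include <stdint.h>
#include <stdio.h>

int main(void) {
  const int16_t filter[8] = { -2, 6, -12, 34, 100, -10, 4, 8 };  // example taps
  const int16_t x0 = 50, x1 = 60;                                // example pixels
  // Same construction as prepare_coeffs uses for coeffs[0]: taps 0,1 everywhere.
  const __m128i coeff_8 = _mm_loadu_si128((const __m128i *)filter);
  const __m256i coeff = _mm256_broadcastsi128_si256(coeff_8);
  const __m256i c0 = _mm256_shuffle_epi32(coeff, 0x00);
  // One 32-bit lane of s[0] in the callers: the adjacent pixel pair (x0, x1).
  const __m256i s0 =
      _mm256_set1_epi32((int32_t)(uint16_t)x0 | ((int32_t)(uint16_t)x1 << 16));
  int32_t out[8];
  _mm256_storeu_si256((__m256i *)out, _mm256_madd_epi16(s0, c0));
  // Every lane now holds f0*x0 + f1*x1, the first partial sum of the filter.
  printf("madd = %d, scalar = %d\n", out[0], filter[0] * x0 + filter[1] * x1);
  return 0;
}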
convolve | static inline __m256i convolve(const __m256i *const s,
const __m256i *const coeffs) {
const __m256i res_0 = _mm256_madd_epi16(s[0], coeffs[0]);
const __m256i res_1 = _mm256_madd_epi16(s[1], coeffs[1]);
const __m256i res_2 = _mm256_madd_epi16(s[2], coeffs[2]);
const __m256i res_3 = _mm256_madd_epi16(s[3], coeffs[3]);
const __m256i res = _mm256_add_epi32(_mm256_add_epi32(res_0, res_1),
_mm256_add_epi32(res_2, res_3));
return res;
} | pushq %rbp
movq %rsp, %rbp
andq $-0x20, %rsp
subq $0x2a0, %rsp # imm = 0x2A0
movq %rdi, 0xb8(%rsp)
movq %rsi, 0xb0(%rsp)
movq 0xb8(%rsp), %rax
vmovdqa (%rax), %ymm1
movq 0xb0(%rsp), %rax
vmovdqa (%rax), %ymm0
vmovdqa %ymm1, 0x260(%rsp)
vmovdqa %ymm0, 0x240(%rsp)
vmovdqa 0x260(%rsp), %ymm0
vmovdqa 0x240(%rsp), %ymm1
vpmaddwd %ymm1, %ymm0, %ymm0
vmovdqa %ymm0, 0x80(%rsp)
movq 0xb8(%rsp), %rax
vmovdqa 0x20(%rax), %ymm1
movq 0xb0(%rsp), %rax
vmovdqa 0x20(%rax), %ymm0
vmovdqa %ymm1, 0x220(%rsp)
vmovdqa %ymm0, 0x200(%rsp)
vmovdqa 0x220(%rsp), %ymm0
vmovdqa 0x200(%rsp), %ymm1
vpmaddwd %ymm1, %ymm0, %ymm0
vmovdqa %ymm0, 0x60(%rsp)
movq 0xb8(%rsp), %rax
vmovdqa 0x40(%rax), %ymm1
movq 0xb0(%rsp), %rax
vmovdqa 0x40(%rax), %ymm0
vmovdqa %ymm1, 0x1e0(%rsp)
vmovdqa %ymm0, 0x1c0(%rsp)
vmovdqa 0x1e0(%rsp), %ymm0
vmovdqa 0x1c0(%rsp), %ymm1
vpmaddwd %ymm1, %ymm0, %ymm0
vmovdqa %ymm0, 0x40(%rsp)
movq 0xb8(%rsp), %rax
vmovdqa 0x60(%rax), %ymm1
movq 0xb0(%rsp), %rax
vmovdqa 0x60(%rax), %ymm0
vmovdqa %ymm1, 0x1a0(%rsp)
vmovdqa %ymm0, 0x180(%rsp)
vmovdqa 0x1a0(%rsp), %ymm0
vmovdqa 0x180(%rsp), %ymm1
vpmaddwd %ymm1, %ymm0, %ymm0
vmovdqa %ymm0, 0x20(%rsp)
vmovdqa 0x80(%rsp), %ymm1
vmovdqa 0x60(%rsp), %ymm0
vmovdqa %ymm1, 0x160(%rsp)
vmovdqa %ymm0, 0x140(%rsp)
vmovdqa 0x160(%rsp), %ymm0
vmovdqa 0x140(%rsp), %ymm1
vpaddd %ymm1, %ymm0, %ymm1
vmovdqa 0x40(%rsp), %ymm2
vmovdqa 0x20(%rsp), %ymm0
vmovdqa %ymm2, 0x120(%rsp)
vmovdqa %ymm0, 0x100(%rsp)
vmovdqa 0x120(%rsp), %ymm0
vmovdqa 0x100(%rsp), %ymm2
vpaddd %ymm2, %ymm0, %ymm0
vmovdqa %ymm1, 0xe0(%rsp)
vmovdqa %ymm0, 0xc0(%rsp)
vmovdqa 0xe0(%rsp), %ymm0
vmovdqa 0xc0(%rsp), %ymm1
vpaddd %ymm1, %ymm0, %ymm0
vmovdqa %ymm0, (%rsp)
vmovdqa (%rsp), %ymm0
movq %rbp, %rsp
popq %rbp
retq
nopw %cs:(%rax,%rax)
| /m-ab-s[P]aom/aom_dsp/x86/convolve_avx2.h |
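Per 32-bit lane, convolve() is just an 8-tap dot product: s[k] carries the sample pair (x[2k], x[2k+1]) and coeffs[k] the tap pair (f[2k], f[2k+1]), so the four madd results sum to the full filter response. A scalar reference for one lane (convolve_scalar is an illustrative name, not libaom API); the callers then finish with the same (acc + round_const) >> round step, done with vector add and shift:

#include <stdint.h>

// What convolve() leaves in each 32-bit lane, written out serially.
static int32_t convolve_scalar(const int16_t x[8], const int16_t f[8]) {
  int32_t acc = 0;
  for (int k = 0; k < 8; ++k) acc += (int32_t)x[k] * (int32_t)f[k];
  return acc;
}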
av1_highbd_dist_wtd_convolve_x_avx2 | void av1_highbd_dist_wtd_convolve_x_avx2(
const uint16_t *src, int src_stride, uint16_t *dst0, int dst_stride0, int w,
int h, const InterpFilterParams *filter_params_x, const int subpel_x_qn,
ConvolveParams *conv_params, int bd) {
CONV_BUF_TYPE *dst = conv_params->dst;
int dst_stride = conv_params->dst_stride;
const int fo_horiz = filter_params_x->taps / 2 - 1;
const uint16_t *const src_ptr = src - fo_horiz;
const int bits = FILTER_BITS - conv_params->round_1;
int i, j;
__m256i s[4], coeffs_x[4];
const int do_average = conv_params->do_average;
const int use_dist_wtd_comp_avg = conv_params->use_dist_wtd_comp_avg;
const int w0 = conv_params->fwd_offset;
const int w1 = conv_params->bck_offset;
const __m256i wt0 = _mm256_set1_epi32(w0);
const __m256i wt1 = _mm256_set1_epi32(w1);
const __m256i zero = _mm256_setzero_si256();
const __m256i round_const_x =
_mm256_set1_epi32(((1 << conv_params->round_0) >> 1));
const __m128i round_shift_x = _mm_cvtsi32_si128(conv_params->round_0);
const __m128i round_shift_bits = _mm_cvtsi32_si128(bits);
const int offset_0 =
bd + 2 * FILTER_BITS - conv_params->round_0 - conv_params->round_1;
const int offset = (1 << offset_0) + (1 << (offset_0 - 1));
const __m256i offset_const = _mm256_set1_epi32(offset);
const int rounding_shift =
2 * FILTER_BITS - conv_params->round_0 - conv_params->round_1;
const __m256i rounding_const = _mm256_set1_epi32((1 << rounding_shift) >> 1);
const __m256i clip_pixel_to_bd =
_mm256_set1_epi16(bd == 10 ? 1023 : (bd == 12 ? 4095 : 255));
assert(bits >= 0);
prepare_coeffs(filter_params_x, subpel_x_qn, coeffs_x);
for (j = 0; j < w; j += 8) {
/* Horizontal filter */
for (i = 0; i < h; i += 2) {
const __m256i row0 =
_mm256_loadu_si256((__m256i *)&src_ptr[i * src_stride + j]);
__m256i row1 =
_mm256_loadu_si256((__m256i *)&src_ptr[(i + 1) * src_stride + j]);
const __m256i r0 = _mm256_permute2x128_si256(row0, row1, 0x20);
const __m256i r1 = _mm256_permute2x128_si256(row0, row1, 0x31);
// even pixels
s[0] = _mm256_alignr_epi8(r1, r0, 0);
s[1] = _mm256_alignr_epi8(r1, r0, 4);
s[2] = _mm256_alignr_epi8(r1, r0, 8);
s[3] = _mm256_alignr_epi8(r1, r0, 12);
__m256i res_even = convolve(s, coeffs_x);
res_even = _mm256_sra_epi32(_mm256_add_epi32(res_even, round_const_x),
round_shift_x);
// odd pixels
s[0] = _mm256_alignr_epi8(r1, r0, 2);
s[1] = _mm256_alignr_epi8(r1, r0, 6);
s[2] = _mm256_alignr_epi8(r1, r0, 10);
s[3] = _mm256_alignr_epi8(r1, r0, 14);
__m256i res_odd = convolve(s, coeffs_x);
res_odd = _mm256_sra_epi32(_mm256_add_epi32(res_odd, round_const_x),
round_shift_x);
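      // Scale up by bits = FILTER_BITS - round_1 so the x-only result sits at
      // the same intermediate precision (offset_0 bits) as the 2d compound
      // path before offset_const is added.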
res_even = _mm256_sll_epi32(res_even, round_shift_bits);
res_odd = _mm256_sll_epi32(res_odd, round_shift_bits);
__m256i res1 = _mm256_unpacklo_epi32(res_even, res_odd);
__m256i res_unsigned_lo = _mm256_add_epi32(res1, offset_const);
if (w - j < 8) {
if (do_average) {
const __m256i data_0 = _mm256_castsi128_si256(
_mm_loadl_epi64((__m128i *)(&dst[i * dst_stride + j])));
const __m256i data_1 = _mm256_castsi128_si256(_mm_loadl_epi64(
(__m128i *)(&dst[i * dst_stride + j + dst_stride])));
const __m256i data_01 =
_mm256_permute2x128_si256(data_0, data_1, 0x20);
const __m256i data_ref_0 = _mm256_unpacklo_epi16(data_01, zero);
const __m256i comp_avg_res = highbd_comp_avg(
&data_ref_0, &res_unsigned_lo, &wt0, &wt1, use_dist_wtd_comp_avg);
const __m256i round_result = highbd_convolve_rounding(
&comp_avg_res, &offset_const, &rounding_const, rounding_shift);
const __m256i res_16b =
_mm256_packus_epi32(round_result, round_result);
const __m256i res_clip = _mm256_min_epi16(res_16b, clip_pixel_to_bd);
const __m128i res_0 = _mm256_castsi256_si128(res_clip);
const __m128i res_1 = _mm256_extracti128_si256(res_clip, 1);
_mm_storel_epi64((__m128i *)(&dst0[i * dst_stride0 + j]), res_0);
_mm_storel_epi64(
(__m128i *)(&dst0[i * dst_stride0 + j + dst_stride0]), res_1);
} else {
__m256i res_16b =
_mm256_packus_epi32(res_unsigned_lo, res_unsigned_lo);
const __m128i res_0 = _mm256_castsi256_si128(res_16b);
const __m128i res_1 = _mm256_extracti128_si256(res_16b, 1);
_mm_storel_epi64((__m128i *)(&dst[i * dst_stride + j]), res_0);
_mm_storel_epi64((__m128i *)(&dst[i * dst_stride + j + dst_stride]),
res_1);
}
} else {
__m256i res2 = _mm256_unpackhi_epi32(res_even, res_odd);
__m256i res_unsigned_hi = _mm256_add_epi32(res2, offset_const);
if (do_average) {
const __m256i data_0 = _mm256_castsi128_si256(
_mm_loadu_si128((__m128i *)(&dst[i * dst_stride + j])));
const __m256i data_1 = _mm256_castsi128_si256(_mm_loadu_si128(
(__m128i *)(&dst[i * dst_stride + j + dst_stride])));
const __m256i data_01 =
_mm256_permute2x128_si256(data_0, data_1, 0x20);
const __m256i data_ref_0_lo = _mm256_unpacklo_epi16(data_01, zero);
const __m256i data_ref_0_hi = _mm256_unpackhi_epi16(data_01, zero);
const __m256i comp_avg_res_lo =
highbd_comp_avg(&data_ref_0_lo, &res_unsigned_lo, &wt0, &wt1,
use_dist_wtd_comp_avg);
const __m256i comp_avg_res_hi =
highbd_comp_avg(&data_ref_0_hi, &res_unsigned_hi, &wt0, &wt1,
use_dist_wtd_comp_avg);
const __m256i round_result_lo = highbd_convolve_rounding(
&comp_avg_res_lo, &offset_const, &rounding_const, rounding_shift);
const __m256i round_result_hi = highbd_convolve_rounding(
&comp_avg_res_hi, &offset_const, &rounding_const, rounding_shift);
const __m256i res_16b =
_mm256_packus_epi32(round_result_lo, round_result_hi);
const __m256i res_clip = _mm256_min_epi16(res_16b, clip_pixel_to_bd);
const __m128i res_0 = _mm256_castsi256_si128(res_clip);
const __m128i res_1 = _mm256_extracti128_si256(res_clip, 1);
_mm_store_si128((__m128i *)(&dst0[i * dst_stride0 + j]), res_0);
_mm_store_si128((__m128i *)(&dst0[i * dst_stride0 + j + dst_stride0]),
res_1);
} else {
__m256i res_16b =
_mm256_packus_epi32(res_unsigned_lo, res_unsigned_hi);
const __m128i res_0 = _mm256_castsi256_si128(res_16b);
const __m128i res_1 = _mm256_extracti128_si256(res_16b, 1);
_mm_store_si128((__m128i *)(&dst[i * dst_stride + j]), res_0);
_mm_store_si128((__m128i *)(&dst[i * dst_stride + j + dst_stride]),
res_1);
}
}
}
}
} | pushq %rbp
movq %rsp, %rbp
andq $-0x20, %rsp
subq $0x1080, %rsp # imm = 0x1080
movl 0x28(%rbp), %eax
movq 0x20(%rbp), %rax
movl 0x18(%rbp), %eax
movq 0x10(%rbp), %rax
movq %rdi, 0x768(%rsp)
movl %esi, 0x764(%rsp)
movq %rdx, 0x758(%rsp)
movl %ecx, 0x754(%rsp)
movl %r8d, 0x750(%rsp)
movl %r9d, 0x74c(%rsp)
movq 0x20(%rbp), %rax
movq 0x8(%rax), %rax
movq %rax, 0x740(%rsp)
movq 0x20(%rbp), %rax
movl 0x10(%rax), %eax
movl %eax, 0x73c(%rsp)
movq 0x10(%rbp), %rax
movzwl 0x8(%rax), %eax
shrl %eax
decl %eax
movl %eax, 0x738(%rsp)
movq 0x768(%rsp), %rax
movslq 0x738(%rsp), %rcx
addq %rcx, %rcx
subq %rcx, %rax
movq %rax, 0x730(%rsp)
movq 0x20(%rbp), %rax
movl 0x18(%rax), %ecx
movl $0x7, %eax
subl %ecx, %eax
movl %eax, 0x72c(%rsp)
movq 0x20(%rbp), %rax
movl (%rax), %eax
movl %eax, 0x61c(%rsp)
movq 0x20(%rbp), %rax
movl 0x24(%rax), %eax
movl %eax, 0x618(%rsp)
movq 0x20(%rbp), %rax
movl 0x28(%rax), %eax
movl %eax, 0x614(%rsp)
movq 0x20(%rbp), %rax
movl 0x2c(%rax), %eax
movl %eax, 0x610(%rsp)
movl 0x614(%rsp), %eax
movl %eax, 0x7bc(%rsp)
movl 0x7bc(%rsp), %eax
movl %eax, 0x44(%rsp)
movl %eax, 0xf1c(%rsp)
movl %eax, 0xf18(%rsp)
movl %eax, 0xf14(%rsp)
movl %eax, 0xf10(%rsp)
movl %eax, 0xf0c(%rsp)
movl %eax, 0xf08(%rsp)
movl %eax, 0xf04(%rsp)
movl %eax, 0xf00(%rsp)
movl 0xf04(%rsp), %r8d
movl 0xf08(%rsp), %edi
movl 0xf0c(%rsp), %esi
movl 0xf14(%rsp), %edx
movl 0xf18(%rsp), %ecx
movl 0xf1c(%rsp), %eax
vmovd 0xf00(%rsp), %xmm0
vpinsrd $0x1, %r8d, %xmm0, %xmm0
vpinsrd $0x2, %edi, %xmm0, %xmm0
vpinsrd $0x3, %esi, %xmm0, %xmm0
vmovd 0xf10(%rsp), %xmm1
vpinsrd $0x1, %edx, %xmm1, %xmm1
vpinsrd $0x2, %ecx, %xmm1, %xmm1
vpinsrd $0x3, %eax, %xmm1, %xmm1
vmovdqa %xmm1, 0xef0(%rsp)
vmovdqa %xmm0, 0xee0(%rsp)
vmovaps 0xee0(%rsp), %ymm0
vmovaps %ymm0, 0x5e0(%rsp)
movl 0x610(%rsp), %eax
movl %eax, 0x7b8(%rsp)
movl 0x7b8(%rsp), %eax
movl %eax, 0x48(%rsp)
movl %eax, 0xf5c(%rsp)
movl %eax, 0xf58(%rsp)
movl %eax, 0xf54(%rsp)
movl %eax, 0xf50(%rsp)
movl %eax, 0xf4c(%rsp)
movl %eax, 0xf48(%rsp)
movl %eax, 0xf44(%rsp)
movl %eax, 0xf40(%rsp)
movl 0xf44(%rsp), %r8d
movl 0xf48(%rsp), %edi
movl 0xf4c(%rsp), %esi
movl 0xf54(%rsp), %edx
movl 0xf58(%rsp), %ecx
movl 0xf5c(%rsp), %eax
vmovd 0xf40(%rsp), %xmm0
vpinsrd $0x1, %r8d, %xmm0, %xmm0
vpinsrd $0x2, %edi, %xmm0, %xmm0
vpinsrd $0x3, %esi, %xmm0, %xmm0
vmovd 0xf50(%rsp), %xmm1
vpinsrd $0x1, %edx, %xmm1, %xmm1
vpinsrd $0x2, %ecx, %xmm1, %xmm1
vpinsrd $0x3, %eax, %xmm1, %xmm1
vmovdqa %xmm1, 0xf30(%rsp)
vmovdqa %xmm0, 0xf20(%rsp)
vmovaps 0xf20(%rsp), %ymm0
vmovaps %ymm0, 0x5c0(%rsp)
vxorps %xmm0, %xmm0, %xmm0
vmovaps %ymm0, 0x7c0(%rsp)
vmovaps 0x7c0(%rsp), %ymm0
vmovaps %ymm0, 0x5a0(%rsp)
movq 0x20(%rbp), %rax
movb 0x14(%rax), %cl
movl $0x1, %eax
movl %eax, %edx
shll %cl, %edx
movl %edx, %ecx
sarl %ecx
movl %ecx, 0x7b4(%rsp)
movl 0x7b4(%rsp), %ecx
movl %ecx, 0x4c(%rsp)
movl %ecx, 0xf9c(%rsp)
movl %ecx, 0xf98(%rsp)
movl %ecx, 0xf94(%rsp)
movl %ecx, 0xf90(%rsp)
movl %ecx, 0xf8c(%rsp)
movl %ecx, 0xf88(%rsp)
movl %ecx, 0xf84(%rsp)
movl %ecx, 0xf80(%rsp)
movl 0xf84(%rsp), %r9d
movl 0xf88(%rsp), %r8d
movl 0xf8c(%rsp), %edi
movl 0xf94(%rsp), %esi
movl 0xf98(%rsp), %edx
movl 0xf9c(%rsp), %ecx
vmovd 0xf80(%rsp), %xmm0
vpinsrd $0x1, %r9d, %xmm0, %xmm0
vpinsrd $0x2, %r8d, %xmm0, %xmm0
vpinsrd $0x3, %edi, %xmm0, %xmm0
vmovd 0xf90(%rsp), %xmm1
vpinsrd $0x1, %esi, %xmm1, %xmm1
vpinsrd $0x2, %edx, %xmm1, %xmm1
vpinsrd $0x3, %ecx, %xmm1, %xmm1
vmovdqa %xmm1, 0xf70(%rsp)
vmovdqa %xmm0, 0xf60(%rsp)
vmovaps 0xf60(%rsp), %ymm0
vmovaps %ymm0, 0x580(%rsp)
movq 0x20(%rbp), %rcx
movl 0x14(%rcx), %ecx
movl %ecx, 0x7a8(%rsp)
vmovd 0x7a8(%rsp), %xmm0
vmovdqa %xmm0, 0x790(%rsp)
vmovdqa 0x790(%rsp), %xmm0
vmovdqa %xmm0, 0x570(%rsp)
movl 0x72c(%rsp), %ecx
movl %ecx, 0x78c(%rsp)
vmovd 0x78c(%rsp), %xmm0
vmovdqa %xmm0, 0x770(%rsp)
vmovdqa 0x770(%rsp), %xmm0
vmovdqa %xmm0, 0x560(%rsp)
movl 0x28(%rbp), %ecx
movq 0x20(%rbp), %rdx
movl 0x14(%rdx), %esi
movl 0x18(%rdx), %edx
subl %esi, %ecx
subl %edx, %ecx
addl $0xe, %ecx
movl %ecx, 0x55c(%rsp)
movb 0x55c(%rsp), %cl
movb %cl, 0x53(%rsp)
movl %eax, %edx
shll %cl, %edx
movb 0x53(%rsp), %cl
movl %edx, 0x54(%rsp)
decb %cl
movl %eax, %edx
shll %cl, %edx
movl 0x54(%rsp), %ecx
addl %edx, %ecx
movl %ecx, 0x558(%rsp)
movl 0x558(%rsp), %ecx
movl %ecx, 0x7b0(%rsp)
movl 0x7b0(%rsp), %ecx
movl %ecx, 0x58(%rsp)
movl %ecx, 0xfdc(%rsp)
movl %ecx, 0xfd8(%rsp)
movl %ecx, 0xfd4(%rsp)
movl %ecx, 0xfd0(%rsp)
movl %ecx, 0xfcc(%rsp)
movl %ecx, 0xfc8(%rsp)
movl %ecx, 0xfc4(%rsp)
movl %ecx, 0xfc0(%rsp)
movl 0xfc4(%rsp), %r9d
movl 0xfc8(%rsp), %r8d
movl 0xfcc(%rsp), %edi
movl 0xfd4(%rsp), %esi
movl 0xfd8(%rsp), %edx
movl 0xfdc(%rsp), %ecx
vmovd 0xfc0(%rsp), %xmm0
vpinsrd $0x1, %r9d, %xmm0, %xmm0
vpinsrd $0x2, %r8d, %xmm0, %xmm0
vpinsrd $0x3, %edi, %xmm0, %xmm0
vmovd 0xfd0(%rsp), %xmm1
vpinsrd $0x1, %esi, %xmm1, %xmm1
vpinsrd $0x2, %edx, %xmm1, %xmm1
vpinsrd $0x3, %ecx, %xmm1, %xmm1
vmovdqa %xmm1, 0xfb0(%rsp)
vmovdqa %xmm0, 0xfa0(%rsp)
vmovaps 0xfa0(%rsp), %ymm0
vmovaps %ymm0, 0x520(%rsp)
movq 0x20(%rbp), %rcx
movl 0x14(%rcx), %edx
movl 0x18(%rcx), %ecx
addl %ecx, %edx
movl $0xe, %ecx
subl %edx, %ecx
movl %ecx, 0x51c(%rsp)
movb 0x51c(%rsp), %cl
shll %cl, %eax
sarl %eax
movl %eax, 0x7ac(%rsp)
movl 0x7ac(%rsp), %eax
movl %eax, 0x5c(%rsp)
movl %eax, 0x101c(%rsp)
movl %eax, 0x1018(%rsp)
movl %eax, 0x1014(%rsp)
movl %eax, 0x1010(%rsp)
movl %eax, 0x100c(%rsp)
movl %eax, 0x1008(%rsp)
movl %eax, 0x1004(%rsp)
movl %eax, 0x1000(%rsp)
movl 0x1004(%rsp), %edx
movl 0x1008(%rsp), %ecx
movl 0x100c(%rsp), %eax
movl 0x1014(%rsp), %r8d
movl 0x1018(%rsp), %edi
movl 0x101c(%rsp), %esi
vmovd 0x1010(%rsp), %xmm0
vpinsrd $0x1, %r8d, %xmm0, %xmm0
vpinsrd $0x2, %edi, %xmm0, %xmm0
vpinsrd $0x3, %esi, %xmm0, %xmm1
vmovd 0x1000(%rsp), %xmm0
vpinsrd $0x1, %edx, %xmm0, %xmm0
vpinsrd $0x2, %ecx, %xmm0, %xmm0
vpinsrd $0x3, %eax, %xmm0, %xmm2
vmovaps %xmm2, %xmm0
vinserti128 $0x1, %xmm1, %ymm0, %ymm0
vmovdqa %ymm0, 0xfe0(%rsp)
vmovdqa 0xfe0(%rsp), %ymm0
vmovdqa %ymm0, 0x4e0(%rsp)
cmpl $0xa, 0x28(%rbp)
jne 0x94350b
movl $0x3ff, %eax # imm = 0x3FF
movl %eax, 0x40(%rsp)
jmp 0x943522
movl 0x28(%rbp), %edx
movl $0xff, %eax
movl $0xfff, %ecx # imm = 0xFFF
cmpl $0xc, %edx
cmovel %ecx, %eax
movl %eax, 0x40(%rsp)
movl 0x40(%rsp), %eax
movw %ax, 0x7ee(%rsp)
movw 0x7ee(%rsp), %ax
movw %ax, 0x3e(%rsp)
movw %ax, 0x106e(%rsp)
movw %ax, 0x106c(%rsp)
movw %ax, 0x106a(%rsp)
movw %ax, 0x1068(%rsp)
movw %ax, 0x1066(%rsp)
movw %ax, 0x1064(%rsp)
movw %ax, 0x1062(%rsp)
movw %ax, 0x1060(%rsp)
movw %ax, 0x105e(%rsp)
movw %ax, 0x105c(%rsp)
movw %ax, 0x105a(%rsp)
movw %ax, 0x1058(%rsp)
movw %ax, 0x1056(%rsp)
movw %ax, 0x1054(%rsp)
movw %ax, 0x1052(%rsp)
movw %ax, 0x1050(%rsp)
movzwl 0x1060(%rsp), %eax
vmovd %eax, %xmm0
movzwl 0x1062(%rsp), %eax
vpinsrw $0x1, %eax, %xmm0, %xmm0
movzwl 0x1064(%rsp), %eax
vpinsrw $0x2, %eax, %xmm0, %xmm0
movzwl 0x1066(%rsp), %eax
vpinsrw $0x3, %eax, %xmm0, %xmm0
movzwl 0x1068(%rsp), %eax
vpinsrw $0x4, %eax, %xmm0, %xmm0
movzwl 0x106a(%rsp), %eax
vpinsrw $0x5, %eax, %xmm0, %xmm0
movzwl 0x106c(%rsp), %eax
vpinsrw $0x6, %eax, %xmm0, %xmm0
movzwl 0x106e(%rsp), %eax
vpinsrw $0x7, %eax, %xmm0, %xmm1
movzwl 0x1050(%rsp), %eax
vmovd %eax, %xmm0
movzwl 0x1052(%rsp), %eax
vpinsrw $0x1, %eax, %xmm0, %xmm0
movzwl 0x1054(%rsp), %eax
vpinsrw $0x2, %eax, %xmm0, %xmm0
movzwl 0x1056(%rsp), %eax
vpinsrw $0x3, %eax, %xmm0, %xmm0
movzwl 0x1058(%rsp), %eax
vpinsrw $0x4, %eax, %xmm0, %xmm0
movzwl 0x105a(%rsp), %eax
vpinsrw $0x5, %eax, %xmm0, %xmm0
movzwl 0x105c(%rsp), %eax
vpinsrw $0x6, %eax, %xmm0, %xmm0
movzwl 0x105e(%rsp), %eax
vpinsrw $0x7, %eax, %xmm0, %xmm2
vmovaps %xmm2, %xmm0
vinserti128 $0x1, %xmm1, %ymm0, %ymm0
vmovdqa %ymm0, 0x1020(%rsp)
vmovdqa 0x1020(%rsp), %ymm0
vmovdqa %ymm0, 0x4c0(%rsp)
movq 0x10(%rbp), %rdi
movl 0x18(%rbp), %esi
leaq 0x620(%rsp), %rdx
vzeroupper
callq 0x942c40
movl $0x0, 0x724(%rsp)
movl 0x724(%rsp), %eax
cmpl 0x750(%rsp), %eax
jge 0x9444f8
movl $0x0, 0x728(%rsp)
movl 0x728(%rsp), %eax
cmpl 0x74c(%rsp), %eax
jge 0x9444e0
movq 0x730(%rsp), %rax
movl 0x728(%rsp), %ecx
movl 0x764(%rsp), %edx
imull %edx, %ecx
movl 0x724(%rsp), %edx
addl %edx, %ecx
movslq %ecx, %rcx
leaq (%rax,%rcx,2), %rax
movq %rax, 0x7f8(%rsp)
movq 0x7f8(%rsp), %rax
vmovups (%rax), %ymm0
vmovaps %ymm0, 0x4a0(%rsp)
movq 0x730(%rsp), %rax
movl 0x728(%rsp), %ecx
incl %ecx
movl 0x764(%rsp), %edx
imull %edx, %ecx
movl 0x724(%rsp), %edx
addl %edx, %ecx
movslq %ecx, %rcx
leaq (%rax,%rcx,2), %rax
movq %rax, 0x7f0(%rsp)
movq 0x7f0(%rsp), %rax
vmovups (%rax), %ymm0
vmovaps %ymm0, 0x480(%rsp)
vmovaps 0x4a0(%rsp), %ymm0
vmovaps 0x480(%rsp), %ymm1
vperm2i128 $0x20, %ymm1, %ymm0, %ymm0 # ymm0 = ymm0[0,1],ymm1[0,1]
vmovaps %ymm0, 0x460(%rsp)
vmovaps 0x4a0(%rsp), %ymm0
vmovaps 0x480(%rsp), %ymm1
vperm2i128 $0x31, %ymm1, %ymm0, %ymm0 # ymm0 = ymm0[2,3],ymm1[2,3]
vmovaps %ymm0, 0x440(%rsp)
vmovaps 0x460(%rsp), %ymm0
vmovaps %ymm0, 0x6a0(%rsp)
vmovaps 0x440(%rsp), %ymm0
vmovaps 0x460(%rsp), %ymm1
vpalignr $0x4, %ymm1, %ymm0, %ymm0 # ymm0 = ymm1[4,5,6,7,8,9,10,11,12,13,14,15],ymm0[0,1,2,3],ymm1[20,21,22,23,24,25,26,27,28,29,30,31],ymm0[16,17,18,19]
vmovaps %ymm0, 0x6c0(%rsp)
vmovaps 0x440(%rsp), %ymm0
vmovaps 0x460(%rsp), %ymm1
vpalignr $0x8, %ymm1, %ymm0, %ymm0 # ymm0 = ymm1[8,9,10,11,12,13,14,15],ymm0[0,1,2,3,4,5,6,7],ymm1[24,25,26,27,28,29,30,31],ymm0[16,17,18,19,20,21,22,23]
vmovaps %ymm0, 0x6e0(%rsp)
vmovaps 0x440(%rsp), %ymm0
vmovaps 0x460(%rsp), %ymm1
vpalignr $0xc, %ymm1, %ymm0, %ymm0 # ymm0 = ymm1[12,13,14,15],ymm0[0,1,2,3,4,5,6,7,8,9,10,11],ymm1[28,29,30,31],ymm0[16,17,18,19,20,21,22,23,24,25,26,27]
vmovaps %ymm0, 0x700(%rsp)
leaq 0x6a0(%rsp), %rdi
movq %rdi, 0x28(%rsp)
leaq 0x620(%rsp), %rsi
movq %rsi, 0x30(%rsp)
callq 0x942d00
movq 0x28(%rsp), %rdi
movq 0x30(%rsp), %rsi
vmovaps %ymm0, 0x420(%rsp)
vmovaps 0x420(%rsp), %ymm1
vmovaps 0x580(%rsp), %ymm0
vmovaps %ymm1, 0x9a0(%rsp)
vmovaps %ymm0, 0x980(%rsp)
vmovaps 0x9a0(%rsp), %ymm0
vmovaps 0x980(%rsp), %ymm1
vpaddd %ymm1, %ymm0, %ymm1
vmovdqa 0x570(%rsp), %xmm0
vmovaps %ymm1, 0xdc0(%rsp)
vmovdqa %xmm0, 0xdb0(%rsp)
vmovaps 0xdc0(%rsp), %ymm0
vmovdqa 0xdb0(%rsp), %xmm1
vpsrad %xmm1, %ymm0, %ymm0
vmovaps %ymm0, 0x420(%rsp)
vmovaps 0x440(%rsp), %ymm0
vmovaps 0x460(%rsp), %ymm1
vpalignr $0x2, %ymm1, %ymm0, %ymm0 # ymm0 = ymm1[2,3,4,5,6,7,8,9,10,11,12,13,14,15],ymm0[0,1],ymm1[18,19,20,21,22,23,24,25,26,27,28,29,30,31],ymm0[16,17]
vmovaps %ymm0, 0x6a0(%rsp)
vmovaps 0x440(%rsp), %ymm0
vmovaps 0x460(%rsp), %ymm1
vpalignr $0x6, %ymm1, %ymm0, %ymm0 # ymm0 = ymm1[6,7,8,9,10,11,12,13,14,15],ymm0[0,1,2,3,4,5],ymm1[22,23,24,25,26,27,28,29,30,31],ymm0[16,17,18,19,20,21]
vmovaps %ymm0, 0x6c0(%rsp)
vmovaps 0x440(%rsp), %ymm0
vmovaps 0x460(%rsp), %ymm1
vpalignr $0xa, %ymm1, %ymm0, %ymm0 # ymm0 = ymm1[10,11,12,13,14,15],ymm0[0,1,2,3,4,5,6,7,8,9],ymm1[26,27,28,29,30,31],ymm0[16,17,18,19,20,21,22,23,24,25]
vmovaps %ymm0, 0x6e0(%rsp)
vmovaps 0x440(%rsp), %ymm0
vmovaps 0x460(%rsp), %ymm1
vpalignr $0xe, %ymm1, %ymm0, %ymm0 # ymm0 = ymm1[14,15],ymm0[0,1,2,3,4,5,6,7,8,9,10,11,12,13],ymm1[30,31],ymm0[16,17,18,19,20,21,22,23,24,25,26,27,28,29]
vmovaps %ymm0, 0x700(%rsp)
callq 0x942d00
vmovaps %ymm0, 0x400(%rsp)
vmovaps 0x400(%rsp), %ymm1
vmovaps 0x580(%rsp), %ymm0
vmovaps %ymm1, 0x960(%rsp)
vmovaps %ymm0, 0x940(%rsp)
vmovaps 0x960(%rsp), %ymm0
vmovaps 0x940(%rsp), %ymm1
vpaddd %ymm1, %ymm0, %ymm1
vmovdqa 0x570(%rsp), %xmm0
vmovaps %ymm1, 0xd80(%rsp)
vmovdqa %xmm0, 0xd70(%rsp)
vmovaps 0xd80(%rsp), %ymm0
vmovdqa 0xd70(%rsp), %xmm1
vpsrad %xmm1, %ymm0, %ymm0
vmovaps %ymm0, 0x400(%rsp)
vmovaps 0x420(%rsp), %ymm1
vmovdqa 0x560(%rsp), %xmm0
vmovaps %ymm1, 0xe40(%rsp)
vmovdqa %xmm0, 0xe30(%rsp)
vmovaps 0xe40(%rsp), %ymm0
vmovdqa 0xe30(%rsp), %xmm1
vpslld %xmm1, %ymm0, %ymm0
vmovaps %ymm0, 0x420(%rsp)
vmovaps 0x400(%rsp), %ymm1
vmovdqa 0x560(%rsp), %xmm0
vmovaps %ymm1, 0xe00(%rsp)
vmovdqa %xmm0, 0xdf0(%rsp)
vmovaps 0xe00(%rsp), %ymm0
vmovdqa 0xdf0(%rsp), %xmm1
vpslld %xmm1, %ymm0, %ymm0
vmovaps %ymm0, 0x400(%rsp)
vmovaps 0x420(%rsp), %ymm1
vmovaps 0x400(%rsp), %ymm0
vmovaps %ymm1, 0xe80(%rsp)
vmovaps %ymm0, 0xe60(%rsp)
vmovaps 0xe80(%rsp), %ymm0
vmovaps 0xe60(%rsp), %ymm1
vpunpckldq %ymm1, %ymm0, %ymm0 # ymm0 = ymm0[0],ymm1[0],ymm0[1],ymm1[1],ymm0[4],ymm1[4],ymm0[5],ymm1[5]
vmovdqa %ymm0, 0x3e0(%rsp)
vmovdqa 0x3e0(%rsp), %ymm1
vmovdqa 0x520(%rsp), %ymm0
vmovdqa %ymm1, 0x920(%rsp)
vmovdqa %ymm0, 0x900(%rsp)
vmovdqa 0x920(%rsp), %ymm0
vmovdqa 0x900(%rsp), %ymm1
vpaddd %ymm1, %ymm0, %ymm0
vmovdqa %ymm0, 0x3c0(%rsp)
movl 0x750(%rsp), %eax
subl 0x724(%rsp), %eax
cmpl $0x8, %eax
jge 0x943f63
cmpl $0x0, 0x61c(%rsp)
je 0x943e3d
movq 0x740(%rsp), %rax
movl 0x728(%rsp), %ecx
movl 0x73c(%rsp), %edx
imull %edx, %ecx
movl 0x724(%rsp), %edx
addl %edx, %ecx
movslq %ecx, %rcx
leaq (%rax,%rcx,2), %rax
movq %rax, 0xbd8(%rsp)
movq 0xbd8(%rsp), %rax
vmovq (%rax), %xmm0
vmovdqa %xmm0, 0xbc0(%rsp)
vmovdqa 0xbc0(%rsp), %xmm0
vmovdqa %xmm0, 0xb90(%rsp)
vmovdqa 0xb90(%rsp), %xmm0
vmovdqa %xmm1, 0x3b0(%rsp)
vmovdqa %xmm0, 0x3a0(%rsp)
movq 0x740(%rsp), %rax
movl 0x728(%rsp), %ecx
movl 0x73c(%rsp), %edx
imull %edx, %ecx
movl 0x724(%rsp), %esi
addl %esi, %ecx
addl %edx, %ecx
movslq %ecx, %rcx
leaq (%rax,%rcx,2), %rax
movq %rax, 0xbb8(%rsp)
movq 0xbb8(%rsp), %rax
vmovq (%rax), %xmm0
vmovdqa %xmm0, 0xba0(%rsp)
vmovdqa 0xba0(%rsp), %xmm0
vmovdqa %xmm0, 0xb80(%rsp)
vmovdqa 0xb80(%rsp), %xmm0
vmovdqa %xmm1, 0x390(%rsp)
vmovdqa %xmm0, 0x380(%rsp)
vmovaps 0x3a0(%rsp), %ymm0
vmovaps 0x380(%rsp), %ymm1
vperm2i128 $0x20, %ymm1, %ymm0, %ymm0 # ymm0 = ymm0[0,1],ymm1[0,1]
vmovaps %ymm0, 0x360(%rsp)
vmovaps 0x360(%rsp), %ymm1
vmovaps 0x5a0(%rsp), %ymm0
vmovaps %ymm1, 0x860(%rsp)
vmovaps %ymm0, 0x840(%rsp)
vmovaps 0x860(%rsp), %ymm0
vmovaps 0x840(%rsp), %ymm1
vpunpcklwd %ymm1, %ymm0, %ymm0 # ymm0 = ymm0[0],ymm1[0],ymm0[1],ymm1[1],ymm0[2],ymm1[2],ymm0[3],ymm1[3],ymm0[8],ymm1[8],ymm0[9],ymm1[9],ymm0[10],ymm1[10],ymm0[11],ymm1[11]
vmovaps %ymm0, 0x340(%rsp)
movl 0x618(%rsp), %r8d
leaq 0x340(%rsp), %rdi
leaq 0x3c0(%rsp), %rsi
leaq 0x5e0(%rsp), %rdx
leaq 0x5c0(%rsp), %rcx
callq 0x940c50
vmovaps %ymm0, 0x320(%rsp)
movl 0x51c(%rsp), %ecx
leaq 0x320(%rsp), %rdi
leaq 0x520(%rsp), %rsi
leaq 0x4e0(%rsp), %rdx
callq 0x940e30
vmovaps %ymm0, 0x300(%rsp)
vmovaps 0x300(%rsp), %ymm0
vmovaps %ymm0, 0xaa0(%rsp)
vmovaps %ymm0, 0xa80(%rsp)
vmovaps 0xaa0(%rsp), %ymm0
vmovaps 0xa80(%rsp), %ymm1
vpackusdw %ymm1, %ymm0, %ymm0
vmovaps %ymm0, 0x2e0(%rsp)
vmovaps 0x2e0(%rsp), %ymm1
vmovaps 0x4c0(%rsp), %ymm0
vmovaps %ymm1, 0xb20(%rsp)
vmovaps %ymm0, 0xb00(%rsp)
vmovaps 0xb20(%rsp), %ymm0
vmovaps 0xb00(%rsp), %ymm1
vpminsw %ymm1, %ymm0, %ymm0
vmovaps %ymm0, 0x2c0(%rsp)
vmovaps 0x2c0(%rsp), %ymm0
vmovaps %ymm0, 0xc40(%rsp)
vmovaps 0xc40(%rsp), %ymm0
vmovdqa %xmm0, 0x2b0(%rsp)
vmovaps 0x2c0(%rsp), %ymm0
vextracti128 $0x1, %ymm0, 0x2a0(%rsp)
movq 0x758(%rsp), %rax
movl 0x728(%rsp), %ecx
movl 0x754(%rsp), %edx
imull %edx, %ecx
movl 0x724(%rsp), %edx
addl %edx, %ecx
movslq %ecx, %rcx
leaq (%rax,%rcx,2), %rax
vmovdqa 0x2b0(%rsp), %xmm0
movq %rax, 0xce8(%rsp)
vmovdqa %xmm0, 0xcd0(%rsp)
movq 0xcd0(%rsp), %rcx
movq 0xce8(%rsp), %rax
movq %rcx, (%rax)
movq 0x758(%rsp), %rax
movl 0x728(%rsp), %ecx
movl 0x754(%rsp), %edx
imull %edx, %ecx
movl 0x724(%rsp), %esi
addl %esi, %ecx
addl %edx, %ecx
movslq %ecx, %rcx
leaq (%rax,%rcx,2), %rax
vmovdqa 0x2a0(%rsp), %xmm0
movq %rax, 0xcc8(%rsp)
vmovdqa %xmm0, 0xcb0(%rsp)
movq 0xcb0(%rsp), %rcx
movq 0xcc8(%rsp), %rax
movq %rcx, (%rax)
jmp 0x943f5e
vmovaps 0x3c0(%rsp), %ymm0
vmovaps %ymm0, 0xa60(%rsp)
vmovaps %ymm0, 0xa40(%rsp)
vmovaps 0xa60(%rsp), %ymm0
vmovaps 0xa40(%rsp), %ymm1
vpackusdw %ymm1, %ymm0, %ymm0
vmovaps %ymm0, 0x280(%rsp)
vmovaps 0x280(%rsp), %ymm0
vmovaps %ymm0, 0xc20(%rsp)
vmovaps 0xc20(%rsp), %ymm0
vmovdqa %xmm0, 0x270(%rsp)
vmovaps 0x280(%rsp), %ymm0
vextracti128 $0x1, %ymm0, 0x260(%rsp)
movq 0x740(%rsp), %rax
movl 0x728(%rsp), %ecx
movl 0x73c(%rsp), %edx
imull %edx, %ecx
movl 0x724(%rsp), %edx
addl %edx, %ecx
movslq %ecx, %rcx
leaq (%rax,%rcx,2), %rax
vmovdqa 0x270(%rsp), %xmm0
movq %rax, 0xca8(%rsp)
vmovdqa %xmm0, 0xc90(%rsp)
movq 0xc90(%rsp), %rcx
movq 0xca8(%rsp), %rax
movq %rcx, (%rax)
movq 0x740(%rsp), %rax
movl 0x728(%rsp), %ecx
movl 0x73c(%rsp), %edx
imull %edx, %ecx
movl 0x724(%rsp), %esi
addl %esi, %ecx
addl %edx, %ecx
movslq %ecx, %rcx
leaq (%rax,%rcx,2), %rax
vmovdqa 0x260(%rsp), %xmm0
movq %rax, 0xc88(%rsp)
vmovdqa %xmm0, 0xc70(%rsp)
movq 0xc70(%rsp), %rcx
movq 0xc88(%rsp), %rax
movq %rcx, (%rax)
jmp 0x9444c8
vmovaps 0x420(%rsp), %ymm1
vmovaps 0x400(%rsp), %ymm0
vmovaps %ymm1, 0xec0(%rsp)
vmovaps %ymm0, 0xea0(%rsp)
vmovaps 0xec0(%rsp), %ymm0
vmovaps 0xea0(%rsp), %ymm1
vpunpckhdq %ymm1, %ymm0, %ymm0 # ymm0 = ymm0[2],ymm1[2],ymm0[3],ymm1[3],ymm0[6],ymm1[6],ymm0[7],ymm1[7]
vmovdqa %ymm0, 0x240(%rsp)
vmovdqa 0x240(%rsp), %ymm1
vmovdqa 0x520(%rsp), %ymm0
vmovdqa %ymm1, 0x8e0(%rsp)
vmovdqa %ymm0, 0x8c0(%rsp)
vmovdqa 0x8e0(%rsp), %ymm0
vmovdqa 0x8c0(%rsp), %ymm1
vpaddd %ymm1, %ymm0, %ymm0
vmovdqa %ymm0, 0x220(%rsp)
cmpl $0x0, 0x61c(%rsp)
je 0x9443a5
movq 0x740(%rsp), %rax
movl 0x728(%rsp), %ecx
movl 0x73c(%rsp), %edx
imull %edx, %ecx
movl 0x724(%rsp), %edx
addl %edx, %ecx
movslq %ecx, %rcx
leaq (%rax,%rcx,2), %rax
movq %rax, 0xb58(%rsp)
movq 0xb58(%rsp), %rax
vmovdqu (%rax), %xmm0
vmovdqa %xmm0, 0xb70(%rsp)
vmovdqa 0xb70(%rsp), %xmm0
vmovdqa %xmm1, 0x210(%rsp)
vmovdqa %xmm0, 0x200(%rsp)
movq 0x740(%rsp), %rax
movl 0x728(%rsp), %ecx
movl 0x73c(%rsp), %edx
imull %edx, %ecx
movl 0x724(%rsp), %esi
addl %esi, %ecx
addl %edx, %ecx
movslq %ecx, %rcx
leaq (%rax,%rcx,2), %rax
movq %rax, 0xb50(%rsp)
movq 0xb50(%rsp), %rax
vmovdqu (%rax), %xmm0
vmovdqa %xmm0, 0xb60(%rsp)
vmovdqa 0xb60(%rsp), %xmm0
vmovdqa %xmm1, 0x1f0(%rsp)
vmovdqa %xmm0, 0x1e0(%rsp)
vmovaps 0x200(%rsp), %ymm0
vmovaps 0x1e0(%rsp), %ymm1
vperm2i128 $0x20, %ymm1, %ymm0, %ymm0 # ymm0 = ymm0[0,1],ymm1[0,1]
vmovaps %ymm0, 0x1c0(%rsp)
vmovaps 0x1c0(%rsp), %ymm1
vmovaps 0x5a0(%rsp), %ymm0
vmovaps %ymm1, 0x820(%rsp)
vmovaps %ymm0, 0x800(%rsp)
vmovaps 0x820(%rsp), %ymm0
vmovaps 0x800(%rsp), %ymm1
vpunpcklwd %ymm1, %ymm0, %ymm0 # ymm0 = ymm0[0],ymm1[0],ymm0[1],ymm1[1],ymm0[2],ymm1[2],ymm0[3],ymm1[3],ymm0[8],ymm1[8],ymm0[9],ymm1[9],ymm0[10],ymm1[10],ymm0[11],ymm1[11]
vmovaps %ymm0, 0x1a0(%rsp)
vmovaps 0x1c0(%rsp), %ymm1
vmovaps 0x5a0(%rsp), %ymm0
vmovaps %ymm1, 0x8a0(%rsp)
vmovaps %ymm0, 0x880(%rsp)
vmovaps 0x8a0(%rsp), %ymm0
vmovaps 0x880(%rsp), %ymm1
vpunpckhwd %ymm1, %ymm0, %ymm0 # ymm0 = ymm0[4],ymm1[4],ymm0[5],ymm1[5],ymm0[6],ymm1[6],ymm0[7],ymm1[7],ymm0[12],ymm1[12],ymm0[13],ymm1[13],ymm0[14],ymm1[14],ymm0[15],ymm1[15]
vmovaps %ymm0, 0x180(%rsp)
movl 0x618(%rsp), %r8d
leaq 0x1a0(%rsp), %rdi
leaq 0x3c0(%rsp), %rsi
leaq 0x5e0(%rsp), %rdx
movq %rdx, 0x8(%rsp)
leaq 0x5c0(%rsp), %rcx
movq %rcx, 0x10(%rsp)
callq 0x940c50
movq 0x8(%rsp), %rdx
movq 0x10(%rsp), %rcx
vmovaps %ymm0, 0x160(%rsp)
movl 0x618(%rsp), %r8d
leaq 0x180(%rsp), %rdi
leaq 0x220(%rsp), %rsi
callq 0x940c50
vmovaps %ymm0, 0x140(%rsp)
movl 0x51c(%rsp), %ecx
leaq 0x160(%rsp), %rdi
leaq 0x520(%rsp), %rsi
movq %rsi, 0x18(%rsp)
leaq 0x4e0(%rsp), %rdx
movq %rdx, 0x20(%rsp)
callq 0x940e30
movq 0x18(%rsp), %rsi
movq 0x20(%rsp), %rdx
vmovaps %ymm0, 0x120(%rsp)
movl 0x51c(%rsp), %ecx
leaq 0x140(%rsp), %rdi
callq 0x940e30
vmovaps %ymm0, 0x100(%rsp)
vmovaps 0x120(%rsp), %ymm1
vmovaps 0x100(%rsp), %ymm0
vmovaps %ymm1, 0xa20(%rsp)
vmovaps %ymm0, 0xa00(%rsp)
vmovaps 0xa20(%rsp), %ymm0
vmovaps 0xa00(%rsp), %ymm1
vpackusdw %ymm1, %ymm0, %ymm0
vmovaps %ymm0, 0xe0(%rsp)
vmovaps 0xe0(%rsp), %ymm1
vmovaps 0x4c0(%rsp), %ymm0
vmovaps %ymm1, 0xae0(%rsp)
vmovaps %ymm0, 0xac0(%rsp)
vmovaps 0xae0(%rsp), %ymm0
vmovaps 0xac0(%rsp), %ymm1
vpminsw %ymm1, %ymm0, %ymm0
vmovaps %ymm0, 0xc0(%rsp)
vmovaps 0xc0(%rsp), %ymm0
vmovaps %ymm0, 0xc00(%rsp)
vmovaps 0xc00(%rsp), %ymm0
vmovdqa %xmm0, 0xb0(%rsp)
vmovdqa 0xd0(%rsp), %xmm0
vmovdqa %xmm0, 0xa0(%rsp)
movq 0x758(%rsp), %rax
movl 0x728(%rsp), %ecx
imull 0x754(%rsp), %ecx
addl 0x724(%rsp), %ecx
movslq %ecx, %rcx
shlq %rcx
addq %rcx, %rax
vmovdqa 0xb0(%rsp), %xmm0
movq %rax, 0xd68(%rsp)
vmovdqa %xmm0, 0xd50(%rsp)
vmovdqa 0xd50(%rsp), %xmm0
movq 0xd68(%rsp), %rax
vmovdqa %xmm0, (%rax)
movq 0x758(%rsp), %rax
movl 0x728(%rsp), %ecx
imull 0x754(%rsp), %ecx
addl 0x724(%rsp), %ecx
addl 0x754(%rsp), %ecx
movslq %ecx, %rcx
shlq %rcx
addq %rcx, %rax
vmovdqa 0xa0(%rsp), %xmm0
movq %rax, 0xd48(%rsp)
vmovdqa %xmm0, 0xd30(%rsp)
vmovdqa 0xd30(%rsp), %xmm0
movq 0xd48(%rsp), %rax
vmovdqa %xmm0, (%rax)
jmp 0x9444c6
vmovaps 0x3c0(%rsp), %ymm1
vmovaps 0x220(%rsp), %ymm0
vmovaps %ymm1, 0x9e0(%rsp)
vmovaps %ymm0, 0x9c0(%rsp)
vmovaps 0x9e0(%rsp), %ymm0
vmovaps 0x9c0(%rsp), %ymm1
vpackusdw %ymm1, %ymm0, %ymm0
vmovaps %ymm0, 0x80(%rsp)
vmovaps 0x80(%rsp), %ymm0
vmovaps %ymm0, 0xbe0(%rsp)
vmovaps 0xbe0(%rsp), %ymm0
vmovdqa %xmm0, 0x70(%rsp)
vmovdqa 0x90(%rsp), %xmm0
vmovdqa %xmm0, 0x60(%rsp)
movq 0x740(%rsp), %rax
movl 0x728(%rsp), %ecx
imull 0x73c(%rsp), %ecx
addl 0x724(%rsp), %ecx
movslq %ecx, %rcx
shlq %rcx
addq %rcx, %rax
vmovdqa 0x70(%rsp), %xmm0
movq %rax, 0xd28(%rsp)
vmovdqa %xmm0, 0xd10(%rsp)
vmovdqa 0xd10(%rsp), %xmm0
movq 0xd28(%rsp), %rax
vmovdqa %xmm0, (%rax)
movq 0x740(%rsp), %rax
movl 0x728(%rsp), %ecx
imull 0x73c(%rsp), %ecx
addl 0x724(%rsp), %ecx
addl 0x73c(%rsp), %ecx
movslq %ecx, %rcx
shlq %rcx
addq %rcx, %rax
vmovdqa 0x60(%rsp), %xmm0
movq %rax, 0xd08(%rsp)
vmovdqa %xmm0, 0xcf0(%rsp)
vmovdqa 0xcf0(%rsp), %xmm0
movq 0xd08(%rsp), %rax
vmovdqa %xmm0, (%rax)
jmp 0x9444c8
jmp 0x9444ca
movl 0x728(%rsp), %eax
addl $0x2, %eax
movl %eax, 0x728(%rsp)
jmp 0x9436ef
jmp 0x9444e2
movl 0x724(%rsp), %eax
addl $0x8, %eax
movl %eax, 0x724(%rsp)
jmp 0x9436d0
movq %rbp, %rsp
popq %rbp
vzeroupper
retq
| /m-ab-s[P]aom/av1/common/x86/highbd_jnt_convolve_avx2.c |
av1_highbd_dist_wtd_convolve_y_avx2 | void av1_highbd_dist_wtd_convolve_y_avx2(
const uint16_t *src, int src_stride, uint16_t *dst0, int dst_stride0, int w,
int h, const InterpFilterParams *filter_params_y, const int subpel_y_qn,
ConvolveParams *conv_params, int bd) {
CONV_BUF_TYPE *dst = conv_params->dst;
int dst_stride = conv_params->dst_stride;
const int fo_vert = filter_params_y->taps / 2 - 1;
const uint16_t *const src_ptr = src - fo_vert * src_stride;
const int bits = FILTER_BITS - conv_params->round_0;
assert(bits >= 0);
int i, j;
__m256i s[8], coeffs_y[4];
const int do_average = conv_params->do_average;
const int use_dist_wtd_comp_avg = conv_params->use_dist_wtd_comp_avg;
const int w0 = conv_params->fwd_offset;
const int w1 = conv_params->bck_offset;
const __m256i wt0 = _mm256_set1_epi32(w0);
const __m256i wt1 = _mm256_set1_epi32(w1);
const __m256i round_const_y =
_mm256_set1_epi32(((1 << conv_params->round_1) >> 1));
const __m128i round_shift_y = _mm_cvtsi32_si128(conv_params->round_1);
const __m128i round_shift_bits = _mm_cvtsi32_si128(bits);
const int offset_0 =
bd + 2 * FILTER_BITS - conv_params->round_0 - conv_params->round_1;
const int offset = (1 << offset_0) + (1 << (offset_0 - 1));
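  // For example, assuming round_0 + round_1 == 10 and bd == 10, offset_0 is 14
  // and offset is (1 << 14) + (1 << 13) == 24576; this bias keeps the
  // intermediate CONV_BUF_TYPE values non-negative before they are stored.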
const __m256i offset_const = _mm256_set1_epi32(offset);
const int rounding_shift =
2 * FILTER_BITS - conv_params->round_0 - conv_params->round_1;
const __m256i rounding_const = _mm256_set1_epi32((1 << rounding_shift) >> 1);
const __m256i clip_pixel_to_bd =
_mm256_set1_epi16(bd == 10 ? 1023 : (bd == 12 ? 4095 : 255));
const __m256i zero = _mm256_setzero_si256();
prepare_coeffs(filter_params_y, subpel_y_qn, coeffs_y);
for (j = 0; j < w; j += 8) {
const uint16_t *data = &src_ptr[j];
/* Vertical filter */
{
__m256i src6;
__m256i s01 = _mm256_permute2x128_si256(
_mm256_castsi128_si256(
_mm_loadu_si128((__m128i *)(data + 0 * src_stride))),
_mm256_castsi128_si256(
_mm_loadu_si128((__m128i *)(data + 1 * src_stride))),
0x20);
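      // Note: imm8 0x20 in _mm256_permute2x128_si256 selects the low 128-bit
      // half of each operand, so s01 carries row 0 in its lower lane and row 1
      // in its upper lane; s12..s56 below pair consecutive rows the same way.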
__m256i s12 = _mm256_permute2x128_si256(
_mm256_castsi128_si256(
_mm_loadu_si128((__m128i *)(data + 1 * src_stride))),
_mm256_castsi128_si256(
_mm_loadu_si128((__m128i *)(data + 2 * src_stride))),
0x20);
__m256i s23 = _mm256_permute2x128_si256(
_mm256_castsi128_si256(
_mm_loadu_si128((__m128i *)(data + 2 * src_stride))),
_mm256_castsi128_si256(
_mm_loadu_si128((__m128i *)(data + 3 * src_stride))),
0x20);
__m256i s34 = _mm256_permute2x128_si256(
_mm256_castsi128_si256(
_mm_loadu_si128((__m128i *)(data + 3 * src_stride))),
_mm256_castsi128_si256(
_mm_loadu_si128((__m128i *)(data + 4 * src_stride))),
0x20);
__m256i s45 = _mm256_permute2x128_si256(
_mm256_castsi128_si256(
_mm_loadu_si128((__m128i *)(data + 4 * src_stride))),
_mm256_castsi128_si256(
_mm_loadu_si128((__m128i *)(data + 5 * src_stride))),
0x20);
src6 = _mm256_castsi128_si256(
_mm_loadu_si128((__m128i *)(data + 6 * src_stride)));
__m256i s56 = _mm256_permute2x128_si256(
_mm256_castsi128_si256(
_mm_loadu_si128((__m128i *)(data + 5 * src_stride))),
src6, 0x20);
s[0] = _mm256_unpacklo_epi16(s01, s12);
s[1] = _mm256_unpacklo_epi16(s23, s34);
s[2] = _mm256_unpacklo_epi16(s45, s56);
s[4] = _mm256_unpackhi_epi16(s01, s12);
s[5] = _mm256_unpackhi_epi16(s23, s34);
s[6] = _mm256_unpackhi_epi16(s45, s56);
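      // s[0] now interleaves rows (0,1) in its low lane and (1,2) in its high
      // lane for columns 0..3; s[1] holds pairs (2,3)/(3,4) and s[2] pairs
      // (4,5)/(5,6). s[4..6] hold the same row pairs for columns 4..7. The two
      // lanes feed the two output rows produced per iteration of the loop
      // below, and s[3]/s[7] are filled there from the incoming rows.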
for (i = 0; i < h; i += 2) {
data = &src_ptr[i * src_stride + j];
const __m256i s67 = _mm256_permute2x128_si256(
src6,
_mm256_castsi128_si256(
_mm_loadu_si128((__m128i *)(data + 7 * src_stride))),
0x20);
src6 = _mm256_castsi128_si256(
_mm_loadu_si128((__m128i *)(data + 8 * src_stride)));
const __m256i s78 = _mm256_permute2x128_si256(
_mm256_castsi128_si256(
_mm_loadu_si128((__m128i *)(data + 7 * src_stride))),
src6, 0x20);
s[3] = _mm256_unpacklo_epi16(s67, s78);
s[7] = _mm256_unpackhi_epi16(s67, s78);
const __m256i res_a = convolve(s, coeffs_y);
__m256i res_a_round = _mm256_sll_epi32(res_a, round_shift_bits);
res_a_round = _mm256_sra_epi32(
_mm256_add_epi32(res_a_round, round_const_y), round_shift_y);
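        // i.e. ((sum << bits) + (1 << (round_1 - 1))) >> round_1, a
        // round-half-up shift by conv_params->round_1.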
__m256i res_unsigned_lo = _mm256_add_epi32(res_a_round, offset_const);
if (w - j < 8) {
if (do_average) {
const __m256i data_0 = _mm256_castsi128_si256(
_mm_loadl_epi64((__m128i *)(&dst[i * dst_stride + j])));
const __m256i data_1 = _mm256_castsi128_si256(_mm_loadl_epi64(
(__m128i *)(&dst[i * dst_stride + j + dst_stride])));
const __m256i data_01 =
_mm256_permute2x128_si256(data_0, data_1, 0x20);
const __m256i data_ref_0 = _mm256_unpacklo_epi16(data_01, zero);
const __m256i comp_avg_res =
highbd_comp_avg(&data_ref_0, &res_unsigned_lo, &wt0, &wt1,
use_dist_wtd_comp_avg);
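            // highbd_comp_avg blends the stored intermediate with the new
            // result: a fwd/bck-weighted sum shifted down by
            // DIST_PRECISION_BITS when use_dist_wtd_comp_avg is set, otherwise
            // a plain (a + b) >> 1 average.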
const __m256i round_result = highbd_convolve_rounding(
&comp_avg_res, &offset_const, &rounding_const, rounding_shift);
const __m256i res_16b =
_mm256_packus_epi32(round_result, round_result);
const __m256i res_clip =
_mm256_min_epi16(res_16b, clip_pixel_to_bd);
const __m128i res_0 = _mm256_castsi256_si128(res_clip);
const __m128i res_1 = _mm256_extracti128_si256(res_clip, 1);
_mm_storel_epi64((__m128i *)(&dst0[i * dst_stride0 + j]), res_0);
_mm_storel_epi64(
(__m128i *)(&dst0[i * dst_stride0 + j + dst_stride0]), res_1);
} else {
__m256i res_16b =
_mm256_packus_epi32(res_unsigned_lo, res_unsigned_lo);
const __m128i res_0 = _mm256_castsi256_si128(res_16b);
const __m128i res_1 = _mm256_extracti128_si256(res_16b, 1);
_mm_storel_epi64((__m128i *)(&dst[i * dst_stride + j]), res_0);
_mm_storel_epi64((__m128i *)(&dst[i * dst_stride + j + dst_stride]),
res_1);
}
} else {
const __m256i res_b = convolve(s + 4, coeffs_y);
__m256i res_b_round = _mm256_sll_epi32(res_b, round_shift_bits);
res_b_round = _mm256_sra_epi32(
_mm256_add_epi32(res_b_round, round_const_y), round_shift_y);
__m256i res_unsigned_hi = _mm256_add_epi32(res_b_round, offset_const);
if (do_average) {
const __m256i data_0 = _mm256_castsi128_si256(
_mm_loadu_si128((__m128i *)(&dst[i * dst_stride + j])));
const __m256i data_1 = _mm256_castsi128_si256(_mm_loadu_si128(
(__m128i *)(&dst[i * dst_stride + j + dst_stride])));
const __m256i data_01 =
_mm256_permute2x128_si256(data_0, data_1, 0x20);
const __m256i data_ref_0_lo = _mm256_unpacklo_epi16(data_01, zero);
const __m256i data_ref_0_hi = _mm256_unpackhi_epi16(data_01, zero);
const __m256i comp_avg_res_lo =
highbd_comp_avg(&data_ref_0_lo, &res_unsigned_lo, &wt0, &wt1,
use_dist_wtd_comp_avg);
const __m256i comp_avg_res_hi =
highbd_comp_avg(&data_ref_0_hi, &res_unsigned_hi, &wt0, &wt1,
use_dist_wtd_comp_avg);
const __m256i round_result_lo =
highbd_convolve_rounding(&comp_avg_res_lo, &offset_const,
&rounding_const, rounding_shift);
const __m256i round_result_hi =
highbd_convolve_rounding(&comp_avg_res_hi, &offset_const,
&rounding_const, rounding_shift);
const __m256i res_16b =
_mm256_packus_epi32(round_result_lo, round_result_hi);
const __m256i res_clip =
_mm256_min_epi16(res_16b, clip_pixel_to_bd);
const __m128i res_0 = _mm256_castsi256_si128(res_clip);
const __m128i res_1 = _mm256_extracti128_si256(res_clip, 1);
_mm_store_si128((__m128i *)(&dst0[i * dst_stride0 + j]), res_0);
_mm_store_si128(
(__m128i *)(&dst0[i * dst_stride0 + j + dst_stride0]), res_1);
} else {
__m256i res_16b =
_mm256_packus_epi32(res_unsigned_lo, res_unsigned_hi);
const __m128i res_0 = _mm256_castsi256_si128(res_16b);
const __m128i res_1 = _mm256_extracti128_si256(res_16b, 1);
_mm_store_si128((__m128i *)(&dst[i * dst_stride + j]), res_0);
_mm_store_si128((__m128i *)(&dst[i * dst_stride + j + dst_stride]),
res_1);
}
}
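        // Slide the eight-row source window down by two rows for the next
        // pair of output rows.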
s[0] = s[1];
s[1] = s[2];
s[2] = s[3];
s[4] = s[5];
s[5] = s[6];
s[6] = s[7];
}
}
}
} | pushq %rbp
movq %rsp, %rbp
andq $-0x20, %rsp
subq $0x14a0, %rsp # imm = 0x14A0
movl 0x28(%rbp), %eax
movq 0x20(%rbp), %rax
movl 0x18(%rbp), %eax
movq 0x10(%rbp), %rax
movq %rdi, 0x8a8(%rsp)
movl %esi, 0x8a4(%rsp)
movq %rdx, 0x898(%rsp)
movl %ecx, 0x894(%rsp)
movl %r8d, 0x890(%rsp)
movl %r9d, 0x88c(%rsp)
movq 0x20(%rbp), %rax
movq 0x8(%rax), %rax
movq %rax, 0x880(%rsp)
movq 0x20(%rbp), %rax
movl 0x10(%rax), %eax
movl %eax, 0x87c(%rsp)
movq 0x10(%rbp), %rax
movzwl 0x8(%rax), %eax
shrl %eax
decl %eax
movl %eax, 0x878(%rsp)
movq 0x8a8(%rsp), %rax
movl 0x878(%rsp), %ecx
movl 0x8a4(%rsp), %edx
imull %edx, %ecx
movslq %ecx, %rcx
addq %rcx, %rcx
subq %rcx, %rax
movq %rax, 0x870(%rsp)
movq 0x20(%rbp), %rax
movl 0x14(%rax), %ecx
movl $0x7, %eax
subl %ecx, %eax
movl %eax, 0x86c(%rsp)
movq 0x20(%rbp), %rax
movl (%rax), %eax
movl %eax, 0x6dc(%rsp)
movq 0x20(%rbp), %rax
movl 0x24(%rax), %eax
movl %eax, 0x6d8(%rsp)
movq 0x20(%rbp), %rax
movl 0x28(%rax), %eax
movl %eax, 0x6d4(%rsp)
movq 0x20(%rbp), %rax
movl 0x2c(%rax), %eax
movl %eax, 0x6d0(%rsp)
movl 0x6d4(%rsp), %eax
movl %eax, 0x8fc(%rsp)
movl 0x8fc(%rsp), %eax
movl %eax, 0x44(%rsp)
movl %eax, 0x133c(%rsp)
movl %eax, 0x1338(%rsp)
movl %eax, 0x1334(%rsp)
movl %eax, 0x1330(%rsp)
movl %eax, 0x132c(%rsp)
movl %eax, 0x1328(%rsp)
movl %eax, 0x1324(%rsp)
movl %eax, 0x1320(%rsp)
movl 0x1324(%rsp), %r8d
movl 0x1328(%rsp), %edi
movl 0x132c(%rsp), %esi
movl 0x1334(%rsp), %edx
movl 0x1338(%rsp), %ecx
movl 0x133c(%rsp), %eax
vmovd 0x1320(%rsp), %xmm0
vpinsrd $0x1, %r8d, %xmm0, %xmm0
vpinsrd $0x2, %edi, %xmm0, %xmm0
vpinsrd $0x3, %esi, %xmm0, %xmm0
vmovd 0x1330(%rsp), %xmm1
vpinsrd $0x1, %edx, %xmm1, %xmm1
vpinsrd $0x2, %ecx, %xmm1, %xmm1
vpinsrd $0x3, %eax, %xmm1, %xmm1
vmovdqa %xmm1, 0x1310(%rsp)
vmovdqa %xmm0, 0x1300(%rsp)
vmovaps 0x1300(%rsp), %ymm0
vmovaps %ymm0, 0x6a0(%rsp)
movl 0x6d0(%rsp), %eax
movl %eax, 0x8f8(%rsp)
movl 0x8f8(%rsp), %eax
movl %eax, 0x48(%rsp)
movl %eax, 0x137c(%rsp)
movl %eax, 0x1378(%rsp)
movl %eax, 0x1374(%rsp)
movl %eax, 0x1370(%rsp)
movl %eax, 0x136c(%rsp)
movl %eax, 0x1368(%rsp)
movl %eax, 0x1364(%rsp)
movl %eax, 0x1360(%rsp)
movl 0x1364(%rsp), %r8d
movl 0x1368(%rsp), %edi
movl 0x136c(%rsp), %esi
movl 0x1374(%rsp), %edx
movl 0x1378(%rsp), %ecx
movl 0x137c(%rsp), %eax
vmovd 0x1360(%rsp), %xmm0
vpinsrd $0x1, %r8d, %xmm0, %xmm0
vpinsrd $0x2, %edi, %xmm0, %xmm0
vpinsrd $0x3, %esi, %xmm0, %xmm0
vmovd 0x1370(%rsp), %xmm1
vpinsrd $0x1, %edx, %xmm1, %xmm1
vpinsrd $0x2, %ecx, %xmm1, %xmm1
vpinsrd $0x3, %eax, %xmm1, %xmm1
vmovdqa %xmm1, 0x1350(%rsp)
vmovdqa %xmm0, 0x1340(%rsp)
vmovaps 0x1340(%rsp), %ymm0
vmovaps %ymm0, 0x680(%rsp)
movq 0x20(%rbp), %rax
movb 0x18(%rax), %cl
movl $0x1, %eax
movl %eax, %edx
shll %cl, %edx
movl %edx, %ecx
sarl %ecx
movl %ecx, 0x8f4(%rsp)
movl 0x8f4(%rsp), %ecx
movl %ecx, 0x4c(%rsp)
movl %ecx, 0x13bc(%rsp)
movl %ecx, 0x13b8(%rsp)
movl %ecx, 0x13b4(%rsp)
movl %ecx, 0x13b0(%rsp)
movl %ecx, 0x13ac(%rsp)
movl %ecx, 0x13a8(%rsp)
movl %ecx, 0x13a4(%rsp)
movl %ecx, 0x13a0(%rsp)
movl 0x13a4(%rsp), %r9d
movl 0x13a8(%rsp), %r8d
movl 0x13ac(%rsp), %edi
movl 0x13b4(%rsp), %esi
movl 0x13b8(%rsp), %edx
movl 0x13bc(%rsp), %ecx
vmovd 0x13a0(%rsp), %xmm0
vpinsrd $0x1, %r9d, %xmm0, %xmm0
vpinsrd $0x2, %r8d, %xmm0, %xmm0
vpinsrd $0x3, %edi, %xmm0, %xmm0
vmovd 0x13b0(%rsp), %xmm1
vpinsrd $0x1, %esi, %xmm1, %xmm1
vpinsrd $0x2, %edx, %xmm1, %xmm1
vpinsrd $0x3, %ecx, %xmm1, %xmm1
vmovdqa %xmm1, 0x1390(%rsp)
vmovdqa %xmm0, 0x1380(%rsp)
vmovaps 0x1380(%rsp), %ymm0
vmovaps %ymm0, 0x660(%rsp)
movq 0x20(%rbp), %rcx
movl 0x18(%rcx), %ecx
movl %ecx, 0x8e8(%rsp)
vmovd 0x8e8(%rsp), %xmm0
vmovdqa %xmm0, 0x8d0(%rsp)
vmovdqa 0x8d0(%rsp), %xmm0
vmovdqa %xmm0, 0x650(%rsp)
movl 0x86c(%rsp), %ecx
movl %ecx, 0x8cc(%rsp)
vmovd 0x8cc(%rsp), %xmm0
vmovdqa %xmm0, 0x8b0(%rsp)
vmovdqa 0x8b0(%rsp), %xmm0
vmovdqa %xmm0, 0x640(%rsp)
movl 0x28(%rbp), %ecx
movq 0x20(%rbp), %rdx
movl 0x14(%rdx), %esi
movl 0x18(%rdx), %edx
subl %esi, %ecx
subl %edx, %ecx
addl $0xe, %ecx
movl %ecx, 0x63c(%rsp)
movb 0x63c(%rsp), %cl
movb %cl, 0x53(%rsp)
movl %eax, %edx
shll %cl, %edx
movb 0x53(%rsp), %cl
movl %edx, 0x54(%rsp)
decb %cl
movl %eax, %edx
shll %cl, %edx
movl 0x54(%rsp), %ecx
addl %edx, %ecx
movl %ecx, 0x638(%rsp)
movl 0x638(%rsp), %ecx
movl %ecx, 0x8f0(%rsp)
movl 0x8f0(%rsp), %ecx
movl %ecx, 0x58(%rsp)
movl %ecx, 0x13fc(%rsp)
movl %ecx, 0x13f8(%rsp)
movl %ecx, 0x13f4(%rsp)
movl %ecx, 0x13f0(%rsp)
movl %ecx, 0x13ec(%rsp)
movl %ecx, 0x13e8(%rsp)
movl %ecx, 0x13e4(%rsp)
movl %ecx, 0x13e0(%rsp)
movl 0x13e4(%rsp), %r9d
movl 0x13e8(%rsp), %r8d
movl 0x13ec(%rsp), %edi
movl 0x13f4(%rsp), %esi
movl 0x13f8(%rsp), %edx
movl 0x13fc(%rsp), %ecx
vmovd 0x13e0(%rsp), %xmm0
vpinsrd $0x1, %r9d, %xmm0, %xmm0
vpinsrd $0x2, %r8d, %xmm0, %xmm0
vpinsrd $0x3, %edi, %xmm0, %xmm0
vmovd 0x13f0(%rsp), %xmm1
vpinsrd $0x1, %esi, %xmm1, %xmm1
vpinsrd $0x2, %edx, %xmm1, %xmm1
vpinsrd $0x3, %ecx, %xmm1, %xmm1
vmovdqa %xmm1, 0x13d0(%rsp)
vmovdqa %xmm0, 0x13c0(%rsp)
vmovaps 0x13c0(%rsp), %ymm0
vmovaps %ymm0, 0x600(%rsp)
movq 0x20(%rbp), %rcx
movl 0x14(%rcx), %edx
movl 0x18(%rcx), %ecx
addl %ecx, %edx
movl $0xe, %ecx
subl %edx, %ecx
movl %ecx, 0x5fc(%rsp)
movb 0x5fc(%rsp), %cl
shll %cl, %eax
sarl %eax
movl %eax, 0x8ec(%rsp)
movl 0x8ec(%rsp), %eax
movl %eax, 0x5c(%rsp)
movl %eax, 0x143c(%rsp)
movl %eax, 0x1438(%rsp)
movl %eax, 0x1434(%rsp)
movl %eax, 0x1430(%rsp)
movl %eax, 0x142c(%rsp)
movl %eax, 0x1428(%rsp)
movl %eax, 0x1424(%rsp)
movl %eax, 0x1420(%rsp)
movl 0x1424(%rsp), %edx
movl 0x1428(%rsp), %ecx
movl 0x142c(%rsp), %eax
movl 0x1434(%rsp), %r8d
movl 0x1438(%rsp), %edi
movl 0x143c(%rsp), %esi
vmovd 0x1430(%rsp), %xmm0
vpinsrd $0x1, %r8d, %xmm0, %xmm0
vpinsrd $0x2, %edi, %xmm0, %xmm0
vpinsrd $0x3, %esi, %xmm0, %xmm1
vmovd 0x1420(%rsp), %xmm0
vpinsrd $0x1, %edx, %xmm0, %xmm0
vpinsrd $0x2, %ecx, %xmm0, %xmm0
vpinsrd $0x3, %eax, %xmm0, %xmm2
vmovaps %xmm2, %xmm0
vinserti128 $0x1, %xmm1, %ymm0, %ymm0
vmovdqa %ymm0, 0x1400(%rsp)
vmovdqa 0x1400(%rsp), %ymm0
vmovdqa %ymm0, 0x5c0(%rsp)
cmpl $0xa, 0x28(%rbp)
jne 0x944b08
movl $0x3ff, %eax # imm = 0x3FF
movl %eax, 0x40(%rsp)
jmp 0x944b1f
movl 0x28(%rbp), %edx
movl $0xff, %eax
movl $0xfff, %ecx # imm = 0xFFF
cmpl $0xc, %edx
cmovel %ecx, %eax
movl %eax, 0x40(%rsp)
movl 0x40(%rsp), %eax
movw %ax, 0x93e(%rsp)
movw 0x93e(%rsp), %ax
movw %ax, 0x3e(%rsp)
movw %ax, 0x148e(%rsp)
movw %ax, 0x148c(%rsp)
movw %ax, 0x148a(%rsp)
movw %ax, 0x1488(%rsp)
movw %ax, 0x1486(%rsp)
movw %ax, 0x1484(%rsp)
movw %ax, 0x1482(%rsp)
movw %ax, 0x1480(%rsp)
movw %ax, 0x147e(%rsp)
movw %ax, 0x147c(%rsp)
movw %ax, 0x147a(%rsp)
movw %ax, 0x1478(%rsp)
movw %ax, 0x1476(%rsp)
movw %ax, 0x1474(%rsp)
movw %ax, 0x1472(%rsp)
movw %ax, 0x1470(%rsp)
movzwl 0x1470(%rsp), %eax
vmovd %eax, %xmm0
movzwl 0x1472(%rsp), %eax
vpinsrw $0x1, %eax, %xmm0, %xmm0
movzwl 0x1474(%rsp), %eax
vpinsrw $0x2, %eax, %xmm0, %xmm0
movzwl 0x1476(%rsp), %eax
vpinsrw $0x3, %eax, %xmm0, %xmm0
movzwl 0x1478(%rsp), %eax
vpinsrw $0x4, %eax, %xmm0, %xmm0
movzwl 0x147a(%rsp), %eax
vpinsrw $0x5, %eax, %xmm0, %xmm0
movzwl 0x147c(%rsp), %eax
vpinsrw $0x6, %eax, %xmm0, %xmm0
movzwl 0x147e(%rsp), %eax
vpinsrw $0x7, %eax, %xmm0, %xmm0
movzwl 0x1480(%rsp), %eax
vmovd %eax, %xmm1
movzwl 0x1482(%rsp), %eax
vpinsrw $0x1, %eax, %xmm1, %xmm1
movzwl 0x1484(%rsp), %eax
vpinsrw $0x2, %eax, %xmm1, %xmm1
movzwl 0x1486(%rsp), %eax
vpinsrw $0x3, %eax, %xmm1, %xmm1
movzwl 0x1488(%rsp), %eax
vpinsrw $0x4, %eax, %xmm1, %xmm1
movzwl 0x148a(%rsp), %eax
vpinsrw $0x5, %eax, %xmm1, %xmm1
movzwl 0x148c(%rsp), %eax
vpinsrw $0x6, %eax, %xmm1, %xmm1
movzwl 0x148e(%rsp), %eax
vpinsrw $0x7, %eax, %xmm1, %xmm1
vmovdqa %xmm1, 0x1450(%rsp)
vmovdqa %xmm0, 0x1440(%rsp)
vmovaps 0x1440(%rsp), %ymm0
vmovaps %ymm0, 0x5a0(%rsp)
vxorps %xmm0, %xmm0, %xmm0
vmovaps %ymm0, 0x900(%rsp)
vmovdqa 0x900(%rsp), %ymm0
vmovdqa %ymm0, 0x580(%rsp)
movq 0x10(%rbp), %rdi
movl 0x18(%rbp), %esi
leaq 0x6e0(%rsp), %rdx
vzeroupper
callq 0x942c40
movl $0x0, 0x864(%rsp)
movl 0x864(%rsp), %eax
cmpl 0x890(%rsp), %eax
jge 0x94600a
movq 0x870(%rsp), %rax
movslq 0x864(%rsp), %rcx
leaq (%rax,%rcx,2), %rax
movq %rax, 0x578(%rsp)
movq 0x578(%rsp), %rax
movq %rax, 0xf08(%rsp)
movq 0xf08(%rsp), %rax
vmovdqu (%rax), %xmm0
vmovdqa %xmm0, 0x1030(%rsp)
vmovdqa 0x1030(%rsp), %xmm0
movq 0x578(%rsp), %rax
movslq 0x8a4(%rsp), %rcx
leaq (%rax,%rcx,2), %rax
movq %rax, 0xf00(%rsp)
movq 0xf00(%rsp), %rax
vmovdqu (%rax), %xmm2
vmovdqa %xmm2, 0x1020(%rsp)
vmovdqa 0x1020(%rsp), %xmm2
vmovdqa %xmm2, 0x530(%rsp)
vmovdqa %xmm0, 0x520(%rsp)
movq 0x578(%rsp), %rax
movslq 0x8a4(%rsp), %rcx
leaq (%rax,%rcx,2), %rax
movq %rax, 0xef8(%rsp)
movq 0xef8(%rsp), %rax
vmovdqu (%rax), %xmm0
vmovdqa %xmm0, 0x1010(%rsp)
vmovdqa 0x1010(%rsp), %xmm0
movq 0x578(%rsp), %rax
movl 0x8a4(%rsp), %ecx
addl %ecx, %ecx
movslq %ecx, %rcx
leaq (%rax,%rcx,2), %rax
movq %rax, 0xef0(%rsp)
movq 0xef0(%rsp), %rax
vmovdqu (%rax), %xmm2
vmovdqa %xmm2, 0x1000(%rsp)
vmovdqa 0x1000(%rsp), %xmm2
vmovdqa %xmm2, 0x510(%rsp)
vmovdqa %xmm0, 0x500(%rsp)
movq 0x578(%rsp), %rax
movl 0x8a4(%rsp), %ecx
addl %ecx, %ecx
movslq %ecx, %rcx
leaq (%rax,%rcx,2), %rax
movq %rax, 0xee8(%rsp)
movq 0xee8(%rsp), %rax
vmovdqu (%rax), %xmm0
vmovdqa %xmm0, 0xff0(%rsp)
vmovdqa 0xff0(%rsp), %xmm0
movq 0x578(%rsp), %rax
movl 0x8a4(%rsp), %edx
movl %edx, %ecx
leal (%rcx,%rcx,2), %ecx
movslq %ecx, %rcx
leaq (%rax,%rcx,2), %rax
movq %rax, 0xee0(%rsp)
movq 0xee0(%rsp), %rax
vmovdqu (%rax), %xmm2
vmovdqa %xmm2, 0xfe0(%rsp)
vmovdqa 0xfe0(%rsp), %xmm2
vmovdqa %xmm2, 0x4f0(%rsp)
vmovdqa %xmm0, 0x4e0(%rsp)
movq 0x578(%rsp), %rax
movl 0x8a4(%rsp), %edx
movl %edx, %ecx
leal (%rcx,%rcx,2), %ecx
movslq %ecx, %rcx
leaq (%rax,%rcx,2), %rax
movq %rax, 0xed8(%rsp)
movq 0xed8(%rsp), %rax
vmovdqu (%rax), %xmm0
vmovdqa %xmm0, 0xfd0(%rsp)
vmovdqa 0xfd0(%rsp), %xmm0
movq 0x578(%rsp), %rax
movl 0x8a4(%rsp), %ecx
shll $0x2, %ecx
movslq %ecx, %rcx
leaq (%rax,%rcx,2), %rax
movq %rax, 0xed0(%rsp)
movq 0xed0(%rsp), %rax
vmovdqu (%rax), %xmm2
vmovdqa %xmm2, 0xfc0(%rsp)
vmovdqa 0xfc0(%rsp), %xmm2
vmovdqa %xmm2, 0x4d0(%rsp)
vmovdqa %xmm0, 0x4c0(%rsp)
movq 0x578(%rsp), %rax
movl 0x8a4(%rsp), %ecx
shll $0x2, %ecx
movslq %ecx, %rcx
leaq (%rax,%rcx,2), %rax
movq %rax, 0xec8(%rsp)
movq 0xec8(%rsp), %rax
vmovdqu (%rax), %xmm0
vmovdqa %xmm0, 0xfb0(%rsp)
vmovdqa 0xfb0(%rsp), %xmm0
movq 0x578(%rsp), %rax
movl 0x8a4(%rsp), %edx
movl %edx, %ecx
leal (%rcx,%rcx,4), %ecx
movslq %ecx, %rcx
leaq (%rax,%rcx,2), %rax
movq %rax, 0xec0(%rsp)
movq 0xec0(%rsp), %rax
vmovdqu (%rax), %xmm2
vmovdqa %xmm2, 0xfa0(%rsp)
vmovdqa 0xfa0(%rsp), %xmm2
vmovdqa %xmm2, 0x4b0(%rsp)
vmovdqa %xmm0, 0x4a0(%rsp)
movq 0x578(%rsp), %rax
movl 0x8a4(%rsp), %edx
addl %edx, %edx
movl %edx, %ecx
leal (%rcx,%rcx,2), %ecx
movslq %ecx, %rcx
leaq (%rax,%rcx,2), %rax
movq %rax, 0xeb8(%rsp)
movq 0xeb8(%rsp), %rax
vmovdqu (%rax), %xmm0
vmovdqa %xmm0, 0xf90(%rsp)
vmovdqa 0xf90(%rsp), %xmm0
vmovdqa %xmm1, 0x550(%rsp)
vmovdqa %xmm0, 0x540(%rsp)
movq 0x578(%rsp), %rax
movl 0x8a4(%rsp), %edx
movl %edx, %ecx
leal (%rcx,%rcx,4), %ecx
movslq %ecx, %rcx
leaq (%rax,%rcx,2), %rax
movq %rax, 0xeb0(%rsp)
movq 0xeb0(%rsp), %rax
vmovdqu (%rax), %xmm0
vmovdqa %xmm0, 0xf80(%rsp)
vmovdqa 0xf80(%rsp), %xmm0
vmovdqa 0x540(%rsp), %xmm1
vmovdqa %xmm1, 0x490(%rsp)
vmovdqa %xmm0, 0x480(%rsp)
vmovaps 0x520(%rsp), %ymm1
vmovaps 0x500(%rsp), %ymm0
vmovaps %ymm1, 0xaa0(%rsp)
vmovaps %ymm0, 0xa80(%rsp)
vmovaps 0xaa0(%rsp), %ymm0
vmovaps 0xa80(%rsp), %ymm1
vpunpcklwd %ymm1, %ymm0, %ymm0 # ymm0 = ymm0[0],ymm1[0],ymm0[1],ymm1[1],ymm0[2],ymm1[2],ymm0[3],ymm1[3],ymm0[8],ymm1[8],ymm0[9],ymm1[9],ymm0[10],ymm1[10],ymm0[11],ymm1[11]
vmovaps %ymm0, 0x760(%rsp)
vmovaps 0x4e0(%rsp), %ymm1
vmovaps 0x4c0(%rsp), %ymm0
vmovaps %ymm1, 0xa60(%rsp)
vmovaps %ymm0, 0xa40(%rsp)
vmovaps 0xa60(%rsp), %ymm0
vmovaps 0xa40(%rsp), %ymm1
vpunpcklwd %ymm1, %ymm0, %ymm0 # ymm0 = ymm0[0],ymm1[0],ymm0[1],ymm1[1],ymm0[2],ymm1[2],ymm0[3],ymm1[3],ymm0[8],ymm1[8],ymm0[9],ymm1[9],ymm0[10],ymm1[10],ymm0[11],ymm1[11]
vmovaps %ymm0, 0x780(%rsp)
vmovaps 0x4a0(%rsp), %ymm1
vmovaps 0x480(%rsp), %ymm0
vmovaps %ymm1, 0xa20(%rsp)
vmovaps %ymm0, 0xa00(%rsp)
vmovaps 0xa20(%rsp), %ymm0
vmovaps 0xa00(%rsp), %ymm1
vpunpcklwd %ymm1, %ymm0, %ymm0 # ymm0 = ymm0[0],ymm1[0],ymm0[1],ymm1[1],ymm0[2],ymm1[2],ymm0[3],ymm1[3],ymm0[8],ymm1[8],ymm0[9],ymm1[9],ymm0[10],ymm1[10],ymm0[11],ymm1[11]
vmovaps %ymm0, 0x7a0(%rsp)
vmovaps 0x520(%rsp), %ymm1
vmovaps 0x500(%rsp), %ymm0
vmovaps %ymm1, 0xbe0(%rsp)
vmovaps %ymm0, 0xbc0(%rsp)
vmovaps 0xbe0(%rsp), %ymm0
vmovaps 0xbc0(%rsp), %ymm1
vpunpckhwd %ymm1, %ymm0, %ymm0 # ymm0 = ymm0[4],ymm1[4],ymm0[5],ymm1[5],ymm0[6],ymm1[6],ymm0[7],ymm1[7],ymm0[12],ymm1[12],ymm0[13],ymm1[13],ymm0[14],ymm1[14],ymm0[15],ymm1[15]
vmovaps %ymm0, 0x7e0(%rsp)
vmovaps 0x4e0(%rsp), %ymm1
vmovaps 0x4c0(%rsp), %ymm0
vmovaps %ymm1, 0xba0(%rsp)
vmovaps %ymm0, 0xb80(%rsp)
vmovaps 0xba0(%rsp), %ymm0
vmovaps 0xb80(%rsp), %ymm1
vpunpckhwd %ymm1, %ymm0, %ymm0 # ymm0 = ymm0[4],ymm1[4],ymm0[5],ymm1[5],ymm0[6],ymm1[6],ymm0[7],ymm1[7],ymm0[12],ymm1[12],ymm0[13],ymm1[13],ymm0[14],ymm1[14],ymm0[15],ymm1[15]
vmovaps %ymm0, 0x800(%rsp)
vmovaps 0x4a0(%rsp), %ymm1
vmovaps 0x480(%rsp), %ymm0
vmovaps %ymm1, 0xb60(%rsp)
vmovaps %ymm0, 0xb40(%rsp)
vmovaps 0xb60(%rsp), %ymm0
vmovaps 0xb40(%rsp), %ymm1
vpunpckhwd %ymm1, %ymm0, %ymm0 # ymm0 = ymm0[4],ymm1[4],ymm0[5],ymm1[5],ymm0[6],ymm1[6],ymm0[7],ymm1[7],ymm0[12],ymm1[12],ymm0[13],ymm1[13],ymm0[14],ymm1[14],ymm0[15],ymm1[15]
vmovdqa %ymm0, 0x820(%rsp)
movl $0x0, 0x868(%rsp)
movl 0x868(%rsp), %eax
cmpl 0x88c(%rsp), %eax
jge 0x945ff2
movq 0x870(%rsp), %rax
movl 0x868(%rsp), %ecx
movl 0x8a4(%rsp), %edx
imull %edx, %ecx
movl 0x864(%rsp), %edx
addl %edx, %ecx
movslq %ecx, %rcx
leaq (%rax,%rcx,2), %rax
movq %rax, 0x578(%rsp)
vmovaps 0x540(%rsp), %ymm0
movq 0x578(%rsp), %rax
movl 0x8a4(%rsp), %edx
movl %edx, %ecx
shll $0x3, %ecx
subl %edx, %ecx
movslq %ecx, %rcx
leaq (%rax,%rcx,2), %rax
movq %rax, 0xea8(%rsp)
movq 0xea8(%rsp), %rax
vmovdqu (%rax), %xmm1
vmovdqa %xmm1, 0xf70(%rsp)
vmovdqa 0xf70(%rsp), %xmm2
vinserti128 $0x1, %xmm2, %ymm0, %ymm0
vmovaps %ymm0, 0x460(%rsp)
movq 0x578(%rsp), %rax
movl 0x8a4(%rsp), %ecx
shll $0x3, %ecx
movslq %ecx, %rcx
leaq (%rax,%rcx,2), %rax
movq %rax, 0xea0(%rsp)
movq 0xea0(%rsp), %rax
vmovdqu (%rax), %xmm0
vmovdqa %xmm0, 0xf60(%rsp)
vmovdqa 0xf60(%rsp), %xmm0
vmovdqa %xmm1, 0x550(%rsp)
vmovdqa %xmm0, 0x540(%rsp)
movq 0x578(%rsp), %rax
movl 0x8a4(%rsp), %edx
movl %edx, %ecx
shll $0x3, %ecx
subl %edx, %ecx
movslq %ecx, %rcx
leaq (%rax,%rcx,2), %rax
movq %rax, 0xe98(%rsp)
movq 0xe98(%rsp), %rax
vmovdqu (%rax), %xmm0
vmovdqa %xmm0, 0xf50(%rsp)
vmovdqa 0xf50(%rsp), %xmm0
vmovdqa 0x540(%rsp), %xmm1
vmovdqa %xmm1, 0x450(%rsp)
vmovdqa %xmm0, 0x440(%rsp)
vmovaps 0x460(%rsp), %ymm1
vmovaps 0x440(%rsp), %ymm0
vmovaps %ymm1, 0x9e0(%rsp)
vmovaps %ymm0, 0x9c0(%rsp)
vmovaps 0x9e0(%rsp), %ymm0
vmovaps 0x9c0(%rsp), %ymm1
vpunpcklwd %ymm1, %ymm0, %ymm0 # ymm0 = ymm0[0],ymm1[0],ymm0[1],ymm1[1],ymm0[2],ymm1[2],ymm0[3],ymm1[3],ymm0[8],ymm1[8],ymm0[9],ymm1[9],ymm0[10],ymm1[10],ymm0[11],ymm1[11]
vmovaps %ymm0, 0x7c0(%rsp)
vmovaps 0x460(%rsp), %ymm1
vmovaps 0x440(%rsp), %ymm0
vmovaps %ymm1, 0xb20(%rsp)
vmovaps %ymm0, 0xb00(%rsp)
vmovaps 0xb20(%rsp), %ymm0
vmovaps 0xb00(%rsp), %ymm1
vpunpckhwd %ymm1, %ymm0, %ymm0 # ymm0 = ymm0[4],ymm1[4],ymm0[5],ymm1[5],ymm0[6],ymm1[6],ymm0[7],ymm1[7],ymm0[12],ymm1[12],ymm0[13],ymm1[13],ymm0[14],ymm1[14],ymm0[15],ymm1[15]
vmovdqa %ymm0, 0x840(%rsp)
leaq 0x760(%rsp), %rdi
leaq 0x6e0(%rsp), %rsi
callq 0x942d00
vmovdqa %ymm0, 0x420(%rsp)
vmovdqa 0x420(%rsp), %ymm1
vmovdqa 0x640(%rsp), %xmm0
vmovdqa %ymm1, 0x12e0(%rsp)
vmovdqa %xmm0, 0x12d0(%rsp)
vmovdqa 0x12e0(%rsp), %ymm0
vmovdqa 0x12d0(%rsp), %xmm1
vpslld %xmm1, %ymm0, %ymm0
vmovdqa %ymm0, 0x400(%rsp)
vmovdqa 0x400(%rsp), %ymm1
vmovdqa 0x660(%rsp), %ymm0
vmovdqa %ymm1, 0xce0(%rsp)
vmovdqa %ymm0, 0xcc0(%rsp)
vmovdqa 0xce0(%rsp), %ymm0
vmovdqa 0xcc0(%rsp), %ymm1
vpaddd %ymm1, %ymm0, %ymm1
vmovdqa 0x650(%rsp), %xmm0
vmovdqa %ymm1, 0x1260(%rsp)
vmovdqa %xmm0, 0x1250(%rsp)
vmovdqa 0x1260(%rsp), %ymm0
vmovdqa 0x1250(%rsp), %xmm1
vpsrad %xmm1, %ymm0, %ymm0
vmovdqa %ymm0, 0x400(%rsp)
vmovdqa 0x400(%rsp), %ymm1
vmovdqa 0x600(%rsp), %ymm0
vmovdqa %ymm1, 0xca0(%rsp)
vmovdqa %ymm0, 0xc80(%rsp)
vmovdqa 0xca0(%rsp), %ymm0
vmovdqa 0xc80(%rsp), %ymm1
vpaddd %ymm1, %ymm0, %ymm0
vmovdqa %ymm0, 0x3e0(%rsp)
movl 0x890(%rsp), %eax
subl 0x864(%rsp), %eax
cmpl $0x8, %eax
jge 0x945972
cmpl $0x0, 0x6dc(%rsp)
je 0x94584c
movq 0x880(%rsp), %rax
movl 0x868(%rsp), %ecx
movl 0x87c(%rsp), %edx
imull %edx, %ecx
movl 0x864(%rsp), %edx
addl %edx, %ecx
movslq %ecx, %rcx
leaq (%rax,%rcx,2), %rax
movq %rax, 0x1078(%rsp)
movq 0x1078(%rsp), %rax
vmovq (%rax), %xmm0
vmovdqa %xmm0, 0x1060(%rsp)
vmovdqa 0x1060(%rsp), %xmm0
vmovdqa %xmm0, 0xf40(%rsp)
vmovdqa 0xf40(%rsp), %xmm0
vmovdqa %xmm1, 0x3d0(%rsp)
vmovdqa %xmm0, 0x3c0(%rsp)
movq 0x880(%rsp), %rax
movl 0x868(%rsp), %ecx
movl 0x87c(%rsp), %edx
imull %edx, %ecx
movl 0x864(%rsp), %esi
addl %esi, %ecx
addl %edx, %ecx
movslq %ecx, %rcx
leaq (%rax,%rcx,2), %rax
movq %rax, 0x1058(%rsp)
movq 0x1058(%rsp), %rax
vmovq (%rax), %xmm0
vmovdqa %xmm0, 0x1040(%rsp)
vmovdqa 0x1040(%rsp), %xmm0
vmovdqa %xmm0, 0xf30(%rsp)
vmovdqa 0xf30(%rsp), %xmm0
vmovdqa %xmm1, 0x3b0(%rsp)
vmovdqa %xmm0, 0x3a0(%rsp)
vmovaps 0x3c0(%rsp), %ymm0
vmovaps 0x3a0(%rsp), %ymm1
vperm2i128 $0x20, %ymm1, %ymm0, %ymm0 # ymm0 = ymm0[0,1],ymm1[0,1]
vmovaps %ymm0, 0x380(%rsp)
vmovaps 0x380(%rsp), %ymm1
vmovaps 0x580(%rsp), %ymm0
vmovaps %ymm1, 0x9a0(%rsp)
vmovaps %ymm0, 0x980(%rsp)
vmovaps 0x9a0(%rsp), %ymm0
vmovaps 0x980(%rsp), %ymm1
vpunpcklwd %ymm1, %ymm0, %ymm0 # ymm0 = ymm0[0],ymm1[0],ymm0[1],ymm1[1],ymm0[2],ymm1[2],ymm0[3],ymm1[3],ymm0[8],ymm1[8],ymm0[9],ymm1[9],ymm0[10],ymm1[10],ymm0[11],ymm1[11]
vmovaps %ymm0, 0x360(%rsp)
movl 0x6d8(%rsp), %r8d
leaq 0x360(%rsp), %rdi
leaq 0x3e0(%rsp), %rsi
leaq 0x6a0(%rsp), %rdx
leaq 0x680(%rsp), %rcx
callq 0x940c50
vmovaps %ymm0, 0x340(%rsp)
movl 0x5fc(%rsp), %ecx
leaq 0x340(%rsp), %rdi
leaq 0x600(%rsp), %rsi
leaq 0x5c0(%rsp), %rdx
callq 0x940e30
vmovaps %ymm0, 0x320(%rsp)
vmovaps 0x320(%rsp), %ymm0
vmovaps %ymm0, 0xde0(%rsp)
vmovaps %ymm0, 0xdc0(%rsp)
vmovaps 0xde0(%rsp), %ymm0
vmovaps 0xdc0(%rsp), %ymm1
vpackusdw %ymm1, %ymm0, %ymm0
vmovaps %ymm0, 0x300(%rsp)
vmovaps 0x300(%rsp), %ymm1
vmovaps 0x5a0(%rsp), %ymm0
vmovaps %ymm1, 0xe60(%rsp)
vmovaps %ymm0, 0xe40(%rsp)
vmovaps 0xe60(%rsp), %ymm0
vmovaps 0xe40(%rsp), %ymm1
vpminsw %ymm1, %ymm0, %ymm0
vmovaps %ymm0, 0x2e0(%rsp)
vmovaps 0x2e0(%rsp), %ymm0
vmovaps %ymm0, 0x10e0(%rsp)
vmovaps 0x10e0(%rsp), %ymm0
vmovdqa %xmm0, 0x2d0(%rsp)
vmovaps 0x2e0(%rsp), %ymm0
vextracti128 $0x1, %ymm0, 0x2c0(%rsp)
movq 0x898(%rsp), %rax
movl 0x868(%rsp), %ecx
movl 0x894(%rsp), %edx
imull %edx, %ecx
movl 0x864(%rsp), %edx
addl %edx, %ecx
movslq %ecx, %rcx
leaq (%rax,%rcx,2), %rax
vmovdqa 0x2d0(%rsp), %xmm0
movq %rax, 0x1188(%rsp)
vmovdqa %xmm0, 0x1170(%rsp)
movq 0x1170(%rsp), %rcx
movq 0x1188(%rsp), %rax
movq %rcx, (%rax)
movq 0x898(%rsp), %rax
movl 0x868(%rsp), %ecx
movl 0x894(%rsp), %edx
imull %edx, %ecx
movl 0x864(%rsp), %esi
addl %esi, %ecx
addl %edx, %ecx
movslq %ecx, %rcx
leaq (%rax,%rcx,2), %rax
vmovdqa 0x2c0(%rsp), %xmm0
movq %rax, 0x1168(%rsp)
vmovdqa %xmm0, 0x1150(%rsp)
movq 0x1150(%rsp), %rcx
movq 0x1168(%rsp), %rax
movq %rcx, (%rax)
jmp 0x94596d
vmovaps 0x3e0(%rsp), %ymm0
vmovaps %ymm0, 0xda0(%rsp)
vmovaps %ymm0, 0xd80(%rsp)
vmovaps 0xda0(%rsp), %ymm0
vmovaps 0xd80(%rsp), %ymm1
vpackusdw %ymm1, %ymm0, %ymm0
vmovaps %ymm0, 0x2a0(%rsp)
vmovaps 0x2a0(%rsp), %ymm0
vmovaps %ymm0, 0x10c0(%rsp)
vmovaps 0x10c0(%rsp), %ymm0
vmovdqa %xmm0, 0x290(%rsp)
vmovaps 0x2a0(%rsp), %ymm0
vextracti128 $0x1, %ymm0, 0x280(%rsp)
movq 0x880(%rsp), %rax
movl 0x868(%rsp), %ecx
movl 0x87c(%rsp), %edx
imull %edx, %ecx
movl 0x864(%rsp), %edx
addl %edx, %ecx
movslq %ecx, %rcx
leaq (%rax,%rcx,2), %rax
vmovdqa 0x290(%rsp), %xmm0
movq %rax, 0x1148(%rsp)
vmovdqa %xmm0, 0x1130(%rsp)
movq 0x1130(%rsp), %rcx
movq 0x1148(%rsp), %rax
movq %rcx, (%rax)
movq 0x880(%rsp), %rax
movl 0x868(%rsp), %ecx
movl 0x87c(%rsp), %edx
imull %edx, %ecx
movl 0x864(%rsp), %esi
addl %esi, %ecx
addl %edx, %ecx
movslq %ecx, %rcx
leaq (%rax,%rcx,2), %rax
vmovdqa 0x280(%rsp), %xmm0
movq %rax, 0x1128(%rsp)
vmovdqa %xmm0, 0x1110(%rsp)
movq 0x1110(%rsp), %rcx
movq 0x1128(%rsp), %rax
movq %rcx, (%rax)
jmp 0x945f70
leaq 0x760(%rsp), %rdi
addq $0x80, %rdi
leaq 0x6e0(%rsp), %rsi
callq 0x942d00
vmovdqa %ymm0, 0x260(%rsp)
vmovdqa 0x260(%rsp), %ymm1
vmovdqa 0x640(%rsp), %xmm0
vmovdqa %ymm1, 0x12a0(%rsp)
vmovdqa %xmm0, 0x1290(%rsp)
vmovdqa 0x12a0(%rsp), %ymm0
vmovdqa 0x1290(%rsp), %xmm1
vpslld %xmm1, %ymm0, %ymm0
vmovdqa %ymm0, 0x240(%rsp)
vmovdqa 0x240(%rsp), %ymm1
vmovdqa 0x660(%rsp), %ymm0
vmovdqa %ymm1, 0xc60(%rsp)
vmovdqa %ymm0, 0xc40(%rsp)
vmovdqa 0xc60(%rsp), %ymm0
vmovdqa 0xc40(%rsp), %ymm1
vpaddd %ymm1, %ymm0, %ymm1
vmovdqa 0x650(%rsp), %xmm0
vmovdqa %ymm1, 0x1220(%rsp)
vmovdqa %xmm0, 0x1210(%rsp)
vmovdqa 0x1220(%rsp), %ymm0
vmovdqa 0x1210(%rsp), %xmm1
vpsrad %xmm1, %ymm0, %ymm0
vmovdqa %ymm0, 0x240(%rsp)
vmovdqa 0x240(%rsp), %ymm1
vmovdqa 0x600(%rsp), %ymm0
vmovdqa %ymm1, 0xc20(%rsp)
vmovdqa %ymm0, 0xc00(%rsp)
vmovdqa 0xc20(%rsp), %ymm0
vmovdqa 0xc00(%rsp), %ymm1
vpaddd %ymm1, %ymm0, %ymm0
vmovdqa %ymm0, 0x220(%rsp)
cmpl $0x0, 0x6dc(%rsp)
je 0x945e4d
movq 0x880(%rsp), %rax
movl 0x868(%rsp), %ecx
movl 0x87c(%rsp), %edx
imull %edx, %ecx
movl 0x864(%rsp), %edx
addl %edx, %ecx
movslq %ecx, %rcx
leaq (%rax,%rcx,2), %rax
movq %rax, 0xe90(%rsp)
movq 0xe90(%rsp), %rax
vmovdqu (%rax), %xmm0
vmovdqa %xmm0, 0xf20(%rsp)
vmovdqa 0xf20(%rsp), %xmm0
vmovdqa %xmm1, 0x210(%rsp)
vmovdqa %xmm0, 0x200(%rsp)
movq 0x880(%rsp), %rax
movl 0x868(%rsp), %ecx
movl 0x87c(%rsp), %edx
imull %edx, %ecx
movl 0x864(%rsp), %esi
addl %esi, %ecx
addl %edx, %ecx
movslq %ecx, %rcx
leaq (%rax,%rcx,2), %rax
movq %rax, 0xe88(%rsp)
movq 0xe88(%rsp), %rax
vmovdqu (%rax), %xmm0
vmovdqa %xmm0, 0xf10(%rsp)
vmovdqa 0xf10(%rsp), %xmm0
vmovdqa %xmm1, 0x1f0(%rsp)
vmovdqa %xmm0, 0x1e0(%rsp)
vmovaps 0x200(%rsp), %ymm0
vmovaps 0x1e0(%rsp), %ymm1
vperm2i128 $0x20, %ymm1, %ymm0, %ymm0 # ymm0 = ymm0[0,1],ymm1[0,1]
vmovaps %ymm0, 0x1c0(%rsp)
vmovaps 0x1c0(%rsp), %ymm1
vmovaps 0x580(%rsp), %ymm0
vmovaps %ymm1, 0x960(%rsp)
vmovaps %ymm0, 0x940(%rsp)
vmovaps 0x960(%rsp), %ymm0
vmovaps 0x940(%rsp), %ymm1
vpunpcklwd %ymm1, %ymm0, %ymm0 # ymm0 = ymm0[0],ymm1[0],ymm0[1],ymm1[1],ymm0[2],ymm1[2],ymm0[3],ymm1[3],ymm0[8],ymm1[8],ymm0[9],ymm1[9],ymm0[10],ymm1[10],ymm0[11],ymm1[11]
vmovaps %ymm0, 0x1a0(%rsp)
vmovaps 0x1c0(%rsp), %ymm1
vmovaps 0x580(%rsp), %ymm0
vmovaps %ymm1, 0xae0(%rsp)
vmovaps %ymm0, 0xac0(%rsp)
vmovaps 0xae0(%rsp), %ymm0
vmovaps 0xac0(%rsp), %ymm1
vpunpckhwd %ymm1, %ymm0, %ymm0 # ymm0 = ymm0[4],ymm1[4],ymm0[5],ymm1[5],ymm0[6],ymm1[6],ymm0[7],ymm1[7],ymm0[12],ymm1[12],ymm0[13],ymm1[13],ymm0[14],ymm1[14],ymm0[15],ymm1[15]
vmovaps %ymm0, 0x180(%rsp)
movl 0x6d8(%rsp), %r8d
leaq 0x1a0(%rsp), %rdi
leaq 0x3e0(%rsp), %rsi
leaq 0x6a0(%rsp), %rdx
movq %rdx, 0x18(%rsp)
leaq 0x680(%rsp), %rcx
movq %rcx, 0x20(%rsp)
callq 0x940c50
movq 0x18(%rsp), %rdx
movq 0x20(%rsp), %rcx
vmovaps %ymm0, 0x160(%rsp)
movl 0x6d8(%rsp), %r8d
leaq 0x180(%rsp), %rdi
leaq 0x220(%rsp), %rsi
callq 0x940c50
vmovaps %ymm0, 0x140(%rsp)
movl 0x5fc(%rsp), %ecx
leaq 0x160(%rsp), %rdi
leaq 0x600(%rsp), %rsi
movq %rsi, 0x28(%rsp)
leaq 0x5c0(%rsp), %rdx
movq %rdx, 0x30(%rsp)
callq 0x940e30
movq 0x28(%rsp), %rsi
movq 0x30(%rsp), %rdx
vmovaps %ymm0, 0x120(%rsp)
movl 0x5fc(%rsp), %ecx
leaq 0x140(%rsp), %rdi
callq 0x940e30
vmovaps %ymm0, 0x100(%rsp)
vmovaps 0x120(%rsp), %ymm1
vmovaps 0x100(%rsp), %ymm0
vmovaps %ymm1, 0xd60(%rsp)
vmovaps %ymm0, 0xd40(%rsp)
vmovaps 0xd60(%rsp), %ymm0
vmovaps 0xd40(%rsp), %ymm1
vpackusdw %ymm1, %ymm0, %ymm0
vmovaps %ymm0, 0xe0(%rsp)
vmovaps 0xe0(%rsp), %ymm1
vmovaps 0x5a0(%rsp), %ymm0
vmovaps %ymm1, 0xe20(%rsp)
vmovaps %ymm0, 0xe00(%rsp)
vmovaps 0xe20(%rsp), %ymm0
vmovaps 0xe00(%rsp), %ymm1
vpminsw %ymm1, %ymm0, %ymm0
vmovaps %ymm0, 0xc0(%rsp)
vmovaps 0xc0(%rsp), %ymm0
vmovaps %ymm0, 0x10a0(%rsp)
vmovaps 0x10a0(%rsp), %ymm0
vmovdqa %xmm0, 0xb0(%rsp)
vmovdqa 0xd0(%rsp), %xmm0
vmovdqa %xmm0, 0xa0(%rsp)
movq 0x898(%rsp), %rax
movl 0x868(%rsp), %ecx
imull 0x894(%rsp), %ecx
addl 0x864(%rsp), %ecx
movslq %ecx, %rcx
shlq %rcx
addq %rcx, %rax
vmovdqa 0xb0(%rsp), %xmm0
movq %rax, 0x1208(%rsp)
vmovdqa %xmm0, 0x11f0(%rsp)
vmovdqa 0x11f0(%rsp), %xmm0
movq 0x1208(%rsp), %rax
vmovdqa %xmm0, (%rax)
movq 0x898(%rsp), %rax
movl 0x868(%rsp), %ecx
imull 0x894(%rsp), %ecx
addl 0x864(%rsp), %ecx
addl 0x894(%rsp), %ecx
movslq %ecx, %rcx
shlq %rcx
addq %rcx, %rax
vmovdqa 0xa0(%rsp), %xmm0
movq %rax, 0x11e8(%rsp)
vmovdqa %xmm0, 0x11d0(%rsp)
vmovdqa 0x11d0(%rsp), %xmm0
movq 0x11e8(%rsp), %rax
vmovdqa %xmm0, (%rax)
jmp 0x945f6e
vmovaps 0x3e0(%rsp), %ymm1
vmovaps 0x220(%rsp), %ymm0
vmovaps %ymm1, 0xd20(%rsp)
vmovaps %ymm0, 0xd00(%rsp)
vmovaps 0xd20(%rsp), %ymm0
vmovaps 0xd00(%rsp), %ymm1
vpackusdw %ymm1, %ymm0, %ymm0
vmovaps %ymm0, 0x80(%rsp)
vmovaps 0x80(%rsp), %ymm0
vmovaps %ymm0, 0x1080(%rsp)
vmovaps 0x1080(%rsp), %ymm0
vmovdqa %xmm0, 0x70(%rsp)
vmovdqa 0x90(%rsp), %xmm0
vmovdqa %xmm0, 0x60(%rsp)
movq 0x880(%rsp), %rax
movl 0x868(%rsp), %ecx
imull 0x87c(%rsp), %ecx
addl 0x864(%rsp), %ecx
movslq %ecx, %rcx
shlq %rcx
addq %rcx, %rax
vmovdqa 0x70(%rsp), %xmm0
movq %rax, 0x11c8(%rsp)
vmovdqa %xmm0, 0x11b0(%rsp)
vmovdqa 0x11b0(%rsp), %xmm0
movq 0x11c8(%rsp), %rax
vmovdqa %xmm0, (%rax)
movq 0x880(%rsp), %rax
movl 0x868(%rsp), %ecx
imull 0x87c(%rsp), %ecx
addl 0x864(%rsp), %ecx
addl 0x87c(%rsp), %ecx
movslq %ecx, %rcx
shlq %rcx
addq %rcx, %rax
vmovdqa 0x60(%rsp), %xmm0
movq %rax, 0x11a8(%rsp)
vmovdqa %xmm0, 0x1190(%rsp)
vmovdqa 0x1190(%rsp), %xmm0
movq 0x11a8(%rsp), %rax
vmovdqa %xmm0, (%rax)
jmp 0x945f70
vmovdqa 0x780(%rsp), %ymm0
vmovdqa %ymm0, 0x760(%rsp)
vmovdqa 0x7a0(%rsp), %ymm0
vmovdqa %ymm0, 0x780(%rsp)
vmovdqa 0x7c0(%rsp), %ymm0
vmovdqa %ymm0, 0x7a0(%rsp)
vmovdqa 0x800(%rsp), %ymm0
vmovdqa %ymm0, 0x7e0(%rsp)
vmovdqa 0x820(%rsp), %ymm0
vmovdqa %ymm0, 0x800(%rsp)
vmovdqa 0x840(%rsp), %ymm0
vmovdqa %ymm0, 0x820(%rsp)
movl 0x868(%rsp), %eax
addl $0x2, %eax
movl %eax, 0x868(%rsp)
jmp 0x945222
jmp 0x945ff4
movl 0x864(%rsp), %eax
addl $0x8, %eax
movl %eax, 0x864(%rsp)
jmp 0x944ceb
movq %rbp, %rsp
popq %rbp
vzeroupper
retq
nopw %cs:(%rax,%rax)
| /m-ab-s[P]aom/av1/common/x86/highbd_jnt_convolve_avx2.c |
av1_highbd_wiener_convolve_add_src_avx2 | void av1_highbd_wiener_convolve_add_src_avx2(
const uint8_t *src8, ptrdiff_t src_stride, uint8_t *dst8,
ptrdiff_t dst_stride, const int16_t *filter_x, int x_step_q4,
const int16_t *filter_y, int y_step_q4, int w, int h,
const WienerConvolveParams *conv_params, int bd) {
assert(x_step_q4 == 16 && y_step_q4 == 16);
assert(!(w & 7));
assert(bd + FILTER_BITS - conv_params->round_0 + 2 <= 16);
(void)x_step_q4;
(void)y_step_q4;
const uint16_t *const src = CONVERT_TO_SHORTPTR(src8);
uint16_t *const dst = CONVERT_TO_SHORTPTR(dst8);
DECLARE_ALIGNED(32, uint16_t,
temp[(MAX_SB_SIZE + SUBPEL_TAPS - 1) * MAX_SB_SIZE]);
int intermediate_height = h + SUBPEL_TAPS - 1;
const int center_tap = ((SUBPEL_TAPS - 1) / 2);
const uint16_t *const src_ptr = src - center_tap * src_stride - center_tap;
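  // The horizontal pass below produces h + SUBPEL_TAPS - 1 rows so the 8-tap
  // vertical pass has full support for every output row; src_ptr backs up
  // center_tap (3) rows and columns so the taps are centred on each pixel.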
const __m128i zero_128 = _mm_setzero_si128();
const __m256i zero_256 = _mm256_setzero_si256();
// Add an offset to account for the "add_src" part of the convolve function.
const __m128i offset = _mm_insert_epi16(zero_128, 1 << FILTER_BITS, 3);
const __m256i clamp_low = zero_256;
/* Horizontal filter */
{
const __m256i clamp_high_ep =
_mm256_set1_epi16(WIENER_CLAMP_LIMIT(conv_params->round_0, bd) - 1);
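    // WIENER_CLAMP_LIMIT(r0, bd) is 1 << (bd + 1 + FILTER_BITS - r0), so the
    // horizontal output is clamped into the unsigned intermediate range that
    // the vertical pass below expects.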
// coeffs [ f7 f6 f5 f4 f3 f2 f1 f0 ]
const __m128i coeffs_x = _mm_add_epi16(xx_loadu_128(filter_x), offset);
// coeffs [ f3 f2 f3 f2 f1 f0 f1 f0 ]
const __m128i coeffs_0123 = _mm_unpacklo_epi32(coeffs_x, coeffs_x);
// coeffs [ f7 f6 f7 f6 f5 f4 f5 f4 ]
const __m128i coeffs_4567 = _mm_unpackhi_epi32(coeffs_x, coeffs_x);
// coeffs [ f1 f0 f1 f0 f1 f0 f1 f0 ]
const __m128i coeffs_01_128 = _mm_unpacklo_epi64(coeffs_0123, coeffs_0123);
// coeffs [ f3 f2 f3 f2 f3 f2 f3 f2 ]
const __m128i coeffs_23_128 = _mm_unpackhi_epi64(coeffs_0123, coeffs_0123);
// coeffs [ f5 f4 f5 f4 f5 f4 f5 f4 ]
const __m128i coeffs_45_128 = _mm_unpacklo_epi64(coeffs_4567, coeffs_4567);
// coeffs [ f7 f6 f7 f6 f7 f6 f7 f6 ]
const __m128i coeffs_67_128 = _mm_unpackhi_epi64(coeffs_4567, coeffs_4567);
// coeffs [ f1 f0 f1 f0 f1 f0 f1 f0 ][ f1 f0 f1 f0 f1 f0 f1 f0 ]
const __m256i coeffs_01 = yy_set_m128i(coeffs_01_128, coeffs_01_128);
// coeffs [ f3 f2 f3 f2 f3 f2 f3 f2 ][ f3 f2 f3 f2 f3 f2 f3 f2 ]
const __m256i coeffs_23 = yy_set_m128i(coeffs_23_128, coeffs_23_128);
// coeffs [ f5 f4 f5 f4 f5 f4 f5 f4 ][ f5 f4 f5 f4 f5 f4 f5 f4 ]
const __m256i coeffs_45 = yy_set_m128i(coeffs_45_128, coeffs_45_128);
// coeffs [ f7 f6 f7 f6 f7 f6 f7 f6 ][ f7 f6 f7 f6 f7 f6 f7 f6 ]
const __m256i coeffs_67 = yy_set_m128i(coeffs_67_128, coeffs_67_128);
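    // With the coefficient pairs broadcast like this, each _mm256_madd_epi16
    // below computes p[c] * f_even + p[c + 1] * f_odd per 32-bit lane; summing
    // the four products taken at source offsets 0/2/4/6 (resp. 1/3/5/7) yields
    // the full 8-tap result for the even- (resp. odd-) indexed output pixels.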
const __m256i round_const = _mm256_set1_epi32(
(1 << (conv_params->round_0 - 1)) + (1 << (bd + FILTER_BITS - 1)));
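    // The (1 << (bd + FILTER_BITS - 1)) term is a positive bias that keeps the
    // clamped horizontal output non-negative; the vertical round_const below
    // carries a corresponding negative term.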
for (int i = 0; i < intermediate_height; ++i) {
for (int j = 0; j < w; j += 16) {
const uint16_t *src_ij = src_ptr + i * src_stride + j;
// Load 16-bit src data
const __m256i src_0 = yy_loadu_256(src_ij + 0);
const __m256i src_1 = yy_loadu_256(src_ij + 1);
const __m256i src_2 = yy_loadu_256(src_ij + 2);
const __m256i src_3 = yy_loadu_256(src_ij + 3);
const __m256i src_4 = yy_loadu_256(src_ij + 4);
const __m256i src_5 = yy_loadu_256(src_ij + 5);
const __m256i src_6 = yy_loadu_256(src_ij + 6);
const __m256i src_7 = yy_loadu_256(src_ij + 7);
// Multiply src data by filter coeffs and sum pairs
const __m256i res_0 = _mm256_madd_epi16(src_0, coeffs_01);
const __m256i res_1 = _mm256_madd_epi16(src_1, coeffs_01);
const __m256i res_2 = _mm256_madd_epi16(src_2, coeffs_23);
const __m256i res_3 = _mm256_madd_epi16(src_3, coeffs_23);
const __m256i res_4 = _mm256_madd_epi16(src_4, coeffs_45);
const __m256i res_5 = _mm256_madd_epi16(src_5, coeffs_45);
const __m256i res_6 = _mm256_madd_epi16(src_6, coeffs_67);
const __m256i res_7 = _mm256_madd_epi16(src_7, coeffs_67);
// Calculate scalar product for even- and odd-indices separately,
// increasing to 32-bit precision
const __m256i res_even_sum = _mm256_add_epi32(
_mm256_add_epi32(res_0, res_4), _mm256_add_epi32(res_2, res_6));
const __m256i res_even = _mm256_srai_epi32(
_mm256_add_epi32(res_even_sum, round_const), conv_params->round_0);
const __m256i res_odd_sum = _mm256_add_epi32(
_mm256_add_epi32(res_1, res_5), _mm256_add_epi32(res_3, res_7));
const __m256i res_odd = _mm256_srai_epi32(
_mm256_add_epi32(res_odd_sum, round_const), conv_params->round_0);
// Reduce to 16-bit precision and pack even- and odd-index results
// back into one register. The _mm256_packs_epi32 intrinsic returns
// a register with the pixels ordered as follows:
// [ 15 13 11 9 14 12 10 8 ] [ 7 5 3 1 6 4 2 0 ]
const __m256i res = _mm256_packs_epi32(res_even, res_odd);
const __m256i res_clamped =
_mm256_min_epi16(_mm256_max_epi16(res, clamp_low), clamp_high_ep);
// Store in a temporary array
yy_storeu_256(temp + i * MAX_SB_SIZE + j, res_clamped);
}
}
}
/* Vertical filter */
{
const __m256i clamp_high = _mm256_set1_epi16((1 << bd) - 1);
// coeffs [ f7 f6 f5 f4 f3 f2 f1 f0 ]
const __m128i coeffs_y = _mm_add_epi16(xx_loadu_128(filter_y), offset);
// coeffs [ f3 f2 f3 f2 f1 f0 f1 f0 ]
const __m128i coeffs_0123 = _mm_unpacklo_epi32(coeffs_y, coeffs_y);
// coeffs [ f7 f6 f7 f6 f5 f4 f5 f4 ]
const __m128i coeffs_4567 = _mm_unpackhi_epi32(coeffs_y, coeffs_y);
// coeffs [ f1 f0 f1 f0 f1 f0 f1 f0 ]
const __m128i coeffs_01_128 = _mm_unpacklo_epi64(coeffs_0123, coeffs_0123);
// coeffs [ f3 f2 f3 f2 f3 f2 f3 f2 ]
const __m128i coeffs_23_128 = _mm_unpackhi_epi64(coeffs_0123, coeffs_0123);
// coeffs [ f5 f4 f5 f4 f5 f4 f5 f4 ]
const __m128i coeffs_45_128 = _mm_unpacklo_epi64(coeffs_4567, coeffs_4567);
// coeffs [ f7 f6 f7 f6 f7 f6 f7 f6 ]
const __m128i coeffs_67_128 = _mm_unpackhi_epi64(coeffs_4567, coeffs_4567);
// coeffs [ f1 f0 f1 f0 f1 f0 f1 f0 ][ f1 f0 f1 f0 f1 f0 f1 f0 ]
const __m256i coeffs_01 = yy_set_m128i(coeffs_01_128, coeffs_01_128);
// coeffs [ f3 f2 f3 f2 f3 f2 f3 f2 ][ f3 f2 f3 f2 f3 f2 f3 f2 ]
const __m256i coeffs_23 = yy_set_m128i(coeffs_23_128, coeffs_23_128);
// coeffs [ f5 f4 f5 f4 f5 f4 f5 f4 ][ f5 f4 f5 f4 f5 f4 f5 f4 ]
const __m256i coeffs_45 = yy_set_m128i(coeffs_45_128, coeffs_45_128);
// coeffs [ f7 f6 f7 f6 f7 f6 f7 f6 ][ f7 f6 f7 f6 f7 f6 f7 f6 ]
const __m256i coeffs_67 = yy_set_m128i(coeffs_67_128, coeffs_67_128);
const __m256i round_const =
_mm256_set1_epi32((1 << (conv_params->round_1 - 1)) -
(1 << (bd + conv_params->round_1 - 1)));
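    // The negative (1 << (bd + round_1 - 1)) term is presumably there to
    // remove the bias added by the horizontal pass once it has been multiplied
    // through by the vertical taps, so the final result lands in [0, 1 << bd).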
for (int i = 0; i < h; ++i) {
for (int j = 0; j < w; j += 16) {
const uint16_t *temp_ij = temp + i * MAX_SB_SIZE + j;
// Load 16-bit data from the output of the horizontal filter in
// which the pixels are ordered as follows:
// [ 15 13 11 9 14 12 10 8 ] [ 7 5 3 1 6 4 2 0 ]
const __m256i data_0 = yy_loadu_256(temp_ij + 0 * MAX_SB_SIZE);
const __m256i data_1 = yy_loadu_256(temp_ij + 1 * MAX_SB_SIZE);
const __m256i data_2 = yy_loadu_256(temp_ij + 2 * MAX_SB_SIZE);
const __m256i data_3 = yy_loadu_256(temp_ij + 3 * MAX_SB_SIZE);
const __m256i data_4 = yy_loadu_256(temp_ij + 4 * MAX_SB_SIZE);
const __m256i data_5 = yy_loadu_256(temp_ij + 5 * MAX_SB_SIZE);
const __m256i data_6 = yy_loadu_256(temp_ij + 6 * MAX_SB_SIZE);
const __m256i data_7 = yy_loadu_256(temp_ij + 7 * MAX_SB_SIZE);
// Filter the even-indices, increasing to 32-bit precision
const __m256i src_0 = _mm256_unpacklo_epi16(data_0, data_1);
const __m256i src_2 = _mm256_unpacklo_epi16(data_2, data_3);
const __m256i src_4 = _mm256_unpacklo_epi16(data_4, data_5);
const __m256i src_6 = _mm256_unpacklo_epi16(data_6, data_7);
const __m256i res_0 = _mm256_madd_epi16(src_0, coeffs_01);
const __m256i res_2 = _mm256_madd_epi16(src_2, coeffs_23);
const __m256i res_4 = _mm256_madd_epi16(src_4, coeffs_45);
const __m256i res_6 = _mm256_madd_epi16(src_6, coeffs_67);
const __m256i res_even = _mm256_add_epi32(
_mm256_add_epi32(res_0, res_2), _mm256_add_epi32(res_4, res_6));
// Filter the odd-indices, increasing to 32-bit precision
const __m256i src_1 = _mm256_unpackhi_epi16(data_0, data_1);
const __m256i src_3 = _mm256_unpackhi_epi16(data_2, data_3);
const __m256i src_5 = _mm256_unpackhi_epi16(data_4, data_5);
const __m256i src_7 = _mm256_unpackhi_epi16(data_6, data_7);
const __m256i res_1 = _mm256_madd_epi16(src_1, coeffs_01);
const __m256i res_3 = _mm256_madd_epi16(src_3, coeffs_23);
const __m256i res_5 = _mm256_madd_epi16(src_5, coeffs_45);
const __m256i res_7 = _mm256_madd_epi16(src_7, coeffs_67);
const __m256i res_odd = _mm256_add_epi32(
_mm256_add_epi32(res_1, res_3), _mm256_add_epi32(res_5, res_7));
// Pixels are currently in the following order:
// res_even order: [ 14 12 10 8 ] [ 6 4 2 0 ]
// res_odd order: [ 15 13 11 9 ] [ 7 5 3 1 ]
//
// Rearrange the pixels into the following order:
// res_lo order: [ 11 10 9 8 ] [ 3 2 1 0 ]
// res_hi order: [ 15 14 13 12 ] [ 7 6 5 4 ]
const __m256i res_lo = _mm256_unpacklo_epi32(res_even, res_odd);
const __m256i res_hi = _mm256_unpackhi_epi32(res_even, res_odd);
const __m256i res_lo_round = _mm256_srai_epi32(
_mm256_add_epi32(res_lo, round_const), conv_params->round_1);
const __m256i res_hi_round = _mm256_srai_epi32(
_mm256_add_epi32(res_hi, round_const), conv_params->round_1);
// Reduce to 16-bit precision and pack into the correct order:
// [ 15 14 13 12 11 10 9 8 ][ 7 6 5 4 3 2 1 0 ]
const __m256i res_16bit =
_mm256_packs_epi32(res_lo_round, res_hi_round);
const __m256i res_16bit_clamped = _mm256_min_epi16(
_mm256_max_epi16(res_16bit, clamp_low), clamp_high);
// Store in the dst array
yy_storeu_256(dst + i * dst_stride + j, res_16bit_clamped);
}
}
}
} | pushq %rbp
movq %rsp, %rbp
andq $-0x20, %rsp
subq $0xa1a0, %rsp # imm = 0xA1A0
movl 0x38(%rbp), %eax
movq 0x30(%rbp), %rax
movl 0x28(%rbp), %eax
movl 0x20(%rbp), %eax
movl 0x18(%rbp), %eax
movq 0x10(%rbp), %rax
movq %rdi, 0x9168(%rsp)
movq %rsi, 0x9160(%rsp)
movq %rdx, 0x9158(%rsp)
movq %rcx, 0x9150(%rsp)
movq %r8, 0x9148(%rsp)
movl %r9d, 0x9144(%rsp)
movq 0x9168(%rsp), %rax
addq %rax, %rax
movq %rax, 0x9138(%rsp)
movq 0x9158(%rsp), %rax
addq %rax, %rax
movq %rax, 0x9130(%rsp)
movl 0x28(%rbp), %eax
addl $0x7, %eax
movl %eax, 0xa1c(%rsp)
movl $0x3, 0xa18(%rsp)
movq 0x9138(%rsp), %rax
movq 0x9160(%rsp), %rcx
addq %rcx, %rcx
leaq (%rcx,%rcx,2), %rcx
negq %rcx
leaq -0x6(%rax,%rcx), %rax
movq %rax, 0xa10(%rsp)
vxorps %xmm0, %xmm0, %xmm0
vmovdqa %xmm0, 0x9170(%rsp)
vmovdqa 0x9170(%rsp), %xmm0
vmovdqa %xmm0, 0xa00(%rsp)
vxorps %xmm0, %xmm0, %xmm0
vmovaps %ymm0, 0x9180(%rsp)
vmovaps 0x9180(%rsp), %ymm0
vmovaps %ymm0, 0x9e0(%rsp)
vmovdqa 0xa00(%rsp), %xmm0
movl $0x80, %eax
vpinsrw $0x3, %eax, %xmm0, %xmm0
vmovdqa %xmm0, 0x9d0(%rsp)
vmovaps 0x9e0(%rsp), %ymm0
vmovaps %ymm0, 0x9a0(%rsp)
movl 0x38(%rbp), %eax
movq 0x30(%rbp), %rcx
movl (%rcx), %ecx
subl %ecx, %eax
movb %al, %cl
addb $0x8, %cl
movl $0x1, %eax
movl %eax, 0x18(%rsp)
shll %cl, %eax
decl %eax
movw %ax, 0x91ae(%rsp)
movw 0x91ae(%rsp), %ax
movw %ax, 0x16(%rsp)
movw %ax, 0xa0be(%rsp)
movw %ax, 0xa0bc(%rsp)
movw %ax, 0xa0ba(%rsp)
movw %ax, 0xa0b8(%rsp)
movw %ax, 0xa0b6(%rsp)
movw %ax, 0xa0b4(%rsp)
movw %ax, 0xa0b2(%rsp)
movw %ax, 0xa0b0(%rsp)
movw %ax, 0xa0ae(%rsp)
movw %ax, 0xa0ac(%rsp)
movw %ax, 0xa0aa(%rsp)
movw %ax, 0xa0a8(%rsp)
movw %ax, 0xa0a6(%rsp)
movw %ax, 0xa0a4(%rsp)
movw %ax, 0xa0a2(%rsp)
movw %ax, 0xa0a0(%rsp)
movzwl 0xa0a0(%rsp), %eax
vmovd %eax, %xmm0
movzwl 0xa0a2(%rsp), %eax
vpinsrw $0x1, %eax, %xmm0, %xmm0
movzwl 0xa0a4(%rsp), %eax
vpinsrw $0x2, %eax, %xmm0, %xmm0
movzwl 0xa0a6(%rsp), %eax
vpinsrw $0x3, %eax, %xmm0, %xmm0
movzwl 0xa0a8(%rsp), %eax
vpinsrw $0x4, %eax, %xmm0, %xmm0
movzwl 0xa0aa(%rsp), %eax
vpinsrw $0x5, %eax, %xmm0, %xmm0
movzwl 0xa0ac(%rsp), %eax
vpinsrw $0x6, %eax, %xmm0, %xmm0
movzwl 0xa0ae(%rsp), %eax
vpinsrw $0x7, %eax, %xmm0, %xmm0
movzwl 0xa0b0(%rsp), %eax
vmovd %eax, %xmm1
movzwl 0xa0b2(%rsp), %eax
vpinsrw $0x1, %eax, %xmm1, %xmm1
movzwl 0xa0b4(%rsp), %eax
vpinsrw $0x2, %eax, %xmm1, %xmm1
movzwl 0xa0b6(%rsp), %eax
vpinsrw $0x3, %eax, %xmm1, %xmm1
movzwl 0xa0b8(%rsp), %eax
vpinsrw $0x4, %eax, %xmm1, %xmm1
movzwl 0xa0ba(%rsp), %eax
vpinsrw $0x5, %eax, %xmm1, %xmm1
movzwl 0xa0bc(%rsp), %eax
vpinsrw $0x6, %eax, %xmm1, %xmm1
movzwl 0xa0be(%rsp), %eax
vpinsrw $0x7, %eax, %xmm1, %xmm1
vmovdqa %xmm1, 0xa090(%rsp)
vmovdqa %xmm0, 0xa080(%rsp)
vmovaps 0xa080(%rsp), %ymm0
vmovaps %ymm0, 0x980(%rsp)
movq 0x9148(%rsp), %rdi
vzeroupper
callq 0x947a90
vmovaps %xmm0, %xmm1
vmovdqa 0x9d0(%rsp), %xmm0
vmovdqa %xmm1, 0x91e0(%rsp)
vmovdqa %xmm0, 0x91d0(%rsp)
vmovdqa 0x91e0(%rsp), %xmm0
vmovdqa 0x91d0(%rsp), %xmm1
vpaddw %xmm1, %xmm0, %xmm0
vmovdqa %xmm0, 0x970(%rsp)
vmovdqa 0x970(%rsp), %xmm0
vmovdqa %xmm0, 0x9220(%rsp)
vmovdqa %xmm0, 0x9210(%rsp)
vmovdqa 0x9220(%rsp), %xmm0
vmovdqa 0x9210(%rsp), %xmm1
vpunpckldq %xmm1, %xmm0, %xmm0 # xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1]
vmovdqa %xmm0, 0x960(%rsp)
vmovdqa 0x970(%rsp), %xmm0
vmovdqa %xmm0, 0x9260(%rsp)
vmovdqa %xmm0, 0x9250(%rsp)
vmovdqa 0x9260(%rsp), %xmm0
vmovdqa 0x9250(%rsp), %xmm1
vpunpckhdq %xmm1, %xmm0, %xmm0 # xmm0 = xmm0[2],xmm1[2],xmm0[3],xmm1[3]
vmovdqa %xmm0, 0x950(%rsp)
vmovdqa 0x960(%rsp), %xmm0
vmovdqa %xmm0, 0x92e0(%rsp)
vmovdqa %xmm0, 0x92d0(%rsp)
vmovdqa 0x92e0(%rsp), %xmm0
vmovdqa 0x92d0(%rsp), %xmm1
vpunpcklqdq %xmm1, %xmm0, %xmm0 # xmm0 = xmm0[0],xmm1[0]
vmovdqa %xmm0, 0x940(%rsp)
vmovdqa 0x960(%rsp), %xmm0
vmovdqa %xmm0, 0x9360(%rsp)
vmovdqa %xmm0, 0x9350(%rsp)
vmovdqa 0x9360(%rsp), %xmm0
vmovdqa 0x9350(%rsp), %xmm1
vpunpckhqdq %xmm1, %xmm0, %xmm0 # xmm0 = xmm0[1],xmm1[1]
vmovdqa %xmm0, 0x930(%rsp)
vmovdqa 0x950(%rsp), %xmm0
vmovdqa %xmm0, 0x92c0(%rsp)
vmovdqa %xmm0, 0x92b0(%rsp)
vmovdqa 0x92c0(%rsp), %xmm0
vmovdqa 0x92b0(%rsp), %xmm1
vpunpcklqdq %xmm1, %xmm0, %xmm0 # xmm0 = xmm0[0],xmm1[0]
vmovdqa %xmm0, 0x920(%rsp)
vmovdqa 0x950(%rsp), %xmm0
vmovdqa %xmm0, 0x9340(%rsp)
vmovdqa %xmm0, 0x9330(%rsp)
vmovdqa 0x9340(%rsp), %xmm0
vmovdqa 0x9330(%rsp), %xmm1
vpunpckhqdq %xmm1, %xmm0, %xmm0 # xmm0 = xmm0[1],xmm1[1]
vmovdqa %xmm0, 0x910(%rsp)
vmovdqa 0x940(%rsp), %xmm1
vmovaps %xmm1, %xmm0
callq 0x947ab0
vmovaps %ymm0, 0x8e0(%rsp)
vmovdqa 0x930(%rsp), %xmm1
vmovaps %xmm1, %xmm0
callq 0x947ab0
vmovaps %ymm0, 0x8c0(%rsp)
vmovdqa 0x920(%rsp), %xmm1
vmovaps %xmm1, %xmm0
callq 0x947ab0
vmovaps %ymm0, 0x8a0(%rsp)
vmovdqa 0x910(%rsp), %xmm1
vmovaps %xmm1, %xmm0
callq 0x947ab0
movl 0x18(%rsp), %edx
vmovaps %ymm0, 0x880(%rsp)
movq 0x30(%rbp), %rax
movb (%rax), %cl
decb %cl
movl %edx, %eax
shll %cl, %eax
movb 0x38(%rbp), %cl
addb $0x6, %cl
shll %cl, %edx
movl %edx, %ecx
addl %ecx, %eax
movl %eax, 0x937c(%rsp)
movl 0x937c(%rsp), %eax
movl %eax, 0x1c(%rsp)
movl %eax, 0xa13c(%rsp)
movl %eax, 0xa138(%rsp)
movl %eax, 0xa134(%rsp)
movl %eax, 0xa130(%rsp)
movl %eax, 0xa12c(%rsp)
movl %eax, 0xa128(%rsp)
movl %eax, 0xa124(%rsp)
movl %eax, 0xa120(%rsp)
movl 0xa124(%rsp), %edx
movl 0xa128(%rsp), %ecx
movl 0xa12c(%rsp), %eax
movl 0xa134(%rsp), %r8d
movl 0xa138(%rsp), %edi
movl 0xa13c(%rsp), %esi
vmovd 0xa130(%rsp), %xmm0
vpinsrd $0x1, %r8d, %xmm0, %xmm0
vpinsrd $0x2, %edi, %xmm0, %xmm0
vpinsrd $0x3, %esi, %xmm0, %xmm1
vmovd 0xa120(%rsp), %xmm0
vpinsrd $0x1, %edx, %xmm0, %xmm0
vpinsrd $0x2, %ecx, %xmm0, %xmm0
vpinsrd $0x3, %eax, %xmm0, %xmm2
vmovaps %xmm2, %xmm0
vinserti128 $0x1, %xmm1, %ymm0, %ymm0
vmovdqa %ymm0, 0xa100(%rsp)
vmovdqa 0xa100(%rsp), %ymm0
vmovdqa %ymm0, 0x860(%rsp)
movl $0x0, 0x85c(%rsp)
movl 0x85c(%rsp), %eax
cmpl 0xa1c(%rsp), %eax
jge 0x946cb4
movl $0x0, 0x858(%rsp)
movl 0x858(%rsp), %eax
cmpl 0x20(%rbp), %eax
jge 0x946c9c
movq 0xa10(%rsp), %rax
movslq 0x85c(%rsp), %rcx
imulq 0x9160(%rsp), %rcx
shlq %rcx
addq %rcx, %rax
movslq 0x858(%rsp), %rcx
shlq %rcx
addq %rcx, %rax
movq %rax, 0x850(%rsp)
movq 0x850(%rsp), %rdi
callq 0x947ae0
vmovdqa %ymm0, 0x820(%rsp)
movq 0x850(%rsp), %rdi
addq $0x2, %rdi
callq 0x947ae0
vmovdqa %ymm0, 0x800(%rsp)
movq 0x850(%rsp), %rdi
addq $0x4, %rdi
callq 0x947ae0
vmovdqa %ymm0, 0x7e0(%rsp)
movq 0x850(%rsp), %rdi
addq $0x6, %rdi
callq 0x947ae0
vmovdqa %ymm0, 0x7c0(%rsp)
movq 0x850(%rsp), %rdi
addq $0x8, %rdi
callq 0x947ae0
vmovdqa %ymm0, 0x7a0(%rsp)
movq 0x850(%rsp), %rdi
addq $0xa, %rdi
callq 0x947ae0
vmovdqa %ymm0, 0x780(%rsp)
movq 0x850(%rsp), %rdi
addq $0xc, %rdi
callq 0x947ae0
vmovdqa %ymm0, 0x760(%rsp)
movq 0x850(%rsp), %rdi
addq $0xe, %rdi
callq 0x947ae0
vmovdqa %ymm0, 0x740(%rsp)
vmovdqa 0x820(%rsp), %ymm1
vmovdqa 0x8e0(%rsp), %ymm0
vmovdqa %ymm1, 0x9760(%rsp)
vmovdqa %ymm0, 0x9740(%rsp)
vmovdqa 0x9760(%rsp), %ymm0
vmovdqa 0x9740(%rsp), %ymm1
vpmaddwd %ymm1, %ymm0, %ymm0
vmovdqa %ymm0, 0x720(%rsp)
vmovdqa 0x800(%rsp), %ymm1
vmovdqa 0x8e0(%rsp), %ymm0
vmovdqa %ymm1, 0x9720(%rsp)
vmovdqa %ymm0, 0x9700(%rsp)
vmovdqa 0x9720(%rsp), %ymm0
vmovdqa 0x9700(%rsp), %ymm1
vpmaddwd %ymm1, %ymm0, %ymm0
vmovdqa %ymm0, 0x700(%rsp)
vmovdqa 0x7e0(%rsp), %ymm1
vmovdqa 0x8c0(%rsp), %ymm0
vmovdqa %ymm1, 0x96e0(%rsp)
vmovdqa %ymm0, 0x96c0(%rsp)
vmovdqa 0x96e0(%rsp), %ymm0
vmovdqa 0x96c0(%rsp), %ymm1
vpmaddwd %ymm1, %ymm0, %ymm0
vmovdqa %ymm0, 0x6e0(%rsp)
vmovdqa 0x7c0(%rsp), %ymm1
vmovdqa 0x8c0(%rsp), %ymm0
vmovdqa %ymm1, 0x96a0(%rsp)
vmovdqa %ymm0, 0x9680(%rsp)
vmovdqa 0x96a0(%rsp), %ymm0
vmovdqa 0x9680(%rsp), %ymm1
vpmaddwd %ymm1, %ymm0, %ymm0
vmovdqa %ymm0, 0x6c0(%rsp)
vmovdqa 0x7a0(%rsp), %ymm1
vmovdqa 0x8a0(%rsp), %ymm0
vmovdqa %ymm1, 0x9660(%rsp)
vmovdqa %ymm0, 0x9640(%rsp)
vmovdqa 0x9660(%rsp), %ymm0
vmovdqa 0x9640(%rsp), %ymm1
vpmaddwd %ymm1, %ymm0, %ymm0
vmovdqa %ymm0, 0x6a0(%rsp)
vmovdqa 0x780(%rsp), %ymm1
vmovdqa 0x8a0(%rsp), %ymm0
vmovdqa %ymm1, 0x9620(%rsp)
vmovdqa %ymm0, 0x9600(%rsp)
vmovdqa 0x9620(%rsp), %ymm0
vmovdqa 0x9600(%rsp), %ymm1
vpmaddwd %ymm1, %ymm0, %ymm0
vmovdqa %ymm0, 0x680(%rsp)
vmovdqa 0x760(%rsp), %ymm1
vmovdqa 0x880(%rsp), %ymm0
vmovdqa %ymm1, 0x95e0(%rsp)
vmovdqa %ymm0, 0x95c0(%rsp)
vmovdqa 0x95e0(%rsp), %ymm0
vmovdqa 0x95c0(%rsp), %ymm1
vpmaddwd %ymm1, %ymm0, %ymm0
vmovdqa %ymm0, 0x660(%rsp)
vmovdqa 0x740(%rsp), %ymm1
vmovdqa 0x880(%rsp), %ymm0
vmovdqa %ymm1, 0x95a0(%rsp)
vmovdqa %ymm0, 0x9580(%rsp)
vmovdqa 0x95a0(%rsp), %ymm0
vmovdqa 0x9580(%rsp), %ymm1
vpmaddwd %ymm1, %ymm0, %ymm0
vmovdqa %ymm0, 0x640(%rsp)
vmovdqa 0x720(%rsp), %ymm1
vmovdqa 0x6a0(%rsp), %ymm0
vmovdqa %ymm1, 0x9b60(%rsp)
vmovdqa %ymm0, 0x9b40(%rsp)
vmovdqa 0x9b60(%rsp), %ymm0
vmovdqa 0x9b40(%rsp), %ymm1
vpaddd %ymm1, %ymm0, %ymm1
vmovdqa 0x6e0(%rsp), %ymm2
vmovdqa 0x660(%rsp), %ymm0
vmovdqa %ymm2, 0x9b20(%rsp)
vmovdqa %ymm0, 0x9b00(%rsp)
vmovdqa 0x9b20(%rsp), %ymm0
vmovdqa 0x9b00(%rsp), %ymm2
vpaddd %ymm2, %ymm0, %ymm0
vmovdqa %ymm1, 0x9ae0(%rsp)
vmovdqa %ymm0, 0x9ac0(%rsp)
vmovdqa 0x9ae0(%rsp), %ymm0
vmovdqa 0x9ac0(%rsp), %ymm1
vpaddd %ymm1, %ymm0, %ymm0
vmovdqa %ymm0, 0x620(%rsp)
vmovdqa 0x620(%rsp), %ymm1
vmovdqa 0x860(%rsp), %ymm0
vmovdqa %ymm1, 0x9aa0(%rsp)
vmovdqa %ymm0, 0x9a80(%rsp)
vmovdqa 0x9aa0(%rsp), %ymm0
vmovdqa 0x9a80(%rsp), %ymm1
vpaddd %ymm1, %ymm0, %ymm0
movq 0x30(%rbp), %rax
movl (%rax), %eax
vmovdqa %ymm0, 0x9c60(%rsp)
movl %eax, 0x9c5c(%rsp)
vmovdqa 0x9c60(%rsp), %ymm0
movl 0x9c5c(%rsp), %eax
vmovd %eax, %xmm1
vpsrad %xmm1, %ymm0, %ymm0
vmovdqa %ymm0, 0x600(%rsp)
vmovdqa 0x700(%rsp), %ymm1
vmovdqa 0x680(%rsp), %ymm0
vmovdqa %ymm1, 0x9a60(%rsp)
vmovdqa %ymm0, 0x9a40(%rsp)
vmovdqa 0x9a60(%rsp), %ymm0
vmovdqa 0x9a40(%rsp), %ymm1
vpaddd %ymm1, %ymm0, %ymm1
vmovdqa 0x6c0(%rsp), %ymm2
vmovdqa 0x640(%rsp), %ymm0
vmovdqa %ymm2, 0x9a20(%rsp)
vmovdqa %ymm0, 0x9a00(%rsp)
vmovdqa 0x9a20(%rsp), %ymm0
vmovdqa 0x9a00(%rsp), %ymm2
vpaddd %ymm2, %ymm0, %ymm0
vmovdqa %ymm1, 0x99e0(%rsp)
vmovdqa %ymm0, 0x99c0(%rsp)
vmovdqa 0x99e0(%rsp), %ymm0
vmovdqa 0x99c0(%rsp), %ymm1
vpaddd %ymm1, %ymm0, %ymm0
vmovdqa %ymm0, 0x5e0(%rsp)
vmovdqa 0x5e0(%rsp), %ymm1
vmovdqa 0x860(%rsp), %ymm0
vmovdqa %ymm1, 0x99a0(%rsp)
vmovdqa %ymm0, 0x9980(%rsp)
vmovdqa 0x99a0(%rsp), %ymm0
vmovdqa 0x9980(%rsp), %ymm1
vpaddd %ymm1, %ymm0, %ymm0
movq 0x30(%rbp), %rax
movl (%rax), %eax
vmovdqa %ymm0, 0x9c20(%rsp)
movl %eax, 0x9c1c(%rsp)
vmovdqa 0x9c20(%rsp), %ymm0
movl 0x9c1c(%rsp), %eax
vmovd %eax, %xmm1
vpsrad %xmm1, %ymm0, %ymm0
vmovdqa %ymm0, 0x5c0(%rsp)
vmovdqa 0x600(%rsp), %ymm1
vmovdqa 0x5c0(%rsp), %ymm0
vmovdqa %ymm1, 0x9ce0(%rsp)
vmovdqa %ymm0, 0x9cc0(%rsp)
vmovdqa 0x9ce0(%rsp), %ymm0
vmovdqa 0x9cc0(%rsp), %ymm1
vpackssdw %ymm1, %ymm0, %ymm0
vmovdqa %ymm0, 0x5a0(%rsp)
vmovdqa 0x5a0(%rsp), %ymm1
vmovdqa 0x9a0(%rsp), %ymm0
vmovdqa %ymm1, 0x9de0(%rsp)
vmovdqa %ymm0, 0x9dc0(%rsp)
vmovdqa 0x9de0(%rsp), %ymm0
vmovdqa 0x9dc0(%rsp), %ymm1
vpmaxsw %ymm1, %ymm0, %ymm1
vmovdqa 0x980(%rsp), %ymm0
vmovdqa %ymm1, 0x9d60(%rsp)
vmovdqa %ymm0, 0x9d40(%rsp)
vmovdqa 0x9d60(%rsp), %ymm0
vmovdqa 0x9d40(%rsp), %ymm1
vpminsw %ymm1, %ymm0, %ymm0
vmovdqa %ymm0, 0x580(%rsp)
leaq 0xa20(%rsp), %rdi
movl 0x85c(%rsp), %eax
shll $0x7, %eax
cltq
shlq %rax
addq %rax, %rdi
movslq 0x858(%rsp), %rax
shlq %rax
addq %rax, %rdi
vmovdqa 0x580(%rsp), %ymm0
callq 0x947b00
movl 0x858(%rsp), %eax
addl $0x10, %eax
movl %eax, 0x858(%rsp)
jmp 0x946644
jmp 0x946c9e
movl 0x85c(%rsp), %eax
addl $0x1, %eax
movl %eax, 0x85c(%rsp)
jmp 0x946625
movb 0x38(%rbp), %cl
movl $0x1, %eax
movl %eax, 0xc(%rsp)
shll %cl, %eax
decl %eax
movw %ax, 0x91ac(%rsp)
movw 0x91ac(%rsp), %ax
movw %ax, 0xa(%rsp)
movw %ax, 0xa0fe(%rsp)
movw %ax, 0xa0fc(%rsp)
movw %ax, 0xa0fa(%rsp)
movw %ax, 0xa0f8(%rsp)
movw %ax, 0xa0f6(%rsp)
movw %ax, 0xa0f4(%rsp)
movw %ax, 0xa0f2(%rsp)
movw %ax, 0xa0f0(%rsp)
movw %ax, 0xa0ee(%rsp)
movw %ax, 0xa0ec(%rsp)
movw %ax, 0xa0ea(%rsp)
movw %ax, 0xa0e8(%rsp)
movw %ax, 0xa0e6(%rsp)
movw %ax, 0xa0e4(%rsp)
movw %ax, 0xa0e2(%rsp)
movw %ax, 0xa0e0(%rsp)
movzwl 0xa0e0(%rsp), %eax
vmovd %eax, %xmm0
movzwl 0xa0e2(%rsp), %eax
vpinsrw $0x1, %eax, %xmm0, %xmm0
movzwl 0xa0e4(%rsp), %eax
vpinsrw $0x2, %eax, %xmm0, %xmm0
movzwl 0xa0e6(%rsp), %eax
vpinsrw $0x3, %eax, %xmm0, %xmm0
movzwl 0xa0e8(%rsp), %eax
vpinsrw $0x4, %eax, %xmm0, %xmm0
movzwl 0xa0ea(%rsp), %eax
vpinsrw $0x5, %eax, %xmm0, %xmm0
movzwl 0xa0ec(%rsp), %eax
vpinsrw $0x6, %eax, %xmm0, %xmm0
movzwl 0xa0ee(%rsp), %eax
vpinsrw $0x7, %eax, %xmm0, %xmm0
movzwl 0xa0f0(%rsp), %eax
vmovd %eax, %xmm1
movzwl 0xa0f2(%rsp), %eax
vpinsrw $0x1, %eax, %xmm1, %xmm1
movzwl 0xa0f4(%rsp), %eax
vpinsrw $0x2, %eax, %xmm1, %xmm1
movzwl 0xa0f6(%rsp), %eax
vpinsrw $0x3, %eax, %xmm1, %xmm1
movzwl 0xa0f8(%rsp), %eax
vpinsrw $0x4, %eax, %xmm1, %xmm1
movzwl 0xa0fa(%rsp), %eax
vpinsrw $0x5, %eax, %xmm1, %xmm1
movzwl 0xa0fc(%rsp), %eax
vpinsrw $0x6, %eax, %xmm1, %xmm1
movzwl 0xa0fe(%rsp), %eax
vpinsrw $0x7, %eax, %xmm1, %xmm1
vmovdqa %xmm1, 0xa0d0(%rsp)
vmovdqa %xmm0, 0xa0c0(%rsp)
vmovaps 0xa0c0(%rsp), %ymm0
vmovaps %ymm0, 0x560(%rsp)
movq 0x10(%rbp), %rdi
vzeroupper
callq 0x947a90
vmovaps %xmm0, %xmm1
vmovdqa 0x9d0(%rsp), %xmm0
vmovdqa %xmm1, 0x91c0(%rsp)
vmovdqa %xmm0, 0x91b0(%rsp)
vmovdqa 0x91c0(%rsp), %xmm0
vmovdqa 0x91b0(%rsp), %xmm1
vpaddw %xmm1, %xmm0, %xmm0
vmovdqa %xmm0, 0x550(%rsp)
vmovdqa 0x550(%rsp), %xmm0
vmovdqa %xmm0, 0x9200(%rsp)
vmovdqa %xmm0, 0x91f0(%rsp)
vmovdqa 0x9200(%rsp), %xmm0
vmovdqa 0x91f0(%rsp), %xmm1
vpunpckldq %xmm1, %xmm0, %xmm0 # xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1]
vmovdqa %xmm0, 0x540(%rsp)
vmovdqa 0x550(%rsp), %xmm0
vmovdqa %xmm0, 0x9240(%rsp)
vmovdqa %xmm0, 0x9230(%rsp)
vmovdqa 0x9240(%rsp), %xmm0
vmovdqa 0x9230(%rsp), %xmm1
vpunpckhdq %xmm1, %xmm0, %xmm0 # xmm0 = xmm0[2],xmm1[2],xmm0[3],xmm1[3]
vmovdqa %xmm0, 0x530(%rsp)
vmovdqa 0x540(%rsp), %xmm0
vmovdqa %xmm0, 0x92a0(%rsp)
vmovdqa %xmm0, 0x9290(%rsp)
vmovdqa 0x92a0(%rsp), %xmm0
vmovdqa 0x9290(%rsp), %xmm1
vpunpcklqdq %xmm1, %xmm0, %xmm0 # xmm0 = xmm0[0],xmm1[0]
vmovdqa %xmm0, 0x520(%rsp)
vmovdqa 0x540(%rsp), %xmm0
vmovdqa %xmm0, 0x9320(%rsp)
vmovdqa %xmm0, 0x9310(%rsp)
vmovdqa 0x9320(%rsp), %xmm0
vmovdqa 0x9310(%rsp), %xmm1
vpunpckhqdq %xmm1, %xmm0, %xmm0 # xmm0 = xmm0[1],xmm1[1]
vmovdqa %xmm0, 0x510(%rsp)
vmovdqa 0x530(%rsp), %xmm0
vmovdqa %xmm0, 0x9280(%rsp)
vmovdqa %xmm0, 0x9270(%rsp)
vmovdqa 0x9280(%rsp), %xmm0
vmovdqa 0x9270(%rsp), %xmm1
vpunpcklqdq %xmm1, %xmm0, %xmm0 # xmm0 = xmm0[0],xmm1[0]
vmovdqa %xmm0, 0x500(%rsp)
vmovdqa 0x530(%rsp), %xmm0
vmovdqa %xmm0, 0x9300(%rsp)
vmovdqa %xmm0, 0x92f0(%rsp)
vmovdqa 0x9300(%rsp), %xmm0
vmovdqa 0x92f0(%rsp), %xmm1
vpunpckhqdq %xmm1, %xmm0, %xmm0 # xmm0 = xmm0[1],xmm1[1]
vmovdqa %xmm0, 0x4f0(%rsp)
vmovdqa 0x520(%rsp), %xmm1
vmovaps %xmm1, %xmm0
callq 0x947ab0
vmovaps %ymm0, 0x4c0(%rsp)
vmovdqa 0x510(%rsp), %xmm1
vmovaps %xmm1, %xmm0
callq 0x947ab0
vmovaps %ymm0, 0x4a0(%rsp)
vmovdqa 0x500(%rsp), %xmm1
vmovaps %xmm1, %xmm0
callq 0x947ab0
vmovaps %ymm0, 0x480(%rsp)
vmovdqa 0x4f0(%rsp), %xmm1
vmovaps %xmm1, %xmm0
callq 0x947ab0
movl 0xc(%rsp), %edx
vmovaps %ymm0, 0x460(%rsp)
movq 0x30(%rbp), %rax
movl 0x4(%rax), %esi
movb %sil, %cl
decb %cl
movl %edx, %eax
shll %cl, %eax
movl 0x38(%rbp), %ecx
addl %esi, %ecx
decb %cl
shll %cl, %edx
movl %edx, %ecx
subl %ecx, %eax
movl %eax, 0x9378(%rsp)
movl 0x9378(%rsp), %eax
movl %eax, 0x10(%rsp)
movl %eax, 0xa18c(%rsp)
movl %eax, 0xa188(%rsp)
movl %eax, 0xa184(%rsp)
movl %eax, 0xa180(%rsp)
movl %eax, 0xa17c(%rsp)
movl %eax, 0xa178(%rsp)
movl %eax, 0xa174(%rsp)
movl %eax, 0xa170(%rsp)
movl 0xa174(%rsp), %edx
movl 0xa178(%rsp), %ecx
movl 0xa17c(%rsp), %eax
movl 0xa184(%rsp), %r8d
movl 0xa188(%rsp), %edi
movl 0xa18c(%rsp), %esi
vmovd 0xa180(%rsp), %xmm0
vpinsrd $0x1, %r8d, %xmm0, %xmm0
vpinsrd $0x2, %edi, %xmm0, %xmm0
vpinsrd $0x3, %esi, %xmm0, %xmm1
vmovd 0xa170(%rsp), %xmm0
vpinsrd $0x1, %edx, %xmm0, %xmm0
vpinsrd $0x2, %ecx, %xmm0, %xmm0
vpinsrd $0x3, %eax, %xmm0, %xmm2
vmovaps %xmm2, %xmm0
vinserti128 $0x1, %xmm1, %ymm0, %ymm0
vmovdqa %ymm0, 0xa140(%rsp)
vmovdqa 0xa140(%rsp), %ymm0
vmovdqa %ymm0, 0x440(%rsp)
movl $0x0, 0x43c(%rsp)
movl 0x43c(%rsp), %eax
cmpl 0x28(%rbp), %eax
jge 0x947a7f
movl $0x0, 0x438(%rsp)
movl 0x438(%rsp), %eax
cmpl 0x20(%rbp), %eax
jge 0x947a67
movl 0x43c(%rsp), %eax
shll $0x7, %eax
cltq
leaq 0xa20(%rsp,%rax,2), %rax
movslq 0x438(%rsp), %rcx
leaq (%rax,%rcx,2), %rax
movq %rax, 0x430(%rsp)
movq 0x430(%rsp), %rdi
callq 0x947ae0
vmovaps %ymm0, 0x400(%rsp)
movq 0x430(%rsp), %rdi
addq $0x100, %rdi # imm = 0x100
callq 0x947ae0
vmovaps %ymm0, 0x3e0(%rsp)
movq 0x430(%rsp), %rdi
addq $0x200, %rdi # imm = 0x200
callq 0x947ae0
vmovaps %ymm0, 0x3c0(%rsp)
movq 0x430(%rsp), %rdi
addq $0x300, %rdi # imm = 0x300
callq 0x947ae0
vmovaps %ymm0, 0x3a0(%rsp)
movq 0x430(%rsp), %rdi
addq $0x400, %rdi # imm = 0x400
callq 0x947ae0
vmovaps %ymm0, 0x380(%rsp)
movq 0x430(%rsp), %rdi
addq $0x500, %rdi # imm = 0x500
callq 0x947ae0
vmovaps %ymm0, 0x360(%rsp)
movq 0x430(%rsp), %rdi
addq $0x600, %rdi # imm = 0x600
callq 0x947ae0
vmovaps %ymm0, 0x340(%rsp)
movq 0x430(%rsp), %rdi
addq $0x700, %rdi # imm = 0x700
callq 0x947ae0
vmovaps %ymm0, 0x320(%rsp)
vmovaps 0x400(%rsp), %ymm1
vmovaps 0x3e0(%rsp), %ymm0
vmovaps %ymm1, 0x9ee0(%rsp)
vmovaps %ymm0, 0x9ec0(%rsp)
vmovaps 0x9ee0(%rsp), %ymm0
vmovaps 0x9ec0(%rsp), %ymm1
vpunpcklwd %ymm1, %ymm0, %ymm0 # ymm0 = ymm0[0],ymm1[0],ymm0[1],ymm1[1],ymm0[2],ymm1[2],ymm0[3],ymm1[3],ymm0[8],ymm1[8],ymm0[9],ymm1[9],ymm0[10],ymm1[10],ymm0[11],ymm1[11]
vmovaps %ymm0, 0x300(%rsp)
vmovaps 0x3c0(%rsp), %ymm1
vmovaps 0x3a0(%rsp), %ymm0
vmovaps %ymm1, 0x9ea0(%rsp)
vmovaps %ymm0, 0x9e80(%rsp)
vmovaps 0x9ea0(%rsp), %ymm0
vmovaps 0x9e80(%rsp), %ymm1
vpunpcklwd %ymm1, %ymm0, %ymm0 # ymm0 = ymm0[0],ymm1[0],ymm0[1],ymm1[1],ymm0[2],ymm1[2],ymm0[3],ymm1[3],ymm0[8],ymm1[8],ymm0[9],ymm1[9],ymm0[10],ymm1[10],ymm0[11],ymm1[11]
vmovaps %ymm0, 0x2e0(%rsp)
vmovaps 0x380(%rsp), %ymm1
vmovaps 0x360(%rsp), %ymm0
vmovaps %ymm1, 0x9e60(%rsp)
vmovaps %ymm0, 0x9e40(%rsp)
vmovaps 0x9e60(%rsp), %ymm0
vmovaps 0x9e40(%rsp), %ymm1
vpunpcklwd %ymm1, %ymm0, %ymm0 # ymm0 = ymm0[0],ymm1[0],ymm0[1],ymm1[1],ymm0[2],ymm1[2],ymm0[3],ymm1[3],ymm0[8],ymm1[8],ymm0[9],ymm1[9],ymm0[10],ymm1[10],ymm0[11],ymm1[11]
vmovaps %ymm0, 0x2c0(%rsp)
vmovaps 0x340(%rsp), %ymm1
vmovaps 0x320(%rsp), %ymm0
vmovaps %ymm1, 0x9e20(%rsp)
vmovaps %ymm0, 0x9e00(%rsp)
vmovaps 0x9e20(%rsp), %ymm0
vmovaps 0x9e00(%rsp), %ymm1
vpunpcklwd %ymm1, %ymm0, %ymm0 # ymm0 = ymm0[0],ymm1[0],ymm0[1],ymm1[1],ymm0[2],ymm1[2],ymm0[3],ymm1[3],ymm0[8],ymm1[8],ymm0[9],ymm1[9],ymm0[10],ymm1[10],ymm0[11],ymm1[11]
vmovaps %ymm0, 0x2a0(%rsp)
vmovaps 0x300(%rsp), %ymm1
vmovaps 0x4c0(%rsp), %ymm0
vmovaps %ymm1, 0x9560(%rsp)
vmovaps %ymm0, 0x9540(%rsp)
vmovaps 0x9560(%rsp), %ymm0
vmovaps 0x9540(%rsp), %ymm1
vpmaddwd %ymm1, %ymm0, %ymm0
vmovaps %ymm0, 0x280(%rsp)
vmovaps 0x2e0(%rsp), %ymm1
vmovaps 0x4a0(%rsp), %ymm0
vmovaps %ymm1, 0x9520(%rsp)
vmovaps %ymm0, 0x9500(%rsp)
vmovaps 0x9520(%rsp), %ymm0
vmovaps 0x9500(%rsp), %ymm1
vpmaddwd %ymm1, %ymm0, %ymm0
vmovaps %ymm0, 0x260(%rsp)
vmovaps 0x2c0(%rsp), %ymm1
vmovaps 0x480(%rsp), %ymm0
vmovaps %ymm1, 0x94e0(%rsp)
vmovaps %ymm0, 0x94c0(%rsp)
vmovaps 0x94e0(%rsp), %ymm0
vmovaps 0x94c0(%rsp), %ymm1
vpmaddwd %ymm1, %ymm0, %ymm0
vmovaps %ymm0, 0x240(%rsp)
vmovaps 0x2a0(%rsp), %ymm1
vmovaps 0x460(%rsp), %ymm0
vmovaps %ymm1, 0x94a0(%rsp)
vmovaps %ymm0, 0x9480(%rsp)
vmovaps 0x94a0(%rsp), %ymm0
vmovaps 0x9480(%rsp), %ymm1
vpmaddwd %ymm1, %ymm0, %ymm0
vmovaps %ymm0, 0x220(%rsp)
vmovaps 0x280(%rsp), %ymm1
vmovaps 0x260(%rsp), %ymm0
vmovaps %ymm1, 0x9960(%rsp)
vmovaps %ymm0, 0x9940(%rsp)
vmovaps 0x9960(%rsp), %ymm0
vmovaps 0x9940(%rsp), %ymm1
vpaddd %ymm1, %ymm0, %ymm1
vmovaps 0x240(%rsp), %ymm2
vmovaps 0x220(%rsp), %ymm0
vmovaps %ymm2, 0x9920(%rsp)
vmovaps %ymm0, 0x9900(%rsp)
vmovaps 0x9920(%rsp), %ymm0
vmovaps 0x9900(%rsp), %ymm2
vpaddd %ymm2, %ymm0, %ymm0
vmovaps %ymm1, 0x98e0(%rsp)
vmovaps %ymm0, 0x98c0(%rsp)
vmovaps 0x98e0(%rsp), %ymm0
vmovaps 0x98c0(%rsp), %ymm1
vpaddd %ymm1, %ymm0, %ymm0
vmovaps %ymm0, 0x200(%rsp)
vmovaps 0x400(%rsp), %ymm1
vmovaps 0x3e0(%rsp), %ymm0
vmovaps %ymm1, 0x9fe0(%rsp)
vmovaps %ymm0, 0x9fc0(%rsp)
vmovaps 0x9fe0(%rsp), %ymm0
vmovaps 0x9fc0(%rsp), %ymm1
vpunpckhwd %ymm1, %ymm0, %ymm0 # ymm0 = ymm0[4],ymm1[4],ymm0[5],ymm1[5],ymm0[6],ymm1[6],ymm0[7],ymm1[7],ymm0[12],ymm1[12],ymm0[13],ymm1[13],ymm0[14],ymm1[14],ymm0[15],ymm1[15]
vmovaps %ymm0, 0x1e0(%rsp)
vmovaps 0x3c0(%rsp), %ymm1
vmovaps 0x3a0(%rsp), %ymm0
vmovaps %ymm1, 0x9fa0(%rsp)
vmovaps %ymm0, 0x9f80(%rsp)
vmovaps 0x9fa0(%rsp), %ymm0
vmovaps 0x9f80(%rsp), %ymm1
vpunpckhwd %ymm1, %ymm0, %ymm0 # ymm0 = ymm0[4],ymm1[4],ymm0[5],ymm1[5],ymm0[6],ymm1[6],ymm0[7],ymm1[7],ymm0[12],ymm1[12],ymm0[13],ymm1[13],ymm0[14],ymm1[14],ymm0[15],ymm1[15]
vmovaps %ymm0, 0x1c0(%rsp)
vmovaps 0x380(%rsp), %ymm1
vmovaps 0x360(%rsp), %ymm0
vmovaps %ymm1, 0x9f60(%rsp)
vmovaps %ymm0, 0x9f40(%rsp)
vmovaps 0x9f60(%rsp), %ymm0
vmovaps 0x9f40(%rsp), %ymm1
vpunpckhwd %ymm1, %ymm0, %ymm0 # ymm0 = ymm0[4],ymm1[4],ymm0[5],ymm1[5],ymm0[6],ymm1[6],ymm0[7],ymm1[7],ymm0[12],ymm1[12],ymm0[13],ymm1[13],ymm0[14],ymm1[14],ymm0[15],ymm1[15]
vmovaps %ymm0, 0x1a0(%rsp)
vmovaps 0x340(%rsp), %ymm1
vmovaps 0x320(%rsp), %ymm0
vmovaps %ymm1, 0x9f20(%rsp)
vmovaps %ymm0, 0x9f00(%rsp)
vmovaps 0x9f20(%rsp), %ymm0
vmovaps 0x9f00(%rsp), %ymm1
vpunpckhwd %ymm1, %ymm0, %ymm0 # ymm0 = ymm0[4],ymm1[4],ymm0[5],ymm1[5],ymm0[6],ymm1[6],ymm0[7],ymm1[7],ymm0[12],ymm1[12],ymm0[13],ymm1[13],ymm0[14],ymm1[14],ymm0[15],ymm1[15]
vmovaps %ymm0, 0x180(%rsp)
vmovaps 0x1e0(%rsp), %ymm1
vmovaps 0x4c0(%rsp), %ymm0
vmovaps %ymm1, 0x9460(%rsp)
vmovaps %ymm0, 0x9440(%rsp)
vmovaps 0x9460(%rsp), %ymm0
vmovaps 0x9440(%rsp), %ymm1
vpmaddwd %ymm1, %ymm0, %ymm0
vmovaps %ymm0, 0x160(%rsp)
vmovaps 0x1c0(%rsp), %ymm1
vmovaps 0x4a0(%rsp), %ymm0
vmovaps %ymm1, 0x9420(%rsp)
vmovaps %ymm0, 0x9400(%rsp)
vmovaps 0x9420(%rsp), %ymm0
vmovaps 0x9400(%rsp), %ymm1
vpmaddwd %ymm1, %ymm0, %ymm0
vmovaps %ymm0, 0x140(%rsp)
vmovaps 0x1a0(%rsp), %ymm1
vmovaps 0x480(%rsp), %ymm0
vmovaps %ymm1, 0x93e0(%rsp)
vmovaps %ymm0, 0x93c0(%rsp)
vmovaps 0x93e0(%rsp), %ymm0
vmovaps 0x93c0(%rsp), %ymm1
vpmaddwd %ymm1, %ymm0, %ymm0
vmovaps %ymm0, 0x120(%rsp)
vmovaps 0x180(%rsp), %ymm1
vmovaps 0x460(%rsp), %ymm0
vmovaps %ymm1, 0x93a0(%rsp)
vmovaps %ymm0, 0x9380(%rsp)
vmovaps 0x93a0(%rsp), %ymm0
vmovaps 0x9380(%rsp), %ymm1
vpmaddwd %ymm1, %ymm0, %ymm0
vmovaps %ymm0, 0x100(%rsp)
vmovaps 0x160(%rsp), %ymm1
vmovaps 0x140(%rsp), %ymm0
vmovaps %ymm1, 0x98a0(%rsp)
vmovaps %ymm0, 0x9880(%rsp)
vmovaps 0x98a0(%rsp), %ymm0
vmovaps 0x9880(%rsp), %ymm1
vpaddd %ymm1, %ymm0, %ymm1
vmovaps 0x120(%rsp), %ymm2
vmovaps 0x100(%rsp), %ymm0
vmovaps %ymm2, 0x9860(%rsp)
vmovaps %ymm0, 0x9840(%rsp)
vmovaps 0x9860(%rsp), %ymm0
vmovaps 0x9840(%rsp), %ymm2
vpaddd %ymm2, %ymm0, %ymm0
vmovaps %ymm1, 0x9820(%rsp)
vmovaps %ymm0, 0x9800(%rsp)
vmovaps 0x9820(%rsp), %ymm0
vmovaps 0x9800(%rsp), %ymm1
vpaddd %ymm1, %ymm0, %ymm0
vmovaps %ymm0, 0xe0(%rsp)
vmovaps 0x200(%rsp), %ymm1
vmovaps 0xe0(%rsp), %ymm0
vmovaps %ymm1, 0xa020(%rsp)
vmovaps %ymm0, 0xa000(%rsp)
vmovaps 0xa020(%rsp), %ymm0
vmovaps 0xa000(%rsp), %ymm1
vpunpckldq %ymm1, %ymm0, %ymm0 # ymm0 = ymm0[0],ymm1[0],ymm0[1],ymm1[1],ymm0[4],ymm1[4],ymm0[5],ymm1[5]
vmovaps %ymm0, 0xc0(%rsp)
vmovaps 0x200(%rsp), %ymm1
vmovaps 0xe0(%rsp), %ymm0
vmovaps %ymm1, 0xa060(%rsp)
vmovaps %ymm0, 0xa040(%rsp)
vmovaps 0xa060(%rsp), %ymm0
vmovaps 0xa040(%rsp), %ymm1
vpunpckhdq %ymm1, %ymm0, %ymm0 # ymm0 = ymm0[2],ymm1[2],ymm0[3],ymm1[3],ymm0[6],ymm1[6],ymm0[7],ymm1[7]
vmovdqa %ymm0, 0xa0(%rsp)
vmovdqa 0xc0(%rsp), %ymm1
vmovdqa 0x440(%rsp), %ymm0
vmovdqa %ymm1, 0x97e0(%rsp)
vmovdqa %ymm0, 0x97c0(%rsp)
vmovdqa 0x97e0(%rsp), %ymm0
vmovdqa 0x97c0(%rsp), %ymm1
vpaddd %ymm1, %ymm0, %ymm0
movq 0x30(%rbp), %rax
movl 0x4(%rax), %eax
vmovdqa %ymm0, 0x9be0(%rsp)
movl %eax, 0x9bdc(%rsp)
vmovdqa 0x9be0(%rsp), %ymm0
movl 0x9bdc(%rsp), %eax
vmovd %eax, %xmm1
vpsrad %xmm1, %ymm0, %ymm0
vmovdqa %ymm0, 0x80(%rsp)
vmovdqa 0xa0(%rsp), %ymm1
vmovdqa 0x440(%rsp), %ymm0
vmovdqa %ymm1, 0x97a0(%rsp)
vmovdqa %ymm0, 0x9780(%rsp)
vmovdqa 0x97a0(%rsp), %ymm0
vmovdqa 0x9780(%rsp), %ymm1
vpaddd %ymm1, %ymm0, %ymm0
movq 0x30(%rbp), %rax
movl 0x4(%rax), %eax
vmovdqa %ymm0, 0x9ba0(%rsp)
movl %eax, 0x9b9c(%rsp)
vmovdqa 0x9ba0(%rsp), %ymm0
movl 0x9b9c(%rsp), %eax
vmovd %eax, %xmm1
vpsrad %xmm1, %ymm0, %ymm0
vmovdqa %ymm0, 0x60(%rsp)
vmovdqa 0x80(%rsp), %ymm1
vmovdqa 0x60(%rsp), %ymm0
vmovdqa %ymm1, 0x9ca0(%rsp)
vmovdqa %ymm0, 0x9c80(%rsp)
vmovdqa 0x9ca0(%rsp), %ymm0
vmovdqa 0x9c80(%rsp), %ymm1
vpackssdw %ymm1, %ymm0, %ymm0
vmovdqa %ymm0, 0x40(%rsp)
vmovdqa 0x40(%rsp), %ymm1
vmovdqa 0x9a0(%rsp), %ymm0
vmovdqa %ymm1, 0x9da0(%rsp)
vmovdqa %ymm0, 0x9d80(%rsp)
vmovdqa 0x9da0(%rsp), %ymm0
vmovdqa 0x9d80(%rsp), %ymm1
vpmaxsw %ymm1, %ymm0, %ymm1
vmovdqa 0x560(%rsp), %ymm0
vmovdqa %ymm1, 0x9d20(%rsp)
vmovdqa %ymm0, 0x9d00(%rsp)
vmovdqa 0x9d20(%rsp), %ymm0
vmovdqa 0x9d00(%rsp), %ymm1
vpminsw %ymm1, %ymm0, %ymm0
vmovdqa %ymm0, 0x20(%rsp)
movq 0x9130(%rsp), %rdi
movslq 0x43c(%rsp), %rax
imulq 0x9150(%rsp), %rax
shlq %rax
addq %rax, %rdi
movslq 0x438(%rsp), %rax
shlq %rax
addq %rax, %rdi
vmovdqa 0x20(%rsp), %ymm0
callq 0x947b00
movl 0x438(%rsp), %eax
addl $0x10, %eax
movl %eax, 0x438(%rsp)
jmp 0x947174
jmp 0x947a69
movl 0x43c(%rsp), %eax
addl $0x1, %eax
movl %eax, 0x43c(%rsp)
jmp 0x947159
movq %rbp, %rsp
popq %rbp
vzeroupper
retq
nopw (%rax,%rax)
| /m-ab-s[P]aom/av1/common/x86/highbd_wiener_convolve_avx2.c |
av1_quantize_lp_avx2 | void av1_quantize_lp_avx2(const int16_t *coeff_ptr, intptr_t n_coeffs,
const int16_t *round_ptr, const int16_t *quant_ptr,
int16_t *qcoeff_ptr, int16_t *dqcoeff_ptr,
const int16_t *dequant_ptr, uint16_t *eob_ptr,
const int16_t *scan, const int16_t *iscan) {
(void)scan;
__m256i eob256 = _mm256_setzero_si256();
// Setup global values.
__m256i round256 =
_mm256_castsi128_si256(_mm_load_si128((const __m128i *)round_ptr));
__m256i quant256 =
_mm256_castsi128_si256(_mm_load_si128((const __m128i *)quant_ptr));
__m256i dequant256 =
_mm256_castsi128_si256(_mm_load_si128((const __m128i *)dequant_ptr));
// Populate upper AC values.
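  // (Selector 0x54 picks 64-bit lanes [0,1,1,1]: word 0 keeps the DC value
  // while the remaining 15 words receive the AC value.)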
round256 = _mm256_permute4x64_epi64(round256, 0x54);
quant256 = _mm256_permute4x64_epi64(quant256, 0x54);
dequant256 = _mm256_permute4x64_epi64(dequant256, 0x54);
// Process DC and the first 15 AC coeffs.
quantize_lp_16_first(coeff_ptr, iscan, qcoeff_ptr, dqcoeff_ptr, &round256,
&quant256, &dequant256, &eob256);
if (n_coeffs > 16) {
// Overwrite the DC constants with AC constants
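    // (selector 0x31 copies the upper, all-AC 128-bit half into both halves).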
dequant256 = _mm256_permute2x128_si256(dequant256, dequant256, 0x31);
quant256 = _mm256_permute2x128_si256(quant256, quant256, 0x31);
round256 = _mm256_permute2x128_si256(round256, round256, 0x31);
// AC only loop.
for (int idx = 16; idx < n_coeffs; idx += 16) {
quantize_lp_16(coeff_ptr, idx, iscan, qcoeff_ptr, dqcoeff_ptr, &round256,
&quant256, &dequant256, &eob256);
}
}
*eob_ptr = accumulate_eob256(eob256);
} | pushq %rbp
movq %rsp, %rbp
andq $-0x20, %rsp
subq $0xa40, %rsp # imm = 0xA40
movq 0x28(%rbp), %rax
movq 0x20(%rbp), %rax
movq 0x18(%rbp), %rax
movq 0x10(%rbp), %rax
movq %rdi, 0xd8(%rsp)
movq %rsi, 0xd0(%rsp)
movq %rdx, 0xc8(%rsp)
movq %rcx, 0xc0(%rsp)
movq %r8, 0xb8(%rsp)
movq %r9, 0xb0(%rsp)
vxorps %xmm0, %xmm0, %xmm0
vmovaps %ymm0, 0xe0(%rsp)
vmovaps 0xe0(%rsp), %ymm1
vmovaps %ymm1, 0x80(%rsp)
movq 0xc8(%rsp), %rax
movq %rax, 0x158(%rsp)
movq 0x158(%rsp), %rax
vmovdqa (%rax), %xmm1
vmovdqa %xmm1, 0x130(%rsp)
vmovdqa 0x130(%rsp), %xmm1
vmovdqa %xmm2, 0x70(%rsp)
vmovdqa %xmm1, 0x60(%rsp)
movq 0xc0(%rsp), %rax
movq %rax, 0x150(%rsp)
movq 0x150(%rsp), %rax
vmovdqa (%rax), %xmm1
vmovdqa %xmm1, 0x120(%rsp)
vmovdqa 0x120(%rsp), %xmm1
vmovdqa %xmm2, 0x50(%rsp)
vmovdqa %xmm1, 0x40(%rsp)
movq 0x10(%rbp), %rax
movq %rax, 0x148(%rsp)
movq 0x148(%rsp), %rax
vmovdqa (%rax), %xmm1
vmovdqa %xmm1, 0x110(%rsp)
vmovdqa 0x110(%rsp), %xmm1
vmovdqa %xmm2, 0x30(%rsp)
vmovdqa %xmm1, 0x20(%rsp)
vmovaps 0x60(%rsp), %ymm1
vpermq $0x54, %ymm1, %ymm1 # ymm1 = ymm1[0,1,1,1]
vmovaps %ymm1, 0x60(%rsp)
vmovaps 0x40(%rsp), %ymm1
vpermq $0x54, %ymm1, %ymm1 # ymm1 = ymm1[0,1,1,1]
vmovaps %ymm1, 0x40(%rsp)
vmovaps 0x20(%rsp), %ymm1
vpermq $0x54, %ymm1, %ymm1 # ymm1 = ymm1[0,1,1,1]
vmovaps %ymm1, 0x20(%rsp)
movq 0xd8(%rsp), %rsi
movq 0x28(%rbp), %rdx
movq 0xb8(%rsp), %rcx
movq 0xb0(%rsp), %rax
movq %rsi, 0x2d8(%rsp)
movq %rdx, 0x2d0(%rsp)
movq %rcx, 0x2c8(%rsp)
movq %rax, 0x2c0(%rsp)
leaq 0x60(%rsp), %rax
movq %rax, 0x2b8(%rsp)
leaq 0x40(%rsp), %rax
movq %rax, 0x2b0(%rsp)
leaq 0x20(%rsp), %rax
movq %rax, 0x2a8(%rsp)
leaq 0x80(%rsp), %rax
movq %rax, 0x2a0(%rsp)
movq 0x2d8(%rsp), %rax
movq %rax, 0x4d8(%rsp)
movq 0x4d8(%rsp), %rax
vmovups (%rax), %ymm1
vmovaps %ymm1, 0x280(%rsp)
vmovaps 0x280(%rsp), %ymm1
vmovaps %ymm1, 0x500(%rsp)
vpabsw 0x500(%rsp), %ymm1
vmovaps %ymm1, 0x260(%rsp)
vmovaps 0x260(%rsp), %ymm2
movq 0x2b8(%rsp), %rax
vmovaps (%rax), %ymm1
vmovaps %ymm2, 0x580(%rsp)
vmovaps %ymm1, 0x560(%rsp)
vmovaps 0x580(%rsp), %ymm1
vmovaps 0x560(%rsp), %ymm2
vpaddsw %ymm2, %ymm1, %ymm1
vmovaps %ymm1, 0x240(%rsp)
vmovaps 0x240(%rsp), %ymm2
movq 0x2b0(%rsp), %rax
vmovaps (%rax), %ymm1
vmovaps %ymm2, 0x600(%rsp)
vmovaps %ymm1, 0x5e0(%rsp)
vmovaps 0x600(%rsp), %ymm1
vmovaps 0x5e0(%rsp), %ymm2
vpmulhw %ymm2, %ymm1, %ymm1
vmovaps %ymm1, 0x220(%rsp)
vmovaps 0x220(%rsp), %ymm2
vmovaps 0x280(%rsp), %ymm1
vmovaps %ymm2, 0x680(%rsp)
vmovaps %ymm1, 0x660(%rsp)
vmovaps 0x680(%rsp), %ymm1
vmovaps 0x660(%rsp), %ymm2
vpsignw %ymm2, %ymm1, %ymm1
vmovaps %ymm1, 0x200(%rsp)
vmovaps 0x200(%rsp), %ymm2
movq 0x2a8(%rsp), %rax
vmovaps (%rax), %ymm1
vmovaps %ymm2, 0x700(%rsp)
vmovaps %ymm1, 0x6e0(%rsp)
vmovaps 0x700(%rsp), %ymm1
vmovaps 0x6e0(%rsp), %ymm2
vpmullw %ymm2, %ymm1, %ymm1
vmovaps %ymm1, 0x1e0(%rsp)
vmovaps 0x220(%rsp), %ymm1
vmovaps %ymm0, 0x2e0(%rsp)
vmovaps 0x2e0(%rsp), %ymm0
vmovaps %ymm1, 0x780(%rsp)
vmovaps %ymm0, 0x760(%rsp)
vmovaps 0x780(%rsp), %ymm0
vmovaps 0x760(%rsp), %ymm1
vpcmpgtw %ymm1, %ymm0, %ymm0
vmovdqa %ymm0, 0x1c0(%rsp)
movq 0x2c8(%rsp), %rax
vmovdqa 0x200(%rsp), %ymm0
movq %rax, 0x898(%rsp)
vmovdqa %ymm0, 0x860(%rsp)
vmovdqa 0x860(%rsp), %ymm0
movq 0x898(%rsp), %rax
vmovdqu %ymm0, (%rax)
movq 0x2c0(%rsp), %rax
vmovdqa 0x1e0(%rsp), %ymm0
movq %rax, 0x858(%rsp)
vmovdqa %ymm0, 0x820(%rsp)
vmovdqa 0x820(%rsp), %ymm0
movq 0x858(%rsp), %rax
vmovdqu %ymm0, (%rax)
movq 0x2d0(%rsp), %rax
movq %rax, 0x4d0(%rsp)
movq 0x4d0(%rsp), %rax
vmovdqu (%rax), %ymm0
vmovdqa %ymm0, 0x1a0(%rsp)
vmovdqa 0x1a0(%rsp), %ymm1
vmovdqa 0x1c0(%rsp), %ymm0
vmovdqa %ymm1, 0x900(%rsp)
vmovdqa %ymm0, 0x8e0(%rsp)
vmovdqa 0x900(%rsp), %ymm0
vmovdqa 0x8e0(%rsp), %ymm1
vpsubw %ymm1, %ymm0, %ymm0
vmovdqa %ymm0, 0x180(%rsp)
vmovdqa 0x180(%rsp), %ymm1
vmovdqa 0x1c0(%rsp), %ymm0
vmovdqa %ymm1, 0x980(%rsp)
vmovdqa %ymm0, 0x960(%rsp)
vmovdqa 0x980(%rsp), %ymm0
vpand 0x960(%rsp), %ymm0, %ymm0
vmovdqa %ymm0, 0x160(%rsp)
movq 0x2a0(%rsp), %rax
vmovdqa (%rax), %ymm1
vmovdqa 0x160(%rsp), %ymm0
vmovdqa %ymm1, 0xa00(%rsp)
vmovdqa %ymm0, 0x9e0(%rsp)
vmovdqa 0xa00(%rsp), %ymm0
vmovdqa 0x9e0(%rsp), %ymm1
vpmaxsw %ymm1, %ymm0, %ymm0
movq 0x2a0(%rsp), %rax
vmovdqa %ymm0, (%rax)
cmpq $0x10, 0xd0(%rsp)
jle 0x94cde2
vbroadcastf128 0x30(%rsp), %ymm0 # ymm0 = mem[0,1,0,1]
vmovaps %ymm0, 0x20(%rsp)
vbroadcastf128 0x50(%rsp), %ymm0 # ymm0 = mem[0,1,0,1]
vmovaps %ymm0, 0x40(%rsp)
vbroadcastf128 0x70(%rsp), %ymm0 # ymm0 = mem[0,1,0,1]
vmovdqa %ymm0, 0x60(%rsp)
movl $0x10, 0x1c(%rsp)
movslq 0x1c(%rsp), %rax
cmpq 0xd0(%rsp), %rax
jge 0x94cde0
movq 0xd8(%rsp), %rdi
movslq 0x1c(%rsp), %rsi
movq 0x28(%rbp), %rdx
movq 0xb8(%rsp), %rcx
movq 0xb0(%rsp), %rax
movq %rdi, 0x498(%rsp)
movq %rsi, 0x490(%rsp)
movq %rdx, 0x488(%rsp)
movq %rcx, 0x480(%rsp)
movq %rax, 0x478(%rsp)
leaq 0x60(%rsp), %rax
movq %rax, 0x470(%rsp)
leaq 0x40(%rsp), %rax
movq %rax, 0x468(%rsp)
leaq 0x20(%rsp), %rax
movq %rax, 0x460(%rsp)
leaq 0x80(%rsp), %rax
movq %rax, 0x458(%rsp)
movq 0x498(%rsp), %rax
movq 0x490(%rsp), %rcx
leaq (%rax,%rcx,2), %rax
movq %rax, 0x4c8(%rsp)
movq 0x4c8(%rsp), %rax
vmovups (%rax), %ymm0
vmovaps %ymm0, 0x420(%rsp)
vmovaps 0x420(%rsp), %ymm0
vmovaps %ymm0, 0x4e0(%rsp)
vpabsw 0x4e0(%rsp), %ymm0
vmovaps %ymm0, 0x400(%rsp)
vmovaps 0x400(%rsp), %ymm1
movq 0x470(%rsp), %rax
vmovaps (%rax), %ymm0
vmovaps %ymm1, 0x540(%rsp)
vmovaps %ymm0, 0x520(%rsp)
vmovaps 0x540(%rsp), %ymm0
vmovaps 0x520(%rsp), %ymm1
vpaddsw %ymm1, %ymm0, %ymm0
vmovaps %ymm0, 0x3e0(%rsp)
vmovaps 0x3e0(%rsp), %ymm1
movq 0x468(%rsp), %rax
vmovaps (%rax), %ymm0
vmovaps %ymm1, 0x5c0(%rsp)
vmovaps %ymm0, 0x5a0(%rsp)
vmovaps 0x5c0(%rsp), %ymm0
vmovaps 0x5a0(%rsp), %ymm1
vpmulhw %ymm1, %ymm0, %ymm0
vmovaps %ymm0, 0x3c0(%rsp)
vmovaps 0x3c0(%rsp), %ymm1
vmovaps 0x420(%rsp), %ymm0
vmovaps %ymm1, 0x640(%rsp)
vmovaps %ymm0, 0x620(%rsp)
vmovaps 0x640(%rsp), %ymm0
vmovaps 0x620(%rsp), %ymm1
vpsignw %ymm1, %ymm0, %ymm0
vmovaps %ymm0, 0x3a0(%rsp)
vmovaps 0x3a0(%rsp), %ymm1
movq 0x460(%rsp), %rax
vmovaps (%rax), %ymm0
vmovaps %ymm1, 0x6c0(%rsp)
vmovaps %ymm0, 0x6a0(%rsp)
vmovaps 0x6c0(%rsp), %ymm0
vmovaps 0x6a0(%rsp), %ymm1
vpmullw %ymm1, %ymm0, %ymm0
vmovaps %ymm0, 0x380(%rsp)
vmovaps 0x3c0(%rsp), %ymm1
vxorps %xmm0, %xmm0, %xmm0
vmovaps %ymm0, 0x4a0(%rsp)
vmovaps 0x4a0(%rsp), %ymm0
vmovaps %ymm1, 0x740(%rsp)
vmovaps %ymm0, 0x720(%rsp)
vmovaps 0x740(%rsp), %ymm0
vmovaps 0x720(%rsp), %ymm1
vpcmpgtw %ymm1, %ymm0, %ymm0
vmovdqa %ymm0, 0x360(%rsp)
movq 0x480(%rsp), %rax
movq 0x490(%rsp), %rcx
shlq %rcx
addq %rcx, %rax
vmovdqa 0x3a0(%rsp), %ymm0
movq %rax, 0x818(%rsp)
vmovdqa %ymm0, 0x7e0(%rsp)
vmovdqa 0x7e0(%rsp), %ymm0
movq 0x818(%rsp), %rax
vmovdqu %ymm0, (%rax)
movq 0x478(%rsp), %rax
movq 0x490(%rsp), %rcx
shlq %rcx
addq %rcx, %rax
vmovdqa 0x380(%rsp), %ymm0
movq %rax, 0x7d8(%rsp)
vmovdqa %ymm0, 0x7a0(%rsp)
vmovdqa 0x7a0(%rsp), %ymm0
movq 0x7d8(%rsp), %rax
vmovdqu %ymm0, (%rax)
movq 0x488(%rsp), %rax
movq 0x490(%rsp), %rcx
shlq %rcx
addq %rcx, %rax
movq %rax, 0x4c0(%rsp)
movq 0x4c0(%rsp), %rax
vmovdqu (%rax), %ymm0
vmovdqa %ymm0, 0x340(%rsp)
vmovdqa 0x340(%rsp), %ymm1
vmovdqa 0x360(%rsp), %ymm0
vmovdqa %ymm1, 0x8c0(%rsp)
vmovdqa %ymm0, 0x8a0(%rsp)
vmovdqa 0x8c0(%rsp), %ymm0
vmovdqa 0x8a0(%rsp), %ymm1
vpsubw %ymm1, %ymm0, %ymm0
vmovdqa %ymm0, 0x320(%rsp)
vmovdqa 0x320(%rsp), %ymm1
vmovdqa 0x360(%rsp), %ymm0
vmovdqa %ymm1, 0x940(%rsp)
vmovdqa %ymm0, 0x920(%rsp)
vmovdqa 0x940(%rsp), %ymm0
vpand 0x920(%rsp), %ymm0, %ymm0
vmovdqa %ymm0, 0x300(%rsp)
movq 0x458(%rsp), %rax
vmovdqa (%rax), %ymm1
vmovdqa 0x300(%rsp), %ymm0
vmovdqa %ymm1, 0x9c0(%rsp)
vmovdqa %ymm0, 0x9a0(%rsp)
vmovdqa 0x9c0(%rsp), %ymm0
vmovdqa 0x9a0(%rsp), %ymm1
vpmaxsw %ymm1, %ymm0, %ymm0
movq 0x458(%rsp), %rax
vmovdqa %ymm0, (%rax)
movl 0x1c(%rsp), %eax
addl $0x10, %eax
movl %eax, 0x1c(%rsp)
jmp 0x94c9f9
jmp 0x94cde2
vmovdqa 0x80(%rsp), %ymm0
callq 0x94ce10
movw %ax, %cx
movq 0x18(%rbp), %rax
movw %cx, (%rax)
movq %rbp, %rsp
popq %rbp
vzeroupper
retq
nopw %cs:(%rax,%rax)
| /m-ab-s[P]aom/av1/encoder/x86/av1_quantize_avx2.c |
accumulate_eob256 | static inline int16_t accumulate_eob256(__m256i eob256) {
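  // Horizontal max of the 16 per-lane eob values: fold the 256-bit vector to
  // 128 bits, then max it against progressively shuffled copies of itself so
  // the overall maximum can be extracted from a single lane.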
const __m128i eob_lo = _mm256_castsi256_si128(eob256);
const __m128i eob_hi = _mm256_extractf128_si256(eob256, 1);
__m128i eob = _mm_max_epi16(eob_lo, eob_hi);
__m128i eob_shuffled = _mm_shuffle_epi32(eob, 0xe);
eob = _mm_max_epi16(eob, eob_shuffled);
eob_shuffled = _mm_shufflelo_epi16(eob, 0xe);
eob = _mm_max_epi16(eob, eob_shuffled);
eob_shuffled = _mm_shufflelo_epi16(eob, 0x1);
eob = _mm_max_epi16(eob, eob_shuffled);
return _mm_extract_epi16(eob, 1);
} | pushq %rbp
movq %rsp, %rbp
andq $-0x20, %rsp
subq $0x120, %rsp # imm = 0x120
vmovaps %ymm0, 0x40(%rsp)
vmovaps 0x40(%rsp), %ymm0
vmovaps %ymm0, 0x60(%rsp)
vmovaps 0x60(%rsp), %ymm0
vmovdqa %xmm0, 0x30(%rsp)
vmovdqa 0x50(%rsp), %xmm0
vmovdqa %xmm0, 0x20(%rsp)
vmovdqa 0x30(%rsp), %xmm1
vmovdqa 0x20(%rsp), %xmm0
vmovdqa %xmm1, 0x100(%rsp)
vmovdqa %xmm0, 0xf0(%rsp)
vmovdqa 0x100(%rsp), %xmm0
vmovdqa 0xf0(%rsp), %xmm1
vpmaxsw %xmm1, %xmm0, %xmm0
vmovdqa %xmm0, 0x10(%rsp)
vmovdqa 0x10(%rsp), %xmm0
vpshufd $0xe, %xmm0, %xmm0 # xmm0 = xmm0[2,3,0,0]
vmovdqa %xmm0, (%rsp)
vmovdqa 0x10(%rsp), %xmm1
vmovdqa (%rsp), %xmm0
vmovdqa %xmm1, 0xe0(%rsp)
vmovdqa %xmm0, 0xd0(%rsp)
vmovdqa 0xe0(%rsp), %xmm0
vmovdqa 0xd0(%rsp), %xmm1
vpmaxsw %xmm1, %xmm0, %xmm0
vmovdqa %xmm0, 0x10(%rsp)
vmovdqa 0x10(%rsp), %xmm0
vpshuflw $0xe, %xmm0, %xmm0 # xmm0 = xmm0[2,3,0,0,4,5,6,7]
vmovdqa %xmm0, (%rsp)
vmovdqa 0x10(%rsp), %xmm1
vmovdqa (%rsp), %xmm0
vmovdqa %xmm1, 0xc0(%rsp)
vmovdqa %xmm0, 0xb0(%rsp)
vmovdqa 0xc0(%rsp), %xmm0
vmovdqa 0xb0(%rsp), %xmm1
vpmaxsw %xmm1, %xmm0, %xmm0
vmovdqa %xmm0, 0x10(%rsp)
vmovdqa 0x10(%rsp), %xmm0
vpshuflw $0x1, %xmm0, %xmm0 # xmm0 = xmm0[1,0,0,0,4,5,6,7]
vmovdqa %xmm0, (%rsp)
vmovdqa 0x10(%rsp), %xmm1
vmovdqa (%rsp), %xmm0
vmovdqa %xmm1, 0xa0(%rsp)
vmovdqa %xmm0, 0x90(%rsp)
vmovdqa 0xa0(%rsp), %xmm0
vmovdqa 0x90(%rsp), %xmm1
vpmaxsw %xmm1, %xmm0, %xmm0
vmovdqa %xmm0, 0x10(%rsp)
vmovdqa 0x10(%rsp), %xmm0
vpextrw $0x1, %xmm0, %eax
movzwl %ax, %eax
movq %rbp, %rsp
popq %rbp
vzeroupper
retq
nopw %cs:(%rax,%rax)
| /m-ab-s[P]aom/av1/encoder/x86/av1_quantize_avx2.c |
av1_quantize_fp_avx2 | void av1_quantize_fp_avx2(const tran_low_t *coeff_ptr, intptr_t n_coeffs,
const int16_t *zbin_ptr, const int16_t *round_ptr,
const int16_t *quant_ptr,
const int16_t *quant_shift_ptr,
tran_low_t *qcoeff_ptr, tran_low_t *dqcoeff_ptr,
const int16_t *dequant_ptr, uint16_t *eob_ptr,
const int16_t *scan_ptr, const int16_t *iscan_ptr) {
(void)scan_ptr;
(void)zbin_ptr;
(void)quant_shift_ptr;
const int log_scale = 0;
const int step = 16;
__m256i qp[3], thr;
__m256i eob = _mm256_setzero_si256();
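  // Per 16-bit lane, eob accumulates the largest (iscan index + 1) seen for a
  // nonzero coefficient; quant_gather_eob() reduces it to a scalar at the end.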
init_qp(round_ptr, quant_ptr, dequant_ptr, log_scale, &thr, qp);
quantize_fp_16(&thr, qp, coeff_ptr, iscan_ptr, qcoeff_ptr, dqcoeff_ptr, &eob);
coeff_ptr += step;
qcoeff_ptr += step;
dqcoeff_ptr += step;
iscan_ptr += step;
n_coeffs -= step;
update_qp(&thr, qp);
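  // update_qp() replaces the DC entries of thr/qp with the AC entries (cf. the
  // explicit permutes in av1_quantize_lp_avx2), so the loop below quantizes the
  // remaining coefficients with AC-only constants.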
while (n_coeffs > 0) {
quantize_fp_16(&thr, qp, coeff_ptr, iscan_ptr, qcoeff_ptr, dqcoeff_ptr,
&eob);
coeff_ptr += step;
qcoeff_ptr += step;
dqcoeff_ptr += step;
iscan_ptr += step;
n_coeffs -= step;
}
*eob_ptr = quant_gather_eob(eob);
} | pushq %rbp
movq %rsp, %rbp
andq $-0x20, %rsp
subq $0xb00, %rsp # imm = 0xB00
movq 0x38(%rbp), %rax
movq 0x30(%rbp), %rax
movq 0x28(%rbp), %rax
movq 0x20(%rbp), %rax
movq 0x18(%rbp), %rax
movq 0x10(%rbp), %rax
movq %rdi, 0xf8(%rsp)
movq %rsi, 0xf0(%rsp)
movq %rdx, 0xe8(%rsp)
movq %rcx, 0xe0(%rsp)
movq %r8, 0xd8(%rsp)
movq %r9, 0xd0(%rsp)
movl $0x0, 0xcc(%rsp)
movl $0x10, 0xc8(%rsp)
vxorps %xmm0, %xmm0, %xmm0
vmovaps %ymm0, 0x100(%rsp)
vmovaps 0x100(%rsp), %ymm0
vmovaps %ymm0, 0x20(%rsp)
movq 0xe0(%rsp), %rdi
movq 0xd8(%rsp), %rsi
movq 0x20(%rbp), %rdx
xorl %ecx, %ecx
leaq 0x40(%rsp), %r8
movq %r8, 0x10(%rsp)
leaq 0x60(%rsp), %r9
movq %r9, 0x18(%rsp)
vzeroupper
callq 0x94d920
movq 0x10(%rsp), %r8
movq 0x18(%rsp), %rdi
movq 0xf8(%rsp), %rsi
movq 0x38(%rbp), %rdx
movq 0x10(%rbp), %rcx
movq 0x18(%rbp), %rax
movq %r8, 0x3f8(%rsp)
movq %rdi, 0x3f0(%rsp)
movq %rsi, 0x3e8(%rsp)
movq %rdx, 0x3e0(%rsp)
movq %rcx, 0x3d8(%rsp)
movq %rax, 0x3d0(%rsp)
leaq 0x20(%rsp), %rax
movq %rax, 0x3c8(%rsp)
movq 0x3e8(%rsp), %rdi
callq 0x94f2c0
vmovaps %ymm0, 0x3a0(%rsp)
vmovaps 0x3a0(%rsp), %ymm0
vmovaps %ymm0, 0x420(%rsp)
vpabsw 0x420(%rsp), %ymm0
vmovaps %ymm0, 0x380(%rsp)
vmovaps 0x380(%rsp), %ymm1
movq 0x3f8(%rsp), %rax
vmovaps (%rax), %ymm0
vmovaps %ymm1, 0x6c0(%rsp)
vmovaps %ymm0, 0x6a0(%rsp)
vmovaps 0x6c0(%rsp), %ymm0
vmovaps 0x6a0(%rsp), %ymm1
vpcmpgtw %ymm1, %ymm0, %ymm0
vmovdqa %ymm0, 0x360(%rsp)
vmovdqa 0x360(%rsp), %ymm0
vmovdqa %ymm0, 0x760(%rsp)
vmovdqa 0x760(%rsp), %ymm0
vpmovmskb %ymm0, %eax
movl %eax, 0x35c(%rsp)
cmpl $0x0, 0x35c(%rsp)
je 0x94d41d
vmovaps 0x380(%rsp), %ymm1
movq 0x3f0(%rsp), %rax
vmovaps (%rax), %ymm0
vmovaps %ymm1, 0x480(%rsp)
vmovaps %ymm0, 0x460(%rsp)
vmovaps 0x480(%rsp), %ymm0
vmovaps 0x460(%rsp), %ymm1
vpaddsw %ymm1, %ymm0, %ymm0
vmovaps %ymm0, 0x320(%rsp)
vmovaps 0x320(%rsp), %ymm1
movq 0x3f0(%rsp), %rax
vmovaps 0x20(%rax), %ymm0
vmovaps %ymm1, 0x500(%rsp)
vmovaps %ymm0, 0x4e0(%rsp)
vmovaps 0x500(%rsp), %ymm0
vmovaps 0x4e0(%rsp), %ymm1
vpmulhw %ymm1, %ymm0, %ymm0
vmovaps %ymm0, 0x300(%rsp)
vmovaps 0x300(%rsp), %ymm1
vmovaps 0x3a0(%rsp), %ymm0
vmovaps %ymm1, 0x580(%rsp)
vmovaps %ymm0, 0x560(%rsp)
vmovaps 0x580(%rsp), %ymm0
vmovaps 0x560(%rsp), %ymm1
vpsignw %ymm1, %ymm0, %ymm0
vmovaps %ymm0, 0x2e0(%rsp)
vmovaps 0x2e0(%rsp), %ymm1
movq 0x3f0(%rsp), %rax
vmovaps 0x40(%rax), %ymm0
vmovaps %ymm1, 0x600(%rsp)
vmovaps %ymm0, 0x5e0(%rsp)
vmovaps 0x600(%rsp), %ymm0
vmovaps 0x5e0(%rsp), %ymm1
vpmullw %ymm1, %ymm0, %ymm0
vmovaps %ymm0, 0x2c0(%rsp)
vmovaps 0x300(%rsp), %ymm1
vxorps %xmm0, %xmm0, %xmm0
vmovaps %ymm0, 0x400(%rsp)
vmovaps 0x400(%rsp), %ymm0
vmovaps %ymm1, 0x680(%rsp)
vmovaps %ymm0, 0x660(%rsp)
vmovaps 0x680(%rsp), %ymm0
vmovaps 0x660(%rsp), %ymm1
vpcmpgtw %ymm1, %ymm0, %ymm0
vmovaps %ymm0, 0x2a0(%rsp)
vmovaps 0x2e0(%rsp), %ymm0
movq 0x3d8(%rsp), %rdi
callq 0x94f340
vmovaps 0x2c0(%rsp), %ymm0
movq 0x3d0(%rsp), %rdi
callq 0x94f340
movq 0x3e0(%rsp), %rax
movq 0x3c8(%rsp), %rcx
vmovaps (%rcx), %ymm1
vmovaps 0x2a0(%rsp), %ymm0
movq %rax, 0x870(%rsp)
vmovaps %ymm1, 0x840(%rsp)
vmovaps %ymm0, 0x820(%rsp)
movq 0x870(%rsp), %rax
movq %rax, 0x878(%rsp)
movq 0x878(%rsp), %rax
vmovups (%rax), %ymm0
vmovaps %ymm0, 0x800(%rsp)
vmovaps 0x800(%rsp), %ymm0
vpermq $0xd8, %ymm0, %ymm0 # ymm0 = ymm0[0,2,1,3]
vmovdqa %ymm0, 0x7e0(%rsp)
vmovdqa 0x7e0(%rsp), %ymm1
vmovdqa 0x820(%rsp), %ymm0
vmovdqa %ymm1, 0x8a0(%rsp)
vmovdqa %ymm0, 0x880(%rsp)
vmovdqa 0x8a0(%rsp), %ymm0
vmovdqa 0x880(%rsp), %ymm1
vpsubw %ymm1, %ymm0, %ymm0
vmovdqa %ymm0, 0x7c0(%rsp)
vmovdqa 0x7c0(%rsp), %ymm1
vmovdqa 0x820(%rsp), %ymm0
vmovdqa %ymm1, 0x8e0(%rsp)
vmovdqa %ymm0, 0x8c0(%rsp)
vmovdqa 0x8e0(%rsp), %ymm0
vpand 0x8c0(%rsp), %ymm0, %ymm0
vmovdqa %ymm0, 0x7a0(%rsp)
vmovdqa 0x840(%rsp), %ymm1
vmovdqa 0x7a0(%rsp), %ymm0
vmovdqa %ymm1, 0x920(%rsp)
vmovdqa %ymm0, 0x900(%rsp)
vmovdqa 0x920(%rsp), %ymm0
vmovdqa 0x900(%rsp), %ymm1
vpmaxsw %ymm1, %ymm0, %ymm0
movq 0x3c8(%rsp), %rax
vmovdqa %ymm0, (%rax)
jmp 0x94d43a
movq 0x3d8(%rsp), %rdi
vzeroupper
callq 0x94f480
movq 0x3d0(%rsp), %rdi
callq 0x94f480
movq 0xf8(%rsp), %rax
addq $0x40, %rax
movq %rax, 0xf8(%rsp)
movq 0x10(%rbp), %rax
addq $0x40, %rax
movq %rax, 0x10(%rbp)
movq 0x18(%rbp), %rax
addq $0x40, %rax
movq %rax, 0x18(%rbp)
movq 0x38(%rbp), %rax
addq $0x20, %rax
movq %rax, 0x38(%rbp)
movq 0xf0(%rsp), %rax
subq $0x10, %rax
movq %rax, 0xf0(%rsp)
leaq 0x60(%rsp), %rsi
leaq 0x40(%rsp), %rdi
vzeroupper
callq 0x94dd90
cmpq $0x0, 0xf0(%rsp)
jle 0x94d8f7
movq 0xf8(%rsp), %rsi
movq 0x38(%rbp), %rdx
movq 0x10(%rbp), %rcx
movq 0x18(%rbp), %rax
leaq 0x40(%rsp), %rdi
movq %rdi, 0x278(%rsp)
leaq 0x60(%rsp), %rdi
movq %rdi, 0x270(%rsp)
movq %rsi, 0x268(%rsp)
movq %rdx, 0x260(%rsp)
movq %rcx, 0x258(%rsp)
movq %rax, 0x250(%rsp)
leaq 0x20(%rsp), %rax
movq %rax, 0x248(%rsp)
movq 0x268(%rsp), %rdi
callq 0x94f2c0
vmovaps %ymm0, 0x220(%rsp)
vmovaps 0x220(%rsp), %ymm0
vmovaps %ymm0, 0x440(%rsp)
vpabsw 0x440(%rsp), %ymm0
vmovaps %ymm0, 0x200(%rsp)
vmovaps 0x200(%rsp), %ymm1
movq 0x278(%rsp), %rax
vmovaps (%rax), %ymm0
vmovaps %ymm1, 0x740(%rsp)
vmovaps %ymm0, 0x720(%rsp)
vmovaps 0x740(%rsp), %ymm0
vmovaps 0x720(%rsp), %ymm1
vpcmpgtw %ymm1, %ymm0, %ymm0
vmovdqa %ymm0, 0x1e0(%rsp)
vmovdqa 0x1e0(%rsp), %ymm0
vmovdqa %ymm0, 0x780(%rsp)
vmovdqa 0x780(%rsp), %ymm0
vpmovmskb %ymm0, %eax
movl %eax, 0x1dc(%rsp)
cmpl $0x0, 0x1dc(%rsp)
je 0x94d889
vmovaps 0x200(%rsp), %ymm1
movq 0x270(%rsp), %rax
vmovaps (%rax), %ymm0
vmovaps %ymm1, 0x4c0(%rsp)
vmovaps %ymm0, 0x4a0(%rsp)
vmovaps 0x4c0(%rsp), %ymm0
vmovaps 0x4a0(%rsp), %ymm1
vpaddsw %ymm1, %ymm0, %ymm0
vmovaps %ymm0, 0x1a0(%rsp)
vmovaps 0x1a0(%rsp), %ymm1
movq 0x270(%rsp), %rax
vmovaps 0x20(%rax), %ymm0
vmovaps %ymm1, 0x540(%rsp)
vmovaps %ymm0, 0x520(%rsp)
vmovaps 0x540(%rsp), %ymm0
vmovaps 0x520(%rsp), %ymm1
vpmulhw %ymm1, %ymm0, %ymm0
vmovaps %ymm0, 0x180(%rsp)
vmovaps 0x180(%rsp), %ymm1
vmovaps 0x220(%rsp), %ymm0
vmovaps %ymm1, 0x5c0(%rsp)
vmovaps %ymm0, 0x5a0(%rsp)
vmovaps 0x5c0(%rsp), %ymm0
vmovaps 0x5a0(%rsp), %ymm1
vpsignw %ymm1, %ymm0, %ymm0
vmovaps %ymm0, 0x160(%rsp)
vmovaps 0x160(%rsp), %ymm1
movq 0x270(%rsp), %rax
vmovaps 0x40(%rax), %ymm0
vmovaps %ymm1, 0x640(%rsp)
vmovaps %ymm0, 0x620(%rsp)
vmovaps 0x640(%rsp), %ymm0
vmovaps 0x620(%rsp), %ymm1
vpmullw %ymm1, %ymm0, %ymm0
vmovaps %ymm0, 0x140(%rsp)
vmovaps 0x180(%rsp), %ymm1
vxorps %xmm0, %xmm0, %xmm0
vmovaps %ymm0, 0x280(%rsp)
vmovaps 0x280(%rsp), %ymm0
vmovaps %ymm1, 0x700(%rsp)
vmovaps %ymm0, 0x6e0(%rsp)
vmovaps 0x700(%rsp), %ymm0
vmovaps 0x6e0(%rsp), %ymm1
vpcmpgtw %ymm1, %ymm0, %ymm0
vmovaps %ymm0, 0x120(%rsp)
vmovaps 0x160(%rsp), %ymm0
movq 0x258(%rsp), %rdi
callq 0x94f340
vmovaps 0x140(%rsp), %ymm0
movq 0x250(%rsp), %rdi
callq 0x94f340
movq 0x260(%rsp), %rax
movq 0x248(%rsp), %rcx
vmovaps (%rcx), %ymm1
vmovaps 0x120(%rsp), %ymm0
movq %rax, 0xa10(%rsp)
vmovaps %ymm1, 0x9e0(%rsp)
vmovaps %ymm0, 0x9c0(%rsp)
movq 0xa10(%rsp), %rax
movq %rax, 0xa18(%rsp)
movq 0xa18(%rsp), %rax
vmovups (%rax), %ymm0
vmovaps %ymm0, 0x9a0(%rsp)
vmovaps 0x9a0(%rsp), %ymm0
vpermq $0xd8, %ymm0, %ymm0 # ymm0 = ymm0[0,2,1,3]
vmovdqa %ymm0, 0x980(%rsp)
vmovdqa 0x980(%rsp), %ymm1
vmovdqa 0x9c0(%rsp), %ymm0
vmovdqa %ymm1, 0xa40(%rsp)
vmovdqa %ymm0, 0xa20(%rsp)
vmovdqa 0xa40(%rsp), %ymm0
vmovdqa 0xa20(%rsp), %ymm1
vpsubw %ymm1, %ymm0, %ymm0
vmovdqa %ymm0, 0x960(%rsp)
vmovdqa 0x960(%rsp), %ymm1
vmovdqa 0x9c0(%rsp), %ymm0
vmovdqa %ymm1, 0xa80(%rsp)
vmovdqa %ymm0, 0xa60(%rsp)
vmovdqa 0xa80(%rsp), %ymm0
vpand 0xa60(%rsp), %ymm0, %ymm0
vmovdqa %ymm0, 0x940(%rsp)
vmovdqa 0x9e0(%rsp), %ymm1
vmovdqa 0x940(%rsp), %ymm0
vmovdqa %ymm1, 0xac0(%rsp)
vmovdqa %ymm0, 0xaa0(%rsp)
vmovdqa 0xac0(%rsp), %ymm0
vmovdqa 0xaa0(%rsp), %ymm1
vpmaxsw %ymm1, %ymm0, %ymm0
movq 0x248(%rsp), %rax
vmovdqa %ymm0, (%rax)
jmp 0x94d8a6
movq 0x258(%rsp), %rdi
vzeroupper
callq 0x94f480
movq 0x250(%rsp), %rdi
callq 0x94f480
movq 0xf8(%rsp), %rax
addq $0x40, %rax
movq %rax, 0xf8(%rsp)
movq 0x10(%rbp), %rax
addq $0x40, %rax
movq %rax, 0x10(%rbp)
movq 0x18(%rbp), %rax
addq $0x40, %rax
movq %rax, 0x18(%rbp)
movq 0x38(%rbp), %rax
addq $0x20, %rax
movq %rax, 0x38(%rbp)
movq 0xf0(%rsp), %rax
subq $0x10, %rax
movq %rax, 0xf0(%rsp)
jmp 0x94d498
vmovdqa 0x20(%rsp), %ymm0
callq 0x94ddf0
movw %ax, %cx
movq 0x28(%rbp), %rax
movw %cx, (%rax)
movq %rbp, %rsp
popq %rbp
vzeroupper
retq
nopw %cs:(%rax,%rax)
| /m-ab-s[P]aom/av1/encoder/x86/av1_quantize_avx2.c |
init_qp | static inline void init_qp(const int16_t *round_ptr, const int16_t *quant_ptr,
const int16_t *dequant_ptr, int log_scale,
__m256i *thr, __m256i *qp) {
__m128i round = _mm_loadu_si128((const __m128i *)round_ptr);
const __m128i quant = _mm_loadu_si128((const __m128i *)quant_ptr);
const __m128i dequant = _mm_loadu_si128((const __m128i *)dequant_ptr);
if (log_scale > 0) {
const __m128i rnd = _mm_set1_epi16((int16_t)1 << (log_scale - 1));
round = _mm_add_epi16(round, rnd);
round = _mm_srai_epi16(round, log_scale);
}
init_one_qp(&round, &qp[0]);
init_one_qp(&quant, &qp[1]);
if (log_scale == 1) {
qp[1] = _mm256_slli_epi16(qp[1], log_scale);
}
init_one_qp(&dequant, &qp[2]);
*thr = _mm256_srai_epi16(qp[2], 1 + log_scale);
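  // thr serves as a skip threshold: a block of 16 coefficients whose absolute
  // values never exceed it quantizes to all zeros.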
// Subtracting 1 here eliminates a _mm256_cmpeq_epi16() instruction when
// calculating the zbin mask.
*thr = _mm256_sub_epi16(*thr, _mm256_set1_epi16(1));
} | pushq %rbp
movq %rsp, %rbp
andq $-0x20, %rsp
subq $0x240, %rsp # imm = 0x240
movq %rdi, 0x78(%rsp)
movq %rsi, 0x70(%rsp)
movq %rdx, 0x68(%rsp)
movl %ecx, 0x64(%rsp)
movq %r8, 0x58(%rsp)
movq %r9, 0x50(%rsp)
movq 0x78(%rsp), %rax
movq %rax, 0xe0(%rsp)
movq 0xe0(%rsp), %rax
vmovdqu (%rax), %xmm0
vmovdqa %xmm0, 0x40(%rsp)
movq 0x70(%rsp), %rax
movq %rax, 0xd8(%rsp)
movq 0xd8(%rsp), %rax
vmovdqu (%rax), %xmm0
vmovdqa %xmm0, 0x30(%rsp)
movq 0x68(%rsp), %rax
movq %rax, 0xd0(%rsp)
movq 0xd0(%rsp), %rax
vmovdqu (%rax), %xmm0
vmovdqa %xmm0, 0x20(%rsp)
cmpl $0x0, 0x64(%rsp)
jle 0x94db07
movb 0x64(%rsp), %cl
decb %cl
movl $0x1, %eax
shll %cl, %eax
movw %ax, 0xee(%rsp)
movw 0xee(%rsp), %ax
movw %ax, 0xe(%rsp)
movw %ax, 0x1de(%rsp)
movw %ax, 0x1dc(%rsp)
movw %ax, 0x1da(%rsp)
movw %ax, 0x1d8(%rsp)
movw %ax, 0x1d6(%rsp)
movw %ax, 0x1d4(%rsp)
movw %ax, 0x1d2(%rsp)
movw %ax, 0x1d0(%rsp)
movzwl 0x1d0(%rsp), %eax
vmovd %eax, %xmm0
movzwl 0x1d2(%rsp), %eax
vpinsrw $0x1, %eax, %xmm0, %xmm0
movzwl 0x1d4(%rsp), %eax
vpinsrw $0x2, %eax, %xmm0, %xmm0
movzwl 0x1d6(%rsp), %eax
vpinsrw $0x3, %eax, %xmm0, %xmm0
movzwl 0x1d8(%rsp), %eax
vpinsrw $0x4, %eax, %xmm0, %xmm0
movzwl 0x1da(%rsp), %eax
vpinsrw $0x5, %eax, %xmm0, %xmm0
movzwl 0x1dc(%rsp), %eax
vpinsrw $0x6, %eax, %xmm0, %xmm0
movzwl 0x1de(%rsp), %eax
vpinsrw $0x7, %eax, %xmm0, %xmm0
vmovdqa %xmm0, 0x1c0(%rsp)
vmovdqa 0x1c0(%rsp), %xmm0
vmovdqa %xmm0, 0x10(%rsp)
vmovdqa 0x40(%rsp), %xmm1
vmovdqa 0x10(%rsp), %xmm0
vmovdqa %xmm1, 0x100(%rsp)
vmovdqa %xmm0, 0xf0(%rsp)
vmovdqa 0x100(%rsp), %xmm0
vmovdqa 0xf0(%rsp), %xmm1
vpaddw %xmm1, %xmm0, %xmm0
vmovdqa %xmm0, 0x40(%rsp)
vmovdqa 0x40(%rsp), %xmm0
movl 0x64(%rsp), %eax
vmovdqa %xmm0, 0x120(%rsp)
movl %eax, 0x11c(%rsp)
vmovdqa 0x120(%rsp), %xmm0
movl 0x11c(%rsp), %eax
vmovd %eax, %xmm1
vpsraw %xmm1, %xmm0, %xmm0
vmovdqa %xmm0, 0x40(%rsp)
movq 0x50(%rsp), %rsi
leaq 0x40(%rsp), %rdi
callq 0x94f250
movq 0x50(%rsp), %rsi
addq $0x20, %rsi
leaq 0x30(%rsp), %rdi
callq 0x94f250
cmpl $0x1, 0x64(%rsp)
jne 0x94db70
movq 0x50(%rsp), %rax
vmovdqa 0x20(%rax), %ymm0
movl 0x64(%rsp), %eax
vmovdqa %ymm0, 0x140(%rsp)
movl %eax, 0x13c(%rsp)
vmovdqa 0x140(%rsp), %ymm0
movl 0x13c(%rsp), %eax
vmovd %eax, %xmm1
vpsllw %xmm1, %ymm0, %ymm0
movq 0x50(%rsp), %rax
vmovdqa %ymm0, 0x20(%rax)
movq 0x50(%rsp), %rsi
addq $0x40, %rsi
leaq 0x20(%rsp), %rdi
vzeroupper
callq 0x94f250
movq 0x50(%rsp), %rax
vmovaps 0x40(%rax), %ymm0
movl 0x64(%rsp), %eax
incl %eax
vmovaps %ymm0, 0x180(%rsp)
movl %eax, 0x17c(%rsp)
vmovaps 0x180(%rsp), %ymm0
vmovd 0x17c(%rsp), %xmm1
vpsraw %xmm1, %ymm0, %ymm0
movq 0x58(%rsp), %rax
vmovaps %ymm0, (%rax)
movq 0x58(%rsp), %rax
vmovaps (%rax), %ymm1
movw $0x1, 0x1be(%rsp)
movw 0x1be(%rsp), %ax
movw %ax, 0x22e(%rsp)
movw %ax, 0x22c(%rsp)
movw %ax, 0x22a(%rsp)
movw %ax, 0x228(%rsp)
movw %ax, 0x226(%rsp)
movw %ax, 0x224(%rsp)
movw %ax, 0x222(%rsp)
movw %ax, 0x220(%rsp)
movw %ax, 0x21e(%rsp)
movw %ax, 0x21c(%rsp)
movw %ax, 0x21a(%rsp)
movw %ax, 0x218(%rsp)
movw %ax, 0x216(%rsp)
movw %ax, 0x214(%rsp)
movw %ax, 0x212(%rsp)
movw %ax, 0x210(%rsp)
movzwl 0x220(%rsp), %eax
vmovd %eax, %xmm0
movzwl 0x222(%rsp), %eax
vpinsrw $0x1, %eax, %xmm0, %xmm0
movzwl 0x224(%rsp), %eax
vpinsrw $0x2, %eax, %xmm0, %xmm0
movzwl 0x226(%rsp), %eax
vpinsrw $0x3, %eax, %xmm0, %xmm0
movzwl 0x228(%rsp), %eax
vpinsrw $0x4, %eax, %xmm0, %xmm0
movzwl 0x22a(%rsp), %eax
vpinsrw $0x5, %eax, %xmm0, %xmm0
movzwl 0x22c(%rsp), %eax
vpinsrw $0x6, %eax, %xmm0, %xmm0
movzwl 0x22e(%rsp), %eax
vpinsrw $0x7, %eax, %xmm0, %xmm2
movzwl 0x210(%rsp), %eax
vmovd %eax, %xmm0
movzwl 0x212(%rsp), %eax
vpinsrw $0x1, %eax, %xmm0, %xmm0
movzwl 0x214(%rsp), %eax
vpinsrw $0x2, %eax, %xmm0, %xmm0
movzwl 0x216(%rsp), %eax
vpinsrw $0x3, %eax, %xmm0, %xmm0
movzwl 0x218(%rsp), %eax
vpinsrw $0x4, %eax, %xmm0, %xmm0
movzwl 0x21a(%rsp), %eax
vpinsrw $0x5, %eax, %xmm0, %xmm0
movzwl 0x21c(%rsp), %eax
vpinsrw $0x6, %eax, %xmm0, %xmm0
movzwl 0x21e(%rsp), %eax
vpinsrw $0x7, %eax, %xmm0, %xmm3
vmovaps %xmm3, %xmm0
vinserti128 $0x1, %xmm2, %ymm0, %ymm0
vmovdqa %ymm0, 0x1e0(%rsp)
vmovdqa 0x1e0(%rsp), %ymm0
vmovdqa %ymm1, 0xa0(%rsp)
vmovdqa %ymm0, 0x80(%rsp)
vmovdqa 0xa0(%rsp), %ymm0
vmovdqa 0x80(%rsp), %ymm1
vpsubw %ymm1, %ymm0, %ymm0
movq 0x58(%rsp), %rax
vmovdqa %ymm0, (%rax)
movq %rbp, %rsp
popq %rbp
vzeroupper
retq
nopw %cs:(%rax,%rax)
| /m-ab-s[P]aom/av1/encoder/x86/av1_quantize_avx2.c |
quant_gather_eob | static inline uint16_t quant_gather_eob(__m256i eob) {
const __m128i eob_lo = _mm256_castsi256_si128(eob);
const __m128i eob_hi = _mm256_extractf128_si256(eob, 1);
__m128i eob_s = _mm_max_epi16(eob_lo, eob_hi);
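  // There is no horizontal-max instruction for 16-bit lanes, so compute
  // max(eob) as INT16_MAX - min(INT16_MAX - eob) via _mm_minpos_epu16.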
eob_s = _mm_subs_epu16(_mm_set1_epi16(INT16_MAX), eob_s);
eob_s = _mm_minpos_epu16(eob_s);
return INT16_MAX - _mm_extract_epi16(eob_s, 0);
} | pushq %rbp
movq %rsp, %rbp
andq $-0x20, %rsp
subq $0x120, %rsp # imm = 0x120
vmovaps %ymm0, 0x40(%rsp)
vmovaps 0x40(%rsp), %ymm0
vmovaps %ymm0, 0x60(%rsp)
vmovaps 0x60(%rsp), %ymm0
vmovdqa %xmm0, 0x30(%rsp)
vmovdqa 0x50(%rsp), %xmm0
vmovdqa %xmm0, 0x20(%rsp)
vmovdqa 0x30(%rsp), %xmm1
vmovdqa 0x20(%rsp), %xmm0
vmovdqa %xmm1, 0xa0(%rsp)
vmovdqa %xmm0, 0x90(%rsp)
vmovdqa 0xa0(%rsp), %xmm0
vmovdqa 0x90(%rsp), %xmm1
vpmaxsw %xmm1, %xmm0, %xmm0
vmovdqa %xmm0, 0x10(%rsp)
movw $0x7fff, 0xbe(%rsp) # imm = 0x7FFF
movw 0xbe(%rsp), %ax
movw %ax, 0xde(%rsp)
movw %ax, 0xdc(%rsp)
movw %ax, 0xda(%rsp)
movw %ax, 0xd8(%rsp)
movw %ax, 0xd6(%rsp)
movw %ax, 0xd4(%rsp)
movw %ax, 0xd2(%rsp)
movw %ax, 0xd0(%rsp)
movzwl 0xd0(%rsp), %eax
vmovd %eax, %xmm0
movzwl 0xd2(%rsp), %eax
vpinsrw $0x1, %eax, %xmm0, %xmm0
movzwl 0xd4(%rsp), %eax
vpinsrw $0x2, %eax, %xmm0, %xmm0
movzwl 0xd6(%rsp), %eax
vpinsrw $0x3, %eax, %xmm0, %xmm0
movzwl 0xd8(%rsp), %eax
vpinsrw $0x4, %eax, %xmm0, %xmm0
movzwl 0xda(%rsp), %eax
vpinsrw $0x5, %eax, %xmm0, %xmm0
movzwl 0xdc(%rsp), %eax
vpinsrw $0x6, %eax, %xmm0, %xmm0
movzwl 0xde(%rsp), %eax
vpinsrw $0x7, %eax, %xmm0, %xmm0
vmovdqa %xmm0, 0xc0(%rsp)
vmovdqa 0xc0(%rsp), %xmm1
vmovdqa 0x10(%rsp), %xmm0
vmovdqa %xmm1, 0xf0(%rsp)
vmovdqa %xmm0, 0xe0(%rsp)
vmovdqa 0xf0(%rsp), %xmm0
vmovdqa 0xe0(%rsp), %xmm1
vpsubusw %xmm1, %xmm0, %xmm0
vmovdqa %xmm0, 0x10(%rsp)
vmovdqa 0x10(%rsp), %xmm0
vmovdqa %xmm0, 0x100(%rsp)
vphminposuw 0x100(%rsp), %xmm0
vmovdqa %xmm0, 0x10(%rsp)
movl 0x10(%rsp), %eax
movzwl %ax, %ecx
movl $0x7fff, %eax # imm = 0x7FFF
subl %ecx, %eax
movq %rbp, %rsp
popq %rbp
vzeroupper
retq
nopw (%rax,%rax)
| /m-ab-s[P]aom/av1/encoder/x86/av1_quantize_avx2.c |
av1_quantize_fp_32x32_avx2 | void av1_quantize_fp_32x32_avx2(
const tran_low_t *coeff_ptr, intptr_t n_coeffs, const int16_t *zbin_ptr,
const int16_t *round_ptr, const int16_t *quant_ptr,
const int16_t *quant_shift_ptr, tran_low_t *qcoeff_ptr,
tran_low_t *dqcoeff_ptr, const int16_t *dequant_ptr, uint16_t *eob_ptr,
const int16_t *scan_ptr, const int16_t *iscan_ptr) {
(void)scan_ptr;
(void)zbin_ptr;
(void)quant_shift_ptr;
const int log_scale = 1;
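  // Same flow as av1_quantize_fp_avx2, but with log_scale = 1 for 32x32
  // transforms: init_qp() rescales the round/quant values and the dequantized
  // result is shifted right by 1 in quantize_fp_32x32().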
const unsigned int step = 16;
__m256i qp[3], thr;
__m256i eob = _mm256_setzero_si256();
init_qp(round_ptr, quant_ptr, dequant_ptr, log_scale, &thr, qp);
quantize_fp_32x32(&thr, qp, coeff_ptr, iscan_ptr, qcoeff_ptr, dqcoeff_ptr,
&eob);
coeff_ptr += step;
qcoeff_ptr += step;
dqcoeff_ptr += step;
iscan_ptr += step;
n_coeffs -= step;
update_qp(&thr, qp);
while (n_coeffs > 0) {
quantize_fp_32x32(&thr, qp, coeff_ptr, iscan_ptr, qcoeff_ptr, dqcoeff_ptr,
&eob);
coeff_ptr += step;
qcoeff_ptr += step;
dqcoeff_ptr += step;
iscan_ptr += step;
n_coeffs -= step;
}
*eob_ptr = quant_gather_eob(eob);
} | pushq %rbp
movq %rsp, %rbp
andq $-0x20, %rsp
subq $0xc40, %rsp # imm = 0xC40
movq 0x38(%rbp), %rax
movq 0x30(%rbp), %rax
movq 0x28(%rbp), %rax
movq 0x20(%rbp), %rax
movq 0x18(%rbp), %rax
movq 0x10(%rbp), %rax
movq %rdi, 0xf8(%rsp)
movq %rsi, 0xf0(%rsp)
movq %rdx, 0xe8(%rsp)
movq %rcx, 0xe0(%rsp)
movq %r8, 0xd8(%rsp)
movq %r9, 0xd0(%rsp)
movl $0x1, 0xcc(%rsp)
movl $0x10, 0xc8(%rsp)
vxorps %xmm0, %xmm0, %xmm0
vmovaps %ymm0, 0x100(%rsp)
vmovaps 0x100(%rsp), %ymm0
vmovaps %ymm0, 0x20(%rsp)
movq 0xe0(%rsp), %rdi
movq 0xd8(%rsp), %rsi
movq 0x20(%rbp), %rdx
movl $0x1, %ecx
leaq 0x40(%rsp), %r8
movq %r8, 0x10(%rsp)
leaq 0x60(%rsp), %r9
movq %r9, 0x18(%rsp)
vzeroupper
callq 0x94d920
movq 0x10(%rsp), %r8
movq 0x18(%rsp), %rdi
movq 0xf8(%rsp), %rsi
movq 0x38(%rbp), %rdx
movq 0x10(%rbp), %rcx
movq 0x18(%rbp), %rax
movq %r8, 0x438(%rsp)
movq %rdi, 0x430(%rsp)
movq %rsi, 0x428(%rsp)
movq %rdx, 0x420(%rsp)
movq %rcx, 0x418(%rsp)
movq %rax, 0x410(%rsp)
leaq 0x20(%rsp), %rax
movq %rax, 0x408(%rsp)
movq 0x428(%rsp), %rdi
callq 0x94f2c0
vmovaps %ymm0, 0x3e0(%rsp)
vmovaps 0x3e0(%rsp), %ymm0
vmovaps %ymm0, 0x460(%rsp)
vpabsw 0x460(%rsp), %ymm0
vmovaps %ymm0, 0x3c0(%rsp)
vmovaps 0x3c0(%rsp), %ymm1
movq 0x438(%rsp), %rax
vmovaps (%rax), %ymm0
vmovaps %ymm1, 0x700(%rsp)
vmovaps %ymm0, 0x6e0(%rsp)
vmovaps 0x700(%rsp), %ymm0
vmovaps 0x6e0(%rsp), %ymm1
vpcmpgtw %ymm1, %ymm0, %ymm0
vmovdqa %ymm0, 0x3a0(%rsp)
vmovdqa 0x3a0(%rsp), %ymm0
vmovdqa %ymm0, 0x7a0(%rsp)
vmovdqa 0x7a0(%rsp), %ymm0
vpmovmskb %ymm0, %eax
movl %eax, 0x39c(%rsp)
cmpl $0x0, 0x39c(%rsp)
je 0x94e4ae
vmovaps 0x3c0(%rsp), %ymm1
movq 0x430(%rsp), %rax
vmovaps (%rax), %ymm0
vmovaps %ymm1, 0x4c0(%rsp)
vmovaps %ymm0, 0x4a0(%rsp)
vmovaps 0x4c0(%rsp), %ymm0
vmovaps 0x4a0(%rsp), %ymm1
vpaddsw %ymm1, %ymm0, %ymm0
vmovaps %ymm0, 0x360(%rsp)
vmovaps 0x360(%rsp), %ymm1
movq 0x430(%rsp), %rax
vmovaps 0x20(%rax), %ymm0
vmovaps %ymm1, 0xb40(%rsp)
vmovaps %ymm0, 0xb20(%rsp)
vmovaps 0xb40(%rsp), %ymm0
vmovaps 0xb20(%rsp), %ymm1
vpmulhuw %ymm1, %ymm0, %ymm0
vmovaps %ymm0, 0x340(%rsp)
vmovaps 0x340(%rsp), %ymm1
vmovaps 0x3e0(%rsp), %ymm0
vmovaps %ymm1, 0x580(%rsp)
vmovaps %ymm0, 0x560(%rsp)
vmovaps 0x580(%rsp), %ymm0
vmovaps 0x560(%rsp), %ymm1
vpsignw %ymm1, %ymm0, %ymm0
vmovaps %ymm0, 0x320(%rsp)
vmovaps 0x340(%rsp), %ymm1
movq 0x430(%rsp), %rax
vmovaps 0x40(%rax), %ymm0
vmovaps %ymm1, 0x640(%rsp)
vmovaps %ymm0, 0x620(%rsp)
vmovaps 0x640(%rsp), %ymm0
vmovaps 0x620(%rsp), %ymm1
vpmullw %ymm1, %ymm0, %ymm0
vmovaps %ymm0, 0xbc0(%rsp)
movl $0x1, 0xbbc(%rsp)
vmovaps 0xbc0(%rsp), %ymm0
vmovd 0xbbc(%rsp), %xmm1
vpsrlw %xmm1, %ymm0, %ymm0
vmovaps %ymm0, 0x300(%rsp)
vmovaps 0x340(%rsp), %ymm1
vxorps %xmm0, %xmm0, %xmm0
vmovaps %ymm0, 0x440(%rsp)
vmovaps 0x440(%rsp), %ymm0
vmovaps %ymm1, 0x6c0(%rsp)
vmovaps %ymm0, 0x6a0(%rsp)
vmovaps 0x6c0(%rsp), %ymm0
vmovaps 0x6a0(%rsp), %ymm1
vpcmpgtw %ymm1, %ymm0, %ymm0
vmovaps %ymm0, 0x2e0(%rsp)
vmovaps 0x300(%rsp), %ymm1
vmovaps 0x3e0(%rsp), %ymm0
vmovaps %ymm1, 0x540(%rsp)
vmovaps %ymm0, 0x520(%rsp)
vmovaps 0x540(%rsp), %ymm0
vmovaps 0x520(%rsp), %ymm1
vpsignw %ymm1, %ymm0, %ymm0
vmovaps %ymm0, 0x2c0(%rsp)
vmovaps 0x320(%rsp), %ymm0
movq 0x418(%rsp), %rdi
callq 0x94f340
vmovaps 0x2c0(%rsp), %ymm0
movq 0x410(%rsp), %rdi
callq 0x94f340
movq 0x420(%rsp), %rax
movq 0x408(%rsp), %rcx
vmovaps (%rcx), %ymm1
vmovaps 0x2e0(%rsp), %ymm0
movq %rax, 0x8b0(%rsp)
vmovaps %ymm1, 0x880(%rsp)
vmovaps %ymm0, 0x860(%rsp)
movq 0x8b0(%rsp), %rax
movq %rax, 0x8b8(%rsp)
movq 0x8b8(%rsp), %rax
vmovups (%rax), %ymm0
vmovaps %ymm0, 0x840(%rsp)
vmovaps 0x840(%rsp), %ymm0
vpermq $0xd8, %ymm0, %ymm0 # ymm0 = ymm0[0,2,1,3]
vmovdqa %ymm0, 0x820(%rsp)
vmovdqa 0x820(%rsp), %ymm1
vmovdqa 0x860(%rsp), %ymm0
vmovdqa %ymm1, 0x8e0(%rsp)
vmovdqa %ymm0, 0x8c0(%rsp)
vmovdqa 0x8e0(%rsp), %ymm0
vmovdqa 0x8c0(%rsp), %ymm1
vpsubw %ymm1, %ymm0, %ymm0
vmovdqa %ymm0, 0x800(%rsp)
vmovdqa 0x800(%rsp), %ymm1
vmovdqa 0x860(%rsp), %ymm0
vmovdqa %ymm1, 0x920(%rsp)
vmovdqa %ymm0, 0x900(%rsp)
vmovdqa 0x920(%rsp), %ymm0
vpand 0x900(%rsp), %ymm0, %ymm0
vmovdqa %ymm0, 0x7e0(%rsp)
vmovdqa 0x880(%rsp), %ymm1
vmovdqa 0x7e0(%rsp), %ymm0
vmovdqa %ymm1, 0x960(%rsp)
vmovdqa %ymm0, 0x940(%rsp)
vmovdqa 0x960(%rsp), %ymm0
vmovdqa 0x940(%rsp), %ymm1
vpmaxsw %ymm1, %ymm0, %ymm0
movq 0x408(%rsp), %rax
vmovdqa %ymm0, (%rax)
jmp 0x94e4cb
movq 0x418(%rsp), %rdi
vzeroupper
callq 0x94f480
movq 0x410(%rsp), %rdi
callq 0x94f480
movq 0xf8(%rsp), %rax
addq $0x40, %rax
movq %rax, 0xf8(%rsp)
movq 0x10(%rbp), %rax
addq $0x40, %rax
movq %rax, 0x10(%rbp)
movq 0x18(%rbp), %rax
addq $0x40, %rax
movq %rax, 0x18(%rbp)
movq 0x38(%rbp), %rax
addq $0x20, %rax
movq %rax, 0x38(%rbp)
movq 0xf0(%rsp), %rax
subq $0x10, %rax
movq %rax, 0xf0(%rsp)
leaq 0x60(%rsp), %rsi
leaq 0x40(%rsp), %rdi
vzeroupper
callq 0x94dd90
cmpq $0x0, 0xf0(%rsp)
jle 0x94e9f6
movq 0xf8(%rsp), %rsi
movq 0x38(%rbp), %rdx
movq 0x10(%rbp), %rcx
movq 0x18(%rbp), %rax
leaq 0x40(%rsp), %rdi
movq %rdi, 0x298(%rsp)
leaq 0x60(%rsp), %rdi
movq %rdi, 0x290(%rsp)
movq %rsi, 0x288(%rsp)
movq %rdx, 0x280(%rsp)
movq %rcx, 0x278(%rsp)
movq %rax, 0x270(%rsp)
leaq 0x20(%rsp), %rax
movq %rax, 0x268(%rsp)
movq 0x288(%rsp), %rdi
callq 0x94f2c0
vmovaps %ymm0, 0x240(%rsp)
vmovaps 0x240(%rsp), %ymm0
vmovaps %ymm0, 0x480(%rsp)
vpabsw 0x480(%rsp), %ymm0
vmovaps %ymm0, 0x220(%rsp)
vmovaps 0x220(%rsp), %ymm1
movq 0x298(%rsp), %rax
vmovaps (%rax), %ymm0
vmovaps %ymm1, 0x780(%rsp)
vmovaps %ymm0, 0x760(%rsp)
vmovaps 0x780(%rsp), %ymm0
vmovaps 0x760(%rsp), %ymm1
vpcmpgtw %ymm1, %ymm0, %ymm0
vmovdqa %ymm0, 0x200(%rsp)
vmovdqa 0x200(%rsp), %ymm0
vmovdqa %ymm0, 0x7c0(%rsp)
vmovdqa 0x7c0(%rsp), %ymm0
vpmovmskb %ymm0, %eax
movl %eax, 0x1fc(%rsp)
cmpl $0x0, 0x1fc(%rsp)
je 0x94e988
vmovaps 0x220(%rsp), %ymm1
movq 0x290(%rsp), %rax
vmovaps (%rax), %ymm0
vmovaps %ymm1, 0x500(%rsp)
vmovaps %ymm0, 0x4e0(%rsp)
vmovaps 0x500(%rsp), %ymm0
vmovaps 0x4e0(%rsp), %ymm1
vpaddsw %ymm1, %ymm0, %ymm0
vmovaps %ymm0, 0x1c0(%rsp)
vmovaps 0x1c0(%rsp), %ymm1
movq 0x290(%rsp), %rax
vmovaps 0x20(%rax), %ymm0
vmovaps %ymm1, 0xb80(%rsp)
vmovaps %ymm0, 0xb60(%rsp)
vmovaps 0xb80(%rsp), %ymm0
vmovaps 0xb60(%rsp), %ymm1
vpmulhuw %ymm1, %ymm0, %ymm0
vmovaps %ymm0, 0x1a0(%rsp)
vmovaps 0x1a0(%rsp), %ymm1
vmovaps 0x240(%rsp), %ymm0
vmovaps %ymm1, 0x600(%rsp)
vmovaps %ymm0, 0x5e0(%rsp)
vmovaps 0x600(%rsp), %ymm0
vmovaps 0x5e0(%rsp), %ymm1
vpsignw %ymm1, %ymm0, %ymm0
vmovaps %ymm0, 0x180(%rsp)
vmovaps 0x1a0(%rsp), %ymm1
movq 0x290(%rsp), %rax
vmovaps 0x40(%rax), %ymm0
vmovaps %ymm1, 0x680(%rsp)
vmovaps %ymm0, 0x660(%rsp)
vmovaps 0x680(%rsp), %ymm0
vmovaps 0x660(%rsp), %ymm1
vpmullw %ymm1, %ymm0, %ymm0
vmovaps %ymm0, 0xc00(%rsp)
movl $0x1, 0xbfc(%rsp)
vmovaps 0xc00(%rsp), %ymm0
vmovd 0xbfc(%rsp), %xmm1
vpsrlw %xmm1, %ymm0, %ymm0
vmovaps %ymm0, 0x160(%rsp)
vmovaps 0x1a0(%rsp), %ymm1
vxorps %xmm0, %xmm0, %xmm0
vmovaps %ymm0, 0x2a0(%rsp)
vmovaps 0x2a0(%rsp), %ymm0
vmovaps %ymm1, 0x740(%rsp)
vmovaps %ymm0, 0x720(%rsp)
vmovaps 0x740(%rsp), %ymm0
vmovaps 0x720(%rsp), %ymm1
vpcmpgtw %ymm1, %ymm0, %ymm0
vmovaps %ymm0, 0x140(%rsp)
vmovaps 0x160(%rsp), %ymm1
vmovaps 0x240(%rsp), %ymm0
vmovaps %ymm1, 0x5c0(%rsp)
vmovaps %ymm0, 0x5a0(%rsp)
vmovaps 0x5c0(%rsp), %ymm0
vmovaps 0x5a0(%rsp), %ymm1
vpsignw %ymm1, %ymm0, %ymm0
vmovaps %ymm0, 0x120(%rsp)
vmovaps 0x180(%rsp), %ymm0
movq 0x278(%rsp), %rdi
callq 0x94f340
vmovaps 0x120(%rsp), %ymm0
movq 0x270(%rsp), %rdi
callq 0x94f340
movq 0x280(%rsp), %rax
movq 0x268(%rsp), %rcx
vmovaps (%rcx), %ymm1
vmovaps 0x140(%rsp), %ymm0
movq %rax, 0xa50(%rsp)
vmovaps %ymm1, 0xa20(%rsp)
vmovaps %ymm0, 0xa00(%rsp)
movq 0xa50(%rsp), %rax
movq %rax, 0xa58(%rsp)
movq 0xa58(%rsp), %rax
vmovups (%rax), %ymm0
vmovaps %ymm0, 0x9e0(%rsp)
vmovaps 0x9e0(%rsp), %ymm0
vpermq $0xd8, %ymm0, %ymm0 # ymm0 = ymm0[0,2,1,3]
vmovdqa %ymm0, 0x9c0(%rsp)
vmovdqa 0x9c0(%rsp), %ymm1
vmovdqa 0xa00(%rsp), %ymm0
vmovdqa %ymm1, 0xa80(%rsp)
vmovdqa %ymm0, 0xa60(%rsp)
vmovdqa 0xa80(%rsp), %ymm0
vmovdqa 0xa60(%rsp), %ymm1
vpsubw %ymm1, %ymm0, %ymm0
vmovdqa %ymm0, 0x9a0(%rsp)
vmovdqa 0x9a0(%rsp), %ymm1
vmovdqa 0xa00(%rsp), %ymm0
vmovdqa %ymm1, 0xac0(%rsp)
vmovdqa %ymm0, 0xaa0(%rsp)
vmovdqa 0xac0(%rsp), %ymm0
vpand 0xaa0(%rsp), %ymm0, %ymm0
vmovdqa %ymm0, 0x980(%rsp)
vmovdqa 0xa20(%rsp), %ymm1
vmovdqa 0x980(%rsp), %ymm0
vmovdqa %ymm1, 0xb00(%rsp)
vmovdqa %ymm0, 0xae0(%rsp)
vmovdqa 0xb00(%rsp), %ymm0
vmovdqa 0xae0(%rsp), %ymm1
vpmaxsw %ymm1, %ymm0, %ymm0
movq 0x268(%rsp), %rax
vmovdqa %ymm0, (%rax)
jmp 0x94e9a5
movq 0x278(%rsp), %rdi
vzeroupper
callq 0x94f480
movq 0x270(%rsp), %rdi
callq 0x94f480
movq 0xf8(%rsp), %rax
addq $0x40, %rax
movq %rax, 0xf8(%rsp)
movq 0x10(%rbp), %rax
addq $0x40, %rax
movq %rax, 0x10(%rbp)
movq 0x18(%rbp), %rax
addq $0x40, %rax
movq %rax, 0x18(%rbp)
movq 0x38(%rbp), %rax
addq $0x20, %rax
movq %rax, 0x38(%rbp)
movq 0xf0(%rsp), %rax
subq $0x10, %rax
movq %rax, 0xf0(%rsp)
jmp 0x94e529
vmovdqa 0x20(%rsp), %ymm0
callq 0x94ddf0
movw %ax, %cx
movq 0x28(%rbp), %rax
movw %cx, (%rax)
movq %rbp, %rsp
popq %rbp
vzeroupper
retq
nopw %cs:(%rax,%rax)
| /m-ab-s[P]aom/av1/encoder/x86/av1_quantize_avx2.c |
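The assembly tail above ends by calling a reduction helper and storing a 16-bit result through what is presumably the eob pointer, i.e. the per-lane end-of-block candidates are collapsed into a single scalar. A minimal sketch of such a horizontal-max reduction over signed 16-bit lanes follows; the helper name and the exact shuffle order are assumptions, not read out of this dump.

#include <immintrin.h>
#include <stdint.h>

// Hedged sketch: collapse 16 int16 eob candidates into their maximum.
static uint16_t gather_eob_sketch(__m256i eob) {
  const __m128i lo = _mm256_castsi256_si128(eob);
  const __m128i hi = _mm256_extracti128_si256(eob, 1);
  __m128i m = _mm_max_epi16(lo, hi);           // 16 lanes -> 8
  m = _mm_max_epi16(m, _mm_srli_si128(m, 8));  // 8 -> 4
  m = _mm_max_epi16(m, _mm_srli_si128(m, 4));  // 4 -> 2
  m = _mm_max_epi16(m, _mm_srli_si128(m, 2));  // 2 -> 1
  return (uint16_t)_mm_extract_epi16(m, 0);
}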
av1_quantize_fp_64x64_avx2 | void av1_quantize_fp_64x64_avx2(
const tran_low_t *coeff_ptr, intptr_t n_coeffs, const int16_t *zbin_ptr,
const int16_t *round_ptr, const int16_t *quant_ptr,
const int16_t *quant_shift_ptr, tran_low_t *qcoeff_ptr,
tran_low_t *dqcoeff_ptr, const int16_t *dequant_ptr, uint16_t *eob_ptr,
const int16_t *scan_ptr, const int16_t *iscan_ptr) {
(void)scan_ptr;
(void)zbin_ptr;
(void)quant_shift_ptr;
const int log_scale = 2;
const unsigned int step = 16;
__m256i qp[3], thr;
__m256i eob = _mm256_setzero_si256();
init_qp(round_ptr, quant_ptr, dequant_ptr, log_scale, &thr, qp);
quantize_fp_64x64(&thr, qp, coeff_ptr, iscan_ptr, qcoeff_ptr, dqcoeff_ptr,
&eob);
coeff_ptr += step;
qcoeff_ptr += step;
dqcoeff_ptr += step;
iscan_ptr += step;
n_coeffs -= step;
update_qp(&thr, qp);
while (n_coeffs > 0) {
quantize_fp_64x64(&thr, qp, coeff_ptr, iscan_ptr, qcoeff_ptr, dqcoeff_ptr,
&eob);
coeff_ptr += step;
qcoeff_ptr += step;
dqcoeff_ptr += step;
iscan_ptr += step;
n_coeffs -= step;
}
*eob_ptr = quant_gather_eob(eob);
} | pushq %rbp
movq %rsp, %rbp
andq $-0x20, %rsp
subq $0x140, %rsp # imm = 0x140
movq 0x38(%rbp), %rax
movq 0x30(%rbp), %rax
movq 0x28(%rbp), %rax
movq 0x20(%rbp), %rax
movq 0x18(%rbp), %rax
movq 0x10(%rbp), %rax
movq %rdi, 0xf8(%rsp)
movq %rsi, 0xf0(%rsp)
movq %rdx, 0xe8(%rsp)
movq %rcx, 0xe0(%rsp)
movq %r8, 0xd8(%rsp)
movq %r9, 0xd0(%rsp)
movl $0x2, 0xcc(%rsp)
movl $0x10, 0xc8(%rsp)
vxorps %xmm0, %xmm0, %xmm0
vmovaps %ymm0, 0x100(%rsp)
vmovdqa 0x100(%rsp), %ymm0
vmovdqa %ymm0, 0x20(%rsp)
movq 0xe0(%rsp), %rdi
movq 0xd8(%rsp), %rsi
movq 0x20(%rbp), %rdx
leaq 0x60(%rsp), %r9
movl $0x2, %ecx
leaq 0x40(%rsp), %r8
vzeroupper
callq 0x94d920
leaq 0x60(%rsp), %rsi
movq 0xf8(%rsp), %rdx
movq 0x38(%rbp), %rcx
movq 0x10(%rbp), %r8
movq 0x18(%rbp), %r9
leaq 0x40(%rsp), %rdi
leaq 0x20(%rsp), %rax
movq %rax, (%rsp)
callq 0x94ec00
movq 0xf8(%rsp), %rax
addq $0x40, %rax
movq %rax, 0xf8(%rsp)
movq 0x10(%rbp), %rax
addq $0x40, %rax
movq %rax, 0x10(%rbp)
movq 0x18(%rbp), %rax
addq $0x40, %rax
movq %rax, 0x18(%rbp)
movq 0x38(%rbp), %rax
addq $0x20, %rax
movq %rax, 0x38(%rbp)
movq 0xf0(%rsp), %rax
subq $0x10, %rax
movq %rax, 0xf0(%rsp)
leaq 0x60(%rsp), %rsi
leaq 0x40(%rsp), %rdi
callq 0x94dd90
cmpq $0x0, 0xf0(%rsp)
jle 0x94ebe3
leaq 0x60(%rsp), %rsi
movq 0xf8(%rsp), %rdx
movq 0x38(%rbp), %rcx
movq 0x10(%rbp), %r8
movq 0x18(%rbp), %r9
leaq 0x40(%rsp), %rdi
leaq 0x20(%rsp), %rax
movq %rax, (%rsp)
callq 0x94ec00
movq 0xf8(%rsp), %rax
addq $0x40, %rax
movq %rax, 0xf8(%rsp)
movq 0x10(%rbp), %rax
addq $0x40, %rax
movq %rax, 0x10(%rbp)
movq 0x18(%rbp), %rax
addq $0x40, %rax
movq %rax, 0x18(%rbp)
movq 0x38(%rbp), %rax
addq $0x20, %rax
movq %rax, 0x38(%rbp)
movq 0xf0(%rsp), %rax
subq $0x10, %rax
movq %rax, 0xf0(%rsp)
jmp 0x94eb5b
vmovdqa 0x20(%rsp), %ymm0
callq 0x94ddf0
movw %ax, %cx
movq 0x28(%rbp), %rax
movw %cx, (%rax)
movq %rbp, %rsp
popq %rbp
vzeroupper
retq
| /m-ab-s[P]aom/av1/encoder/x86/av1_quantize_avx2.c |
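av1_quantize_fp_64x64_avx2 above only drives the vector kernel in blocks of 16 coefficients; the arithmetic being vectorized is the "fp" quantizer rule at log_scale = 2. Below is a hedged scalar sketch of that rule. The clamping and skip-threshold details of the real helper are omitted, and the layout assumption that parameter index 0 holds the DC value and index 1 the AC value is mine, not taken from this dump.

#include <stdint.h>
#include <stdlib.h>

// Hedged scalar sketch of the fp quantizer at log_scale = 2 (64x64 transforms).
static void quantize_fp_sketch(const int32_t *coeff, intptr_t n,
                               const int16_t *round, const int16_t *quant,
                               const int16_t *dequant, int32_t *qcoeff,
                               int32_t *dqcoeff, const int16_t *iscan,
                               uint16_t *eob_ptr) {
  const int log_scale = 2;
  int eob = -1;
  for (intptr_t i = 0; i < n; ++i) {
    const int k = (i == 0) ? 0 : 1;  // DC vs AC parameters (assumed layout)
    const int sign = coeff[i] < 0 ? -1 : 1;
    const int abs_c = abs(coeff[i]);
    // Round is pre-scaled down by log_scale for the larger transforms.
    const int rounded =
        abs_c + ((round[k] + (1 << (log_scale - 1))) >> log_scale);
    const int q = (int)(((int64_t)rounded * quant[k]) >> (16 - log_scale));
    qcoeff[i] = sign * q;
    dqcoeff[i] = sign * (int)(((int64_t)q * dequant[k]) >> log_scale);
    if (q && iscan[i] > eob) eob = iscan[i];
  }
  *eob_ptr = (uint16_t)(eob + 1);
}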
av1_block_error_lp_avx2 | int64_t av1_block_error_lp_avx2(const int16_t *coeff, const int16_t *dqcoeff,
intptr_t block_size) {
assert(block_size % 16 == 0);
__m256i sse_256 = _mm256_setzero_si256();
int64_t sse;
if (block_size == 16)
av1_block_error_block_size16_avx2(coeff, dqcoeff, &sse_256);
else if (block_size == 32)
av1_block_error_block_size32_avx2(coeff, dqcoeff, &sse_256);
else
av1_block_error_block_size64_avx2(coeff, dqcoeff, &sse_256, block_size);
  // Save the upper 64 bits of each 128-bit lane.
const __m256i sse_hi = _mm256_srli_si256(sse_256, 8);
  // Add the upper 64 bits to the lower 64 bits.
sse_256 = _mm256_add_epi64(sse_256, sse_hi);
  // Accumulate the sse_256 register to get the final sse.
const __m128i sse_128 = _mm_add_epi64(_mm256_castsi256_si128(sse_256),
_mm256_extractf128_si256(sse_256, 1));
// Store the results.
_mm_storel_epi64((__m128i *)&sse, sse_128);
return sse;
} | pushq %rbp
movq %rsp, %rbp
andq $-0x20, %rsp
subq $0x180, %rsp # imm = 0x180
movq %rdi, 0x98(%rsp)
movq %rsi, 0x90(%rsp)
movq %rdx, 0x88(%rsp)
vxorps %xmm0, %xmm0, %xmm0
vmovaps %ymm0, 0xa0(%rsp)
vmovdqa 0xa0(%rsp), %ymm0
vmovdqa %ymm0, 0x60(%rsp)
cmpq $0x10, 0x88(%rsp)
jne 0x94f57d
movq 0x98(%rsp), %rdi
movq 0x90(%rsp), %rsi
leaq 0x60(%rsp), %rdx
vzeroupper
callq 0x94f6b0
jmp 0x94f5ce
cmpq $0x20, 0x88(%rsp)
jne 0x94f5a7
movq 0x98(%rsp), %rdi
movq 0x90(%rsp), %rsi
leaq 0x60(%rsp), %rdx
vzeroupper
callq 0x94f820
jmp 0x94f5cc
movq 0x98(%rsp), %rdi
movq 0x90(%rsp), %rsi
movq 0x88(%rsp), %rcx
leaq 0x60(%rsp), %rdx
vzeroupper
callq 0x94fb40
jmp 0x94f5ce
vmovaps 0x60(%rsp), %ymm0
vpsrldq $0x8, %ymm0, %ymm0 # ymm0 = ymm0[8,9,10,11,12,13,14,15],zero,zero,zero,zero,zero,zero,zero,zero,ymm0[24,25,26,27,28,29,30,31],zero,zero,zero,zero,zero,zero,zero,zero
vmovaps %ymm0, 0x20(%rsp)
vmovaps 0x60(%rsp), %ymm1
vmovaps 0x20(%rsp), %ymm0
vmovaps %ymm1, 0xe0(%rsp)
vmovaps %ymm0, 0xc0(%rsp)
vmovaps 0xe0(%rsp), %ymm0
vmovaps 0xc0(%rsp), %ymm1
vpaddq %ymm1, %ymm0, %ymm0
vmovaps %ymm0, 0x60(%rsp)
vmovaps 0x60(%rsp), %ymm0
vmovaps %ymm0, 0x120(%rsp)
vmovaps 0x120(%rsp), %ymm0
vmovaps %xmm0, %xmm1
vmovdqa 0x70(%rsp), %xmm0
vmovdqa %xmm1, 0x110(%rsp)
vmovdqa %xmm0, 0x100(%rsp)
vmovdqa 0x110(%rsp), %xmm0
vmovdqa 0x100(%rsp), %xmm1
vpaddq %xmm1, %xmm0, %xmm0
vmovdqa %xmm0, 0x10(%rsp)
vmovdqa 0x10(%rsp), %xmm0
leaq 0x58(%rsp), %rax
movq %rax, 0x168(%rsp)
vmovdqa %xmm0, 0x150(%rsp)
movq 0x150(%rsp), %rcx
movq 0x168(%rsp), %rax
movq %rcx, (%rax)
movq 0x58(%rsp), %rax
movq %rbp, %rsp
popq %rbp
vzeroupper
retq
nopw %cs:(%rax,%rax)
| /m-ab-s[P]aom/av1/encoder/x86/error_intrin_avx2.c |
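As a cross-check for the horizontal reduction above, this is the scalar quantity av1_block_error_lp_avx2 computes: the sum of squared differences between dqcoeff and coeff, accumulated in 64 bits. The reference function name is mine.

#include <stdint.h>

// Scalar reference for the low-precision block error.
static int64_t block_error_lp_ref(const int16_t *coeff, const int16_t *dqcoeff,
                                  intptr_t block_size) {
  int64_t sse = 0;
  for (intptr_t i = 0; i < block_size; ++i) {
    const int diff = dqcoeff[i] - coeff[i];
    sse += (int64_t)diff * diff;
  }
  return sse;
}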
av1_block_error_block_size16_avx2 | static inline void av1_block_error_block_size16_avx2(const int16_t *coeff,
const int16_t *dqcoeff,
__m256i *sse_256) {
const __m256i _coeff = _mm256_loadu_si256((const __m256i *)coeff);
const __m256i _dqcoeff = _mm256_loadu_si256((const __m256i *)dqcoeff);
// d0 d1 d2 d3 d4 d5 d6 d7 d8 d9 d10 d11 d12 d13 d14 d15
const __m256i diff = _mm256_sub_epi16(_dqcoeff, _coeff);
// r0 r1 r2 r3 r4 r5 r6 r7
const __m256i error = _mm256_madd_epi16(diff, diff);
// r0+r1 r2+r3 | r0+r1 r2+r3 | r4+r5 r6+r7 | r4+r5 r6+r7
const __m256i error_hi = _mm256_hadd_epi32(error, error);
// r0+r1 | r2+r3 | r4+r5 | r6+r7
*sse_256 = _mm256_unpacklo_epi32(error_hi, _mm256_setzero_si256());
} | pushq %rbp
movq %rsp, %rbp
andq $-0x20, %rsp
subq $0x220, %rsp # imm = 0x220
movq %rdi, 0xb8(%rsp)
movq %rsi, 0xb0(%rsp)
movq %rdx, 0xa8(%rsp)
movq 0xb8(%rsp), %rax
movq %rax, 0x1b8(%rsp)
movq 0x1b8(%rsp), %rax
vmovups (%rax), %ymm0
vmovaps %ymm0, 0x80(%rsp)
movq 0xb0(%rsp), %rax
movq %rax, 0x1b0(%rsp)
movq 0x1b0(%rsp), %rax
vmovups (%rax), %ymm0
vmovaps %ymm0, 0x60(%rsp)
vmovaps 0x60(%rsp), %ymm1
vmovaps 0x80(%rsp), %ymm0
vmovaps %ymm1, 0x100(%rsp)
vmovaps %ymm0, 0xe0(%rsp)
vmovaps 0x100(%rsp), %ymm0
vmovaps 0xe0(%rsp), %ymm1
vpsubw %ymm1, %ymm0, %ymm0
vmovaps %ymm0, 0x40(%rsp)
vmovaps 0x40(%rsp), %ymm0
vmovaps %ymm0, 0x140(%rsp)
vmovaps %ymm0, 0x120(%rsp)
vmovaps 0x140(%rsp), %ymm0
vmovaps 0x120(%rsp), %ymm1
vpmaddwd %ymm1, %ymm0, %ymm0
vmovaps %ymm0, 0x20(%rsp)
vmovaps 0x20(%rsp), %ymm0
vmovaps %ymm0, 0x1e0(%rsp)
vmovaps %ymm0, 0x1c0(%rsp)
vmovaps 0x1e0(%rsp), %ymm0
vmovaps 0x1c0(%rsp), %ymm1
vphaddd %ymm1, %ymm0, %ymm0
vmovaps %ymm0, (%rsp)
vmovaps (%rsp), %ymm1
vxorps %xmm0, %xmm0, %xmm0
vmovaps %ymm0, 0xc0(%rsp)
vmovaps 0xc0(%rsp), %ymm0
vmovaps %ymm1, 0x180(%rsp)
vmovaps %ymm0, 0x160(%rsp)
vmovaps 0x180(%rsp), %ymm0
vmovaps 0x160(%rsp), %ymm1
vpunpckldq %ymm1, %ymm0, %ymm0 # ymm0 = ymm0[0],ymm1[0],ymm0[1],ymm1[1],ymm0[4],ymm1[4],ymm0[5],ymm1[5]
movq 0xa8(%rsp), %rax
vmovdqa %ymm0, (%rax)
movq %rbp, %rsp
popq %rbp
vzeroupper
retq
nopw (%rax,%rax)
| /m-ab-s[P]aom/av1/encoder/x86/error_intrin_avx2.c |
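The lane comments in av1_block_error_block_size16_avx2 rely on the semantics of _mm256_madd_epi16: each 32-bit output lane is the sum of two adjacent 16-bit products, so a single madd turns 16 differences into 8 partial sums of squares. A scalar model of one such madd (helper name is mine):

#include <stdint.h>

// out[i] = a[2i]*b[2i] + a[2i+1]*b[2i+1] in 32-bit precision. The only
// overflow corner case is when both products are 0x8000 * 0x8000, which the
// hardware wraps to INT32_MIN.
static void madd_epi16_model(const int16_t a[16], const int16_t b[16],
                             int32_t out[8]) {
  for (int i = 0; i < 8; ++i) {
    out[i] = (int32_t)a[2 * i] * b[2 * i] +
             (int32_t)a[2 * i + 1] * b[2 * i + 1];
  }
}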
av1_block_error_block_size32_avx2 | static inline void av1_block_error_block_size32_avx2(const int16_t *coeff,
const int16_t *dqcoeff,
__m256i *sse_256) {
const __m256i zero = _mm256_setzero_si256();
const __m256i _coeff_0 = _mm256_loadu_si256((const __m256i *)coeff);
const __m256i _dqcoeff_0 = _mm256_loadu_si256((const __m256i *)dqcoeff);
const __m256i _coeff_1 = _mm256_loadu_si256((const __m256i *)(coeff + 16));
const __m256i _dqcoeff_1 =
_mm256_loadu_si256((const __m256i *)(dqcoeff + 16));
// d0 d1 d2 d3 d4 d5 d6 d7 d8 d9 d10 d11 d12 d13 d14 d15
const __m256i diff_0 = _mm256_sub_epi16(_dqcoeff_0, _coeff_0);
const __m256i diff_1 = _mm256_sub_epi16(_dqcoeff_1, _coeff_1);
// r0 r1 r2 r3 r4 r5 r6 r7
const __m256i error_0 = _mm256_madd_epi16(diff_0, diff_0);
const __m256i error_1 = _mm256_madd_epi16(diff_1, diff_1);
const __m256i err_final_0 = _mm256_add_epi32(error_0, error_1);
// For extreme input values, the accumulation needs to happen in 64 bit
// precision to avoid any overflow.
const __m256i exp0_error_lo = _mm256_unpacklo_epi32(err_final_0, zero);
const __m256i exp0_error_hi = _mm256_unpackhi_epi32(err_final_0, zero);
const __m256i sum_temp_0 = _mm256_add_epi64(exp0_error_hi, exp0_error_lo);
*sse_256 = _mm256_add_epi64(*sse_256, sum_temp_0);
} | pushq %rbp
movq %rsp, %rbp
andq $-0x20, %rsp
subq $0x460, %rsp # imm = 0x460
movq %rdi, 0x1b8(%rsp)
movq %rsi, 0x1b0(%rsp)
movq %rdx, 0x1a8(%rsp)
vxorps %xmm0, %xmm0, %xmm0
vmovaps %ymm0, 0x1c0(%rsp)
vmovaps 0x1c0(%rsp), %ymm0
vmovaps %ymm0, 0x180(%rsp)
movq 0x1b8(%rsp), %rax
movq %rax, 0x3f8(%rsp)
movq 0x3f8(%rsp), %rax
vmovups (%rax), %ymm0
vmovaps %ymm0, 0x160(%rsp)
movq 0x1b0(%rsp), %rax
movq %rax, 0x3f0(%rsp)
movq 0x3f0(%rsp), %rax
vmovups (%rax), %ymm0
vmovaps %ymm0, 0x140(%rsp)
movq 0x1b8(%rsp), %rax
addq $0x20, %rax
movq %rax, 0x3e8(%rsp)
movq 0x3e8(%rsp), %rax
vmovups (%rax), %ymm0
vmovaps %ymm0, 0x120(%rsp)
movq 0x1b0(%rsp), %rax
addq $0x20, %rax
movq %rax, 0x3e0(%rsp)
movq 0x3e0(%rsp), %rax
vmovups (%rax), %ymm0
vmovaps %ymm0, 0x100(%rsp)
vmovaps 0x140(%rsp), %ymm1
vmovaps 0x160(%rsp), %ymm0
vmovaps %ymm1, 0x2c0(%rsp)
vmovaps %ymm0, 0x2a0(%rsp)
vmovaps 0x2c0(%rsp), %ymm0
vmovaps 0x2a0(%rsp), %ymm1
vpsubw %ymm1, %ymm0, %ymm0
vmovaps %ymm0, 0xe0(%rsp)
vmovaps 0x100(%rsp), %ymm1
vmovaps 0x120(%rsp), %ymm0
vmovaps %ymm1, 0x280(%rsp)
vmovaps %ymm0, 0x260(%rsp)
vmovaps 0x280(%rsp), %ymm0
vmovaps 0x260(%rsp), %ymm1
vpsubw %ymm1, %ymm0, %ymm0
vmovaps %ymm0, 0xc0(%rsp)
vmovaps 0xe0(%rsp), %ymm0
vmovaps %ymm0, 0x340(%rsp)
vmovaps %ymm0, 0x320(%rsp)
vmovaps 0x340(%rsp), %ymm0
vmovaps 0x320(%rsp), %ymm1
vpmaddwd %ymm1, %ymm0, %ymm0
vmovaps %ymm0, 0xa0(%rsp)
vmovaps 0xc0(%rsp), %ymm0
vmovaps %ymm0, 0x300(%rsp)
vmovaps %ymm0, 0x2e0(%rsp)
vmovaps 0x300(%rsp), %ymm0
vmovaps 0x2e0(%rsp), %ymm1
vpmaddwd %ymm1, %ymm0, %ymm0
vmovaps %ymm0, 0x80(%rsp)
vmovaps 0xa0(%rsp), %ymm1
vmovaps 0x80(%rsp), %ymm0
vmovaps %ymm1, 0x420(%rsp)
vmovaps %ymm0, 0x400(%rsp)
vmovaps 0x420(%rsp), %ymm0
vmovaps 0x400(%rsp), %ymm1
vpaddd %ymm1, %ymm0, %ymm0
vmovaps %ymm0, 0x60(%rsp)
vmovaps 0x60(%rsp), %ymm1
vmovaps 0x180(%rsp), %ymm0
vmovaps %ymm1, 0x380(%rsp)
vmovaps %ymm0, 0x360(%rsp)
vmovaps 0x380(%rsp), %ymm0
vmovaps 0x360(%rsp), %ymm1
vpunpckldq %ymm1, %ymm0, %ymm0 # ymm0 = ymm0[0],ymm1[0],ymm0[1],ymm1[1],ymm0[4],ymm1[4],ymm0[5],ymm1[5]
vmovaps %ymm0, 0x40(%rsp)
vmovaps 0x60(%rsp), %ymm1
vmovaps 0x180(%rsp), %ymm0
vmovaps %ymm1, 0x3c0(%rsp)
vmovaps %ymm0, 0x3a0(%rsp)
vmovaps 0x3c0(%rsp), %ymm0
vmovaps 0x3a0(%rsp), %ymm1
vpunpckhdq %ymm1, %ymm0, %ymm0 # ymm0 = ymm0[2],ymm1[2],ymm0[3],ymm1[3],ymm0[6],ymm1[6],ymm0[7],ymm1[7]
vmovdqa %ymm0, 0x20(%rsp)
vmovdqa 0x20(%rsp), %ymm1
vmovdqa 0x40(%rsp), %ymm0
vmovdqa %ymm1, 0x240(%rsp)
vmovdqa %ymm0, 0x220(%rsp)
vmovdqa 0x240(%rsp), %ymm0
vpaddq 0x220(%rsp), %ymm0, %ymm0
vmovdqa %ymm0, (%rsp)
movq 0x1a8(%rsp), %rax
vmovdqa (%rax), %ymm1
vmovdqa (%rsp), %ymm0
vmovdqa %ymm1, 0x200(%rsp)
vmovdqa %ymm0, 0x1e0(%rsp)
vmovdqa 0x200(%rsp), %ymm0
vpaddq 0x1e0(%rsp), %ymm0, %ymm0
movq 0x1a8(%rsp), %rax
vmovdqa %ymm0, (%rax)
movq %rbp, %rsp
popq %rbp
vzeroupper
retq
nopw %cs:(%rax,%rax)
| /m-ab-s[P]aom/av1/encoder/x86/error_intrin_avx2.c |
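A quick bound check behind the "extreme input values" comment above: one madd lane of squared int16 differences can reach 2 * 32768^2 = 2^31, which is already past INT32_MAX, so a running 32-bit sum over a whole block could wrap; unpacking to 64-bit lanes before accumulating avoids that. Whether real coefficients reach these extremes is not claimed here; the snippet only verifies the arithmetic bound.

#include <assert.h>
#include <stdint.h>

int main(void) {
  const int64_t max_madd_lane = 2 * (int64_t)32768 * 32768;  // 2^31
  assert(max_madd_lane > INT32_MAX);               // a single lane can exceed int32
  assert(2 * max_madd_lane == (int64_t)1 << 32);   // two lanes summed reach 2^32
  return 0;
}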
av1_block_error_block_size64_avx2 | static inline void av1_block_error_block_size64_avx2(const int16_t *coeff,
const int16_t *dqcoeff,
__m256i *sse_256,
intptr_t block_size) {
const __m256i zero = _mm256_setzero_si256();
for (int i = 0; i < block_size; i += 64) {
// Load 64 elements for coeff and dqcoeff.
const __m256i _coeff_0 = _mm256_loadu_si256((const __m256i *)coeff);
const __m256i _dqcoeff_0 = _mm256_loadu_si256((const __m256i *)dqcoeff);
const __m256i _coeff_1 = _mm256_loadu_si256((const __m256i *)(coeff + 16));
const __m256i _dqcoeff_1 =
_mm256_loadu_si256((const __m256i *)(dqcoeff + 16));
const __m256i _coeff_2 = _mm256_loadu_si256((const __m256i *)(coeff + 32));
const __m256i _dqcoeff_2 =
_mm256_loadu_si256((const __m256i *)(dqcoeff + 32));
const __m256i _coeff_3 = _mm256_loadu_si256((const __m256i *)(coeff + 48));
const __m256i _dqcoeff_3 =
_mm256_loadu_si256((const __m256i *)(dqcoeff + 48));
// d0 d1 d2 d3 d4 d5 d6 d7 d8 d9 d10 d11 d12 d13 d14 d15
const __m256i diff_0 = _mm256_sub_epi16(_dqcoeff_0, _coeff_0);
const __m256i diff_1 = _mm256_sub_epi16(_dqcoeff_1, _coeff_1);
const __m256i diff_2 = _mm256_sub_epi16(_dqcoeff_2, _coeff_2);
const __m256i diff_3 = _mm256_sub_epi16(_dqcoeff_3, _coeff_3);
// r0 r1 r2 r3 r4 r5 r6 r7
const __m256i error_0 = _mm256_madd_epi16(diff_0, diff_0);
const __m256i error_1 = _mm256_madd_epi16(diff_1, diff_1);
const __m256i error_2 = _mm256_madd_epi16(diff_2, diff_2);
const __m256i error_3 = _mm256_madd_epi16(diff_3, diff_3);
// r00 r01 r02 r03 r04 r05 r06 r07
const __m256i err_final_0 = _mm256_add_epi32(error_0, error_1);
// r10 r11 r12 r13 r14 r15 r16 r17
const __m256i err_final_1 = _mm256_add_epi32(error_2, error_3);
// For extreme input values, the accumulation needs to happen in 64 bit
// precision to avoid any overflow. r00 r01 r04 r05
const __m256i exp0_error_lo = _mm256_unpacklo_epi32(err_final_0, zero);
// r02 r03 r06 r07
const __m256i exp0_error_hi = _mm256_unpackhi_epi32(err_final_0, zero);
// r10 r11 r14 r15
const __m256i exp1_error_lo = _mm256_unpacklo_epi32(err_final_1, zero);
// r12 r13 r16 r17
const __m256i exp1_error_hi = _mm256_unpackhi_epi32(err_final_1, zero);
const __m256i sum_temp_0 = _mm256_add_epi64(exp0_error_hi, exp0_error_lo);
const __m256i sum_temp_1 = _mm256_add_epi64(exp1_error_hi, exp1_error_lo);
const __m256i sse_256_temp = _mm256_add_epi64(sum_temp_1, sum_temp_0);
*sse_256 = _mm256_add_epi64(*sse_256, sse_256_temp);
coeff += 64;
dqcoeff += 64;
}
} | pushq %rbp
movq %rsp, %rbp
andq $-0x20, %rsp
subq $0x880, %rsp # imm = 0x880
movq %rdi, 0x378(%rsp)
movq %rsi, 0x370(%rsp)
movq %rdx, 0x368(%rsp)
movq %rcx, 0x360(%rsp)
vxorps %xmm0, %xmm0, %xmm0
vmovaps %ymm0, 0x380(%rsp)
vmovdqa 0x380(%rsp), %ymm0
vmovdqa %ymm0, 0x340(%rsp)
movl $0x0, 0x33c(%rsp)
movslq 0x33c(%rsp), %rax
cmpq 0x360(%rsp), %rax
jge 0x95019f
movq 0x378(%rsp), %rax
movq %rax, 0x7d8(%rsp)
movq 0x7d8(%rsp), %rax
vmovups (%rax), %ymm0
vmovaps %ymm0, 0x300(%rsp)
movq 0x370(%rsp), %rax
movq %rax, 0x7d0(%rsp)
movq 0x7d0(%rsp), %rax
vmovups (%rax), %ymm0
vmovaps %ymm0, 0x2e0(%rsp)
movq 0x378(%rsp), %rax
addq $0x20, %rax
movq %rax, 0x7c8(%rsp)
movq 0x7c8(%rsp), %rax
vmovups (%rax), %ymm0
vmovaps %ymm0, 0x2c0(%rsp)
movq 0x370(%rsp), %rax
addq $0x20, %rax
movq %rax, 0x7c0(%rsp)
movq 0x7c0(%rsp), %rax
vmovups (%rax), %ymm0
vmovaps %ymm0, 0x2a0(%rsp)
movq 0x378(%rsp), %rax
addq $0x40, %rax
movq %rax, 0x7b8(%rsp)
movq 0x7b8(%rsp), %rax
vmovups (%rax), %ymm0
vmovaps %ymm0, 0x280(%rsp)
movq 0x370(%rsp), %rax
addq $0x40, %rax
movq %rax, 0x7b0(%rsp)
movq 0x7b0(%rsp), %rax
vmovups (%rax), %ymm0
vmovaps %ymm0, 0x260(%rsp)
movq 0x378(%rsp), %rax
addq $0x60, %rax
movq %rax, 0x7a8(%rsp)
movq 0x7a8(%rsp), %rax
vmovups (%rax), %ymm0
vmovaps %ymm0, 0x240(%rsp)
movq 0x370(%rsp), %rax
addq $0x60, %rax
movq %rax, 0x7a0(%rsp)
movq 0x7a0(%rsp), %rax
vmovups (%rax), %ymm0
vmovaps %ymm0, 0x220(%rsp)
vmovaps 0x2e0(%rsp), %ymm1
vmovaps 0x300(%rsp), %ymm0
vmovaps %ymm1, 0x580(%rsp)
vmovaps %ymm0, 0x560(%rsp)
vmovaps 0x580(%rsp), %ymm0
vmovaps 0x560(%rsp), %ymm1
vpsubw %ymm1, %ymm0, %ymm0
vmovaps %ymm0, 0x200(%rsp)
vmovaps 0x2a0(%rsp), %ymm1
vmovaps 0x2c0(%rsp), %ymm0
vmovaps %ymm1, 0x540(%rsp)
vmovaps %ymm0, 0x520(%rsp)
vmovaps 0x540(%rsp), %ymm0
vmovaps 0x520(%rsp), %ymm1
vpsubw %ymm1, %ymm0, %ymm0
vmovaps %ymm0, 0x1e0(%rsp)
vmovaps 0x260(%rsp), %ymm1
vmovaps 0x280(%rsp), %ymm0
vmovaps %ymm1, 0x500(%rsp)
vmovaps %ymm0, 0x4e0(%rsp)
vmovaps 0x500(%rsp), %ymm0
vmovaps 0x4e0(%rsp), %ymm1
vpsubw %ymm1, %ymm0, %ymm0
vmovaps %ymm0, 0x1c0(%rsp)
vmovaps 0x220(%rsp), %ymm1
vmovaps 0x240(%rsp), %ymm0
vmovaps %ymm1, 0x4c0(%rsp)
vmovaps %ymm0, 0x4a0(%rsp)
vmovaps 0x4c0(%rsp), %ymm0
vmovaps 0x4a0(%rsp), %ymm1
vpsubw %ymm1, %ymm0, %ymm0
vmovaps %ymm0, 0x1a0(%rsp)
vmovaps 0x200(%rsp), %ymm0
vmovaps %ymm0, 0x680(%rsp)
vmovaps %ymm0, 0x660(%rsp)
vmovaps 0x680(%rsp), %ymm0
vmovaps 0x660(%rsp), %ymm1
vpmaddwd %ymm1, %ymm0, %ymm0
vmovaps %ymm0, 0x180(%rsp)
vmovaps 0x1e0(%rsp), %ymm0
vmovaps %ymm0, 0x640(%rsp)
vmovaps %ymm0, 0x620(%rsp)
vmovaps 0x640(%rsp), %ymm0
vmovaps 0x620(%rsp), %ymm1
vpmaddwd %ymm1, %ymm0, %ymm0
vmovaps %ymm0, 0x160(%rsp)
vmovaps 0x1c0(%rsp), %ymm0
vmovaps %ymm0, 0x600(%rsp)
vmovaps %ymm0, 0x5e0(%rsp)
vmovaps 0x600(%rsp), %ymm0
vmovaps 0x5e0(%rsp), %ymm1
vpmaddwd %ymm1, %ymm0, %ymm0
vmovaps %ymm0, 0x140(%rsp)
vmovaps 0x1a0(%rsp), %ymm0
vmovaps %ymm0, 0x5c0(%rsp)
vmovaps %ymm0, 0x5a0(%rsp)
vmovaps 0x5c0(%rsp), %ymm0
vmovaps 0x5a0(%rsp), %ymm1
vpmaddwd %ymm1, %ymm0, %ymm0
vmovaps %ymm0, 0x120(%rsp)
vmovaps 0x180(%rsp), %ymm1
vmovaps 0x160(%rsp), %ymm0
vmovaps %ymm1, 0x840(%rsp)
vmovaps %ymm0, 0x820(%rsp)
vmovaps 0x840(%rsp), %ymm0
vmovaps 0x820(%rsp), %ymm1
vpaddd %ymm1, %ymm0, %ymm0
vmovaps %ymm0, 0x100(%rsp)
vmovaps 0x140(%rsp), %ymm1
vmovaps 0x120(%rsp), %ymm0
vmovaps %ymm1, 0x800(%rsp)
vmovaps %ymm0, 0x7e0(%rsp)
vmovaps 0x800(%rsp), %ymm0
vmovaps 0x7e0(%rsp), %ymm1
vpaddd %ymm1, %ymm0, %ymm0
vmovaps %ymm0, 0xe0(%rsp)
vmovaps 0x100(%rsp), %ymm1
vmovaps 0x340(%rsp), %ymm0
vmovaps %ymm1, 0x700(%rsp)
vmovaps %ymm0, 0x6e0(%rsp)
vmovaps 0x700(%rsp), %ymm0
vmovaps 0x6e0(%rsp), %ymm1
vpunpckldq %ymm1, %ymm0, %ymm0 # ymm0 = ymm0[0],ymm1[0],ymm0[1],ymm1[1],ymm0[4],ymm1[4],ymm0[5],ymm1[5]
vmovaps %ymm0, 0xc0(%rsp)
vmovaps 0x100(%rsp), %ymm1
vmovaps 0x340(%rsp), %ymm0
vmovaps %ymm1, 0x780(%rsp)
vmovaps %ymm0, 0x760(%rsp)
vmovaps 0x780(%rsp), %ymm0
vmovaps 0x760(%rsp), %ymm1
vpunpckhdq %ymm1, %ymm0, %ymm0 # ymm0 = ymm0[2],ymm1[2],ymm0[3],ymm1[3],ymm0[6],ymm1[6],ymm0[7],ymm1[7]
vmovaps %ymm0, 0xa0(%rsp)
vmovaps 0xe0(%rsp), %ymm1
vmovaps 0x340(%rsp), %ymm0
vmovaps %ymm1, 0x6c0(%rsp)
vmovaps %ymm0, 0x6a0(%rsp)
vmovaps 0x6c0(%rsp), %ymm0
vmovaps 0x6a0(%rsp), %ymm1
vpunpckldq %ymm1, %ymm0, %ymm0 # ymm0 = ymm0[0],ymm1[0],ymm0[1],ymm1[1],ymm0[4],ymm1[4],ymm0[5],ymm1[5]
vmovaps %ymm0, 0x80(%rsp)
vmovaps 0xe0(%rsp), %ymm1
vmovaps 0x340(%rsp), %ymm0
vmovaps %ymm1, 0x740(%rsp)
vmovaps %ymm0, 0x720(%rsp)
vmovaps 0x740(%rsp), %ymm0
vmovaps 0x720(%rsp), %ymm1
vpunpckhdq %ymm1, %ymm0, %ymm0 # ymm0 = ymm0[2],ymm1[2],ymm0[3],ymm1[3],ymm0[6],ymm1[6],ymm0[7],ymm1[7]
vmovdqa %ymm0, 0x60(%rsp)
vmovdqa 0xa0(%rsp), %ymm1
vmovdqa 0xc0(%rsp), %ymm0
vmovdqa %ymm1, 0x480(%rsp)
vmovdqa %ymm0, 0x460(%rsp)
vmovdqa 0x480(%rsp), %ymm0
vpaddq 0x460(%rsp), %ymm0, %ymm0
vmovdqa %ymm0, 0x40(%rsp)
vmovdqa 0x60(%rsp), %ymm1
vmovdqa 0x80(%rsp), %ymm0
vmovdqa %ymm1, 0x440(%rsp)
vmovdqa %ymm0, 0x420(%rsp)
vmovdqa 0x440(%rsp), %ymm0
vpaddq 0x420(%rsp), %ymm0, %ymm0
vmovdqa %ymm0, 0x20(%rsp)
vmovdqa 0x20(%rsp), %ymm1
vmovdqa 0x40(%rsp), %ymm0
vmovdqa %ymm1, 0x400(%rsp)
vmovdqa %ymm0, 0x3e0(%rsp)
vmovdqa 0x400(%rsp), %ymm0
vpaddq 0x3e0(%rsp), %ymm0, %ymm0
vmovdqa %ymm0, (%rsp)
movq 0x368(%rsp), %rax
vmovdqa (%rax), %ymm1
vmovdqa (%rsp), %ymm0
vmovdqa %ymm1, 0x3c0(%rsp)
vmovdqa %ymm0, 0x3a0(%rsp)
vmovdqa 0x3c0(%rsp), %ymm0
vpaddq 0x3a0(%rsp), %ymm0, %ymm0
movq 0x368(%rsp), %rax
vmovdqa %ymm0, (%rax)
movq 0x378(%rsp), %rax
addq $0x80, %rax
movq %rax, 0x378(%rsp)
movq 0x370(%rsp), %rax
addq $0x80, %rax
movq %rax, 0x370(%rsp)
movl 0x33c(%rsp), %eax
addl $0x40, %eax
movl %eax, 0x33c(%rsp)
jmp 0x94fb99
movq %rbp, %rsp
popq %rbp
vzeroupper
retq
nopw (%rax,%rax)
| /m-ab-s[P]aom/av1/encoder/x86/error_intrin_avx2.c |
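A small harness comparing the AVX2 path against a plain scalar loop. It assumes the av1_block_error_lp_avx2 prototype shown earlier in this file, compilation with AVX2 enabled, and linking against the object that defines the function; the test values are arbitrary.

#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

int64_t av1_block_error_lp_avx2(const int16_t *coeff, const int16_t *dqcoeff,
                                intptr_t block_size);

int main(void) {
  enum { N = 64 * 64 };  // one 64x64 block; block_size must be a multiple of 16
  static int16_t coeff[N], dqcoeff[N];
  srand(1);
  for (int i = 0; i < N; ++i) {
    coeff[i] = (int16_t)(rand() % 2048 - 1024);
    dqcoeff[i] = (int16_t)(rand() % 2048 - 1024);
  }
  int64_t ref = 0;
  for (int i = 0; i < N; ++i) {
    const int d = dqcoeff[i] - coeff[i];
    ref += (int64_t)d * d;
  }
  const int64_t simd = av1_block_error_lp_avx2(coeff, dqcoeff, N);
  printf("ref=%lld simd=%lld %s\n", (long long)ref, (long long)simd,
         ref == simd ? "OK" : "MISMATCH");
  return ref == simd ? 0 : 1;
}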
av1_block_error_avx2 | int64_t av1_block_error_avx2(const tran_low_t *coeff, const tran_low_t *dqcoeff,
intptr_t block_size, int64_t *ssz) {
__m256i sse_reg, ssz_reg, coeff_reg, dqcoeff_reg;
__m256i exp_dqcoeff_lo, exp_dqcoeff_hi, exp_coeff_lo, exp_coeff_hi;
__m256i sse_reg_64hi, ssz_reg_64hi;
__m128i sse_reg128, ssz_reg128;
int64_t sse;
int i;
const __m256i zero_reg = _mm256_setzero_si256();
  // init sse and ssz registers to zero
sse_reg = _mm256_setzero_si256();
ssz_reg = _mm256_setzero_si256();
for (i = 0; i < block_size; i += 16) {
// load 32 bytes from coeff and dqcoeff
read_coeff(coeff, i, &coeff_reg);
read_coeff(dqcoeff, i, &dqcoeff_reg);
// dqcoeff - coeff
dqcoeff_reg = _mm256_sub_epi16(dqcoeff_reg, coeff_reg);
// madd (dqcoeff - coeff)
dqcoeff_reg = _mm256_madd_epi16(dqcoeff_reg, dqcoeff_reg);
// madd coeff
coeff_reg = _mm256_madd_epi16(coeff_reg, coeff_reg);
// expand each double word of madd (dqcoeff - coeff) to quad word
exp_dqcoeff_lo = _mm256_unpacklo_epi32(dqcoeff_reg, zero_reg);
exp_dqcoeff_hi = _mm256_unpackhi_epi32(dqcoeff_reg, zero_reg);
// expand each double word of madd (coeff) to quad word
exp_coeff_lo = _mm256_unpacklo_epi32(coeff_reg, zero_reg);
exp_coeff_hi = _mm256_unpackhi_epi32(coeff_reg, zero_reg);
// add each quad word of madd (dqcoeff - coeff) and madd (coeff)
sse_reg = _mm256_add_epi64(sse_reg, exp_dqcoeff_lo);
ssz_reg = _mm256_add_epi64(ssz_reg, exp_coeff_lo);
sse_reg = _mm256_add_epi64(sse_reg, exp_dqcoeff_hi);
ssz_reg = _mm256_add_epi64(ssz_reg, exp_coeff_hi);
}
  // save the upper 64 bits of each 128-bit lane
sse_reg_64hi = _mm256_srli_si256(sse_reg, 8);
ssz_reg_64hi = _mm256_srli_si256(ssz_reg, 8);
  // add the upper 64 bits to the lower 64 bits
sse_reg = _mm256_add_epi64(sse_reg, sse_reg_64hi);
ssz_reg = _mm256_add_epi64(ssz_reg, ssz_reg_64hi);
  // add the 64-bit values from the two 128-bit lanes of the 256-bit register
sse_reg128 = _mm_add_epi64(_mm256_castsi256_si128(sse_reg),
_mm256_extractf128_si256(sse_reg, 1));
ssz_reg128 = _mm_add_epi64(_mm256_castsi256_si128(ssz_reg),
_mm256_extractf128_si256(ssz_reg, 1));
// store the results
_mm_storel_epi64((__m128i *)(&sse), sse_reg128);
_mm_storel_epi64((__m128i *)(ssz), ssz_reg128);
_mm256_zeroupper();
return sse;
} | pushq %rbp
movq %rsp, %rbp
andq $-0x20, %rsp
subq $0x640, %rsp # imm = 0x640
movq %rdi, 0x1b8(%rsp)
movq %rsi, 0x1b0(%rsp)
movq %rdx, 0x1a8(%rsp)
movq %rcx, 0x1a0(%rsp)
vxorps %xmm0, %xmm0, %xmm0
vmovaps %ymm0, 0x200(%rsp)
vmovaps 0x200(%rsp), %ymm1
vmovaps %ymm1, (%rsp)
vmovaps %ymm0, 0x1e0(%rsp)
vmovaps 0x1e0(%rsp), %ymm1
vmovaps %ymm1, 0x180(%rsp)
vmovaps %ymm0, 0x1c0(%rsp)
vmovdqa 0x1c0(%rsp), %ymm0
vmovdqa %ymm0, 0x160(%rsp)
movl $0x0, 0x34(%rsp)
movslq 0x34(%rsp), %rax
cmpq 0x1a8(%rsp), %rax
jge 0x950541
movq 0x1b8(%rsp), %rdi
movslq 0x34(%rsp), %rsi
leaq 0x140(%rsp), %rdx
vzeroupper
callq 0x950710
movq 0x1b0(%rsp), %rdi
movslq 0x34(%rsp), %rsi
leaq 0x120(%rsp), %rdx
callq 0x950710
vmovaps 0x120(%rsp), %ymm1
vmovaps 0x140(%rsp), %ymm0
vmovaps %ymm1, 0x480(%rsp)
vmovaps %ymm0, 0x460(%rsp)
vmovaps 0x480(%rsp), %ymm0
vmovaps 0x460(%rsp), %ymm1
vpsubw %ymm1, %ymm0, %ymm0
vmovaps %ymm0, 0x120(%rsp)
vmovaps 0x120(%rsp), %ymm0
vmovaps %ymm0, 0x500(%rsp)
vmovaps %ymm0, 0x4e0(%rsp)
vmovaps 0x500(%rsp), %ymm0
vmovaps 0x4e0(%rsp), %ymm1
vpmaddwd %ymm1, %ymm0, %ymm0
vmovaps %ymm0, 0x120(%rsp)
vmovaps 0x140(%rsp), %ymm0
vmovaps %ymm0, 0x4c0(%rsp)
vmovaps %ymm0, 0x4a0(%rsp)
vmovaps 0x4c0(%rsp), %ymm0
vmovaps 0x4a0(%rsp), %ymm1
vpmaddwd %ymm1, %ymm0, %ymm0
vmovaps %ymm0, 0x140(%rsp)
vmovaps 0x120(%rsp), %ymm1
vmovaps (%rsp), %ymm0
vmovaps %ymm1, 0x580(%rsp)
vmovaps %ymm0, 0x560(%rsp)
vmovaps 0x580(%rsp), %ymm0
vmovaps 0x560(%rsp), %ymm1
vpunpckldq %ymm1, %ymm0, %ymm0 # ymm0 = ymm0[0],ymm1[0],ymm0[1],ymm1[1],ymm0[4],ymm1[4],ymm0[5],ymm1[5]
vmovaps %ymm0, 0x100(%rsp)
vmovaps 0x120(%rsp), %ymm1
vmovaps (%rsp), %ymm0
vmovaps %ymm1, 0x600(%rsp)
vmovaps %ymm0, 0x5e0(%rsp)
vmovaps 0x600(%rsp), %ymm0
vmovaps 0x5e0(%rsp), %ymm1
vpunpckhdq %ymm1, %ymm0, %ymm0 # ymm0 = ymm0[2],ymm1[2],ymm0[3],ymm1[3],ymm0[6],ymm1[6],ymm0[7],ymm1[7]
vmovaps %ymm0, 0xe0(%rsp)
vmovaps 0x140(%rsp), %ymm1
vmovaps (%rsp), %ymm0
vmovaps %ymm1, 0x540(%rsp)
vmovaps %ymm0, 0x520(%rsp)
vmovaps 0x540(%rsp), %ymm0
vmovaps 0x520(%rsp), %ymm1
vpunpckldq %ymm1, %ymm0, %ymm0 # ymm0 = ymm0[0],ymm1[0],ymm0[1],ymm1[1],ymm0[4],ymm1[4],ymm0[5],ymm1[5]
vmovaps %ymm0, 0xc0(%rsp)
vmovaps 0x140(%rsp), %ymm1
vmovaps (%rsp), %ymm0
vmovaps %ymm1, 0x5c0(%rsp)
vmovaps %ymm0, 0x5a0(%rsp)
vmovaps 0x5c0(%rsp), %ymm0
vmovaps 0x5a0(%rsp), %ymm1
vpunpckhdq %ymm1, %ymm0, %ymm0 # ymm0 = ymm0[2],ymm1[2],ymm0[3],ymm1[3],ymm0[6],ymm1[6],ymm0[7],ymm1[7]
vmovdqa %ymm0, 0xa0(%rsp)
vmovdqa 0x180(%rsp), %ymm1
vmovdqa 0x100(%rsp), %ymm0
vmovdqa %ymm1, 0x380(%rsp)
vmovdqa %ymm0, 0x360(%rsp)
vmovdqa 0x380(%rsp), %ymm0
vpaddq 0x360(%rsp), %ymm0, %ymm0
vmovdqa %ymm0, 0x180(%rsp)
vmovdqa 0x160(%rsp), %ymm1
vmovdqa 0xc0(%rsp), %ymm0
vmovdqa %ymm1, 0x340(%rsp)
vmovdqa %ymm0, 0x320(%rsp)
vmovdqa 0x340(%rsp), %ymm0
vpaddq 0x320(%rsp), %ymm0, %ymm0
vmovdqa %ymm0, 0x160(%rsp)
vmovdqa 0x180(%rsp), %ymm1
vmovdqa 0xe0(%rsp), %ymm0
vmovdqa %ymm1, 0x300(%rsp)
vmovdqa %ymm0, 0x2e0(%rsp)
vmovdqa 0x300(%rsp), %ymm0
vpaddq 0x2e0(%rsp), %ymm0, %ymm0
vmovdqa %ymm0, 0x180(%rsp)
vmovdqa 0x160(%rsp), %ymm1
vmovdqa 0xa0(%rsp), %ymm0
vmovdqa %ymm1, 0x2c0(%rsp)
vmovdqa %ymm0, 0x2a0(%rsp)
vmovdqa 0x2c0(%rsp), %ymm0
vpaddq 0x2a0(%rsp), %ymm0, %ymm0
vmovdqa %ymm0, 0x160(%rsp)
movl 0x34(%rsp), %eax
addl $0x10, %eax
movl %eax, 0x34(%rsp)
jmp 0x950238
vmovaps 0x180(%rsp), %ymm0
vpsrldq $0x8, %ymm0, %ymm0 # ymm0 = ymm0[8,9,10,11,12,13,14,15],zero,zero,zero,zero,zero,zero,zero,zero,ymm0[24,25,26,27,28,29,30,31],zero,zero,zero,zero,zero,zero,zero,zero
vmovaps %ymm0, 0x80(%rsp)
vmovaps 0x160(%rsp), %ymm0
vpsrldq $0x8, %ymm0, %ymm0 # ymm0 = ymm0[8,9,10,11,12,13,14,15],zero,zero,zero,zero,zero,zero,zero,zero,ymm0[24,25,26,27,28,29,30,31],zero,zero,zero,zero,zero,zero,zero,zero
vmovaps %ymm0, 0x60(%rsp)
vmovaps 0x180(%rsp), %ymm1
vmovaps 0x80(%rsp), %ymm0
vmovaps %ymm1, 0x280(%rsp)
vmovaps %ymm0, 0x260(%rsp)
vmovaps 0x280(%rsp), %ymm0
vmovaps 0x260(%rsp), %ymm1
vpaddq %ymm1, %ymm0, %ymm0
vmovaps %ymm0, 0x180(%rsp)
vmovaps 0x160(%rsp), %ymm1
vmovaps 0x60(%rsp), %ymm0
vmovaps %ymm1, 0x240(%rsp)
vmovaps %ymm0, 0x220(%rsp)
vmovaps 0x240(%rsp), %ymm0
vmovaps 0x220(%rsp), %ymm1
vpaddq %ymm1, %ymm0, %ymm0
vmovaps %ymm0, 0x160(%rsp)
vmovaps 0x180(%rsp), %ymm0
vmovaps %ymm0, 0x400(%rsp)
vmovaps 0x400(%rsp), %ymm0
vmovaps %xmm0, %xmm1
vmovdqa 0x190(%rsp), %xmm0
vmovdqa %xmm1, 0x3d0(%rsp)
vmovdqa %xmm0, 0x3c0(%rsp)
vmovdqa 0x3d0(%rsp), %xmm0
vmovdqa 0x3c0(%rsp), %xmm1
vpaddq %xmm1, %xmm0, %xmm0
vmovdqa %xmm0, 0x50(%rsp)
vmovaps 0x160(%rsp), %ymm0
vmovaps %ymm0, 0x3e0(%rsp)
vmovaps 0x3e0(%rsp), %ymm0
vmovaps %xmm0, %xmm1
vmovdqa 0x170(%rsp), %xmm0
vmovdqa %xmm1, 0x3b0(%rsp)
vmovdqa %xmm0, 0x3a0(%rsp)
vmovdqa 0x3b0(%rsp), %xmm0
vmovdqa 0x3a0(%rsp), %xmm1
vpaddq %xmm1, %xmm0, %xmm0
vmovdqa %xmm0, 0x40(%rsp)
vmovdqa 0x50(%rsp), %xmm0
leaq 0x38(%rsp), %rax
movq %rax, 0x458(%rsp)
vmovdqa %xmm0, 0x440(%rsp)
movq 0x440(%rsp), %rcx
movq 0x458(%rsp), %rax
movq %rcx, (%rax)
movq 0x1a0(%rsp), %rax
vmovdqa 0x40(%rsp), %xmm0
movq %rax, 0x438(%rsp)
vmovdqa %xmm0, 0x420(%rsp)
movq 0x420(%rsp), %rcx
movq 0x438(%rsp), %rax
movq %rcx, (%rax)
vzeroupper
movq 0x38(%rsp), %rax
movq %rbp, %rsp
popq %rbp
retq
nopl (%rax)
| /m-ab-s[P]aom/av1/encoder/x86/error_intrin_avx2.c |
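For reference, the scalar equivalent of what av1_block_error_avx2 accumulates: the sse between coeff and dqcoeff plus the sum of squared coefficients written through ssz, both in 64-bit precision. The tran_low_t typedef below is an assumption (int32_t) so the snippet stands alone.

#include <stdint.h>

typedef int32_t tran_low_t;  // assumption, to keep the sketch self-contained

static int64_t block_error_ref(const tran_low_t *coeff,
                               const tran_low_t *dqcoeff, intptr_t block_size,
                               int64_t *ssz) {
  int64_t sse = 0, sqcoeff = 0;
  for (intptr_t i = 0; i < block_size; ++i) {
    const int64_t diff = (int64_t)coeff[i] - dqcoeff[i];
    sse += diff * diff;
    sqcoeff += (int64_t)coeff[i] * coeff[i];
  }
  *ssz = sqcoeff;
  return sse;
}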
av1_calc_proj_params_high_bd_avx2 | void av1_calc_proj_params_high_bd_avx2(const uint8_t *src8, int width,
int height, int src_stride,
const uint8_t *dat8, int dat_stride,
int32_t *flt0, int flt0_stride,
int32_t *flt1, int flt1_stride,
int64_t H[2][2], int64_t C[2],
const sgr_params_type *params) {
if ((params->r[0] > 0) && (params->r[1] > 0)) {
calc_proj_params_r0_r1_high_bd_avx2(src8, width, height, src_stride, dat8,
dat_stride, flt0, flt0_stride, flt1,
flt1_stride, H, C);
} else if (params->r[0] > 0) {
calc_proj_params_r0_high_bd_avx2(src8, width, height, src_stride, dat8,
dat_stride, flt0, flt0_stride, H, C);
} else if (params->r[1] > 0) {
calc_proj_params_r1_high_bd_avx2(src8, width, height, src_stride, dat8,
dat_stride, flt1, flt1_stride, H, C);
}
} | pushq %rbp
pushq %r14
pushq %rbx
subq $0x60, %rsp
movq 0xb0(%rsp), %rax
movq 0xa8(%rsp), %rax
movq 0xa0(%rsp), %rax
movl 0x98(%rsp), %eax
movq 0x90(%rsp), %rax
movl 0x88(%rsp), %eax
movq 0x80(%rsp), %rax
movq %rdi, 0x58(%rsp)
movl %esi, 0x54(%rsp)
movl %edx, 0x50(%rsp)
movl %ecx, 0x4c(%rsp)
movq %r8, 0x40(%rsp)
movl %r9d, 0x3c(%rsp)
movq 0xb0(%rsp), %rax
cmpl $0x0, (%rax)
jle 0x9c9f64
movq 0xb0(%rsp), %rax
cmpl $0x0, 0x4(%rax)
jle 0x9c9f64
movq 0x58(%rsp), %rdi
movl 0x54(%rsp), %esi
movl 0x50(%rsp), %edx
movl 0x4c(%rsp), %ecx
movq 0x40(%rsp), %r8
movl 0x3c(%rsp), %r9d
movq 0x80(%rsp), %r14
movl 0x88(%rsp), %ebp
movq 0x90(%rsp), %rbx
movl 0x98(%rsp), %r11d
movq 0xa0(%rsp), %r10
movq 0xa8(%rsp), %rax
movq %r14, (%rsp)
movl %ebp, 0x8(%rsp)
movq %rbx, 0x10(%rsp)
movl %r11d, 0x18(%rsp)
movq %r10, 0x20(%rsp)
movq %rax, 0x28(%rsp)
callq 0x9ca040
jmp 0x9ca02b
movq 0xb0(%rsp), %rax
cmpl $0x0, (%rax)
jle 0x9c9fc6
movq 0x58(%rsp), %rdi
movl 0x54(%rsp), %esi
movl 0x50(%rsp), %edx
movl 0x4c(%rsp), %ecx
movq 0x40(%rsp), %r8
movl 0x3c(%rsp), %r9d
movq 0x80(%rsp), %rbx
movl 0x88(%rsp), %r11d
movq 0xa0(%rsp), %r10
movq 0xa8(%rsp), %rax
movq %rbx, (%rsp)
movl %r11d, 0x8(%rsp)
movq %r10, 0x10(%rsp)
movq %rax, 0x18(%rsp)
callq 0x9caf50
jmp 0x9ca029
movq 0xb0(%rsp), %rax
cmpl $0x0, 0x4(%rax)
jle 0x9ca027
movq 0x58(%rsp), %rdi
movl 0x54(%rsp), %esi
movl 0x50(%rsp), %edx
movl 0x4c(%rsp), %ecx
movq 0x40(%rsp), %r8
movl 0x3c(%rsp), %r9d
movq 0x90(%rsp), %rbx
movl 0x98(%rsp), %r11d
movq 0xa0(%rsp), %r10
movq 0xa8(%rsp), %rax
movq %rbx, (%rsp)
movl %r11d, 0x8(%rsp)
movq %r10, 0x10(%rsp)
movq %rax, 0x18(%rsp)
callq 0x9cb770
jmp 0x9ca029
jmp 0x9ca02b
addq $0x60, %rsp
popq %rbx
popq %r14
popq %rbp
retq
nopw %cs:(%rax,%rax)
| /m-ab-s[P]aom/av1/encoder/x86/pickrst_avx2.c |
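av1_calc_proj_params_high_bd_avx2 only dispatches on which self-guided filters are active. The H matrix and C vector its workers fill look like the normal-equation terms of a least-squares fit of the source to the two filter outputs, so downstream code would obtain the projection coefficients by solving H * x = C; that interpretation and the 2x2 Cramer's-rule solver below are hedged sketches, not the library's actual solver.

#include <math.h>
#include <stdint.h>

// Hedged sketch: solve the 2x2 system H * x = C in double precision.
static int solve_proj_2x2(int64_t H[2][2], int64_t C[2], double x[2]) {
  const double det = (double)H[0][0] * (double)H[1][1] -
                     (double)H[0][1] * (double)H[1][0];
  if (fabs(det) < 1e-8) return 0;  // (near-)singular system
  x[0] = ((double)C[0] * (double)H[1][1] - (double)C[1] * (double)H[0][1]) / det;
  x[1] = ((double)C[1] * (double)H[0][0] - (double)C[0] * (double)H[1][0]) / det;
  return 1;
}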
calc_proj_params_r0_r1_high_bd_avx2 | static inline void calc_proj_params_r0_r1_high_bd_avx2(
const uint8_t *src8, int width, int height, int src_stride,
const uint8_t *dat8, int dat_stride, int32_t *flt0, int flt0_stride,
int32_t *flt1, int flt1_stride, int64_t H[2][2], int64_t C[2]) {
const int size = width * height;
const uint16_t *src = CONVERT_TO_SHORTPTR(src8);
const uint16_t *dat = CONVERT_TO_SHORTPTR(dat8);
__m256i h00, h01, h11, c0, c1;
const __m256i zero = _mm256_setzero_si256();
h01 = h11 = c0 = c1 = h00 = zero;
for (int i = 0; i < height; ++i) {
for (int j = 0; j < width; j += 8) {
const __m256i u_load = _mm256_cvtepu16_epi32(
_mm_load_si128((__m128i *)(dat + i * dat_stride + j)));
const __m256i s_load = _mm256_cvtepu16_epi32(
_mm_load_si128((__m128i *)(src + i * src_stride + j)));
__m256i f1 = _mm256_loadu_si256((__m256i *)(flt0 + i * flt0_stride + j));
__m256i f2 = _mm256_loadu_si256((__m256i *)(flt1 + i * flt1_stride + j));
__m256i d = _mm256_slli_epi32(u_load, SGRPROJ_RST_BITS);
__m256i s = _mm256_slli_epi32(s_load, SGRPROJ_RST_BITS);
s = _mm256_sub_epi32(s, d);
f1 = _mm256_sub_epi32(f1, d);
f2 = _mm256_sub_epi32(f2, d);
const __m256i h00_even = _mm256_mul_epi32(f1, f1);
const __m256i h00_odd = _mm256_mul_epi32(_mm256_srli_epi64(f1, 32),
_mm256_srli_epi64(f1, 32));
h00 = _mm256_add_epi64(h00, h00_even);
h00 = _mm256_add_epi64(h00, h00_odd);
const __m256i h01_even = _mm256_mul_epi32(f1, f2);
const __m256i h01_odd = _mm256_mul_epi32(_mm256_srli_epi64(f1, 32),
_mm256_srli_epi64(f2, 32));
h01 = _mm256_add_epi64(h01, h01_even);
h01 = _mm256_add_epi64(h01, h01_odd);
const __m256i h11_even = _mm256_mul_epi32(f2, f2);
const __m256i h11_odd = _mm256_mul_epi32(_mm256_srli_epi64(f2, 32),
_mm256_srli_epi64(f2, 32));
h11 = _mm256_add_epi64(h11, h11_even);
h11 = _mm256_add_epi64(h11, h11_odd);
const __m256i c0_even = _mm256_mul_epi32(f1, s);
const __m256i c0_odd =
_mm256_mul_epi32(_mm256_srli_epi64(f1, 32), _mm256_srli_epi64(s, 32));
c0 = _mm256_add_epi64(c0, c0_even);
c0 = _mm256_add_epi64(c0, c0_odd);
const __m256i c1_even = _mm256_mul_epi32(f2, s);
const __m256i c1_odd =
_mm256_mul_epi32(_mm256_srli_epi64(f2, 32), _mm256_srli_epi64(s, 32));
c1 = _mm256_add_epi64(c1, c1_even);
c1 = _mm256_add_epi64(c1, c1_odd);
}
}
__m256i c_low = _mm256_unpacklo_epi64(c0, c1);
const __m256i c_high = _mm256_unpackhi_epi64(c0, c1);
c_low = _mm256_add_epi64(c_low, c_high);
const __m128i c_128bit = _mm_add_epi64(_mm256_extracti128_si256(c_low, 1),
_mm256_castsi256_si128(c_low));
__m256i h0x_low = _mm256_unpacklo_epi64(h00, h01);
const __m256i h0x_high = _mm256_unpackhi_epi64(h00, h01);
h0x_low = _mm256_add_epi64(h0x_low, h0x_high);
const __m128i h0x_128bit = _mm_add_epi64(_mm256_extracti128_si256(h0x_low, 1),
_mm256_castsi256_si128(h0x_low));
  // Since H is symmetric, the calculation of H[1][0] is not needed.
__m256i h1x_low = _mm256_unpacklo_epi64(zero, h11);
const __m256i h1x_high = _mm256_unpackhi_epi64(zero, h11);
h1x_low = _mm256_add_epi64(h1x_low, h1x_high);
const __m128i h1x_128bit = _mm_add_epi64(_mm256_extracti128_si256(h1x_low, 1),
_mm256_castsi256_si128(h1x_low));
xx_storeu_128(C, c_128bit);
xx_storeu_128(H[0], h0x_128bit);
xx_storeu_128(H[1], h1x_128bit);
H[0][0] /= size;
H[0][1] /= size;
H[1][1] /= size;
// Since H is a symmetric matrix
H[1][0] = H[0][1];
C[0] /= size;
C[1] /= size;
} | pushq %rbp
movq %rsp, %rbp
andq $-0x20, %rsp
subq $0x10a0, %rsp # imm = 0x10A0
movq 0x38(%rbp), %rax
movq 0x30(%rbp), %rax
movl 0x28(%rbp), %eax
movq 0x20(%rbp), %rax
movl 0x18(%rbp), %eax
movq 0x10(%rbp), %rax
movq %rdi, 0x458(%rsp)
movl %esi, 0x454(%rsp)
movl %edx, 0x450(%rsp)
movl %ecx, 0x44c(%rsp)
movq %r8, 0x440(%rsp)
movl %r9d, 0x43c(%rsp)
movl 0x454(%rsp), %eax
movl 0x450(%rsp), %ecx
imull %ecx, %eax
movl %eax, 0x438(%rsp)
movq 0x458(%rsp), %rax
addq %rax, %rax
movq %rax, 0x430(%rsp)
movq 0x440(%rsp), %rax
addq %rax, %rax
movq %rax, 0x428(%rsp)
vxorps %xmm0, %xmm0, %xmm0
vmovaps %ymm0, 0x460(%rsp)
vmovdqa 0x460(%rsp), %ymm0
vmovdqa %ymm0, 0x360(%rsp)
vmovdqa 0x360(%rsp), %ymm0
vmovdqa %ymm0, 0x400(%rsp)
vmovdqa %ymm0, 0x380(%rsp)
vmovdqa %ymm0, 0x3a0(%rsp)
vmovdqa %ymm0, 0x3c0(%rsp)
vmovdqa %ymm0, 0x3e0(%rsp)
movl $0x0, 0x35c(%rsp)
movl 0x35c(%rsp), %eax
cmpl 0x450(%rsp), %eax
jge 0x9caafc
movl $0x0, 0x358(%rsp)
movl 0x358(%rsp), %eax
cmpl 0x454(%rsp), %eax
jge 0x9caae4
movq 0x428(%rsp), %rax
movl 0x35c(%rsp), %ecx
movl 0x43c(%rsp), %edx
imull %edx, %ecx
movslq %ecx, %rcx
leaq (%rax,%rcx,2), %rax
movslq 0x358(%rsp), %rcx
leaq (%rax,%rcx,2), %rax
movq %rax, 0x1088(%rsp)
movq 0x1088(%rsp), %rax
vmovdqa (%rax), %xmm0
vmovdqa %xmm0, 0x830(%rsp)
vpmovzxwd 0x830(%rsp), %ymm0
vmovaps %ymm0, 0x320(%rsp)
movq 0x430(%rsp), %rax
movl 0x35c(%rsp), %ecx
movl 0x44c(%rsp), %edx
imull %edx, %ecx
movslq %ecx, %rcx
leaq (%rax,%rcx,2), %rax
movslq 0x358(%rsp), %rcx
leaq (%rax,%rcx,2), %rax
movq %rax, 0x1080(%rsp)
movq 0x1080(%rsp), %rax
vmovdqa (%rax), %xmm0
vmovdqa %xmm0, 0x820(%rsp)
vpmovzxwd 0x820(%rsp), %ymm0
vmovaps %ymm0, 0x300(%rsp)
movq 0x10(%rbp), %rax
movl 0x35c(%rsp), %ecx
movl 0x18(%rbp), %edx
imull %edx, %ecx
movslq %ecx, %rcx
leaq (%rax,%rcx,4), %rax
movslq 0x358(%rsp), %rcx
leaq (%rax,%rcx,4), %rax
movq %rax, 0x918(%rsp)
movq 0x918(%rsp), %rax
vmovups (%rax), %ymm0
vmovaps %ymm0, 0x2e0(%rsp)
movq 0x20(%rbp), %rax
movl 0x35c(%rsp), %ecx
movl 0x28(%rbp), %edx
imull %edx, %ecx
movslq %ecx, %rcx
leaq (%rax,%rcx,4), %rax
movslq 0x358(%rsp), %rcx
leaq (%rax,%rcx,4), %rax
movq %rax, 0x910(%rsp)
movq 0x910(%rsp), %rax
vmovups (%rax), %ymm0
vmovaps %ymm0, 0x2c0(%rsp)
vmovaps 0x320(%rsp), %ymm0
vmovaps %ymm0, 0xb60(%rsp)
movl $0x4, 0xb5c(%rsp)
vmovaps 0xb60(%rsp), %ymm0
vmovd 0xb5c(%rsp), %xmm1
vpslld %xmm1, %ymm0, %ymm0
vmovaps %ymm0, 0x2a0(%rsp)
vmovaps 0x300(%rsp), %ymm0
vmovaps %ymm0, 0xb20(%rsp)
movl $0x4, 0xb1c(%rsp)
vmovaps 0xb20(%rsp), %ymm0
vmovd 0xb1c(%rsp), %xmm1
vpslld %xmm1, %ymm0, %ymm0
vmovaps %ymm0, 0x280(%rsp)
vmovaps 0x280(%rsp), %ymm1
vmovaps 0x2a0(%rsp), %ymm0
vmovaps %ymm1, 0x8e0(%rsp)
vmovaps %ymm0, 0x8c0(%rsp)
vmovaps 0x8e0(%rsp), %ymm0
vmovaps 0x8c0(%rsp), %ymm1
vpsubd %ymm1, %ymm0, %ymm0
vmovaps %ymm0, 0x280(%rsp)
vmovaps 0x2e0(%rsp), %ymm1
vmovaps 0x2a0(%rsp), %ymm0
vmovaps %ymm1, 0x8a0(%rsp)
vmovaps %ymm0, 0x880(%rsp)
vmovaps 0x8a0(%rsp), %ymm0
vmovaps 0x880(%rsp), %ymm1
vpsubd %ymm1, %ymm0, %ymm0
vmovaps %ymm0, 0x2e0(%rsp)
vmovaps 0x2c0(%rsp), %ymm1
vmovaps 0x2a0(%rsp), %ymm0
vmovaps %ymm1, 0x860(%rsp)
vmovaps %ymm0, 0x840(%rsp)
vmovaps 0x860(%rsp), %ymm0
vmovaps 0x840(%rsp), %ymm1
vpsubd %ymm1, %ymm0, %ymm0
vmovaps %ymm0, 0x2c0(%rsp)
vmovaps 0x2e0(%rsp), %ymm0
vmovaps %ymm0, 0xde0(%rsp)
vmovaps %ymm0, 0xdc0(%rsp)
vmovaps 0xde0(%rsp), %ymm0
vmovaps 0xdc0(%rsp), %ymm1
vpmuldq %ymm1, %ymm0, %ymm0
vmovaps %ymm0, 0x260(%rsp)
vmovaps 0x2e0(%rsp), %ymm0
vmovaps %ymm0, 0x1060(%rsp)
movl $0x20, 0x105c(%rsp)
vmovaps 0x1060(%rsp), %ymm0
vmovd 0x105c(%rsp), %xmm1
vpsrlq %xmm1, %ymm0, %ymm1
vmovaps 0x2e0(%rsp), %ymm0
vmovaps %ymm0, 0x1020(%rsp)
movl $0x20, 0x101c(%rsp)
vmovaps 0x1020(%rsp), %ymm0
vmovd 0x101c(%rsp), %xmm2
vpsrlq %xmm2, %ymm0, %ymm0
vmovaps %ymm1, 0xda0(%rsp)
vmovaps %ymm0, 0xd80(%rsp)
vmovaps 0xda0(%rsp), %ymm0
vmovaps 0xd80(%rsp), %ymm1
vpmuldq %ymm1, %ymm0, %ymm0
vmovaps %ymm0, 0x240(%rsp)
vmovaps 0x400(%rsp), %ymm1
vmovaps 0x260(%rsp), %ymm0
vmovaps %ymm1, 0x800(%rsp)
vmovaps %ymm0, 0x7e0(%rsp)
vmovaps 0x800(%rsp), %ymm0
vmovaps 0x7e0(%rsp), %ymm1
vpaddq %ymm1, %ymm0, %ymm0
vmovaps %ymm0, 0x400(%rsp)
vmovaps 0x400(%rsp), %ymm1
vmovaps 0x240(%rsp), %ymm0
vmovaps %ymm1, 0x7c0(%rsp)
vmovaps %ymm0, 0x7a0(%rsp)
vmovaps 0x7c0(%rsp), %ymm0
vmovaps 0x7a0(%rsp), %ymm1
vpaddq %ymm1, %ymm0, %ymm0
vmovaps %ymm0, 0x400(%rsp)
vmovaps 0x2e0(%rsp), %ymm1
vmovaps 0x2c0(%rsp), %ymm0
vmovaps %ymm1, 0xd60(%rsp)
vmovaps %ymm0, 0xd40(%rsp)
vmovaps 0xd60(%rsp), %ymm0
vmovaps 0xd40(%rsp), %ymm1
vpmuldq %ymm1, %ymm0, %ymm0
vmovaps %ymm0, 0x220(%rsp)
vmovaps 0x2e0(%rsp), %ymm0
vmovaps %ymm0, 0xfe0(%rsp)
movl $0x20, 0xfdc(%rsp)
vmovaps 0xfe0(%rsp), %ymm0
vmovd 0xfdc(%rsp), %xmm1
vpsrlq %xmm1, %ymm0, %ymm1
vmovaps 0x2c0(%rsp), %ymm0
vmovaps %ymm0, 0xfa0(%rsp)
movl $0x20, 0xf9c(%rsp)
vmovaps 0xfa0(%rsp), %ymm0
vmovd 0xf9c(%rsp), %xmm2
vpsrlq %xmm2, %ymm0, %ymm0
vmovaps %ymm1, 0xd20(%rsp)
vmovaps %ymm0, 0xd00(%rsp)
vmovaps 0xd20(%rsp), %ymm0
vmovaps 0xd00(%rsp), %ymm1
vpmuldq %ymm1, %ymm0, %ymm0
vmovaps %ymm0, 0x200(%rsp)
vmovaps 0x3e0(%rsp), %ymm1
vmovaps 0x220(%rsp), %ymm0
vmovaps %ymm1, 0x780(%rsp)
vmovaps %ymm0, 0x760(%rsp)
vmovaps 0x780(%rsp), %ymm0
vmovaps 0x760(%rsp), %ymm1
vpaddq %ymm1, %ymm0, %ymm0
vmovaps %ymm0, 0x3e0(%rsp)
vmovaps 0x3e0(%rsp), %ymm1
vmovaps 0x200(%rsp), %ymm0
vmovaps %ymm1, 0x740(%rsp)
vmovaps %ymm0, 0x720(%rsp)
vmovaps 0x740(%rsp), %ymm0
vmovaps 0x720(%rsp), %ymm1
vpaddq %ymm1, %ymm0, %ymm0
vmovaps %ymm0, 0x3e0(%rsp)
vmovaps 0x2c0(%rsp), %ymm0
vmovaps %ymm0, 0xce0(%rsp)
vmovaps %ymm0, 0xcc0(%rsp)
vmovaps 0xce0(%rsp), %ymm0
vmovaps 0xcc0(%rsp), %ymm1
vpmuldq %ymm1, %ymm0, %ymm0
vmovaps %ymm0, 0x1e0(%rsp)
vmovaps 0x2c0(%rsp), %ymm0
vmovaps %ymm0, 0xf60(%rsp)
movl $0x20, 0xf5c(%rsp)
vmovaps 0xf60(%rsp), %ymm0
vmovd 0xf5c(%rsp), %xmm1
vpsrlq %xmm1, %ymm0, %ymm1
vmovaps 0x2c0(%rsp), %ymm0
vmovaps %ymm0, 0xf20(%rsp)
movl $0x20, 0xf1c(%rsp)
vmovaps 0xf20(%rsp), %ymm0
vmovd 0xf1c(%rsp), %xmm2
vpsrlq %xmm2, %ymm0, %ymm0
vmovaps %ymm1, 0xca0(%rsp)
vmovaps %ymm0, 0xc80(%rsp)
vmovaps 0xca0(%rsp), %ymm0
vmovaps 0xc80(%rsp), %ymm1
vpmuldq %ymm1, %ymm0, %ymm0
vmovaps %ymm0, 0x1c0(%rsp)
vmovaps 0x3c0(%rsp), %ymm1
vmovaps 0x1e0(%rsp), %ymm0
vmovaps %ymm1, 0x700(%rsp)
vmovaps %ymm0, 0x6e0(%rsp)
vmovaps 0x700(%rsp), %ymm0
vmovaps 0x6e0(%rsp), %ymm1
vpaddq %ymm1, %ymm0, %ymm0
vmovaps %ymm0, 0x3c0(%rsp)
vmovaps 0x3c0(%rsp), %ymm1
vmovaps 0x1c0(%rsp), %ymm0
vmovaps %ymm1, 0x6c0(%rsp)
vmovaps %ymm0, 0x6a0(%rsp)
vmovaps 0x6c0(%rsp), %ymm0
vmovaps 0x6a0(%rsp), %ymm1
vpaddq %ymm1, %ymm0, %ymm0
vmovaps %ymm0, 0x3c0(%rsp)
vmovaps 0x2e0(%rsp), %ymm1
vmovaps 0x280(%rsp), %ymm0
vmovaps %ymm1, 0xc60(%rsp)
vmovaps %ymm0, 0xc40(%rsp)
vmovaps 0xc60(%rsp), %ymm0
vmovaps 0xc40(%rsp), %ymm1
vpmuldq %ymm1, %ymm0, %ymm0
vmovaps %ymm0, 0x1a0(%rsp)
vmovaps 0x2e0(%rsp), %ymm0
vmovaps %ymm0, 0xee0(%rsp)
movl $0x20, 0xedc(%rsp)
vmovaps 0xee0(%rsp), %ymm0
vmovd 0xedc(%rsp), %xmm1
vpsrlq %xmm1, %ymm0, %ymm1
vmovaps 0x280(%rsp), %ymm0
vmovaps %ymm0, 0xea0(%rsp)
movl $0x20, 0xe9c(%rsp)
vmovaps 0xea0(%rsp), %ymm0
vmovd 0xe9c(%rsp), %xmm2
vpsrlq %xmm2, %ymm0, %ymm0
vmovaps %ymm1, 0xc20(%rsp)
vmovaps %ymm0, 0xc00(%rsp)
vmovaps 0xc20(%rsp), %ymm0
vmovaps 0xc00(%rsp), %ymm1
vpmuldq %ymm1, %ymm0, %ymm0
vmovaps %ymm0, 0x180(%rsp)
vmovaps 0x3a0(%rsp), %ymm1
vmovaps 0x1a0(%rsp), %ymm0
vmovaps %ymm1, 0x680(%rsp)
vmovaps %ymm0, 0x660(%rsp)
vmovaps 0x680(%rsp), %ymm0
vmovaps 0x660(%rsp), %ymm1
vpaddq %ymm1, %ymm0, %ymm0
vmovaps %ymm0, 0x3a0(%rsp)
vmovaps 0x3a0(%rsp), %ymm1
vmovaps 0x180(%rsp), %ymm0
vmovaps %ymm1, 0x640(%rsp)
vmovaps %ymm0, 0x620(%rsp)
vmovaps 0x640(%rsp), %ymm0
vmovaps 0x620(%rsp), %ymm1
vpaddq %ymm1, %ymm0, %ymm0
vmovaps %ymm0, 0x3a0(%rsp)
vmovaps 0x2c0(%rsp), %ymm1
vmovaps 0x280(%rsp), %ymm0
vmovaps %ymm1, 0xbe0(%rsp)
vmovaps %ymm0, 0xbc0(%rsp)
vmovaps 0xbe0(%rsp), %ymm0
vmovaps 0xbc0(%rsp), %ymm1
vpmuldq %ymm1, %ymm0, %ymm0
vmovaps %ymm0, 0x160(%rsp)
vmovaps 0x2c0(%rsp), %ymm0
vmovaps %ymm0, 0xe60(%rsp)
movl $0x20, 0xe5c(%rsp)
vmovaps 0xe60(%rsp), %ymm0
vmovd 0xe5c(%rsp), %xmm1
vpsrlq %xmm1, %ymm0, %ymm1
vmovaps 0x280(%rsp), %ymm0
vmovaps %ymm0, 0xe20(%rsp)
movl $0x20, 0xe1c(%rsp)
vmovaps 0xe20(%rsp), %ymm0
vmovd 0xe1c(%rsp), %xmm2
vpsrlq %xmm2, %ymm0, %ymm0
vmovaps %ymm1, 0xba0(%rsp)
vmovaps %ymm0, 0xb80(%rsp)
vmovaps 0xba0(%rsp), %ymm0
vmovaps 0xb80(%rsp), %ymm1
vpsllq $0x20, %ymm0, %ymm2
vpsrad $0x1f, %ymm2, %ymm2
vpblendd $0xaa, %ymm2, %ymm0, %ymm2 # ymm2 = ymm0[0],ymm2[1],ymm0[2],ymm2[3],ymm0[4],ymm2[5],ymm0[6],ymm2[7]
vpsllq $0x20, %ymm1, %ymm2
vpsrad $0x1f, %ymm2, %ymm2
vpblendd $0xaa, %ymm2, %ymm1, %ymm2 # ymm2 = ymm1[0],ymm2[1],ymm1[2],ymm2[3],ymm1[4],ymm2[5],ymm1[6],ymm2[7]
vpmuldq %ymm1, %ymm0, %ymm0
vmovdqa %ymm0, 0x140(%rsp)
vmovdqa 0x380(%rsp), %ymm1
vmovdqa 0x160(%rsp), %ymm0
vmovdqa %ymm1, 0x600(%rsp)
vmovdqa %ymm0, 0x5e0(%rsp)
vmovdqa 0x600(%rsp), %ymm0
vpaddq 0x5e0(%rsp), %ymm0, %ymm0
vmovdqa %ymm0, 0x380(%rsp)
vmovdqa 0x380(%rsp), %ymm1
vmovdqa 0x140(%rsp), %ymm0
vmovdqa %ymm1, 0x5c0(%rsp)
vmovdqa %ymm0, 0x5a0(%rsp)
vmovdqa 0x5c0(%rsp), %ymm0
vpaddq 0x5a0(%rsp), %ymm0, %ymm0
vmovdqa %ymm0, 0x380(%rsp)
movl 0x358(%rsp), %eax
addl $0x8, %eax
movl %eax, 0x358(%rsp)
jmp 0x9ca14f
jmp 0x9caae6
movl 0x35c(%rsp), %eax
addl $0x1, %eax
movl %eax, 0x35c(%rsp)
jmp 0x9ca130
vmovaps 0x3a0(%rsp), %ymm1
vmovaps 0x380(%rsp), %ymm0
vmovaps %ymm1, 0x9c0(%rsp)
vmovaps %ymm0, 0x9a0(%rsp)
vmovaps 0x9c0(%rsp), %ymm0
vmovaps 0x9a0(%rsp), %ymm1
vpunpcklqdq %ymm1, %ymm0, %ymm0 # ymm0 = ymm0[0],ymm1[0],ymm0[2],ymm1[2]
vmovaps %ymm0, 0x120(%rsp)
vmovaps 0x3a0(%rsp), %ymm1
vmovaps 0x380(%rsp), %ymm0
vmovaps %ymm1, 0xa80(%rsp)
vmovaps %ymm0, 0xa60(%rsp)
vmovaps 0xa80(%rsp), %ymm0
vmovaps 0xa60(%rsp), %ymm1
vpunpckhqdq %ymm1, %ymm0, %ymm0 # ymm0 = ymm0[1],ymm1[1],ymm0[3],ymm1[3]
vmovaps %ymm0, 0x100(%rsp)
vmovaps 0x120(%rsp), %ymm1
vmovaps 0x100(%rsp), %ymm0
vmovaps %ymm1, 0x580(%rsp)
vmovaps %ymm0, 0x560(%rsp)
vmovaps 0x580(%rsp), %ymm0
vmovaps 0x560(%rsp), %ymm1
vpaddq %ymm1, %ymm0, %ymm0
vmovaps %ymm0, 0x120(%rsp)
vmovaps 0x120(%rsp), %ymm0
vmovdqa 0x130(%rsp), %xmm1
vmovaps %ymm0, 0x4c0(%rsp)
vmovaps 0x4c0(%rsp), %ymm0
vmovdqa %xmm1, 0xb00(%rsp)
vmovdqa %xmm0, 0xaf0(%rsp)
vmovdqa 0xb00(%rsp), %xmm0
vmovdqa 0xaf0(%rsp), %xmm1
vpaddq %xmm1, %xmm0, %xmm0
vmovdqa %xmm0, 0xf0(%rsp)
vmovaps 0x400(%rsp), %ymm1
vmovaps 0x3e0(%rsp), %ymm0
vmovaps %ymm1, 0x980(%rsp)
vmovaps %ymm0, 0x960(%rsp)
vmovaps 0x980(%rsp), %ymm0
vmovaps 0x960(%rsp), %ymm1
vpunpcklqdq %ymm1, %ymm0, %ymm0 # ymm0 = ymm0[0],ymm1[0],ymm0[2],ymm1[2]
vmovaps %ymm0, 0xc0(%rsp)
vmovaps 0x400(%rsp), %ymm1
vmovaps 0x3e0(%rsp), %ymm0
vmovaps %ymm1, 0xa40(%rsp)
vmovaps %ymm0, 0xa20(%rsp)
vmovaps 0xa40(%rsp), %ymm0
vmovaps 0xa20(%rsp), %ymm1
vpunpckhqdq %ymm1, %ymm0, %ymm0 # ymm0 = ymm0[1],ymm1[1],ymm0[3],ymm1[3]
vmovaps %ymm0, 0xa0(%rsp)
vmovaps 0xc0(%rsp), %ymm1
vmovaps 0xa0(%rsp), %ymm0
vmovaps %ymm1, 0x540(%rsp)
vmovaps %ymm0, 0x520(%rsp)
vmovaps 0x540(%rsp), %ymm0
vmovaps 0x520(%rsp), %ymm1
vpaddq %ymm1, %ymm0, %ymm0
vmovaps %ymm0, 0xc0(%rsp)
vmovaps 0xc0(%rsp), %ymm0
vmovdqa 0xd0(%rsp), %xmm1
vmovaps %ymm0, 0x4a0(%rsp)
vmovaps 0x4a0(%rsp), %ymm0
vmovdqa %xmm1, 0xae0(%rsp)
vmovdqa %xmm0, 0xad0(%rsp)
vmovdqa 0xae0(%rsp), %xmm0
vmovdqa 0xad0(%rsp), %xmm1
vpaddq %xmm1, %xmm0, %xmm0
vmovdqa %xmm0, 0x90(%rsp)
vmovaps 0x360(%rsp), %ymm1
vmovaps 0x3c0(%rsp), %ymm0
vmovaps %ymm1, 0x940(%rsp)
vmovaps %ymm0, 0x920(%rsp)
vmovaps 0x940(%rsp), %ymm0
vmovaps 0x920(%rsp), %ymm1
vpunpcklqdq %ymm1, %ymm0, %ymm0 # ymm0 = ymm0[0],ymm1[0],ymm0[2],ymm1[2]
vmovaps %ymm0, 0x60(%rsp)
vmovaps 0x360(%rsp), %ymm1
vmovaps 0x3c0(%rsp), %ymm0
vmovaps %ymm1, 0xa00(%rsp)
vmovaps %ymm0, 0x9e0(%rsp)
vmovaps 0xa00(%rsp), %ymm0
vmovaps 0x9e0(%rsp), %ymm1
vpunpckhqdq %ymm1, %ymm0, %ymm0 # ymm0 = ymm0[1],ymm1[1],ymm0[3],ymm1[3]
vmovaps %ymm0, 0x40(%rsp)
vmovaps 0x60(%rsp), %ymm1
vmovaps 0x40(%rsp), %ymm0
vmovaps %ymm1, 0x500(%rsp)
vmovaps %ymm0, 0x4e0(%rsp)
vmovaps 0x500(%rsp), %ymm0
vmovaps 0x4e0(%rsp), %ymm1
vpaddq %ymm1, %ymm0, %ymm0
vmovaps %ymm0, 0x60(%rsp)
vmovaps 0x60(%rsp), %ymm0
vmovdqa 0x70(%rsp), %xmm1
vmovaps %ymm0, 0x480(%rsp)
vmovdqa 0x480(%rsp), %xmm0
vmovdqa %xmm1, 0xac0(%rsp)
vmovdqa %xmm0, 0xab0(%rsp)
vmovdqa 0xac0(%rsp), %xmm0
vpaddq 0xab0(%rsp), %xmm0, %xmm0
vmovdqa %xmm0, 0x30(%rsp)
movq 0x38(%rbp), %rdi
vmovdqa 0xf0(%rsp), %xmm0
vzeroupper
callq 0x9cfbb0
movq 0x30(%rbp), %rdi
vmovdqa 0x90(%rsp), %xmm0
callq 0x9cfbb0
movq 0x30(%rbp), %rdi
addq $0x10, %rdi
vmovdqa 0x30(%rsp), %xmm0
callq 0x9cfbb0
movslq 0x438(%rsp), %rcx
movq 0x30(%rbp), %rax
movq %rax, 0x8(%rsp)
movq (%rax), %rax
cqto
idivq %rcx
movq %rax, %rcx
movq 0x8(%rsp), %rax
movq %rcx, (%rax)
movslq 0x438(%rsp), %rcx
movq 0x30(%rbp), %rax
movq %rax, 0x10(%rsp)
movq 0x8(%rax), %rax
cqto
idivq %rcx
movq %rax, %rcx
movq 0x10(%rsp), %rax
movq %rcx, 0x8(%rax)
movslq 0x438(%rsp), %rcx
movq 0x30(%rbp), %rax
movq %rax, 0x18(%rsp)
movq 0x18(%rax), %rax
cqto
idivq %rcx
movq %rax, %rcx
movq 0x18(%rsp), %rax
movq %rcx, 0x18(%rax)
movq 0x30(%rbp), %rax
movq 0x8(%rax), %rcx
movq 0x30(%rbp), %rax
movq %rcx, 0x10(%rax)
movslq 0x438(%rsp), %rcx
movq 0x38(%rbp), %rax
movq %rax, 0x20(%rsp)
movq (%rax), %rax
cqto
idivq %rcx
movq %rax, %rcx
movq 0x20(%rsp), %rax
movq %rcx, (%rax)
movslq 0x438(%rsp), %rcx
movq 0x38(%rbp), %rax
movq %rax, 0x28(%rsp)
movq 0x8(%rax), %rax
cqto
idivq %rcx
movq %rax, %rcx
movq 0x28(%rsp), %rax
movq %rcx, 0x8(%rax)
movq %rbp, %rsp
popq %rbp
retq
nopw %cs:(%rax,%rax)
| /m-ab-s[P]aom/av1/encoder/x86/pickrst_avx2.c |
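A scalar restatement of what calc_proj_params_r0_r1_high_bd_avx2 accumulates, convenient as a cross-check for the vector kernel (which additionally assumes the width is a multiple of 8). SGRPROJ_RST_BITS = 4 matches the shift visible in the code and assembly above; the reference function name is mine.

#include <stdint.h>

#define SGRPROJ_RST_BITS 4

static void calc_proj_params_r0_r1_ref(
    const uint16_t *src, int width, int height, int src_stride,
    const uint16_t *dat, int dat_stride, const int32_t *flt0, int flt0_stride,
    const int32_t *flt1, int flt1_stride, int64_t H[2][2], int64_t C[2]) {
  const int size = width * height;
  H[0][0] = H[0][1] = H[1][1] = 0;
  C[0] = C[1] = 0;
  for (int i = 0; i < height; ++i) {
    for (int j = 0; j < width; ++j) {
      const int32_t u = (int32_t)dat[i * dat_stride + j] << SGRPROJ_RST_BITS;
      const int32_t s =
          ((int32_t)src[i * src_stride + j] << SGRPROJ_RST_BITS) - u;
      const int32_t f1 = flt0[i * flt0_stride + j] - u;
      const int32_t f2 = flt1[i * flt1_stride + j] - u;
      H[0][0] += (int64_t)f1 * f1;
      H[0][1] += (int64_t)f1 * f2;
      H[1][1] += (int64_t)f2 * f2;
      C[0] += (int64_t)f1 * s;
      C[1] += (int64_t)f2 * s;
    }
  }
  H[0][0] /= size;
  H[0][1] /= size;
  H[1][1] /= size;
  H[1][0] = H[0][1];  // H is symmetric
  C[0] /= size;
  C[1] /= size;
}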
aom_ifft_2d_gen | void aom_ifft_2d_gen(const float *input, float *temp, float *output, int n,
aom_fft_1d_func_t fft_single, aom_fft_1d_func_t fft_multi,
aom_fft_1d_func_t ifft_multi,
aom_fft_transpose_func_t transpose, int vec_size) {
  // Columns 0 and n/2 have conjugate symmetry, so we can directly do the ifft
// and get real outputs.
for (int y = 0; y <= n / 2; ++y) {
output[y * n] = input[2 * y * n];
output[y * n + 1] = input[2 * (y * n + n / 2)];
}
for (int y = n / 2 + 1; y < n; ++y) {
output[y * n] = input[2 * (y - n / 2) * n + 1];
output[y * n + 1] = input[2 * ((y - n / 2) * n + n / 2) + 1];
}
for (int i = 0; i < 2; i += vec_size) {
ifft_multi(output + i, temp + i, n);
}
  // For the other columns, since we don't have a full ifft for complex inputs,
  // we have to split them into their real and imaginary counterparts.
  // Pack the real components, then the imaginary components.
for (int y = 0; y < n; ++y) {
for (int x = 1; x < n / 2; ++x) {
output[y * n + (x + 1)] = input[2 * (y * n + x)];
}
for (int x = 1; x < n / 2; ++x) {
output[y * n + (x + n / 2)] = input[2 * (y * n + x) + 1];
}
}
for (int y = 2; y < vec_size; y++) {
fft_single(output + y, temp + y, n);
}
// This is the part that can be sped up with SIMD
for (int y = AOMMAX(2, vec_size); y < n; y += vec_size) {
fft_multi(output + y, temp + y, n);
}
  // Put the 0th and n/2-th results in the correct place.
for (int x = 0; x < n; ++x) {
output[x] = temp[x * n];
output[(n / 2) * n + x] = temp[x * n + 1];
}
// This rearranges and transposes.
for (int y = 1; y < n / 2; ++y) {
// Fill in the real columns
for (int x = 0; x <= n / 2; ++x) {
output[x + y * n] =
temp[(y + 1) + x * n] +
((x > 0 && x < n / 2) ? temp[(y + n / 2) + (x + n / 2) * n] : 0);
}
for (int x = n / 2 + 1; x < n; ++x) {
output[x + y * n] = temp[(y + 1) + (n - x) * n] -
temp[(y + n / 2) + ((n - x) + n / 2) * n];
}
// Fill in the imag columns
for (int x = 0; x <= n / 2; ++x) {
output[x + (y + n / 2) * n] =
temp[(y + n / 2) + x * n] -
((x > 0 && x < n / 2) ? temp[(y + 1) + (x + n / 2) * n] : 0);
}
for (int x = n / 2 + 1; x < n; ++x) {
output[x + (y + n / 2) * n] = temp[(y + 1) + ((n - x) + n / 2) * n] +
temp[(y + n / 2) + (n - x) * n];
}
}
for (int y = 0; y < n; y += vec_size) {
ifft_multi(output + y, temp + y, n);
}
transpose(temp, output, n);
} | subq $0x148, %rsp # imm = 0x148
movl 0x160(%rsp), %eax
movq 0x158(%rsp), %rax
movq 0x150(%rsp), %rax
movq %rdi, 0x140(%rsp)
movq %rsi, 0x138(%rsp)
movq %rdx, 0x130(%rsp)
movl %ecx, 0x12c(%rsp)
movq %r8, 0x120(%rsp)
movq %r9, 0x118(%rsp)
movl $0x0, 0x114(%rsp)
movl 0x114(%rsp), %eax
movl %eax, 0xd8(%rsp)
movl 0x12c(%rsp), %eax
movl $0x2, %ecx
cltd
idivl %ecx
movl %eax, %ecx
movl 0xd8(%rsp), %eax
cmpl %ecx, %eax
jg 0x9dee42
movq 0x140(%rsp), %rax
movl 0x114(%rsp), %ecx
shll %ecx
imull 0x12c(%rsp), %ecx
movslq %ecx, %rcx
movss (%rax,%rcx,4), %xmm0
movq 0x130(%rsp), %rax
movl 0x114(%rsp), %ecx
imull 0x12c(%rsp), %ecx
movslq %ecx, %rcx
movss %xmm0, (%rax,%rcx,4)
movq 0x140(%rsp), %rax
movq %rax, 0xd0(%rsp)
movl 0x114(%rsp), %ecx
imull 0x12c(%rsp), %ecx
movl 0x12c(%rsp), %eax
movl $0x2, %esi
cltd
idivl %esi
movl %eax, %edx
movq 0xd0(%rsp), %rax
addl %edx, %ecx
shll %ecx
movslq %ecx, %rcx
movss (%rax,%rcx,4), %xmm0
movq 0x130(%rsp), %rax
movl 0x114(%rsp), %ecx
imull 0x12c(%rsp), %ecx
addl $0x1, %ecx
movslq %ecx, %rcx
movss %xmm0, (%rax,%rcx,4)
movl 0x114(%rsp), %eax
addl $0x1, %eax
movl %eax, 0x114(%rsp)
jmp 0x9ded58
movl 0x12c(%rsp), %eax
movl $0x2, %ecx
cltd
idivl %ecx
addl $0x1, %eax
movl %eax, 0x110(%rsp)
movl 0x110(%rsp), %eax
cmpl 0x12c(%rsp), %eax
jge 0x9def65
movq 0x140(%rsp), %rax
movq %rax, 0xc0(%rsp)
movl 0x110(%rsp), %ecx
movl 0x12c(%rsp), %eax
movl $0x2, %esi
cltd
idivl %esi
movl %eax, %edx
movq 0xc0(%rsp), %rax
subl %edx, %ecx
shll %ecx
imull 0x12c(%rsp), %ecx
addl $0x1, %ecx
movslq %ecx, %rcx
movss (%rax,%rcx,4), %xmm0
movq 0x130(%rsp), %rax
movl 0x110(%rsp), %ecx
imull 0x12c(%rsp), %ecx
movslq %ecx, %rcx
movss %xmm0, (%rax,%rcx,4)
movq 0x140(%rsp), %rax
movq %rax, 0xc8(%rsp)
movl 0x110(%rsp), %ecx
movl 0x12c(%rsp), %eax
movl $0x2, %esi
cltd
idivl %esi
subl %eax, %ecx
imull 0x12c(%rsp), %ecx
movl 0x12c(%rsp), %eax
movl $0x2, %esi
cltd
idivl %esi
movl %eax, %edx
movq 0xc8(%rsp), %rax
addl %edx, %ecx
shll %ecx
addl $0x1, %ecx
movslq %ecx, %rcx
movss (%rax,%rcx,4), %xmm0
movq 0x130(%rsp), %rax
movl 0x110(%rsp), %ecx
imull 0x12c(%rsp), %ecx
addl $0x1, %ecx
movslq %ecx, %rcx
movss %xmm0, (%rax,%rcx,4)
movl 0x110(%rsp), %eax
addl $0x1, %eax
movl %eax, 0x110(%rsp)
jmp 0x9dee5b
movl $0x0, 0x10c(%rsp)
cmpl $0x2, 0x10c(%rsp)
jge 0x9defd0
movq 0x150(%rsp), %rax
movq 0x130(%rsp), %rdi
movslq 0x10c(%rsp), %rcx
shlq $0x2, %rcx
addq %rcx, %rdi
movq 0x138(%rsp), %rsi
movslq 0x10c(%rsp), %rcx
shlq $0x2, %rcx
addq %rcx, %rsi
movl 0x12c(%rsp), %edx
callq *%rax
movl 0x160(%rsp), %eax
addl 0x10c(%rsp), %eax
movl %eax, 0x10c(%rsp)
jmp 0x9def70
movl $0x0, 0x108(%rsp)
movl 0x108(%rsp), %eax
cmpl 0x12c(%rsp), %eax
jge 0x9df178
movl $0x1, 0x104(%rsp)
movl 0x104(%rsp), %eax
movl %eax, 0xbc(%rsp)
movl 0x12c(%rsp), %eax
movl $0x2, %ecx
cltd
idivl %ecx
movl %eax, %ecx
movl 0xbc(%rsp), %eax
cmpl %ecx, %eax
jge 0x9df08d
movq 0x140(%rsp), %rax
movl 0x108(%rsp), %ecx
imull 0x12c(%rsp), %ecx
addl 0x104(%rsp), %ecx
shll %ecx
movslq %ecx, %rcx
movss (%rax,%rcx,4), %xmm0
movq 0x130(%rsp), %rax
movl 0x108(%rsp), %ecx
imull 0x12c(%rsp), %ecx
movl 0x104(%rsp), %edx
addl $0x1, %edx
addl %edx, %ecx
movslq %ecx, %rcx
movss %xmm0, (%rax,%rcx,4)
movl 0x104(%rsp), %eax
addl $0x1, %eax
movl %eax, 0x104(%rsp)
jmp 0x9deffa
movl $0x1, 0x100(%rsp)
movl 0x100(%rsp), %eax
movl %eax, 0xb8(%rsp)
movl 0x12c(%rsp), %eax
movl $0x2, %ecx
cltd
idivl %ecx
movl %eax, %ecx
movl 0xb8(%rsp), %eax
cmpl %ecx, %eax
jge 0x9df160
movq 0x140(%rsp), %rax
movl 0x108(%rsp), %ecx
imull 0x12c(%rsp), %ecx
addl 0x100(%rsp), %ecx
shll %ecx
addl $0x1, %ecx
movslq %ecx, %rcx
movss (%rax,%rcx,4), %xmm0
movq 0x130(%rsp), %rax
movq %rax, 0xb0(%rsp)
movl 0x108(%rsp), %ecx
imull 0x12c(%rsp), %ecx
movl 0x100(%rsp), %eax
movl %eax, 0xac(%rsp)
movl 0x12c(%rsp), %eax
movl $0x2, %esi
cltd
idivl %esi
movl 0xac(%rsp), %edx
movl %eax, %esi
movq 0xb0(%rsp), %rax
addl %esi, %edx
addl %edx, %ecx
movslq %ecx, %rcx
movss %xmm0, (%rax,%rcx,4)
movl 0x100(%rsp), %eax
addl $0x1, %eax
movl %eax, 0x100(%rsp)
jmp 0x9df098
jmp 0x9df162
movl 0x108(%rsp), %eax
addl $0x1, %eax
movl %eax, 0x108(%rsp)
jmp 0x9defdb
movl $0x2, 0xfc(%rsp)
movl 0xfc(%rsp), %eax
cmpl 0x160(%rsp), %eax
jge 0x9df1e5
movq 0x120(%rsp), %rax
movq 0x130(%rsp), %rdi
movslq 0xfc(%rsp), %rcx
shlq $0x2, %rcx
addq %rcx, %rdi
movq 0x138(%rsp), %rsi
movslq 0xfc(%rsp), %rcx
shlq $0x2, %rcx
addq %rcx, %rsi
movl 0x12c(%rsp), %edx
callq *%rax
movl 0xfc(%rsp), %eax
addl $0x1, %eax
movl %eax, 0xfc(%rsp)
jmp 0x9df183
movl $0x2, %eax
cmpl 0x160(%rsp), %eax
jle 0x9df201
movl $0x2, %eax
movl %eax, 0xa8(%rsp)
jmp 0x9df20f
movl 0x160(%rsp), %eax
movl %eax, 0xa8(%rsp)
movl 0xa8(%rsp), %eax
movl %eax, 0xf8(%rsp)
movl 0xf8(%rsp), %eax
cmpl 0x12c(%rsp), %eax
jge 0x9df283
movq 0x118(%rsp), %rax
movq 0x130(%rsp), %rdi
movslq 0xf8(%rsp), %rcx
shlq $0x2, %rcx
addq %rcx, %rdi
movq 0x138(%rsp), %rsi
movslq 0xf8(%rsp), %rcx
shlq $0x2, %rcx
addq %rcx, %rsi
movl 0x12c(%rsp), %edx
callq *%rax
movl 0x160(%rsp), %eax
addl 0xf8(%rsp), %eax
movl %eax, 0xf8(%rsp)
jmp 0x9df21d
movl $0x0, 0xf4(%rsp)
movl 0xf4(%rsp), %eax
cmpl 0x12c(%rsp), %eax
jge 0x9df34e
movq 0x138(%rsp), %rax
movl 0xf4(%rsp), %ecx
imull 0x12c(%rsp), %ecx
movslq %ecx, %rcx
movss (%rax,%rcx,4), %xmm0
movq 0x130(%rsp), %rax
movslq 0xf4(%rsp), %rcx
movss %xmm0, (%rax,%rcx,4)
movq 0x138(%rsp), %rax
movl 0xf4(%rsp), %ecx
imull 0x12c(%rsp), %ecx
addl $0x1, %ecx
movslq %ecx, %rcx
movss (%rax,%rcx,4), %xmm0
movq 0x130(%rsp), %rax
movq %rax, 0xa0(%rsp)
movl 0x12c(%rsp), %eax
movl $0x2, %ecx
cltd
idivl %ecx
movl %eax, %ecx
movq 0xa0(%rsp), %rax
imull 0x12c(%rsp), %ecx
addl 0xf4(%rsp), %ecx
movslq %ecx, %rcx
movss %xmm0, (%rax,%rcx,4)
movl 0xf4(%rsp), %eax
addl $0x1, %eax
movl %eax, 0xf4(%rsp)
jmp 0x9df28e
movl $0x1, 0xf0(%rsp)
movl 0xf0(%rsp), %eax
movl %eax, 0x9c(%rsp)
movl 0x12c(%rsp), %eax
movl $0x2, %ecx
cltd
idivl %ecx
movl %eax, %ecx
movl 0x9c(%rsp), %eax
cmpl %ecx, %eax
jge 0x9df8cc
movl $0x0, 0xec(%rsp)
movl 0xec(%rsp), %eax
movl %eax, 0x98(%rsp)
movl 0x12c(%rsp), %eax
movl $0x2, %ecx
cltd
idivl %ecx
movl %eax, %ecx
movl 0x98(%rsp), %eax
cmpl %ecx, %eax
jg 0x9df4fd
movq 0x138(%rsp), %rax
movl 0xf0(%rsp), %ecx
addl $0x1, %ecx
movl 0xec(%rsp), %edx
imull 0x12c(%rsp), %edx
addl %edx, %ecx
movslq %ecx, %rcx
movss (%rax,%rcx,4), %xmm0
movss %xmm0, 0x94(%rsp)
cmpl $0x0, 0xec(%rsp)
jle 0x9df49b
movl 0xec(%rsp), %eax
movl %eax, 0x90(%rsp)
movl 0x12c(%rsp), %eax
movl $0x2, %ecx
cltd
idivl %ecx
movl %eax, %ecx
movl 0x90(%rsp), %eax
cmpl %ecx, %eax
jge 0x9df49b
movq 0x138(%rsp), %rax
movq %rax, 0x80(%rsp)
movl 0xf0(%rsp), %ecx
movl 0x12c(%rsp), %eax
movl $0x2, %esi
cltd
idivl %esi
addl %eax, %ecx
movl 0xec(%rsp), %eax
movl %eax, 0x7c(%rsp)
movl 0x12c(%rsp), %eax
movl $0x2, %esi
cltd
idivl %esi
movl 0x7c(%rsp), %edx
movl %eax, %esi
movq 0x80(%rsp), %rax
addl %esi, %edx
imull 0x12c(%rsp), %edx
addl %edx, %ecx
movslq %ecx, %rcx
movss (%rax,%rcx,4), %xmm0
movss %xmm0, 0x8c(%rsp)
jmp 0x9df4a9
xorps %xmm0, %xmm0
movss %xmm0, 0x8c(%rsp)
jmp 0x9df4a9
movss 0x94(%rsp), %xmm0
movss 0x8c(%rsp), %xmm1
addss %xmm1, %xmm0
movq 0x130(%rsp), %rax
movl 0xec(%rsp), %ecx
movl 0xf0(%rsp), %edx
imull 0x12c(%rsp), %edx
addl %edx, %ecx
movslq %ecx, %rcx
movss %xmm0, (%rax,%rcx,4)
movl 0xec(%rsp), %eax
addl $0x1, %eax
movl %eax, 0xec(%rsp)
jmp 0x9df392
movl 0x12c(%rsp), %eax
movl $0x2, %ecx
cltd
idivl %ecx
addl $0x1, %eax
movl %eax, 0xe8(%rsp)
movl 0xe8(%rsp), %eax
cmpl 0x12c(%rsp), %eax
jge 0x9df5ff
movq 0x138(%rsp), %rax
movl 0xf0(%rsp), %ecx
addl $0x1, %ecx
movl 0x12c(%rsp), %edx
subl 0xe8(%rsp), %edx
imull 0x12c(%rsp), %edx
addl %edx, %ecx
movslq %ecx, %rcx
movss (%rax,%rcx,4), %xmm0
movq 0x138(%rsp), %rax
movq %rax, 0x70(%rsp)
movl 0xf0(%rsp), %ecx
movl 0x12c(%rsp), %eax
movl $0x2, %esi
cltd
idivl %esi
addl %eax, %ecx
movl 0x12c(%rsp), %eax
subl 0xe8(%rsp), %eax
movl %eax, 0x6c(%rsp)
movl 0x12c(%rsp), %eax
movl $0x2, %esi
cltd
idivl %esi
movl 0x6c(%rsp), %edx
movl %eax, %esi
movq 0x70(%rsp), %rax
addl %esi, %edx
imull 0x12c(%rsp), %edx
addl %edx, %ecx
movslq %ecx, %rcx
subss (%rax,%rcx,4), %xmm0
movq 0x130(%rsp), %rax
movl 0xe8(%rsp), %ecx
movl 0xf0(%rsp), %edx
imull 0x12c(%rsp), %edx
addl %edx, %ecx
movslq %ecx, %rcx
movss %xmm0, (%rax,%rcx,4)
movl 0xe8(%rsp), %eax
addl $0x1, %eax
movl %eax, 0xe8(%rsp)
jmp 0x9df516
movl $0x0, 0xe4(%rsp)
movl 0xe4(%rsp), %eax
movl %eax, 0x68(%rsp)
movl 0x12c(%rsp), %eax
movl $0x2, %ecx
cltd
idivl %ecx
movl %eax, %ecx
movl 0x68(%rsp), %eax
cmpl %ecx, %eax
jg 0x9df781
movq 0x138(%rsp), %rax
movq %rax, 0x58(%rsp)
movl 0xf0(%rsp), %ecx
movl 0x12c(%rsp), %eax
movl $0x2, %esi
cltd
idivl %esi
movl %eax, %edx
movq 0x58(%rsp), %rax
addl %edx, %ecx
movl 0xe4(%rsp), %edx
imull 0x12c(%rsp), %edx
addl %edx, %ecx
movslq %ecx, %rcx
movss (%rax,%rcx,4), %xmm0
movss %xmm0, 0x64(%rsp)
cmpl $0x0, 0xe4(%rsp)
jle 0x9df703
movl 0xe4(%rsp), %eax
movl %eax, 0x54(%rsp)
movl 0x12c(%rsp), %eax
movl $0x2, %ecx
cltd
idivl %ecx
movl %eax, %ecx
movl 0x54(%rsp), %eax
cmpl %ecx, %eax
jge 0x9df703
movq 0x138(%rsp), %rax
movq %rax, 0x48(%rsp)
movl 0xf0(%rsp), %ecx
addl $0x1, %ecx
movl 0xe4(%rsp), %eax
movl %eax, 0x44(%rsp)
movl 0x12c(%rsp), %eax
movl $0x2, %esi
cltd
idivl %esi
movl 0x44(%rsp), %edx
movl %eax, %esi
movq 0x48(%rsp), %rax
addl %esi, %edx
imull 0x12c(%rsp), %edx
addl %edx, %ecx
movslq %ecx, %rcx
movss (%rax,%rcx,4), %xmm0
movss %xmm0, 0x50(%rsp)
jmp 0x9df70e
xorps %xmm0, %xmm0
movss %xmm0, 0x50(%rsp)
jmp 0x9df70e
movss 0x64(%rsp), %xmm0
movss 0x50(%rsp), %xmm1
subss %xmm1, %xmm0
movq 0x130(%rsp), %rax
movq %rax, 0x38(%rsp)
movl 0xe4(%rsp), %ecx
movl 0xf0(%rsp), %eax
movl %eax, 0x34(%rsp)
movl 0x12c(%rsp), %eax
movl $0x2, %esi
cltd
idivl %esi
movl 0x34(%rsp), %edx
movl %eax, %esi
movq 0x38(%rsp), %rax
addl %esi, %edx
imull 0x12c(%rsp), %edx
addl %edx, %ecx
movslq %ecx, %rcx
movss %xmm0, (%rax,%rcx,4)
movl 0xe4(%rsp), %eax
addl $0x1, %eax
movl %eax, 0xe4(%rsp)
jmp 0x9df60a
movl 0x12c(%rsp), %eax
movl $0x2, %ecx
cltd
idivl %ecx
addl $0x1, %eax
movl %eax, 0xe0(%rsp)
movl 0xe0(%rsp), %eax
cmpl 0x12c(%rsp), %eax
jge 0x9df8b4
movq 0x138(%rsp), %rax
movq %rax, 0x10(%rsp)
movl 0xf0(%rsp), %ecx
addl $0x1, %ecx
movl 0x12c(%rsp), %eax
subl 0xe0(%rsp), %eax
movl %eax, 0xc(%rsp)
movl 0x12c(%rsp), %eax
movl $0x2, %esi
cltd
idivl %esi
movl 0xc(%rsp), %edx
movl %eax, %esi
movq 0x10(%rsp), %rax
addl %esi, %edx
imull 0x12c(%rsp), %edx
addl %edx, %ecx
movslq %ecx, %rcx
movss (%rax,%rcx,4), %xmm0
movq 0x138(%rsp), %rax
movq %rax, 0x18(%rsp)
movl 0xf0(%rsp), %ecx
movl 0x12c(%rsp), %eax
movl $0x2, %esi
cltd
idivl %esi
movl %eax, %edx
movq 0x18(%rsp), %rax
addl %edx, %ecx
movl 0x12c(%rsp), %edx
subl 0xe0(%rsp), %edx
imull 0x12c(%rsp), %edx
addl %edx, %ecx
movslq %ecx, %rcx
addss (%rax,%rcx,4), %xmm0
movq 0x130(%rsp), %rax
movq %rax, 0x28(%rsp)
movl 0xe0(%rsp), %ecx
movl 0xf0(%rsp), %eax
movl %eax, 0x24(%rsp)
movl 0x12c(%rsp), %eax
movl $0x2, %esi
cltd
idivl %esi
movl 0x24(%rsp), %edx
movl %eax, %esi
movq 0x28(%rsp), %rax
addl %esi, %edx
imull 0x12c(%rsp), %edx
addl %edx, %ecx
movslq %ecx, %rcx
movss %xmm0, (%rax,%rcx,4)
movl 0xe0(%rsp), %eax
addl $0x1, %eax
movl %eax, 0xe0(%rsp)
jmp 0x9df79a
jmp 0x9df8b6
movl 0xf0(%rsp), %eax
addl $0x1, %eax
movl %eax, 0xf0(%rsp)
jmp 0x9df359
movl $0x0, 0xdc(%rsp)
movl 0xdc(%rsp), %eax
cmpl 0x12c(%rsp), %eax
jge 0x9df93d
movq 0x150(%rsp), %rax
movq 0x130(%rsp), %rdi
movslq 0xdc(%rsp), %rcx
shlq $0x2, %rcx
addq %rcx, %rdi
movq 0x138(%rsp), %rsi
movslq 0xdc(%rsp), %rcx
shlq $0x2, %rcx
addq %rcx, %rsi
movl 0x12c(%rsp), %edx
callq *%rax
movl 0x160(%rsp), %eax
addl 0xdc(%rsp), %eax
movl %eax, 0xdc(%rsp)
jmp 0x9df8d7
movq 0x158(%rsp), %rax
movq 0x138(%rsp), %rdi
movq 0x130(%rsp), %rsi
movl 0x12c(%rsp), %edx
callq *%rax
addq $0x148, %rsp # imm = 0x148
retq
nopw %cs:(%rax,%rax)
| /m-ab-s[P]aom/aom_dsp/fft.c |
aom_sum_squares_2d_i16_c | uint64_t aom_sum_squares_2d_i16_c(const int16_t *src, int src_stride, int width,
int height) {
int r, c;
uint64_t ss = 0;
for (r = 0; r < height; r++) {
for (c = 0; c < width; c++) {
const int16_t v = src[c];
ss += v * v;
}
src += src_stride;
}
return ss;
} | movq %rdi, -0x8(%rsp)
movl %esi, -0xc(%rsp)
movl %edx, -0x10(%rsp)
movl %ecx, -0x14(%rsp)
movq $0x0, -0x28(%rsp)
movl $0x0, -0x18(%rsp)
movl -0x18(%rsp), %eax
cmpl -0x14(%rsp), %eax
jge 0x9e6a5b
movl $0x0, -0x1c(%rsp)
movl -0x1c(%rsp), %eax
cmpl -0x10(%rsp), %eax
jge 0x9e6a37
movq -0x8(%rsp), %rax
movslq -0x1c(%rsp), %rcx
movw (%rax,%rcx,2), %ax
movw %ax, -0x2a(%rsp)
movswl -0x2a(%rsp), %eax
movswl -0x2a(%rsp), %ecx
imull %ecx, %eax
cltq
addq -0x28(%rsp), %rax
movq %rax, -0x28(%rsp)
movl -0x1c(%rsp), %eax
addl $0x1, %eax
movl %eax, -0x1c(%rsp)
jmp 0x9e69f4
movl -0xc(%rsp), %ecx
movq -0x8(%rsp), %rax
movslq %ecx, %rcx
shlq %rcx
addq %rcx, %rax
movq %rax, -0x8(%rsp)
movl -0x18(%rsp), %eax
addl $0x1, %eax
movl %eax, -0x18(%rsp)
jmp 0x9e69e2
movq -0x28(%rsp), %rax
retq
nopw %cs:(%rax,%rax)
| /m-ab-s[P]aom/aom_dsp/sum_squares.c |
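A minimal caller sketch for aom_sum_squares_2d_i16_c above. The 2x2 block, the stride of 4, and the expected value of 30 are illustrative assumptions rather than values from libaom's tests; the prototype is repeated so the snippet compiles on its own.

#include <stdint.h>
#include <stdio.h>

uint64_t aom_sum_squares_2d_i16_c(const int16_t *src, int src_stride, int width,
                                  int height);

int main(void) {
  /* 2x2 block stored in a buffer with a stride of 4 int16_t elements. */
  const int16_t block[2 * 4] = { 1, 2, 0, 0,
                                 3, 4, 0, 0 };
  /* 1*1 + 2*2 + 3*3 + 4*4 = 30 */
  printf("%llu\n", (unsigned long long)aom_sum_squares_2d_i16_c(block, 4, 2, 2));
  return 0;
}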
aom_sum_squares_i16_c | uint64_t aom_sum_squares_i16_c(const int16_t *src, uint32_t n) {
uint64_t ss = 0;
do {
const int16_t v = *src++;
ss += v * v;
} while (--n);
return ss;
} | movq %rdi, -0x8(%rsp)
movl %esi, -0xc(%rsp)
movq $0x0, -0x18(%rsp)
movq -0x8(%rsp), %rax
movq %rax, %rcx
addq $0x2, %rcx
movq %rcx, -0x8(%rsp)
movw (%rax), %ax
movw %ax, -0x1a(%rsp)
movswl -0x1a(%rsp), %eax
movswl -0x1a(%rsp), %ecx
imull %ecx, %eax
cltq
addq -0x18(%rsp), %rax
movq %rax, -0x18(%rsp)
movl -0xc(%rsp), %eax
addl $-0x1, %eax
movl %eax, -0xc(%rsp)
cmpl $0x0, %eax
jne 0x9e6a82
movq -0x18(%rsp), %rax
retq
nopw (%rax,%rax)
| /m-ab-s[P]aom/aom_dsp/sum_squares.c |
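One detail worth noting in the loop above: it is a do-while, so n must be at least 1; calling it with n == 0 would wrap the unsigned counter and run for roughly 2^32 iterations, reading far past the buffer. A small caller sketch with assumed values:

#include <stdint.h>
#include <stdio.h>

uint64_t aom_sum_squares_i16_c(const int16_t *src, uint32_t n);

int main(void) {
  const int16_t v[4] = { 3, -4, 5, 0 };
  /* 9 + 16 + 25 + 0 = 50 */
  printf("%llu\n", (unsigned long long)aom_sum_squares_i16_c(v, 4));
  return 0;
}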
aom_var_2d_u8_c | uint64_t aom_var_2d_u8_c(uint8_t *src, int src_stride, int width, int height) {
int r, c;
uint64_t ss = 0, s = 0;
for (r = 0; r < height; r++) {
for (c = 0; c < width; c++) {
const uint8_t v = src[c];
ss += v * v;
s += v;
}
src += src_stride;
}
return (ss - s * s / (width * height));
} | movq %rdi, -0x8(%rsp)
movl %esi, -0xc(%rsp)
movl %edx, -0x10(%rsp)
movl %ecx, -0x14(%rsp)
movq $0x0, -0x28(%rsp)
movq $0x0, -0x30(%rsp)
movl $0x0, -0x18(%rsp)
movl -0x18(%rsp), %eax
cmpl -0x14(%rsp), %eax
jge 0x9e6b81
movl $0x0, -0x1c(%rsp)
movl -0x1c(%rsp), %eax
cmpl -0x10(%rsp), %eax
jge 0x9e6b5d
movq -0x8(%rsp), %rax
movslq -0x1c(%rsp), %rcx
movb (%rax,%rcx), %al
movb %al, -0x31(%rsp)
movzbl -0x31(%rsp), %eax
movzbl -0x31(%rsp), %ecx
imull %ecx, %eax
cltq
addq -0x28(%rsp), %rax
movq %rax, -0x28(%rsp)
movzbl -0x31(%rsp), %eax
addq -0x30(%rsp), %rax
movq %rax, -0x30(%rsp)
movl -0x1c(%rsp), %eax
addl $0x1, %eax
movl %eax, -0x1c(%rsp)
jmp 0x9e6b0d
movl -0xc(%rsp), %ecx
movq -0x8(%rsp), %rax
movslq %ecx, %rcx
addq %rcx, %rax
movq %rax, -0x8(%rsp)
movl -0x18(%rsp), %eax
addl $0x1, %eax
movl %eax, -0x18(%rsp)
jmp 0x9e6afb
movq -0x28(%rsp), %rax
movq %rax, -0x40(%rsp)
movq -0x30(%rsp), %rax
imulq -0x30(%rsp), %rax
movl -0x10(%rsp), %ecx
imull -0x14(%rsp), %ecx
movslq %ecx, %rcx
xorl %edx, %edx
divq %rcx
movq %rax, %rcx
movq -0x40(%rsp), %rax
subq %rcx, %rax
retq
nopw %cs:(%rax,%rax)
| /m-ab-s[P]aom/aom_dsp/sum_squares.c |
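The return expression above is sum(x*x) - (sum(x))^2 / N with N = width * height, i.e. N times the population variance, evaluated in integer arithmetic. A worked sketch with an assumed 2x2 block:

#include <stdint.h>
#include <stdio.h>

uint64_t aom_var_2d_u8_c(uint8_t *src, int src_stride, int width, int height);

int main(void) {
  uint8_t block[2 * 2] = { 1, 2, 3, 4 };  /* stride == width for this example */
  /* ss = 1 + 4 + 9 + 16 = 30, s = 10, N = 4: 30 - 100 / 4 = 5 == N * var({1,2,3,4}) */
  printf("%llu\n", (unsigned long long)aom_var_2d_u8_c(block, 2, 2, 2));
  return 0;
}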
aom_var_2d_u16_c | uint64_t aom_var_2d_u16_c(uint8_t *src, int src_stride, int width, int height) {
uint16_t *srcp = CONVERT_TO_SHORTPTR(src);
int r, c;
uint64_t ss = 0, s = 0;
for (r = 0; r < height; r++) {
for (c = 0; c < width; c++) {
const uint16_t v = srcp[c];
ss += v * v;
s += v;
}
srcp += src_stride;
}
return (ss - s * s / (width * height));
} | movq %rdi, -0x8(%rsp)
movl %esi, -0xc(%rsp)
movl %edx, -0x10(%rsp)
movl %ecx, -0x14(%rsp)
movq -0x8(%rsp), %rax
shlq %rax
movq %rax, -0x20(%rsp)
movq $0x0, -0x30(%rsp)
movq $0x0, -0x38(%rsp)
movl $0x0, -0x24(%rsp)
movl -0x24(%rsp), %eax
cmpl -0x14(%rsp), %eax
jge 0x9e6c87
movl $0x0, -0x28(%rsp)
movl -0x28(%rsp), %eax
cmpl -0x10(%rsp), %eax
jge 0x9e6c60
movq -0x20(%rsp), %rax
movslq -0x28(%rsp), %rcx
movw (%rax,%rcx,2), %ax
movw %ax, -0x3a(%rsp)
movzwl -0x3a(%rsp), %eax
movzwl -0x3a(%rsp), %ecx
imull %ecx, %eax
cltq
addq -0x30(%rsp), %rax
movq %rax, -0x30(%rsp)
movzwl -0x3a(%rsp), %eax
addq -0x38(%rsp), %rax
movq %rax, -0x38(%rsp)
movl -0x28(%rsp), %eax
addl $0x1, %eax
movl %eax, -0x28(%rsp)
jmp 0x9e6c0e
movl -0xc(%rsp), %ecx
movq -0x20(%rsp), %rax
movslq %ecx, %rcx
shlq %rcx
addq %rcx, %rax
movq %rax, -0x20(%rsp)
movl -0x24(%rsp), %eax
addl $0x1, %eax
movl %eax, -0x24(%rsp)
jmp 0x9e6bf8
movq -0x30(%rsp), %rax
movq %rax, -0x48(%rsp)
movq -0x38(%rsp), %rax
imulq -0x38(%rsp), %rax
movl -0x10(%rsp), %ecx
imull -0x14(%rsp), %ecx
movslq %ecx, %rcx
xorl %edx, %edx
divq %rcx
movq %rax, %rcx
movq -0x48(%rsp), %rax
subq %rcx, %rax
retq
nopl (%rax)
| /m-ab-s[P]aom/aom_dsp/sum_squares.c |
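The only difference from the 8-bit version above is the CONVERT_TO_SHORTPTR step, which shows up in the assembly as a single shlq of the incoming pointer: in libaom's high-bitdepth convention the uint8_t * argument carries a halved uint16_t * address, so doubling it recovers the real sample pointer. The macro bodies below are an assumed illustration of that convention, not quoted from the library:

#include <stdint.h>

/* Assumed shape of the highbd pointer convention, for illustration only. */
#define CONVERT_TO_SHORTPTR(x) ((uint16_t *)(((uintptr_t)(x)) << 1))
#define CONVERT_TO_BYTEPTR(x) ((uint8_t *)(((uintptr_t)(x)) >> 1))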
aom_highbd_h_predictor_8x4_sse2 | void aom_highbd_h_predictor_8x4_sse2(uint16_t *dst, ptrdiff_t stride,
const uint16_t *above,
const uint16_t *left, int bd) {
const __m128i left_u16 = _mm_load_si128((const __m128i *)left);
const __m128i row0 = _mm_shufflelo_epi16(left_u16, 0x0);
const __m128i row1 = _mm_shufflelo_epi16(left_u16, 0x55);
const __m128i row2 = _mm_shufflelo_epi16(left_u16, 0xaa);
const __m128i row3 = _mm_shufflelo_epi16(left_u16, 0xff);
(void)above;
(void)bd;
_mm_store_si128((__m128i *)dst, _mm_unpacklo_epi64(row0, row0));
dst += stride;
_mm_store_si128((__m128i *)dst, _mm_unpacklo_epi64(row1, row1));
dst += stride;
_mm_store_si128((__m128i *)dst, _mm_unpacklo_epi64(row2, row2));
dst += stride;
_mm_store_si128((__m128i *)dst, _mm_unpacklo_epi64(row3, row3));
} | subq $0x108, %rsp # imm = 0x108
movq %rdi, -0x10(%rsp)
movq %rsi, -0x18(%rsp)
movq %rdx, -0x20(%rsp)
movq %rcx, -0x28(%rsp)
movl %r8d, -0x2c(%rsp)
movq -0x28(%rsp), %rax
movq %rax, -0x8(%rsp)
movq -0x8(%rsp), %rax
movaps (%rax), %xmm0
movaps %xmm0, -0x40(%rsp)
movaps -0x40(%rsp), %xmm0
pshuflw $0x0, %xmm0, %xmm0 # xmm0 = xmm0[0,0,0,0,4,5,6,7]
movaps %xmm0, -0x50(%rsp)
movaps -0x40(%rsp), %xmm0
pshuflw $0x55, %xmm0, %xmm0 # xmm0 = xmm0[1,1,1,1,4,5,6,7]
movaps %xmm0, -0x60(%rsp)
movaps -0x40(%rsp), %xmm0
pshuflw $0xaa, %xmm0, %xmm0 # xmm0 = xmm0[2,2,2,2,4,5,6,7]
movaps %xmm0, -0x70(%rsp)
movaps -0x40(%rsp), %xmm0
pshuflw $0xff, %xmm0, %xmm0 # xmm0 = xmm0[3,3,3,3,4,5,6,7]
movaps %xmm0, -0x80(%rsp)
movq -0x10(%rsp), %rax
movaps -0x50(%rsp), %xmm0
movaps %xmm0, 0xf0(%rsp)
movaps %xmm0, 0xe0(%rsp)
movaps 0xf0(%rsp), %xmm0
movaps 0xe0(%rsp), %xmm1
punpcklqdq %xmm1, %xmm0 # xmm0 = xmm0[0],xmm1[0]
movq %rax, 0x78(%rsp)
movaps %xmm0, 0x60(%rsp)
movaps 0x60(%rsp), %xmm0
movq 0x78(%rsp), %rax
movaps %xmm0, (%rax)
movq -0x18(%rsp), %rcx
movq -0x10(%rsp), %rax
leaq (%rax,%rcx,2), %rax
movq %rax, -0x10(%rsp)
movq -0x10(%rsp), %rax
movaps -0x60(%rsp), %xmm0
movaps %xmm0, 0xd0(%rsp)
movaps %xmm0, 0xc0(%rsp)
movaps 0xd0(%rsp), %xmm0
movaps 0xc0(%rsp), %xmm1
punpcklqdq %xmm1, %xmm0 # xmm0 = xmm0[0],xmm1[0]
movq %rax, 0x58(%rsp)
movaps %xmm0, 0x40(%rsp)
movaps 0x40(%rsp), %xmm0
movq 0x58(%rsp), %rax
movaps %xmm0, (%rax)
movq -0x18(%rsp), %rcx
movq -0x10(%rsp), %rax
leaq (%rax,%rcx,2), %rax
movq %rax, -0x10(%rsp)
movq -0x10(%rsp), %rax
movaps -0x70(%rsp), %xmm0
movaps %xmm0, 0xb0(%rsp)
movaps %xmm0, 0xa0(%rsp)
movaps 0xb0(%rsp), %xmm0
movaps 0xa0(%rsp), %xmm1
punpcklqdq %xmm1, %xmm0 # xmm0 = xmm0[0],xmm1[0]
movq %rax, 0x38(%rsp)
movaps %xmm0, 0x20(%rsp)
movaps 0x20(%rsp), %xmm0
movq 0x38(%rsp), %rax
movaps %xmm0, (%rax)
movq -0x18(%rsp), %rcx
movq -0x10(%rsp), %rax
leaq (%rax,%rcx,2), %rax
movq %rax, -0x10(%rsp)
movq -0x10(%rsp), %rax
movaps -0x80(%rsp), %xmm0
movaps %xmm0, 0x90(%rsp)
movaps %xmm0, 0x80(%rsp)
movaps 0x90(%rsp), %xmm0
movaps 0x80(%rsp), %xmm1
punpcklqdq %xmm1, %xmm0 # xmm0 = xmm0[0],xmm1[0]
movq %rax, 0x18(%rsp)
movdqa %xmm0, (%rsp)
movdqa (%rsp), %xmm0
movq 0x18(%rsp), %rax
movdqa %xmm0, (%rax)
addq $0x108, %rsp # imm = 0x108
retq
nopl (%rax)
| /m-ab-s[P]aom/aom_dsp/x86/highbd_intrapred_sse2.c |
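Scalar reference for what the 8x4 horizontal predictor above computes: row r of the output is left[r] broadcast across all eight columns; the pshuflw/punpcklqdq pairs are that broadcast done a whole row at a time. The function below is an assumed equivalent written for clarity, not libaom's own C fallback.

#include <stddef.h>
#include <stdint.h>

static void highbd_h_predictor_scalar(uint16_t *dst, ptrdiff_t stride,
                                      const uint16_t *left, int bw, int bh) {
  for (int r = 0; r < bh; ++r) {
    for (int c = 0; c < bw; ++c) dst[c] = left[r];  /* replicate the left neighbor */
    dst += stride;
  }
}

/* highbd_h_predictor_scalar(dst, stride, left, 8, 4) mirrors the SSE2 8x4 kernel above. */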
aom_highbd_h_predictor_8x8_sse2 | void aom_highbd_h_predictor_8x8_sse2(uint16_t *dst, ptrdiff_t stride,
const uint16_t *above,
const uint16_t *left, int bd) {
const __m128i left_u16 = _mm_load_si128((const __m128i *)left);
const __m128i row0 = _mm_shufflelo_epi16(left_u16, 0x0);
const __m128i row1 = _mm_shufflelo_epi16(left_u16, 0x55);
const __m128i row2 = _mm_shufflelo_epi16(left_u16, 0xaa);
const __m128i row3 = _mm_shufflelo_epi16(left_u16, 0xff);
const __m128i row4 = _mm_shufflehi_epi16(left_u16, 0x0);
const __m128i row5 = _mm_shufflehi_epi16(left_u16, 0x55);
const __m128i row6 = _mm_shufflehi_epi16(left_u16, 0xaa);
const __m128i row7 = _mm_shufflehi_epi16(left_u16, 0xff);
(void)above;
(void)bd;
_mm_store_si128((__m128i *)dst, _mm_unpacklo_epi64(row0, row0));
dst += stride;
_mm_store_si128((__m128i *)dst, _mm_unpacklo_epi64(row1, row1));
dst += stride;
_mm_store_si128((__m128i *)dst, _mm_unpacklo_epi64(row2, row2));
dst += stride;
_mm_store_si128((__m128i *)dst, _mm_unpacklo_epi64(row3, row3));
dst += stride;
_mm_store_si128((__m128i *)dst, _mm_unpackhi_epi64(row4, row4));
dst += stride;
_mm_store_si128((__m128i *)dst, _mm_unpackhi_epi64(row5, row5));
dst += stride;
_mm_store_si128((__m128i *)dst, _mm_unpackhi_epi64(row6, row6));
dst += stride;
_mm_store_si128((__m128i *)dst, _mm_unpackhi_epi64(row7, row7));
} | subq $0x248, %rsp # imm = 0x248
movq %rdi, 0x30(%rsp)
movq %rsi, 0x28(%rsp)
movq %rdx, 0x20(%rsp)
movq %rcx, 0x18(%rsp)
movl %r8d, 0x14(%rsp)
movq 0x18(%rsp), %rax
movq %rax, 0x38(%rsp)
movq 0x38(%rsp), %rax
movaps (%rax), %xmm0
movaps %xmm0, (%rsp)
movaps (%rsp), %xmm0
pshuflw $0x0, %xmm0, %xmm0 # xmm0 = xmm0[0,0,0,0,4,5,6,7]
movaps %xmm0, -0x10(%rsp)
movaps (%rsp), %xmm0
pshuflw $0x55, %xmm0, %xmm0 # xmm0 = xmm0[1,1,1,1,4,5,6,7]
movaps %xmm0, -0x20(%rsp)
movaps (%rsp), %xmm0
pshuflw $0xaa, %xmm0, %xmm0 # xmm0 = xmm0[2,2,2,2,4,5,6,7]
movaps %xmm0, -0x30(%rsp)
movaps (%rsp), %xmm0
pshuflw $0xff, %xmm0, %xmm0 # xmm0 = xmm0[3,3,3,3,4,5,6,7]
movaps %xmm0, -0x40(%rsp)
movaps (%rsp), %xmm0
pshufhw $0x0, %xmm0, %xmm0 # xmm0 = xmm0[0,1,2,3,4,4,4,4]
movaps %xmm0, -0x50(%rsp)
movaps (%rsp), %xmm0
pshufhw $0x55, %xmm0, %xmm0 # xmm0 = xmm0[0,1,2,3,5,5,5,5]
movaps %xmm0, -0x60(%rsp)
movaps (%rsp), %xmm0
pshufhw $0xaa, %xmm0, %xmm0 # xmm0 = xmm0[0,1,2,3,6,6,6,6]
movaps %xmm0, -0x70(%rsp)
movaps (%rsp), %xmm0
pshufhw $0xff, %xmm0, %xmm0 # xmm0 = xmm0[0,1,2,3,7,7,7,7]
movaps %xmm0, -0x80(%rsp)
movq 0x30(%rsp), %rax
movaps -0x10(%rsp), %xmm0
movaps %xmm0, 0x1b0(%rsp)
movaps %xmm0, 0x1a0(%rsp)
movaps 0x1b0(%rsp), %xmm0
movaps 0x1a0(%rsp), %xmm1
punpcklqdq %xmm1, %xmm0 # xmm0 = xmm0[0],xmm1[0]
movq %rax, 0x138(%rsp)
movaps %xmm0, 0x120(%rsp)
movaps 0x120(%rsp), %xmm0
movq 0x138(%rsp), %rax
movaps %xmm0, (%rax)
movq 0x28(%rsp), %rcx
movq 0x30(%rsp), %rax
leaq (%rax,%rcx,2), %rax
movq %rax, 0x30(%rsp)
movq 0x30(%rsp), %rax
movaps -0x20(%rsp), %xmm0
movaps %xmm0, 0x190(%rsp)
movaps %xmm0, 0x180(%rsp)
movaps 0x190(%rsp), %xmm0
movaps 0x180(%rsp), %xmm1
punpcklqdq %xmm1, %xmm0 # xmm0 = xmm0[0],xmm1[0]
movq %rax, 0x118(%rsp)
movaps %xmm0, 0x100(%rsp)
movaps 0x100(%rsp), %xmm0
movq 0x118(%rsp), %rax
movaps %xmm0, (%rax)
movq 0x28(%rsp), %rcx
movq 0x30(%rsp), %rax
leaq (%rax,%rcx,2), %rax
movq %rax, 0x30(%rsp)
movq 0x30(%rsp), %rax
movaps -0x30(%rsp), %xmm0
movaps %xmm0, 0x170(%rsp)
movaps %xmm0, 0x160(%rsp)
movaps 0x170(%rsp), %xmm0
movaps 0x160(%rsp), %xmm1
punpcklqdq %xmm1, %xmm0 # xmm0 = xmm0[0],xmm1[0]
movq %rax, 0xf8(%rsp)
movaps %xmm0, 0xe0(%rsp)
movaps 0xe0(%rsp), %xmm0
movq 0xf8(%rsp), %rax
movaps %xmm0, (%rax)
movq 0x28(%rsp), %rcx
movq 0x30(%rsp), %rax
leaq (%rax,%rcx,2), %rax
movq %rax, 0x30(%rsp)
movq 0x30(%rsp), %rax
movaps -0x40(%rsp), %xmm0
movaps %xmm0, 0x150(%rsp)
movaps %xmm0, 0x140(%rsp)
movaps 0x150(%rsp), %xmm0
movaps 0x140(%rsp), %xmm1
punpcklqdq %xmm1, %xmm0 # xmm0 = xmm0[0],xmm1[0]
movq %rax, 0xd8(%rsp)
movaps %xmm0, 0xc0(%rsp)
movaps 0xc0(%rsp), %xmm0
movq 0xd8(%rsp), %rax
movaps %xmm0, (%rax)
movq 0x28(%rsp), %rcx
movq 0x30(%rsp), %rax
leaq (%rax,%rcx,2), %rax
movq %rax, 0x30(%rsp)
movq 0x30(%rsp), %rax
movaps -0x50(%rsp), %xmm0
movaps %xmm0, 0x230(%rsp)
movaps %xmm0, 0x220(%rsp)
movaps 0x230(%rsp), %xmm0
movaps 0x220(%rsp), %xmm1
punpckhqdq %xmm1, %xmm0 # xmm0 = xmm0[1],xmm1[1]
movq %rax, 0xb8(%rsp)
movaps %xmm0, 0xa0(%rsp)
movaps 0xa0(%rsp), %xmm0
movq 0xb8(%rsp), %rax
movaps %xmm0, (%rax)
movq 0x28(%rsp), %rcx
movq 0x30(%rsp), %rax
leaq (%rax,%rcx,2), %rax
movq %rax, 0x30(%rsp)
movq 0x30(%rsp), %rax
movaps -0x60(%rsp), %xmm0
movaps %xmm0, 0x210(%rsp)
movaps %xmm0, 0x200(%rsp)
movaps 0x210(%rsp), %xmm0
movaps 0x200(%rsp), %xmm1
punpckhqdq %xmm1, %xmm0 # xmm0 = xmm0[1],xmm1[1]
movq %rax, 0x98(%rsp)
movaps %xmm0, 0x80(%rsp)
movaps 0x80(%rsp), %xmm0
movq 0x98(%rsp), %rax
movaps %xmm0, (%rax)
movq 0x28(%rsp), %rcx
movq 0x30(%rsp), %rax
leaq (%rax,%rcx,2), %rax
movq %rax, 0x30(%rsp)
movq 0x30(%rsp), %rax
movaps -0x70(%rsp), %xmm0
movaps %xmm0, 0x1f0(%rsp)
movaps %xmm0, 0x1e0(%rsp)
movaps 0x1f0(%rsp), %xmm0
movaps 0x1e0(%rsp), %xmm1
punpckhqdq %xmm1, %xmm0 # xmm0 = xmm0[1],xmm1[1]
movq %rax, 0x78(%rsp)
movaps %xmm0, 0x60(%rsp)
movaps 0x60(%rsp), %xmm0
movq 0x78(%rsp), %rax
movaps %xmm0, (%rax)
movq 0x28(%rsp), %rcx
movq 0x30(%rsp), %rax
leaq (%rax,%rcx,2), %rax
movq %rax, 0x30(%rsp)
movq 0x30(%rsp), %rax
movaps -0x80(%rsp), %xmm0
movaps %xmm0, 0x1d0(%rsp)
movaps %xmm0, 0x1c0(%rsp)
movaps 0x1d0(%rsp), %xmm0
movaps 0x1c0(%rsp), %xmm1
punpckhqdq %xmm1, %xmm0 # xmm0 = xmm0[1],xmm1[1]
movq %rax, 0x58(%rsp)
movdqa %xmm0, 0x40(%rsp)
movdqa 0x40(%rsp), %xmm0
movq 0x58(%rsp), %rax
movdqa %xmm0, (%rax)
addq $0x248, %rsp # imm = 0x248
retq
nopw %cs:(%rax,%rax)
| /m-ab-s[P]aom/aom_dsp/x86/highbd_intrapred_sse2.c |
h_predictor_16x8 | static inline void h_predictor_16x8(uint16_t *dst, ptrdiff_t stride,
const uint16_t *left) {
const __m128i left_u16 = _mm_load_si128((const __m128i *)left);
const __m128i row0 = _mm_shufflelo_epi16(left_u16, 0x0);
const __m128i row1 = _mm_shufflelo_epi16(left_u16, 0x55);
const __m128i row2 = _mm_shufflelo_epi16(left_u16, 0xaa);
const __m128i row3 = _mm_shufflelo_epi16(left_u16, 0xff);
const __m128i row4 = _mm_shufflehi_epi16(left_u16, 0x0);
const __m128i row5 = _mm_shufflehi_epi16(left_u16, 0x55);
const __m128i row6 = _mm_shufflehi_epi16(left_u16, 0xaa);
const __m128i row7 = _mm_shufflehi_epi16(left_u16, 0xff);
h_store_16_unpacklo(&dst, stride, &row0);
h_store_16_unpacklo(&dst, stride, &row1);
h_store_16_unpacklo(&dst, stride, &row2);
h_store_16_unpacklo(&dst, stride, &row3);
h_store_16_unpackhi(&dst, stride, &row4);
h_store_16_unpackhi(&dst, stride, &row5);
h_store_16_unpackhi(&dst, stride, &row6);
h_store_16_unpackhi(&dst, stride, &row7);
} | subq $0xb8, %rsp
movq %rdi, 0xa8(%rsp)
movq %rsi, 0xa0(%rsp)
movq %rdx, 0x98(%rsp)
movq 0x98(%rsp), %rax
movq %rax, 0xb0(%rsp)
movq 0xb0(%rsp), %rax
movaps (%rax), %xmm0
movaps %xmm0, 0x80(%rsp)
movaps 0x80(%rsp), %xmm0
pshuflw $0x0, %xmm0, %xmm0 # xmm0 = xmm0[0,0,0,0,4,5,6,7]
movaps %xmm0, 0x70(%rsp)
movaps 0x80(%rsp), %xmm0
pshuflw $0x55, %xmm0, %xmm0 # xmm0 = xmm0[1,1,1,1,4,5,6,7]
movaps %xmm0, 0x60(%rsp)
movaps 0x80(%rsp), %xmm0
pshuflw $0xaa, %xmm0, %xmm0 # xmm0 = xmm0[2,2,2,2,4,5,6,7]
movaps %xmm0, 0x50(%rsp)
movaps 0x80(%rsp), %xmm0
pshuflw $0xff, %xmm0, %xmm0 # xmm0 = xmm0[3,3,3,3,4,5,6,7]
movaps %xmm0, 0x40(%rsp)
movaps 0x80(%rsp), %xmm0
pshufhw $0x0, %xmm0, %xmm0 # xmm0 = xmm0[0,1,2,3,4,4,4,4]
movaps %xmm0, 0x30(%rsp)
movaps 0x80(%rsp), %xmm0
pshufhw $0x55, %xmm0, %xmm0 # xmm0 = xmm0[0,1,2,3,5,5,5,5]
movaps %xmm0, 0x20(%rsp)
movaps 0x80(%rsp), %xmm0
pshufhw $0xaa, %xmm0, %xmm0 # xmm0 = xmm0[0,1,2,3,6,6,6,6]
movaps %xmm0, 0x10(%rsp)
movaps 0x80(%rsp), %xmm0
pshufhw $0xff, %xmm0, %xmm0 # xmm0 = xmm0[0,1,2,3,7,7,7,7]
movdqa %xmm0, (%rsp)
movq 0xa0(%rsp), %rsi
leaq 0xa8(%rsp), %rdi
leaq 0x70(%rsp), %rdx
callq 0x9ece80
movq 0xa0(%rsp), %rsi
leaq 0xa8(%rsp), %rdi
leaq 0x60(%rsp), %rdx
callq 0x9ece80
movq 0xa0(%rsp), %rsi
leaq 0xa8(%rsp), %rdi
leaq 0x50(%rsp), %rdx
callq 0x9ece80
movq 0xa0(%rsp), %rsi
leaq 0xa8(%rsp), %rdi
leaq 0x40(%rsp), %rdx
callq 0x9ece80
movq 0xa0(%rsp), %rsi
leaq 0xa8(%rsp), %rdi
leaq 0x30(%rsp), %rdx
callq 0x9ecf30
movq 0xa0(%rsp), %rsi
leaq 0xa8(%rsp), %rdi
leaq 0x20(%rsp), %rdx
callq 0x9ecf30
movq 0xa0(%rsp), %rsi
leaq 0xa8(%rsp), %rdi
leaq 0x10(%rsp), %rdx
callq 0x9ecf30
movq 0xa0(%rsp), %rsi
leaq 0xa8(%rsp), %rdi
movq %rsp, %rdx
callq 0x9ecf30
addq $0xb8, %rsp
retq
nopl (%rax,%rax)
| /m-ab-s[P]aom/aom_dsp/x86/highbd_intrapred_sse2.c |
aom_highbd_h_predictor_16x32_sse2 | void aom_highbd_h_predictor_16x32_sse2(uint16_t *dst, ptrdiff_t stride,
const uint16_t *above,
const uint16_t *left, int bd) {
int i;
(void)above;
(void)bd;
for (i = 0; i < 4; i++, left += 8) {
h_predictor_16x8(dst, stride, left);
dst += stride << 3;
}
} | subq $0x28, %rsp
movq %rdi, 0x20(%rsp)
movq %rsi, 0x18(%rsp)
movq %rdx, 0x10(%rsp)
movq %rcx, 0x8(%rsp)
movl %r8d, 0x4(%rsp)
movl $0x0, (%rsp)
cmpl $0x4, (%rsp)
jge 0x9e8e7d
movq 0x20(%rsp), %rdi
movq 0x18(%rsp), %rsi
movq 0x8(%rsp), %rdx
callq 0x9e8be0
movq 0x18(%rsp), %rax
shlq $0x3, %rax
shlq %rax
addq 0x20(%rsp), %rax
movq %rax, 0x20(%rsp)
movl (%rsp), %eax
addl $0x1, %eax
movl %eax, (%rsp)
movq 0x8(%rsp), %rax
addq $0x10, %rax
movq %rax, 0x8(%rsp)
jmp 0x9e8e34
addq $0x28, %rsp
retq
nopw %cs:(%rax,%rax)
| /m-ab-s[P]aom/aom_dsp/x86/highbd_intrapred_sse2.c |
aom_highbd_h_predictor_32x16_sse2 | void aom_highbd_h_predictor_32x16_sse2(uint16_t *dst, ptrdiff_t stride,
const uint16_t *above,
const uint16_t *left, int bd) {
int i;
(void)above;
(void)bd;
for (i = 0; i < 2; i++, left += 8) {
h_predictor_32x8(dst, stride, left);
dst += stride << 3;
}
} | subq $0x28, %rsp
movq %rdi, 0x20(%rsp)
movq %rsi, 0x18(%rsp)
movq %rdx, 0x10(%rsp)
movq %rcx, 0x8(%rsp)
movl %r8d, 0x4(%rsp)
movl $0x0, (%rsp)
cmpl $0x2, (%rsp)
jge 0x9e8efd
movq 0x20(%rsp), %rdi
movq 0x18(%rsp), %rsi
movq 0x8(%rsp), %rdx
callq 0x9e8f10
movq 0x18(%rsp), %rax
shlq $0x3, %rax
shlq %rax
addq 0x20(%rsp), %rax
movq %rax, 0x20(%rsp)
movl (%rsp), %eax
addl $0x1, %eax
movl %eax, (%rsp)
movq 0x8(%rsp), %rax
addq $0x10, %rax
movq %rax, 0x8(%rsp)
jmp 0x9e8eb4
addq $0x28, %rsp
retq
nopw %cs:(%rax,%rax)
| /m-ab-s[P]aom/aom_dsp/x86/highbd_intrapred_sse2.c |
h_predictor_32x8 | static inline void h_predictor_32x8(uint16_t *dst, ptrdiff_t stride,
const uint16_t *left) {
const __m128i left_u16 = _mm_load_si128((const __m128i *)left);
const __m128i row0 = _mm_shufflelo_epi16(left_u16, 0x0);
const __m128i row1 = _mm_shufflelo_epi16(left_u16, 0x55);
const __m128i row2 = _mm_shufflelo_epi16(left_u16, 0xaa);
const __m128i row3 = _mm_shufflelo_epi16(left_u16, 0xff);
const __m128i row4 = _mm_shufflehi_epi16(left_u16, 0x0);
const __m128i row5 = _mm_shufflehi_epi16(left_u16, 0x55);
const __m128i row6 = _mm_shufflehi_epi16(left_u16, 0xaa);
const __m128i row7 = _mm_shufflehi_epi16(left_u16, 0xff);
h_store_32_unpacklo(&dst, stride, &row0);
h_store_32_unpacklo(&dst, stride, &row1);
h_store_32_unpacklo(&dst, stride, &row2);
h_store_32_unpacklo(&dst, stride, &row3);
h_store_32_unpackhi(&dst, stride, &row4);
h_store_32_unpackhi(&dst, stride, &row5);
h_store_32_unpackhi(&dst, stride, &row6);
h_store_32_unpackhi(&dst, stride, &row7);
} | subq $0xb8, %rsp
movq %rdi, 0xa8(%rsp)
movq %rsi, 0xa0(%rsp)
movq %rdx, 0x98(%rsp)
movq 0x98(%rsp), %rax
movq %rax, 0xb0(%rsp)
movq 0xb0(%rsp), %rax
movaps (%rax), %xmm0
movaps %xmm0, 0x80(%rsp)
movaps 0x80(%rsp), %xmm0
pshuflw $0x0, %xmm0, %xmm0 # xmm0 = xmm0[0,0,0,0,4,5,6,7]
movaps %xmm0, 0x70(%rsp)
movaps 0x80(%rsp), %xmm0
pshuflw $0x55, %xmm0, %xmm0 # xmm0 = xmm0[1,1,1,1,4,5,6,7]
movaps %xmm0, 0x60(%rsp)
movaps 0x80(%rsp), %xmm0
pshuflw $0xaa, %xmm0, %xmm0 # xmm0 = xmm0[2,2,2,2,4,5,6,7]
movaps %xmm0, 0x50(%rsp)
movaps 0x80(%rsp), %xmm0
pshuflw $0xff, %xmm0, %xmm0 # xmm0 = xmm0[3,3,3,3,4,5,6,7]
movaps %xmm0, 0x40(%rsp)
movaps 0x80(%rsp), %xmm0
pshufhw $0x0, %xmm0, %xmm0 # xmm0 = xmm0[0,1,2,3,4,4,4,4]
movaps %xmm0, 0x30(%rsp)
movaps 0x80(%rsp), %xmm0
pshufhw $0x55, %xmm0, %xmm0 # xmm0 = xmm0[0,1,2,3,5,5,5,5]
movaps %xmm0, 0x20(%rsp)
movaps 0x80(%rsp), %xmm0
pshufhw $0xaa, %xmm0, %xmm0 # xmm0 = xmm0[0,1,2,3,6,6,6,6]
movaps %xmm0, 0x10(%rsp)
movaps 0x80(%rsp), %xmm0
pshufhw $0xff, %xmm0, %xmm0 # xmm0 = xmm0[0,1,2,3,7,7,7,7]
movdqa %xmm0, (%rsp)
movq 0xa0(%rsp), %rsi
leaq 0xa8(%rsp), %rdi
leaq 0x70(%rsp), %rdx
callq 0x9ecfe0
movq 0xa0(%rsp), %rsi
leaq 0xa8(%rsp), %rdi
leaq 0x60(%rsp), %rdx
callq 0x9ecfe0
movq 0xa0(%rsp), %rsi
leaq 0xa8(%rsp), %rdi
leaq 0x50(%rsp), %rdx
callq 0x9ecfe0
movq 0xa0(%rsp), %rsi
leaq 0xa8(%rsp), %rdi
leaq 0x40(%rsp), %rdx
callq 0x9ecfe0
movq 0xa0(%rsp), %rsi
leaq 0xa8(%rsp), %rdi
leaq 0x30(%rsp), %rdx
callq 0x9ed0e0
movq 0xa0(%rsp), %rsi
leaq 0xa8(%rsp), %rdi
leaq 0x20(%rsp), %rdx
callq 0x9ed0e0
movq 0xa0(%rsp), %rsi
leaq 0xa8(%rsp), %rdi
leaq 0x10(%rsp), %rdx
callq 0x9ed0e0
movq 0xa0(%rsp), %rsi
leaq 0xa8(%rsp), %rdi
movq %rsp, %rdx
callq 0x9ed0e0
addq $0xb8, %rsp
retq
nopl (%rax,%rax)
| /m-ab-s[P]aom/aom_dsp/x86/highbd_intrapred_sse2.c |
aom_highbd_h_predictor_32x32_sse2 | void aom_highbd_h_predictor_32x32_sse2(uint16_t *dst, ptrdiff_t stride,
const uint16_t *above,
const uint16_t *left, int bd) {
int i;
(void)above;
(void)bd;
for (i = 0; i < 4; i++, left += 8) {
h_predictor_32x8(dst, stride, left);
dst += stride << 3;
}
} | subq $0x28, %rsp
movq %rdi, 0x20(%rsp)
movq %rsi, 0x18(%rsp)
movq %rdx, 0x10(%rsp)
movq %rcx, 0x8(%rsp)
movl %r8d, 0x4(%rsp)
movl $0x0, (%rsp)
cmpl $0x4, (%rsp)
jge 0x9e912d
movq 0x20(%rsp), %rdi
movq 0x18(%rsp), %rsi
movq 0x8(%rsp), %rdx
callq 0x9e8f10
movq 0x18(%rsp), %rax
shlq $0x3, %rax
shlq %rax
addq 0x20(%rsp), %rax
movq %rax, 0x20(%rsp)
movl (%rsp), %eax
addl $0x1, %eax
movl %eax, (%rsp)
movq 0x8(%rsp), %rax
addq $0x10, %rax
movq %rax, 0x8(%rsp)
jmp 0x9e90e4
addq $0x28, %rsp
retq
nopw %cs:(%rax,%rax)
| /m-ab-s[P]aom/aom_dsp/x86/highbd_intrapred_sse2.c |
aom_highbd_dc_left_predictor_4x4_sse2 | void aom_highbd_dc_left_predictor_4x4_sse2(uint16_t *dst, ptrdiff_t stride,
const uint16_t *above,
const uint16_t *left, int bd) {
const __m128i two = _mm_cvtsi32_si128(2);
const __m128i sum = dc_sum_4(left);
const __m128i dc = _mm_srli_epi16(_mm_add_epi16(sum, two), 2);
(void)above;
(void)bd;
dc_store_4x4(dst, stride, &dc);
} | subq $0xb8, %rsp
movq %rdi, 0x58(%rsp)
movq %rsi, 0x50(%rsp)
movq %rdx, 0x48(%rsp)
movq %rcx, 0x40(%rsp)
movl %r8d, 0x3c(%rsp)
movl $0x2, 0x78(%rsp)
movd 0x78(%rsp), %xmm0
movdqa %xmm0, 0x60(%rsp)
movdqa 0x60(%rsp), %xmm0
movdqa %xmm0, 0x20(%rsp)
movq 0x40(%rsp), %rdi
callq 0x9e9210
movdqa %xmm0, 0x10(%rsp)
movdqa 0x10(%rsp), %xmm1
movdqa 0x20(%rsp), %xmm0
movdqa %xmm1, 0xa0(%rsp)
movdqa %xmm0, 0x90(%rsp)
movdqa 0xa0(%rsp), %xmm0
movdqa 0x90(%rsp), %xmm1
paddw %xmm1, %xmm0
movdqa %xmm0, 0x80(%rsp)
movl $0x2, 0x7c(%rsp)
movdqa 0x80(%rsp), %xmm0
movl 0x7c(%rsp), %eax
movd %eax, %xmm1
psrlw %xmm1, %xmm0
movdqa %xmm0, (%rsp)
movq 0x58(%rsp), %rdi
movq 0x50(%rsp), %rsi
movq %rsp, %rdx
callq 0x9e92a0
addq $0xb8, %rsp
retq
nopl (%rax)
| /m-ab-s[P]aom/aom_dsp/x86/highbd_intrapred_sse2.c |
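In scalar terms, the intrinsics above average the four left-neighbor pixels with round-half-up, (sum + 2) >> 2, and write that single DC value to every pixel of the 4x4 block. A hedged scalar sketch, not libaom's C fallback:

#include <stddef.h>
#include <stdint.h>

static void highbd_dc_left_4x4_scalar(uint16_t *dst, ptrdiff_t stride,
                                      const uint16_t *left) {
  const uint16_t dc = (uint16_t)((left[0] + left[1] + left[2] + left[3] + 2) >> 2);
  for (int r = 0; r < 4; ++r) {
    for (int c = 0; c < 4; ++c) dst[c] = dc;  /* fill the block with the rounded mean */
    dst += stride;
  }
}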
av1_idct4 | void av1_idct4(const int32_t *input, int32_t *output, int8_t cos_bit,
const int8_t *stage_range) {
assert(output != input);
const int32_t size = 4;
const int32_t *cospi = cospi_arr(cos_bit);
int32_t stage = 0;
int32_t *bf0, *bf1;
int32_t step[4];
// stage 0;
// stage 1;
stage++;
bf1 = output;
bf1[0] = input[0];
bf1[1] = input[2];
bf1[2] = input[1];
bf1[3] = input[3];
av1_range_check_buf(stage, input, bf1, size, stage_range[stage]);
// stage 2
stage++;
bf0 = output;
bf1 = step;
bf1[0] = half_btf(cospi[32], bf0[0], cospi[32], bf0[1], cos_bit);
bf1[1] = half_btf(cospi[32], bf0[0], -cospi[32], bf0[1], cos_bit);
bf1[2] = half_btf(cospi[48], bf0[2], -cospi[16], bf0[3], cos_bit);
bf1[3] = half_btf(cospi[16], bf0[2], cospi[48], bf0[3], cos_bit);
av1_range_check_buf(stage, input, bf1, size, stage_range[stage]);
// stage 3
stage++;
bf0 = step;
bf1 = output;
bf1[0] = clamp_value(bf0[0] + bf0[3], stage_range[stage]);
bf1[1] = clamp_value(bf0[1] + bf0[2], stage_range[stage]);
bf1[2] = clamp_value(bf0[1] - bf0[2], stage_range[stage]);
bf1[3] = clamp_value(bf0[0] - bf0[3], stage_range[stage]);
} | subq $0x58, %rsp
movb %dl, %al
movq %rdi, 0x50(%rsp)
movq %rsi, 0x48(%rsp)
movb %al, 0x47(%rsp)
movq %rcx, 0x38(%rsp)
movl $0x4, 0x34(%rsp)
movsbl 0x47(%rsp), %edi
callq 0x9f4a90
movq %rax, 0x28(%rsp)
movl $0x0, 0x24(%rsp)
movl 0x24(%rsp), %eax
addl $0x1, %eax
movl %eax, 0x24(%rsp)
movq 0x48(%rsp), %rax
movq %rax, 0x10(%rsp)
movq 0x50(%rsp), %rax
movl (%rax), %ecx
movq 0x10(%rsp), %rax
movl %ecx, (%rax)
movq 0x50(%rsp), %rax
movl 0x8(%rax), %ecx
movq 0x10(%rsp), %rax
movl %ecx, 0x4(%rax)
movq 0x50(%rsp), %rax
movl 0x4(%rax), %ecx
movq 0x10(%rsp), %rax
movl %ecx, 0x8(%rax)
movq 0x50(%rsp), %rax
movl 0xc(%rax), %ecx
movq 0x10(%rsp), %rax
movl %ecx, 0xc(%rax)
movl 0x24(%rsp), %edi
movq 0x50(%rsp), %rsi
movq 0x10(%rsp), %rdx
movq 0x38(%rsp), %rax
movslq 0x24(%rsp), %r8
movl $0x4, %ecx
movsbl (%rax,%r8), %r8d
callq 0x5be6a0
movl 0x24(%rsp), %eax
addl $0x1, %eax
movl %eax, 0x24(%rsp)
movq 0x48(%rsp), %rax
movq %rax, 0x18(%rsp)
movq %rsp, %rax
movq %rax, 0x10(%rsp)
movq 0x28(%rsp), %rax
movl 0x80(%rax), %edi
movq 0x18(%rsp), %rax
movl (%rax), %esi
movq 0x28(%rsp), %rax
movl 0x80(%rax), %edx
movq 0x18(%rsp), %rax
movl 0x4(%rax), %ecx
movsbl 0x47(%rsp), %r8d
callq 0x9f4ab0
movl %eax, %ecx
movq 0x10(%rsp), %rax
movl %ecx, (%rax)
movq 0x28(%rsp), %rax
movl 0x80(%rax), %edi
movq 0x18(%rsp), %rax
movl (%rax), %esi
movq 0x28(%rsp), %rax
xorl %edx, %edx
subl 0x80(%rax), %edx
movq 0x18(%rsp), %rax
movl 0x4(%rax), %ecx
movsbl 0x47(%rsp), %r8d
callq 0x9f4ab0
movl %eax, %ecx
movq 0x10(%rsp), %rax
movl %ecx, 0x4(%rax)
movq 0x28(%rsp), %rax
movl 0xc0(%rax), %edi
movq 0x18(%rsp), %rax
movl 0x8(%rax), %esi
movq 0x28(%rsp), %rax
xorl %edx, %edx
subl 0x40(%rax), %edx
movq 0x18(%rsp), %rax
movl 0xc(%rax), %ecx
movsbl 0x47(%rsp), %r8d
callq 0x9f4ab0
movl %eax, %ecx
movq 0x10(%rsp), %rax
movl %ecx, 0x8(%rax)
movq 0x28(%rsp), %rax
movl 0x40(%rax), %edi
movq 0x18(%rsp), %rax
movl 0x8(%rax), %esi
movq 0x28(%rsp), %rax
movl 0xc0(%rax), %edx
movq 0x18(%rsp), %rax
movl 0xc(%rax), %ecx
movsbl 0x47(%rsp), %r8d
callq 0x9f4ab0
movl %eax, %ecx
movq 0x10(%rsp), %rax
movl %ecx, 0xc(%rax)
movl 0x24(%rsp), %edi
movq 0x50(%rsp), %rsi
movq 0x10(%rsp), %rdx
movq 0x38(%rsp), %rax
movslq 0x24(%rsp), %r8
movl $0x4, %ecx
movsbl (%rax,%r8), %r8d
callq 0x5be6a0
movl 0x24(%rsp), %eax
addl $0x1, %eax
movl %eax, 0x24(%rsp)
movq %rsp, %rax
movq %rax, 0x18(%rsp)
movq 0x48(%rsp), %rax
movq %rax, 0x10(%rsp)
movq 0x18(%rsp), %rax
movl (%rax), %edi
movq 0x18(%rsp), %rax
addl 0xc(%rax), %edi
movq 0x38(%rsp), %rax
movslq 0x24(%rsp), %rcx
movsbl (%rax,%rcx), %esi
callq 0x9f4b20
movl %eax, %ecx
movq 0x10(%rsp), %rax
movl %ecx, (%rax)
movq 0x18(%rsp), %rax
movl 0x4(%rax), %edi
movq 0x18(%rsp), %rax
addl 0x8(%rax), %edi
movq 0x38(%rsp), %rax
movslq 0x24(%rsp), %rcx
movsbl (%rax,%rcx), %esi
callq 0x9f4b20
movl %eax, %ecx
movq 0x10(%rsp), %rax
movl %ecx, 0x4(%rax)
movq 0x18(%rsp), %rax
movl 0x4(%rax), %edi
movq 0x18(%rsp), %rax
subl 0x8(%rax), %edi
movq 0x38(%rsp), %rax
movslq 0x24(%rsp), %rcx
movsbl (%rax,%rcx), %esi
callq 0x9f4b20
movl %eax, %ecx
movq 0x10(%rsp), %rax
movl %ecx, 0x8(%rax)
movq 0x18(%rsp), %rax
movl (%rax), %edi
movq 0x18(%rsp), %rax
subl 0xc(%rax), %edi
movq 0x38(%rsp), %rax
movslq 0x24(%rsp), %rcx
movsbl (%rax,%rcx), %esi
callq 0x9f4b20
movl %eax, %ecx
movq 0x10(%rsp), %rax
movl %ecx, 0xc(%rax)
addq $0x58, %rsp
retq
| /m-ab-s[P]aom/av1/common/av1_inv_txfm1d.c |
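Expanding one stage-2 arm with the half_btf helper shown below makes the fixed-point butterfly explicit; ignoring the 32-bit-wrap subtlety that half_btf's own comment discusses, the first pair of outputs is

  bf1[0] = (cospi[32] * bf0[0] + cospi[32] * bf0[1] + (1 << (cos_bit - 1))) >> cos_bit
  bf1[1] = (cospi[32] * bf0[0] - cospi[32] * bf0[1] + (1 << (cos_bit - 1))) >> cos_bit

Stage 3 is then the plain add/sub butterfly, clamped to stage_range[3] bits by clamp_value.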
half_btf | static inline int32_t half_btf(int32_t w0, int32_t in0, int32_t w1, int32_t in1,
int bit) {
int64_t result_64 = (int64_t)(w0 * in0) + (int64_t)(w1 * in1);
int64_t intermediate = result_64 + (1LL << (bit - 1));
// NOTE(rachelbarker): The value 'result_64' may not necessarily fit
// into 32 bits. However, the result of this function is nominally
// ROUND_POWER_OF_TWO_64(result_64, bit)
// and that is required to fit into stage_range[stage] many bits
// (checked by range_check_buf()).
//
// Here we've unpacked that rounding operation, and it can be shown
// that the value of 'intermediate' here *does* fit into 32 bits
// for any conformant bitstream.
// The upshot is that, if you do all this calculation using
// wrapping 32-bit arithmetic instead of (non-wrapping) 64-bit arithmetic,
// then you'll still get the correct result.
// To provide a check on this logic, we assert that 'intermediate'
// would fit into an int32 if range checking is enabled.
#if CONFIG_COEFFICIENT_RANGE_CHECKING
assert(intermediate >= INT32_MIN && intermediate <= INT32_MAX);
#endif
return (int32_t)(intermediate >> bit);
} | movl %edi, -0x4(%rsp)
movl %esi, -0x8(%rsp)
movl %edx, -0xc(%rsp)
movl %ecx, -0x10(%rsp)
movl %r8d, -0x14(%rsp)
movl -0x4(%rsp), %eax
imull -0x8(%rsp), %eax
cltq
movl -0xc(%rsp), %ecx
imull -0x10(%rsp), %ecx
movslq %ecx, %rcx
addq %rcx, %rax
movq %rax, -0x20(%rsp)
movq -0x20(%rsp), %rax
movl -0x14(%rsp), %ecx
subl $0x1, %ecx
movl %ecx, %ecx
movl $0x1, %edx
shlq %cl, %rdx
movq %rdx, %rcx
addq %rcx, %rax
movq %rax, -0x28(%rsp)
movq -0x28(%rsp), %rax
movl -0x14(%rsp), %ecx
sarq %cl, %rax
retq
nopw %cs:(%rax,%rax)
| /m-ab-s[P]aom/av1/common/av1_txfm.h |
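A small check of the identity the comment above relies on: adding 1 << (bit - 1) and shifting right by bit is rounded division by 2^bit, i.e. the same result as the ROUND_POWER_OF_TWO_64 form it mentions. The harness below is an illustrative assumption, not a libaom test.

#include <assert.h>
#include <stdint.h>

static int64_t round_pow2_64(int64_t x, int bit) {
  return (x + ((int64_t)1 << (bit - 1))) >> bit;  /* same shape as half_btf's final step */
}

int main(void) {
  /* bit = 12: 4096 -> 1, 6143 -> 1, 6144 -> 2 (rounds half up) */
  assert(round_pow2_64(4096, 12) == 1);
  assert(round_pow2_64(6143, 12) == 1);
  assert(round_pow2_64(6144, 12) == 2);
  return 0;
}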
av1_idct8 | void av1_idct8(const int32_t *input, int32_t *output, int8_t cos_bit,
const int8_t *stage_range) {
assert(output != input);
const int32_t size = 8;
const int32_t *cospi = cospi_arr(cos_bit);
int32_t stage = 0;
int32_t *bf0, *bf1;
int32_t step[8];
// stage 0;
// stage 1;
stage++;
bf1 = output;
bf1[0] = input[0];
bf1[1] = input[4];
bf1[2] = input[2];
bf1[3] = input[6];
bf1[4] = input[1];
bf1[5] = input[5];
bf1[6] = input[3];
bf1[7] = input[7];
av1_range_check_buf(stage, input, bf1, size, stage_range[stage]);
// stage 2
stage++;
bf0 = output;
bf1 = step;
bf1[0] = bf0[0];
bf1[1] = bf0[1];
bf1[2] = bf0[2];
bf1[3] = bf0[3];
bf1[4] = half_btf(cospi[56], bf0[4], -cospi[8], bf0[7], cos_bit);
bf1[5] = half_btf(cospi[24], bf0[5], -cospi[40], bf0[6], cos_bit);
bf1[6] = half_btf(cospi[40], bf0[5], cospi[24], bf0[6], cos_bit);
bf1[7] = half_btf(cospi[8], bf0[4], cospi[56], bf0[7], cos_bit);
av1_range_check_buf(stage, input, bf1, size, stage_range[stage]);
// stage 3
stage++;
bf0 = step;
bf1 = output;
bf1[0] = half_btf(cospi[32], bf0[0], cospi[32], bf0[1], cos_bit);
bf1[1] = half_btf(cospi[32], bf0[0], -cospi[32], bf0[1], cos_bit);
bf1[2] = half_btf(cospi[48], bf0[2], -cospi[16], bf0[3], cos_bit);
bf1[3] = half_btf(cospi[16], bf0[2], cospi[48], bf0[3], cos_bit);
bf1[4] = clamp_value(bf0[4] + bf0[5], stage_range[stage]);
bf1[5] = clamp_value(bf0[4] - bf0[5], stage_range[stage]);
bf1[6] = clamp_value(-bf0[6] + bf0[7], stage_range[stage]);
bf1[7] = clamp_value(bf0[6] + bf0[7], stage_range[stage]);
av1_range_check_buf(stage, input, bf1, size, stage_range[stage]);
// stage 4
stage++;
bf0 = output;
bf1 = step;
bf1[0] = clamp_value(bf0[0] + bf0[3], stage_range[stage]);
bf1[1] = clamp_value(bf0[1] + bf0[2], stage_range[stage]);
bf1[2] = clamp_value(bf0[1] - bf0[2], stage_range[stage]);
bf1[3] = clamp_value(bf0[0] - bf0[3], stage_range[stage]);
bf1[4] = bf0[4];
bf1[5] = half_btf(-cospi[32], bf0[5], cospi[32], bf0[6], cos_bit);
bf1[6] = half_btf(cospi[32], bf0[5], cospi[32], bf0[6], cos_bit);
bf1[7] = bf0[7];
av1_range_check_buf(stage, input, bf1, size, stage_range[stage]);
// stage 5
stage++;
bf0 = step;
bf1 = output;
bf1[0] = clamp_value(bf0[0] + bf0[7], stage_range[stage]);
bf1[1] = clamp_value(bf0[1] + bf0[6], stage_range[stage]);
bf1[2] = clamp_value(bf0[2] + bf0[5], stage_range[stage]);
bf1[3] = clamp_value(bf0[3] + bf0[4], stage_range[stage]);
bf1[4] = clamp_value(bf0[3] - bf0[4], stage_range[stage]);
bf1[5] = clamp_value(bf0[2] - bf0[5], stage_range[stage]);
bf1[6] = clamp_value(bf0[1] - bf0[6], stage_range[stage]);
bf1[7] = clamp_value(bf0[0] - bf0[7], stage_range[stage]);
} | subq $0x68, %rsp
movb %dl, %al
movq %rdi, 0x60(%rsp)
movq %rsi, 0x58(%rsp)
movb %al, 0x57(%rsp)
movq %rcx, 0x48(%rsp)
movl $0x8, 0x44(%rsp)
movsbl 0x57(%rsp), %edi
callq 0x9f4a90
movq %rax, 0x38(%rsp)
movl $0x0, 0x34(%rsp)
movl 0x34(%rsp), %eax
addl $0x1, %eax
movl %eax, 0x34(%rsp)
movq 0x58(%rsp), %rax
movq %rax, 0x20(%rsp)
movq 0x60(%rsp), %rax
movl (%rax), %ecx
movq 0x20(%rsp), %rax
movl %ecx, (%rax)
movq 0x60(%rsp), %rax
movl 0x10(%rax), %ecx
movq 0x20(%rsp), %rax
movl %ecx, 0x4(%rax)
movq 0x60(%rsp), %rax
movl 0x8(%rax), %ecx
movq 0x20(%rsp), %rax
movl %ecx, 0x8(%rax)
movq 0x60(%rsp), %rax
movl 0x18(%rax), %ecx
movq 0x20(%rsp), %rax
movl %ecx, 0xc(%rax)
movq 0x60(%rsp), %rax
movl 0x4(%rax), %ecx
movq 0x20(%rsp), %rax
movl %ecx, 0x10(%rax)
movq 0x60(%rsp), %rax
movl 0x14(%rax), %ecx
movq 0x20(%rsp), %rax
movl %ecx, 0x14(%rax)
movq 0x60(%rsp), %rax
movl 0xc(%rax), %ecx
movq 0x20(%rsp), %rax
movl %ecx, 0x18(%rax)
movq 0x60(%rsp), %rax
movl 0x1c(%rax), %ecx
movq 0x20(%rsp), %rax
movl %ecx, 0x1c(%rax)
movl 0x34(%rsp), %edi
movq 0x60(%rsp), %rsi
movq 0x20(%rsp), %rdx
movq 0x48(%rsp), %rax
movslq 0x34(%rsp), %r8
movl $0x8, %ecx
movsbl (%rax,%r8), %r8d
callq 0x5be6a0
movl 0x34(%rsp), %eax
addl $0x1, %eax
movl %eax, 0x34(%rsp)
movq 0x58(%rsp), %rax
movq %rax, 0x28(%rsp)
movq %rsp, %rax
movq %rax, 0x20(%rsp)
movq 0x28(%rsp), %rax
movl (%rax), %ecx
movq 0x20(%rsp), %rax
movl %ecx, (%rax)
movq 0x28(%rsp), %rax
movl 0x4(%rax), %ecx
movq 0x20(%rsp), %rax
movl %ecx, 0x4(%rax)
movq 0x28(%rsp), %rax
movl 0x8(%rax), %ecx
movq 0x20(%rsp), %rax
movl %ecx, 0x8(%rax)
movq 0x28(%rsp), %rax
movl 0xc(%rax), %ecx
movq 0x20(%rsp), %rax
movl %ecx, 0xc(%rax)
movq 0x38(%rsp), %rax
movl 0xe0(%rax), %edi
movq 0x28(%rsp), %rax
movl 0x10(%rax), %esi
movq 0x38(%rsp), %rax
xorl %edx, %edx
subl 0x20(%rax), %edx
movq 0x28(%rsp), %rax
movl 0x1c(%rax), %ecx
movsbl 0x57(%rsp), %r8d
callq 0x9f4ab0
movl %eax, %ecx
movq 0x20(%rsp), %rax
movl %ecx, 0x10(%rax)
movq 0x38(%rsp), %rax
movl 0x60(%rax), %edi
movq 0x28(%rsp), %rax
movl 0x14(%rax), %esi
movq 0x38(%rsp), %rax
xorl %edx, %edx
subl 0xa0(%rax), %edx
movq 0x28(%rsp), %rax
movl 0x18(%rax), %ecx
movsbl 0x57(%rsp), %r8d
callq 0x9f4ab0
movl %eax, %ecx
movq 0x20(%rsp), %rax
movl %ecx, 0x14(%rax)
movq 0x38(%rsp), %rax
movl 0xa0(%rax), %edi
movq 0x28(%rsp), %rax
movl 0x14(%rax), %esi
movq 0x38(%rsp), %rax
movl 0x60(%rax), %edx
movq 0x28(%rsp), %rax
movl 0x18(%rax), %ecx
movsbl 0x57(%rsp), %r8d
callq 0x9f4ab0
movl %eax, %ecx
movq 0x20(%rsp), %rax
movl %ecx, 0x18(%rax)
movq 0x38(%rsp), %rax
movl 0x20(%rax), %edi
movq 0x28(%rsp), %rax
movl 0x10(%rax), %esi
movq 0x38(%rsp), %rax
movl 0xe0(%rax), %edx
movq 0x28(%rsp), %rax
movl 0x1c(%rax), %ecx
movsbl 0x57(%rsp), %r8d
callq 0x9f4ab0
movl %eax, %ecx
movq 0x20(%rsp), %rax
movl %ecx, 0x1c(%rax)
movl 0x34(%rsp), %edi
movq 0x60(%rsp), %rsi
movq 0x20(%rsp), %rdx
movq 0x48(%rsp), %rax
movslq 0x34(%rsp), %r8
movl $0x8, %ecx
movsbl (%rax,%r8), %r8d
callq 0x5be6a0
movl 0x34(%rsp), %eax
addl $0x1, %eax
movl %eax, 0x34(%rsp)
movq %rsp, %rax
movq %rax, 0x28(%rsp)
movq 0x58(%rsp), %rax
movq %rax, 0x20(%rsp)
movq 0x38(%rsp), %rax
movl 0x80(%rax), %edi
movq 0x28(%rsp), %rax
movl (%rax), %esi
movq 0x38(%rsp), %rax
movl 0x80(%rax), %edx
movq 0x28(%rsp), %rax
movl 0x4(%rax), %ecx
movsbl 0x57(%rsp), %r8d
callq 0x9f4ab0
movl %eax, %ecx
movq 0x20(%rsp), %rax
movl %ecx, (%rax)
movq 0x38(%rsp), %rax
movl 0x80(%rax), %edi
movq 0x28(%rsp), %rax
movl (%rax), %esi
movq 0x38(%rsp), %rax
xorl %edx, %edx
subl 0x80(%rax), %edx
movq 0x28(%rsp), %rax
movl 0x4(%rax), %ecx
movsbl 0x57(%rsp), %r8d
callq 0x9f4ab0
movl %eax, %ecx
movq 0x20(%rsp), %rax
movl %ecx, 0x4(%rax)
movq 0x38(%rsp), %rax
movl 0xc0(%rax), %edi
movq 0x28(%rsp), %rax
movl 0x8(%rax), %esi
movq 0x38(%rsp), %rax
xorl %edx, %edx
subl 0x40(%rax), %edx
movq 0x28(%rsp), %rax
movl 0xc(%rax), %ecx
movsbl 0x57(%rsp), %r8d
callq 0x9f4ab0
movl %eax, %ecx
movq 0x20(%rsp), %rax
movl %ecx, 0x8(%rax)
movq 0x38(%rsp), %rax
movl 0x40(%rax), %edi
movq 0x28(%rsp), %rax
movl 0x8(%rax), %esi
movq 0x38(%rsp), %rax
movl 0xc0(%rax), %edx
movq 0x28(%rsp), %rax
movl 0xc(%rax), %ecx
movsbl 0x57(%rsp), %r8d
callq 0x9f4ab0
movl %eax, %ecx
movq 0x20(%rsp), %rax
movl %ecx, 0xc(%rax)
movq 0x28(%rsp), %rax
movl 0x10(%rax), %edi
movq 0x28(%rsp), %rax
addl 0x14(%rax), %edi
movq 0x48(%rsp), %rax
movslq 0x34(%rsp), %rcx
movsbl (%rax,%rcx), %esi
callq 0x9f4b20
movl %eax, %ecx
movq 0x20(%rsp), %rax
movl %ecx, 0x10(%rax)
movq 0x28(%rsp), %rax
movl 0x10(%rax), %edi
movq 0x28(%rsp), %rax
subl 0x14(%rax), %edi
movq 0x48(%rsp), %rax
movslq 0x34(%rsp), %rcx
movsbl (%rax,%rcx), %esi
callq 0x9f4b20
movl %eax, %ecx
movq 0x20(%rsp), %rax
movl %ecx, 0x14(%rax)
movq 0x28(%rsp), %rax
xorl %edi, %edi
subl 0x18(%rax), %edi
movq 0x28(%rsp), %rax
addl 0x1c(%rax), %edi
movq 0x48(%rsp), %rax
movslq 0x34(%rsp), %rcx
movsbl (%rax,%rcx), %esi
callq 0x9f4b20
movl %eax, %ecx
movq 0x20(%rsp), %rax
movl %ecx, 0x18(%rax)
movq 0x28(%rsp), %rax
movl 0x18(%rax), %edi
movq 0x28(%rsp), %rax
addl 0x1c(%rax), %edi
movq 0x48(%rsp), %rax
movslq 0x34(%rsp), %rcx
movsbl (%rax,%rcx), %esi
callq 0x9f4b20
movl %eax, %ecx
movq 0x20(%rsp), %rax
movl %ecx, 0x1c(%rax)
movl 0x34(%rsp), %edi
movq 0x60(%rsp), %rsi
movq 0x20(%rsp), %rdx
movq 0x48(%rsp), %rax
movslq 0x34(%rsp), %r8
movl $0x8, %ecx
movsbl (%rax,%r8), %r8d
callq 0x5be6a0
movl 0x34(%rsp), %eax
addl $0x1, %eax
movl %eax, 0x34(%rsp)
movq 0x58(%rsp), %rax
movq %rax, 0x28(%rsp)
movq %rsp, %rax
movq %rax, 0x20(%rsp)
movq 0x28(%rsp), %rax
movl (%rax), %edi
movq 0x28(%rsp), %rax
addl 0xc(%rax), %edi
movq 0x48(%rsp), %rax
movslq 0x34(%rsp), %rcx
movsbl (%rax,%rcx), %esi
callq 0x9f4b20
movl %eax, %ecx
movq 0x20(%rsp), %rax
movl %ecx, (%rax)
movq 0x28(%rsp), %rax
movl 0x4(%rax), %edi
movq 0x28(%rsp), %rax
addl 0x8(%rax), %edi
movq 0x48(%rsp), %rax
movslq 0x34(%rsp), %rcx
movsbl (%rax,%rcx), %esi
callq 0x9f4b20
movl %eax, %ecx
movq 0x20(%rsp), %rax
movl %ecx, 0x4(%rax)
movq 0x28(%rsp), %rax
movl 0x4(%rax), %edi
movq 0x28(%rsp), %rax
subl 0x8(%rax), %edi
movq 0x48(%rsp), %rax
movslq 0x34(%rsp), %rcx
movsbl (%rax,%rcx), %esi
callq 0x9f4b20
movl %eax, %ecx
movq 0x20(%rsp), %rax
movl %ecx, 0x8(%rax)
movq 0x28(%rsp), %rax
movl (%rax), %edi
movq 0x28(%rsp), %rax
subl 0xc(%rax), %edi
movq 0x48(%rsp), %rax
movslq 0x34(%rsp), %rcx
movsbl (%rax,%rcx), %esi
callq 0x9f4b20
movl %eax, %ecx
movq 0x20(%rsp), %rax
movl %ecx, 0xc(%rax)
movq 0x28(%rsp), %rax
movl 0x10(%rax), %ecx
movq 0x20(%rsp), %rax
movl %ecx, 0x10(%rax)
movq 0x38(%rsp), %rax
xorl %edi, %edi
subl 0x80(%rax), %edi
movq 0x28(%rsp), %rax
movl 0x14(%rax), %esi
movq 0x38(%rsp), %rax
movl 0x80(%rax), %edx
movq 0x28(%rsp), %rax
movl 0x18(%rax), %ecx
movsbl 0x57(%rsp), %r8d
callq 0x9f4ab0
movl %eax, %ecx
movq 0x20(%rsp), %rax
movl %ecx, 0x14(%rax)
movq 0x38(%rsp), %rax
movl 0x80(%rax), %edi
movq 0x28(%rsp), %rax
movl 0x14(%rax), %esi
movq 0x38(%rsp), %rax
movl 0x80(%rax), %edx
movq 0x28(%rsp), %rax
movl 0x18(%rax), %ecx
movsbl 0x57(%rsp), %r8d
callq 0x9f4ab0
movl %eax, %ecx
movq 0x20(%rsp), %rax
movl %ecx, 0x18(%rax)
movq 0x28(%rsp), %rax
movl 0x1c(%rax), %ecx
movq 0x20(%rsp), %rax
movl %ecx, 0x1c(%rax)
movl 0x34(%rsp), %edi
movq 0x60(%rsp), %rsi
movq 0x20(%rsp), %rdx
movq 0x48(%rsp), %rax
movslq 0x34(%rsp), %r8
movl $0x8, %ecx
movsbl (%rax,%r8), %r8d
callq 0x5be6a0
movl 0x34(%rsp), %eax
addl $0x1, %eax
movl %eax, 0x34(%rsp)
movq %rsp, %rax
movq %rax, 0x28(%rsp)
movq 0x58(%rsp), %rax
movq %rax, 0x20(%rsp)
movq 0x28(%rsp), %rax
movl (%rax), %edi
movq 0x28(%rsp), %rax
addl 0x1c(%rax), %edi
movq 0x48(%rsp), %rax
movslq 0x34(%rsp), %rcx
movsbl (%rax,%rcx), %esi
callq 0x9f4b20
movl %eax, %ecx
movq 0x20(%rsp), %rax
movl %ecx, (%rax)
movq 0x28(%rsp), %rax
movl 0x4(%rax), %edi
movq 0x28(%rsp), %rax
addl 0x18(%rax), %edi
movq 0x48(%rsp), %rax
movslq 0x34(%rsp), %rcx
movsbl (%rax,%rcx), %esi
callq 0x9f4b20
movl %eax, %ecx
movq 0x20(%rsp), %rax
movl %ecx, 0x4(%rax)
movq 0x28(%rsp), %rax
movl 0x8(%rax), %edi
movq 0x28(%rsp), %rax
addl 0x14(%rax), %edi
movq 0x48(%rsp), %rax
movslq 0x34(%rsp), %rcx
movsbl (%rax,%rcx), %esi
callq 0x9f4b20
movl %eax, %ecx
movq 0x20(%rsp), %rax
movl %ecx, 0x8(%rax)
movq 0x28(%rsp), %rax
movl 0xc(%rax), %edi
movq 0x28(%rsp), %rax
addl 0x10(%rax), %edi
movq 0x48(%rsp), %rax
movslq 0x34(%rsp), %rcx
movsbl (%rax,%rcx), %esi
callq 0x9f4b20
movl %eax, %ecx
movq 0x20(%rsp), %rax
movl %ecx, 0xc(%rax)
movq 0x28(%rsp), %rax
movl 0xc(%rax), %edi
movq 0x28(%rsp), %rax
subl 0x10(%rax), %edi
movq 0x48(%rsp), %rax
movslq 0x34(%rsp), %rcx
movsbl (%rax,%rcx), %esi
callq 0x9f4b20
movl %eax, %ecx
movq 0x20(%rsp), %rax
movl %ecx, 0x10(%rax)
movq 0x28(%rsp), %rax
movl 0x8(%rax), %edi
movq 0x28(%rsp), %rax
subl 0x14(%rax), %edi
movq 0x48(%rsp), %rax
movslq 0x34(%rsp), %rcx
movsbl (%rax,%rcx), %esi
callq 0x9f4b20
movl %eax, %ecx
movq 0x20(%rsp), %rax
movl %ecx, 0x14(%rax)
movq 0x28(%rsp), %rax
movl 0x4(%rax), %edi
movq 0x28(%rsp), %rax
subl 0x18(%rax), %edi
movq 0x48(%rsp), %rax
movslq 0x34(%rsp), %rcx
movsbl (%rax,%rcx), %esi
callq 0x9f4b20
movl %eax, %ecx
movq 0x20(%rsp), %rax
movl %ecx, 0x18(%rax)
movq 0x28(%rsp), %rax
movl (%rax), %edi
movq 0x28(%rsp), %rax
subl 0x1c(%rax), %edi
movq 0x48(%rsp), %rax
movslq 0x34(%rsp), %rcx
movsbl (%rax,%rcx), %esi
callq 0x9f4b20
movl %eax, %ecx
movq 0x20(%rsp), %rax
movl %ecx, 0x1c(%rax)
addq $0x68, %rsp
retq
nopl (%rax)
| /m-ab-s[P]aom/av1/common/av1_inv_txfm1d.c |
av1_iadst4 | void av1_iadst4(const int32_t *input, int32_t *output, int8_t cos_bit,
const int8_t *stage_range) {
int bit = cos_bit;
const int32_t *sinpi = sinpi_arr(bit);
int32_t s0, s1, s2, s3, s4, s5, s6, s7;
int32_t x0 = input[0];
int32_t x1 = input[1];
int32_t x2 = input[2];
int32_t x3 = input[3];
if (!(x0 | x1 | x2 | x3)) {
output[0] = output[1] = output[2] = output[3] = 0;
return;
}
assert(sinpi[1] + sinpi[2] == sinpi[4]);
// stage 1
s0 = range_check_value(sinpi[1] * x0, stage_range[1] + bit);
s1 = range_check_value(sinpi[2] * x0, stage_range[1] + bit);
s2 = range_check_value(sinpi[3] * x1, stage_range[1] + bit);
s3 = range_check_value(sinpi[4] * x2, stage_range[1] + bit);
s4 = range_check_value(sinpi[1] * x2, stage_range[1] + bit);
s5 = range_check_value(sinpi[2] * x3, stage_range[1] + bit);
s6 = range_check_value(sinpi[4] * x3, stage_range[1] + bit);
// stage 2
// NOTICE: (x0 - x2) here may use one extra bit compared to the
// opt_range_row/col specified in av1_gen_inv_stage_range()
s7 = range_check_value((x0 - x2) + x3, stage_range[2]);
// stage 3
s0 = range_check_value(s0 + s3, stage_range[3] + bit);
s1 = range_check_value(s1 - s4, stage_range[3] + bit);
s3 = range_check_value(s2, stage_range[3] + bit);
s2 = range_check_value(sinpi[3] * s7, stage_range[3] + bit);
// stage 4
s0 = range_check_value(s0 + s5, stage_range[4] + bit);
s1 = range_check_value(s1 - s6, stage_range[4] + bit);
// stage 5
x0 = range_check_value(s0 + s3, stage_range[5] + bit);
x1 = range_check_value(s1 + s3, stage_range[5] + bit);
x2 = range_check_value(s2, stage_range[5] + bit);
x3 = range_check_value(s0 + s1, stage_range[5] + bit);
// stage 6
x3 = range_check_value(x3 - s3, stage_range[6] + bit);
output[0] = round_shift(x0, bit);
output[1] = round_shift(x1, bit);
output[2] = round_shift(x2, bit);
output[3] = round_shift(x3, bit);
} | subq $0x68, %rsp
movb %dl, %al
movq %rdi, 0x60(%rsp)
movq %rsi, 0x58(%rsp)
movb %al, 0x57(%rsp)
movq %rcx, 0x48(%rsp)
movsbl 0x57(%rsp), %eax
movl %eax, 0x44(%rsp)
movl 0x44(%rsp), %edi
callq 0x9fa6e0
movq %rax, 0x38(%rsp)
movq 0x60(%rsp), %rax
movl (%rax), %eax
movl %eax, 0x14(%rsp)
movq 0x60(%rsp), %rax
movl 0x4(%rax), %eax
movl %eax, 0x10(%rsp)
movq 0x60(%rsp), %rax
movl 0x8(%rax), %eax
movl %eax, 0xc(%rsp)
movq 0x60(%rsp), %rax
movl 0xc(%rax), %eax
movl %eax, 0x8(%rsp)
movl 0x14(%rsp), %eax
orl 0x10(%rsp), %eax
orl 0xc(%rsp), %eax
orl 0x8(%rsp), %eax
cmpl $0x0, %eax
jne 0x9fa3e8
movq 0x58(%rsp), %rax
movl $0x0, 0xc(%rax)
movq 0x58(%rsp), %rax
movl $0x0, 0x8(%rax)
movq 0x58(%rsp), %rax
movl $0x0, 0x4(%rax)
movq 0x58(%rsp), %rax
movl $0x0, (%rax)
jmp 0x9fa6d7
movq 0x38(%rsp), %rax
movl 0x4(%rax), %edi
imull 0x14(%rsp), %edi
movq 0x48(%rsp), %rax
movsbl 0x1(%rax), %eax
addl 0x44(%rsp), %eax
movsbl %al, %esi
callq 0x9fa700
movl %eax, 0x34(%rsp)
movq 0x38(%rsp), %rax
movl 0x8(%rax), %edi
imull 0x14(%rsp), %edi
movq 0x48(%rsp), %rax
movsbl 0x1(%rax), %eax
addl 0x44(%rsp), %eax
movsbl %al, %esi
callq 0x9fa700
movl %eax, 0x30(%rsp)
movq 0x38(%rsp), %rax
movl 0xc(%rax), %edi
imull 0x10(%rsp), %edi
movq 0x48(%rsp), %rax
movsbl 0x1(%rax), %eax
addl 0x44(%rsp), %eax
movsbl %al, %esi
callq 0x9fa700
movl %eax, 0x2c(%rsp)
movq 0x38(%rsp), %rax
movl 0x10(%rax), %edi
imull 0xc(%rsp), %edi
movq 0x48(%rsp), %rax
movsbl 0x1(%rax), %eax
addl 0x44(%rsp), %eax
movsbl %al, %esi
callq 0x9fa700
movl %eax, 0x28(%rsp)
movq 0x38(%rsp), %rax
movl 0x4(%rax), %edi
imull 0xc(%rsp), %edi
movq 0x48(%rsp), %rax
movsbl 0x1(%rax), %eax
addl 0x44(%rsp), %eax
movsbl %al, %esi
callq 0x9fa700
movl %eax, 0x24(%rsp)
movq 0x38(%rsp), %rax
movl 0x8(%rax), %edi
imull 0x8(%rsp), %edi
movq 0x48(%rsp), %rax
movsbl 0x1(%rax), %eax
addl 0x44(%rsp), %eax
movsbl %al, %esi
callq 0x9fa700
movl %eax, 0x20(%rsp)
movq 0x38(%rsp), %rax
movl 0x10(%rax), %edi
imull 0x8(%rsp), %edi
movq 0x48(%rsp), %rax
movsbl 0x1(%rax), %eax
addl 0x44(%rsp), %eax
movsbl %al, %esi
callq 0x9fa700
movl %eax, 0x1c(%rsp)
movl 0x14(%rsp), %edi
subl 0xc(%rsp), %edi
addl 0x8(%rsp), %edi
movq 0x48(%rsp), %rax
movsbl 0x2(%rax), %esi
callq 0x9fa700
movl %eax, 0x18(%rsp)
movl 0x34(%rsp), %edi
addl 0x28(%rsp), %edi
movq 0x48(%rsp), %rax
movsbl 0x3(%rax), %eax
addl 0x44(%rsp), %eax
movsbl %al, %esi
callq 0x9fa700
movl %eax, 0x34(%rsp)
movl 0x30(%rsp), %edi
subl 0x24(%rsp), %edi
movq 0x48(%rsp), %rax
movsbl 0x3(%rax), %eax
addl 0x44(%rsp), %eax
movsbl %al, %esi
callq 0x9fa700
movl %eax, 0x30(%rsp)
movl 0x2c(%rsp), %edi
movq 0x48(%rsp), %rax
movsbl 0x3(%rax), %eax
addl 0x44(%rsp), %eax
movsbl %al, %esi
callq 0x9fa700
movl %eax, 0x28(%rsp)
movq 0x38(%rsp), %rax
movl 0xc(%rax), %edi
imull 0x18(%rsp), %edi
movq 0x48(%rsp), %rax
movsbl 0x3(%rax), %eax
addl 0x44(%rsp), %eax
movsbl %al, %esi
callq 0x9fa700
movl %eax, 0x2c(%rsp)
movl 0x34(%rsp), %edi
addl 0x20(%rsp), %edi
movq 0x48(%rsp), %rax
movsbl 0x4(%rax), %eax
addl 0x44(%rsp), %eax
movsbl %al, %esi
callq 0x9fa700
movl %eax, 0x34(%rsp)
movl 0x30(%rsp), %edi
subl 0x1c(%rsp), %edi
movq 0x48(%rsp), %rax
movsbl 0x4(%rax), %eax
addl 0x44(%rsp), %eax
movsbl %al, %esi
callq 0x9fa700
movl %eax, 0x30(%rsp)
movl 0x34(%rsp), %edi
addl 0x28(%rsp), %edi
movq 0x48(%rsp), %rax
movsbl 0x5(%rax), %eax
addl 0x44(%rsp), %eax
movsbl %al, %esi
callq 0x9fa700
movl %eax, 0x14(%rsp)
movl 0x30(%rsp), %edi
addl 0x28(%rsp), %edi
movq 0x48(%rsp), %rax
movsbl 0x5(%rax), %eax
addl 0x44(%rsp), %eax
movsbl %al, %esi
callq 0x9fa700
movl %eax, 0x10(%rsp)
movl 0x2c(%rsp), %edi
movq 0x48(%rsp), %rax
movsbl 0x5(%rax), %eax
addl 0x44(%rsp), %eax
movsbl %al, %esi
callq 0x9fa700
movl %eax, 0xc(%rsp)
movl 0x34(%rsp), %edi
addl 0x30(%rsp), %edi
movq 0x48(%rsp), %rax
movsbl 0x5(%rax), %eax
addl 0x44(%rsp), %eax
movsbl %al, %esi
callq 0x9fa700
movl %eax, 0x8(%rsp)
movl 0x8(%rsp), %edi
subl 0x28(%rsp), %edi
movq 0x48(%rsp), %rax
movsbl 0x6(%rax), %eax
addl 0x44(%rsp), %eax
movsbl %al, %esi
callq 0x9fa700
movl %eax, 0x8(%rsp)
movslq 0x14(%rsp), %rdi
movl 0x44(%rsp), %esi
callq 0x9fa710
movl %eax, %ecx
movq 0x58(%rsp), %rax
movl %ecx, (%rax)
movslq 0x10(%rsp), %rdi
movl 0x44(%rsp), %esi
callq 0x9fa710
movl %eax, %ecx
movq 0x58(%rsp), %rax
movl %ecx, 0x4(%rax)
movslq 0xc(%rsp), %rdi
movl 0x44(%rsp), %esi
callq 0x9fa710
movl %eax, %ecx
movq 0x58(%rsp), %rax
movl %ecx, 0x8(%rax)
movslq 0x8(%rsp), %rdi
movl 0x44(%rsp), %esi
callq 0x9fa710
movl %eax, %ecx
movq 0x58(%rsp), %rax
movl %ecx, 0xc(%rax)
addq $0x68, %rsp
retq
nopl (%rax)
| /m-ab-s[P]aom/av1/common/av1_inv_txfm1d.c |
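The assert above leans on a trigonometric identity: with the sinpi table holding scaled values of sin(k*pi/9) (the pi/9 spacing is an assumption about how the table is built), sin(pi/9) + sin(2*pi/9) equals sin(4*pi/9), which is what lets the x0 and x2 contributions share the sinpi[4] multiply. A quick numeric check of that identity:

#include <math.h>
#include <stdio.h>

int main(void) {
  const double pi = acos(-1.0);
  /* sin(20 deg) + sin(40 deg) vs sin(80 deg): both ~0.98481 */
  printf("%f %f\n", sin(pi / 9) + sin(2 * pi / 9), sin(4 * pi / 9));
  return 0;
}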