name
stringlengths
1
473k
code
stringlengths
7
647k
asm
stringlengths
4
3.39M
file
stringlengths
8
196
av1_iadst16
// Reference (C model) 16-point inverse ADST used by the AV1 inverse
// transform. Runs a 9-stage butterfly network, ping-ponging intermediate
// results between `output` and the local `step` buffer, and writes the
// final 16 samples to `output`.
//
// Parameters:
//   input       - 16 input coefficients; must not alias `output` (asserted).
//   output      - 16 result samples; also reused as scratch between stages.
//   cos_bit     - fixed-point precision of the cosine table; selects the
//                 table via cospi_arr() and sets the rounding shift inside
//                 half_btf().
//   stage_range - per-stage allowed bit range; indexed by the running
//                 `stage` counter for clamping and range checking.
void av1_iadst16(const int32_t *input, int32_t *output, int8_t cos_bit,
                 const int8_t *stage_range) {
  assert(output != input);
  const int32_t size = 16;
  // Cosine table at the requested fixed-point precision.
  const int32_t *cospi = cospi_arr(cos_bit);

  int32_t stage = 0;
  int32_t *bf0, *bf1;  // per-stage source / destination buffers
  int32_t step[16];    // scratch buffer; alternates with `output` each stage

  // stage 0;

  // stage 1;
  // Input permutation: interleave coefficients in the order the ADST
  // butterfly network expects (15, 0, 13, 2, ...).
  stage++;
  bf1 = output;
  bf1[0] = input[15];
  bf1[1] = input[0];
  bf1[2] = input[13];
  bf1[3] = input[2];
  bf1[4] = input[11];
  bf1[5] = input[4];
  bf1[6] = input[9];
  bf1[7] = input[6];
  bf1[8] = input[7];
  bf1[9] = input[8];
  bf1[10] = input[5];
  bf1[11] = input[10];
  bf1[12] = input[3];
  bf1[13] = input[12];
  bf1[14] = input[1];
  bf1[15] = input[14];
  av1_range_check_buf(stage, input, bf1, size, stage_range[stage]);

  // stage 2
  // Rotation butterflies: each pair (2k, 2k+1) is rotated by an odd
  // cosine-table angle via half_btf(w0, in0, w1, in1, cos_bit).
  stage++;
  bf0 = output;
  bf1 = step;
  bf1[0] = half_btf(cospi[2], bf0[0], cospi[62], bf0[1], cos_bit);
  bf1[1] = half_btf(cospi[62], bf0[0], -cospi[2], bf0[1], cos_bit);
  bf1[2] = half_btf(cospi[10], bf0[2], cospi[54], bf0[3], cos_bit);
  bf1[3] = half_btf(cospi[54], bf0[2], -cospi[10], bf0[3], cos_bit);
  bf1[4] = half_btf(cospi[18], bf0[4], cospi[46], bf0[5], cos_bit);
  bf1[5] = half_btf(cospi[46], bf0[4], -cospi[18], bf0[5], cos_bit);
  bf1[6] = half_btf(cospi[26], bf0[6], cospi[38], bf0[7], cos_bit);
  bf1[7] = half_btf(cospi[38], bf0[6], -cospi[26], bf0[7], cos_bit);
  bf1[8] = half_btf(cospi[34], bf0[8], cospi[30], bf0[9], cos_bit);
  bf1[9] = half_btf(cospi[30], bf0[8], -cospi[34], bf0[9], cos_bit);
  bf1[10] = half_btf(cospi[42], bf0[10], cospi[22], bf0[11], cos_bit);
  bf1[11] = half_btf(cospi[22], bf0[10], -cospi[42], bf0[11], cos_bit);
  bf1[12] = half_btf(cospi[50], bf0[12], cospi[14], bf0[13], cos_bit);
  bf1[13] = half_btf(cospi[14], bf0[12], -cospi[50], bf0[13], cos_bit);
  bf1[14] = half_btf(cospi[58], bf0[14], cospi[6], bf0[15], cos_bit);
  bf1[15] = half_btf(cospi[6], bf0[14], -cospi[58], bf0[15], cos_bit);
  av1_range_check_buf(stage, input, bf1, size, stage_range[stage]);

  // stage 3
  // Add/subtract butterflies with stride 8, clamped to the stage's range.
  stage++;
  bf0 = step;
  bf1 = output;
  bf1[0] = clamp_value(bf0[0] + bf0[8], stage_range[stage]);
  bf1[1] = clamp_value(bf0[1] + bf0[9], stage_range[stage]);
  bf1[2] = clamp_value(bf0[2] + bf0[10], stage_range[stage]);
  bf1[3] = clamp_value(bf0[3] + bf0[11], stage_range[stage]);
  bf1[4] = clamp_value(bf0[4] + bf0[12], stage_range[stage]);
  bf1[5] = clamp_value(bf0[5] + bf0[13], stage_range[stage]);
  bf1[6] = clamp_value(bf0[6] + bf0[14], stage_range[stage]);
  bf1[7] = clamp_value(bf0[7] + bf0[15], stage_range[stage]);
  bf1[8] = clamp_value(bf0[0] - bf0[8], stage_range[stage]);
  bf1[9] = clamp_value(bf0[1] - bf0[9], stage_range[stage]);
  bf1[10] = clamp_value(bf0[2] - bf0[10], stage_range[stage]);
  bf1[11] = clamp_value(bf0[3] - bf0[11], stage_range[stage]);
  bf1[12] = clamp_value(bf0[4] - bf0[12], stage_range[stage]);
  bf1[13] = clamp_value(bf0[5] - bf0[13], stage_range[stage]);
  bf1[14] = clamp_value(bf0[6] - bf0[14], stage_range[stage]);
  bf1[15] = clamp_value(bf0[7] - bf0[15], stage_range[stage]);
  av1_range_check_buf(stage, input, bf1, size, stage_range[stage]);

  // stage 4
  // Lower half passes through; upper half gets +/-cospi[8]/[24]/[40]/[56]
  // rotations.
  stage++;
  bf0 = output;
  bf1 = step;
  bf1[0] = bf0[0];
  bf1[1] = bf0[1];
  bf1[2] = bf0[2];
  bf1[3] = bf0[3];
  bf1[4] = bf0[4];
  bf1[5] = bf0[5];
  bf1[6] = bf0[6];
  bf1[7] = bf0[7];
  bf1[8] = half_btf(cospi[8], bf0[8], cospi[56], bf0[9], cos_bit);
  bf1[9] = half_btf(cospi[56], bf0[8], -cospi[8], bf0[9], cos_bit);
  bf1[10] = half_btf(cospi[40], bf0[10], cospi[24], bf0[11], cos_bit);
  bf1[11] = half_btf(cospi[24], bf0[10], -cospi[40], bf0[11], cos_bit);
  bf1[12] = half_btf(-cospi[56], bf0[12], cospi[8], bf0[13], cos_bit);
  bf1[13] = half_btf(cospi[8], bf0[12], cospi[56], bf0[13], cos_bit);
  bf1[14] = half_btf(-cospi[24], bf0[14], cospi[40], bf0[15], cos_bit);
  bf1[15] = half_btf(cospi[40], bf0[14], cospi[24], bf0[15], cos_bit);
  av1_range_check_buf(stage, input, bf1, size, stage_range[stage]);

  // stage 5
  // Add/subtract butterflies with stride 4 within each half, clamped.
  stage++;
  bf0 = step;
  bf1 = output;
  bf1[0] = clamp_value(bf0[0] + bf0[4], stage_range[stage]);
  bf1[1] = clamp_value(bf0[1] + bf0[5], stage_range[stage]);
  bf1[2] = clamp_value(bf0[2] + bf0[6], stage_range[stage]);
  bf1[3] = clamp_value(bf0[3] + bf0[7], stage_range[stage]);
  bf1[4] = clamp_value(bf0[0] - bf0[4], stage_range[stage]);
  bf1[5] = clamp_value(bf0[1] - bf0[5], stage_range[stage]);
  bf1[6] = clamp_value(bf0[2] - bf0[6], stage_range[stage]);
  bf1[7] = clamp_value(bf0[3] - bf0[7], stage_range[stage]);
  bf1[8] = clamp_value(bf0[8] + bf0[12], stage_range[stage]);
  bf1[9] = clamp_value(bf0[9] + bf0[13], stage_range[stage]);
  bf1[10] = clamp_value(bf0[10] + bf0[14], stage_range[stage]);
  bf1[11] = clamp_value(bf0[11] + bf0[15], stage_range[stage]);
  bf1[12] = clamp_value(bf0[8] - bf0[12], stage_range[stage]);
  bf1[13] = clamp_value(bf0[9] - bf0[13], stage_range[stage]);
  bf1[14] = clamp_value(bf0[10] - bf0[14], stage_range[stage]);
  bf1[15] = clamp_value(bf0[11] - bf0[15], stage_range[stage]);
  av1_range_check_buf(stage, input, bf1, size, stage_range[stage]);

  // stage 6
  // +/-cospi[16]/[48] rotations on indices 4..7 and 12..15; the rest pass
  // through unchanged.
  stage++;
  bf0 = output;
  bf1 = step;
  bf1[0] = bf0[0];
  bf1[1] = bf0[1];
  bf1[2] = bf0[2];
  bf1[3] = bf0[3];
  bf1[4] = half_btf(cospi[16], bf0[4], cospi[48], bf0[5], cos_bit);
  bf1[5] = half_btf(cospi[48], bf0[4], -cospi[16], bf0[5], cos_bit);
  bf1[6] = half_btf(-cospi[48], bf0[6], cospi[16], bf0[7], cos_bit);
  bf1[7] = half_btf(cospi[16], bf0[6], cospi[48], bf0[7], cos_bit);
  bf1[8] = bf0[8];
  bf1[9] = bf0[9];
  bf1[10] = bf0[10];
  bf1[11] = bf0[11];
  bf1[12] = half_btf(cospi[16], bf0[12], cospi[48], bf0[13], cos_bit);
  bf1[13] = half_btf(cospi[48], bf0[12], -cospi[16], bf0[13], cos_bit);
  bf1[14] = half_btf(-cospi[48], bf0[14], cospi[16], bf0[15], cos_bit);
  bf1[15] = half_btf(cospi[16], bf0[14], cospi[48], bf0[15], cos_bit);
  av1_range_check_buf(stage, input, bf1, size, stage_range[stage]);

  // stage 7
  // Add/subtract butterflies with stride 2 within each group of 4, clamped.
  stage++;
  bf0 = step;
  bf1 = output;
  bf1[0] = clamp_value(bf0[0] + bf0[2], stage_range[stage]);
  bf1[1] = clamp_value(bf0[1] + bf0[3], stage_range[stage]);
  bf1[2] = clamp_value(bf0[0] - bf0[2], stage_range[stage]);
  bf1[3] = clamp_value(bf0[1] - bf0[3], stage_range[stage]);
  bf1[4] = clamp_value(bf0[4] + bf0[6], stage_range[stage]);
  bf1[5] = clamp_value(bf0[5] + bf0[7], stage_range[stage]);
  bf1[6] = clamp_value(bf0[4] - bf0[6], stage_range[stage]);
  bf1[7] = clamp_value(bf0[5] - bf0[7], stage_range[stage]);
  bf1[8] = clamp_value(bf0[8] + bf0[10], stage_range[stage]);
  bf1[9] = clamp_value(bf0[9] + bf0[11], stage_range[stage]);
  bf1[10] = clamp_value(bf0[8] - bf0[10], stage_range[stage]);
  bf1[11] = clamp_value(bf0[9] - bf0[11], stage_range[stage]);
  bf1[12] = clamp_value(bf0[12] + bf0[14], stage_range[stage]);
  bf1[13] = clamp_value(bf0[13] + bf0[15], stage_range[stage]);
  bf1[14] = clamp_value(bf0[12] - bf0[14], stage_range[stage]);
  bf1[15] = clamp_value(bf0[13] - bf0[15], stage_range[stage]);
  av1_range_check_buf(stage, input, bf1, size, stage_range[stage]);

  // stage 8
  // Final 45-degree rotations: cospi[32] pairs on odd-indexed couples;
  // even couples pass through.
  stage++;
  bf0 = output;
  bf1 = step;
  bf1[0] = bf0[0];
  bf1[1] = bf0[1];
  bf1[2] = half_btf(cospi[32], bf0[2], cospi[32], bf0[3], cos_bit);
  bf1[3] = half_btf(cospi[32], bf0[2], -cospi[32], bf0[3], cos_bit);
  bf1[4] = bf0[4];
  bf1[5] = bf0[5];
  bf1[6] = half_btf(cospi[32], bf0[6], cospi[32], bf0[7], cos_bit);
  bf1[7] = half_btf(cospi[32], bf0[6], -cospi[32], bf0[7], cos_bit);
  bf1[8] = bf0[8];
  bf1[9] = bf0[9];
  bf1[10] = half_btf(cospi[32], bf0[10], cospi[32], bf0[11], cos_bit);
  bf1[11] = half_btf(cospi[32], bf0[10], -cospi[32], bf0[11], cos_bit);
  bf1[12] = bf0[12];
  bf1[13] = bf0[13];
  bf1[14] = half_btf(cospi[32], bf0[14], cospi[32], bf0[15], cos_bit);
  bf1[15] = half_btf(cospi[32], bf0[14], -cospi[32], bf0[15], cos_bit);
  av1_range_check_buf(stage, input, bf1, size, stage_range[stage]);

  // stage 9
  // Output permutation with alternating sign flips; note `stage` is not
  // incremented here and no range check is performed on the final values.
  bf0 = step;
  bf1 = output;
  bf1[0] = bf0[0];
  bf1[1] = -bf0[8];
  bf1[2] = bf0[12];
  bf1[3] = -bf0[4];
  bf1[4] = bf0[6];
  bf1[5] = -bf0[14];
  bf1[6] = bf0[10];
  bf1[7] = -bf0[2];
  bf1[8] = bf0[3];
  bf1[9] = -bf0[11];
  bf1[10] = bf0[15];
  bf1[11] = -bf0[7];
  bf1[12] = bf0[5];
  bf1[13] = -bf0[13];
  bf1[14] = bf0[9];
  bf1[15] = -bf0[1];
}
subq $0x88, %rsp movb %dl, %al movq %rdi, 0x80(%rsp) movq %rsi, 0x78(%rsp) movb %al, 0x77(%rsp) movq %rcx, 0x68(%rsp) movl $0x10, 0x64(%rsp) movsbl 0x77(%rsp), %edi callq 0x9f4a90 movq %rax, 0x58(%rsp) movl $0x0, 0x54(%rsp) movl 0x54(%rsp), %eax addl $0x1, %eax movl %eax, 0x54(%rsp) movq 0x78(%rsp), %rax movq %rax, 0x40(%rsp) movq 0x80(%rsp), %rax movl 0x3c(%rax), %ecx movq 0x40(%rsp), %rax movl %ecx, (%rax) movq 0x80(%rsp), %rax movl (%rax), %ecx movq 0x40(%rsp), %rax movl %ecx, 0x4(%rax) movq 0x80(%rsp), %rax movl 0x34(%rax), %ecx movq 0x40(%rsp), %rax movl %ecx, 0x8(%rax) movq 0x80(%rsp), %rax movl 0x8(%rax), %ecx movq 0x40(%rsp), %rax movl %ecx, 0xc(%rax) movq 0x80(%rsp), %rax movl 0x2c(%rax), %ecx movq 0x40(%rsp), %rax movl %ecx, 0x10(%rax) movq 0x80(%rsp), %rax movl 0x10(%rax), %ecx movq 0x40(%rsp), %rax movl %ecx, 0x14(%rax) movq 0x80(%rsp), %rax movl 0x24(%rax), %ecx movq 0x40(%rsp), %rax movl %ecx, 0x18(%rax) movq 0x80(%rsp), %rax movl 0x18(%rax), %ecx movq 0x40(%rsp), %rax movl %ecx, 0x1c(%rax) movq 0x80(%rsp), %rax movl 0x1c(%rax), %ecx movq 0x40(%rsp), %rax movl %ecx, 0x20(%rax) movq 0x80(%rsp), %rax movl 0x20(%rax), %ecx movq 0x40(%rsp), %rax movl %ecx, 0x24(%rax) movq 0x80(%rsp), %rax movl 0x14(%rax), %ecx movq 0x40(%rsp), %rax movl %ecx, 0x28(%rax) movq 0x80(%rsp), %rax movl 0x28(%rax), %ecx movq 0x40(%rsp), %rax movl %ecx, 0x2c(%rax) movq 0x80(%rsp), %rax movl 0xc(%rax), %ecx movq 0x40(%rsp), %rax movl %ecx, 0x30(%rax) movq 0x80(%rsp), %rax movl 0x30(%rax), %ecx movq 0x40(%rsp), %rax movl %ecx, 0x34(%rax) movq 0x80(%rsp), %rax movl 0x4(%rax), %ecx movq 0x40(%rsp), %rax movl %ecx, 0x38(%rax) movq 0x80(%rsp), %rax movl 0x38(%rax), %ecx movq 0x40(%rsp), %rax movl %ecx, 0x3c(%rax) movl 0x54(%rsp), %edi movq 0x80(%rsp), %rsi movq 0x40(%rsp), %rdx movq 0x68(%rsp), %rax movslq 0x54(%rsp), %r8 movl $0x10, %ecx movsbl (%rax,%r8), %r8d callq 0x5be6a0 movl 0x54(%rsp), %eax addl $0x1, %eax movl %eax, 0x54(%rsp) movq 0x78(%rsp), %rax movq %rax, 0x48(%rsp) movq 
%rsp, %rax movq %rax, 0x40(%rsp) movq 0x58(%rsp), %rax movl 0x8(%rax), %edi movq 0x48(%rsp), %rax movl (%rax), %esi movq 0x58(%rsp), %rax movl 0xf8(%rax), %edx movq 0x48(%rsp), %rax movl 0x4(%rax), %ecx movsbl 0x77(%rsp), %r8d callq 0x9f4ab0 movl %eax, %ecx movq 0x40(%rsp), %rax movl %ecx, (%rax) movq 0x58(%rsp), %rax movl 0xf8(%rax), %edi movq 0x48(%rsp), %rax movl (%rax), %esi movq 0x58(%rsp), %rax xorl %edx, %edx subl 0x8(%rax), %edx movq 0x48(%rsp), %rax movl 0x4(%rax), %ecx movsbl 0x77(%rsp), %r8d callq 0x9f4ab0 movl %eax, %ecx movq 0x40(%rsp), %rax movl %ecx, 0x4(%rax) movq 0x58(%rsp), %rax movl 0x28(%rax), %edi movq 0x48(%rsp), %rax movl 0x8(%rax), %esi movq 0x58(%rsp), %rax movl 0xd8(%rax), %edx movq 0x48(%rsp), %rax movl 0xc(%rax), %ecx movsbl 0x77(%rsp), %r8d callq 0x9f4ab0 movl %eax, %ecx movq 0x40(%rsp), %rax movl %ecx, 0x8(%rax) movq 0x58(%rsp), %rax movl 0xd8(%rax), %edi movq 0x48(%rsp), %rax movl 0x8(%rax), %esi movq 0x58(%rsp), %rax xorl %edx, %edx subl 0x28(%rax), %edx movq 0x48(%rsp), %rax movl 0xc(%rax), %ecx movsbl 0x77(%rsp), %r8d callq 0x9f4ab0 movl %eax, %ecx movq 0x40(%rsp), %rax movl %ecx, 0xc(%rax) movq 0x58(%rsp), %rax movl 0x48(%rax), %edi movq 0x48(%rsp), %rax movl 0x10(%rax), %esi movq 0x58(%rsp), %rax movl 0xb8(%rax), %edx movq 0x48(%rsp), %rax movl 0x14(%rax), %ecx movsbl 0x77(%rsp), %r8d callq 0x9f4ab0 movl %eax, %ecx movq 0x40(%rsp), %rax movl %ecx, 0x10(%rax) movq 0x58(%rsp), %rax movl 0xb8(%rax), %edi movq 0x48(%rsp), %rax movl 0x10(%rax), %esi movq 0x58(%rsp), %rax xorl %edx, %edx subl 0x48(%rax), %edx movq 0x48(%rsp), %rax movl 0x14(%rax), %ecx movsbl 0x77(%rsp), %r8d callq 0x9f4ab0 movl %eax, %ecx movq 0x40(%rsp), %rax movl %ecx, 0x14(%rax) movq 0x58(%rsp), %rax movl 0x68(%rax), %edi movq 0x48(%rsp), %rax movl 0x18(%rax), %esi movq 0x58(%rsp), %rax movl 0x98(%rax), %edx movq 0x48(%rsp), %rax movl 0x1c(%rax), %ecx movsbl 0x77(%rsp), %r8d callq 0x9f4ab0 movl %eax, %ecx movq 0x40(%rsp), %rax movl %ecx, 0x18(%rax) movq 0x58(%rsp), 
%rax movl 0x98(%rax), %edi movq 0x48(%rsp), %rax movl 0x18(%rax), %esi movq 0x58(%rsp), %rax xorl %edx, %edx subl 0x68(%rax), %edx movq 0x48(%rsp), %rax movl 0x1c(%rax), %ecx movsbl 0x77(%rsp), %r8d callq 0x9f4ab0 movl %eax, %ecx movq 0x40(%rsp), %rax movl %ecx, 0x1c(%rax) movq 0x58(%rsp), %rax movl 0x88(%rax), %edi movq 0x48(%rsp), %rax movl 0x20(%rax), %esi movq 0x58(%rsp), %rax movl 0x78(%rax), %edx movq 0x48(%rsp), %rax movl 0x24(%rax), %ecx movsbl 0x77(%rsp), %r8d callq 0x9f4ab0 movl %eax, %ecx movq 0x40(%rsp), %rax movl %ecx, 0x20(%rax) movq 0x58(%rsp), %rax movl 0x78(%rax), %edi movq 0x48(%rsp), %rax movl 0x20(%rax), %esi movq 0x58(%rsp), %rax xorl %edx, %edx subl 0x88(%rax), %edx movq 0x48(%rsp), %rax movl 0x24(%rax), %ecx movsbl 0x77(%rsp), %r8d callq 0x9f4ab0 movl %eax, %ecx movq 0x40(%rsp), %rax movl %ecx, 0x24(%rax) movq 0x58(%rsp), %rax movl 0xa8(%rax), %edi movq 0x48(%rsp), %rax movl 0x28(%rax), %esi movq 0x58(%rsp), %rax movl 0x58(%rax), %edx movq 0x48(%rsp), %rax movl 0x2c(%rax), %ecx movsbl 0x77(%rsp), %r8d callq 0x9f4ab0 movl %eax, %ecx movq 0x40(%rsp), %rax movl %ecx, 0x28(%rax) movq 0x58(%rsp), %rax movl 0x58(%rax), %edi movq 0x48(%rsp), %rax movl 0x28(%rax), %esi movq 0x58(%rsp), %rax xorl %edx, %edx subl 0xa8(%rax), %edx movq 0x48(%rsp), %rax movl 0x2c(%rax), %ecx movsbl 0x77(%rsp), %r8d callq 0x9f4ab0 movl %eax, %ecx movq 0x40(%rsp), %rax movl %ecx, 0x2c(%rax) movq 0x58(%rsp), %rax movl 0xc8(%rax), %edi movq 0x48(%rsp), %rax movl 0x30(%rax), %esi movq 0x58(%rsp), %rax movl 0x38(%rax), %edx movq 0x48(%rsp), %rax movl 0x34(%rax), %ecx movsbl 0x77(%rsp), %r8d callq 0x9f4ab0 movl %eax, %ecx movq 0x40(%rsp), %rax movl %ecx, 0x30(%rax) movq 0x58(%rsp), %rax movl 0x38(%rax), %edi movq 0x48(%rsp), %rax movl 0x30(%rax), %esi movq 0x58(%rsp), %rax xorl %edx, %edx subl 0xc8(%rax), %edx movq 0x48(%rsp), %rax movl 0x34(%rax), %ecx movsbl 0x77(%rsp), %r8d callq 0x9f4ab0 movl %eax, %ecx movq 0x40(%rsp), %rax movl %ecx, 0x34(%rax) movq 0x58(%rsp), %rax movl 
0xe8(%rax), %edi movq 0x48(%rsp), %rax movl 0x38(%rax), %esi movq 0x58(%rsp), %rax movl 0x18(%rax), %edx movq 0x48(%rsp), %rax movl 0x3c(%rax), %ecx movsbl 0x77(%rsp), %r8d callq 0x9f4ab0 movl %eax, %ecx movq 0x40(%rsp), %rax movl %ecx, 0x38(%rax) movq 0x58(%rsp), %rax movl 0x18(%rax), %edi movq 0x48(%rsp), %rax movl 0x38(%rax), %esi movq 0x58(%rsp), %rax xorl %edx, %edx subl 0xe8(%rax), %edx movq 0x48(%rsp), %rax movl 0x3c(%rax), %ecx movsbl 0x77(%rsp), %r8d callq 0x9f4ab0 movl %eax, %ecx movq 0x40(%rsp), %rax movl %ecx, 0x3c(%rax) movl 0x54(%rsp), %edi movq 0x80(%rsp), %rsi movq 0x40(%rsp), %rdx movq 0x68(%rsp), %rax movslq 0x54(%rsp), %r8 movl $0x10, %ecx movsbl (%rax,%r8), %r8d callq 0x5be6a0 movl 0x54(%rsp), %eax addl $0x1, %eax movl %eax, 0x54(%rsp) movq %rsp, %rax movq %rax, 0x48(%rsp) movq 0x78(%rsp), %rax movq %rax, 0x40(%rsp) movq 0x48(%rsp), %rax movl (%rax), %edi movq 0x48(%rsp), %rax addl 0x20(%rax), %edi movq 0x68(%rsp), %rax movslq 0x54(%rsp), %rcx movsbl (%rax,%rcx), %esi callq 0x9f4b20 movl %eax, %ecx movq 0x40(%rsp), %rax movl %ecx, (%rax) movq 0x48(%rsp), %rax movl 0x4(%rax), %edi movq 0x48(%rsp), %rax addl 0x24(%rax), %edi movq 0x68(%rsp), %rax movslq 0x54(%rsp), %rcx movsbl (%rax,%rcx), %esi callq 0x9f4b20 movl %eax, %ecx movq 0x40(%rsp), %rax movl %ecx, 0x4(%rax) movq 0x48(%rsp), %rax movl 0x8(%rax), %edi movq 0x48(%rsp), %rax addl 0x28(%rax), %edi movq 0x68(%rsp), %rax movslq 0x54(%rsp), %rcx movsbl (%rax,%rcx), %esi callq 0x9f4b20 movl %eax, %ecx movq 0x40(%rsp), %rax movl %ecx, 0x8(%rax) movq 0x48(%rsp), %rax movl 0xc(%rax), %edi movq 0x48(%rsp), %rax addl 0x2c(%rax), %edi movq 0x68(%rsp), %rax movslq 0x54(%rsp), %rcx movsbl (%rax,%rcx), %esi callq 0x9f4b20 movl %eax, %ecx movq 0x40(%rsp), %rax movl %ecx, 0xc(%rax) movq 0x48(%rsp), %rax movl 0x10(%rax), %edi movq 0x48(%rsp), %rax addl 0x30(%rax), %edi movq 0x68(%rsp), %rax movslq 0x54(%rsp), %rcx movsbl (%rax,%rcx), %esi callq 0x9f4b20 movl %eax, %ecx movq 0x40(%rsp), %rax movl %ecx, 
0x10(%rax) movq 0x48(%rsp), %rax movl 0x14(%rax), %edi movq 0x48(%rsp), %rax addl 0x34(%rax), %edi movq 0x68(%rsp), %rax movslq 0x54(%rsp), %rcx movsbl (%rax,%rcx), %esi callq 0x9f4b20 movl %eax, %ecx movq 0x40(%rsp), %rax movl %ecx, 0x14(%rax) movq 0x48(%rsp), %rax movl 0x18(%rax), %edi movq 0x48(%rsp), %rax addl 0x38(%rax), %edi movq 0x68(%rsp), %rax movslq 0x54(%rsp), %rcx movsbl (%rax,%rcx), %esi callq 0x9f4b20 movl %eax, %ecx movq 0x40(%rsp), %rax movl %ecx, 0x18(%rax) movq 0x48(%rsp), %rax movl 0x1c(%rax), %edi movq 0x48(%rsp), %rax addl 0x3c(%rax), %edi movq 0x68(%rsp), %rax movslq 0x54(%rsp), %rcx movsbl (%rax,%rcx), %esi callq 0x9f4b20 movl %eax, %ecx movq 0x40(%rsp), %rax movl %ecx, 0x1c(%rax) movq 0x48(%rsp), %rax movl (%rax), %edi movq 0x48(%rsp), %rax subl 0x20(%rax), %edi movq 0x68(%rsp), %rax movslq 0x54(%rsp), %rcx movsbl (%rax,%rcx), %esi callq 0x9f4b20 movl %eax, %ecx movq 0x40(%rsp), %rax movl %ecx, 0x20(%rax) movq 0x48(%rsp), %rax movl 0x4(%rax), %edi movq 0x48(%rsp), %rax subl 0x24(%rax), %edi movq 0x68(%rsp), %rax movslq 0x54(%rsp), %rcx movsbl (%rax,%rcx), %esi callq 0x9f4b20 movl %eax, %ecx movq 0x40(%rsp), %rax movl %ecx, 0x24(%rax) movq 0x48(%rsp), %rax movl 0x8(%rax), %edi movq 0x48(%rsp), %rax subl 0x28(%rax), %edi movq 0x68(%rsp), %rax movslq 0x54(%rsp), %rcx movsbl (%rax,%rcx), %esi callq 0x9f4b20 movl %eax, %ecx movq 0x40(%rsp), %rax movl %ecx, 0x28(%rax) movq 0x48(%rsp), %rax movl 0xc(%rax), %edi movq 0x48(%rsp), %rax subl 0x2c(%rax), %edi movq 0x68(%rsp), %rax movslq 0x54(%rsp), %rcx movsbl (%rax,%rcx), %esi callq 0x9f4b20 movl %eax, %ecx movq 0x40(%rsp), %rax movl %ecx, 0x2c(%rax) movq 0x48(%rsp), %rax movl 0x10(%rax), %edi movq 0x48(%rsp), %rax subl 0x30(%rax), %edi movq 0x68(%rsp), %rax movslq 0x54(%rsp), %rcx movsbl (%rax,%rcx), %esi callq 0x9f4b20 movl %eax, %ecx movq 0x40(%rsp), %rax movl %ecx, 0x30(%rax) movq 0x48(%rsp), %rax movl 0x14(%rax), %edi movq 0x48(%rsp), %rax subl 0x34(%rax), %edi movq 0x68(%rsp), %rax movslq 
0x54(%rsp), %rcx movsbl (%rax,%rcx), %esi callq 0x9f4b20 movl %eax, %ecx movq 0x40(%rsp), %rax movl %ecx, 0x34(%rax) movq 0x48(%rsp), %rax movl 0x18(%rax), %edi movq 0x48(%rsp), %rax subl 0x38(%rax), %edi movq 0x68(%rsp), %rax movslq 0x54(%rsp), %rcx movsbl (%rax,%rcx), %esi callq 0x9f4b20 movl %eax, %ecx movq 0x40(%rsp), %rax movl %ecx, 0x38(%rax) movq 0x48(%rsp), %rax movl 0x1c(%rax), %edi movq 0x48(%rsp), %rax subl 0x3c(%rax), %edi movq 0x68(%rsp), %rax movslq 0x54(%rsp), %rcx movsbl (%rax,%rcx), %esi callq 0x9f4b20 movl %eax, %ecx movq 0x40(%rsp), %rax movl %ecx, 0x3c(%rax) movl 0x54(%rsp), %edi movq 0x80(%rsp), %rsi movq 0x40(%rsp), %rdx movq 0x68(%rsp), %rax movslq 0x54(%rsp), %r8 movl $0x10, %ecx movsbl (%rax,%r8), %r8d callq 0x5be6a0 movl 0x54(%rsp), %eax addl $0x1, %eax movl %eax, 0x54(%rsp) movq 0x78(%rsp), %rax movq %rax, 0x48(%rsp) movq %rsp, %rax movq %rax, 0x40(%rsp) movq 0x48(%rsp), %rax movl (%rax), %ecx movq 0x40(%rsp), %rax movl %ecx, (%rax) movq 0x48(%rsp), %rax movl 0x4(%rax), %ecx movq 0x40(%rsp), %rax movl %ecx, 0x4(%rax) movq 0x48(%rsp), %rax movl 0x8(%rax), %ecx movq 0x40(%rsp), %rax movl %ecx, 0x8(%rax) movq 0x48(%rsp), %rax movl 0xc(%rax), %ecx movq 0x40(%rsp), %rax movl %ecx, 0xc(%rax) movq 0x48(%rsp), %rax movl 0x10(%rax), %ecx movq 0x40(%rsp), %rax movl %ecx, 0x10(%rax) movq 0x48(%rsp), %rax movl 0x14(%rax), %ecx movq 0x40(%rsp), %rax movl %ecx, 0x14(%rax) movq 0x48(%rsp), %rax movl 0x18(%rax), %ecx movq 0x40(%rsp), %rax movl %ecx, 0x18(%rax) movq 0x48(%rsp), %rax movl 0x1c(%rax), %ecx movq 0x40(%rsp), %rax movl %ecx, 0x1c(%rax) movq 0x58(%rsp), %rax movl 0x20(%rax), %edi movq 0x48(%rsp), %rax movl 0x20(%rax), %esi movq 0x58(%rsp), %rax movl 0xe0(%rax), %edx movq 0x48(%rsp), %rax movl 0x24(%rax), %ecx movsbl 0x77(%rsp), %r8d callq 0x9f4ab0 movl %eax, %ecx movq 0x40(%rsp), %rax movl %ecx, 0x20(%rax) movq 0x58(%rsp), %rax movl 0xe0(%rax), %edi movq 0x48(%rsp), %rax movl 0x20(%rax), %esi movq 0x58(%rsp), %rax xorl %edx, %edx subl 
0x20(%rax), %edx movq 0x48(%rsp), %rax movl 0x24(%rax), %ecx movsbl 0x77(%rsp), %r8d callq 0x9f4ab0 movl %eax, %ecx movq 0x40(%rsp), %rax movl %ecx, 0x24(%rax) movq 0x58(%rsp), %rax movl 0xa0(%rax), %edi movq 0x48(%rsp), %rax movl 0x28(%rax), %esi movq 0x58(%rsp), %rax movl 0x60(%rax), %edx movq 0x48(%rsp), %rax movl 0x2c(%rax), %ecx movsbl 0x77(%rsp), %r8d callq 0x9f4ab0 movl %eax, %ecx movq 0x40(%rsp), %rax movl %ecx, 0x28(%rax) movq 0x58(%rsp), %rax movl 0x60(%rax), %edi movq 0x48(%rsp), %rax movl 0x28(%rax), %esi movq 0x58(%rsp), %rax xorl %edx, %edx subl 0xa0(%rax), %edx movq 0x48(%rsp), %rax movl 0x2c(%rax), %ecx movsbl 0x77(%rsp), %r8d callq 0x9f4ab0 movl %eax, %ecx movq 0x40(%rsp), %rax movl %ecx, 0x2c(%rax) movq 0x58(%rsp), %rax xorl %edi, %edi subl 0xe0(%rax), %edi movq 0x48(%rsp), %rax movl 0x30(%rax), %esi movq 0x58(%rsp), %rax movl 0x20(%rax), %edx movq 0x48(%rsp), %rax movl 0x34(%rax), %ecx movsbl 0x77(%rsp), %r8d callq 0x9f4ab0 movl %eax, %ecx movq 0x40(%rsp), %rax movl %ecx, 0x30(%rax) movq 0x58(%rsp), %rax movl 0x20(%rax), %edi movq 0x48(%rsp), %rax movl 0x30(%rax), %esi movq 0x58(%rsp), %rax movl 0xe0(%rax), %edx movq 0x48(%rsp), %rax movl 0x34(%rax), %ecx movsbl 0x77(%rsp), %r8d callq 0x9f4ab0 movl %eax, %ecx movq 0x40(%rsp), %rax movl %ecx, 0x34(%rax) movq 0x58(%rsp), %rax xorl %edi, %edi subl 0x60(%rax), %edi movq 0x48(%rsp), %rax movl 0x38(%rax), %esi movq 0x58(%rsp), %rax movl 0xa0(%rax), %edx movq 0x48(%rsp), %rax movl 0x3c(%rax), %ecx movsbl 0x77(%rsp), %r8d callq 0x9f4ab0 movl %eax, %ecx movq 0x40(%rsp), %rax movl %ecx, 0x38(%rax) movq 0x58(%rsp), %rax movl 0xa0(%rax), %edi movq 0x48(%rsp), %rax movl 0x38(%rax), %esi movq 0x58(%rsp), %rax movl 0x60(%rax), %edx movq 0x48(%rsp), %rax movl 0x3c(%rax), %ecx movsbl 0x77(%rsp), %r8d callq 0x9f4ab0 movl %eax, %ecx movq 0x40(%rsp), %rax movl %ecx, 0x3c(%rax) movl 0x54(%rsp), %edi movq 0x80(%rsp), %rsi movq 0x40(%rsp), %rdx movq 0x68(%rsp), %rax movslq 0x54(%rsp), %r8 movl $0x10, %ecx movsbl 
(%rax,%r8), %r8d callq 0x5be6a0 movl 0x54(%rsp), %eax addl $0x1, %eax movl %eax, 0x54(%rsp) movq %rsp, %rax movq %rax, 0x48(%rsp) movq 0x78(%rsp), %rax movq %rax, 0x40(%rsp) movq 0x48(%rsp), %rax movl (%rax), %edi movq 0x48(%rsp), %rax addl 0x10(%rax), %edi movq 0x68(%rsp), %rax movslq 0x54(%rsp), %rcx movsbl (%rax,%rcx), %esi callq 0x9f4b20 movl %eax, %ecx movq 0x40(%rsp), %rax movl %ecx, (%rax) movq 0x48(%rsp), %rax movl 0x4(%rax), %edi movq 0x48(%rsp), %rax addl 0x14(%rax), %edi movq 0x68(%rsp), %rax movslq 0x54(%rsp), %rcx movsbl (%rax,%rcx), %esi callq 0x9f4b20 movl %eax, %ecx movq 0x40(%rsp), %rax movl %ecx, 0x4(%rax) movq 0x48(%rsp), %rax movl 0x8(%rax), %edi movq 0x48(%rsp), %rax addl 0x18(%rax), %edi movq 0x68(%rsp), %rax movslq 0x54(%rsp), %rcx movsbl (%rax,%rcx), %esi callq 0x9f4b20 movl %eax, %ecx movq 0x40(%rsp), %rax movl %ecx, 0x8(%rax) movq 0x48(%rsp), %rax movl 0xc(%rax), %edi movq 0x48(%rsp), %rax addl 0x1c(%rax), %edi movq 0x68(%rsp), %rax movslq 0x54(%rsp), %rcx movsbl (%rax,%rcx), %esi callq 0x9f4b20 movl %eax, %ecx movq 0x40(%rsp), %rax movl %ecx, 0xc(%rax) movq 0x48(%rsp), %rax movl (%rax), %edi movq 0x48(%rsp), %rax subl 0x10(%rax), %edi movq 0x68(%rsp), %rax movslq 0x54(%rsp), %rcx movsbl (%rax,%rcx), %esi callq 0x9f4b20 movl %eax, %ecx movq 0x40(%rsp), %rax movl %ecx, 0x10(%rax) movq 0x48(%rsp), %rax movl 0x4(%rax), %edi movq 0x48(%rsp), %rax subl 0x14(%rax), %edi movq 0x68(%rsp), %rax movslq 0x54(%rsp), %rcx movsbl (%rax,%rcx), %esi callq 0x9f4b20 movl %eax, %ecx movq 0x40(%rsp), %rax movl %ecx, 0x14(%rax) movq 0x48(%rsp), %rax movl 0x8(%rax), %edi movq 0x48(%rsp), %rax subl 0x18(%rax), %edi movq 0x68(%rsp), %rax movslq 0x54(%rsp), %rcx movsbl (%rax,%rcx), %esi callq 0x9f4b20 movl %eax, %ecx movq 0x40(%rsp), %rax movl %ecx, 0x18(%rax) movq 0x48(%rsp), %rax movl 0xc(%rax), %edi movq 0x48(%rsp), %rax subl 0x1c(%rax), %edi movq 0x68(%rsp), %rax movslq 0x54(%rsp), %rcx movsbl (%rax,%rcx), %esi callq 0x9f4b20 movl %eax, %ecx movq 0x40(%rsp), 
%rax movl %ecx, 0x1c(%rax) movq 0x48(%rsp), %rax movl 0x20(%rax), %edi movq 0x48(%rsp), %rax addl 0x30(%rax), %edi movq 0x68(%rsp), %rax movslq 0x54(%rsp), %rcx movsbl (%rax,%rcx), %esi callq 0x9f4b20 movl %eax, %ecx movq 0x40(%rsp), %rax movl %ecx, 0x20(%rax) movq 0x48(%rsp), %rax movl 0x24(%rax), %edi movq 0x48(%rsp), %rax addl 0x34(%rax), %edi movq 0x68(%rsp), %rax movslq 0x54(%rsp), %rcx movsbl (%rax,%rcx), %esi callq 0x9f4b20 movl %eax, %ecx movq 0x40(%rsp), %rax movl %ecx, 0x24(%rax) movq 0x48(%rsp), %rax movl 0x28(%rax), %edi movq 0x48(%rsp), %rax addl 0x38(%rax), %edi movq 0x68(%rsp), %rax movslq 0x54(%rsp), %rcx movsbl (%rax,%rcx), %esi callq 0x9f4b20 movl %eax, %ecx movq 0x40(%rsp), %rax movl %ecx, 0x28(%rax) movq 0x48(%rsp), %rax movl 0x2c(%rax), %edi movq 0x48(%rsp), %rax addl 0x3c(%rax), %edi movq 0x68(%rsp), %rax movslq 0x54(%rsp), %rcx movsbl (%rax,%rcx), %esi callq 0x9f4b20 movl %eax, %ecx movq 0x40(%rsp), %rax movl %ecx, 0x2c(%rax) movq 0x48(%rsp), %rax movl 0x20(%rax), %edi movq 0x48(%rsp), %rax subl 0x30(%rax), %edi movq 0x68(%rsp), %rax movslq 0x54(%rsp), %rcx movsbl (%rax,%rcx), %esi callq 0x9f4b20 movl %eax, %ecx movq 0x40(%rsp), %rax movl %ecx, 0x30(%rax) movq 0x48(%rsp), %rax movl 0x24(%rax), %edi movq 0x48(%rsp), %rax subl 0x34(%rax), %edi movq 0x68(%rsp), %rax movslq 0x54(%rsp), %rcx movsbl (%rax,%rcx), %esi callq 0x9f4b20 movl %eax, %ecx movq 0x40(%rsp), %rax movl %ecx, 0x34(%rax) movq 0x48(%rsp), %rax movl 0x28(%rax), %edi movq 0x48(%rsp), %rax subl 0x38(%rax), %edi movq 0x68(%rsp), %rax movslq 0x54(%rsp), %rcx movsbl (%rax,%rcx), %esi callq 0x9f4b20 movl %eax, %ecx movq 0x40(%rsp), %rax movl %ecx, 0x38(%rax) movq 0x48(%rsp), %rax movl 0x2c(%rax), %edi movq 0x48(%rsp), %rax subl 0x3c(%rax), %edi movq 0x68(%rsp), %rax movslq 0x54(%rsp), %rcx movsbl (%rax,%rcx), %esi callq 0x9f4b20 movl %eax, %ecx movq 0x40(%rsp), %rax movl %ecx, 0x3c(%rax) movl 0x54(%rsp), %edi movq 0x80(%rsp), %rsi movq 0x40(%rsp), %rdx movq 0x68(%rsp), %rax movslq 
0x54(%rsp), %r8 movl $0x10, %ecx movsbl (%rax,%r8), %r8d callq 0x5be6a0 movl 0x54(%rsp), %eax addl $0x1, %eax movl %eax, 0x54(%rsp) movq 0x78(%rsp), %rax movq %rax, 0x48(%rsp) movq %rsp, %rax movq %rax, 0x40(%rsp) movq 0x48(%rsp), %rax movl (%rax), %ecx movq 0x40(%rsp), %rax movl %ecx, (%rax) movq 0x48(%rsp), %rax movl 0x4(%rax), %ecx movq 0x40(%rsp), %rax movl %ecx, 0x4(%rax) movq 0x48(%rsp), %rax movl 0x8(%rax), %ecx movq 0x40(%rsp), %rax movl %ecx, 0x8(%rax) movq 0x48(%rsp), %rax movl 0xc(%rax), %ecx movq 0x40(%rsp), %rax movl %ecx, 0xc(%rax) movq 0x58(%rsp), %rax movl 0x40(%rax), %edi movq 0x48(%rsp), %rax movl 0x10(%rax), %esi movq 0x58(%rsp), %rax movl 0xc0(%rax), %edx movq 0x48(%rsp), %rax movl 0x14(%rax), %ecx movsbl 0x77(%rsp), %r8d callq 0x9f4ab0 movl %eax, %ecx movq 0x40(%rsp), %rax movl %ecx, 0x10(%rax) movq 0x58(%rsp), %rax movl 0xc0(%rax), %edi movq 0x48(%rsp), %rax movl 0x10(%rax), %esi movq 0x58(%rsp), %rax xorl %edx, %edx subl 0x40(%rax), %edx movq 0x48(%rsp), %rax movl 0x14(%rax), %ecx movsbl 0x77(%rsp), %r8d callq 0x9f4ab0 movl %eax, %ecx movq 0x40(%rsp), %rax movl %ecx, 0x14(%rax) movq 0x58(%rsp), %rax xorl %edi, %edi subl 0xc0(%rax), %edi movq 0x48(%rsp), %rax movl 0x18(%rax), %esi movq 0x58(%rsp), %rax movl 0x40(%rax), %edx movq 0x48(%rsp), %rax movl 0x1c(%rax), %ecx movsbl 0x77(%rsp), %r8d callq 0x9f4ab0 movl %eax, %ecx movq 0x40(%rsp), %rax movl %ecx, 0x18(%rax) movq 0x58(%rsp), %rax movl 0x40(%rax), %edi movq 0x48(%rsp), %rax movl 0x18(%rax), %esi movq 0x58(%rsp), %rax movl 0xc0(%rax), %edx movq 0x48(%rsp), %rax movl 0x1c(%rax), %ecx movsbl 0x77(%rsp), %r8d callq 0x9f4ab0 movl %eax, %ecx movq 0x40(%rsp), %rax movl %ecx, 0x1c(%rax) movq 0x48(%rsp), %rax movl 0x20(%rax), %ecx movq 0x40(%rsp), %rax movl %ecx, 0x20(%rax) movq 0x48(%rsp), %rax movl 0x24(%rax), %ecx movq 0x40(%rsp), %rax movl %ecx, 0x24(%rax) movq 0x48(%rsp), %rax movl 0x28(%rax), %ecx movq 0x40(%rsp), %rax movl %ecx, 0x28(%rax) movq 0x48(%rsp), %rax movl 0x2c(%rax), %ecx movq 
0x40(%rsp), %rax movl %ecx, 0x2c(%rax) movq 0x58(%rsp), %rax movl 0x40(%rax), %edi movq 0x48(%rsp), %rax movl 0x30(%rax), %esi movq 0x58(%rsp), %rax movl 0xc0(%rax), %edx movq 0x48(%rsp), %rax movl 0x34(%rax), %ecx movsbl 0x77(%rsp), %r8d callq 0x9f4ab0 movl %eax, %ecx movq 0x40(%rsp), %rax movl %ecx, 0x30(%rax) movq 0x58(%rsp), %rax movl 0xc0(%rax), %edi movq 0x48(%rsp), %rax movl 0x30(%rax), %esi movq 0x58(%rsp), %rax xorl %edx, %edx subl 0x40(%rax), %edx movq 0x48(%rsp), %rax movl 0x34(%rax), %ecx movsbl 0x77(%rsp), %r8d callq 0x9f4ab0 movl %eax, %ecx movq 0x40(%rsp), %rax movl %ecx, 0x34(%rax) movq 0x58(%rsp), %rax xorl %edi, %edi subl 0xc0(%rax), %edi movq 0x48(%rsp), %rax movl 0x38(%rax), %esi movq 0x58(%rsp), %rax movl 0x40(%rax), %edx movq 0x48(%rsp), %rax movl 0x3c(%rax), %ecx movsbl 0x77(%rsp), %r8d callq 0x9f4ab0 movl %eax, %ecx movq 0x40(%rsp), %rax movl %ecx, 0x38(%rax) movq 0x58(%rsp), %rax movl 0x40(%rax), %edi movq 0x48(%rsp), %rax movl 0x38(%rax), %esi movq 0x58(%rsp), %rax movl 0xc0(%rax), %edx movq 0x48(%rsp), %rax movl 0x3c(%rax), %ecx movsbl 0x77(%rsp), %r8d callq 0x9f4ab0 movl %eax, %ecx movq 0x40(%rsp), %rax movl %ecx, 0x3c(%rax) movl 0x54(%rsp), %edi movq 0x80(%rsp), %rsi movq 0x40(%rsp), %rdx movq 0x68(%rsp), %rax movslq 0x54(%rsp), %r8 movl $0x10, %ecx movsbl (%rax,%r8), %r8d callq 0x5be6a0 movl 0x54(%rsp), %eax addl $0x1, %eax movl %eax, 0x54(%rsp) movq %rsp, %rax movq %rax, 0x48(%rsp) movq 0x78(%rsp), %rax movq %rax, 0x40(%rsp) movq 0x48(%rsp), %rax movl (%rax), %edi movq 0x48(%rsp), %rax addl 0x8(%rax), %edi movq 0x68(%rsp), %rax movslq 0x54(%rsp), %rcx movsbl (%rax,%rcx), %esi callq 0x9f4b20 movl %eax, %ecx movq 0x40(%rsp), %rax movl %ecx, (%rax) movq 0x48(%rsp), %rax movl 0x4(%rax), %edi movq 0x48(%rsp), %rax addl 0xc(%rax), %edi movq 0x68(%rsp), %rax movslq 0x54(%rsp), %rcx movsbl (%rax,%rcx), %esi callq 0x9f4b20 movl %eax, %ecx movq 0x40(%rsp), %rax movl %ecx, 0x4(%rax) movq 0x48(%rsp), %rax movl (%rax), %edi movq 0x48(%rsp), %rax 
subl 0x8(%rax), %edi movq 0x68(%rsp), %rax movslq 0x54(%rsp), %rcx movsbl (%rax,%rcx), %esi callq 0x9f4b20 movl %eax, %ecx movq 0x40(%rsp), %rax movl %ecx, 0x8(%rax) movq 0x48(%rsp), %rax movl 0x4(%rax), %edi movq 0x48(%rsp), %rax subl 0xc(%rax), %edi movq 0x68(%rsp), %rax movslq 0x54(%rsp), %rcx movsbl (%rax,%rcx), %esi callq 0x9f4b20 movl %eax, %ecx movq 0x40(%rsp), %rax movl %ecx, 0xc(%rax) movq 0x48(%rsp), %rax movl 0x10(%rax), %edi movq 0x48(%rsp), %rax addl 0x18(%rax), %edi movq 0x68(%rsp), %rax movslq 0x54(%rsp), %rcx movsbl (%rax,%rcx), %esi callq 0x9f4b20 movl %eax, %ecx movq 0x40(%rsp), %rax movl %ecx, 0x10(%rax) movq 0x48(%rsp), %rax movl 0x14(%rax), %edi movq 0x48(%rsp), %rax addl 0x1c(%rax), %edi movq 0x68(%rsp), %rax movslq 0x54(%rsp), %rcx movsbl (%rax,%rcx), %esi callq 0x9f4b20 movl %eax, %ecx movq 0x40(%rsp), %rax movl %ecx, 0x14(%rax) movq 0x48(%rsp), %rax movl 0x10(%rax), %edi movq 0x48(%rsp), %rax subl 0x18(%rax), %edi movq 0x68(%rsp), %rax movslq 0x54(%rsp), %rcx movsbl (%rax,%rcx), %esi callq 0x9f4b20 movl %eax, %ecx movq 0x40(%rsp), %rax movl %ecx, 0x18(%rax) movq 0x48(%rsp), %rax movl 0x14(%rax), %edi movq 0x48(%rsp), %rax subl 0x1c(%rax), %edi movq 0x68(%rsp), %rax movslq 0x54(%rsp), %rcx movsbl (%rax,%rcx), %esi callq 0x9f4b20 movl %eax, %ecx movq 0x40(%rsp), %rax movl %ecx, 0x1c(%rax) movq 0x48(%rsp), %rax movl 0x20(%rax), %edi movq 0x48(%rsp), %rax addl 0x28(%rax), %edi movq 0x68(%rsp), %rax movslq 0x54(%rsp), %rcx movsbl (%rax,%rcx), %esi callq 0x9f4b20 movl %eax, %ecx movq 0x40(%rsp), %rax movl %ecx, 0x20(%rax) movq 0x48(%rsp), %rax movl 0x24(%rax), %edi movq 0x48(%rsp), %rax addl 0x2c(%rax), %edi movq 0x68(%rsp), %rax movslq 0x54(%rsp), %rcx movsbl (%rax,%rcx), %esi callq 0x9f4b20 movl %eax, %ecx movq 0x40(%rsp), %rax movl %ecx, 0x24(%rax) movq 0x48(%rsp), %rax movl 0x20(%rax), %edi movq 0x48(%rsp), %rax subl 0x28(%rax), %edi movq 0x68(%rsp), %rax movslq 0x54(%rsp), %rcx movsbl (%rax,%rcx), %esi callq 0x9f4b20 movl %eax, %ecx movq 
0x40(%rsp), %rax movl %ecx, 0x28(%rax) movq 0x48(%rsp), %rax movl 0x24(%rax), %edi movq 0x48(%rsp), %rax subl 0x2c(%rax), %edi movq 0x68(%rsp), %rax movslq 0x54(%rsp), %rcx movsbl (%rax,%rcx), %esi callq 0x9f4b20 movl %eax, %ecx movq 0x40(%rsp), %rax movl %ecx, 0x2c(%rax) movq 0x48(%rsp), %rax movl 0x30(%rax), %edi movq 0x48(%rsp), %rax addl 0x38(%rax), %edi movq 0x68(%rsp), %rax movslq 0x54(%rsp), %rcx movsbl (%rax,%rcx), %esi callq 0x9f4b20 movl %eax, %ecx movq 0x40(%rsp), %rax movl %ecx, 0x30(%rax) movq 0x48(%rsp), %rax movl 0x34(%rax), %edi movq 0x48(%rsp), %rax addl 0x3c(%rax), %edi movq 0x68(%rsp), %rax movslq 0x54(%rsp), %rcx movsbl (%rax,%rcx), %esi callq 0x9f4b20 movl %eax, %ecx movq 0x40(%rsp), %rax movl %ecx, 0x34(%rax) movq 0x48(%rsp), %rax movl 0x30(%rax), %edi movq 0x48(%rsp), %rax subl 0x38(%rax), %edi movq 0x68(%rsp), %rax movslq 0x54(%rsp), %rcx movsbl (%rax,%rcx), %esi callq 0x9f4b20 movl %eax, %ecx movq 0x40(%rsp), %rax movl %ecx, 0x38(%rax) movq 0x48(%rsp), %rax movl 0x34(%rax), %edi movq 0x48(%rsp), %rax subl 0x3c(%rax), %edi movq 0x68(%rsp), %rax movslq 0x54(%rsp), %rcx movsbl (%rax,%rcx), %esi callq 0x9f4b20 movl %eax, %ecx movq 0x40(%rsp), %rax movl %ecx, 0x3c(%rax) movl 0x54(%rsp), %edi movq 0x80(%rsp), %rsi movq 0x40(%rsp), %rdx movq 0x68(%rsp), %rax movslq 0x54(%rsp), %r8 movl $0x10, %ecx movsbl (%rax,%r8), %r8d callq 0x5be6a0 movl 0x54(%rsp), %eax addl $0x1, %eax movl %eax, 0x54(%rsp) movq 0x78(%rsp), %rax movq %rax, 0x48(%rsp) movq %rsp, %rax movq %rax, 0x40(%rsp) movq 0x48(%rsp), %rax movl (%rax), %ecx movq 0x40(%rsp), %rax movl %ecx, (%rax) movq 0x48(%rsp), %rax movl 0x4(%rax), %ecx movq 0x40(%rsp), %rax movl %ecx, 0x4(%rax) movq 0x58(%rsp), %rax movl 0x80(%rax), %edi movq 0x48(%rsp), %rax movl 0x8(%rax), %esi movq 0x58(%rsp), %rax movl 0x80(%rax), %edx movq 0x48(%rsp), %rax movl 0xc(%rax), %ecx movsbl 0x77(%rsp), %r8d callq 0x9f4ab0 movl %eax, %ecx movq 0x40(%rsp), %rax movl %ecx, 0x8(%rax) movq 0x58(%rsp), %rax movl 0x80(%rax), %edi 
movq 0x48(%rsp), %rax movl 0x8(%rax), %esi movq 0x58(%rsp), %rax xorl %edx, %edx subl 0x80(%rax), %edx movq 0x48(%rsp), %rax movl 0xc(%rax), %ecx movsbl 0x77(%rsp), %r8d callq 0x9f4ab0 movl %eax, %ecx movq 0x40(%rsp), %rax movl %ecx, 0xc(%rax) movq 0x48(%rsp), %rax movl 0x10(%rax), %ecx movq 0x40(%rsp), %rax movl %ecx, 0x10(%rax) movq 0x48(%rsp), %rax movl 0x14(%rax), %ecx movq 0x40(%rsp), %rax movl %ecx, 0x14(%rax) movq 0x58(%rsp), %rax movl 0x80(%rax), %edi movq 0x48(%rsp), %rax movl 0x18(%rax), %esi movq 0x58(%rsp), %rax movl 0x80(%rax), %edx movq 0x48(%rsp), %rax movl 0x1c(%rax), %ecx movsbl 0x77(%rsp), %r8d callq 0x9f4ab0 movl %eax, %ecx movq 0x40(%rsp), %rax movl %ecx, 0x18(%rax) movq 0x58(%rsp), %rax movl 0x80(%rax), %edi movq 0x48(%rsp), %rax movl 0x18(%rax), %esi movq 0x58(%rsp), %rax xorl %edx, %edx subl 0x80(%rax), %edx movq 0x48(%rsp), %rax movl 0x1c(%rax), %ecx movsbl 0x77(%rsp), %r8d callq 0x9f4ab0 movl %eax, %ecx movq 0x40(%rsp), %rax movl %ecx, 0x1c(%rax) movq 0x48(%rsp), %rax movl 0x20(%rax), %ecx movq 0x40(%rsp), %rax movl %ecx, 0x20(%rax) movq 0x48(%rsp), %rax movl 0x24(%rax), %ecx movq 0x40(%rsp), %rax movl %ecx, 0x24(%rax) movq 0x58(%rsp), %rax movl 0x80(%rax), %edi movq 0x48(%rsp), %rax movl 0x28(%rax), %esi movq 0x58(%rsp), %rax movl 0x80(%rax), %edx movq 0x48(%rsp), %rax movl 0x2c(%rax), %ecx movsbl 0x77(%rsp), %r8d callq 0x9f4ab0 movl %eax, %ecx movq 0x40(%rsp), %rax movl %ecx, 0x28(%rax) movq 0x58(%rsp), %rax movl 0x80(%rax), %edi movq 0x48(%rsp), %rax movl 0x28(%rax), %esi movq 0x58(%rsp), %rax xorl %edx, %edx subl 0x80(%rax), %edx movq 0x48(%rsp), %rax movl 0x2c(%rax), %ecx movsbl 0x77(%rsp), %r8d callq 0x9f4ab0 movl %eax, %ecx movq 0x40(%rsp), %rax movl %ecx, 0x2c(%rax) movq 0x48(%rsp), %rax movl 0x30(%rax), %ecx movq 0x40(%rsp), %rax movl %ecx, 0x30(%rax) movq 0x48(%rsp), %rax movl 0x34(%rax), %ecx movq 0x40(%rsp), %rax movl %ecx, 0x34(%rax) movq 0x58(%rsp), %rax movl 0x80(%rax), %edi movq 0x48(%rsp), %rax movl 0x38(%rax), %esi movq 
0x58(%rsp), %rax movl 0x80(%rax), %edx movq 0x48(%rsp), %rax movl 0x3c(%rax), %ecx movsbl 0x77(%rsp), %r8d callq 0x9f4ab0 movl %eax, %ecx movq 0x40(%rsp), %rax movl %ecx, 0x38(%rax) movq 0x58(%rsp), %rax movl 0x80(%rax), %edi movq 0x48(%rsp), %rax movl 0x38(%rax), %esi movq 0x58(%rsp), %rax xorl %edx, %edx subl 0x80(%rax), %edx movq 0x48(%rsp), %rax movl 0x3c(%rax), %ecx movsbl 0x77(%rsp), %r8d callq 0x9f4ab0 movl %eax, %ecx movq 0x40(%rsp), %rax movl %ecx, 0x3c(%rax) movl 0x54(%rsp), %edi movq 0x80(%rsp), %rsi movq 0x40(%rsp), %rdx movq 0x68(%rsp), %rax movslq 0x54(%rsp), %r8 movl $0x10, %ecx movsbl (%rax,%r8), %r8d callq 0x5be6a0 movq %rsp, %rax movq %rax, 0x48(%rsp) movq 0x78(%rsp), %rax movq %rax, 0x40(%rsp) movq 0x48(%rsp), %rax movl (%rax), %ecx movq 0x40(%rsp), %rax movl %ecx, (%rax) movq 0x48(%rsp), %rax xorl %ecx, %ecx subl 0x20(%rax), %ecx movq 0x40(%rsp), %rax movl %ecx, 0x4(%rax) movq 0x48(%rsp), %rax movl 0x30(%rax), %ecx movq 0x40(%rsp), %rax movl %ecx, 0x8(%rax) movq 0x48(%rsp), %rax xorl %ecx, %ecx subl 0x10(%rax), %ecx movq 0x40(%rsp), %rax movl %ecx, 0xc(%rax) movq 0x48(%rsp), %rax movl 0x18(%rax), %ecx movq 0x40(%rsp), %rax movl %ecx, 0x10(%rax) movq 0x48(%rsp), %rax xorl %ecx, %ecx subl 0x38(%rax), %ecx movq 0x40(%rsp), %rax movl %ecx, 0x14(%rax) movq 0x48(%rsp), %rax movl 0x28(%rax), %ecx movq 0x40(%rsp), %rax movl %ecx, 0x18(%rax) movq 0x48(%rsp), %rax xorl %ecx, %ecx subl 0x8(%rax), %ecx movq 0x40(%rsp), %rax movl %ecx, 0x1c(%rax) movq 0x48(%rsp), %rax movl 0xc(%rax), %ecx movq 0x40(%rsp), %rax movl %ecx, 0x20(%rax) movq 0x48(%rsp), %rax xorl %ecx, %ecx subl 0x2c(%rax), %ecx movq 0x40(%rsp), %rax movl %ecx, 0x24(%rax) movq 0x48(%rsp), %rax movl 0x3c(%rax), %ecx movq 0x40(%rsp), %rax movl %ecx, 0x28(%rax) movq 0x48(%rsp), %rax xorl %ecx, %ecx subl 0x1c(%rax), %ecx movq 0x40(%rsp), %rax movl %ecx, 0x2c(%rax) movq 0x48(%rsp), %rax movl 0x14(%rax), %ecx movq 0x40(%rsp), %rax movl %ecx, 0x30(%rax) movq 0x48(%rsp), %rax xorl %ecx, %ecx subl 
0x34(%rax), %ecx movq 0x40(%rsp), %rax movl %ecx, 0x34(%rax) movq 0x48(%rsp), %rax movl 0x24(%rax), %ecx movq 0x40(%rsp), %rax movl %ecx, 0x38(%rax) movq 0x48(%rsp), %rax xorl %ecx, %ecx subl 0x4(%rax), %ecx movq 0x40(%rsp), %rax movl %ecx, 0x3c(%rax) addq $0x88, %rsp retq nopw %cs:(%rax,%rax)
/m-ab-s[P]aom/av1/common/av1_inv_txfm1d.c
av1_fdct4
// Forward 4-point DCT, reference C implementation.
// input/output: 4 coefficients each (may not alias through `step` stages);
// cos_bit selects the cosine table precision; stage_range[] gives the
// permitted bit range checked after every stage.
void av1_fdct4(const int32_t *input, int32_t *output, int8_t cos_bit,
               const int8_t *stage_range) {
  const int32_t size = 4;
  int32_t stage = 0;
  int32_t step[4];  // scratch buffer for the intermediate stage

  // stage 0: range-check the raw input.
  av1_range_check_buf(stage, input, input, size, stage_range[stage]);

  // stage 1: butterfly add/sub pairs, written straight into output.
  stage++;
  output[0] = input[0] + input[3];
  output[1] = input[1] + input[2];
  output[2] = -input[2] + input[1];
  output[3] = -input[3] + input[0];
  av1_range_check_buf(stage, input, output, size, stage_range[stage]);

  // stage 2: rotations by cospi constants, output -> step.
  stage++;
  const int32_t *cospi = cospi_arr(cos_bit);
  step[0] = half_btf(cospi[32], output[0], cospi[32], output[1], cos_bit);
  step[1] = half_btf(-cospi[32], output[1], cospi[32], output[0], cos_bit);
  step[2] = half_btf(cospi[48], output[2], cospi[16], output[3], cos_bit);
  step[3] = half_btf(cospi[48], output[3], -cospi[16], output[2], cos_bit);
  av1_range_check_buf(stage, input, step, size, stage_range[stage]);

  // stage 3: reorder coefficients back into output.
  stage++;
  output[0] = step[0];
  output[1] = step[2];
  output[2] = step[1];
  output[3] = step[3];
  av1_range_check_buf(stage, input, output, size, stage_range[stage]);
}
subq $0x58, %rsp movb %dl, %al movq %rdi, 0x50(%rsp) movq %rsi, 0x48(%rsp) movb %al, 0x47(%rsp) movq %rcx, 0x38(%rsp) movl $0x4, 0x34(%rsp) movl $0x0, 0x24(%rsp) movl 0x24(%rsp), %edi movq 0x50(%rsp), %rsi movq 0x50(%rsp), %rdx movq 0x38(%rsp), %rax movslq 0x24(%rsp), %r8 movl $0x4, %ecx movsbl (%rax,%r8), %r8d callq 0x5be6a0 movl 0x24(%rsp), %eax addl $0x1, %eax movl %eax, 0x24(%rsp) movq 0x48(%rsp), %rax movq %rax, 0x10(%rsp) movq 0x50(%rsp), %rax movl (%rax), %ecx movq 0x50(%rsp), %rax addl 0xc(%rax), %ecx movq 0x10(%rsp), %rax movl %ecx, (%rax) movq 0x50(%rsp), %rax movl 0x4(%rax), %ecx movq 0x50(%rsp), %rax addl 0x8(%rax), %ecx movq 0x10(%rsp), %rax movl %ecx, 0x4(%rax) movq 0x50(%rsp), %rax xorl %ecx, %ecx subl 0x8(%rax), %ecx movq 0x50(%rsp), %rax addl 0x4(%rax), %ecx movq 0x10(%rsp), %rax movl %ecx, 0x8(%rax) movq 0x50(%rsp), %rax xorl %ecx, %ecx subl 0xc(%rax), %ecx movq 0x50(%rsp), %rax addl (%rax), %ecx movq 0x10(%rsp), %rax movl %ecx, 0xc(%rax) movl 0x24(%rsp), %edi movq 0x50(%rsp), %rsi movq 0x10(%rsp), %rdx movq 0x38(%rsp), %rax movslq 0x24(%rsp), %r8 movl $0x4, %ecx movsbl (%rax,%r8), %r8d callq 0x5be6a0 movl 0x24(%rsp), %eax addl $0x1, %eax movl %eax, 0x24(%rsp) movsbl 0x47(%rsp), %edi callq 0xa06b80 movq %rax, 0x28(%rsp) movq 0x48(%rsp), %rax movq %rax, 0x18(%rsp) movq %rsp, %rax movq %rax, 0x10(%rsp) movq 0x28(%rsp), %rax movl 0x80(%rax), %edi movq 0x18(%rsp), %rax movl (%rax), %esi movq 0x28(%rsp), %rax movl 0x80(%rax), %edx movq 0x18(%rsp), %rax movl 0x4(%rax), %ecx movsbl 0x47(%rsp), %r8d callq 0xa06ba0 movl %eax, %ecx movq 0x10(%rsp), %rax movl %ecx, (%rax) movq 0x28(%rsp), %rax xorl %edi, %edi subl 0x80(%rax), %edi movq 0x18(%rsp), %rax movl 0x4(%rax), %esi movq 0x28(%rsp), %rax movl 0x80(%rax), %edx movq 0x18(%rsp), %rax movl (%rax), %ecx movsbl 0x47(%rsp), %r8d callq 0xa06ba0 movl %eax, %ecx movq 0x10(%rsp), %rax movl %ecx, 0x4(%rax) movq 0x28(%rsp), %rax movl 0xc0(%rax), %edi movq 0x18(%rsp), %rax movl 0x8(%rax), %esi movq 0x28(%rsp), %rax 
movl 0x40(%rax), %edx movq 0x18(%rsp), %rax movl 0xc(%rax), %ecx movsbl 0x47(%rsp), %r8d callq 0xa06ba0 movl %eax, %ecx movq 0x10(%rsp), %rax movl %ecx, 0x8(%rax) movq 0x28(%rsp), %rax movl 0xc0(%rax), %edi movq 0x18(%rsp), %rax movl 0xc(%rax), %esi movq 0x28(%rsp), %rax xorl %edx, %edx subl 0x40(%rax), %edx movq 0x18(%rsp), %rax movl 0x8(%rax), %ecx movsbl 0x47(%rsp), %r8d callq 0xa06ba0 movl %eax, %ecx movq 0x10(%rsp), %rax movl %ecx, 0xc(%rax) movl 0x24(%rsp), %edi movq 0x50(%rsp), %rsi movq 0x10(%rsp), %rdx movq 0x38(%rsp), %rax movslq 0x24(%rsp), %r8 movl $0x4, %ecx movsbl (%rax,%r8), %r8d callq 0x5be6a0 movl 0x24(%rsp), %eax addl $0x1, %eax movl %eax, 0x24(%rsp) movq %rsp, %rax movq %rax, 0x18(%rsp) movq 0x48(%rsp), %rax movq %rax, 0x10(%rsp) movq 0x18(%rsp), %rax movl (%rax), %ecx movq 0x10(%rsp), %rax movl %ecx, (%rax) movq 0x18(%rsp), %rax movl 0x8(%rax), %ecx movq 0x10(%rsp), %rax movl %ecx, 0x4(%rax) movq 0x18(%rsp), %rax movl 0x4(%rax), %ecx movq 0x10(%rsp), %rax movl %ecx, 0x8(%rax) movq 0x18(%rsp), %rax movl 0xc(%rax), %ecx movq 0x10(%rsp), %rax movl %ecx, 0xc(%rax) movl 0x24(%rsp), %edi movq 0x50(%rsp), %rsi movq 0x10(%rsp), %rdx movq 0x38(%rsp), %rax movslq 0x24(%rsp), %r8 movl $0x4, %ecx movsbl (%rax,%r8), %r8d callq 0x5be6a0 addq $0x58, %rsp retq nop
/m-ab-s[P]aom/av1/encoder/av1_fwd_txfm1d.c
av1_fdct8
// Forward 8-point DCT, reference C implementation.
// Alternates between `output` and the local `step[]` buffer stage by stage;
// av1_range_check_buf() validates each stage's results against
// stage_range[stage].
void av1_fdct8(const int32_t *input, int32_t *output, int8_t cos_bit, const int8_t *stage_range) {
  const int32_t size = 8;
  const int32_t *cospi;
  int32_t stage = 0;
  int32_t *bf0, *bf1;  // bf0 = stage input buffer, bf1 = stage output buffer
  int32_t step[8];     // scratch buffer for intermediate stages

  // stage 0;  (no arithmetic — just range-check the raw input)
  av1_range_check_buf(stage, input, input, size, stage_range[stage]);

  // stage 1;  butterfly: sums in [0..3], differences in [4..7]
  stage++;
  bf1 = output;
  bf1[0] = input[0] + input[7];
  bf1[1] = input[1] + input[6];
  bf1[2] = input[2] + input[5];
  bf1[3] = input[3] + input[4];
  bf1[4] = -input[4] + input[3];
  bf1[5] = -input[5] + input[2];
  bf1[6] = -input[6] + input[1];
  bf1[7] = -input[7] + input[0];
  av1_range_check_buf(stage, input, bf1, size, stage_range[stage]);

  // stage 2: butterfly on [0..3]; [5],[6] rotated by cospi[32], [4],[7] pass through
  stage++;
  cospi = cospi_arr(cos_bit);
  bf0 = output;
  bf1 = step;
  bf1[0] = bf0[0] + bf0[3];
  bf1[1] = bf0[1] + bf0[2];
  bf1[2] = -bf0[2] + bf0[1];
  bf1[3] = -bf0[3] + bf0[0];
  bf1[4] = bf0[4];
  bf1[5] = half_btf(-cospi[32], bf0[5], cospi[32], bf0[6], cos_bit);
  bf1[6] = half_btf(cospi[32], bf0[6], cospi[32], bf0[5], cos_bit);
  bf1[7] = bf0[7];
  av1_range_check_buf(stage, input, bf1, size, stage_range[stage]);

  // stage 3: rotations on [0..3]; butterfly on [4..7]
  stage++;
  cospi = cospi_arr(cos_bit);
  bf0 = step;
  bf1 = output;
  bf1[0] = half_btf(cospi[32], bf0[0], cospi[32], bf0[1], cos_bit);
  bf1[1] = half_btf(-cospi[32], bf0[1], cospi[32], bf0[0], cos_bit);
  bf1[2] = half_btf(cospi[48], bf0[2], cospi[16], bf0[3], cos_bit);
  bf1[3] = half_btf(cospi[48], bf0[3], -cospi[16], bf0[2], cos_bit);
  bf1[4] = bf0[4] + bf0[5];
  bf1[5] = -bf0[5] + bf0[4];
  bf1[6] = -bf0[6] + bf0[7];
  bf1[7] = bf0[7] + bf0[6];
  av1_range_check_buf(stage, input, bf1, size, stage_range[stage]);

  // stage 4: [0..3] pass through; [4..7] rotated by cospi[8]/[24]/[40]/[56]
  stage++;
  cospi = cospi_arr(cos_bit);
  bf0 = output;
  bf1 = step;
  bf1[0] = bf0[0];
  bf1[1] = bf0[1];
  bf1[2] = bf0[2];
  bf1[3] = bf0[3];
  bf1[4] = half_btf(cospi[56], bf0[4], cospi[8], bf0[7], cos_bit);
  bf1[5] = half_btf(cospi[24], bf0[5], cospi[40], bf0[6], cos_bit);
  bf1[6] = half_btf(cospi[24], bf0[6], -cospi[40], bf0[5], cos_bit);
  bf1[7] = half_btf(cospi[56], bf0[7], -cospi[8], bf0[4], cos_bit);
  av1_range_check_buf(stage, input, bf1,
size, stage_range[stage]);

  // stage 5: final coefficient reordering into output
  stage++;
  bf0 = step;
  bf1 = output;
  bf1[0] = bf0[0];
  bf1[1] = bf0[4];
  bf1[2] = bf0[2];
  bf1[3] = bf0[6];
  bf1[4] = bf0[1];
  bf1[5] = bf0[5];
  bf1[6] = bf0[3];
  bf1[7] = bf0[7];
  av1_range_check_buf(stage, input, bf1, size, stage_range[stage]);
}
subq $0x68, %rsp movb %dl, %al movq %rdi, 0x60(%rsp) movq %rsi, 0x58(%rsp) movb %al, 0x57(%rsp) movq %rcx, 0x48(%rsp) movl $0x8, 0x44(%rsp) movl $0x0, 0x34(%rsp) movl 0x34(%rsp), %edi movq 0x60(%rsp), %rsi movq 0x60(%rsp), %rdx movq 0x48(%rsp), %rax movslq 0x34(%rsp), %r8 movl $0x8, %ecx movsbl (%rax,%r8), %r8d callq 0x5be6a0 movl 0x34(%rsp), %eax addl $0x1, %eax movl %eax, 0x34(%rsp) movq 0x58(%rsp), %rax movq %rax, 0x20(%rsp) movq 0x60(%rsp), %rax movl (%rax), %ecx movq 0x60(%rsp), %rax addl 0x1c(%rax), %ecx movq 0x20(%rsp), %rax movl %ecx, (%rax) movq 0x60(%rsp), %rax movl 0x4(%rax), %ecx movq 0x60(%rsp), %rax addl 0x18(%rax), %ecx movq 0x20(%rsp), %rax movl %ecx, 0x4(%rax) movq 0x60(%rsp), %rax movl 0x8(%rax), %ecx movq 0x60(%rsp), %rax addl 0x14(%rax), %ecx movq 0x20(%rsp), %rax movl %ecx, 0x8(%rax) movq 0x60(%rsp), %rax movl 0xc(%rax), %ecx movq 0x60(%rsp), %rax addl 0x10(%rax), %ecx movq 0x20(%rsp), %rax movl %ecx, 0xc(%rax) movq 0x60(%rsp), %rax xorl %ecx, %ecx subl 0x10(%rax), %ecx movq 0x60(%rsp), %rax addl 0xc(%rax), %ecx movq 0x20(%rsp), %rax movl %ecx, 0x10(%rax) movq 0x60(%rsp), %rax xorl %ecx, %ecx subl 0x14(%rax), %ecx movq 0x60(%rsp), %rax addl 0x8(%rax), %ecx movq 0x20(%rsp), %rax movl %ecx, 0x14(%rax) movq 0x60(%rsp), %rax xorl %ecx, %ecx subl 0x18(%rax), %ecx movq 0x60(%rsp), %rax addl 0x4(%rax), %ecx movq 0x20(%rsp), %rax movl %ecx, 0x18(%rax) movq 0x60(%rsp), %rax xorl %ecx, %ecx subl 0x1c(%rax), %ecx movq 0x60(%rsp), %rax addl (%rax), %ecx movq 0x20(%rsp), %rax movl %ecx, 0x1c(%rax) movl 0x34(%rsp), %edi movq 0x60(%rsp), %rsi movq 0x20(%rsp), %rdx movq 0x48(%rsp), %rax movslq 0x34(%rsp), %r8 movl $0x8, %ecx movsbl (%rax,%r8), %r8d callq 0x5be6a0 movl 0x34(%rsp), %eax addl $0x1, %eax movl %eax, 0x34(%rsp) movsbl 0x57(%rsp), %edi callq 0xa06b80 movq %rax, 0x38(%rsp) movq 0x58(%rsp), %rax movq %rax, 0x28(%rsp) movq %rsp, %rax movq %rax, 0x20(%rsp) movq 0x28(%rsp), %rax movl (%rax), %ecx movq 0x28(%rsp), %rax addl 0xc(%rax), %ecx movq 0x20(%rsp), 
%rax movl %ecx, (%rax) movq 0x28(%rsp), %rax movl 0x4(%rax), %ecx movq 0x28(%rsp), %rax addl 0x8(%rax), %ecx movq 0x20(%rsp), %rax movl %ecx, 0x4(%rax) movq 0x28(%rsp), %rax xorl %ecx, %ecx subl 0x8(%rax), %ecx movq 0x28(%rsp), %rax addl 0x4(%rax), %ecx movq 0x20(%rsp), %rax movl %ecx, 0x8(%rax) movq 0x28(%rsp), %rax xorl %ecx, %ecx subl 0xc(%rax), %ecx movq 0x28(%rsp), %rax addl (%rax), %ecx movq 0x20(%rsp), %rax movl %ecx, 0xc(%rax) movq 0x28(%rsp), %rax movl 0x10(%rax), %ecx movq 0x20(%rsp), %rax movl %ecx, 0x10(%rax) movq 0x38(%rsp), %rax xorl %edi, %edi subl 0x80(%rax), %edi movq 0x28(%rsp), %rax movl 0x14(%rax), %esi movq 0x38(%rsp), %rax movl 0x80(%rax), %edx movq 0x28(%rsp), %rax movl 0x18(%rax), %ecx movsbl 0x57(%rsp), %r8d callq 0xa06ba0 movl %eax, %ecx movq 0x20(%rsp), %rax movl %ecx, 0x14(%rax) movq 0x38(%rsp), %rax movl 0x80(%rax), %edi movq 0x28(%rsp), %rax movl 0x18(%rax), %esi movq 0x38(%rsp), %rax movl 0x80(%rax), %edx movq 0x28(%rsp), %rax movl 0x14(%rax), %ecx movsbl 0x57(%rsp), %r8d callq 0xa06ba0 movl %eax, %ecx movq 0x20(%rsp), %rax movl %ecx, 0x18(%rax) movq 0x28(%rsp), %rax movl 0x1c(%rax), %ecx movq 0x20(%rsp), %rax movl %ecx, 0x1c(%rax) movl 0x34(%rsp), %edi movq 0x60(%rsp), %rsi movq 0x20(%rsp), %rdx movq 0x48(%rsp), %rax movslq 0x34(%rsp), %r8 movl $0x8, %ecx movsbl (%rax,%r8), %r8d callq 0x5be6a0 movl 0x34(%rsp), %eax addl $0x1, %eax movl %eax, 0x34(%rsp) movsbl 0x57(%rsp), %edi callq 0xa06b80 movq %rax, 0x38(%rsp) movq %rsp, %rax movq %rax, 0x28(%rsp) movq 0x58(%rsp), %rax movq %rax, 0x20(%rsp) movq 0x38(%rsp), %rax movl 0x80(%rax), %edi movq 0x28(%rsp), %rax movl (%rax), %esi movq 0x38(%rsp), %rax movl 0x80(%rax), %edx movq 0x28(%rsp), %rax movl 0x4(%rax), %ecx movsbl 0x57(%rsp), %r8d callq 0xa06ba0 movl %eax, %ecx movq 0x20(%rsp), %rax movl %ecx, (%rax) movq 0x38(%rsp), %rax xorl %edi, %edi subl 0x80(%rax), %edi movq 0x28(%rsp), %rax movl 0x4(%rax), %esi movq 0x38(%rsp), %rax movl 0x80(%rax), %edx movq 0x28(%rsp), %rax movl (%rax), 
%ecx movsbl 0x57(%rsp), %r8d callq 0xa06ba0 movl %eax, %ecx movq 0x20(%rsp), %rax movl %ecx, 0x4(%rax) movq 0x38(%rsp), %rax movl 0xc0(%rax), %edi movq 0x28(%rsp), %rax movl 0x8(%rax), %esi movq 0x38(%rsp), %rax movl 0x40(%rax), %edx movq 0x28(%rsp), %rax movl 0xc(%rax), %ecx movsbl 0x57(%rsp), %r8d callq 0xa06ba0 movl %eax, %ecx movq 0x20(%rsp), %rax movl %ecx, 0x8(%rax) movq 0x38(%rsp), %rax movl 0xc0(%rax), %edi movq 0x28(%rsp), %rax movl 0xc(%rax), %esi movq 0x38(%rsp), %rax xorl %edx, %edx subl 0x40(%rax), %edx movq 0x28(%rsp), %rax movl 0x8(%rax), %ecx movsbl 0x57(%rsp), %r8d callq 0xa06ba0 movl %eax, %ecx movq 0x20(%rsp), %rax movl %ecx, 0xc(%rax) movq 0x28(%rsp), %rax movl 0x10(%rax), %ecx movq 0x28(%rsp), %rax addl 0x14(%rax), %ecx movq 0x20(%rsp), %rax movl %ecx, 0x10(%rax) movq 0x28(%rsp), %rax xorl %ecx, %ecx subl 0x14(%rax), %ecx movq 0x28(%rsp), %rax addl 0x10(%rax), %ecx movq 0x20(%rsp), %rax movl %ecx, 0x14(%rax) movq 0x28(%rsp), %rax xorl %ecx, %ecx subl 0x18(%rax), %ecx movq 0x28(%rsp), %rax addl 0x1c(%rax), %ecx movq 0x20(%rsp), %rax movl %ecx, 0x18(%rax) movq 0x28(%rsp), %rax movl 0x1c(%rax), %ecx movq 0x28(%rsp), %rax addl 0x18(%rax), %ecx movq 0x20(%rsp), %rax movl %ecx, 0x1c(%rax) movl 0x34(%rsp), %edi movq 0x60(%rsp), %rsi movq 0x20(%rsp), %rdx movq 0x48(%rsp), %rax movslq 0x34(%rsp), %r8 movl $0x8, %ecx movsbl (%rax,%r8), %r8d callq 0x5be6a0 movl 0x34(%rsp), %eax addl $0x1, %eax movl %eax, 0x34(%rsp) movsbl 0x57(%rsp), %edi callq 0xa06b80 movq %rax, 0x38(%rsp) movq 0x58(%rsp), %rax movq %rax, 0x28(%rsp) movq %rsp, %rax movq %rax, 0x20(%rsp) movq 0x28(%rsp), %rax movl (%rax), %ecx movq 0x20(%rsp), %rax movl %ecx, (%rax) movq 0x28(%rsp), %rax movl 0x4(%rax), %ecx movq 0x20(%rsp), %rax movl %ecx, 0x4(%rax) movq 0x28(%rsp), %rax movl 0x8(%rax), %ecx movq 0x20(%rsp), %rax movl %ecx, 0x8(%rax) movq 0x28(%rsp), %rax movl 0xc(%rax), %ecx movq 0x20(%rsp), %rax movl %ecx, 0xc(%rax) movq 0x38(%rsp), %rax movl 0xe0(%rax), %edi movq 0x28(%rsp), %rax 
movl 0x10(%rax), %esi movq 0x38(%rsp), %rax movl 0x20(%rax), %edx movq 0x28(%rsp), %rax movl 0x1c(%rax), %ecx movsbl 0x57(%rsp), %r8d callq 0xa06ba0 movl %eax, %ecx movq 0x20(%rsp), %rax movl %ecx, 0x10(%rax) movq 0x38(%rsp), %rax movl 0x60(%rax), %edi movq 0x28(%rsp), %rax movl 0x14(%rax), %esi movq 0x38(%rsp), %rax movl 0xa0(%rax), %edx movq 0x28(%rsp), %rax movl 0x18(%rax), %ecx movsbl 0x57(%rsp), %r8d callq 0xa06ba0 movl %eax, %ecx movq 0x20(%rsp), %rax movl %ecx, 0x14(%rax) movq 0x38(%rsp), %rax movl 0x60(%rax), %edi movq 0x28(%rsp), %rax movl 0x18(%rax), %esi movq 0x38(%rsp), %rax xorl %edx, %edx subl 0xa0(%rax), %edx movq 0x28(%rsp), %rax movl 0x14(%rax), %ecx movsbl 0x57(%rsp), %r8d callq 0xa06ba0 movl %eax, %ecx movq 0x20(%rsp), %rax movl %ecx, 0x18(%rax) movq 0x38(%rsp), %rax movl 0xe0(%rax), %edi movq 0x28(%rsp), %rax movl 0x1c(%rax), %esi movq 0x38(%rsp), %rax xorl %edx, %edx subl 0x20(%rax), %edx movq 0x28(%rsp), %rax movl 0x10(%rax), %ecx movsbl 0x57(%rsp), %r8d callq 0xa06ba0 movl %eax, %ecx movq 0x20(%rsp), %rax movl %ecx, 0x1c(%rax) movl 0x34(%rsp), %edi movq 0x60(%rsp), %rsi movq 0x20(%rsp), %rdx movq 0x48(%rsp), %rax movslq 0x34(%rsp), %r8 movl $0x8, %ecx movsbl (%rax,%r8), %r8d callq 0x5be6a0 movl 0x34(%rsp), %eax addl $0x1, %eax movl %eax, 0x34(%rsp) movq %rsp, %rax movq %rax, 0x28(%rsp) movq 0x58(%rsp), %rax movq %rax, 0x20(%rsp) movq 0x28(%rsp), %rax movl (%rax), %ecx movq 0x20(%rsp), %rax movl %ecx, (%rax) movq 0x28(%rsp), %rax movl 0x10(%rax), %ecx movq 0x20(%rsp), %rax movl %ecx, 0x4(%rax) movq 0x28(%rsp), %rax movl 0x8(%rax), %ecx movq 0x20(%rsp), %rax movl %ecx, 0x8(%rax) movq 0x28(%rsp), %rax movl 0x18(%rax), %ecx movq 0x20(%rsp), %rax movl %ecx, 0xc(%rax) movq 0x28(%rsp), %rax movl 0x4(%rax), %ecx movq 0x20(%rsp), %rax movl %ecx, 0x10(%rax) movq 0x28(%rsp), %rax movl 0x14(%rax), %ecx movq 0x20(%rsp), %rax movl %ecx, 0x14(%rax) movq 0x28(%rsp), %rax movl 0xc(%rax), %ecx movq 0x20(%rsp), %rax movl %ecx, 0x18(%rax) movq 0x28(%rsp), %rax 
movl 0x1c(%rax), %ecx movq 0x20(%rsp), %rax movl %ecx, 0x1c(%rax) movl 0x34(%rsp), %edi movq 0x60(%rsp), %rsi movq 0x20(%rsp), %rdx movq 0x48(%rsp), %rax movslq 0x34(%rsp), %r8 movl $0x8, %ecx movsbl (%rax,%r8), %r8d callq 0x5be6a0 addq $0x68, %rsp retq nopw (%rax,%rax)
/m-ab-s[P]aom/av1/encoder/av1_fwd_txfm1d.c
av1_fdct16
// Forward 16-point 1-D DCT in fixed-point arithmetic.
//
// The transform runs as seven butterfly stages. Intermediate results
// ping-pong between the caller's `output` buffer and the local `step`
// buffer; the final stage writes the coefficients into `output` in
// bit-reversed index order (0, 8, 4, 12, 2, 10, 6, 14, ...).
//
// input       : the 16 input samples (read-only).
// output      : receives the 16 coefficients; also used as scratch between
//               stages. NOTE(review): stage 1 reads `input` while writing
//               `output`, so this looks like it assumes output != input —
//               confirm against callers (av1_iadst16 asserts this explicitly).
// cos_bit     : bit precision of the cospi[] constants fed to half_btf().
// stage_range : per-stage allowed bit ranges, checked after every stage by
//               av1_range_check_buf().
void av1_fdct16(const int32_t *input, int32_t *output, int8_t cos_bit,
                const int8_t *stage_range) {
  const int32_t size = 16;
  const int32_t *cospi;

  int32_t stage = 0;
  int32_t *bf0, *bf1;
  int32_t step[16];

  // stage 0;
  av1_range_check_buf(stage, input, input, size, stage_range[stage]);

  // stage 1: 16-point butterflies — mirrored sums land in [0..7],
  // mirrored differences in [8..15].
  stage++;
  bf1 = output;
  bf1[0] = input[0] + input[15];
  bf1[1] = input[1] + input[14];
  bf1[2] = input[2] + input[13];
  bf1[3] = input[3] + input[12];
  bf1[4] = input[4] + input[11];
  bf1[5] = input[5] + input[10];
  bf1[6] = input[6] + input[9];
  bf1[7] = input[7] + input[8];
  bf1[8] = -input[8] + input[7];
  bf1[9] = -input[9] + input[6];
  bf1[10] = -input[10] + input[5];
  bf1[11] = -input[11] + input[4];
  bf1[12] = -input[12] + input[3];
  bf1[13] = -input[13] + input[2];
  bf1[14] = -input[14] + input[1];
  bf1[15] = -input[15] + input[0];
  av1_range_check_buf(stage, input, bf1, size, stage_range[stage]);

  // stage 2: 8-point butterflies on the low half; half_btf() rotations on
  // the middle of the high half, pass-through elsewhere.
  stage++;
  cospi = cospi_arr(cos_bit);
  bf0 = output;
  bf1 = step;
  bf1[0] = bf0[0] + bf0[7];
  bf1[1] = bf0[1] + bf0[6];
  bf1[2] = bf0[2] + bf0[5];
  bf1[3] = bf0[3] + bf0[4];
  bf1[4] = -bf0[4] + bf0[3];
  bf1[5] = -bf0[5] + bf0[2];
  bf1[6] = -bf0[6] + bf0[1];
  bf1[7] = -bf0[7] + bf0[0];
  bf1[8] = bf0[8];
  bf1[9] = bf0[9];
  bf1[10] = half_btf(-cospi[32], bf0[10], cospi[32], bf0[13], cos_bit);
  bf1[11] = half_btf(-cospi[32], bf0[11], cospi[32], bf0[12], cos_bit);
  bf1[12] = half_btf(cospi[32], bf0[12], cospi[32], bf0[11], cos_bit);
  bf1[13] = half_btf(cospi[32], bf0[13], cospi[32], bf0[10], cos_bit);
  bf1[14] = bf0[14];
  bf1[15] = bf0[15];
  av1_range_check_buf(stage, input, bf1, size, stage_range[stage]);

  // stage 3: 4-point butterflies on [0..3], a rotation pair on [5..6],
  // add/sub butterflies across the high half.
  stage++;
  cospi = cospi_arr(cos_bit);
  bf0 = step;
  bf1 = output;
  bf1[0] = bf0[0] + bf0[3];
  bf1[1] = bf0[1] + bf0[2];
  bf1[2] = -bf0[2] + bf0[1];
  bf1[3] = -bf0[3] + bf0[0];
  bf1[4] = bf0[4];
  bf1[5] = half_btf(-cospi[32], bf0[5], cospi[32], bf0[6], cos_bit);
  bf1[6] = half_btf(cospi[32], bf0[6], cospi[32], bf0[5], cos_bit);
  bf1[7] = bf0[7];
  bf1[8] = bf0[8] + bf0[11];
  bf1[9] = bf0[9] + bf0[10];
  bf1[10] = -bf0[10] + bf0[9];
  bf1[11] = -bf0[11] + bf0[8];
  bf1[12] = -bf0[12] + bf0[15];
  bf1[13] = -bf0[13] + bf0[14];
  bf1[14] = bf0[14] + bf0[13];
  bf1[15] = bf0[15] + bf0[12];
  av1_range_check_buf(stage, input, bf1, size, stage_range[stage]);

  // stage 4: final rotations for outputs 0-3; butterflies on [4..7];
  // cross rotations between [9,10] and [13,14].
  stage++;
  cospi = cospi_arr(cos_bit);
  bf0 = output;
  bf1 = step;
  bf1[0] = half_btf(cospi[32], bf0[0], cospi[32], bf0[1], cos_bit);
  bf1[1] = half_btf(-cospi[32], bf0[1], cospi[32], bf0[0], cos_bit);
  bf1[2] = half_btf(cospi[48], bf0[2], cospi[16], bf0[3], cos_bit);
  bf1[3] = half_btf(cospi[48], bf0[3], -cospi[16], bf0[2], cos_bit);
  bf1[4] = bf0[4] + bf0[5];
  bf1[5] = -bf0[5] + bf0[4];
  bf1[6] = -bf0[6] + bf0[7];
  bf1[7] = bf0[7] + bf0[6];
  bf1[8] = bf0[8];
  bf1[9] = half_btf(-cospi[16], bf0[9], cospi[48], bf0[14], cos_bit);
  bf1[10] = half_btf(-cospi[48], bf0[10], -cospi[16], bf0[13], cos_bit);
  bf1[11] = bf0[11];
  bf1[12] = bf0[12];
  bf1[13] = half_btf(cospi[48], bf0[13], -cospi[16], bf0[10], cos_bit);
  bf1[14] = half_btf(cospi[16], bf0[14], cospi[48], bf0[9], cos_bit);
  bf1[15] = bf0[15];
  av1_range_check_buf(stage, input, bf1, size, stage_range[stage]);

  // stage 5: rotations producing outputs 4-7; last butterflies on the
  // high half.
  stage++;
  cospi = cospi_arr(cos_bit);
  bf0 = step;
  bf1 = output;
  bf1[0] = bf0[0];
  bf1[1] = bf0[1];
  bf1[2] = bf0[2];
  bf1[3] = bf0[3];
  bf1[4] = half_btf(cospi[56], bf0[4], cospi[8], bf0[7], cos_bit);
  bf1[5] = half_btf(cospi[24], bf0[5], cospi[40], bf0[6], cos_bit);
  bf1[6] = half_btf(cospi[24], bf0[6], -cospi[40], bf0[5], cos_bit);
  bf1[7] = half_btf(cospi[56], bf0[7], -cospi[8], bf0[4], cos_bit);
  bf1[8] = bf0[8] + bf0[9];
  bf1[9] = -bf0[9] + bf0[8];
  bf1[10] = -bf0[10] + bf0[11];
  bf1[11] = bf0[11] + bf0[10];
  bf1[12] = bf0[12] + bf0[13];
  bf1[13] = -bf0[13] + bf0[12];
  bf1[14] = -bf0[14] + bf0[15];
  bf1[15] = bf0[15] + bf0[14];
  av1_range_check_buf(stage, input, bf1, size, stage_range[stage]);

  // stage 6: final rotations producing the odd-frequency outputs 8-15.
  stage++;
  cospi = cospi_arr(cos_bit);
  bf0 = output;
  bf1 = step;
  bf1[0] = bf0[0];
  bf1[1] = bf0[1];
  bf1[2] = bf0[2];
  bf1[3] = bf0[3];
  bf1[4] = bf0[4];
  bf1[5] = bf0[5];
  bf1[6] = bf0[6];
  bf1[7] = bf0[7];
  bf1[8] = half_btf(cospi[60], bf0[8], cospi[4], bf0[15], cos_bit);
  bf1[9] = half_btf(cospi[28], bf0[9], cospi[36], bf0[14], cos_bit);
  bf1[10] = half_btf(cospi[44], bf0[10], cospi[20], bf0[13], cos_bit);
  bf1[11] = half_btf(cospi[12], bf0[11], cospi[52], bf0[12], cos_bit);
  bf1[12] = half_btf(cospi[12], bf0[12], -cospi[52], bf0[11], cos_bit);
  bf1[13] = half_btf(cospi[44], bf0[13], -cospi[20], bf0[10], cos_bit);
  bf1[14] = half_btf(cospi[28], bf0[14], -cospi[36], bf0[9], cos_bit);
  bf1[15] = half_btf(cospi[60], bf0[15], -cospi[4], bf0[8], cos_bit);
  av1_range_check_buf(stage, input, bf1, size, stage_range[stage]);

  // stage 7: write the result to `output` in bit-reversed index order.
  stage++;
  bf0 = step;
  bf1 = output;
  bf1[0] = bf0[0];
  bf1[1] = bf0[8];
  bf1[2] = bf0[4];
  bf1[3] = bf0[12];
  bf1[4] = bf0[2];
  bf1[5] = bf0[10];
  bf1[6] = bf0[6];
  bf1[7] = bf0[14];
  bf1[8] = bf0[1];
  bf1[9] = bf0[9];
  bf1[10] = bf0[5];
  bf1[11] = bf0[13];
  bf1[12] = bf0[3];
  bf1[13] = bf0[11];
  bf1[14] = bf0[7];
  bf1[15] = bf0[15];
  av1_range_check_buf(stage, input, bf1, size, stage_range[stage]);
}
subq $0x88, %rsp movb %dl, %al movq %rdi, 0x80(%rsp) movq %rsi, 0x78(%rsp) movb %al, 0x77(%rsp) movq %rcx, 0x68(%rsp) movl $0x10, 0x64(%rsp) movl $0x0, 0x54(%rsp) movl 0x54(%rsp), %edi movq 0x80(%rsp), %rsi movq 0x80(%rsp), %rdx movq 0x68(%rsp), %rax movslq 0x54(%rsp), %r8 movl $0x10, %ecx movsbl (%rax,%r8), %r8d callq 0x5be6a0 movl 0x54(%rsp), %eax addl $0x1, %eax movl %eax, 0x54(%rsp) movq 0x78(%rsp), %rax movq %rax, 0x40(%rsp) movq 0x80(%rsp), %rax movl (%rax), %ecx movq 0x80(%rsp), %rax addl 0x3c(%rax), %ecx movq 0x40(%rsp), %rax movl %ecx, (%rax) movq 0x80(%rsp), %rax movl 0x4(%rax), %ecx movq 0x80(%rsp), %rax addl 0x38(%rax), %ecx movq 0x40(%rsp), %rax movl %ecx, 0x4(%rax) movq 0x80(%rsp), %rax movl 0x8(%rax), %ecx movq 0x80(%rsp), %rax addl 0x34(%rax), %ecx movq 0x40(%rsp), %rax movl %ecx, 0x8(%rax) movq 0x80(%rsp), %rax movl 0xc(%rax), %ecx movq 0x80(%rsp), %rax addl 0x30(%rax), %ecx movq 0x40(%rsp), %rax movl %ecx, 0xc(%rax) movq 0x80(%rsp), %rax movl 0x10(%rax), %ecx movq 0x80(%rsp), %rax addl 0x2c(%rax), %ecx movq 0x40(%rsp), %rax movl %ecx, 0x10(%rax) movq 0x80(%rsp), %rax movl 0x14(%rax), %ecx movq 0x80(%rsp), %rax addl 0x28(%rax), %ecx movq 0x40(%rsp), %rax movl %ecx, 0x14(%rax) movq 0x80(%rsp), %rax movl 0x18(%rax), %ecx movq 0x80(%rsp), %rax addl 0x24(%rax), %ecx movq 0x40(%rsp), %rax movl %ecx, 0x18(%rax) movq 0x80(%rsp), %rax movl 0x1c(%rax), %ecx movq 0x80(%rsp), %rax addl 0x20(%rax), %ecx movq 0x40(%rsp), %rax movl %ecx, 0x1c(%rax) movq 0x80(%rsp), %rax xorl %ecx, %ecx subl 0x20(%rax), %ecx movq 0x80(%rsp), %rax addl 0x1c(%rax), %ecx movq 0x40(%rsp), %rax movl %ecx, 0x20(%rax) movq 0x80(%rsp), %rax xorl %ecx, %ecx subl 0x24(%rax), %ecx movq 0x80(%rsp), %rax addl 0x18(%rax), %ecx movq 0x40(%rsp), %rax movl %ecx, 0x24(%rax) movq 0x80(%rsp), %rax xorl %ecx, %ecx subl 0x28(%rax), %ecx movq 0x80(%rsp), %rax addl 0x14(%rax), %ecx movq 0x40(%rsp), %rax movl %ecx, 0x28(%rax) movq 0x80(%rsp), %rax xorl %ecx, %ecx subl 0x2c(%rax), %ecx movq 0x80(%rsp), 
%rax addl 0x10(%rax), %ecx movq 0x40(%rsp), %rax movl %ecx, 0x2c(%rax) movq 0x80(%rsp), %rax xorl %ecx, %ecx subl 0x30(%rax), %ecx movq 0x80(%rsp), %rax addl 0xc(%rax), %ecx movq 0x40(%rsp), %rax movl %ecx, 0x30(%rax) movq 0x80(%rsp), %rax xorl %ecx, %ecx subl 0x34(%rax), %ecx movq 0x80(%rsp), %rax addl 0x8(%rax), %ecx movq 0x40(%rsp), %rax movl %ecx, 0x34(%rax) movq 0x80(%rsp), %rax xorl %ecx, %ecx subl 0x38(%rax), %ecx movq 0x80(%rsp), %rax addl 0x4(%rax), %ecx movq 0x40(%rsp), %rax movl %ecx, 0x38(%rax) movq 0x80(%rsp), %rax xorl %ecx, %ecx subl 0x3c(%rax), %ecx movq 0x80(%rsp), %rax addl (%rax), %ecx movq 0x40(%rsp), %rax movl %ecx, 0x3c(%rax) movl 0x54(%rsp), %edi movq 0x80(%rsp), %rsi movq 0x40(%rsp), %rdx movq 0x68(%rsp), %rax movslq 0x54(%rsp), %r8 movl $0x10, %ecx movsbl (%rax,%r8), %r8d callq 0x5be6a0 movl 0x54(%rsp), %eax addl $0x1, %eax movl %eax, 0x54(%rsp) movsbl 0x77(%rsp), %edi callq 0xa06b80 movq %rax, 0x58(%rsp) movq 0x78(%rsp), %rax movq %rax, 0x48(%rsp) movq %rsp, %rax movq %rax, 0x40(%rsp) movq 0x48(%rsp), %rax movl (%rax), %ecx movq 0x48(%rsp), %rax addl 0x1c(%rax), %ecx movq 0x40(%rsp), %rax movl %ecx, (%rax) movq 0x48(%rsp), %rax movl 0x4(%rax), %ecx movq 0x48(%rsp), %rax addl 0x18(%rax), %ecx movq 0x40(%rsp), %rax movl %ecx, 0x4(%rax) movq 0x48(%rsp), %rax movl 0x8(%rax), %ecx movq 0x48(%rsp), %rax addl 0x14(%rax), %ecx movq 0x40(%rsp), %rax movl %ecx, 0x8(%rax) movq 0x48(%rsp), %rax movl 0xc(%rax), %ecx movq 0x48(%rsp), %rax addl 0x10(%rax), %ecx movq 0x40(%rsp), %rax movl %ecx, 0xc(%rax) movq 0x48(%rsp), %rax xorl %ecx, %ecx subl 0x10(%rax), %ecx movq 0x48(%rsp), %rax addl 0xc(%rax), %ecx movq 0x40(%rsp), %rax movl %ecx, 0x10(%rax) movq 0x48(%rsp), %rax xorl %ecx, %ecx subl 0x14(%rax), %ecx movq 0x48(%rsp), %rax addl 0x8(%rax), %ecx movq 0x40(%rsp), %rax movl %ecx, 0x14(%rax) movq 0x48(%rsp), %rax xorl %ecx, %ecx subl 0x18(%rax), %ecx movq 0x48(%rsp), %rax addl 0x4(%rax), %ecx movq 0x40(%rsp), %rax movl %ecx, 0x18(%rax) movq 0x48(%rsp), 
%rax xorl %ecx, %ecx subl 0x1c(%rax), %ecx movq 0x48(%rsp), %rax addl (%rax), %ecx movq 0x40(%rsp), %rax movl %ecx, 0x1c(%rax) movq 0x48(%rsp), %rax movl 0x20(%rax), %ecx movq 0x40(%rsp), %rax movl %ecx, 0x20(%rax) movq 0x48(%rsp), %rax movl 0x24(%rax), %ecx movq 0x40(%rsp), %rax movl %ecx, 0x24(%rax) movq 0x58(%rsp), %rax xorl %edi, %edi subl 0x80(%rax), %edi movq 0x48(%rsp), %rax movl 0x28(%rax), %esi movq 0x58(%rsp), %rax movl 0x80(%rax), %edx movq 0x48(%rsp), %rax movl 0x34(%rax), %ecx movsbl 0x77(%rsp), %r8d callq 0xa06ba0 movl %eax, %ecx movq 0x40(%rsp), %rax movl %ecx, 0x28(%rax) movq 0x58(%rsp), %rax xorl %edi, %edi subl 0x80(%rax), %edi movq 0x48(%rsp), %rax movl 0x2c(%rax), %esi movq 0x58(%rsp), %rax movl 0x80(%rax), %edx movq 0x48(%rsp), %rax movl 0x30(%rax), %ecx movsbl 0x77(%rsp), %r8d callq 0xa06ba0 movl %eax, %ecx movq 0x40(%rsp), %rax movl %ecx, 0x2c(%rax) movq 0x58(%rsp), %rax movl 0x80(%rax), %edi movq 0x48(%rsp), %rax movl 0x30(%rax), %esi movq 0x58(%rsp), %rax movl 0x80(%rax), %edx movq 0x48(%rsp), %rax movl 0x2c(%rax), %ecx movsbl 0x77(%rsp), %r8d callq 0xa06ba0 movl %eax, %ecx movq 0x40(%rsp), %rax movl %ecx, 0x30(%rax) movq 0x58(%rsp), %rax movl 0x80(%rax), %edi movq 0x48(%rsp), %rax movl 0x34(%rax), %esi movq 0x58(%rsp), %rax movl 0x80(%rax), %edx movq 0x48(%rsp), %rax movl 0x28(%rax), %ecx movsbl 0x77(%rsp), %r8d callq 0xa06ba0 movl %eax, %ecx movq 0x40(%rsp), %rax movl %ecx, 0x34(%rax) movq 0x48(%rsp), %rax movl 0x38(%rax), %ecx movq 0x40(%rsp), %rax movl %ecx, 0x38(%rax) movq 0x48(%rsp), %rax movl 0x3c(%rax), %ecx movq 0x40(%rsp), %rax movl %ecx, 0x3c(%rax) movl 0x54(%rsp), %edi movq 0x80(%rsp), %rsi movq 0x40(%rsp), %rdx movq 0x68(%rsp), %rax movslq 0x54(%rsp), %r8 movl $0x10, %ecx movsbl (%rax,%r8), %r8d callq 0x5be6a0 movl 0x54(%rsp), %eax addl $0x1, %eax movl %eax, 0x54(%rsp) movsbl 0x77(%rsp), %edi callq 0xa06b80 movq %rax, 0x58(%rsp) movq %rsp, %rax movq %rax, 0x48(%rsp) movq 0x78(%rsp), %rax movq %rax, 0x40(%rsp) movq 0x48(%rsp), 
%rax movl (%rax), %ecx movq 0x48(%rsp), %rax addl 0xc(%rax), %ecx movq 0x40(%rsp), %rax movl %ecx, (%rax) movq 0x48(%rsp), %rax movl 0x4(%rax), %ecx movq 0x48(%rsp), %rax addl 0x8(%rax), %ecx movq 0x40(%rsp), %rax movl %ecx, 0x4(%rax) movq 0x48(%rsp), %rax xorl %ecx, %ecx subl 0x8(%rax), %ecx movq 0x48(%rsp), %rax addl 0x4(%rax), %ecx movq 0x40(%rsp), %rax movl %ecx, 0x8(%rax) movq 0x48(%rsp), %rax xorl %ecx, %ecx subl 0xc(%rax), %ecx movq 0x48(%rsp), %rax addl (%rax), %ecx movq 0x40(%rsp), %rax movl %ecx, 0xc(%rax) movq 0x48(%rsp), %rax movl 0x10(%rax), %ecx movq 0x40(%rsp), %rax movl %ecx, 0x10(%rax) movq 0x58(%rsp), %rax xorl %edi, %edi subl 0x80(%rax), %edi movq 0x48(%rsp), %rax movl 0x14(%rax), %esi movq 0x58(%rsp), %rax movl 0x80(%rax), %edx movq 0x48(%rsp), %rax movl 0x18(%rax), %ecx movsbl 0x77(%rsp), %r8d callq 0xa06ba0 movl %eax, %ecx movq 0x40(%rsp), %rax movl %ecx, 0x14(%rax) movq 0x58(%rsp), %rax movl 0x80(%rax), %edi movq 0x48(%rsp), %rax movl 0x18(%rax), %esi movq 0x58(%rsp), %rax movl 0x80(%rax), %edx movq 0x48(%rsp), %rax movl 0x14(%rax), %ecx movsbl 0x77(%rsp), %r8d callq 0xa06ba0 movl %eax, %ecx movq 0x40(%rsp), %rax movl %ecx, 0x18(%rax) movq 0x48(%rsp), %rax movl 0x1c(%rax), %ecx movq 0x40(%rsp), %rax movl %ecx, 0x1c(%rax) movq 0x48(%rsp), %rax movl 0x20(%rax), %ecx movq 0x48(%rsp), %rax addl 0x2c(%rax), %ecx movq 0x40(%rsp), %rax movl %ecx, 0x20(%rax) movq 0x48(%rsp), %rax movl 0x24(%rax), %ecx movq 0x48(%rsp), %rax addl 0x28(%rax), %ecx movq 0x40(%rsp), %rax movl %ecx, 0x24(%rax) movq 0x48(%rsp), %rax xorl %ecx, %ecx subl 0x28(%rax), %ecx movq 0x48(%rsp), %rax addl 0x24(%rax), %ecx movq 0x40(%rsp), %rax movl %ecx, 0x28(%rax) movq 0x48(%rsp), %rax xorl %ecx, %ecx subl 0x2c(%rax), %ecx movq 0x48(%rsp), %rax addl 0x20(%rax), %ecx movq 0x40(%rsp), %rax movl %ecx, 0x2c(%rax) movq 0x48(%rsp), %rax xorl %ecx, %ecx subl 0x30(%rax), %ecx movq 0x48(%rsp), %rax addl 0x3c(%rax), %ecx movq 0x40(%rsp), %rax movl %ecx, 0x30(%rax) movq 0x48(%rsp), %rax xorl 
%ecx, %ecx subl 0x34(%rax), %ecx movq 0x48(%rsp), %rax addl 0x38(%rax), %ecx movq 0x40(%rsp), %rax movl %ecx, 0x34(%rax) movq 0x48(%rsp), %rax movl 0x38(%rax), %ecx movq 0x48(%rsp), %rax addl 0x34(%rax), %ecx movq 0x40(%rsp), %rax movl %ecx, 0x38(%rax) movq 0x48(%rsp), %rax movl 0x3c(%rax), %ecx movq 0x48(%rsp), %rax addl 0x30(%rax), %ecx movq 0x40(%rsp), %rax movl %ecx, 0x3c(%rax) movl 0x54(%rsp), %edi movq 0x80(%rsp), %rsi movq 0x40(%rsp), %rdx movq 0x68(%rsp), %rax movslq 0x54(%rsp), %r8 movl $0x10, %ecx movsbl (%rax,%r8), %r8d callq 0x5be6a0 movl 0x54(%rsp), %eax addl $0x1, %eax movl %eax, 0x54(%rsp) movsbl 0x77(%rsp), %edi callq 0xa06b80 movq %rax, 0x58(%rsp) movq 0x78(%rsp), %rax movq %rax, 0x48(%rsp) movq %rsp, %rax movq %rax, 0x40(%rsp) movq 0x58(%rsp), %rax movl 0x80(%rax), %edi movq 0x48(%rsp), %rax movl (%rax), %esi movq 0x58(%rsp), %rax movl 0x80(%rax), %edx movq 0x48(%rsp), %rax movl 0x4(%rax), %ecx movsbl 0x77(%rsp), %r8d callq 0xa06ba0 movl %eax, %ecx movq 0x40(%rsp), %rax movl %ecx, (%rax) movq 0x58(%rsp), %rax xorl %edi, %edi subl 0x80(%rax), %edi movq 0x48(%rsp), %rax movl 0x4(%rax), %esi movq 0x58(%rsp), %rax movl 0x80(%rax), %edx movq 0x48(%rsp), %rax movl (%rax), %ecx movsbl 0x77(%rsp), %r8d callq 0xa06ba0 movl %eax, %ecx movq 0x40(%rsp), %rax movl %ecx, 0x4(%rax) movq 0x58(%rsp), %rax movl 0xc0(%rax), %edi movq 0x48(%rsp), %rax movl 0x8(%rax), %esi movq 0x58(%rsp), %rax movl 0x40(%rax), %edx movq 0x48(%rsp), %rax movl 0xc(%rax), %ecx movsbl 0x77(%rsp), %r8d callq 0xa06ba0 movl %eax, %ecx movq 0x40(%rsp), %rax movl %ecx, 0x8(%rax) movq 0x58(%rsp), %rax movl 0xc0(%rax), %edi movq 0x48(%rsp), %rax movl 0xc(%rax), %esi movq 0x58(%rsp), %rax xorl %edx, %edx subl 0x40(%rax), %edx movq 0x48(%rsp), %rax movl 0x8(%rax), %ecx movsbl 0x77(%rsp), %r8d callq 0xa06ba0 movl %eax, %ecx movq 0x40(%rsp), %rax movl %ecx, 0xc(%rax) movq 0x48(%rsp), %rax movl 0x10(%rax), %ecx movq 0x48(%rsp), %rax addl 0x14(%rax), %ecx movq 0x40(%rsp), %rax movl %ecx, 0x10(%rax) 
movq 0x48(%rsp), %rax xorl %ecx, %ecx subl 0x14(%rax), %ecx movq 0x48(%rsp), %rax addl 0x10(%rax), %ecx movq 0x40(%rsp), %rax movl %ecx, 0x14(%rax) movq 0x48(%rsp), %rax xorl %ecx, %ecx subl 0x18(%rax), %ecx movq 0x48(%rsp), %rax addl 0x1c(%rax), %ecx movq 0x40(%rsp), %rax movl %ecx, 0x18(%rax) movq 0x48(%rsp), %rax movl 0x1c(%rax), %ecx movq 0x48(%rsp), %rax addl 0x18(%rax), %ecx movq 0x40(%rsp), %rax movl %ecx, 0x1c(%rax) movq 0x48(%rsp), %rax movl 0x20(%rax), %ecx movq 0x40(%rsp), %rax movl %ecx, 0x20(%rax) movq 0x58(%rsp), %rax xorl %edi, %edi subl 0x40(%rax), %edi movq 0x48(%rsp), %rax movl 0x24(%rax), %esi movq 0x58(%rsp), %rax movl 0xc0(%rax), %edx movq 0x48(%rsp), %rax movl 0x38(%rax), %ecx movsbl 0x77(%rsp), %r8d callq 0xa06ba0 movl %eax, %ecx movq 0x40(%rsp), %rax movl %ecx, 0x24(%rax) movq 0x58(%rsp), %rax xorl %edi, %edi subl 0xc0(%rax), %edi movq 0x48(%rsp), %rax movl 0x28(%rax), %esi movq 0x58(%rsp), %rax xorl %edx, %edx subl 0x40(%rax), %edx movq 0x48(%rsp), %rax movl 0x34(%rax), %ecx movsbl 0x77(%rsp), %r8d callq 0xa06ba0 movl %eax, %ecx movq 0x40(%rsp), %rax movl %ecx, 0x28(%rax) movq 0x48(%rsp), %rax movl 0x2c(%rax), %ecx movq 0x40(%rsp), %rax movl %ecx, 0x2c(%rax) movq 0x48(%rsp), %rax movl 0x30(%rax), %ecx movq 0x40(%rsp), %rax movl %ecx, 0x30(%rax) movq 0x58(%rsp), %rax movl 0xc0(%rax), %edi movq 0x48(%rsp), %rax movl 0x34(%rax), %esi movq 0x58(%rsp), %rax xorl %edx, %edx subl 0x40(%rax), %edx movq 0x48(%rsp), %rax movl 0x28(%rax), %ecx movsbl 0x77(%rsp), %r8d callq 0xa06ba0 movl %eax, %ecx movq 0x40(%rsp), %rax movl %ecx, 0x34(%rax) movq 0x58(%rsp), %rax movl 0x40(%rax), %edi movq 0x48(%rsp), %rax movl 0x38(%rax), %esi movq 0x58(%rsp), %rax movl 0xc0(%rax), %edx movq 0x48(%rsp), %rax movl 0x24(%rax), %ecx movsbl 0x77(%rsp), %r8d callq 0xa06ba0 movl %eax, %ecx movq 0x40(%rsp), %rax movl %ecx, 0x38(%rax) movq 0x48(%rsp), %rax movl 0x3c(%rax), %ecx movq 0x40(%rsp), %rax movl %ecx, 0x3c(%rax) movl 0x54(%rsp), %edi movq 0x80(%rsp), %rsi movq 
0x40(%rsp), %rdx movq 0x68(%rsp), %rax movslq 0x54(%rsp), %r8 movl $0x10, %ecx movsbl (%rax,%r8), %r8d callq 0x5be6a0 movl 0x54(%rsp), %eax addl $0x1, %eax movl %eax, 0x54(%rsp) movsbl 0x77(%rsp), %edi callq 0xa06b80 movq %rax, 0x58(%rsp) movq %rsp, %rax movq %rax, 0x48(%rsp) movq 0x78(%rsp), %rax movq %rax, 0x40(%rsp) movq 0x48(%rsp), %rax movl (%rax), %ecx movq 0x40(%rsp), %rax movl %ecx, (%rax) movq 0x48(%rsp), %rax movl 0x4(%rax), %ecx movq 0x40(%rsp), %rax movl %ecx, 0x4(%rax) movq 0x48(%rsp), %rax movl 0x8(%rax), %ecx movq 0x40(%rsp), %rax movl %ecx, 0x8(%rax) movq 0x48(%rsp), %rax movl 0xc(%rax), %ecx movq 0x40(%rsp), %rax movl %ecx, 0xc(%rax) movq 0x58(%rsp), %rax movl 0xe0(%rax), %edi movq 0x48(%rsp), %rax movl 0x10(%rax), %esi movq 0x58(%rsp), %rax movl 0x20(%rax), %edx movq 0x48(%rsp), %rax movl 0x1c(%rax), %ecx movsbl 0x77(%rsp), %r8d callq 0xa06ba0 movl %eax, %ecx movq 0x40(%rsp), %rax movl %ecx, 0x10(%rax) movq 0x58(%rsp), %rax movl 0x60(%rax), %edi movq 0x48(%rsp), %rax movl 0x14(%rax), %esi movq 0x58(%rsp), %rax movl 0xa0(%rax), %edx movq 0x48(%rsp), %rax movl 0x18(%rax), %ecx movsbl 0x77(%rsp), %r8d callq 0xa06ba0 movl %eax, %ecx movq 0x40(%rsp), %rax movl %ecx, 0x14(%rax) movq 0x58(%rsp), %rax movl 0x60(%rax), %edi movq 0x48(%rsp), %rax movl 0x18(%rax), %esi movq 0x58(%rsp), %rax xorl %edx, %edx subl 0xa0(%rax), %edx movq 0x48(%rsp), %rax movl 0x14(%rax), %ecx movsbl 0x77(%rsp), %r8d callq 0xa06ba0 movl %eax, %ecx movq 0x40(%rsp), %rax movl %ecx, 0x18(%rax) movq 0x58(%rsp), %rax movl 0xe0(%rax), %edi movq 0x48(%rsp), %rax movl 0x1c(%rax), %esi movq 0x58(%rsp), %rax xorl %edx, %edx subl 0x20(%rax), %edx movq 0x48(%rsp), %rax movl 0x10(%rax), %ecx movsbl 0x77(%rsp), %r8d callq 0xa06ba0 movl %eax, %ecx movq 0x40(%rsp), %rax movl %ecx, 0x1c(%rax) movq 0x48(%rsp), %rax movl 0x20(%rax), %ecx movq 0x48(%rsp), %rax addl 0x24(%rax), %ecx movq 0x40(%rsp), %rax movl %ecx, 0x20(%rax) movq 0x48(%rsp), %rax xorl %ecx, %ecx subl 0x24(%rax), %ecx movq 0x48(%rsp), 
%rax addl 0x20(%rax), %ecx movq 0x40(%rsp), %rax movl %ecx, 0x24(%rax) movq 0x48(%rsp), %rax xorl %ecx, %ecx subl 0x28(%rax), %ecx movq 0x48(%rsp), %rax addl 0x2c(%rax), %ecx movq 0x40(%rsp), %rax movl %ecx, 0x28(%rax) movq 0x48(%rsp), %rax movl 0x2c(%rax), %ecx movq 0x48(%rsp), %rax addl 0x28(%rax), %ecx movq 0x40(%rsp), %rax movl %ecx, 0x2c(%rax) movq 0x48(%rsp), %rax movl 0x30(%rax), %ecx movq 0x48(%rsp), %rax addl 0x34(%rax), %ecx movq 0x40(%rsp), %rax movl %ecx, 0x30(%rax) movq 0x48(%rsp), %rax xorl %ecx, %ecx subl 0x34(%rax), %ecx movq 0x48(%rsp), %rax addl 0x30(%rax), %ecx movq 0x40(%rsp), %rax movl %ecx, 0x34(%rax) movq 0x48(%rsp), %rax xorl %ecx, %ecx subl 0x38(%rax), %ecx movq 0x48(%rsp), %rax addl 0x3c(%rax), %ecx movq 0x40(%rsp), %rax movl %ecx, 0x38(%rax) movq 0x48(%rsp), %rax movl 0x3c(%rax), %ecx movq 0x48(%rsp), %rax addl 0x38(%rax), %ecx movq 0x40(%rsp), %rax movl %ecx, 0x3c(%rax) movl 0x54(%rsp), %edi movq 0x80(%rsp), %rsi movq 0x40(%rsp), %rdx movq 0x68(%rsp), %rax movslq 0x54(%rsp), %r8 movl $0x10, %ecx movsbl (%rax,%r8), %r8d callq 0x5be6a0 movl 0x54(%rsp), %eax addl $0x1, %eax movl %eax, 0x54(%rsp) movsbl 0x77(%rsp), %edi callq 0xa06b80 movq %rax, 0x58(%rsp) movq 0x78(%rsp), %rax movq %rax, 0x48(%rsp) movq %rsp, %rax movq %rax, 0x40(%rsp) movq 0x48(%rsp), %rax movl (%rax), %ecx movq 0x40(%rsp), %rax movl %ecx, (%rax) movq 0x48(%rsp), %rax movl 0x4(%rax), %ecx movq 0x40(%rsp), %rax movl %ecx, 0x4(%rax) movq 0x48(%rsp), %rax movl 0x8(%rax), %ecx movq 0x40(%rsp), %rax movl %ecx, 0x8(%rax) movq 0x48(%rsp), %rax movl 0xc(%rax), %ecx movq 0x40(%rsp), %rax movl %ecx, 0xc(%rax) movq 0x48(%rsp), %rax movl 0x10(%rax), %ecx movq 0x40(%rsp), %rax movl %ecx, 0x10(%rax) movq 0x48(%rsp), %rax movl 0x14(%rax), %ecx movq 0x40(%rsp), %rax movl %ecx, 0x14(%rax) movq 0x48(%rsp), %rax movl 0x18(%rax), %ecx movq 0x40(%rsp), %rax movl %ecx, 0x18(%rax) movq 0x48(%rsp), %rax movl 0x1c(%rax), %ecx movq 0x40(%rsp), %rax movl %ecx, 0x1c(%rax) movq 0x58(%rsp), %rax movl 
0xf0(%rax), %edi movq 0x48(%rsp), %rax movl 0x20(%rax), %esi movq 0x58(%rsp), %rax movl 0x10(%rax), %edx movq 0x48(%rsp), %rax movl 0x3c(%rax), %ecx movsbl 0x77(%rsp), %r8d callq 0xa06ba0 movl %eax, %ecx movq 0x40(%rsp), %rax movl %ecx, 0x20(%rax) movq 0x58(%rsp), %rax movl 0x70(%rax), %edi movq 0x48(%rsp), %rax movl 0x24(%rax), %esi movq 0x58(%rsp), %rax movl 0x90(%rax), %edx movq 0x48(%rsp), %rax movl 0x38(%rax), %ecx movsbl 0x77(%rsp), %r8d callq 0xa06ba0 movl %eax, %ecx movq 0x40(%rsp), %rax movl %ecx, 0x24(%rax) movq 0x58(%rsp), %rax movl 0xb0(%rax), %edi movq 0x48(%rsp), %rax movl 0x28(%rax), %esi movq 0x58(%rsp), %rax movl 0x50(%rax), %edx movq 0x48(%rsp), %rax movl 0x34(%rax), %ecx movsbl 0x77(%rsp), %r8d callq 0xa06ba0 movl %eax, %ecx movq 0x40(%rsp), %rax movl %ecx, 0x28(%rax) movq 0x58(%rsp), %rax movl 0x30(%rax), %edi movq 0x48(%rsp), %rax movl 0x2c(%rax), %esi movq 0x58(%rsp), %rax movl 0xd0(%rax), %edx movq 0x48(%rsp), %rax movl 0x30(%rax), %ecx movsbl 0x77(%rsp), %r8d callq 0xa06ba0 movl %eax, %ecx movq 0x40(%rsp), %rax movl %ecx, 0x2c(%rax) movq 0x58(%rsp), %rax movl 0x30(%rax), %edi movq 0x48(%rsp), %rax movl 0x30(%rax), %esi movq 0x58(%rsp), %rax xorl %edx, %edx subl 0xd0(%rax), %edx movq 0x48(%rsp), %rax movl 0x2c(%rax), %ecx movsbl 0x77(%rsp), %r8d callq 0xa06ba0 movl %eax, %ecx movq 0x40(%rsp), %rax movl %ecx, 0x30(%rax) movq 0x58(%rsp), %rax movl 0xb0(%rax), %edi movq 0x48(%rsp), %rax movl 0x34(%rax), %esi movq 0x58(%rsp), %rax xorl %edx, %edx subl 0x50(%rax), %edx movq 0x48(%rsp), %rax movl 0x28(%rax), %ecx movsbl 0x77(%rsp), %r8d callq 0xa06ba0 movl %eax, %ecx movq 0x40(%rsp), %rax movl %ecx, 0x34(%rax) movq 0x58(%rsp), %rax movl 0x70(%rax), %edi movq 0x48(%rsp), %rax movl 0x38(%rax), %esi movq 0x58(%rsp), %rax xorl %edx, %edx subl 0x90(%rax), %edx movq 0x48(%rsp), %rax movl 0x24(%rax), %ecx movsbl 0x77(%rsp), %r8d callq 0xa06ba0 movl %eax, %ecx movq 0x40(%rsp), %rax movl %ecx, 0x38(%rax) movq 0x58(%rsp), %rax movl 0xf0(%rax), %edi movq 
0x48(%rsp), %rax movl 0x3c(%rax), %esi movq 0x58(%rsp), %rax xorl %edx, %edx subl 0x10(%rax), %edx movq 0x48(%rsp), %rax movl 0x20(%rax), %ecx movsbl 0x77(%rsp), %r8d callq 0xa06ba0 movl %eax, %ecx movq 0x40(%rsp), %rax movl %ecx, 0x3c(%rax) movl 0x54(%rsp), %edi movq 0x80(%rsp), %rsi movq 0x40(%rsp), %rdx movq 0x68(%rsp), %rax movslq 0x54(%rsp), %r8 movl $0x10, %ecx movsbl (%rax,%r8), %r8d callq 0x5be6a0 movl 0x54(%rsp), %eax addl $0x1, %eax movl %eax, 0x54(%rsp) movq %rsp, %rax movq %rax, 0x48(%rsp) movq 0x78(%rsp), %rax movq %rax, 0x40(%rsp) movq 0x48(%rsp), %rax movl (%rax), %ecx movq 0x40(%rsp), %rax movl %ecx, (%rax) movq 0x48(%rsp), %rax movl 0x20(%rax), %ecx movq 0x40(%rsp), %rax movl %ecx, 0x4(%rax) movq 0x48(%rsp), %rax movl 0x10(%rax), %ecx movq 0x40(%rsp), %rax movl %ecx, 0x8(%rax) movq 0x48(%rsp), %rax movl 0x30(%rax), %ecx movq 0x40(%rsp), %rax movl %ecx, 0xc(%rax) movq 0x48(%rsp), %rax movl 0x8(%rax), %ecx movq 0x40(%rsp), %rax movl %ecx, 0x10(%rax) movq 0x48(%rsp), %rax movl 0x28(%rax), %ecx movq 0x40(%rsp), %rax movl %ecx, 0x14(%rax) movq 0x48(%rsp), %rax movl 0x18(%rax), %ecx movq 0x40(%rsp), %rax movl %ecx, 0x18(%rax) movq 0x48(%rsp), %rax movl 0x38(%rax), %ecx movq 0x40(%rsp), %rax movl %ecx, 0x1c(%rax) movq 0x48(%rsp), %rax movl 0x4(%rax), %ecx movq 0x40(%rsp), %rax movl %ecx, 0x20(%rax) movq 0x48(%rsp), %rax movl 0x24(%rax), %ecx movq 0x40(%rsp), %rax movl %ecx, 0x24(%rax) movq 0x48(%rsp), %rax movl 0x14(%rax), %ecx movq 0x40(%rsp), %rax movl %ecx, 0x28(%rax) movq 0x48(%rsp), %rax movl 0x34(%rax), %ecx movq 0x40(%rsp), %rax movl %ecx, 0x2c(%rax) movq 0x48(%rsp), %rax movl 0xc(%rax), %ecx movq 0x40(%rsp), %rax movl %ecx, 0x30(%rax) movq 0x48(%rsp), %rax movl 0x2c(%rax), %ecx movq 0x40(%rsp), %rax movl %ecx, 0x34(%rax) movq 0x48(%rsp), %rax movl 0x1c(%rax), %ecx movq 0x40(%rsp), %rax movl %ecx, 0x38(%rax) movq 0x48(%rsp), %rax movl 0x3c(%rax), %ecx movq 0x40(%rsp), %rax movl %ecx, 0x3c(%rax) movl 0x54(%rsp), %edi movq 0x80(%rsp), %rsi movq 
0x40(%rsp), %rdx movq 0x68(%rsp), %rax movslq 0x54(%rsp), %r8 movl $0x10, %ecx movsbl (%rax,%r8), %r8d callq 0x5be6a0 addq $0x88, %rsp retq nopl (%rax)
/m-ab-s[P]aom/av1/encoder/av1_fwd_txfm1d.c
av1_fadst4
// Forward 4-point 1-D ADST in fixed-point arithmetic.
//
// input       : the 4 input samples (read-only).
// output      : receives the 4 transform coefficients.
// cos_bit     : bit precision of the sinpi[] constants; also the final
//               rounding shift applied by round_shift().
// stage_range : per-stage allowed bit ranges fed to range_check_value() /
//               av1_range_check_buf().
void av1_fadst4(const int32_t *input, int32_t *output, int8_t cos_bit,
                const int8_t *stage_range) {
  const int bit = cos_bit;
  const int32_t *sinpi = sinpi_arr(bit);
  int32_t in0, in1, in2, in3;
  int32_t t0, t1, t2, t3, t4, t5, t6, t7;

  // stage 0
  av1_range_check_buf(0, input, input, 4, stage_range[0]);
  in0 = input[0];
  in1 = input[1];
  in2 = input[2];
  in3 = input[3];

  // An all-zero input transforms to an all-zero output; skip the math.
  if ((in0 | in1 | in2 | in3) == 0) {
    output[0] = output[1] = output[2] = output[3] = 0;
    return;
  }

  // stage 1: weighted products of the inputs with the sinpi constants.
  t0 = range_check_value(sinpi[1] * in0, bit + stage_range[1]);
  t1 = range_check_value(sinpi[4] * in0, bit + stage_range[1]);
  t2 = range_check_value(sinpi[2] * in1, bit + stage_range[1]);
  t3 = range_check_value(sinpi[1] * in1, bit + stage_range[1]);
  t4 = range_check_value(sinpi[3] * in2, bit + stage_range[1]);
  t5 = range_check_value(sinpi[4] * in3, bit + stage_range[1]);
  t6 = range_check_value(sinpi[2] * in3, bit + stage_range[1]);
  t7 = range_check_value(in0 + in1, stage_range[1]);

  // stage 2
  t7 = range_check_value(t7 - in3, stage_range[2]);

  // stage 3: combine the products; in0..in3 are reused as accumulators.
  in0 = range_check_value(t0 + t2, bit + stage_range[3]);
  in1 = range_check_value(sinpi[3] * t7, bit + stage_range[3]);
  in2 = range_check_value(t1 - t3, bit + stage_range[3]);
  in3 = range_check_value(t4, bit + stage_range[3]);

  // stage 4
  in0 = range_check_value(in0 + t5, bit + stage_range[4]);
  in2 = range_check_value(in2 + t6, bit + stage_range[4]);

  // stage 5
  t0 = range_check_value(in0 + in3, bit + stage_range[5]);
  t1 = range_check_value(in1, bit + stage_range[5]);
  t2 = range_check_value(in2 - in3, bit + stage_range[5]);
  t3 = range_check_value(in2 - in0, bit + stage_range[5]);

  // stage 6
  t3 = range_check_value(t3 + in3, bit + stage_range[6]);

  // 1-D transform scaling factor is sqrt(2).
  output[0] = round_shift(t0, bit);
  output[1] = round_shift(t1, bit);
  output[2] = round_shift(t2, bit);
  output[3] = round_shift(t3, bit);
  av1_range_check_buf(6, input, output, 4, stage_range[6]);
}
subq $0x68, %rsp movb %dl, %al movq %rdi, 0x60(%rsp) movq %rsi, 0x58(%rsp) movb %al, 0x57(%rsp) movq %rcx, 0x48(%rsp) movsbl 0x57(%rsp), %eax movl %eax, 0x44(%rsp) movl 0x44(%rsp), %edi callq 0xa0b7a0 movq %rax, 0x38(%rsp) movq 0x60(%rsp), %rsi movq 0x60(%rsp), %rdx movq 0x48(%rsp), %rax xorl %edi, %edi movl $0x4, %ecx movsbl (%rax), %r8d callq 0x5be6a0 movq 0x60(%rsp), %rax movl (%rax), %eax movl %eax, 0x34(%rsp) movq 0x60(%rsp), %rax movl 0x4(%rax), %eax movl %eax, 0x30(%rsp) movq 0x60(%rsp), %rax movl 0x8(%rax), %eax movl %eax, 0x2c(%rsp) movq 0x60(%rsp), %rax movl 0xc(%rax), %eax movl %eax, 0x28(%rsp) movl 0x34(%rsp), %eax orl 0x30(%rsp), %eax orl 0x2c(%rsp), %eax orl 0x28(%rsp), %eax cmpl $0x0, %eax jne 0xa0b447 movq 0x58(%rsp), %rax movl $0x0, 0xc(%rax) movq 0x58(%rsp), %rax movl $0x0, 0x8(%rax) movq 0x58(%rsp), %rax movl $0x0, 0x4(%rax) movq 0x58(%rsp), %rax movl $0x0, (%rax) jmp 0xa0b793 movq 0x38(%rsp), %rax movl 0x4(%rax), %edi imull 0x34(%rsp), %edi movl 0x44(%rsp), %eax movq 0x48(%rsp), %rcx movsbl 0x1(%rcx), %ecx addl %ecx, %eax movsbl %al, %esi callq 0xa0b7c0 movl %eax, 0x24(%rsp) movq 0x38(%rsp), %rax movl 0x10(%rax), %edi imull 0x34(%rsp), %edi movl 0x44(%rsp), %eax movq 0x48(%rsp), %rcx movsbl 0x1(%rcx), %ecx addl %ecx, %eax movsbl %al, %esi callq 0xa0b7c0 movl %eax, 0x20(%rsp) movq 0x38(%rsp), %rax movl 0x8(%rax), %edi imull 0x30(%rsp), %edi movl 0x44(%rsp), %eax movq 0x48(%rsp), %rcx movsbl 0x1(%rcx), %ecx addl %ecx, %eax movsbl %al, %esi callq 0xa0b7c0 movl %eax, 0x1c(%rsp) movq 0x38(%rsp), %rax movl 0x4(%rax), %edi imull 0x30(%rsp), %edi movl 0x44(%rsp), %eax movq 0x48(%rsp), %rcx movsbl 0x1(%rcx), %ecx addl %ecx, %eax movsbl %al, %esi callq 0xa0b7c0 movl %eax, 0x18(%rsp) movq 0x38(%rsp), %rax movl 0xc(%rax), %edi imull 0x2c(%rsp), %edi movl 0x44(%rsp), %eax movq 0x48(%rsp), %rcx movsbl 0x1(%rcx), %ecx addl %ecx, %eax movsbl %al, %esi callq 0xa0b7c0 movl %eax, 0x14(%rsp) movq 0x38(%rsp), %rax movl 0x10(%rax), %edi imull 0x28(%rsp), %edi movl 
0x44(%rsp), %eax movq 0x48(%rsp), %rcx movsbl 0x1(%rcx), %ecx addl %ecx, %eax movsbl %al, %esi callq 0xa0b7c0 movl %eax, 0x10(%rsp) movq 0x38(%rsp), %rax movl 0x8(%rax), %edi imull 0x28(%rsp), %edi movl 0x44(%rsp), %eax movq 0x48(%rsp), %rcx movsbl 0x1(%rcx), %ecx addl %ecx, %eax movsbl %al, %esi callq 0xa0b7c0 movl %eax, 0xc(%rsp) movl 0x34(%rsp), %edi addl 0x30(%rsp), %edi movq 0x48(%rsp), %rax movsbl 0x1(%rax), %esi callq 0xa0b7c0 movl %eax, 0x8(%rsp) movl 0x8(%rsp), %edi subl 0x28(%rsp), %edi movq 0x48(%rsp), %rax movsbl 0x2(%rax), %esi callq 0xa0b7c0 movl %eax, 0x8(%rsp) movl 0x24(%rsp), %edi addl 0x1c(%rsp), %edi movl 0x44(%rsp), %eax movq 0x48(%rsp), %rcx movsbl 0x3(%rcx), %ecx addl %ecx, %eax movsbl %al, %esi callq 0xa0b7c0 movl %eax, 0x34(%rsp) movq 0x38(%rsp), %rax movl 0xc(%rax), %edi imull 0x8(%rsp), %edi movl 0x44(%rsp), %eax movq 0x48(%rsp), %rcx movsbl 0x3(%rcx), %ecx addl %ecx, %eax movsbl %al, %esi callq 0xa0b7c0 movl %eax, 0x30(%rsp) movl 0x20(%rsp), %edi subl 0x18(%rsp), %edi movl 0x44(%rsp), %eax movq 0x48(%rsp), %rcx movsbl 0x3(%rcx), %ecx addl %ecx, %eax movsbl %al, %esi callq 0xa0b7c0 movl %eax, 0x2c(%rsp) movl 0x14(%rsp), %edi movl 0x44(%rsp), %eax movq 0x48(%rsp), %rcx movsbl 0x3(%rcx), %ecx addl %ecx, %eax movsbl %al, %esi callq 0xa0b7c0 movl %eax, 0x28(%rsp) movl 0x34(%rsp), %edi addl 0x10(%rsp), %edi movl 0x44(%rsp), %eax movq 0x48(%rsp), %rcx movsbl 0x4(%rcx), %ecx addl %ecx, %eax movsbl %al, %esi callq 0xa0b7c0 movl %eax, 0x34(%rsp) movl 0x2c(%rsp), %edi addl 0xc(%rsp), %edi movl 0x44(%rsp), %eax movq 0x48(%rsp), %rcx movsbl 0x4(%rcx), %ecx addl %ecx, %eax movsbl %al, %esi callq 0xa0b7c0 movl %eax, 0x2c(%rsp) movl 0x34(%rsp), %edi addl 0x28(%rsp), %edi movl 0x44(%rsp), %eax movq 0x48(%rsp), %rcx movsbl 0x5(%rcx), %ecx addl %ecx, %eax movsbl %al, %esi callq 0xa0b7c0 movl %eax, 0x24(%rsp) movl 0x30(%rsp), %edi movl 0x44(%rsp), %eax movq 0x48(%rsp), %rcx movsbl 0x5(%rcx), %ecx addl %ecx, %eax movsbl %al, %esi callq 0xa0b7c0 movl %eax, 
0x20(%rsp) movl 0x2c(%rsp), %edi subl 0x28(%rsp), %edi movl 0x44(%rsp), %eax movq 0x48(%rsp), %rcx movsbl 0x5(%rcx), %ecx addl %ecx, %eax movsbl %al, %esi callq 0xa0b7c0 movl %eax, 0x1c(%rsp) movl 0x2c(%rsp), %edi subl 0x34(%rsp), %edi movl 0x44(%rsp), %eax movq 0x48(%rsp), %rcx movsbl 0x5(%rcx), %ecx addl %ecx, %eax movsbl %al, %esi callq 0xa0b7c0 movl %eax, 0x18(%rsp) movl 0x18(%rsp), %edi addl 0x28(%rsp), %edi movl 0x44(%rsp), %eax movq 0x48(%rsp), %rcx movsbl 0x6(%rcx), %ecx addl %ecx, %eax movsbl %al, %esi callq 0xa0b7c0 movl %eax, 0x18(%rsp) movslq 0x24(%rsp), %rdi movl 0x44(%rsp), %esi callq 0xa0b7d0 movl %eax, %ecx movq 0x58(%rsp), %rax movl %ecx, (%rax) movslq 0x20(%rsp), %rdi movl 0x44(%rsp), %esi callq 0xa0b7d0 movl %eax, %ecx movq 0x58(%rsp), %rax movl %ecx, 0x4(%rax) movslq 0x1c(%rsp), %rdi movl 0x44(%rsp), %esi callq 0xa0b7d0 movl %eax, %ecx movq 0x58(%rsp), %rax movl %ecx, 0x8(%rax) movslq 0x18(%rsp), %rdi movl 0x44(%rsp), %esi callq 0xa0b7d0 movl %eax, %ecx movq 0x58(%rsp), %rax movl %ecx, 0xc(%rax) movq 0x60(%rsp), %rsi movq 0x58(%rsp), %rdx movq 0x48(%rsp), %rax movl $0x6, %edi movl $0x4, %ecx movsbl 0x6(%rax), %r8d callq 0x5be6a0 addq $0x68, %rsp retq nopl (%rax,%rax)
/m-ab-s[P]aom/av1/encoder/av1_fwd_txfm1d.c
av1_fadst16
// Forward 16-point ADST (asymmetric discrete sine transform), one 1-D pass
// of the AV1 forward transform.
//
// The 16 residuals in `input` are pushed through nine butterfly stages,
// ping-ponging between `output` and the local `step` scratch buffer; the
// final coefficients land in `output[0..15]`.  After every stage the
// intermediate values are checked against the allowed bit width for that
// stage via av1_range_check_buf().
//
// Parameters:
//   input       - 16 input values; must not alias `output` (asserted below).
//   output      - receives the 16 transformed values; also used as scratch
//                 between stages.
//   cos_bit     - selects the cosine table precision via cospi_arr().
//   stage_range - per-stage permitted bit range, indexed by `stage`.
//
// half_btf(w0, a, w1, b, bit) is an external helper; presumably it computes
// round_shift(w0 * a + w1 * b, bit) -- confirm against av1_txfm.h.
void av1_fadst16(const int32_t *input, int32_t *output, int8_t cos_bit, const int8_t *stage_range) {
  const int32_t size = 16;
  const int32_t *cospi;

  int32_t stage = 0;
  int32_t *bf0, *bf1;
  int32_t step[16];  // scratch buffer alternated with `output` each stage

  // stage 0;
  av1_range_check_buf(stage, input, input, size, stage_range[stage]);

  // stage 1;
  // Input permutation with sign flips (no arithmetic beyond negation).
  stage++;
  assert(output != input);
  bf1 = output;
  bf1[0] = input[0];
  bf1[1] = -input[15];
  bf1[2] = -input[7];
  bf1[3] = input[8];
  bf1[4] = -input[3];
  bf1[5] = input[12];
  bf1[6] = input[4];
  bf1[7] = -input[11];
  bf1[8] = -input[1];
  bf1[9] = input[14];
  bf1[10] = input[6];
  bf1[11] = -input[9];
  bf1[12] = input[2];
  bf1[13] = -input[13];
  bf1[14] = -input[5];
  bf1[15] = input[10];
  av1_range_check_buf(stage, input, bf1, size, stage_range[stage]);

  // stage 2
  // cospi[32] butterflies on every other pair; the remaining lanes pass
  // through unchanged.
  stage++;
  cospi = cospi_arr(cos_bit);
  bf0 = output;
  bf1 = step;
  bf1[0] = bf0[0];
  bf1[1] = bf0[1];
  bf1[2] = half_btf(cospi[32], bf0[2], cospi[32], bf0[3], cos_bit);
  bf1[3] = half_btf(cospi[32], bf0[2], -cospi[32], bf0[3], cos_bit);
  bf1[4] = bf0[4];
  bf1[5] = bf0[5];
  bf1[6] = half_btf(cospi[32], bf0[6], cospi[32], bf0[7], cos_bit);
  bf1[7] = half_btf(cospi[32], bf0[6], -cospi[32], bf0[7], cos_bit);
  bf1[8] = bf0[8];
  bf1[9] = bf0[9];
  bf1[10] = half_btf(cospi[32], bf0[10], cospi[32], bf0[11], cos_bit);
  bf1[11] = half_btf(cospi[32], bf0[10], -cospi[32], bf0[11], cos_bit);
  bf1[12] = bf0[12];
  bf1[13] = bf0[13];
  bf1[14] = half_btf(cospi[32], bf0[14], cospi[32], bf0[15], cos_bit);
  bf1[15] = half_btf(cospi[32], bf0[14], -cospi[32], bf0[15], cos_bit);
  av1_range_check_buf(stage, input, bf1, size, stage_range[stage]);

  // stage 3
  // Add/subtract butterflies with distance-2 partners within each group of 4.
  stage++;
  bf0 = step;
  bf1 = output;
  bf1[0] = bf0[0] + bf0[2];
  bf1[1] = bf0[1] + bf0[3];
  bf1[2] = bf0[0] - bf0[2];
  bf1[3] = bf0[1] - bf0[3];
  bf1[4] = bf0[4] + bf0[6];
  bf1[5] = bf0[5] + bf0[7];
  bf1[6] = bf0[4] - bf0[6];
  bf1[7] = bf0[5] - bf0[7];
  bf1[8] = bf0[8] + bf0[10];
  bf1[9] = bf0[9] + bf0[11];
  bf1[10] = bf0[8] - bf0[10];
  bf1[11] = bf0[9] - bf0[11];
  bf1[12] = bf0[12] + bf0[14];
  bf1[13] = bf0[13] + bf0[15];
  bf1[14] = bf0[12] - bf0[14];
  bf1[15] = bf0[13] - bf0[15];
  av1_range_check_buf(stage, input, bf1, size, stage_range[stage]);

  // stage 4
  // cospi[16]/cospi[48] rotations on lanes 4..7 and 12..15; the rest pass
  // through.
  stage++;
  cospi = cospi_arr(cos_bit);
  bf0 = output;
  bf1 = step;
  bf1[0] = bf0[0];
  bf1[1] = bf0[1];
  bf1[2] = bf0[2];
  bf1[3] = bf0[3];
  bf1[4] = half_btf(cospi[16], bf0[4], cospi[48], bf0[5], cos_bit);
  bf1[5] = half_btf(cospi[48], bf0[4], -cospi[16], bf0[5], cos_bit);
  bf1[6] = half_btf(-cospi[48], bf0[6], cospi[16], bf0[7], cos_bit);
  bf1[7] = half_btf(cospi[16], bf0[6], cospi[48], bf0[7], cos_bit);
  bf1[8] = bf0[8];
  bf1[9] = bf0[9];
  bf1[10] = bf0[10];
  bf1[11] = bf0[11];
  bf1[12] = half_btf(cospi[16], bf0[12], cospi[48], bf0[13], cos_bit);
  bf1[13] = half_btf(cospi[48], bf0[12], -cospi[16], bf0[13], cos_bit);
  bf1[14] = half_btf(-cospi[48], bf0[14], cospi[16], bf0[15], cos_bit);
  bf1[15] = half_btf(cospi[16], bf0[14], cospi[48], bf0[15], cos_bit);
  av1_range_check_buf(stage, input, bf1, size, stage_range[stage]);

  // stage 5
  // Add/subtract butterflies with distance-4 partners within each half.
  stage++;
  bf0 = step;
  bf1 = output;
  bf1[0] = bf0[0] + bf0[4];
  bf1[1] = bf0[1] + bf0[5];
  bf1[2] = bf0[2] + bf0[6];
  bf1[3] = bf0[3] + bf0[7];
  bf1[4] = bf0[0] - bf0[4];
  bf1[5] = bf0[1] - bf0[5];
  bf1[6] = bf0[2] - bf0[6];
  bf1[7] = bf0[3] - bf0[7];
  bf1[8] = bf0[8] + bf0[12];
  bf1[9] = bf0[9] + bf0[13];
  bf1[10] = bf0[10] + bf0[14];
  bf1[11] = bf0[11] + bf0[15];
  bf1[12] = bf0[8] - bf0[12];
  bf1[13] = bf0[9] - bf0[13];
  bf1[14] = bf0[10] - bf0[14];
  bf1[15] = bf0[11] - bf0[15];
  av1_range_check_buf(stage, input, bf1, size, stage_range[stage]);

  // stage 6
  // Rotations on the upper half (lanes 8..15) only; lower half passes
  // through.
  stage++;
  cospi = cospi_arr(cos_bit);
  bf0 = output;
  bf1 = step;
  bf1[0] = bf0[0];
  bf1[1] = bf0[1];
  bf1[2] = bf0[2];
  bf1[3] = bf0[3];
  bf1[4] = bf0[4];
  bf1[5] = bf0[5];
  bf1[6] = bf0[6];
  bf1[7] = bf0[7];
  bf1[8] = half_btf(cospi[8], bf0[8], cospi[56], bf0[9], cos_bit);
  bf1[9] = half_btf(cospi[56], bf0[8], -cospi[8], bf0[9], cos_bit);
  bf1[10] = half_btf(cospi[40], bf0[10], cospi[24], bf0[11], cos_bit);
  bf1[11] = half_btf(cospi[24], bf0[10], -cospi[40], bf0[11], cos_bit);
  bf1[12] = half_btf(-cospi[56], bf0[12], cospi[8], bf0[13], cos_bit);
  bf1[13] = half_btf(cospi[8], bf0[12], cospi[56], bf0[13], cos_bit);
  bf1[14] = half_btf(-cospi[24], bf0[14], cospi[40], bf0[15], cos_bit);
  bf1[15] = half_btf(cospi[40], bf0[14], cospi[24], bf0[15], cos_bit);
  av1_range_check_buf(stage, input, bf1, size, stage_range[stage]);

  // stage 7
  // Add/subtract butterflies between the two halves (distance-8 partners).
  stage++;
  bf0 = step;
  bf1 = output;
  bf1[0] = bf0[0] + bf0[8];
  bf1[1] = bf0[1] + bf0[9];
  bf1[2] = bf0[2] + bf0[10];
  bf1[3] = bf0[3] + bf0[11];
  bf1[4] = bf0[4] + bf0[12];
  bf1[5] = bf0[5] + bf0[13];
  bf1[6] = bf0[6] + bf0[14];
  bf1[7] = bf0[7] + bf0[15];
  bf1[8] = bf0[0] - bf0[8];
  bf1[9] = bf0[1] - bf0[9];
  bf1[10] = bf0[2] - bf0[10];
  bf1[11] = bf0[3] - bf0[11];
  bf1[12] = bf0[4] - bf0[12];
  bf1[13] = bf0[5] - bf0[13];
  bf1[14] = bf0[6] - bf0[14];
  bf1[15] = bf0[7] - bf0[15];
  av1_range_check_buf(stage, input, bf1, size, stage_range[stage]);

  // stage 8
  // Final odd-cosine rotations: every adjacent pair gets a distinct
  // cospi[2k] / cospi[64-2k] weight pair.
  stage++;
  cospi = cospi_arr(cos_bit);
  bf0 = output;
  bf1 = step;
  bf1[0] = half_btf(cospi[2], bf0[0], cospi[62], bf0[1], cos_bit);
  bf1[1] = half_btf(cospi[62], bf0[0], -cospi[2], bf0[1], cos_bit);
  bf1[2] = half_btf(cospi[10], bf0[2], cospi[54], bf0[3], cos_bit);
  bf1[3] = half_btf(cospi[54], bf0[2], -cospi[10], bf0[3], cos_bit);
  bf1[4] = half_btf(cospi[18], bf0[4], cospi[46], bf0[5], cos_bit);
  bf1[5] = half_btf(cospi[46], bf0[4], -cospi[18], bf0[5], cos_bit);
  bf1[6] = half_btf(cospi[26], bf0[6], cospi[38], bf0[7], cos_bit);
  bf1[7] = half_btf(cospi[38], bf0[6], -cospi[26], bf0[7], cos_bit);
  bf1[8] = half_btf(cospi[34], bf0[8], cospi[30], bf0[9], cos_bit);
  bf1[9] = half_btf(cospi[30], bf0[8], -cospi[34], bf0[9], cos_bit);
  bf1[10] = half_btf(cospi[42], bf0[10], cospi[22], bf0[11], cos_bit);
  bf1[11] = half_btf(cospi[22], bf0[10], -cospi[42], bf0[11], cos_bit);
  bf1[12] = half_btf(cospi[50], bf0[12], cospi[14], bf0[13], cos_bit);
  bf1[13] = half_btf(cospi[14], bf0[12], -cospi[50], bf0[13], cos_bit);
  bf1[14] = half_btf(cospi[58], bf0[14], cospi[6], bf0[15], cos_bit);
  bf1[15] = half_btf(cospi[6], bf0[14], -cospi[58], bf0[15], cos_bit);
  av1_range_check_buf(stage, input, bf1, size, stage_range[stage]);

  // stage 9
  // Output permutation (pure reordering, no arithmetic).
  stage++;
  bf0 = step;
  bf1 = output;
  bf1[0] = bf0[1];
  bf1[1] = bf0[14];
  bf1[2] = bf0[3];
  bf1[3] = bf0[12];
  bf1[4] = bf0[5];
  bf1[5] = bf0[10];
  bf1[6] = bf0[7];
  bf1[7] = bf0[8];
  bf1[8] = bf0[9];
  bf1[9] = bf0[6];
  bf1[10] = bf0[11];
  bf1[11] = bf0[4];
  bf1[12] = bf0[13];
  bf1[13] = bf0[2];
  bf1[14] = bf0[15];
  bf1[15] = bf0[0];
  av1_range_check_buf(stage, input, bf1, size, stage_range[stage]);
}
subq $0x88, %rsp movb %dl, %al movq %rdi, 0x80(%rsp) movq %rsi, 0x78(%rsp) movb %al, 0x77(%rsp) movq %rcx, 0x68(%rsp) movl $0x10, 0x64(%rsp) movl $0x0, 0x54(%rsp) movl 0x54(%rsp), %edi movq 0x80(%rsp), %rsi movq 0x80(%rsp), %rdx movq 0x68(%rsp), %rax movslq 0x54(%rsp), %r8 movl $0x10, %ecx movsbl (%rax,%r8), %r8d callq 0x5be6a0 movl 0x54(%rsp), %eax addl $0x1, %eax movl %eax, 0x54(%rsp) movq 0x78(%rsp), %rax movq %rax, 0x40(%rsp) movq 0x80(%rsp), %rax movl (%rax), %ecx movq 0x40(%rsp), %rax movl %ecx, (%rax) movq 0x80(%rsp), %rax xorl %ecx, %ecx subl 0x3c(%rax), %ecx movq 0x40(%rsp), %rax movl %ecx, 0x4(%rax) movq 0x80(%rsp), %rax xorl %ecx, %ecx subl 0x1c(%rax), %ecx movq 0x40(%rsp), %rax movl %ecx, 0x8(%rax) movq 0x80(%rsp), %rax movl 0x20(%rax), %ecx movq 0x40(%rsp), %rax movl %ecx, 0xc(%rax) movq 0x80(%rsp), %rax xorl %ecx, %ecx subl 0xc(%rax), %ecx movq 0x40(%rsp), %rax movl %ecx, 0x10(%rax) movq 0x80(%rsp), %rax movl 0x30(%rax), %ecx movq 0x40(%rsp), %rax movl %ecx, 0x14(%rax) movq 0x80(%rsp), %rax movl 0x10(%rax), %ecx movq 0x40(%rsp), %rax movl %ecx, 0x18(%rax) movq 0x80(%rsp), %rax xorl %ecx, %ecx subl 0x2c(%rax), %ecx movq 0x40(%rsp), %rax movl %ecx, 0x1c(%rax) movq 0x80(%rsp), %rax xorl %ecx, %ecx subl 0x4(%rax), %ecx movq 0x40(%rsp), %rax movl %ecx, 0x20(%rax) movq 0x80(%rsp), %rax movl 0x38(%rax), %ecx movq 0x40(%rsp), %rax movl %ecx, 0x24(%rax) movq 0x80(%rsp), %rax movl 0x18(%rax), %ecx movq 0x40(%rsp), %rax movl %ecx, 0x28(%rax) movq 0x80(%rsp), %rax xorl %ecx, %ecx subl 0x24(%rax), %ecx movq 0x40(%rsp), %rax movl %ecx, 0x2c(%rax) movq 0x80(%rsp), %rax movl 0x8(%rax), %ecx movq 0x40(%rsp), %rax movl %ecx, 0x30(%rax) movq 0x80(%rsp), %rax xorl %ecx, %ecx subl 0x34(%rax), %ecx movq 0x40(%rsp), %rax movl %ecx, 0x34(%rax) movq 0x80(%rsp), %rax xorl %ecx, %ecx subl 0x14(%rax), %ecx movq 0x40(%rsp), %rax movl %ecx, 0x38(%rax) movq 0x80(%rsp), %rax movl 0x28(%rax), %ecx movq 0x40(%rsp), %rax movl %ecx, 0x3c(%rax) movl 0x54(%rsp), %edi movq 0x80(%rsp), %rsi 
movq 0x40(%rsp), %rdx movq 0x68(%rsp), %rax movslq 0x54(%rsp), %r8 movl $0x10, %ecx movsbl (%rax,%r8), %r8d callq 0x5be6a0 movl 0x54(%rsp), %eax addl $0x1, %eax movl %eax, 0x54(%rsp) movsbl 0x77(%rsp), %edi callq 0xa06b80 movq %rax, 0x58(%rsp) movq 0x78(%rsp), %rax movq %rax, 0x48(%rsp) movq %rsp, %rax movq %rax, 0x40(%rsp) movq 0x48(%rsp), %rax movl (%rax), %ecx movq 0x40(%rsp), %rax movl %ecx, (%rax) movq 0x48(%rsp), %rax movl 0x4(%rax), %ecx movq 0x40(%rsp), %rax movl %ecx, 0x4(%rax) movq 0x58(%rsp), %rax movl 0x80(%rax), %edi movq 0x48(%rsp), %rax movl 0x8(%rax), %esi movq 0x58(%rsp), %rax movl 0x80(%rax), %edx movq 0x48(%rsp), %rax movl 0xc(%rax), %ecx movsbl 0x77(%rsp), %r8d callq 0xa06ba0 movl %eax, %ecx movq 0x40(%rsp), %rax movl %ecx, 0x8(%rax) movq 0x58(%rsp), %rax movl 0x80(%rax), %edi movq 0x48(%rsp), %rax movl 0x8(%rax), %esi movq 0x58(%rsp), %rax xorl %edx, %edx subl 0x80(%rax), %edx movq 0x48(%rsp), %rax movl 0xc(%rax), %ecx movsbl 0x77(%rsp), %r8d callq 0xa06ba0 movl %eax, %ecx movq 0x40(%rsp), %rax movl %ecx, 0xc(%rax) movq 0x48(%rsp), %rax movl 0x10(%rax), %ecx movq 0x40(%rsp), %rax movl %ecx, 0x10(%rax) movq 0x48(%rsp), %rax movl 0x14(%rax), %ecx movq 0x40(%rsp), %rax movl %ecx, 0x14(%rax) movq 0x58(%rsp), %rax movl 0x80(%rax), %edi movq 0x48(%rsp), %rax movl 0x18(%rax), %esi movq 0x58(%rsp), %rax movl 0x80(%rax), %edx movq 0x48(%rsp), %rax movl 0x1c(%rax), %ecx movsbl 0x77(%rsp), %r8d callq 0xa06ba0 movl %eax, %ecx movq 0x40(%rsp), %rax movl %ecx, 0x18(%rax) movq 0x58(%rsp), %rax movl 0x80(%rax), %edi movq 0x48(%rsp), %rax movl 0x18(%rax), %esi movq 0x58(%rsp), %rax xorl %edx, %edx subl 0x80(%rax), %edx movq 0x48(%rsp), %rax movl 0x1c(%rax), %ecx movsbl 0x77(%rsp), %r8d callq 0xa06ba0 movl %eax, %ecx movq 0x40(%rsp), %rax movl %ecx, 0x1c(%rax) movq 0x48(%rsp), %rax movl 0x20(%rax), %ecx movq 0x40(%rsp), %rax movl %ecx, 0x20(%rax) movq 0x48(%rsp), %rax movl 0x24(%rax), %ecx movq 0x40(%rsp), %rax movl %ecx, 0x24(%rax) movq 0x58(%rsp), %rax movl 
0x80(%rax), %edi movq 0x48(%rsp), %rax movl 0x28(%rax), %esi movq 0x58(%rsp), %rax movl 0x80(%rax), %edx movq 0x48(%rsp), %rax movl 0x2c(%rax), %ecx movsbl 0x77(%rsp), %r8d callq 0xa06ba0 movl %eax, %ecx movq 0x40(%rsp), %rax movl %ecx, 0x28(%rax) movq 0x58(%rsp), %rax movl 0x80(%rax), %edi movq 0x48(%rsp), %rax movl 0x28(%rax), %esi movq 0x58(%rsp), %rax xorl %edx, %edx subl 0x80(%rax), %edx movq 0x48(%rsp), %rax movl 0x2c(%rax), %ecx movsbl 0x77(%rsp), %r8d callq 0xa06ba0 movl %eax, %ecx movq 0x40(%rsp), %rax movl %ecx, 0x2c(%rax) movq 0x48(%rsp), %rax movl 0x30(%rax), %ecx movq 0x40(%rsp), %rax movl %ecx, 0x30(%rax) movq 0x48(%rsp), %rax movl 0x34(%rax), %ecx movq 0x40(%rsp), %rax movl %ecx, 0x34(%rax) movq 0x58(%rsp), %rax movl 0x80(%rax), %edi movq 0x48(%rsp), %rax movl 0x38(%rax), %esi movq 0x58(%rsp), %rax movl 0x80(%rax), %edx movq 0x48(%rsp), %rax movl 0x3c(%rax), %ecx movsbl 0x77(%rsp), %r8d callq 0xa06ba0 movl %eax, %ecx movq 0x40(%rsp), %rax movl %ecx, 0x38(%rax) movq 0x58(%rsp), %rax movl 0x80(%rax), %edi movq 0x48(%rsp), %rax movl 0x38(%rax), %esi movq 0x58(%rsp), %rax xorl %edx, %edx subl 0x80(%rax), %edx movq 0x48(%rsp), %rax movl 0x3c(%rax), %ecx movsbl 0x77(%rsp), %r8d callq 0xa06ba0 movl %eax, %ecx movq 0x40(%rsp), %rax movl %ecx, 0x3c(%rax) movl 0x54(%rsp), %edi movq 0x80(%rsp), %rsi movq 0x40(%rsp), %rdx movq 0x68(%rsp), %rax movslq 0x54(%rsp), %r8 movl $0x10, %ecx movsbl (%rax,%r8), %r8d callq 0x5be6a0 movl 0x54(%rsp), %eax addl $0x1, %eax movl %eax, 0x54(%rsp) movq %rsp, %rax movq %rax, 0x48(%rsp) movq 0x78(%rsp), %rax movq %rax, 0x40(%rsp) movq 0x48(%rsp), %rax movl (%rax), %ecx movq 0x48(%rsp), %rax addl 0x8(%rax), %ecx movq 0x40(%rsp), %rax movl %ecx, (%rax) movq 0x48(%rsp), %rax movl 0x4(%rax), %ecx movq 0x48(%rsp), %rax addl 0xc(%rax), %ecx movq 0x40(%rsp), %rax movl %ecx, 0x4(%rax) movq 0x48(%rsp), %rax movl (%rax), %ecx movq 0x48(%rsp), %rax subl 0x8(%rax), %ecx movq 0x40(%rsp), %rax movl %ecx, 0x8(%rax) movq 0x48(%rsp), %rax movl 
0x4(%rax), %ecx movq 0x48(%rsp), %rax subl 0xc(%rax), %ecx movq 0x40(%rsp), %rax movl %ecx, 0xc(%rax) movq 0x48(%rsp), %rax movl 0x10(%rax), %ecx movq 0x48(%rsp), %rax addl 0x18(%rax), %ecx movq 0x40(%rsp), %rax movl %ecx, 0x10(%rax) movq 0x48(%rsp), %rax movl 0x14(%rax), %ecx movq 0x48(%rsp), %rax addl 0x1c(%rax), %ecx movq 0x40(%rsp), %rax movl %ecx, 0x14(%rax) movq 0x48(%rsp), %rax movl 0x10(%rax), %ecx movq 0x48(%rsp), %rax subl 0x18(%rax), %ecx movq 0x40(%rsp), %rax movl %ecx, 0x18(%rax) movq 0x48(%rsp), %rax movl 0x14(%rax), %ecx movq 0x48(%rsp), %rax subl 0x1c(%rax), %ecx movq 0x40(%rsp), %rax movl %ecx, 0x1c(%rax) movq 0x48(%rsp), %rax movl 0x20(%rax), %ecx movq 0x48(%rsp), %rax addl 0x28(%rax), %ecx movq 0x40(%rsp), %rax movl %ecx, 0x20(%rax) movq 0x48(%rsp), %rax movl 0x24(%rax), %ecx movq 0x48(%rsp), %rax addl 0x2c(%rax), %ecx movq 0x40(%rsp), %rax movl %ecx, 0x24(%rax) movq 0x48(%rsp), %rax movl 0x20(%rax), %ecx movq 0x48(%rsp), %rax subl 0x28(%rax), %ecx movq 0x40(%rsp), %rax movl %ecx, 0x28(%rax) movq 0x48(%rsp), %rax movl 0x24(%rax), %ecx movq 0x48(%rsp), %rax subl 0x2c(%rax), %ecx movq 0x40(%rsp), %rax movl %ecx, 0x2c(%rax) movq 0x48(%rsp), %rax movl 0x30(%rax), %ecx movq 0x48(%rsp), %rax addl 0x38(%rax), %ecx movq 0x40(%rsp), %rax movl %ecx, 0x30(%rax) movq 0x48(%rsp), %rax movl 0x34(%rax), %ecx movq 0x48(%rsp), %rax addl 0x3c(%rax), %ecx movq 0x40(%rsp), %rax movl %ecx, 0x34(%rax) movq 0x48(%rsp), %rax movl 0x30(%rax), %ecx movq 0x48(%rsp), %rax subl 0x38(%rax), %ecx movq 0x40(%rsp), %rax movl %ecx, 0x38(%rax) movq 0x48(%rsp), %rax movl 0x34(%rax), %ecx movq 0x48(%rsp), %rax subl 0x3c(%rax), %ecx movq 0x40(%rsp), %rax movl %ecx, 0x3c(%rax) movl 0x54(%rsp), %edi movq 0x80(%rsp), %rsi movq 0x40(%rsp), %rdx movq 0x68(%rsp), %rax movslq 0x54(%rsp), %r8 movl $0x10, %ecx movsbl (%rax,%r8), %r8d callq 0x5be6a0 movl 0x54(%rsp), %eax addl $0x1, %eax movl %eax, 0x54(%rsp) movsbl 0x77(%rsp), %edi callq 0xa06b80 movq %rax, 0x58(%rsp) movq 0x78(%rsp), %rax 
movq %rax, 0x48(%rsp) movq %rsp, %rax movq %rax, 0x40(%rsp) movq 0x48(%rsp), %rax movl (%rax), %ecx movq 0x40(%rsp), %rax movl %ecx, (%rax) movq 0x48(%rsp), %rax movl 0x4(%rax), %ecx movq 0x40(%rsp), %rax movl %ecx, 0x4(%rax) movq 0x48(%rsp), %rax movl 0x8(%rax), %ecx movq 0x40(%rsp), %rax movl %ecx, 0x8(%rax) movq 0x48(%rsp), %rax movl 0xc(%rax), %ecx movq 0x40(%rsp), %rax movl %ecx, 0xc(%rax) movq 0x58(%rsp), %rax movl 0x40(%rax), %edi movq 0x48(%rsp), %rax movl 0x10(%rax), %esi movq 0x58(%rsp), %rax movl 0xc0(%rax), %edx movq 0x48(%rsp), %rax movl 0x14(%rax), %ecx movsbl 0x77(%rsp), %r8d callq 0xa06ba0 movl %eax, %ecx movq 0x40(%rsp), %rax movl %ecx, 0x10(%rax) movq 0x58(%rsp), %rax movl 0xc0(%rax), %edi movq 0x48(%rsp), %rax movl 0x10(%rax), %esi movq 0x58(%rsp), %rax xorl %edx, %edx subl 0x40(%rax), %edx movq 0x48(%rsp), %rax movl 0x14(%rax), %ecx movsbl 0x77(%rsp), %r8d callq 0xa06ba0 movl %eax, %ecx movq 0x40(%rsp), %rax movl %ecx, 0x14(%rax) movq 0x58(%rsp), %rax xorl %edi, %edi subl 0xc0(%rax), %edi movq 0x48(%rsp), %rax movl 0x18(%rax), %esi movq 0x58(%rsp), %rax movl 0x40(%rax), %edx movq 0x48(%rsp), %rax movl 0x1c(%rax), %ecx movsbl 0x77(%rsp), %r8d callq 0xa06ba0 movl %eax, %ecx movq 0x40(%rsp), %rax movl %ecx, 0x18(%rax) movq 0x58(%rsp), %rax movl 0x40(%rax), %edi movq 0x48(%rsp), %rax movl 0x18(%rax), %esi movq 0x58(%rsp), %rax movl 0xc0(%rax), %edx movq 0x48(%rsp), %rax movl 0x1c(%rax), %ecx movsbl 0x77(%rsp), %r8d callq 0xa06ba0 movl %eax, %ecx movq 0x40(%rsp), %rax movl %ecx, 0x1c(%rax) movq 0x48(%rsp), %rax movl 0x20(%rax), %ecx movq 0x40(%rsp), %rax movl %ecx, 0x20(%rax) movq 0x48(%rsp), %rax movl 0x24(%rax), %ecx movq 0x40(%rsp), %rax movl %ecx, 0x24(%rax) movq 0x48(%rsp), %rax movl 0x28(%rax), %ecx movq 0x40(%rsp), %rax movl %ecx, 0x28(%rax) movq 0x48(%rsp), %rax movl 0x2c(%rax), %ecx movq 0x40(%rsp), %rax movl %ecx, 0x2c(%rax) movq 0x58(%rsp), %rax movl 0x40(%rax), %edi movq 0x48(%rsp), %rax movl 0x30(%rax), %esi movq 0x58(%rsp), %rax movl 
0xc0(%rax), %edx movq 0x48(%rsp), %rax movl 0x34(%rax), %ecx movsbl 0x77(%rsp), %r8d callq 0xa06ba0 movl %eax, %ecx movq 0x40(%rsp), %rax movl %ecx, 0x30(%rax) movq 0x58(%rsp), %rax movl 0xc0(%rax), %edi movq 0x48(%rsp), %rax movl 0x30(%rax), %esi movq 0x58(%rsp), %rax xorl %edx, %edx subl 0x40(%rax), %edx movq 0x48(%rsp), %rax movl 0x34(%rax), %ecx movsbl 0x77(%rsp), %r8d callq 0xa06ba0 movl %eax, %ecx movq 0x40(%rsp), %rax movl %ecx, 0x34(%rax) movq 0x58(%rsp), %rax xorl %edi, %edi subl 0xc0(%rax), %edi movq 0x48(%rsp), %rax movl 0x38(%rax), %esi movq 0x58(%rsp), %rax movl 0x40(%rax), %edx movq 0x48(%rsp), %rax movl 0x3c(%rax), %ecx movsbl 0x77(%rsp), %r8d callq 0xa06ba0 movl %eax, %ecx movq 0x40(%rsp), %rax movl %ecx, 0x38(%rax) movq 0x58(%rsp), %rax movl 0x40(%rax), %edi movq 0x48(%rsp), %rax movl 0x38(%rax), %esi movq 0x58(%rsp), %rax movl 0xc0(%rax), %edx movq 0x48(%rsp), %rax movl 0x3c(%rax), %ecx movsbl 0x77(%rsp), %r8d callq 0xa06ba0 movl %eax, %ecx movq 0x40(%rsp), %rax movl %ecx, 0x3c(%rax) movl 0x54(%rsp), %edi movq 0x80(%rsp), %rsi movq 0x40(%rsp), %rdx movq 0x68(%rsp), %rax movslq 0x54(%rsp), %r8 movl $0x10, %ecx movsbl (%rax,%r8), %r8d callq 0x5be6a0 movl 0x54(%rsp), %eax addl $0x1, %eax movl %eax, 0x54(%rsp) movq %rsp, %rax movq %rax, 0x48(%rsp) movq 0x78(%rsp), %rax movq %rax, 0x40(%rsp) movq 0x48(%rsp), %rax movl (%rax), %ecx movq 0x48(%rsp), %rax addl 0x10(%rax), %ecx movq 0x40(%rsp), %rax movl %ecx, (%rax) movq 0x48(%rsp), %rax movl 0x4(%rax), %ecx movq 0x48(%rsp), %rax addl 0x14(%rax), %ecx movq 0x40(%rsp), %rax movl %ecx, 0x4(%rax) movq 0x48(%rsp), %rax movl 0x8(%rax), %ecx movq 0x48(%rsp), %rax addl 0x18(%rax), %ecx movq 0x40(%rsp), %rax movl %ecx, 0x8(%rax) movq 0x48(%rsp), %rax movl 0xc(%rax), %ecx movq 0x48(%rsp), %rax addl 0x1c(%rax), %ecx movq 0x40(%rsp), %rax movl %ecx, 0xc(%rax) movq 0x48(%rsp), %rax movl (%rax), %ecx movq 0x48(%rsp), %rax subl 0x10(%rax), %ecx movq 0x40(%rsp), %rax movl %ecx, 0x10(%rax) movq 0x48(%rsp), %rax movl 
0x4(%rax), %ecx movq 0x48(%rsp), %rax subl 0x14(%rax), %ecx movq 0x40(%rsp), %rax movl %ecx, 0x14(%rax) movq 0x48(%rsp), %rax movl 0x8(%rax), %ecx movq 0x48(%rsp), %rax subl 0x18(%rax), %ecx movq 0x40(%rsp), %rax movl %ecx, 0x18(%rax) movq 0x48(%rsp), %rax movl 0xc(%rax), %ecx movq 0x48(%rsp), %rax subl 0x1c(%rax), %ecx movq 0x40(%rsp), %rax movl %ecx, 0x1c(%rax) movq 0x48(%rsp), %rax movl 0x20(%rax), %ecx movq 0x48(%rsp), %rax addl 0x30(%rax), %ecx movq 0x40(%rsp), %rax movl %ecx, 0x20(%rax) movq 0x48(%rsp), %rax movl 0x24(%rax), %ecx movq 0x48(%rsp), %rax addl 0x34(%rax), %ecx movq 0x40(%rsp), %rax movl %ecx, 0x24(%rax) movq 0x48(%rsp), %rax movl 0x28(%rax), %ecx movq 0x48(%rsp), %rax addl 0x38(%rax), %ecx movq 0x40(%rsp), %rax movl %ecx, 0x28(%rax) movq 0x48(%rsp), %rax movl 0x2c(%rax), %ecx movq 0x48(%rsp), %rax addl 0x3c(%rax), %ecx movq 0x40(%rsp), %rax movl %ecx, 0x2c(%rax) movq 0x48(%rsp), %rax movl 0x20(%rax), %ecx movq 0x48(%rsp), %rax subl 0x30(%rax), %ecx movq 0x40(%rsp), %rax movl %ecx, 0x30(%rax) movq 0x48(%rsp), %rax movl 0x24(%rax), %ecx movq 0x48(%rsp), %rax subl 0x34(%rax), %ecx movq 0x40(%rsp), %rax movl %ecx, 0x34(%rax) movq 0x48(%rsp), %rax movl 0x28(%rax), %ecx movq 0x48(%rsp), %rax subl 0x38(%rax), %ecx movq 0x40(%rsp), %rax movl %ecx, 0x38(%rax) movq 0x48(%rsp), %rax movl 0x2c(%rax), %ecx movq 0x48(%rsp), %rax subl 0x3c(%rax), %ecx movq 0x40(%rsp), %rax movl %ecx, 0x3c(%rax) movl 0x54(%rsp), %edi movq 0x80(%rsp), %rsi movq 0x40(%rsp), %rdx movq 0x68(%rsp), %rax movslq 0x54(%rsp), %r8 movl $0x10, %ecx movsbl (%rax,%r8), %r8d callq 0x5be6a0 movl 0x54(%rsp), %eax addl $0x1, %eax movl %eax, 0x54(%rsp) movsbl 0x77(%rsp), %edi callq 0xa06b80 movq %rax, 0x58(%rsp) movq 0x78(%rsp), %rax movq %rax, 0x48(%rsp) movq %rsp, %rax movq %rax, 0x40(%rsp) movq 0x48(%rsp), %rax movl (%rax), %ecx movq 0x40(%rsp), %rax movl %ecx, (%rax) movq 0x48(%rsp), %rax movl 0x4(%rax), %ecx movq 0x40(%rsp), %rax movl %ecx, 0x4(%rax) movq 0x48(%rsp), %rax movl 0x8(%rax), 
%ecx movq 0x40(%rsp), %rax movl %ecx, 0x8(%rax) movq 0x48(%rsp), %rax movl 0xc(%rax), %ecx movq 0x40(%rsp), %rax movl %ecx, 0xc(%rax) movq 0x48(%rsp), %rax movl 0x10(%rax), %ecx movq 0x40(%rsp), %rax movl %ecx, 0x10(%rax) movq 0x48(%rsp), %rax movl 0x14(%rax), %ecx movq 0x40(%rsp), %rax movl %ecx, 0x14(%rax) movq 0x48(%rsp), %rax movl 0x18(%rax), %ecx movq 0x40(%rsp), %rax movl %ecx, 0x18(%rax) movq 0x48(%rsp), %rax movl 0x1c(%rax), %ecx movq 0x40(%rsp), %rax movl %ecx, 0x1c(%rax) movq 0x58(%rsp), %rax movl 0x20(%rax), %edi movq 0x48(%rsp), %rax movl 0x20(%rax), %esi movq 0x58(%rsp), %rax movl 0xe0(%rax), %edx movq 0x48(%rsp), %rax movl 0x24(%rax), %ecx movsbl 0x77(%rsp), %r8d callq 0xa06ba0 movl %eax, %ecx movq 0x40(%rsp), %rax movl %ecx, 0x20(%rax) movq 0x58(%rsp), %rax movl 0xe0(%rax), %edi movq 0x48(%rsp), %rax movl 0x20(%rax), %esi movq 0x58(%rsp), %rax xorl %edx, %edx subl 0x20(%rax), %edx movq 0x48(%rsp), %rax movl 0x24(%rax), %ecx movsbl 0x77(%rsp), %r8d callq 0xa06ba0 movl %eax, %ecx movq 0x40(%rsp), %rax movl %ecx, 0x24(%rax) movq 0x58(%rsp), %rax movl 0xa0(%rax), %edi movq 0x48(%rsp), %rax movl 0x28(%rax), %esi movq 0x58(%rsp), %rax movl 0x60(%rax), %edx movq 0x48(%rsp), %rax movl 0x2c(%rax), %ecx movsbl 0x77(%rsp), %r8d callq 0xa06ba0 movl %eax, %ecx movq 0x40(%rsp), %rax movl %ecx, 0x28(%rax) movq 0x58(%rsp), %rax movl 0x60(%rax), %edi movq 0x48(%rsp), %rax movl 0x28(%rax), %esi movq 0x58(%rsp), %rax xorl %edx, %edx subl 0xa0(%rax), %edx movq 0x48(%rsp), %rax movl 0x2c(%rax), %ecx movsbl 0x77(%rsp), %r8d callq 0xa06ba0 movl %eax, %ecx movq 0x40(%rsp), %rax movl %ecx, 0x2c(%rax) movq 0x58(%rsp), %rax xorl %edi, %edi subl 0xe0(%rax), %edi movq 0x48(%rsp), %rax movl 0x30(%rax), %esi movq 0x58(%rsp), %rax movl 0x20(%rax), %edx movq 0x48(%rsp), %rax movl 0x34(%rax), %ecx movsbl 0x77(%rsp), %r8d callq 0xa06ba0 movl %eax, %ecx movq 0x40(%rsp), %rax movl %ecx, 0x30(%rax) movq 0x58(%rsp), %rax movl 0x20(%rax), %edi movq 0x48(%rsp), %rax movl 0x30(%rax), %esi 
movq 0x58(%rsp), %rax movl 0xe0(%rax), %edx movq 0x48(%rsp), %rax movl 0x34(%rax), %ecx movsbl 0x77(%rsp), %r8d callq 0xa06ba0 movl %eax, %ecx movq 0x40(%rsp), %rax movl %ecx, 0x34(%rax) movq 0x58(%rsp), %rax xorl %edi, %edi subl 0x60(%rax), %edi movq 0x48(%rsp), %rax movl 0x38(%rax), %esi movq 0x58(%rsp), %rax movl 0xa0(%rax), %edx movq 0x48(%rsp), %rax movl 0x3c(%rax), %ecx movsbl 0x77(%rsp), %r8d callq 0xa06ba0 movl %eax, %ecx movq 0x40(%rsp), %rax movl %ecx, 0x38(%rax) movq 0x58(%rsp), %rax movl 0xa0(%rax), %edi movq 0x48(%rsp), %rax movl 0x38(%rax), %esi movq 0x58(%rsp), %rax movl 0x60(%rax), %edx movq 0x48(%rsp), %rax movl 0x3c(%rax), %ecx movsbl 0x77(%rsp), %r8d callq 0xa06ba0 movl %eax, %ecx movq 0x40(%rsp), %rax movl %ecx, 0x3c(%rax) movl 0x54(%rsp), %edi movq 0x80(%rsp), %rsi movq 0x40(%rsp), %rdx movq 0x68(%rsp), %rax movslq 0x54(%rsp), %r8 movl $0x10, %ecx movsbl (%rax,%r8), %r8d callq 0x5be6a0 movl 0x54(%rsp), %eax addl $0x1, %eax movl %eax, 0x54(%rsp) movq %rsp, %rax movq %rax, 0x48(%rsp) movq 0x78(%rsp), %rax movq %rax, 0x40(%rsp) movq 0x48(%rsp), %rax movl (%rax), %ecx movq 0x48(%rsp), %rax addl 0x20(%rax), %ecx movq 0x40(%rsp), %rax movl %ecx, (%rax) movq 0x48(%rsp), %rax movl 0x4(%rax), %ecx movq 0x48(%rsp), %rax addl 0x24(%rax), %ecx movq 0x40(%rsp), %rax movl %ecx, 0x4(%rax) movq 0x48(%rsp), %rax movl 0x8(%rax), %ecx movq 0x48(%rsp), %rax addl 0x28(%rax), %ecx movq 0x40(%rsp), %rax movl %ecx, 0x8(%rax) movq 0x48(%rsp), %rax movl 0xc(%rax), %ecx movq 0x48(%rsp), %rax addl 0x2c(%rax), %ecx movq 0x40(%rsp), %rax movl %ecx, 0xc(%rax) movq 0x48(%rsp), %rax movl 0x10(%rax), %ecx movq 0x48(%rsp), %rax addl 0x30(%rax), %ecx movq 0x40(%rsp), %rax movl %ecx, 0x10(%rax) movq 0x48(%rsp), %rax movl 0x14(%rax), %ecx movq 0x48(%rsp), %rax addl 0x34(%rax), %ecx movq 0x40(%rsp), %rax movl %ecx, 0x14(%rax) movq 0x48(%rsp), %rax movl 0x18(%rax), %ecx movq 0x48(%rsp), %rax addl 0x38(%rax), %ecx movq 0x40(%rsp), %rax movl %ecx, 0x18(%rax) movq 0x48(%rsp), %rax movl 
0x1c(%rax), %ecx movq 0x48(%rsp), %rax addl 0x3c(%rax), %ecx movq 0x40(%rsp), %rax movl %ecx, 0x1c(%rax) movq 0x48(%rsp), %rax movl (%rax), %ecx movq 0x48(%rsp), %rax subl 0x20(%rax), %ecx movq 0x40(%rsp), %rax movl %ecx, 0x20(%rax) movq 0x48(%rsp), %rax movl 0x4(%rax), %ecx movq 0x48(%rsp), %rax subl 0x24(%rax), %ecx movq 0x40(%rsp), %rax movl %ecx, 0x24(%rax) movq 0x48(%rsp), %rax movl 0x8(%rax), %ecx movq 0x48(%rsp), %rax subl 0x28(%rax), %ecx movq 0x40(%rsp), %rax movl %ecx, 0x28(%rax) movq 0x48(%rsp), %rax movl 0xc(%rax), %ecx movq 0x48(%rsp), %rax subl 0x2c(%rax), %ecx movq 0x40(%rsp), %rax movl %ecx, 0x2c(%rax) movq 0x48(%rsp), %rax movl 0x10(%rax), %ecx movq 0x48(%rsp), %rax subl 0x30(%rax), %ecx movq 0x40(%rsp), %rax movl %ecx, 0x30(%rax) movq 0x48(%rsp), %rax movl 0x14(%rax), %ecx movq 0x48(%rsp), %rax subl 0x34(%rax), %ecx movq 0x40(%rsp), %rax movl %ecx, 0x34(%rax) movq 0x48(%rsp), %rax movl 0x18(%rax), %ecx movq 0x48(%rsp), %rax subl 0x38(%rax), %ecx movq 0x40(%rsp), %rax movl %ecx, 0x38(%rax) movq 0x48(%rsp), %rax movl 0x1c(%rax), %ecx movq 0x48(%rsp), %rax subl 0x3c(%rax), %ecx movq 0x40(%rsp), %rax movl %ecx, 0x3c(%rax) movl 0x54(%rsp), %edi movq 0x80(%rsp), %rsi movq 0x40(%rsp), %rdx movq 0x68(%rsp), %rax movslq 0x54(%rsp), %r8 movl $0x10, %ecx movsbl (%rax,%r8), %r8d callq 0x5be6a0 movl 0x54(%rsp), %eax addl $0x1, %eax movl %eax, 0x54(%rsp) movsbl 0x77(%rsp), %edi callq 0xa06b80 movq %rax, 0x58(%rsp) movq 0x78(%rsp), %rax movq %rax, 0x48(%rsp) movq %rsp, %rax movq %rax, 0x40(%rsp) movq 0x58(%rsp), %rax movl 0x8(%rax), %edi movq 0x48(%rsp), %rax movl (%rax), %esi movq 0x58(%rsp), %rax movl 0xf8(%rax), %edx movq 0x48(%rsp), %rax movl 0x4(%rax), %ecx movsbl 0x77(%rsp), %r8d callq 0xa06ba0 movl %eax, %ecx movq 0x40(%rsp), %rax movl %ecx, (%rax) movq 0x58(%rsp), %rax movl 0xf8(%rax), %edi movq 0x48(%rsp), %rax movl (%rax), %esi movq 0x58(%rsp), %rax xorl %edx, %edx subl 0x8(%rax), %edx movq 0x48(%rsp), %rax movl 0x4(%rax), %ecx movsbl 0x77(%rsp), %r8d 
callq 0xa06ba0 movl %eax, %ecx movq 0x40(%rsp), %rax movl %ecx, 0x4(%rax) movq 0x58(%rsp), %rax movl 0x28(%rax), %edi movq 0x48(%rsp), %rax movl 0x8(%rax), %esi movq 0x58(%rsp), %rax movl 0xd8(%rax), %edx movq 0x48(%rsp), %rax movl 0xc(%rax), %ecx movsbl 0x77(%rsp), %r8d callq 0xa06ba0 movl %eax, %ecx movq 0x40(%rsp), %rax movl %ecx, 0x8(%rax) movq 0x58(%rsp), %rax movl 0xd8(%rax), %edi movq 0x48(%rsp), %rax movl 0x8(%rax), %esi movq 0x58(%rsp), %rax xorl %edx, %edx subl 0x28(%rax), %edx movq 0x48(%rsp), %rax movl 0xc(%rax), %ecx movsbl 0x77(%rsp), %r8d callq 0xa06ba0 movl %eax, %ecx movq 0x40(%rsp), %rax movl %ecx, 0xc(%rax) movq 0x58(%rsp), %rax movl 0x48(%rax), %edi movq 0x48(%rsp), %rax movl 0x10(%rax), %esi movq 0x58(%rsp), %rax movl 0xb8(%rax), %edx movq 0x48(%rsp), %rax movl 0x14(%rax), %ecx movsbl 0x77(%rsp), %r8d callq 0xa06ba0 movl %eax, %ecx movq 0x40(%rsp), %rax movl %ecx, 0x10(%rax) movq 0x58(%rsp), %rax movl 0xb8(%rax), %edi movq 0x48(%rsp), %rax movl 0x10(%rax), %esi movq 0x58(%rsp), %rax xorl %edx, %edx subl 0x48(%rax), %edx movq 0x48(%rsp), %rax movl 0x14(%rax), %ecx movsbl 0x77(%rsp), %r8d callq 0xa06ba0 movl %eax, %ecx movq 0x40(%rsp), %rax movl %ecx, 0x14(%rax) movq 0x58(%rsp), %rax movl 0x68(%rax), %edi movq 0x48(%rsp), %rax movl 0x18(%rax), %esi movq 0x58(%rsp), %rax movl 0x98(%rax), %edx movq 0x48(%rsp), %rax movl 0x1c(%rax), %ecx movsbl 0x77(%rsp), %r8d callq 0xa06ba0 movl %eax, %ecx movq 0x40(%rsp), %rax movl %ecx, 0x18(%rax) movq 0x58(%rsp), %rax movl 0x98(%rax), %edi movq 0x48(%rsp), %rax movl 0x18(%rax), %esi movq 0x58(%rsp), %rax xorl %edx, %edx subl 0x68(%rax), %edx movq 0x48(%rsp), %rax movl 0x1c(%rax), %ecx movsbl 0x77(%rsp), %r8d callq 0xa06ba0 movl %eax, %ecx movq 0x40(%rsp), %rax movl %ecx, 0x1c(%rax) movq 0x58(%rsp), %rax movl 0x88(%rax), %edi movq 0x48(%rsp), %rax movl 0x20(%rax), %esi movq 0x58(%rsp), %rax movl 0x78(%rax), %edx movq 0x48(%rsp), %rax movl 0x24(%rax), %ecx movsbl 0x77(%rsp), %r8d callq 0xa06ba0 movl %eax, %ecx 
movq 0x40(%rsp), %rax movl %ecx, 0x20(%rax) movq 0x58(%rsp), %rax movl 0x78(%rax), %edi movq 0x48(%rsp), %rax movl 0x20(%rax), %esi movq 0x58(%rsp), %rax xorl %edx, %edx subl 0x88(%rax), %edx movq 0x48(%rsp), %rax movl 0x24(%rax), %ecx movsbl 0x77(%rsp), %r8d callq 0xa06ba0 movl %eax, %ecx movq 0x40(%rsp), %rax movl %ecx, 0x24(%rax) movq 0x58(%rsp), %rax movl 0xa8(%rax), %edi movq 0x48(%rsp), %rax movl 0x28(%rax), %esi movq 0x58(%rsp), %rax movl 0x58(%rax), %edx movq 0x48(%rsp), %rax movl 0x2c(%rax), %ecx movsbl 0x77(%rsp), %r8d callq 0xa06ba0 movl %eax, %ecx movq 0x40(%rsp), %rax movl %ecx, 0x28(%rax) movq 0x58(%rsp), %rax movl 0x58(%rax), %edi movq 0x48(%rsp), %rax movl 0x28(%rax), %esi movq 0x58(%rsp), %rax xorl %edx, %edx subl 0xa8(%rax), %edx movq 0x48(%rsp), %rax movl 0x2c(%rax), %ecx movsbl 0x77(%rsp), %r8d callq 0xa06ba0 movl %eax, %ecx movq 0x40(%rsp), %rax movl %ecx, 0x2c(%rax) movq 0x58(%rsp), %rax movl 0xc8(%rax), %edi movq 0x48(%rsp), %rax movl 0x30(%rax), %esi movq 0x58(%rsp), %rax movl 0x38(%rax), %edx movq 0x48(%rsp), %rax movl 0x34(%rax), %ecx movsbl 0x77(%rsp), %r8d callq 0xa06ba0 movl %eax, %ecx movq 0x40(%rsp), %rax movl %ecx, 0x30(%rax) movq 0x58(%rsp), %rax movl 0x38(%rax), %edi movq 0x48(%rsp), %rax movl 0x30(%rax), %esi movq 0x58(%rsp), %rax xorl %edx, %edx subl 0xc8(%rax), %edx movq 0x48(%rsp), %rax movl 0x34(%rax), %ecx movsbl 0x77(%rsp), %r8d callq 0xa06ba0 movl %eax, %ecx movq 0x40(%rsp), %rax movl %ecx, 0x34(%rax) movq 0x58(%rsp), %rax movl 0xe8(%rax), %edi movq 0x48(%rsp), %rax movl 0x38(%rax), %esi movq 0x58(%rsp), %rax movl 0x18(%rax), %edx movq 0x48(%rsp), %rax movl 0x3c(%rax), %ecx movsbl 0x77(%rsp), %r8d callq 0xa06ba0 movl %eax, %ecx movq 0x40(%rsp), %rax movl %ecx, 0x38(%rax) movq 0x58(%rsp), %rax movl 0x18(%rax), %edi movq 0x48(%rsp), %rax movl 0x38(%rax), %esi movq 0x58(%rsp), %rax xorl %edx, %edx subl 0xe8(%rax), %edx movq 0x48(%rsp), %rax movl 0x3c(%rax), %ecx movsbl 0x77(%rsp), %r8d callq 0xa06ba0 movl %eax, %ecx movq 
0x40(%rsp), %rax movl %ecx, 0x3c(%rax) movl 0x54(%rsp), %edi movq 0x80(%rsp), %rsi movq 0x40(%rsp), %rdx movq 0x68(%rsp), %rax movslq 0x54(%rsp), %r8 movl $0x10, %ecx movsbl (%rax,%r8), %r8d callq 0x5be6a0 movl 0x54(%rsp), %eax addl $0x1, %eax movl %eax, 0x54(%rsp) movq %rsp, %rax movq %rax, 0x48(%rsp) movq 0x78(%rsp), %rax movq %rax, 0x40(%rsp) movq 0x48(%rsp), %rax movl 0x4(%rax), %ecx movq 0x40(%rsp), %rax movl %ecx, (%rax) movq 0x48(%rsp), %rax movl 0x38(%rax), %ecx movq 0x40(%rsp), %rax movl %ecx, 0x4(%rax) movq 0x48(%rsp), %rax movl 0xc(%rax), %ecx movq 0x40(%rsp), %rax movl %ecx, 0x8(%rax) movq 0x48(%rsp), %rax movl 0x30(%rax), %ecx movq 0x40(%rsp), %rax movl %ecx, 0xc(%rax) movq 0x48(%rsp), %rax movl 0x14(%rax), %ecx movq 0x40(%rsp), %rax movl %ecx, 0x10(%rax) movq 0x48(%rsp), %rax movl 0x28(%rax), %ecx movq 0x40(%rsp), %rax movl %ecx, 0x14(%rax) movq 0x48(%rsp), %rax movl 0x1c(%rax), %ecx movq 0x40(%rsp), %rax movl %ecx, 0x18(%rax) movq 0x48(%rsp), %rax movl 0x20(%rax), %ecx movq 0x40(%rsp), %rax movl %ecx, 0x1c(%rax) movq 0x48(%rsp), %rax movl 0x24(%rax), %ecx movq 0x40(%rsp), %rax movl %ecx, 0x20(%rax) movq 0x48(%rsp), %rax movl 0x18(%rax), %ecx movq 0x40(%rsp), %rax movl %ecx, 0x24(%rax) movq 0x48(%rsp), %rax movl 0x2c(%rax), %ecx movq 0x40(%rsp), %rax movl %ecx, 0x28(%rax) movq 0x48(%rsp), %rax movl 0x10(%rax), %ecx movq 0x40(%rsp), %rax movl %ecx, 0x2c(%rax) movq 0x48(%rsp), %rax movl 0x34(%rax), %ecx movq 0x40(%rsp), %rax movl %ecx, 0x30(%rax) movq 0x48(%rsp), %rax movl 0x8(%rax), %ecx movq 0x40(%rsp), %rax movl %ecx, 0x34(%rax) movq 0x48(%rsp), %rax movl 0x3c(%rax), %ecx movq 0x40(%rsp), %rax movl %ecx, 0x38(%rax) movq 0x48(%rsp), %rax movl (%rax), %ecx movq 0x40(%rsp), %rax movl %ecx, 0x3c(%rax) movl 0x54(%rsp), %edi movq 0x80(%rsp), %rsi movq 0x40(%rsp), %rdx movq 0x68(%rsp), %rax movslq 0x54(%rsp), %r8 movl $0x10, %ecx movsbl (%rax,%r8), %r8d callq 0x5be6a0 addq $0x88, %rsp retq nopw %cs:(%rax,%rax)
/m-ab-s[P]aom/av1/encoder/av1_fwd_txfm1d.c
round_shift_16bit
// Round-shift an array of 16-bit SSE2 vectors in place.
// A negative `bit` means shift right by |bit| with rounding-to-nearest
// (add half an LSB before the arithmetic shift); a positive `bit` means
// shift left; zero leaves the data untouched.
static inline void round_shift_16bit(__m128i *in, int size, int bit) {
  if (bit < 0) {
    const int shift = -bit;
    // Rounding offset: half of the quantization step being dropped.
    const __m128i rounding = _mm_set1_epi16((int16_t)(1 << (shift - 1)));
    for (int idx = 0; idx < size; ++idx) {
      const __m128i biased = _mm_adds_epi16(in[idx], rounding);
      in[idx] = _mm_srai_epi16(biased, shift);
    }
  } else if (bit > 0) {
    for (int idx = 0; idx < size; ++idx) {
      in[idx] = _mm_slli_epi16(in[idx], bit);
    }
  }
}
subq $0x48, %rsp movq %rdi, -0x58(%rsp) movl %esi, -0x5c(%rsp) movl %edx, -0x60(%rsp) cmpl $0x0, -0x60(%rsp) jge 0xa400bc movl -0x60(%rsp), %eax negl %eax movl %eax, -0x60(%rsp) movb -0x60(%rsp), %cl decb %cl movl $0x1, %eax shll %cl, %eax movw %ax, -0x22(%rsp) movw -0x22(%rsp), %ax movw %ax, -0x7a(%rsp) movw %ax, 0xa(%rsp) movw %ax, 0x8(%rsp) movw %ax, 0x6(%rsp) movw %ax, 0x4(%rsp) movw %ax, 0x2(%rsp) movw %ax, (%rsp) movw %ax, -0x2(%rsp) movw %ax, -0x4(%rsp) movzwl 0xa(%rsp), %eax movd %eax, %xmm1 movzwl 0x8(%rsp), %eax movd %eax, %xmm0 punpcklwd %xmm1, %xmm0 # xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1],xmm0[2],xmm1[2],xmm0[3],xmm1[3] movzwl 0x6(%rsp), %eax movd %eax, %xmm2 movzwl 0x4(%rsp), %eax movd %eax, %xmm1 punpcklwd %xmm2, %xmm1 # xmm1 = xmm1[0],xmm2[0],xmm1[1],xmm2[1],xmm1[2],xmm2[2],xmm1[3],xmm2[3] punpckldq %xmm0, %xmm1 # xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1] movzwl 0x2(%rsp), %eax movd %eax, %xmm0 movzwl (%rsp), %eax movd %eax, %xmm2 punpcklwd %xmm0, %xmm2 # xmm2 = xmm2[0],xmm0[0],xmm2[1],xmm0[1],xmm2[2],xmm0[2],xmm2[3],xmm0[3] movzwl -0x2(%rsp), %eax movd %eax, %xmm3 movzwl -0x4(%rsp), %eax movd %eax, %xmm0 punpcklwd %xmm3, %xmm0 # xmm0 = xmm0[0],xmm3[0],xmm0[1],xmm3[1],xmm0[2],xmm3[2],xmm0[3],xmm3[3] punpckldq %xmm2, %xmm0 # xmm0 = xmm0[0],xmm2[0],xmm0[1],xmm2[1] punpcklqdq %xmm1, %xmm0 # xmm0 = xmm0[0],xmm1[0] movdqa %xmm0, -0x20(%rsp) movdqa -0x20(%rsp), %xmm0 movdqa %xmm0, -0x70(%rsp) movl $0x0, -0x74(%rsp) movl -0x74(%rsp), %eax cmpl -0x5c(%rsp), %eax jge 0xa400ba movq -0x58(%rsp), %rax movslq -0x74(%rsp), %rcx shlq $0x4, %rcx addq %rcx, %rax movdqa (%rax), %xmm1 movdqa -0x70(%rsp), %xmm0 movdqa %xmm1, -0x40(%rsp) movdqa %xmm0, -0x50(%rsp) movdqa -0x40(%rsp), %xmm0 movdqa -0x50(%rsp), %xmm1 paddsw %xmm1, %xmm0 movq -0x58(%rsp), %rax movslq -0x74(%rsp), %rcx shlq $0x4, %rcx addq %rcx, %rax movdqa %xmm0, (%rax) movq -0x58(%rsp), %rax movslq -0x74(%rsp), %rcx shlq $0x4, %rcx addq %rcx, %rax movdqa (%rax), %xmm0 movl -0x60(%rsp), %eax movdqa %xmm0, 
0x10(%rsp) movl %eax, 0xc(%rsp) movdqa 0x10(%rsp), %xmm0 movl 0xc(%rsp), %eax movd %eax, %xmm1 psraw %xmm1, %xmm0 movq -0x58(%rsp), %rax movslq -0x74(%rsp), %rcx shlq $0x4, %rcx addq %rcx, %rax movdqa %xmm0, (%rax) movl -0x74(%rsp), %eax addl $0x1, %eax movl %eax, -0x74(%rsp) jmp 0xa40006 jmp 0xa40130 cmpl $0x0, -0x60(%rsp) jle 0xa4012e movl $0x0, -0x78(%rsp) movl -0x78(%rsp), %eax cmpl -0x5c(%rsp), %eax jge 0xa4012c movq -0x58(%rsp), %rax movslq -0x78(%rsp), %rcx shlq $0x4, %rcx addq %rcx, %rax movdqa (%rax), %xmm0 movl -0x60(%rsp), %eax movdqa %xmm0, 0x30(%rsp) movl %eax, 0x2c(%rsp) movdqa 0x30(%rsp), %xmm0 movl 0x2c(%rsp), %eax movd %eax, %xmm1 psllw %xmm1, %xmm0 movq -0x58(%rsp), %rax movslq -0x78(%rsp), %rcx shlq $0x4, %rcx addq %rcx, %rax movdqa %xmm0, (%rax) movl -0x78(%rsp), %eax addl $0x1, %eax movl %eax, -0x78(%rsp) jmp 0xa400cb jmp 0xa4012e jmp 0xa40130 addq $0x48, %rsp retq nopw %cs:(%rax,%rax)
/m-ab-s[P]aom/av1/common/x86/av1_txfm_sse2.h
transpose_16bit_4x4
// Transpose a 4x4 matrix of 16-bit values held in the low 64 bits of
// in[0..3].  Column c of the input ends up in the low 64 bits of out[c];
// the high 64 bits of the outputs are not meaningful and callers must
// ignore them.
static inline void transpose_16bit_4x4(const __m128i *const in,
                                       __m128i *const out) {
  // Stage 1: interleave 16-bit lanes of row pairs.
  //   lo01: 00 10 01 11 02 12 03 13
  //   lo23: 20 30 21 31 22 32 23 33
  const __m128i lo01 = _mm_unpacklo_epi16(in[0], in[1]);
  const __m128i lo23 = _mm_unpacklo_epi16(in[2], in[3]);
  // Stage 2: interleave 32-bit pairs; columns 0/1 land in out[0], and
  // out[1] is out[0] shifted so column 1 occupies the low half.  Same
  // pattern for columns 2/3 via the high-unpack.
  out[0] = _mm_unpacklo_epi32(lo01, lo23);
  out[1] = _mm_srli_si128(out[0], 8);
  out[2] = _mm_unpackhi_epi32(lo01, lo23);
  out[3] = _mm_srli_si128(out[2], 8);
}
subq $0x38, %rsp movq %rdi, -0x58(%rsp) movq %rsi, -0x60(%rsp) movq -0x58(%rsp), %rax movaps (%rax), %xmm1 movaps 0x10(%rax), %xmm0 movaps %xmm1, -0x20(%rsp) movaps %xmm0, -0x30(%rsp) movaps -0x20(%rsp), %xmm0 movaps -0x30(%rsp), %xmm1 punpcklwd %xmm1, %xmm0 # xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1],xmm0[2],xmm1[2],xmm0[3],xmm1[3] movaps %xmm0, -0x70(%rsp) movq -0x58(%rsp), %rax movaps 0x20(%rax), %xmm1 movaps 0x30(%rax), %xmm0 movaps %xmm1, -0x40(%rsp) movaps %xmm0, -0x50(%rsp) movaps -0x40(%rsp), %xmm0 movaps -0x50(%rsp), %xmm1 punpcklwd %xmm1, %xmm0 # xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1],xmm0[2],xmm1[2],xmm0[3],xmm1[3] movaps %xmm0, -0x80(%rsp) movaps -0x70(%rsp), %xmm1 movaps -0x80(%rsp), %xmm0 movaps %xmm1, (%rsp) movaps %xmm0, -0x10(%rsp) movaps (%rsp), %xmm0 movaps -0x10(%rsp), %xmm1 punpckldq %xmm1, %xmm0 # xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1] movq -0x60(%rsp), %rax movaps %xmm0, (%rax) movq -0x60(%rsp), %rax movaps (%rax), %xmm0 psrldq $0x8, %xmm0 # xmm0 = xmm0[8,9,10,11,12,13,14,15],zero,zero,zero,zero,zero,zero,zero,zero movaps %xmm0, 0x10(%rax) movaps -0x70(%rsp), %xmm1 movaps -0x80(%rsp), %xmm0 movaps %xmm1, 0x20(%rsp) movaps %xmm0, 0x10(%rsp) movaps 0x20(%rsp), %xmm0 movaps 0x10(%rsp), %xmm1 punpckhdq %xmm1, %xmm0 # xmm0 = xmm0[2],xmm1[2],xmm0[3],xmm1[3] movq -0x60(%rsp), %rax movaps %xmm0, 0x20(%rax) movq -0x60(%rsp), %rax movaps 0x20(%rax), %xmm0 psrldq $0x8, %xmm0 # xmm0 = xmm0[8,9,10,11,12,13,14,15],zero,zero,zero,zero,zero,zero,zero,zero movq -0x60(%rsp), %rax movdqa %xmm0, 0x30(%rax) addq $0x38, %rsp retq nopw %cs:(%rax,%rax)
/m-ab-s[P]aom/aom_dsp/x86/transpose_sse2.h
av1_lowbd_fwd_txfm2d_4x8_sse2
void av1_lowbd_fwd_txfm2d_4x8_sse2(const int16_t *input, int32_t *output, int stride, TX_TYPE tx_type, int bd) { (void)stride; (void)bd; __m128i buf0[8], buf1[8], *buf; const int8_t *shift = av1_fwd_txfm_shift_ls[TX_4X8]; const int txw_idx = get_txw_idx(TX_4X8); const int txh_idx = get_txh_idx(TX_4X8); const int cos_bit_col = av1_fwd_cos_bit_col[txw_idx][txh_idx]; const int cos_bit_row = av1_fwd_cos_bit_row[txw_idx][txh_idx]; const int width = 4; const int height = 8; const transform_1d_sse2 col_txfm = col_txfm4x8_arr[tx_type]; const transform_1d_sse2 row_txfm = row_txfm8x4_arr[tx_type]; int ud_flip, lr_flip; get_flip_cfg(tx_type, &ud_flip, &lr_flip); if (ud_flip) { load_buffer_16bit_to_16bit_w4_flip(input, stride, buf0, height); } else { load_buffer_16bit_to_16bit_w4(input, stride, buf0, height); } round_shift_16bit(buf0, height, shift[0]); col_txfm(buf0, buf0, cos_bit_col); round_shift_16bit(buf0, height, shift[1]); transpose_16bit_4x8(buf0, buf1); if (lr_flip) { buf = buf0; flip_buf_sse2(buf1, buf, width); } else { buf = buf1; } row_txfm(buf, buf, cos_bit_row); round_shift_16bit(buf, width, shift[2]); store_rect_buffer_16bit_to_32bit_w8(buf, output, height, width); }
subq $0x168, %rsp # imm = 0x168 movb %cl, %al movq %rdi, 0x160(%rsp) movq %rsi, 0x158(%rsp) movl %edx, 0x154(%rsp) movb %al, 0x153(%rsp) movl %r8d, 0x14c(%rsp) leaq 0x1657ca(%rip), %rax # 0xba5b10 movq 0x28(%rax), %rax movq %rax, 0x30(%rsp) movl $0x5, %edi callq 0xa3fd00 movl %eax, 0x2c(%rsp) movl $0x5, %edi callq 0xa3fd20 movl %eax, 0x28(%rsp) movslq 0x2c(%rsp), %rcx leaq 0xd6c99(%rip), %rax # 0xb17010 imulq $0x5, %rcx, %rcx addq %rcx, %rax movslq 0x28(%rsp), %rcx movsbl (%rax,%rcx), %eax movl %eax, 0x24(%rsp) movslq 0x2c(%rsp), %rcx leaq 0xd6c99(%rip), %rax # 0xb17030 imulq $0x5, %rcx, %rcx addq %rcx, %rax movslq 0x28(%rsp), %rcx movsbl (%rax,%rcx), %eax movl %eax, 0x20(%rsp) movl $0x4, 0x1c(%rsp) movl $0x8, 0x18(%rsp) movzbl 0x153(%rsp), %eax movl %eax, %ecx leaq 0x15e4d4(%rip), %rax # 0xb9e8a0 movq (%rax,%rcx,8), %rax movq %rax, 0x10(%rsp) movzbl 0x153(%rsp), %eax movl %eax, %ecx leaq 0x15e53a(%rip), %rax # 0xb9e920 movq (%rax,%rcx,8), %rax movq %rax, 0x8(%rsp) leaq 0x4(%rsp), %rsi movq %rsp, %rdx movzbl 0x153(%rsp), %edi callq 0xa3fd40 cmpl $0x0, 0x4(%rsp) je 0xa4042e movq 0x160(%rsp), %rdi movl 0x154(%rsp), %esi leaq 0xc0(%rsp), %rdx movl $0x8, %ecx callq 0xa3fe10 jmp 0xa4044f movq 0x160(%rsp), %rdi movl 0x154(%rsp), %esi leaq 0xc0(%rsp), %rdx movl $0x8, %ecx callq 0xa3fea0 leaq 0xc0(%rsp), %rdi movq 0x30(%rsp), %rax movsbl (%rax), %edx movl $0x8, %esi callq 0xa3ff20 movq 0x10(%rsp), %rax leaq 0xc0(%rsp), %rdi leaq 0xc0(%rsp), %rsi movl 0x24(%rsp), %ecx movsbl %cl, %edx callq *%rax leaq 0xc0(%rsp), %rdi movq 0x30(%rsp), %rax movsbl 0x1(%rax), %edx movl $0x8, %esi callq 0xa3ff20 leaq 0xc0(%rsp), %rdi leaq 0x40(%rsp), %rsi callq 0xa40540 cmpl $0x0, (%rsp) je 0xa404dd leaq 0xc0(%rsp), %rax movq %rax, 0x38(%rsp) leaq 0x40(%rsp), %rdi movq 0x38(%rsp), %rsi movl $0x4, %edx callq 0xa40230 jmp 0xa404e7 leaq 0x40(%rsp), %rax movq %rax, 0x38(%rsp) movq 0x8(%rsp), %rax movq 0x38(%rsp), %rdi movq 0x38(%rsp), %rsi movl 0x20(%rsp), %ecx movsbl %cl, %edx callq *%rax movq 
0x38(%rsp), %rdi movq 0x30(%rsp), %rax movsbl 0x2(%rax), %edx movl $0x4, %esi callq 0xa3ff20 movq 0x38(%rsp), %rdi movq 0x158(%rsp), %rsi movl $0x8, %edx movl $0x4, %ecx callq 0xa407b0 addq $0x168, %rsp # imm = 0x168 retq nopl (%rax,%rax)
/m-ab-s[P]aom/av1/encoder/x86/av1_fwd_txfm_sse2.c
av1_lowbd_fwd_txfm2d_4x16_sse2
void av1_lowbd_fwd_txfm2d_4x16_sse2(const int16_t *input, int32_t *output, int stride, TX_TYPE tx_type, int bd) { (void)bd; __m128i buf0[16], buf1[16]; const int8_t *shift = av1_fwd_txfm_shift_ls[TX_4X16]; const int txw_idx = get_txw_idx(TX_4X16); const int txh_idx = get_txh_idx(TX_4X16); const int cos_bit_col = av1_fwd_cos_bit_col[txw_idx][txh_idx]; const int cos_bit_row = av1_fwd_cos_bit_row[txw_idx][txh_idx]; const int width = 4; const int height = 16; const transform_1d_sse2 col_txfm = col_txfm8x16_arr[tx_type]; const transform_1d_sse2 row_txfm = row_txfm8x4_arr[tx_type]; int ud_flip, lr_flip; get_flip_cfg(tx_type, &ud_flip, &lr_flip); if (ud_flip) { load_buffer_16bit_to_16bit_w4_flip(input, stride, buf0, height); } else { load_buffer_16bit_to_16bit_w4(input, stride, buf0, height); } round_shift_16bit(buf0, height, shift[0]); col_txfm(buf0, buf0, cos_bit_col); round_shift_16bit(buf0, height, shift[1]); transpose_16bit_4x8(buf0, buf1); transpose_16bit_4x8(buf0 + 8, buf1 + 8); for (int i = 0; i < 2; i++) { __m128i *buf; if (lr_flip) { buf = buf0; flip_buf_sse2(buf1 + 8 * i, buf, width); } else { buf = buf1 + 8 * i; } row_txfm(buf, buf, cos_bit_row); round_shift_16bit(buf, width, shift[2]); store_buffer_16bit_to_32bit_w8(buf, output + 8 * i, height, width); } }
subq $0x278, %rsp # imm = 0x278 movb %cl, %al movq %rdi, 0x270(%rsp) movq %rsi, 0x268(%rsp) movl %edx, 0x264(%rsp) movb %al, 0x263(%rsp) movl %r8d, 0x25c(%rsp) leaq 0x1652ba(%rip), %rax # 0xba5b10 movq 0x68(%rax), %rax movq %rax, 0x48(%rsp) movl $0xd, %edi callq 0xa3fd00 movl %eax, 0x44(%rsp) movl $0xd, %edi callq 0xa3fd20 movl %eax, 0x40(%rsp) movslq 0x44(%rsp), %rcx leaq 0xd6789(%rip), %rax # 0xb17010 imulq $0x5, %rcx, %rcx addq %rcx, %rax movslq 0x40(%rsp), %rcx movsbl (%rax,%rcx), %eax movl %eax, 0x3c(%rsp) movslq 0x44(%rsp), %rcx leaq 0xd6789(%rip), %rax # 0xb17030 imulq $0x5, %rcx, %rcx addq %rcx, %rax movslq 0x40(%rsp), %rcx movsbl (%rax,%rcx), %eax movl %eax, 0x38(%rsp) movl $0x4, 0x34(%rsp) movl $0x10, 0x30(%rsp) movzbl 0x263(%rsp), %eax movl %eax, %ecx leaq 0x15e0c4(%rip), %rax # 0xb9e9a0 movq (%rax,%rcx,8), %rax movq %rax, 0x28(%rsp) movzbl 0x263(%rsp), %eax movl %eax, %ecx leaq 0x15e02a(%rip), %rax # 0xb9e920 movq (%rax,%rcx,8), %rax movq %rax, 0x20(%rsp) leaq 0x1c(%rsp), %rsi leaq 0x18(%rsp), %rdx movzbl 0x263(%rsp), %edi callq 0xa3fd40 cmpl $0x0, 0x1c(%rsp) je 0xa40940 movq 0x270(%rsp), %rdi movl 0x264(%rsp), %esi leaq 0x150(%rsp), %rdx movl $0x10, %ecx callq 0xa3fe10 jmp 0xa40961 movq 0x270(%rsp), %rdi movl 0x264(%rsp), %esi leaq 0x150(%rsp), %rdx movl $0x10, %ecx callq 0xa3fea0 leaq 0x150(%rsp), %rdi movq 0x48(%rsp), %rax movsbl (%rax), %edx movl $0x10, %esi callq 0xa3ff20 movq 0x28(%rsp), %rax leaq 0x150(%rsp), %rdi leaq 0x150(%rsp), %rsi movl 0x3c(%rsp), %ecx movsbl %cl, %edx callq *%rax leaq 0x150(%rsp), %rdi movq 0x48(%rsp), %rax movsbl 0x1(%rax), %edx movl $0x10, %esi callq 0xa3ff20 leaq 0x150(%rsp), %rdi leaq 0x50(%rsp), %rsi callq 0xa40540 leaq 0x150(%rsp), %rdi addq $0x80, %rdi leaq 0x50(%rsp), %rsi addq $0x80, %rsi callq 0xa40540 movl $0x0, 0x14(%rsp) cmpl $0x2, 0x14(%rsp) jge 0xa40aba cmpl $0x0, 0x18(%rsp) je 0xa40a33 leaq 0x150(%rsp), %rax movq %rax, 0x8(%rsp) leaq 0x50(%rsp), %rdi movl 0x14(%rsp), %eax shll $0x3, %eax cltq shlq $0x4, 
%rax addq %rax, %rdi movq 0x8(%rsp), %rsi movl $0x4, %edx callq 0xa40230 jmp 0xa40a4e leaq 0x50(%rsp), %rax movl 0x14(%rsp), %ecx shll $0x3, %ecx movslq %ecx, %rcx shlq $0x4, %rcx addq %rcx, %rax movq %rax, 0x8(%rsp) movq 0x20(%rsp), %rax movq 0x8(%rsp), %rdi movq 0x8(%rsp), %rsi movl 0x38(%rsp), %ecx movsbl %cl, %edx callq *%rax movq 0x8(%rsp), %rdi movq 0x48(%rsp), %rax movsbl 0x2(%rax), %edx movl $0x4, %esi callq 0xa3ff20 movq 0x8(%rsp), %rdi movq 0x268(%rsp), %rsi movl 0x14(%rsp), %eax shll $0x3, %eax cltq shlq $0x2, %rax addq %rax, %rsi movl $0x10, %edx movl $0x4, %ecx callq 0xa40ad0 movl 0x14(%rsp), %eax addl $0x1, %eax movl %eax, 0x14(%rsp) jmp 0xa409ee addq $0x278, %rsp # imm = 0x278 retq nopw %cs:(%rax,%rax)
/m-ab-s[P]aom/av1/encoder/x86/av1_fwd_txfm_sse2.c
av1_lowbd_fwd_txfm2d_8x4_sse2
void av1_lowbd_fwd_txfm2d_8x4_sse2(const int16_t *input, int32_t *output, int stride, TX_TYPE tx_type, int bd) { (void)bd; __m128i buf0[8], buf1[8], *buf; const int8_t *shift = av1_fwd_txfm_shift_ls[TX_8X4]; const int txw_idx = get_txw_idx(TX_8X4); const int txh_idx = get_txh_idx(TX_8X4); const int cos_bit_col = av1_fwd_cos_bit_col[txw_idx][txh_idx]; const int cos_bit_row = av1_fwd_cos_bit_row[txw_idx][txh_idx]; const int width = 8; const int height = 4; const transform_1d_sse2 col_txfm = col_txfm8x4_arr[tx_type]; const transform_1d_sse2 row_txfm = row_txfm4x8_arr[tx_type]; int ud_flip, lr_flip; get_flip_cfg(tx_type, &ud_flip, &lr_flip); if (ud_flip) load_buffer_16bit_to_16bit_flip(input, stride, buf0, height); else load_buffer_16bit_to_16bit(input, stride, buf0, height); round_shift_16bit(buf0, height, shift[0]); col_txfm(buf0, buf0, cos_bit_col); round_shift_16bit(buf0, height, shift[1]); transpose_16bit_8x8(buf0, buf1); if (lr_flip) { buf = buf0; flip_buf_sse2(buf1, buf, width); } else { buf = buf1; } row_txfm(buf, buf, cos_bit_row); round_shift_16bit(buf, width, shift[2]); store_rect_buffer_16bit_to_32bit_w4(buf, output, height, width); }
subq $0x168, %rsp # imm = 0x168 movb %cl, %al movq %rdi, 0x160(%rsp) movq %rsi, 0x158(%rsp) movl %edx, 0x154(%rsp) movb %al, 0x153(%rsp) movl %r8d, 0x14c(%rsp) leaq 0x164f9a(%rip), %rax # 0xba5b10 movq 0x30(%rax), %rax movq %rax, 0x30(%rsp) movl $0x6, %edi callq 0xa3fd00 movl %eax, 0x2c(%rsp) movl $0x6, %edi callq 0xa3fd20 movl %eax, 0x28(%rsp) movslq 0x2c(%rsp), %rcx leaq 0xd6469(%rip), %rax # 0xb17010 imulq $0x5, %rcx, %rcx addq %rcx, %rax movslq 0x28(%rsp), %rcx movsbl (%rax,%rcx), %eax movl %eax, 0x24(%rsp) movslq 0x2c(%rsp), %rcx leaq 0xd6469(%rip), %rax # 0xb17030 imulq $0x5, %rcx, %rcx addq %rcx, %rax movslq 0x28(%rsp), %rcx movsbl (%rax,%rcx), %eax movl %eax, 0x20(%rsp) movl $0x8, 0x1c(%rsp) movl $0x4, 0x18(%rsp) movzbl 0x153(%rsp), %eax movl %eax, %ecx leaq 0x15de24(%rip), %rax # 0xb9ea20 movq (%rax,%rcx,8), %rax movq %rax, 0x10(%rsp) movzbl 0x153(%rsp), %eax movl %eax, %ecx leaq 0x15de8a(%rip), %rax # 0xb9eaa0 movq (%rax,%rcx,8), %rax movq %rax, 0x8(%rsp) leaq 0x4(%rsp), %rsi movq %rsp, %rdx movzbl 0x153(%rsp), %edi callq 0xa3fd40 cmpl $0x0, 0x4(%rsp) je 0xa40c5e movq 0x160(%rsp), %rdi movl 0x154(%rsp), %esi leaq 0xc0(%rsp), %rdx movl $0x4, %ecx callq 0xa40d70 jmp 0xa40c7f movq 0x160(%rsp), %rdi movl 0x154(%rsp), %esi leaq 0xc0(%rsp), %rdx movl $0x4, %ecx callq 0xa40df0 leaq 0xc0(%rsp), %rdi movq 0x30(%rsp), %rax movsbl (%rax), %edx movl $0x4, %esi callq 0xa3ff20 movq 0x10(%rsp), %rax leaq 0xc0(%rsp), %rdi leaq 0xc0(%rsp), %rsi movl 0x24(%rsp), %ecx movsbl %cl, %edx callq *%rax leaq 0xc0(%rsp), %rdi movq 0x30(%rsp), %rax movsbl 0x1(%rax), %edx movl $0x4, %esi callq 0xa3ff20 leaq 0xc0(%rsp), %rdi leaq 0x40(%rsp), %rsi callq 0xa40e60 cmpl $0x0, (%rsp) je 0xa40d0d leaq 0xc0(%rsp), %rax movq %rax, 0x38(%rsp) leaq 0x40(%rsp), %rdi movq 0x38(%rsp), %rsi movl $0x8, %edx callq 0xa40230 jmp 0xa40d17 leaq 0x40(%rsp), %rax movq %rax, 0x38(%rsp) movq 0x8(%rsp), %rax movq 0x38(%rsp), %rdi movq 0x38(%rsp), %rsi movl 0x20(%rsp), %ecx movsbl %cl, %edx callq *%rax movq 
0x38(%rsp), %rdi movq 0x30(%rsp), %rax movsbl 0x2(%rax), %edx movl $0x8, %esi callq 0xa3ff20 movq 0x38(%rsp), %rdi movq 0x158(%rsp), %rsi movl $0x4, %edx movl $0x8, %ecx callq 0xa413b0 addq $0x168, %rsp # imm = 0x168 retq nopl (%rax,%rax)
/m-ab-s[P]aom/av1/encoder/x86/av1_fwd_txfm_sse2.c
transpose_16bit_8x8
// Full 8x8 transpose of 16-bit lanes.  in[r] holds row r (r0..r7 lanes);
// after the call out[c] holds column c of the input.  The transpose is a
// three-stage interleave network: 16-bit unpacks of adjacent row pairs,
// 32-bit unpacks, then 64-bit unpacks.
static inline void transpose_16bit_8x8(const __m128i *const in,
                                       __m128i *const out) {
  // Stage 1: 16-bit interleave of row pairs (low and high halves).
  //   t0: 00 10 01 11 02 12 03 13     t4: 04 14 05 15 06 16 07 17
  //   t1: 20 30 21 31 22 32 23 33     t5: 24 34 25 35 26 36 27 37
  //   t2: 40 50 41 51 42 52 43 53     t6: 44 54 45 55 46 56 47 57
  //   t3: 60 70 61 71 62 72 63 73     t7: 64 74 65 75 66 76 67 77
  const __m128i t0 = _mm_unpacklo_epi16(in[0], in[1]);
  const __m128i t1 = _mm_unpacklo_epi16(in[2], in[3]);
  const __m128i t2 = _mm_unpacklo_epi16(in[4], in[5]);
  const __m128i t3 = _mm_unpacklo_epi16(in[6], in[7]);
  const __m128i t4 = _mm_unpackhi_epi16(in[0], in[1]);
  const __m128i t5 = _mm_unpackhi_epi16(in[2], in[3]);
  const __m128i t6 = _mm_unpackhi_epi16(in[4], in[5]);
  const __m128i t7 = _mm_unpackhi_epi16(in[6], in[7]);

  // Stage 2: 32-bit interleave.  Each u-vector now carries two complete
  // half-columns, e.g. u0 = 00 10 20 30 | 01 11 21 31.
  const __m128i u0 = _mm_unpacklo_epi32(t0, t1);  // cols 0/1, rows 0-3
  const __m128i u1 = _mm_unpacklo_epi32(t2, t3);  // cols 0/1, rows 4-7
  const __m128i u2 = _mm_unpacklo_epi32(t4, t5);  // cols 4/5, rows 0-3
  const __m128i u3 = _mm_unpacklo_epi32(t6, t7);  // cols 4/5, rows 4-7
  const __m128i u4 = _mm_unpackhi_epi32(t0, t1);  // cols 2/3, rows 0-3
  const __m128i u5 = _mm_unpackhi_epi32(t2, t3);  // cols 2/3, rows 4-7
  const __m128i u6 = _mm_unpackhi_epi32(t4, t5);  // cols 6/7, rows 0-3
  const __m128i u7 = _mm_unpackhi_epi32(t6, t7);  // cols 6/7, rows 4-7

  // Stage 3: 64-bit interleave joins the row-0-3 and row-4-7 halves of
  // each column: out[c] = c0 c1 c2 c3 c4 c5 c6 c7 (column c, all rows).
  out[0] = _mm_unpacklo_epi64(u0, u1);
  out[1] = _mm_unpackhi_epi64(u0, u1);
  out[2] = _mm_unpacklo_epi64(u4, u5);
  out[3] = _mm_unpackhi_epi64(u4, u5);
  out[4] = _mm_unpacklo_epi64(u2, u3);
  out[5] = _mm_unpackhi_epi64(u2, u3);
  out[6] = _mm_unpacklo_epi64(u6, u7);
  out[7] = _mm_unpackhi_epi64(u6, u7);
}
subq $0x398, %rsp # imm = 0x398 movq %rdi, 0x88(%rsp) movq %rsi, 0x80(%rsp) movq 0x88(%rsp), %rax movaps (%rax), %xmm1 movaps 0x10(%rax), %xmm0 movaps %xmm1, 0x100(%rsp) movaps %xmm0, 0xf0(%rsp) movaps 0x100(%rsp), %xmm0 movaps 0xf0(%rsp), %xmm1 punpcklwd %xmm1, %xmm0 # xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1],xmm0[2],xmm1[2],xmm0[3],xmm1[3] movaps %xmm0, 0x70(%rsp) movq 0x88(%rsp), %rax movaps 0x20(%rax), %xmm1 movaps 0x30(%rax), %xmm0 movaps %xmm1, 0xe0(%rsp) movaps %xmm0, 0xd0(%rsp) movaps 0xe0(%rsp), %xmm0 movaps 0xd0(%rsp), %xmm1 punpcklwd %xmm1, %xmm0 # xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1],xmm0[2],xmm1[2],xmm0[3],xmm1[3] movaps %xmm0, 0x60(%rsp) movq 0x88(%rsp), %rax movaps 0x40(%rax), %xmm1 movaps 0x50(%rax), %xmm0 movaps %xmm1, 0xc0(%rsp) movaps %xmm0, 0xb0(%rsp) movaps 0xc0(%rsp), %xmm0 movaps 0xb0(%rsp), %xmm1 punpcklwd %xmm1, %xmm0 # xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1],xmm0[2],xmm1[2],xmm0[3],xmm1[3] movaps %xmm0, 0x50(%rsp) movq 0x88(%rsp), %rax movaps 0x60(%rax), %xmm1 movaps 0x70(%rax), %xmm0 movaps %xmm1, 0xa0(%rsp) movaps %xmm0, 0x90(%rsp) movaps 0xa0(%rsp), %xmm0 movaps 0x90(%rsp), %xmm1 punpcklwd %xmm1, %xmm0 # xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1],xmm0[2],xmm1[2],xmm0[3],xmm1[3] movaps %xmm0, 0x40(%rsp) movq 0x88(%rsp), %rax movaps (%rax), %xmm1 movaps 0x10(%rax), %xmm0 movaps %xmm1, 0x180(%rsp) movaps %xmm0, 0x170(%rsp) movaps 0x180(%rsp), %xmm0 movaps 0x170(%rsp), %xmm1 punpckhwd %xmm1, %xmm0 # xmm0 = xmm0[4],xmm1[4],xmm0[5],xmm1[5],xmm0[6],xmm1[6],xmm0[7],xmm1[7] movaps %xmm0, 0x30(%rsp) movq 0x88(%rsp), %rax movaps 0x20(%rax), %xmm1 movaps 0x30(%rax), %xmm0 movaps %xmm1, 0x160(%rsp) movaps %xmm0, 0x150(%rsp) movaps 0x160(%rsp), %xmm0 movaps 0x150(%rsp), %xmm1 punpckhwd %xmm1, %xmm0 # xmm0 = xmm0[4],xmm1[4],xmm0[5],xmm1[5],xmm0[6],xmm1[6],xmm0[7],xmm1[7] movaps %xmm0, 0x20(%rsp) movq 0x88(%rsp), %rax movaps 0x40(%rax), %xmm1 movaps 0x50(%rax), %xmm0 movaps %xmm1, 0x140(%rsp) movaps %xmm0, 0x130(%rsp) movaps 0x140(%rsp), %xmm0 movaps 
0x130(%rsp), %xmm1 punpckhwd %xmm1, %xmm0 # xmm0 = xmm0[4],xmm1[4],xmm0[5],xmm1[5],xmm0[6],xmm1[6],xmm0[7],xmm1[7] movaps %xmm0, 0x10(%rsp) movq 0x88(%rsp), %rax movaps 0x60(%rax), %xmm1 movaps 0x70(%rax), %xmm0 movaps %xmm1, 0x120(%rsp) movaps %xmm0, 0x110(%rsp) movaps 0x120(%rsp), %xmm0 movaps 0x110(%rsp), %xmm1 punpckhwd %xmm1, %xmm0 # xmm0 = xmm0[4],xmm1[4],xmm0[5],xmm1[5],xmm0[6],xmm1[6],xmm0[7],xmm1[7] movaps %xmm0, (%rsp) movaps 0x70(%rsp), %xmm1 movaps 0x60(%rsp), %xmm0 movaps %xmm1, 0x200(%rsp) movaps %xmm0, 0x1f0(%rsp) movaps 0x200(%rsp), %xmm0 movaps 0x1f0(%rsp), %xmm1 punpckldq %xmm1, %xmm0 # xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1] movaps %xmm0, -0x10(%rsp) movaps 0x50(%rsp), %xmm1 movaps 0x40(%rsp), %xmm0 movaps %xmm1, 0x1e0(%rsp) movaps %xmm0, 0x1d0(%rsp) movaps 0x1e0(%rsp), %xmm0 movaps 0x1d0(%rsp), %xmm1 punpckldq %xmm1, %xmm0 # xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1] movaps %xmm0, -0x20(%rsp) movaps 0x30(%rsp), %xmm1 movaps 0x20(%rsp), %xmm0 movaps %xmm1, 0x1c0(%rsp) movaps %xmm0, 0x1b0(%rsp) movaps 0x1c0(%rsp), %xmm0 movaps 0x1b0(%rsp), %xmm1 punpckldq %xmm1, %xmm0 # xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1] movaps %xmm0, -0x30(%rsp) movaps 0x10(%rsp), %xmm1 movaps (%rsp), %xmm0 movaps %xmm1, 0x1a0(%rsp) movaps %xmm0, 0x190(%rsp) movaps 0x1a0(%rsp), %xmm0 movaps 0x190(%rsp), %xmm1 punpckldq %xmm1, %xmm0 # xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1] movaps %xmm0, -0x40(%rsp) movaps 0x70(%rsp), %xmm1 movaps 0x60(%rsp), %xmm0 movaps %xmm1, 0x280(%rsp) movaps %xmm0, 0x270(%rsp) movaps 0x280(%rsp), %xmm0 movaps 0x270(%rsp), %xmm1 punpckhdq %xmm1, %xmm0 # xmm0 = xmm0[2],xmm1[2],xmm0[3],xmm1[3] movaps %xmm0, -0x50(%rsp) movaps 0x50(%rsp), %xmm1 movaps 0x40(%rsp), %xmm0 movaps %xmm1, 0x260(%rsp) movaps %xmm0, 0x250(%rsp) movaps 0x260(%rsp), %xmm0 movaps 0x250(%rsp), %xmm1 punpckhdq %xmm1, %xmm0 # xmm0 = xmm0[2],xmm1[2],xmm0[3],xmm1[3] movaps %xmm0, -0x60(%rsp) movaps 0x30(%rsp), %xmm1 movaps 0x20(%rsp), %xmm0 movaps %xmm1, 0x240(%rsp) movaps %xmm0, 0x230(%rsp) 
movaps 0x240(%rsp), %xmm0 movaps 0x230(%rsp), %xmm1 punpckhdq %xmm1, %xmm0 # xmm0 = xmm0[2],xmm1[2],xmm0[3],xmm1[3] movaps %xmm0, -0x70(%rsp) movaps 0x10(%rsp), %xmm1 movaps (%rsp), %xmm0 movaps %xmm1, 0x220(%rsp) movaps %xmm0, 0x210(%rsp) movaps 0x220(%rsp), %xmm0 movaps 0x210(%rsp), %xmm1 punpckhdq %xmm1, %xmm0 # xmm0 = xmm0[2],xmm1[2],xmm0[3],xmm1[3] movaps %xmm0, -0x80(%rsp) movaps -0x10(%rsp), %xmm1 movaps -0x20(%rsp), %xmm0 movaps %xmm1, 0x300(%rsp) movaps %xmm0, 0x2f0(%rsp) movaps 0x300(%rsp), %xmm0 movaps 0x2f0(%rsp), %xmm1 punpcklqdq %xmm1, %xmm0 # xmm0 = xmm0[0],xmm1[0] movq 0x80(%rsp), %rax movaps %xmm0, (%rax) movaps -0x10(%rsp), %xmm1 movaps -0x20(%rsp), %xmm0 movaps %xmm1, 0x380(%rsp) movaps %xmm0, 0x370(%rsp) movaps 0x380(%rsp), %xmm0 movaps 0x370(%rsp), %xmm1 punpckhqdq %xmm1, %xmm0 # xmm0 = xmm0[1],xmm1[1] movq 0x80(%rsp), %rax movaps %xmm0, 0x10(%rax) movaps -0x50(%rsp), %xmm1 movaps -0x60(%rsp), %xmm0 movaps %xmm1, 0x2e0(%rsp) movaps %xmm0, 0x2d0(%rsp) movaps 0x2e0(%rsp), %xmm0 movaps 0x2d0(%rsp), %xmm1 punpcklqdq %xmm1, %xmm0 # xmm0 = xmm0[0],xmm1[0] movq 0x80(%rsp), %rax movaps %xmm0, 0x20(%rax) movaps -0x50(%rsp), %xmm1 movaps -0x60(%rsp), %xmm0 movaps %xmm1, 0x360(%rsp) movaps %xmm0, 0x350(%rsp) movaps 0x360(%rsp), %xmm0 movaps 0x350(%rsp), %xmm1 punpckhqdq %xmm1, %xmm0 # xmm0 = xmm0[1],xmm1[1] movq 0x80(%rsp), %rax movaps %xmm0, 0x30(%rax) movaps -0x30(%rsp), %xmm1 movaps -0x40(%rsp), %xmm0 movaps %xmm1, 0x2c0(%rsp) movaps %xmm0, 0x2b0(%rsp) movaps 0x2c0(%rsp), %xmm0 movaps 0x2b0(%rsp), %xmm1 punpcklqdq %xmm1, %xmm0 # xmm0 = xmm0[0],xmm1[0] movq 0x80(%rsp), %rax movaps %xmm0, 0x40(%rax) movaps -0x30(%rsp), %xmm1 movaps -0x40(%rsp), %xmm0 movaps %xmm1, 0x340(%rsp) movaps %xmm0, 0x330(%rsp) movaps 0x340(%rsp), %xmm0 movaps 0x330(%rsp), %xmm1 punpckhqdq %xmm1, %xmm0 # xmm0 = xmm0[1],xmm1[1] movq 0x80(%rsp), %rax movaps %xmm0, 0x50(%rax) movaps -0x70(%rsp), %xmm1 movaps -0x80(%rsp), %xmm0 movaps %xmm1, 0x2a0(%rsp) movaps %xmm0, 0x290(%rsp) 
movaps 0x2a0(%rsp), %xmm0 movaps 0x290(%rsp), %xmm1 punpcklqdq %xmm1, %xmm0 # xmm0 = xmm0[0],xmm1[0] movq 0x80(%rsp), %rax movaps %xmm0, 0x60(%rax) movaps -0x70(%rsp), %xmm1 movaps -0x80(%rsp), %xmm0 movaps %xmm1, 0x320(%rsp) movaps %xmm0, 0x310(%rsp) movaps 0x320(%rsp), %xmm0 movaps 0x310(%rsp), %xmm1 punpckhqdq %xmm1, %xmm0 # xmm0 = xmm0[1],xmm1[1] movq 0x80(%rsp), %rax movdqa %xmm0, 0x70(%rax) addq $0x398, %rsp # imm = 0x398 retq nopw (%rax,%rax)
/m-ab-s[P]aom/aom_dsp/x86/transpose_sse2.h
av1_lowbd_fwd_txfm2d_8x16_sse2
void av1_lowbd_fwd_txfm2d_8x16_sse2(const int16_t *input, int32_t *output, int stride, TX_TYPE tx_type, int bd) { (void)bd; __m128i buf0[16], buf1[16]; const int8_t *shift = av1_fwd_txfm_shift_ls[TX_8X16]; const int txw_idx = get_txw_idx(TX_8X16); const int txh_idx = get_txh_idx(TX_8X16); const int cos_bit_col = av1_fwd_cos_bit_col[txw_idx][txh_idx]; const int cos_bit_row = av1_fwd_cos_bit_row[txw_idx][txh_idx]; const int width = 8; const int height = 16; const transform_1d_sse2 col_txfm = col_txfm8x16_arr[tx_type]; const transform_1d_sse2 row_txfm = row_txfm8x8_arr[tx_type]; int ud_flip, lr_flip; get_flip_cfg(tx_type, &ud_flip, &lr_flip); if (ud_flip) { load_buffer_16bit_to_16bit_flip(input, stride, buf0, height); } else { load_buffer_16bit_to_16bit(input, stride, buf0, height); } round_shift_16bit(buf0, height, shift[0]); col_txfm(buf0, buf0, cos_bit_col); round_shift_16bit(buf0, height, shift[1]); transpose_16bit_8x8(buf0, buf1); transpose_16bit_8x8(buf0 + 8, buf1 + 8); for (int i = 0; i < 2; i++) { __m128i *buf; if (lr_flip) { buf = buf0; flip_buf_sse2(buf1 + width * i, buf, width); } else { buf = buf1 + width * i; } row_txfm(buf, buf, cos_bit_row); round_shift_16bit(buf, width, shift[2]); store_rect_buffer_16bit_to_32bit_w8(buf, output + 8 * i, height, width); } }
subq $0x278, %rsp # imm = 0x278 movb %cl, %al movq %rdi, 0x270(%rsp) movq %rsi, 0x268(%rsp) movl %edx, 0x264(%rsp) movb %al, 0x263(%rsp) movl %r8d, 0x25c(%rsp) leaq 0x16448a(%rip), %rax # 0xba5b10 movq 0x38(%rax), %rax movq %rax, 0x48(%rsp) movl $0x7, %edi callq 0xa3fd00 movl %eax, 0x44(%rsp) movl $0x7, %edi callq 0xa3fd20 movl %eax, 0x40(%rsp) movslq 0x44(%rsp), %rcx leaq 0xd5959(%rip), %rax # 0xb17010 imulq $0x5, %rcx, %rcx addq %rcx, %rax movslq 0x40(%rsp), %rcx movsbl (%rax,%rcx), %eax movl %eax, 0x3c(%rsp) movslq 0x44(%rsp), %rcx leaq 0xd5959(%rip), %rax # 0xb17030 imulq $0x5, %rcx, %rcx addq %rcx, %rax movslq 0x40(%rsp), %rcx movsbl (%rax,%rcx), %eax movl %eax, 0x38(%rsp) movl $0x8, 0x34(%rsp) movl $0x10, 0x30(%rsp) movzbl 0x263(%rsp), %eax movl %eax, %ecx leaq 0x15d294(%rip), %rax # 0xb9e9a0 movq (%rax,%rcx,8), %rax movq %rax, 0x28(%rsp) movzbl 0x263(%rsp), %eax movl %eax, %ecx leaq 0x15d47a(%rip), %rax # 0xb9eba0 movq (%rax,%rcx,8), %rax movq %rax, 0x20(%rsp) leaq 0x1c(%rsp), %rsi leaq 0x18(%rsp), %rdx movzbl 0x263(%rsp), %edi callq 0xa3fd40 cmpl $0x0, 0x1c(%rsp) je 0xa41770 movq 0x270(%rsp), %rdi movl 0x264(%rsp), %esi leaq 0x150(%rsp), %rdx movl $0x10, %ecx callq 0xa40d70 jmp 0xa41791 movq 0x270(%rsp), %rdi movl 0x264(%rsp), %esi leaq 0x150(%rsp), %rdx movl $0x10, %ecx callq 0xa40df0 leaq 0x150(%rsp), %rdi movq 0x48(%rsp), %rax movsbl (%rax), %edx movl $0x10, %esi callq 0xa3ff20 movq 0x28(%rsp), %rax leaq 0x150(%rsp), %rdi leaq 0x150(%rsp), %rsi movl 0x3c(%rsp), %ecx movsbl %cl, %edx callq *%rax leaq 0x150(%rsp), %rdi movq 0x48(%rsp), %rax movsbl 0x1(%rax), %edx movl $0x10, %esi callq 0xa3ff20 leaq 0x150(%rsp), %rdi leaq 0x50(%rsp), %rsi callq 0xa40e60 leaq 0x150(%rsp), %rdi addq $0x80, %rdi leaq 0x50(%rsp), %rsi addq $0x80, %rsi callq 0xa40e60 movl $0x0, 0x14(%rsp) cmpl $0x2, 0x14(%rsp) jge 0xa418ea cmpl $0x0, 0x18(%rsp) je 0xa41863 leaq 0x150(%rsp), %rax movq %rax, 0x8(%rsp) leaq 0x50(%rsp), %rdi movl 0x14(%rsp), %eax shll $0x3, %eax cltq shlq $0x4, 
%rax addq %rax, %rdi movq 0x8(%rsp), %rsi movl $0x8, %edx callq 0xa40230 jmp 0xa4187e leaq 0x50(%rsp), %rax movl 0x14(%rsp), %ecx shll $0x3, %ecx movslq %ecx, %rcx shlq $0x4, %rcx addq %rcx, %rax movq %rax, 0x8(%rsp) movq 0x20(%rsp), %rax movq 0x8(%rsp), %rdi movq 0x8(%rsp), %rsi movl 0x38(%rsp), %ecx movsbl %cl, %edx callq *%rax movq 0x8(%rsp), %rdi movq 0x48(%rsp), %rax movsbl 0x2(%rax), %edx movl $0x8, %esi callq 0xa3ff20 movq 0x8(%rsp), %rdi movq 0x268(%rsp), %rsi movl 0x14(%rsp), %eax shll $0x3, %eax cltq shlq $0x2, %rax addq %rax, %rsi movl $0x10, %edx movl $0x8, %ecx callq 0xa407b0 movl 0x14(%rsp), %eax addl $0x1, %eax movl %eax, 0x14(%rsp) jmp 0xa4181e addq $0x278, %rsp # imm = 0x278 retq nopw %cs:(%rax,%rax)
/m-ab-s[P]aom/av1/encoder/x86/av1_fwd_txfm_sse2.c
av1_lowbd_fwd_txfm2d_8x32_sse2
// 8x32 forward 2D transform, SSE2 path.
// The column transform runs once over the full 8-wide, 32-tall strip; the
// result is then transposed in 8x8 tiles and the row transform is applied
// to each of the four 8-row chunks.
void av1_lowbd_fwd_txfm2d_8x32_sse2(const int16_t *input, int32_t *output,
                                    int stride, TX_TYPE tx_type, int bd) {
  (void)bd;
  __m128i buf0[32], buf1[32];
  const int8_t *shift = av1_fwd_txfm_shift_ls[TX_8X32];
  const int txw_idx = get_txw_idx(TX_8X32);
  const int txh_idx = get_txh_idx(TX_8X32);
  const int cos_bit_col = av1_fwd_cos_bit_col[txw_idx][txh_idx];
  const int cos_bit_row = av1_fwd_cos_bit_row[txw_idx][txh_idx];
  const int width = 8;
  const int height = 32;
  const transform_1d_sse2 col_txfm = col_txfm8x32_arr[tx_type];
  const transform_1d_sse2 row_txfm = row_txfm8x8_arr[tx_type];
  int ud_flip, lr_flip;
  get_flip_cfg(tx_type, &ud_flip, &lr_flip);

  // Load the whole column strip, flipping vertically when the transform
  // type requires it.
  if (!ud_flip) {
    load_buffer_16bit_to_16bit(input, stride, buf0, height);
  } else {
    load_buffer_16bit_to_16bit_flip(input, stride, buf0, height);
  }
  round_shift_16bit(buf0, height, shift[0]);
  col_txfm(buf0, buf0, cos_bit_col);
  round_shift_16bit(buf0, height, shift[1]);
  // Transpose the 8x32 column result as four stacked 8x8 tiles.
  for (int tile = 0; tile < 4; ++tile) {
    transpose_16bit_8x8(buf0 + 8 * tile, buf1 + 8 * tile);
  }

  // Row transform over each 8-row chunk, with an optional horizontal flip.
  for (int chunk = 0; chunk < 4; ++chunk) {
    __m128i *row_buf;
    if (lr_flip) {
      row_buf = buf0;
      flip_buf_sse2(buf1 + width * chunk, row_buf, width);
    } else {
      row_buf = buf1 + width * chunk;
    }
    row_txfm(row_buf, row_buf, cos_bit_row);
    round_shift_16bit(row_buf, width, shift[2]);
    store_buffer_16bit_to_32bit_w8(row_buf, output + 8 * chunk, height, width);
  }
}
subq $0x478, %rsp # imm = 0x478 movb %cl, %al movq %rdi, 0x470(%rsp) movq %rsi, 0x468(%rsp) movl %edx, 0x464(%rsp) movb %al, 0x463(%rsp) movl %r8d, 0x45c(%rsp) leaq 0x1641da(%rip), %rax # 0xba5b10 movq 0x78(%rax), %rax movq %rax, 0x48(%rsp) movl $0xf, %edi callq 0xa3fd00 movl %eax, 0x44(%rsp) movl $0xf, %edi callq 0xa3fd20 movl %eax, 0x40(%rsp) movslq 0x44(%rsp), %rcx leaq 0xd56a9(%rip), %rax # 0xb17010 imulq $0x5, %rcx, %rcx addq %rcx, %rax movslq 0x40(%rsp), %rcx movsbl (%rax,%rcx), %eax movl %eax, 0x3c(%rsp) movslq 0x44(%rsp), %rcx leaq 0xd56a9(%rip), %rax # 0xb17030 imulq $0x5, %rcx, %rcx addq %rcx, %rax movslq 0x40(%rsp), %rcx movsbl (%rax,%rcx), %eax movl %eax, 0x38(%rsp) movl $0x8, 0x34(%rsp) movl $0x20, 0x30(%rsp) movzbl 0x463(%rsp), %eax movl %eax, %ecx leaq 0x15d264(%rip), %rax # 0xb9ec20 movq (%rax,%rcx,8), %rax movq %rax, 0x28(%rsp) movzbl 0x463(%rsp), %eax movl %eax, %ecx leaq 0x15d1ca(%rip), %rax # 0xb9eba0 movq (%rax,%rcx,8), %rax movq %rax, 0x20(%rsp) leaq 0x1c(%rsp), %rsi leaq 0x18(%rsp), %rdx movzbl 0x463(%rsp), %edi callq 0xa3fd40 cmpl $0x0, 0x1c(%rsp) je 0xa41a20 movq 0x470(%rsp), %rdi movl 0x464(%rsp), %esi leaq 0x250(%rsp), %rdx movl $0x20, %ecx callq 0xa40d70 jmp 0xa41a41 movq 0x470(%rsp), %rdi movl 0x464(%rsp), %esi leaq 0x250(%rsp), %rdx movl $0x20, %ecx callq 0xa40df0 leaq 0x250(%rsp), %rdi movq 0x48(%rsp), %rax movsbl (%rax), %edx movl $0x20, %esi callq 0xa3ff20 movq 0x28(%rsp), %rax leaq 0x250(%rsp), %rdi leaq 0x250(%rsp), %rsi movl 0x3c(%rsp), %ecx movsbl %cl, %edx callq *%rax leaq 0x250(%rsp), %rdi movq 0x48(%rsp), %rax movsbl 0x1(%rax), %edx movl $0x20, %esi callq 0xa3ff20 leaq 0x250(%rsp), %rdi leaq 0x50(%rsp), %rsi callq 0xa40e60 leaq 0x250(%rsp), %rdi addq $0x80, %rdi leaq 0x50(%rsp), %rsi addq $0x80, %rsi callq 0xa40e60 leaq 0x250(%rsp), %rdi addq $0x100, %rdi # imm = 0x100 leaq 0x50(%rsp), %rsi addq $0x100, %rsi # imm = 0x100 callq 0xa40e60 leaq 0x250(%rsp), %rdi addq $0x180, %rdi # imm = 0x180 leaq 0x50(%rsp), %rsi addq $0x180, 
%rsi # imm = 0x180 callq 0xa40e60 movl $0x0, 0x14(%rsp) cmpl $0x4, 0x14(%rsp) jge 0xa41bda cmpl $0x0, 0x18(%rsp) je 0xa41b53 leaq 0x250(%rsp), %rax movq %rax, 0x8(%rsp) leaq 0x50(%rsp), %rdi movl 0x14(%rsp), %eax shll $0x3, %eax cltq shlq $0x4, %rax addq %rax, %rdi movq 0x8(%rsp), %rsi movl $0x8, %edx callq 0xa40230 jmp 0xa41b6e leaq 0x50(%rsp), %rax movl 0x14(%rsp), %ecx shll $0x3, %ecx movslq %ecx, %rcx shlq $0x4, %rcx addq %rcx, %rax movq %rax, 0x8(%rsp) movq 0x20(%rsp), %rax movq 0x8(%rsp), %rdi movq 0x8(%rsp), %rsi movl 0x38(%rsp), %ecx movsbl %cl, %edx callq *%rax movq 0x8(%rsp), %rdi movq 0x48(%rsp), %rax movsbl 0x2(%rax), %edx movl $0x8, %esi callq 0xa3ff20 movq 0x8(%rsp), %rdi movq 0x468(%rsp), %rsi movl 0x14(%rsp), %eax shll $0x3, %eax cltq shlq $0x2, %rax addq %rax, %rsi movl $0x20, %edx movl $0x8, %ecx callq 0xa40ad0 movl 0x14(%rsp), %eax addl $0x1, %eax movl %eax, 0x14(%rsp) jmp 0xa41b0e addq $0x478, %rsp # imm = 0x478 retq nopw %cs:(%rax,%rax)
/m-ab-s[P]aom/av1/encoder/x86/av1_fwd_txfm_sse2.c
av1_lowbd_fwd_txfm2d_16x4_sse2
// 16x4 forward 2D transform, SSE2 path.
// The 16-wide input is processed as two 8-wide halves for the column pass;
// each half is transposed into its slot of buf1, then a single row pass
// covers the full 16-wide transposed data.
void av1_lowbd_fwd_txfm2d_16x4_sse2(const int16_t *input, int32_t *output,
                                    int stride, TX_TYPE tx_type, int bd) {
  (void)bd;
  __m128i buf0[16], buf1[16];
  const int8_t *shift = av1_fwd_txfm_shift_ls[TX_16X4];
  const int txw_idx = get_txw_idx(TX_16X4);
  const int txh_idx = get_txh_idx(TX_16X4);
  const int cos_bit_col = av1_fwd_cos_bit_col[txw_idx][txh_idx];
  const int cos_bit_row = av1_fwd_cos_bit_row[txw_idx][txh_idx];
  const int width = 16;
  const int height = 4;
  const transform_1d_sse2 col_txfm = col_txfm8x4_arr[tx_type];
  const transform_1d_sse2 row_txfm = row_txfm8x16_arr[tx_type];
  __m128i *buf;
  int ud_flip, lr_flip;
  get_flip_cfg(tx_type, &ud_flip, &lr_flip);

  for (int half = 0; half < 2; ++half) {
    const int16_t *src = input + 8 * half;
    if (!ud_flip) {
      load_buffer_16bit_to_16bit(src, stride, buf0, height);
    } else {
      load_buffer_16bit_to_16bit_flip(src, stride, buf0, height);
    }
    round_shift_16bit(buf0, height, shift[0]);
    col_txfm(buf0, buf0, cos_bit_col);
    round_shift_16bit(buf0, height, shift[1]);
    transpose_16bit_8x4(buf0, buf1 + 8 * half);
  }

  // Row pass, with an optional horizontal flip of the whole row buffer.
  if (lr_flip) {
    buf = buf0;
    flip_buf_sse2(buf1, buf, width);
  } else {
    buf = buf1;
  }
  row_txfm(buf, buf, cos_bit_row);
  round_shift_16bit(buf, width, shift[2]);
  store_buffer_16bit_to_32bit_w4(buf, output, height, width);
}
subq $0x278, %rsp # imm = 0x278 movb %cl, %al movq %rdi, 0x270(%rsp) movq %rsi, 0x268(%rsp) movl %edx, 0x264(%rsp) movb %al, 0x263(%rsp) movl %r8d, 0x25c(%rsp) leaq 0x163eea(%rip), %rax # 0xba5b10 movq 0x70(%rax), %rax movq %rax, 0x48(%rsp) movl $0xe, %edi callq 0xa3fd00 movl %eax, 0x44(%rsp) movl $0xe, %edi callq 0xa3fd20 movl %eax, 0x40(%rsp) movslq 0x44(%rsp), %rcx leaq 0xd53b9(%rip), %rax # 0xb17010 imulq $0x5, %rcx, %rcx addq %rcx, %rax movslq 0x40(%rsp), %rcx movsbl (%rax,%rcx), %eax movl %eax, 0x3c(%rsp) movslq 0x44(%rsp), %rcx leaq 0xd53b9(%rip), %rax # 0xb17030 imulq $0x5, %rcx, %rcx addq %rcx, %rax movslq 0x40(%rsp), %rcx movsbl (%rax,%rcx), %eax movl %eax, 0x38(%rsp) movl $0x10, 0x34(%rsp) movl $0x4, 0x30(%rsp) movzbl 0x263(%rsp), %eax movl %eax, %ecx leaq 0x15cd74(%rip), %rax # 0xb9ea20 movq (%rax,%rcx,8), %rax movq %rax, 0x28(%rsp) movzbl 0x263(%rsp), %eax movl %eax, %ecx leaq 0x15cfda(%rip), %rax # 0xb9eca0 movq (%rax,%rcx,8), %rax movq %rax, 0x20(%rsp) leaq 0x14(%rsp), %rsi leaq 0x10(%rsp), %rdx movzbl 0x263(%rsp), %edi callq 0xa3fd40 movl $0x0, 0xc(%rsp) cmpl $0x2, 0xc(%rsp) jge 0xa41de7 cmpl $0x0, 0x14(%rsp) je 0xa41d32 movq 0x270(%rsp), %rdi movl 0xc(%rsp), %eax shll $0x3, %eax cltq shlq %rax addq %rax, %rdi movl 0x264(%rsp), %esi leaq 0x150(%rsp), %rdx movl $0x4, %ecx callq 0xa40d70 jmp 0xa41d62 movq 0x270(%rsp), %rdi movl 0xc(%rsp), %eax shll $0x3, %eax cltq shlq %rax addq %rax, %rdi movl 0x264(%rsp), %esi leaq 0x150(%rsp), %rdx movl $0x4, %ecx callq 0xa40df0 leaq 0x150(%rsp), %rdi movq 0x48(%rsp), %rax movsbl (%rax), %edx movl $0x4, %esi callq 0xa3ff20 movq 0x28(%rsp), %rax leaq 0x150(%rsp), %rdi leaq 0x150(%rsp), %rsi movl 0x3c(%rsp), %ecx movsbl %cl, %edx callq *%rax leaq 0x150(%rsp), %rdi movq 0x48(%rsp), %rax movsbl 0x1(%rax), %edx movl $0x4, %esi callq 0xa3ff20 leaq 0x150(%rsp), %rdi leaq 0x50(%rsp), %rsi movl 0xc(%rsp), %eax shll $0x3, %eax cltq shlq $0x4, %rax addq %rax, %rsi callq 0xa41e70 movl 0xc(%rsp), %eax addl $0x1, %eax movl %eax, 
0xc(%rsp) jmp 0xa41cee cmpl $0x0, 0x10(%rsp) je 0xa41e11 leaq 0x150(%rsp), %rax movq %rax, 0x18(%rsp) leaq 0x50(%rsp), %rdi movq 0x18(%rsp), %rsi movl $0x10, %edx callq 0xa40230 jmp 0xa41e1b leaq 0x50(%rsp), %rax movq %rax, 0x18(%rsp) movq 0x20(%rsp), %rax movq 0x18(%rsp), %rdi movq 0x18(%rsp), %rsi movl 0x38(%rsp), %ecx movsbl %cl, %edx callq *%rax movq 0x18(%rsp), %rdi movq 0x48(%rsp), %rax movsbl 0x2(%rax), %edx movl $0x10, %esi callq 0xa3ff20 movq 0x18(%rsp), %rdi movq 0x268(%rsp), %rsi movl $0x4, %edx movl $0x10, %ecx callq 0xa402a0 addq $0x278, %rsp # imm = 0x278 retq nop
/m-ab-s[P]aom/av1/encoder/x86/av1_fwd_txfm_sse2.c
transpose_16bit_8x4
// Transposes an 8-column x 4-row block of 16-bit values.
// in[r] holds row r (lanes r0..r7).  out[c] receives column c in its low
// four lanes; the upper four lanes of every output are zero-filled.
static inline void transpose_16bit_8x4(const __m128i *const in, __m128i *const out) {
  // Interleave 16-bit rows pairwise:
  // lo01: 00 10 01 11 02 12 03 13    hi01: 04 14 05 15 06 16 07 17
  // lo23: 20 30 21 31 22 32 23 33    hi23: 24 34 25 35 26 36 27 37
  const __m128i lo01 = _mm_unpacklo_epi16(in[0], in[1]);
  const __m128i lo23 = _mm_unpacklo_epi16(in[2], in[3]);
  const __m128i hi01 = _mm_unpackhi_epi16(in[0], in[1]);
  const __m128i hi23 = _mm_unpackhi_epi16(in[2], in[3]);

  // Interleave 32-bit pairs; each register now holds two full columns:
  // c01: 00 10 20 30 | 01 11 21 31    c23: 02 12 22 32 | 03 13 23 33
  // c45: 04 14 24 34 | 05 15 25 35    c67: 06 16 26 36 | 07 17 27 37
  const __m128i c01 = _mm_unpacklo_epi32(lo01, lo23);
  const __m128i c23 = _mm_unpackhi_epi32(lo01, lo23);
  const __m128i c45 = _mm_unpacklo_epi32(hi01, hi23);
  const __m128i c67 = _mm_unpackhi_epi32(hi01, hi23);

  // Split each column pair into its own register, padding the high 64 bits
  // with zeros so every output lane is well-defined.
  const __m128i zero = _mm_setzero_si128();
  out[0] = _mm_unpacklo_epi64(c01, zero);
  out[1] = _mm_unpackhi_epi64(c01, zero);
  out[2] = _mm_unpacklo_epi64(c23, zero);
  out[3] = _mm_unpackhi_epi64(c23, zero);
  out[4] = _mm_unpacklo_epi64(c45, zero);
  out[5] = _mm_unpackhi_epi64(c45, zero);
  out[6] = _mm_unpacklo_epi64(c67, zero);
  out[7] = _mm_unpackhi_epi64(c67, zero);
}
subq $0x238, %rsp # imm = 0x238 movq %rdi, 0x18(%rsp) movq %rsi, 0x10(%rsp) movq 0x18(%rsp), %rax movaps (%rax), %xmm1 movaps 0x10(%rax), %xmm0 movaps %xmm1, 0x50(%rsp) movaps %xmm0, 0x40(%rsp) movaps 0x50(%rsp), %xmm0 movaps 0x40(%rsp), %xmm1 punpcklwd %xmm1, %xmm0 # xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1],xmm0[2],xmm1[2],xmm0[3],xmm1[3] movaps %xmm0, (%rsp) movq 0x18(%rsp), %rax movaps 0x20(%rax), %xmm1 movaps 0x30(%rax), %xmm0 movaps %xmm1, 0x30(%rsp) movaps %xmm0, 0x20(%rsp) movaps 0x30(%rsp), %xmm0 movaps 0x20(%rsp), %xmm1 punpcklwd %xmm1, %xmm0 # xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1],xmm0[2],xmm1[2],xmm0[3],xmm1[3] movaps %xmm0, -0x10(%rsp) movq 0x18(%rsp), %rax movaps (%rax), %xmm1 movaps 0x10(%rax), %xmm0 movaps %xmm1, 0x90(%rsp) movaps %xmm0, 0x80(%rsp) movaps 0x90(%rsp), %xmm0 movaps 0x80(%rsp), %xmm1 punpckhwd %xmm1, %xmm0 # xmm0 = xmm0[4],xmm1[4],xmm0[5],xmm1[5],xmm0[6],xmm1[6],xmm0[7],xmm1[7] movaps %xmm0, -0x20(%rsp) movq 0x18(%rsp), %rax movaps 0x20(%rax), %xmm1 movaps 0x30(%rax), %xmm0 movaps %xmm1, 0x70(%rsp) movaps %xmm0, 0x60(%rsp) movaps 0x70(%rsp), %xmm0 movaps 0x60(%rsp), %xmm1 punpckhwd %xmm1, %xmm0 # xmm0 = xmm0[4],xmm1[4],xmm0[5],xmm1[5],xmm0[6],xmm1[6],xmm0[7],xmm1[7] movaps %xmm0, -0x30(%rsp) movaps (%rsp), %xmm1 movaps -0x10(%rsp), %xmm0 movaps %xmm1, 0xe0(%rsp) movaps %xmm0, 0xd0(%rsp) movaps 0xe0(%rsp), %xmm0 movaps 0xd0(%rsp), %xmm1 punpckldq %xmm1, %xmm0 # xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1] movaps %xmm0, -0x40(%rsp) movaps -0x20(%rsp), %xmm1 movaps -0x30(%rsp), %xmm0 movaps %xmm1, 0xc0(%rsp) movaps %xmm0, 0xb0(%rsp) movaps 0xc0(%rsp), %xmm0 movaps 0xb0(%rsp), %xmm1 punpckldq %xmm1, %xmm0 # xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1] movaps %xmm0, -0x50(%rsp) movaps (%rsp), %xmm1 movaps -0x10(%rsp), %xmm0 movaps %xmm1, 0x120(%rsp) movaps %xmm0, 0x110(%rsp) movaps 0x120(%rsp), %xmm0 movaps 0x110(%rsp), %xmm1 punpckhdq %xmm1, %xmm0 # xmm0 = xmm0[2],xmm1[2],xmm0[3],xmm1[3] movaps %xmm0, -0x60(%rsp) movaps -0x20(%rsp), %xmm1 movaps 
-0x30(%rsp), %xmm0 movaps %xmm1, 0x100(%rsp) movaps %xmm0, 0xf0(%rsp) movaps 0x100(%rsp), %xmm0 movaps 0xf0(%rsp), %xmm1 punpckhdq %xmm1, %xmm0 # xmm0 = xmm0[2],xmm1[2],xmm0[3],xmm1[3] movaps %xmm0, -0x70(%rsp) xorps %xmm0, %xmm0 movaps %xmm0, 0xa0(%rsp) movaps 0xa0(%rsp), %xmm0 movaps %xmm0, -0x80(%rsp) movaps -0x40(%rsp), %xmm1 movaps -0x80(%rsp), %xmm0 movaps %xmm1, 0x1a0(%rsp) movaps %xmm0, 0x190(%rsp) movaps 0x1a0(%rsp), %xmm0 movaps 0x190(%rsp), %xmm1 punpcklqdq %xmm1, %xmm0 # xmm0 = xmm0[0],xmm1[0] movq 0x10(%rsp), %rax movaps %xmm0, (%rax) movaps -0x40(%rsp), %xmm1 movaps -0x80(%rsp), %xmm0 movaps %xmm1, 0x220(%rsp) movaps %xmm0, 0x210(%rsp) movaps 0x220(%rsp), %xmm0 movaps 0x210(%rsp), %xmm1 punpckhqdq %xmm1, %xmm0 # xmm0 = xmm0[1],xmm1[1] movq 0x10(%rsp), %rax movaps %xmm0, 0x10(%rax) movaps -0x60(%rsp), %xmm1 movaps -0x80(%rsp), %xmm0 movaps %xmm1, 0x180(%rsp) movaps %xmm0, 0x170(%rsp) movaps 0x180(%rsp), %xmm0 movaps 0x170(%rsp), %xmm1 punpcklqdq %xmm1, %xmm0 # xmm0 = xmm0[0],xmm1[0] movq 0x10(%rsp), %rax movaps %xmm0, 0x20(%rax) movaps -0x60(%rsp), %xmm1 movaps -0x80(%rsp), %xmm0 movaps %xmm1, 0x200(%rsp) movaps %xmm0, 0x1f0(%rsp) movaps 0x200(%rsp), %xmm0 movaps 0x1f0(%rsp), %xmm1 punpckhqdq %xmm1, %xmm0 # xmm0 = xmm0[1],xmm1[1] movq 0x10(%rsp), %rax movaps %xmm0, 0x30(%rax) movaps -0x50(%rsp), %xmm1 movaps -0x80(%rsp), %xmm0 movaps %xmm1, 0x160(%rsp) movaps %xmm0, 0x150(%rsp) movaps 0x160(%rsp), %xmm0 movaps 0x150(%rsp), %xmm1 punpcklqdq %xmm1, %xmm0 # xmm0 = xmm0[0],xmm1[0] movq 0x10(%rsp), %rax movaps %xmm0, 0x40(%rax) movaps -0x50(%rsp), %xmm1 movaps -0x80(%rsp), %xmm0 movaps %xmm1, 0x1e0(%rsp) movaps %xmm0, 0x1d0(%rsp) movaps 0x1e0(%rsp), %xmm0 movaps 0x1d0(%rsp), %xmm1 punpckhqdq %xmm1, %xmm0 # xmm0 = xmm0[1],xmm1[1] movq 0x10(%rsp), %rax movaps %xmm0, 0x50(%rax) movaps -0x70(%rsp), %xmm1 movaps -0x80(%rsp), %xmm0 movaps %xmm1, 0x140(%rsp) movaps %xmm0, 0x130(%rsp) movaps 0x140(%rsp), %xmm0 movaps 0x130(%rsp), %xmm1 punpcklqdq %xmm1, %xmm0 # 
xmm0 = xmm0[0],xmm1[0] movq 0x10(%rsp), %rax movaps %xmm0, 0x60(%rax) movaps -0x70(%rsp), %xmm1 movaps -0x80(%rsp), %xmm0 movaps %xmm1, 0x1c0(%rsp) movaps %xmm0, 0x1b0(%rsp) movaps 0x1c0(%rsp), %xmm0 movaps 0x1b0(%rsp), %xmm1 punpckhqdq %xmm1, %xmm0 # xmm0 = xmm0[1],xmm1[1] movq 0x10(%rsp), %rax movdqa %xmm0, 0x70(%rax) addq $0x238, %rsp # imm = 0x238 retq nopw %cs:(%rax,%rax)
/m-ab-s[P]aom/aom_dsp/x86/transpose_sse2.h
av1_lowbd_fwd_txfm2d_16x8_sse2
// 16x8 forward 2D transform, SSE2 path.
// Two 8-wide column passes feed the 16-wide row pass; the rectangular
// store applies the sqrt(2) scaling required for non-square transforms.
void av1_lowbd_fwd_txfm2d_16x8_sse2(const int16_t *input, int32_t *output,
                                    int stride, TX_TYPE tx_type, int bd) {
  (void)bd;
  __m128i buf0[16], buf1[16];
  const int8_t *shift = av1_fwd_txfm_shift_ls[TX_16X8];
  const int txw_idx = get_txw_idx(TX_16X8);
  const int txh_idx = get_txh_idx(TX_16X8);
  const int cos_bit_col = av1_fwd_cos_bit_col[txw_idx][txh_idx];
  const int cos_bit_row = av1_fwd_cos_bit_row[txw_idx][txh_idx];
  const int width = 16;
  const int height = 8;
  const transform_1d_sse2 col_txfm = col_txfm8x8_arr[tx_type];
  const transform_1d_sse2 row_txfm = row_txfm8x16_arr[tx_type];
  __m128i *buf;
  int ud_flip, lr_flip;
  get_flip_cfg(tx_type, &ud_flip, &lr_flip);

  for (int half = 0; half < 2; ++half) {
    const int16_t *src = input + 8 * half;
    if (!ud_flip) {
      load_buffer_16bit_to_16bit(src, stride, buf0, height);
    } else {
      load_buffer_16bit_to_16bit_flip(src, stride, buf0, height);
    }
    round_shift_16bit(buf0, height, shift[0]);
    col_txfm(buf0, buf0, cos_bit_col);
    round_shift_16bit(buf0, height, shift[1]);
    transpose_16bit_8x8(buf0, buf1 + 8 * half);
  }

  // Row pass, with an optional horizontal flip of the whole row buffer.
  if (lr_flip) {
    buf = buf0;
    flip_buf_sse2(buf1, buf, width);
  } else {
    buf = buf1;
  }
  row_txfm(buf, buf, cos_bit_row);
  round_shift_16bit(buf, width, shift[2]);
  store_rect_buffer_16bit_to_32bit_w8(buf, output, height, width);
}
subq $0x278, %rsp # imm = 0x278 movb %cl, %al movq %rdi, 0x270(%rsp) movq %rsi, 0x268(%rsp) movl %edx, 0x264(%rsp) movb %al, 0x263(%rsp) movl %r8d, 0x25c(%rsp) leaq 0x1638fa(%rip), %rax # 0xba5b10 movq 0x40(%rax), %rax movq %rax, 0x48(%rsp) movl $0x8, %edi callq 0xa3fd00 movl %eax, 0x44(%rsp) movl $0x8, %edi callq 0xa3fd20 movl %eax, 0x40(%rsp) movslq 0x44(%rsp), %rcx leaq 0xd4dc9(%rip), %rax # 0xb17010 imulq $0x5, %rcx, %rcx addq %rcx, %rax movslq 0x40(%rsp), %rcx movsbl (%rax,%rcx), %eax movl %eax, 0x3c(%rsp) movslq 0x44(%rsp), %rcx leaq 0xd4dc9(%rip), %rax # 0xb17030 imulq $0x5, %rcx, %rcx addq %rcx, %rax movslq 0x40(%rsp), %rcx movsbl (%rax,%rcx), %eax movl %eax, 0x38(%rsp) movl $0x10, 0x34(%rsp) movl $0x8, 0x30(%rsp) movzbl 0x263(%rsp), %eax movl %eax, %ecx leaq 0x15c884(%rip), %rax # 0xb9eb20 movq (%rax,%rcx,8), %rax movq %rax, 0x28(%rsp) movzbl 0x263(%rsp), %eax movl %eax, %ecx leaq 0x15c9ea(%rip), %rax # 0xb9eca0 movq (%rax,%rcx,8), %rax movq %rax, 0x20(%rsp) leaq 0x14(%rsp), %rsi leaq 0x10(%rsp), %rdx movzbl 0x263(%rsp), %edi callq 0xa3fd40 movl $0x0, 0xc(%rsp) cmpl $0x2, 0xc(%rsp) jge 0xa423d7 cmpl $0x0, 0x14(%rsp) je 0xa42322 movq 0x270(%rsp), %rdi movl 0xc(%rsp), %eax shll $0x3, %eax cltq shlq %rax addq %rax, %rdi movl 0x264(%rsp), %esi leaq 0x150(%rsp), %rdx movl $0x8, %ecx callq 0xa40d70 jmp 0xa42352 movq 0x270(%rsp), %rdi movl 0xc(%rsp), %eax shll $0x3, %eax cltq shlq %rax addq %rax, %rdi movl 0x264(%rsp), %esi leaq 0x150(%rsp), %rdx movl $0x8, %ecx callq 0xa40df0 leaq 0x150(%rsp), %rdi movq 0x48(%rsp), %rax movsbl (%rax), %edx movl $0x8, %esi callq 0xa3ff20 movq 0x28(%rsp), %rax leaq 0x150(%rsp), %rdi leaq 0x150(%rsp), %rsi movl 0x3c(%rsp), %ecx movsbl %cl, %edx callq *%rax leaq 0x150(%rsp), %rdi movq 0x48(%rsp), %rax movsbl 0x1(%rax), %edx movl $0x8, %esi callq 0xa3ff20 leaq 0x150(%rsp), %rdi leaq 0x50(%rsp), %rsi movl 0xc(%rsp), %eax shll $0x3, %eax cltq shlq $0x4, %rax addq %rax, %rsi callq 0xa40e60 movl 0xc(%rsp), %eax addl $0x1, %eax movl %eax, 
0xc(%rsp) jmp 0xa422de cmpl $0x0, 0x10(%rsp) je 0xa42401 leaq 0x150(%rsp), %rax movq %rax, 0x18(%rsp) leaq 0x50(%rsp), %rdi movq 0x18(%rsp), %rsi movl $0x10, %edx callq 0xa40230 jmp 0xa4240b leaq 0x50(%rsp), %rax movq %rax, 0x18(%rsp) movq 0x20(%rsp), %rax movq 0x18(%rsp), %rdi movq 0x18(%rsp), %rsi movl 0x38(%rsp), %ecx movsbl %cl, %edx callq *%rax movq 0x18(%rsp), %rdi movq 0x48(%rsp), %rax movsbl 0x2(%rax), %edx movl $0x10, %esi callq 0xa3ff20 movq 0x18(%rsp), %rdi movq 0x268(%rsp), %rsi movl $0x8, %edx movl $0x10, %ecx callq 0xa407b0 addq $0x278, %rsp # imm = 0x278 retq nop
/m-ab-s[P]aom/av1/encoder/x86/av1_fwd_txfm_sse2.c
av1_lowbd_fwd_txfm2d_16x16_sse2
// 16x16 forward 2D transform, SSE2 path.
// Column pass: two 8-wide halves, each transposed as two 8x8 tiles into
// buf1.  Row pass: two 8-row chunks over the 16-wide transposed data.
void av1_lowbd_fwd_txfm2d_16x16_sse2(const int16_t *input, int32_t *output,
                                     int stride, TX_TYPE tx_type, int bd) {
  (void)bd;
  __m128i buf0[16], buf1[32];
  const int8_t *shift = av1_fwd_txfm_shift_ls[TX_16X16];
  const int txw_idx = get_txw_idx(TX_16X16);
  const int txh_idx = get_txh_idx(TX_16X16);
  const int cos_bit_col = av1_fwd_cos_bit_col[txw_idx][txh_idx];
  const int cos_bit_row = av1_fwd_cos_bit_row[txw_idx][txh_idx];
  const int width = 16;
  const int height = 16;
  const transform_1d_sse2 col_txfm = col_txfm8x16_arr[tx_type];
  const transform_1d_sse2 row_txfm = row_txfm8x16_arr[tx_type];
  int ud_flip, lr_flip;
  get_flip_cfg(tx_type, &ud_flip, &lr_flip);

  for (int half = 0; half < 2; ++half) {
    const int16_t *src = input + 8 * half;
    if (!ud_flip) {
      load_buffer_16bit_to_16bit(src, stride, buf0, height);
    } else {
      load_buffer_16bit_to_16bit_flip(src, stride, buf0, height);
    }
    round_shift_16bit(buf0, height, shift[0]);
    col_txfm(buf0, buf0, cos_bit_col);
    round_shift_16bit(buf0, height, shift[1]);
    // Transpose the 8x16 half as two stacked 8x8 tiles into its column
    // slot of buf1.
    for (int tile = 0; tile < 2; ++tile) {
      transpose_16bit_8x8(buf0 + 8 * tile, buf1 + tile * width + 8 * half);
    }
  }

  for (int chunk = 0; chunk < 2; ++chunk) {
    __m128i *buf;
    if (lr_flip) {
      buf = buf0;
      flip_buf_sse2(buf1 + width * chunk, buf, width);
    } else {
      buf = buf1 + width * chunk;
    }
    row_txfm(buf, buf, cos_bit_row);
    round_shift_16bit(buf, width, shift[2]);
    store_buffer_16bit_to_32bit_w8(buf, output + 8 * chunk, height, width);
  }
}
subq $0x378, %rsp # imm = 0x378 movb %cl, %al movq %rdi, 0x370(%rsp) movq %rsi, 0x368(%rsp) movl %edx, 0x364(%rsp) movb %al, 0x363(%rsp) movl %r8d, 0x35c(%rsp) leaq 0x16367a(%rip), %rax # 0xba5b10 movq 0x10(%rax), %rax movq %rax, 0x48(%rsp) movl $0x2, %edi callq 0xa3fd00 movl %eax, 0x44(%rsp) movl $0x2, %edi callq 0xa3fd20 movl %eax, 0x40(%rsp) movslq 0x44(%rsp), %rcx leaq 0xd4b49(%rip), %rax # 0xb17010 imulq $0x5, %rcx, %rcx addq %rcx, %rax movslq 0x40(%rsp), %rcx movsbl (%rax,%rcx), %eax movl %eax, 0x3c(%rsp) movslq 0x44(%rsp), %rcx leaq 0xd4b49(%rip), %rax # 0xb17030 imulq $0x5, %rcx, %rcx addq %rcx, %rax movslq 0x40(%rsp), %rcx movsbl (%rax,%rcx), %eax movl %eax, 0x38(%rsp) movl $0x10, 0x34(%rsp) movl $0x10, 0x30(%rsp) movzbl 0x363(%rsp), %eax movl %eax, %ecx leaq 0x15c484(%rip), %rax # 0xb9e9a0 movq (%rax,%rcx,8), %rax movq %rax, 0x28(%rsp) movzbl 0x363(%rsp), %eax movl %eax, %ecx leaq 0x15c76a(%rip), %rax # 0xb9eca0 movq (%rax,%rcx,8), %rax movq %rax, 0x20(%rsp) leaq 0x1c(%rsp), %rsi leaq 0x18(%rsp), %rdx movzbl 0x363(%rsp), %edi callq 0xa3fd40 movl $0x0, 0x14(%rsp) cmpl $0x2, 0x14(%rsp) jge 0xa42687 cmpl $0x0, 0x1c(%rsp) je 0xa425a2 movq 0x370(%rsp), %rdi movl 0x14(%rsp), %eax shll $0x3, %eax cltq shlq %rax addq %rax, %rdi movl 0x364(%rsp), %esi leaq 0x250(%rsp), %rdx movl $0x10, %ecx callq 0xa40d70 jmp 0xa425d2 movq 0x370(%rsp), %rdi movl 0x14(%rsp), %eax shll $0x3, %eax cltq shlq %rax addq %rax, %rdi movl 0x364(%rsp), %esi leaq 0x250(%rsp), %rdx movl $0x10, %ecx callq 0xa40df0 leaq 0x250(%rsp), %rdi movq 0x48(%rsp), %rax movsbl (%rax), %edx movl $0x10, %esi callq 0xa3ff20 movq 0x28(%rsp), %rax leaq 0x250(%rsp), %rdi leaq 0x250(%rsp), %rsi movl 0x3c(%rsp), %ecx movsbl %cl, %edx callq *%rax leaq 0x250(%rsp), %rdi movq 0x48(%rsp), %rax movsbl 0x1(%rax), %edx movl $0x10, %esi callq 0xa3ff20 leaq 0x250(%rsp), %rdi leaq 0x50(%rsp), %rsi movl 0x14(%rsp), %eax shll $0x3, %eax cltq shlq $0x4, %rax addq %rax, %rsi callq 0xa40e60 leaq 0x250(%rsp), %rdi addq $0x80, 
%rdi leaq 0x50(%rsp), %rsi addq $0x100, %rsi # imm = 0x100 movl 0x14(%rsp), %eax shll $0x3, %eax cltq shlq $0x4, %rax addq %rax, %rsi callq 0xa40e60 movl 0x14(%rsp), %eax addl $0x1, %eax movl %eax, 0x14(%rsp) jmp 0xa4255e movl $0x0, 0x10(%rsp) cmpl $0x2, 0x10(%rsp) jge 0xa42758 cmpl $0x0, 0x18(%rsp) je 0xa426d4 leaq 0x250(%rsp), %rax movq %rax, 0x8(%rsp) leaq 0x50(%rsp), %rdi movl 0x10(%rsp), %eax shll $0x4, %eax cltq shlq $0x4, %rax addq %rax, %rdi movq 0x8(%rsp), %rsi movl $0x10, %edx callq 0xa40230 jmp 0xa426ef leaq 0x50(%rsp), %rax movl 0x10(%rsp), %ecx shll $0x4, %ecx movslq %ecx, %rcx shlq $0x4, %rcx addq %rcx, %rax movq %rax, 0x8(%rsp) movq 0x20(%rsp), %rax movq 0x8(%rsp), %rdi movq 0x8(%rsp), %rsi movl 0x38(%rsp), %ecx movsbl %cl, %edx callq *%rax movq 0x8(%rsp), %rdi movq 0x48(%rsp), %rax movsbl 0x2(%rax), %edx movl $0x10, %esi callq 0xa3ff20 movq 0x8(%rsp), %rdi movq 0x368(%rsp), %rsi movl 0x10(%rsp), %eax shll $0x3, %eax cltq shlq $0x2, %rax addq %rax, %rsi movl $0x10, %ecx movl %ecx, %edx callq 0xa40ad0 movl 0x10(%rsp), %eax addl $0x1, %eax movl %eax, 0x10(%rsp) jmp 0xa4268f addq $0x378, %rsp # imm = 0x378 retq
/m-ab-s[P]aom/av1/encoder/x86/av1_fwd_txfm_sse2.c
av1_lowbd_fwd_txfm2d_16x32_sse2
// 16x32 forward 2D transform, SSE2 path.
// Transform types without an SSE2 32-point column kernel (NULL entries in
// the dispatch tables) fall back to the C implementation.
void av1_lowbd_fwd_txfm2d_16x32_sse2(const int16_t *input, int32_t *output,
                                     int stride, TX_TYPE tx_type, int bd) {
  (void)bd;
  __m128i buf0[32], buf1[64];
  const int8_t *shift = av1_fwd_txfm_shift_ls[TX_16X32];
  const int txw_idx = get_txw_idx(TX_16X32);
  const int txh_idx = get_txh_idx(TX_16X32);
  const int cos_bit_col = av1_fwd_cos_bit_col[txw_idx][txh_idx];
  const int cos_bit_row = av1_fwd_cos_bit_row[txw_idx][txh_idx];
  const int width = 16;
  const int height = 32;
  const transform_1d_sse2 col_txfm = col_txfm8x32_arr[tx_type];
  const transform_1d_sse2 row_txfm = row_txfm8x16_arr[tx_type];
  if (col_txfm == NULL || row_txfm == NULL) {
    av1_fwd_txfm2d_16x32_c(input, output, stride, tx_type, bd);
    return;
  }

  int ud_flip, lr_flip;
  get_flip_cfg(tx_type, &ud_flip, &lr_flip);

  // Column pass over two 8-wide halves, each transposed as four 8x8 tiles.
  for (int half = 0; half < 2; ++half) {
    const int16_t *src = input + 8 * half;
    if (!ud_flip) {
      load_buffer_16bit_to_16bit(src, stride, buf0, height);
    } else {
      load_buffer_16bit_to_16bit_flip(src, stride, buf0, height);
    }
    round_shift_16bit(buf0, height, shift[0]);
    col_txfm(buf0, buf0, cos_bit_col);
    round_shift_16bit(buf0, height, shift[1]);
    for (int tile = 0; tile < 4; ++tile) {
      transpose_16bit_8x8(buf0 + 8 * tile, buf1 + tile * width + 8 * half);
    }
  }

  // Row pass over four 8-row chunks; rectangular store applies the
  // non-square scaling.
  for (int chunk = 0; chunk < 4; ++chunk) {
    __m128i *buf;
    if (lr_flip) {
      buf = buf0;
      flip_buf_sse2(buf1 + width * chunk, buf, width);
    } else {
      buf = buf1 + width * chunk;
    }
    row_txfm(buf, buf, cos_bit_row);
    round_shift_16bit(buf, width, shift[2]);
    store_rect_buffer_16bit_to_32bit_w8(buf, output + 8 * chunk, height, width);
  }
}
subq $0x678, %rsp # imm = 0x678 movb %cl, %al movq %rdi, 0x670(%rsp) movq %rsi, 0x668(%rsp) movl %edx, 0x664(%rsp) movb %al, 0x663(%rsp) movl %r8d, 0x65c(%rsp) leaq 0x16337a(%rip), %rax # 0xba5b10 movq 0x48(%rax), %rax movq %rax, 0x48(%rsp) movl $0x9, %edi callq 0xa3fd00 movl %eax, 0x44(%rsp) movl $0x9, %edi callq 0xa3fd20 movl %eax, 0x40(%rsp) movslq 0x44(%rsp), %rcx leaq 0xd4849(%rip), %rax # 0xb17010 imulq $0x5, %rcx, %rcx addq %rcx, %rax movslq 0x40(%rsp), %rcx movsbl (%rax,%rcx), %eax movl %eax, 0x3c(%rsp) movslq 0x44(%rsp), %rcx leaq 0xd4849(%rip), %rax # 0xb17030 imulq $0x5, %rcx, %rcx addq %rcx, %rax movslq 0x40(%rsp), %rcx movsbl (%rax,%rcx), %eax movl %eax, 0x38(%rsp) movl $0x10, 0x34(%rsp) movl $0x20, 0x30(%rsp) movzbl 0x663(%rsp), %eax movl %eax, %ecx leaq 0x15c404(%rip), %rax # 0xb9ec20 movq (%rax,%rcx,8), %rax movq %rax, 0x28(%rsp) movzbl 0x663(%rsp), %eax movl %eax, %ecx leaq 0x15c46a(%rip), %rax # 0xb9eca0 movq (%rax,%rcx,8), %rax movq %rax, 0x20(%rsp) cmpq $0x0, 0x28(%rsp) je 0xa42ad5 cmpq $0x0, 0x20(%rsp) je 0xa42ad5 leaq 0x1c(%rsp), %rsi leaq 0x18(%rsp), %rdx movzbl 0x663(%rsp), %edi callq 0xa3fd40 movl $0x0, 0x14(%rsp) cmpl $0x2, 0x14(%rsp) jge 0xa429ff cmpl $0x0, 0x1c(%rsp) je 0xa428ba movq 0x670(%rsp), %rdi movl 0x14(%rsp), %eax shll $0x3, %eax cltq shlq %rax addq %rax, %rdi movl 0x664(%rsp), %esi leaq 0x450(%rsp), %rdx movl $0x20, %ecx callq 0xa40d70 jmp 0xa428ea movq 0x670(%rsp), %rdi movl 0x14(%rsp), %eax shll $0x3, %eax cltq shlq %rax addq %rax, %rdi movl 0x664(%rsp), %esi leaq 0x450(%rsp), %rdx movl $0x20, %ecx callq 0xa40df0 leaq 0x450(%rsp), %rdi movq 0x48(%rsp), %rax movsbl (%rax), %edx movl $0x20, %esi callq 0xa3ff20 movq 0x28(%rsp), %rax leaq 0x450(%rsp), %rdi leaq 0x450(%rsp), %rsi movl 0x3c(%rsp), %ecx movsbl %cl, %edx callq *%rax leaq 0x450(%rsp), %rdi movq 0x48(%rsp), %rax movsbl 0x1(%rax), %edx movl $0x20, %esi callq 0xa3ff20 leaq 0x450(%rsp), %rdi leaq 0x50(%rsp), %rsi movl 0x14(%rsp), %eax shll $0x3, %eax cltq shlq $0x4, %rax 
addq %rax, %rsi callq 0xa40e60 leaq 0x450(%rsp), %rdi addq $0x80, %rdi leaq 0x50(%rsp), %rsi addq $0x100, %rsi # imm = 0x100 movl 0x14(%rsp), %eax shll $0x3, %eax cltq shlq $0x4, %rax addq %rax, %rsi callq 0xa40e60 leaq 0x450(%rsp), %rdi addq $0x100, %rdi # imm = 0x100 leaq 0x50(%rsp), %rsi addq $0x200, %rsi # imm = 0x200 movl 0x14(%rsp), %eax shll $0x3, %eax cltq shlq $0x4, %rax addq %rax, %rsi callq 0xa40e60 leaq 0x450(%rsp), %rdi addq $0x180, %rdi # imm = 0x180 leaq 0x50(%rsp), %rsi addq $0x300, %rsi # imm = 0x300 movl 0x14(%rsp), %eax shll $0x3, %eax cltq shlq $0x4, %rax addq %rax, %rsi callq 0xa40e60 movl 0x14(%rsp), %eax addl $0x1, %eax movl %eax, 0x14(%rsp) jmp 0xa42876 movl $0x0, 0x10(%rsp) cmpl $0x4, 0x10(%rsp) jge 0xa42ad3 cmpl $0x0, 0x18(%rsp) je 0xa42a4c leaq 0x450(%rsp), %rax movq %rax, 0x8(%rsp) leaq 0x50(%rsp), %rdi movl 0x10(%rsp), %eax shll $0x4, %eax cltq shlq $0x4, %rax addq %rax, %rdi movq 0x8(%rsp), %rsi movl $0x10, %edx callq 0xa40230 jmp 0xa42a67 leaq 0x50(%rsp), %rax movl 0x10(%rsp), %ecx shll $0x4, %ecx movslq %ecx, %rcx shlq $0x4, %rcx addq %rcx, %rax movq %rax, 0x8(%rsp) movq 0x20(%rsp), %rax movq 0x8(%rsp), %rdi movq 0x8(%rsp), %rsi movl 0x38(%rsp), %ecx movsbl %cl, %edx callq *%rax movq 0x8(%rsp), %rdi movq 0x48(%rsp), %rax movsbl 0x2(%rax), %edx movl $0x10, %esi callq 0xa3ff20 movq 0x8(%rsp), %rdi movq 0x668(%rsp), %rsi movl 0x10(%rsp), %eax shll $0x3, %eax cltq shlq $0x2, %rax addq %rax, %rsi movl $0x20, %edx movl $0x10, %ecx callq 0xa407b0 movl 0x10(%rsp), %eax addl $0x1, %eax movl %eax, 0x10(%rsp) jmp 0xa42a07 jmp 0xa42b03 movq 0x670(%rsp), %rdi movq 0x668(%rsp), %rsi movl 0x664(%rsp), %edx movb 0x663(%rsp), %al movl 0x65c(%rsp), %r8d movzbl %al, %ecx callq 0x5fc950 addq $0x678, %rsp # imm = 0x678 retq nopl (%rax,%rax)
/m-ab-s[P]aom/av1/encoder/x86/av1_fwd_txfm_sse2.c
av1_lowbd_fwd_txfm2d_32x8_sse2
// 32x8 forward 2D transform, SSE2 path.
// Column pass: four 8-wide strips, each transposed as one 8x8 tile into
// buf1.  Row pass: a single 8-row chunk spanning the full 32-wide data.
// Transform types without an SSE2 32-point row kernel (NULL entries in
// row_txfm8x32_arr) fall back to the C implementation.
void av1_lowbd_fwd_txfm2d_32x8_sse2(const int16_t *input, int32_t *output,
                                    int stride, TX_TYPE tx_type, int bd) {
  (void)bd;
  __m128i buf0[32], buf1[32];
  const int8_t *shift = av1_fwd_txfm_shift_ls[TX_32X8];
  const int txw_idx = get_txw_idx(TX_32X8);
  const int txh_idx = get_txh_idx(TX_32X8);
  const int cos_bit_col = av1_fwd_cos_bit_col[txw_idx][txh_idx];
  const int cos_bit_row = av1_fwd_cos_bit_row[txw_idx][txh_idx];
  const int width = 32;
  const int height = 8;
  const transform_1d_sse2 col_txfm = col_txfm8x8_arr[tx_type];
  const transform_1d_sse2 row_txfm = row_txfm8x32_arr[tx_type];
  if (col_txfm != NULL && row_txfm != NULL) {
    int ud_flip, lr_flip;
    get_flip_cfg(tx_type, &ud_flip, &lr_flip);
    for (int i = 0; i < 4; i++) {
      if (ud_flip) {
        load_buffer_16bit_to_16bit_flip(input + 8 * i, stride, buf0, height);
      } else {
        load_buffer_16bit_to_16bit(input + 8 * i, stride, buf0, height);
      }
      round_shift_16bit(buf0, height, shift[0]);
      col_txfm(buf0, buf0, cos_bit_col);
      round_shift_16bit(buf0, height, shift[1]);
      transpose_16bit_8x8(buf0, buf1 + 0 * width + 8 * i);
    }
    // Only one 8-row chunk exists (height == 8); the loop shape mirrors
    // the sibling transforms.
    for (int i = 0; i < 1; i++) {
      __m128i *buf;
      if (lr_flip) {
        buf = buf0;
        flip_buf_sse2(buf1 + width * i, buf, width);
      } else {
        buf = buf1 + width * i;
      }
      row_txfm(buf, buf, cos_bit_row);
      round_shift_16bit(buf, width, shift[2]);
      store_buffer_16bit_to_32bit_w8(buf, output + 8 * i, height, width);
    }
  } else {
    // BUG FIX: this previously fell back to av1_fwd_txfm2d_32x16_c, which
    // computes a 32x16 transform and writes 512 coefficients into the
    // caller's 32x8 (256-entry) output buffer — wrong results and a
    // buffer overflow.  Use the matching 32x8 C fallback instead.
    av1_fwd_txfm2d_32x8_c(input, output, stride, tx_type, bd);
  }
}
subq $0x478, %rsp # imm = 0x478 movb %cl, %al movq %rdi, 0x470(%rsp) movq %rsi, 0x468(%rsp) movl %edx, 0x464(%rsp) movb %al, 0x463(%rsp) movl %r8d, 0x45c(%rsp) leaq 0x162fca(%rip), %rax # 0xba5b10 movq 0x80(%rax), %rax movq %rax, 0x48(%rsp) movl $0x10, %edi callq 0xa3fd00 movl %eax, 0x44(%rsp) movl $0x10, %edi callq 0xa3fd20 movl %eax, 0x40(%rsp) movslq 0x44(%rsp), %rcx leaq 0xd4496(%rip), %rax # 0xb17010 imulq $0x5, %rcx, %rcx addq %rcx, %rax movslq 0x40(%rsp), %rcx movsbl (%rax,%rcx), %eax movl %eax, 0x3c(%rsp) movslq 0x44(%rsp), %rcx leaq 0xd4496(%rip), %rax # 0xb17030 imulq $0x5, %rcx, %rcx addq %rcx, %rax movslq 0x40(%rsp), %rcx movsbl (%rax,%rcx), %eax movl %eax, 0x38(%rsp) movl $0x20, 0x34(%rsp) movl $0x8, 0x30(%rsp) movzbl 0x463(%rsp), %eax movl %eax, %ecx leaq 0x15bf51(%rip), %rax # 0xb9eb20 movq (%rax,%rcx,8), %rax movq %rax, 0x28(%rsp) movzbl 0x463(%rsp), %eax movl %eax, %ecx leaq 0x15c137(%rip), %rax # 0xb9ed20 movq (%rax,%rcx,8), %rax movq %rax, 0x20(%rsp) cmpq $0x0, 0x28(%rsp) je 0xa42df8 cmpq $0x0, 0x20(%rsp) je 0xa42df8 leaq 0x1c(%rsp), %rsi leaq 0x18(%rsp), %rdx movzbl 0x463(%rsp), %edi callq 0xa3fd40 movl $0x0, 0x14(%rsp) cmpl $0x4, 0x14(%rsp) jge 0xa42d22 cmpl $0x0, 0x1c(%rsp) je 0xa42c6d movq 0x470(%rsp), %rdi movl 0x14(%rsp), %eax shll $0x3, %eax cltq shlq %rax addq %rax, %rdi movl 0x464(%rsp), %esi leaq 0x250(%rsp), %rdx movl $0x8, %ecx callq 0xa40d70 jmp 0xa42c9d movq 0x470(%rsp), %rdi movl 0x14(%rsp), %eax shll $0x3, %eax cltq shlq %rax addq %rax, %rdi movl 0x464(%rsp), %esi leaq 0x250(%rsp), %rdx movl $0x8, %ecx callq 0xa40df0 leaq 0x250(%rsp), %rdi movq 0x48(%rsp), %rax movsbl (%rax), %edx movl $0x8, %esi callq 0xa3ff20 movq 0x28(%rsp), %rax leaq 0x250(%rsp), %rdi leaq 0x250(%rsp), %rsi movl 0x3c(%rsp), %ecx movsbl %cl, %edx callq *%rax leaq 0x250(%rsp), %rdi movq 0x48(%rsp), %rax movsbl 0x1(%rax), %edx movl $0x8, %esi callq 0xa3ff20 leaq 0x250(%rsp), %rdi leaq 0x50(%rsp), %rsi movl 0x14(%rsp), %eax shll $0x3, %eax cltq shlq $0x4, %rax 
addq %rax, %rsi callq 0xa40e60 movl 0x14(%rsp), %eax addl $0x1, %eax movl %eax, 0x14(%rsp) jmp 0xa42c29 movl $0x0, 0x10(%rsp) cmpl $0x1, 0x10(%rsp) jge 0xa42df6 cmpl $0x0, 0x18(%rsp) je 0xa42d6f leaq 0x250(%rsp), %rax movq %rax, 0x8(%rsp) leaq 0x50(%rsp), %rdi movl 0x10(%rsp), %eax shll $0x5, %eax cltq shlq $0x4, %rax addq %rax, %rdi movq 0x8(%rsp), %rsi movl $0x20, %edx callq 0xa40230 jmp 0xa42d8a leaq 0x50(%rsp), %rax movl 0x10(%rsp), %ecx shll $0x5, %ecx movslq %ecx, %rcx shlq $0x4, %rcx addq %rcx, %rax movq %rax, 0x8(%rsp) movq 0x20(%rsp), %rax movq 0x8(%rsp), %rdi movq 0x8(%rsp), %rsi movl 0x38(%rsp), %ecx movsbl %cl, %edx callq *%rax movq 0x8(%rsp), %rdi movq 0x48(%rsp), %rax movsbl 0x2(%rax), %edx movl $0x20, %esi callq 0xa3ff20 movq 0x8(%rsp), %rdi movq 0x468(%rsp), %rsi movl 0x10(%rsp), %eax shll $0x3, %eax cltq shlq $0x2, %rax addq %rax, %rsi movl $0x8, %edx movl $0x20, %ecx callq 0xa40ad0 movl 0x10(%rsp), %eax addl $0x1, %eax movl %eax, 0x10(%rsp) jmp 0xa42d2a jmp 0xa42e26 movq 0x470(%rsp), %rdi movq 0x468(%rsp), %rsi movl 0x464(%rsp), %edx movb 0x463(%rsp), %al movl 0x45c(%rsp), %r8d movzbl %al, %ecx callq 0x5fc9d0 addq $0x478, %rsp # imm = 0x478 retq nop
/m-ab-s[P]aom/av1/encoder/x86/av1_fwd_txfm_sse2.c
av1_lowbd_fwd_txfm2d_32x32_sse2
// 32x32 forward 2D transform, low bit-depth SSE2 path.
// Runs the column transform on four 8-wide strips, transposes each 8x8
// tile into buf1, then runs the row transform on four 8-high strips and
// stores the result. Falls back to the C reference when no SSE2 1-D
// kernel exists for this tx_type.
void av1_lowbd_fwd_txfm2d_32x32_sse2(const int16_t *input, int32_t *output,
                                     int stride, TX_TYPE tx_type, int bd) {
  (void)bd;
  __m128i buf0[32], buf1[128];
  const int8_t *shift = av1_fwd_txfm_shift_ls[TX_32X32];
  const int txw_idx = get_txw_idx(TX_32X32);
  const int txh_idx = get_txh_idx(TX_32X32);
  const int cos_bit_col = av1_fwd_cos_bit_col[txw_idx][txh_idx];
  const int cos_bit_row = av1_fwd_cos_bit_row[txw_idx][txh_idx];
  const int width = 32;
  const int height = 32;
  const transform_1d_sse2 col_txfm = col_txfm8x32_arr[tx_type];
  const transform_1d_sse2 row_txfm = row_txfm8x32_arr[tx_type];

  if (col_txfm == NULL || row_txfm == NULL) {
    // No SSE2 kernel for this transform type: use the C implementation.
    av1_fwd_txfm2d_32x32_c(input, output, stride, tx_type, bd);
    return;
  }

  int ud_flip, lr_flip;
  get_flip_cfg(tx_type, &ud_flip, &lr_flip);

  // Column pass over four strips of 8 columns each.
  for (int strip = 0; strip < 4; ++strip) {
    if (ud_flip) {
      load_buffer_16bit_to_16bit_flip(input + 8 * strip, stride, buf0, height);
    } else {
      load_buffer_16bit_to_16bit(input + 8 * strip, stride, buf0, height);
    }
    round_shift_16bit(buf0, height, shift[0]);
    col_txfm(buf0, buf0, cos_bit_col);
    round_shift_16bit(buf0, height, shift[1]);
    // Transpose each 8x8 tile of the strip into the row-pass buffer.
    for (int tile = 0; tile < 4; ++tile) {
      transpose_16bit_8x8(buf0 + tile * 8, buf1 + tile * width + 8 * strip);
    }
  }

  // Row pass over four strips of 8 rows each.
  for (int strip = 0; strip < 4; ++strip) {
    __m128i *buf;
    if (lr_flip) {
      buf = buf0;
      flip_buf_sse2(buf1 + width * strip, buf, width);
    } else {
      buf = buf1 + width * strip;
    }
    row_txfm(buf, buf, cos_bit_row);
    round_shift_16bit(buf, width, shift[2]);
    store_buffer_16bit_to_32bit_w8(buf, output + 8 * strip, height, width);
  }
}
subq $0xa78, %rsp # imm = 0xA78 movb %cl, %al movq %rdi, 0xa70(%rsp) movq %rsi, 0xa68(%rsp) movl %edx, 0xa64(%rsp) movb %al, 0xa63(%rsp) movl %r8d, 0xa5c(%rsp) leaq 0x16295a(%rip), %rax # 0xba5b10 movq 0x18(%rax), %rax movq %rax, 0x48(%rsp) movl $0x3, %edi callq 0xa3fd00 movl %eax, 0x44(%rsp) movl $0x3, %edi callq 0xa3fd20 movl %eax, 0x40(%rsp) movslq 0x44(%rsp), %rcx leaq 0xd3e29(%rip), %rax # 0xb17010 imulq $0x5, %rcx, %rcx addq %rcx, %rax movslq 0x40(%rsp), %rcx movsbl (%rax,%rcx), %eax movl %eax, 0x3c(%rsp) movslq 0x44(%rsp), %rcx leaq 0xd3e29(%rip), %rax # 0xb17030 imulq $0x5, %rcx, %rcx addq %rcx, %rax movslq 0x40(%rsp), %rcx movsbl (%rax,%rcx), %eax movl %eax, 0x38(%rsp) movl $0x20, 0x34(%rsp) movl $0x20, 0x30(%rsp) movzbl 0xa63(%rsp), %eax movl %eax, %ecx leaq 0x15b9e4(%rip), %rax # 0xb9ec20 movq (%rax,%rcx,8), %rax movq %rax, 0x28(%rsp) movzbl 0xa63(%rsp), %eax movl %eax, %ecx leaq 0x15baca(%rip), %rax # 0xb9ed20 movq (%rax,%rcx,8), %rax movq %rax, 0x20(%rsp) cmpq $0x0, 0x28(%rsp) je 0xa434f2 cmpq $0x0, 0x20(%rsp) je 0xa434f2 leaq 0x1c(%rsp), %rsi leaq 0x18(%rsp), %rdx movzbl 0xa63(%rsp), %edi callq 0xa3fd40 movl $0x0, 0x14(%rsp) cmpl $0x4, 0x14(%rsp) jge 0xa4341f cmpl $0x0, 0x1c(%rsp) je 0xa432da movq 0xa70(%rsp), %rdi movl 0x14(%rsp), %eax shll $0x3, %eax cltq shlq %rax addq %rax, %rdi movl 0xa64(%rsp), %esi leaq 0x850(%rsp), %rdx movl $0x20, %ecx callq 0xa40d70 jmp 0xa4330a movq 0xa70(%rsp), %rdi movl 0x14(%rsp), %eax shll $0x3, %eax cltq shlq %rax addq %rax, %rdi movl 0xa64(%rsp), %esi leaq 0x850(%rsp), %rdx movl $0x20, %ecx callq 0xa40df0 leaq 0x850(%rsp), %rdi movq 0x48(%rsp), %rax movsbl (%rax), %edx movl $0x20, %esi callq 0xa3ff20 movq 0x28(%rsp), %rax leaq 0x850(%rsp), %rdi leaq 0x850(%rsp), %rsi movl 0x3c(%rsp), %ecx movsbl %cl, %edx callq *%rax leaq 0x850(%rsp), %rdi movq 0x48(%rsp), %rax movsbl 0x1(%rax), %edx movl $0x20, %esi callq 0xa3ff20 leaq 0x850(%rsp), %rdi leaq 0x50(%rsp), %rsi movl 0x14(%rsp), %eax shll $0x3, %eax cltq shlq $0x4, %rax 
addq %rax, %rsi callq 0xa40e60 leaq 0x850(%rsp), %rdi addq $0x80, %rdi leaq 0x50(%rsp), %rsi addq $0x200, %rsi # imm = 0x200 movl 0x14(%rsp), %eax shll $0x3, %eax cltq shlq $0x4, %rax addq %rax, %rsi callq 0xa40e60 leaq 0x850(%rsp), %rdi addq $0x100, %rdi # imm = 0x100 leaq 0x50(%rsp), %rsi addq $0x400, %rsi # imm = 0x400 movl 0x14(%rsp), %eax shll $0x3, %eax cltq shlq $0x4, %rax addq %rax, %rsi callq 0xa40e60 leaq 0x850(%rsp), %rdi addq $0x180, %rdi # imm = 0x180 leaq 0x50(%rsp), %rsi addq $0x600, %rsi # imm = 0x600 movl 0x14(%rsp), %eax shll $0x3, %eax cltq shlq $0x4, %rax addq %rax, %rsi callq 0xa40e60 movl 0x14(%rsp), %eax addl $0x1, %eax movl %eax, 0x14(%rsp) jmp 0xa43296 movl $0x0, 0x10(%rsp) cmpl $0x4, 0x10(%rsp) jge 0xa434f0 cmpl $0x0, 0x18(%rsp) je 0xa4346c leaq 0x850(%rsp), %rax movq %rax, 0x8(%rsp) leaq 0x50(%rsp), %rdi movl 0x10(%rsp), %eax shll $0x5, %eax cltq shlq $0x4, %rax addq %rax, %rdi movq 0x8(%rsp), %rsi movl $0x20, %edx callq 0xa40230 jmp 0xa43487 leaq 0x50(%rsp), %rax movl 0x10(%rsp), %ecx shll $0x5, %ecx movslq %ecx, %rcx shlq $0x4, %rcx addq %rcx, %rax movq %rax, 0x8(%rsp) movq 0x20(%rsp), %rax movq 0x8(%rsp), %rdi movq 0x8(%rsp), %rsi movl 0x38(%rsp), %ecx movsbl %cl, %edx callq *%rax movq 0x8(%rsp), %rdi movq 0x48(%rsp), %rax movsbl 0x2(%rax), %edx movl $0x20, %esi callq 0xa3ff20 movq 0x8(%rsp), %rdi movq 0xa68(%rsp), %rsi movl 0x10(%rsp), %eax shll $0x3, %eax cltq shlq $0x2, %rax addq %rax, %rsi movl $0x20, %ecx movl %ecx, %edx callq 0xa40ad0 movl 0x10(%rsp), %eax addl $0x1, %eax movl %eax, 0x10(%rsp) jmp 0xa43427 jmp 0xa43520 movq 0xa70(%rsp), %rdi movq 0xa68(%rsp), %rsi movl 0xa64(%rsp), %edx movb 0xa63(%rsp), %al movl 0xa5c(%rsp), %r8d movzbl %al, %ecx callq 0x5fcdd0 addq $0xa78, %rsp # imm = 0xA78 retq nopl (%rax,%rax)
/m-ab-s[P]aom/av1/encoder/x86/av1_fwd_txfm_sse2.c
av1_lowbd_fwd_txfm2d_64x16_sse2
// 64x16 forward 2D transform, low bit-depth SSE2 path.
// Only DCT_DCT is supported at this size, so the 1-D kernels are fixed
// (fdct8x16 for columns, fdct8x64 for rows). Only the leftmost 32 output
// columns of each row strip are stored; the bottom 16x32 region of the
// output is zeroed.
void av1_lowbd_fwd_txfm2d_64x16_sse2(const int16_t *input, int32_t *output,
                                     int stride, TX_TYPE tx_type, int bd) {
  (void)bd;
  (void)tx_type;
  assert(tx_type == DCT_DCT);
  const TX_SIZE tx_size = TX_64X16;
  __m128i buf0[64], buf1[128];
  const int8_t *shift = av1_fwd_txfm_shift_ls[tx_size];
  const int txw_idx = get_txw_idx(tx_size);
  const int txh_idx = get_txh_idx(tx_size);
  const int cos_bit_col = av1_fwd_cos_bit_col[txw_idx][txh_idx];
  const int cos_bit_row = av1_fwd_cos_bit_row[txw_idx][txh_idx];
  const int width = tx_size_wide[tx_size];
  const int height = tx_size_high[tx_size];
  const transform_1d_sse2 col_txfm = fdct8x16_new_sse2;
  const transform_1d_sse2 row_txfm = av1_fdct8x64_new_sse2;
  const int width_div8 = (width >> 3);
  const int height_div8 = (height >> 3);

  // Column pass: transform each 8-wide strip, transposing 8x8 tiles
  // into the row-pass buffer.
  for (int cs = 0; cs < width_div8; ++cs) {
    load_buffer_16bit_to_16bit(input + 8 * cs, stride, buf0, height);
    round_shift_16bit(buf0, height, shift[0]);
    col_txfm(buf0, buf0, cos_bit_col);
    round_shift_16bit(buf0, height, shift[1]);
    for (int tile = 0; tile < height_div8; ++tile) {
      transpose_16bit_8x8(buf0 + tile * 8, buf1 + tile * width + 8 * cs);
    }
  }

  // Row pass: transform each 8-high strip and keep only the first 32
  // columns of the 64-point row output.
  for (int rs = 0; rs < height_div8; ++rs) {
    __m128i *buf = buf1 + width * rs;
    row_txfm(buf, buf, cos_bit_row);
    round_shift_16bit(buf, width, shift[2]);
    store_buffer_16bit_to_32bit_w8(buf, output + 8 * rs, 16, 32);
  }

  // Zero out the bottom 16x32 area.
  memset(output + 16 * 32, 0, 16 * 32 * sizeof(*output));
}
subq $0xc78, %rsp # imm = 0xC78 movb %cl, %al movq %rdi, 0xc70(%rsp) movq %rsi, 0xc68(%rsp) movl %edx, 0xc64(%rsp) movb %al, 0xc63(%rsp) movl %r8d, 0xc5c(%rsp) movb $0x12, 0xc5b(%rsp) leaq 0x1625a2(%rip), %rax # 0xba5b10 movq 0x90(%rax), %rax movq %rax, 0x48(%rsp) movl $0x12, %edi callq 0xa3fd00 movl %eax, 0x44(%rsp) movl $0x12, %edi callq 0xa3fd20 movl %eax, 0x40(%rsp) movslq 0x44(%rsp), %rcx leaq 0xd3a6e(%rip), %rax # 0xb17010 imulq $0x5, %rcx, %rcx addq %rcx, %rax movslq 0x40(%rsp), %rcx movsbl (%rax,%rcx), %eax movl %eax, 0x3c(%rsp) movslq 0x44(%rsp), %rcx leaq 0xd3a6e(%rip), %rax # 0xb17030 imulq $0x5, %rcx, %rcx addq %rcx, %rax movslq 0x40(%rsp), %rcx movsbl (%rax,%rcx), %eax movl %eax, 0x38(%rsp) movl 0xd85cc(%rip), %eax # 0xb1bba8 movl %eax, 0x34(%rsp) movl 0xd8612(%rip), %eax # 0xb1bbf8 movl %eax, 0x30(%rsp) leaq 0x1df(%rip), %rax # 0xa437d0 movq %rax, 0x28(%rsp) leaq -0x2197d(%rip), %rax # 0xa21c80 movq %rax, 0x20(%rsp) movl $0x8, 0x1c(%rsp) movl $0x2, 0x18(%rsp) movl $0x0, 0x14(%rsp) cmpl $0x8, 0x14(%rsp) jge 0xa43716 movq 0xc70(%rsp), %rdi movl 0x14(%rsp), %eax shll $0x3, %eax cltq shlq %rax addq %rax, %rdi movl 0xc64(%rsp), %esi leaq 0x850(%rsp), %rdx movl $0x10, %ecx callq 0xa40df0 leaq 0x850(%rsp), %rdi movq 0x48(%rsp), %rax movsbl (%rax), %edx movl $0x10, %esi callq 0xa3ff20 leaq 0x850(%rsp), %rdi leaq 0x850(%rsp), %rsi movl 0x3c(%rsp), %eax movsbl %al, %edx callq 0xa437d0 leaq 0x850(%rsp), %rdi movq 0x48(%rsp), %rax movsbl 0x1(%rax), %edx movl $0x10, %esi callq 0xa3ff20 movl $0x0, 0x10(%rsp) cmpl $0x2, 0x10(%rsp) jge 0xa43704 leaq 0x850(%rsp), %rdi movl 0x10(%rsp), %eax shll $0x3, %eax cltq shlq $0x4, %rax addq %rax, %rdi leaq 0x50(%rsp), %rsi movl 0x10(%rsp), %eax shll $0x6, %eax cltq shlq $0x4, %rax addq %rax, %rsi movl 0x14(%rsp), %eax shll $0x3, %eax cltq shlq $0x4, %rax addq %rax, %rsi callq 0xa40e60 movl 0x10(%rsp), %eax addl $0x1, %eax movl %eax, 0x10(%rsp) jmp 0xa436ae jmp 0xa43706 movl 0x14(%rsp), %eax addl $0x1, %eax movl %eax, 0x14(%rsp) 
jmp 0xa4361a movl $0x0, 0xc(%rsp) cmpl $0x2, 0xc(%rsp) jge 0xa437a9 leaq 0x50(%rsp), %rax movl 0xc(%rsp), %ecx shll $0x6, %ecx movslq %ecx, %rcx shlq $0x4, %rcx addq %rcx, %rax movq %rax, (%rsp) movq (%rsp), %rdi movq (%rsp), %rsi movl 0x38(%rsp), %eax movsbl %al, %edx callq 0xa21c80 movq (%rsp), %rdi movq 0x48(%rsp), %rax movsbl 0x2(%rax), %edx movl $0x40, %esi callq 0xa3ff20 movq (%rsp), %rdi movq 0xc68(%rsp), %rsi movl 0xc(%rsp), %eax shll $0x3, %eax cltq shlq $0x2, %rax addq %rax, %rsi movl $0x10, %edx movl $0x20, %ecx callq 0xa40ad0 movl 0xc(%rsp), %eax addl $0x1, %eax movl %eax, 0xc(%rsp) jmp 0xa4371e movq 0xc68(%rsp), %rdi addq $0x800, %rdi # imm = 0x800 xorl %esi, %esi movl $0x800, %edx # imm = 0x800 callq 0x18280 addq $0xc78, %rsp # imm = 0xC78 retq nopl (%rax)
/m-ab-s[P]aom/av1/encoder/x86/av1_fwd_txfm_sse2.c
fdct4x4_new_sse2
// 4-point forward DCT on 8 packed int16 lanes (SSE2).
// Each input row is interleaved so _mm_madd_epi16 sees (even, odd) 16-bit
// pairs, a stage-1 butterfly is applied, and the results are multiplied
// against packed cosine pairs, rounded, shifted back by cos_bit and
// re-packed to int16. output[2]/output[3] expose the upper halves of the
// packed results.
static void fdct4x4_new_sse2(const __m128i *input, __m128i *output,
                             int8_t cos_bit) {
  const int32_t *cospi = cospi_arr(cos_bit);
  // Packed 16-bit cosine pairs consumed by _mm_madd_epi16.
  const __m128i cospi_p32_p32 = pair_set_epi16(cospi[32], cospi[32]);
  const __m128i cospi_p32_m32 = pair_set_epi16(cospi[32], -cospi[32]);
  const __m128i cospi_p16_p48 = pair_set_epi16(cospi[16], cospi[48]);
  const __m128i cospi_p48_m16 = pair_set_epi16(cospi[48], -cospi[16]);
  // Rounding offset applied before the arithmetic shift by cos_bit.
  const __m128i __rounding = _mm_set1_epi32(1 << (cos_bit - 1));

  // Interleave rows, then form the stage-1 butterfly.
  const __m128i lo01 = _mm_unpacklo_epi16(input[0], input[1]);
  const __m128i lo32 = _mm_unpacklo_epi16(input[3], input[2]);
  const __m128i sum = _mm_add_epi16(lo01, lo32);
  const __m128i dif = _mm_sub_epi16(lo01, lo32);

  // Stage 2: dot products with the cosine pairs.
  const __m128i d0 = _mm_madd_epi16(sum, cospi_p32_p32);  // 0
  const __m128i d2 = _mm_madd_epi16(sum, cospi_p32_m32);  // 2
  const __m128i d1 = _mm_madd_epi16(dif, cospi_p16_p48);  // 1
  const __m128i d3 = _mm_madd_epi16(dif, cospi_p48_m16);  // 3

  // Round and shift back to 16-bit precision.
  const __m128i r0 = _mm_srai_epi32(_mm_add_epi32(d0, __rounding), cos_bit);
  const __m128i r2 = _mm_srai_epi32(_mm_add_epi32(d2, __rounding), cos_bit);
  const __m128i r1 = _mm_srai_epi32(_mm_add_epi32(d1, __rounding), cos_bit);
  const __m128i r3 = _mm_srai_epi32(_mm_add_epi32(d3, __rounding), cos_bit);

  output[0] = _mm_packs_epi32(r0, r2);
  output[1] = _mm_packs_epi32(r1, r3);
  output[2] = _mm_srli_si128(output[0], 8);
  output[3] = _mm_srli_si128(output[1], 8);
}
subq $0x3f8, %rsp # imm = 0x3F8 movb %dl, %al movq %rdi, 0xf0(%rsp) movq %rsi, 0xe8(%rsp) movb %al, 0xe7(%rsp) movsbl 0xe7(%rsp), %edi callq 0xa21c60 movq %rax, 0xd8(%rsp) movq 0xd8(%rsp), %rax movzwl 0x80(%rax), %eax movl %eax, %ecx shll $0x10, %ecx orl %ecx, %eax movl %eax, 0x10c(%rsp) movl 0x10c(%rsp), %eax movl %eax, 0x32c(%rsp) movl %eax, 0x328(%rsp) movl %eax, 0x324(%rsp) movl %eax, 0x320(%rsp) movd 0x32c(%rsp), %xmm0 movd 0x328(%rsp), %xmm1 punpckldq %xmm0, %xmm1 # xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1] movd 0x324(%rsp), %xmm2 movd 0x320(%rsp), %xmm0 punpckldq %xmm2, %xmm0 # xmm0 = xmm0[0],xmm2[0],xmm0[1],xmm2[1] punpcklqdq %xmm1, %xmm0 # xmm0 = xmm0[0],xmm1[0] movaps %xmm0, 0x310(%rsp) movaps 0x310(%rsp), %xmm0 movaps %xmm0, 0xc0(%rsp) movq 0xd8(%rsp), %rax movl 0x80(%rax), %ecx movw %cx, %ax movzwl %ax, %eax shll $0x10, %ecx subl %ecx, %eax movl %eax, 0x108(%rsp) movl 0x108(%rsp), %eax movl %eax, 0x34c(%rsp) movl %eax, 0x348(%rsp) movl %eax, 0x344(%rsp) movl %eax, 0x340(%rsp) movd 0x34c(%rsp), %xmm0 movd 0x348(%rsp), %xmm1 punpckldq %xmm0, %xmm1 # xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1] movd 0x344(%rsp), %xmm2 movd 0x340(%rsp), %xmm0 punpckldq %xmm2, %xmm0 # xmm0 = xmm0[0],xmm2[0],xmm0[1],xmm2[1] punpcklqdq %xmm1, %xmm0 # xmm0 = xmm0[0],xmm1[0] movaps %xmm0, 0x330(%rsp) movaps 0x330(%rsp), %xmm0 movaps %xmm0, 0xb0(%rsp) movq 0xd8(%rsp), %rcx movzwl 0x40(%rcx), %eax movl 0xc0(%rcx), %ecx shll $0x10, %ecx orl %ecx, %eax movl %eax, 0x104(%rsp) movl 0x104(%rsp), %eax movl %eax, 0x36c(%rsp) movl %eax, 0x368(%rsp) movl %eax, 0x364(%rsp) movl %eax, 0x360(%rsp) movd 0x36c(%rsp), %xmm0 movd 0x368(%rsp), %xmm1 punpckldq %xmm0, %xmm1 # xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1] movd 0x364(%rsp), %xmm2 movd 0x360(%rsp), %xmm0 punpckldq %xmm2, %xmm0 # xmm0 = xmm0[0],xmm2[0],xmm0[1],xmm2[1] punpcklqdq %xmm1, %xmm0 # xmm0 = xmm0[0],xmm1[0] movaps %xmm0, 0x350(%rsp) movaps 0x350(%rsp), %xmm0 movaps %xmm0, 0xa0(%rsp) movq 0xd8(%rsp), %rcx movzwl 0xc0(%rcx), %eax movl 
0x40(%rcx), %ecx shll $0x10, %ecx subl %ecx, %eax movl %eax, 0x100(%rsp) movl 0x100(%rsp), %eax movl %eax, 0x38c(%rsp) movl %eax, 0x388(%rsp) movl %eax, 0x384(%rsp) movl %eax, 0x380(%rsp) movd 0x38c(%rsp), %xmm0 movd 0x388(%rsp), %xmm1 punpckldq %xmm0, %xmm1 # xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1] movd 0x384(%rsp), %xmm2 movd 0x380(%rsp), %xmm0 punpckldq %xmm2, %xmm0 # xmm0 = xmm0[0],xmm2[0],xmm0[1],xmm2[1] punpcklqdq %xmm1, %xmm0 # xmm0 = xmm0[0],xmm1[0] movaps %xmm0, 0x370(%rsp) movaps 0x370(%rsp), %xmm0 movaps %xmm0, 0x90(%rsp) movb 0xe7(%rsp), %cl decb %cl movl $0x1, %eax shll %cl, %eax movl %eax, 0xfc(%rsp) movl 0xfc(%rsp), %eax movl %eax, 0x3ac(%rsp) movl %eax, 0x3a8(%rsp) movl %eax, 0x3a4(%rsp) movl %eax, 0x3a0(%rsp) movd 0x3ac(%rsp), %xmm0 movd 0x3a8(%rsp), %xmm1 punpckldq %xmm0, %xmm1 # xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1] movd 0x3a4(%rsp), %xmm2 movd 0x3a0(%rsp), %xmm0 punpckldq %xmm2, %xmm0 # xmm0 = xmm0[0],xmm2[0],xmm0[1],xmm2[1] punpcklqdq %xmm1, %xmm0 # xmm0 = xmm0[0],xmm1[0] movaps %xmm0, 0x390(%rsp) movaps 0x390(%rsp), %xmm0 movaps %xmm0, 0x80(%rsp) movq 0xf0(%rsp), %rax movaps (%rax), %xmm1 movaps 0x10(%rax), %xmm0 movaps %xmm1, 0x140(%rsp) movaps %xmm0, 0x130(%rsp) movaps 0x140(%rsp), %xmm0 movaps 0x130(%rsp), %xmm1 punpcklwd %xmm1, %xmm0 # xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1],xmm0[2],xmm1[2],xmm0[3],xmm1[3] movaps %xmm0, 0x40(%rsp) movq 0xf0(%rsp), %rax movaps 0x20(%rax), %xmm0 movaps 0x30(%rax), %xmm1 movaps %xmm1, 0x120(%rsp) movaps %xmm0, 0x110(%rsp) movaps 0x120(%rsp), %xmm0 movaps 0x110(%rsp), %xmm1 punpcklwd %xmm1, %xmm0 # xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1],xmm0[2],xmm1[2],xmm0[3],xmm1[3] movaps %xmm0, 0x50(%rsp) movaps 0x40(%rsp), %xmm1 movaps 0x50(%rsp), %xmm0 movaps %xmm1, 0x3c0(%rsp) movaps %xmm0, 0x3b0(%rsp) movaps 0x3c0(%rsp), %xmm0 movaps 0x3b0(%rsp), %xmm1 paddw %xmm1, %xmm0 movaps %xmm0, (%rsp) movaps 0x40(%rsp), %xmm1 movaps 0x50(%rsp), %xmm0 movaps %xmm1, 0x3e0(%rsp) movaps %xmm0, 0x3d0(%rsp) movaps 0x3e0(%rsp), %xmm0 
movaps 0x3d0(%rsp), %xmm1 psubw %xmm1, %xmm0 movaps %xmm0, 0x10(%rsp) movaps (%rsp), %xmm1 movaps 0xc0(%rsp), %xmm0 movaps %xmm1, 0x1c0(%rsp) movaps %xmm0, 0x1b0(%rsp) movaps 0x1c0(%rsp), %xmm0 movaps 0x1b0(%rsp), %xmm1 pmaddwd %xmm1, %xmm0 movaps %xmm0, 0x40(%rsp) movaps (%rsp), %xmm1 movaps 0xb0(%rsp), %xmm0 movaps %xmm1, 0x1a0(%rsp) movaps %xmm0, 0x190(%rsp) movaps 0x1a0(%rsp), %xmm0 movaps 0x190(%rsp), %xmm1 pmaddwd %xmm1, %xmm0 movaps %xmm0, 0x50(%rsp) movaps 0x10(%rsp), %xmm1 movaps 0xa0(%rsp), %xmm0 movaps %xmm1, 0x180(%rsp) movaps %xmm0, 0x170(%rsp) movaps 0x180(%rsp), %xmm0 movaps 0x170(%rsp), %xmm1 pmaddwd %xmm1, %xmm0 movaps %xmm0, 0x60(%rsp) movaps 0x10(%rsp), %xmm1 movaps 0x90(%rsp), %xmm0 movaps %xmm1, 0x160(%rsp) movaps %xmm0, 0x150(%rsp) movaps 0x160(%rsp), %xmm0 movaps 0x150(%rsp), %xmm1 pmaddwd %xmm1, %xmm0 movaps %xmm0, 0x70(%rsp) movaps 0x40(%rsp), %xmm1 movaps 0x80(%rsp), %xmm0 movaps %xmm1, 0x240(%rsp) movaps %xmm0, 0x230(%rsp) movaps 0x240(%rsp), %xmm0 movaps 0x230(%rsp), %xmm1 paddd %xmm1, %xmm0 movaps %xmm0, (%rsp) movaps 0x50(%rsp), %xmm1 movaps 0x80(%rsp), %xmm0 movaps %xmm1, 0x220(%rsp) movaps %xmm0, 0x210(%rsp) movaps 0x220(%rsp), %xmm0 movaps 0x210(%rsp), %xmm1 paddd %xmm1, %xmm0 movaps %xmm0, 0x10(%rsp) movaps 0x60(%rsp), %xmm1 movaps 0x80(%rsp), %xmm0 movaps %xmm1, 0x200(%rsp) movaps %xmm0, 0x1f0(%rsp) movaps 0x200(%rsp), %xmm0 movaps 0x1f0(%rsp), %xmm1 paddd %xmm1, %xmm0 movaps %xmm0, 0x20(%rsp) movaps 0x70(%rsp), %xmm1 movaps 0x80(%rsp), %xmm0 movaps %xmm1, 0x1e0(%rsp) movaps %xmm0, 0x1d0(%rsp) movaps 0x1e0(%rsp), %xmm0 movaps 0x1d0(%rsp), %xmm1 paddd %xmm1, %xmm0 movaps %xmm0, 0x30(%rsp) movaps (%rsp), %xmm0 movsbl 0xe7(%rsp), %eax movaps %xmm0, 0x2c0(%rsp) movl %eax, 0x2bc(%rsp) movaps 0x2c0(%rsp), %xmm0 movd 0x2bc(%rsp), %xmm1 psrad %xmm1, %xmm0 movaps %xmm0, 0x40(%rsp) movaps 0x10(%rsp), %xmm0 movsbl 0xe7(%rsp), %eax movaps %xmm0, 0x2a0(%rsp) movl %eax, 0x29c(%rsp) movaps 0x2a0(%rsp), %xmm0 movd 0x29c(%rsp), %xmm1 psrad %xmm1, 
%xmm0 movaps %xmm0, 0x50(%rsp) movaps 0x20(%rsp), %xmm0 movsbl 0xe7(%rsp), %eax movaps %xmm0, 0x280(%rsp) movl %eax, 0x27c(%rsp) movaps 0x280(%rsp), %xmm0 movd 0x27c(%rsp), %xmm1 psrad %xmm1, %xmm0 movaps %xmm0, 0x60(%rsp) movaps 0x30(%rsp), %xmm0 movsbl 0xe7(%rsp), %eax movaps %xmm0, 0x260(%rsp) movl %eax, 0x25c(%rsp) movaps 0x260(%rsp), %xmm0 movd 0x25c(%rsp), %xmm1 psrad %xmm1, %xmm0 movaps %xmm0, 0x70(%rsp) movaps 0x40(%rsp), %xmm1 movaps 0x50(%rsp), %xmm0 movaps %xmm1, 0x300(%rsp) movaps %xmm0, 0x2f0(%rsp) movaps 0x300(%rsp), %xmm0 movaps 0x2f0(%rsp), %xmm1 packssdw %xmm1, %xmm0 movq 0xe8(%rsp), %rax movaps %xmm0, (%rax) movaps 0x60(%rsp), %xmm1 movaps 0x70(%rsp), %xmm0 movaps %xmm1, 0x2e0(%rsp) movaps %xmm0, 0x2d0(%rsp) movaps 0x2e0(%rsp), %xmm0 movaps 0x2d0(%rsp), %xmm1 packssdw %xmm1, %xmm0 movq 0xe8(%rsp), %rax movaps %xmm0, 0x10(%rax) movq 0xe8(%rsp), %rax movaps (%rax), %xmm0 psrldq $0x8, %xmm0 # xmm0 = xmm0[8,9,10,11,12,13,14,15],zero,zero,zero,zero,zero,zero,zero,zero movaps %xmm0, 0x20(%rax) movq 0xe8(%rsp), %rax movaps 0x10(%rax), %xmm0 psrldq $0x8, %xmm0 # xmm0 = xmm0[8,9,10,11,12,13,14,15],zero,zero,zero,zero,zero,zero,zero,zero movq 0xe8(%rsp), %rax movdqa %xmm0, 0x30(%rax) addq $0x3f8, %rsp # imm = 0x3F8 retq nopw %cs:(%rax,%rax)
/m-ab-s[P]aom/av1/encoder/x86/av1_fwd_txfm_sse2.c
fadst4x4_new_sse2
// 4-point forward ADST on 8 packed int16 lanes (SSE2), built from the
// sinpi table. Inputs are interleaved (sample with sample, or sample with
// zero) so each _mm_madd_epi16 produces the partial sums named in the
// inline comments; the combined terms are rounded, shifted by cos_bit and
// re-packed to int16.
static void fadst4x4_new_sse2(const __m128i *input, __m128i *output,
                              int8_t cos_bit) {
  const int32_t *sinpi = sinpi_arr(cos_bit);
  // Packed 16-bit sine pairs consumed by _mm_madd_epi16.
  const __m128i sinpi_p01_p02 = pair_set_epi16(sinpi[1], sinpi[2]);
  const __m128i sinpi_p04_m01 = pair_set_epi16(sinpi[4], -sinpi[1]);
  const __m128i sinpi_p03_p04 = pair_set_epi16(sinpi[3], sinpi[4]);
  const __m128i sinpi_m03_p02 = pair_set_epi16(-sinpi[3], sinpi[2]);
  const __m128i sinpi_p03_p03 = _mm_set1_epi16((int16_t)sinpi[3]);
  const __m128i __zero = _mm_setzero_si128();
  // Rounding offset applied before the arithmetic shift by cos_bit.
  const __m128i __rounding = _mm_set1_epi32(1 << (cos_bit - 1));
  const __m128i in7 = _mm_add_epi16(input[0], input[1]);

  // Interleave inputs so each madd sees matched 16-bit pairs.
  const __m128i p01 = _mm_unpacklo_epi16(input[0], input[1]);
  const __m128i p23 = _mm_unpacklo_epi16(input[2], input[3]);
  const __m128i p7z = _mm_unpacklo_epi16(in7, __zero);
  const __m128i p2z = _mm_unpacklo_epi16(input[2], __zero);
  const __m128i p3z = _mm_unpacklo_epi16(input[3], __zero);

  const __m128i s0_p_s2 = _mm_madd_epi16(p01, sinpi_p01_p02);  // s0 + s2
  const __m128i s4_p_s5 = _mm_madd_epi16(p23, sinpi_p03_p04);  // s4 + s5
  const __m128i x1 = _mm_madd_epi16(p7z, sinpi_p03_p03);       // x1
  const __m128i s1_m_s3 = _mm_madd_epi16(p01, sinpi_p04_m01);  // s1 - s3
  const __m128i s6_m_s4 = _mm_madd_epi16(p23, sinpi_m03_p02);  // -s4 + s6
  const __m128i s4 = _mm_madd_epi16(p2z, sinpi_p03_p03);       // s4
  const __m128i s7 = _mm_madd_epi16(p3z, sinpi_p03_p03);

  const __m128i a0 = _mm_add_epi32(s0_p_s2, s4_p_s5);
  const __m128i a1 = _mm_sub_epi32(x1, s7);
  const __m128i a2 = _mm_add_epi32(s1_m_s3, s6_m_s4);
  const __m128i a3 = _mm_sub_epi32(a2, a0);
  // 3 * s4 computed as (s4 << 2) - s4.
  const __m128i s4_x4 = _mm_slli_epi32(s4, 2);
  const __m128i s4_x3 = _mm_sub_epi32(s4_x4, s4);
  const __m128i a6 = _mm_add_epi32(a3, s4_x3);

  // Round, shift and repack to 16 bits.
  const __m128i r0 = _mm_srai_epi32(_mm_add_epi32(a0, __rounding), cos_bit);
  const __m128i r1 = _mm_srai_epi32(_mm_add_epi32(a1, __rounding), cos_bit);
  const __m128i r2 = _mm_srai_epi32(_mm_add_epi32(a2, __rounding), cos_bit);
  const __m128i r3 = _mm_srai_epi32(_mm_add_epi32(a6, __rounding), cos_bit);

  output[0] = _mm_packs_epi32(r0, r2);
  output[1] = _mm_packs_epi32(r1, r3);
  output[2] = _mm_srli_si128(output[0], 8);
  output[3] = _mm_srli_si128(output[1], 8);
}
subq $0x668, %rsp # imm = 0x668 movb %dl, %al movq %rdi, 0x1a0(%rsp) movq %rsi, 0x198(%rsp) movb %al, 0x197(%rsp) movsbl 0x197(%rsp), %edi callq 0xa49e40 movq %rax, 0x188(%rsp) movq 0x188(%rsp), %rcx movzwl 0x4(%rcx), %eax movl 0x8(%rcx), %ecx shll $0x10, %ecx orl %ecx, %eax movl %eax, 0x1bc(%rsp) movl 0x1bc(%rsp), %eax movl %eax, 0x4fc(%rsp) movl %eax, 0x4f8(%rsp) movl %eax, 0x4f4(%rsp) movl %eax, 0x4f0(%rsp) movd 0x4fc(%rsp), %xmm0 movd 0x4f8(%rsp), %xmm1 punpckldq %xmm0, %xmm1 # xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1] movd 0x4f4(%rsp), %xmm2 movd 0x4f0(%rsp), %xmm0 punpckldq %xmm2, %xmm0 # xmm0 = xmm0[0],xmm2[0],xmm0[1],xmm2[1] punpcklqdq %xmm1, %xmm0 # xmm0 = xmm0[0],xmm1[0] movaps %xmm0, 0x4e0(%rsp) movaps 0x4e0(%rsp), %xmm0 movaps %xmm0, 0x170(%rsp) movq 0x188(%rsp), %rcx movzwl 0x10(%rcx), %eax movl 0x4(%rcx), %ecx shll $0x10, %ecx subl %ecx, %eax movl %eax, 0x1b8(%rsp) movl 0x1b8(%rsp), %eax movl %eax, 0x51c(%rsp) movl %eax, 0x518(%rsp) movl %eax, 0x514(%rsp) movl %eax, 0x510(%rsp) movd 0x51c(%rsp), %xmm0 movd 0x518(%rsp), %xmm1 punpckldq %xmm0, %xmm1 # xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1] movd 0x514(%rsp), %xmm2 movd 0x510(%rsp), %xmm0 punpckldq %xmm2, %xmm0 # xmm0 = xmm0[0],xmm2[0],xmm0[1],xmm2[1] punpcklqdq %xmm1, %xmm0 # xmm0 = xmm0[0],xmm1[0] movaps %xmm0, 0x500(%rsp) movaps 0x500(%rsp), %xmm0 movaps %xmm0, 0x160(%rsp) movq 0x188(%rsp), %rcx movzwl 0xc(%rcx), %eax movl 0x10(%rcx), %ecx shll $0x10, %ecx orl %ecx, %eax movl %eax, 0x1b4(%rsp) movl 0x1b4(%rsp), %eax movl %eax, 0x53c(%rsp) movl %eax, 0x538(%rsp) movl %eax, 0x534(%rsp) movl %eax, 0x530(%rsp) movd 0x53c(%rsp), %xmm0 movd 0x538(%rsp), %xmm1 punpckldq %xmm0, %xmm1 # xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1] movd 0x534(%rsp), %xmm2 movd 0x530(%rsp), %xmm0 punpckldq %xmm2, %xmm0 # xmm0 = xmm0[0],xmm2[0],xmm0[1],xmm2[1] punpcklqdq %xmm1, %xmm0 # xmm0 = xmm0[0],xmm1[0] movaps %xmm0, 0x520(%rsp) movaps 0x520(%rsp), %xmm0 movaps %xmm0, 0x150(%rsp) movq 0x188(%rsp), %rax movl 0x8(%rax), %ecx movl 
0xc(%rax), %eax negl %eax movzwl %ax, %eax shll $0x10, %ecx orl %ecx, %eax movl %eax, 0x1b0(%rsp) movl 0x1b0(%rsp), %eax movl %eax, 0x55c(%rsp) movl %eax, 0x558(%rsp) movl %eax, 0x554(%rsp) movl %eax, 0x550(%rsp) movd 0x55c(%rsp), %xmm0 movd 0x558(%rsp), %xmm1 punpckldq %xmm0, %xmm1 # xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1] movd 0x554(%rsp), %xmm2 movd 0x550(%rsp), %xmm0 punpckldq %xmm2, %xmm0 # xmm0 = xmm0[0],xmm2[0],xmm0[1],xmm2[1] punpcklqdq %xmm1, %xmm0 # xmm0 = xmm0[0],xmm1[0] movaps %xmm0, 0x540(%rsp) movaps 0x540(%rsp), %xmm0 movaps %xmm0, 0x140(%rsp) movq 0x188(%rsp), %rax movw 0xc(%rax), %ax movw %ax, 0x5ae(%rsp) movw 0x5ae(%rsp), %ax movw %ax, 0x666(%rsp) movw %ax, 0x664(%rsp) movw %ax, 0x662(%rsp) movw %ax, 0x660(%rsp) movw %ax, 0x65e(%rsp) movw %ax, 0x65c(%rsp) movw %ax, 0x65a(%rsp) movw %ax, 0x658(%rsp) movzwl 0x666(%rsp), %eax movd %eax, %xmm1 movzwl 0x664(%rsp), %eax movd %eax, %xmm0 punpcklwd %xmm1, %xmm0 # xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1],xmm0[2],xmm1[2],xmm0[3],xmm1[3] movzwl 0x662(%rsp), %eax movd %eax, %xmm2 movzwl 0x660(%rsp), %eax movd %eax, %xmm1 punpcklwd %xmm2, %xmm1 # xmm1 = xmm1[0],xmm2[0],xmm1[1],xmm2[1],xmm1[2],xmm2[2],xmm1[3],xmm2[3] punpckldq %xmm0, %xmm1 # xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1] movzwl 0x65e(%rsp), %eax movd %eax, %xmm0 movzwl 0x65c(%rsp), %eax movd %eax, %xmm2 punpcklwd %xmm0, %xmm2 # xmm2 = xmm2[0],xmm0[0],xmm2[1],xmm0[1],xmm2[2],xmm0[2],xmm2[3],xmm0[3] movzwl 0x65a(%rsp), %eax movd %eax, %xmm3 movzwl 0x658(%rsp), %eax movd %eax, %xmm0 punpcklwd %xmm3, %xmm0 # xmm0 = xmm0[0],xmm3[0],xmm0[1],xmm3[1],xmm0[2],xmm3[2],xmm0[3],xmm3[3] punpckldq %xmm2, %xmm0 # xmm0 = xmm0[0],xmm2[0],xmm0[1],xmm2[1] punpcklqdq %xmm1, %xmm0 # xmm0 = xmm0[0],xmm1[0] movaps %xmm0, 0x640(%rsp) movaps 0x640(%rsp), %xmm0 movaps %xmm0, 0x130(%rsp) xorps %xmm0, %xmm0 movaps %xmm0, 0x5b0(%rsp) movaps 0x5b0(%rsp), %xmm0 movaps %xmm0, 0x120(%rsp) movb 0x197(%rsp), %cl decb %cl movl $0x1, %eax shll %cl, %eax movl %eax, 0x1ac(%rsp) movl 
0x1ac(%rsp), %eax movl %eax, 0x57c(%rsp) movl %eax, 0x578(%rsp) movl %eax, 0x574(%rsp) movl %eax, 0x570(%rsp) movd 0x57c(%rsp), %xmm0 movd 0x578(%rsp), %xmm1 punpckldq %xmm0, %xmm1 # xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1] movd 0x574(%rsp), %xmm2 movd 0x570(%rsp), %xmm0 punpckldq %xmm2, %xmm0 # xmm0 = xmm0[0],xmm2[0],xmm0[1],xmm2[1] punpcklqdq %xmm1, %xmm0 # xmm0 = xmm0[0],xmm1[0] movaps %xmm0, 0x560(%rsp) movaps 0x560(%rsp), %xmm0 movaps %xmm0, 0x110(%rsp) movq 0x1a0(%rsp), %rax movaps (%rax), %xmm1 movaps 0x10(%rax), %xmm0 movaps %xmm1, 0x590(%rsp) movaps %xmm0, 0x580(%rsp) movaps 0x590(%rsp), %xmm0 movaps 0x580(%rsp), %xmm1 paddw %xmm1, %xmm0 movaps %xmm0, 0x100(%rsp) movq 0x1a0(%rsp), %rax movaps (%rax), %xmm1 movaps 0x10(%rax), %xmm0 movaps %xmm1, 0x250(%rsp) movaps %xmm0, 0x240(%rsp) movaps 0x250(%rsp), %xmm0 movaps 0x240(%rsp), %xmm1 punpcklwd %xmm1, %xmm0 # xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1],xmm0[2],xmm1[2],xmm0[3],xmm1[3] movaps %xmm0, 0x80(%rsp) movq 0x1a0(%rsp), %rax movaps 0x20(%rax), %xmm1 movaps 0x30(%rax), %xmm0 movaps %xmm1, 0x230(%rsp) movaps %xmm0, 0x220(%rsp) movaps 0x230(%rsp), %xmm0 movaps 0x220(%rsp), %xmm1 punpcklwd %xmm1, %xmm0 # xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1],xmm0[2],xmm1[2],xmm0[3],xmm1[3] movaps %xmm0, 0x90(%rsp) movaps 0x100(%rsp), %xmm1 movaps 0x120(%rsp), %xmm0 movaps %xmm1, 0x210(%rsp) movaps %xmm0, 0x200(%rsp) movaps 0x210(%rsp), %xmm0 movaps 0x200(%rsp), %xmm1 punpcklwd %xmm1, %xmm0 # xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1],xmm0[2],xmm1[2],xmm0[3],xmm1[3] movaps %xmm0, 0xa0(%rsp) movq 0x1a0(%rsp), %rax movaps 0x20(%rax), %xmm1 movaps 0x120(%rsp), %xmm0 movaps %xmm1, 0x1f0(%rsp) movaps %xmm0, 0x1e0(%rsp) movaps 0x1f0(%rsp), %xmm0 movaps 0x1e0(%rsp), %xmm1 punpcklwd %xmm1, %xmm0 # xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1],xmm0[2],xmm1[2],xmm0[3],xmm1[3] movaps %xmm0, 0xb0(%rsp) movq 0x1a0(%rsp), %rax movaps 0x30(%rax), %xmm1 movaps 0x120(%rsp), %xmm0 movaps %xmm1, 0x1d0(%rsp) movaps %xmm0, 0x1c0(%rsp) movaps 0x1d0(%rsp), %xmm0 
movaps 0x1c0(%rsp), %xmm1 punpcklwd %xmm1, %xmm0 # xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1],xmm0[2],xmm1[2],xmm0[3],xmm1[3] movaps %xmm0, 0xc0(%rsp) movaps 0x80(%rsp), %xmm1 movaps 0x170(%rsp), %xmm0 movaps %xmm1, 0x330(%rsp) movaps %xmm0, 0x320(%rsp) movaps 0x330(%rsp), %xmm0 movaps 0x320(%rsp), %xmm1 pmaddwd %xmm1, %xmm0 movaps %xmm0, (%rsp) movaps 0x90(%rsp), %xmm1 movaps 0x150(%rsp), %xmm0 movaps %xmm1, 0x310(%rsp) movaps %xmm0, 0x300(%rsp) movaps 0x310(%rsp), %xmm0 movaps 0x300(%rsp), %xmm1 pmaddwd %xmm1, %xmm0 movaps %xmm0, 0x10(%rsp) movaps 0xa0(%rsp), %xmm1 movaps 0x130(%rsp), %xmm0 movaps %xmm1, 0x2f0(%rsp) movaps %xmm0, 0x2e0(%rsp) movaps 0x2f0(%rsp), %xmm0 movaps 0x2e0(%rsp), %xmm1 pmaddwd %xmm1, %xmm0 movaps %xmm0, 0x20(%rsp) movaps 0x80(%rsp), %xmm1 movaps 0x160(%rsp), %xmm0 movaps %xmm1, 0x2d0(%rsp) movaps %xmm0, 0x2c0(%rsp) movaps 0x2d0(%rsp), %xmm0 movaps 0x2c0(%rsp), %xmm1 pmaddwd %xmm1, %xmm0 movaps %xmm0, 0x30(%rsp) movaps 0x90(%rsp), %xmm1 movaps 0x140(%rsp), %xmm0 movaps %xmm1, 0x2b0(%rsp) movaps %xmm0, 0x2a0(%rsp) movaps 0x2b0(%rsp), %xmm0 movaps 0x2a0(%rsp), %xmm1 pmaddwd %xmm1, %xmm0 movaps %xmm0, 0x40(%rsp) movaps 0xb0(%rsp), %xmm1 movaps 0x130(%rsp), %xmm0 movaps %xmm1, 0x290(%rsp) movaps %xmm0, 0x280(%rsp) movaps 0x290(%rsp), %xmm0 movaps 0x280(%rsp), %xmm1 pmaddwd %xmm1, %xmm0 movaps %xmm0, 0x50(%rsp) movaps 0xc0(%rsp), %xmm1 movaps 0x130(%rsp), %xmm0 movaps %xmm1, 0x270(%rsp) movaps %xmm0, 0x260(%rsp) movaps 0x270(%rsp), %xmm0 movaps 0x260(%rsp), %xmm1 pmaddwd %xmm1, %xmm0 movaps %xmm0, 0x60(%rsp) movaps (%rsp), %xmm1 movaps 0x10(%rsp), %xmm0 movaps %xmm1, 0x410(%rsp) movaps %xmm0, 0x400(%rsp) movaps 0x410(%rsp), %xmm0 movaps 0x400(%rsp), %xmm1 paddd %xmm1, %xmm0 movaps %xmm0, 0x80(%rsp) movaps 0x20(%rsp), %xmm1 movaps 0x60(%rsp), %xmm0 movaps %xmm1, 0x610(%rsp) movaps %xmm0, 0x600(%rsp) movaps 0x610(%rsp), %xmm0 movaps 0x600(%rsp), %xmm1 psubd %xmm1, %xmm0 movaps %xmm0, 0x90(%rsp) movaps 0x30(%rsp), %xmm1 movaps 0x40(%rsp), %xmm0 movaps 
%xmm1, 0x3f0(%rsp) movaps %xmm0, 0x3e0(%rsp) movaps 0x3f0(%rsp), %xmm0 movaps 0x3e0(%rsp), %xmm1 paddd %xmm1, %xmm0 movaps %xmm0, 0xa0(%rsp) movaps 0x80(%rsp), %xmm0 movaps 0xa0(%rsp), %xmm1 movaps %xmm1, 0x5f0(%rsp) movaps %xmm0, 0x5e0(%rsp) movaps 0x5f0(%rsp), %xmm0 movaps 0x5e0(%rsp), %xmm1 psubd %xmm1, %xmm0 movaps %xmm0, 0xb0(%rsp) movaps 0x50(%rsp), %xmm0 movaps %xmm0, 0x630(%rsp) movl $0x2, 0x62c(%rsp) movaps 0x630(%rsp), %xmm0 movd 0x62c(%rsp), %xmm1 pslld %xmm1, %xmm0 movaps %xmm0, 0xc0(%rsp) movaps 0xc0(%rsp), %xmm1 movaps 0x50(%rsp), %xmm0 movaps %xmm1, 0x5d0(%rsp) movaps %xmm0, 0x5c0(%rsp) movaps 0x5d0(%rsp), %xmm0 movaps 0x5c0(%rsp), %xmm1 psubd %xmm1, %xmm0 movaps %xmm0, 0xd0(%rsp) movaps 0xb0(%rsp), %xmm1 movaps 0xd0(%rsp), %xmm0 movaps %xmm1, 0x3d0(%rsp) movaps %xmm0, 0x3c0(%rsp) movaps 0x3d0(%rsp), %xmm0 movaps 0x3c0(%rsp), %xmm1 paddd %xmm1, %xmm0 movaps %xmm0, 0xe0(%rsp) movaps 0x80(%rsp), %xmm1 movaps 0x110(%rsp), %xmm0 movaps %xmm1, 0x3b0(%rsp) movaps %xmm0, 0x3a0(%rsp) movaps 0x3b0(%rsp), %xmm0 movaps 0x3a0(%rsp), %xmm1 paddd %xmm1, %xmm0 movaps %xmm0, (%rsp) movaps 0x90(%rsp), %xmm1 movaps 0x110(%rsp), %xmm0 movaps %xmm1, 0x390(%rsp) movaps %xmm0, 0x380(%rsp) movaps 0x390(%rsp), %xmm0 movaps 0x380(%rsp), %xmm1 paddd %xmm1, %xmm0 movaps %xmm0, 0x10(%rsp) movaps 0xa0(%rsp), %xmm1 movaps 0x110(%rsp), %xmm0 movaps %xmm1, 0x370(%rsp) movaps %xmm0, 0x360(%rsp) movaps 0x370(%rsp), %xmm0 movaps 0x360(%rsp), %xmm1 paddd %xmm1, %xmm0 movaps %xmm0, 0x20(%rsp) movaps 0xe0(%rsp), %xmm1 movaps 0x110(%rsp), %xmm0 movaps %xmm1, 0x350(%rsp) movaps %xmm0, 0x340(%rsp) movaps 0x350(%rsp), %xmm0 movaps 0x340(%rsp), %xmm1 paddd %xmm1, %xmm0 movaps %xmm0, 0x30(%rsp) movaps (%rsp), %xmm0 movsbl 0x197(%rsp), %eax movaps %xmm0, 0x490(%rsp) movl %eax, 0x48c(%rsp) movaps 0x490(%rsp), %xmm0 movd 0x48c(%rsp), %xmm1 psrad %xmm1, %xmm0 movaps %xmm0, 0x80(%rsp) movaps 0x10(%rsp), %xmm0 movsbl 0x197(%rsp), %eax movaps %xmm0, 0x470(%rsp) movl %eax, 0x46c(%rsp) movaps 
0x470(%rsp), %xmm0 movd 0x46c(%rsp), %xmm1 psrad %xmm1, %xmm0 movaps %xmm0, 0x90(%rsp) movaps 0x20(%rsp), %xmm0 movsbl 0x197(%rsp), %eax movaps %xmm0, 0x450(%rsp) movl %eax, 0x44c(%rsp) movaps 0x450(%rsp), %xmm0 movd 0x44c(%rsp), %xmm1 psrad %xmm1, %xmm0 movaps %xmm0, 0xa0(%rsp) movaps 0x30(%rsp), %xmm0 movsbl 0x197(%rsp), %eax movaps %xmm0, 0x430(%rsp) movl %eax, 0x42c(%rsp) movaps 0x430(%rsp), %xmm0 movd 0x42c(%rsp), %xmm1 psrad %xmm1, %xmm0 movaps %xmm0, 0xb0(%rsp) movaps 0x80(%rsp), %xmm1 movaps 0xa0(%rsp), %xmm0 movaps %xmm1, 0x4d0(%rsp) movaps %xmm0, 0x4c0(%rsp) movaps 0x4d0(%rsp), %xmm0 movaps 0x4c0(%rsp), %xmm1 packssdw %xmm1, %xmm0 movq 0x198(%rsp), %rax movaps %xmm0, (%rax) movaps 0x90(%rsp), %xmm1 movaps 0xb0(%rsp), %xmm0 movaps %xmm1, 0x4b0(%rsp) movaps %xmm0, 0x4a0(%rsp) movaps 0x4b0(%rsp), %xmm0 movaps 0x4a0(%rsp), %xmm1 packssdw %xmm1, %xmm0 movq 0x198(%rsp), %rax movaps %xmm0, 0x10(%rax) movq 0x198(%rsp), %rax movaps (%rax), %xmm0 psrldq $0x8, %xmm0 # xmm0 = xmm0[8,9,10,11,12,13,14,15],zero,zero,zero,zero,zero,zero,zero,zero movaps %xmm0, 0x20(%rax) movq 0x198(%rsp), %rax movaps 0x10(%rax), %xmm0 psrldq $0x8, %xmm0 # xmm0 = xmm0[8,9,10,11,12,13,14,15],zero,zero,zero,zero,zero,zero,zero,zero movq 0x198(%rsp), %rax movdqa %xmm0, 0x30(%rax) addq $0x668, %rsp # imm = 0x668 retq
/m-ab-s[P]aom/av1/encoder/x86/av1_fwd_txfm_sse2.c
fidentity4x4_new_sse2
// Identity "transform" for a 4x4 block of 16-bit coefficients (SSE2).
// No frequency transform is applied; each coefficient is only rescaled by
// NewSqrt2 (Q12 fixed-point sqrt(2) factor used by AV1 identity transforms)
// via scale_round_sse2, then saturated back to 16 bits.
// NOTE(review): assumes scale_round_sse2 consumes (value, 1) word pairs
// produced by the unpack below -- confirm against av1_fwd_txfm_sse2.h.
static inline void fidentity4x4_new_sse2(const __m128i *const input,
                                         __m128i *const output,
                                         const int8_t cos_bit) {
  (void)cos_bit;  // identity path ignores the cosine bit depth
  const __m128i ones = _mm_set1_epi16(1);
  int row = 0;
  while (row < 4) {
    // Interleave the four low input words with 1s so the scaling helper
    // sees (coeff, 1) 16-bit pairs in each 32-bit lane.
    const __m128i interleaved = _mm_unpacklo_epi16(input[row], ones);
    const __m128i scaled = scale_round_sse2(interleaved, NewSqrt2);
    // Pack the rounded 32-bit results back to saturated 16-bit words
    // (both halves hold the same data; only the low half is meaningful).
    output[row] = _mm_packs_epi32(scaled, scaled);
    ++row;
  }
}
subq $0xe8, %rsp movb %dl, %al movq %rdi, 0x68(%rsp) movq %rsi, 0x60(%rsp) movb %al, 0x5f(%rsp) movw $0x1, 0xbe(%rsp) movw 0xbe(%rsp), %ax movw %ax, 0xe(%rsp) movw %ax, 0xe6(%rsp) movw %ax, 0xe4(%rsp) movw %ax, 0xe2(%rsp) movw %ax, 0xe0(%rsp) movw %ax, 0xde(%rsp) movw %ax, 0xdc(%rsp) movw %ax, 0xda(%rsp) movw %ax, 0xd8(%rsp) movzwl 0xe6(%rsp), %eax movd %eax, %xmm1 movzwl 0xe4(%rsp), %eax movd %eax, %xmm0 punpcklwd %xmm1, %xmm0 # xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1],xmm0[2],xmm1[2],xmm0[3],xmm1[3] movzwl 0xe2(%rsp), %eax movd %eax, %xmm2 movzwl 0xe0(%rsp), %eax movd %eax, %xmm1 punpcklwd %xmm2, %xmm1 # xmm1 = xmm1[0],xmm2[0],xmm1[1],xmm2[1],xmm1[2],xmm2[2],xmm1[3],xmm2[3] punpckldq %xmm0, %xmm1 # xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1] movzwl 0xde(%rsp), %eax movd %eax, %xmm0 movzwl 0xdc(%rsp), %eax movd %eax, %xmm2 punpcklwd %xmm0, %xmm2 # xmm2 = xmm2[0],xmm0[0],xmm2[1],xmm0[1],xmm2[2],xmm0[2],xmm2[3],xmm0[3] movzwl 0xda(%rsp), %eax movd %eax, %xmm3 movzwl 0xd8(%rsp), %eax movd %eax, %xmm0 punpcklwd %xmm3, %xmm0 # xmm0 = xmm0[0],xmm3[0],xmm0[1],xmm3[1],xmm0[2],xmm3[2],xmm0[3],xmm3[3] punpckldq %xmm2, %xmm0 # xmm0 = xmm0[0],xmm2[0],xmm0[1],xmm2[1] punpcklqdq %xmm1, %xmm0 # xmm0 = xmm0[0],xmm1[0] movdqa %xmm0, 0xc0(%rsp) movdqa 0xc0(%rsp), %xmm0 movdqa %xmm0, 0x40(%rsp) movl $0x0, 0x3c(%rsp) cmpl $0x4, 0x3c(%rsp) jge 0xa49e2f movq 0x68(%rsp), %rax movslq 0x3c(%rsp), %rcx shlq $0x4, %rcx movaps (%rax,%rcx), %xmm1 movaps 0x40(%rsp), %xmm0 movaps %xmm1, 0x80(%rsp) movaps %xmm0, 0x70(%rsp) movaps 0x80(%rsp), %xmm0 movaps 0x70(%rsp), %xmm1 punpcklwd %xmm1, %xmm0 # xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1],xmm0[2],xmm1[2],xmm0[3],xmm1[3] movdqa %xmm0, 0x20(%rsp) movdqa 0x20(%rsp), %xmm0 movl $0x16a1, %edi # imm = 0x16A1 callq 0xa49e60 movdqa %xmm0, 0x10(%rsp) movdqa 0x10(%rsp), %xmm1 movdqa 0x10(%rsp), %xmm0 movdqa %xmm1, 0xa0(%rsp) movdqa %xmm0, 0x90(%rsp) movdqa 0xa0(%rsp), %xmm0 movdqa 0x90(%rsp), %xmm1 packssdw %xmm1, %xmm0 movq 0x60(%rsp), %rax movslq 0x3c(%rsp), %rcx 
shlq $0x4, %rcx addq %rcx, %rax movdqa %xmm0, (%rax) movl 0x3c(%rsp), %eax addl $0x1, %eax movl %eax, 0x3c(%rsp) jmp 0xa49d7a addq $0xe8, %rsp retq nopw (%rax,%rax)
/m-ab-s[P]aom/av1/encoder/x86/av1_fwd_txfm_sse2.h
fdct4x8_new_sse2
// Forward 8-point DCT over four 16-bit columns held in the low words of
// each __m128i (SSE2). `input` and `output` are arrays of 8 vectors (one
// per row). The transform is a 4-stage butterfly network driven by cosine
// constants from cospi_arr(cos_bit); stage adds/subs use saturating 16-bit
// arithmetic (_mm_adds_epi16/_mm_subs_epi16).
// NOTE(review): assumes btf_16_w4_sse2 computes the rounded 4-wide
// butterfly ((a*w0 + b*w1 + rounding) >> cos_bit) -- confirm in
// av1_fwd_txfm_sse2.h.
static void fdct4x8_new_sse2(const __m128i *input, __m128i *output,
                             int8_t cos_bit) {
  const int32_t *cospi = cospi_arr(cos_bit);
  // Rounding term for the >> cos_bit shift inside the butterflies.
  const __m128i __rounding = _mm_set1_epi32(1 << (cos_bit - 1));
  // Packed 16-bit weight pairs; the name encodes sign and cospi index,
  // e.g. cospi_m32_p32 holds (-cospi[32], cospi[32]) in each lane pair.
  __m128i cospi_m32_p32 = pair_set_epi16(-cospi[32], cospi[32]);
  __m128i cospi_p32_p32 = pair_set_epi16(cospi[32], cospi[32]);
  __m128i cospi_p32_m32 = pair_set_epi16(cospi[32], -cospi[32]);
  __m128i cospi_p48_p16 = pair_set_epi16(cospi[48], cospi[16]);
  __m128i cospi_m16_p48 = pair_set_epi16(-cospi[16], cospi[48]);
  __m128i cospi_p56_p08 = pair_set_epi16(cospi[56], cospi[8]);
  __m128i cospi_m08_p56 = pair_set_epi16(-cospi[8], cospi[56]);
  __m128i cospi_p24_p40 = pair_set_epi16(cospi[24], cospi[40]);
  __m128i cospi_m40_p24 = pair_set_epi16(-cospi[40], cospi[24]);

  // stage 1: even/odd split -- sums feed the 4-point DCT half,
  // differences feed the odd half.
  __m128i x1[8];
  x1[0] = _mm_adds_epi16(input[0], input[7]);
  x1[7] = _mm_subs_epi16(input[0], input[7]);
  x1[1] = _mm_adds_epi16(input[1], input[6]);
  x1[6] = _mm_subs_epi16(input[1], input[6]);
  x1[2] = _mm_adds_epi16(input[2], input[5]);
  x1[5] = _mm_subs_epi16(input[2], input[5]);
  x1[3] = _mm_adds_epi16(input[3], input[4]);
  x1[4] = _mm_subs_epi16(input[3], input[4]);

  // stage 2: further split of the even half; x1[5]/x1[6] get a
  // +/-cospi[32] rotation, x1[4] and x1[7] pass through.
  __m128i x2[8];
  x2[0] = _mm_adds_epi16(x1[0], x1[3]);
  x2[3] = _mm_subs_epi16(x1[0], x1[3]);
  x2[1] = _mm_adds_epi16(x1[1], x1[2]);
  x2[2] = _mm_subs_epi16(x1[1], x1[2]);
  x2[4] = x1[4];
  btf_16_w4_sse2(&cospi_m32_p32, &cospi_p32_p32, __rounding, cos_bit, &x1[5],
                 &x1[6], &x2[5], &x2[6]);
  x2[7] = x1[7];

  // stage 3: rotations produce the final even outputs (indices 0..3);
  // odd half is recombined with saturating adds/subs.
  __m128i x3[8];
  btf_16_w4_sse2(&cospi_p32_p32, &cospi_p32_m32, __rounding, cos_bit, &x2[0],
                 &x2[1], &x3[0], &x3[1]);
  btf_16_w4_sse2(&cospi_p48_p16, &cospi_m16_p48, __rounding, cos_bit, &x2[2],
                 &x2[3], &x3[2], &x3[3]);
  x3[4] = _mm_adds_epi16(x2[4], x2[5]);
  x3[5] = _mm_subs_epi16(x2[4], x2[5]);
  x3[6] = _mm_subs_epi16(x2[7], x2[6]);
  x3[7] = _mm_adds_epi16(x2[7], x2[6]);

  // stage 4: final rotations for the odd outputs (indices 4..7).
  __m128i x4[8];
  x4[0] = x3[0];
  x4[1] = x3[1];
  x4[2] = x3[2];
  x4[3] = x3[3];
  btf_16_w4_sse2(&cospi_p56_p08, &cospi_m08_p56, __rounding, cos_bit, &x3[4],
                 &x3[7], &x4[4], &x4[7]);
  btf_16_w4_sse2(&cospi_p24_p40, &cospi_m40_p24, __rounding, cos_bit, &x3[5],
                 &x3[6], &x4[5], &x4[6]);

  // stage 5: reorder into the DCT output coefficient order.
  output[0] = x4[0];
  output[1] = x4[4];
  output[2] = x4[2];
  output[3] = x4[6];
  output[4] = x4[1];
  output[5] = x4[5];
  output[6] = x4[3];
  output[7] = x4[7];
}
subq $0x648, %rsp # imm = 0x648 movb %dl, %al movq %rdi, 0x2d0(%rsp) movq %rsi, 0x2c8(%rsp) movb %al, 0x2c7(%rsp) movsbl 0x2c7(%rsp), %edi callq 0xa21c60 movq %rax, 0x2b8(%rsp) movb 0x2c7(%rsp), %cl decb %cl movl $0x1, %eax shll %cl, %eax movl %eax, 0x2fc(%rsp) movl 0x2fc(%rsp), %eax movl %eax, 0x51c(%rsp) movl %eax, 0x518(%rsp) movl %eax, 0x514(%rsp) movl %eax, 0x510(%rsp) movd 0x51c(%rsp), %xmm0 movd 0x518(%rsp), %xmm1 punpckldq %xmm0, %xmm1 # xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1] movd 0x514(%rsp), %xmm2 movd 0x510(%rsp), %xmm0 punpckldq %xmm2, %xmm0 # xmm0 = xmm0[0],xmm2[0],xmm0[1],xmm2[1] punpcklqdq %xmm1, %xmm0 # xmm0 = xmm0[0],xmm1[0] movaps %xmm0, 0x500(%rsp) movaps 0x500(%rsp), %xmm0 movaps %xmm0, 0x2a0(%rsp) movq 0x2b8(%rsp), %rax movl 0x80(%rax), %ecx movl %ecx, %eax negl %eax movzwl %ax, %eax shll $0x10, %ecx orl %ecx, %eax movl %eax, 0x2f8(%rsp) movl 0x2f8(%rsp), %eax movl %eax, 0x53c(%rsp) movl %eax, 0x538(%rsp) movl %eax, 0x534(%rsp) movl %eax, 0x530(%rsp) movd 0x53c(%rsp), %xmm0 movd 0x538(%rsp), %xmm1 punpckldq %xmm0, %xmm1 # xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1] movd 0x534(%rsp), %xmm2 movd 0x530(%rsp), %xmm0 punpckldq %xmm2, %xmm0 # xmm0 = xmm0[0],xmm2[0],xmm0[1],xmm2[1] punpcklqdq %xmm1, %xmm0 # xmm0 = xmm0[0],xmm1[0] movaps %xmm0, 0x520(%rsp) movaps 0x520(%rsp), %xmm0 movaps %xmm0, 0x290(%rsp) movq 0x2b8(%rsp), %rax movzwl 0x80(%rax), %eax movl %eax, %ecx shll $0x10, %ecx orl %ecx, %eax movl %eax, 0x2f4(%rsp) movl 0x2f4(%rsp), %eax movl %eax, 0x55c(%rsp) movl %eax, 0x558(%rsp) movl %eax, 0x554(%rsp) movl %eax, 0x550(%rsp) movd 0x55c(%rsp), %xmm0 movd 0x558(%rsp), %xmm1 punpckldq %xmm0, %xmm1 # xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1] movd 0x554(%rsp), %xmm2 movd 0x550(%rsp), %xmm0 punpckldq %xmm2, %xmm0 # xmm0 = xmm0[0],xmm2[0],xmm0[1],xmm2[1] punpcklqdq %xmm1, %xmm0 # xmm0 = xmm0[0],xmm1[0] movaps %xmm0, 0x540(%rsp) movaps 0x540(%rsp), %xmm0 movaps %xmm0, 0x280(%rsp) movq 0x2b8(%rsp), %rax movl 0x80(%rax), %ecx movw %cx, %ax movzwl %ax, %eax 
shll $0x10, %ecx subl %ecx, %eax movl %eax, 0x2f0(%rsp) movl 0x2f0(%rsp), %eax movl %eax, 0x57c(%rsp) movl %eax, 0x578(%rsp) movl %eax, 0x574(%rsp) movl %eax, 0x570(%rsp) movd 0x57c(%rsp), %xmm0 movd 0x578(%rsp), %xmm1 punpckldq %xmm0, %xmm1 # xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1] movd 0x574(%rsp), %xmm2 movd 0x570(%rsp), %xmm0 punpckldq %xmm2, %xmm0 # xmm0 = xmm0[0],xmm2[0],xmm0[1],xmm2[1] punpcklqdq %xmm1, %xmm0 # xmm0 = xmm0[0],xmm1[0] movaps %xmm0, 0x560(%rsp) movaps 0x560(%rsp), %xmm0 movaps %xmm0, 0x270(%rsp) movq 0x2b8(%rsp), %rcx movzwl 0xc0(%rcx), %eax movl 0x40(%rcx), %ecx shll $0x10, %ecx orl %ecx, %eax movl %eax, 0x2ec(%rsp) movl 0x2ec(%rsp), %eax movl %eax, 0x59c(%rsp) movl %eax, 0x598(%rsp) movl %eax, 0x594(%rsp) movl %eax, 0x590(%rsp) movd 0x59c(%rsp), %xmm0 movd 0x598(%rsp), %xmm1 punpckldq %xmm0, %xmm1 # xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1] movd 0x594(%rsp), %xmm2 movd 0x590(%rsp), %xmm0 punpckldq %xmm2, %xmm0 # xmm0 = xmm0[0],xmm2[0],xmm0[1],xmm2[1] punpcklqdq %xmm1, %xmm0 # xmm0 = xmm0[0],xmm1[0] movaps %xmm0, 0x580(%rsp) movaps 0x580(%rsp), %xmm0 movaps %xmm0, 0x260(%rsp) movq 0x2b8(%rsp), %rcx movl 0x40(%rcx), %eax movl 0xc0(%rcx), %ecx negl %eax movzwl %ax, %eax shll $0x10, %ecx orl %ecx, %eax movl %eax, 0x2e8(%rsp) movl 0x2e8(%rsp), %eax movl %eax, 0x5bc(%rsp) movl %eax, 0x5b8(%rsp) movl %eax, 0x5b4(%rsp) movl %eax, 0x5b0(%rsp) movd 0x5bc(%rsp), %xmm0 movd 0x5b8(%rsp), %xmm1 punpckldq %xmm0, %xmm1 # xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1] movd 0x5b4(%rsp), %xmm2 movd 0x5b0(%rsp), %xmm0 punpckldq %xmm2, %xmm0 # xmm0 = xmm0[0],xmm2[0],xmm0[1],xmm2[1] punpcklqdq %xmm1, %xmm0 # xmm0 = xmm0[0],xmm1[0] movaps %xmm0, 0x5a0(%rsp) movaps 0x5a0(%rsp), %xmm0 movaps %xmm0, 0x250(%rsp) movq 0x2b8(%rsp), %rcx movzwl 0xe0(%rcx), %eax movl 0x20(%rcx), %ecx shll $0x10, %ecx orl %ecx, %eax movl %eax, 0x2e4(%rsp) movl 0x2e4(%rsp), %eax movl %eax, 0x5dc(%rsp) movl %eax, 0x5d8(%rsp) movl %eax, 0x5d4(%rsp) movl %eax, 0x5d0(%rsp) movd 0x5dc(%rsp), %xmm0 movd 
0x5d8(%rsp), %xmm1 punpckldq %xmm0, %xmm1 # xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1] movd 0x5d4(%rsp), %xmm2 movd 0x5d0(%rsp), %xmm0 punpckldq %xmm2, %xmm0 # xmm0 = xmm0[0],xmm2[0],xmm0[1],xmm2[1] punpcklqdq %xmm1, %xmm0 # xmm0 = xmm0[0],xmm1[0] movaps %xmm0, 0x5c0(%rsp) movaps 0x5c0(%rsp), %xmm0 movaps %xmm0, 0x240(%rsp) movq 0x2b8(%rsp), %rcx movl 0x20(%rcx), %eax movl 0xe0(%rcx), %ecx negl %eax movzwl %ax, %eax shll $0x10, %ecx orl %ecx, %eax movl %eax, 0x2e0(%rsp) movl 0x2e0(%rsp), %eax movl %eax, 0x5fc(%rsp) movl %eax, 0x5f8(%rsp) movl %eax, 0x5f4(%rsp) movl %eax, 0x5f0(%rsp) movd 0x5fc(%rsp), %xmm0 movd 0x5f8(%rsp), %xmm1 punpckldq %xmm0, %xmm1 # xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1] movd 0x5f4(%rsp), %xmm2 movd 0x5f0(%rsp), %xmm0 punpckldq %xmm2, %xmm0 # xmm0 = xmm0[0],xmm2[0],xmm0[1],xmm2[1] punpcklqdq %xmm1, %xmm0 # xmm0 = xmm0[0],xmm1[0] movaps %xmm0, 0x5e0(%rsp) movaps 0x5e0(%rsp), %xmm0 movaps %xmm0, 0x230(%rsp) movq 0x2b8(%rsp), %rcx movzwl 0x60(%rcx), %eax movl 0xa0(%rcx), %ecx shll $0x10, %ecx orl %ecx, %eax movl %eax, 0x2dc(%rsp) movl 0x2dc(%rsp), %eax movl %eax, 0x61c(%rsp) movl %eax, 0x618(%rsp) movl %eax, 0x614(%rsp) movl %eax, 0x610(%rsp) movd 0x61c(%rsp), %xmm0 movd 0x618(%rsp), %xmm1 punpckldq %xmm0, %xmm1 # xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1] movd 0x614(%rsp), %xmm2 movd 0x610(%rsp), %xmm0 punpckldq %xmm2, %xmm0 # xmm0 = xmm0[0],xmm2[0],xmm0[1],xmm2[1] punpcklqdq %xmm1, %xmm0 # xmm0 = xmm0[0],xmm1[0] movaps %xmm0, 0x600(%rsp) movaps 0x600(%rsp), %xmm0 movaps %xmm0, 0x220(%rsp) movq 0x2b8(%rsp), %rax movl 0x60(%rax), %ecx movl 0xa0(%rax), %eax negl %eax movzwl %ax, %eax shll $0x10, %ecx orl %ecx, %eax movl %eax, 0x2d8(%rsp) movl 0x2d8(%rsp), %eax movl %eax, 0x644(%rsp) movl %eax, 0x640(%rsp) movl %eax, 0x63c(%rsp) movl %eax, 0x638(%rsp) movd 0x644(%rsp), %xmm0 movd 0x640(%rsp), %xmm1 punpckldq %xmm0, %xmm1 # xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1] movd 0x63c(%rsp), %xmm2 movd 0x638(%rsp), %xmm0 punpckldq %xmm2, %xmm0 # xmm0 = 
xmm0[0],xmm2[0],xmm0[1],xmm2[1] punpcklqdq %xmm1, %xmm0 # xmm0 = xmm0[0],xmm1[0] movdqa %xmm0, 0x620(%rsp) movdqa 0x620(%rsp), %xmm0 movdqa %xmm0, 0x210(%rsp) movq 0x2d0(%rsp), %rax movdqa (%rax), %xmm1 movq 0x2d0(%rsp), %rax movdqa 0x70(%rax), %xmm0 movdqa %xmm1, 0x3f0(%rsp) movdqa %xmm0, 0x3e0(%rsp) movdqa 0x3f0(%rsp), %xmm0 movdqa 0x3e0(%rsp), %xmm1 paddsw %xmm1, %xmm0 movdqa %xmm0, 0x190(%rsp) movq 0x2d0(%rsp), %rax movdqa (%rax), %xmm1 movq 0x2d0(%rsp), %rax movdqa 0x70(%rax), %xmm0 movdqa %xmm1, 0x4f0(%rsp) movdqa %xmm0, 0x4e0(%rsp) movdqa 0x4f0(%rsp), %xmm0 movdqa 0x4e0(%rsp), %xmm1 psubsw %xmm1, %xmm0 movdqa %xmm0, 0x200(%rsp) movq 0x2d0(%rsp), %rax movdqa 0x10(%rax), %xmm1 movq 0x2d0(%rsp), %rax movdqa 0x60(%rax), %xmm0 movdqa %xmm1, 0x3d0(%rsp) movdqa %xmm0, 0x3c0(%rsp) movdqa 0x3d0(%rsp), %xmm0 movdqa 0x3c0(%rsp), %xmm1 paddsw %xmm1, %xmm0 movdqa %xmm0, 0x1a0(%rsp) movq 0x2d0(%rsp), %rax movdqa 0x10(%rax), %xmm1 movq 0x2d0(%rsp), %rax movdqa 0x60(%rax), %xmm0 movdqa %xmm1, 0x4d0(%rsp) movdqa %xmm0, 0x4c0(%rsp) movdqa 0x4d0(%rsp), %xmm0 movdqa 0x4c0(%rsp), %xmm1 psubsw %xmm1, %xmm0 movdqa %xmm0, 0x1f0(%rsp) movq 0x2d0(%rsp), %rax movdqa 0x20(%rax), %xmm1 movq 0x2d0(%rsp), %rax movdqa 0x50(%rax), %xmm0 movdqa %xmm1, 0x3b0(%rsp) movdqa %xmm0, 0x3a0(%rsp) movdqa 0x3b0(%rsp), %xmm0 movdqa 0x3a0(%rsp), %xmm1 paddsw %xmm1, %xmm0 movdqa %xmm0, 0x1b0(%rsp) movq 0x2d0(%rsp), %rax movdqa 0x20(%rax), %xmm1 movq 0x2d0(%rsp), %rax movdqa 0x50(%rax), %xmm0 movdqa %xmm1, 0x4b0(%rsp) movdqa %xmm0, 0x4a0(%rsp) movdqa 0x4b0(%rsp), %xmm0 movdqa 0x4a0(%rsp), %xmm1 psubsw %xmm1, %xmm0 movdqa %xmm0, 0x1e0(%rsp) movq 0x2d0(%rsp), %rax movdqa 0x30(%rax), %xmm1 movq 0x2d0(%rsp), %rax movdqa 0x40(%rax), %xmm0 movdqa %xmm1, 0x390(%rsp) movdqa %xmm0, 0x380(%rsp) movdqa 0x390(%rsp), %xmm0 movdqa 0x380(%rsp), %xmm1 paddsw %xmm1, %xmm0 movdqa %xmm0, 0x1c0(%rsp) movq 0x2d0(%rsp), %rax movdqa 0x30(%rax), %xmm1 movq 0x2d0(%rsp), %rax movdqa 0x40(%rax), %xmm0 movdqa %xmm1, 0x490(%rsp) 
movdqa %xmm0, 0x480(%rsp) movdqa 0x490(%rsp), %xmm0 movdqa 0x480(%rsp), %xmm1 psubsw %xmm1, %xmm0 movdqa %xmm0, 0x1d0(%rsp) movdqa 0x190(%rsp), %xmm1 movdqa 0x1c0(%rsp), %xmm0 movdqa %xmm1, 0x370(%rsp) movdqa %xmm0, 0x360(%rsp) movdqa 0x370(%rsp), %xmm0 movdqa 0x360(%rsp), %xmm1 paddsw %xmm1, %xmm0 movdqa %xmm0, 0x110(%rsp) movdqa 0x190(%rsp), %xmm1 movdqa 0x1c0(%rsp), %xmm0 movdqa %xmm1, 0x470(%rsp) movdqa %xmm0, 0x460(%rsp) movdqa 0x470(%rsp), %xmm0 movdqa 0x460(%rsp), %xmm1 psubsw %xmm1, %xmm0 movdqa %xmm0, 0x140(%rsp) movdqa 0x1a0(%rsp), %xmm1 movdqa 0x1b0(%rsp), %xmm0 movdqa %xmm1, 0x350(%rsp) movdqa %xmm0, 0x340(%rsp) movdqa 0x350(%rsp), %xmm0 movdqa 0x340(%rsp), %xmm1 paddsw %xmm1, %xmm0 movdqa %xmm0, 0x120(%rsp) movdqa 0x1a0(%rsp), %xmm1 movdqa 0x1b0(%rsp), %xmm0 movdqa %xmm1, 0x450(%rsp) movdqa %xmm0, 0x440(%rsp) movdqa 0x450(%rsp), %xmm0 movdqa 0x440(%rsp), %xmm1 psubsw %xmm1, %xmm0 movdqa %xmm0, 0x130(%rsp) movdqa 0x1d0(%rsp), %xmm0 movdqa %xmm0, 0x150(%rsp) movdqa 0x2a0(%rsp), %xmm0 movb 0x2c7(%rsp), %dl leaq 0x190(%rsp), %rcx addq $0x50, %rcx leaq 0x190(%rsp), %r8 addq $0x60, %r8 leaq 0x110(%rsp), %r9 addq $0x50, %r9 leaq 0x110(%rsp), %rax addq $0x60, %rax leaq 0x290(%rsp), %rdi leaq 0x280(%rsp), %rsi movsbl %dl, %edx movq %rax, (%rsp) callq 0xa4bff0 movdqa 0x200(%rsp), %xmm0 movdqa %xmm0, 0x180(%rsp) movdqa 0x2a0(%rsp), %xmm0 movb 0x2c7(%rsp), %dl leaq 0x110(%rsp), %rcx leaq 0x110(%rsp), %r8 addq $0x10, %r8 leaq 0x90(%rsp), %r9 leaq 0x90(%rsp), %rax addq $0x10, %rax leaq 0x280(%rsp), %rdi leaq 0x270(%rsp), %rsi movsbl %dl, %edx movq %rax, (%rsp) callq 0xa4bff0 movdqa 0x2a0(%rsp), %xmm0 movb 0x2c7(%rsp), %dl leaq 0x110(%rsp), %rcx addq $0x20, %rcx leaq 0x110(%rsp), %r8 addq $0x30, %r8 leaq 0x90(%rsp), %r9 addq $0x20, %r9 leaq 0x90(%rsp), %rax addq $0x30, %rax leaq 0x260(%rsp), %rdi leaq 0x250(%rsp), %rsi movsbl %dl, %edx movq %rax, (%rsp) callq 0xa4bff0 movdqa 0x150(%rsp), %xmm1 movdqa 0x160(%rsp), %xmm0 movdqa %xmm1, 0x330(%rsp) movdqa %xmm0, 
0x320(%rsp) movdqa 0x330(%rsp), %xmm0 movdqa 0x320(%rsp), %xmm1 paddsw %xmm1, %xmm0 movdqa %xmm0, 0xd0(%rsp) movdqa 0x150(%rsp), %xmm1 movdqa 0x160(%rsp), %xmm0 movdqa %xmm1, 0x430(%rsp) movdqa %xmm0, 0x420(%rsp) movdqa 0x430(%rsp), %xmm0 movdqa 0x420(%rsp), %xmm1 psubsw %xmm1, %xmm0 movdqa %xmm0, 0xe0(%rsp) movdqa 0x180(%rsp), %xmm1 movdqa 0x170(%rsp), %xmm0 movdqa %xmm1, 0x410(%rsp) movdqa %xmm0, 0x400(%rsp) movdqa 0x410(%rsp), %xmm0 movdqa 0x400(%rsp), %xmm1 psubsw %xmm1, %xmm0 movdqa %xmm0, 0xf0(%rsp) movdqa 0x180(%rsp), %xmm1 movdqa 0x170(%rsp), %xmm0 movdqa %xmm1, 0x310(%rsp) movdqa %xmm0, 0x300(%rsp) movdqa 0x310(%rsp), %xmm0 movdqa 0x300(%rsp), %xmm1 paddsw %xmm1, %xmm0 movdqa %xmm0, 0x100(%rsp) movdqa 0x90(%rsp), %xmm0 movdqa %xmm0, 0x10(%rsp) movdqa 0xa0(%rsp), %xmm0 movdqa %xmm0, 0x20(%rsp) movdqa 0xb0(%rsp), %xmm0 movdqa %xmm0, 0x30(%rsp) movdqa 0xc0(%rsp), %xmm0 movdqa %xmm0, 0x40(%rsp) movdqa 0x2a0(%rsp), %xmm0 movb 0x2c7(%rsp), %dl leaq 0x90(%rsp), %rcx addq $0x40, %rcx leaq 0x90(%rsp), %r8 addq $0x70, %r8 leaq 0x10(%rsp), %r9 addq $0x40, %r9 leaq 0x10(%rsp), %rax addq $0x70, %rax leaq 0x240(%rsp), %rdi leaq 0x230(%rsp), %rsi movsbl %dl, %edx movq %rax, (%rsp) callq 0xa4bff0 movdqa 0x2a0(%rsp), %xmm0 movb 0x2c7(%rsp), %dl leaq 0x90(%rsp), %rcx addq $0x50, %rcx leaq 0x90(%rsp), %r8 addq $0x60, %r8 leaq 0x10(%rsp), %r9 addq $0x50, %r9 leaq 0x10(%rsp), %rax addq $0x60, %rax leaq 0x220(%rsp), %rdi leaq 0x210(%rsp), %rsi movsbl %dl, %edx movq %rax, (%rsp) callq 0xa4bff0 movdqa 0x10(%rsp), %xmm0 movq 0x2c8(%rsp), %rax movdqa %xmm0, (%rax) movdqa 0x50(%rsp), %xmm0 movq 0x2c8(%rsp), %rax movdqa %xmm0, 0x10(%rax) movdqa 0x30(%rsp), %xmm0 movq 0x2c8(%rsp), %rax movdqa %xmm0, 0x20(%rax) movdqa 0x70(%rsp), %xmm0 movq 0x2c8(%rsp), %rax movdqa %xmm0, 0x30(%rax) movdqa 0x20(%rsp), %xmm0 movq 0x2c8(%rsp), %rax movdqa %xmm0, 0x40(%rax) movdqa 0x60(%rsp), %xmm0 movq 0x2c8(%rsp), %rax movdqa %xmm0, 0x50(%rax) movdqa 0x40(%rsp), %xmm0 movq 0x2c8(%rsp), %rax movdqa 
%xmm0, 0x60(%rax) movdqa 0x80(%rsp), %xmm0 movq 0x2c8(%rsp), %rax movdqa %xmm0, 0x70(%rax) addq $0x648, %rsp # imm = 0x648 retq nopw %cs:(%rax,%rax)
/m-ab-s[P]aom/av1/encoder/x86/av1_fwd_txfm_sse2.c
fadst4x8_new_sse2
// Forward 8-point ADST (asymmetric DST) over four 16-bit columns held in
// the low words of each __m128i (SSE2). `input` and `output` are arrays of
// 8 vectors. Implemented as a 7-stage network: input permutation with sign
// flips, three rotation stages (via btf_16_w4_sse2 butterflies), two
// saturating add/sub stages, and a final output permutation.
// NOTE(review): assumes btf_16_w4_sse2 computes the rounded 4-wide
// butterfly ((a*w0 + b*w1 + rounding) >> cos_bit) -- confirm in
// av1_fwd_txfm_sse2.h.
static void fadst4x8_new_sse2(const __m128i *input, __m128i *output,
                              int8_t cos_bit) {
  const int32_t *cospi = cospi_arr(cos_bit);
  // Zero vector used to negate inputs in stage 1 (0 - x, saturating).
  const __m128i __zero = _mm_setzero_si128();
  // Rounding term for the >> cos_bit shift inside the butterflies.
  const __m128i __rounding = _mm_set1_epi32(1 << (cos_bit - 1));
  // Packed 16-bit weight pairs; the name encodes sign and cospi index,
  // e.g. cospi_p48_m16 holds (cospi[48], -cospi[16]) in each lane pair.
  __m128i cospi_p32_p32 = pair_set_epi16(cospi[32], cospi[32]);
  __m128i cospi_p32_m32 = pair_set_epi16(cospi[32], -cospi[32]);
  __m128i cospi_p16_p48 = pair_set_epi16(cospi[16], cospi[48]);
  __m128i cospi_p48_m16 = pair_set_epi16(cospi[48], -cospi[16]);
  __m128i cospi_m48_p16 = pair_set_epi16(-cospi[48], cospi[16]);
  __m128i cospi_p04_p60 = pair_set_epi16(cospi[4], cospi[60]);
  __m128i cospi_p60_m04 = pair_set_epi16(cospi[60], -cospi[4]);
  __m128i cospi_p20_p44 = pair_set_epi16(cospi[20], cospi[44]);
  __m128i cospi_p44_m20 = pair_set_epi16(cospi[44], -cospi[20]);
  __m128i cospi_p36_p28 = pair_set_epi16(cospi[36], cospi[28]);
  __m128i cospi_p28_m36 = pair_set_epi16(cospi[28], -cospi[36]);
  __m128i cospi_p52_p12 = pair_set_epi16(cospi[52], cospi[12]);
  __m128i cospi_p12_m52 = pair_set_epi16(cospi[12], -cospi[52]);

  // stage 1: permute inputs and negate selected rows (0 - x).
  __m128i x1[8];
  x1[0] = input[0];
  x1[1] = _mm_subs_epi16(__zero, input[7]);
  x1[2] = _mm_subs_epi16(__zero, input[3]);
  x1[3] = input[4];
  x1[4] = _mm_subs_epi16(__zero, input[1]);
  x1[5] = input[6];
  x1[6] = input[2];
  x1[7] = _mm_subs_epi16(__zero, input[5]);

  // stage 2: +/-cospi[32] rotations on pairs (2,3) and (6,7);
  // remaining rows pass through.
  __m128i x2[8];
  x2[0] = x1[0];
  x2[1] = x1[1];
  btf_16_w4_sse2(&cospi_p32_p32, &cospi_p32_m32, __rounding, cos_bit, &x1[2],
                 &x1[3], &x2[2], &x2[3]);
  x2[4] = x1[4];
  x2[5] = x1[5];
  btf_16_w4_sse2(&cospi_p32_p32, &cospi_p32_m32, __rounding, cos_bit, &x1[6],
                 &x1[7], &x2[6], &x2[7]);

  // stage 3: saturating butterfly adds/subs with stride 2.
  __m128i x3[8];
  x3[0] = _mm_adds_epi16(x2[0], x2[2]);
  x3[2] = _mm_subs_epi16(x2[0], x2[2]);
  x3[1] = _mm_adds_epi16(x2[1], x2[3]);
  x3[3] = _mm_subs_epi16(x2[1], x2[3]);
  x3[4] = _mm_adds_epi16(x2[4], x2[6]);
  x3[6] = _mm_subs_epi16(x2[4], x2[6]);
  x3[5] = _mm_adds_epi16(x2[5], x2[7]);
  x3[7] = _mm_subs_epi16(x2[5], x2[7]);

  // stage 4: cospi[16]/cospi[48] rotations on the upper half.
  __m128i x4[8];
  x4[0] = x3[0];
  x4[1] = x3[1];
  x4[2] = x3[2];
  x4[3] = x3[3];
  btf_16_w4_sse2(&cospi_p16_p48, &cospi_p48_m16, __rounding, cos_bit, &x3[4],
                 &x3[5], &x4[4], &x4[5]);
  btf_16_w4_sse2(&cospi_m48_p16, &cospi_p16_p48, __rounding, cos_bit, &x3[6],
                 &x3[7], &x4[6], &x4[7]);

  // stage 5: saturating butterfly adds/subs with stride 4.
  __m128i x5[8];
  x5[0] = _mm_adds_epi16(x4[0], x4[4]);
  x5[4] = _mm_subs_epi16(x4[0], x4[4]);
  x5[1] = _mm_adds_epi16(x4[1], x4[5]);
  x5[5] = _mm_subs_epi16(x4[1], x4[5]);
  x5[2] = _mm_adds_epi16(x4[2], x4[6]);
  x5[6] = _mm_subs_epi16(x4[2], x4[6]);
  x5[3] = _mm_adds_epi16(x4[3], x4[7]);
  x5[7] = _mm_subs_epi16(x4[3], x4[7]);

  // stage 6: final odd-angle rotations (cospi 4/60, 20/44, 36/28, 52/12).
  __m128i x6[8];
  btf_16_w4_sse2(&cospi_p04_p60, &cospi_p60_m04, __rounding, cos_bit, &x5[0],
                 &x5[1], &x6[0], &x6[1]);
  btf_16_w4_sse2(&cospi_p20_p44, &cospi_p44_m20, __rounding, cos_bit, &x5[2],
                 &x5[3], &x6[2], &x6[3]);
  btf_16_w4_sse2(&cospi_p36_p28, &cospi_p28_m36, __rounding, cos_bit, &x5[4],
                 &x5[5], &x6[4], &x6[5]);
  btf_16_w4_sse2(&cospi_p52_p12, &cospi_p12_m52, __rounding, cos_bit, &x5[6],
                 &x5[7], &x6[6], &x6[7]);

  // stage 7: permute into the ADST output coefficient order.
  output[0] = x6[1];
  output[1] = x6[6];
  output[2] = x6[3];
  output[3] = x6[4];
  output[4] = x6[5];
  output[5] = x6[2];
  output[6] = x6[7];
  output[7] = x6[0];
}
subq $0x8b8, %rsp # imm = 0x8B8 movb %dl, %al movq %rdi, 0x420(%rsp) movq %rsi, 0x418(%rsp) movb %al, 0x417(%rsp) movsbl 0x417(%rsp), %edi callq 0xa21c60 movq %rax, 0x408(%rsp) xorps %xmm0, %xmm0 movaps %xmm0, 0x8a0(%rsp) movaps 0x8a0(%rsp), %xmm0 movaps %xmm0, 0x3f0(%rsp) movb 0x417(%rsp), %cl decb %cl movl $0x1, %eax shll %cl, %eax movl %eax, 0x45c(%rsp) movl 0x45c(%rsp), %eax movl %eax, 0x6fc(%rsp) movl %eax, 0x6f8(%rsp) movl %eax, 0x6f4(%rsp) movl %eax, 0x6f0(%rsp) movd 0x6fc(%rsp), %xmm0 movd 0x6f8(%rsp), %xmm1 punpckldq %xmm0, %xmm1 # xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1] movd 0x6f4(%rsp), %xmm2 movd 0x6f0(%rsp), %xmm0 punpckldq %xmm2, %xmm0 # xmm0 = xmm0[0],xmm2[0],xmm0[1],xmm2[1] punpcklqdq %xmm1, %xmm0 # xmm0 = xmm0[0],xmm1[0] movaps %xmm0, 0x6e0(%rsp) movaps 0x6e0(%rsp), %xmm0 movaps %xmm0, 0x3e0(%rsp) movq 0x408(%rsp), %rax movzwl 0x80(%rax), %eax movl %eax, %ecx shll $0x10, %ecx orl %ecx, %eax movl %eax, 0x458(%rsp) movl 0x458(%rsp), %eax movl %eax, 0x71c(%rsp) movl %eax, 0x718(%rsp) movl %eax, 0x714(%rsp) movl %eax, 0x710(%rsp) movd 0x71c(%rsp), %xmm0 movd 0x718(%rsp), %xmm1 punpckldq %xmm0, %xmm1 # xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1] movd 0x714(%rsp), %xmm2 movd 0x710(%rsp), %xmm0 punpckldq %xmm2, %xmm0 # xmm0 = xmm0[0],xmm2[0],xmm0[1],xmm2[1] punpcklqdq %xmm1, %xmm0 # xmm0 = xmm0[0],xmm1[0] movaps %xmm0, 0x700(%rsp) movaps 0x700(%rsp), %xmm0 movaps %xmm0, 0x3d0(%rsp) movq 0x408(%rsp), %rax movl 0x80(%rax), %ecx movw %cx, %ax movzwl %ax, %eax shll $0x10, %ecx subl %ecx, %eax movl %eax, 0x454(%rsp) movl 0x454(%rsp), %eax movl %eax, 0x73c(%rsp) movl %eax, 0x738(%rsp) movl %eax, 0x734(%rsp) movl %eax, 0x730(%rsp) movd 0x73c(%rsp), %xmm0 movd 0x738(%rsp), %xmm1 punpckldq %xmm0, %xmm1 # xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1] movd 0x734(%rsp), %xmm2 movd 0x730(%rsp), %xmm0 punpckldq %xmm2, %xmm0 # xmm0 = xmm0[0],xmm2[0],xmm0[1],xmm2[1] punpcklqdq %xmm1, %xmm0 # xmm0 = xmm0[0],xmm1[0] movaps %xmm0, 0x720(%rsp) movaps 0x720(%rsp), %xmm0 movaps %xmm0, 
0x3c0(%rsp) movq 0x408(%rsp), %rcx movzwl 0x40(%rcx), %eax movl 0xc0(%rcx), %ecx shll $0x10, %ecx orl %ecx, %eax movl %eax, 0x450(%rsp) movl 0x450(%rsp), %eax movl %eax, 0x75c(%rsp) movl %eax, 0x758(%rsp) movl %eax, 0x754(%rsp) movl %eax, 0x750(%rsp) movd 0x75c(%rsp), %xmm0 movd 0x758(%rsp), %xmm1 punpckldq %xmm0, %xmm1 # xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1] movd 0x754(%rsp), %xmm2 movd 0x750(%rsp), %xmm0 punpckldq %xmm2, %xmm0 # xmm0 = xmm0[0],xmm2[0],xmm0[1],xmm2[1] punpcklqdq %xmm1, %xmm0 # xmm0 = xmm0[0],xmm1[0] movaps %xmm0, 0x740(%rsp) movaps 0x740(%rsp), %xmm0 movaps %xmm0, 0x3b0(%rsp) movq 0x408(%rsp), %rcx movzwl 0xc0(%rcx), %eax movl 0x40(%rcx), %ecx shll $0x10, %ecx subl %ecx, %eax movl %eax, 0x44c(%rsp) movl 0x44c(%rsp), %eax movl %eax, 0x77c(%rsp) movl %eax, 0x778(%rsp) movl %eax, 0x774(%rsp) movl %eax, 0x770(%rsp) movd 0x77c(%rsp), %xmm0 movd 0x778(%rsp), %xmm1 punpckldq %xmm0, %xmm1 # xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1] movd 0x774(%rsp), %xmm2 movd 0x770(%rsp), %xmm0 punpckldq %xmm2, %xmm0 # xmm0 = xmm0[0],xmm2[0],xmm0[1],xmm2[1] punpcklqdq %xmm1, %xmm0 # xmm0 = xmm0[0],xmm1[0] movaps %xmm0, 0x760(%rsp) movaps 0x760(%rsp), %xmm0 movaps %xmm0, 0x3a0(%rsp) movq 0x408(%rsp), %rax movl 0x40(%rax), %ecx movl 0xc0(%rax), %eax negl %eax movzwl %ax, %eax shll $0x10, %ecx orl %ecx, %eax movl %eax, 0x448(%rsp) movl 0x448(%rsp), %eax movl %eax, 0x79c(%rsp) movl %eax, 0x798(%rsp) movl %eax, 0x794(%rsp) movl %eax, 0x790(%rsp) movd 0x79c(%rsp), %xmm0 movd 0x798(%rsp), %xmm1 punpckldq %xmm0, %xmm1 # xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1] movd 0x794(%rsp), %xmm2 movd 0x790(%rsp), %xmm0 punpckldq %xmm2, %xmm0 # xmm0 = xmm0[0],xmm2[0],xmm0[1],xmm2[1] punpcklqdq %xmm1, %xmm0 # xmm0 = xmm0[0],xmm1[0] movaps %xmm0, 0x780(%rsp) movaps 0x780(%rsp), %xmm0 movaps %xmm0, 0x390(%rsp) movq 0x408(%rsp), %rcx movzwl 0x10(%rcx), %eax movl 0xf0(%rcx), %ecx shll $0x10, %ecx orl %ecx, %eax movl %eax, 0x444(%rsp) movl 0x444(%rsp), %eax movl %eax, 0x7bc(%rsp) movl %eax, 
0x7b8(%rsp) movl %eax, 0x7b4(%rsp) movl %eax, 0x7b0(%rsp) movd 0x7bc(%rsp), %xmm0 movd 0x7b8(%rsp), %xmm1 punpckldq %xmm0, %xmm1 # xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1] movd 0x7b4(%rsp), %xmm2 movd 0x7b0(%rsp), %xmm0 punpckldq %xmm2, %xmm0 # xmm0 = xmm0[0],xmm2[0],xmm0[1],xmm2[1] punpcklqdq %xmm1, %xmm0 # xmm0 = xmm0[0],xmm1[0] movaps %xmm0, 0x7a0(%rsp) movaps 0x7a0(%rsp), %xmm0 movaps %xmm0, 0x380(%rsp) movq 0x408(%rsp), %rcx movzwl 0xf0(%rcx), %eax movl 0x10(%rcx), %ecx shll $0x10, %ecx subl %ecx, %eax movl %eax, 0x440(%rsp) movl 0x440(%rsp), %eax movl %eax, 0x7dc(%rsp) movl %eax, 0x7d8(%rsp) movl %eax, 0x7d4(%rsp) movl %eax, 0x7d0(%rsp) movd 0x7dc(%rsp), %xmm0 movd 0x7d8(%rsp), %xmm1 punpckldq %xmm0, %xmm1 # xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1] movd 0x7d4(%rsp), %xmm2 movd 0x7d0(%rsp), %xmm0 punpckldq %xmm2, %xmm0 # xmm0 = xmm0[0],xmm2[0],xmm0[1],xmm2[1] punpcklqdq %xmm1, %xmm0 # xmm0 = xmm0[0],xmm1[0] movaps %xmm0, 0x7c0(%rsp) movaps 0x7c0(%rsp), %xmm0 movaps %xmm0, 0x370(%rsp) movq 0x408(%rsp), %rcx movzwl 0x50(%rcx), %eax movl 0xb0(%rcx), %ecx shll $0x10, %ecx orl %ecx, %eax movl %eax, 0x43c(%rsp) movl 0x43c(%rsp), %eax movl %eax, 0x7fc(%rsp) movl %eax, 0x7f8(%rsp) movl %eax, 0x7f4(%rsp) movl %eax, 0x7f0(%rsp) movd 0x7fc(%rsp), %xmm0 movd 0x7f8(%rsp), %xmm1 punpckldq %xmm0, %xmm1 # xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1] movd 0x7f4(%rsp), %xmm2 movd 0x7f0(%rsp), %xmm0 punpckldq %xmm2, %xmm0 # xmm0 = xmm0[0],xmm2[0],xmm0[1],xmm2[1] punpcklqdq %xmm1, %xmm0 # xmm0 = xmm0[0],xmm1[0] movaps %xmm0, 0x7e0(%rsp) movaps 0x7e0(%rsp), %xmm0 movaps %xmm0, 0x360(%rsp) movq 0x408(%rsp), %rcx movzwl 0xb0(%rcx), %eax movl 0x50(%rcx), %ecx shll $0x10, %ecx subl %ecx, %eax movl %eax, 0x438(%rsp) movl 0x438(%rsp), %eax movl %eax, 0x81c(%rsp) movl %eax, 0x818(%rsp) movl %eax, 0x814(%rsp) movl %eax, 0x810(%rsp) movd 0x81c(%rsp), %xmm0 movd 0x818(%rsp), %xmm1 punpckldq %xmm0, %xmm1 # xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1] movd 0x814(%rsp), %xmm2 movd 0x810(%rsp), %xmm0 punpckldq 
%xmm2, %xmm0 # xmm0 = xmm0[0],xmm2[0],xmm0[1],xmm2[1] punpcklqdq %xmm1, %xmm0 # xmm0 = xmm0[0],xmm1[0] movaps %xmm0, 0x800(%rsp) movaps 0x800(%rsp), %xmm0 movaps %xmm0, 0x350(%rsp) movq 0x408(%rsp), %rcx movzwl 0x90(%rcx), %eax movl 0x70(%rcx), %ecx shll $0x10, %ecx orl %ecx, %eax movl %eax, 0x434(%rsp) movl 0x434(%rsp), %eax movl %eax, 0x83c(%rsp) movl %eax, 0x838(%rsp) movl %eax, 0x834(%rsp) movl %eax, 0x830(%rsp) movd 0x83c(%rsp), %xmm0 movd 0x838(%rsp), %xmm1 punpckldq %xmm0, %xmm1 # xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1] movd 0x834(%rsp), %xmm2 movd 0x830(%rsp), %xmm0 punpckldq %xmm2, %xmm0 # xmm0 = xmm0[0],xmm2[0],xmm0[1],xmm2[1] punpcklqdq %xmm1, %xmm0 # xmm0 = xmm0[0],xmm1[0] movaps %xmm0, 0x820(%rsp) movaps 0x820(%rsp), %xmm0 movaps %xmm0, 0x340(%rsp) movq 0x408(%rsp), %rcx movzwl 0x70(%rcx), %eax movl 0x90(%rcx), %ecx shll $0x10, %ecx subl %ecx, %eax movl %eax, 0x430(%rsp) movl 0x430(%rsp), %eax movl %eax, 0x85c(%rsp) movl %eax, 0x858(%rsp) movl %eax, 0x854(%rsp) movl %eax, 0x850(%rsp) movd 0x85c(%rsp), %xmm0 movd 0x858(%rsp), %xmm1 punpckldq %xmm0, %xmm1 # xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1] movd 0x854(%rsp), %xmm2 movd 0x850(%rsp), %xmm0 punpckldq %xmm2, %xmm0 # xmm0 = xmm0[0],xmm2[0],xmm0[1],xmm2[1] punpcklqdq %xmm1, %xmm0 # xmm0 = xmm0[0],xmm1[0] movaps %xmm0, 0x840(%rsp) movaps 0x840(%rsp), %xmm0 movaps %xmm0, 0x330(%rsp) movq 0x408(%rsp), %rcx movzwl 0xd0(%rcx), %eax movl 0x30(%rcx), %ecx shll $0x10, %ecx orl %ecx, %eax movl %eax, 0x42c(%rsp) movl 0x42c(%rsp), %eax movl %eax, 0x87c(%rsp) movl %eax, 0x878(%rsp) movl %eax, 0x874(%rsp) movl %eax, 0x870(%rsp) movd 0x87c(%rsp), %xmm0 movd 0x878(%rsp), %xmm1 punpckldq %xmm0, %xmm1 # xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1] movd 0x874(%rsp), %xmm2 movd 0x870(%rsp), %xmm0 punpckldq %xmm2, %xmm0 # xmm0 = xmm0[0],xmm2[0],xmm0[1],xmm2[1] punpcklqdq %xmm1, %xmm0 # xmm0 = xmm0[0],xmm1[0] movaps %xmm0, 0x860(%rsp) movaps 0x860(%rsp), %xmm0 movaps %xmm0, 0x320(%rsp) movq 0x408(%rsp), %rcx movzwl 0x30(%rcx), %eax 
movl 0xd0(%rcx), %ecx shll $0x10, %ecx subl %ecx, %eax movl %eax, 0x428(%rsp) movl 0x428(%rsp), %eax movl %eax, 0x89c(%rsp) movl %eax, 0x898(%rsp) movl %eax, 0x894(%rsp) movl %eax, 0x890(%rsp) movd 0x89c(%rsp), %xmm0 movd 0x898(%rsp), %xmm1 punpckldq %xmm0, %xmm1 # xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1] movd 0x894(%rsp), %xmm2 movd 0x890(%rsp), %xmm0 punpckldq %xmm2, %xmm0 # xmm0 = xmm0[0],xmm2[0],xmm0[1],xmm2[1] punpcklqdq %xmm1, %xmm0 # xmm0 = xmm0[0],xmm1[0] movdqa %xmm0, 0x880(%rsp) movdqa 0x880(%rsp), %xmm0 movdqa %xmm0, 0x310(%rsp) movq 0x420(%rsp), %rax movdqa (%rax), %xmm0 movdqa %xmm0, 0x290(%rsp) movdqa 0x3f0(%rsp), %xmm1 movq 0x420(%rsp), %rax movdqa 0x70(%rax), %xmm0 movdqa %xmm1, 0x6d0(%rsp) movdqa %xmm0, 0x6c0(%rsp) movdqa 0x6d0(%rsp), %xmm0 movdqa 0x6c0(%rsp), %xmm1 psubsw %xmm1, %xmm0 movdqa %xmm0, 0x2a0(%rsp) movdqa 0x3f0(%rsp), %xmm1 movq 0x420(%rsp), %rax movdqa 0x30(%rax), %xmm0 movdqa %xmm1, 0x6b0(%rsp) movdqa %xmm0, 0x6a0(%rsp) movdqa 0x6b0(%rsp), %xmm0 movdqa 0x6a0(%rsp), %xmm1 psubsw %xmm1, %xmm0 movdqa %xmm0, 0x2b0(%rsp) movq 0x420(%rsp), %rax movdqa 0x40(%rax), %xmm0 movdqa %xmm0, 0x2c0(%rsp) movdqa 0x3f0(%rsp), %xmm1 movq 0x420(%rsp), %rax movdqa 0x10(%rax), %xmm0 movdqa %xmm1, 0x690(%rsp) movdqa %xmm0, 0x680(%rsp) movdqa 0x690(%rsp), %xmm0 movdqa 0x680(%rsp), %xmm1 psubsw %xmm1, %xmm0 movdqa %xmm0, 0x2d0(%rsp) movq 0x420(%rsp), %rax movdqa 0x60(%rax), %xmm0 movdqa %xmm0, 0x2e0(%rsp) movq 0x420(%rsp), %rax movdqa 0x20(%rax), %xmm0 movdqa %xmm0, 0x2f0(%rsp) movdqa 0x3f0(%rsp), %xmm1 movq 0x420(%rsp), %rax movdqa 0x50(%rax), %xmm0 movdqa %xmm1, 0x670(%rsp) movdqa %xmm0, 0x660(%rsp) movdqa 0x670(%rsp), %xmm0 movdqa 0x660(%rsp), %xmm1 psubsw %xmm1, %xmm0 movdqa %xmm0, 0x300(%rsp) movdqa 0x290(%rsp), %xmm0 movdqa %xmm0, 0x210(%rsp) movdqa 0x2a0(%rsp), %xmm0 movdqa %xmm0, 0x220(%rsp) movdqa 0x3e0(%rsp), %xmm0 movb 0x417(%rsp), %dl leaq 0x290(%rsp), %rcx addq $0x20, %rcx leaq 0x290(%rsp), %r8 addq $0x30, %r8 leaq 0x210(%rsp), %r9 addq $0x20, %r9 
leaq 0x210(%rsp), %rax addq $0x30, %rax leaq 0x3d0(%rsp), %rdi leaq 0x3c0(%rsp), %rsi movsbl %dl, %edx movq %rax, (%rsp) callq 0xa4bff0 movdqa 0x2d0(%rsp), %xmm0 movdqa %xmm0, 0x250(%rsp) movdqa 0x2e0(%rsp), %xmm0 movdqa %xmm0, 0x260(%rsp) movdqa 0x3e0(%rsp), %xmm0 movb 0x417(%rsp), %dl leaq 0x290(%rsp), %rcx addq $0x60, %rcx leaq 0x290(%rsp), %r8 addq $0x70, %r8 leaq 0x210(%rsp), %r9 addq $0x60, %r9 leaq 0x210(%rsp), %rax addq $0x70, %rax leaq 0x3d0(%rsp), %rdi leaq 0x3c0(%rsp), %rsi movsbl %dl, %edx movq %rax, (%rsp) callq 0xa4bff0 movdqa 0x210(%rsp), %xmm1 movdqa 0x230(%rsp), %xmm0 movdqa %xmm1, 0x550(%rsp) movdqa %xmm0, 0x540(%rsp) movdqa 0x550(%rsp), %xmm0 movdqa 0x540(%rsp), %xmm1 paddsw %xmm1, %xmm0 movdqa %xmm0, 0x190(%rsp) movdqa 0x210(%rsp), %xmm1 movdqa 0x230(%rsp), %xmm0 movdqa %xmm1, 0x650(%rsp) movdqa %xmm0, 0x640(%rsp) movdqa 0x650(%rsp), %xmm0 movdqa 0x640(%rsp), %xmm1 psubsw %xmm1, %xmm0 movdqa %xmm0, 0x1b0(%rsp) movdqa 0x220(%rsp), %xmm1 movdqa 0x240(%rsp), %xmm0 movdqa %xmm1, 0x530(%rsp) movdqa %xmm0, 0x520(%rsp) movdqa 0x530(%rsp), %xmm0 movdqa 0x520(%rsp), %xmm1 paddsw %xmm1, %xmm0 movdqa %xmm0, 0x1a0(%rsp) movdqa 0x220(%rsp), %xmm1 movdqa 0x240(%rsp), %xmm0 movdqa %xmm1, 0x630(%rsp) movdqa %xmm0, 0x620(%rsp) movdqa 0x630(%rsp), %xmm0 movdqa 0x620(%rsp), %xmm1 psubsw %xmm1, %xmm0 movdqa %xmm0, 0x1c0(%rsp) movdqa 0x250(%rsp), %xmm1 movdqa 0x270(%rsp), %xmm0 movdqa %xmm1, 0x510(%rsp) movdqa %xmm0, 0x500(%rsp) movdqa 0x510(%rsp), %xmm0 movdqa 0x500(%rsp), %xmm1 paddsw %xmm1, %xmm0 movdqa %xmm0, 0x1d0(%rsp) movdqa 0x250(%rsp), %xmm1 movdqa 0x270(%rsp), %xmm0 movdqa %xmm1, 0x610(%rsp) movdqa %xmm0, 0x600(%rsp) movdqa 0x610(%rsp), %xmm0 movdqa 0x600(%rsp), %xmm1 psubsw %xmm1, %xmm0 movdqa %xmm0, 0x1f0(%rsp) movdqa 0x260(%rsp), %xmm1 movdqa 0x280(%rsp), %xmm0 movdqa %xmm1, 0x4f0(%rsp) movdqa %xmm0, 0x4e0(%rsp) movdqa 0x4f0(%rsp), %xmm0 movdqa 0x4e0(%rsp), %xmm1 paddsw %xmm1, %xmm0 movdqa %xmm0, 0x1e0(%rsp) movdqa 0x260(%rsp), %xmm1 movdqa 0x280(%rsp), 
%xmm0 movdqa %xmm1, 0x5f0(%rsp) movdqa %xmm0, 0x5e0(%rsp) movdqa 0x5f0(%rsp), %xmm0 movdqa 0x5e0(%rsp), %xmm1 psubsw %xmm1, %xmm0 movdqa %xmm0, 0x200(%rsp) movdqa 0x190(%rsp), %xmm0 movdqa %xmm0, 0x110(%rsp) movdqa 0x1a0(%rsp), %xmm0 movdqa %xmm0, 0x120(%rsp) movdqa 0x1b0(%rsp), %xmm0 movdqa %xmm0, 0x130(%rsp) movdqa 0x1c0(%rsp), %xmm0 movdqa %xmm0, 0x140(%rsp) movdqa 0x3e0(%rsp), %xmm0 movb 0x417(%rsp), %dl leaq 0x190(%rsp), %rcx addq $0x40, %rcx leaq 0x190(%rsp), %r8 addq $0x50, %r8 leaq 0x110(%rsp), %r9 addq $0x40, %r9 leaq 0x110(%rsp), %rax addq $0x50, %rax leaq 0x3b0(%rsp), %rdi leaq 0x3a0(%rsp), %rsi movsbl %dl, %edx movq %rax, (%rsp) callq 0xa4bff0 movdqa 0x3e0(%rsp), %xmm0 movb 0x417(%rsp), %dl leaq 0x190(%rsp), %rcx addq $0x60, %rcx leaq 0x190(%rsp), %r8 addq $0x70, %r8 leaq 0x110(%rsp), %r9 addq $0x60, %r9 leaq 0x110(%rsp), %rax addq $0x70, %rax leaq 0x390(%rsp), %rdi leaq 0x3b0(%rsp), %rsi movsbl %dl, %edx movq %rax, (%rsp) callq 0xa4bff0 movdqa 0x110(%rsp), %xmm1 movdqa 0x150(%rsp), %xmm0 movdqa %xmm1, 0x4d0(%rsp) movdqa %xmm0, 0x4c0(%rsp) movdqa 0x4d0(%rsp), %xmm0 movdqa 0x4c0(%rsp), %xmm1 paddsw %xmm1, %xmm0 movdqa %xmm0, 0x90(%rsp) movdqa 0x110(%rsp), %xmm1 movdqa 0x150(%rsp), %xmm0 movdqa %xmm1, 0x5d0(%rsp) movdqa %xmm0, 0x5c0(%rsp) movdqa 0x5d0(%rsp), %xmm0 movdqa 0x5c0(%rsp), %xmm1 psubsw %xmm1, %xmm0 movdqa %xmm0, 0xd0(%rsp) movdqa 0x120(%rsp), %xmm1 movdqa 0x160(%rsp), %xmm0 movdqa %xmm1, 0x4b0(%rsp) movdqa %xmm0, 0x4a0(%rsp) movdqa 0x4b0(%rsp), %xmm0 movdqa 0x4a0(%rsp), %xmm1 paddsw %xmm1, %xmm0 movdqa %xmm0, 0xa0(%rsp) movdqa 0x120(%rsp), %xmm1 movdqa 0x160(%rsp), %xmm0 movdqa %xmm1, 0x5b0(%rsp) movdqa %xmm0, 0x5a0(%rsp) movdqa 0x5b0(%rsp), %xmm0 movdqa 0x5a0(%rsp), %xmm1 psubsw %xmm1, %xmm0 movdqa %xmm0, 0xe0(%rsp) movdqa 0x130(%rsp), %xmm1 movdqa 0x170(%rsp), %xmm0 movdqa %xmm1, 0x490(%rsp) movdqa %xmm0, 0x480(%rsp) movdqa 0x490(%rsp), %xmm0 movdqa 0x480(%rsp), %xmm1 paddsw %xmm1, %xmm0 movdqa %xmm0, 0xb0(%rsp) movdqa 0x130(%rsp), %xmm1 
movdqa 0x170(%rsp), %xmm0 movdqa %xmm1, 0x590(%rsp) movdqa %xmm0, 0x580(%rsp) movdqa 0x590(%rsp), %xmm0 movdqa 0x580(%rsp), %xmm1 psubsw %xmm1, %xmm0 movdqa %xmm0, 0xf0(%rsp) movdqa 0x140(%rsp), %xmm1 movdqa 0x180(%rsp), %xmm0 movdqa %xmm1, 0x470(%rsp) movdqa %xmm0, 0x460(%rsp) movdqa 0x470(%rsp), %xmm0 movdqa 0x460(%rsp), %xmm1 paddsw %xmm1, %xmm0 movdqa %xmm0, 0xc0(%rsp) movdqa 0x140(%rsp), %xmm1 movdqa 0x180(%rsp), %xmm0 movdqa %xmm1, 0x570(%rsp) movdqa %xmm0, 0x560(%rsp) movdqa 0x570(%rsp), %xmm0 movdqa 0x560(%rsp), %xmm1 psubsw %xmm1, %xmm0 movdqa %xmm0, 0x100(%rsp) movdqa 0x3e0(%rsp), %xmm0 movb 0x417(%rsp), %dl leaq 0x90(%rsp), %rcx leaq 0x90(%rsp), %r8 addq $0x10, %r8 leaq 0x10(%rsp), %r9 leaq 0x10(%rsp), %rax addq $0x10, %rax leaq 0x380(%rsp), %rdi leaq 0x370(%rsp), %rsi movsbl %dl, %edx movq %rax, (%rsp) callq 0xa4bff0 movdqa 0x3e0(%rsp), %xmm0 movb 0x417(%rsp), %dl leaq 0x90(%rsp), %rcx addq $0x20, %rcx leaq 0x90(%rsp), %r8 addq $0x30, %r8 leaq 0x10(%rsp), %r9 addq $0x20, %r9 leaq 0x10(%rsp), %rax addq $0x30, %rax leaq 0x360(%rsp), %rdi leaq 0x350(%rsp), %rsi movsbl %dl, %edx movq %rax, (%rsp) callq 0xa4bff0 movdqa 0x3e0(%rsp), %xmm0 movb 0x417(%rsp), %dl leaq 0x90(%rsp), %rcx addq $0x40, %rcx leaq 0x90(%rsp), %r8 addq $0x50, %r8 leaq 0x10(%rsp), %r9 addq $0x40, %r9 leaq 0x10(%rsp), %rax addq $0x50, %rax leaq 0x340(%rsp), %rdi leaq 0x330(%rsp), %rsi movsbl %dl, %edx movq %rax, (%rsp) callq 0xa4bff0 movdqa 0x3e0(%rsp), %xmm0 movb 0x417(%rsp), %dl leaq 0x90(%rsp), %rcx addq $0x60, %rcx leaq 0x90(%rsp), %r8 addq $0x70, %r8 leaq 0x10(%rsp), %r9 addq $0x60, %r9 leaq 0x10(%rsp), %rax addq $0x70, %rax leaq 0x320(%rsp), %rdi leaq 0x310(%rsp), %rsi movsbl %dl, %edx movq %rax, (%rsp) callq 0xa4bff0 movdqa 0x20(%rsp), %xmm0 movq 0x418(%rsp), %rax movdqa %xmm0, (%rax) movdqa 0x70(%rsp), %xmm0 movq 0x418(%rsp), %rax movdqa %xmm0, 0x10(%rax) movdqa 0x40(%rsp), %xmm0 movq 0x418(%rsp), %rax movdqa %xmm0, 0x20(%rax) movdqa 0x50(%rsp), %xmm0 movq 0x418(%rsp), %rax movdqa 
%xmm0, 0x30(%rax) movdqa 0x60(%rsp), %xmm0 movq 0x418(%rsp), %rax movdqa %xmm0, 0x40(%rax) movdqa 0x30(%rsp), %xmm0 movq 0x418(%rsp), %rax movdqa %xmm0, 0x50(%rax) movdqa 0x80(%rsp), %xmm0 movq 0x418(%rsp), %rax movdqa %xmm0, 0x60(%rax) movdqa 0x10(%rsp), %xmm0 movq 0x418(%rsp), %rax movdqa %xmm0, 0x70(%rax) addq $0x8b8, %rsp # imm = 0x8B8 retq nopw %cs:(%rax,%rax)
/m-ab-s[P]aom/av1/encoder/x86/av1_fwd_txfm_sse2.c
fidentity8x8_new_sse2
static inline void fidentity8x8_new_sse2(const __m128i *input, __m128i *output,
                                         int8_t cos_bit) {
  // Identity transform for an 8x8 tile: every 16-bit coefficient is doubled.
  // The doubling uses a saturating add, so results clamp to [-32768, 32767]
  // instead of wrapping.
  (void)cos_bit;  // identity path needs no cosine-table precision
  for (int row = 0; row < 8; ++row) {
    output[row] = _mm_adds_epi16(input[row], input[row]);
  }
}
subq $0xa8, %rsp movb %dl, %al movq %rdi, -0x68(%rsp) movq %rsi, -0x70(%rsp) movb %al, -0x71(%rsp) movq -0x68(%rsp), %rax movdqa (%rax), %xmm1 movq -0x68(%rsp), %rax movdqa (%rax), %xmm0 movdqa %xmm1, 0x90(%rsp) movdqa %xmm0, 0x80(%rsp) movdqa 0x90(%rsp), %xmm0 movdqa 0x80(%rsp), %xmm1 paddsw %xmm1, %xmm0 movq -0x70(%rsp), %rax movdqa %xmm0, (%rax) movq -0x68(%rsp), %rax movdqa 0x10(%rax), %xmm1 movq -0x68(%rsp), %rax movdqa 0x10(%rax), %xmm0 movdqa %xmm1, 0x70(%rsp) movdqa %xmm0, 0x60(%rsp) movdqa 0x70(%rsp), %xmm0 movdqa 0x60(%rsp), %xmm1 paddsw %xmm1, %xmm0 movq -0x70(%rsp), %rax movdqa %xmm0, 0x10(%rax) movq -0x68(%rsp), %rax movdqa 0x20(%rax), %xmm1 movq -0x68(%rsp), %rax movdqa 0x20(%rax), %xmm0 movdqa %xmm1, 0x50(%rsp) movdqa %xmm0, 0x40(%rsp) movdqa 0x50(%rsp), %xmm0 movdqa 0x40(%rsp), %xmm1 paddsw %xmm1, %xmm0 movq -0x70(%rsp), %rax movdqa %xmm0, 0x20(%rax) movq -0x68(%rsp), %rax movdqa 0x30(%rax), %xmm1 movq -0x68(%rsp), %rax movdqa 0x30(%rax), %xmm0 movdqa %xmm1, 0x30(%rsp) movdqa %xmm0, 0x20(%rsp) movdqa 0x30(%rsp), %xmm0 movdqa 0x20(%rsp), %xmm1 paddsw %xmm1, %xmm0 movq -0x70(%rsp), %rax movdqa %xmm0, 0x30(%rax) movq -0x68(%rsp), %rax movdqa 0x40(%rax), %xmm1 movq -0x68(%rsp), %rax movdqa 0x40(%rax), %xmm0 movdqa %xmm1, 0x10(%rsp) movdqa %xmm0, (%rsp) movdqa 0x10(%rsp), %xmm0 movdqa (%rsp), %xmm1 paddsw %xmm1, %xmm0 movq -0x70(%rsp), %rax movdqa %xmm0, 0x40(%rax) movq -0x68(%rsp), %rax movdqa 0x50(%rax), %xmm1 movq -0x68(%rsp), %rax movdqa 0x50(%rax), %xmm0 movdqa %xmm1, -0x10(%rsp) movdqa %xmm0, -0x20(%rsp) movdqa -0x10(%rsp), %xmm0 movdqa -0x20(%rsp), %xmm1 paddsw %xmm1, %xmm0 movq -0x70(%rsp), %rax movdqa %xmm0, 0x50(%rax) movq -0x68(%rsp), %rax movdqa 0x60(%rax), %xmm1 movq -0x68(%rsp), %rax movdqa 0x60(%rax), %xmm0 movdqa %xmm1, -0x30(%rsp) movdqa %xmm0, -0x40(%rsp) movdqa -0x30(%rsp), %xmm0 movdqa -0x40(%rsp), %xmm1 paddsw %xmm1, %xmm0 movq -0x70(%rsp), %rax movdqa %xmm0, 0x60(%rax) movq -0x68(%rsp), %rax movdqa 0x70(%rax), %xmm1 movq 
-0x68(%rsp), %rax movdqa 0x70(%rax), %xmm0 movdqa %xmm1, -0x50(%rsp) movdqa %xmm0, -0x60(%rsp) movdqa -0x50(%rsp), %xmm0 movdqa -0x60(%rsp), %xmm1 paddsw %xmm1, %xmm0 movq -0x70(%rsp), %rax movdqa %xmm0, 0x70(%rax) addq $0xa8, %rsp retq nopw %cs:(%rax,%rax)
/m-ab-s[P]aom/av1/encoder/x86/av1_fwd_txfm_sse2.h
fdct8x4_new_sse2
// Forward 4-point DCT over eight 16-bit lanes (one independent column per
// SIMD lane). `cos_bit` selects the fixed-point cosine table and rounding
// shift used by the butterfly multiplies.
static void fdct8x4_new_sse2(const __m128i *input, __m128i *output, int8_t cos_bit) {
  const int32_t *cospi = cospi_arr(cos_bit);
  // Rounding offset 1 << (cos_bit - 1).
  // NOTE(review): `__rounding` looks unused here, but btf_16_sse2 is a macro
  // that presumably references it by name — confirm against its definition.
  const __m128i __rounding = _mm_set1_epi32(1 << (cos_bit - 1));
  // Packed 16-bit cosine pairs consumed by pmaddwd inside the butterflies.
  __m128i cospi_p32_p32 = pair_set_epi16(cospi[32], cospi[32]);
  __m128i cospi_p32_m32 = pair_set_epi16(cospi[32], -cospi[32]);
  __m128i cospi_p48_p16 = pair_set_epi16(cospi[48], cospi[16]);
  __m128i cospi_m16_p48 = pair_set_epi16(-cospi[16], cospi[48]);
  // stage 1: saturating add/sub butterfly pairing ends of the length-4 input.
  __m128i x1[4];
  x1[0] = _mm_adds_epi16(input[0], input[3]);
  x1[3] = _mm_subs_epi16(input[0], input[3]);
  x1[1] = _mm_adds_epi16(input[1], input[2]);
  x1[2] = _mm_subs_epi16(input[1], input[2]);
  // stage 2: fixed-point rotations; the macro writes both named outputs.
  __m128i x2[4];
  btf_16_sse2(cospi_p32_p32, cospi_p32_m32, x1[0], x1[1], x2[0], x2[1]);
  btf_16_sse2(cospi_p48_p16, cospi_m16_p48, x1[2], x1[3], x2[2], x2[3]);
  // stage 3: reorder (0,2,1,3) into natural frequency order.
  output[0] = x2[0];
  output[1] = x2[2];
  output[2] = x2[1];
  output[3] = x2[3];
}
subq $0x7f8, %rsp # imm = 0x7F8 movb %dl, %al movq %rdi, 0x2b0(%rsp) movq %rsi, 0x2a8(%rsp) movb %al, 0x2a7(%rsp) movsbl 0x2a7(%rsp), %edi callq 0xa21c60 movq %rax, 0x298(%rsp) movb 0x2a7(%rsp), %cl decb %cl movl $0x1, %eax shll %cl, %eax movl %eax, 0x2cc(%rsp) movl 0x2cc(%rsp), %eax movl %eax, 0x76c(%rsp) movl %eax, 0x768(%rsp) movl %eax, 0x764(%rsp) movl %eax, 0x760(%rsp) movd 0x76c(%rsp), %xmm0 movd 0x768(%rsp), %xmm1 punpckldq %xmm0, %xmm1 # xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1] movd 0x764(%rsp), %xmm2 movd 0x760(%rsp), %xmm0 punpckldq %xmm2, %xmm0 # xmm0 = xmm0[0],xmm2[0],xmm0[1],xmm2[1] punpcklqdq %xmm1, %xmm0 # xmm0 = xmm0[0],xmm1[0] movaps %xmm0, 0x750(%rsp) movaps 0x750(%rsp), %xmm0 movaps %xmm0, 0x280(%rsp) movq 0x298(%rsp), %rax movzwl 0x80(%rax), %eax movl %eax, %ecx shll $0x10, %ecx orl %ecx, %eax movl %eax, 0x2c8(%rsp) movl 0x2c8(%rsp), %eax movl %eax, 0x78c(%rsp) movl %eax, 0x788(%rsp) movl %eax, 0x784(%rsp) movl %eax, 0x780(%rsp) movd 0x78c(%rsp), %xmm0 movd 0x788(%rsp), %xmm1 punpckldq %xmm0, %xmm1 # xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1] movd 0x784(%rsp), %xmm2 movd 0x780(%rsp), %xmm0 punpckldq %xmm2, %xmm0 # xmm0 = xmm0[0],xmm2[0],xmm0[1],xmm2[1] punpcklqdq %xmm1, %xmm0 # xmm0 = xmm0[0],xmm1[0] movaps %xmm0, 0x770(%rsp) movaps 0x770(%rsp), %xmm0 movaps %xmm0, 0x270(%rsp) movq 0x298(%rsp), %rax movl 0x80(%rax), %ecx movw %cx, %ax movzwl %ax, %eax shll $0x10, %ecx subl %ecx, %eax movl %eax, 0x2c4(%rsp) movl 0x2c4(%rsp), %eax movl %eax, 0x7ac(%rsp) movl %eax, 0x7a8(%rsp) movl %eax, 0x7a4(%rsp) movl %eax, 0x7a0(%rsp) movd 0x7ac(%rsp), %xmm0 movd 0x7a8(%rsp), %xmm1 punpckldq %xmm0, %xmm1 # xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1] movd 0x7a4(%rsp), %xmm2 movd 0x7a0(%rsp), %xmm0 punpckldq %xmm2, %xmm0 # xmm0 = xmm0[0],xmm2[0],xmm0[1],xmm2[1] punpcklqdq %xmm1, %xmm0 # xmm0 = xmm0[0],xmm1[0] movaps %xmm0, 0x790(%rsp) movaps 0x790(%rsp), %xmm0 movaps %xmm0, 0x260(%rsp) movq 0x298(%rsp), %rcx movzwl 0xc0(%rcx), %eax movl 0x40(%rcx), %ecx shll $0x10, %ecx orl 
%ecx, %eax movl %eax, 0x2c0(%rsp) movl 0x2c0(%rsp), %eax movl %eax, 0x7cc(%rsp) movl %eax, 0x7c8(%rsp) movl %eax, 0x7c4(%rsp) movl %eax, 0x7c0(%rsp) movd 0x7cc(%rsp), %xmm0 movd 0x7c8(%rsp), %xmm1 punpckldq %xmm0, %xmm1 # xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1] movd 0x7c4(%rsp), %xmm2 movd 0x7c0(%rsp), %xmm0 punpckldq %xmm2, %xmm0 # xmm0 = xmm0[0],xmm2[0],xmm0[1],xmm2[1] punpcklqdq %xmm1, %xmm0 # xmm0 = xmm0[0],xmm1[0] movaps %xmm0, 0x7b0(%rsp) movaps 0x7b0(%rsp), %xmm0 movaps %xmm0, 0x250(%rsp) movq 0x298(%rsp), %rcx movl 0x40(%rcx), %eax movl 0xc0(%rcx), %ecx negl %eax movzwl %ax, %eax shll $0x10, %ecx orl %ecx, %eax movl %eax, 0x2bc(%rsp) movl 0x2bc(%rsp), %eax movl %eax, 0x7f4(%rsp) movl %eax, 0x7f0(%rsp) movl %eax, 0x7ec(%rsp) movl %eax, 0x7e8(%rsp) movd 0x7f4(%rsp), %xmm0 movd 0x7f0(%rsp), %xmm1 punpckldq %xmm0, %xmm1 # xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1] movd 0x7ec(%rsp), %xmm2 movd 0x7e8(%rsp), %xmm0 punpckldq %xmm2, %xmm0 # xmm0 = xmm0[0],xmm2[0],xmm0[1],xmm2[1] punpcklqdq %xmm1, %xmm0 # xmm0 = xmm0[0],xmm1[0] movdqa %xmm0, 0x7d0(%rsp) movdqa 0x7d0(%rsp), %xmm0 movdqa %xmm0, 0x240(%rsp) movq 0x2b0(%rsp), %rax movdqa (%rax), %xmm1 movq 0x2b0(%rsp), %rax movdqa 0x30(%rax), %xmm0 movdqa %xmm1, 0x300(%rsp) movdqa %xmm0, 0x2f0(%rsp) movdqa 0x300(%rsp), %xmm0 movdqa 0x2f0(%rsp), %xmm1 paddsw %xmm1, %xmm0 movdqa %xmm0, 0x200(%rsp) movq 0x2b0(%rsp), %rax movdqa (%rax), %xmm1 movq 0x2b0(%rsp), %rax movdqa 0x30(%rax), %xmm0 movdqa %xmm1, 0x340(%rsp) movdqa %xmm0, 0x330(%rsp) movdqa 0x340(%rsp), %xmm0 movdqa 0x330(%rsp), %xmm1 psubsw %xmm1, %xmm0 movdqa %xmm0, 0x230(%rsp) movq 0x2b0(%rsp), %rax movdqa 0x10(%rax), %xmm1 movq 0x2b0(%rsp), %rax movdqa 0x20(%rax), %xmm0 movdqa %xmm1, 0x2e0(%rsp) movdqa %xmm0, 0x2d0(%rsp) movdqa 0x2e0(%rsp), %xmm0 movdqa 0x2d0(%rsp), %xmm1 paddsw %xmm1, %xmm0 movdqa %xmm0, 0x210(%rsp) movq 0x2b0(%rsp), %rax movdqa 0x10(%rax), %xmm1 movq 0x2b0(%rsp), %rax movdqa 0x20(%rax), %xmm0 movdqa %xmm1, 0x320(%rsp) movdqa %xmm0, 0x310(%rsp) movdqa 
0x320(%rsp), %xmm0 movdqa 0x310(%rsp), %xmm1 psubsw %xmm1, %xmm0 movdqa %xmm0, 0x220(%rsp) movaps 0x200(%rsp), %xmm1 movaps 0x210(%rsp), %xmm0 movaps %xmm1, 0x380(%rsp) movaps %xmm0, 0x370(%rsp) movaps 0x380(%rsp), %xmm0 movaps 0x370(%rsp), %xmm1 punpcklwd %xmm1, %xmm0 # xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1],xmm0[2],xmm1[2],xmm0[3],xmm1[3] movaps %xmm0, 0x1b0(%rsp) movaps 0x200(%rsp), %xmm1 movaps 0x210(%rsp), %xmm0 movaps %xmm1, 0x3c0(%rsp) movaps %xmm0, 0x3b0(%rsp) movaps 0x3c0(%rsp), %xmm0 movaps 0x3b0(%rsp), %xmm1 punpckhwd %xmm1, %xmm0 # xmm0 = xmm0[4],xmm1[4],xmm0[5],xmm1[5],xmm0[6],xmm1[6],xmm0[7],xmm1[7] movdqa %xmm0, 0x1a0(%rsp) movdqa 0x1b0(%rsp), %xmm1 movdqa 0x270(%rsp), %xmm0 movdqa %xmm1, 0x4c0(%rsp) movdqa %xmm0, 0x4b0(%rsp) movdqa 0x4c0(%rsp), %xmm0 movdqa 0x4b0(%rsp), %xmm1 pmaddwd %xmm1, %xmm0 movdqa %xmm0, 0x190(%rsp) movdqa 0x1a0(%rsp), %xmm1 movdqa 0x270(%rsp), %xmm0 movdqa %xmm1, 0x4a0(%rsp) movdqa %xmm0, 0x490(%rsp) movdqa 0x4a0(%rsp), %xmm0 movdqa 0x490(%rsp), %xmm1 pmaddwd %xmm1, %xmm0 movdqa %xmm0, 0x180(%rsp) movdqa 0x1b0(%rsp), %xmm1 movdqa 0x260(%rsp), %xmm0 movdqa %xmm1, 0x480(%rsp) movdqa %xmm0, 0x470(%rsp) movdqa 0x480(%rsp), %xmm0 movdqa 0x470(%rsp), %xmm1 pmaddwd %xmm1, %xmm0 movdqa %xmm0, 0x170(%rsp) movdqa 0x1a0(%rsp), %xmm1 movdqa 0x260(%rsp), %xmm0 movdqa %xmm1, 0x460(%rsp) movdqa %xmm0, 0x450(%rsp) movdqa 0x460(%rsp), %xmm0 movdqa 0x450(%rsp), %xmm1 pmaddwd %xmm1, %xmm0 movdqa %xmm0, 0x160(%rsp) movdqa 0x190(%rsp), %xmm1 movdqa 0x280(%rsp), %xmm0 movdqa %xmm1, 0x5c0(%rsp) movdqa %xmm0, 0x5b0(%rsp) movdqa 0x5c0(%rsp), %xmm0 movdqa 0x5b0(%rsp), %xmm1 paddd %xmm1, %xmm0 movdqa %xmm0, 0x150(%rsp) movdqa 0x180(%rsp), %xmm1 movdqa 0x280(%rsp), %xmm0 movdqa %xmm1, 0x5a0(%rsp) movdqa %xmm0, 0x590(%rsp) movdqa 0x5a0(%rsp), %xmm0 movdqa 0x590(%rsp), %xmm1 paddd %xmm1, %xmm0 movdqa %xmm0, 0x140(%rsp) movdqa 0x170(%rsp), %xmm1 movdqa 0x280(%rsp), %xmm0 movdqa %xmm1, 0x580(%rsp) movdqa %xmm0, 0x570(%rsp) movdqa 0x580(%rsp), %xmm0 movdqa 
0x570(%rsp), %xmm1 paddd %xmm1, %xmm0 movdqa %xmm0, 0x130(%rsp) movdqa 0x160(%rsp), %xmm1 movdqa 0x280(%rsp), %xmm0 movdqa %xmm1, 0x560(%rsp) movdqa %xmm0, 0x550(%rsp) movdqa 0x560(%rsp), %xmm0 movdqa 0x550(%rsp), %xmm1 paddd %xmm1, %xmm0 movdqa %xmm0, 0x120(%rsp) movdqa 0x150(%rsp), %xmm0 movsbl 0x2a7(%rsp), %eax movdqa %xmm0, 0x6c0(%rsp) movl %eax, 0x6bc(%rsp) movdqa 0x6c0(%rsp), %xmm0 movl 0x6bc(%rsp), %eax movd %eax, %xmm1 psrad %xmm1, %xmm0 movdqa %xmm0, 0x110(%rsp) movdqa 0x140(%rsp), %xmm0 movsbl 0x2a7(%rsp), %eax movdqa %xmm0, 0x6a0(%rsp) movl %eax, 0x69c(%rsp) movdqa 0x6a0(%rsp), %xmm0 movl 0x69c(%rsp), %eax movd %eax, %xmm1 psrad %xmm1, %xmm0 movdqa %xmm0, 0x100(%rsp) movdqa 0x130(%rsp), %xmm0 movsbl 0x2a7(%rsp), %eax movdqa %xmm0, 0x680(%rsp) movl %eax, 0x67c(%rsp) movdqa 0x680(%rsp), %xmm0 movl 0x67c(%rsp), %eax movd %eax, %xmm1 psrad %xmm1, %xmm0 movdqa %xmm0, 0xf0(%rsp) movdqa 0x120(%rsp), %xmm0 movsbl 0x2a7(%rsp), %eax movdqa %xmm0, 0x660(%rsp) movl %eax, 0x65c(%rsp) movdqa 0x660(%rsp), %xmm0 movl 0x65c(%rsp), %eax movd %eax, %xmm1 psrad %xmm1, %xmm0 movdqa %xmm0, 0xe0(%rsp) movdqa 0x110(%rsp), %xmm1 movdqa 0x100(%rsp), %xmm0 movdqa %xmm1, 0x740(%rsp) movdqa %xmm0, 0x730(%rsp) movdqa 0x740(%rsp), %xmm0 movdqa 0x730(%rsp), %xmm1 packssdw %xmm1, %xmm0 movdqa %xmm0, 0x1c0(%rsp) movdqa 0xf0(%rsp), %xmm1 movdqa 0xe0(%rsp), %xmm0 movdqa %xmm1, 0x720(%rsp) movdqa %xmm0, 0x710(%rsp) movdqa 0x720(%rsp), %xmm0 movdqa 0x710(%rsp), %xmm1 packssdw %xmm1, %xmm0 movdqa %xmm0, 0x1d0(%rsp) jmp 0xa4ca5e movaps 0x220(%rsp), %xmm1 movaps 0x230(%rsp), %xmm0 movaps %xmm1, 0x360(%rsp) movaps %xmm0, 0x350(%rsp) movaps 0x360(%rsp), %xmm0 movaps 0x350(%rsp), %xmm1 punpcklwd %xmm1, %xmm0 # xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1],xmm0[2],xmm1[2],xmm0[3],xmm1[3] movaps %xmm0, 0xd0(%rsp) movaps 0x220(%rsp), %xmm1 movaps 0x230(%rsp), %xmm0 movaps %xmm1, 0x3a0(%rsp) movaps %xmm0, 0x390(%rsp) movaps 0x3a0(%rsp), %xmm0 movaps 0x390(%rsp), %xmm1 punpckhwd %xmm1, %xmm0 # xmm0 = 
xmm0[4],xmm1[4],xmm0[5],xmm1[5],xmm0[6],xmm1[6],xmm0[7],xmm1[7] movdqa %xmm0, 0xc0(%rsp) movdqa 0xd0(%rsp), %xmm1 movdqa 0x250(%rsp), %xmm0 movdqa %xmm1, 0x440(%rsp) movdqa %xmm0, 0x430(%rsp) movdqa 0x440(%rsp), %xmm0 movdqa 0x430(%rsp), %xmm1 pmaddwd %xmm1, %xmm0 movdqa %xmm0, 0xb0(%rsp) movdqa 0xc0(%rsp), %xmm1 movdqa 0x250(%rsp), %xmm0 movdqa %xmm1, 0x420(%rsp) movdqa %xmm0, 0x410(%rsp) movdqa 0x420(%rsp), %xmm0 movdqa 0x410(%rsp), %xmm1 pmaddwd %xmm1, %xmm0 movdqa %xmm0, 0xa0(%rsp) movdqa 0xd0(%rsp), %xmm1 movdqa 0x240(%rsp), %xmm0 movdqa %xmm1, 0x400(%rsp) movdqa %xmm0, 0x3f0(%rsp) movdqa 0x400(%rsp), %xmm0 movdqa 0x3f0(%rsp), %xmm1 pmaddwd %xmm1, %xmm0 movdqa %xmm0, 0x90(%rsp) movdqa 0xc0(%rsp), %xmm1 movdqa 0x240(%rsp), %xmm0 movdqa %xmm1, 0x3e0(%rsp) movdqa %xmm0, 0x3d0(%rsp) movdqa 0x3e0(%rsp), %xmm0 movdqa 0x3d0(%rsp), %xmm1 pmaddwd %xmm1, %xmm0 movdqa %xmm0, 0x80(%rsp) movdqa 0xb0(%rsp), %xmm1 movdqa 0x280(%rsp), %xmm0 movdqa %xmm1, 0x540(%rsp) movdqa %xmm0, 0x530(%rsp) movdqa 0x540(%rsp), %xmm0 movdqa 0x530(%rsp), %xmm1 paddd %xmm1, %xmm0 movdqa %xmm0, 0x70(%rsp) movdqa 0xa0(%rsp), %xmm1 movdqa 0x280(%rsp), %xmm0 movdqa %xmm1, 0x520(%rsp) movdqa %xmm0, 0x510(%rsp) movdqa 0x520(%rsp), %xmm0 movdqa 0x510(%rsp), %xmm1 paddd %xmm1, %xmm0 movdqa %xmm0, 0x60(%rsp) movdqa 0x90(%rsp), %xmm1 movdqa 0x280(%rsp), %xmm0 movdqa %xmm1, 0x500(%rsp) movdqa %xmm0, 0x4f0(%rsp) movdqa 0x500(%rsp), %xmm0 movdqa 0x4f0(%rsp), %xmm1 paddd %xmm1, %xmm0 movdqa %xmm0, 0x50(%rsp) movdqa 0x80(%rsp), %xmm1 movdqa 0x280(%rsp), %xmm0 movdqa %xmm1, 0x4e0(%rsp) movdqa %xmm0, 0x4d0(%rsp) movdqa 0x4e0(%rsp), %xmm0 movdqa 0x4d0(%rsp), %xmm1 paddd %xmm1, %xmm0 movdqa %xmm0, 0x40(%rsp) movdqa 0x70(%rsp), %xmm0 movsbl 0x2a7(%rsp), %eax movdqa %xmm0, 0x640(%rsp) movl %eax, 0x63c(%rsp) movdqa 0x640(%rsp), %xmm0 movl 0x63c(%rsp), %eax movd %eax, %xmm1 psrad %xmm1, %xmm0 movdqa %xmm0, 0x30(%rsp) movdqa 0x60(%rsp), %xmm0 movsbl 0x2a7(%rsp), %eax movdqa %xmm0, 0x620(%rsp) movl %eax, 0x61c(%rsp) 
movdqa 0x620(%rsp), %xmm0 movl 0x61c(%rsp), %eax movd %eax, %xmm1 psrad %xmm1, %xmm0 movdqa %xmm0, 0x20(%rsp) movdqa 0x50(%rsp), %xmm0 movsbl 0x2a7(%rsp), %eax movdqa %xmm0, 0x600(%rsp) movl %eax, 0x5fc(%rsp) movdqa 0x600(%rsp), %xmm0 movl 0x5fc(%rsp), %eax movd %eax, %xmm1 psrad %xmm1, %xmm0 movdqa %xmm0, 0x10(%rsp) movdqa 0x40(%rsp), %xmm0 movsbl 0x2a7(%rsp), %eax movdqa %xmm0, 0x5e0(%rsp) movl %eax, 0x5dc(%rsp) movdqa 0x5e0(%rsp), %xmm0 movl 0x5dc(%rsp), %eax movd %eax, %xmm1 psrad %xmm1, %xmm0 movdqa %xmm0, (%rsp) movdqa 0x30(%rsp), %xmm1 movdqa 0x20(%rsp), %xmm0 movdqa %xmm1, 0x700(%rsp) movdqa %xmm0, 0x6f0(%rsp) movdqa 0x700(%rsp), %xmm0 movdqa 0x6f0(%rsp), %xmm1 packssdw %xmm1, %xmm0 movdqa %xmm0, 0x1e0(%rsp) movdqa 0x10(%rsp), %xmm1 movdqa (%rsp), %xmm0 movdqa %xmm1, 0x6e0(%rsp) movdqa %xmm0, 0x6d0(%rsp) movdqa 0x6e0(%rsp), %xmm0 movdqa 0x6d0(%rsp), %xmm1 packssdw %xmm1, %xmm0 movdqa %xmm0, 0x1f0(%rsp) movdqa 0x1c0(%rsp), %xmm0 movq 0x2a8(%rsp), %rax movdqa %xmm0, (%rax) movdqa 0x1e0(%rsp), %xmm0 movq 0x2a8(%rsp), %rax movdqa %xmm0, 0x10(%rax) movdqa 0x1d0(%rsp), %xmm0 movq 0x2a8(%rsp), %rax movdqa %xmm0, 0x20(%rax) movdqa 0x1f0(%rsp), %xmm0 movq 0x2a8(%rsp), %rax movdqa %xmm0, 0x30(%rax) addq $0x7f8, %rsp # imm = 0x7F8 retq nopw (%rax,%rax)
/m-ab-s[P]aom/av1/encoder/x86/av1_fwd_txfm_sse2.c
fidentity8x4_new_sse2
static inline void fidentity8x4_new_sse2(const __m128i *const input,
                                         __m128i *const output,
                                         const int8_t cos_bit) {
  // 8x4 identity transform: each 16-bit coefficient is scaled by the
  // fixed-point sqrt(2) constant (NewSqrt2) with rounding, then the two
  // 32-bit halves are saturated back together into 16-bit lanes.
  (void)cos_bit;  // no cosine table needed on the identity path
  const __m128i ones = _mm_set1_epi16(1);
  int row = 0;
  while (row < 4) {
    const __m128i coeffs = input[row];
    // Interleaving with 1 widens each 16-bit lane for the 32-bit scale step.
    const __m128i lo32 =
        scale_round_sse2(_mm_unpacklo_epi16(coeffs, ones), NewSqrt2);
    const __m128i hi32 =
        scale_round_sse2(_mm_unpackhi_epi16(coeffs, ones), NewSqrt2);
    output[row] = _mm_packs_epi32(lo32, hi32);
    ++row;
  }
}
subq $0x128, %rsp # imm = 0x128 movb %dl, %al movq %rdi, 0x88(%rsp) movq %rsi, 0x80(%rsp) movb %al, 0x7f(%rsp) movw $0x1, 0xfe(%rsp) movw 0xfe(%rsp), %ax movw %ax, 0xe(%rsp) movw %ax, 0x126(%rsp) movw %ax, 0x124(%rsp) movw %ax, 0x122(%rsp) movw %ax, 0x120(%rsp) movw %ax, 0x11e(%rsp) movw %ax, 0x11c(%rsp) movw %ax, 0x11a(%rsp) movw %ax, 0x118(%rsp) movzwl 0x126(%rsp), %eax movd %eax, %xmm1 movzwl 0x124(%rsp), %eax movd %eax, %xmm0 punpcklwd %xmm1, %xmm0 # xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1],xmm0[2],xmm1[2],xmm0[3],xmm1[3] movzwl 0x122(%rsp), %eax movd %eax, %xmm2 movzwl 0x120(%rsp), %eax movd %eax, %xmm1 punpcklwd %xmm2, %xmm1 # xmm1 = xmm1[0],xmm2[0],xmm1[1],xmm2[1],xmm1[2],xmm2[2],xmm1[3],xmm2[3] punpckldq %xmm0, %xmm1 # xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1] movzwl 0x11e(%rsp), %eax movd %eax, %xmm0 movzwl 0x11c(%rsp), %eax movd %eax, %xmm2 punpcklwd %xmm0, %xmm2 # xmm2 = xmm2[0],xmm0[0],xmm2[1],xmm0[1],xmm2[2],xmm0[2],xmm2[3],xmm0[3] movzwl 0x11a(%rsp), %eax movd %eax, %xmm3 movzwl 0x118(%rsp), %eax movd %eax, %xmm0 punpcklwd %xmm3, %xmm0 # xmm0 = xmm0[0],xmm3[0],xmm0[1],xmm3[1],xmm0[2],xmm3[2],xmm0[3],xmm3[3] punpckldq %xmm2, %xmm0 # xmm0 = xmm0[0],xmm2[0],xmm0[1],xmm2[1] punpcklqdq %xmm1, %xmm0 # xmm0 = xmm0[0],xmm1[0] movdqa %xmm0, 0x100(%rsp) movdqa 0x100(%rsp), %xmm0 movdqa %xmm0, 0x60(%rsp) movl $0x0, 0x5c(%rsp) cmpl $0x4, 0x5c(%rsp) jge 0xa4e3aa movq 0x88(%rsp), %rax movslq 0x5c(%rsp), %rcx shlq $0x4, %rcx movaps (%rax,%rcx), %xmm1 movaps 0x60(%rsp), %xmm0 movaps %xmm1, 0xa0(%rsp) movaps %xmm0, 0x90(%rsp) movaps 0xa0(%rsp), %xmm0 movaps 0x90(%rsp), %xmm1 punpcklwd %xmm1, %xmm0 # xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1],xmm0[2],xmm1[2],xmm0[3],xmm1[3] movaps %xmm0, 0x40(%rsp) movq 0x88(%rsp), %rax movslq 0x5c(%rsp), %rcx shlq $0x4, %rcx movaps (%rax,%rcx), %xmm1 movaps 0x60(%rsp), %xmm0 movaps %xmm1, 0xc0(%rsp) movaps %xmm0, 0xb0(%rsp) movaps 0xc0(%rsp), %xmm0 movaps 0xb0(%rsp), %xmm1 punpckhwd %xmm1, %xmm0 # xmm0 = 
xmm0[4],xmm1[4],xmm0[5],xmm1[5],xmm0[6],xmm1[6],xmm0[7],xmm1[7] movdqa %xmm0, 0x30(%rsp) movdqa 0x40(%rsp), %xmm0 movl $0x16a1, %edi # imm = 0x16A1 callq 0xa49e60 movdqa %xmm0, 0x20(%rsp) movdqa 0x30(%rsp), %xmm0 movl $0x16a1, %edi # imm = 0x16A1 callq 0xa49e60 movdqa %xmm0, 0x10(%rsp) movdqa 0x20(%rsp), %xmm1 movdqa 0x10(%rsp), %xmm0 movdqa %xmm1, 0xe0(%rsp) movdqa %xmm0, 0xd0(%rsp) movdqa 0xe0(%rsp), %xmm0 movdqa 0xd0(%rsp), %xmm1 packssdw %xmm1, %xmm0 movq 0x80(%rsp), %rax movslq 0x5c(%rsp), %rcx shlq $0x4, %rcx addq %rcx, %rax movdqa %xmm0, (%rax) movl 0x5c(%rsp), %eax addl $0x1, %eax movl %eax, 0x5c(%rsp) jmp 0xa4e290 addq $0x128, %rsp # imm = 0x128 retq nopw %cs:(%rax,%rax)
/m-ab-s[P]aom/av1/encoder/x86/av1_fwd_txfm_sse2.h
store_rect_16bit_to_32bit
static inline void store_rect_16bit_to_32bit(const __m128i a,
                                             int32_t *const b) {
  // Widens eight 16-bit coefficients to 32 bits while applying the
  // fixed-point sqrt(2) scale (NewSqrt2) used for rectangular transforms,
  // then stores the two 4-lane results to the 16-byte-aligned destination.
  const __m128i ones = _mm_set1_epi16(1);
  const __m128i lo = scale_round_sse2(_mm_unpacklo_epi16(a, ones), NewSqrt2);
  const __m128i hi = scale_round_sse2(_mm_unpackhi_epi16(a, ones), NewSqrt2);
  _mm_store_si128((__m128i *)b, lo);
  _mm_store_si128((__m128i *)(b + 4), hi);
}
subq $0x118, %rsp # imm = 0x118 movaps %xmm0, 0x60(%rsp) movq %rdi, 0x58(%rsp) movw $0x1, 0xbe(%rsp) movw 0xbe(%rsp), %ax movw %ax, 0xde(%rsp) movw %ax, 0xdc(%rsp) movw %ax, 0xda(%rsp) movw %ax, 0xd8(%rsp) movw %ax, 0xd6(%rsp) movw %ax, 0xd4(%rsp) movw %ax, 0xd2(%rsp) movw %ax, 0xd0(%rsp) movzwl 0xde(%rsp), %eax movd %eax, %xmm1 movzwl 0xdc(%rsp), %eax movd %eax, %xmm0 punpcklwd %xmm1, %xmm0 # xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1],xmm0[2],xmm1[2],xmm0[3],xmm1[3] movzwl 0xda(%rsp), %eax movd %eax, %xmm2 movzwl 0xd8(%rsp), %eax movd %eax, %xmm1 punpcklwd %xmm2, %xmm1 # xmm1 = xmm1[0],xmm2[0],xmm1[1],xmm2[1],xmm1[2],xmm2[2],xmm1[3],xmm2[3] punpckldq %xmm0, %xmm1 # xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1] movzwl 0xd6(%rsp), %eax movd %eax, %xmm0 movzwl 0xd4(%rsp), %eax movd %eax, %xmm2 punpcklwd %xmm0, %xmm2 # xmm2 = xmm2[0],xmm0[0],xmm2[1],xmm0[1],xmm2[2],xmm0[2],xmm2[3],xmm0[3] movzwl 0xd2(%rsp), %eax movd %eax, %xmm3 movzwl 0xd0(%rsp), %eax movd %eax, %xmm0 punpcklwd %xmm3, %xmm0 # xmm0 = xmm0[0],xmm3[0],xmm0[1],xmm3[1],xmm0[2],xmm3[2],xmm0[3],xmm3[3] punpckldq %xmm2, %xmm0 # xmm0 = xmm0[0],xmm2[0],xmm0[1],xmm2[1] punpcklqdq %xmm1, %xmm0 # xmm0 = xmm0[0],xmm1[0] movaps %xmm0, 0xc0(%rsp) movaps 0xc0(%rsp), %xmm0 movaps %xmm0, 0x40(%rsp) movaps 0x60(%rsp), %xmm1 movaps 0x40(%rsp), %xmm0 movaps %xmm1, 0x80(%rsp) movaps %xmm0, 0x70(%rsp) movaps 0x80(%rsp), %xmm0 movaps 0x70(%rsp), %xmm1 punpcklwd %xmm1, %xmm0 # xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1],xmm0[2],xmm1[2],xmm0[3],xmm1[3] movaps %xmm0, 0x30(%rsp) movaps 0x60(%rsp), %xmm1 movaps 0x40(%rsp), %xmm0 movaps %xmm1, 0xa0(%rsp) movaps %xmm0, 0x90(%rsp) movaps 0xa0(%rsp), %xmm0 movaps 0x90(%rsp), %xmm1 punpckhwd %xmm1, %xmm0 # xmm0 = xmm0[4],xmm1[4],xmm0[5],xmm1[5],xmm0[6],xmm1[6],xmm0[7],xmm1[7] movdqa %xmm0, 0x20(%rsp) movdqa 0x30(%rsp), %xmm0 movl $0x16a1, %edi # imm = 0x16A1 callq 0xa49e60 movdqa %xmm0, 0x10(%rsp) movdqa 0x20(%rsp), %xmm0 movl $0x16a1, %edi # imm = 0x16A1 callq 0xa49e60 movdqa %xmm0, (%rsp) movq 
0x58(%rsp), %rax movdqa 0x10(%rsp), %xmm0 movq %rax, 0x110(%rsp) movdqa %xmm0, 0x100(%rsp) movdqa 0x100(%rsp), %xmm0 movq 0x110(%rsp), %rax movdqa %xmm0, (%rax) movq 0x58(%rsp), %rax addq $0x10, %rax movdqa (%rsp), %xmm0 movq %rax, 0xf8(%rsp) movdqa %xmm0, 0xe0(%rsp) movdqa 0xe0(%rsp), %xmm0 movq 0xf8(%rsp), %rax movdqa %xmm0, (%rax) addq $0x118, %rsp # imm = 0x118 retq nopl (%rax)
/m-ab-s[P]aom/av1/common/x86/av1_txfm_sse2.h
fidentity8x16_new_sse2
static inline void fidentity8x16_new_sse2(const __m128i *input,
                                          __m128i *output, int8_t cos_bit) {
  // 8x16 identity transform: scales every coefficient by twice the
  // fixed-point sqrt(2) constant (2 * NewSqrt2) with rounding, then
  // saturates the widened halves back into 16-bit lanes.
  (void)cos_bit;  // the identity path ignores the cosine-table precision
  const __m128i ones = _mm_set1_epi16(1);
  int row = 0;
  while (row < 16) {
    const __m128i coeffs = input[row];
    // Interleaving with 1 widens each 16-bit lane for the 32-bit scale step.
    const __m128i lo32 =
        scale_round_sse2(_mm_unpacklo_epi16(coeffs, ones), 2 * NewSqrt2);
    const __m128i hi32 =
        scale_round_sse2(_mm_unpackhi_epi16(coeffs, ones), 2 * NewSqrt2);
    output[row] = _mm_packs_epi32(lo32, hi32);
    ++row;
  }
}
subq $0x128, %rsp # imm = 0x128 movb %dl, %al movq %rdi, 0x88(%rsp) movq %rsi, 0x80(%rsp) movb %al, 0x7f(%rsp) movw $0x1, 0xfe(%rsp) movw 0xfe(%rsp), %ax movw %ax, 0xe(%rsp) movw %ax, 0x126(%rsp) movw %ax, 0x124(%rsp) movw %ax, 0x122(%rsp) movw %ax, 0x120(%rsp) movw %ax, 0x11e(%rsp) movw %ax, 0x11c(%rsp) movw %ax, 0x11a(%rsp) movw %ax, 0x118(%rsp) movzwl 0x126(%rsp), %eax movd %eax, %xmm1 movzwl 0x124(%rsp), %eax movd %eax, %xmm0 punpcklwd %xmm1, %xmm0 # xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1],xmm0[2],xmm1[2],xmm0[3],xmm1[3] movzwl 0x122(%rsp), %eax movd %eax, %xmm2 movzwl 0x120(%rsp), %eax movd %eax, %xmm1 punpcklwd %xmm2, %xmm1 # xmm1 = xmm1[0],xmm2[0],xmm1[1],xmm2[1],xmm1[2],xmm2[2],xmm1[3],xmm2[3] punpckldq %xmm0, %xmm1 # xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1] movzwl 0x11e(%rsp), %eax movd %eax, %xmm0 movzwl 0x11c(%rsp), %eax movd %eax, %xmm2 punpcklwd %xmm0, %xmm2 # xmm2 = xmm2[0],xmm0[0],xmm2[1],xmm0[1],xmm2[2],xmm0[2],xmm2[3],xmm0[3] movzwl 0x11a(%rsp), %eax movd %eax, %xmm3 movzwl 0x118(%rsp), %eax movd %eax, %xmm0 punpcklwd %xmm3, %xmm0 # xmm0 = xmm0[0],xmm3[0],xmm0[1],xmm3[1],xmm0[2],xmm3[2],xmm0[3],xmm3[3] punpckldq %xmm2, %xmm0 # xmm0 = xmm0[0],xmm2[0],xmm0[1],xmm2[1] punpcklqdq %xmm1, %xmm0 # xmm0 = xmm0[0],xmm1[0] movdqa %xmm0, 0x100(%rsp) movdqa 0x100(%rsp), %xmm0 movdqa %xmm0, 0x60(%rsp) movl $0x0, 0x5c(%rsp) cmpl $0x10, 0x5c(%rsp) jge 0xa55c4a movq 0x88(%rsp), %rax movslq 0x5c(%rsp), %rcx shlq $0x4, %rcx movaps (%rax,%rcx), %xmm1 movaps 0x60(%rsp), %xmm0 movaps %xmm1, 0xa0(%rsp) movaps %xmm0, 0x90(%rsp) movaps 0xa0(%rsp), %xmm0 movaps 0x90(%rsp), %xmm1 punpcklwd %xmm1, %xmm0 # xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1],xmm0[2],xmm1[2],xmm0[3],xmm1[3] movaps %xmm0, 0x40(%rsp) movq 0x88(%rsp), %rax movslq 0x5c(%rsp), %rcx shlq $0x4, %rcx movaps (%rax,%rcx), %xmm1 movaps 0x60(%rsp), %xmm0 movaps %xmm1, 0xc0(%rsp) movaps %xmm0, 0xb0(%rsp) movaps 0xc0(%rsp), %xmm0 movaps 0xb0(%rsp), %xmm1 punpckhwd %xmm1, %xmm0 # xmm0 = 
xmm0[4],xmm1[4],xmm0[5],xmm1[5],xmm0[6],xmm1[6],xmm0[7],xmm1[7] movdqa %xmm0, 0x30(%rsp) movdqa 0x40(%rsp), %xmm0 movl $0x2d42, %edi # imm = 0x2D42 callq 0xa49e60 movdqa %xmm0, 0x20(%rsp) movdqa 0x30(%rsp), %xmm0 movl $0x2d42, %edi # imm = 0x2D42 callq 0xa49e60 movdqa %xmm0, 0x10(%rsp) movdqa 0x20(%rsp), %xmm1 movdqa 0x10(%rsp), %xmm0 movdqa %xmm1, 0xe0(%rsp) movdqa %xmm0, 0xd0(%rsp) movdqa 0xe0(%rsp), %xmm0 movdqa 0xd0(%rsp), %xmm1 packssdw %xmm1, %xmm0 movq 0x80(%rsp), %rax movslq 0x5c(%rsp), %rcx shlq $0x4, %rcx addq %rcx, %rax movdqa %xmm0, (%rax) movl 0x5c(%rsp), %eax addl $0x1, %eax movl %eax, 0x5c(%rsp) jmp 0xa55b30 addq $0x128, %rsp # imm = 0x128 retq nopw %cs:(%rax,%rax)
/m-ab-s[P]aom/av1/encoder/x86/av1_fwd_txfm_sse2.h
store_16bit_to_32bit
static inline void store_16bit_to_32bit(__m128i a, int32_t *b) {
  // Sign-extends eight 16-bit lanes to 32 bits and stores them to the
  // 16-byte-aligned destination. Instead of the interleave-then-shift trick,
  // build the high halves directly from the per-lane sign mask; the result
  // is bit-identical.
  const __m128i sign = _mm_srai_epi16(a, 15);  // 0x0000 or 0xFFFF per lane
  const __m128i lo = _mm_unpacklo_epi16(a, sign);
  const __m128i hi = _mm_unpackhi_epi16(a, sign);
  _mm_store_si128((__m128i *)b, lo);
  _mm_store_si128((__m128i *)(b + 4), hi);
}
subq $0x98, %rsp movaps %xmm0, -0x30(%rsp) movq %rdi, -0x38(%rsp) movaps -0x30(%rsp), %xmm0 movaps %xmm0, -0x10(%rsp) movaps %xmm0, -0x20(%rsp) movaps -0x10(%rsp), %xmm0 movaps -0x20(%rsp), %xmm1 punpcklwd %xmm1, %xmm0 # xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1],xmm0[2],xmm1[2],xmm0[3],xmm1[3] movaps %xmm0, -0x50(%rsp) movaps -0x30(%rsp), %xmm0 movaps %xmm0, 0x10(%rsp) movaps %xmm0, (%rsp) movaps 0x10(%rsp), %xmm0 movaps (%rsp), %xmm1 punpckhwd %xmm1, %xmm0 # xmm0 = xmm0[4],xmm1[4],xmm0[5],xmm1[5],xmm0[6],xmm1[6],xmm0[7],xmm1[7] movdqa %xmm0, -0x60(%rsp) movdqa -0x50(%rsp), %xmm0 movdqa %xmm0, 0x50(%rsp) movl $0x10, 0x4c(%rsp) movdqa 0x50(%rsp), %xmm0 movl 0x4c(%rsp), %eax movd %eax, %xmm1 psrad %xmm1, %xmm0 movdqa %xmm0, -0x70(%rsp) movdqa -0x60(%rsp), %xmm0 movdqa %xmm0, 0x30(%rsp) movl $0x10, 0x2c(%rsp) movdqa 0x30(%rsp), %xmm0 movl 0x2c(%rsp), %eax movd %eax, %xmm1 psrad %xmm1, %xmm0 movdqa %xmm0, -0x80(%rsp) movq -0x38(%rsp), %rax movdqa -0x70(%rsp), %xmm0 movq %rax, 0x90(%rsp) movdqa %xmm0, 0x80(%rsp) movdqa 0x80(%rsp), %xmm0 movq 0x90(%rsp), %rax movdqa %xmm0, (%rax) movq -0x38(%rsp), %rax addq $0x10, %rax movdqa -0x80(%rsp), %xmm0 movq %rax, 0x78(%rsp) movdqa %xmm0, 0x60(%rsp) movdqa 0x60(%rsp), %xmm0 movq 0x78(%rsp), %rax movdqa %xmm0, (%rax) addq $0x98, %rsp retq nop
/m-ab-s[P]aom/av1/common/x86/av1_txfm_sse2.h
fadst8x8_new_sse2
// 8x8 forward ADST kernel (SSE2); each __m128i row holds 8 signed 16-bit
// coefficients. Transforms the 8 rows of |input| into |output| using the
// AV1 fADST8 dataflow: a sign/permute stage, two butterfly stages, and a
// fused final stage that writes the permuted results in place.
// NOTE(review): btf_16_sse2() is a project macro that appears to read the
// enclosing-scope names __rounding and cos_bit implicitly, so those local
// names (and __zero) are kept exactly as declared.
static inline void fadst8x8_new_sse2(const __m128i *input, __m128i *output,
                                     int8_t cos_bit) {
  const int32_t *cospi = cospi_arr(cos_bit);
  const __m128i __zero = _mm_setzero_si128();
  const __m128i __rounding = _mm_set1_epi32(1 << (cos_bit - 1));

  // Cosine-pair constants packed as interleaved 16-bit (even, odd)
  // multipliers for the pmaddwd-based butterflies.
  const __m128i cospi_p32_p32 = pair_set_epi16(cospi[32], cospi[32]);
  const __m128i cospi_p32_m32 = pair_set_epi16(cospi[32], -cospi[32]);
  const __m128i cospi_p16_p48 = pair_set_epi16(cospi[16], cospi[48]);
  const __m128i cospi_p48_m16 = pair_set_epi16(cospi[48], -cospi[16]);
  const __m128i cospi_m48_p16 = pair_set_epi16(-cospi[48], cospi[16]);
  const __m128i cospi_p04_p60 = pair_set_epi16(cospi[4], cospi[60]);
  const __m128i cospi_p60_m04 = pair_set_epi16(cospi[60], -cospi[4]);
  const __m128i cospi_p20_p44 = pair_set_epi16(cospi[20], cospi[44]);
  const __m128i cospi_p44_m20 = pair_set_epi16(cospi[44], -cospi[20]);
  const __m128i cospi_p36_p28 = pair_set_epi16(cospi[36], cospi[28]);
  const __m128i cospi_p28_m36 = pair_set_epi16(cospi[28], -cospi[36]);
  const __m128i cospi_p52_p12 = pair_set_epi16(cospi[52], cospi[12]);
  const __m128i cospi_p12_m52 = pair_set_epi16(cospi[12], -cospi[52]);

  // Stage 1: reorder the input rows, negating half of them
  // (saturating 0 - x).
  __m128i s1[8];
  s1[0] = input[0];
  s1[1] = _mm_subs_epi16(__zero, input[7]);
  s1[2] = _mm_subs_epi16(__zero, input[3]);
  s1[3] = input[4];
  s1[4] = _mm_subs_epi16(__zero, input[1]);
  s1[5] = input[6];
  s1[6] = input[2];
  s1[7] = _mm_subs_epi16(__zero, input[5]);

  // Stage 2: half-butterflies on rows (2,3) and (6,7); the rest pass through.
  __m128i s2[8];
  s2[0] = s1[0];
  s2[1] = s1[1];
  btf_16_sse2(cospi_p32_p32, cospi_p32_m32, s1[2], s1[3], s2[2], s2[3]);
  s2[4] = s1[4];
  s2[5] = s1[5];
  btf_16_sse2(cospi_p32_p32, cospi_p32_m32, s1[6], s1[7], s2[6], s2[7]);

  // Stage 3: saturating add/sub butterflies pairing rows i and i+2.
  __m128i s3[8];
  s3[0] = _mm_adds_epi16(s2[0], s2[2]);
  s3[2] = _mm_subs_epi16(s2[0], s2[2]);
  s3[1] = _mm_adds_epi16(s2[1], s2[3]);
  s3[3] = _mm_subs_epi16(s2[1], s2[3]);
  s3[4] = _mm_adds_epi16(s2[4], s2[6]);
  s3[6] = _mm_subs_epi16(s2[4], s2[6]);
  s3[5] = _mm_adds_epi16(s2[5], s2[7]);
  s3[7] = _mm_subs_epi16(s2[5], s2[7]);

  // Stage 4: rotate the upper half; lower half passes through.
  __m128i s4[8];
  s4[0] = s3[0];
  s4[1] = s3[1];
  s4[2] = s3[2];
  s4[3] = s3[3];
  btf_16_sse2(cospi_p16_p48, cospi_p48_m16, s3[4], s3[5], s4[4], s4[5]);
  btf_16_sse2(cospi_m48_p16, cospi_p16_p48, s3[6], s3[7], s4[6], s4[7]);

  // Stages 5-7 fused: add/sub into the final output permutation, then
  // rotate each output pair in place.
  output[7] = _mm_adds_epi16(s4[0], s4[4]);
  output[3] = _mm_subs_epi16(s4[0], s4[4]);
  output[0] = _mm_adds_epi16(s4[1], s4[5]);
  output[4] = _mm_subs_epi16(s4[1], s4[5]);
  output[5] = _mm_adds_epi16(s4[2], s4[6]);
  output[1] = _mm_subs_epi16(s4[2], s4[6]);
  output[2] = _mm_adds_epi16(s4[3], s4[7]);
  output[6] = _mm_subs_epi16(s4[3], s4[7]);
  btf_16_sse2(cospi_p04_p60, cospi_p60_m04, output[7], output[0], output[7],
              output[0]);
  btf_16_sse2(cospi_p20_p44, cospi_p44_m20, output[5], output[2], output[5],
              output[2]);
  btf_16_sse2(cospi_p36_p28, cospi_p28_m36, output[3], output[4], output[3],
              output[4]);
  btf_16_sse2(cospi_p52_p12, cospi_p12_m52, output[1], output[6], output[1],
              output[6]);
}
subq $0x1ea8, %rsp # imm = 0x1EA8 movb %dl, %al movq %rdi, 0xa10(%rsp) movq %rsi, 0xa08(%rsp) movb %al, 0xa07(%rsp) movsbl 0xa07(%rsp), %edi callq 0xa21c60 movq %rax, 0x9f8(%rsp) xorps %xmm0, %xmm0 movaps %xmm0, 0x1e90(%rsp) movaps 0x1e90(%rsp), %xmm0 movaps %xmm0, 0x9e0(%rsp) movb 0xa07(%rsp), %cl decb %cl movl $0x1, %eax shll %cl, %eax movl %eax, 0xa4c(%rsp) movl 0xa4c(%rsp), %eax movl %eax, 0x1cec(%rsp) movl %eax, 0x1ce8(%rsp) movl %eax, 0x1ce4(%rsp) movl %eax, 0x1ce0(%rsp) movd 0x1cec(%rsp), %xmm0 movd 0x1ce8(%rsp), %xmm1 punpckldq %xmm0, %xmm1 # xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1] movd 0x1ce4(%rsp), %xmm2 movd 0x1ce0(%rsp), %xmm0 punpckldq %xmm2, %xmm0 # xmm0 = xmm0[0],xmm2[0],xmm0[1],xmm2[1] punpcklqdq %xmm1, %xmm0 # xmm0 = xmm0[0],xmm1[0] movaps %xmm0, 0x1cd0(%rsp) movaps 0x1cd0(%rsp), %xmm0 movaps %xmm0, 0x9d0(%rsp) movq 0x9f8(%rsp), %rax movzwl 0x80(%rax), %eax movl %eax, %ecx shll $0x10, %ecx orl %ecx, %eax movl %eax, 0xa48(%rsp) movl 0xa48(%rsp), %eax movl %eax, 0x1d0c(%rsp) movl %eax, 0x1d08(%rsp) movl %eax, 0x1d04(%rsp) movl %eax, 0x1d00(%rsp) movd 0x1d0c(%rsp), %xmm0 movd 0x1d08(%rsp), %xmm1 punpckldq %xmm0, %xmm1 # xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1] movd 0x1d04(%rsp), %xmm2 movd 0x1d00(%rsp), %xmm0 punpckldq %xmm2, %xmm0 # xmm0 = xmm0[0],xmm2[0],xmm0[1],xmm2[1] punpcklqdq %xmm1, %xmm0 # xmm0 = xmm0[0],xmm1[0] movaps %xmm0, 0x1cf0(%rsp) movaps 0x1cf0(%rsp), %xmm0 movaps %xmm0, 0x9c0(%rsp) movq 0x9f8(%rsp), %rax movl 0x80(%rax), %ecx movw %cx, %ax movzwl %ax, %eax shll $0x10, %ecx subl %ecx, %eax movl %eax, 0xa44(%rsp) movl 0xa44(%rsp), %eax movl %eax, 0x1d2c(%rsp) movl %eax, 0x1d28(%rsp) movl %eax, 0x1d24(%rsp) movl %eax, 0x1d20(%rsp) movd 0x1d2c(%rsp), %xmm0 movd 0x1d28(%rsp), %xmm1 punpckldq %xmm0, %xmm1 # xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1] movd 0x1d24(%rsp), %xmm2 movd 0x1d20(%rsp), %xmm0 punpckldq %xmm2, %xmm0 # xmm0 = xmm0[0],xmm2[0],xmm0[1],xmm2[1] punpcklqdq %xmm1, %xmm0 # xmm0 = xmm0[0],xmm1[0] movaps %xmm0, 0x1d10(%rsp) movaps 
0x1d10(%rsp), %xmm0 movaps %xmm0, 0x9b0(%rsp) movq 0x9f8(%rsp), %rcx movzwl 0x40(%rcx), %eax movl 0xc0(%rcx), %ecx shll $0x10, %ecx orl %ecx, %eax movl %eax, 0xa40(%rsp) movl 0xa40(%rsp), %eax movl %eax, 0x1d4c(%rsp) movl %eax, 0x1d48(%rsp) movl %eax, 0x1d44(%rsp) movl %eax, 0x1d40(%rsp) movd 0x1d4c(%rsp), %xmm0 movd 0x1d48(%rsp), %xmm1 punpckldq %xmm0, %xmm1 # xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1] movd 0x1d44(%rsp), %xmm2 movd 0x1d40(%rsp), %xmm0 punpckldq %xmm2, %xmm0 # xmm0 = xmm0[0],xmm2[0],xmm0[1],xmm2[1] punpcklqdq %xmm1, %xmm0 # xmm0 = xmm0[0],xmm1[0] movaps %xmm0, 0x1d30(%rsp) movaps 0x1d30(%rsp), %xmm0 movaps %xmm0, 0x9a0(%rsp) movq 0x9f8(%rsp), %rcx movzwl 0xc0(%rcx), %eax movl 0x40(%rcx), %ecx shll $0x10, %ecx subl %ecx, %eax movl %eax, 0xa3c(%rsp) movl 0xa3c(%rsp), %eax movl %eax, 0x1d6c(%rsp) movl %eax, 0x1d68(%rsp) movl %eax, 0x1d64(%rsp) movl %eax, 0x1d60(%rsp) movd 0x1d6c(%rsp), %xmm0 movd 0x1d68(%rsp), %xmm1 punpckldq %xmm0, %xmm1 # xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1] movd 0x1d64(%rsp), %xmm2 movd 0x1d60(%rsp), %xmm0 punpckldq %xmm2, %xmm0 # xmm0 = xmm0[0],xmm2[0],xmm0[1],xmm2[1] punpcklqdq %xmm1, %xmm0 # xmm0 = xmm0[0],xmm1[0] movaps %xmm0, 0x1d50(%rsp) movaps 0x1d50(%rsp), %xmm0 movaps %xmm0, 0x990(%rsp) movq 0x9f8(%rsp), %rax movl 0x40(%rax), %ecx movl 0xc0(%rax), %eax negl %eax movzwl %ax, %eax shll $0x10, %ecx orl %ecx, %eax movl %eax, 0xa38(%rsp) movl 0xa38(%rsp), %eax movl %eax, 0x1d8c(%rsp) movl %eax, 0x1d88(%rsp) movl %eax, 0x1d84(%rsp) movl %eax, 0x1d80(%rsp) movd 0x1d8c(%rsp), %xmm0 movd 0x1d88(%rsp), %xmm1 punpckldq %xmm0, %xmm1 # xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1] movd 0x1d84(%rsp), %xmm2 movd 0x1d80(%rsp), %xmm0 punpckldq %xmm2, %xmm0 # xmm0 = xmm0[0],xmm2[0],xmm0[1],xmm2[1] punpcklqdq %xmm1, %xmm0 # xmm0 = xmm0[0],xmm1[0] movaps %xmm0, 0x1d70(%rsp) movaps 0x1d70(%rsp), %xmm0 movaps %xmm0, 0x980(%rsp) movq 0x9f8(%rsp), %rcx movzwl 0x10(%rcx), %eax movl 0xf0(%rcx), %ecx shll $0x10, %ecx orl %ecx, %eax movl %eax, 0xa34(%rsp) 
movl 0xa34(%rsp), %eax movl %eax, 0x1dac(%rsp) movl %eax, 0x1da8(%rsp) movl %eax, 0x1da4(%rsp) movl %eax, 0x1da0(%rsp) movd 0x1dac(%rsp), %xmm0 movd 0x1da8(%rsp), %xmm1 punpckldq %xmm0, %xmm1 # xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1] movd 0x1da4(%rsp), %xmm2 movd 0x1da0(%rsp), %xmm0 punpckldq %xmm2, %xmm0 # xmm0 = xmm0[0],xmm2[0],xmm0[1],xmm2[1] punpcklqdq %xmm1, %xmm0 # xmm0 = xmm0[0],xmm1[0] movaps %xmm0, 0x1d90(%rsp) movaps 0x1d90(%rsp), %xmm0 movaps %xmm0, 0x970(%rsp) movq 0x9f8(%rsp), %rcx movzwl 0xf0(%rcx), %eax movl 0x10(%rcx), %ecx shll $0x10, %ecx subl %ecx, %eax movl %eax, 0xa30(%rsp) movl 0xa30(%rsp), %eax movl %eax, 0x1dcc(%rsp) movl %eax, 0x1dc8(%rsp) movl %eax, 0x1dc4(%rsp) movl %eax, 0x1dc0(%rsp) movd 0x1dcc(%rsp), %xmm0 movd 0x1dc8(%rsp), %xmm1 punpckldq %xmm0, %xmm1 # xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1] movd 0x1dc4(%rsp), %xmm2 movd 0x1dc0(%rsp), %xmm0 punpckldq %xmm2, %xmm0 # xmm0 = xmm0[0],xmm2[0],xmm0[1],xmm2[1] punpcklqdq %xmm1, %xmm0 # xmm0 = xmm0[0],xmm1[0] movaps %xmm0, 0x1db0(%rsp) movaps 0x1db0(%rsp), %xmm0 movaps %xmm0, 0x960(%rsp) movq 0x9f8(%rsp), %rcx movzwl 0x50(%rcx), %eax movl 0xb0(%rcx), %ecx shll $0x10, %ecx orl %ecx, %eax movl %eax, 0xa2c(%rsp) movl 0xa2c(%rsp), %eax movl %eax, 0x1dec(%rsp) movl %eax, 0x1de8(%rsp) movl %eax, 0x1de4(%rsp) movl %eax, 0x1de0(%rsp) movd 0x1dec(%rsp), %xmm0 movd 0x1de8(%rsp), %xmm1 punpckldq %xmm0, %xmm1 # xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1] movd 0x1de4(%rsp), %xmm2 movd 0x1de0(%rsp), %xmm0 punpckldq %xmm2, %xmm0 # xmm0 = xmm0[0],xmm2[0],xmm0[1],xmm2[1] punpcklqdq %xmm1, %xmm0 # xmm0 = xmm0[0],xmm1[0] movaps %xmm0, 0x1dd0(%rsp) movaps 0x1dd0(%rsp), %xmm0 movaps %xmm0, 0x950(%rsp) movq 0x9f8(%rsp), %rcx movzwl 0xb0(%rcx), %eax movl 0x50(%rcx), %ecx shll $0x10, %ecx subl %ecx, %eax movl %eax, 0xa28(%rsp) movl 0xa28(%rsp), %eax movl %eax, 0x1e0c(%rsp) movl %eax, 0x1e08(%rsp) movl %eax, 0x1e04(%rsp) movl %eax, 0x1e00(%rsp) movd 0x1e0c(%rsp), %xmm0 movd 0x1e08(%rsp), %xmm1 punpckldq %xmm0, %xmm1 # 
xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1] movd 0x1e04(%rsp), %xmm2 movd 0x1e00(%rsp), %xmm0 punpckldq %xmm2, %xmm0 # xmm0 = xmm0[0],xmm2[0],xmm0[1],xmm2[1] punpcklqdq %xmm1, %xmm0 # xmm0 = xmm0[0],xmm1[0] movaps %xmm0, 0x1df0(%rsp) movaps 0x1df0(%rsp), %xmm0 movaps %xmm0, 0x940(%rsp) movq 0x9f8(%rsp), %rcx movzwl 0x90(%rcx), %eax movl 0x70(%rcx), %ecx shll $0x10, %ecx orl %ecx, %eax movl %eax, 0xa24(%rsp) movl 0xa24(%rsp), %eax movl %eax, 0x1e2c(%rsp) movl %eax, 0x1e28(%rsp) movl %eax, 0x1e24(%rsp) movl %eax, 0x1e20(%rsp) movd 0x1e2c(%rsp), %xmm0 movd 0x1e28(%rsp), %xmm1 punpckldq %xmm0, %xmm1 # xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1] movd 0x1e24(%rsp), %xmm2 movd 0x1e20(%rsp), %xmm0 punpckldq %xmm2, %xmm0 # xmm0 = xmm0[0],xmm2[0],xmm0[1],xmm2[1] punpcklqdq %xmm1, %xmm0 # xmm0 = xmm0[0],xmm1[0] movaps %xmm0, 0x1e10(%rsp) movaps 0x1e10(%rsp), %xmm0 movaps %xmm0, 0x930(%rsp) movq 0x9f8(%rsp), %rcx movzwl 0x70(%rcx), %eax movl 0x90(%rcx), %ecx shll $0x10, %ecx subl %ecx, %eax movl %eax, 0xa20(%rsp) movl 0xa20(%rsp), %eax movl %eax, 0x1e4c(%rsp) movl %eax, 0x1e48(%rsp) movl %eax, 0x1e44(%rsp) movl %eax, 0x1e40(%rsp) movd 0x1e4c(%rsp), %xmm0 movd 0x1e48(%rsp), %xmm1 punpckldq %xmm0, %xmm1 # xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1] movd 0x1e44(%rsp), %xmm2 movd 0x1e40(%rsp), %xmm0 punpckldq %xmm2, %xmm0 # xmm0 = xmm0[0],xmm2[0],xmm0[1],xmm2[1] punpcklqdq %xmm1, %xmm0 # xmm0 = xmm0[0],xmm1[0] movaps %xmm0, 0x1e30(%rsp) movaps 0x1e30(%rsp), %xmm0 movaps %xmm0, 0x920(%rsp) movq 0x9f8(%rsp), %rcx movzwl 0xd0(%rcx), %eax movl 0x30(%rcx), %ecx shll $0x10, %ecx orl %ecx, %eax movl %eax, 0xa1c(%rsp) movl 0xa1c(%rsp), %eax movl %eax, 0x1e6c(%rsp) movl %eax, 0x1e68(%rsp) movl %eax, 0x1e64(%rsp) movl %eax, 0x1e60(%rsp) movd 0x1e6c(%rsp), %xmm0 movd 0x1e68(%rsp), %xmm1 punpckldq %xmm0, %xmm1 # xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1] movd 0x1e64(%rsp), %xmm2 movd 0x1e60(%rsp), %xmm0 punpckldq %xmm2, %xmm0 # xmm0 = xmm0[0],xmm2[0],xmm0[1],xmm2[1] punpcklqdq %xmm1, %xmm0 # xmm0 = 
xmm0[0],xmm1[0] movaps %xmm0, 0x1e50(%rsp) movaps 0x1e50(%rsp), %xmm0 movaps %xmm0, 0x910(%rsp) movq 0x9f8(%rsp), %rcx movzwl 0x30(%rcx), %eax movl 0xd0(%rcx), %ecx shll $0x10, %ecx subl %ecx, %eax movl %eax, 0xa18(%rsp) movl 0xa18(%rsp), %eax movl %eax, 0x1e8c(%rsp) movl %eax, 0x1e88(%rsp) movl %eax, 0x1e84(%rsp) movl %eax, 0x1e80(%rsp) movd 0x1e8c(%rsp), %xmm0 movd 0x1e88(%rsp), %xmm1 punpckldq %xmm0, %xmm1 # xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1] movd 0x1e84(%rsp), %xmm2 movd 0x1e80(%rsp), %xmm0 punpckldq %xmm2, %xmm0 # xmm0 = xmm0[0],xmm2[0],xmm0[1],xmm2[1] punpcklqdq %xmm1, %xmm0 # xmm0 = xmm0[0],xmm1[0] movdqa %xmm0, 0x1e70(%rsp) movdqa 0x1e70(%rsp), %xmm0 movdqa %xmm0, 0x900(%rsp) movq 0xa10(%rsp), %rax movdqa (%rax), %xmm0 movdqa %xmm0, 0x880(%rsp) movdqa 0x9e0(%rsp), %xmm1 movq 0xa10(%rsp), %rax movdqa 0x70(%rax), %xmm0 movdqa %xmm1, 0xcc0(%rsp) movdqa %xmm0, 0xcb0(%rsp) movdqa 0xcc0(%rsp), %xmm0 movdqa 0xcb0(%rsp), %xmm1 psubsw %xmm1, %xmm0 movdqa %xmm0, 0x890(%rsp) movdqa 0x9e0(%rsp), %xmm1 movq 0xa10(%rsp), %rax movdqa 0x30(%rax), %xmm0 movdqa %xmm1, 0xca0(%rsp) movdqa %xmm0, 0xc90(%rsp) movdqa 0xca0(%rsp), %xmm0 movdqa 0xc90(%rsp), %xmm1 psubsw %xmm1, %xmm0 movdqa %xmm0, 0x8a0(%rsp) movq 0xa10(%rsp), %rax movdqa 0x40(%rax), %xmm0 movdqa %xmm0, 0x8b0(%rsp) movdqa 0x9e0(%rsp), %xmm1 movq 0xa10(%rsp), %rax movdqa 0x10(%rax), %xmm0 movdqa %xmm1, 0xc80(%rsp) movdqa %xmm0, 0xc70(%rsp) movdqa 0xc80(%rsp), %xmm0 movdqa 0xc70(%rsp), %xmm1 psubsw %xmm1, %xmm0 movdqa %xmm0, 0x8c0(%rsp) movq 0xa10(%rsp), %rax movdqa 0x60(%rax), %xmm0 movdqa %xmm0, 0x8d0(%rsp) movq 0xa10(%rsp), %rax movdqa 0x20(%rax), %xmm0 movdqa %xmm0, 0x8e0(%rsp) movdqa 0x9e0(%rsp), %xmm1 movq 0xa10(%rsp), %rax movdqa 0x50(%rax), %xmm0 movdqa %xmm1, 0xc60(%rsp) movdqa %xmm0, 0xc50(%rsp) movdqa 0xc60(%rsp), %xmm0 movdqa 0xc50(%rsp), %xmm1 psubsw %xmm1, %xmm0 movdqa %xmm0, 0x8f0(%rsp) movdqa 0x880(%rsp), %xmm0 movdqa %xmm0, 0x800(%rsp) movdqa 0x890(%rsp), %xmm0 movdqa %xmm0, 0x810(%rsp) movaps 
0x8a0(%rsp), %xmm1 movaps 0x8b0(%rsp), %xmm0 movaps %xmm1, 0xdc0(%rsp) movaps %xmm0, 0xdb0(%rsp) movaps 0xdc0(%rsp), %xmm0 movaps 0xdb0(%rsp), %xmm1 punpcklwd %xmm1, %xmm0 # xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1],xmm0[2],xmm1[2],xmm0[3],xmm1[3] movaps %xmm0, 0x7f0(%rsp) movaps 0x8a0(%rsp), %xmm1 movaps 0x8b0(%rsp), %xmm0 movaps %xmm1, 0xec0(%rsp) movaps %xmm0, 0xeb0(%rsp) movaps 0xec0(%rsp), %xmm0 movaps 0xeb0(%rsp), %xmm1 punpckhwd %xmm1, %xmm0 # xmm0 = xmm0[4],xmm1[4],xmm0[5],xmm1[5],xmm0[6],xmm1[6],xmm0[7],xmm1[7] movdqa %xmm0, 0x7e0(%rsp) movdqa 0x7f0(%rsp), %xmm1 movdqa 0x9c0(%rsp), %xmm0 movdqa %xmm1, 0x12c0(%rsp) movdqa %xmm0, 0x12b0(%rsp) movdqa 0x12c0(%rsp), %xmm0 movdqa 0x12b0(%rsp), %xmm1 pmaddwd %xmm1, %xmm0 movdqa %xmm0, 0x7d0(%rsp) movdqa 0x7e0(%rsp), %xmm1 movdqa 0x9c0(%rsp), %xmm0 movdqa %xmm1, 0x12a0(%rsp) movdqa %xmm0, 0x1290(%rsp) movdqa 0x12a0(%rsp), %xmm0 movdqa 0x1290(%rsp), %xmm1 pmaddwd %xmm1, %xmm0 movdqa %xmm0, 0x7c0(%rsp) movdqa 0x7f0(%rsp), %xmm1 movdqa 0x9b0(%rsp), %xmm0 movdqa %xmm1, 0x1280(%rsp) movdqa %xmm0, 0x1270(%rsp) movdqa 0x1280(%rsp), %xmm0 movdqa 0x1270(%rsp), %xmm1 pmaddwd %xmm1, %xmm0 movdqa %xmm0, 0x7b0(%rsp) movdqa 0x7e0(%rsp), %xmm1 movdqa 0x9b0(%rsp), %xmm0 movdqa %xmm1, 0x1260(%rsp) movdqa %xmm0, 0x1250(%rsp) movdqa 0x1260(%rsp), %xmm0 movdqa 0x1250(%rsp), %xmm1 pmaddwd %xmm1, %xmm0 movdqa %xmm0, 0x7a0(%rsp) movdqa 0x7d0(%rsp), %xmm1 movdqa 0x9d0(%rsp), %xmm0 movdqa %xmm1, 0x16c0(%rsp) movdqa %xmm0, 0x16b0(%rsp) movdqa 0x16c0(%rsp), %xmm0 movdqa 0x16b0(%rsp), %xmm1 paddd %xmm1, %xmm0 movdqa %xmm0, 0x790(%rsp) movdqa 0x7c0(%rsp), %xmm1 movdqa 0x9d0(%rsp), %xmm0 movdqa %xmm1, 0x16a0(%rsp) movdqa %xmm0, 0x1690(%rsp) movdqa 0x16a0(%rsp), %xmm0 movdqa 0x1690(%rsp), %xmm1 paddd %xmm1, %xmm0 movdqa %xmm0, 0x780(%rsp) movdqa 0x7b0(%rsp), %xmm1 movdqa 0x9d0(%rsp), %xmm0 movdqa %xmm1, 0x1680(%rsp) movdqa %xmm0, 0x1670(%rsp) movdqa 0x1680(%rsp), %xmm0 movdqa 0x1670(%rsp), %xmm1 paddd %xmm1, %xmm0 movdqa %xmm0, 0x770(%rsp) movdqa 
0x7a0(%rsp), %xmm1 movdqa 0x9d0(%rsp), %xmm0 movdqa %xmm1, 0x1660(%rsp) movdqa %xmm0, 0x1650(%rsp) movdqa 0x1660(%rsp), %xmm0 movdqa 0x1650(%rsp), %xmm1 paddd %xmm1, %xmm0 movdqa %xmm0, 0x760(%rsp) movdqa 0x790(%rsp), %xmm0 movsbl 0xa07(%rsp), %eax movdqa %xmm0, 0x1ac0(%rsp) movl %eax, 0x1abc(%rsp) movdqa 0x1ac0(%rsp), %xmm0 movl 0x1abc(%rsp), %eax movd %eax, %xmm1 psrad %xmm1, %xmm0 movdqa %xmm0, 0x750(%rsp) movdqa 0x780(%rsp), %xmm0 movsbl 0xa07(%rsp), %eax movdqa %xmm0, 0x1aa0(%rsp) movl %eax, 0x1a9c(%rsp) movdqa 0x1aa0(%rsp), %xmm0 movl 0x1a9c(%rsp), %eax movd %eax, %xmm1 psrad %xmm1, %xmm0 movdqa %xmm0, 0x740(%rsp) movdqa 0x770(%rsp), %xmm0 movsbl 0xa07(%rsp), %eax movdqa %xmm0, 0x1a80(%rsp) movl %eax, 0x1a7c(%rsp) movdqa 0x1a80(%rsp), %xmm0 movl 0x1a7c(%rsp), %eax movd %eax, %xmm1 psrad %xmm1, %xmm0 movdqa %xmm0, 0x730(%rsp) movdqa 0x760(%rsp), %xmm0 movsbl 0xa07(%rsp), %eax movdqa %xmm0, 0x1a60(%rsp) movl %eax, 0x1a5c(%rsp) movdqa 0x1a60(%rsp), %xmm0 movl 0x1a5c(%rsp), %eax movd %eax, %xmm1 psrad %xmm1, %xmm0 movdqa %xmm0, 0x720(%rsp) movdqa 0x750(%rsp), %xmm1 movdqa 0x740(%rsp), %xmm0 movdqa %xmm1, 0x1cc0(%rsp) movdqa %xmm0, 0x1cb0(%rsp) movdqa 0x1cc0(%rsp), %xmm0 movdqa 0x1cb0(%rsp), %xmm1 packssdw %xmm1, %xmm0 movdqa %xmm0, 0x820(%rsp) movdqa 0x730(%rsp), %xmm1 movdqa 0x720(%rsp), %xmm0 movdqa %xmm1, 0x1ca0(%rsp) movdqa %xmm0, 0x1c90(%rsp) movdqa 0x1ca0(%rsp), %xmm0 movdqa 0x1c90(%rsp), %xmm1 packssdw %xmm1, %xmm0 movdqa %xmm0, 0x830(%rsp) movdqa 0x8c0(%rsp), %xmm0 movdqa %xmm0, 0x840(%rsp) movdqa 0x8d0(%rsp), %xmm0 movdqa %xmm0, 0x850(%rsp) movaps 0x8e0(%rsp), %xmm1 movaps 0x8f0(%rsp), %xmm0 movaps %xmm1, 0xda0(%rsp) movaps %xmm0, 0xd90(%rsp) movaps 0xda0(%rsp), %xmm0 movaps 0xd90(%rsp), %xmm1 punpcklwd %xmm1, %xmm0 # xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1],xmm0[2],xmm1[2],xmm0[3],xmm1[3] movaps %xmm0, 0x710(%rsp) movaps 0x8e0(%rsp), %xmm1 movaps 0x8f0(%rsp), %xmm0 movaps %xmm1, 0xea0(%rsp) movaps %xmm0, 0xe90(%rsp) movaps 0xea0(%rsp), %xmm0 movaps 
0xe90(%rsp), %xmm1 punpckhwd %xmm1, %xmm0 # xmm0 = xmm0[4],xmm1[4],xmm0[5],xmm1[5],xmm0[6],xmm1[6],xmm0[7],xmm1[7] movdqa %xmm0, 0x700(%rsp) movdqa 0x710(%rsp), %xmm1 movdqa 0x9c0(%rsp), %xmm0 movdqa %xmm1, 0x1240(%rsp) movdqa %xmm0, 0x1230(%rsp) movdqa 0x1240(%rsp), %xmm0 movdqa 0x1230(%rsp), %xmm1 pmaddwd %xmm1, %xmm0 movdqa %xmm0, 0x6f0(%rsp) movdqa 0x700(%rsp), %xmm1 movdqa 0x9c0(%rsp), %xmm0 movdqa %xmm1, 0x1220(%rsp) movdqa %xmm0, 0x1210(%rsp) movdqa 0x1220(%rsp), %xmm0 movdqa 0x1210(%rsp), %xmm1 pmaddwd %xmm1, %xmm0 movdqa %xmm0, 0x6e0(%rsp) movdqa 0x710(%rsp), %xmm1 movdqa 0x9b0(%rsp), %xmm0 movdqa %xmm1, 0x1200(%rsp) movdqa %xmm0, 0x11f0(%rsp) movdqa 0x1200(%rsp), %xmm0 movdqa 0x11f0(%rsp), %xmm1 pmaddwd %xmm1, %xmm0 movdqa %xmm0, 0x6d0(%rsp) movdqa 0x700(%rsp), %xmm1 movdqa 0x9b0(%rsp), %xmm0 movdqa %xmm1, 0x11e0(%rsp) movdqa %xmm0, 0x11d0(%rsp) movdqa 0x11e0(%rsp), %xmm0 movdqa 0x11d0(%rsp), %xmm1 pmaddwd %xmm1, %xmm0 movdqa %xmm0, 0x6c0(%rsp) movdqa 0x6f0(%rsp), %xmm1 movdqa 0x9d0(%rsp), %xmm0 movdqa %xmm1, 0x1640(%rsp) movdqa %xmm0, 0x1630(%rsp) movdqa 0x1640(%rsp), %xmm0 movdqa 0x1630(%rsp), %xmm1 paddd %xmm1, %xmm0 movdqa %xmm0, 0x6b0(%rsp) movdqa 0x6e0(%rsp), %xmm1 movdqa 0x9d0(%rsp), %xmm0 movdqa %xmm1, 0x1620(%rsp) movdqa %xmm0, 0x1610(%rsp) movdqa 0x1620(%rsp), %xmm0 movdqa 0x1610(%rsp), %xmm1 paddd %xmm1, %xmm0 movdqa %xmm0, 0x6a0(%rsp) movdqa 0x6d0(%rsp), %xmm1 movdqa 0x9d0(%rsp), %xmm0 movdqa %xmm1, 0x1600(%rsp) movdqa %xmm0, 0x15f0(%rsp) movdqa 0x1600(%rsp), %xmm0 movdqa 0x15f0(%rsp), %xmm1 paddd %xmm1, %xmm0 movdqa %xmm0, 0x690(%rsp) movdqa 0x6c0(%rsp), %xmm1 movdqa 0x9d0(%rsp), %xmm0 movdqa %xmm1, 0x15e0(%rsp) movdqa %xmm0, 0x15d0(%rsp) movdqa 0x15e0(%rsp), %xmm0 movdqa 0x15d0(%rsp), %xmm1 paddd %xmm1, %xmm0 movdqa %xmm0, 0x680(%rsp) movdqa 0x6b0(%rsp), %xmm0 movsbl 0xa07(%rsp), %eax movdqa %xmm0, 0x1a40(%rsp) movl %eax, 0x1a3c(%rsp) movdqa 0x1a40(%rsp), %xmm0 movl 0x1a3c(%rsp), %eax movd %eax, %xmm1 psrad %xmm1, %xmm0 movdqa %xmm0, 
0x670(%rsp) movdqa 0x6a0(%rsp), %xmm0 movsbl 0xa07(%rsp), %eax movdqa %xmm0, 0x1a20(%rsp) movl %eax, 0x1a1c(%rsp) movdqa 0x1a20(%rsp), %xmm0 movl 0x1a1c(%rsp), %eax movd %eax, %xmm1 psrad %xmm1, %xmm0 movdqa %xmm0, 0x660(%rsp) movdqa 0x690(%rsp), %xmm0 movsbl 0xa07(%rsp), %eax movdqa %xmm0, 0x1a00(%rsp) movl %eax, 0x19fc(%rsp) movdqa 0x1a00(%rsp), %xmm0 movl 0x19fc(%rsp), %eax movd %eax, %xmm1 psrad %xmm1, %xmm0 movdqa %xmm0, 0x650(%rsp) movdqa 0x680(%rsp), %xmm0 movsbl 0xa07(%rsp), %eax movdqa %xmm0, 0x19e0(%rsp) movl %eax, 0x19dc(%rsp) movdqa 0x19e0(%rsp), %xmm0 movl 0x19dc(%rsp), %eax movd %eax, %xmm1 psrad %xmm1, %xmm0 movdqa %xmm0, 0x640(%rsp) movdqa 0x670(%rsp), %xmm1 movdqa 0x660(%rsp), %xmm0 movdqa %xmm1, 0x1c80(%rsp) movdqa %xmm0, 0x1c70(%rsp) movdqa 0x1c80(%rsp), %xmm0 movdqa 0x1c70(%rsp), %xmm1 packssdw %xmm1, %xmm0 movdqa %xmm0, 0x860(%rsp) movdqa 0x650(%rsp), %xmm1 movdqa 0x640(%rsp), %xmm0 movdqa %xmm1, 0x1c60(%rsp) movdqa %xmm0, 0x1c50(%rsp) movdqa 0x1c60(%rsp), %xmm0 movdqa 0x1c50(%rsp), %xmm1 packssdw %xmm1, %xmm0 movdqa %xmm0, 0x870(%rsp) movdqa 0x800(%rsp), %xmm1 movdqa 0x820(%rsp), %xmm0 movdqa %xmm1, 0xb40(%rsp) movdqa %xmm0, 0xb30(%rsp) movdqa 0xb40(%rsp), %xmm0 movdqa 0xb30(%rsp), %xmm1 paddsw %xmm1, %xmm0 movdqa %xmm0, 0x5c0(%rsp) movdqa 0x800(%rsp), %xmm1 movdqa 0x820(%rsp), %xmm0 movdqa %xmm1, 0xc40(%rsp) movdqa %xmm0, 0xc30(%rsp) movdqa 0xc40(%rsp), %xmm0 movdqa 0xc30(%rsp), %xmm1 psubsw %xmm1, %xmm0 movdqa %xmm0, 0x5e0(%rsp) movdqa 0x810(%rsp), %xmm1 movdqa 0x830(%rsp), %xmm0 movdqa %xmm1, 0xb20(%rsp) movdqa %xmm0, 0xb10(%rsp) movdqa 0xb20(%rsp), %xmm0 movdqa 0xb10(%rsp), %xmm1 paddsw %xmm1, %xmm0 movdqa %xmm0, 0x5d0(%rsp) movdqa 0x810(%rsp), %xmm1 movdqa 0x830(%rsp), %xmm0 movdqa %xmm1, 0xc20(%rsp) movdqa %xmm0, 0xc10(%rsp) movdqa 0xc20(%rsp), %xmm0 movdqa 0xc10(%rsp), %xmm1 psubsw %xmm1, %xmm0 movdqa %xmm0, 0x5f0(%rsp) movdqa 0x840(%rsp), %xmm1 movdqa 0x860(%rsp), %xmm0 movdqa %xmm1, 0xb00(%rsp) movdqa %xmm0, 0xaf0(%rsp) movdqa 
0xb00(%rsp), %xmm0 movdqa 0xaf0(%rsp), %xmm1 paddsw %xmm1, %xmm0 movdqa %xmm0, 0x600(%rsp) movdqa 0x840(%rsp), %xmm1 movdqa 0x860(%rsp), %xmm0 movdqa %xmm1, 0xc00(%rsp) movdqa %xmm0, 0xbf0(%rsp) movdqa 0xc00(%rsp), %xmm0 movdqa 0xbf0(%rsp), %xmm1 psubsw %xmm1, %xmm0 movdqa %xmm0, 0x620(%rsp) movdqa 0x850(%rsp), %xmm1 movdqa 0x870(%rsp), %xmm0 movdqa %xmm1, 0xae0(%rsp) movdqa %xmm0, 0xad0(%rsp) movdqa 0xae0(%rsp), %xmm0 movdqa 0xad0(%rsp), %xmm1 paddsw %xmm1, %xmm0 movdqa %xmm0, 0x610(%rsp) movdqa 0x850(%rsp), %xmm1 movdqa 0x870(%rsp), %xmm0 movdqa %xmm1, 0xbe0(%rsp) movdqa %xmm0, 0xbd0(%rsp) movdqa 0xbe0(%rsp), %xmm0 movdqa 0xbd0(%rsp), %xmm1 psubsw %xmm1, %xmm0 movdqa %xmm0, 0x630(%rsp) movdqa 0x5c0(%rsp), %xmm0 movdqa %xmm0, 0x540(%rsp) movdqa 0x5d0(%rsp), %xmm0 movdqa %xmm0, 0x550(%rsp) movdqa 0x5e0(%rsp), %xmm0 movdqa %xmm0, 0x560(%rsp) movdqa 0x5f0(%rsp), %xmm0 movdqa %xmm0, 0x570(%rsp) movaps 0x600(%rsp), %xmm1 movaps 0x610(%rsp), %xmm0 movaps %xmm1, 0xd80(%rsp) movaps %xmm0, 0xd70(%rsp) movaps 0xd80(%rsp), %xmm0 movaps 0xd70(%rsp), %xmm1 punpcklwd %xmm1, %xmm0 # xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1],xmm0[2],xmm1[2],xmm0[3],xmm1[3] movaps %xmm0, 0x530(%rsp) movaps 0x600(%rsp), %xmm1 movaps 0x610(%rsp), %xmm0 movaps %xmm1, 0xe80(%rsp) movaps %xmm0, 0xe70(%rsp) movaps 0xe80(%rsp), %xmm0 movaps 0xe70(%rsp), %xmm1 punpckhwd %xmm1, %xmm0 # xmm0 = xmm0[4],xmm1[4],xmm0[5],xmm1[5],xmm0[6],xmm1[6],xmm0[7],xmm1[7] movdqa %xmm0, 0x520(%rsp) movdqa 0x530(%rsp), %xmm1 movdqa 0x9a0(%rsp), %xmm0 movdqa %xmm1, 0x11c0(%rsp) movdqa %xmm0, 0x11b0(%rsp) movdqa 0x11c0(%rsp), %xmm0 movdqa 0x11b0(%rsp), %xmm1 pmaddwd %xmm1, %xmm0 movdqa %xmm0, 0x510(%rsp) movdqa 0x520(%rsp), %xmm1 movdqa 0x9a0(%rsp), %xmm0 movdqa %xmm1, 0x11a0(%rsp) movdqa %xmm0, 0x1190(%rsp) movdqa 0x11a0(%rsp), %xmm0 movdqa 0x1190(%rsp), %xmm1 pmaddwd %xmm1, %xmm0 movdqa %xmm0, 0x500(%rsp) movdqa 0x530(%rsp), %xmm1 movdqa 0x990(%rsp), %xmm0 movdqa %xmm1, 0x1180(%rsp) movdqa %xmm0, 0x1170(%rsp) movdqa 
0x1180(%rsp), %xmm0 movdqa 0x1170(%rsp), %xmm1 pmaddwd %xmm1, %xmm0 movdqa %xmm0, 0x4f0(%rsp) movdqa 0x520(%rsp), %xmm1 movdqa 0x990(%rsp), %xmm0 movdqa %xmm1, 0x1160(%rsp) movdqa %xmm0, 0x1150(%rsp) movdqa 0x1160(%rsp), %xmm0 movdqa 0x1150(%rsp), %xmm1 pmaddwd %xmm1, %xmm0 movdqa %xmm0, 0x4e0(%rsp) movdqa 0x510(%rsp), %xmm1 movdqa 0x9d0(%rsp), %xmm0 movdqa %xmm1, 0x15c0(%rsp) movdqa %xmm0, 0x15b0(%rsp) movdqa 0x15c0(%rsp), %xmm0 movdqa 0x15b0(%rsp), %xmm1 paddd %xmm1, %xmm0 movdqa %xmm0, 0x4d0(%rsp) movdqa 0x500(%rsp), %xmm1 movdqa 0x9d0(%rsp), %xmm0 movdqa %xmm1, 0x15a0(%rsp) movdqa %xmm0, 0x1590(%rsp) movdqa 0x15a0(%rsp), %xmm0 movdqa 0x1590(%rsp), %xmm1 paddd %xmm1, %xmm0 movdqa %xmm0, 0x4c0(%rsp) movdqa 0x4f0(%rsp), %xmm1 movdqa 0x9d0(%rsp), %xmm0 movdqa %xmm1, 0x1580(%rsp) movdqa %xmm0, 0x1570(%rsp) movdqa 0x1580(%rsp), %xmm0 movdqa 0x1570(%rsp), %xmm1 paddd %xmm1, %xmm0 movdqa %xmm0, 0x4b0(%rsp) movdqa 0x4e0(%rsp), %xmm1 movdqa 0x9d0(%rsp), %xmm0 movdqa %xmm1, 0x1560(%rsp) movdqa %xmm0, 0x1550(%rsp) movdqa 0x1560(%rsp), %xmm0 movdqa 0x1550(%rsp), %xmm1 paddd %xmm1, %xmm0 movdqa %xmm0, 0x4a0(%rsp) movdqa 0x4d0(%rsp), %xmm0 movsbl 0xa07(%rsp), %eax movdqa %xmm0, 0x19c0(%rsp) movl %eax, 0x19bc(%rsp) movdqa 0x19c0(%rsp), %xmm0 movl 0x19bc(%rsp), %eax movd %eax, %xmm1 psrad %xmm1, %xmm0 movdqa %xmm0, 0x490(%rsp) movdqa 0x4c0(%rsp), %xmm0 movsbl 0xa07(%rsp), %eax movdqa %xmm0, 0x19a0(%rsp) movl %eax, 0x199c(%rsp) movdqa 0x19a0(%rsp), %xmm0 movl 0x199c(%rsp), %eax movd %eax, %xmm1 psrad %xmm1, %xmm0 movdqa %xmm0, 0x480(%rsp) movdqa 0x4b0(%rsp), %xmm0 movsbl 0xa07(%rsp), %eax movdqa %xmm0, 0x1980(%rsp) movl %eax, 0x197c(%rsp) movdqa 0x1980(%rsp), %xmm0 movl 0x197c(%rsp), %eax movd %eax, %xmm1 psrad %xmm1, %xmm0 movdqa %xmm0, 0x470(%rsp) movdqa 0x4a0(%rsp), %xmm0 movsbl 0xa07(%rsp), %eax movdqa %xmm0, 0x1960(%rsp) movl %eax, 0x195c(%rsp) movdqa 0x1960(%rsp), %xmm0 movl 0x195c(%rsp), %eax movd %eax, %xmm1 psrad %xmm1, %xmm0 movdqa %xmm0, 0x460(%rsp) movdqa 
0x490(%rsp), %xmm1 movdqa 0x480(%rsp), %xmm0 movdqa %xmm1, 0x1c40(%rsp) movdqa %xmm0, 0x1c30(%rsp) movdqa 0x1c40(%rsp), %xmm0 movdqa 0x1c30(%rsp), %xmm1 packssdw %xmm1, %xmm0 movdqa %xmm0, 0x580(%rsp) movdqa 0x470(%rsp), %xmm1 movdqa 0x460(%rsp), %xmm0 movdqa %xmm1, 0x1c20(%rsp) movdqa %xmm0, 0x1c10(%rsp) movdqa 0x1c20(%rsp), %xmm0 movdqa 0x1c10(%rsp), %xmm1 packssdw %xmm1, %xmm0 movdqa %xmm0, 0x590(%rsp) jmp 0xa59659 movaps 0x620(%rsp), %xmm1 movaps 0x630(%rsp), %xmm0 movaps %xmm1, 0xd60(%rsp) movaps %xmm0, 0xd50(%rsp) movaps 0xd60(%rsp), %xmm0 movaps 0xd50(%rsp), %xmm1 punpcklwd %xmm1, %xmm0 # xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1],xmm0[2],xmm1[2],xmm0[3],xmm1[3] movaps %xmm0, 0x450(%rsp) movaps 0x620(%rsp), %xmm1 movaps 0x630(%rsp), %xmm0 movaps %xmm1, 0xe60(%rsp) movaps %xmm0, 0xe50(%rsp) movaps 0xe60(%rsp), %xmm0 movaps 0xe50(%rsp), %xmm1 punpckhwd %xmm1, %xmm0 # xmm0 = xmm0[4],xmm1[4],xmm0[5],xmm1[5],xmm0[6],xmm1[6],xmm0[7],xmm1[7] movdqa %xmm0, 0x440(%rsp) movdqa 0x450(%rsp), %xmm1 movdqa 0x980(%rsp), %xmm0 movdqa %xmm1, 0x1140(%rsp) movdqa %xmm0, 0x1130(%rsp) movdqa 0x1140(%rsp), %xmm0 movdqa 0x1130(%rsp), %xmm1 pmaddwd %xmm1, %xmm0 movdqa %xmm0, 0x430(%rsp) movdqa 0x440(%rsp), %xmm1 movdqa 0x980(%rsp), %xmm0 movdqa %xmm1, 0x1120(%rsp) movdqa %xmm0, 0x1110(%rsp) movdqa 0x1120(%rsp), %xmm0 movdqa 0x1110(%rsp), %xmm1 pmaddwd %xmm1, %xmm0 movdqa %xmm0, 0x420(%rsp) movdqa 0x450(%rsp), %xmm1 movdqa 0x9a0(%rsp), %xmm0 movdqa %xmm1, 0x1100(%rsp) movdqa %xmm0, 0x10f0(%rsp) movdqa 0x1100(%rsp), %xmm0 movdqa 0x10f0(%rsp), %xmm1 pmaddwd %xmm1, %xmm0 movdqa %xmm0, 0x410(%rsp) movdqa 0x440(%rsp), %xmm1 movdqa 0x9a0(%rsp), %xmm0 movdqa %xmm1, 0x10e0(%rsp) movdqa %xmm0, 0x10d0(%rsp) movdqa 0x10e0(%rsp), %xmm0 movdqa 0x10d0(%rsp), %xmm1 pmaddwd %xmm1, %xmm0 movdqa %xmm0, 0x400(%rsp) movdqa 0x430(%rsp), %xmm1 movdqa 0x9d0(%rsp), %xmm0 movdqa %xmm1, 0x1540(%rsp) movdqa %xmm0, 0x1530(%rsp) movdqa 0x1540(%rsp), %xmm0 movdqa 0x1530(%rsp), %xmm1 paddd %xmm1, %xmm0 movdqa %xmm0, 
0x3f0(%rsp) movdqa 0x420(%rsp), %xmm1 movdqa 0x9d0(%rsp), %xmm0 movdqa %xmm1, 0x1520(%rsp) movdqa %xmm0, 0x1510(%rsp) movdqa 0x1520(%rsp), %xmm0 movdqa 0x1510(%rsp), %xmm1 paddd %xmm1, %xmm0 movdqa %xmm0, 0x3e0(%rsp) movdqa 0x410(%rsp), %xmm1 movdqa 0x9d0(%rsp), %xmm0 movdqa %xmm1, 0x1500(%rsp) movdqa %xmm0, 0x14f0(%rsp) movdqa 0x1500(%rsp), %xmm0 movdqa 0x14f0(%rsp), %xmm1 paddd %xmm1, %xmm0 movdqa %xmm0, 0x3d0(%rsp) movdqa 0x400(%rsp), %xmm1 movdqa 0x9d0(%rsp), %xmm0 movdqa %xmm1, 0x14e0(%rsp) movdqa %xmm0, 0x14d0(%rsp) movdqa 0x14e0(%rsp), %xmm0 movdqa 0x14d0(%rsp), %xmm1 paddd %xmm1, %xmm0 movdqa %xmm0, 0x3c0(%rsp) movdqa 0x3f0(%rsp), %xmm0 movsbl 0xa07(%rsp), %eax movdqa %xmm0, 0x1940(%rsp) movl %eax, 0x193c(%rsp) movdqa 0x1940(%rsp), %xmm0 movl 0x193c(%rsp), %eax movd %eax, %xmm1 psrad %xmm1, %xmm0 movdqa %xmm0, 0x3b0(%rsp) movdqa 0x3e0(%rsp), %xmm0 movsbl 0xa07(%rsp), %eax movdqa %xmm0, 0x1920(%rsp) movl %eax, 0x191c(%rsp) movdqa 0x1920(%rsp), %xmm0 movl 0x191c(%rsp), %eax movd %eax, %xmm1 psrad %xmm1, %xmm0 movdqa %xmm0, 0x3a0(%rsp) movdqa 0x3d0(%rsp), %xmm0 movsbl 0xa07(%rsp), %eax movdqa %xmm0, 0x1900(%rsp) movl %eax, 0x18fc(%rsp) movdqa 0x1900(%rsp), %xmm0 movl 0x18fc(%rsp), %eax movd %eax, %xmm1 psrad %xmm1, %xmm0 movdqa %xmm0, 0x390(%rsp) movdqa 0x3c0(%rsp), %xmm0 movsbl 0xa07(%rsp), %eax movdqa %xmm0, 0x18e0(%rsp) movl %eax, 0x18dc(%rsp) movdqa 0x18e0(%rsp), %xmm0 movl 0x18dc(%rsp), %eax movd %eax, %xmm1 psrad %xmm1, %xmm0 movdqa %xmm0, 0x380(%rsp) movdqa 0x3b0(%rsp), %xmm1 movdqa 0x3a0(%rsp), %xmm0 movdqa %xmm1, 0x1c00(%rsp) movdqa %xmm0, 0x1bf0(%rsp) movdqa 0x1c00(%rsp), %xmm0 movdqa 0x1bf0(%rsp), %xmm1 packssdw %xmm1, %xmm0 movdqa %xmm0, 0x5a0(%rsp) movdqa 0x390(%rsp), %xmm1 movdqa 0x380(%rsp), %xmm0 movdqa %xmm1, 0x1be0(%rsp) movdqa %xmm0, 0x1bd0(%rsp) movdqa 0x1be0(%rsp), %xmm0 movdqa 0x1bd0(%rsp), %xmm1 packssdw %xmm1, %xmm0 movdqa %xmm0, 0x5b0(%rsp) movdqa 0x540(%rsp), %xmm1 movdqa 0x580(%rsp), %xmm0 movdqa %xmm1, 0xac0(%rsp) movdqa %xmm0, 
0xab0(%rsp) movdqa 0xac0(%rsp), %xmm0 movdqa 0xab0(%rsp), %xmm1 paddsw %xmm1, %xmm0 movq 0xa08(%rsp), %rax movdqa %xmm0, 0x70(%rax) movdqa 0x540(%rsp), %xmm1 movdqa 0x580(%rsp), %xmm0 movdqa %xmm1, 0xbc0(%rsp) movdqa %xmm0, 0xbb0(%rsp) movdqa 0xbc0(%rsp), %xmm0 movdqa 0xbb0(%rsp), %xmm1 psubsw %xmm1, %xmm0 movq 0xa08(%rsp), %rax movdqa %xmm0, 0x30(%rax) movdqa 0x550(%rsp), %xmm1 movdqa 0x590(%rsp), %xmm0 movdqa %xmm1, 0xaa0(%rsp) movdqa %xmm0, 0xa90(%rsp) movdqa 0xaa0(%rsp), %xmm0 movdqa 0xa90(%rsp), %xmm1 paddsw %xmm1, %xmm0 movq 0xa08(%rsp), %rax movdqa %xmm0, (%rax) movdqa 0x550(%rsp), %xmm1 movdqa 0x590(%rsp), %xmm0 movdqa %xmm1, 0xba0(%rsp) movdqa %xmm0, 0xb90(%rsp) movdqa 0xba0(%rsp), %xmm0 movdqa 0xb90(%rsp), %xmm1 psubsw %xmm1, %xmm0 movq 0xa08(%rsp), %rax movdqa %xmm0, 0x40(%rax) movdqa 0x560(%rsp), %xmm1 movdqa 0x5a0(%rsp), %xmm0 movdqa %xmm1, 0xa80(%rsp) movdqa %xmm0, 0xa70(%rsp) movdqa 0xa80(%rsp), %xmm0 movdqa 0xa70(%rsp), %xmm1 paddsw %xmm1, %xmm0 movq 0xa08(%rsp), %rax movdqa %xmm0, 0x50(%rax) movdqa 0x560(%rsp), %xmm1 movdqa 0x5a0(%rsp), %xmm0 movdqa %xmm1, 0xb80(%rsp) movdqa %xmm0, 0xb70(%rsp) movdqa 0xb80(%rsp), %xmm0 movdqa 0xb70(%rsp), %xmm1 psubsw %xmm1, %xmm0 movq 0xa08(%rsp), %rax movdqa %xmm0, 0x10(%rax) movdqa 0x570(%rsp), %xmm1 movdqa 0x5b0(%rsp), %xmm0 movdqa %xmm1, 0xa60(%rsp) movdqa %xmm0, 0xa50(%rsp) movdqa 0xa60(%rsp), %xmm0 movdqa 0xa50(%rsp), %xmm1 paddsw %xmm1, %xmm0 movq 0xa08(%rsp), %rax movdqa %xmm0, 0x20(%rax) movdqa 0x570(%rsp), %xmm1 movdqa 0x5b0(%rsp), %xmm0 movdqa %xmm1, 0xb60(%rsp) movdqa %xmm0, 0xb50(%rsp) movdqa 0xb60(%rsp), %xmm0 movdqa 0xb50(%rsp), %xmm1 psubsw %xmm1, %xmm0 movq 0xa08(%rsp), %rax movdqa %xmm0, 0x60(%rax) movq 0xa08(%rsp), %rax movaps (%rax), %xmm0 movaps 0x70(%rax), %xmm1 movaps %xmm1, 0xd40(%rsp) movaps %xmm0, 0xd30(%rsp) movaps 0xd40(%rsp), %xmm0 movaps 0xd30(%rsp), %xmm1 punpcklwd %xmm1, %xmm0 # xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1],xmm0[2],xmm1[2],xmm0[3],xmm1[3] movaps %xmm0, 0x370(%rsp) movq 
0xa08(%rsp), %rax movaps (%rax), %xmm0 movaps 0x70(%rax), %xmm1 movaps %xmm1, 0xe40(%rsp) movaps %xmm0, 0xe30(%rsp) movaps 0xe40(%rsp), %xmm0 movaps 0xe30(%rsp), %xmm1 punpckhwd %xmm1, %xmm0 # xmm0 = xmm0[4],xmm1[4],xmm0[5],xmm1[5],xmm0[6],xmm1[6],xmm0[7],xmm1[7] movdqa %xmm0, 0x360(%rsp) movdqa 0x370(%rsp), %xmm1 movdqa 0x970(%rsp), %xmm0 movdqa %xmm1, 0x10c0(%rsp) movdqa %xmm0, 0x10b0(%rsp) movdqa 0x10c0(%rsp), %xmm0 movdqa 0x10b0(%rsp), %xmm1 pmaddwd %xmm1, %xmm0 movdqa %xmm0, 0x350(%rsp) movdqa 0x360(%rsp), %xmm1 movdqa 0x970(%rsp), %xmm0 movdqa %xmm1, 0x10a0(%rsp) movdqa %xmm0, 0x1090(%rsp) movdqa 0x10a0(%rsp), %xmm0 movdqa 0x1090(%rsp), %xmm1 pmaddwd %xmm1, %xmm0 movdqa %xmm0, 0x340(%rsp) movdqa 0x370(%rsp), %xmm1 movdqa 0x960(%rsp), %xmm0 movdqa %xmm1, 0x1080(%rsp) movdqa %xmm0, 0x1070(%rsp) movdqa 0x1080(%rsp), %xmm0 movdqa 0x1070(%rsp), %xmm1 pmaddwd %xmm1, %xmm0 movdqa %xmm0, 0x330(%rsp) movdqa 0x360(%rsp), %xmm1 movdqa 0x960(%rsp), %xmm0 movdqa %xmm1, 0x1060(%rsp) movdqa %xmm0, 0x1050(%rsp) movdqa 0x1060(%rsp), %xmm0 movdqa 0x1050(%rsp), %xmm1 pmaddwd %xmm1, %xmm0 movdqa %xmm0, 0x320(%rsp) movdqa 0x350(%rsp), %xmm1 movdqa 0x9d0(%rsp), %xmm0 movdqa %xmm1, 0x14c0(%rsp) movdqa %xmm0, 0x14b0(%rsp) movdqa 0x14c0(%rsp), %xmm0 movdqa 0x14b0(%rsp), %xmm1 paddd %xmm1, %xmm0 movdqa %xmm0, 0x310(%rsp) movdqa 0x340(%rsp), %xmm1 movdqa 0x9d0(%rsp), %xmm0 movdqa %xmm1, 0x14a0(%rsp) movdqa %xmm0, 0x1490(%rsp) movdqa 0x14a0(%rsp), %xmm0 movdqa 0x1490(%rsp), %xmm1 paddd %xmm1, %xmm0 movdqa %xmm0, 0x300(%rsp) movdqa 0x330(%rsp), %xmm1 movdqa 0x9d0(%rsp), %xmm0 movdqa %xmm1, 0x1480(%rsp) movdqa %xmm0, 0x1470(%rsp) movdqa 0x1480(%rsp), %xmm0 movdqa 0x1470(%rsp), %xmm1 paddd %xmm1, %xmm0 movdqa %xmm0, 0x2f0(%rsp) movdqa 0x320(%rsp), %xmm1 movdqa 0x9d0(%rsp), %xmm0 movdqa %xmm1, 0x1460(%rsp) movdqa %xmm0, 0x1450(%rsp) movdqa 0x1460(%rsp), %xmm0 movdqa 0x1450(%rsp), %xmm1 paddd %xmm1, %xmm0 movdqa %xmm0, 0x2e0(%rsp) movdqa 0x310(%rsp), %xmm0 movsbl 0xa07(%rsp), %eax movdqa 
%xmm0, 0x18c0(%rsp) movl %eax, 0x18bc(%rsp) movdqa 0x18c0(%rsp), %xmm0 movl 0x18bc(%rsp), %eax movd %eax, %xmm1 psrad %xmm1, %xmm0 movdqa %xmm0, 0x2d0(%rsp) movdqa 0x300(%rsp), %xmm0 movsbl 0xa07(%rsp), %eax movdqa %xmm0, 0x18a0(%rsp) movl %eax, 0x189c(%rsp) movdqa 0x18a0(%rsp), %xmm0 movl 0x189c(%rsp), %eax movd %eax, %xmm1 psrad %xmm1, %xmm0 movdqa %xmm0, 0x2c0(%rsp) movdqa 0x2f0(%rsp), %xmm0 movsbl 0xa07(%rsp), %eax movdqa %xmm0, 0x1880(%rsp) movl %eax, 0x187c(%rsp) movdqa 0x1880(%rsp), %xmm0 movl 0x187c(%rsp), %eax movd %eax, %xmm1 psrad %xmm1, %xmm0 movdqa %xmm0, 0x2b0(%rsp) movdqa 0x2e0(%rsp), %xmm0 movsbl 0xa07(%rsp), %eax movdqa %xmm0, 0x1860(%rsp) movl %eax, 0x185c(%rsp) movdqa 0x1860(%rsp), %xmm0 movl 0x185c(%rsp), %eax movd %eax, %xmm1 psrad %xmm1, %xmm0 movdqa %xmm0, 0x2a0(%rsp) movdqa 0x2d0(%rsp), %xmm1 movdqa 0x2c0(%rsp), %xmm0 movdqa %xmm1, 0x1bc0(%rsp) movdqa %xmm0, 0x1bb0(%rsp) movdqa 0x1bc0(%rsp), %xmm0 movdqa 0x1bb0(%rsp), %xmm1 packssdw %xmm1, %xmm0 movq 0xa08(%rsp), %rax movdqa %xmm0, 0x70(%rax) movdqa 0x2b0(%rsp), %xmm1 movdqa 0x2a0(%rsp), %xmm0 movdqa %xmm1, 0x1ba0(%rsp) movdqa %xmm0, 0x1b90(%rsp) movdqa 0x1ba0(%rsp), %xmm0 movdqa 0x1b90(%rsp), %xmm1 packssdw %xmm1, %xmm0 movq 0xa08(%rsp), %rax movdqa %xmm0, (%rax) jmp 0xa5a0d5 movq 0xa08(%rsp), %rax movaps 0x20(%rax), %xmm0 movaps 0x50(%rax), %xmm1 movaps %xmm1, 0xd20(%rsp) movaps %xmm0, 0xd10(%rsp) movaps 0xd20(%rsp), %xmm0 movaps 0xd10(%rsp), %xmm1 punpcklwd %xmm1, %xmm0 # xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1],xmm0[2],xmm1[2],xmm0[3],xmm1[3] movaps %xmm0, 0x290(%rsp) movq 0xa08(%rsp), %rax movaps 0x20(%rax), %xmm0 movaps 0x50(%rax), %xmm1 movaps %xmm1, 0xe20(%rsp) movaps %xmm0, 0xe10(%rsp) movaps 0xe20(%rsp), %xmm0 movaps 0xe10(%rsp), %xmm1 punpckhwd %xmm1, %xmm0 # xmm0 = xmm0[4],xmm1[4],xmm0[5],xmm1[5],xmm0[6],xmm1[6],xmm0[7],xmm1[7] movdqa %xmm0, 0x280(%rsp) movdqa 0x290(%rsp), %xmm1 movdqa 0x950(%rsp), %xmm0 movdqa %xmm1, 0x1040(%rsp) movdqa %xmm0, 0x1030(%rsp) movdqa 0x1040(%rsp), 
%xmm0 movdqa 0x1030(%rsp), %xmm1 pmaddwd %xmm1, %xmm0 movdqa %xmm0, 0x270(%rsp) movdqa 0x280(%rsp), %xmm1 movdqa 0x950(%rsp), %xmm0 movdqa %xmm1, 0x1020(%rsp) movdqa %xmm0, 0x1010(%rsp) movdqa 0x1020(%rsp), %xmm0 movdqa 0x1010(%rsp), %xmm1 pmaddwd %xmm1, %xmm0 movdqa %xmm0, 0x260(%rsp) movdqa 0x290(%rsp), %xmm1 movdqa 0x940(%rsp), %xmm0 movdqa %xmm1, 0x1000(%rsp) movdqa %xmm0, 0xff0(%rsp) movdqa 0x1000(%rsp), %xmm0 movdqa 0xff0(%rsp), %xmm1 pmaddwd %xmm1, %xmm0 movdqa %xmm0, 0x250(%rsp) movdqa 0x280(%rsp), %xmm1 movdqa 0x940(%rsp), %xmm0 movdqa %xmm1, 0xfe0(%rsp) movdqa %xmm0, 0xfd0(%rsp) movdqa 0xfe0(%rsp), %xmm0 movdqa 0xfd0(%rsp), %xmm1 pmaddwd %xmm1, %xmm0 movdqa %xmm0, 0x240(%rsp) movdqa 0x270(%rsp), %xmm1 movdqa 0x9d0(%rsp), %xmm0 movdqa %xmm1, 0x1440(%rsp) movdqa %xmm0, 0x1430(%rsp) movdqa 0x1440(%rsp), %xmm0 movdqa 0x1430(%rsp), %xmm1 paddd %xmm1, %xmm0 movdqa %xmm0, 0x230(%rsp) movdqa 0x260(%rsp), %xmm1 movdqa 0x9d0(%rsp), %xmm0 movdqa %xmm1, 0x1420(%rsp) movdqa %xmm0, 0x1410(%rsp) movdqa 0x1420(%rsp), %xmm0 movdqa 0x1410(%rsp), %xmm1 paddd %xmm1, %xmm0 movdqa %xmm0, 0x220(%rsp) movdqa 0x250(%rsp), %xmm1 movdqa 0x9d0(%rsp), %xmm0 movdqa %xmm1, 0x1400(%rsp) movdqa %xmm0, 0x13f0(%rsp) movdqa 0x1400(%rsp), %xmm0 movdqa 0x13f0(%rsp), %xmm1 paddd %xmm1, %xmm0 movdqa %xmm0, 0x210(%rsp) movdqa 0x240(%rsp), %xmm1 movdqa 0x9d0(%rsp), %xmm0 movdqa %xmm1, 0x13e0(%rsp) movdqa %xmm0, 0x13d0(%rsp) movdqa 0x13e0(%rsp), %xmm0 movdqa 0x13d0(%rsp), %xmm1 paddd %xmm1, %xmm0 movdqa %xmm0, 0x200(%rsp) movdqa 0x230(%rsp), %xmm0 movsbl 0xa07(%rsp), %eax movdqa %xmm0, 0x1840(%rsp) movl %eax, 0x183c(%rsp) movdqa 0x1840(%rsp), %xmm0 movl 0x183c(%rsp), %eax movd %eax, %xmm1 psrad %xmm1, %xmm0 movdqa %xmm0, 0x1f0(%rsp) movdqa 0x220(%rsp), %xmm0 movsbl 0xa07(%rsp), %eax movdqa %xmm0, 0x1820(%rsp) movl %eax, 0x181c(%rsp) movdqa 0x1820(%rsp), %xmm0 movl 0x181c(%rsp), %eax movd %eax, %xmm1 psrad %xmm1, %xmm0 movdqa %xmm0, 0x1e0(%rsp) movdqa 0x210(%rsp), %xmm0 movsbl 0xa07(%rsp), %eax 
movdqa %xmm0, 0x1800(%rsp) movl %eax, 0x17fc(%rsp) movdqa 0x1800(%rsp), %xmm0 movl 0x17fc(%rsp), %eax movd %eax, %xmm1 psrad %xmm1, %xmm0 movdqa %xmm0, 0x1d0(%rsp) movdqa 0x200(%rsp), %xmm0 movsbl 0xa07(%rsp), %eax movdqa %xmm0, 0x17e0(%rsp) movl %eax, 0x17dc(%rsp) movdqa 0x17e0(%rsp), %xmm0 movl 0x17dc(%rsp), %eax movd %eax, %xmm1 psrad %xmm1, %xmm0 movdqa %xmm0, 0x1c0(%rsp) movdqa 0x1f0(%rsp), %xmm1 movdqa 0x1e0(%rsp), %xmm0 movdqa %xmm1, 0x1b80(%rsp) movdqa %xmm0, 0x1b70(%rsp) movdqa 0x1b80(%rsp), %xmm0 movdqa 0x1b70(%rsp), %xmm1 packssdw %xmm1, %xmm0 movq 0xa08(%rsp), %rax movdqa %xmm0, 0x50(%rax) movdqa 0x1d0(%rsp), %xmm1 movdqa 0x1c0(%rsp), %xmm0 movdqa %xmm1, 0x1b60(%rsp) movdqa %xmm0, 0x1b50(%rsp) movdqa 0x1b60(%rsp), %xmm0 movdqa 0x1b50(%rsp), %xmm1 packssdw %xmm1, %xmm0 movq 0xa08(%rsp), %rax movdqa %xmm0, 0x20(%rax) jmp 0xa5a4fe movq 0xa08(%rsp), %rax movaps 0x30(%rax), %xmm1 movaps 0x40(%rax), %xmm0 movaps %xmm1, 0xd00(%rsp) movaps %xmm0, 0xcf0(%rsp) movaps 0xd00(%rsp), %xmm0 movaps 0xcf0(%rsp), %xmm1 punpcklwd %xmm1, %xmm0 # xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1],xmm0[2],xmm1[2],xmm0[3],xmm1[3] movaps %xmm0, 0x1b0(%rsp) movq 0xa08(%rsp), %rax movaps 0x30(%rax), %xmm1 movaps 0x40(%rax), %xmm0 movaps %xmm1, 0xe00(%rsp) movaps %xmm0, 0xdf0(%rsp) movaps 0xe00(%rsp), %xmm0 movaps 0xdf0(%rsp), %xmm1 punpckhwd %xmm1, %xmm0 # xmm0 = xmm0[4],xmm1[4],xmm0[5],xmm1[5],xmm0[6],xmm1[6],xmm0[7],xmm1[7] movdqa %xmm0, 0x1a0(%rsp) movdqa 0x1b0(%rsp), %xmm1 movdqa 0x930(%rsp), %xmm0 movdqa %xmm1, 0xfc0(%rsp) movdqa %xmm0, 0xfb0(%rsp) movdqa 0xfc0(%rsp), %xmm0 movdqa 0xfb0(%rsp), %xmm1 pmaddwd %xmm1, %xmm0 movdqa %xmm0, 0x190(%rsp) movdqa 0x1a0(%rsp), %xmm1 movdqa 0x930(%rsp), %xmm0 movdqa %xmm1, 0xfa0(%rsp) movdqa %xmm0, 0xf90(%rsp) movdqa 0xfa0(%rsp), %xmm0 movdqa 0xf90(%rsp), %xmm1 pmaddwd %xmm1, %xmm0 movdqa %xmm0, 0x180(%rsp) movdqa 0x1b0(%rsp), %xmm1 movdqa 0x920(%rsp), %xmm0 movdqa %xmm1, 0xf80(%rsp) movdqa %xmm0, 0xf70(%rsp) movdqa 0xf80(%rsp), %xmm0 movdqa 
0xf70(%rsp), %xmm1 pmaddwd %xmm1, %xmm0 movdqa %xmm0, 0x170(%rsp) movdqa 0x1a0(%rsp), %xmm1 movdqa 0x920(%rsp), %xmm0 movdqa %xmm1, 0xf60(%rsp) movdqa %xmm0, 0xf50(%rsp) movdqa 0xf60(%rsp), %xmm0 movdqa 0xf50(%rsp), %xmm1 pmaddwd %xmm1, %xmm0 movdqa %xmm0, 0x160(%rsp) movdqa 0x190(%rsp), %xmm1 movdqa 0x9d0(%rsp), %xmm0 movdqa %xmm1, 0x13c0(%rsp) movdqa %xmm0, 0x13b0(%rsp) movdqa 0x13c0(%rsp), %xmm0 movdqa 0x13b0(%rsp), %xmm1 paddd %xmm1, %xmm0 movdqa %xmm0, 0x150(%rsp) movdqa 0x180(%rsp), %xmm1 movdqa 0x9d0(%rsp), %xmm0 movdqa %xmm1, 0x13a0(%rsp) movdqa %xmm0, 0x1390(%rsp) movdqa 0x13a0(%rsp), %xmm0 movdqa 0x1390(%rsp), %xmm1 paddd %xmm1, %xmm0 movdqa %xmm0, 0x140(%rsp) movdqa 0x170(%rsp), %xmm1 movdqa 0x9d0(%rsp), %xmm0 movdqa %xmm1, 0x1380(%rsp) movdqa %xmm0, 0x1370(%rsp) movdqa 0x1380(%rsp), %xmm0 movdqa 0x1370(%rsp), %xmm1 paddd %xmm1, %xmm0 movdqa %xmm0, 0x130(%rsp) movdqa 0x160(%rsp), %xmm1 movdqa 0x9d0(%rsp), %xmm0 movdqa %xmm1, 0x1360(%rsp) movdqa %xmm0, 0x1350(%rsp) movdqa 0x1360(%rsp), %xmm0 movdqa 0x1350(%rsp), %xmm1 paddd %xmm1, %xmm0 movdqa %xmm0, 0x120(%rsp) movdqa 0x150(%rsp), %xmm0 movsbl 0xa07(%rsp), %eax movdqa %xmm0, 0x17c0(%rsp) movl %eax, 0x17bc(%rsp) movdqa 0x17c0(%rsp), %xmm0 movl 0x17bc(%rsp), %eax movd %eax, %xmm1 psrad %xmm1, %xmm0 movdqa %xmm0, 0x110(%rsp) movdqa 0x140(%rsp), %xmm0 movsbl 0xa07(%rsp), %eax movdqa %xmm0, 0x17a0(%rsp) movl %eax, 0x179c(%rsp) movdqa 0x17a0(%rsp), %xmm0 movl 0x179c(%rsp), %eax movd %eax, %xmm1 psrad %xmm1, %xmm0 movdqa %xmm0, 0x100(%rsp) movdqa 0x130(%rsp), %xmm0 movsbl 0xa07(%rsp), %eax movdqa %xmm0, 0x1780(%rsp) movl %eax, 0x177c(%rsp) movdqa 0x1780(%rsp), %xmm0 movl 0x177c(%rsp), %eax movd %eax, %xmm1 psrad %xmm1, %xmm0 movdqa %xmm0, 0xf0(%rsp) movdqa 0x120(%rsp), %xmm0 movsbl 0xa07(%rsp), %eax movdqa %xmm0, 0x1760(%rsp) movl %eax, 0x175c(%rsp) movdqa 0x1760(%rsp), %xmm0 movl 0x175c(%rsp), %eax movd %eax, %xmm1 psrad %xmm1, %xmm0 movdqa %xmm0, 0xe0(%rsp) movdqa 0x110(%rsp), %xmm1 movdqa 0x100(%rsp), %xmm0 
movdqa %xmm1, 0x1b40(%rsp) movdqa %xmm0, 0x1b30(%rsp) movdqa 0x1b40(%rsp), %xmm0 movdqa 0x1b30(%rsp), %xmm1 packssdw %xmm1, %xmm0 movq 0xa08(%rsp), %rax movdqa %xmm0, 0x30(%rax) movdqa 0xf0(%rsp), %xmm1 movdqa 0xe0(%rsp), %xmm0 movdqa %xmm1, 0x1b20(%rsp) movdqa %xmm0, 0x1b10(%rsp) movdqa 0x1b20(%rsp), %xmm0 movdqa 0x1b10(%rsp), %xmm1 packssdw %xmm1, %xmm0 movq 0xa08(%rsp), %rax movdqa %xmm0, 0x40(%rax) jmp 0xa5a927 movq 0xa08(%rsp), %rax movaps 0x10(%rax), %xmm1 movaps 0x60(%rax), %xmm0 movaps %xmm1, 0xce0(%rsp) movaps %xmm0, 0xcd0(%rsp) movaps 0xce0(%rsp), %xmm0 movaps 0xcd0(%rsp), %xmm1 punpcklwd %xmm1, %xmm0 # xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1],xmm0[2],xmm1[2],xmm0[3],xmm1[3] movaps %xmm0, 0xd0(%rsp) movq 0xa08(%rsp), %rax movaps 0x10(%rax), %xmm1 movaps 0x60(%rax), %xmm0 movaps %xmm1, 0xde0(%rsp) movaps %xmm0, 0xdd0(%rsp) movaps 0xde0(%rsp), %xmm0 movaps 0xdd0(%rsp), %xmm1 punpckhwd %xmm1, %xmm0 # xmm0 = xmm0[4],xmm1[4],xmm0[5],xmm1[5],xmm0[6],xmm1[6],xmm0[7],xmm1[7] movdqa %xmm0, 0xc0(%rsp) movdqa 0xd0(%rsp), %xmm1 movdqa 0x910(%rsp), %xmm0 movdqa %xmm1, 0xf40(%rsp) movdqa %xmm0, 0xf30(%rsp) movdqa 0xf40(%rsp), %xmm0 movdqa 0xf30(%rsp), %xmm1 pmaddwd %xmm1, %xmm0 movdqa %xmm0, 0xb0(%rsp) movdqa 0xc0(%rsp), %xmm1 movdqa 0x910(%rsp), %xmm0 movdqa %xmm1, 0xf20(%rsp) movdqa %xmm0, 0xf10(%rsp) movdqa 0xf20(%rsp), %xmm0 movdqa 0xf10(%rsp), %xmm1 pmaddwd %xmm1, %xmm0 movdqa %xmm0, 0xa0(%rsp) movdqa 0xd0(%rsp), %xmm1 movdqa 0x900(%rsp), %xmm0 movdqa %xmm1, 0xf00(%rsp) movdqa %xmm0, 0xef0(%rsp) movdqa 0xf00(%rsp), %xmm0 movdqa 0xef0(%rsp), %xmm1 pmaddwd %xmm1, %xmm0 movdqa %xmm0, 0x90(%rsp) movdqa 0xc0(%rsp), %xmm1 movdqa 0x900(%rsp), %xmm0 movdqa %xmm1, 0xee0(%rsp) movdqa %xmm0, 0xed0(%rsp) movdqa 0xee0(%rsp), %xmm0 movdqa 0xed0(%rsp), %xmm1 pmaddwd %xmm1, %xmm0 movdqa %xmm0, 0x80(%rsp) movdqa 0xb0(%rsp), %xmm1 movdqa 0x9d0(%rsp), %xmm0 movdqa %xmm1, 0x1340(%rsp) movdqa %xmm0, 0x1330(%rsp) movdqa 0x1340(%rsp), %xmm0 movdqa 0x1330(%rsp), %xmm1 paddd %xmm1, %xmm0 
movdqa %xmm0, 0x70(%rsp) movdqa 0xa0(%rsp), %xmm1 movdqa 0x9d0(%rsp), %xmm0 movdqa %xmm1, 0x1320(%rsp) movdqa %xmm0, 0x1310(%rsp) movdqa 0x1320(%rsp), %xmm0 movdqa 0x1310(%rsp), %xmm1 paddd %xmm1, %xmm0 movdqa %xmm0, 0x60(%rsp) movdqa 0x90(%rsp), %xmm1 movdqa 0x9d0(%rsp), %xmm0 movdqa %xmm1, 0x1300(%rsp) movdqa %xmm0, 0x12f0(%rsp) movdqa 0x1300(%rsp), %xmm0 movdqa 0x12f0(%rsp), %xmm1 paddd %xmm1, %xmm0 movdqa %xmm0, 0x50(%rsp) movdqa 0x80(%rsp), %xmm1 movdqa 0x9d0(%rsp), %xmm0 movdqa %xmm1, 0x12e0(%rsp) movdqa %xmm0, 0x12d0(%rsp) movdqa 0x12e0(%rsp), %xmm0 movdqa 0x12d0(%rsp), %xmm1 paddd %xmm1, %xmm0 movdqa %xmm0, 0x40(%rsp) movdqa 0x70(%rsp), %xmm0 movsbl 0xa07(%rsp), %eax movdqa %xmm0, 0x1740(%rsp) movl %eax, 0x173c(%rsp) movdqa 0x1740(%rsp), %xmm0 movl 0x173c(%rsp), %eax movd %eax, %xmm1 psrad %xmm1, %xmm0 movdqa %xmm0, 0x30(%rsp) movdqa 0x60(%rsp), %xmm0 movsbl 0xa07(%rsp), %eax movdqa %xmm0, 0x1720(%rsp) movl %eax, 0x171c(%rsp) movdqa 0x1720(%rsp), %xmm0 movl 0x171c(%rsp), %eax movd %eax, %xmm1 psrad %xmm1, %xmm0 movdqa %xmm0, 0x20(%rsp) movdqa 0x50(%rsp), %xmm0 movsbl 0xa07(%rsp), %eax movdqa %xmm0, 0x1700(%rsp) movl %eax, 0x16fc(%rsp) movdqa 0x1700(%rsp), %xmm0 movl 0x16fc(%rsp), %eax movd %eax, %xmm1 psrad %xmm1, %xmm0 movdqa %xmm0, 0x10(%rsp) movdqa 0x40(%rsp), %xmm0 movsbl 0xa07(%rsp), %eax movdqa %xmm0, 0x16e0(%rsp) movl %eax, 0x16dc(%rsp) movdqa 0x16e0(%rsp), %xmm0 movl 0x16dc(%rsp), %eax movd %eax, %xmm1 psrad %xmm1, %xmm0 movdqa %xmm0, (%rsp) movdqa 0x30(%rsp), %xmm1 movdqa 0x20(%rsp), %xmm0 movdqa %xmm1, 0x1b00(%rsp) movdqa %xmm0, 0x1af0(%rsp) movdqa 0x1b00(%rsp), %xmm0 movdqa 0x1af0(%rsp), %xmm1 packssdw %xmm1, %xmm0 movq 0xa08(%rsp), %rax movdqa %xmm0, 0x10(%rax) movdqa 0x10(%rsp), %xmm1 movdqa (%rsp), %xmm0 movdqa %xmm1, 0x1ae0(%rsp) movdqa %xmm0, 0x1ad0(%rsp) movdqa 0x1ae0(%rsp), %xmm0 movdqa 0x1ad0(%rsp), %xmm1 packssdw %xmm1, %xmm0 movq 0xa08(%rsp), %rax movdqa %xmm0, 0x60(%rax) addq $0x1ea8, %rsp # imm = 0x1EA8 retq nopw %cs:(%rax,%rax)
/m-ab-s[P]aom/av1/encoder/x86/av1_fwd_txfm_sse2.h
algorithm_suite::copy_if_sequence()
void copy_if_sequence() { std::vector<double> input = { 1.0, 2.0, 3.0, 4.0, 5.0 }; cumulative::moment<double> filter; std::copy_if(input.begin(), input.end(), push_inserter(filter), less_than<double>(3.0)); TRIAL_TEST_EQ(filter.size(), 2); TRIAL_TEST_EQ(filter.mean(), 1.5); }
pushq %r14 pushq %rbx subq $0x58, %rsp movups 0x2475(%rip), %xmm0 # 0xdb88 leaq 0x30(%rsp), %rbx movaps %xmm0, 0x10(%rbx) movups 0x2455(%rip), %xmm0 # 0xdb78 movaps %xmm0, (%rbx) movabsq $0x4014000000000000, %rax # imm = 0x4014000000000000 movq %rax, 0x20(%rbx) leaq 0x18(%rsp), %r14 pushq $0x5 popq %rdx leaq 0x8(%rsp), %rcx movq %r14, %rdi movq %rbx, %rsi callq 0xc5ae xorps %xmm0, %xmm0 movaps %xmm0, (%rbx) movq (%r14), %rdi movq 0x8(%r14), %rsi leaq 0x30(%rsp), %rdx movsd 0x18fa(%rip), %xmm0 # 0xd060 callq 0xc5e4 movq 0x38(%rsp), %rax leaq 0x8(%rsp), %r9 movq %rax, (%r9) leaq 0x10(%rsp), %rbx movl $0x2, (%rbx) movq %rbx, (%rsp) leaq 0x1972(%rip), %rdi # 0xd100 leaq 0x1b38(%rip), %rsi # 0xd2cd leaq 0x1972(%rip), %rdx # 0xd10e leaq 0x2241(%rip), %r8 # 0xd9e4 movl $0x2dd, %ecx # imm = 0x2DD callq 0xbc10 movsd 0x30(%rsp), %xmm0 leaq 0x8(%rsp), %r9 movsd %xmm0, (%r9) movabsq $0x3ff8000000000000, %rax # imm = 0x3FF8000000000000 movq %rax, 0x10(%rsp) movq %rbx, (%rsp) leaq 0x19e8(%rip), %rdi # 0xd1bf leaq 0x1a0a(%rip), %rsi # 0xd1e8 leaq 0x1929(%rip), %rdx # 0xd10e leaq 0x21f8(%rip), %r8 # 0xd9e4 movl $0x2de, %ecx # imm = 0x2DE callq 0xbdab leaq 0x18(%rsp), %rdi callq 0xc754 addq $0x58, %rsp popq %rbx popq %r14 retq jmp 0xb80c jmp 0xb80c movq %rax, %rbx leaq 0x18(%rsp), %rdi callq 0xc754 movq %rbx, %rdi callq 0x3150
/breese[P]trial/test/cumulative/moment_suite.cpp
green_loop_release
int green_loop_release(green_loop_t loop) { green_assert(loop != NULL); green_assert(loop->refs > 0); if (--(loop->refs) > 0) { return GREEN_SUCCESS; } green_assert(loop != NULL); green_assert(loop->coroutines == 0); green_free(loop); return GREEN_SUCCESS; }
pushq %rbx testq %rdi, %rdi je 0x1347 movl (%rdi), %eax testl %eax, %eax jle 0x136e leal -0x1(%rax), %ecx movl %ecx, (%rdi) cmpl $0x1, %eax jne 0x1343 cmpl $0x0, 0x4(%rdi) jne 0x1395 callq 0x1030 xorl %eax, %eax popq %rbx retq movq 0x3caa(%rip), %rbx # 0x4ff8 movq (%rbx), %rdi leaq 0x1d88(%rip), %rsi # 0x30e0 leaq 0x1f4a(%rip), %rdx # 0x32a9 leaq 0x1d9c(%rip), %rcx # 0x3102 movl $0xaf, %r8d jmp 0x13ba movq 0x3c83(%rip), %rbx # 0x4ff8 movq (%rbx), %rdi leaq 0x1d61(%rip), %rsi # 0x30e0 leaq 0x1de2(%rip), %rdx # 0x3168 leaq 0x1d75(%rip), %rcx # 0x3102 movl $0xb0, %r8d jmp 0x13ba movq 0x3c5c(%rip), %rbx # 0x4ff8 movq (%rbx), %rdi leaq 0x1d3a(%rip), %rsi # 0x30e0 leaq 0x1dca(%rip), %rdx # 0x3177 leaq 0x1d4e(%rip), %rcx # 0x3102 movl $0xb8, %r8d xorl %eax, %eax callq 0x1080 movq (%rbx), %rdi callq 0x10a0 callq 0x1040
/AndreLouisCaron[P]libgreen/src/green.c
green_future_result
int green_future_result(green_future_t future, void ** p, int * i) { if (future == NULL) { return GREEN_EINVAL; } green_assert(future->refs > 0); if (future->state == green_future_pending) { return GREEN_EBUSY; } if (future->state == green_future_aborted) { return GREEN_EBADFD; } if (p) { *p = future->result.p; } if (i) { *i = future->result.i; } return GREEN_SUCCESS; }
testq %rdi, %rdi je 0x25d3 cmpl $0x0, 0x8(%rdi) jle 0x25f4 movl 0x20(%rdi), %eax testl %eax, %eax je 0x25d7 cmpl $0x1, %eax jne 0x25db pushq $0x8 jmp 0x25d9 pushq $0x1 jmp 0x25d9 pushq $0x3 popq %rax retq testq %rsi, %rsi je 0x25e7 movq 0x10(%rdi), %rax movq %rax, (%rsi) xorl %eax, %eax testq %rdx, %rdx je 0x25f3 movl 0x18(%rdi), %ecx movl %ecx, (%rdx) retq pushq %rbx movq 0x29fc(%rip), %rbx # 0x4ff8 movq (%rbx), %rdi leaq 0xada(%rip), %rsi # 0x30e0 leaq 0xd5b(%rip), %rdx # 0x3368 leaq 0xaee(%rip), %rcx # 0x3102 movl $0x27d, %r8d # imm = 0x27D xorl %eax, %eax callq 0x1080 movq (%rbx), %rdi callq 0x10a0 callq 0x1040
/AndreLouisCaron[P]libgreen/src/green.c
void Catch::Capturer::captureValues<char [4], int, char [6]>(unsigned long, char const (&) [4], int const&, char const (&) [6])
void captureValues( size_t index, T const& value, Ts const&... values ) { captureValue( index, Catch::Detail::stringify(value) ); captureValues( index+1, values... ); }
pushq %rbp pushq %r15 pushq %r14 pushq %r13 pushq %r12 pushq %rbx subq $0x28, %rsp movq %r8, %rbx movq %rcx, %r14 movq %rdx, %r13 movq %rsi, %r15 movq %rdi, %r12 movl $0x4, %ebp movl $0x4, %edx movq %r13, %rdi xorl %esi, %esi callq 0x10560 movq %rax, %rdx subq %r13, %rdx testq %rax, %rax cmoveq %rbp, %rdx leaq 0x8(%rsp), %rbp movq %rbp, %rdi movq %r13, %rsi callq 0xa47a1 movq %r12, %rdi movq %r15, %rsi movq %rbp, %rdx callq 0x9d94c leaq 0x18(%rsp), %rax movq -0x10(%rax), %rdi cmpq %rax, %rdi je 0x9911b movq 0x18(%rsp), %rsi incq %rsi callq 0x104d0 incq %r15 movq %r12, %rdi movq %r15, %rsi movq %r14, %rdx movq %rbx, %rcx callq 0x995e4 addq $0x28, %rsp popq %rbx popq %r12 popq %r13 popq %r14 popq %r15 popq %rbp retq movq %rax, %rbx leaq 0x18(%rsp), %rax movq -0x10(%rax), %rdi cmpq %rax, %rdi je 0x9915c movq 0x18(%rsp), %rsi incq %rsi callq 0x104d0 movq %rbx, %rdi callq 0x108d0
/embeddedartistry[P]printf/build_O3/_deps/catch2-src/src/catch2/../catch2/catch_message.hpp
void Catch::Capturer::captureValues<char [4], int, int>(unsigned long, char const (&) [4], int const&, int const&)
void captureValues( size_t index, T const& value, Ts const&... values ) { captureValue( index, Catch::Detail::stringify(value) ); captureValues( index+1, values... ); }
pushq %rbp pushq %r15 pushq %r14 pushq %r13 pushq %r12 pushq %rbx subq $0x28, %rsp movq %r8, %rbx movq %rcx, %r14 movq %rdx, %r13 movq %rsi, %r15 movq %rdi, %r12 movl $0x4, %ebp movl $0x4, %edx movq %r13, %rdi xorl %esi, %esi callq 0x10560 movq %rax, %rdx subq %r13, %rdx testq %rax, %rax cmoveq %rbp, %rdx leaq 0x8(%rsp), %rbp movq %rbp, %rdi movq %r13, %rsi callq 0xa47a1 movq %r12, %rdi movq %r15, %rsi movq %rbp, %rdx callq 0x9d94c leaq 0x18(%rsp), %rax movq -0x10(%rax), %rdi cmpq %rax, %rdi je 0x991db movq 0x18(%rsp), %rsi incq %rsi callq 0x104d0 incq %r15 movq %r12, %rdi movq %r15, %rsi movq %r14, %rdx movq %rbx, %rcx callq 0x99674 addq $0x28, %rsp popq %rbx popq %r12 popq %r13 popq %r14 popq %r15 popq %rbp retq movq %rax, %rbx leaq 0x18(%rsp), %rax movq -0x10(%rax), %rdi cmpq %rax, %rdi je 0x9921c movq 0x18(%rsp), %rsi incq %rsi callq 0x104d0 movq %rbx, %rdi callq 0x108d0
/embeddedartistry[P]printf/build_O3/_deps/catch2-src/src/catch2/../catch2/catch_message.hpp
void Catch::Capturer::captureValues<char [7], int, unsigned short>(unsigned long, char const (&) [7], int const&, unsigned short const&)
void captureValues( size_t index, T const& value, Ts const&... values ) { captureValue( index, Catch::Detail::stringify(value) ); captureValues( index+1, values... ); }
pushq %rbp pushq %r15 pushq %r14 pushq %r13 pushq %r12 pushq %rbx subq $0x28, %rsp movq %r8, %rbx movq %rcx, %r14 movq %rdx, %r13 movq %rsi, %r15 movq %rdi, %r12 movl $0x7, %ebp movl $0x7, %edx movq %r13, %rdi xorl %esi, %esi callq 0x10560 movq %rax, %rdx subq %r13, %rdx testq %rax, %rax cmoveq %rbp, %rdx leaq 0x8(%rsp), %rbp movq %rbp, %rdi movq %r13, %rsi callq 0xa47a1 movq %r12, %rdi movq %r15, %rsi movq %rbp, %rdx callq 0x9d94c leaq 0x18(%rsp), %rax movq -0x10(%rax), %rdi cmpq %rax, %rdi je 0x9929b movq 0x18(%rsp), %rsi incq %rsi callq 0x104d0 incq %r15 movq %r12, %rdi movq %r15, %rsi movq %r14, %rdx movq %rbx, %rcx callq 0x99704 addq $0x28, %rsp popq %rbx popq %r12 popq %r13 popq %r14 popq %r15 popq %rbp retq movq %rax, %rbx leaq 0x18(%rsp), %rax movq -0x10(%rax), %rdi cmpq %rax, %rdi je 0x992dc movq 0x18(%rsp), %rsi incq %rsi callq 0x104d0 movq %rbx, %rdi callq 0x108d0
/embeddedartistry[P]printf/build_O3/_deps/catch2-src/src/catch2/../catch2/catch_message.hpp
void Catch::Capturer::captureValues<char [7], int, unsigned int>(unsigned long, char const (&) [7], int const&, unsigned int const&)
void captureValues( size_t index, T const& value, Ts const&... values ) { captureValue( index, Catch::Detail::stringify(value) ); captureValues( index+1, values... ); }
pushq %rbp pushq %r15 pushq %r14 pushq %r13 pushq %r12 pushq %rbx subq $0x28, %rsp movq %r8, %rbx movq %rcx, %r14 movq %rdx, %r13 movq %rsi, %r15 movq %rdi, %r12 movl $0x7, %ebp movl $0x7, %edx movq %r13, %rdi xorl %esi, %esi callq 0x10560 movq %rax, %rdx subq %r13, %rdx testq %rax, %rax cmoveq %rbp, %rdx leaq 0x8(%rsp), %rbp movq %rbp, %rdi movq %r13, %rsi callq 0xa47a1 movq %r12, %rdi movq %r15, %rsi movq %rbp, %rdx callq 0x9d94c leaq 0x18(%rsp), %rax movq -0x10(%rax), %rdi cmpq %rax, %rdi je 0x9935b movq 0x18(%rsp), %rsi incq %rsi callq 0x104d0 incq %r15 movq %r12, %rdi movq %r15, %rsi movq %r14, %rdx movq %rbx, %rcx callq 0x99794 addq $0x28, %rsp popq %rbx popq %r12 popq %r13 popq %r14 popq %r15 popq %rbp retq movq %rax, %rbx leaq 0x18(%rsp), %rax movq -0x10(%rax), %rdi cmpq %rax, %rdi je 0x9939c movq 0x18(%rsp), %rsi incq %rsi callq 0x104d0 movq %rbx, %rdi callq 0x108d0
/embeddedartistry[P]printf/build_O3/_deps/catch2-src/src/catch2/../catch2/catch_message.hpp
void Catch::Capturer::captureValues<char [7], int, unsigned long>(unsigned long, char const (&) [7], int const&, unsigned long const&)
void captureValues( size_t index, T const& value, Ts const&... values ) { captureValue( index, Catch::Detail::stringify(value) ); captureValues( index+1, values... ); }
pushq %rbp pushq %r15 pushq %r14 pushq %r13 pushq %r12 pushq %rbx subq $0x28, %rsp movq %r8, %rbx movq %rcx, %r14 movq %rdx, %r13 movq %rsi, %r15 movq %rdi, %r12 movl $0x7, %ebp movl $0x7, %edx movq %r13, %rdi xorl %esi, %esi callq 0x10560 movq %rax, %rdx subq %r13, %rdx testq %rax, %rax cmoveq %rbp, %rdx leaq 0x8(%rsp), %rbp movq %rbp, %rdi movq %r13, %rsi callq 0xa47a1 movq %r12, %rdi movq %r15, %rsi movq %rbp, %rdx callq 0x9d94c leaq 0x18(%rsp), %rax movq -0x10(%rax), %rdi cmpq %rax, %rdi je 0x9941b movq 0x18(%rsp), %rsi incq %rsi callq 0x104d0 incq %r15 movq %r12, %rdi movq %r15, %rsi movq %r14, %rdx movq %rbx, %rcx callq 0x99824 addq $0x28, %rsp popq %rbx popq %r12 popq %r13 popq %r14 popq %r15 popq %rbp retq movq %rax, %rbx leaq 0x18(%rsp), %rax movq -0x10(%rax), %rdi cmpq %rax, %rdi je 0x9945c movq 0x18(%rsp), %rsi incq %rsi callq 0x104d0 movq %rbx, %rdi callq 0x108d0
/embeddedartistry[P]printf/build_O3/_deps/catch2-src/src/catch2/../catch2/catch_message.hpp
void Catch::Capturer::captureValues<char [4], int, unsigned int>(unsigned long, char const (&) [4], int const&, unsigned int const&)
void captureValues( size_t index, T const& value, Ts const&... values ) { captureValue( index, Catch::Detail::stringify(value) ); captureValues( index+1, values... ); }
pushq %rbp pushq %r15 pushq %r14 pushq %r13 pushq %r12 pushq %rbx subq $0x28, %rsp movq %r8, %rbx movq %rcx, %r14 movq %rdx, %r13 movq %rsi, %r15 movq %rdi, %r12 movl $0x4, %ebp movl $0x4, %edx movq %r13, %rdi xorl %esi, %esi callq 0x10560 movq %rax, %rdx subq %r13, %rdx testq %rax, %rax cmoveq %rbp, %rdx leaq 0x8(%rsp), %rbp movq %rbp, %rdi movq %r13, %rsi callq 0xa47a1 movq %r12, %rdi movq %r15, %rsi movq %rbp, %rdx callq 0x9d94c leaq 0x18(%rsp), %rax movq -0x10(%rax), %rdi cmpq %rax, %rdi je 0x994db movq 0x18(%rsp), %rsi incq %rsi callq 0x104d0 incq %r15 movq %r12, %rdi movq %r15, %rsi movq %r14, %rdx movq %rbx, %rcx callq 0x99794 addq $0x28, %rsp popq %rbx popq %r12 popq %r13 popq %r14 popq %r15 popq %rbp retq movq %rax, %rbx leaq 0x18(%rsp), %rax movq -0x10(%rax), %rdi cmpq %rax, %rdi je 0x9951c movq 0x18(%rsp), %rsi incq %rsi callq 0x104d0 movq %rbx, %rdi callq 0x108d0
/embeddedartistry[P]printf/build_O3/_deps/catch2-src/src/catch2/../catch2/catch_message.hpp
void Catch::Capturer::captureValues<char [4], int, char>(unsigned long, char const (&) [4], int const&, char const&)
void captureValues( size_t index, T const& value, Ts const&... values ) { captureValue( index, Catch::Detail::stringify(value) ); captureValues( index+1, values... ); }
pushq %rbp pushq %r15 pushq %r14 pushq %r13 pushq %r12 pushq %rbx subq $0x28, %rsp movq %r8, %rbx movq %rcx, %r14 movq %rdx, %r13 movq %rsi, %r15 movq %rdi, %r12 movl $0x4, %ebp movl $0x4, %edx movq %r13, %rdi xorl %esi, %esi callq 0x10560 movq %rax, %rdx subq %r13, %rdx testq %rax, %rax cmoveq %rbp, %rdx leaq 0x8(%rsp), %rbp movq %rbp, %rdi movq %r13, %rsi callq 0xa47a1 movq %r12, %rdi movq %r15, %rsi movq %rbp, %rdx callq 0x9d94c leaq 0x18(%rsp), %rax movq -0x10(%rax), %rdi cmpq %rax, %rdi je 0x9959b movq 0x18(%rsp), %rsi incq %rsi callq 0x104d0 incq %r15 movq %r12, %rdi movq %r15, %rsi movq %r14, %rdx movq %rbx, %rcx callq 0x998b4 addq $0x28, %rsp popq %rbx popq %r12 popq %r13 popq %r14 popq %r15 popq %rbp retq movq %rax, %rbx leaq 0x18(%rsp), %rax movq -0x10(%rax), %rdi cmpq %rax, %rdi je 0x995dc movq 0x18(%rsp), %rsi incq %rsi callq 0x104d0 movq %rbx, %rdi callq 0x108d0
/embeddedartistry[P]printf/build_O3/_deps/catch2-src/src/catch2/../catch2/catch_message.hpp
void Catch::Capturer::captureValues<int, char [6]>(unsigned long, int const&, char const (&) [6])
void captureValues( size_t index, T const& value, Ts const&... values ) { captureValue( index, Catch::Detail::stringify(value) ); captureValues( index+1, values... ); }
pushq %r15 pushq %r14 pushq %r12 pushq %rbx subq $0x28, %rsp movq %rcx, %rbx movq %rsi, %r14 movq %rdi, %r15 movl (%rdx), %esi leaq 0x8(%rsp), %r12 movq %r12, %rdi callq 0xa4cac movq %r15, %rdi movq %r14, %rsi movq %r12, %rdx callq 0x9d94c leaq 0x18(%rsp), %rax movq -0x10(%rax), %rdi cmpq %rax, %rdi je 0x99630 movq 0x18(%rsp), %rsi incq %rsi callq 0x104d0 incq %r14 movq %r15, %rdi movq %r14, %rsi movq %rbx, %rdx callq 0x98f8c addq $0x28, %rsp popq %rbx popq %r12 popq %r14 popq %r15 retq movq %rax, %rbx leaq 0x18(%rsp), %rax movq -0x10(%rax), %rdi cmpq %rax, %rdi je 0x9966b movq 0x18(%rsp), %rsi incq %rsi callq 0x104d0 movq %rbx, %rdi callq 0x108d0 nop
/embeddedartistry[P]printf/build_O3/_deps/catch2-src/src/catch2/../catch2/catch_message.hpp
void Catch::Capturer::captureValues<int, int>(unsigned long, int const&, int const&)
void captureValues( size_t index, T const& value, Ts const&... values ) { captureValue( index, Catch::Detail::stringify(value) ); captureValues( index+1, values... ); }
pushq %r15 pushq %r14 pushq %r12 pushq %rbx subq $0x28, %rsp movq %rcx, %rbx movq %rsi, %r14 movq %rdi, %r15 movl (%rdx), %esi leaq 0x8(%rsp), %r12 movq %r12, %rdi callq 0xa4cac movq %r15, %rdi movq %r14, %rsi movq %r12, %rdx callq 0x9d94c leaq 0x18(%rsp), %rax movq -0x10(%rax), %rdi cmpq %rax, %rdi je 0x996c0 movq 0x18(%rsp), %rsi incq %rsi callq 0x104d0 incq %r14 movq %r15, %rdi movq %r14, %rsi movq %rbx, %rdx callq 0x93948 addq $0x28, %rsp popq %rbx popq %r12 popq %r14 popq %r15 retq movq %rax, %rbx leaq 0x18(%rsp), %rax movq -0x10(%rax), %rdi cmpq %rax, %rdi je 0x996fb movq 0x18(%rsp), %rsi incq %rsi callq 0x104d0 movq %rbx, %rdi callq 0x108d0 nop
/embeddedartistry[P]printf/build_O3/_deps/catch2-src/src/catch2/../catch2/catch_message.hpp
void Catch::Capturer::captureValues<int, unsigned short>(unsigned long, int const&, unsigned short const&)
void captureValues( size_t index, T const& value, Ts const&... values ) { captureValue( index, Catch::Detail::stringify(value) ); captureValues( index+1, values... ); }
pushq %r15 pushq %r14 pushq %r12 pushq %rbx subq $0x28, %rsp movq %rcx, %rbx movq %rsi, %r14 movq %rdi, %r15 movl (%rdx), %esi leaq 0x8(%rsp), %r12 movq %r12, %rdi callq 0xa4cac movq %r15, %rdi movq %r14, %rsi movq %r12, %rdx callq 0x9d94c leaq 0x18(%rsp), %rax movq -0x10(%rax), %rdi cmpq %rax, %rdi je 0x99750 movq 0x18(%rsp), %rsi incq %rsi callq 0x104d0 incq %r14 movq %r15, %rdi movq %r14, %rsi movq %rbx, %rdx callq 0x97b6e addq $0x28, %rsp popq %rbx popq %r12 popq %r14 popq %r15 retq movq %rax, %rbx leaq 0x18(%rsp), %rax movq -0x10(%rax), %rdi cmpq %rax, %rdi je 0x9978b movq 0x18(%rsp), %rsi incq %rsi callq 0x104d0 movq %rbx, %rdi callq 0x108d0 nop
/embeddedartistry[P]printf/build_O3/_deps/catch2-src/src/catch2/../catch2/catch_message.hpp
void Catch::Capturer::captureValues<int, unsigned long>(unsigned long, int const&, unsigned long const&)
void captureValues( size_t index, T const& value, Ts const&... values ) { captureValue( index, Catch::Detail::stringify(value) ); captureValues( index+1, values... ); }
pushq %r15 pushq %r14 pushq %r12 pushq %rbx subq $0x28, %rsp movq %rcx, %rbx movq %rsi, %r14 movq %rdi, %r15 movl (%rdx), %esi leaq 0x8(%rsp), %r12 movq %r12, %rdi callq 0xa4cac movq %r15, %rdi movq %r14, %rsi movq %r12, %rdx callq 0x9d94c leaq 0x18(%rsp), %rax movq -0x10(%rax), %rdi cmpq %rax, %rdi je 0x99870 movq 0x18(%rsp), %rsi incq %rsi callq 0x104d0 incq %r14 movq %r15, %rdi movq %r14, %rsi movq %rbx, %rdx callq 0x97c3e addq $0x28, %rsp popq %rbx popq %r12 popq %r14 popq %r15 retq movq %rax, %rbx leaq 0x18(%rsp), %rax movq -0x10(%rax), %rdi cmpq %rax, %rdi je 0x998ab movq 0x18(%rsp), %rsi incq %rsi callq 0x104d0 movq %rbx, %rdi callq 0x108d0 nop
/embeddedartistry[P]printf/build_O3/_deps/catch2-src/src/catch2/../catch2/catch_message.hpp
void Catch::Capturer::captureValues<int, char>(unsigned long, int const&, char const&)
void captureValues( size_t index, T const& value, Ts const&... values ) { captureValue( index, Catch::Detail::stringify(value) ); captureValues( index+1, values... ); }
pushq %r15 pushq %r14 pushq %r12 pushq %rbx subq $0x28, %rsp movq %rcx, %rbx movq %rsi, %r14 movq %rdi, %r15 movl (%rdx), %esi leaq 0x8(%rsp), %r12 movq %r12, %rdi callq 0xa4cac movq %r15, %rdi movq %r14, %rsi movq %r12, %rdx callq 0x9d94c leaq 0x18(%rsp), %rax movq -0x10(%rax), %rdi cmpq %rax, %rdi je 0x99900 movq 0x18(%rsp), %rsi incq %rsi callq 0x104d0 incq %r14 movq %r15, %rdi movq %r14, %rsi movq %rbx, %rdx callq 0x97cb4 addq $0x28, %rsp popq %rbx popq %r12 popq %r14 popq %r15 retq movq %rax, %rbx leaq 0x18(%rsp), %rax movq -0x10(%rax), %rdi cmpq %rax, %rdi je 0x9993b movq 0x18(%rsp), %rsi incq %rsi callq 0x104d0 movq %rbx, %rdi callq 0x108d0 nop
/embeddedartistry[P]printf/build_O3/_deps/catch2-src/src/catch2/../catch2/catch_message.hpp
void Catch::Capturer::captureValues<char [6], char [6]>(unsigned long, char const (&) [6], char const (&) [6])
void captureValues( size_t index, T const& value, Ts const&... values ) { captureValue( index, Catch::Detail::stringify(value) ); captureValues( index+1, values... ); }
pushq %r15 pushq %r14 pushq %r13 pushq %r12 pushq %rbx subq $0x20, %rsp movq %rcx, %rbx movq %rdx, %r12 movq %rsi, %r14 movq %rdi, %r15 movl $0x6, %r13d movl $0x6, %edx movq %r12, %rdi xorl %esi, %esi callq 0x10560 movq %rax, %rdx subq %r12, %rdx testq %rax, %rax cmoveq %r13, %rdx movq %rsp, %r13 movq %r13, %rdi movq %r12, %rsi callq 0xa47a1 movq %r15, %rdi movq %r14, %rsi movq %r13, %rdx callq 0x9d94c leaq 0x10(%rsp), %rax movq -0x10(%rax), %rdi cmpq %rax, %rdi je 0x999b6 movq 0x10(%rsp), %rsi incq %rsi callq 0x104d0 incq %r14 movq %r15, %rdi movq %r14, %rsi movq %rbx, %rdx callq 0x98f8c addq $0x20, %rsp popq %rbx popq %r12 popq %r13 popq %r14 popq %r15 retq movq %rax, %rbx leaq 0x10(%rsp), %rax movq -0x10(%rax), %rdi cmpq %rax, %rdi je 0x999f3 movq 0x10(%rsp), %rsi incq %rsi callq 0x104d0 movq %rbx, %rdi callq 0x108d0 nop
/embeddedartistry[P]printf/build_O3/_deps/catch2-src/src/catch2/../catch2/catch_message.hpp
void Catch::Capturer::captureValues<char [6], unsigned int>(unsigned long, char const (&) [6], unsigned int const&)
void captureValues( size_t index, T const& value, Ts const&... values ) { captureValue( index, Catch::Detail::stringify(value) ); captureValues( index+1, values... ); }
pushq %r15 pushq %r14 pushq %r13 pushq %r12 pushq %rbx subq $0x20, %rsp movq %rcx, %rbx movq %rdx, %r12 movq %rsi, %r14 movq %rdi, %r15 movl $0x6, %r13d movl $0x6, %edx movq %r12, %rdi xorl %esi, %esi callq 0x10560 movq %rax, %rdx subq %r12, %rdx testq %rax, %rax cmoveq %r13, %rdx movq %rsp, %r13 movq %r13, %rdi movq %r12, %rsi callq 0xa47a1 movq %r15, %rdi movq %r14, %rsi movq %r13, %rdx callq 0x9d94c leaq 0x10(%rsp), %rax movq -0x10(%rax), %rdi cmpq %rax, %rdi je 0x99a6e movq 0x10(%rsp), %rsi incq %rsi callq 0x104d0 incq %r14 movq %r15, %rdi movq %r14, %rsi movq %rbx, %rdx callq 0x9595e addq $0x20, %rsp popq %rbx popq %r12 popq %r13 popq %r14 popq %r15 retq movq %rax, %rbx leaq 0x10(%rsp), %rax movq -0x10(%rax), %rdi cmpq %rax, %rdi je 0x99aab movq 0x10(%rsp), %rsi incq %rsi callq 0x104d0 movq %rbx, %rdi callq 0x108d0 nop
/embeddedartistry[P]printf/build_O3/_deps/catch2-src/src/catch2/../catch2/catch_message.hpp
void Catch::Capturer::captureValues<char [6], char>(unsigned long, char const (&) [6], char const&)
void captureValues( size_t index, T const& value, Ts const&... values ) { captureValue( index, Catch::Detail::stringify(value) ); captureValues( index+1, values... ); }
pushq %r15 pushq %r14 pushq %r13 pushq %r12 pushq %rbx subq $0x20, %rsp movq %rcx, %rbx movq %rdx, %r12 movq %rsi, %r14 movq %rdi, %r15 movl $0x6, %r13d movl $0x6, %edx movq %r12, %rdi xorl %esi, %esi callq 0x10560 movq %rax, %rdx subq %r12, %rdx testq %rax, %rax cmoveq %r13, %rdx movq %rsp, %r13 movq %r13, %rdi movq %r12, %rsi callq 0xa47a1 movq %r15, %rdi movq %r14, %rsi movq %r13, %rdx callq 0x9d94c leaq 0x10(%rsp), %rax movq -0x10(%rax), %rdi cmpq %rax, %rdi je 0x99b26 movq 0x10(%rsp), %rsi incq %rsi callq 0x104d0 incq %r14 movq %r15, %rdi movq %r14, %rsi movq %rbx, %rdx callq 0x97cb4 addq $0x20, %rsp popq %rbx popq %r12 popq %r13 popq %r14 popq %r15 retq movq %rax, %rbx leaq 0x10(%rsp), %rax movq -0x10(%rax), %rdi cmpq %rax, %rdi je 0x99b63 movq 0x10(%rsp), %rsi incq %rsi callq 0x104d0 movq %rbx, %rdi callq 0x108d0 nop
/embeddedartistry[P]printf/build_O3/_deps/catch2-src/src/catch2/../catch2/catch_message.hpp
void Catch::Capturer::captureValues<char [20], int, int, int>(unsigned long, char const (&) [20], int const&, int const&, int const&)
void captureValues( size_t index, T const& value, Ts const&... values ) { captureValue( index, Catch::Detail::stringify(value) ); captureValues( index+1, values... ); }
pushq %rbp pushq %r15 pushq %r14 pushq %r13 pushq %r12 pushq %rbx subq $0x28, %rsp movq %r9, (%rsp) movq %r8, %r14 movq %rcx, %r15 movq %rdx, %rbp movq %rsi, %r12 movq %rdi, %r13 movl $0x14, %ebx movl $0x14, %edx movq %rbp, %rdi xorl %esi, %esi callq 0x10560 movq %rax, %rdx subq %rbp, %rdx testq %rax, %rax cmoveq %rbx, %rdx leaq 0x8(%rsp), %rbx movq %rbx, %rdi movq %rbp, %rsi callq 0xa47a1 movq %r13, %rdi movq %r12, %rsi movq %rbx, %rdx callq 0x9d94c leaq 0x18(%rsp), %rax movq -0x10(%rax), %rdi cmpq %rax, %rdi je 0x99caf movq 0x18(%rsp), %rsi incq %rsi callq 0x104d0 incq %r12 movq %r13, %rdi movq %r12, %rsi movq %r15, %rdx movq %r14, %rcx movq (%rsp), %r8 callq 0x99cfc addq $0x28, %rsp popq %rbx popq %r12 popq %r13 popq %r14 popq %r15 popq %rbp retq movq %rax, %rbx leaq 0x18(%rsp), %rax movq -0x10(%rax), %rdi cmpq %rax, %rdi je 0x99cf4 movq 0x18(%rsp), %rsi incq %rsi callq 0x104d0 movq %rbx, %rdi callq 0x108d0
/embeddedartistry[P]printf/build_O3/_deps/catch2-src/src/catch2/../catch2/catch_message.hpp
void Catch::Capturer::captureValues<char [7], double>(unsigned long, char const (&) [7], double const&)
void captureValues( size_t index, T const& value, Ts const&... values ) { captureValue( index, Catch::Detail::stringify(value) ); captureValues( index+1, values... ); }
pushq %r15 pushq %r14 pushq %r13 pushq %r12 pushq %rbx subq $0x20, %rsp movq %rcx, %rbx movq %rdx, %r12 movq %rsi, %r14 movq %rdi, %r15 movl $0x7, %r13d movl $0x7, %edx movq %r12, %rdi xorl %esi, %esi callq 0x10560 movq %rax, %rdx subq %r12, %rdx testq %rax, %rax cmoveq %r13, %rdx movq %rsp, %r13 movq %r13, %rdi movq %r12, %rsi callq 0xa47a1 movq %r15, %rdi movq %r14, %rsi movq %r13, %rdx callq 0x9d94c leaq 0x10(%rsp), %rax movq -0x10(%rax), %rdi cmpq %rax, %rdi je 0x99f76 movq 0x10(%rsp), %rsi incq %rsi callq 0x104d0 incq %r14 movq %r15, %rdi movq %r14, %rsi movq %rbx, %rdx callq 0x97604 addq $0x20, %rsp popq %rbx popq %r12 popq %r13 popq %r14 popq %r15 retq movq %rax, %rbx leaq 0x10(%rsp), %rax movq -0x10(%rax), %rdi cmpq %rax, %rdi je 0x99fb3 movq 0x10(%rsp), %rsi incq %rsi callq 0x104d0 movq %rbx, %rdi callq 0x108d0 nop
/embeddedartistry[P]printf/build_O3/_deps/catch2-src/src/catch2/../catch2/catch_message.hpp
void Catch::Capturer::captureValues<char [5], char [14]>(unsigned long, char const (&) [5], char const (&) [14])
void captureValues( size_t index, T const& value, Ts const&... values ) { captureValue( index, Catch::Detail::stringify(value) ); captureValues( index+1, values... ); }
pushq %r15 pushq %r14 pushq %r13 pushq %r12 pushq %rbx subq $0x20, %rsp movq %rcx, %rbx movq %rdx, %r12 movq %rsi, %r14 movq %rdi, %r15 movl $0x5, %r13d movl $0x5, %edx movq %r12, %rdi xorl %esi, %esi callq 0x10560 movq %rax, %rdx subq %r12, %rdx testq %rax, %rax cmoveq %r13, %rdx movq %rsp, %r13 movq %r13, %rdi movq %r12, %rsi callq 0xa47a1 movq %r15, %rdi movq %r14, %rsi movq %r13, %rdx callq 0x9d94c leaq 0x10(%rsp), %rax movq -0x10(%rax), %rdi cmpq %rax, %rdi je 0x9a02e movq 0x10(%rsp), %rsi incq %rsi callq 0x104d0 incq %r14 movq %r15, %rdi movq %r14, %rsi movq %rbx, %rdx callq 0x97acc addq $0x20, %rsp popq %rbx popq %r12 popq %r13 popq %r14 popq %r15 retq movq %rax, %rbx leaq 0x10(%rsp), %rax movq -0x10(%rax), %rdi cmpq %rax, %rdi je 0x9a06b movq 0x10(%rsp), %rsi incq %rsi callq 0x104d0 movq %rbx, %rdi callq 0x108d0 nop
/embeddedartistry[P]printf/build_O3/_deps/catch2-src/src/catch2/../catch2/catch_message.hpp
void Catch::Capturer::captureValues<char [7], char [14]>(unsigned long, char const (&) [7], char const (&) [14])
void captureValues( size_t index, T const& value, Ts const&... values ) { captureValue( index, Catch::Detail::stringify(value) ); captureValues( index+1, values... ); }
pushq %r15 pushq %r14 pushq %r13 pushq %r12 pushq %rbx subq $0x20, %rsp movq %rcx, %rbx movq %rdx, %r12 movq %rsi, %r14 movq %rdi, %r15 movl $0x7, %r13d movl $0x7, %edx movq %r12, %rdi xorl %esi, %esi callq 0x10560 movq %rax, %rdx subq %r12, %rdx testq %rax, %rax cmoveq %r13, %rdx movq %rsp, %r13 movq %r13, %rdi movq %r12, %rsi callq 0xa47a1 movq %r15, %rdi movq %r14, %rsi movq %r13, %rdx callq 0x9d94c leaq 0x10(%rsp), %rax movq -0x10(%rax), %rdi cmpq %rax, %rdi je 0x9a0e6 movq 0x10(%rsp), %rsi incq %rsi callq 0x104d0 incq %r14 movq %r15, %rdi movq %r14, %rsi movq %rbx, %rdx callq 0x97acc addq $0x20, %rsp popq %rbx popq %r12 popq %r13 popq %r14 popq %r15 retq movq %rax, %rbx leaq 0x10(%rsp), %rax movq -0x10(%rax), %rdi cmpq %rax, %rdi je 0x9a123 movq 0x10(%rsp), %rsi incq %rsi callq 0x104d0 movq %rbx, %rdi callq 0x108d0 nop
/embeddedartistry[P]printf/build_O3/_deps/catch2-src/src/catch2/../catch2/catch_message.hpp
void Catch::Capturer::captureValues<char [6], char [14]>(unsigned long, char const (&) [6], char const (&) [14])
void captureValues( size_t index, T const& value, Ts const&... values ) { captureValue( index, Catch::Detail::stringify(value) ); captureValues( index+1, values... ); }
pushq %r15 pushq %r14 pushq %r13 pushq %r12 pushq %rbx subq $0x20, %rsp movq %rcx, %rbx movq %rdx, %r12 movq %rsi, %r14 movq %rdi, %r15 movl $0x6, %r13d movl $0x6, %edx movq %r12, %rdi xorl %esi, %esi callq 0x10560 movq %rax, %rdx subq %r12, %rdx testq %rax, %rax cmoveq %r13, %rdx movq %rsp, %r13 movq %r13, %rdi movq %r12, %rsi callq 0xa47a1 movq %r15, %rdi movq %r14, %rsi movq %r13, %rdx callq 0x9d94c leaq 0x10(%rsp), %rax movq -0x10(%rax), %rdi cmpq %rax, %rdi je 0x9a19e movq 0x10(%rsp), %rsi incq %rsi callq 0x104d0 incq %r14 movq %r15, %rdi movq %r14, %rsi movq %rbx, %rdx callq 0x97acc addq $0x20, %rsp popq %rbx popq %r12 popq %r13 popq %r14 popq %r15 retq movq %rax, %rbx leaq 0x10(%rsp), %rax movq -0x10(%rax), %rdi cmpq %rax, %rdi je 0x9a1db movq 0x10(%rsp), %rsi incq %rsi callq 0x104d0 movq %rbx, %rdi callq 0x108d0 nop
/embeddedartistry[P]printf/build_O3/_deps/catch2-src/src/catch2/../catch2/catch_message.hpp
void Catch::Capturer::captureValues<char [4], double>(unsigned long, char const (&) [4], double const&)
void captureValues( size_t index, T const& value, Ts const&... values ) { captureValue( index, Catch::Detail::stringify(value) ); captureValues( index+1, values... ); }
pushq %r15 pushq %r14 pushq %r13 pushq %r12 pushq %rbx subq $0x20, %rsp movq %rcx, %rbx movq %rdx, %r12 movq %rsi, %r14 movq %rdi, %r15 movl $0x4, %r13d movl $0x4, %edx movq %r12, %rdi xorl %esi, %esi callq 0x10560 movq %rax, %rdx subq %r12, %rdx testq %rax, %rax cmoveq %r13, %rdx movq %rsp, %r13 movq %r13, %rdi movq %r12, %rsi callq 0xa47a1 movq %r15, %rdi movq %r14, %rsi movq %r13, %rdx callq 0x9d94c leaq 0x10(%rsp), %rax movq -0x10(%rax), %rdi cmpq %rax, %rdi je 0x9a316 movq 0x10(%rsp), %rsi incq %rsi callq 0x104d0 incq %r14 movq %r15, %rdi movq %r14, %rsi movq %rbx, %rdx callq 0x97604 addq $0x20, %rsp popq %rbx popq %r12 popq %r13 popq %r14 popq %r15 retq movq %rax, %rbx leaq 0x10(%rsp), %rax movq -0x10(%rax), %rdi cmpq %rax, %rdi je 0x9a353 movq 0x10(%rsp), %rsi incq %rsi callq 0x104d0 movq %rbx, %rdi callq 0x108d0 nop
/embeddedartistry[P]printf/build_O3/_deps/catch2-src/src/catch2/../catch2/catch_message.hpp
void Catch::Capturer::captureValues<char [5], double>(unsigned long, char const (&) [5], double const&)
void captureValues( size_t index, T const& value, Ts const&... values ) { captureValue( index, Catch::Detail::stringify(value) ); captureValues( index+1, values... ); }
pushq %r15 pushq %r14 pushq %r13 pushq %r12 pushq %rbx subq $0x20, %rsp movq %rcx, %rbx movq %rdx, %r12 movq %rsi, %r14 movq %rdi, %r15 movl $0x5, %r13d movl $0x5, %edx movq %r12, %rdi xorl %esi, %esi callq 0x10560 movq %rax, %rdx subq %r12, %rdx testq %rax, %rax cmoveq %r13, %rdx movq %rsp, %r13 movq %r13, %rdi movq %r12, %rsi callq 0xa47a1 movq %r15, %rdi movq %r14, %rsi movq %r13, %rdx callq 0x9d94c leaq 0x10(%rsp), %rax movq -0x10(%rax), %rdi cmpq %rax, %rdi je 0x9a3ce movq 0x10(%rsp), %rsi incq %rsi callq 0x104d0 incq %r14 movq %r15, %rdi movq %r14, %rsi movq %rbx, %rdx callq 0x97604 addq $0x20, %rsp popq %rbx popq %r12 popq %r13 popq %r14 popq %r15 retq movq %rax, %rbx leaq 0x10(%rsp), %rax movq -0x10(%rax), %rdi cmpq %rax, %rdi je 0x9a40b movq 0x10(%rsp), %rsi incq %rsi callq 0x104d0 movq %rbx, %rdi callq 0x108d0 nop
/embeddedartistry[P]printf/build_O3/_deps/catch2-src/src/catch2/../catch2/catch_message.hpp
void Catch::Capturer::captureValues<char [6], double>(unsigned long, char const (&) [6], double const&)
void captureValues( size_t index, T const& value, Ts const&... values ) { captureValue( index, Catch::Detail::stringify(value) ); captureValues( index+1, values... ); }
pushq %r15 pushq %r14 pushq %r13 pushq %r12 pushq %rbx subq $0x20, %rsp movq %rcx, %rbx movq %rdx, %r12 movq %rsi, %r14 movq %rdi, %r15 movl $0x6, %r13d movl $0x6, %edx movq %r12, %rdi xorl %esi, %esi callq 0x10560 movq %rax, %rdx subq %r12, %rdx testq %rax, %rax cmoveq %r13, %rdx movq %rsp, %r13 movq %r13, %rdi movq %r12, %rsi callq 0xa47a1 movq %r15, %rdi movq %r14, %rsi movq %r13, %rdx callq 0x9d94c leaq 0x10(%rsp), %rax movq -0x10(%rax), %rdi cmpq %rax, %rdi je 0x9a486 movq 0x10(%rsp), %rsi incq %rsi callq 0x104d0 incq %r14 movq %r15, %rdi movq %r14, %rsi movq %rbx, %rdx callq 0x97604 addq $0x20, %rsp popq %rbx popq %r12 popq %r13 popq %r14 popq %r15 retq movq %rax, %rbx leaq 0x10(%rsp), %rax movq -0x10(%rax), %rdi cmpq %rax, %rdi je 0x9a4c3 movq 0x10(%rsp), %rsi incq %rsi callq 0x104d0 movq %rbx, %rdi callq 0x108d0 nop
/embeddedartistry[P]printf/build_O3/_deps/catch2-src/src/catch2/../catch2/catch_message.hpp
void Catch::Capturer::captureValues<char [11], double>(unsigned long, char const (&) [11], double const&)
void captureValues( size_t index, T const& value, Ts const&... values ) { captureValue( index, Catch::Detail::stringify(value) ); captureValues( index+1, values... ); }
pushq %r15 pushq %r14 pushq %r13 pushq %r12 pushq %rbx subq $0x20, %rsp movq %rcx, %rbx movq %rdx, %r12 movq %rsi, %r14 movq %rdi, %r15 movl $0xb, %r13d movl $0xb, %edx movq %r12, %rdi xorl %esi, %esi callq 0x10560 movq %rax, %rdx subq %r12, %rdx testq %rax, %rax cmoveq %r13, %rdx movq %rsp, %r13 movq %r13, %rdi movq %r12, %rsi callq 0xa47a1 movq %r15, %rdi movq %r14, %rsi movq %r13, %rdx callq 0x9d94c leaq 0x10(%rsp), %rax movq -0x10(%rax), %rdi cmpq %rax, %rdi je 0x9a6b6 movq 0x10(%rsp), %rsi incq %rsi callq 0x104d0 incq %r14 movq %r15, %rdi movq %r14, %rsi movq %rbx, %rdx callq 0x97604 addq $0x20, %rsp popq %rbx popq %r12 popq %r13 popq %r14 popq %r15 retq movq %rax, %rbx leaq 0x10(%rsp), %rax movq -0x10(%rax), %rdi cmpq %rax, %rdi je 0x9a6f3 movq 0x10(%rsp), %rsi incq %rsi callq 0x104d0 movq %rbx, %rdi callq 0x108d0 nop
/embeddedartistry[P]printf/build_O3/_deps/catch2-src/src/catch2/../catch2/catch_message.hpp
void Catch::Capturer::captureValues<char [6], long double>(unsigned long, char const (&) [6], long double const&)
void captureValues( size_t index, T const& value, Ts const&... values ) { captureValue( index, Catch::Detail::stringify(value) ); captureValues( index+1, values... ); }
pushq %r15 pushq %r14 pushq %r13 pushq %r12 pushq %rbx subq $0x20, %rsp movq %rcx, %rbx movq %rdx, %r12 movq %rsi, %r14 movq %rdi, %r15 movl $0x6, %r13d movl $0x6, %edx movq %r12, %rdi xorl %esi, %esi callq 0x10560 movq %rax, %rdx subq %r12, %rdx testq %rax, %rax cmoveq %r13, %rdx movq %rsp, %r13 movq %r13, %rdi movq %r12, %rsi callq 0xa47a1 movq %r15, %rdi movq %r14, %rsi movq %r13, %rdx callq 0x9d94c leaq 0x10(%rsp), %rax movq -0x10(%rax), %rdi cmpq %rax, %rdi je 0x9a826 movq 0x10(%rsp), %rsi incq %rsi callq 0x104d0 incq %r14 movq %r15, %rdi movq %r14, %rsi movq %rbx, %rdx callq 0x9a86c addq $0x20, %rsp popq %rbx popq %r12 popq %r13 popq %r14 popq %r15 retq movq %rax, %rbx leaq 0x10(%rsp), %rax movq -0x10(%rax), %rdi cmpq %rax, %rdi je 0x9a863 movq 0x10(%rsp), %rsi incq %rsi callq 0x104d0 movq %rbx, %rdi callq 0x108d0 nop
/embeddedartistry[P]printf/build_O3/_deps/catch2-src/src/catch2/../catch2/catch_message.hpp
void Catch::Capturer::captureValues<char [5], long long>(unsigned long, char const (&) [5], long long const&)
void captureValues( size_t index, T const& value, Ts const&... values ) { captureValue( index, Catch::Detail::stringify(value) ); captureValues( index+1, values... ); }
pushq %r15 pushq %r14 pushq %r13 pushq %r12 pushq %rbx subq $0x20, %rsp movq %rcx, %rbx movq %rdx, %r12 movq %rsi, %r14 movq %rdi, %r15 movl $0x5, %r13d movl $0x5, %edx movq %r12, %rdi xorl %esi, %esi callq 0x10560 movq %rax, %rdx subq %r12, %rdx testq %rax, %rax cmoveq %r13, %rdx movq %rsp, %r13 movq %r13, %rdi movq %r12, %rsi callq 0xa47a1 movq %r15, %rdi movq %r14, %rsi movq %r13, %rdx callq 0x9d94c leaq 0x10(%rsp), %rax movq -0x10(%rax), %rdi cmpq %rax, %rdi je 0x9aa70 movq 0x10(%rsp), %rsi incq %rsi callq 0x104d0 incq %r14 movq %r15, %rdi movq %r14, %rsi movq %rbx, %rdx callq 0x98430 addq $0x20, %rsp popq %rbx popq %r12 popq %r13 popq %r14 popq %r15 retq movq %rax, %rbx leaq 0x10(%rsp), %rax movq -0x10(%rax), %rdi cmpq %rax, %rdi je 0x9aaad movq 0x10(%rsp), %rsi incq %rsi callq 0x104d0 movq %rbx, %rdi callq 0x108d0 nop
/embeddedartistry[P]printf/build_O3/_deps/catch2-src/src/catch2/../catch2/catch_message.hpp
void Catch::Capturer::captureValues<char [3], char>(unsigned long, char const (&) [3], char const&)
void captureValues( size_t index, T const& value, Ts const&... values ) { captureValue( index, Catch::Detail::stringify(value) ); captureValues( index+1, values... ); }
pushq %r15 pushq %r14 pushq %r13 pushq %r12 pushq %rbx subq $0x20, %rsp movq %rcx, %rbx movq %rdx, %r12 movq %rsi, %r14 movq %rdi, %r15 movl $0x3, %r13d movl $0x3, %edx movq %r12, %rdi xorl %esi, %esi callq 0x10560 movq %rax, %rdx subq %r12, %rdx testq %rax, %rax cmoveq %r13, %rdx movq %rsp, %r13 movq %r13, %rdi movq %r12, %rsi callq 0xa47a1 movq %r15, %rdi movq %r14, %rsi movq %r13, %rdx callq 0x9d94c leaq 0x10(%rsp), %rax movq -0x10(%rax), %rdi cmpq %rax, %rdi je 0x9ac98 movq 0x10(%rsp), %rsi incq %rsi callq 0x104d0 incq %r14 movq %r15, %rdi movq %r14, %rsi movq %rbx, %rdx callq 0x97cb4 addq $0x20, %rsp popq %rbx popq %r12 popq %r13 popq %r14 popq %r15 retq movq %rax, %rbx leaq 0x10(%rsp), %rax movq -0x10(%rax), %rdi cmpq %rax, %rdi je 0x9acd5 movq 0x10(%rsp), %rsi incq %rsi callq 0x104d0 movq %rbx, %rdi callq 0x108d0 nop
/embeddedartistry[P]printf/build_O3/_deps/catch2-src/src/catch2/../catch2/catch_message.hpp
void Catch::Capturer::captureValues<char [4], unsigned short>(unsigned long, char const (&) [4], unsigned short const&)
void captureValues( size_t index, T const& value, Ts const&... values ) { captureValue( index, Catch::Detail::stringify(value) ); captureValues( index+1, values... ); }
pushq %r15 pushq %r14 pushq %r13 pushq %r12 pushq %rbx subq $0x20, %rsp movq %rcx, %rbx movq %rdx, %r12 movq %rsi, %r14 movq %rdi, %r15 movl $0x4, %r13d movl $0x4, %edx movq %r12, %rdi xorl %esi, %esi callq 0x10560 movq %rax, %rdx subq %r12, %rdx testq %rax, %rax cmoveq %r13, %rdx movq %rsp, %r13 movq %r13, %rdi movq %r12, %rsi callq 0xa47a1 movq %r15, %rdi movq %r14, %rsi movq %r13, %rdx callq 0x9d94c leaq 0x10(%rsp), %rax movq -0x10(%rax), %rdi cmpq %rax, %rdi je 0x9aec0 movq 0x10(%rsp), %rsi incq %rsi callq 0x104d0 incq %r14 movq %r15, %rdi movq %r14, %rsi movq %rbx, %rdx callq 0x97b6e addq $0x20, %rsp popq %rbx popq %r12 popq %r13 popq %r14 popq %r15 retq movq %rax, %rbx leaq 0x10(%rsp), %rax movq -0x10(%rax), %rdi cmpq %rax, %rdi je 0x9aefd movq 0x10(%rsp), %rsi incq %rsi callq 0x104d0 movq %rbx, %rdi callq 0x108d0 nop
/embeddedartistry[P]printf/build_O3/_deps/catch2-src/src/catch2/../catch2/catch_message.hpp
void Catch::Capturer::captureValues<char [7]>(unsigned long, char const (&) [7])
void captureValues( size_t index, T const& value ) { captureValue( index, Catch::Detail::stringify( value ) ); }
pushq %r15 pushq %r14 pushq %r12 pushq %rbx subq $0x28, %rsp movq %rdx, %rbx movq %rsi, %r14 movq %rdi, %r15 movl $0x7, %r12d movl $0x7, %edx movq %rbx, %rdi xorl %esi, %esi callq 0x10560 movq %rax, %rdx subq %rbx, %rdx testq %rax, %rax cmoveq %r12, %rdx leaq 0x8(%rsp), %r12 movq %r12, %rdi movq %rbx, %rsi callq 0xa47a1 movq %r15, %rdi movq %r14, %rsi movq %r12, %rdx callq 0x9d94c leaq 0x18(%rsp), %rax movq -0x10(%rax), %rdi cmpq %rax, %rdi je 0x9b03d movq 0x18(%rsp), %rsi incq %rsi callq 0x104d0 addq $0x28, %rsp popq %rbx popq %r12 popq %r14 popq %r15 retq movq %rax, %rbx leaq 0x18(%rsp), %rax movq -0x10(%rax), %rdi cmpq %rax, %rdi je 0x9b067 movq 0x18(%rsp), %rsi incq %rsi callq 0x104d0 movq %rbx, %rdi callq 0x108d0 nop
/embeddedartistry[P]printf/build_O3/_deps/catch2-src/src/catch2/../catch2/catch_message.hpp
void Catch::Capturer::captureValues<unsigned char>(unsigned long, unsigned char const&)
void captureValues( size_t index, T const& value ) { captureValue( index, Catch::Detail::stringify( value ) ); }
pushq %r15 pushq %r14 pushq %rbx subq $0x20, %rsp movq %rsi, %rbx movq %rdi, %r14 movzbl (%rdx), %esi movq %rsp, %r15 movq %r15, %rdi callq 0xa4f3e movq %r14, %rdi movq %rbx, %rsi movq %r15, %rdx callq 0x9d94c leaq 0x10(%rsp), %rax movq -0x10(%rax), %rdi cmpq %rax, %rdi je 0x9b0b6 movq 0x10(%rsp), %rsi incq %rsi callq 0x104d0 addq $0x20, %rsp popq %rbx popq %r14 popq %r15 retq movq %rax, %rbx leaq 0x10(%rsp), %rax movq -0x10(%rax), %rdi cmpq %rax, %rdi je 0x9b0de movq 0x10(%rsp), %rsi incq %rsi callq 0x104d0 movq %rbx, %rdi callq 0x108d0
/embeddedartistry[P]printf/build_O3/_deps/catch2-src/src/catch2/../catch2/catch_message.hpp
void Catch::Capturer::captureValues<char [3], void const*>(unsigned long, char const (&) [3], void const* const&)
void captureValues( size_t index, T const& value, Ts const&... values ) { captureValue( index, Catch::Detail::stringify(value) ); captureValues( index+1, values... ); }
pushq %r15 pushq %r14 pushq %r13 pushq %r12 pushq %rbx subq $0x20, %rsp movq %rcx, %rbx movq %rdx, %r12 movq %rsi, %r14 movq %rdi, %r15 movl $0x3, %r13d movl $0x3, %edx movq %r12, %rdi xorl %esi, %esi callq 0x10560 movq %rax, %rdx subq %r12, %rdx testq %rax, %rax cmoveq %r13, %rdx movq %rsp, %r13 movq %r13, %rdi movq %r12, %rsi callq 0xa47a1 movq %r15, %rdi movq %r14, %rsi movq %r13, %rdx callq 0x9d94c leaq 0x10(%rsp), %rax movq -0x10(%rax), %rdi cmpq %rax, %rdi je 0x9b430 movq 0x10(%rsp), %rsi incq %rsi callq 0x104d0 incq %r14 movq %r15, %rdi movq %r14, %rsi movq %rbx, %rdx callq 0x9b740 addq $0x20, %rsp popq %rbx popq %r12 popq %r13 popq %r14 popq %r15 retq movq %rax, %rbx leaq 0x10(%rsp), %rax movq -0x10(%rax), %rdi cmpq %rax, %rdi je 0x9b46d movq 0x10(%rsp), %rsi incq %rsi callq 0x104d0 movq %rbx, %rdi callq 0x108d0 nop
/embeddedartistry[P]printf/build_O3/_deps/catch2-src/src/catch2/../catch2/catch_message.hpp
void Catch::Capturer::captureValues<void*>(unsigned long, void* const&)
void captureValues( size_t index, T const& value ) { captureValue( index, Catch::Detail::stringify( value ) ); }
pushq %r15 pushq %r14 pushq %rbx subq $0x20, %rsp movq %rsi, %rbx movq %rdi, %r14 movq (%rdx), %rsi movq %rsp, %r15 movq %r15, %rdi callq 0x9b5a4 movq %r14, %rdi movq %rbx, %rsi movq %r15, %rdx callq 0x9d94c leaq 0x10(%rsp), %rax movq -0x10(%rax), %rdi cmpq %rax, %rdi je 0x9b574 movq 0x10(%rsp), %rsi incq %rsi callq 0x104d0 addq $0x20, %rsp popq %rbx popq %r14 popq %r15 retq movq %rax, %rbx leaq 0x10(%rsp), %rax movq -0x10(%rax), %rdi cmpq %rax, %rdi je 0x9b59c movq 0x10(%rsp), %rsi incq %rsi callq 0x104d0 movq %rbx, %rdi callq 0x108d0
/embeddedartistry[P]printf/build_O3/_deps/catch2-src/src/catch2/../catch2/catch_message.hpp
void Catch::Capturer::captureValues<char [6], void*, void*>(unsigned long, char const (&) [6], void* const&, void* const&)
void captureValues( size_t index, T const& value, Ts const&... values ) { captureValue( index, Catch::Detail::stringify(value) ); captureValues( index+1, values... ); }
pushq %rbp pushq %r15 pushq %r14 pushq %r13 pushq %r12 pushq %rbx subq $0x28, %rsp movq %r8, %rbx movq %rcx, %r14 movq %rdx, %r13 movq %rsi, %r15 movq %rdi, %r12 movl $0x6, %ebp movl $0x6, %edx movq %r13, %rdi xorl %esi, %esi callq 0x10560 movq %rax, %rdx subq %r13, %rdx testq %rax, %rax cmoveq %rbp, %rdx leaq 0x8(%rsp), %rbp movq %rbp, %rdi movq %r13, %rsi callq 0xa47a1 movq %r12, %rdi movq %r15, %rsi movq %rbp, %rdx callq 0x9d94c leaq 0x18(%rsp), %rax movq -0x10(%rax), %rdi cmpq %rax, %rdi je 0x9b667 movq 0x18(%rsp), %rsi incq %rsi callq 0x104d0 incq %r15 movq %r12, %rdi movq %r15, %rsi movq %r14, %rdx movq %rbx, %rcx callq 0x9b6b0 addq $0x28, %rsp popq %rbx popq %r12 popq %r13 popq %r14 popq %r15 popq %rbp retq movq %rax, %rbx leaq 0x18(%rsp), %rax movq -0x10(%rax), %rdi cmpq %rax, %rdi je 0x9b6a8 movq 0x18(%rsp), %rsi incq %rsi callq 0x104d0 movq %rbx, %rdi callq 0x108d0
/embeddedartistry[P]printf/build_O3/_deps/catch2-src/src/catch2/../catch2/catch_message.hpp
void Catch::Capturer::captureValues<void*, void*>(unsigned long, void* const&, void* const&)
void captureValues( size_t index, T const& value, Ts const&... values ) { captureValue( index, Catch::Detail::stringify(value) ); captureValues( index+1, values... ); }
pushq %r15 pushq %r14 pushq %r12 pushq %rbx subq $0x28, %rsp movq %rcx, %rbx movq %rsi, %r14 movq %rdi, %r15 movq (%rdx), %rsi leaq 0x8(%rsp), %r12 movq %r12, %rdi callq 0x9b5a4 movq %r15, %rdi movq %r14, %rsi movq %r12, %rdx callq 0x9d94c leaq 0x18(%rsp), %rax movq -0x10(%rax), %rdi cmpq %rax, %rdi je 0x9b6fd movq 0x18(%rsp), %rsi incq %rsi callq 0x104d0 incq %r14 movq %r15, %rdi movq %r14, %rsi movq %rbx, %rdx callq 0x9b52e addq $0x28, %rsp popq %rbx popq %r12 popq %r14 popq %r15 retq movq %rax, %rbx leaq 0x18(%rsp), %rax movq -0x10(%rax), %rdi cmpq %rax, %rdi je 0x9b738 movq 0x18(%rsp), %rsi incq %rsi callq 0x104d0 movq %rbx, %rdi callq 0x108d0
/embeddedartistry[P]printf/build_O3/_deps/catch2-src/src/catch2/../catch2/catch_message.hpp
dropt_misuse
void dropt_misuse(const char* message, const char* filename, int line) { #ifdef NDEBUG fprintf(stderr, "dropt: %s\n", message); #else fprintf(stderr, "dropt: %s (%s: %d)\n", message, filename, line); abort(); #endif }
pushq %rax movl %edx, %r8d movq %rsi, %rcx movq %rdi, %rdx movq 0x5a2c(%rip), %rax # 0x7ff8 movq (%rax), %rdi leaq 0x2c7e(%rip), %rsi # 0x5254 xorl %eax, %eax callq 0x2140 callq 0x2060
/jamesderlin[P]dropt/src/dropt.c
dropt_get_error_message
const dropt_char* dropt_get_error_message(dropt_context* context) { if (context == NULL) { DROPT_MISUSE("No dropt context specified."); return DROPT_TEXT_LITERAL(""); } if (context->errorDetails.err == dropt_error_none) { return DROPT_TEXT_LITERAL(""); } if (context->errorDetails.message == NULL) { if (context->errorHandler != NULL) { context->errorDetails.message = context->errorHandler(context->errorDetails.err, context->errorDetails.optionName, context->errorDetails.optionArgument, context->errorHandlerData); } else { #ifndef DROPT_NO_STRING_BUFFERS context->errorDetails.message = dropt_default_error_handler(context->errorDetails.err, context->errorDetails.optionName, context->errorDetails.optionArgument); #endif } } return (context->errorDetails.message == NULL) ? DROPT_TEXT_LITERAL("Unknown error") : context->errorDetails.message; }
pushq %rbx testq %rdi, %rdi je 0x2651 movq %rdi, %rbx movl 0x38(%rdi), %edi testl %edi, %edi je 0x262b cmpq $0x0, 0x50(%rbx) jne 0x263d movq 0x28(%rbx), %rax movq 0x40(%rbx), %rsi movq 0x48(%rbx), %rdx testq %rax, %rax je 0x2634 movq 0x30(%rbx), %rcx callq *%rax jmp 0x2639 leaq 0x2c35(%rip), %rax # 0x5267 jmp 0x264f callq 0x2190 movq %rax, 0x50(%rbx) movq 0x50(%rbx), %rcx testq %rcx, %rcx leaq 0x2a63(%rip), %rax # 0x50ae cmovneq %rcx, %rax popq %rbx retq callq 0x22c2
/jamesderlin[P]dropt/src/dropt.c
dropt_get_help
dropt_char* dropt_get_help(const dropt_context* context, const dropt_help_params* helpParams) { dropt_char* helpText = NULL; dropt_stringstream* ss = dropt_ssopen(); if (context == NULL) { DROPT_MISUSE("No dropt context specified."); } else if (ss != NULL) { const dropt_option* option; dropt_help_params hp; if (helpParams == NULL) { dropt_init_help_params(&hp); } else { hp = *helpParams; } for (option = context->options; is_valid_option(option); option++) { bool hasLongName = option->long_name != NULL && option->long_name[0] != DROPT_TEXT_LITERAL('\0'); bool hasShortName = option->short_name != DROPT_TEXT_LITERAL('\0'); /* The number of characters printed on the current line so far. */ int n; if ( option->description == NULL || (option->attr & dropt_attr_hidden)) { /* Undocumented option. Ignore it and move on. */ continue; } else if (hasLongName && hasShortName) { n = dropt_ssprintf(ss, DROPT_TEXT_LITERAL("%*s-%c, --%s"), hp.indent, DROPT_TEXT_LITERAL(""), option->short_name, option->long_name); } else if (hasLongName) { n = dropt_ssprintf(ss, DROPT_TEXT_LITERAL("%*s--%s"), hp.indent, DROPT_TEXT_LITERAL(""), option->long_name); } else if (hasShortName) { n = dropt_ssprintf(ss, DROPT_TEXT_LITERAL("%*s-%c"), hp.indent, DROPT_TEXT_LITERAL(""), option->short_name); } else { /* Comment text. Don't bother with indentation. */ assert(option->description != NULL); dropt_ssprintf(ss, DROPT_TEXT_LITERAL("%s\n"), option->description); goto next; } if (n < 0) { n = 0; } if (option->arg_description != NULL) { int m = dropt_ssprintf(ss, (option->attr & dropt_attr_optional_val) ? DROPT_TEXT_LITERAL("[=%s]") : DROPT_TEXT_LITERAL("=%s"), option->arg_description); if (m > 0) { n += m; } } /* Check for equality to make sure that there's at least one * space between the option name and its description. 
*/ if ((unsigned int) n >= hp.description_start_column) { dropt_ssprintf(ss, DROPT_TEXT_LITERAL("\n")); n = 0; } { const dropt_char* line = option->description; while (line != NULL) { int lineLen; const dropt_char* nextLine; const dropt_char* newline = dropt_strchr(line, DROPT_TEXT_LITERAL('\n')); if (newline == NULL) { lineLen = dropt_strlen(line); nextLine = NULL; } else { lineLen = newline - line; nextLine = newline + 1; } dropt_ssprintf(ss, DROPT_TEXT_LITERAL("%*s%.*s\n"), hp.description_start_column - n, DROPT_TEXT_LITERAL(""), lineLen, line); n = 0; line = nextLine; } } next: if (hp.blank_lines_between_options) { dropt_ssprintf(ss, DROPT_TEXT_LITERAL("\n")); } } helpText = dropt_ssfinalize(ss); } return helpText; }
pushq %rbp pushq %r15 pushq %r14 pushq %r13 pushq %r12 pushq %rbx pushq %rax movq %rsi, %r15 movq %rdi, %r14 callq 0x2240 testq %r14, %r14 je 0x2956 movq %rax, %rbx testq %rax, %rax je 0x276d testq %r15, %r15 je 0x277e movl (%r15), %eax movl %eax, (%rsp) movl 0x4(%r15), %r12d cmpb $0x0, 0x8(%r15) sete %al movl %eax, 0x4(%rsp) jmp 0x2793 xorl %eax, %eax addq $0x8, %rsp popq %rbx popq %r12 popq %r13 popq %r14 popq %r15 popq %rbp retq movl $0x2, (%rsp) movl $0x6, %r12d movl $0x0, 0x4(%rsp) movq (%r14), %r14 movq %r14, %rdi callq 0x2978 testb %al, %al je 0x2940 xorl %r13d, %r13d movq 0x8(%r14), %r9 testq %r9, %r9 je 0x27bb cmpb $0x0, (%r9) setne %al jmp 0x27bd xorl %eax, %eax movq 0x10(%r14), %rdx testq %rdx, %rdx je 0x291a testb $0x2, 0x30(%r14) jne 0x291a movsbl (%r14), %r8d testb %r8b, %r8b setne %cl andb %al, %cl movq %rbx, %rdi cmpb $0x1, %cl jne 0x2803 leaq 0x29b7(%rip), %rsi # 0x51a7 movl (%rsp), %edx leaq 0x2a6d(%rip), %rcx # 0x5267 xorl %eax, %eax callq 0x2200 jmp 0x2845 testb %al, %al je 0x2824 leaq 0x29a6(%rip), %rsi # 0x51b4 movl (%rsp), %edx leaq 0x2a4f(%rip), %rcx # 0x5267 movq %r9, %r8 xorl %eax, %eax callq 0x2200 jmp 0x2845 testb %r8b, %r8b je 0x2930 leaq 0x2988(%rip), %rsi # 0x51bc movl (%rsp), %edx leaq 0x2a29(%rip), %rcx # 0x5267 xorl %eax, %eax callq 0x2200 movl %eax, %r15d testl %eax, %eax cmovlel %r13d, %r15d movq 0x18(%r14), %rdx testq %rdx, %rdx je 0x2881 testb $0x4, 0x30(%r14) leaq 0x2964(%rip), %rsi # 0x51c7 leaq 0x2963(%rip), %rax # 0x51cd cmoveq %rax, %rsi movq %rbx, %rdi xorl %eax, %eax callq 0x2200 testl %eax, %eax cmovlel %r13d, %eax addl %eax, %r15d cmpl %r12d, %r15d jb 0x289a xorl %r15d, %r15d movq %rbx, %rdi leaq 0x29d3(%rip), %rsi # 0x5266 xorl %eax, %eax callq 0x2200 movq 0x10(%r14), %r13 testq %r13, %r13 je 0x28fc movq %r13, %rdi movl $0xa, %esi callq 0x20f0 testq %rax, %rax je 0x28c3 movq %rax, %rbp movq %rax, %r8 subq %r13, %r8 incq %rbp jmp 0x28d0 movq %r13, %rdi callq 0x20e0 movq %rax, %r8 xorl %ebp, %ebp movl %r12d, %edx subl 
%r15d, %edx xorl %r15d, %r15d movq %rbx, %rdi leaq 0x28ee(%rip), %rsi # 0x51d1 leaq 0x297d(%rip), %rcx # 0x5267 movq %r13, %r9 xorl %eax, %eax callq 0x2200 movq %rbp, %r13 testq %rbp, %rbp jne 0x28a3 cmpb $0x0, 0x4(%rsp) movl $0x0, %r13d jne 0x291a movq %rbx, %rdi leaq 0x2953(%rip), %rsi # 0x5266 xorl %eax, %eax callq 0x2200 addq $0x40, %r14 movq %r14, %rdi callq 0x2978 testb %al, %al jne 0x27a9 jmp 0x2940 leaq 0x288c(%rip), %rsi # 0x51c3 xorl %eax, %eax callq 0x2200 jmp 0x28fc movq %rbx, %rdi addq $0x8, %rsp popq %rbx popq %r12 popq %r13 popq %r14 popq %r15 popq %rbp jmp 0x2230 callq 0x22f4
/jamesderlin[P]dropt/src/dropt.c
dropt_init_help_params
void dropt_init_help_params(dropt_help_params* helpParams) { if (helpParams == NULL) { DROPT_MISUSE("No dropt help parameters specified."); return; } helpParams->indent = default_help_indent; helpParams->description_start_column = default_description_start_column; helpParams->blank_lines_between_options = true; }
testq %rdi, %rdi je 0x2972 movabsq $0x600000002, %rax # imm = 0x600000002 movq %rax, (%rdi) movb $0x1, 0x8(%rdi) retq pushq %rax callq 0x2326
/jamesderlin[P]dropt/src/dropt.c
is_valid_option
/* Reports whether `option` is a real table entry.  Option arrays are
 * terminated by an all-zero sentinel, so an entry is "valid" exactly
 * when at least one of its fields is set (equivalent, by De Morgan, to
 * "not every field is zero").
 */
static bool is_valid_option(const dropt_option* option)
{
    if (option == NULL)
    {
        return false;
    }

    return option->long_name != NULL
           || option->short_name != DROPT_TEXT_LITERAL('\0')
           || option->description != NULL
           || option->arg_description != NULL
           || option->handler != NULL
           || option->dest != NULL
           || option->attr != 0
           || option->extra_data != 0;
}
testq %rdi, %rdi je 0x2987 movb $0x1, %al cmpq $0x0, 0x8(%rdi) je 0x298a retq xorl %eax, %eax retq cmpb $0x0, (%rdi) jne 0x2986 cmpq $0x0, 0x10(%rdi) jne 0x2986 cmpq $0x0, 0x18(%rdi) jne 0x2986 cmpq $0x0, 0x20(%rdi) jne 0x2986 cmpq $0x0, 0x28(%rdi) jne 0x2986 cmpl $0x0, 0x30(%rdi) jne 0x2986 cmpq $0x0, 0x38(%rdi) setne %al retq
/jamesderlin[P]dropt/src/dropt.c
dropt_parse
/* Parses the command-line arguments in `argv`, dispatching each
 * recognized option (and any attached option argument) to its handler.
 *
 * `argc` may be -1 to request that `argv` be treated as NULL-terminated
 * and be counted automatically.
 *
 * Returns a pointer into `argv` at the first unprocessed argument:
 * parsing stops at the first non-option argument, at a lone "-", at the
 * end of the list, or after the first option whose parse fails.
 */
dropt_char** dropt_parse(dropt_context* context, int argc, dropt_char** argv)
{
    dropt_char* arg;
    parse_state ps;

    ps.option = NULL;
    ps.optionArgument = NULL;
    ps.argNext = argv;

    if (argv == NULL)
    {
        /* Nothing to do. */
        goto exit;
    }

    if (context == NULL)
    {
        DROPT_MISUSE("No dropt context specified.");
        /* NOTE(review): `context` is NULL on this path, so
         * `set_error_details` presumably tolerates a NULL context —
         * confirm against its definition. */
        set_error_details(context, dropt_error_bad_configuration,
                          make_char_array(DROPT_TEXT_LITERAL(""), 0),
                          NULL);
        goto exit;
    }

#ifdef DROPT_NO_STRING_BUFFERS
    if (context->errorHandler == NULL)
    {
        DROPT_MISUSE("No error handler specified.");
        set_error_details(context, dropt_error_bad_configuration,
                          make_char_array(DROPT_TEXT_LITERAL(""), 0),
                          NULL);
        goto exit;
    }
#endif

    if (argc == -1)
    {
        /* Caller passed a NULL-terminated argv; count it ourselves. */
        argc = 0;
        while (argv[argc] != NULL)
        {
            argc++;
        }
    }

    if (argc == 0)
    {
        /* Nothing to do. */
        goto exit;
    }

    init_lookup_tables(context);

    ps.argsLeft = argc;

    while (
        ps.argsLeft-- > 0
        && (arg = *ps.argNext) != NULL
        && arg[0] == DROPT_TEXT_LITERAL('-'))
    {
        if (arg[1] == DROPT_TEXT_LITERAL('\0'))
        {
            /* - */

            /* This intentionally leaves "-" unprocessed for the caller to
             * deal with. This allows construction of programs that treat
             * "-" to mean `stdin`. */
            goto exit;
        }

        ps.argNext++;

        if (arg[1] == DROPT_TEXT_LITERAL('-'))
        {
            /* Long name. (--option) */
            if (!parse_long_option(context, &ps, arg))
            {
                goto exit;
            }
        }
        else
        {
            /* Short name. (-x) */
            if (!parse_short_option(context, &ps, arg))
            {
                goto exit;
            }
        }

        /* Reset per-option state before processing the next argument. */
        ps.option = NULL;
        ps.optionArgument = NULL;
    }

exit:
    return ps.argNext;
}
pushq %rbp pushq %r15 pushq %r14 pushq %r13 pushq %r12 pushq %rbx subq $0x48, %rsp xorps %xmm0, %xmm0 movaps %xmm0, (%rsp) movq %rdx, 0x10(%rsp) testq %rdx, %rdx je 0x2e3f movq %rdi, %rbx testq %rdi, %rdi je 0x2ed9 movl %esi, %r14d cmpl $-0x1, %esi jne 0x2a40 movl $0xffffffff, %r14d # imm = 0xFFFFFFFF incl %r14d cmpq $0x0, (%rdx) leaq 0x8(%rdx), %rdx jne 0x2a33 testl %r14d, %r14d je 0x2e3f movq (%rbx), %r12 movq 0x8(%rbx), %r15 cmpq $0x0, 0x10(%rbx) jne 0x2aaa movl $0x10, %esi movq %r15, %rdi callq 0x20d0 movq %rax, 0x10(%rbx) testq %rax, %rax je 0x2aaa testq %r15, %r15 je 0x2a93 movq %rax, %rcx addq $0x8, %rcx movq %r12, %rdx movq %r15, %rsi movq %rdx, -0x8(%rcx) movq %rbx, (%rcx) addq $0x10, %rcx addq $0x40, %rdx decq %rsi jne 0x2a7f leaq 0x6bf(%rip), %rcx # 0x3159 movl $0x10, %edx movq %rax, %rdi movq %r15, %rsi callq 0x2090 cmpq $0x0, 0x18(%rbx) jne 0x2b01 movl $0x10, %esi movq %r15, %rdi callq 0x20d0 movq %rax, 0x18(%rbx) testq %rax, %rax je 0x2b01 testq %r15, %r15 je 0x2aea movq %rax, %rcx addq $0x8, %rcx movq %r15, %rdx movq %r12, -0x8(%rcx) movq %rbx, (%rcx) addq $0x10, %rcx addq $0x40, %r12 decq %rdx jne 0x2ad6 leaq 0x776(%rip), %rcx # 0x3267 movl $0x10, %edx movq %rax, %rdi movq %r15, %rsi callq 0x2090 leal -0x1(%r14), %eax movl %eax, 0x18(%rsp) testl %r14d, %r14d jle 0x2e3f movq 0x10(%rsp), %rax movq (%rax), %r15 testq %r15, %r15 je 0x2e3f cmpb $0x2d, (%r15) jne 0x2e3f movb 0x1(%r15), %cl testb %cl, %cl je 0x2e3f addq $0x8, %rax movq %rax, 0x10(%rsp) cmpb $0x2d, %cl jne 0x2b88 cmpb $0x0, 0x2(%r15) je 0x2e3f leaq 0x2(%r15), %r13 movq %r13, %rdi movl $0x3d, %esi callq 0x20f0 cmpq %r13, %rax je 0x2e53 movq %rax, %r12 testq %rax, %rax je 0x2cc8 movq %r12, %rax incq %rax movq %rax, 0x8(%rsp) jmp 0x2ce2 leaq 0x1(%r15), %r13 movq %r13, %rdi movl $0x3d, %esi callq 0x20f0 movq %rax, %r12 subq %r13, %r12 je 0x2e53 testq %rax, %rax je 0x2bb4 incq %rax movq %rax, 0x8(%rsp) jmp 0x2bcb movq %r13, %rdi callq 0x20e0 cmpq $0x0, 0x8(%rsp) jne 0x2ede movq %rax, %r12 xorl 
%r15d, %r15d cmpq %r12, %r15 je 0x2d9b movb (%r13,%r15), %al movb %al, 0x38(%rsp) testb %al, %al je 0x2eba cmpq $0x0, 0x58(%rbx) je 0x2e9b movq 0x18(%rbx), %rsi testq %rsi, %rsi leaq 0x38(%rsp), %r14 je 0x2c23 movq 0x8(%rbx), %rdx movl $0x10, %ecx movq %r14, %rdi leaq 0x838(%rip), %r8 # 0x344c callq 0x2260 testq %rax, %rax je 0x2c4a movq (%rax), %rbp jmp 0x2c4c movq (%rbx), %rbp movq %rbp, %rdi callq 0x2978 testb %al, %al je 0x2c4a movl $0x1, %edx movq %r14, %rdi movq %rbp, %rsi callq *0x58(%rbx) testl %eax, %eax je 0x2c4c addq $0x40, %rbp jmp 0x2c26 xorl %ebp, %ebp movq %rbp, (%rsp) testq %rbp, %rbp je 0x2dfb leaq 0x1(%r15), %r14 cmpq %r12, %r14 jne 0x2c76 movq %rbx, %rdi movq %rsp, %rsi callq 0x351b testl %eax, %eax je 0x2cb2 jmp 0x2e0b cmpb $0x1, 0x20(%rbx) jne 0x2c8c testq %r15, %r15 jne 0x2c8c cmpq $0x0, 0x18(%rbp) jne 0x2dba cmpq $0x0, 0x18(%rbp) je 0x2c9d testb $0x4, 0x30(%rbp) je 0x2e2a movq %rbx, %rdi movq %rbp, %rsi xorl %edx, %edx callq 0x35bd testl %eax, %eax jne 0x2e1d movq (%rsp), %rax testb $0x1, 0x30(%rax) movq %r14, %r15 je 0x2bce jmp 0x2e3f movq %r13, %rdi callq 0x20e0 cmpq $0x0, 0x8(%rsp) jne 0x2efd movq %rax, %r12 addq %r13, %r12 leaq 0x38(%rsp), %r14 movq %r12, %rax subq %r13, %rax movq %r13, 0x38(%rsp) movq %rax, 0x40(%rsp) movq 0x10(%rbx), %rsi testq %rsi, %rsi je 0x2d22 movq 0x8(%rbx), %rdx movl $0x10, %ecx movq %r14, %rdi leaq 0x5f6(%rip), %r8 # 0x3309 callq 0x2260 testq %rax, %rax je 0x2d6b movq (%rax), %rdi jmp 0x2d6d xorps %xmm0, %xmm0 movaps %xmm0, 0x20(%rsp) movq %rbx, 0x28(%rsp) movq (%rbx), %rdi movq %rdi, 0x20(%rsp) callq 0x2978 testb %al, %al leaq 0x20(%rsp), %r13 je 0x2d6b movq %r14, %rdi movq %r13, %rsi callq 0x3309 movq 0x20(%rsp), %rdi testl %eax, %eax je 0x2d6d addq $0x40, %rdi movq %rdi, 0x20(%rsp) callq 0x2978 testb %al, %al jne 0x2d45 xorl %edi, %edi movq %rdi, (%rsp) testq %rdi, %rdi je 0x2e6b movq %rbx, %rdi movq %rsp, %rsi callq 0x351b testl %eax, %eax jne 0x2e81 movq (%rsp), %rax testb $0x1, 0x30(%rax) jne 0x2e3f xorps 
%xmm0, %xmm0 movaps %xmm0, (%rsp) movl 0x18(%rsp), %eax leal -0x1(%rax), %ecx movl %ecx, 0x18(%rsp) testl %eax, %eax jg 0x2b12 jmp 0x2e3f addq %r13, %r14 movq %rbx, %rdi movq %rbp, %rsi movq %r14, %rdx callq 0x35bd testl %eax, %eax je 0x2de5 movq (%rsp), %rsi testb $0x4, 0x30(%rsi) je 0x2de7 movq %rbx, %rdi xorl %edx, %edx callq 0x35bd jmp 0x2de7 xorl %eax, %eax testl %eax, %eax je 0x2d9b movsbl (%r13,%r15), %edx movq %rbx, %rdi movl %eax, %esi movq %r14, %rcx jmp 0x2e3a movsbl (%r13,%r15), %edx movq %rbx, %rdi movl $0x4, %esi jmp 0x2e38 movsbl (%r13,%r15), %edx movq 0x8(%rsp), %rcx movq %rbx, %rdi movl %eax, %esi jmp 0x2e3a movsbl (%r13,%r15), %edx movq %rbx, %rdi movl %eax, %esi jmp 0x2e38 movsbl (%r13,%r15), %edx movq %rbx, %rdi movl $0x5, %esi xorl %ecx, %ecx callq 0x35f8 movq 0x10(%rsp), %rax addq $0x48, %rsp popq %rbx popq %r12 popq %r13 popq %r14 popq %r15 popq %rbp retq movq %r15, %rdi callq 0x20e0 movq %rbx, %rdi movl $0x4, %esi movq %r15, %rdx movq %rax, %rcx jmp 0x2e7c subq %r15, %r12 movq %rbx, %rdi movl $0x4, %esi movq %r15, %rdx movq %r12, %rcx xorl %r8d, %r8d jmp 0x2e94 subq %r15, %r12 movq 0x8(%rsp), %r8 movq %rbx, %rdi movl %eax, %esi movq %r15, %rdx movq %r12, %rcx callq 0x2f1c jmp 0x2e3f leaq 0x2581(%rip), %rdi # 0x5423 leaq 0x21b3(%rip), %rsi # 0x505c leaq 0x273b(%rip), %rcx # 0x55eb movl $0x1ee, %edx # imm = 0x1EE callq 0x2120 leaq 0x2773(%rip), %rdi # 0x5634 leaq 0x2194(%rip), %rsi # 0x505c leaq 0x271c(%rip), %rcx # 0x55eb movl $0x1ed, %edx # imm = 0x1ED callq 0x2120 callq 0x2358 leaq 0x25dd(%rip), %rdi # 0x54c2 leaq 0x2170(%rip), %rsi # 0x505c leaq 0x26ab(%rip), %rcx # 0x559e movl $0x4b5, %edx # imm = 0x4B5 callq 0x2120 leaq 0x25be(%rip), %rdi # 0x54c2 leaq 0x2151(%rip), %rsi # 0x505c leaq 0x25cb(%rip), %rcx # 0x54dd movl $0x455, %edx # imm = 0x455 callq 0x2120
/jamesderlin[P]dropt/src/dropt.c
dropt_new_context
/* Allocates and initializes a parsing context for the given option
 * table.  `options` must be terminated by an all-zero sentinel entry
 * (see `is_valid_option`).  The context borrows `options` rather than
 * copying it, so the table must outlive the context.
 *
 * Returns NULL on misuse (NULL or malformed table) or on allocation
 * failure.
 */
dropt_context* dropt_new_context(const dropt_option* options)
{
    dropt_context* context = NULL;
    size_t n;

    if (options == NULL)
    {
        DROPT_MISUSE("No option list specified.");
        goto exit;
    }

    /* Sanity-check the options.  '=' separates an option from its value
     * on the command line, so it may not appear in any option name. */
    for (n = 0; is_valid_option(&options[n]); n++)
    {
        if (
            options[n].short_name == DROPT_TEXT_LITERAL('=')
            || (
                options[n].long_name != NULL
                && dropt_strchr(options[n].long_name, DROPT_TEXT_LITERAL('=')) != NULL))
        {
            DROPT_MISUSE("Invalid option list. '=' may not be used in an option name.");
            goto exit;
        }
    }

    context = malloc(sizeof *context);
    if (context == NULL)
    {
        goto exit;
    }
    else
    {
        /* Zero all fields first, then fill in the few that matter;
         * `n` is the number of entries before the sentinel. */
        dropt_context emptyContext = { 0 };
        *context = emptyContext;

        context->options = options;
        context->numOptions = n;
        /* NULL selects the default comparator; this also clears any
         * lookup tables (there are none yet). */
        dropt_set_strncmp(context, NULL);
    }

exit:
    return context;
}
pushq %r15 pushq %r14 pushq %rbx testq %rdi, %rdi je 0x309b movq %rdi, %rbx callq 0x2978 testb %al, %al je 0x302b leaq 0x40(%rbx), %r14 xorl %r15d, %r15d cmpb $0x3d, -0x40(%r14) je 0x3096 movq -0x38(%r14), %rdi testq %rdi, %rdi je 0x3016 movl $0x3d, %esi callq 0x20f0 testq %rax, %rax jne 0x3096 incq %r15 movq %r14, %rdi callq 0x2978 addq $0x40, %r14 testb %al, %al jne 0x2fef jmp 0x302e xorl %r15d, %r15d movl $0x60, %edi callq 0x21a0 testq %rax, %rax je 0x308a movq %rax, %r14 xorps %xmm0, %xmm0 movups %xmm0, (%rax) movups %xmm0, 0x50(%rax) movups %xmm0, 0x10(%rax) movups %xmm0, 0x40(%rax) movups %xmm0, 0x30(%rax) movups %xmm0, 0x20(%rax) movq %rbx, (%rax) movq %r15, 0x8(%rax) movq 0x4f68(%rip), %rax # 0x7fd0 movq %rax, 0x58(%r14) movq 0x10(%r14), %rdi callq 0x2050 xorl %ebx, %ebx movq %rbx, 0x10(%r14) movq 0x18(%r14), %rdi callq 0x2050 movq %rbx, 0x18(%r14) jmp 0x308d xorl %r14d, %r14d movq %r14, %rax popq %rbx popq %r14 popq %r15 retq callq 0x238a callq 0x23bc
/jamesderlin[P]dropt/src/dropt.c
dropt_set_strncmp
/* Installs `cmp` as the context's string comparator, substituting the
 * default `dropt_strncmp` when `cmp` is NULL.  A NULL context is
 * reported as caller misuse.
 */
void dropt_set_strncmp(dropt_context* context, dropt_strncmp_func cmp)
{
    if (context == NULL)
    {
        DROPT_MISUSE("No dropt context specified.");
        return;
    }

    context->ncmpstr = (cmp != NULL) ? cmp : dropt_strncmp;

    /* Changing the sort method invalidates our existing lookup tables. */
    free_lookup_tables(context);
}
pushq %r14 pushq %rbx pushq %rax testq %rdi, %rdi je 0x30e1 movq %rdi, %rbx testq %rsi, %rsi jne 0x30b8 movq 0x4f18(%rip), %rsi # 0x7fd0 movq %rsi, 0x58(%rbx) movq 0x10(%rbx), %rdi callq 0x2050 xorl %r14d, %r14d movq %r14, 0x10(%rbx) movq 0x18(%rbx), %rdi callq 0x2050 movq %r14, 0x18(%rbx) addq $0x8, %rsp popq %rbx popq %r14 retq callq 0x23ee
/jamesderlin[P]dropt/src/dropt.c
dropt_free_context
/* Destroys a context created by `dropt_new_context`: clears any
 * recorded error state, frees the lookup tables, then frees the
 * context structure itself.  `free(NULL)` is harmless, and the helper
 * calls receive `context` as-is.
 */
void dropt_free_context(dropt_context* context)
{
    dropt_clear_error(context);
    free_lookup_tables(context);
    free(context);
}
pushq %r14 pushq %rbx pushq %rax movq %rdi, %rbx callq 0x20c0 testq %rbx, %rbx je 0x3114 movq 0x10(%rbx), %rdi callq 0x2050 xorl %r14d, %r14d movq %r14, 0x10(%rbx) movq 0x18(%rbx), %rdi callq 0x2050 movq %r14, 0x18(%rbx) movq %rbx, %rdi addq $0x8, %rsp popq %rbx popq %r14 jmp 0x2050
/jamesderlin[P]dropt/src/dropt.c
cmp_key_option_proxy_long
/* `bsearch`/`qsort`-style comparator: orders a long-name key (a
 * `char_array`, possibly not NUL-terminated) against an `option_proxy`
 * table entry using the context's configured string comparator.
 * A NULL name sorts before any non-NULL name; when one name is a
 * prefix of the other, the shorter one sorts first.
 */
static int cmp_key_option_proxy_long(const void* key, const void* item)
{
    const char_array* longName = key;
    const option_proxy* op = item;

    size_t optionLen;
    int ret;

    assert(longName != NULL);
    assert(op != NULL);
    assert(op->option != NULL);
    assert(op->context != NULL);
    assert(op->context->ncmpstr != NULL);

    /* Identical pointers (including both NULL) compare equal. */
    if (longName->s == op->option->long_name)
    {
        return 0;
    }
    else if (longName->s == NULL)
    {
        return -1;
    }
    else if (op->option->long_name == NULL)
    {
        return +1;
    }

    /* Although the `longName` key might not be `NUL`-terminated, the
     * `option_proxy` item we're searching against must be. */
    optionLen = dropt_strlen(op->option->long_name);
    ret = op->context->ncmpstr(longName->s, op->option->long_name,
                               MIN(longName->len, optionLen));
    if (ret != 0)
    {
        return ret;
    }

    /* Equal up to the shorter length: break the tie by length so a
     * strict prefix orders before the longer name. */
    if (longName->len < optionLen)
    {
        return -1;
    }
    else if (longName->len > optionLen)
    {
        return +1;
    }
    return 0;
}
pushq %r15 pushq %r14 pushq %r13 pushq %r12 pushq %rbx testq %rdi, %rdi je 0x33b1 testq %rsi, %rsi je 0x33d0 movq (%rsi), %rax testq %rax, %rax je 0x33ef movq 0x8(%rsi), %rcx testq %rcx, %rcx je 0x340e movq 0x58(%rcx), %r13 testq %r13, %r13 je 0x342d movq %rdi, %rbx movq (%rdi), %r14 movq 0x8(%rax), %r15 cmpq %r15, %r14 je 0x339e movl $0xffffffff, %eax # imm = 0xFFFFFFFF testq %r14, %r14 je 0x33a7 testq %r15, %r15 je 0x33a2 movq %r15, %rdi callq 0x20e0 movq %rax, %r12 movq 0x8(%rbx), %rdx cmpq %rax, %rdx cmovaeq %rax, %rdx movq %r14, %rdi movq %r15, %rsi callq *%r13 testl %eax, %eax jne 0x33a7 movl $0xffffffff, %eax # imm = 0xFFFFFFFF cmpq %r12, 0x8(%rbx) jb 0x33a7 seta %al movzbl %al, %eax jmp 0x33a7 xorl %eax, %eax jmp 0x33a7 movl $0x1, %eax popq %rbx popq %r12 popq %r13 popq %r14 popq %r15 retq leaq 0x1fea(%rip), %rdi # 0x53a2 leaq 0x1c9d(%rip), %rsi # 0x505c leaq 0x1fed(%rip), %rcx # 0x53b3 movl $0xb7, %edx callq 0x2120 leaq 0x2016(%rip), %rdi # 0x53ed leaq 0x1c7e(%rip), %rsi # 0x505c leaq 0x1fce(%rip), %rcx # 0x53b3 movl $0xb8, %edx callq 0x2120 leaq 0x2002(%rip), %rdi # 0x53f8 leaq 0x1c5f(%rip), %rsi # 0x505c leaq 0x1faf(%rip), %rcx # 0x53b3 movl $0xb9, %edx callq 0x2120 leaq 0x1ff6(%rip), %rdi # 0x540b leaq 0x1c40(%rip), %rsi # 0x505c leaq 0x1f90(%rip), %rcx # 0x53b3 movl $0xba, %edx callq 0x2120 leaq 0x1feb(%rip), %rdi # 0x541f leaq 0x1c21(%rip), %rsi # 0x505c leaq 0x1f71(%rip), %rcx # 0x53b3 movl $0xbb, %edx callq 0x2120
/jamesderlin[P]dropt/src/dropt.c
parse_option_arg
/* Feeds `ps->option` its argument (if any) via `set_option_value`.
 *
 * If the option expects an argument but none was attached with '=',
 * the next command-line token is tentatively used instead (unless the
 * option's argument is optional and no token is available).  If a
 * handler rejects an optional argument that came from the next token,
 * the option is retried with no argument at all; the token is only
 * consumed when the call ultimately succeeds with it.
 */
static dropt_error parse_option_arg(dropt_context* context, parse_state* ps)
{
    dropt_error err;

    /* True while the argument being passed came from the next argv token
     * (as opposed to an '='-attached value). */
    bool consumeNextArg = false;

    if (OPTION_TAKES_ARG(ps->option) && ps->optionArgument == NULL)
    {
        /* The option expects an argument, but none was specified with '='.
         * Try using the next item from the command-line. */
        if (ps->argsLeft > 0 && *(ps->argNext) != NULL)
        {
            consumeNextArg = true;
            ps->optionArgument = *(ps->argNext);
        }
        else if (!(ps->option->attr & dropt_attr_optional_val))
        {
            err = dropt_error_insufficient_arguments;
            goto exit;
        }
    }

    /* Even for options that don't ask for arguments, always parse and
     * consume an argument that was specified with '='. */
    err = set_option_value(context, ps->option, ps->optionArgument);

    if (
        err != dropt_error_none
        && (ps->option->attr & dropt_attr_optional_val)
        && consumeNextArg
        && ps->optionArgument != NULL)
    {
        /* The option's handler didn't like the argument we fed it. If the
         * argument was optional, try again without it. */
        consumeNextArg = false;
        ps->optionArgument = NULL;
        err = set_option_value(context, ps->option, NULL);
    }

exit:
    /* Advance past the borrowed token only on success. */
    if (err == dropt_error_none && consumeNextArg)
    {
        ps->argNext++;
        ps->argsLeft--;
    }
    return err;
}
pushq %r15 pushq %r14 pushq %rbx movq %rsi, %rbx movq %rdi, %r14 movq (%rsi), %rsi xorl %r15d, %r15d cmpq $0x0, 0x18(%rsi) je 0x353d cmpq $0x0, 0x8(%rbx) je 0x3592 xorl %r15d, %r15d movq 0x8(%rbx), %rdx movq %r14, %rdi callq 0x35bd testl %eax, %eax je 0x3579 movq (%rbx), %rsi testb $0x4, 0x30(%rsi) je 0x357b testb %r15b, %r15b je 0x357b cmpq $0x0, 0x8(%rbx) je 0x357b movq $0x0, 0x8(%rbx) xorl %r15d, %r15d movq %r14, %rdi xorl %edx, %edx callq 0x35bd jmp 0x357b xorl %eax, %eax testl %eax, %eax jne 0x358c testb %r15b, %r15b je 0x358c addq $0x8, 0x10(%rbx) decl 0x18(%rbx) popq %rbx popq %r14 popq %r15 retq cmpl $0x0, 0x18(%rbx) jle 0x35ad movq 0x10(%rbx), %rax movq (%rax), %rax testq %rax, %rax je 0x35ad movq %rax, 0x8(%rbx) movb $0x1, %r15b jmp 0x353d movl $0x5, %eax xorl %r15d, %r15d testb $0x4, 0x30(%rsi) jne 0x353d jmp 0x357b
/jamesderlin[P]dropt/src/dropt.c
set_option_value
/* Invokes `option`'s handler on `optionArgument`, passing along the
 * option's bound destination pointer.  A missing handler is caller
 * misuse and yields `dropt_error_bad_configuration`.
 */
static dropt_error set_option_value(dropt_context* context,
                                    const dropt_option* option,
                                    const dropt_char* optionArgument)
{
    assert(option != NULL);

    if (option->handler != NULL)
    {
        return option->handler(context, option, optionArgument, option->dest);
    }

    DROPT_MISUSE("No option handler specified.");
    return dropt_error_bad_configuration;
}
pushq %rax testq %rsi, %rsi je 0x35d4 movq 0x20(%rsi), %rax testq %rax, %rax je 0x35f3 movq 0x28(%rsi), %rcx popq %r8 jmpq *%rax leaq 0x1d9d(%rip), %rdi # 0x5378 leaq 0x1a7a(%rip), %rsi # 0x505c leaq 0x1f40(%rip), %rcx # 0x5529 movl $0x3d4, %edx # imm = 0x3D4 callq 0x2120 callq 0x24b6
/jamesderlin[P]dropt/src/dropt.c
set_short_option_error_details
/* Records error details for a short option, synthesizing the display
 * name "-c" (where c is `shortName`) on the stack before delegating to
 * `set_error_details`.
 */
static void set_short_option_error_details(dropt_context* context,
                                           dropt_error err,
                                           dropt_char shortName,
                                           const dropt_char* optionArgument)
{
    /* "-?" is just a placeholder; the '?' slot is overwritten below. */
    dropt_char shortNameBuf[] = DROPT_TEXT_LITERAL("-?");

    assert(context != NULL);
    assert(shortName != DROPT_TEXT_LITERAL('\0'));

    shortNameBuf[1] = shortName;

    /* Length excludes the terminating NUL. */
    set_error_details(context, err,
                      make_char_array(shortNameBuf, ARRAY_LENGTH(shortNameBuf) - 1),
                      optionArgument);
}
pushq %rax movb $0x0, 0x6(%rsp) movw $0x3f2d, 0x4(%rsp) # imm = 0x3F2D testb %dl, %dl je 0x3623 movq %rcx, %r8 leaq 0x4(%rsp), %rax movb %dl, 0x1(%rax) movl $0x2, %ecx movq %rax, %rdx callq 0x2f1c popq %rax retq leaq 0x200a(%rip), %rdi # 0x5634 leaq 0x1a2b(%rip), %rsi # 0x505c leaq 0x2025(%rip), %rcx # 0x565d movl $0x242, %edx # imm = 0x242 callq 0x2120 nop
/jamesderlin[P]dropt/src/dropt.c
dropt_handle_uint
/* dropt option handler that parses `optionArgument` as an unsigned int
 * into `*dest`.
 *
 * Errors: `dropt_error_bad_configuration` if `dest` is NULL,
 * `dropt_error_insufficient_arguments` for a missing or empty argument,
 * `dropt_error_mismatch` for a leading '-' or non-numeric text,
 * `dropt_error_overflow` on ERANGE or values above UINT_MAX (in which
 * case the saturated value UINT_MAX is the candidate), and
 * `dropt_error_unknown` for any other `errno` from `strtoul`.
 * `*dest` is written only on success.
 */
dropt_error dropt_handle_uint(dropt_context* context,
                              const dropt_option* option,
                              const dropt_char* optionArgument,
                              void* dest)
{
    dropt_error err = dropt_error_none;
    /* Fix: declared `unsigned int` (the original used `int`), since it
     * must be able to hold UINT_MAX and the parsed unsigned value
     * without implementation-defined conversion. */
    unsigned int val = 0;
    unsigned int* out = dest;

    if (out == NULL)
    {
        DROPT_MISUSE("No handler destination specified.");
        err = dropt_error_bad_configuration;
    }
    else if (
        optionArgument == NULL
        || optionArgument[0] == DROPT_TEXT_LITERAL('\0'))
    {
        err = dropt_error_insufficient_arguments;
    }
    else if (optionArgument[0] == DROPT_TEXT_LITERAL('-'))
    {
        /* `strtoul` would silently wrap a negative input; reject it. */
        err = dropt_error_mismatch;
    }
    else
    {
        dropt_char* end;
        unsigned long n;
        errno = 0;
        n = dropt_strtoul(optionArgument, &end, 10);

        /* Check that we matched at least one digit.
         * (`strtol`/`strtoul` will return 0 if fed a string with no digits.)
         */
        if (*end == DROPT_TEXT_LITERAL('\0') && end > optionArgument)
        {
            if (errno == ERANGE || n > UINT_MAX)
            {
                err = dropt_error_overflow;
                val = UINT_MAX;
            }
            else if (errno == 0)
            {
                val = (unsigned int) n;
            }
            else
            {
                err = dropt_error_unknown;
            }
        }
        else
        {
            err = dropt_error_mismatch;
        }
    }

    if (err == dropt_error_none)
    {
        *out = val;
    }
    return err;
}
pushq %r15 pushq %r14 pushq %r12 pushq %rbx pushq %rax movq %rcx, %rbx testq %rcx, %rcx je 0x36f6 movq %rdx, %r14 movl $0x5, %eax xorl %edx, %edx testq %r14, %r14 je 0x3719 movzbl (%r14), %ecx testl %ecx, %ecx je 0x3717 cmpl $0x2d, %ecx jne 0x372b movl $0x6, %eax jmp 0x3719 leaq 0x1fdb(%rip), %rdi # 0x56d8 leaq 0x1ff6(%rip), %rsi # 0x56fa movl $0x123, %edx # imm = 0x123 callq 0x21f0 movl $0x2, %eax xorl %edx, %edx jmp 0x3719 movl %ecx, %edx testl %eax, %eax jne 0x371f movl %edx, (%rbx) addq $0x8, %rsp popq %rbx popq %r12 popq %r14 popq %r15 retq callq 0x2070 movq %rax, %r15 movl $0x0, (%rax) movq %rsp, %r12 movq %r14, %rdi movq %r12, %rsi movl $0xa, %edx callq 0x2250 movq %rax, %rcx movq (%r12), %rsi xorl %edx, %edx movl $0x6, %eax cmpq %r14, %rsi jbe 0x3719 cmpb $0x0, (%rsi) jne 0x3719 movl (%r15), %edx cmpl $0x22, %edx sete %al movq %rcx, %rsi shrq $0x20, %rsi setne %sil orb %al, %sil je 0x3789 movl $0x7, %eax movl $0xffffffff, %edx # imm = 0xFFFFFFFF jmp 0x3719 xorl %esi, %esi xorl %eax, %eax testl %edx, %edx setne %al cmovnel %esi, %ecx movl %ecx, %edx jmp 0x3719
/jamesderlin[P]dropt/src/dropt_handlers.c
dropt_handle_double
/* dropt option handler that parses `optionArgument` as a `double` into
 * `*dest`.
 *
 * Errors: `dropt_error_bad_configuration` if `dest` is NULL,
 * `dropt_error_insufficient_arguments` for a missing or empty argument,
 * `dropt_error_mismatch` if the text isn't entirely numeric,
 * underflow/overflow when `strtod` sets ERANGE, and
 * `dropt_error_unknown` for any other `errno`.  `*dest` is written
 * only on success.
 */
dropt_error dropt_handle_double(dropt_context* context,
                                const dropt_option* option,
                                const dropt_char* optionArgument,
                                void* dest)
{
    dropt_error err = dropt_error_none;
    double val = 0.0;
    double* out = dest;

    if (out == NULL)
    {
        DROPT_MISUSE("No handler destination specified.");
        err = dropt_error_bad_configuration;
    }
    else if (
        optionArgument == NULL
        || optionArgument[0] == DROPT_TEXT_LITERAL('\0'))
    {
        err = dropt_error_insufficient_arguments;
    }
    else
    {
        dropt_char* end;
        errno = 0;
        val = dropt_strtod(optionArgument, &end);

        /* Check that we matched at least one digit.
         * (`strtod` will return 0 if fed a string with no digits.)
         */
        if (*end == DROPT_TEXT_LITERAL('\0') && end > optionArgument)
        {
            if (errno == ERANGE)
            {
                /* Note that setting `errno` to `ERANGE` for underflow errors
                 * is implementation-defined behavior, but glibc, BSD's
                 * libc, and Microsoft's CRT all have implementations of
                 * `strtod` documented to return 0 and to set `errno` to
                 * `ERANGE` for such cases.
                 */
                err = (ABS(val) <= DBL_MIN)
                      ? dropt_error_underflow
                      : dropt_error_overflow;
            }
            else if (errno != 0)
            {
                err = dropt_error_unknown;
            }
        }
        else
        {
            err = dropt_error_mismatch;
        }
    }

    if (err == dropt_error_none)
    {
        *out = val;
    }
    return err;
}
pushq %r15 pushq %r14 pushq %r12 pushq %rbx pushq %rax movq %rcx, %rbx testq %rcx, %rcx je 0x3989 movq %rdx, %r14 movl $0x5, %eax xorpd %xmm0, %xmm0 testq %rdx, %rdx je 0x39aa cmpb $0x0, (%r14) je 0x39aa callq 0x2070 movq %rax, %r15 movl $0x0, (%rax) movq %rsp, %r12 movq %r14, %rdi movq %r12, %rsi callq 0x20a0 movq (%r12), %rcx movl $0x6, %eax cmpq %r14, %rcx jbe 0x39aa cmpb $0x0, (%rcx) jne 0x39aa movl (%r15), %eax testl %eax, %eax je 0x39aa cmpl $0x22, %eax jne 0x39be movapd 0x1d55(%rip), %xmm1 # 0x56c0 xorpd %xmm0, %xmm1 maxsd %xmm0, %xmm1 movsd 0x1d55(%rip), %xmm2 # 0x56d0 ucomisd %xmm1, %xmm2 movl $0x7, %eax sbbl $-0x1, %eax jmp 0x39aa leaq 0x1d48(%rip), %rdi # 0x56d8 leaq 0x1d63(%rip), %rsi # 0x56fa movl $0x179, %edx # imm = 0x179 callq 0x21f0 movl $0x2, %eax xorpd %xmm0, %xmm0 testl %eax, %eax jne 0x39b2 movsd %xmm0, (%rbx) addq $0x8, %rsp popq %rbx popq %r12 popq %r14 popq %r15 retq movl $0x1, %eax jmp 0x39aa
/jamesderlin[P]dropt/src/dropt_handlers.c
dropt_handle_string
/* dropt option handler that stores the argument pointer itself into
 * `*dest` (no copy is made).  A NULL `dest` is caller misuse; a NULL
 * argument yields `dropt_error_insufficient_arguments`.  `*dest` is
 * written only on success.
 */
dropt_error dropt_handle_string(dropt_context* context,
                                const dropt_option* option,
                                const dropt_char* optionArgument,
                                void* dest)
{
    const dropt_char** out = dest;

    if (out == NULL)
    {
        DROPT_MISUSE("No handler destination specified.");
        return dropt_error_bad_configuration;
    }

    if (optionArgument == NULL)
    {
        return dropt_error_insufficient_arguments;
    }

    *out = optionArgument;
    return dropt_error_none;
}
pushq %r14 pushq %rbx pushq %rax movq %rcx, %rbx movq %rdx, %r14 testq %rcx, %rcx je 0x39e4 xorl %eax, %eax testq %r14, %r14 setne %cl sete %al leal (%rax,%rax,4), %eax jmp 0x3a03 leaq 0x1ced(%rip), %rdi # 0x56d8 leaq 0x1d08(%rip), %rsi # 0x56fa movl $0x1c9, %edx # imm = 0x1C9 callq 0x21f0 movl $0x2, %eax xorl %ecx, %ecx testb %cl, %cl je 0x3a0a movq %r14, (%rbx) addq $0x8, %rsp popq %rbx popq %r14 retq
/jamesderlin[P]dropt/src/dropt_handlers.c
dropt_safe_realloc
/* Overflow-checked wrapper around `realloc` for an array of
 * `numElements` items of `elementSize` bytes each.
 *
 * A zero-sized request frees `p` and returns NULL (the behavior of
 * `realloc(p, 0)` is implementation-defined, so we enforce our own).
 * `elementSize` can legally never be 0; the assert flags a caller that
 * swapped the arguments.  If the size computation would overflow,
 * returns NULL without touching `p`.
 */
void* dropt_safe_realloc(void* p, size_t numElements, size_t elementSize)
{
    size_t totalBytes;

    if (numElements == 0 || elementSize == 0)
    {
        free(p);
        assert(elementSize != 0);
        return NULL;
    }

    /* Detect multiplication overflow by checking that division
     * round-trips back to `numElements`. */
    totalBytes = numElements * elementSize;
    if (totalBytes / elementSize != numElements)
    {
        /* Overflow: refuse rather than under-allocate. */
        return NULL;
    }

    return realloc(p, totalBytes);
}
pushq %rbx movq %rdx, %rbx testq %rsi, %rsi setne %al testq %rdx, %rdx setne %cl testb %cl, %al jne 0x3aca callq 0x2050 testq %rbx, %rbx jne 0x3ad2 leaq 0x1d3a(%rip), %rdi # 0x57ec leaq 0x1d44(%rip), %rsi # 0x57fd leaq 0x1d96(%rip), %rcx # 0x5856 movl $0x95, %edx callq 0x2120 movq %rbx, %rax mulq %rsi jno 0x3ad6 xorl %eax, %eax popq %rbx retq movq %rax, %rsi popq %rbx jmp 0x21e0
/jamesderlin[P]dropt/src/dropt_string.c
dropt_strndup
/* Duplicates at most the first `n` characters of `s` into a freshly
 * allocated, NUL-terminated buffer.  Returns NULL on allocation
 * failure (or on the theoretical `len + 1` wraparound).
 */
dropt_char* dropt_strndup(const dropt_char* s, size_t n)
{
    dropt_char* copy;
    size_t len;

    assert(s != NULL);

    /* Measure the string without ever reading past `n` characters. */
    for (len = 0; len < n && s[len] != DROPT_TEXT_LITERAL('\0'); len++)
    {
    }

    /* Defensive paranoia: `len + 1` can only wrap when `len` is
     * SIZE_MAX, and `dropt_safe_malloc` rejects 0-sized requests
     * anyway, but favor an explicit check. */
    if (len + 1 < len)
    {
        return NULL;
    }

    copy = dropt_safe_malloc(len + 1 /* NUL */, sizeof *copy);
    if (copy != NULL)
    {
        memcpy(copy, s, len * sizeof *copy);
        copy[len] = DROPT_TEXT_LITERAL('\0');
    }

    return copy;
}
pushq %r15 pushq %r14 pushq %rbx testq %rdi, %rdi je 0x3b51 movq %rdi, %rbx xorl %r14d, %r14d testq %rsi, %rsi je 0x3b12 cmpb $0x0, (%rbx,%r14) je 0x3b12 incq %r14 cmpq %r14, %rsi jne 0x3b00 movq %rsi, %r14 cmpq $-0x1, %r14 je 0x3b45 leaq 0x1(%r14), %rsi movl $0x1, %edx xorl %edi, %edi callq 0x2220 movq %rax, %r15 testq %rax, %rax je 0x3b48 movq %r15, %rdi movq %rbx, %rsi movq %r14, %rdx callq 0x2170 movb $0x0, (%r15,%r14) jmp 0x3b48 xorl %r15d, %r15d movq %r15, %rax popq %rbx popq %r14 popq %r15 retq leaq 0x1770(%rip), %rdi # 0x52c8 leaq 0x1c9e(%rip), %rsi # 0x57fd leaq 0x1d21(%rip), %rcx # 0x5887 movl $0xcb, %edx callq 0x2120
/jamesderlin[P]dropt/src/dropt_string.c
dropt_strnicmp
/* Case-insensitive comparison of at most `n` characters of `s` and `t`
 * (strcmp-style result: negative, zero, or positive).  Identical
 * pointers compare equal without scanning.
 */
int dropt_strnicmp(const dropt_char* s, const dropt_char* t, size_t n)
{
    assert(s != NULL);
    assert(t != NULL);

    if (s == t)
    {
        return 0;
    }

    for (; n > 0; n--, s++, t++)
    {
        if (*s == DROPT_TEXT_LITERAL('\0') && *t == DROPT_TEXT_LITERAL('\0'))
        {
            /* Both strings ended together: equal. */
            break;
        }

        if (*s == *t || dropt_tolower(*s) == dropt_tolower(*t))
        {
            continue;
        }

        return (dropt_tolower(*s) < dropt_tolower(*t)) ? -1 : +1;
    }

    return 0;
}
pushq %rbp pushq %r15 pushq %r14 pushq %r13 pushq %r12 pushq %rbx pushq %rax testq %rdi, %rdi je 0x3c4f movq %rsi, %r14 testq %rsi, %rsi je 0x3c6e movq %rdx, %rbx movq %rdi, %r15 cmpq %r14, %rdi sete %al testq %rdx, %rdx sete %cl orb %al, %cl jne 0x3c2f xorl %r12d, %r12d movsbq (%r15,%r12), %r13 testq %r13, %r13 jne 0x3c0c cmpb $0x0, (%r14,%r12) je 0x3c2f movsbq (%r14,%r12), %rbp cmpb %bpl, %r13b je 0x3c27 callq 0x2270 movq (%rax), %rax movl (%rax,%rbp,4), %ecx cmpl %ecx, (%rax,%r13,4) jne 0x3c40 incq %r12 cmpq %r12, %rbx jne 0x3bfb xorl %eax, %eax addq $0x8, %rsp popq %rbx popq %r12 popq %r13 popq %r14 popq %r15 popq %rbp retq setge %al movzbl %al, %eax leal -0x1(,%rax,2), %eax jmp 0x3c31 leaq 0x1672(%rip), %rdi # 0x52c8 leaq 0x1ba0(%rip), %rsi # 0x57fd leaq 0x1c93(%rip), %rcx # 0x58f7 movl $0x10e, %edx # imm = 0x10E callq 0x2120 leaq 0x1cca(%rip), %rdi # 0x593f leaq 0x1b81(%rip), %rsi # 0x57fd leaq 0x1c74(%rip), %rcx # 0x58f7 movl $0x10f, %edx # imm = 0x10F callq 0x2120
/jamesderlin[P]dropt/src/dropt_string.c
dropt_vsnprintf
/* `vsnprintf` wrapper guaranteeing C99 semantics across toolchains:
 * returns the number of characters that a sufficiently large buffer
 * would receive (excluding NUL), and NUL-terminates whenever `n != 0`.
 * The non-C99 branches emulate that contract on older compilers.
 */
int dropt_vsnprintf(dropt_char* s, size_t n, const dropt_char* format, va_list args)
{
#if __STDC_VERSION__ >= 199901L || __GNUC__
    /* ISO C99-compliant.
     *
     * As far as I can tell, gcc's implementation of `vsnprintf` has always
     * matched the behavior required by the C99 standard (which is to return
     * the necessary buffer size).
     *
     * Note that this won't work with `wchar_t` because there is no true,
     * standard `wchar_t` equivalent of `snprintf`.  `swprintf` comes close but
     * doesn't return the necessary buffer size (and the standard does not
     * provide a guaranteed way to test if truncation occurred), and its
     * format string can't be used interchangeably with `snprintf`.
     *
     * It's simpler not to support `wchar_t` on non-Windows platforms.
     */
    assert(format != NULL);
    return vsnprintf(s, n, format, args);
#elif defined __BORLANDC__
    /* Borland's compiler neglects to `NUL`-terminate. */
    int ret;
    assert(format != NULL);
    ret = vsnprintf(s, n, format, args);
    if (n != 0)
    {
        s[n - 1] = DROPT_TEXT_LITERAL('\0');
    }
    return ret;
#elif defined _MSC_VER
    /* `_vsntprintf` and `_vsnprintf_s` on Windows don't have C99 semantics;
     * they return -1 if truncation occurs.
     */
    va_list argsCopy;
    int ret;

    assert(format != NULL);
    /* First pass computes the required length; second pass writes. */
    va_copy(argsCopy, args);
    ret = _vsctprintf(format, argsCopy);
    va_end(argsCopy);

    if (n != 0)
    {
        assert(s != NULL);

#if _MSC_VER >= 1400
        (void) _vsntprintf_s(s, n, _TRUNCATE, format, args);
#else
        /* This version doesn't necessarily `NUL`-terminate.  Sigh. */
        (void) _vsnprintf(s, n, format, args);
        s[n - 1] = DROPT_TEXT_LITERAL('\0');
#endif
    }

    return ret;
#else
#error Unsupported platform.  dropt_vsnprintf unimplemented.
    return -1;
#endif
}
testq %rdx, %rdx jne 0x20b0 pushq %rax leaq 0x1c9c(%rip), %rdi # 0x593a leaq 0x1b58(%rip), %rsi # 0x57fd leaq 0x1c9d(%rip), %rcx # 0x5949 movl $0x14e, %edx # imm = 0x14E callq 0x2120
/jamesderlin[P]dropt/src/dropt_string.c
dropt_asprintf
/* Variadic front-end to `dropt_vasprintf`: formats `format` with the
 * trailing arguments and returns whatever `dropt_vasprintf` produces
 * (a freshly built string, or NULL on failure).
 */
dropt_char* dropt_asprintf(const dropt_char* format, ...)
{
    dropt_char* s;

    va_list args;
    va_start(args, format);
    s = dropt_vasprintf(format, args);
    va_end(args);

    return s;
}
subq $0xd8, %rsp leaq 0x20(%rsp), %r10 movq %rsi, 0x8(%r10) movq %rdx, 0x10(%r10) movq %rcx, 0x18(%r10) movq %r8, 0x20(%r10) movq %r9, 0x28(%r10) testb %al, %al je 0x3e27 movaps %xmm0, 0x50(%rsp) movaps %xmm1, 0x60(%rsp) movaps %xmm2, 0x70(%rsp) movaps %xmm3, 0x80(%rsp) movaps %xmm4, 0x90(%rsp) movaps %xmm5, 0xa0(%rsp) movaps %xmm6, 0xb0(%rsp) movaps %xmm7, 0xc0(%rsp) movq %rsp, %rsi movq %r10, 0x10(%rsi) leaq 0xe0(%rsp), %rax movq %rax, 0x8(%rsi) movabsq $0x3000000008, %rax # imm = 0x3000000008 movq %rax, (%rsi) callq 0x21d0 addq $0xd8, %rsp retq
/jamesderlin[P]dropt/src/dropt_string.c
dropt_ssopen
/* Creates an empty stringstream with the default initial buffer.
 * Returns NULL if either the header or the buffer allocation fails;
 * on success the buffer holds an empty (NUL-only) string.
 */
dropt_stringstream* dropt_ssopen(void)
{
    dropt_stringstream* ss = malloc(sizeof *ss);
    if (ss == NULL)
    {
        return NULL;
    }

    ss->used = 0;
    ss->maxSize = default_stringstream_buffer_size;
    ss->string = dropt_safe_malloc(ss->maxSize, sizeof *ss->string);
    if (ss->string == NULL)
    {
        /* Couldn't get the character buffer; release the header too. */
        free(ss);
        return NULL;
    }

    ss->string[0] = DROPT_TEXT_LITERAL('\0');
    return ss;
}
pushq %rbx movl $0x18, %edi callq 0x21a0 testq %rax, %rax je 0x3e96 movq %rax, %rbx movq $0x0, 0x10(%rax) movq $0x100, 0x8(%rax) # imm = 0x100 movl $0x100, %edi # imm = 0x100 callq 0x21a0 movq %rax, (%rbx) testq %rax, %rax je 0x3e8e movb $0x0, (%rax) jmp 0x3e98 movq %rbx, %rdi callq 0x2050 xorl %ebx, %ebx movq %rbx, %rax popq %rbx retq
/jamesderlin[P]dropt/src/dropt_string.c
dropt_ssresize
/* Resizes the stringstream's buffer toward `n` characters and returns
 * the resulting capacity.  Shrink requests are clamped so the current
 * contents (plus NUL) are never truncated; if the reallocation fails,
 * the old buffer and capacity are kept.
 */
static size_t dropt_ssresize(dropt_stringstream* ss, size_t n)
{
    assert(ss != NULL);

    /* Don't allow shrinking if it will truncate the string. */
    if (n < ss->maxSize)
    {
        n = MAX(n, ss->used + 1 /* NUL */);
    }

    /* There should always be a buffer to point to. */
    assert(n > 0);

    if (n != ss->maxSize)
    {
        dropt_char* p = dropt_safe_realloc(ss->string, n, sizeof *ss->string);
        if (p != NULL)
        {
            /* Commit the new buffer only if the reallocation worked. */
            ss->string = p;
            ss->maxSize = n;
            assert(ss->maxSize > 0);
        }
    }
    return ss->maxSize;
}
pushq %r14 pushq %rbx pushq %rax movq %rsi, %rbx movq %rdi, %r14 movq 0x8(%rdi), %rax cmpq %rsi, %rax jbe 0x3f13 movq 0x10(%r14), %rcx incq %rcx cmpq %rbx, %rcx cmovaq %rcx, %rbx testq %rbx, %rbx je 0x3f41 cmpq %rax, %rbx je 0x3f39 movq (%r14), %rdi movl $0x1, %edx movq %rbx, %rsi callq 0x2220 testq %rax, %rax je 0x3f39 movq %rax, (%r14) movq %rbx, 0x8(%r14) addq $0x8, %rsp popq %rbx popq %r14 retq leaq 0x1bde(%rip), %rdi # 0x5b26 leaq 0x18ae(%rip), %rsi # 0x57fd leaq 0x1b9c(%rip), %rcx # 0x5af2 movl $0x219, %edx # imm = 0x219 callq 0x2120
/jamesderlin[P]dropt/src/dropt_string.c
dropt_ssfinalize
/* Consumes the stringstream: shrinks the buffer to fit its contents,
 * detaches the string, destroys the stream, and hands ownership of the
 * string to the caller.
 */
dropt_char* dropt_ssfinalize(dropt_stringstream* ss)
{
    dropt_char* result;

    assert(ss != NULL);

    /* Shrink to fit. */
    dropt_ssresize(ss, 0);

    /* Detach the string so closing the stream won't free it. */
    result = ss->string;
    ss->string = NULL;

    dropt_ssclose(ss);
    return result;
}
pushq %r14 pushq %rbx pushq %rax testq %rdi, %rdi je 0x3f90 movq %rdi, %rbx xorl %esi, %esi callq 0x3ef2 movq (%rbx), %r14 movq $0x0, (%rbx) movq %rbx, %rdi callq 0x2050 movq %r14, %rax addq $0x8, %rsp popq %rbx popq %r14 retq leaq 0x1a50(%rip), %rdi # 0x59e7 leaq 0x185f(%rip), %rsi # 0x57fd leaq 0x1a76(%rip), %rcx # 0x5a1b movl $0x24d, %edx # imm = 0x24D callq 0x2120
/jamesderlin[P]dropt/src/dropt_string.c
dropt_vssprintf
/** Appends `vprintf`-style formatted output to a stringstream, growing
  * the buffer as needed.
  *
  * @param ss      The destination stringstream.  Must not be NULL.
  * @param format  The `printf`-style format string.  Must not be NULL.
  * @param args    The format arguments.
  *
  * @return The number of characters appended (excluding NUL), 0 if
  *         nothing needed to be written, or a negative value on error.
  */
int
dropt_vssprintf(dropt_stringstream* ss, const dropt_char* format, va_list args)
{
    int len;
    va_list argsCopy;

    assert(ss != NULL);
    assert(format != NULL);

    /* First pass: measure the formatted output without writing it.
     * A copy of `args` is required since we traverse the list twice.
     */
    va_copy(argsCopy, args);
    len = dropt_vsnprintf(NULL, 0, format, argsCopy);
    va_end(argsCopy);

    if (len <= 0) { return len; }

    {
        size_t freeSpace = dropt_ssgetfreespace(ss);
        if ((unsigned int) len >= freeSpace)
        {
            /* It's possible that `grownSize < ss->maxSize` if
             * `GROWN_STRINGSTREAM_BUFFER_SIZE()` overflows, but it should
             * be safe since we'll recompute the available space.
             */
            size_t grownSize = GROWN_STRINGSTREAM_BUFFER_SIZE(ss->maxSize, len);
            dropt_ssresize(ss, grownSize);
            freeSpace = dropt_ssgetfreespace(ss);
        }

        assert(freeSpace > 0); /* Space always is reserved for NUL. */

        /* Second pass: write into the reserved tail of the buffer.
         * `snprintf`'s family of functions return the number of
         * characters that would be output with a sufficiently large
         * buffer, excluding `NUL`.
         */
        len = dropt_vsnprintf(ss->string + ss->used, freeSpace, format, args);

        /* We couldn't allocate enough space. */
        if ((unsigned int) len >= freeSpace) { len = -1; }

        if (len > 0) { ss->used += len; }
    }
    return len;
}
pushq %rbp pushq %r15 pushq %r14 pushq %r12 pushq %rbx subq $0x20, %rsp testq %rdi, %rdi je 0x4099 movq %rsi, %r15 testq %rsi, %rsi je 0x40b8 movq %rdx, %r14 movq %rdi, %rbx movq 0x10(%rdx), %rax movq %rsp, %rcx movq %rax, 0x10(%rcx) movups (%rdx), %xmm0 movaps %xmm0, (%rcx) xorl %edi, %edi xorl %esi, %esi movq %r15, %rdx callq 0x20b0 movl %eax, %ebp testl %eax, %eax jle 0x408a movq %rbx, %rdi callq 0x40f6 movq %rax, %r12 movl %ebp, %esi cmpq %rsi, %rax ja 0x4059 movq 0x8(%rbx), %rax leaq (%rax,%rax), %rcx addq %rax, %rsi cmpq %rsi, %rcx cmovaq %rcx, %rsi movq %rbx, %rdi callq 0x3ef2 movq %rbx, %rdi callq 0x40f6 movq %rax, %r12 testq %r12, %r12 je 0x40d7 movq (%rbx), %rdi addq 0x10(%rbx), %rdi movq %r12, %rsi movq %r15, %rdx movq %r14, %rcx callq 0x20b0 movl %eax, %ecx cmpq %rcx, %r12 movl $0xffffffff, %ebp # imm = 0xFFFFFFFF cmoval %eax, %ebp testl %ebp, %ebp jle 0x408a movl %ebp, %eax addq %rax, 0x10(%rbx) movl %ebp, %eax addq $0x20, %rsp popq %rbx popq %r12 popq %r14 popq %r15 popq %rbp retq leaq 0x1947(%rip), %rdi # 0x59e7 leaq 0x1756(%rip), %rsi # 0x57fd leaq 0x19e0(%rip), %rcx # 0x5a8e movl $0x282, %edx # imm = 0x282 callq 0x2120 leaq 0x187b(%rip), %rdi # 0x593a leaq 0x1737(%rip), %rsi # 0x57fd leaq 0x19c1(%rip), %rcx # 0x5a8e movl $0x283, %edx # imm = 0x283 callq 0x2120 leaq 0x1a06(%rip), %rdi # 0x5ae4 leaq 0x1718(%rip), %rsi # 0x57fd leaq 0x19a2(%rip), %rcx # 0x5a8e movl $0x296, %edx # imm = 0x296 callq 0x2120
/jamesderlin[P]dropt/src/dropt_string.c
dropt_ssprintf
/** `printf`-style append to a stringstream; thin varargs wrapper that
  * forwards to `dropt_vssprintf`.
  *
  * @param ss      The destination stringstream.
  * @param format  The `printf`-style format string.
  *
  * @return The number of characters appended, or a negative value on
  *         error (see `dropt_vssprintf`).
  */
int
dropt_ssprintf(dropt_stringstream* ss, const dropt_char* format, ...)
{
    int ret;
    va_list ap;

    va_start(ap, format);
    ret = dropt_vssprintf(ss, format, ap);
    va_end(ap);

    return ret;
}
subq $0xd8, %rsp leaq 0x20(%rsp), %r10 movq %rdx, 0x10(%r10) movq %rcx, 0x18(%r10) movq %r8, 0x20(%r10) movq %r9, 0x28(%r10) testb %al, %al je 0x419d movaps %xmm0, 0x50(%rsp) movaps %xmm1, 0x60(%rsp) movaps %xmm2, 0x70(%rsp) movaps %xmm3, 0x80(%rsp) movaps %xmm4, 0x90(%rsp) movaps %xmm5, 0xa0(%rsp) movaps %xmm6, 0xb0(%rsp) movaps %xmm7, 0xc0(%rsp) movq %rsp, %rdx movq %r10, 0x10(%rdx) leaq 0xe0(%rsp), %rax movq %rax, 0x8(%rdx) movabsq $0x3000000010, %rax # imm = 0x3000000010 movq %rax, (%rdx) callq 0x2100 addq $0xd8, %rsp retq
/jamesderlin[P]dropt/src/dropt_string.c
void mvm::interpreter<(anonymous namespace)::mini_set, mvm::list::mplist<mvm::meta_bytecode<mvm::bytecode_serializer>, mvm::meta_value_stack<mvm::value_stack<mvm::list::mplist<unsigned int>>>>>::interpret_instr<mvm::instr_set<(anonymous namespace)::mini_set>::consumer_instr<mvm::meta_tie<mvm::meta_bytecode, unsigned int>, true, &(anonymous namespace)::mini_set::jump(mvm::ip&, unsigned int), mvm::typestring::type_string<(char)106, (char)117, (char)109, (char)112, (char)0>>>()
// Executes one instruction of type I, dispatching at compile time on
// whether I produces a value.
//
// NOTE(review): `concept ::` refers to a project namespace named
// `concept` (pre-C++20 code), not the C++20 keyword.
void interpreter<Set, InstancesList>::interpret_instr() {
  if constexpr (concept ::is_producer_v<I>) {
    // Producer instruction: run it via consume<I>() and forward its
    // result to produce<>(), targeted at the instance selected by
    // instance_of_tie_t from the instruction's producer traits
    // (presumably the value stack for stack-producing instructions --
    // confirm against the instantiations in this file).
    this->produce<
        instance_of_tie_t<instance_list_type,
                          typename traits::producers_traits<I>::producer_type>,
        typename traits::producers_traits<I>::data_type>(this->consume<I>());
  } else {
    // Consumer-only instruction: execute purely for its side effects.
    this->consume<I>();
  }
}
pushq %rbp movq %rsp, %rbp subq $0x10, %rsp movq %rdi, -0x8(%rbp) movq -0x8(%rbp), %rdi callq 0x21c50 addq $0x10, %rsp popq %rbp retq nopl (%rax,%rax)
/kenavolic[P]mvm/include/mvm/interpreter.h
void mvm::interpreter<(anonymous namespace)::mini_set, mvm::list::mplist<mvm::meta_bytecode<mvm::bytecode_serializer>, mvm::meta_value_stack<mvm::value_stack<mvm::list::mplist<unsigned int>>>>>::interpret_instr<mvm::instr_set<(anonymous namespace)::mini_set>::consumer_producer_instr<mvm::meta_tie<mvm::meta_value_stack, unsigned int, unsigned int>, mvm::meta_tie<mvm::meta_value_stack, unsigned int>, false, &(anonymous namespace)::mini_set::add(unsigned int, unsigned int), mvm::typestring::type_string<(char)97, (char)100, (char)100, (char)0>>>()
// Executes one instruction of type I, dispatching at compile time on
// whether I produces a value.
//
// NOTE(review): `concept ::` refers to a project namespace named
// `concept` (pre-C++20 code), not the C++20 keyword.
void interpreter<Set, InstancesList>::interpret_instr() {
  if constexpr (concept ::is_producer_v<I>) {
    // Producer instruction: run it via consume<I>() and route the
    // result through produce<>() to the instance chosen by
    // instance_of_tie_t from the instruction's producer traits.
    this->produce<
        instance_of_tie_t<instance_list_type,
                          typename traits::producers_traits<I>::producer_type>,
        typename traits::producers_traits<I>::data_type>(this->consume<I>());
  } else {
    // Consumer-only instruction: execute purely for its side effects.
    this->consume<I>();
  }
}
pushq %rbp movq %rsp, %rbp subq $0x20, %rsp movq %rdi, -0x8(%rbp) movq -0x8(%rbp), %rdi movq %rdi, -0x18(%rbp) callq 0x21dc0 movq -0x18(%rbp), %rdi movl %eax, -0xc(%rbp) leaq -0xc(%rbp), %rsi callq 0x20f90 addq $0x20, %rsp popq %rbp retq nop
/kenavolic[P]mvm/include/mvm/interpreter.h
void mvm::interpreter<(anonymous namespace)::mini_set, mvm::list::mplist<mvm::meta_bytecode<mvm::bytecode_serializer>, mvm::meta_value_stack<mvm::value_stack<mvm::list::mplist<unsigned int>>>>>::interpret_instr<mvm::instr_set<(anonymous namespace)::mini_set>::consumer_producer_instr<mvm::meta_tie<mvm::meta_value_stack, unsigned int, unsigned int>, mvm::meta_tie<mvm::meta_value_stack, unsigned int, unsigned int>, false, &(anonymous namespace)::mini_set::swap(unsigned int, unsigned int), mvm::typestring::type_string<(char)115, (char)119, (char)97, (char)112, (char)0>>>()
// Executes one instruction of type I, dispatching at compile time on
// whether I produces a value.
//
// NOTE(review): `concept ::` refers to a project namespace named
// `concept` (pre-C++20 code), not the C++20 keyword.
void interpreter<Set, InstancesList>::interpret_instr() {
  if constexpr (concept ::is_producer_v<I>) {
    // Producer instruction: consume<I>() evaluates it; the returned
    // data is handed to produce<>(), parameterized by the producer
    // instance and data type declared in the instruction's traits.
    this->produce<
        instance_of_tie_t<instance_list_type,
                          typename traits::producers_traits<I>::producer_type>,
        typename traits::producers_traits<I>::data_type>(this->consume<I>());
  } else {
    // Consumer-only instruction: execute purely for its side effects.
    this->consume<I>();
  }
}
pushq %rbp movq %rsp, %rbp subq $0x20, %rsp movq %rdi, -0x8(%rbp) movq -0x8(%rbp), %rsi movq %rsi, -0x18(%rbp) leaq -0x10(%rbp), %rdi callq 0x22360 movq -0x18(%rbp), %rdi leaq -0x10(%rbp), %rsi callq 0x21580 addq $0x20, %rsp popq %rbp retq
/kenavolic[P]mvm/include/mvm/interpreter.h